├── .github
├── actions
│ └── setup-torchts
│ │ └── action.yml
├── dependabot.yml
├── labeler.yml
└── workflows
│ ├── docs.yml
│ ├── labeler.yml
│ ├── release.yml
│ ├── security.yml
│ └── test.yml
├── .gitignore
├── .pre-commit-config.yaml
├── CHANGELOG.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── codecov.yml
├── docs
├── Makefile
├── make.bat
└── source
│ ├── _static
│ ├── css
│ │ └── custom.css
│ └── images
│ │ ├── favicon.ico
│ │ └── torchTS_logo.png
│ ├── _templates
│ └── theme_variables.jinja
│ ├── conf.py
│ ├── index.rst
│ ├── modules.rst
│ └── torchts.rst
├── examples
├── dcrnn
│ ├── README.md
│ ├── main.py
│ └── requirements.txt
├── mis-regression
│ ├── README.md
│ ├── lstm-mis-regression.ipynb
│ └── requirements.txt
├── ode
│ ├── SEIR
│ │ ├── SEIR model with ODESolver.ipynb
│ │ ├── SIR_SEIR_Test.ipynb
│ │ └── SIR_data_SD_county.pt
│ └── lorenz63
│ │ ├── Lorenz63Test.ipynb
│ │ └── Lorenz63Test_UnobservedMethod.ipynb
├── quantile-regression
│ ├── README.md
│ ├── lstm-quantile-regression.ipynb
│ └── requirements.txt
└── seq2seq
│ └── End2end pipeline with traffic dataset (GPU Tesla T4, Train 25mins).ipynb
├── poetry.lock
├── pyproject.toml
├── scripts
└── build_docs.sh
├── tests
├── nn
│ ├── test_loss.py
│ └── test_ode.py
├── test_model.py
└── test_sliding_window.py
├── torchts
├── __init__.py
├── nn
│ ├── __init__.py
│ ├── graph.py
│ ├── loss.py
│ ├── model.py
│ └── models
│ │ ├── __init__.py
│ │ ├── dcrnn.py
│ │ ├── ode.py
│ │ └── seq2seq.py
└── utils
│ ├── __init__.py
│ ├── data.py
│ └── graph.py
└── website
├── .gitignore
├── README.md
├── babel.config.js
├── docs
├── deep-sequence-to-sequence-models
│ ├── _category_.json
│ ├── seq2seq-gru.md
│ └── seq2seq-lstm.md
├── deep-spatiotemporal-models
│ ├── _category_.json
│ ├── convolutional-lstm.md
│ └── diffusion-convolutional-lstm.md
├── intro.md
├── linear-time-series-models
│ ├── _category_.json
│ ├── autoregressive.md
│ └── vector-autoregressive.md
├── physics-guided-deep-sequence-models
│ ├── _category_.json
│ ├── autoode.md
│ └── hybrid-autoode.md
└── uncertainty-quantification-methods
│ ├── _category_.json
│ ├── mean-interval-score-regression.md
│ ├── quantile-regression.md
│ └── stochastic-gradient-mcmc.md
├── docusaurus.config.js
├── package-lock.json
├── package.json
├── sidebars.js
├── src
├── components
│ ├── Container
│ │ └── index.jsx
│ ├── GridBlock
│ │ └── index.jsx
│ ├── HomepageFeatures.js
│ ├── HomepageFeatures.module.css
│ └── MarkdownBlock
│ │ └── index.jsx
├── css
│ └── custom.css
└── pages
│ ├── index.js
│ └── index.module.css
└── static
├── .nojekyll
└── img
├── logo.png
├── logo2.png
├── puzzle.png
├── pytorch-logo.png
├── scalable.png
├── time-series-graph.png
├── torchTS_logo.png
└── why.png
/.github/actions/setup-torchts/action.yml:
--------------------------------------------------------------------------------
1 | name: Set up TorchTS
2 | description: Set up a development environment for TorchTS
3 |
4 | inputs:
5 | python-version:
6 | description: Python version to use with actions/setup-python
7 | default: 3.x
8 |
9 | outputs:
10 | python-version:
11 | description: Selected Python version information
12 | value: ${{ steps.python-version.outputs.version }}
13 |
14 | runs:
15 | using: composite
16 | steps:
17 | - name: Set up Python ${{ inputs.python-version }}
18 | uses: actions/setup-python@v5
19 | with:
20 | python-version: ${{ inputs.python-version }}
21 |
22 | - name: Get Python version
23 | id: python-version
24 | run: echo version=$(python -c "import sys; print('-'.join(str(v) for v in sys.version_info))") >> $GITHUB_OUTPUT
25 | shell: bash
26 |
27 | - name: Install Poetry
28 | uses: snok/install-poetry@v1
29 | with:
30 | virtualenvs-create: true
31 | virtualenvs-in-project: true
32 |
33 | - name: Restore Poetry cache
34 | uses: actions/cache@v4
35 | with:
36 | path: .venv
37 | key: poetry-${{ runner.os }}-${{ steps.python-version.outputs.version }}-${{ hashFiles('poetry.lock') }}
38 |
39 | - name: Install dependencies
40 | run: poetry install
41 | shell: bash
42 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: pip
4 | directory: /
5 | schedule:
6 | interval: monthly
7 | labels:
8 | - dependencies
9 | - python
10 |
11 | - package-ecosystem: github-actions
12 | directory: /
13 | schedule:
14 | interval: monthly
15 | labels:
16 | - dependencies
17 | - github actions
18 |
--------------------------------------------------------------------------------
/.github/labeler.yml:
--------------------------------------------------------------------------------
1 | dependencies:
2 | - changed-files:
3 | - any-glob-to-any-file:
4 | - poetry.lock
5 | - website/package-lock.json
6 |
7 | documentation:
8 | - changed-files:
9 | - any-glob-to-any-file:
10 | - docs/**
11 | - website/**
12 | - scripts/build_docs.sh
13 | - README.md
14 |
15 | examples:
16 | - changed-files:
17 | - any-glob-to-any-file: examples/**
18 |
19 | github actions:
20 | - changed-files:
21 | - any-glob-to-any-file: .github/**
22 |
23 | pre-commit:
24 | - changed-files:
25 | - any-glob-to-any-file: .pre-commit-config.yaml
26 |
27 | python:
28 | - changed-files:
29 | - any-glob-to-any-file:
30 | - '*.py'
31 | - poetry.lock
32 |
33 | source:
34 | - changed-files:
35 | - any-glob-to-any-file: torchts/**
36 |
37 | test:
38 | - changed-files:
39 | - any-glob-to-any-file: tests/**
40 |
--------------------------------------------------------------------------------
/.github/workflows/docs.yml:
--------------------------------------------------------------------------------
1 | name: Docs
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | paths:
8 | - 'docs/**'
9 | - 'scripts/**'
10 | - 'torchts/**'
11 | - 'website/**'
12 | - 'poetry.lock'
13 | pull_request:
14 | branches:
15 | - main
16 | paths:
17 | - 'docs/**'
18 | - 'scripts/**'
19 | - 'torchts/**'
20 | - 'website/**'
21 | - 'poetry.lock'
22 |
23 | jobs:
24 | build:
25 | name: Build
26 | runs-on: ubuntu-latest
27 |
28 | steps:
29 | - name: Checkout
30 | uses: actions/checkout@v4
31 |
32 | - name: Set up TorchTS
33 | uses: ./.github/actions/setup-torchts
34 | with:
35 | python-version: 3.9
36 |
37 | - name: Set up Node
38 | uses: actions/setup-node@v4
39 | with:
40 | node-version: 14
41 |
42 | - name: Build Sphinx documentation
43 | run: scripts/build_docs.sh
44 |
45 | - name: Build Docusaurus website
46 | run: |
47 | cd website
48 | npm install
49 | npm run build
50 |
51 | - name: Generate token
52 | if: success() && github.event_name == 'push'
53 | id: generate-token
54 | uses: tibdex/github-app-token@v2
55 | with:
56 | app_id: ${{ secrets.APP_ID }}
57 | private_key: ${{ secrets.APP_PRIVATE_KEY }}
58 |
59 | - name: Deploy documentation
60 | if: success() && github.event_name == 'push'
61 | uses: JamesIves/github-pages-deploy-action@v4
62 | with:
63 | token: ${{ steps.generate-token.outputs.token }}
64 | git-config-name: torchts-bot[bot]
65 | git-config-email: 88511308+torchts-bot[bot]@users.noreply.github.com
66 | branch: gh-pages
67 | folder: website/build
68 | clean: true
69 | clean-exclude: |
70 | README.md
71 |
--------------------------------------------------------------------------------
/.github/workflows/labeler.yml:
--------------------------------------------------------------------------------
1 | name: Label pull request
2 |
3 | on:
4 | - pull_request_target
5 |
6 | jobs:
7 | label:
8 | name: Label pull request
9 | runs-on: ubuntu-latest
10 |
11 | steps:
12 | - name: Checkout
13 | uses: actions/checkout@v4
14 |
15 | - name: Generate token
16 | id: generate-token
17 | uses: tibdex/github-app-token@v2
18 | with:
19 | app_id: ${{ secrets.APP_ID }}
20 | private_key: ${{ secrets.APP_PRIVATE_KEY }}
21 |
22 | - name: Label pull request
23 | uses: actions/labeler@v5
24 | with:
25 | repo-token: ${{ steps.generate-token.outputs.token }}
26 | sync-labels: true
27 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 |
3 | on:
4 | push:
5 | tags:
6 | - v*.*.*
7 | workflow_dispatch:
8 |
9 | jobs:
10 | build:
11 | name: Build
12 | runs-on: ubuntu-latest
13 |
14 | steps:
15 | - name: Checkout
16 | uses: actions/checkout@v4
17 |
18 | - name: Set up Python
19 | uses: actions/setup-python@v5
20 | with:
21 | python-version: 3.9
22 |
23 | - name: Install Poetry
24 | uses: snok/install-poetry@v1
25 | with:
26 | virtualenvs-create: true
27 | virtualenvs-in-project: true
28 |
29 | - name: Install dependencies and build project
30 | run: |
31 | poetry install --without test,docs
32 | poetry build
33 |
34 | - name: Upload build artifacts
35 | uses: actions/upload-artifact@v4
36 | with:
37 | name: poetry-build
38 | path: dist/
39 |
40 | github:
41 | name: GitHub
42 | runs-on: ubuntu-latest
43 | needs: build
44 |
45 | steps:
46 | - name: Checkout
47 | uses: actions/checkout@v4
48 |
49 | - name: Download build artifacts
50 | uses: actions/download-artifact@v4
51 | with:
52 | name: poetry-build
53 | path: dist
54 |
55 | - name: Get changes
56 | if: github.event_name == 'push'
57 | run: |
58 | tag=${GITHUB_REF#refs/tags/v}
59 | pattern="0,/$tag/d;/[0-9]\+\.[0-9]\+\.[0-9]\+/Q"
60 | sed $pattern CHANGELOG.md | head -n -1 | tail -n +2 > RELEASE.md
61 | cat RELEASE.md
62 |
63 | - name: Get changes
64 | if: github.event_name == 'workflow_dispatch'
65 | run: |
66 | pattern='0,/[0-9]\+\.[0-9]\+\.[0-9]\+/d;/[0-9]\+\.[0-9]\+\.[0-9]\+/Q'
67 | sed $pattern CHANGELOG.md | head -n -1 | tail -n +2 > RELEASE.md
68 | cat RELEASE.md
69 |
70 | - name: Generate token
71 | if: success() && github.event_name == 'push'
72 | id: generate-token
73 | uses: tibdex/github-app-token@v2
74 | with:
75 | app_id: ${{ secrets.APP_ID }}
76 | private_key: ${{ secrets.APP_PRIVATE_KEY }}
77 |
78 | - name: Create release
79 | if: success() && github.event_name == 'push'
80 | uses: softprops/action-gh-release@v2
81 | with:
82 | token: ${{ steps.generate-token.outputs.token }}
83 | body_path: RELEASE.md
84 | prerelease: ${{ contains(github.ref, '-') }}
85 | files: dist/*
86 |
87 | pypi:
88 | name: PyPI
89 | runs-on: ubuntu-latest
90 | needs: build
91 |
92 | steps:
93 | - name: Checkout
94 | uses: actions/checkout@v4
95 |
96 | - name: Download build artifacts
97 | uses: actions/download-artifact@v4
98 | with:
99 | name: poetry-build
100 | path: dist
101 |
102 | - name: Set up Python
103 | uses: actions/setup-python@v5
104 | with:
105 | python-version: 3.9
106 |
107 | - name: Install Poetry
108 | uses: snok/install-poetry@v1
109 | with:
110 | virtualenvs-create: true
111 | virtualenvs-in-project: true
112 |
113 | - name: Publish to PyPI
114 | if: success() && github.event_name == 'push'
115 | run: |
116 | poetry config pypi-token.pypi ${{ secrets.PYPI_TOKEN }}
117 | poetry publish
118 |
119 | - name: Publish to TestPyPI
120 | if: success() && github.event_name == 'workflow_dispatch'
121 | run: |
122 | poetry config repositories.testpypi https://test.pypi.org/legacy/
123 | poetry config pypi-token.testpypi ${{ secrets.TEST_PYPI_TOKEN }}
124 | poetry publish -r testpypi
125 |
--------------------------------------------------------------------------------
/.github/workflows/security.yml:
--------------------------------------------------------------------------------
1 | name: Security
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | paths-ignore:
8 | - '**.md'
9 | # every Sunday at midnight
10 | schedule:
11 | - cron: '0 0 * * 0'
12 |
13 | permissions:
14 | actions: read
15 | contents: read
16 | security-events: write
17 |
18 | jobs:
19 | codeql:
20 | name: CodeQL
21 | runs-on: ubuntu-latest
22 |
23 | steps:
24 | - name: Checkout
25 | uses: actions/checkout@v4
26 |
27 | - name: Initialize CodeQL
28 | uses: github/codeql-action/init@v3
29 | with:
30 | languages: python
31 |
32 | - name: Run CodeQL
33 | uses: github/codeql-action/analyze@v3
34 |
35 | ossar:
36 | name: OSSAR
37 | runs-on: windows-latest
38 |
39 | steps:
40 | - name: Checkout
41 | uses: actions/checkout@v4
42 |
43 | - name: Run OSSAR
44 | id: ossar
45 | uses: github/ossar-action@v1
46 |
47 | - name: Upload OSSAR results
48 | if: always()
49 | uses: github/codeql-action/upload-sarif@v3
50 | with:
51 | sarif_file: ${{ steps.ossar.outputs.sarifFile }}
52 |
53 | semgrep:
54 | name: Semgrep
55 | runs-on: ubuntu-latest
56 | container:
57 | image: semgrep/semgrep
58 |
59 | steps:
60 | - name: Checkout
61 | uses: actions/checkout@v4
62 |
63 | - name: Run Semgrep
64 | run: semgrep scan --config auto --sarif > semgrep.sarif
65 |
66 | - name: Upload Semgrep results
67 | if: always()
68 | uses: github/codeql-action/upload-sarif@v3
69 | with:
70 | sarif_file: semgrep.sarif
71 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Tests
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | paths-ignore:
8 | - 'docs/**'
9 | - '**.md'
10 | pull_request:
11 | paths-ignore:
12 | - 'docs/**'
13 | - '**.md'
14 |
15 | jobs:
16 | test:
17 | name: ${{ matrix.os }} / ${{ matrix.python-version }}
18 | runs-on: ${{ matrix.os }}-latest
19 | defaults:
20 | run:
21 | shell: bash
22 |
23 | strategy:
24 | fail-fast: false
25 | matrix:
26 | os: [Ubuntu, macOS, Windows]
27 | python-version: [3.8, 3.9]
28 |
29 | steps:
30 | - name: Checkout
31 | uses: actions/checkout@v4
32 |
33 | - name: Set up TorchTS
34 | uses: ./.github/actions/setup-torchts
35 | with:
36 | python-version: ${{ matrix.python-version }}
37 |
38 | - name: Run tests
39 | run: poetry run pytest tests/ --cov=torchts
40 |
41 | - name: Generate coverage report
42 | run: poetry run coverage xml
43 |
44 | - name: Upload coverage report
45 | if: success()
46 | uses: codecov/codecov-action@v4
47 | with:
48 | file: ./coverage.xml
49 | fail_ci_if_error: true
50 | token: ${{ secrets.CODECOV_TOKEN }}
51 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # macOS
2 | .DS_Store
3 |
4 | # Python byte code
5 | *.pyc
6 |
7 | # Jupyter notebook checkpoints
8 | .ipynb_checkpoints/
9 |
10 | # PyTorch Lightning logs
11 | lightning_logs/
12 |
13 | # pytest coverage files
14 | .coverage*
15 |
16 | # build artifacts
17 | build/
18 | dist/
19 |
20 | # data files
21 | *.h5
22 | *.npz
23 | *.pkl
24 |
25 | # vscode
26 | .vscode
27 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | ci:
2 | autofix_commit_msg: Add pre-commit fixes
3 | autofix_prs: true
4 | autoupdate_commit_msg: Update pre-commit hooks
5 | autoupdate_schedule: monthly
6 |
7 | repos:
8 | - repo: https://github.com/psf/black
9 | rev: 24.4.0
10 | hooks:
11 | - id: black
12 |
13 | - repo: https://github.com/asottile/pyupgrade
14 | rev: v3.15.2
15 | hooks:
16 | - id: pyupgrade
17 | args: [--py36-plus]
18 |
19 | - repo: https://github.com/PyCQA/isort
20 | rev: 5.13.2
21 | hooks:
22 | - id: isort
23 | additional_dependencies: [toml]
24 |
25 | - repo: https://github.com/flakeheaven/flakeheaven
26 | rev: 3.3.0
27 | hooks:
28 | - id: flakeheaven
29 | additional_dependencies:
30 | - flake8-bugbear
31 | - flake8-comprehensions
32 |
33 | - repo: https://github.com/pre-commit/pre-commit-hooks
34 | rev: v4.6.0
35 | hooks:
36 | - id: end-of-file-fixer
37 | - id: mixed-line-ending
38 | - id: trailing-whitespace
39 |
40 | - repo: https://github.com/Lucas-C/pre-commit-hooks
41 | rev: v1.5.5
42 | hooks:
43 | - id: forbid-crlf
44 | exclude: docs/make.bat
45 | - id: forbid-tabs
46 | exclude: (?i)docs/make*
47 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Change Log
2 |
3 | ## [Unreleased]
4 |
5 | ### Added
6 |
7 | - Added conda-forge README badge. ([#149](https://github.com/Rose-STL-Lab/torchTS/pull/149))
8 | - Added pre-commit.ci config and README badge. ([#156](https://github.com/Rose-STL-Lab/torchTS/pull/156))
9 | - Added quantile loss function for uncertainty quantification. ([#168](https://github.com/Rose-STL-Lab/torchTS/pull/168))
10 | - Added input to base model constructor for loss function arguments. ([#168](https://github.com/Rose-STL-Lab/torchTS/pull/168))
11 | - Added pytest-mock to dev dependencies. ([#168](https://github.com/Rose-STL-Lab/torchTS/pull/168))
12 | - Added website descriptions for new features. ([#165](https://github.com/Rose-STL-Lab/torchTS/pull/165), [#169](https://github.com/Rose-STL-Lab/torchTS/pull/169))
13 | - Added mean interval score loss function. ([#188](https://github.com/Rose-STL-Lab/torchTS/pull/188))
14 | - Added API documentation to website. ([#206](https://github.com/Rose-STL-Lab/torchTS/pull/206), [#237](https://github.com/Rose-STL-Lab/torchTS/pull/237), [#238](https://github.com/Rose-STL-Lab/torchTS/pull/238))
15 | - Added ODE solver and examples. ([#134](https://github.com/Rose-STL-Lab/torchTS/pull/134))
16 |
17 | ### Changed
18 |
19 | - Updated documentation website. ([#125](https://github.com/Rose-STL-Lab/torchTS/pull/125))
20 | - Replaced loop with list comprehension. ([#148](https://github.com/Rose-STL-Lab/torchTS/pull/148))
21 | - Expanded automatic pull request labeling. ([#154](https://github.com/Rose-STL-Lab/torchTS/pull/154), [#204](https://github.com/Rose-STL-Lab/torchTS/pull/204))
22 | - Expanded gitignore patterns. ([#155](https://github.com/Rose-STL-Lab/torchTS/pull/155))
23 | - Updated flakehell pre-commit hook. ([#177](https://github.com/Rose-STL-Lab/torchTS/pull/177))
24 | - Removed pull requests from security workflow runs. ([#185](https://github.com/Rose-STL-Lab/torchTS/pull/185))
25 | - Switched from flakehell to flakeheaven. ([#203](https://github.com/Rose-STL-Lab/torchTS/pull/203))
26 | - Removed pre-commit actions. ([#224](https://github.com/Rose-STL-Lab/torchTS/pull/224))
27 |
28 | ### Fixed
29 |
30 | - Fixed equation parentheses in spatiotemporal documentation. ([#153](https://github.com/Rose-STL-Lab/torchTS/pull/153))
31 |
32 | ## [0.1.1] - 2021-08-31
33 |
34 | This patch release sets dependency requirements for a `conda` installation. The original requirements were too strict for [conda-forge](https://conda-forge.org/).
35 |
36 | ### Added
37 |
38 | - Added pre-commit to dev dependencies. ([#127](https://github.com/Rose-STL-Lab/torchTS/pull/127))
39 |
40 | ### Changed
41 |
42 | - Changed CI workflows to run pre-commit with poetry. ([#131](https://github.com/Rose-STL-Lab/torchTS/pull/131))
43 | - Moved common workflow steps to a composite action. ([#132](https://github.com/Rose-STL-Lab/torchTS/pull/132))
44 | - Updated pre-commit hooks. ([#133](https://github.com/Rose-STL-Lab/torchTS/pull/133), [#135](https://github.com/Rose-STL-Lab/torchTS/pull/135))
45 | - Relaxed dependency requirements. ([#139](https://github.com/Rose-STL-Lab/torchTS/pull/139))
46 |
47 | ### Fixed
48 |
49 | - Fixed change log links. ([#126](https://github.com/Rose-STL-Lab/torchTS/pull/126), [#128](https://github.com/Rose-STL-Lab/torchTS/pull/128))
50 | - Fixed contributing file link. ([#137](https://github.com/Rose-STL-Lab/torchTS/pull/137))
51 | - Fixed Sphinx config metadata. ([#138](https://github.com/Rose-STL-Lab/torchTS/pull/138))
52 |
53 | ## [0.1.0] - 2021-08-16
54 |
55 | Initial release
56 |
57 | [unreleased]: https://github.com/Rose-STL-Lab/torchTS/compare/v0.1.1...main
58 | [0.1.1]: https://github.com/Rose-STL-Lab/torchTS/releases/tag/v0.1.1
59 | [0.1.0]: https://github.com/Rose-STL-Lab/torchTS/releases/tag/v0.1.0
60 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to TorchTS
2 |
3 | :wave: Welcome to TorchTS! Thank you for showing interest and taking the time to contribute. The TorchTS team would love to have your contribution.
4 |
5 | The following is a set of guidelines for contributing to TorchTS on GitHub. Please read carefully, but note that these are mostly guidelines, not rules. Use your best judgement, and feel free to propose changes to this document in a pull request.
6 |
7 | Table of contents:
8 |
9 | - [Issues](#issues)
10 | - [Reporting Bugs](#reporting-bugs)
11 | - [Before submitting a bug report](#before-submitting-a-bug-report)
12 | - [How do I submit a bug report?](#how-do-i-submit-a-bug-report)
13 | - [Suggesting Enhancements](#suggesting-enhancements)
14 | - [Before submitting an enhancement suggestion](#before-submitting-an-enhancement-suggestion)
15 | - [How do I submit an enhancement suggestion?](#how-do-i-submit-an-enhancement-suggestion)
16 | - [Pull Requests](#pull-requests)
17 | - [Contributing to Code](#contributing-to-code)
18 | - [Picking an issue](#picking-an-issue)
19 | - [Local development](#local-development)
20 | - [Running tests](#running-tests)
21 | - [Contributing to Documentation](#contributing-to-documentation)
22 | - [Creating Pull Requests](#creating-pull-requests)
23 | - [License](#license)
24 |
25 | ## Issues
26 |
27 | ### Reporting Bugs
28 |
29 | This section guides you through submitting a bug report for TorchTS. Following these guidelines helps maintainers and the community understand your report, reproduce the behavior, and find related reports.
30 |
31 | #### Before submitting a bug report
32 |
33 | Before submitting bug reports, please search the existing issues on the [issue tracker](https://github.com/Rose-STL-Lab/torchTS/issues) to verify your issue has not already been submitted. Issues pertaining to bugs are usually marked with the [bug](https://github.com/Rose-STL-Lab/torchTS/issues?q=is%3Aissue+label%3Abug) label.
34 |
35 | > **Note:** If you find a **Closed** issue that seems to be the same thing you are experiencing, open a new issue and include a link to the original issue in the body of your new one.
36 |
37 | #### How do I submit a bug report?
38 |
39 | Bugs are tracked on the [issue tracker](https://github.com/Rose-STL-Lab/torchTS/issues) where you can create a new one. When creating a bug report, please include as many details as possible. Explain the problem and include additional details to help maintainers reproduce the problem:
40 |
41 | - **Use a clear and descriptive title** for the issue to identify the problem.
42 | - **Describe the exact steps which reproduce the problem** in as many details as possible.
43 | - **Provide specific examples to demonstrate the steps to reproduce the issue**. Include links to files or GitHub projects, or copy-paste-able snippets, which you use in those examples.
44 | - **Describe the behavior you observed after following the steps** and point out what exactly is the problem with that behavior.
45 | - **Explain which behavior you expected to see instead and why.**
46 |
47 | Provide additional context by answering these questions:
48 |
49 | - **Did the problem start happening recently** (e.g. after updating to a new version of TorchTS) or has this always been a problem?
50 | - If the problem started happening recently, **can you reproduce the problem in an older version of TorchTS?** What is the most recent version in which the problem does not happen?
51 | - **Can you reliably reproduce the issue?** If not, provide details about how often the problem happens and under what conditions it normally happens.
52 |
53 | Include details about your configuration and environment:
54 |
55 | - **Which version of TorchTS are you using?**
56 | - **Which version of Python are you using?**
57 | - **Which OS type and version are you using?**
58 |
59 | ### Suggesting Enhancements
60 |
61 | This section guides you through submitting a suggestion for enhancements to TorchTS, including completely new features and minor improvements to existing functionality. Following these guidelines helps maintainers and the community understand your suggestion and find related suggestions.
62 |
63 | #### Before submitting an enhancement suggestion
64 |
65 | Before submitting enhancement suggestions, please search the existing issues on the [issue tracker](https://github.com/Rose-STL-Lab/torchTS/issues) to verify your issue has not already been submitted. Issues pertaining to enhancements are usually marked with the [enhancement](https://github.com/Rose-STL-Lab/torchTS/issues?q=is%3Aissue+label%3Aenhancement) label.
66 |
67 | > **Note:** If you find a **Closed** issue that seems to be the same thing you are suggesting, please read the conversation to learn why it was not incorporated.
68 |
69 | #### How do I submit an enhancement suggestion?
70 |
71 | Enhancement suggestions are tracked on the [issue tracker](https://github.com/Rose-STL-Lab/torchTS/issues) where you can create a new one. When creating an enhancement suggestion, please include as many details as possible and provide the following information:
72 |
73 | - **Use a clear and descriptive title** for the issue to identify the suggestion.
74 | - **Provide a step-by-step description of the suggested enhancement** in as many details as possible.
75 | - **Provide specific examples to demonstrate the steps**.
76 | - **Describe the current behavior** and **explain which behavior you expected to see instead** and why.
77 |
78 | ## Pull Requests
79 |
80 | ### Contributing to Code
81 |
82 | #### Picking an issue
83 |
84 | If you would like to take on an open issue, feel free to comment on it in the [issue tracker](https://github.com/Rose-STL-Lab/torchTS/issues). We are more than happy to discuss solutions to open issues. If you are particularly adventurous, consider addressing an issue labeled [help wanted](https://github.com/Rose-STL-Lab/torchTS/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22).
85 |
86 | > **Note:** If you are a first time contributor and are looking for an issue to take on, you might want to look through the issues labeled [good first issue](https://github.com/Rose-STL-Lab/torchTS/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22).
87 |
88 | #### Local development
89 |
90 | You will need the TorchTS source to start contributing to the codebase. Refer to the [documentation](https://rose-stl-lab.github.io/torchTS/docs/) to start using TorchTS. You will first need to clone the repository using `git` and place yourself in the new local directory:
91 |
92 | ```bash
93 | git clone git@github.com:Rose-STL-Lab/torchTS.git
94 | cd torchTS
95 | ```
96 |
97 | > **Note:** We recommend that you use a personal [fork](https://docs.github.com/en/get-started/quickstart/fork-a-repo) for this step. If you are new to GitHub collaboration, you can refer to the [Forking Projects Guide](https://guides.github.com/activities/forking/).
98 |
99 | TorchTS uses [Poetry](https://python-poetry.org/) for dependency management and we recommend you do the same when contributing to TorchTS. Refer to the [installation instructions](https://python-poetry.org/docs/#installation) to determine how to install Poetry on your system. Once installed, you can create a virtual environment and install TorchTS including all dependencies with the `install` command:
100 |
101 | ```bash
102 | poetry install
103 | ```
104 |
105 | > **Note:** Poetry uses the active Python installation to create the virtual environment. You can determine your current Python version with `python --version` and find the location of your Python executable with `which python`.
106 |
107 | #### Running tests
108 |
109 | Once your changes are complete, make sure that the tests pass on your machine:
110 |
111 | ```bash
112 | poetry run pytest tests/
113 | ```
114 |
115 | > **Note:** Your code must always be accompanied by corresponding tests. Your pull request **will not be merged** if tests are not present.
116 |
117 | TorchTS uses [pre-commit](https://pre-commit.com/) to run a series of checks that ensure all files adhere to a consistent code style and satisfy desired coding standards. These include [black](https://github.com/psf/black) and [pyupgrade](https://github.com/asottile/pyupgrade) to format code, [isort](https://github.com/PyCQA/isort) to sort import statements, and [Flake8](https://github.com/PyCQA/flake8) to check for common coding errors.
118 |
119 | To make sure that you do not accidentally commit code that does not follow the coding style, you can run these checks with the following command:
120 |
121 | ```bash
122 | poetry run pre-commit run --all-files
123 | ```
124 |
125 | > **Note:** Many of the pre-commit hooks modify your code if necessary. If pre-commit fails, it will oftentimes pass when run a second time.
126 |
127 | Failure to satisfy these checks will cause the CI to fail and your pull request **will not be merged**.
128 |
129 | ### Contributing to Documentation
130 |
131 | One of the simplest ways to get started contributing to a project is through improving documentation. You can help by adding missing sections, editing the existing content so it is more accessible, or creating new content (tutorials, FAQs, etc).
132 |
133 | Issues pertaining to the documentation are usually marked with the [documentation](https://github.com/Rose-STL-Lab/torchTS/issues?q=is%3Aissue+is%3Aopen+label%3Adocumentation) label.
134 |
135 | ### Creating Pull Requests
136 |
137 | - Be sure that your pull request contains tests that cover the changed or added code.
138 | - If your changes warrant a documentation change, the pull request must also update the documentation.
139 |
140 | > **Note:** Make sure your branch is [rebased](https://docs.github.com/en/get-started/using-git/about-git-rebase) against the latest `main` branch. A maintainer might ask you to ensure the branch is up-to-date prior to merging your pull request if changes have conflicts.
141 |
142 | All pull requests, unless otherwise instructed, need to be first accepted into the `main` branch.
143 |
144 | ## License
145 |
146 | By contributing to TorchTS, you agree that your contributions will be licensed under the [LICENSE](LICENSE) file in the root directory of this source tree.
147 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020-present UCSD STL Lab
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of
6 | this software and associated documentation files (the "Software"), to deal in
7 | the Software without restriction, including without limitation the rights to
8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
9 | of the Software, and to permit persons to whom the Software is furnished to do
10 | so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | ---
6 |
7 | [](https://github.com/Rose-STL-Lab/torchTS/actions/workflows/test.yml)
8 | [](https://github.com/Rose-STL-Lab/torchTS/actions/workflows/docs.yml)
9 | [](https://results.pre-commit.ci/latest/github/Rose-STL-Lab/torchTS/main)
10 | [](https://app.codecov.io/gh/Rose-STL-Lab/torchTS)
11 | [](https://pypi.org/project/torchts)
12 | [](https://anaconda.org/conda-forge/torchts)
13 | [](LICENSE)
14 |
15 | TorchTS is a PyTorch-based library for time series data.
16 |
17 | ***Currently under active development!***
18 |
19 | #### Why Time Series?
20 |
21 | Time series data modeling has broad significance in public health, finance and engineering. Traditional time series methods from statistics often rely on strong modeling assumptions, or are computationally expensive. Given the rise of large-scale sensing data and significant advances in deep learning, the goal of the project is to develop an efficient and user-friendly deep learning library that would benefit the entire research community and beyond.
22 |
23 | #### Why TorchTS?
24 |
25 | Existing time series analysis libraries include [statsmodels](https://www.statsmodels.org/stable/index.html) and [sktime](https://github.com/alan-turing-institute/sktime). However, these libraries only include traditional statistics tools such as ARMA or ARIMA, which do not have the state-of-the-art forecasting tools based on deep learning. [GluonTS](https://ts.gluon.ai/) is an open-source time series library developed by Amazon AWS, but is based on MXNet. [Pyro](https://pyro.ai/) is a probabilistic programming framework based on PyTorch, but is not focused on time series forecasting.
26 |
27 | ## Installation
28 |
29 | ### Installation Requirements
30 |
31 | TorchTS supports Python 3.8+ and has the following dependencies:
32 |
33 | - [PyTorch](https://pytorch.org/)
34 | - [PyTorch Lightning](https://pytorchlightning.ai/)
35 | - [SciPy](https://www.scipy.org/)
36 |
37 | ### Installing the latest release
38 |
39 | The latest release of TorchTS is easily installed either via `pip`:
40 |
41 | ```bash
42 | pip install torchts
43 | ```
44 |
45 | or via [conda](https://docs.conda.io/projects/conda/) from the [conda-forge](https://conda-forge.org/) channel:
46 |
47 | ```bash
48 | conda install -c conda-forge torchts
49 | ```
50 |
51 | You can customize your PyTorch installation (i.e. CUDA version, CPU only option)
52 | by following the [PyTorch installation instructions](https://pytorch.org/get-started/locally/).
53 |
54 | ***Important note for MacOS users:***
55 |
56 | - Make sure your PyTorch build is linked against MKL (the non-optimized version
57 | of TorchTS can be up to an order of magnitude slower in some settings).
58 | Setting this up manually on MacOS can be tricky - to ensure this works properly,
59 | please follow the [PyTorch installation instructions](https://pytorch.org/get-started/locally/).
60 | - If you need CUDA on MacOS, you will need to build PyTorch from source. Please
61 | consult the PyTorch installation instructions above.
62 |
63 | ## Getting Started
64 |
65 | Check out our [documentation](https://rose-stl-lab.github.io/torchTS/) and
66 | [tutorials](https://rose-stl-lab.github.io/torchTS/tutorials) (coming soon).
67 |
68 | ## Citing TorchTS
69 |
70 | If you use TorchTS, please cite the following paper (coming soon):
71 |
72 | > [TorchTS: A Framework for Efficient Time Series Modeling](TBD)
73 |
74 | ```bibtex
75 | @inproceedings{TBD,
76 | title={{TorchTS: A Framework for Efficient Time Series Modeling}},
77 | author={TBD},
78 | booktitle = {TBD},
79 | year={TBD},
80 | url = {TBD}
81 | }
82 | ```
83 |
84 | See [here](https://rose-stl-lab.github.io/torchTS/papers) (coming soon) for a selection of peer-reviewed papers that either build off of TorchTS or were integrated into TorchTS.
85 |
86 | ## Contributing
87 |
88 | Interested in contributing to TorchTS? Please see the [contributing guide](CONTRIBUTING.md) to learn how to help out.
89 |
90 | ## License
91 |
92 | TorchTS is [MIT licensed](LICENSE).
93 |
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | coverage:
2 | status:
3 | project:
4 | default:
5 | informational: true
6 | patch:
7 | default:
8 | informational: true
9 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/source/_static/css/custom.css:
--------------------------------------------------------------------------------
1 | .tutorials-header .header-logo {
2 | background-image: url("../images/torchTS_logo.png");
3 | background-size: cover;
4 | background-repeat: no-repeat;
5 | background-position: 50% 50%;
6 | width: 110px;
7 | }
8 |
9 | @media only screen and (max-width: 600px) {
10 | .tutorials-header .header-logo {
11 | width: 50px;
12 | }
13 | }
14 |
15 | span.pre {
16 | background: inherit;
17 | }
18 |
19 | code span.pre {
20 | color: #262626;
21 | }
22 |
23 | article.pytorch-article .class .method dt {
24 | font-weight: 500;
25 | }
26 |
--------------------------------------------------------------------------------
/docs/source/_static/images/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/docs/source/_static/images/favicon.ico
--------------------------------------------------------------------------------
/docs/source/_static/images/torchTS_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/docs/source/_static/images/torchTS_logo.png
--------------------------------------------------------------------------------
/docs/source/_templates/theme_variables.jinja:
--------------------------------------------------------------------------------
1 | {%- set external_urls = {
2 | 'github': 'https://github.com/Rose-STL-Lab/torchTS',
3 | 'home': 'https://rose-stl-lab.github.io/torchTS/api/',
4 | 'get_started': ' ',
5 | 'blog': 'https://github.com/Rose-STL-Lab/torchTS',
6 | 'resources': 'https://github.com/Rose-STL-Lab/torchTS',
7 | 'docs': 'https://rose-stl-lab.github.io/torchTS/docs/',
8 | 'twitter': 'https://twitter.com/',
9 | 'discuss': 'https://discuss.pytorch.org',
10 | 'tutorials': 'https://github.com/Rose-STL-Lab/torchTS/docs',
11 | 'previous_pytorch_versions': 'https://pytorch-lightning.rtfd.io/en/latest/',
12 | }
13 | -%}
14 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 |
13 | import os
14 | import sys
15 |
16 | import asteroid_sphinx_theme
17 |
18 | PATH_HERE = os.path.abspath(os.path.dirname(__file__))
19 | PATH_ROOT = os.path.join(PATH_HERE, "..", "..")
20 | sys.path.insert(0, os.path.abspath(PATH_ROOT))
21 |
22 |
23 | # -- Project information -----------------------------------------------------
24 |
25 | project = "TorchTS"
26 | copyright = "2020-present UCSD STL Lab"
27 | author = "TorchTS Team"
28 |
29 | # The full version, including alpha/beta/rc tags
30 | release = "0.1.1"
31 |
32 |
33 | # -- General configuration ---------------------------------------------------
34 |
35 | # Add any Sphinx extension module names here, as strings. They can be
36 | # extensions coming with Sphinx (named "sphinx.ext.*") or your custom
37 | # ones.
38 | extensions = [
39 | "sphinx.ext.autodoc",
40 | "sphinx.ext.autosummary",
41 | "sphinx.ext.doctest",
42 | "sphinx.ext.intersphinx",
43 | "sphinx.ext.todo",
44 | "sphinx.ext.coverage",
45 | "sphinx.ext.napoleon",
46 | "sphinx.ext.viewcode",
47 | "sphinx.ext.githubpages",
48 | ]
49 |
50 | # Add any paths that contain templates here, relative to this directory.
51 | templates_path = ["_templates"]
52 |
53 | # List of patterns, relative to source directory, that match files and
54 | # directories to ignore when looking for source files.
55 | # This pattern also affects html_static_path and html_extra_path.
56 | exclude_patterns = []
57 |
58 | # The name of the Pygments (syntax highlighting) style to use.
59 | pygments_style = None
60 |
61 |
62 | # -- Extension configuration -------------------------------------------------
63 |
64 | # Napoleon config
65 | napoleon_use_ivar = True
66 | napoleon_use_rtype = False
67 |
68 | # Autodoc config
69 | autodoc_inherit_docstrings = False
70 | autodoc_default_options = {"members": True, "show-inheritance": True}
71 |
72 | # Intersphinx config
73 | intersphinx_mapping = {
74 | "python": ("https://docs.python.org/3/", None),
75 | "torch": ("https://pytorch.org/docs/master/", None),
76 | }
77 |
78 |
79 | # -- Options for HTML output -------------------------------------------------
80 |
81 | # The theme to use for HTML and HTML Help pages. See the documentation for
82 | # a list of builtin themes.
83 | html_theme = "asteroid_sphinx_theme"
84 | html_theme_path = [asteroid_sphinx_theme.get_html_theme_path()]
85 |
86 | # Theme options are theme-specific and customize the look and feel of a theme
87 | # further. For a list of options available for each theme, see the
88 | # documentation.
89 | html_theme_options = {
90 | "pytorch_project": "tutorials",
91 | "canonical_url": "https://rose-stl-lab.github.io/torchTS/api/",
92 | "collapse_navigation": False,
93 | "display_version": True,
94 | "logo": "_static/images/torchTS_logo.png",
95 | "logo_only": True,
96 | }
97 |
98 | html_logo = "_static/images/torchTS_logo.png"
99 |
100 | # html_context = {
101 | # "display_github": True,
102 | # "github_user": "Rose-STL-Lab",
103 | # "github_repo": "torchTS",
104 | # "github_version": "master",
105 | # "conf_py_path": "/docs/", # needs leading and trailing slashes!
106 | # }
107 |
108 | # Add any paths that contain custom static files (such as style sheets) here,
109 | # relative to this directory. They are copied after the builtin static files,
110 | # so a file named "default.css" will overwrite the builtin "default.css".
111 | html_static_path = ["_static"]
112 |
113 | html_css_files = [
114 | "css/custom.css",
115 | ]
116 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. TorchTS documentation master file, created by
2 | sphinx-quickstart on Tue Dec 22 18:49:00 2020.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to TorchTS's documentation!
7 | ===================================
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 | :caption: Contents:
12 |
13 | modules
14 | torchts
15 |
16 |
17 |
18 | Indices and tables
19 | ==================
20 |
21 | * :ref:`genindex`
22 | * :ref:`modindex`
23 | * :ref:`search`
24 |
--------------------------------------------------------------------------------
/docs/source/modules.rst:
--------------------------------------------------------------------------------
1 | torchts
2 | =======
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 | torchts
8 |
--------------------------------------------------------------------------------
/docs/source/torchts.rst:
--------------------------------------------------------------------------------
1 | torchts package
2 | ===============
3 |
4 | Submodules
5 | ----------
6 |
7 | Base classes
8 | ------------
9 |
10 | .. automodule:: torchts.nn.model
11 |
12 | Module contents
13 | ---------------
14 |
15 | .. automodule:: torchts
16 | :members:
17 | :undoc-members:
18 | :show-inheritance:
19 |
--------------------------------------------------------------------------------
/examples/dcrnn/README.md:
--------------------------------------------------------------------------------
1 | # This tutorial serves as a basic example of training the DCRNN model.
2 |
3 | ### Download sample data to the project directory [here](https://drive.google.com/drive/folders/1VB5OGQudoEOHCf0Y-J0wuDByU8OCcsMY?usp=sharing).
4 |
5 |
6 |
7 | The traffic dataset stated above is obtained from CalTrans monitoring system and is used to predict traffic at California's road network.
8 |
9 | Major requirements include numpy, torch, and pytorch-lightning and can be found in the requirements.txt
10 | The tutorial code can be found in main.py
11 |
12 |
13 | ```
14 | pip install -r requirements.txt
15 |
16 | # Run tutorial
17 | python main.py
18 | ```
19 |
--------------------------------------------------------------------------------
/examples/dcrnn/main.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import numpy as np
4 | import torch.optim
5 |
6 | from torchts.nn.models.dcrnn import DCRNN
7 | from torchts.utils import data as utils
8 |
9 | model_config = {
10 | "cl_decay_steps": 2000,
11 | "filter_type": "dual_random_walk",
12 | "horizon": 12,
13 | "seq_len": 12,
14 | "input_dim": 2,
15 | "max_diffusion_step": 2,
16 | "num_layers": 2,
17 | "output_dim": 2,
18 | "use_curriculum_learning": True,
19 | }
20 |
21 | optimizer_args = {"lr": 0.01}
22 |
23 | # Code to retrieve the graph in the form of an adjacency matrix.
24 | # This corresponds to the distance between 2 traffic sensors in a traffic network.
25 | # For other applications it can mean anything that defines the adjacency between nodes
26 | # eg. distance between airports of different cities when predicting
27 | # covid infection rate.
28 |
29 | graph_pkl_filename = ""
30 |
31 | _, _, adj_mx = utils.load_graph_data(graph_pkl_filename)
32 |
33 | num_units = adj_mx.shape[0]
34 |
35 | model_config["num_nodes"] = num_units
36 |
37 | data = np.load(
38 | ""
39 | ) # Absolute path of train, test, val needed.
40 |
41 |
42 | def run():
43 | model = DCRNN(
44 | adj_mx,
45 | num_units,
46 | optimizer=torch.optim.SGD,
47 | optimizer_args=optimizer_args,
48 | **model_config
49 | )
50 | start = time.time()
51 | model.fit(
52 | torch.from_numpy(data["x"].astype("float32")),
53 | torch.from_numpy(data["y"].astype("float32")),
54 | max_epochs=10,
55 | batch_size=8,
56 | )
57 | end = time.time() - start
58 | print("Training time taken %f" % (end - start))
59 |
60 |
61 | if __name__ == "__main__":
62 | run()
63 |
--------------------------------------------------------------------------------
/examples/dcrnn/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | torch>=1.7.1
3 | pytorch-lightning>=1.3.1
4 |
--------------------------------------------------------------------------------
/examples/mis-regression/README.md:
--------------------------------------------------------------------------------
1 | # LSTM with Mean Interval Score Regression
2 |
3 | This notebook provides an example of using a mean interval score loss function with a LSTM network.
4 |
5 | Assuming TorchTS is already installed, the additional dependencies can be installed with `pip`:
6 |
7 | ```bash
8 | pip install -r requirements.txt
9 | ```
10 |
--------------------------------------------------------------------------------
/examples/mis-regression/requirements.txt:
--------------------------------------------------------------------------------
1 | matplotlib
2 | ipykernel
3 |
--------------------------------------------------------------------------------
/examples/ode/SEIR/SEIR model with ODESolver.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## SEIR Epidemiologic Model with TorchTS ODE Solver\n",
8 | "\n",
9 | "This example solves a compartmental model used in epidemiology, known as \n",
10 | "[SEIR](https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_SEIR_model) \n",
11 | "model, using the TorchTS ODE Solver.\n",
12 | "\n",
13 | "The ODE (ordinary differential equation) system, in generic form, is\n",
14 | "\n",
15 | "\\begin{equation*}\n",
16 | " \\frac{d \\mathbf{A}}{d t} = \\mathbf{F}(a_n)\n",
17 | "\\end{equation*}\n",
18 | "\n",
19 | "or in uncollapsed form\n",
20 | "\n",
21 | "\\begin{align*}\n",
22 | "\\frac{d a_1}{d t} =& f_1(a_1, a_2, \\dots a_n) \\\\\n",
23 | "\\frac{d a_2}{d t} =& f_2(a_1, a_2, \\dots a_n) \\\\\n",
24 | "\\vdots \\\\\n",
25 | "\\frac{d a_n}{d t} =& f_n(a_1, a_2, \\dots a_n) \\\\\n",
26 | "\\end{align*}\n",
27 | "\n",
28 | "In the case of the SEIR model, the ODEs are:\n",
29 | " \n",
30 | "\\begin{align}\n",
31 | "\\frac{d S_t}{dt} &= - \\frac{\\beta_t I_t S_t}{N}, \\\\\n",
32 | "\\frac{d E_t}{dt} &= \\frac{\\beta_t I_t S_t}{N} - \\sigma_t E_t \\\\\n",
33 | "\\frac{d I_t}{dt} &= \\sigma_t E_t - \\gamma_t I_t \\\\\n",
34 | "\\frac{d R_t}{dt} &= \\gamma_t I_t\n",
35 | "\\end{align}\n",
36 | "\n",
37 | "Here, the compartment $S$ (susceptible population) represents the first variable $a_1$, and $f_1$ is denoted by the right-hand-side of the top equation.\n",
38 | "The coefficients $\\beta$, $\\sigma$ and $\\gamma$ (either constant or time-dependent, still to be implemented) are optimized using PyTorch."
39 | ]
40 | },
41 | {
42 | "cell_type": "markdown",
43 | "metadata": {},
44 | "source": [
45 | "In order to solve various different systems of ODEs, the following quantities must be somehow parameterized and passed to the solver:\n",
46 | "- The equations $f_n$. \n",
47 | "- The coefficients (and a flag denoting whether they are time-dependent)\n",
48 | "- The data used to train the model. \n",
49 | "- An optional output modifier which takes the numerical solution and brings it into a shape consistent with the training data such that the loss can be calculated, as explained below.\n",
50 | "- Other user-controllable parameters including the temporal discretization (time step),\n",
51 | "optimizer, scheduler learning rate and loss function."
52 | ]
53 | },
54 | {
55 | "cell_type": "markdown",
56 | "metadata": {},
57 | "source": [
58 | "### Specification of the variables, parameters and functions\n",
59 | "\n",
60 | "Variables, ODEs and initial values for coefficients are passed to the function during initialization:\n",
61 | "\n",
62 | " ODESolver(inivals, cfuncs, inicoeffs, dt, time-dependent=False, \n",
63 | " solver='Euler', outvar=None)\n",
64 | "\n",
65 | "##### Variables\n",
66 | "For working with the solver it's easier and more intuitive to assign actual variable names to each quantity.\n",
67 | "They are provided as the keys in the dictionary passed to \n",
68 | "the positional argument `inivals`. The values of the dictionary provide the initial values assigned to each quantity.\n",
69 | "For the SEIR model, one could use, for example:\n",
70 | "\n",
71 |      inivals = {\"S\": 0.95, \"E\": 0., \"I\": 0.05, \"R\": 0}\n",
72 | " \n",
73 | "(Here, the population in each compartment is normalized by the total population size).\n",
74 | "\n",
75 | "##### Functions\n",
76 | "\n",
77 | "A function specifying the right-hand term in each of the system of ODE's is passed to the solver as a dictionary in the\n",
78 | "positional argument `cfuncs`. The equation pertaining to each variable is stored under the key representing the respective variable.\n",
79 | "Each function receives two positional arguments, `cvars` and `coeffs`. These will be dictionaries containing the \n",
80 | "system's current variables and coefficients. As an example, the function describing the ODE for quantity $S$ would be defined as:\n",
81 | "\n",
82 | " def fS(cvar, coeffs):\n",
83 | " return (-coeffs[\"beta\"] * cvar[\"I\"] * cvar[\"S\"])\n",
84 | "\n",
85 | "##### Inicoeffs\n",
86 | "\n",
87 | "Initial values for the coefficients are provided in the dictionary inicoeffs. Each coefficient must be present,\n",
88 | "and the keys in the dictionary passed to the solver must represent the names of the coefficients that will be optimized through data.\n",
89 | "\n",
90 | "In the SEIR example, one could use\n",
91 | " \n",
92 | " inicoeffs={\"beta\": 0.50, \"gamma\": 0.20, \"sigma\": 0.20}\n",
93 | "\n",
94 | "##### Output quantities (and time skip, still ToDo):\n",
95 | "\n",
96 | "By default, the network returns a time-dependent value for every variable and every discrete time step resolved \n",
97 | "during numerical integration. Depending on the model and data, a training value may not be available for every quantity.\n",
98 | "For example, only data on the currently infected and susceptible population was typically available during the Covid-19 pandemic, but not on the exposed population.\n",
99 | "(Alternatively, one might only have data on cumulative reported cases (`cumsum(I)`), not currently infectious cases.\n",
100 | "Handling such cases will require functionality that is not yet implemented.)\n",
101 | "\n",
102 | "The keyword variable `outvar` designates the names of the output quantities that are present in the data and used \n",
103 | "for computation of the loss. In addition, it indicates the order in which they are present in the training dataset\n",
104 | "(format described below).\n",
105 | "By default, `outvar` is the same as `variables`. In the case of the compartmental model, one would use\n",
106 | "`outvar = [\"S\",\"I\",\"R\"]`, as no data on the exposed population $E$ is available."
107 | ]
108 | },
109 | {
110 | "cell_type": "markdown",
111 | "metadata": {},
112 | "source": [
113 | "### Training the network\n",
114 | "\n",
115 | "The solver is trained using \n",
116 | "\n",
117 | " ODESolver.fit(train_data, num_epochs=100, lr=0.001, optimizer= None, \n",
118 | " scheduler = None, loss_fun=torch.nn.MSELoss()):\n",
119 | "\n",
120 | "The PyTorch tensor `train_data` is assumed to be of the shape `(nt,nvar)`, where `nt` is the number of time steps used for training and `nvar` is the number of output variables (consistent with `len(outvar)`). The sampling interval of the data is expected to be the same as the timestep `dt` passed to the solver during initialization.\n",
121 | "\n",
122 | "By default, the value `None` is passed for the optimizer and scheduler, \n",
123 | "and the network uses \n",
124 | "\n",
125 | " optimizer = torch.optim.Adam(self.coeffs.values(), 0.001) \n",
126 | " scheduler=torch.optim.lr_scheduler.StepLR(optimizer, step_size= 1, gamma=0.95)\n",
127 | " \n",
128 | "The learning rate can be changed using the keyword argument `lr`. If a custom optimizer is provided,\n",
129 | "the optimizer's coded learning rate is used. A warning is issued if the user tries to set both.\n",
130 | "\n",
131 | "### Predicting\n",
132 | "\n",
133 | "Predictions are made using \n",
134 | "\n",
135 | " ODESolver.predict(nt)\n",
136 | " \n",
137 | "Where `nt` represents the total number of time steps in the prediction (starting from the same origin time as used in the training data)."
138 | ]
139 | },
140 | {
141 | "cell_type": "code",
142 | "execution_count": 1,
143 | "metadata": {},
144 | "outputs": [
145 | {
146 | "name": "stdout",
147 | "output_type": "stream",
148 | "text": [
149 | "cpu\n"
150 | ]
151 | }
152 | ],
153 | "source": [
154 | "import torch\n",
155 | "import numpy as np\n",
156 | "import torch.nn as nn\n",
157 | "from scipy.integrate import odeint\n",
158 | "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
159 | "import os\n",
160 | "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n",
161 | "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"4\"\n",
162 | "#device = torch.device(\"cpu\")\n",
163 | "print(device)\n",
164 | "import matplotlib.pyplot as plt\n",
165 | "\n",
166 | "from tqdm import tqdm"
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "metadata": {},
172 | "source": [
173 | "If torchts is properly installed, the module should be in the path already. This code adds the\n",
174 | "module `ode` to the current path assuming the example is called directly from the repository's tree:"
175 | ]
176 | },
177 | {
178 | "cell_type": "code",
179 | "execution_count": 2,
180 | "metadata": {},
181 | "outputs": [],
182 | "source": [
183 | "try:\n",
184 | " from torchts.nn.models import ode\n",
185 | "except ModuleNotFoundError:\n",
186 | " import sys\n",
187 | " sys.path.append(\"../../..\")\n",
188 | " from torchts.nn.models import ode"
189 | ]
190 | },
191 | {
192 | "cell_type": "markdown",
193 | "metadata": {},
194 | "source": [
195 | "Defining all the functions. Here, the coefficient $\\beta$ is assumed to be normalized by the total population $N$ already. Population sizes are assumed normalized by the total population as well."
196 | ]
197 | },
198 | {
199 | "cell_type": "code",
200 | "execution_count": 3,
201 | "metadata": {},
202 | "outputs": [],
203 | "source": [
204 | "def fS(cvar, coeffs):\n",
205 | " return -coeffs[\"beta\"] * cvar[\"I\"] * cvar[\"S\"]\n",
206 | "\n",
207 | "def fE(cvar, coeffs):\n",
208 | " return coeffs[\"beta\"] * cvar[\"I\"] * cvar[\"S\"] - coeffs[\"sigma\"] * cvar[\"E\"]\n",
209 | "\n",
210 | "def fI(cvar, coeffs):\n",
211 | " return coeffs[\"sigma\"] * cvar[\"E\"] - coeffs[\"gamma\"] * cvar[\"I\"]\n",
212 | "\n",
213 | "def fR(cvar, coeffs):\n",
214 | " return coeffs[\"gamma\"] * cvar[\"I\"]\n",
215 | "\n",
216 | "cfuncs={\"S\": fS, \"E\": fE, \"I\": fI, \"R\": fR}"
217 | ]
218 | },
219 | {
220 | "cell_type": "markdown",
221 | "metadata": {},
222 | "source": [
223 | "The training and validation data is loaded from the provided `PyTorch` file and normalized:"
224 | ]
225 | },
226 | {
227 | "cell_type": "code",
228 | "execution_count": 4,
229 | "metadata": {},
230 | "outputs": [
231 | {
232 | "name": "stdout",
233 | "output_type": "stream",
234 | "text": [
235 | "Total population: 3338330\n"
236 | ]
237 | }
238 | ],
239 | "source": [
240 | "SIR=torch.load(\"SIR_data_SD_county.pt\")\n",
241 | "\n",
242 | "npop=SIR[0,0].numpy().copy()\n",
243 | "print(\"Total population: %d\" % npop)\n",
244 | "SIR[:,:] = SIR[:,:] / torch.tensor(npop)"
245 | ]
246 | },
247 | {
248 | "cell_type": "markdown",
249 | "metadata": {},
250 | "source": [
251 | "Preparing the training data. Here, just a short excerpt of the full time range is used.\n",
252 | "Time-dependent coefficients are needed to fit longer time windows. This is not yet implemented in the ODE library, but in the specific solver for the SEIR model:"
253 | ]
254 | },
255 | {
256 | "cell_type": "code",
257 | "execution_count": 5,
258 | "metadata": {},
259 | "outputs": [
260 | {
261 | "name": "stdout",
262 | "output_type": "stream",
263 | "text": [
264 | "nt=37\n"
265 | ]
266 | }
267 | ],
268 | "source": [
269 | "training_data=SIR.float()[350:380,:]\n",
270 | "nt_train=training_data.shape[0]\n",
271 | "test_data=SIR.float()[350:410,:]\n",
272 | "nt=test_data.shape[0]\n",
273 | "print(\"nt=%d\" % nt)"
274 | ]
275 | },
276 | {
277 | "cell_type": "code",
278 | "execution_count": 12,
279 | "metadata": {},
280 | "outputs": [
281 | {
282 | "name": "stdout",
283 | "output_type": "stream",
284 | "text": [
285 | "{'S': tensor(0.9471), 'I': tensor(0.0126), 'R': tensor(0.0403), 'E': tensor(0.0251)}\n"
286 | ]
287 | }
288 | ],
289 | "source": [
290 | "#The values at the beginning of the observation are taken as initial values\n",
291 | "inivals={}\n",
292 | "for n,var in enumerate([\"S\",\"I\",\"R\"]):\n",
293 | " inivals[var] = training_data[0,n]\n",
294 | "\n",
295 | "#The fraction of the initial exposed population is assumed twice the infected fraction\n",
296 | "inivals[\"E\"] = inivals[\"I\"] * 2.\n",
297 | "print(inivals)"
298 | ]
299 | },
300 | {
301 | "cell_type": "code",
302 | "execution_count": 13,
303 | "metadata": {},
304 | "outputs": [],
305 | "source": [
306 | "inicoeffs={\"beta\": 0.50, \"gamma\": 0.20, \"sigma\": 0.20}"
307 | ]
308 | },
309 | {
310 | "cell_type": "markdown",
311 | "metadata": {},
312 | "source": [
313 | "The function is initialized using the initial values, initial coefficients and functions (right-hand-sides of ODEs) provided above. Also specified are the output variables given in the training data, in the order in which they are present:"
314 | ]
315 | },
316 | {
317 | "cell_type": "code",
318 | "execution_count": 22,
319 | "metadata": {},
320 | "outputs": [
321 | {
322 | "name": "stderr",
323 | "output_type": "stream",
324 | "text": [
325 | "/Users/marty/Documents/GitHub/torchTS/examples/ode/SEIR/../../../torchts/nn/models/ode.py:39: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n",
326 | " name: torch.tensor(value, device=self.device)\n"
327 | ]
328 | }
329 | ],
330 | "source": [
331 | "# myopt=torch.optim.SGD(seir.coeffs.values(), 0.005)\n",
332 | "seir=ode.ODESolver(\n",
333 | " cfuncs, \n",
334 | " inivals, \n",
335 | " inicoeffs, \n",
336 | " dt=1, \n",
337 | " outvar=[\"S\",\"I\",\"R\"], \n",
338 | " optimizer=torch.optim.SGD,\n",
339 | " optimizer_args={\"lr\":0.005}\n",
340 | " )"
341 | ]
342 | },
343 | {
344 | "cell_type": "code",
345 | "execution_count": 23,
346 | "metadata": {},
347 | "outputs": [
348 | {
349 | "name": "stderr",
350 | "output_type": "stream",
351 | "text": [
352 | "GPU available: False, used: False\n",
353 | "TPU available: False, using: 0 TPU cores\n",
354 | "IPU available: False, using: 0 IPUs\n",
355 | "/opt/anaconda3/envs/torchTS/lib/python3.9/site-packages/pytorch_lightning/trainer/configuration_validator.py:122: UserWarning: You defined a `validation_step` but have no `val_dataloader`. Skipping val loop.\n",
356 | " rank_zero_warn(\"You defined a `validation_step` but have no `val_dataloader`. Skipping val loop.\")\n",
357 | "\n",
358 | " | Name | Type | Params\n",
359 | "------------------------------\n",
360 | "------------------------------\n",
361 | "3 Trainable params\n",
362 | "0 Non-trainable params\n",
363 | "3 Total params\n",
364 | "0.000 Total estimated model params size (MB)\n",
365 | "/opt/anaconda3/envs/torchTS/lib/python3.9/site-packages/pytorch_lightning/trainer/data_loading.py:116: UserWarning: The dataloader, train_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 4 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.\n",
366 | " rank_zero_warn(\n",
367 | "/opt/anaconda3/envs/torchTS/lib/python3.9/site-packages/pytorch_lightning/trainer/data_loading.py:412: UserWarning: The number of training samples (1) is smaller than the logging interval Trainer(log_every_n_steps=50). Set a lower value for log_every_n_steps if you want to see logs for the training epoch.\n",
368 | " rank_zero_warn(\n"
369 | ]
370 | },
371 | {
372 | "name": "stdout",
373 | "output_type": "stream",
374 | "text": [
375 | "Epoch 999: 100%|██████████| 1/1 [00:00<00:00, 10.96it/s, loss=0.00016, v_num=18, train_loss_step=0.000148, train_loss_epoch=0.000148] \n"
376 | ]
377 | }
378 | ],
379 | "source": [
380 | "seir.fit(training_data, training_data, max_epochs=1000)"
381 | ]
382 | },
383 | {
384 | "cell_type": "markdown",
385 | "metadata": {},
386 | "source": [
387 | "The values of the optimized coefficients can be retrieved like this:"
388 | ]
389 | },
390 | {
391 | "cell_type": "code",
392 | "execution_count": 24,
393 | "metadata": {},
394 | "outputs": [
395 | {
396 | "data": {
397 | "text/plain": [
398 | "{'beta': 0.40758606791496277,\n",
399 | " 'gamma': 0.3061103820800781,\n",
400 | " 'sigma': 0.016801264137029648}"
401 | ]
402 | },
403 | "execution_count": 24,
404 | "metadata": {},
405 | "output_type": "execute_result"
406 | }
407 | ],
408 | "source": [
409 | "seir.get_coeffs()"
410 | ]
411 | },
412 | {
413 | "cell_type": "markdown",
414 | "metadata": {},
415 | "source": [
416 | "Training using a custom optimizer:"
417 | ]
418 | },
419 | {
420 | "cell_type": "code",
421 | "execution_count": 25,
422 | "metadata": {},
423 | "outputs": [],
424 | "source": [
425 | "#myopt=torch.optim.SGD(seir.coeffs.values(), 0.005)\n",
426 | "#seir.fit(training_data, optimizer=myopt)"
427 | ]
428 | },
429 | {
430 | "cell_type": "code",
431 | "execution_count": 26,
432 | "metadata": {},
433 | "outputs": [],
434 | "source": [
435 | "y_predict=seir.predict(nt)"
436 | ]
437 | },
438 | {
439 | "cell_type": "code",
440 | "execution_count": 27,
441 | "metadata": {},
442 | "outputs": [],
443 | "source": [
444 | "import matplotlib.pyplot as plt"
445 | ]
446 | },
447 | {
448 | "cell_type": "code",
449 | "execution_count": 28,
450 | "metadata": {},
451 | "outputs": [
452 | {
453 | "data": {
454 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjcAAAG2CAYAAACDLKdOAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAA9hAAAPYQGoP6dpAAByS0lEQVR4nO3deXgT1d4H8G+SNt3SfYVCS9l3ZKcgKItlFRAEXKGyKCqbKCjXhcV7ZVFZBEFRBH3lFgUBERBBBNkE2XdlaUtLaWmhNqVt2rTJvH/kZpK0SZpA26Tp9/M882Tm5MzMmabt/HLOmXMkgiAIICIiInIRUkcXgIiIiKgiMbghIiIil8LghoiIiFwKgxsiIiJyKQxuiIiIyKUwuCEiIiKXwuCGiIiIXAqDGyIiInIpDG6IiIjIpTC4ISIiIpfi0ODmwIEDePzxx1G7dm1IJBJs3bq13H1+//13tG/fHp6enqhfvz4+++yzyi8oERERVRsODW7y8/PRpk0brFixwqb8SUlJGDBgALp3747Tp0/jX//6F6ZMmYIffvihkktKRERE1YXEWSbOlEgk2LJlC4YOHWoxz5tvvolt27bh8uXLYtrEiRNx9uxZ/PHHH1VQSiIiInJ2bo4ugD3++OMPxMXFmaT17dsXa9asQXFxMdzd3cvsU1RUhKKiInFbq9UiOzsbwcHBkEgklV5mIiIienCCIODevXuoXbs2pFLrDU/VKrjJyMhAeHi4SVp4eDhKSkpw584d1KpVq8w+8+fPx9y5c6uqiERERFSJUlNTUadOHat5qlVwA6BMbYu+Vc1SLcysWbMwffp0cVupVCIqKgqpqanw8/OrvIISERFRhcnNzUXdunXh6+tbbt5qFdxEREQgIyPDJC0zMxNubm4IDg42u4+Hhwc8PDzKpPv5+TG4ISIiqmZs6VJSrca5iY2NxZ49e0zSdu/ejQ4dOpjtb0NERPSgsrOz0axZMzRr1gzZ2dmOLg7ZwKHBTV5eHs6cOYMzZ84A0D3qfebMGaSkpADQNSmNHj1azD9x4kTcuHED06dPx+XLl/HVV19hzZo1eOONNxxRfCIiqgGUSiX++usv/PXXX1AqlY4uDtnAoc1SJ06cQM+ePcVtfd+YMWPGYN26dUhPTxcDHQCIiYnBzp078dprr+HTTz9F7dq18cknn2D48OFVXnYiIqoZFAqF2M9DoVA4uDRkC6cZ56aq5Obmwt/fH0qlkn1uiIiIqgl77t/Vqs8NERERUXkY3BAREZFLYXBDRERkRVJSEiQSCSQSCZKSkhxdHLIBgxsiIiJyKdVqED8iIqKqFhkZibVr14rr5PwY3BAREVkhl8sRHx/v6GKQHdgsRURERC6FwQ0REZEVSqUS7dq1Q7t27ThCcTXBZikiIiIrsrOzcfr0aXHd39/fwSWi8jC4ISIissLLywuenp7iOjk/BjdERERWREREQKVSOboYZAcGNxWkqAj473+Bq1eByEggOhrw8wM8PQEvL8Oi3/b0BKTs8URERFThGNxUkH/+AcaOtW8fuVwX6Pj4AN7ehsV429y6PjiyZ/HwANz4aRMRUQ3A210FkUqBnj2B48eBggJAqy2bRyIBjOdgV6t1S1V1vpfJdEGOPtgxXjeX5uGhC8CMX62t2xpkeXoy0CKi6iMlJQXR0dEAgBs3biAqKsrBJaLy8BZTQcLCgN9+060LApCRoWuiunZN93r1KjBwIPD880BhIXD2LPDww5aP17MnMHQokJ8P5OToji2X6wIUmUx3DkEANBpdk1hhofmlpMRwTI1GF3gVFFTmT8I2Mln5gZW5dbnc9sXd3TQ4s2c/icTRPyEichYajcbsOjkvBjeVQCIBatXSLT16lH1foQCaNgW+/BK4fh1ISwNu3TK8KpXAI48AU6bo8v/1F7BokeVzvf468OGHuu2cHGD6dCA4GAgJAQIDdX1/vL11AUJgoO78RUWGoMjcq35Rq8uum0sz
Pp6lpbjYUG6NRhe45edX6I++whjXSJVeSr+nD4jc3AxLedulAzBLgZm5NOP3jNNkMkf/1IhcU0REBBYsWCCuk/NjcOMgwcHAuHHm38vLM23W8vYGpk3TBT9paUBWFnDnjq6fjyDo3tdLTwf+NwWKWVOmAMuW6dZv3wbatwf8/Q1LQIDu1c8P6N4dePxxXV61Gti9WxcY+frqXo0XW26s5mqZSgdU5a3rm/KMl+Ji8+n6wMva+/o8xs2F+utVq4F798q/LmchlZoGPsbBlKVFJjPdlkoNtYP6dXNp+nXj4MtcM6alV0tBovE2gzVyFl5eXnjzzTcdXQyyA4MbJ6RQmG5HRQFLlpTNV1ICZGfrbmh6gYHABx/ogh/9cveurkZHqdQ1n+n9848hYDKnsNAQ3GRnG9bNmTABWL1at56fr6uxMtcp2tsb6NwZePppXV6tFvjmG9OnygIDDetBQboaMD1BqJwmo5IS22qmLNVqaTS6IKqkRLdYWtdvGwdcpYMvc9vGQZp+u3RAptUayuMKZDJD4GQchJW3bvxaet2W7cpOs6Ws5gJPc9tSKZtQicxhcFONubmZBisAEBEBzJpl2/716gEnThgCH/2Sk6Orseje3ZBXowE6dtTVKumXe/cMfXrkckPe/Hzg1CnL5x092hDcFBYCL7xgOe+wYcAPP+jWBUEXyLm7m++s/MgjwNKlhn1feEG3j7nO0w0aAE89Zci7Y4fu1bhvj36/kBBdgKlXVKQrg6Mf5dcHVJaCIo3GNKjSL5bSS0p0AZJGo1ssrRtv6wMt41qw0uul06wFjqWvT6XSLWRZ6eBH/zeibwo13jaXZq5WztxS+n1rNXvmXu9nKb2vtW1zQWF5aaXXLf1NK5VKPP6/b3c//fQTRyiuBhjc1GCenrpmKVtERgJ//mmaJgi6G1RenmkTgp8fsHOnofNyfr5hvaAAeOghQ16tFujf33ATKyw0rKtUumYyveJiw821sLBsGUs/wLB+vWk/H2O9epkGNy+8oKvJMqdTJ+DYMcN2o0ZAaqrun2LpZpWWLYHt2w154+N1ncvNNb/UqgW8+64h7//9n64M5ppyfH11ncz1rl/XXZtxPh8fXU2XXF49v80Lgi7AKh3wmAvKrK0b/57oF/175ratvVdeXlvTbD2m8fWUTrNEf+304CQS8zVuEokC2dnfA9CgWTMfsdnUWvBX3nvW0uwJACWSsq/lrZfe19IxLQWbtrx6egJdujjus2RwQ/dNIjHcsI15euoCFlsoFLpAyBbu7rpAwVyfncJC3Y3d2Icflu0grd9u0sQ0b6dOuhorc01PwcGmefU1DPqbinGn6NJlOHgQSEw0fz2NG5sGNx9+CJw/bz5v7dqmzYfPPw/88Yf5vP7+umvRe+opXd7ST455eOiaCX/+2ZB32TLgwgXzeeVyXWd1fSB7+LCuTKWfRtNvt2plyKuv5dO/Z24oAInEUJtQumm2ptM/GVm61s1cLZy+2bP0Yuk9S7VzpRdzNXm2vgqCaVp5S3n5Sr9fOnAsHSQap1kLFPUBdtlgUQZA15E4Pb2yPmXXEhHh2J8VgxuqNiQSIDzc9vxTp9qed9cu2/Neu6YLksw1sxg3zwG6vlL//GO+L49xrRQADBgAtGhh/um00gGWr6+ub5L+feN/xqXLkJ4OpKSYvxbjzugA8MsvpsFOaa+/blhftgzYuNFy3nv3DEHK1KmmHd2NOyN7eOgCKn0T64IFuqZIS4/xL11qyLtzJ3DoUNnASr8+dKjh55yYqPs5lD6efp+wMEP/tcrq23W/JBJDs0rpLxNkH32gZa2WzJYaO0sBoaUmXFvT7ifg01+TfogQ43VL75Xe19IxS5+7vABXvx4S4tjPmcENkZ18fXWLLQYPtv24/3vS1Ca//GK6bdz/pfS3zi+/1NXkmOsLU7pTcnw80K2b+T40xcWmfRKaN9f1c7LU58Y4yFKrTc+j1Rpq3ADTmpykJF1fMEsWLjSs790LLF5s
OW+XLobgZt064P33Lec9fhzo0EG3vmgR8Pbb5muvPDyAr7825P3xR+Dzzy0HYxMnAs2a6fJeuAD8+qv54Eou1zUT6580zsnR1YxZCtzc3JwrAKsujJueyHUxuCFyAfp/1v+buNhEo0a2H2fkSNvzzplje97/+z/gq69Mn/oyDoSM+2dOmaILCkvn0S+BgYa8PXroAjtLHZj9/Ax5g4N1AZm5IK+4uGwwptFYHvTSuC9XYqL12q5BgwzBzZEjwGuvWc67eTPwxBO69Z07gWeftZz3//4PeO453fquXbrpX0r31dKvv/GGbhBRQBdgffSR5cf0H3vMELhlZZkGY6WPW6eOoTa1uBjIzTUN3Bh8kaMwuCGiSieRGG545WnRQrfYYsgQ3WKLqVNtb6qcPl03DpWlJ8CaNzfk7dtX1+RmKW/9+oa8DRoAzzxTNmjTbxtX5ctkum3j5kxjxs1T9+5Z798werRhPSVFV/NkiUJhCG4uX9aV15L584G33tKtnz9f9gEF41HCZ8ww5E1OBoYPN18zJpfrhp3QnzcnR1dbZ2ncpObNdX3mAF2Adfiw5cBNP06XvTj9QvXD4IaIqBQfH91ii+bNTYMda3r31i22GDVKt+jpOxXrgx3j/lKPPQacPm25ibBjR0PeJk10TaCWgjHjwNLXV1deS4/4G/cbK930CBg6Lefnm9Z25eZaHy4iKsoQ3GRnW2+yffVVQ3Dzzz+mTxWWNmaMrnkS0JVJ/3ShuUCof39DE2hJiQbAVgDFmDw5CEFBpsFYq1a6Y+utXl22X5n+NTQUaNPGkDcpyTDvn3Fezr/3YPjjIyKqBow7FZfuCB4QYDrEgjUNGgC2Drbbtq2uWcoWXboYBsMsPa5R6U7x9erpmt0sjYVkXAOkUOhq3GwJxgRBN7WNpXGXSjc96pe8vLLX06qVYT04OAJADABg27ayeQcPNg1uJk2yPAxF796mP9N27UyfbtSTSnX93w4cMKR1764blNVcMNaokek4X++/b3loiZAQ0xq5gwd1Q2+Yy+vlBdSta8hbUqJ/PN789TkLBjdERFQhZDLD6OLW+PnZPlxEWJjpTdua8HBdU5olxtPa+Pnpmugs1UoZD5Dq4+OFL76wPIq5cc2dIOiaSs31AysqMm2mBAy1O+Y63Zd25QqQmWn+2tq1M91eu1ZXK2RO48amwc2rr1oehiIyErh507Ddowdw9Kj5WqmQkLLjoTkKgxsiIqoRjJ/2k8lMaySscXMDxo+3La9EYn2IhNJu39a96gdFNQ60So+Y/NNPug7utgwt8eqrug7h5gIs4yltAF1tl1RqPsgz7pQPGJ6y1J/beP49Z5oIWSIIpR8GdW25ubnw9/eHUqmEX+lPjYiIqJS8vDw89b8hzTds2ABFDR5lUqk0HRy1dIf32NjKO7c992/W3BAREVmRlZWFHf+bgC4rK6tGBzf+/qZDNzgrBjdERERWyOVyyP436p/clvEMyOEY3BAREVkRGRmJEs5OWq1YmOCdiIiIqHpicENEREQuhcENERGRFWlpaZBKpZBKpUhLS3N0ccgG7HNDRERkhVqthn7UFLW5eSbI6TC4ISIisiIiIgJjx44V18n5MbghIiKywsvLC2vWrHF0McgO7HNDRERELoXBDRERkRV5eXl45pln8MwzzyDP3BTi5HQY3BAREVmRlZWFhIQEJCQkICsry9HFIRuwzw0REZEVcrkc0v9N0c3pF6oHBjdERERWREZGQqPROLoYZAc2SxEREZFLYXBDRERELoXBDRERkRVpaWmQyWSQyWScfqGaYJ8bIiIiK9RqNbRarbhOzo/BDRERkRWhoaEYNmyYuE7Oj8ENERGRFQqFAj/88IOji0F2YJ8bIiIicikMboiIiKxQqVSYNGkSJk2aBJVK5ejikA0kgiAIji5EVcrNzYW/vz+USiX8/PwcXRwiInJySUlJqF+/PgAgMTERMTExDi5RzWTP/Zt9boiIiKyQyWSQSCTiOjk/
BjdERERWREVFiY+CU/XAPjdERETkUhjcEBERkUthcENERGRFWloa3N3d4e7uzukXqgn2uSEiIrJCrVajpKREXCfnx+CGiIjIitDQUPTu3VtcJ+fH4IaIiMgKhUKBX3/91dHFIDuwzw0RERG5FNbcEBERWaFSqbBgwQIAwFtvvQUvLy8Hl4jK4/Cam5UrVyImJgaenp5o3749Dh48aDX/+vXr0aZNG3h7e6NWrVp44YUXcPfu3SoqLRER1TQZGRmYN28e5s2bh4yMDEcXh2zg0ODmu+++w7Rp0/D222/j9OnT6N69O/r374+UlBSz+Q8dOoTRo0dj3LhxuHjxIjZu3Ijjx49j/PjxVVxyIiKqKYynXOD0C9WDQyfO7Ny5M9q1a4dVq1aJac2aNcPQoUMxf/78Mvk/+ugjrFq1CtevXxfTli9fjkWLFiE1NdWmc3LiTCIiourHnvu3w2pu1Go1Tp48ibi4OJP0uLg4HDlyxOw+Xbt2xc2bN7Fz504IgoDbt29j06ZNGDhwoMXzFBUVITc312QhIiIi1+Ww4ObOnTvQaDQIDw83SQ8PD7fYptm1a1esX78eo0aNglwuR0REBAICArB8+XKL55k/fz78/f3FpW7duhV6HURERORcHN6hWD+NvJ4gCGXS9C5duoQpU6bgvffew8mTJ7Fr1y4kJSVh4sSJFo8/a9YsKJVKcbG1+YqIiAjQdSj29PSEp6cnOxRXEw57FDwkJAQymazML0pmZmaZ2hy9+fPno1u3bpgxYwYAoHXr1vDx8UH37t3x73//G7Vq1Sqzj4eHBzw8PCr+AoiIqEZQqVQoKioS18n5OazmRi6Xo3379tizZ49J+p49e9C1a1ez+xQUFEAqNS2yvue6A/tFExGRCwsKCkKXLl3QpUsXBAUFObo4ZAOHDuI3ffp0PP/88+jQoQNiY2OxevVqpKSkiM1Ms2bNQlpaGr755hsAwOOPP44JEyZg1apV6Nu3L9LT0zFt2jR06tQJtWvXduSlEBGRi/L398cff/zh6GKQHRwa3IwaNQp3797FvHnzkJ6ejpYtW2Lnzp2Ijo4GAKSnp5uMeRMfH4979+5hxYoVeP311xEQEIBevXph4cKFjroEIiIicjIOHefGETjODRER2UOtVuPzzz8HALz00kuQy+UOLlHNZM/9m3NLERERWZGWloYpU6YAAAYNGoSYmBgHl4jK4/BHwYmIiIgqEmtuiIiIrIiJieETudUMa26IiIjIpTC4ISIiIpfC4IaIiMiKjIwM+Pj4wMfHh9MvVBPsc0NERGSFSqVCQUGBuE7Oj8ENERGRFUFBQWjZsqW4Ts6PwQ0REZEV/v7+OH/+vKOLQXZgnxsiIiJyKay5ISIiskKtVuPHH38EAAwZMoTTL1QDDG6IiIisSEtLw8iRIwEAiYmJnH6hGmCzFBEREbkU1twQERFZwekXqh/W3BAREZFLYXBDRERELoXBDRERkRVZWVkICAhAQEAAsrKyHF0csgGDGyIiIivy8vKgVCqhVCqRl5fn6OKQDdihmIiIyAp/f380aNBAXCfnx+CGiIjIiqCgIFy7ds3RxSA7sFmKiIiIXAprboiIiKzQaDQ4evQoAKBLly6QyWQOLhGVh8ENERGRFSkpKXj44YcBcPqF6oLNUkRERORSWHNDRERkBadfqH5Yc0NEREQuhcENERERuRQGN0RERFZkZWUhNDQUoaGhnH6hmmBwQ0REZEVeXh7u3LmDO3fucPqFaoIdiomIiKzw9/dHZGSkuE7Oj8ENERGRFUFBQbh586aji0F2YLMUERERuRTW3BAREVmh0WiQkpICAIiKiuL0C9UAa26IiIisSElJQf369VG/fn0xyCHnxuCGiIiIXAqbpYiIiKyIiopCYmKiuE7Oj8ENERGRFTKZjDOBVzNsliIiIiKXwpobCzQaDYqLix1djBrL3d2dTyQQkVPIzs5G69atAQDnzp1DUFCQg0tE5bnv4EatViMzMxNardYk
vbq3RwqCgIyMDOTk5Di6KDVeQEAAIiIiIJFIHF0UIqrBlEol0tLSxHUGN87P7uDm6tWrGDt2LI4cOWKSLggCJBIJNBpNhRXOEfSBTVhYGLy9vXljdQBBEFBQUIDMzEwAQK1atRxcIiKqyRQKBUJCQsR1cn52Bzfx8fFwc3PD9u3bUatWLZe6+Ws0GjGwCQ4OdnRxajQvLy8AQGZmJsLCwthERUQOw9nAqx+7g5szZ87g5MmTaNq0aWWUx6H0fWy8vb0dXBICDJ9DcXExgxsiIrKZ3U9LNW/eHHfu3KmMsjgNV6qNqs74ORAR0f2wO7hZuHAhZs6cif379+Pu3bvIzc01WYiIiFxJUlISJBIJJBIJkpKSHF0csoHdzVJ9+vQBAPTu3dsk3VU6FNc0arUazZs3x9dff41u3brZtE98fDxycnKwdevW+zrnG2+8AbVajU8++eS+9iciIrLG7uBm3759lVEOekCZmZl499138fPPP+P27dsIDAxEmzZtMGfOHMTGxlrcb/Xq1YiOjrY5sKkIM2fORIMGDfDaa69x1E8icnpRUVE4dOiQuE7Oz+7g5pFHHqmMctADGj58OIqLi/H111+jfv36uH37Nvbu3Yvs7Gyr+y1fvhxz5sypmkL+T1hYGOLi4vDZZ59h4cKFVXpuIiJ7yWSyKv0CSA/uvqZfyMnJwccff4zx48djwoQJWLJkCZRKZUWXjWyUk5ODQ4cOYeHChejZsyeio6PRqVMnzJo1CwMHDrS436lTp3Dt2rUyec6fP49evXrBy8sLwcHBePHFF5GXl1dm/7lz5yIsLAx+fn546aWXoFarxfc2bdqEVq1aicfo06cP8vPzxfcHDx6MhISECrh6IiIiU3YHNydOnECDBg2wZMkSZGdn486dO1i8eDEaNGiAU6dOVUYZHUoQgPx8xyyCYFsZFQoFFAoFtm7diqKiIpuv7cCBA2jcuDH8/PzEtIKCAvTr1w+BgYE4fvw4Nm7ciF9//RWTJk0y2Xfv3r24fPky9u3bh4SEBGzZsgVz584FAKSnp+Ppp5/G2LFjcfnyZezfvx/Dhg2DYHRBnTp1QmpqKm7cuGFzeYmIHCE7OxsNGzZEw4YNy60NJych2Onhhx8W4uPjheLiYjGtuLhYGDNmjNC9e3d7D1fllEqlAEBQKpVl3lOpVMKlS5cElUolpuXlCYIuzKj6JS/P9uvatGmTEBgYKHh6egpdu3YVZs2aJZw9e9bqPlOnThV69eplkrZ69WohMDBQyDM6+Y4dOwSpVCpkZGQIgiAIY8aMEYKCgoT8/Hwxz6pVqwSFQiFoNBrh5MmTAgAhOTnZ4rn1n8P+/fst5jH3eRARVbXExEQBgABASExMdHRxaixr9+/S7qvm5s0334Sbm6G7jpubG2bOnIkTJ05UUMhF9ho+fDhu3bqFbdu2oW/fvti/fz/atWuHdevWWdxHpVLB09PTJO3y5cto06YNfHx8xLRu3bpBq9Xi77//FtPatGljMthhbGws8vLykJqaijZt2qB3795o1aoVRowYgS+++AL//POPyXn0IxAXFBQ8yGUTEVU6hUIBf39/+Pv7c/qFasLu4MbPzw8pKSll0lNTU+Hr61shhXIm3t5AXp5jFnsHSvb09MRjjz2G9957D0eOHEF8fDxmz55tMX9ISEiZoEP43yP95tgyqJ5EIoFMJsOePXvw888/o3nz5li+fDmaNGliMj6Evmo3NDTUlksjInKY0NBQ5OTkICcnh/+zqgm7g5tRo0Zh3Lhx+O6775CamoqbN29iw4YNGD9+PJ5++unKKKNDSSSAj49jlgcdoLd58+YmnXhLa9u2Lf766y+TvjDNmzfHmTNnTPY7fPgwpFIpGjduLKadPXsWKpVK3D569CgUCgXq1Knzv5+bBN26dcPcuXNx+vRpyOVybNmyRcx/4cIFuLu7o0WLFg92kURERKXY/Sj4Rx99BIlEgtGjR6OkpAQA4O7ujpdffhkLFiyo8AJS
+e7evYsRI0Zg7NixaN26NXx9fXHixAksWrQIQ4YMsbhfz549kZ+fj4sXL6Jly5YAgGeffRazZ8/GmDFjMGfOHGRlZWHy5Ml4/vnnER4eLu6rVqsxbtw4vPPOO7hx4wZmz56NSZMmQSqV4tixY9i7dy/i4uIQFhaGY8eOISsrC82aNRP3P3jwILp37y42TxEREVUUu4MbuVyOZcuWYf78+bh+/ToEQUDDhg052aQDKRQKdO7cGUuWLMH169dRXFyMunXrYsKECfjXv/5lcb/g4GAMGzYM69evx/z58wHoJqv85ZdfMHXqVHTs2BHe3t4YPnw4Fi9ebLJv79690ahRI/To0QNFRUV46qmnxPFy/Pz8cODAASxduhS5ubmIjo7Gxx9/jP79+4v7JyQkiE9XERE5s6SkJNSvXx8AkJiYyMFHqwGJYNwmUQPk5ubC398fSqXS5BFoACgsLERSUhJiYmLKdLR1VefPn0efPn1w7dq1KusztWPHDsyYMQPnzp0z6ZheWk38PIjI+TC4cQ7W7t+l2VRzM2zYMKxbtw5+fn4YNmyY1bybN2+2vaTkcK1atcKiRYuQnJyMVq1aVck58/PzsXbtWquBDRGRs4iMjMT3338vrpPzs+nu4u/vLz4p4+fnZ9NTM1R9jBkzpkrPN3LkyCo9HxHRg5DL5RgxYoSji0F2sCm4Wbt2rbhubdwUIiIiIkez+1HwXr16IScnp0x6bm4uevXqVRFlIiIichpKpRKtWrVCq1atOI9iNWF3p4f9+/ebTJCoV1hYiIMHD1ZIoYiIiJxFdnY2Lly4IK77+/s7uERUHpuDm3Pnzonrly5dQkZGhrit0Wiwa9cudrQiIiKX4+XlJQ53wrG5qgebg5uHHnoIEokEEonEbPOTl5cXli9fbncBVq5ciQ8//BDp6elo0aIFli5diu7du1vMX1RUhHnz5uHbb79FRkYG6tSpg7fffhtjx461+9xERETliYiIsDraOzkfm4ObpKQkCIKA+vXr488//zSZX0MulyMsLAwymcyuk3/33XeYNm0aVq5ciW7duuHzzz9H//79cenSJURFRZndZ+TIkbh9+zbWrFmDhg0bIjMzUxwpmYiIiMihg/h17twZ7dq1w6pVq8S0Zs2aYejQoeKIucZ27dqFp556ComJiQgKCrqvc3IQv+qDnwcREenZM4if3U9LzZ8/H1999VWZ9K+++goLFy60+ThqtRonT55EXFycSXpcXByOHDlidp9t27ahQ4cOWLRoESIjI9G4cWO88cYbJhM4llZUVITc3FyThQzUajUaNmyIw4cP25RfIpFg69atD3TON954A1OmTHmgYxARVZWkpCSxW0ZSUpKji0M2sDu4+fzzz9G0adMy6S1atMBnn31m83Hu3LkDjUZjMhkjAISHh5t0VjaWmJiIQ4cO4cKFC9iyZQuWLl2KTZs24dVXX7V4nvnz58Pf319c6tata3MZq5PMzEy89NJLiIqKgoeHByIiItC3b1/88ccfVvdbvXo1oqOj0a1bN5vOk56ebjJH1P2YOXMm1q5dy38SRERUKex+FDwjIwO1atUqkx4aGor09HS7C1B6tGNBECyOgKzVaiGRSLB+/XrxUbzFixfjySefxKeffmq2F/usWbMwffp0cTs3N9clA5zhw4ejuLgYX3/9NerXr4/bt29j7969yM7Otrrf8uXLxQkvbREREfGAJQXCwsIQFxeHzz77zK7aPiIiR4iMjMQnn3wirpPzs7vmpm7dumabMA4fPozatWvbfJyQkBDIZLIytTSZmZllanP0atWqhcjISJMxBpo1awZBEHDz5k2z+3h4eMDPz89kcTU5OTk4dOgQFi5ciJ49eyI6OhqdOnXCrFmzMHDgQIv7nTp1CteuXTPJo1arMWnSJNSqVQuenp6oV6+eSf8n42ap5ORkSCQSfP/99+jevTu8vLzQsWNHXLlyBcePH0eHDh2gUCjQr18/ZGVlmZx78ODBSEhIqNgf
BBFRJZDL5Zg8eTImT54MuVzu6OKQDewObsaPH49p06Zh7dq1uHHjBm7cuIGvvvoKr732GiZMmGDzceRyOdq3b489e/aYpO/Zswddu3Y1u0+3bt1w69Yt5OXliWlXrlyBVCpFnTp17L0Uu+TnW14KC23PW7p7kKV89lAoFFAoFNi6dSuKiops3u/AgQNo3LixScD3ySefYNu2bfj+++/x999/49tvv0W9evWsHmf27Nl45513cOrUKbi5ueHpp5/GzJkzsWzZMhw8eBDXr1/He++9Z7JPp06dkJqaihs3bth1rUREROUS7KTVaoWZM2cKnp6eglQqFaRSqeDt7S3MnTvX3kMJGzZsENzd3YU1a9YIly5dEqZNmyb4+PgIycnJgiAIwltvvSU8//zzYv579+4JderUEZ588knh4sWLwu+//y40atRIGD9+vM3nVCqVAgBBqVSWeU+lUgmXLl0SVCpVmfcAy8uAAaZ5vb0t533kEdO8ISHm89lr06ZNQmBgoODp6Sl07dpVmDVrlnD27Fmr+0ydOlXo1auXSdrkyZOFXr16CVqt1uw+AIQtW7YIgiAISUlJAgDhyy+/FN9PSEgQAAh79+4V0+bPny80adLE5Dj6z2H//v0Wy2ft8yAiqio5OTlCly5dhC5dugg5OTmOLk6NZe3+XZrdNTcSiQQLFy5EVlYWjh49irNnzyI7O7vMN3NbjBo1CkuXLsW8efPw0EMP4cCBA9i5cyeio6MB6DqvpqSkiPkVCgX27NmDnJwcdOjQAc8++ywef/xxsS20Jhs+fDhu3bqFbdu2oW/fvti/fz/atWtndaJTlUpV5hHr+Ph4nDlzBk2aNMGUKVOwe/fucs/dunVrcV3fpNiqVSuTtMzMTJN99P2jCgoKyj0+EZEjZWdn4+jRozh69Gi5/RjJOdjdoVhPoVCgY8eOD1yAV155Ba+88orZ98zdmJs2bVqmKasqGLWElVF67MJS93ET0lLhZHLyfRepDE9PTzz22GN47LHH8N5772H8+PGYPXs24uPjzeYPCQnB+fPnTdLatWuHpKQk/Pzzz/j1118xcuRI9OnTB5s2bbJ4Xnd3d3Fd3xm8dJpWqzXZR/8PwngwSCIiZ+Tl5QUPDw9xnZzffQU3x48fx8aNG5GSklJmEs3NmzdXSMGcjY+P4/Paq3nz5lbHpGnbti1WrVpV5gk1Pz8/jBo1CqNGjcKTTz6Jfv36ITs7+74HTjTnwoULcHd3R4sWLSrsmERElSEiIgKFpTtXklOzu1lqw4YN6NatGy5duoQtW7aguLgYly5dwm+//caZUh3k7t276NWrF7799lucO3cOSUlJ2LhxIxYtWoQhQ4ZY3K9nz57Iz8/HxYsXxbQlS5Zgw4YN+Ouvv3DlyhVs3LgRERERCAgIqNAyHzx4UHzCioiIqCLZXXPzwQcfYMmSJXj11Vfh6+uLZcuWISYmBi+99JLZ8W+o8ikUCnTu3BlLlizB9evXUVxcjLp162LChAn417/+ZXG/4OBgDBs2DOvXrxcf91YoFFi4cCGuXr0KmUyGjh07YufOnZCWbk97QAkJCZg7d26FHpOIiAi4j7mlfHx8cPHiRdSrVw8hISHYt28fWrVqhcuXL6NXr173NZBfVeLcUqbOnz+PPn364Nq1a/D19a2Sc+7YsQMzZszAuXPn4OZmOb6uiZ8HETmflJQU8UGXGzduWJzYmSpXpc4tFRQUhHv37gHQjdR44cIFALqB5PjkS/XTqlUrLFq0CMkV2bO5HPn5+Vi7dq3VwIaIyFloNBqz6+S87L67dO/eHXv27EGrVq0wcuRITJ06Fb/99hv27NmD3r17V0YZqZKNGTOmSs83cuTIKj0fEdGDiIiIEIc7qYgpaKjy2R3crFixQuw1PmvWLLi7u+PQoUMYNmwY3n333QovIBERkSN5eXmxj2A1Y1dwU1JSgp9++gl9+/YFAEilUsycORMzZ86slMIRERER2cuuPjdubm54+eWX
7Zq/iIiIqDrLy8tDnz590KdPH5O5Dcl52d0s1blzZ5w+fVrsOU5EROTKsrKysHfvXnFdoVA4uERUHruDm1deeQWvv/46bt68ifbt28On1BC7xvMMERERVXdyuVx8ulMulzu4NGQLu4ObUaNGAQCmTJkipkkkEnEIfz4mR0REriQyMhLFxcWOLgbZwe7gJikpqTLKQURERFQhbO5Q3KNHD+Tk5CA6OhrR0dE4e/YswsLCxG39Qs7t+eefxwcffGBz/nXr1j3QvFLbt29H27Zty8wKTkREVFlsDm4OHTpkMgP4c8895/RTLdQk8fHxGDp0qNU8586dw44dOzB58uSqKRSAQYMGQSKR4L///W+VnZOIqCKlpKRAKpVCKpUiJSXF0cUhG9z3bIh2TklFTmDFihUYMWJElc0hpffCCy9g+fLlVXpOIqKKotFoIAgCBEFgv9JqomKneianpdVqsXHjRgwePNgk/Z9//sHo0aMRGBgIb29v9O/fH1evXi2z/9atW9G4cWN4enriscceQ2pqqvje2bNn0bNnT/j6+sLPzw/t27fHiRMnxPcHDx6MP//8E4mJiZV3gURElSQiIgKvvvoqXn31VU6/UE3Y1aH4l19+gb+/PwDdzXLv3r3ixJl6pW+e1Z4gAI6aENTbG5BIKuRQ586dQ05ODjp06GCSHh8fj6tXr2Lbtm3w8/PDm2++iQEDBuDSpUtwd3cHABQUFOA///kPvv76a8jlcrzyyit46qmncPjwYQDAs88+i7Zt22LVqlWQyWQ4c+aMuC8AREdHIywsDAcPHkT9+vUr5HqIiKqKl5cXVqxY4ehikB3sCm5KT7D40ksvmWy75KPgBQWAowZsyssDSo0jdL+Sk5Mhk8kQFhYmpumDmsOHD6Nr164AgPXr16Nu3brYunUrRowYAQAoLi7GihUr0LlzZwDA119/jWbNmuHPP/9Ep06dkJKSghkzZqBp06YAgEaNGpU5f2RkZJXOPE5ERDWXzc1SWq223MXlAhsXolKp4OHhAYlRTdDly5fh5uYmBi0AEBwcjCZNmuDy5ctimpubm0mNT9OmTREQECDmmT59OsaPH48+ffpgwYIFuH79epnze3l5ocBRNWBERA8gLy8Pw4cPx/Dhwzn9QjXBPjfl8fbW1aA4YvH2rrDLCAkJQUFBgckTb5Y6hesHZDRWets4bc6cObh48SIGDhyI3377Dc2bN8eWLVtM8mZnZyM0NPRBL4OIqMplZWVh8+bN2Lx5M7KyshxdHLIBg5vySCS6piFHLBXU3wYAHnroIQDApUuXxLTmzZujpKQEx44dE9Pu3r2LK1euoFmzZmJaSUmJSQfhv//+Gzk5OWIzFAA0btwYr732Gnbv3o1hw4Zh7dq14nuFhYW4fv062rZtW2HXQ0RUVeRyufgoOKdfqB4Y3NQQoaGhaNeuHQ4dOiSmNWrUCEOGDMGECRNw6NAhnD17Fs899xwiIyMxZMgQMZ+7uzsmT56MY8eO4dSpU3jhhRfQpUsXdOrUCSqVCpMmTcL+/ftx48YNHD58GMePHzcJjo4ePQoPDw/ExsZW6TUTEVWEyMhIaDQaaDQaREZGOro4ZAMGNzXIiy++iPXr15ukrV27Fu3bt8egQYMQGxsLQRCwc+dOk6edvL298eabb+KZZ55BbGwsvLy8sGHDBgCATCbD3bt3MXr0aDRu3BgjR45E//79MXfuXHH/hIQEPPvss/CuwGY2IiIiSyRCDRuNLzc3F/7+/lAqlfDz8zN5r7CwEElJSYiJiYGnp6eDSlh5CgsL0aRJE2zYsKHKalGysrLQtGlTnDhxAjExMXbt6+qfBxER2c7a/bs0uyfO1FOr1cjMzCwzZ1BUVNT9HpIqmaenJ7755hvcuXOnys6ZlJSElStX2h3YEBE5i7S0NPHelpKSwqapasDu4Obq1asYO3Ysjhw5YpKuf8KGj4M7t0ceeaRKz9epUyd06tSpSs9JRFSR1Gq1+EXe+IlTcl52
Bzfx8fFwc3PD9u3bUatWLbOPCBMREbmK0NBQPP300+I6OT+7g5szZ87g5MmTJo8BExERuSqFQoH//ve/ji4G2cHup6WaN29epX02iIiIiOxhd3CzcOFCzJw5E/v378fdu3eRm5trshAREbkSlUqFcePGYdy4cVCpVI4uDtnA7mapPn36AAB69+5tks4OxURE5IoyMjLw1VdfAQDeeecdPv1ZDdgd3Ozbt68yykFEROSU5HK5+PAMp1+oHuwObqr6UWIiIiJHioyMLDOmGzm3+5p+IScnBx9//DHGjx+PCRMmYMmSJVAqlRVdNqoEzz//PD744AOb8j766KOYNm3aA51v+/btaNu2Lf8xEBFRlbE7uDlx4gQaNGiAJUuWIDs7G3fu3MHixYvRoEEDnDp1qjLKSDaIj4/H0KFDreY5d+4cduzYgcmTJ9t0zM2bN+P9999/oHINGjQIEomEj1ESEVGVsTu4ee211zB48GAkJydj8+bN2LJlC5KSkjBo0KAH/pZPlWvFihUYMWIEfH19bcofFBRkc15rXnjhBSxfvvyBj0NE5AhpaWlwc3ODm5sb0tLSHF0cssF91dy8+eabcHMzdNdxc3PDzJkzceLEiQotHFUcrVaLjRs3YvDgwSbpK1euRKNGjeDp6Ynw8HA8+eST4nulm6Xq1auHf//73xg9ejQUCgWio6Px448/IisrC0OGDIFCoUCrVq3K/B4MHjwYf/75JxITEyv1GomIKoNarYZGo4FGo+H0C9WE3cGNn58fUlJSyqSnpqZWyLd8p5Wfb3kpLLQ9b+kxEizlq2Dnzp1DTk4OOnToIKadOHECU6ZMwbx58/D3339j165d6NGjh9XjLFmyBN26dcPp06cxcOBAPP/88xg9ejSee+45nDp1Cg0bNsTo0aNhPNl8dHQ0wsLCcPDgwQq/LiKiyhYaGoqBAwdi4MCBnH6hmrD7aalRo0Zh3Lhx+Oijj9C1a1dIJBIcOnQIM2bMEOfecEkKheX3BgwAduwwbIeFAQUF5vM+8giwf79hu149wNyIz0bBQUVITk6GTCZDWFiYmJaSkgIfHx8MGjQIvr6+iI6ORtu2ba0eZ8CAAXjppZcAAO+99x5WrVqFjh07YsSIEQCAN998E7Gxsbh9+zYiIiLE/SIjI5GcnFyh10REVBUUCgW2b9/u6GKQHewObj766CNIJBKMHj0aJSUlAAB3d3e8/PLLWLBgQYUXkCqGSqWCh4eHyUSnjz32GKKjo1G/fn3069cP/fr1wxNPPAFvb2+Lx2ndurW4Hh4eDgBo1apVmbTMzEyT4MbLywsFlgI+IiKiCmR3cCOXy7Fs2TLMnz8f169fhyAIaNiwodUbokvIy7P8nkxmup2ZaTmvtFRLYBXVZoSEhKCgoABqtVochMrX1xenTp3C/v37sXv3brz33nuYM2cOjh8/joCAALPHcXd3F9f1gZK5tNKPfmdnZ7M6l4iqJZVKhXfffRcA8P7778PLy8vBJaLy2B3c6Hl7e5t8Y3d5Pj6Oz/sAHnroIQDApUuXxHVA1xm8T58+6NOnD2bPno2AgAD89ttvGDZsWIWdu7CwENevXy+3yYuIyBllZGTg448/BgC8+uqrnH6hGrApuBk2bBjWrVsHPz+/cm96mzdvrpCCUcUKDQ1Fu3btcOjQITG42b59OxITE9GjRw8EBgZi586d0Gq1aNKkSYWe++jRo/Dw8EBsbGyFHpeIqCrIjGrnZaVr6skp2RTc+Pv7i80Nfn5+Jv02qPp48cUXsW7dOkyaNAkAEBAQgM2bN2POnDkoLCxEo0aNkJCQgBYtWlToeRMSEvDss8+6ftMlEbmkqKgokydAyflJhBr2ieXm5sLf3x9KpRJ+fn4m7xUWFiIpKQkxMTHw9PR0UAkrT2FhIZo0aYINGzZUWS1KVlYWmjZtihMnTthdlevqnwcREdnO2v27NLvHuenVqxdycnLMnrRXr172Ho6qkKenJ7755hvcMffoeSVJ
SkrCypUr2UZNRERVxu4Oxfv37zc7QmNhYSEHaasGqnpW906dOqFTp05Vek4iooqUkZGB6OhoAMCNGzdMhrkg52RzcHPu3Dlx/dKlS8jIyBC3NRoNdu3ahcjIyIotHRERkYOpVCrxS72q9Cjz5JRsDm4eeughSCQSSCQSs81PXl5enByRiIhcTlBQELp37y6uk/OzObhJSkqCIAioX78+/vzzT5MB2eRyOcLCwviIHBERuRx/f38cOHDA0cUgO9gc3OjbG0uPPEtERETkTO57hOJLly4hJSWlTOfiwYMHP3ChiIiInIVKpcInn3wCAJgyZQqnX6gG7A5uEhMT8cQTT+D8+fOQSCTiwEb6gf00Gk3FlpCIiMiBMjIy8NZbbwEARo4cyaEtqgG7x7mZOnUqYmJicPv2bXh7e+PixYs4cOAAOnTogP3791dCEYmIiByH0y9UP3YHN3/88QfmzZuH0NBQSKVSSKVSPPzww5g/fz6mTJlSGWWkSqZWq9GwYUMcPnzY5n3i4+MxdOjQ+z7nG2+8wd8XIqoW9NMvCIKAqKgoRxeHbGB3cKPRaKBQKAAAISEhuHXrFgBdh+O///67YktHNouPjxcf1Xdzc0NUVBRefvll/PPPP+Xuu3r1akRHR6Nbt25VUFKdmTNnYu3atUhKSqqycxIRUc1gd3DTsmVLcUC/zp07Y9GiRTh8+DDmzZuH+vXrV3gByXb9+vVDeno6kpOT8eWXX+Knn37CK6+8Uu5+y5cvx/jx46ughAZhYWGIi4vDZ599VqXnJSIi12d3cPPOO++Ij4P/+9//xo0bN9C9e3fs3LlT7E3uSgRBQL463yGLvXOaenh4ICIiAnXq1EFcXBxGjRqF3bt3W93n1KlTuHbtGgYOHGiSfv78efTq1QteXl4IDg7Giy++iLy8vDL7z507F2FhYfDz88NLL71k8vTcpk2b0KpVK/EYffr0QX5+vvj+4MGDkZCQYNc1EhFVtYyMDHh5ecHLy8tkdH5yXnY/LdW3b19xvX79+rh06RKys7MRGBgoPjHlSgqKC6CYr3DIufNm5cFH7nNf+yYmJmLXrl1wd3e3mu/AgQNo3LixyQyrBQUF6NevH7p06YLjx48jMzMT48ePx6RJk7Bu3Tox3969e+Hp6Yl9+/YhOTkZL7zwAkJCQvCf//wH6enpePrpp7Fo0SI88cQTuHfvHg4ePGgSsHXq1Ampqam4ceOGOI4SEZGzUalUKCwsFNfJ+d33ODfGOBy1c9i+fTsUCgU0Go34h7h48WKr+yQnJ6N27domaevXr4dKpcI333wDHx9dcLVixQo8/vjjWLhwIcLDwwHoRqb+6quv4O3tjRYtWmDevHmYMWMG3n//faSnp6OkpATDhg0TA5dWrVqZnEc/F1lycjKDGyJyWkFBQWjbtq24Ts7PpuBm2LBhNh9w8+bN910YZ+Tt7o28WWWbY6rq3Pbo2bMnVq1ahYKCAnz55Ze4cuUKJk+ebHUflUoFT09Pk7TLly+jTZs2YmADAN26dYNWq8Xff/8tBjdt2rSBt7ehjLGxscjLy0NqairatGmD3r17o1WrVujbty/i4uLw5JNPIjAwUMyvHwiroKDAruskIqpK/v7+OHXqlKOLQXawKbjx9/ev7HI4LYlEct9NQ1XNx8cHDRs2BAB88skn6NmzJ+bOnYv333/f4j4hISE4f/68SZogCBabGG1pepRIJJDJZNizZw+OHDmC3bt3Y/ny5Xj77bdx7NgxcQCs7OxsADCZp4yIiOhB2RTcrF27ttIKsHLlSnz44YdIT09HixYtsHTpUnH2VWsOHz6MRx55BC1btsSZM2cqrXzV2ezZs9G/f3+8/PLLZZqe9Nq2bYtVq1aZBDTNmzfH119/jfz8fLH25vDhw5BKpWjcuLG479mzZ6FSqcQamKNHj0KhUKBOnToAdEFOt27d0K1bN7z33nuIjo7Gli1bMH36dADAhQsX4O7ujhYtWlTaz4CI6EGp1Wr8
97//BQA888wzkMvlDi4Rlcfup6Uq0nfffYdp06bh7bffxunTp9G9e3f0798fKSkpVvdTKpUYPXo0evfuXUUlrZ4effRRtGjRAh988IHFPD179kR+fj4uXrwopj377LPw9PTEmDFjcOHCBezbtw+TJ0/G888/LzZJAbo/+HHjxuHSpUv4+eefMXv2bEyaNAlSqRTHjh3DBx98gBMnTiAlJQWbN29GVlYWmjVrJu5/8OBBdO/enfO0EJFTS0tLwwsvvIAXXngBaWlpji4O2cDu4CYmJgb169e3uNhj8eLFGDduHMaPH49mzZph6dKlqFu3LlatWmV1v5deegnPPPMMYmNj7S1+jTN9+nR88cUXSE1NNft+cHAwhg0bhvXr14tp3t7e+OWXX5CdnY2OHTviySefRO/evbFixQqTfXv37o1GjRqhR48eGDlyJB5//HHMmTMHAODn54cDBw5gwIABaNy4Md555x18/PHH6N+/v7h/QkICJkyYUPEXTURENZpEsHMwlWXLlplsFxcX4/Tp09i1axdmzJghTi5WHrVaDW9vb2zcuBFPPPGEmD516lScOXMGv//+u9n91q5di5UrV+KPP/7Av//9b2zdutVqs1RRURGKiorE7dzcXNStWxdKpdLk8WcAKCwsRFJSEmJiYsp0snVl58+fR58+fXDt2jX4+vpWyTl37NiBGTNm4Ny5c3BzM986WlM/DyIiKis3Nxf+/v5m79+l2f0o+NSpU82mf/rppzhx4oTNx7lz5w40Go1JMwcAhIeHWxwk6erVq3jrrbdw8OBBizfE0ubPn4+5c+faXK6aqFWrVli0aBGSk5PLPK5dWfLz87F27VqbP0ciIiJbVVifm/79++OHH36we7/ST99YelJHo9HgmWeewdy5c006tZZn1qxZUCqV4mKpeaamGzNmTJUFNgAwcuRIdO7cucrOR0RENUeFfW3etGmTXYMbhYSEQCaTlamlyczMLFObAwD37t3DiRMncPr0aUyaNAkAoNVqIQgC3NzcsHv3bvTq1avMfh4eHvDw8LDzaoiIiHSysrLQoEEDAMD169c5fEU1YHdw07ZtW5OaFUEQkJGRgaysLKxcudLm48jlcrRv3x579uwx6XOzZ88eDBkypEx+Pz+/MuOxrFy5Er/99hs2bdokjp1CRERUkfLy8nDv3j1xncGN87M7uBk6dKjJtlQqRWhoKB599FE0bdrUrmNNnz4dzz//PDp06IDY2FisXr0aKSkpmDhxIgBdk1JaWhq++eYbSKVStGzZ0mT/sLAweHp6lkknIiKqKP7+/uL9rSYPalud2B3czJ49u8JOPmrUKNy9exfz5s1Deno6WrZsiZ07d4rzDKWnp5c75g0REVFlCgoKwuXLlx1dDLKD3Y+CA7rOvVu2bMHly5chkUjQrFkzDBkypFo8+WLtUTI+euxc+HkQEZFepT4KfuHCBQwZMgQZGRlo0qQJAODKlSsIDQ3Ftm3bqvSJGyIiosqmVqvx66+/AgD69OnD6ReqAbsfBR8/fjxatGiBmzdv4tSpUzh16hRSU1PRunVrvPjii5VRRiIiIodJS0vDwIEDMXDgQE6/UE3YHdycPXsW8+fPR2BgoJgWGBiI//znP5zAsppSq9Vo2LAhDh8+bFN+iUSCrVu3PtA533jjDUyZMuWBjkFERGSO3cFNkyZNcPv27TLpmZmZaNiwYYUUiuwXHx8PiUQCiUQCNzc3REVF4eWXX8Y///xT7r6rV69GdHQ0unXrZtO50tPTTeaIuh8zZ87E2rVrkZSU9EDHISKqbDExMRAEAYIgcNiRasLu4OaDDz7AlClTsGnTJty8eRM3b97Epk2bMG3aNCxcuBC5ubniQlWrX79+SE9PR3JyMr788kv89NNPeOWVV8rdb/ny5Rg/frzN54mIiHjggRHDwsIQFxeHzz777IGOQ0REVJrdwc2gQYNw6dIljBw5EtHR0YiOjsbIkSNx4cIFPP744wgMDERAQIBJ
s5UryFfnW1wKSwptzqsqVtmU9354eHggIiICderUQVxcHEaNGoXdu3db3efUqVO4du0aBg4cKKap1WpMmjQJtWrVgqenJ+rVq4f58+eL7xs3SyUnJ0MikeD7779H9+7d4eXlhY4dO+LKlSs4fvw4OnToAIVCgX79+iErK8vk3IMHD0ZCQsJ9XSsREZEldj8ttW/fvsooh9NTzFdYfG9AowHY8cwOcTvsozAUFBeYzftI9CPYH79f3K63rB7uFNwpk0+YbfcT+iYSExOxa9cuuLu7W8134MABNG7c2OSxuk8++QTbtm3D999/j6ioKKSmppY7J9fs2bOxdOlSREVFYezYsXj66afh5+eHZcuWwdvbGyNHjsR7772HVatWift06tQJqampuHHjhji2ERGRs8nKyhIH8fvrr784QnE1YHdw88gjj1RGOagCbN++HQqFAhqNBoWFutqkxYsXW90nOTkZtWvXNklLSUlBo0aN8PDDD0MikdgUeLzxxhvo27cvAN3M8U8//TT27t0r9uMZN24c1q1bZ7JPZGSkWAYGN0TkrPLy8pCdnS2uM7hxfvc16l5OTg7WrFkjDuLXvHlzjB071qWHpc6blWfxPZlUZrKd+UamxbxSiWlLYPLU5Acql7GePXti1apVKCgowJdffokrV65g8uTJVvdRqVRlBsiLj4/HY489hiZNmqBfv34YNGgQ4uLirB6ndevW4rp+4lPjMY/Cw8ORmWn6c/Hy8gIAFBSYr+UiInIG/v7+4hcwV77PuRK7+9ycOHECDRo0wJIlS5CdnY07d+5g8eLFaNCgAU6dOlUZZXQKPnIfi4unm6fNeb3cvWzKe19l9PFBw4YN0bp1a3zyyScoKirC3Llzre4TEhJS5omqdu3aISkpCe+//z5UKhVGjhyJJ5980upxjJu/9BOrlk7TarUm++i/CfFbEBE5s6CgICQnJyM5ORlBQUGOLg7ZwO7g5rXXXsPgwYORnJyMzZs3Y8uWLUhKSsKgQYMwbdq0Sigi3a/Zs2fjo48+wq1btyzmadu2Lf766y+UnoXDz88Po0aNwhdffIHvvvsOP/zwgxiMVJQLFy7A3d0dLVq0qNDjEhFRzXZfNTdvvvmmyTxSbm5umDlzJk6cOFGhhaMH8+ijj6JFixb44IMPLObp2bMn8vPzcfHiRTFtyZIl2LBhA/766y9cuXIFGzduREREBAICAiq0fAcPHhSfsCIiclYajQbnzp3DuXPnoNFoHF0csoHdwY2fn5/ZmbpTU1Ph6+tbIYWiijN9+nR88cUXFp92Cg4OxrBhw7B+/XoxTaFQYOHChejQoQM6duyI5ORk7Ny5E1Kp3b8uViUkJGDChAkVekwiooqWkpKCNm3aoE2bNmbvf+R87J4VfMqUKdiyZQs++ugjdO3aFRKJBIcOHcKMGTMwfPhwLF26tJKKWjE4K3hZ58+fR58+fXDt2rUqC1B37NiBGTNm4Ny5cxZnk6+pnwcROZekpCTUr18fgG6YDY5S7BiVOiv4Rx99BIlEgtGjR6OkpASAruPoyy+/jAULFtxficmhWrVqhUWLFiE5ObnKZnXPz8/H2rVrLQY2RETOIioqCjk5OQB0Ndvk/OyuudErKCjA9evXIQgCGjZsCG9v74ouW6VgzU31wc+DiIj07Km5sbkTRUFBAV599VVERkYiLCwM48ePR61atdC6detqE9gQERGR67M5uJk9ezbWrVuHgQMH4qmnnsKePXvw8ssvV2bZiIiIHC47OxsRERGIiIio8CExqHLY3OFh8+bNWLNmDZ566ikAwHPPPYdu3bpBo9FAJpOVs3f1cp8tdVTB+DkQkTNQKpW4ffu2uM6B/JyfzTU3qamp6N69u7jdqVMnuLm5WR0grrrRj6jL6QCcg/5zKG/yTyKiyuTv74/w8HCEh4dz+oVqwuaaG41GA7lcbrqzm5v4xJQrkMlkCAgIEOdA8vb2FqcSoKojCAIKCgqQmZmJgIAAl6sZJKLq
JSgoCBkZGY4uBtnB5uBGEATEx8fDw8NDTCssLMTEiRPh42OYC2nz5s0VW8IqFhERAQBlJnmkqhcQECB+HkRERLayObgZM2ZMmbTnnnuuQgvjDCQSCWrVqoWwsDAUFxc7ujg1lru7O2tsiMgpaDQa5OXlAdCNc8P/Tc7P5uBm7dq1lVkOpyOTyfgLTERESElJ4QjF1UzFThZERERE5GAc+56IiMiKqKgonD17Vlwn58fghoiIyAqZTIbWrVs7uhhkBzZLERERkUthcENERGRFdnY26tWrh3r16nH6hWqCzVJERERWKJVK3LhxQ1zn9AvOj8ENERGRFQqFQgxoFAqFg0tDtmBwQ0REZEVoaCju3r3r6GKQHdjnhoiIiFwKgxsiIiJyKQxuiIiIrEhKSoJEIoFEIkFSUpKji0M2YHBDRERELoUdiomIiKyIjIzEjh07xHVyfgxuiIiIrJDL5RgwYICji0F2YLMUERERuRQGN0RERFZkZ2ejWbNmaNasGadfqCbYLEVERGSFUqnEX3/9Ja5z+gXnx+CGiIjICoVCAV9fX3GdnB+DGyIiIitCQ0ORm5vr6GKQHdjnhoiIiFwKgxsiIiJyKQxuiIiIrOD0C9UPgxsiIiJyKexQTEREZEVkZCTWrl0rrpPzY3BDRERkhVwuR3x8vKOLQXZgsxQRERG5FAY3REREViiVSrRr1w7t2rWDUql0dHHIBmyWIiIisiI7OxunT58W1/39/R1cIioPgxsiIiIrvLy84OnpKa6T82NwQ0REZEVERARUKpWji0F2YJ8bIiIicikMboiIiMilMLghIiKyIiUlRZx+ISUlxdHFIRswuCEiIrJCo9GYXSfnxQ7FREREVkRERGDBggXiOjk/iSAIgqMLUZVyc3Ph7+8PpVIJPz8/RxeHiIiIbGDP/ZvNUkRERORSGNwQERFZoVQq0aNHD/To0YPTL1QT7HNDRERkRXZ2Ng4ePCiuc/oF5+fwmpuVK1ciJiYGnp6eaN++vfgLZM7mzZvx2GOPITQ0FH5+foiNjcUvv/xShaUlIqKaxsvLC3K5HHK5nNMvVBMODW6+++47TJs2DW+//TZOnz6N7t27o3///hbHEThw4AAee+wx7Ny5EydPnkTPnj3x+OOPixOaERERVbSIiAgUFRWhqKiIT0tVEw59Wqpz585o164dVq1aJaY1a9YMQ4cOxfz58206RosWLTBq1Ci89957NuXn01JERETVT7V4WkqtVuPkyZOIi4szSY+Li8ORI0dsOoZWq8W9e/cQFBRkMU9RURFyc3NNFiIiInJdDgtu7ty5A41Gg/DwcJP08PBwZGRk2HSMjz/+GPn5+Rg5cqTFPPPnz4e/v7+41K1b94HKTURENQunX6h+HN6hWCKRmGwLglAmzZyEhATMmTMH3333HcLCwizmmzVrFpRKpbikpqY+cJmJiKjm4PQL1Y/DHgUPCQmBTCYrU0uTmZlZpjantO+++w7jxo3Dxo0b0adPH6t5PTw84OHh8cDlJSKimikiIgKvv/66uE7Oz2HBjVwuR/v27bFnzx488cQTYvqePXswZMgQi/slJCRg7NixSEhIwMCBA6uiqEREVIN5eXnho48+cnQxyA4OHcRv+vTpeP7559GhQwfExsZi9erVSElJwcSJEwHompTS0tLwzTffANAFNqNHj8ayZcvQpUsXsdbHy8uLgyoRERERAAf3uRk1ahSWLl2KefPm4aGHHsKBAwewc+dOREdHAwDS09NNOm99/vnnKCkpwauvvopatWqJy9SpUx11CURE5OLy8vIwaNAgDBo0CHl5eY4uDtmAs4ITERFZkZSUhPr16wMAEhMTERMT4+AS1Uz23L85txQREZEVcrkcMplMXCfnx+CGiIjIisjISJSUlDi6GGQHh49zQ0RERFSRGNwQERGRS2FwQ0REZEVaWhqkUimkUinS0tIcXRyyAfvcEBERWaFWq6F/sFitVju4NGQLBjdERERWREREYOzY
seI6OT8GN0RERFZ4eXlhzZo1ji4G2YF9boiIiMilMLghIiKyIi8vD8888wyeeeYZTr9QTTC4ISIisiIrKwsJCQlISEhAVlaWo4tDNmCfGyIiIivkcjmkUqm4Ts6PwQ0REZEVkZGR0Gg0ji4G2YHNUkRERORSGNwQERGRS2FwQ0REZEVaWhpkMhlkMhmnX6gm2OeGiIjICrVaDa1WK66T82NwQ0REZEVoaCiGDRsmrpPzY3BDRERkhUKhwA8//ODoYpAd2OeGiIiIXAqDGyIiIitUKhUmTZqESZMmQaVSObo4ZAOJIAiCowtRlXJzc+Hv7w+lUgk/Pz9HF4eIiJxcUlIS6tevDwBITExETEyMg0tUM9lz/2afGyIiIitkMhkkEom4Ts6PwQ0REZEVUVFR4qPgVD2wzw0RERG5FAY3RERE5FIY3BAREVmRlpYGd3d3uLu7c/qFaoJ9boiIiKxQq9UoKSkR18n5MbghIiKyIjQ0FL179xbXyfkxuCEiIrJCoVDg119/dXQxyA7sc0NEREQuhTU3REREVqhUKixYsAAA8NZbb8HLy8vBJaLycPoFIiIiKzj9gnOw5/7NZikiIiIrjKdc4PQL1QObpYiIiKyIiopCDWvkqPZYc0NEREQuhcENERERuRQGN0RERFZkZGTA09MTnp6eyMjIcHRxyAbsc0NERGSFSqVCUVGRuO5oGq0GxdpiqDVqFGt0rxpBgzp+dcQ8f9/5G9mqbF0eo7zF2mJoBS1Gthgp5v3xrx9xNfuqyfGM91kxYAUkEgkAYMkfS/D7jd/FPKXz/zHuD3i7e1f5z6Q0BjdERFQj3Su6Z3KTNr6he7h5oHFwYwBAUFAQmg1oBq1Mi4N3D+JY/jGTm3qAZwCeavmUeNwlfyzB7fzbJnmKNcVQa9UI9Q7F4r6LxbwTtk3AX3f/KptXo0aIdwj+nPCnmPeRdY/g4I2DEFC2c3OAZwD+efMfcfvVna9ib9Jes9ftJnUzCW7WnlmLH//+0eLPaUm/JZDL5ACAE+knrOa9kXMDvh6+AGASbFU1BjdERGQzQRDEG7BUIoWXu25AuxJtCZL+SSpTS6C/UUcoItAqvBUAQK1RI+F8Qplv/vr15qHNMaLFCAC6WopXdryie7/UsdUaNTrV7oT3e70vlq/d5+2QX5xfJlgp1hbj4aiH8fOzP4t5o5ZGIacwx+x1do7sjKPjjwIA/P39kds7F2n30jBmx5gyeRsHNUbLsJYoKilCkaYIS44uQWpuqtnjhnqHokVoC2gEDUq0Jdh1fRdu5t40m/d2/m28uedNlGhLoBE0uJZ9zWxgAwD56nwM/344NFrdcS/fuQwvNy9IJBJIoKt10de+SCBBm1VtoBE00Aga3Cm4A4W7Qjy2AAGCoFu00CJwQaCYt0RbYvb8es1XNgcA1PatjbTpjptBnYP4ERFVEUEQzFbn62++od6hCPQKBADkFObgTMaZMkGCfr1drXZisJB+Lx3rzqwzG1gUa4sxoNEADG4yGACQokzBpJ2Typxbf/wxbcbg9a6vi3nbr25vcm7jm9srHV7BpwM/BQDczruNiI8jLF77yBYj8eFjH0KtUUNZqESHLzpYzNslsgumdJ6CEm0JijXFGPfTOIt5GwY1xKgWo6DR6m6+S44usXgDDvMJQ98GfcWb9Na/tkKt0c3yLYHEJBCQy+QI8Q5BkaYIao0auUW50Apai+UgHalECplEhtq+tZE8LblCj23P/ZvBDRE5PUEQdDc6bTE8ZB6QSXUDqSkLlcgqyLIYALSt1RZBXkEAgKt3r+LozaNlbuj61xEtRojNEH+m/Ymvz3xttragWFOMGV1n4JF6jwAAfkv6DW/sfsPscdUaNZb1W4bnWj8HAPj56s8Y8N8BFq/zk36fYHLnyQCAAzcO4JF1j1jMO7PrTExoPwFqjRpnMs7g2c3PWszbv2F/9G/YH0WaItxU3sSyP5dZzNskuAnaRLRBibYE94ruYU/iHot5/T38
EeQVBLVGjcKSQtxV3bWY11V5yDzg4eYBuUxeZt1d5g53qTvcpG5wk7pBJpWJ625SN8gk5rdlUhlkEpmYX79e+v3SeS2lmTuOPWnG5SyvHPraocrA4MYKBjdU05RoS8w2E+jXGwc3FoOFK3evIFWZavYbfbG2GKNajIKP3AcA8Gvirzh285jZGoBibTHe7/k+whXhAIBvz32L7y5+Z7ZWoVhTjO9HfI+mIU0B6Por/Ofgf0zyGH8TPzz2MLrW7QoAWPzHYry++3WL1777ud14rMFjAIDVJ1fjpe0vWcy7ZdQWDG06FIIgYN3ZdRj741iLeWd0nYGudbuiqKQIR1KP4JM/P7GY9+G6D6N+UH2oNWrczL2JQymHTN43bjII8gyCl7sX1Bo1CooLkKfOs9gMUR3pb/hymRxymRzuUqN1o3Q3qZvVoMBN6gY3iVuZG67+VSqRllnX1yjo192kbvCQ/S8QMQpI9GUwTstMz0Sfnn0ADXDy2Ek0btBYLH9l3szJlD33b/a5ISqHVtBCKjGMmpCVn4WC4gKoNeoyTxVIJBLxxgsAexP3Ij0v3ewTCFKJFDO6zRDzrjq+Cuczz5ttttAKWvz09E9i3pl7ZmL39d1m+yAUa4px+43b8HDzAADEb43H+vPrLV7f3Zl3xdqNj498jNWnVlvM26d+HzG42Xl1J5YcXWIx72tdXhODm7/v/I3tV7ZbzJtblAtVsQqFJYW4U3DHag3A7uu7cS37GopKinDs5jF4yDwglUjFRSKRQAopIAEWHV6Ez09+jiJNEW7du4VAz0BDfwII0ApaXb8CQYvnNz+PYm0xijRFFs+t9+GRD8vNo3co9RAOpR6y+L7Yz0EQcEd1B7DhYRwJJCY3YONFX3tgrkbBQ+Zhsq4PKowDCf3iLjNNM85T+pzGgYmloKWyv9VXpqSCJCBHtx7oGQiFXOHQ8lD5GNyQw5VoS1BUUoTCkkIUaYrETnlFJUXwdPNEk5AmYt6f/v4Jeeo8qDVqsS28qET3GuoTivHtxot53/r1LWTkZZgEFfqljl8dfD30azFvv2/74fKdyyaPQur3i/KPQtLUJDFv//X9cTL9pNlrCfUOReaMTHF73oF5OHDjgNm83u7eJsHN9qvbsfPqTos/J0EQxJtDck4yzt4+azFvsbYYHtAFN+4yd5P3pBKpeANyl7mb1IrU8auDFqEtxBueTCKDVCqFFLrA4a87fyFVmQpViQruMnf0rNdTLJtW0EIraKERNNAKWsz7fR4ECMgvzkdGXgbqB9Y3qYkxDt46f9nZ4rWUNvf3uTbn/TXpV5vzmiOBBJ5unmUWDzcP3avMQ9wW143S9NvmagbMBQXGwYG7zN1kX+OlOgcK1VFMTAynX6hmGNyQCUEQoCpRIV+dD6lEimDvYAC6Jxb2Ju1FQXGByaIqVqGguACNghuJ/QoEQcCTG59EYUmhyaL/Zt4tqhv+74n/E8/pv8AfBcUFZsvTPao7DrxgCA7G/zQemfmZZvO2jWhrEtxsvLQRif8kms2r71uhd+veLaQoU8zmLdYUm2x7uXvBy81LvDHpv6W6y9wR7BVskrdj7Y4mVfH6m5ZcJoenzFPsYKrWqDGo0SA0CW4i1hLpOzjq7bq2C1pBixJtCTpFdkLDoIbie/qmDX0twKd/fopibTFUxSoo3BUY33a8GAwWlhRCVaISP5Pe3/QW143TzTWHHPjWfKBmzrG0YzbnNSaBBF7uXmJg4OXuJd7kLQYPpYIMczUX+poOc+vmAhdPN082OxBVUwxuXIxao8ap9FNQFiqRU5gjLnnqPOSp89Chdgc821rX8VBZqESvb3ohT52He0X3xDz6m9ozrZ7B+mG65gytoEXfb/taPO/gJoPF4EYikWD7le3iUwilxQTGmGx7yDzE4EYqkZp829U3l+g9HPUwcgpzyraPyzxQL6CeSd7XY19HblFumW/LcpkcAZ4BJnm/eeIbqEvU0EIrdl7Vd2DVaDXYfX23GNCN
aTMGI5qPMAnwxCBOU4gRG0eUCexKL/raqc9Ofmbl0zRlT1NIRdLXXuiDOn3g4eXmBW93b3HxkfvA281oXZ/ubljXH8NcbYh+cZO6MaAgogfC4MbJaAUtslXZkEAi1pr8o/oHK/5cgayCLGTmZyJblY2cwhwoi3QBzHOtnsPHfT8W88auibV4/OdbPy8GNx5uHjiVfspiXuPmCneZO9rVagcPmYfJDc3b3Rtebl54KOIhk31X9F8BmVRmciPT3xRLByyJUxN1zR5SKbSCtkz/lMtZl8Xt6V2ml2m6KtIUiQHDR0c+KvNeQXEB8ovzy9Q6Td89vUyao5Xu5KgP8vQ1Pub6RphbZFIZPGWeJsGEPevGAYxcJmewQTVaRkYGGjRoAAC4fv06IiIsP/JOzoHBTRXRCloUlRSJA15l5mdiwaEFSLuXhsz8TGTlZyGrIAt3C+5CI2gwpdMULOuve1yzSFOE9/a/Z/HYWQVZ4rq/hz/q+deDn6cf/OR+8PXwhY+7j1i13yi4EY6kHkFRSRFUxSq80/0dwyN8kEAqlQICAImuzHP2zxFrMXrW6ymuGy+56lzsTdqLXdd3GfqqaEz7uJTu81K6X4uzjR9hLoiztBgHA2X6Zsg8yqaV6othHMwYd1wmIuegUqlQUFAgrpPzY3BTQYpKinA49TCOph5Fen46bufdxu3828jKz8KdgjvIVmWjV0wvDGs2DMWaYtxV3bX6pMn2q9uRdi8NRRpdEBKhiBCfAAF0/Vo0ggYarQY/XfkJvvN9xWABAKCsgouuRPqnQYw7XbpL3c32qTD7atRHw7hZpEwziplAxdvdW3w0mogoKCgILVu2FNfJ+XGcmwpy694tRC6OrLDjVSSbgoFSr/Y0gRg/Hmr8CKitj4gad8jVbzO4ICIiYxznxgE8ZB6oH1Aft/JuiR1c9c0V3u7e8JX7QuGhMAkc9K8Wx6MoJ83cI6Slx77gI6NERFTTMLipIMHewbg+9bqji0FERBVMrVbjxx91M2EPGTIEcrncwSWi8jC4ISIisiItLQ0jR44EACQmJiImJqacPcjR+GgGERERuRTW3BAREVnB6ReqH9bcEBERkUthcENEREQuhcENERGRFVlZWQgICEBAQACysrLK34Ecjn1uKosgABoNUFKiW/Tr1tLKe9WvazS642u1ti/G5TJ+tZSm1erOcz+v+qW8bVsX42sFAInEsEilptulF5kMcHMrf3F3173q88tkpkt5afrj2LpeXrnNLcY/C+OfiaU0/Tnd3Q1L6W13d0NZiMisvLw8KJVKcT00NNTBJaLyMLipKLduAQ0aAIWFji4Jkf3c3QG53P5FHzCVDuCMl/ICO+Ntc++VDiDNnctcIGktEC39nnGQ6+7OYI9M+Pv7ixNn+vv7O7g0ZAsGNxVFJrM9sHFz0/3TLinRvRr/A5fJdP9cPTwAhcLwz1ci0d1M9DcH/Wt5i/FNQs/4H7e1dX15jMtnfG7jNHPb5sppvK0/h/F2edcB6GoljBd9TYW5RV/Tpa/5Km8pLjbsY7wY15pZSitdI2duW79urcyWltI/J2uv+pqe4mLdor+2EsNM7yb0+fLzbfsddnWla70s1YCZq/2ztK3/uy/9N2TLq7V1W7bL+x9h7vfH2np5waK5bePgUyarVgFkUFAQrl275uhikB0cHtysXLkSH374IdLT09GiRQssXboU3bt3t5j/999/x/Tp03Hx4kXUrl0bM2fOxMSJE6uwxBaEhABXrgDJycCdO0BWFpCZCdy+bVgGDQLeeUf3R33rFhAZadpkZOy554D/+z/delER4OkJeHsDgYGAv78u8FEoAB8f4NFHgenTdXkFAfjwQ126j48hj5eX7hihoboaJr2cHF0g5eFhGgCRaxIE00DOOPBRqw2v1paiIsO6uQDRUjCpTzcXkBpvl17Xam0LHPXb5gJUc4Goft0c/XtFRVX7+dQk+iCpdG2bcQ2fuS89lratfTEAzKfZ0vxsrem5
vMVa8zNgvRna1hrO8r4cmksvL6A1954tZTdOl8uBNm2q9nfKiEODm++++w7Tpk3DypUr0a1bN3z++efo378/Ll26hKioqDL5k5KSMGDAAEyYMAHffvstDh8+jFdeeQWhoaEYPny4A67AiEwGNGqkW2wREADs2AGkpwP//ANkZ5u+Gv9S/POP7rWgQLekpZkey7iaVK0G3nzT8nmHDgW2bDFsh4Yavs3L5boAyNNTt96rF/D114a8vXvrjl+6z4a7O9CqFfDuu4a8s2bpagHM/eOqUwcYP96Qd80aIC/P/LfOoCBg2DBD3p9/BnJzy/7RSiS6IK5PH0Peo0d1ec39g5DLga5dDXnPny+bFzDULnXoYMh79appXn0+/avxZ3fjhi6vXun8zZoZgspbtwClsmxevYYNdT8/QBcs5+SYvm+cPyZG97kAukC7dF5j0dG6nweg+/3LztYFw+bUqaP7/QAMv6+W1K5tOI5SqQv6LYmI0H1+gO7nlZlpPa9CoVu/d0/3s7AkLAzQT7CXn6/7ezOmD6A0Gt3fkY+PLjjKzQVSUw3BUumaPf0XhpIS3d9kenrZfnL6df3fkz5Yys42nFNfu6gP4uRy3RcNrVb3t5aTY5rHeN3NTZdfH/zdu2e5v5pUqvt90J83P99wLP2NXp9fItEdW79vYWHZ2lDj8gOWA0trY8Por1GttpyHqq9atXT/0xxFcKBOnToJEydONElr2rSp8NZbb5nNP3PmTKFp06YmaS+99JLQpUsXm8+pVCoFAIJSqbS/wI6i1QpCdrYgXL8uCMePC8LevYLw44+C8N//CsLq1YLw66+GvPn5ghAfLwhPPikI/foJQvfugtC2rSA0ayYIMTGC8PLLhrzFxdYbQgYMMC2Hp6flvI8+apo3JMRy3g4dTPNGRVnO26yZad5mzSznjY42zduhg+W8oaGmeXv0sJzX29s0b//+1n9uxoYPt543P9+Qd/Ro63kzMw15X37Zet7kZEPeN96wnvfiRUPe2bOt5/3zT0PehQut5923z5B3xQrreXfsMORdu9Z63u+/N+T97jvredeuNeTdvt163k8/NeTdt8963kWLDHmPHbOed/ZsQ94LF6znnTHDkDcpyXreV14x5L1923reMWMMefPyrOcdMUIwYS1v6f8RXl6W8z78sCAolYJw966uvEFBlvM2b677X3fsmCAcPSoIERGW80ZHC8KuXYLwyy+CsHu39f8nERGCcOCAIBw8qFuaNLGc189PEBISBOHbb4XMjz4SLlv7Obi76z7nt98WhDff1P2vtZb/xRcFYcIEQRg/XhDq1bOed+RIQXj6aUF46qny8/bpo/v/1LevINSpYz3vQw8JQqdOuv+T4eHW88bE6H5WjRtb/9z0P+OoKEHo2FGoaPbcvx1Wc6NWq3Hy5Em89dZbJulxcXE4cuSI2X3++OMPxMXFmaT17dsXa9asQXFxMdz131SNFBUVocioalnf4z3X+Nt0dSCT6Zq+QkLMv298PcuWWT+WPq8g6L5NFxbqvlHqX/WLQmF63K+/1qXrv8nqmzGKi3Xfpo3zvvqq7tuhuSaByEjTvP3768pR+okqjUZXU2Cct21bIDhYV/bSTwiVLkNMTNlvnfr8gYGmecPDgfr1Dc2EgmB49fQ0zevnp6uVMM5j/Gqc19tbVzumP7dxPn1e45qzwEDzn5kg6Gq3PDx021KpaY1daXl5hnJIJIbaC3MKCkx/J3x9LedVqQx5tVpDDYo5hYWGvCUl1vMWFRnyFhdbz6tWG/Kq1dbLq6+F0Z/DWl6NxpC3sND6z0wQDHlVKut5AUPeggLrn5tUasibn2973rw8XW2wJe7upsc1zlu6hlAuN/0dtvQ7CZT92wgIMNTslab/2eubbKwdNzwcaNzYsB0WZrmGJyICiI013bbUdyw83LRmtXZt4O5d83mDg4EBAwAA6cnJOA9dU0dwYCBkMplpXm9vQ9cAALh+XfeZWPLhh4b1yZOt93Vb
tkx3fACYOdO05r20L77Q1XYDwOzZQEKC5bzr1+v+vwLAwoW6GnRLNm3S1RwDwPLlwIoVlvNu3Ai0bq1br+D7rP6+LRj/D7WkwkMrG6WlpQkAhMOHD5uk/+c//xEaN25sdp9GjRoJ//nPf0zSDh8+LAAQbt26ZXaf2bNnCwC4cOHChQsXLi6wpKamlhtjOLxDsaTUtwZBEMqklZffXLrerFmzMN0ootZqtcjOzkZwcLDV89yP3Nxc1K1bF6mpqfAr75uci6mp115TrxuouddeU68b4LXXxGt3pusWBAH37t1D7dq1y83rsOAmJCQEMpkMGRkZJumZmZkIDw83u09ERITZ/G5ubggODja7j4eHBzz0Vfn/E2Ct+rYC+Pn5OfyXwFFq6rXX1OsGau6119TrBnjtNfHaneW6bR1nyGHP/srlcrRv3x579uwxSd+zZw+6Gj/FYiQ2NrZM/t27d6NDhw5m+9sQERFRzePQgU2mT5+OL7/8El999RUuX76M1157DSkpKeK4NbNmzcLo0aPF/BMnTsSNGzcwffp0XL58GV999RXWrFmDN954w1GXQERERE7GoX1uRo0ahbt372LevHlIT09Hy5YtsXPnTkRHRwMA0tPTkZKSIuaPiYnBzp078dprr+HTTz9F7dq18cknnzh+jJv/8fDwwOzZs8s0g9UENfXaa+p1AzX32mvqdQO89pp47dX1uiWCYMszVURERETVA8fbJyIiIpfC4IaIiIhcCoMbIiIicikMboiIiMilMLipICtXrkRMTAw8PT3Rvn17HDx40NFFqnRz5syBRCIxWSIiIhxdrEpx4MABPP7446hduzYkEgm2bt1q8r4gCJgzZw5q164NLy8vPProo7h48aJjClvByrv2+Pj4Mr8HXbp0cUxhK9D8+fPRsWNH+Pr6IiwsDEOHDsXff/9tkscVP3dbrttVP/NVq1ahdevW4oB1sbGx+Pnnn8X3XfHz1ivv2qvbZ87gpgJ89913mDZtGt5++22cPn0a3bt3R//+/U0eY3dVLVq0QHp6uricP3/e0UWqFPn5+WjTpg1WWJgwbtGiRVi8eDFWrFiB48ePIyIiAo899hju3btXxSWteOVdOwD069fP5Pdg586dVVjCyvH777/j1VdfxdGjR7Fnzx6UlJQgLi4O+UaTHLri527LdQOu+ZnXqVMHCxYswIkTJ3DixAn06tULQ4YMEQMYV/y89cq7dqCafeblzj5F5erUqZMwceJEk7SmTZsKb731loNKVDVmz54ttGnTxtHFqHIAhC1btojbWq1WiIiIEBYsWCCmFRYWCv7+/sJnn33mgBJWntLXLgiCMGbMGGHIkCEOKU9VyszMFAAIv//+uyAINedzL33dglBzPnNBEITAwEDhyy+/rDGftzH9tQtC9fvMWXPzgNRqNU6ePIm4uDiT9Li4OBw5csRBpao6V69eRe3atRETE4OnnnoKiYmJji5SlUtKSkJGRobJ74CHhwceeeSRGvE7AAD79+9HWFgYGjdujAkTJiAzM9PRRapwSqUSABAUFASg5nzupa9bz9U/c41Ggw0bNiA/Px+xsbE15vMGyl67XnX6zB0+K3h1d+fOHWg0mjKTfYaHh5eZ5NPVdO7cGd988w0aN26M27dv49///je6du2KixcvWpzI1BXpP2dzvwM3btxwRJGqVP/+/TFixAhER0cjKSkJ7777Lnr16oWTJ09Wu1FNLREEAdOnT8fDDz+Mli1bAqgZn7u56wZc+zM/f/48YmNjUVhYCIVCgS1btqB58+ZiAOPKn7elaweq32fO4KaCSCQSk21BEMqkuZr+/fuL661atUJsbCwaNGiAr7/+GtOnT3dgyRyjJv4OALppVPRatmyJDh06IDo6Gjt27MCwYcMcWLKKM2nSJJw7dw6HDh0q854rf+6WrtuVP/MmTZrgzJkzyMnJwQ8//IAxY8bg999/F9935c/b0rU3b9682n3mbJZ6QCEhIZDJZGVqaTIz
M8tE+K7Ox8cHrVq1wtWrVx1dlCqlf0KMvwM6tWrVQnR0tMv8HkyePBnbtm3Dvn37UKdOHTHd1T93S9dtjit95nK5HA0bNkSHDh0wf/58tGnTBsuWLXP5zxuwfO3mOPtnzuDmAcnlcrRv3x579uwxSd+zZw+6du3qoFI5RlFRES5fvoxatWo5uihVKiYmBhERESa/A2q1Gr///nuN+x0AgLt37yI1NbXa/x4IgoBJkyZh8+bN+O233xATE2Pyvqt+7uVdtzmu8pmbIwgCioqKXPbztkZ/7eY4/WfuqJ7MrmTDhg2Cu7u7sGbNGuHSpUvCtGnTBB8fHyE5OdnRRatUr7/+urB//34hMTFROHr0qDBo0CDB19fXJa/73r17wunTp4XTp08LAITFixcLp0+fFm7cuCEIgiAsWLBA8Pf3FzZv3iycP39eePrpp4VatWoJubm5Di75g7N27ffu3RNef/114ciRI0JSUpKwb98+ITY2VoiMjKz21/7yyy8L/v7+wv79+4X09HRxKSgoEPO44ude3nW78mc+a9Ys4cCBA0JSUpJw7tw54V//+pcglUqF3bt3C4Lgmp+3nrVrr46fOYObCvLpp58K0dHRglwuF9q1a2fy2KSrGjVqlFCrVi3B3d1dqF27tjBs2DDh4sWLji5Wpdi3b58AoMwyZswYQRB0jwXPnj1biIiIEDw8PIQePXoI58+fd2yhK4i1ay8oKBDi4uKE0NBQwd3dXYiKihLGjBkjpKSkOLrYD8zcNQMQ1q5dK+Zxxc+9vOt25c987Nix4v/x0NBQoXfv3mJgIwiu+XnrWbv26viZSwRBEKqunoiIiIiocrHPDREREbkUBjdERETkUhjcEBERkUthcENEREQuhcENERERuRQGN0RERORSGNwQERGRS2FwQ0QVas6cOXjooYccdv53330XL774otU8jz76KKZNm1Y1BQLQsWNHbN68ucrOR1TTMbghIptJJBKrS3x8PN544w3s3bvXIeW7ffs2li1bhn/9618OOb8l7777Lt566y1otVpHF4WoRmBwQ0Q2S09PF5elS5fCz8/PJG3ZsmVQKBQIDg52SPnWrFmD2NhY1KtXzyHnt2TgwIFQKpX45ZdfHF0UohqBwQ0R2SwiIkJc/P39IZFIyqSVbpaKj4/H0KFD8cEHHyA8PBwBAQGYO3cuSkpKMGPGDAQFBaFOnTr46quvTM6VlpaGUaNGITAwEMHBwRgyZAiSk5Otlm/Dhg0YPHiwSVp+fj5Gjx4NhUKBWrVq4eOPPy6z37fffosOHTrA19cXEREReOaZZ5CZmQlANzNyw4YN8dFHH5nsc+HCBUilUly/fh2ArjkuKioKHh4eqF27NqZMmSLmlclkGDBgABISEsr9GRPRg2NwQ0SV7rfffsOtW7dw4MABLF68GHPmzMGgQYMQGBiIY8eOYeLEiZg4cSJSU1MBAAUFBejZsycUCgUOHDiAQ4cOQaFQoF+/flCr1WbP8c8//+DChQvo0KGDSfqMGTOwb98+bNmyBbt378b+/ftx8uRJkzxqtRrvv/8+zp49i61btyIpKQnx8fEAdE1xY8eOxdq1a032+eqrr9C9e3c0aNAAmzZtwpIlS/D555/j6tWr2Lp1K1q1amWSv1OnTjh48OCD/BiJyFYOnriTiKqptWvXCv7+/mXSZ8+eLbRp00bcHjNmjBAdHS1oNBoxrUmTJkL37t3F7ZKSEsHHx0dISEgQBEEQ1qxZIzRp0kTQarVinqKiIsHLy0v45ZdfzJbn9OnTAgCTmYrv3bsnyOVyYcOGDWLa3bt3BS8vL2Hq1KkWr+3PP/8UAAj37t0TBEEQbt26JchkMuHYsWOCIAiCWq0WQkNDhXXr1gmCIAgff/yx0LhxY0GtVls85o8//ihIpVKTnwMRVQ7W3BBRpWvRogWkUsO/m/DwcJOaDZlMhuDgYLEp6OTJk7h27Rp8fX2hUCigUCgQFBSEwsJCsRmoNJVKBQDw9PQU
065fvw61Wo3Y2FgxLSgoCE2aNDHZ9/Tp0xgyZAiio6Ph6+uLRx99FACQkpICAKhVqxYGDhwoNp1t374dhYWFGDFiBABgxIgRUKlUqF+/PiZMmIAtW7agpKTE5BxeXl7QarUoKiqy/QdHRPeFwQ0RVTp3d3eTbYlEYjZN/zSRVqtF+/btcebMGZPlypUreOaZZ8yeIyQkBICueUpPEIRyy5afn4+4uDgoFAp8++23OH78OLZs2QIAJk1g48ePx4YNG6BSqbB27VqMGjUK3t7eAIC6devi77//xqeffgovLy+88sor6NGjB4qLi8X9s7Oz4e3tDS8vr3LLREQPhsENETmddu3a4erVqwgLC0PDhg1NFn9/f7P7NGjQAH5+frh06ZKY1rBhQ7i7u+Po0aNi2j///IMrV66I23/99Rfu3LmDBQsWoHv37mjatKlYg2RswIAB8PHxwapVq/Dzzz9j7NixJu97eXlh8ODB+OSTT7B//3788ccfOH/+vPj+hQsX0K5du/v+mRCR7RjcEJHTefbZZxESEoIhQ4bg4MGDSEpKwu+//46pU6fi5s2bZveRSqXo06cPDh06JKYpFAqMGzcOM2bMwN69e3HhwgXEx8ebNJFFRUVBLpdj+fLlSExMxLZt2/D++++XOb5MJkN8fDxmzZqFhg0bmjR1rVu3DmvWrMGFCxeQmJiI//u//4OXlxeio6PFPAcPHkRcXFxF/HiIqBwMbojI6Xh7e+PAgQOIiorCsGHD0KxZM4wdOxYqlQp+fn4W93vxxRexYcMGk8HyPvzwQ/To0QODBw9Gnz598PDDD6N9+/bi+6GhoVi3bh02btyI5s2bY8GCBWUe+9YbN24c1Gp1mVqbgIAAfPHFF+jWrRtat26NvXv34qeffhLH+0lLS8ORI0fwwgsvPMiPhYhsJBFsaZQmIqoGBEFAly5dMG3aNDz99NMVfvzDhw/j0Ucfxc2bNxEeHm7zfjNmzIBSqcTq1asrvExEVBZrbojIZUgkEqxevbrMk0oPqqioCNeuXcO7776LkSNH2hXYAEBYWJjZpi4iqhysuSEiKse6deswbtw4PPTQQ9i2bRsiIyMdXSQisoLBDREREbkUNksRERGRS2FwQ0RERC6FwQ0RERG5FAY3RERE5FIY3BAREZFLYXBDRERELoXBDREREbkUBjdERETkUhjcEBERkUv5fzjI0r/NqQ95AAAAAElFTkSuQmCC",
455 | "text/plain": [
456 | ""
457 | ]
458 | },
459 | "metadata": {},
460 | "output_type": "display_data"
461 | }
462 | ],
463 | "source": [
464 | "fig,ax=plt.subplots()\n",
465 | "clr=[\"b\",\"r\",\"g\"]\n",
466 | "\n",
467 | "for n,var in enumerate(seir.outvar):\n",
468 | " ax.plot(test_data[:nt,n], label=\"%s (obs)\" % var, color=clr[n])\n",
469 | " ax.plot(y_predict.detach()[:,n], label=\"%s (sim)\" % var, \n",
470 | " linestyle=\"dashed\", color=clr[n])\n",
471 | " \n",
472 | " ax.set_ylim(0, 1.00)\n",
473 | " ax.vlines(nt_train, 0, 1.00, color='k', linestyle='dotted')\n",
474 | " ax.legend(loc='center left')\n",
475 | " ax.set_xlabel(\"Time (days)\")\n",
476 | " ax.set_ylabel(\"Population Fraction\")\n",
477 | "\n",
478 | "plt.show()"
479 | ]
480 | },
481 | {
482 | "cell_type": "markdown",
483 | "metadata": {},
484 | "source": [
485 | "The Figure above shows the observed (solid) against the simulated (dashed) compartment fractions. The vertical, dotted line marks the end of the training window; values beyond 30 days are predicted from the minimum misfit model."
486 | ]
487 | },
488 | {
489 | "cell_type": "code",
490 | "execution_count": null,
491 | "metadata": {},
492 | "outputs": [],
493 | "source": []
494 | }
495 | ],
496 | "metadata": {
497 | "kernelspec": {
498 | "display_name": "Python 3",
499 | "language": "python",
500 | "name": "python3"
501 | },
502 | "language_info": {
503 | "codemirror_mode": {
504 | "name": "ipython",
505 | "version": 3
506 | },
507 | "file_extension": ".py",
508 | "mimetype": "text/x-python",
509 | "name": "python",
510 | "nbconvert_exporter": "python",
511 | "pygments_lexer": "ipython3",
512 | "version": "3.9.6"
513 | }
514 | },
515 | "nbformat": 4,
516 | "nbformat_minor": 4
517 | }
518 |
--------------------------------------------------------------------------------
/examples/ode/SEIR/SIR_data_SD_county.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/examples/ode/SEIR/SIR_data_SD_county.pt
--------------------------------------------------------------------------------
/examples/quantile-regression/README.md:
--------------------------------------------------------------------------------
1 | # LSTM with Quantile Regression
2 |
3 | This notebook provides an example of using a quantile loss function with an LSTM network.
4 |
5 | Assuming TorchTS is already installed, the additional dependencies can be installed with `pip`:
6 |
7 | ```bash
8 | pip install -r requirements.txt
9 | ```
10 |
--------------------------------------------------------------------------------
/examples/quantile-regression/requirements.txt:
--------------------------------------------------------------------------------
1 | matplotlib
2 | ipykernel
3 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "torchts"
3 | version = "0.1.1"
4 | description = "Time series forecasting with PyTorch"
5 | authors = [
6 | "TorchTS Team "
7 | ]
8 | license = "MIT"
9 | readme = "README.md"
10 | homepage = "https://rose-stl-lab.github.io/torchTS"
11 | repository = "https://github.com/Rose-STL-Lab/torchTS"
12 | documentation = "https://rose-stl-lab.github.io/torchTS/docs"
13 | keywords = ["deep-learning", "machine-learning", "time-series", "pytorch"]
14 | classifiers = [
15 | "Development Status :: 3 - Alpha",
16 | "Intended Audience :: Developers",
17 | "Intended Audience :: Science/Research",
18 | "Topic :: Scientific/Engineering",
19 | "Topic :: Software Development :: Libraries :: Python Modules"
20 | ]
21 |
22 | [tool.poetry.dependencies]
23 | python = ">=3.8,<3.10"
24 | torch = "^1.4"
25 | pytorch-lightning = "^1.2"
26 | scipy = "^1.7.1"
27 |
28 | [tool.poetry.group.test.dependencies]
29 | pytest = "^7.0.1"
30 | pytest-cov = "^3.0.0"
31 | pytest-mock = "^3.7.0"
32 | pre-commit = "^2.17.0"
33 |
34 | [tool.poetry.group.docs.dependencies]
35 | sphinx = "^4.4.0"
36 | asteroid-sphinx-theme = "^0.0.3"
37 |
38 | [build-system]
39 | requires = ["poetry-core>=1.0.0"]
40 | build-backend = "poetry.core.masonry.api"
41 |
42 | [tool.black]
43 | line-length = 88
44 |
45 | [tool.isort]
46 | profile = "black"
47 |
48 | [tool.flakeheaven]
49 | exclude = ["*.ipynb"]
50 | format = "grouped"
51 | max_complexity = 10
52 | max_line_length = 88
53 | show_source = true
54 |
55 | [tool.flakeheaven.plugins]
56 | pycodestyle = ["+*", "-E203", "-E741"]
57 | pyflakes = ["+*"]
58 | mccabe = ["+*"]
59 | flake8-bugbear = ["+*"]
60 | flake8-comprehensions = ["+*"]
61 |
--------------------------------------------------------------------------------
/scripts/build_docs.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Build the Sphinx API documentation and copy it into the Docusaurus
# static folder. Run this script from the project root:
#   ./scripts/build_docs.sh

# Abort immediately if any command fails so a broken Sphinx build
# doesn't get reported as a success.
set -e

echo "----------------------------------------"
echo "Generating API documentation with Sphinx"
echo "----------------------------------------"

poetry run make -C docs html

echo "-----------------------------------------"
echo "Moving Sphinx documentation to Docusaurus"
echo "-----------------------------------------"

SPHINX_HTML_DIR="website/static/api/"
cp -R "./docs/build/html/" "./${SPHINX_HTML_DIR}"
echo "Successfully moved Sphinx docs to ${SPHINX_HTML_DIR}"
17 |
--------------------------------------------------------------------------------
/tests/nn/test_loss.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import torch
3 |
4 | from torchts.nn.loss import masked_mae_loss, mis_loss, quantile_loss
5 |
6 |
@pytest.fixture
def y_true():
    """Ground-truth values shared by the loss tests."""
    return torch.tensor([1, 2, 3])
11 |
12 |
@pytest.fixture
def y_pred():
    """Predictions slightly off from y_true (|error| = 0.1 everywhere)."""
    return torch.tensor([1.1, 1.9, 3.1])
17 |
18 |
def test_masked_mae_loss(y_true, y_pred):
    """Test masked_mae_loss()"""
    # All targets are nonzero, so the mask is all ones and the result is
    # the plain mean absolute error of 0.1.
    assert masked_mae_loss(y_pred, y_true) == pytest.approx(0.1)
23 |
24 |
@pytest.mark.parametrize(
    "lower, upper, interval, expected_loss",
    [
        ([1, 2, 3], [1.1, 2.1, 3.1], 0.8, 0.1),
        ([0.9, 1.9, 2.9], [1.1, 2.1, 3.1], 0.8, 0.2),
        ([0.9, 1.9, 2.9], [1.1, 2.1, 3.1], 0.95, 0.2),
        ([0.7, 1.9, 2.9], [0.9, 2.1, 3.1], 0.8, 1.6 / 3),
        ([0.7, 1.9, 2.9], [0.9, 2.1, 3.1], 0.95, 4.6 / 3),
        ([0.9, 1.9, 3.1], [1.1, 2.1, 3.3], 0.8, 1.6 / 3),
        ([0.9, 1.9, 3.1], [1.1, 2.1, 3.3], 0.95, 4.6 / 3),
    ],
)
def test_mis_loss(y_true, lower, upper, interval, expected_loss):
    """Test mis_loss()"""
    # mis_loss expects the lower/upper bounds interleaved column-wise, so
    # stack the two bound lists and transpose to shape (n, 2).
    y_true = y_true.reshape(-1, 1)
    y_pred = torch.transpose(torch.tensor([lower, upper]), 0, 1)
    loss = mis_loss(y_pred, y_true, interval)
    assert loss == pytest.approx(expected_loss)
43 |
44 |
@pytest.mark.parametrize(
    "quantile, expected_loss", [(0.05, 0.065), (0.5, 0.05), (0.95, 0.035)]
)
def test_quantile_loss(y_true, y_pred, quantile, expected_loss):
    """Test quantile_loss()"""
    # Pinball loss weights under- vs over-prediction asymmetrically, so
    # the expected value changes with the requested quantile.
    assert quantile_loss(y_pred, y_true, quantile) == pytest.approx(expected_loss)
52 |
--------------------------------------------------------------------------------
/tests/nn/test_ode.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | import torch
4 |
5 | from torchts.nn.models.ode import ODESolver
6 | from torchts.utils.data import generate_ode_dataset
7 |
8 |
@pytest.fixture
def euler_model():
    """ODESolver fixture for x'(t) = alpha * x solved with Euler's method."""
    ode = {"x": lambda prev_val, coeffs: coeffs["alpha"] * prev_val["x"]}
    model = ODESolver(
        ode,
        {"x": 1.0},
        {"alpha": 2.0},
        0.1,
        solver="euler",
        optimizer=torch.optim.Adam,
        optimizer_args={"lr": 0.5},
    )
    # Return the model together with a two-step prediction from x0 = 1.
    return model, model(2)
23 |
24 |
@pytest.fixture
def rk4_model():
    """ODESolver fixture for x'(t) = x solved with 4th-order Runge-Kutta."""
    ode = {"x": lambda prev_val, coeffs: prev_val["x"]}
    model = ODESolver(ode, {"x": 1.0}, {}, 0.1, solver="rk4", optimizer=None)
    # Return the model together with a two-step prediction from x0 = 1.
    return model, model(2)
38 |
39 |
def test_euler(euler_model):
    """Test Euler's Method"""
    model, preds = euler_model
    # The fixture requested solver="euler", so the step function must be
    # the Euler one and the fitted coefficients must be the initial ones.
    assert model.step_solver == model._euler_step
    assert model.get_coeffs() == {"alpha": 2.0}
    # One Euler step: x1 = x0 + dt * alpha * x0 = 1 + 0.1 * 2 * 1 = 1.2
    assert preds[1, 0].item() == pytest.approx(1.2, abs=1e-6)
46 |
47 |
def test_rk4(rk4_model):
    """Test 4th order Runge-Kutta Method"""
    model, preds = rk4_model
    assert model.step_solver == model._runge_kutta_4_step
    # x' = x has exact solution e^t; RK4 matches e^0.1 to within 1e-6
    # after a single step of size 0.1.
    assert preds[1, 0].item() == pytest.approx(np.exp(0.1), abs=1e-6)
53 |
54 |
def test_generate_ode_dataset(euler_model):
    """Test the generate_ode_dataset function"""
    model, preds = euler_model
    x1, x2 = generate_ode_dataset(preds)
    # torch.equal compares whole tensors; the previous `assert x1 == ...`
    # only worked because the comparison happened to yield a one-element
    # boolean tensor, and would raise "Boolean value of Tensor ... is
    # ambiguous" for any larger prediction.
    assert torch.equal(x1, preds[:-1, :])
    assert torch.equal(x2, preds[1:, :])
61 |
62 |
def test_value_errors():
    """Detects ValueError in the __init__ function"""
    ode = {"x": lambda prev_val, coeffs: coeffs["alpha"] * prev_val["x"]}

    # An unknown solver name must be rejected.
    with pytest.raises(ValueError, match="Unrecognized solver .*"):
        ODESolver(ode, {"x": 1.0}, {"alpha": 2.0}, 0.1, solver="a", optimizer=None)

    # The variable keys of the ODE and its initial values must match.
    with pytest.raises(ValueError, match="Inconsistent keys in ode and init_vars"):
        ODESolver(
            ode,
            {"x": 1.0, "y": 2.0},
            {"alpha": 2.0},
            0.1,
            solver="euler",
            optimizer=None,
        )
84 |
85 |
def test_step_backward(euler_model):
    """Test the step and backward function"""
    torch.manual_seed(0)
    batch = torch.Tensor([[1.0]]), torch.Tensor([[1.1]])
    model, _ = euler_model
    loss = model._step(batch, 0, 0)
    # abs() added: without it the assertion was vacuously true whenever the
    # loss was smaller than expected, not only when it was close to the
    # expected squared error (1.2 - 1.1)^2.
    assert abs(loss.item() - (1.2 - 1.1) ** 2) < 1e-6
    model.backward(loss, None, 0)
    model.optimizer(model.parameters()).step()
    coeffs = model.get_coeffs()
    # One optimizer step should move alpha down from its initial 2.0.
    assert coeffs["alpha"] < 2
97 |
98 |
def test_fit(euler_model):
    """Test the fit function"""
    torch.manual_seed(0)
    model, _ = euler_model
    # A single epoch on one (x, y) pair with y below the Euler prediction
    # should pull alpha down from its initial 2.0.
    model.fit(torch.Tensor([[1.0]]), torch.Tensor([[1.1]]), max_epochs=1, batch_size=1)
    coeffs = model.get_coeffs()
    assert coeffs["alpha"] < 2
106 |
--------------------------------------------------------------------------------
/tests/test_model.py:
--------------------------------------------------------------------------------
1 | from functools import partial
2 |
3 | import pytest
4 | import torch
5 | from torch import nn, optim
6 | from torch.utils.data import DataLoader, TensorDataset
7 |
8 | from torchts.nn.model import TimeSeriesModel
9 |
10 |
class LinearModel(TimeSeriesModel):
    # Minimal concrete TimeSeriesModel computing y = slope * x + intercept.
    def __init__(self, slope, intercept, **kwargs):
        super().__init__(**kwargs)
        self.line = nn.Linear(1, 1)
        # Replace the random initialization with deterministic weight/bias
        # so the tests below can assert exact outputs.
        self.line.weight = nn.Parameter(slope * torch.ones_like(self.line.weight))
        self.line.bias = nn.Parameter(intercept * torch.ones_like(self.line.bias))

    def forward(self, x, y=None, batches_seen=None):
        # y and batches_seen mirror the TimeSeriesModel.forward signature
        # but are unused by this deterministic model.
        return self.line(x)
20 |
21 |
def test_forward():
    """forward() and predict() reproduce the configured line exactly."""
    slope, intercept = 2, -1
    model = LinearModel(
        slope, intercept, optimizer=optim.SGD, optimizer_args={"lr": 0.01}
    )

    x = torch.Tensor([-1, 0, 1]).reshape(-1, 1)
    expected = slope * x + intercept

    assert (model(x) == expected).all()
    assert (model.predict(x) == expected).all()
34 |
35 |
def test_step(mocker):
    """_step() forwards predictions and criterion_args to the criterion."""
    slope, intercept, quantile = 2, -1, 0.5
    quantile_loss = mocker.MagicMock()
    model = LinearModel(
        slope,
        intercept,
        optimizer=optim.SGD,
        criterion=quantile_loss,
        criterion_args={"quantile": quantile},
    )

    x = torch.Tensor([0]).reshape(-1, 1)
    y = slope * x + intercept
    loader = DataLoader(TensorDataset(x, y), batch_size=1, shuffle=False)

    for i, batch in enumerate(loader):
        model._step(batch, i, len(loader))
        _, targets = batch
        # The model is exact on this data, so prediction == target and the
        # mocked criterion must have been invoked with (targets, targets).
        quantile_loss.assert_called_once_with(targets, targets, quantile=quantile)
58 |
59 |
def test_train():
    """fit() converges the line to the data-generating parameters."""
    torch.manual_seed(0)

    # Start away from the true parameters (slope 1, intercept 0).
    slope_init, intercept_init = 2, -1
    model = LinearModel(slope_init, intercept_init, optimizer=partial(optim.SGD, lr=0.1))

    slope_true, intercept_true = 1, 0
    x = torch.rand(1000, 1)
    y = slope_true * x + intercept_true

    model.fit(x, y, max_epochs=100)

    tol = 1e-4
    assert pytest.approx(model.line.weight.detach(), abs=tol) == slope_true
    assert pytest.approx(model.line.bias.detach(), abs=tol) == intercept_true
80 |
--------------------------------------------------------------------------------
/tests/test_sliding_window.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import torch
3 |
4 | from torchts.utils.data import sliding_window
5 |
6 |
@pytest.fixture
def tensor():
    """Integer tensor [0, 1, ..., 9] used as the base series."""
    return torch.IntTensor(range(10))
11 |
12 |
@pytest.mark.parametrize("lag", [2, 5, [1, 2, 3], {1, 2, 3}, [1, 3, 5]])
@pytest.mark.parametrize("horizon", [1, 2])
def test_shape(tensor, lag, horizon):
    """x/y shapes follow from the lag specification and horizon."""
    x, y = sliding_window(tensor, lag, horizon=horizon)

    # An int lag means "use the last `lag` consecutive values"; a
    # collection lists the individual lag offsets explicitly.
    if isinstance(lag, int):
        expected_rows = len(tensor) - lag - horizon + 1
        expected_cols = lag
    else:
        expected_rows = len(tensor) - max(lag) - horizon + 1
        expected_cols = len(lag)

    assert x.shape == (expected_rows, expected_cols)
    assert y.shape == (expected_rows,)
31 |
32 |
@pytest.mark.parametrize("lag", [2, 5, [1, 2, 3], {1, 2, 3}, [1, 3, 5]])
@pytest.mark.parametrize("horizon", [1, 2])
def test_value(tensor, lag, horizon):
    """Each window row contains the expected lagged values."""
    x, y = sliding_window(tensor, lag, horizon=horizon)

    if isinstance(lag, int):
        for i in range(x.shape[0]):
            j = lag + i
            assert (x[i, :] == tensor[i:j]).all()
    else:
        for i in range(x.shape[0]):
            # Comprehension variable renamed from `x`, which shadowed the
            # window tensor `x` being asserted on the same line.
            assert (x[i, :] == tensor[[offset - 1 + i for offset in lag]]).all()

    # The target always sits `horizon` steps after the last window value.
    assert (y - x[:, -1] == horizon).all()
47 |
48 |
@pytest.mark.parametrize("lag", ["1", 1.0, ["1"], [1, "2", 3], {1, 2.0, 3}])
def test_non_int(tensor, lag):
    """Non-integer lag specifications are rejected with a TypeError."""
    with pytest.raises(TypeError):
        sliding_window(tensor, lag)
53 |
54 |
@pytest.mark.parametrize("lag", [-1, 0, [0, 1, 2], {0, 1, 2}, [-1, 1, 2]])
def test_non_positive(tensor, lag):
    """Zero or negative lags are rejected with a ValueError."""
    with pytest.raises(ValueError):
        sliding_window(tensor, lag)
59 |
--------------------------------------------------------------------------------
/torchts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/torchts/__init__.py
--------------------------------------------------------------------------------
/torchts/nn/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/torchts/nn/__init__.py
--------------------------------------------------------------------------------
/torchts/nn/graph.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | from torchts.utils import graph
5 | from torchts.utils.data import concat
6 |
7 |
class DCGRUCell(nn.Module):
    """Diffusion-convolutional GRU cell (DCRNN building block).

    A GRU cell over graph-structured data: the gate and candidate
    transformations are graph diffusion convolutions built from powers of
    the chosen support matrices instead of dense linear layers.
    """

    def __init__(
        self,
        num_units,
        adj_mx,
        max_diffusion_step,
        num_nodes,
        input_dim,
        activation=torch.tanh,
        filter_type="laplacian",
        use_gc_for_ru=True,
    ):
        """
        Args:
            num_units: Hidden-state size per node.
            adj_mx: Graph adjacency matrix.
            max_diffusion_step: Number of diffusion steps K per support.
            num_nodes: Number of graph nodes.
            input_dim: Input feature dimension per node.
            activation: Nonlinearity applied to the candidate state
                (skipped when None).
            filter_type: "laplacian", "random_walk" or "dual_random_walk";
                any other value silently falls back to the scaled Laplacian.
            use_gc_for_ru: If True, compute the reset/update gates with the
                graph convolution; otherwise use the dense `_fc` transform.
        """
        super().__init__()
        self._activation = activation
        self._num_nodes = num_nodes
        self._num_units = num_units
        self._max_diffusion_step = max_diffusion_step
        self._use_gc_for_ru = use_gc_for_ru

        supports = []

        # Build the support matrices that define the diffusion operator.
        if filter_type == "laplacian":
            supports.append(graph.scaled_laplacian(adj_mx, lambda_max=None))
        elif filter_type == "random_walk":
            supports.append(graph.random_walk(adj_mx).T)
        elif filter_type == "dual_random_walk":
            supports.append(graph.random_walk(adj_mx).T)
            supports.append(graph.reverse_random_walk(adj_mx).T)
        else:
            supports.append(graph.scaled_laplacian(adj_mx))

        supports = [graph.sparse_matrix(s) for s in supports]
        supports = torch.cat([s.unsqueeze(dim=0) for s in supports])
        # Buffer (not Parameter): moves with the module across devices but
        # is not trained.
        self.register_buffer("_supports", supports)

        # Each support contributes max_diffusion_step matrices, plus the
        # identity (0th power) term.
        num_matrices = len(supports) * self._max_diffusion_step + 1
        input_size_fc = self._num_units + input_dim
        input_size_gconv = input_size_fc * num_matrices
        input_size_ru = input_size_gconv if self._use_gc_for_ru else input_size_fc

        # Reset/update gate parameters; biases start at 1.0 so both gates
        # are initially open.
        output_size = 2 * self._num_units
        self._ru_weights = nn.Parameter(torch.empty(input_size_ru, output_size))
        self._ru_biases = nn.Parameter(torch.empty(output_size))
        nn.init.xavier_normal_(self._ru_weights, gain=1.0)
        nn.init.constant_(self._ru_biases, val=1.0)

        # Candidate-state parameters.
        output_size = self._num_units
        self._gconv_weights = nn.Parameter(torch.empty(input_size_gconv, output_size))
        self._gconv_biases = nn.Parameter(torch.empty(output_size))
        nn.init.xavier_normal_(self._gconv_weights, gain=1.0)
        nn.init.constant_(self._gconv_biases, val=0.0)

    def forward(self, inputs, hx):
        """One GRU step; returns the new hidden state (flattened per node)."""
        fn = self._gconv if self._use_gc_for_ru else self._fc
        output_size = 2 * self._num_units
        # Reset (r) and update (u) gates are computed jointly, then split.
        value = torch.sigmoid(fn(inputs, hx, output_size, bias_start=1.0, reset=True))
        value = torch.reshape(value, (-1, self._num_nodes, output_size))
        r, u = torch.split(tensor=value, split_size_or_sections=self._num_units, dim=-1)
        r = torch.reshape(r, (-1, self._num_nodes * self._num_units))
        u = torch.reshape(u, (-1, self._num_nodes * self._num_units))
        # Candidate state from the reset-gated previous state.
        c = self._gconv(inputs, r * hx, self._num_units)

        if self._activation is not None:
            c = self._activation(c)

        # Standard GRU blend of previous state and candidate.
        return u * hx + (1.0 - u) * c

    def _fc(self, inputs, state, output_size, bias_start=0.0, reset=True):
        # Dense (non-graph) gate transform.
        # NOTE(review): output_size, bias_start and reset are accepted only
        # for signature parity with _gconv and are unused here.
        batch_size = inputs.shape[0]
        shape = (batch_size * self._num_nodes, -1)
        inputs = torch.reshape(inputs, shape)
        state = torch.reshape(state, shape)
        x = torch.cat([inputs, state], dim=-1)

        return torch.matmul(x, self._ru_weights) + self._ru_biases

    def _gconv(self, inputs, state, output_size, bias_start=0.0, reset=False):
        # Graph diffusion convolution over the concatenated [inputs, state].
        # NOTE(review): bias_start is unused; the bias comes from whichever
        # parameter set `reset` selects below.
        batch_size = inputs.shape[0]
        shape = (batch_size, self._num_nodes, -1)
        inputs = torch.reshape(inputs, shape)
        state = torch.reshape(state, shape)
        x = torch.cat([inputs, state], dim=2)
        input_size = x.size(2)

        # x0: (num_nodes, input_size * batch_size) — the identity term.
        x0 = x.permute(1, 2, 0)
        x0 = torch.reshape(x0, shape=[self._num_nodes, input_size * batch_size])
        x = torch.unsqueeze(x0, 0)

        if self._max_diffusion_step > 0:
            for support in self._supports:
                x1 = torch.sparse.mm(support, x0)
                x = concat(x, x1)

                # Second-order recurrence: x_k = 2 * S @ x_{k-1} - x_{k-2}.
                # NOTE(review): x0/x1 are not reset between supports, so for
                # multi-support filters later supports recur from the previous
                # support's last power — confirm this is intended.
                for _ in range(2, self._max_diffusion_step + 1):
                    x2 = 2 * torch.sparse.mm(support, x1) - x0
                    x = concat(x, x2)
                    x1, x0 = x2, x1

        num_matrices = len(self._supports) * self._max_diffusion_step + 1
        x = torch.reshape(
            x, shape=[num_matrices, self._num_nodes, input_size, batch_size]
        )
        x = x.permute(3, 1, 2, 0)
        x = torch.reshape(
            x, shape=[batch_size * self._num_nodes, input_size * num_matrices]
        )

        # Gate (reset=True) vs candidate (reset=False) parameter set.
        if reset:
            weights, biases = self._ru_weights, self._ru_biases
        else:
            weights, biases = self._gconv_weights, self._gconv_biases

        x = torch.matmul(x, weights) + biases

        return torch.reshape(x, [batch_size, self._num_nodes * output_size])
123 |
124 |
class DCGRU(nn.Module):
    """Stack of DCGRUCell layers applied at each time step.

    The first layer consumes ``input_dim`` features per node; subsequent
    layers consume the previous layer's ``num_units`` output.
    """

    def __init__(
        self,
        num_layers,
        num_units,
        adj_mx,
        max_diffusion_step,
        num_nodes,
        input_dim,
        activation=torch.tanh,
        filter_type="laplacian",
        use_gc_for_ru=True,
    ):
        super().__init__()
        self.layers = nn.ModuleList(
            DCGRUCell(
                num_units,
                adj_mx,
                max_diffusion_step,
                num_nodes,
                input_dim if i == 0 else num_units,
                # Bug fix: `activation` was accepted but never forwarded,
                # so a custom activation was silently ignored (cells always
                # used their own default). Default remains torch.tanh.
                activation=activation,
                filter_type=filter_type,
                use_gc_for_ru=use_gc_for_ru,
            )
            for i in range(num_layers)
        )

    def forward(self, inputs, hidden_state):
        """Run one step through all layers.

        Args:
            inputs: Input features for this time step.
            hidden_state: Per-layer hidden states, indexed by layer.

        Returns:
            Tuple of (last layer's output, stacked new hidden states).
        """
        hidden_states = []
        output = inputs

        for i, layer in enumerate(self.layers):
            output = layer(output, hidden_state[i])
            hidden_states.append(output)

        return output, torch.stack(hidden_states)
--------------------------------------------------------------------------------
/torchts/nn/loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 |
def masked_mae_loss(y_pred, y_true):
    """Mean absolute error that ignores entries whose target is zero.

    Zero targets are treated as missing: they contribute nothing, and the
    remaining entries are re-weighted so the result stays comparable to an
    unmasked mean.

    Args:
        y_pred (torch.Tensor): Predicted values
        y_true (torch.Tensor): True values

    Returns:
        torch.Tensor: Scalar loss
    """
    # Weight 1 for observed entries, 0 for missing (zero) targets,
    # normalized so the weights average to 1.
    weights = (y_true != 0).float()
    weights = weights / weights.mean()

    abs_err = torch.abs(y_pred - y_true) * weights
    # If every target is zero, weights.mean() is 0 and the division above
    # yields NaNs; zero them out so the loss stays well defined.
    abs_err = torch.where(torch.isnan(abs_err), torch.zeros_like(abs_err), abs_err)

    return abs_err.mean()
22 |
23 |
def mis_loss(
    y_pred: torch.tensor, y_true: torch.tensor, interval: float
) -> torch.tensor:
    """Calculate the mean interval score (MIS) loss.

    The prediction holds lower bounds in its even columns and upper bounds
    in its odd columns. The score is the interval width plus a 2/alpha
    penalty for each side the true value escapes.

    Args:
        y_pred (torch.tensor): Predicted lower/upper bounds (interleaved columns)
        y_true (torch.tensor): True values
        interval (float): confidence interval (e.g. 0.95 for 95% confidence interval)

    Returns:
        torch.tensor: output losses
    """
    alpha = 1 - interval
    penalty = 2 / alpha

    lower_bound = y_pred[:, 0::2]
    upper_bound = y_pred[:, 1::2]

    # Base score is the interval width; penalties apply only when the true
    # value falls below the lower bound ...
    score = upper_bound - lower_bound
    score = torch.max(score, score + penalty * (lower_bound - y_true))
    # ... or above the upper bound.
    score = torch.max(score, score + penalty * (y_true - upper_bound))

    return torch.mean(score)
47 |
48 |
def quantile_loss(y_pred: torch.tensor, y_true: torch.tensor, quantile: float) -> float:
    """Calculate pinball (quantile) loss.

    Under-predictions are weighted by `quantile` and over-predictions by
    `1 - quantile`, so minimizing the loss estimates the requested quantile.

    Args:
        y_pred (torch.tensor): Predicted values
        y_true (torch.tensor): True values
        quantile (float): quantile (e.g. 0.5 for median)

    Returns:
        float: output losses
    """
    residual = y_true - y_pred
    pinball = torch.max(quantile * residual, (quantile - 1) * residual)
    return torch.mean(pinball)
64 |
--------------------------------------------------------------------------------
/torchts/nn/model.py:
--------------------------------------------------------------------------------
1 | from abc import abstractmethod
2 | from functools import partial
3 |
4 | import torch.nn.functional as F
5 | from pytorch_lightning import LightningModule, Trainer
6 | from torch.utils.data import DataLoader, TensorDataset
7 |
8 |
class TimeSeriesModel(LightningModule):
    """Base class for all TorchTS models.

    Args:
        optimizer (torch.optim.Optimizer): Optimizer class (instantiated in
            ``configure_optimizers`` with this model's parameters)
        optimizer_args (dict): Arguments for the optimizer
        criterion: Loss function
        criterion_args (dict): Arguments for the loss function
        scheduler (torch.optim.lr_scheduler): Learning rate scheduler class
        scheduler_args (dict): Arguments for the scheduler
        scaler (torchts.utils.scaler.Scaler): Scaler
    """

    def __init__(
        self,
        optimizer,
        optimizer_args=None,
        criterion=F.mse_loss,
        criterion_args=None,
        scheduler=None,
        scheduler_args=None,
        scaler=None,
    ):
        super().__init__()
        self.criterion = criterion
        self.criterion_args = criterion_args
        self.scaler = scaler

        # Pre-bind keyword arguments now so configure_optimizers only has to
        # supply the model parameters (or the optimizer, for the scheduler).
        if optimizer_args is not None:
            self.optimizer = partial(optimizer, **optimizer_args)
        else:
            self.optimizer = optimizer

        # A scheduler given without scheduler_args is stored unbound.
        if scheduler is not None and scheduler_args is not None:
            self.scheduler = partial(scheduler, **scheduler_args)
        else:
            self.scheduler = scheduler

    def fit(self, x, y, max_epochs=10, batch_size=128):
        """Fits model to the given data.

        Args:
            x (torch.Tensor): Input data
            y (torch.Tensor): Output data
            max_epochs (int): Number of training epochs
            batch_size (int): Batch size for torch.utils.data.DataLoader
        """
        dataset = TensorDataset(x, y)
        loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
        trainer = Trainer(max_epochs=max_epochs)
        trainer.fit(self, loader)

    def prepare_batch(self, batch):
        # Hook for subclasses to reshape/permute a raw batch; identity here.
        return batch

    def _step(self, batch, batch_idx, num_batches):
        """Computes the loss for one batch (shared by train/val/test steps).

        Args:
            batch: Output of the torch.utils.data.DataLoader
            batch_idx: Integer displaying index of this batch
            num_batches: Number of batches per epoch, used to compute the
                cumulative batch count during training

        Returns: loss for the batch
        """
        x, y = self.prepare_batch(batch)

        # During training batches_seen is cumulative across epochs (consumed
        # by models with scheduled sampling, e.g. DCRNN).
        if self.training:
            batches_seen = batch_idx + self.current_epoch * num_batches
        else:
            batches_seen = batch_idx

        pred = self(x, y, batches_seen)

        # Compute the loss in the original (unscaled) units.
        if self.scaler is not None:
            y = self.scaler.inverse_transform(y)
            pred = self.scaler.inverse_transform(pred)

        if self.criterion_args is not None:
            loss = self.criterion(pred, y, **self.criterion_args)
        else:
            loss = self.criterion(pred, y)

        return loss

    def training_step(self, batch, batch_idx):
        """Trains model for one step.

        Args:
            batch (torch.Tensor): Output of the torch.utils.data.DataLoader
            batch_idx (int): Integer displaying index of this batch
        """
        # NOTE(review): len(trainer.train_dataloader) assumes a sized loader —
        # confirm against the installed pytorch-lightning version.
        train_loss = self._step(batch, batch_idx, len(self.trainer.train_dataloader))
        self.log(
            "train_loss",
            train_loss,
            on_step=True,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        return train_loss

    def validation_step(self, batch, batch_idx):
        """Validates model for one step.

        Args:
            batch (torch.Tensor): Output of the torch.utils.data.DataLoader
            batch_idx (int): Integer displaying index of this batch
        """
        val_loss = self._step(batch, batch_idx, len(self.trainer.val_dataloader))
        self.log("val_loss", val_loss)
        return val_loss

    def test_step(self, batch, batch_idx):
        """Tests model for one step.

        Args:
            batch (torch.Tensor): Output of the torch.utils.data.DataLoader
            batch_idx (int): Integer displaying index of this batch
        """
        test_loss = self._step(batch, batch_idx, len(self.trainer.test_dataloader))
        self.log("test_loss", test_loss)
        return test_loss

    @abstractmethod
    def forward(self, x, y=None, batches_seen=None):
        """Forward pass.

        Args:
            x (torch.Tensor): Input data

        Returns:
            torch.Tensor: Predicted data
        """

    def predict(self, x):
        """Runs model inference.

        Args:
            x (torch.Tensor): Input data

        Returns:
            torch.Tensor: Predicted data (detached from the autograd graph)
        """
        return self(x).detach()

    def configure_optimizers(self):
        """Configure optimizer.

        Returns:
            torch.optim.Optimizer: Optimizer
        """
        optimizer = self.optimizer(self.parameters())

        # Lightning accepts ([optimizers], [schedulers]) when a scheduler is set.
        if self.scheduler is not None:
            scheduler = self.scheduler(optimizer)
            return [optimizer], [scheduler]

        return optimizer
169 |
--------------------------------------------------------------------------------
/torchts/nn/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/torchts/nn/models/__init__.py
--------------------------------------------------------------------------------
/torchts/nn/models/dcrnn.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | from torch import nn
4 |
5 | from torchts.nn.graph import DCGRU
6 | from torchts.nn.model import TimeSeriesModel
7 |
8 |
class Encoder(nn.Module):
    """DCGRU-based encoder: one recurrent step per call.

    Args:
        input_dim: features per node in the input sequence
        seq_len: number of input time steps (read by the caller's loop)
        kwargs: forwarded to DCGRU
    """

    def __init__(self, input_dim, seq_len, **kwargs):
        super().__init__()
        self.input_dim = input_dim
        self.seq_len = seq_len
        self.dcgru = DCGRU(input_dim=input_dim, **kwargs)

    def forward(self, inputs, hidden_state):
        """Runs a single DCGRU step; returns (output, new hidden state)."""
        return self.dcgru(inputs, hidden_state)
19 |
20 |
class Decoder(nn.Module):
    """DCGRU step followed by a linear projection down to the output dimension.

    Args:
        output_dim: features per node in the output sequence
        horizon: number of decode steps (read by the caller's loop)
        kwargs: forwarded to DCGRU; must include "num_nodes" and "num_units"
    """

    def __init__(self, output_dim, horizon, **kwargs):
        super().__init__()
        self.output_dim = output_dim
        self.horizon = horizon
        self.num_nodes = kwargs["num_nodes"]
        self.num_units = kwargs["num_units"]
        self.dcgru = DCGRU(input_dim=output_dim, **kwargs)
        self.projection_layer = nn.Linear(self.num_units, output_dim)

    def forward(self, inputs, hidden_state):
        """One decode step: DCGRU update, then per-node linear projection."""
        output, hidden = self.dcgru(inputs, hidden_state)
        flat = output.reshape(-1, self.num_units)
        projected = self.projection_layer(flat)
        return projected.reshape(-1, self.num_nodes * self.output_dim), hidden
36 |
37 |
class DCRNN(TimeSeriesModel):
    """Diffusion Convolutional Recurrent Neural Network (encoder-decoder).

    The encoder consumes ``seq_len`` input steps; its final hidden state seeds
    the decoder, which autoregressively emits ``horizon`` output steps.
    During training, scheduled sampling (curriculum learning) can feed the
    ground-truth label instead of the previous prediction.
    """

    def __init__(
        self,
        adj_mx,
        num_units,
        seq_len=1,
        horizon=1,
        input_dim=1,
        output_dim=1,
        max_diffusion_step=2,
        filter_type="laplacian",
        num_nodes=1,
        num_layers=1,
        use_gc_for_ru=True,
        use_curriculum_learning=False,
        cl_decay_steps=1000,
        **kwargs,
    ):
        """
        Args:
            adj_mx: Adjacency matrix of the graph
            num_units (int): Hidden units per DCGRU layer
            seq_len (int): Number of input time steps
            horizon (int): Number of predicted time steps
            input_dim (int): Input features per node
            output_dim (int): Output features per node
            max_diffusion_step (int): Diffusion steps of the graph convolution
            filter_type (str): Graph filter type (e.g. "laplacian")
            num_nodes (int): Number of graph nodes
            num_layers (int): Number of stacked DCGRU layers
            use_gc_for_ru (bool): Forwarded to DCGRU — presumably selects graph
                convolution for the reset/update gates; confirm in DCGRU
            use_curriculum_learning (bool): Enable scheduled sampling
            cl_decay_steps (int): Decay constant of the sampling probability
            kwargs: Forwarded to TimeSeriesModel
        """
        super().__init__(**kwargs)

        dcgru_args = {
            "adj_mx": adj_mx,
            "num_nodes": num_nodes,
            "num_layers": num_layers,
            "num_units": num_units,
            "max_diffusion_step": max_diffusion_step,
            "filter_type": filter_type,
            "use_gc_for_ru": use_gc_for_ru,
        }

        self.encoder_model = Encoder(input_dim, seq_len, **dcgru_args)
        self.decoder_model = Decoder(output_dim, horizon, **dcgru_args)

        self.num_nodes = num_nodes
        self.num_layers = num_layers
        self.hidden_state_size = num_nodes * num_units
        self.use_curriculum_learning = use_curriculum_learning
        self.cl_decay_steps = cl_decay_steps

    def _compute_sampling_threshold(self, batches_seen):
        # Probability of feeding the ground-truth label during decoding;
        # decays toward 0 as training progresses (inverse-sigmoid schedule).
        return self.cl_decay_steps / (
            self.cl_decay_steps + np.exp(batches_seen / self.cl_decay_steps)
        )

    def encoder(self, inputs):
        """Encodes `inputs` of shape (seq_len, batch, num_nodes * input_dim)
        into the final hidden state (num_layers, batch, hidden_state_size)."""
        batch_size = inputs.size(1)
        shape = self.num_layers, batch_size, self.hidden_state_size
        encoder_hidden_state = torch.zeros(shape, device=self.device)

        # Step through time, carrying the hidden state forward.
        for t in range(self.encoder_model.seq_len):
            _, encoder_hidden_state = self.encoder_model(
                inputs[t], encoder_hidden_state
            )

        return encoder_hidden_state

    def decoder(self, encoder_hidden_state, labels=None, batches_seen=None):
        """Autoregressively decodes `horizon` steps from the encoder state.

        Args:
            encoder_hidden_state: final encoder hidden state
            labels: ground-truth outputs, required when scheduled sampling fires
            batches_seen: cumulative training batches (drives the schedule)

        Returns:
            torch.Tensor: stacked predictions, shape
                (horizon, batch, num_nodes * output_dim)
        """
        batch_size = encoder_hidden_state.size(1)
        shape = batch_size, self.num_nodes * self.decoder_model.output_dim
        # All-zero GO symbol starts the autoregressive loop.
        go_symbol = torch.zeros(shape, device=self.device)
        decoder_hidden_state = encoder_hidden_state
        decoder_input = go_symbol
        outputs = []

        for t in range(self.decoder_model.horizon):
            decoder_output, decoder_hidden_state = self.decoder_model(
                decoder_input, decoder_hidden_state
            )
            decoder_input = decoder_output
            outputs.append(decoder_output)

            # Scheduled sampling: sometimes replace the model's own prediction
            # with the true label as the next decoder input.
            if self.training and self.use_curriculum_learning:
                c = np.random.uniform(0, 1)

                if c < self._compute_sampling_threshold(batches_seen):
                    decoder_input = labels[t]

        outputs = torch.stack(outputs)
        return outputs

    def forward(self, inputs, labels=None, batches_seen=None):
        """Full encoder-decoder pass; see `encoder` / `decoder` for shapes."""
        encoder_hidden_state = self.encoder(inputs)
        outputs = self.decoder(encoder_hidden_state, labels, batches_seen=batches_seen)

        return outputs

    def prepare_batch(self, batch):
        """Reorders a (batch, time, node, feature) batch to time-major and
        flattens node/feature dims, matching encoder/decoder expectations."""
        x, y = batch
        # (batch, time, node, feature) -> (time, batch, node, feature)
        x = x.permute(1, 0, 2, 3)
        y = y.permute(1, 0, 2, 3)

        batch_size = x.size(1)
        x = x.view(
            self.encoder_model.seq_len,
            batch_size,
            self.num_nodes * self.encoder_model.input_dim,
        )
        # Only the first output_dim features of y are predicted.
        y = y[..., : self.decoder_model.output_dim].view(
            self.decoder_model.horizon,
            batch_size,
            self.num_nodes * self.decoder_model.output_dim,
        )

        return x, y
142 |
--------------------------------------------------------------------------------
/torchts/nn/models/ode.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | from torchts.nn.model import TimeSeriesModel
5 |
6 |
class ODESolver(TimeSeriesModel):
    def __init__(
        self, ode, init_vars, init_coeffs, dt, solver="euler", outvar=None, **kwargs
    ):
        """TimeSeriesModel for modeling ordinary differential equations.

        Args:
            ode (dict): ODE in dictionary form; maps each variable name to a
                function f(vars, coeffs) returning that variable's derivative
            init_vars (dict): Initial values for each variable
            init_coeffs (dict): Initial values for each parameter
            dt (float): Time step
            solver (str): Numerical method for solving the ODE ("euler"/"rk4")
            outvar (list): Observed variables
            kwargs (Any): Additional arguments passed to TimeSeriesModel
        """
        super().__init__(**kwargs)

        if ode.keys() != init_vars.keys():
            raise ValueError("Inconsistent keys in ode and init_vars")

        if solver == "euler":
            self.step_solver = self._euler_step
        elif solver == "rk4":
            self.step_solver = self._runge_kutta_4_step
        else:
            raise ValueError(f"Unrecognized solver {solver}")

        # ODE coefficients are registered as learnable parameters.
        for name, value in init_coeffs.items():
            self.register_parameter(name, nn.Parameter(torch.tensor(value)))

        self.ode = ode
        self.var_names = ode.keys()
        self.init_vars = {
            name: torch.tensor(value, device=self.device)
            for name, value in init_vars.items()
        }
        self.coeffs = dict(self.named_parameters())
        self.outvar = self.var_names if outvar is None else outvar

        # determines method of training: fully observed systems train on
        # single-step transitions, otherwise on a full-trajectory simulation
        self.observed = set(self.outvar) == set(self.var_names)

        self.dt = dt

    def _euler_step(self, prev_val):
        """Computes a single step of the ODE using Euler's method.

        Args:
            prev_val (dict): Previous values for each variable

        Returns:
            dict: Euler's method step prediction
        """
        # Initial values only establish the dict's keys/shape; every entry is
        # overwritten in the loop below (ode and init_vars share keys).
        pred = {name: value.unsqueeze(0) for name, value in self.init_vars.items()}

        # Forward Euler: x_{t+1} = x_t + f(x_t) * dt
        for var in self.var_names:
            pred[var] = prev_val[var] + self.ode[var](prev_val, self.coeffs) * self.dt

        return pred

    def _runge_kutta_4_step(self, prev_val):
        """Computes a single step of the ODE using a 4th order Runge-Kutta method.

        Args:
            prev_val (dict): Previous values for each variable

        Returns:
            dict: 4th order Runge-Kutta method step prediction
        """
        # Initial values only establish the dict's keys/shape; every entry is
        # overwritten in the final loop below.
        pred = {name: value.unsqueeze(0) for name, value in self.init_vars.items()}

        # k_1..k_4 hold the intermediate evaluation points (states, not slopes).
        k_1 = prev_val
        k_2 = {}
        k_3 = {}
        k_4 = {}

        for var in self.var_names:
            k_2[var] = prev_val[var] + self.ode[var](k_1, self.coeffs) * 0.5 * self.dt

        for var in self.var_names:
            k_3[var] = prev_val[var] + self.ode[var](k_2, self.coeffs) * 0.5 * self.dt

        for var in self.var_names:
            k_4[var] = prev_val[var] + self.ode[var](k_3, self.coeffs) * self.dt

        # Weighted average of the four slope evaluations (1/6, 1/3, 1/3, 1/6).
        for var in self.var_names:
            result = self.ode[var](k_1, self.coeffs) / 6
            result += self.ode[var](k_2, self.coeffs) / 3
            result += self.ode[var](k_3, self.coeffs) / 3
            result += self.ode[var](k_4, self.coeffs) / 6
            pred[var] = prev_val[var] + result * self.dt

        return pred

    def solver(self, nt, initial=None):
        """Numerical simulation of the ODE using the selected solver method.

        Args:
            nt (int): Number of time-steps
            initial (dict): Initial values for each variable

        Returns:
            dict: Prediction of each variable after nt time steps
        """
        if initial is None:
            initial = self.init_vars

        pred = {name: value.unsqueeze(0) for name, value in initial.items()}

        for n in range(nt - 1):
            # create dictionary containing values from previous time step
            prev_val = {var: pred[var][[n]] for var in self.var_names}
            new_val = self.step_solver(prev_val)

            for var in self.var_names:
                pred[var] = torch.cat([pred[var], new_val[var]])

        # reformat output to contain desired (observed) variables
        return torch.stack([pred[var] for var in self.outvar], dim=1)

    def fit(self, x, y, max_epochs=10, batch_size=128):
        """Fits the model to the given data.

        Args:
            x (torch.Tensor): Input data
            y (torch.Tensor): Output data
            max_epochs (int): Number of training epochs
            batch_size (int): Batch size for torch.utils.data.DataLoader
                Set to x.shape[0] if unobserved variables are present
        """
        if self.observed:
            super().fit(x, y, max_epochs, batch_size)
        else:
            # Unobserved variables require simulating the whole trajectory at
            # once, so a single full-size batch is used.
            super().fit(x, y, max_epochs, x.shape[0])

    def forward(self, nt):
        # Forward pass is a full nt-step simulation from the initial values.
        return self.solver(nt)

    def get_coeffs(self):
        """Returns the current ODE coefficients as plain Python scalars."""
        return {name: param.item() for name, param in self.named_parameters()}

    def _step(self, batch, batch_idx, num_batches):
        """Computes the loss for one batch; the strategy depends on whether
        all variables are observed (see `observed` set in __init__)."""
        x, y = self.prepare_batch(batch)

        if self.observed:
            # retrieve numerical simulation of single time-steps for each datapoint
            # NOTE(review): self.zero_grad() here is unusual inside a step —
            # confirm it is required with the configured Lightning version.
            self.zero_grad()
            init_point = {var: x[:, i] for i, var in enumerate(self.outvar)}
            pred = self.step_solver(init_point)
            predictions = torch.stack([pred[var] for var in self.outvar], dim=1)
        else:
            # retrieve numerical simulation of the whole dataset
            nt = x.shape[0]
            predictions = self(nt)

        loss = self.criterion(predictions, y)
        return loss

    def backward(self, loss, optimizer, optimizer_idx):
        # use retain_graph=True to mitigate RuntimeError
        # NOTE(review): this hook signature matches older pytorch-lightning
        # releases — confirm against the installed version.
        loss.backward(retain_graph=True)
168 |
--------------------------------------------------------------------------------
/torchts/nn/models/seq2seq.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 |
4 | from torchts.nn.model import TimeSeriesModel
5 |
6 |
class Encoder(nn.Module):
    """Bidirectional LSTM encoder over batch-first input sequences.

    Args:
        input_dim: the dimension of input sequences.
        hidden_dim: number of hidden units per direction.
        num_layers: number of encoder layers.
        dropout_rate: recurrent dropout rate.
    """

    def __init__(self, input_dim, hidden_dim, num_layers, dropout_rate):
        super().__init__()
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.lstm = nn.LSTM(
            input_dim,
            hidden_dim,
            num_layers=num_layers,
            bidirectional=True,
            dropout=dropout_rate,
            batch_first=True,
        )

    def forward(self, source):
        """Encodes the source sequence.

        Args:
            source: input tensor of shape (batch, seq, input_dim)
        Returns:
            A pair (outputs, (h_n, c_n)) as produced by nn.LSTM.
        """
        return self.lstm(source)
38 |
39 |
class Decoder(nn.Module):
    """LSTM decoder whose hidden size is doubled to accept the concatenated
    states of a bidirectional encoder.

    Args:
        output_dim: the dimension of output sequences.
        hidden_dim: number of hidden units (per encoder direction).
        num_layers: number of decoder layers.
        dropout_rate: recurrent dropout rate.
    """

    def __init__(self, output_dim, hidden_dim, num_layers, dropout_rate):
        super().__init__()

        # 2 * hidden_dim: the encoder is bidirectional, so its concatenated
        # directional states are twice as wide as a single direction.
        self.lstm = nn.LSTM(
            output_dim,
            hidden_dim * 2,
            num_layers=num_layers,
            dropout=dropout_rate,
            batch_first=True,
        )

        self.out = nn.Linear(hidden_dim * 2, output_dim)

    def forward(self, x, hidden):
        """Runs one decode step.

        Args:
            x: previous prediction, shape (batch, seq, output_dim).
            hidden: (h, c) states from the previous cell.
        Returns:
            A pair (prediction, next hidden state).
        """
        lstm_out, next_hidden = self.lstm(x, hidden)
        return self.out(lstm_out), next_hidden
74 |
75 |
class Seq2Seq(TimeSeriesModel):
    """Encoder-decoder model for multi-step forecasting.

    Args:
        encoder: Encoder object.
        decoder: Decoder object.
        output_dim: feature dimension of each predicted step.
        horizon: number of steps to predict.
    """

    def __init__(self, encoder, decoder, output_dim, horizon, **kwargs):
        super().__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder
        self.output_dim = output_dim
        self.horizon = horizon

    def forward(self, source, target=None, batches_seen=None):
        """Encodes the source, then autoregressively decodes `horizon` steps
        (target and batches_seen are accepted but unused).

        Args:
            source: input tensor, shape (batch, seq, input_dim).
        Returns:
            Predictions of shape (batch, horizon, output_dim).
        """
        _, (h_n, c_n) = self.encoder(source)
        n = self.encoder.num_layers

        # Join the two directional halves of each state along the feature axis.
        h = torch.cat([h_n[:n], h_n[-n:]], dim=2)
        c = torch.cat([c_n[:n], c_n[-n:]], dim=2)

        batch_size = source.size(0)
        step_input = torch.zeros((batch_size, 1, self.output_dim), device=self.device)
        state = (h, c)
        predictions = []

        # Feed each prediction back in as the next decoder input.
        for _ in range(self.horizon):
            step_input, state = self.decoder(step_input, state)
            predictions.append(step_input)

        return torch.cat(predictions, dim=1)
130 |
--------------------------------------------------------------------------------
/torchts/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/torchts/utils/__init__.py
--------------------------------------------------------------------------------
/torchts/utils/data.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pickle
3 | from collections.abc import Iterable
4 |
5 | import numpy as np
6 | import torch
7 | from torch.utils.data import DataLoader, TensorDataset
8 |
9 |
class PaddedDataset(TensorDataset):
    """TensorDataset that pads its data to a multiple of the batch size.

    Args:
        batch_size (int): batch size the data length should be a multiple of
        *data (np.ndarray): arrays sharing the same leading dimension
        pad_with_last_sample (bool): if True, repeat the last sample so that
            the dataset length is divisible by batch_size; if False, use the
            arrays unchanged (previously this path crashed, because the
            placeholder list of None values was passed to torch.from_numpy)
    """

    def __init__(self, batch_size, *data, pad_with_last_sample=True):
        if pad_with_last_sample:
            # Number of copies of the final sample needed to fill the last batch.
            num_padding = (batch_size - (len(data[0]) % batch_size)) % batch_size
            data_pad = [
                np.concatenate([d, np.repeat(d[-1:], num_padding, axis=0)], axis=0)
                for d in data
            ]
        else:
            # Bug fix: pass the arrays through unchanged instead of None.
            data_pad = list(data)

        super().__init__(*(torch.from_numpy(d).float() for d in data_pad))
22 |
23 |
class StandardScaler:
    """Z-score normalization using statistics captured at construction time."""

    def __init__(self, data):
        # Mean and standard deviation over all elements of `data`.
        self.mean = data.mean()
        self.std = data.std()

    def transform(self, data):
        """Standardizes `data` using the stored mean and std."""
        centered = data - self.mean
        return centered / self.std

    def inverse_transform(self, data):
        """Maps standardized values back to the original scale."""
        rescaled = data * self.std
        return rescaled + self.mean
34 |
35 |
def concat(a, b):
    """Appends tensor `b` to `a` along dim 0, treating `b` as one new row."""
    expanded = b.unsqueeze(0)
    return torch.cat((a, expanded), dim=0)
38 |
39 |
def generate_ode_dataset(x):
    """Generates dataset for ODESolver when training with zero unobserved variables.

    Pairs each time step with its successor.

    Args:
        x (torch.Tensor): Original time series data

    Returns:
        torch.Tensor: Time series data from time step 0 to n-1
        torch.Tensor: Time series data from time step 1 to n
    """
    return x[:-1], x[1:]
52 |
53 |
def load_dataset(dataset_dir, batch_size, val_batch_size=None, test_batch_size=None):
    """Loads train/val/test splits from `<dataset_dir>/<split>.npz`, scales
    feature 0 with statistics from the training inputs, and builds padded
    DataLoaders. Returns a dict with the arrays, loaders, and the scaler."""
    if val_batch_size is None:
        val_batch_size = batch_size

    if test_batch_size is None:
        test_batch_size = batch_size

    data = {}
    splits = ["train", "val", "test"]

    for split in splits:
        arrays = np.load(os.path.join(dataset_dir, split + ".npz"))
        data["x_" + split] = arrays["x"]
        data["y_" + split] = arrays["y"]

    # Scaler statistics come from the training inputs only.
    scaler = StandardScaler(data["x_train"][..., 0])

    # Standardize feature 0 of every split, in place.
    for split in splits:
        for key in ("x_" + split, "y_" + split):
            data[key][..., 0] = scaler.transform(data[key][..., 0])

    loader_specs = [
        ("train", batch_size, True),
        ("val", val_batch_size, False),
        ("test", test_batch_size, False),
    ]

    for split, size, shuffle in loader_specs:
        dataset = PaddedDataset(size, data["x_" + split], data["y_" + split])
        data[split + "_loader"] = DataLoader(dataset, size, shuffle=shuffle)

    data["scaler"] = scaler

    return data
86 |
87 |
def load_graph_data(pkl_filename):
    """Loads a pickled (sensor_ids, sensor_id_to_ind, adj_mx) graph triple."""
    sensor_ids, sensor_id_to_ind, adj_mx = load_pickle(pkl_filename)
    return sensor_ids, sensor_id_to_ind, adj_mx
91 |
92 |
def load_pickle(pickle_file):
    """Loads a pickle file, retrying with latin1 encoding for legacy
    (Python 2 era) pickles; logs and re-raises any other failure."""
    try:
        with open(pickle_file, "rb") as f:
            return pickle.load(f)
    except UnicodeDecodeError:
        # Fall back for pickles written by Python 2.
        with open(pickle_file, "rb") as f:
            return pickle.load(f, encoding="latin1")
    except Exception as e:
        print(f"Unable to load data {pickle_file} : {e}")
        raise e
105 |
106 |
def sliding_window(tensor, lags, horizon=1, dim=0, step=1):
    """Builds (features, target) pairs from a time series via sliding windows.

    Args:
        tensor: source time series
        lags: window length (int) or specific lag offsets (Iterable[int])
        horizon: steps ahead of the last lag for the target
        dim: dimension to unfold over
        step: stride between consecutive windows

    Returns:
        Tuple of (lagged features, horizon-ahead targets).

    Raises:
        TypeError: if lags is neither int nor Iterable[int]
        ValueError: if any lag is not positive
    """
    int_lags = isinstance(lags, int)
    iter_lags = isinstance(lags, Iterable) and all(
        isinstance(lag, int) for lag in lags
    )

    if not (int_lags or iter_lags):
        raise TypeError("lags must be of type int or Iterable[int]")

    if (int_lags and lags < 1) or (iter_lags and any(lag < 1 for lag in lags)):
        raise ValueError(f"lags must be positive but found {lags}")

    # Window spans the largest lag plus the forecast horizon; the target is
    # always the final element of each window.
    if int_lags:
        windows = tensor.unfold(dim, lags + horizon, step)
        features = windows[:, :lags]
    else:
        windows = tensor.unfold(dim, max(lags) + horizon, step)
        features = windows[:, [lag - 1 for lag in lags]]

    return features, windows[:, -1]
125 |
--------------------------------------------------------------------------------
/torchts/utils/graph.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.sparse as sp
3 | import torch
4 | from scipy.sparse import linalg
5 |
6 |
def normalized_laplacian(adj):
    """Symmetrically normalized graph Laplacian.

    Computes I - D^-1/2 A^T D^-1/2 (equal to I - D^-1/2 A D^-1/2 when the
    adjacency matrix is symmetric); zero-degree nodes get a zero scaling.
    """
    adj = sp.coo_matrix(adj)
    degrees = np.asarray(adj.sum(1)).flatten()
    inv_sqrt = np.power(degrees, -0.5)
    inv_sqrt[np.isinf(inv_sqrt)] = 0.0
    d_inv_sqrt = sp.diags(inv_sqrt)
    normalized = adj.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt).tocoo()
    return sp.eye(adj.shape[0]) - normalized
16 |
17 |
def random_walk(adj_mx):
    """Row-normalized transition matrix D^-1 A of the graph; rows with zero
    degree become all-zero instead of dividing by zero."""
    adj_mx = sp.coo_matrix(adj_mx)
    degrees = np.asarray(adj_mx.sum(1)).flatten()
    inv_degrees = np.power(degrees, -1)
    inv_degrees[np.isinf(inv_degrees)] = 0.0
    return sp.diags(inv_degrees).dot(adj_mx).tocoo()
26 |
27 |
def reverse_random_walk(adj_mx):
    """Random walk matrix of the reversed graph (transposed adjacency)."""
    reversed_adj = np.transpose(adj_mx)
    return random_walk(reversed_adj)
30 |
31 |
def scaled_laplacian(adj_mx, lambda_max=2, undirected=True):
    """Laplacian rescaled to roughly [-1, 1]: (2 / lambda_max) L - I.

    Args:
        adj_mx: adjacency matrix (dense or scipy sparse)
        lambda_max: largest eigenvalue to scale by; None computes it
        undirected: if True, symmetrize via elementwise max of A and A^T
    """
    if sp.issparse(adj_mx):
        adj_mx = adj_mx.todense()

    if undirected:
        adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])

    laplacian = normalized_laplacian(adj_mx)

    if lambda_max is None:
        # Largest-magnitude eigenvalue of the Laplacian.
        eigenvalues, _ = linalg.eigsh(laplacian, 1, which="LM")
        lambda_max = eigenvalues[0]

    laplacian = sp.csr_matrix(laplacian)
    size, _ = laplacian.shape
    identity = sp.identity(size, format="csr", dtype=laplacian.dtype)
    return (2 / lambda_max * laplacian) - identity
50 |
51 |
def sparse_matrix(L):
    """Converts a scipy sparse matrix into a torch sparse COO tensor.

    Entries are sorted into the ordering historically produced by
    torch.sparse.sparse_reorder. Bug fix: the previous version lexsorted the
    indices but left the values in their original order, pairing values with
    the wrong coordinates whenever tocoo() emitted a different order.
    """
    coo = L.tocoo()
    indices = np.column_stack((coo.row, coo.col))
    # np.lexsort's LAST key is the primary one: sort by column, then row.
    order = np.lexsort((indices[:, 0], indices[:, 1]))
    # Apply the same permutation to indices AND values so they stay paired.
    return torch.sparse_coo_tensor(indices[order].T, coo.data[order], coo.shape)
59 |
--------------------------------------------------------------------------------
/website/.gitignore:
--------------------------------------------------------------------------------
1 | # Dependencies
2 | /node_modules
3 | yarn.lock
4 |
5 | # Production
6 | /build
7 | /static/api
8 |
9 | # Generated files
10 | .docusaurus
11 | .cache-loader
12 |
13 | # Misc
14 | .DS_Store
15 | .env.local
16 | .env.development.local
17 | .env.test.local
18 | .env.production.local
19 |
20 | npm-debug.log*
21 | yarn-debug.log*
22 | yarn-error.log*
23 |
--------------------------------------------------------------------------------
/website/README.md:
--------------------------------------------------------------------------------
1 | # Website
2 |
3 | This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.
4 |
5 | ## Installation
6 |
7 | ```console
8 | yarn install
9 | ```
10 |
11 | ## Local Development
12 |
13 | ```console
14 | yarn start
15 | ```
16 |
17 | This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
18 |
19 | ## Build
20 |
21 | ```console
22 | yarn build
23 | ```
24 |
25 | This command generates static content into the `build` directory and can be served using any static contents hosting service.
26 |
27 | ## Deployment
28 |
29 | ```console
GIT_USER=<your-github-username> USE_SSH=true yarn deploy
31 | ```
32 |
33 | If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.
34 |
--------------------------------------------------------------------------------
/website/babel.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
3 | }
4 |
--------------------------------------------------------------------------------
/website/docs/deep-sequence-to-sequence-models/_category_.json:
--------------------------------------------------------------------------------
1 | {
2 | "label": "Deep Sequence to Sequence Models",
3 | "position": 3
4 | }
5 |
--------------------------------------------------------------------------------
/website/docs/deep-sequence-to-sequence-models/seq2seq-gru.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Seq2Seq with GRU
3 | slug: /seq2seq-gru
4 | ---
5 |
6 | The [sequence to sequence model](https://proceedings.neurips.cc/paper/2014/file/a14ac55a4f27472c5d894ec1c3c743d2-Paper.pdf) originates from language translation. Our implementation adapts the model for multi-step time series forecasting. Specifically, given the input series $x_1, \ldots, x_{t}$, the model maps the input series to the output series:
7 |
8 | $$
9 | x_{t-p}, x_{t-p+1}, \ldots, x_{t-1} \longrightarrow x_t, x_{t+1}, \ldots, x_{t+h-1}
10 | $$
11 |
12 | where $p$ is the input history length and $h$ is the forecasting horizon.
13 |
14 | Sequence to sequence (Seq2Seq) models consist of an encoder and a decoder. The final state of the encoder is fed as the initial state of the decoder. We can use various models for both the encoder and decoder. This function implements a Gated Recurrent Unit (GRU).
15 |
--------------------------------------------------------------------------------
/website/docs/deep-sequence-to-sequence-models/seq2seq-lstm.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Seq2Seq with LSTM
3 | slug: /seq2seq-lstm
4 | ---
5 |
6 | The [sequence to sequence model](https://proceedings.neurips.cc/paper/2014/file/a14ac55a4f27472c5d894ec1c3c743d2-Paper.pdf) originates from language translation. Our implementation adapts the model for multi-step time series forecasting. Specifically, given the input series $x_1, \ldots, x_{t}$, the model maps the input series to the output series:
7 |
8 | $$
9 | x_{t-p}, x_{t-p+1}, \ldots, x_{t-1} \longrightarrow x_t, x_{t+1}, \ldots, x_{t+h-1}
10 | $$
11 |
12 | where $p$ is the input history length and $h$ is the forecasting horizon.
13 |
Sequence to sequence (Seq2Seq) models consist of an encoder and a decoder. The final state of the encoder is fed as the initial state of the decoder. We can use various models for both the encoder and decoder. This function implements a Long Short Term Memory (LSTM).
15 |
--------------------------------------------------------------------------------
/website/docs/deep-spatiotemporal-models/_category_.json:
--------------------------------------------------------------------------------
1 | {
2 | "label": "Deep Spatiotemporal Models",
3 | "position": 4
4 | }
5 |
--------------------------------------------------------------------------------
/website/docs/deep-spatiotemporal-models/convolutional-lstm.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Convolutional LSTM
3 | slug: /convolutional-lstm
4 | ---
5 |
6 | In spatiotemporal forecasting, assume we have multiple time series generated from a fixed space $x(s,t)$. [Convolutional LSTM](https://papers.nips.cc/paper/2015/file/07563a3fe3bbe7e3ba84431ad9d055af-Paper.pdf) models the time series on a regular grid, similar to a video.
7 |
8 | Convolutional LSTM replaces the matrix multiplication in a regular LSTM with convolution. It determines the future state of a certain cell in the grid by the inputs and past states of its local neighbors:
9 |
10 | $$
11 | \begin{bmatrix} i_t \\ f_t \\ o_t \end{bmatrix} = \sigma\big(W^{x} \star x_t + W^h \star h_{t-1} + W^c \circ c_{t-1} + b\big)
12 | $$
13 |
--------------------------------------------------------------------------------
/website/docs/deep-spatiotemporal-models/diffusion-convolutional-lstm.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Diffusion Convolutional LSTM
3 | slug: /diffusion-convolutional-lstm
4 | ---
5 |
6 | In spatiotemporal forecasting, assume we have multiple time series generated from a fixed space $x(s,t)$. [Diffusion Convolutional LSTM](https://openreview.net/pdf?id=SJiHXGWAZ) models the time series on an irregular grid (graph) as a diffusion process.
7 |
8 | Diffusion Convolutional LSTM replaces the matrix multiplication in a regular LSTM with diffusion convolution. It determines the future state of a certain cell in the graph by the inputs and past states of its local neighbors:
9 |
10 | $$
11 | \begin{bmatrix} i_t \\ f_t \\ o_t \end{bmatrix} = \sigma\big(W^{x} \star_g x_t + W^h \star_g h_{t-1} + W^c \circ c_{t-1} + b\big)
12 | $$
13 |
14 | where $W \star_g x = \sum_{i=1}^k \big(D^{-1}A\big)^i \cdot W \cdot x$ is the diffusion convolution.
15 |
--------------------------------------------------------------------------------
/website/docs/intro.md:
--------------------------------------------------------------------------------
1 | ---
2 | sidebar_position: 1
3 | title: Introduction
4 | slug: /
5 | ---
6 |
7 | Time series is one of the fastest growing and richest types of data. In a variety of domains including dynamical systems, healthcare, climate science and economics, there have been increasing amounts of complex dynamic data due to a shift away from parsimonious, infrequent measurements to nearly continuous real-time monitoring and recording. This burgeoning amount of new data calls for novel theoretical and algorithmic tools and insights.
8 |
9 | [TorchTS](https://rose-stl-lab.github.io/torchTS/) is a deep learning library for time series forecasting built on [PyTorch](https://pytorch.org/). The tools in this library mostly come out of the research from the [Spatiotemporal Machine Learning Lab](https://github.com/Rose-STL-Lab). The library is designed with minimal dependencies and user-friendly interfaces. A particular emphasis of the library is scalability, which exploits auto-differentiation and various inductive biases in the data.
10 |
--------------------------------------------------------------------------------
/website/docs/linear-time-series-models/_category_.json:
--------------------------------------------------------------------------------
1 | {
2 | "label": "Linear Time Series Models",
3 | "position": 2
4 | }
5 |
--------------------------------------------------------------------------------
/website/docs/linear-time-series-models/autoregressive.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Autoregressive Model (AR)
3 | slug: /autoregressive
4 | ---
5 |
6 | Given a time series $x_1, \ldots, x_t$, a $p$-th order autoregressive model (denoted AR($p$)) is defined as a linear function of the input series $x$:
7 |
8 | $$
9 | x_t = \sum_{i=1}^p a_i x_{t-i} + e_t
10 | $$
11 |
12 | where $\{a_i\}$ are the model coefficients and the series $\{e_t\}$ can represent either a controlled external input or noise. Note that the expression
13 |
14 | $$
15 | \sum_{i=1}^p a_i x_{t-i} = a_1 x_{t-1} + a_2 x_{t-2} + \cdots + a_p x_{t-p}
16 | $$
17 |
18 | describes a convolution filter. We can implement AR($p$) using either a feedforward neural network with a rolling window or a convolutional network on the entire series.
19 |
--------------------------------------------------------------------------------
/website/docs/linear-time-series-models/vector-autoregressive.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Vector Autoregressive Model (VAR)
3 | slug: /vector-autoregressive
4 | ---
5 |
6 | Given a $k$-dimensional time series $x_1, \ldots, x_t$ with $x_t \in \mathbb{R}^k$, a $p$-th order vector autoregressive model (denoted VAR($p$)) generalizes the univariate AR model. It models the output as linear functions of the input series $x$:
7 |
8 | $$
9 | x_t = \sum_{i=1}^p A_i x_{t-i} + e_t
10 | $$
11 |
12 | where $A_i \in \mathbb{R}^{k{\times}k}$ is a $k{\times}k$ matrix. The series $\{e_t\}$ can represent either a controlled external input or noise.
13 |
--------------------------------------------------------------------------------
/website/docs/physics-guided-deep-sequence-models/_category_.json:
--------------------------------------------------------------------------------
1 | {
2 | "label": "Physics-Guided Deep Sequence Models",
3 | "position": 5
4 | }
5 |
--------------------------------------------------------------------------------
/website/docs/physics-guided-deep-sequence-models/autoode.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: AutoODE
3 | slug: /autoode
4 | ---
5 |
6 | Assume the time series $x_t \in \mathbb{R}^d$ is governed by unknown differential equations:
7 |
8 | $$
9 | \begin{aligned}
10 | &\frac{dx}{dt} = f_\theta(t, x, u) \\
11 | &\frac{du}{dt} = g_\theta(t, x, u) \\
12 | &x(t_0) = x_0 \\
13 | &u(t_0) = u_0
14 | \end{aligned}
15 | $$
16 |
17 | where $u \in \mathbb{R}^p$ are the unobserved variables. [AutoODE](https://arxiv.org/pdf/2011.10616.pdf) uses auto-differentiation to estimate the parameters $\theta$ of the equations.
18 |
--------------------------------------------------------------------------------
/website/docs/physics-guided-deep-sequence-models/hybrid-autoode.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Hybrid AutoODE
3 | slug: /hybrid-autoode
4 | ---
5 |
6 | Assume the time series $x_t \in \mathbb{R}^d$ is governed by unknown differential equations and by other unknown factors that could affect its trajectory. The Hybrid AutoODE uses physics-guided models in conjunction with neural networks to improve the prediction of $x_t$. It is modelled by the following equations:
7 |
8 | $$
9 | \begin{aligned}
10 | &\frac{dx}{dt} = f_\theta(t, x, u, F) \\
11 | &\frac{du}{dt} = g_\theta(t, x, u, F) \\
12 | &x(t_0) = x_0 \\
13 | &u(t_0) = u_0
14 | \end{aligned}
15 | $$
16 |
17 | where $u \in \mathbb{R}^p$ are the unobserved variables and $F$ is a neural network. The Hybrid AutoODE uses auto-differentiation to estimate the parameters $\theta$ of the equations and the neural network.
18 |
--------------------------------------------------------------------------------
/website/docs/uncertainty-quantification-methods/_category_.json:
--------------------------------------------------------------------------------
1 | {
2 | "label": "Uncertainty Quantification Methods",
3 | "position": 6
4 | }
5 |
--------------------------------------------------------------------------------
/website/docs/uncertainty-quantification-methods/mean-interval-score-regression.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: MIS Regression
3 | slug: /mis-regression
4 | ---
5 |
6 | Mean interval score (MIS) regression directly minimizes MIS, a scoring function for predictions of confidence intervals. This is done by using MIS as the loss function for deep neural networks. The formula to calculate MIS is also known as Winkler loss and is expressed as:
7 |
8 | $$
9 | MIS = \frac{1}{h}\sum_{j=1}^{h}\left((u_{t+j} - l_{t+j}) + \frac{2}{\alpha}(l_{t+j} - y_{t+j})\mathbb{1}(y_{t+j} < l_{t+j}) + \frac{2}{\alpha}(y_{t+j} - u_{t+j})\mathbb{1}(y_{t+j} > u_{t+j})\right)
10 | $$
11 |
12 | where $u$ and $l$ are the upper and lower bounds respectively, and $\alpha$ is a fixed confidence level. Here $\alpha$ is equivalent to $1-CI$, where $CI$ is the desired confidence interval. Therefore, $\alpha=0.05$ for a $95\%$ confidence interval. There are 3 parts to this loss function, which summed together equal the total mean interval score.
13 |
14 | 1. Penalize distance between the upper and lower bounds.
15 | 2. Penalize distance between the lower bound and actual value by a ratio of $2/\alpha$ when the actual value is lower than the lower bound.
16 | 3. Penalize distance between the actual value and upper bound by a ratio of $2/\alpha$ when the actual value is higher than the upper bound.
17 |
18 | Since the loss function for this model jointly includes the upper and lower bounds, the result outputs both, unlike with quantile regression.
19 |
--------------------------------------------------------------------------------
/website/docs/uncertainty-quantification-methods/quantile-regression.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Quantile Regression
3 | slug: /quantile-regression
4 | ---
5 |
6 | Quantile regression uses the one-sided quantile loss to predict specific percentiles of the dependent variable. The quantile regression model uses the pinball loss function written as:
7 |
8 | $$
9 | L_{Quantile}\big(y,f(x),\theta,p\big) = \min_\theta\left\{\mathbb{E}_{(x,y)\sim D}\big[(y - f(x))\big(p - \mathbb{1}\{y < f(x)\}\big)\big]\right\}
10 | $$
11 |
12 | where $p$ is the fixed quantile level and $\theta$ parameterizes the model $f$. When the pinball loss is minimized, the result is the optimal estimate of the $p$-th quantile.
13 |
--------------------------------------------------------------------------------
/website/docs/uncertainty-quantification-methods/stochastic-gradient-mcmc.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Stochastic Gradient MCMC (coming soon)
3 | slug: /stochastic-gradient-mcmc
4 | ---
5 |
6 | Stochastic gradient Markov chain Monte Carlo (SG-MCMC) is a Bayesian uncertainty quantification method. This form of gradient descent is useful in calculating quantiles according to subsets of the training data, which are selected based on the posterior distribution over the parameter space. This implementation also follows the stochastic gradient thermostat method (SGNHT), whose purpose is to control gradient noise, which keeps the distribution Gaussian. We generate samples of model parameters $\theta$ as a function of the loss function $L(\theta)$, diffusion coefficients $A$, and learning rate $h$, in addition to auxiliary variables $p \in \mathbb{R}^d$ and $\zeta \in \mathbb{R}^d$. The values of $\theta$, $p$, and $\zeta$ are randomly initialized and updated according to the rule:
7 |
8 | $$
9 | \begin{aligned}
10 | \theta_{k+1} &= \theta_k + p_kh \\
11 | p_{k+1} &= p_k - \nabla L(\theta_k)h - \zeta_k p_k h + \mathcal{N}(0,2Ah) \\
12 | \zeta_{k+1} &= \zeta_k + \left(\frac{p_k^\top p_k}{d} - 1\right)h
13 | \end{aligned}
14 | $$
15 |
16 | where after the $k$-th iteration, $\theta$ follows the posterior distribution. We quantify the uncertainty of our prediction by repeating the prediction with multiple values of $\theta$ sampled from the posterior.
17 |
--------------------------------------------------------------------------------
/website/docusaurus.config.js:
--------------------------------------------------------------------------------
1 | const lightCodeTheme = require("prism-react-renderer/themes/github");
2 | const darkCodeTheme = require("prism-react-renderer/themes/dracula");
3 | const math = require("remark-math");
4 | const katex = require("rehype-katex");
5 |
6 | /** @type {import('@docusaurus/types').DocusaurusConfig} */
7 | module.exports = {
8 | title: "TorchTS",
9 | tagline: "Time series forecasting with PyTorch",
10 | url: "https://rose-stl-lab.github.io",
11 | baseUrl: "/torchTS/",
12 | onBrokenLinks: "throw",
13 | onBrokenMarkdownLinks: "warn",
14 | favicon: "img/logo2.png",
15 | scripts: [
16 | "https://buttons.github.io/buttons.js",
17 | "https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.0/clipboard.min.js",
18 | ],
19 | stylesheets: [
20 | "https://fonts.googleapis.com/css?family=IBM+Plex+Mono:500,700|Source+Code+Pro:500,700|Source+Sans+Pro:400,400i,700",
21 | {
22 | href: "https://cdn.jsdelivr.net/npm/katex@0.13.11/dist/katex.min.css",
23 | integrity:
24 | "sha384-Um5gpz1odJg5Z4HAmzPtgZKdTBHZdw8S29IecapCSB31ligYPhHQZMIlWLYQGVoc",
25 | crossorigin: "anonymous",
26 | },
27 | ],
28 |
29 | organizationName: "Rose-STL-Lab", // Usually your GitHub org/user name.
30 | projectName: "torchTS", // Usually your repo name.
31 | themeConfig: {
32 | // colorMode: {
33 | // defaultMode: "light",
34 | // disableSwitch: true,
35 | // },
36 | navbar: {
37 | title: "TorchTS",
38 | logo: {
39 | alt: "My Site Logo",
40 | src: "img/logo2.png",
41 | },
42 | items: [
43 | {
44 | type: "doc",
45 | docId: "intro",
46 | position: "left",
47 | label: "Docs",
48 | },
49 | {
50 | href: "https://rose-stl-lab.github.io/torchTS/api",
51 | label: "API Reference",
52 | position: "left",
53 | },
54 | {
55 | href: "https://github.com/Rose-STL-Lab/torchTS",
56 | label: "GitHub",
57 | position: "right",
58 | },
59 | ],
60 | },
61 | footer: {
62 | links: [
63 | {
64 | title: "Docs",
65 | items: [
66 | {
67 | label: "Getting Started",
68 | to: "docs",
69 | },
70 | // {
71 | // label: 'Tutorials',
72 | // to: '/tutorials',
73 | // },
74 | // {
75 | // label: 'API',
76 | // to: '/api',
77 | // },
78 | ],
79 | },
80 | {
81 | title: "Community",
82 | items: [
83 | {
84 | label: "Slack",
85 | href: "https://github.com/Rose-STL-Lab/torchTS",
86 | },
87 | {
88 | label: "Discord",
89 | href: "https://github.com/Rose-STL-Lab/torchTS",
90 | },
91 | ],
92 | },
93 | {
94 | title: "More",
95 | items: [
96 | {
97 | html: `
98 | Star
105 | `,
106 | },
107 | {
108 | label: "GitHub",
109 | href: "https://github.com/Rose-STL-Lab/torchTS",
110 | },
111 | {
112 | label: "Edit Docs on GitHub",
113 | href: "https://github.com/Rose-STL-Lab/torchTS",
114 | },
115 | ],
116 | },
117 | ],
118 | copyright: `Copyright © ${new Date().getFullYear()} TorchTS Team`,
119 | logo: {
120 | src: "img/octopus-128x128.png",
121 | },
122 | },
123 | prism: {
124 | theme: lightCodeTheme,
125 | darkTheme: darkCodeTheme,
126 | },
127 | fonts: {
128 | fontMain: ["Source Sans Pro", "sans-serif"],
129 | fontCode: ["IBM Plex Mono", "monospace"],
130 | },
131 | },
132 | presets: [
133 | [
134 | "@docusaurus/preset-classic",
135 | {
136 | docs: {
137 | sidebarPath: require.resolve("./sidebars.js"),
138 | remarkPlugins: [math],
139 | showLastUpdateAuthor: true,
140 | showLastUpdateTime: true,
141 | rehypePlugins: [katex],
142 | // Please change this to your repo.
143 | editUrl: "https://github.com/Rose-STL-Lab/torchTS/edit/main/website/",
144 | },
145 | theme: {
146 | customCss: require.resolve("./src/css/custom.css"),
147 | },
148 | },
149 | ],
150 | ],
151 | };
152 |
--------------------------------------------------------------------------------
/website/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "website",
3 | "version": "0.0.0",
4 | "private": true,
5 | "scripts": {
6 | "docusaurus": "docusaurus",
7 | "start": "docusaurus start",
8 | "build": "docusaurus build",
9 | "swizzle": "docusaurus swizzle",
10 | "deploy": "docusaurus deploy",
11 | "clear": "docusaurus clear",
12 | "serve": "docusaurus serve",
13 | "write-translations": "docusaurus write-translations",
14 | "write-heading-ids": "docusaurus write-heading-ids"
15 | },
16 | "dependencies": {
17 | "@docusaurus/core": "2.0.0-beta.3",
18 | "@docusaurus/preset-classic": "2.0.0-beta.3",
19 | "@mdx-js/react": "^1.6.21",
20 | "@svgr/webpack": "^5.5.0",
21 | "classnames": "^2.3.1",
22 | "clsx": "^1.1.1",
23 | "file-loader": "^6.2.0",
24 | "hast-util-is-element": "^1.1.0",
25 | "prism-react-renderer": "^1.2.1",
26 | "react": "^17.0.1",
27 | "react-dom": "^17.0.1",
28 | "rehype-katex": "^4.0.0",
29 | "remark-math": "^3.0.1",
30 | "remarkable": "^2.0.1",
31 | "url-loader": "^4.1.1"
32 | },
33 | "browserslist": {
34 | "production": [
35 | ">0.5%",
36 | "not dead",
37 | "not op_mini all"
38 | ],
39 | "development": [
40 | "last 1 chrome version",
41 | "last 1 firefox version",
42 | "last 1 safari version"
43 | ]
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/website/sidebars.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Creating a sidebar enables you to:
3 | - create an ordered group of docs
4 | - render a sidebar for each doc of that group
5 | - provide next/previous navigation
6 |
7 | The sidebars can be generated from the filesystem, or explicitly defined here.
8 |
9 | Create as many sidebars as you want.
10 | */
11 |
12 | module.exports = {
13 | // By default, Docusaurus generates a sidebar from the docs folder structure
14 | tutorialSidebar: [{ type: 'autogenerated', dirName: '.' }],
15 |
16 | // But you can create a sidebar manually
17 | /*
18 | tutorialSidebar: [
19 | {
20 | type: 'category',
21 | label: 'Tutorial',
22 | items: ['hello'],
23 | },
24 | ],
25 | */
26 | }
27 |
--------------------------------------------------------------------------------
/website/src/components/Container/index.jsx:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) 2017-present, Facebook, Inc.
3 | *
4 | * This source code is licensed under the MIT license found in the
5 | * LICENSE file in the root directory of this source tree.
6 | */
7 |
8 | import React from 'react'
9 | import classNames from 'classnames'
10 |
11 | export const Container = props => {
12 | const containerClasses = classNames('block-container', props.className, {
13 | darkBackground: props.background === 'dark',
14 | highlightBackground: props.background === 'highlight',
15 | lightBackground: props.background === 'light',
16 | paddingAll: props.padding.indexOf('all') >= 0,
17 | paddingBottom: props.padding.indexOf('bottom') >= 0,
18 | paddingLeft: props.padding.indexOf('left') >= 0,
19 | paddingRight: props.padding.indexOf('right') >= 0,
20 | paddingTop: props.padding.indexOf('top') >= 0,
21 | })
22 | let wrappedChildren
23 |
24 | if (props.wrapper) {
25 | wrappedChildren = {props.children}
26 | } else {
27 | wrappedChildren = props.children
28 | }
29 | return (
30 |
31 | {wrappedChildren}
32 |
33 | )
34 | }
35 |
36 | Container.defaultProps = {
37 | background: null,
38 | padding: [],
39 | wrapper: true,
40 | }
41 |
--------------------------------------------------------------------------------
/website/src/components/GridBlock/index.jsx:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) 2017-present, Facebook, Inc.
3 | *
4 | * This source code is licensed under the MIT license found in the
5 | * LICENSE file in the root directory of this source tree.
6 | */
7 |
8 | import React from 'react'
9 | import classNames from 'classnames'
10 | import {MarkdownBlock} from '../MarkdownBlock'
11 |
12 | const renderBlockImage = (image, imageLink, imageAlt) => {
13 | if (!image) {
14 | return null
15 | }
16 |
17 | return (
18 |
19 | {imageLink ? (
20 |
21 |
22 |
23 | ) : (
24 |
25 | )}
26 |
27 | )
28 | }
29 |
30 | const renderBlockTitle = title => {
31 | if (!title) {
32 | return null
33 | }
34 |
35 | return (
36 |
37 | {title}
38 |
39 | )
40 | }
41 |
42 | export const GridBlock = props => {
43 | const renderBlock = origBlock => {
44 | const blockDefaults = {
45 | imageAlign: 'left',
46 | }
47 |
48 | const block = {
49 | ...blockDefaults,
50 | ...origBlock,
51 | }
52 |
53 | const blockClasses = classNames('blockElement', props.className, {
54 | alignCenter: props.align === 'center',
55 | alignRight: props.align === 'right',
56 | fourByGridBlock: props.layout === 'fourColumn',
57 | imageAlignSide:
58 | block.image &&
59 | (block.imageAlign === 'left' || block.imageAlign === 'right'),
60 | imageAlignTop: block.image && block.imageAlign === 'top',
61 | imageAlignRight: block.image && block.imageAlign === 'right',
62 | imageAlignBottom: block.image && block.imageAlign === 'bottom',
63 | imageAlignLeft: block.image && block.imageAlign === 'left',
64 | threeByGridBlock: props.layout === 'threeColumn',
65 | twoByGridBlock: props.layout === 'twoColumn',
66 | })
67 |
68 | const topLeftImage =
69 | (block.imageAlign === 'top' || block.imageAlign === 'left') &&
70 | renderBlockImage(block.image, block.imageLink, block.imageAlt)
71 |
72 | const bottomRightImage =
73 | (block.imageAlign === 'bottom' || block.imageAlign === 'right') &&
74 | renderBlockImage(block.image, block.imageLink, block.imageAlt)
75 |
76 | return (
77 |
78 | {topLeftImage}
79 |
80 | {renderBlockTitle(block.title)}
81 | {block.content}
82 |
83 | {bottomRightImage}
84 |
85 | )
86 | }
87 |
88 | return (
89 | {props.contents.map(renderBlock, this)}
90 | )
91 | }
92 |
93 | GridBlock.defaultProps = {
94 | align: 'left',
95 | contents: [],
96 | layout: 'twoColumn',
97 | }
98 |
--------------------------------------------------------------------------------
/website/src/components/HomepageFeatures.js:
--------------------------------------------------------------------------------
1 | import React from 'react'
2 | import clsx from 'clsx'
3 | import styles from './HomepageFeatures.module.css'
4 |
5 | const FeatureList = [
6 | {
7 | title: 'Easy to Use',
8 | Svg: require('../../static/img/undraw_docusaurus_mountain.svg').default,
9 | description: (
10 | <>
11 | Docusaurus was designed from the ground up to be easily installed and used to
12 | get your website up and running quickly.
13 | >
14 | ),
15 | },
16 | {
17 | title: 'Focus on What Matters',
18 | Svg: require('../../static/img/undraw_docusaurus_tree.svg').default,
19 | description: (
20 | <>
21 | Docusaurus lets you focus on your docs, and we'll do the chores. Go ahead
22 | and move your docs into the docs
directory.
23 | >
24 | ),
25 | },
26 | {
27 | title: 'Powered by React',
28 | Svg: require('../../static/img/undraw_docusaurus_react.svg').default,
29 | description: (
30 | <>
31 | Extend or customize your website layout by reusing React. Docusaurus can be
32 | extended while reusing the same header and footer.
33 | >
34 | ),
35 | },
36 | ]
37 |
38 | function Feature({ Svg, title, description }) {
39 | return (
40 |
41 |
42 |
43 |
44 |
45 |
{title}
46 |
{description}
47 |
48 |
49 | )
50 | }
51 |
52 | export default function HomepageFeatures() {
53 | return (
54 |
55 |
56 |
57 | {FeatureList.map((props, idx) => (
58 |
59 | ))}
60 |
61 |
62 |
63 | )
64 | }
65 |
--------------------------------------------------------------------------------
/website/src/components/HomepageFeatures.module.css:
--------------------------------------------------------------------------------
1 | /* stylelint-disable docusaurus/copyright-header */
2 |
3 | .features {
4 | display: flex;
5 | align-items: center;
6 | padding: 2rem 0;
7 | width: 100%;
8 | }
9 |
10 | .featureSvg {
11 | height: 200px;
12 | width: 200px;
13 | }
14 |
--------------------------------------------------------------------------------
/website/src/components/MarkdownBlock/index.jsx:
--------------------------------------------------------------------------------
1 | import React from 'react'
2 | import {Remarkable} from 'remarkable'
3 | const md = new Remarkable()
4 |
5 | export const MarkdownBlock = ({children}) => {
6 | const markdown = md.render(children)
7 | return
8 | }
9 |
--------------------------------------------------------------------------------
/website/src/css/custom.css:
--------------------------------------------------------------------------------
1 | :root {
2 | --ifm-light-background-color: var(--ifm-color-emphasis-100);
3 | }
4 |
5 | html[data-theme='dark'] {
6 | --ifm-link-color: #679df5;
7 | }
8 |
9 | .wrapper {
10 | margin: 0 auto;
11 | max-width: 1100px;
12 | padding: 0 20px;
13 | }
14 |
15 | .center {
16 | display: block;
17 | }
18 |
19 | .center,
20 | .homeContainer {
21 | text-align: center;
22 | }
23 |
24 | .homeContainer .homeWrapper {
25 | padding: 2em 10px;
26 | }
27 |
28 | .homeContainer .homeWrapper .wrapper {
29 | margin: 0 auto;
30 | max-width: 900px;
31 | padding: 0 20px;
32 | }
33 |
34 | .homeContainer .homeWrapper .projectLogo img {
35 | height: 200px;
36 | margin-bottom: 0;
37 | }
38 |
39 | .homeContainer .homeWrapper #project_title {
40 | font-size: 300%;
41 | letter-spacing: -0.08em;
42 | line-height: 1em;
43 | margin-bottom: 80px;
44 | }
45 |
46 | .homeContainer .homeWrapper #project_tagline {
47 | font-size: 200%;
48 | letter-spacing: -0.04em;
49 | line-height: 1em;
50 | }
51 |
52 | .projectLogo {
53 | display: none;
54 | pointer-events: none;
55 | }
56 |
57 | .projectLogo img {
58 | height: 100px;
59 | margin-bottom: 0;
60 | }
61 |
62 | .projectTitle {
63 | font-size: 250%;
64 | line-height: 1em;
65 | }
66 |
67 | @media only screen and (min-width: 480px) {
68 | .projectTitle {
69 | font-size: 300%;
70 | margin: 0.3em 0;
71 | }
72 |
73 | .projectLogo img {
74 | height: 200px;
75 | margin-bottom: 10px;
76 | }
77 |
78 | .homeContainer .homeWrapper {
79 | padding-left: 10px;
80 | padding-right: 10px;
81 | }
82 | }
83 |
84 | @media only screen and (min-width: 736px) {
85 | .homeContainer .homeWrapper {
86 | position: relative;
87 | }
88 |
89 | .homeContainer .homeWrapper #inner {
90 | max-width: 600px;
91 | padding-right: 40px;
92 | }
93 | }
94 |
95 | @media only screen and (min-width: 1200px) {
96 | .homeContainer .homeWrapper #inner {
97 | max-width: 750px;
98 | }
99 |
100 | .homeContainer .homeWrapper .projectLogo {
101 | align-items: center;
102 | bottom: 0;
103 | display: flex;
104 | justify-content: flex-end;
105 | left: 0;
106 | padding: 2em 100px 4em;
107 | position: absolute;
108 | right: 0;
109 | top: 0;
110 | }
111 |
112 | .homeContainer .homeWrapper .projectLogo img {
113 | height: 100%;
114 | max-height: 250px;
115 | }
116 | }
117 |
118 | @media only screen and (min-width: 1500px) {
119 | .homeContainer .homeWrapper #inner {
120 | max-width: 1100px;
121 | padding-bottom: 40px;
122 | padding-top: 40px;
123 | }
124 |
125 | .wrapper {
126 | max-width: 1400px;
127 | }
128 | }
129 |
130 | .mainContainer {
131 | flex: 1 1 0%;
132 | max-width: 100%;
133 | padding: 40px 0;
134 | }
135 |
136 | .mainContainer .wrapper {
137 | text-align: left;
138 | }
139 |
140 | .gridBlock {
141 | padding: 0;
142 | }
143 |
144 | .gridBlock > * {
145 | box-sizing: border-box;
146 | }
147 |
148 | .gridBlock .fourByGridBlock img,
149 | .gridBlock .threeByGridBlock img,
150 | .gridBlock .twoByGridBlock img {
151 | max-width: 100%;
152 | }
153 |
154 | .gridBlock .gridClear {
155 | clear: both;
156 | }
157 |
158 | @media only screen and (max-width: 735px) {
159 | .gridBlock .fourByGridBlock {
160 | flex: 1 0 26%;
161 | }
162 | }
163 |
164 | @media only screen and (min-width: 736px) {
165 | .gridBlock {
166 | display: flex;
167 | flex-direction: row;
168 | flex-wrap: wrap;
169 | }
170 |
171 | .gridBlock > * {
172 | margin: 0 12px;
173 | }
174 |
175 | .gridBlock > :first-child {
176 | margin-left: 0;
177 | }
178 |
179 | .gridBlock > :last-child {
180 | margin-right: 0;
181 | }
182 |
183 | .gridBlock .twoByGridBlock {
184 | flex: 1 0 40%;
185 | }
186 |
187 | .gridBlock .threeByGridBlock {
188 | flex: 1 0 26%;
189 | }
190 |
191 | .gridBlock .fourByGridBlock {
192 | flex: 1 0 20%;
193 | }
194 |
195 | h2 + .gridBlock {
196 | padding-top: 20px;
197 | }
198 | }
199 |
200 | @media only screen and (min-width: 1400px) {
201 | .gridBlock {
202 | display: flex;
203 | flex-direction: row;
204 | flex-wrap: wrap;
205 | }
206 | }
207 |
208 | .alignCenter {
209 | text-align: center;
210 | }
211 |
212 | .alignRight {
213 | text-align: right;
214 | }
215 |
216 | .imageAlignSide {
217 | display: flex;
218 | flex-flow: row wrap;
219 | }
220 |
221 | .blockImage {
222 | max-width: 730px;
223 | }
224 |
225 | .imageAlignSide .blockImage {
226 | flex: 0 1 500px;
227 | max-width: 500px;
228 | }
229 |
230 | @media only screen and (max-width: 735px) {
231 | .imageAlignSide .blockImage {
232 | display: none;
233 | }
234 | }
235 |
236 | .imageAlignSide .blockContent {
237 | flex: 1 1;
238 | }
239 |
240 | .imageAlignBottom .blockImage {
241 | margin: 0 auto 20px;
242 | max-width: 730px;
243 | }
244 |
245 | .imageAlignBottom.alignCenter .blockImage {
246 | margin-left: auto;
247 | margin-right: auto;
248 | }
249 |
250 | .imageAlignTop .blockImage {
251 | margin-bottom: 20px;
252 | max-width: 80px;
253 | }
254 |
255 | .imageAlignTop.alignCenter .blockImage {
256 | margin-left: auto;
257 | margin-right: auto;
258 | }
259 |
260 | .imageAlignRight .blockImage {
261 | margin-left: 40px;
262 | }
263 |
264 | .imageAlignLeft .blockImage {
265 | margin-right: 40px;
266 | }
267 |
268 | .block-container .gridBlock .blockContent p {
269 | padding: 0;
270 | }
271 |
272 | .block-container .wrapper .alignCenter h2 {
273 | text-align: center;
274 | }
275 |
276 | .block-container .wrapper .imageAlignSide h2 {
277 | text-align: left;
278 | }
279 |
280 | .block-container .wrapper .imageAlignSide p {
281 | margin: 0 0 40px;
282 | max-width: 560px;
283 | }
284 |
285 | .block-container {
286 | padding-left: 1rem;
287 | padding-left: var(--ifm-spacing-horizontal);
288 | padding-right: 1rem;
289 | padding-right: var(--ifm-spacing-horizontal);
290 | width: 100%;
291 | }
292 |
293 | .block-container.lightBackground {
294 | background: var(--ifm-light-background-color);
295 | }
296 |
297 | .block-container .wrapper {
298 | margin: 0 auto;
299 | max-width: 1140px;
300 | max-width: var(--ifm-container-width);
301 | }
302 |
303 | .highlightBackground {
304 | background: rgba(153, 66, 79, 0.7);
305 | color: #fff;
306 | }
307 |
308 | .highlightBackground a {
309 | font-weight: 800;
310 | }
311 |
312 | .block-container.paddingAll {
313 | padding: 40px;
314 | }
315 |
316 | .block-container.paddingBottom {
317 | padding-bottom: 80px;
318 | }
319 |
320 | .block-container.paddingLeft {
321 | padding-left: 40px;
322 | }
323 |
324 | .block-container.paddingRight {
325 | padding-right: 40px;
326 | }
327 |
328 | .block-container.paddingTop {
329 | padding-top: 80px;
330 | }
331 |
332 | @media only screen and (max-width: 735px) {
333 | .block-container.paddingBottom {
334 | padding-bottom: 40px;
335 | }
336 |
337 | .block-container.paddingTop {
338 | padding-top: 20px;
339 | }
340 | }
341 |
342 |
343 | body {
344 | font-size: 18px;
345 | }
346 |
347 |
348 | .projectTaglineWrapper {
349 | display: flex;
350 | justify-content: center;
351 | }
352 |
353 | .projectTagline {
354 | max-width: 500px;
355 | flex: 1;
356 | font-size: 115%;
357 | }
358 |
359 | .imageAlignSide .blockImage {
360 | text-align: center;
361 | display: flex;
362 | justify-content: center;
363 | align-items: center;
364 | }
365 |
366 | .logos img,
367 | .showcaseSection .logos img {
368 | width: auto;
369 | max-width: 250px;
370 | }
371 |
372 | details > summary {
373 | cursor: pointer;
374 | }
375 |
376 | @media only screen and (min-width: 1024px) {
377 | .projectTitle {
378 | margin-top: 80px;
379 | }
380 | }
381 |
382 | @media only screen and (min-width: 1200px) {
383 | .homeContainer .homeWrapper .projectLogo img {
384 | max-height: 128px;
385 | max-width: 128px;
386 | height: auto;
387 | }
388 | }
389 |
--------------------------------------------------------------------------------
/website/src/pages/index.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) 2017-present, Facebook, Inc.
3 | *
4 | * This source code is licensed under the MIT license found in the
5 | * LICENSE file in the root directory of this source tree.
6 | */
7 |
8 | import React from 'react'
9 | import useDocusaurusContext from '@docusaurus/useDocusaurusContext'
10 | import { GridBlock } from '../components/GridBlock'
11 | import { Container } from '../components/Container'
12 | import Layout from '@theme/Layout'
13 |
14 | const HomeSplash = (props) => {
15 | const { language = '' } = props
16 | const { siteConfig } = useDocusaurusContext()
17 | const { baseUrl, customFields } = siteConfig
18 | const docsPart = `${customFields.docsPath ? `${customFields.docsPath}/` : ''}`
19 | const langPart = `${language ? `${language}/` : ''}`
20 | const docUrl = (doc) => `${baseUrl}${docsPart}${langPart}${doc}`
21 |
22 | const SplashContainer = (props) => (
23 |
24 |
25 |
{props.children}
26 |
27 |
28 | )
29 |
30 | const Logo = (props) => (
31 |
32 |
33 |
34 | )
35 |
36 | const ProjectTitle = () => (
37 |
38 |
{siteConfig.title}
39 |
40 |
{siteConfig.tagline}
41 |
42 |
43 | )
44 |
45 | const Button = (props) => (
46 |
51 | {props.children}
52 |
53 | )
54 |
55 | return (
56 |
57 |
58 |
59 |
60 |
61 | Get Started
62 |
63 |
64 |
65 | )
66 | }
67 |
68 | export default class Index extends React.Component {
69 | render() {
70 | const { config: siteConfig, language = '' } = this.props
71 | const { baseUrl } = siteConfig
72 |
73 | const Block = (props) => (
74 |
79 |
85 |
86 | )
87 |
88 | const FeatureCallout = () => (
89 |
90 |
91 |
92 |
93 | Time series data modeling has broad significance in public health, finance
94 | and engineering
95 |
96 |
97 |
98 |
99 | )
100 |
101 | const Problem = () => (
102 |
103 |
104 | {[
105 | {
106 | title: '',
107 | content: '## Why Time Series? \n - Time series data modeling has broad significance in public health, finance and engineering. \n - Traditional time series methods from statistics often rely on strong modeling assumptions, or are computationally expensive. \n - Given the rise of large-scale sensing data and significant advances in deep learning, the goal of the project is to develop an efficient and user-friendly deep learning library that would benefit the entire research community and beyond.',
108 | image: `${baseUrl}img/time-series-graph.png`,
109 | imageAlt: 'The problem (picture of a question mark)',
110 | imageAlign: 'right',
111 | },
112 | ]}
113 |
114 |
115 | )
116 |
117 | const Solution = () => [
118 |
119 | {[
120 | {
121 | title: '',
122 | image: `${baseUrl}img/why.png`,
123 | imageAlign: 'left',
124 | imageAlt: 'The solution (picture of a star)',
125 | content: '## Why TorchTS? \n - Existing time series analysis libraries include [statsmodels](https://www.statsmodels.org/stable/index.html), [sktime](https://github.com/alan-turing-institute/sktime). However, these libraries only include traditional statistics tools such as ARMA or ARIMA, which do not have the state-of-the-art forecasting tools based on deep learning. \n - [GluonTS](https://ts.gluon.ai/) is an open-source time series library developed by Amazon AWS, but is based on MXNet. \n - [Pyro](https://pyro.ai/) is a probabilistic programming framework based on PyTorch, but is not focused on time series forecasting.',
126 | },
127 | ]}
128 | ,
129 | ]
130 |
131 | const Features = () => (
132 |
133 | {[
134 | {
135 | content: 'Library built on pytorch',
136 | image: `${baseUrl}img/pytorch-logo.png`,
137 | imageAlign: 'top',
138 | title: 'Built On Pytorch',
139 | },
140 | {
141 | content: 'Easy to use with model.predict',
142 | image: `${baseUrl}img/puzzle.png`,
143 | imageAlign: 'top',
144 | title: 'User Friendly',
145 | },
146 | {
147 | content: 'Easily Scalable Library',
148 | image: `${baseUrl}img/scalable.png`,
149 | imageAlign: 'top',
150 | title: 'Scalable',
151 | },
152 | ]}
153 |
154 | )
155 |
156 | return (
157 |
158 |
159 |
160 |
161 |
162 |
163 |
164 |
165 |
166 | )
167 | }
168 | }
169 |
--------------------------------------------------------------------------------
/website/src/pages/index.module.css:
--------------------------------------------------------------------------------
1 | /* stylelint-disable docusaurus/copyright-header */
2 |
3 | /**
4 | * CSS files with the .module.css suffix will be treated as CSS modules
5 | * and scoped locally.
6 | */
7 |
8 | .heroBanner {
9 | padding: 4rem 0;
10 | text-align: center;
11 | position: relative;
12 | overflow: hidden;
13 | }
14 |
15 | @media screen and (max-width: 966px) {
16 | .heroBanner {
17 | padding: 2rem;
18 | }
19 | }
20 |
21 | .buttons {
22 | display: flex;
23 | align-items: center;
24 | justify-content: center;
25 | }
26 |
--------------------------------------------------------------------------------
/website/static/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/website/static/.nojekyll
--------------------------------------------------------------------------------
/website/static/img/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/website/static/img/logo.png
--------------------------------------------------------------------------------
/website/static/img/logo2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/website/static/img/logo2.png
--------------------------------------------------------------------------------
/website/static/img/puzzle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/website/static/img/puzzle.png
--------------------------------------------------------------------------------
/website/static/img/pytorch-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/website/static/img/pytorch-logo.png
--------------------------------------------------------------------------------
/website/static/img/scalable.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/website/static/img/scalable.png
--------------------------------------------------------------------------------
/website/static/img/time-series-graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/website/static/img/time-series-graph.png
--------------------------------------------------------------------------------
/website/static/img/torchTS_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/website/static/img/torchTS_logo.png
--------------------------------------------------------------------------------
/website/static/img/why.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Rose-STL-Lab/torchTS/93597cac5363a1377342146682d6af998cfc9734/website/static/img/why.png
--------------------------------------------------------------------------------