├── .binder
│   └── requirements.txt
├── .gitattributes
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── doc_improvement.md
│   │   └── feature_request.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   └── workflows
│       └── build-and-test.yml
├── .gitignore
├── .travis.yml
├── CHANGELOG.rst
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── MANIFEST.in
├── Makefile
├── README.rst
├── azure-pipelines.yml
├── build_tools
│   ├── azure
│   │   ├── build_and_test.yml
│   │   └── publish.yml
│   ├── build.sh
│   ├── requirements.txt
│   └── test.sh
├── examples
│   ├── multivariate_time_series_classification.ipynb
│   ├── univariate_time_series_classification.ipynb
│   └── univariate_time_series_regression_and_forecasting.ipynb
├── maint_tools
│   ├── linting.sh
│   ├── make_release.py
│   └── requirements.txt
├── setup.cfg
├── setup.py
└── sktime_dl
    ├── .coveragerc
    ├── .dockerignore
    ├── __init__.py
    ├── classification
    │   ├── __init__.py
    │   ├── _classifier.py
    │   ├── _cnn.py
    │   ├── _cntc.py
    │   ├── _encoder.py
    │   ├── _fcn.py
    │   ├── _inceptiontime.py
    │   ├── _lstmfcn.py
    │   ├── _macnn.py
    │   ├── _mcdcnn.py
    │   ├── _mcnn.py
    │   ├── _mlp.py
    │   ├── _resnet.py
    │   ├── _tapnet.py
    │   ├── _tlenet.py
    │   └── _twiesn.py
    ├── experimental
    │   ├── __init__.py
    │   ├── classifier_experiments.py
    │   ├── reduction_examples.py
    │   ├── regression_experiments.py
    │   └── reproductions.py
    ├── meta
    │   ├── __init__.py
    │   ├── _dlensemble.py
    │   ├── _dltuner.py
    │   └── tests
    │       ├── __init__.py
    │       ├── test_ensembling.py
    │       └── test_tuning.py
    ├── networks
    │   ├── __init__.py
    │   ├── _cnn.py
    │   ├── _cntc.py
    │   ├── _encoder.py
    │   ├── _fcn.py
    │   ├── _inceptiontime.py
    │   ├── _lstm.py
    │   ├── _lstmfcn.py
    │   ├── _macnn.py
    │   ├── _mcdcnn.py
    │   ├── _mlp.py
    │   ├── _network.py
    │   ├── _resnet.py
    │   ├── _tapnet.py
    │   └── _tlenet.py
    ├── regression
    │   ├── __init__.py
    │   ├── _cnn.py
    │   ├── _cntc.py
    │   ├── _encoder.py
    │   ├── _fcn.py
    │   ├── _inceptiontime.py
    │   ├── _lstm.py
    │   ├── _lstmfcn.py
    │   ├── _mcdcnn.py
    │   ├── _mlp.py
    │   ├── _regressor.py
    │   ├── _resnet.py
    │   ├── _rnn.py
    │   ├── _tapnet.py
    │   └── _tlenet.py
    ├── tests
    │   ├── __init__.py
    │   ├── test_accuracy.py
    │   ├── test_classifiers.py
    │   ├── test_is_fitted.py
    │   ├── test_regressors.py
    │   └── test_validation.py
    └── utils
        ├── __init__.py
        ├── _data.py
        ├── _models.py
        ├── layer_utils.py
        └── model_lists.py
/.binder/requirements.txt:
--------------------------------------------------------------------------------
1 | sktime-dl
2 | matplotlib
3 | seaborn
4 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | sktime/datasets/data/* linguist-vendored
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: "[BUG]"
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 |
14 |
15 | **To Reproduce**
16 |
23 |
24 | ```python
25 | [Paste your code here.]
26 | ```
27 |
28 | **Expected behavior**
29 |
32 |
33 | **Additional context**
34 |
37 |
38 | **Versions**
39 |
40 |
41 |
50 |
51 |
52 |
53 |
54 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/doc_improvement.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Documentation improvement
3 | about: Create a report to help us improve the documentation. Alternatively you can just open a pull request with the suggested change.
4 | title: "[DOC]"
5 | labels: documentation
6 | assignees: ''
7 |
8 | ---
9 |
10 | #### Describe the issue linked to the documentation
11 |
12 |
15 |
16 | #### Suggest a potential alternative/fix
17 |
18 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: enhancement
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is.
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen, ideally taking into consideration the existing toolbox design, classes and methods.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
5 |
6 | #### Reference Issues/PRs
7 |
14 |
15 |
16 | #### What does this implement/fix? Explain your changes.
17 |
20 |
21 | #### Does your contribution introduce a new dependency? If yes, which one?
22 |
23 |
27 |
28 |
29 | #### Any other comments?
30 |
40 |
--------------------------------------------------------------------------------
/.github/workflows/build-and-test.yml:
--------------------------------------------------------------------------------
1 | name: Build and test sktime-dl
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | pull_request:
8 | branches:
9 | - master
10 |
11 | env:
12 | TEST_DIR: tmp/
13 | REQUIREMENTS: build_tools/requirements.txt
14 | TEST_SLOW: false
15 |
16 | jobs:
17 | build-linux:
18 | runs-on: ${{ matrix.os }}
19 | strategy:
20 | matrix:
21 | include:
22 | - os: ubuntu-latest
23 | python-version: 3.7
24 | tf-version: 2.3
25 | - os: ubuntu-latest
26 | python-version: 3.6
27 | tf-version: 1.9
28 | env:
29 | PYTHON_VERSION: ${{ matrix.python-version }}
30 | TF_VERSION: ${{ matrix.tf-version }}
31 | steps:
32 | - uses: actions/checkout@v2
33 | - name: Set up Python
34 | uses: actions/setup-python@v2
35 | with:
36 | python-version: ${{ matrix.python-version }}
37 | - name: Add conda to system path
38 | run: |
39 | # $CONDA is an environment variable pointing to the root of the miniconda directory
40 | echo $CONDA/bin >> $GITHUB_PATH
41 | - name: Run build script
42 | run: ./build_tools/build.sh
43 | shell: bash
44 | - name: Run test script
45 | run: ./build_tools/test.sh
46 | shell: bash
47 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 | *.c
9 |
10 | # Distribution / packaging
11 | .Python
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 |
29 | #TSC results
30 | results/
31 |
32 | MANIFEST
33 |
34 | # PyInstaller
35 | # Usually these files are written by a python script from a template
36 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
37 | *.manifest
38 | *.spec
39 |
40 | # Installer logs
41 | pip-log.txt
42 | pip-delete-this-directory.txt
43 |
44 | # Unit test / coverage reports
45 | htmlcov/
46 | .tox/
47 | .nox/
48 | .coverage
49 | .coverage.*
50 | .cache
51 | nosetests.xml
52 | coverage.xml
53 | *.cover
54 | .hypothesis/
55 | .pytest_cache/
56 |
57 | # Translations
58 | *.mo
59 | *.pot
60 |
61 | # Django stuff:
62 | *.log
63 | local_settings.py
64 | db.sqlite3
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 | doc/_build/
76 | doc/generated/
77 |
78 | # PyBuilder
79 | target/
80 |
81 | # Jupyter Notebook
82 | .ipynb_checkpoints
83 |
84 | # IPython
85 | profile_default/
86 | ipython_config.py
87 |
88 | # pyenv
89 | .python-version
90 |
91 | # celery beat schedule file
92 | celerybeat-schedule
93 |
94 | # SageMath parsed files
95 | *.sage.py
96 |
97 | # Environments
98 | .env
99 | .venv
100 | env/
101 | venv/
102 | ENV/
103 | env.bak/
104 | venv.bak/
105 |
106 | # mkdocs documentation
107 | /site
108 |
109 | # mypy
110 | .mypy_cache/
111 | .dmypy.json
112 | dmypy.json
113 |
114 | # Pyre type checker
115 | .pyre/
116 |
117 | # IDE files
118 | *.prefs
119 | .pydevproject
120 | .idea
121 | .vscode
122 | .spyderproject
123 | .spyproject
124 | .ropeproject
125 |
126 | # scikit-learn specific
127 | doc/_build/
128 | doc/auto_examples/
129 | doc/modules/generated/
130 | doc/datasets/generated/
131 |
132 | # vim swap files
133 | *.swp
134 |
135 | # autogen stuff
136 | /documentation/source/autogen
137 |
138 | # macOS files
139 | .DS_Store
140 |
141 | # dask-worker-space
142 | dask-worker-space
143 |
144 | # documentation build files
145 | build_doc_site/
146 |
147 | envs/
148 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | # whitelist the following branches for building
2 | branches:
3 | only:
4 | - master
5 | - dev
6 |
7 | env:
8 | global:
9 | - TEST_DIR=tmp/
10 | - REQUIREMENTS=build_tools/requirements.txt
11 | - TEST_SLOW=false
12 |
13 | dist: xenial
14 | sudo: false
15 |
16 | language: python
17 |
18 | cache:
19 | directories:
20 | - $HOME/.cache/pip
21 |
22 | matrix:
23 | include:
24 | # add more combinations here as per requirement
25 | - env: PYTHON_VERSION="3.6" TF_VERSION="1.9"
26 | - env: PYTHON_VERSION="3.6" TF_VERSION="1.15"
27 | - env: PYTHON_VERSION="3.7" TF_VERSION="2.1"
28 | - env: PYTHON_VERSION="3.7" TF_VERSION="2.3"
29 |
30 | install:
31 | # Download and install miniconda
32 | - deactivate
33 | - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
34 | - MINICONDA_PATH=/home/travis/miniconda
35 | - chmod +x miniconda.sh && ./miniconda.sh -b -p $MINICONDA_PATH
36 | - export PATH=$MINICONDA_PATH/bin:$PATH
37 |
38 | # Build
39 | - source build_tools/build.sh
40 |
41 | script:
42 | - source build_tools/test.sh
43 |
44 | after_success:
45 | - coveralls
46 |
47 |
48 |
49 |
--------------------------------------------------------------------------------
/CHANGELOG.rst:
--------------------------------------------------------------------------------
1 | Changelog
2 | =========
3 |
4 | All notable changes to this project will be documented in this file.
5 |
6 | The format is based on `Keep a Changelog <https://keepachangelog.com/en/1.0.0/>`_ and we adhere to `Semantic Versioning <https://semver.org/>`_.
7 |
8 | We keep track of changes in this file since v0.4.0.
9 |
10 |
11 | [Unreleased]
12 | ------------
13 | Added
14 | ~~~~~
15 | -
16 |
17 | Changed
18 | ~~~~~~~
19 | -
20 |
21 | Removed
22 | ~~~~~~~
23 | -
24 |
25 | Fixed
26 | ~~~~~
27 | -
28 |
29 | Deprecated
30 | ~~~~~~~~~~
31 | -
32 |
33 |
34 | [0.4.0] - 2019-04-xx
35 | --------------------
36 |
37 | Added
38 | ~~~~~
39 | - Added changelog.
40 |
41 | Changed
42 | ~~~~~~~
43 | -
44 |
45 | Removed
46 | ~~~~~~~
47 | -
48 |
49 | Fixed
50 | ~~~~~
51 | -
52 |
53 | Deprecated
54 | ~~~~~~~~~~
55 | -
56 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | How to contribute
2 | -----------------
3 |
4 | The preferred workflow for contributing to sktime is to fork the
5 | [main repository](https://github.com/alan-turing-institute/sktime/) on
6 | GitHub, clone, and develop on a branch. Steps:
7 |
8 | 1. Fork the [project repository](https://github.com/alan-turing-institute/sktime)
9 | by clicking on the 'Fork' button near the top right of the page. This creates
10 | a copy of the code under your GitHub user account. For more details on
11 | how to fork a repository see [this guide](https://help.github.com/articles/fork-a-repo/).
12 |
13 | 2. Clone your fork of the sktime repo from your GitHub account to your local
14 | disk:
15 |
16 | ```bash
17 | $ git clone git@github.com:YourLogin/sktime.git
18 | $ cd sktime
19 | ```
20 |
21 | 3. Create a new ``feature`` branch from the ``dev`` branch to hold your changes:
22 |
23 | ```bash
24 | $ git checkout dev
25 | $ git checkout -b my-feature-branch
26 | ```
27 |
28 | Always use a ``feature`` branch. It's good practice to never work on the ``master`` branch!
29 |
30 | 4. Develop the feature on your feature branch. Add changed files using ``git
31 | add`` and then ``git commit`` files to record your changes in Git:
32 | ```bash
33 | $ git add modified_files
34 | $ git commit
35 | ```
36 |
37 | 5. When finished, push the changes to your GitHub account with:
38 |
39 | ```bash
40 | $ git push -u origin my-feature-branch
41 | ```
42 |
43 | 6. Follow [these instructions](https://help.github.com/articles/creating-a-pull-request-from-a-fork)
44 | to create a pull request from your fork. This will send an email to the committers.
45 |
46 | If any of the above seems like magic to you, please look up the
47 | [Git documentation](https://git-scm.com/documentation) on the web, or ask a friend
48 | or another contributor for help.
49 |
50 | Pull Request Checklist
51 | ----------------------
52 |
53 | We recommend that your contribution comply with the
54 | following rules before you submit a pull request:
55 |
56 | - Follow the [PEP8](https://www.python.org/dev/peps/pep-0008/) coding
57 | guidelines. A good example can be found [here](https://gist.github.com/nateGeorge/5455d2c57fb33c1ae04706f2dc4fee01).
58 | In addition, we add the following guidelines:
59 | - Use underscores to separate words in non-class names: `n_samples` rather than
60 | `nsamples`.
61 | - Avoid multiple statements on one line. Prefer a line return after a
62 | control flow statement (`if`/`for`).
63 | - Use relative imports for references inside sktime.
64 | - Unit tests are an exception to the previous rule; they should use
65 | absolute imports, exactly as client code would. A corollary is that, if
66 | `sktime.foo` exports a class or function that is implemented in `sktime.foo.bar.baz`,
67 | the test should import it from `sktime.foo`.
68 | - Please don’t use `import *` in any case. It is considered harmful by the
69 | official Python recommendations. It makes the code harder to read as the
70 | origin of symbols is no longer explicitly referenced, but most important,
71 | it prevents using a static analysis tool like pyflakes to automatically
72 | find bugs.
73 | - Use the [numpy docstring standard](https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard) in all your docstrings.
74 |
75 | - Give your pull request a helpful title that summarises what your
76 | contribution does. In some cases `Fix <ISSUE TITLE>` is enough.
77 | `Fix #<ISSUE NUMBER>` is not enough.
78 |
79 | - Often pull requests resolve one or more other issues (or pull requests).
80 | If merging your pull request means that some other issues/PRs should
81 | be closed, you should
82 | [use keywords to create links to them](https://github.com/blog/1506-closing-issues-via-pull-requests/)
83 | (e.g., `Fixes #1234`; multiple issues/PRs are allowed as long as each one
84 | is preceded by a keyword). Upon merging, those issues/PRs will
85 | automatically be closed by GitHub. If your pull request is simply related
86 | to some other issues/PRs, create a link to them without using the keywords
87 | (e.g., `See also #1234`).
88 |
89 | - All public methods should have informative docstrings with sample
90 | usage presented as doctests when appropriate.
91 |
92 |
93 | Filing bugs
94 | -----------
95 | We use GitHub issues to track all bugs and feature requests; feel free to
96 | open an issue if you have found a bug or wish to see a feature implemented.
97 |
98 | It is recommended to check that your issue complies with the
99 | following rules before submitting:
100 |
101 | - Verify that your issue is not being currently addressed by other
102 | [issues](https://github.com/alan-turing-institute/sktime/issues)
103 | or [pull requests](https://github.com/alan-turing-institute/sktime/pulls).
104 |
105 | - Please ensure all code snippets and error messages are formatted in
106 | appropriate code blocks.
107 | See [Creating and highlighting code blocks](https://help.github.com/articles/creating-and-highlighting-code-blocks).
108 |
109 | - Please be specific about what estimators and/or functions are involved
110 | and the shape of the data, as appropriate; please include a
111 | [reproducible](https://stackoverflow.com/help/mcve) code snippet
112 | or link to a [gist](https://gist.github.com). If an exception is raised,
113 | please provide the traceback.
114 |
115 |
116 | Coding tips:
117 | ------------
118 |
119 | - When writing new classes, inherit from appropriate base classes (`BaseTransformer`, `BaseClassifier`, `BaseRegressor`),
120 |
121 | - Use relative imports when importing functions or classes from within sktime, except for unit tests where you should use absolute imports.
122 |
123 |
--------------------------------------------------------------------------------
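As a worked illustration of the docstring and naming guidelines above, here is a minimal sketch of a conforming function. The function name and parameters are hypothetical, chosen only to demonstrate the conventions:

```python
def count_valid_instances(X, n_samples=None):
    """Count the instances in a nested time series DataFrame.

    Parameters
    ----------
    X : pd.DataFrame
        Nested DataFrame of time series, shape (n_instances, n_dimensions).
    n_samples : int, optional (default=None)
        If given, only the first ``n_samples`` rows are considered.

    Returns
    -------
    count : int
        The number of instances counted.
    """
    # underscore-separated names (n_samples), one statement per line
    if n_samples is None:
        return len(X)
    return min(len(X), n_samples)
```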
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:2.1.0-gpu-py3
2 | RUN pip install Cython==0.29.14
3 | COPY build_tools/requirements.txt .
4 | RUN pip install -r requirements.txt
5 | WORKDIR /usr/src/app
6 | COPY sktime_dl sktime_dl
7 |
8 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2019, The Alan Turing Institute, University College London, University of East Anglia, Anthony Bagnall, Sajaysurya Ganesh, Viktor Kazakov, Franz Király, Jason Lines, Markus Löning
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | * Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | * Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.rst
2 | recursive-include examples *
3 | recursive-include sktime_dl *.c *.h *.pyx *.pxd *.pxi
4 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for easier installation and cleanup.
2 | #
3 | # Uses self-documenting macros from here:
4 | # http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
5 |
6 | PACKAGE=sktime_dl
7 | MAINT_DIR = './maint_tools/'
8 |
9 | .PHONY: help cover dist venv
10 |
11 | .DEFAULT_GOAL := help
12 |
13 | help:
14 | @grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) |\
15 | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m\
16 | %s\n", $$1, $$2}'
17 |
18 | release: ## Make a release
19 | python $(MAINT_DIR)/make_release.py
20 |
21 | install: ## Install for the current user using the default python command
22 | python setup.py build_ext --inplace && python setup.py install --user
23 |
24 | test: ## Run unit tests
25 | pytest --cov-report html --cov=sktime_dl --showlocals --durations=20 --pyargs $(PACKAGE)
26 |
27 | lint: ## Run linting
28 | $(MAINT_DIR)/linting.sh
29 |
30 | clean: ## Clean build dist and egg directories left after install
31 | rm -rf ./dist
32 | rm -rf ./build
33 | rm -rf ./pytest_cache
34 | rm -rf ./htmlcov
35 | rm -rf ./$(PACKAGE).egg-info
36 | rm -rf ./cover
37 | rm -rf $(VENV_DIR)
38 | rm -f MANIFEST
39 | rm -f ./$(PACKAGE)/*.so
40 | find . -type f -iname '*.pyc' -delete
41 | find . -type d -name '__pycache__' -empty -delete
42 |
43 | dist: ## Build Python wheel distribution
44 | python setup.py bdist_wheel
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | ``sktime-dl`` is currently being ported to mini-packages within ``sktime``, and no longer maintained as a separate package.
2 |
3 | Most estimators formerly in ``sktime-dl`` are now available in the ``sktime.classification.deep_learning`` and ``sktime.regression.deep_learning`` modules, and maintained there.
4 |
5 | Contributions are appreciated to port the rest!
6 |
7 | To contribute, follow instructions in the umbrella planning issue https://github.com/sktime/sktime/issues/3351 on the ``sktime`` repo.
8 |
--------------------------------------------------------------------------------
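For readers following the migration note above, a minimal sketch of using one of the ported estimators from within ``sktime`` itself. Module, estimator, and parameter names are as in recent ``sktime`` releases and may differ slightly by version:

```python
# Sketch only: assumes a recent sktime release that ships the
# deep_learning modules mentioned in the README; exact names may vary.
from sktime.classification.deep_learning import CNNClassifier
from sktime.datasets import load_unit_test

X_train, y_train = load_unit_test(split="train")
X_test, y_test = load_unit_test(split="test")

clf = CNNClassifier(n_epochs=10)  # few epochs, just a smoke test
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
```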
/azure-pipelines.yml:
--------------------------------------------------------------------------------
1 | # author: Markus Löning
2 | # runs continuous integration checks for commits and PRs
3 |
4 | # adapted from
5 | # - https://iscinumpy.gitlab.io/post/azure-devops-python-wheels/
6 | # - https://iscinumpy.gitlab.io/post/azure-devops-releases/
7 |
8 | name: CI.$(Date:yyyyMMdd).$(Rev:r)
9 |
10 | variables:
11 | REQUIREMENTS: build_tools/requirements.txt
12 | TEST_DIR: tmp/
13 | TEST_SLOW: false # whether or not to run slow tests
14 |
15 | trigger:
16 | branches:
17 | include:
18 | - master
19 | - dev
20 | tags:
21 | include:
22 | - '*'
23 | pr:
24 | # Cancel if new commits are pushed to the same PR
25 | autoCancel: true
26 |
27 | stages:
28 | - stage: 'Linting'
29 | jobs:
30 | - job: 'Linting'
31 | pool:
32 | vmImage: 'ubuntu-latest'
33 | steps:
34 | - task: UsePythonVersion@0
35 | displayName: 'Use Python version'
36 | inputs:
37 | versionSpec: 3.x
38 | - script: pip install flake8
39 | displayName: 'Installing flake8'
40 | - bash: maint_tools/linting.sh
41 | displayName: 'Linting'
42 |
43 | - stage: 'Build'
44 | dependsOn: 'Linting'
45 | condition: succeeded('Linting')
46 | jobs:
47 | - job: 'Linux_latest'
48 | pool:
49 | vmImage: 'ubuntu-latest'
50 | steps:
51 | - bash: echo "##vso[task.prependpath]$CONDA/bin"
52 | displayName: 'Add conda to PATH'
53 | - template: build_tools/azure/build_and_test.yml
54 | strategy:
55 | matrix:
56 | py36_tf19:
57 | PYTHON_VERSION: 3.6
58 | TF_VERSION: 1.9
59 | py36_tf115:
60 | PYTHON_VERSION: 3.6
61 | TF_VERSION: 1.15
62 | py37_tf21:
63 | PYTHON_VERSION: 3.7
64 | TF_VERSION: 2.1
65 | # runs all checks, including slow ones, to check if we can still
66 | # reproduce published results
67 | py37_tf23:
68 | PYTHON_VERSION: 3.7
69 | TF_VERSION: 2.3
70 | TEST_SLOW: true
71 | - job: 'Linux_xenial'
72 | pool:
73 | vmImage: 'ubuntu-16.04'
74 | steps:
75 | - bash: echo "##vso[task.prependpath]$CONDA/bin"
76 | displayName: 'Add conda to PATH'
77 | - template: build_tools/azure/build_and_test.yml
78 | strategy:
79 | matrix:
80 | py36_tf19:
81 | PYTHON_VERSION: 3.6
82 | TF_VERSION: 1.9
83 | - job: 'Windows'
84 | timeoutInMinutes: 120 # use 0 to set the timeout to the maximum
85 | pool:
86 | vmImage: 'vs2017-win2016'
87 | steps:
88 | - powershell: Write-Host "##vso[task.prependpath]$env:CONDA\Scripts"
89 | displayName: 'Add conda to PATH'
90 | - template: build_tools/azure/build_and_test.yml
91 | strategy:
92 | matrix:
93 | py36_tf19:
94 | PYTHON_VERSION: 3.6
95 | TF_VERSION: 1.9
96 | py36_tf115:
97 | PYTHON_VERSION: 3.6
98 | TF_VERSION: 1.15
99 | py37_tf21:
100 | PYTHON_VERSION: 3.7
101 | TF_VERSION: 2.1
102 | py37_tf23:
103 | PYTHON_VERSION: 3.7
104 | TF_VERSION: 2.3
105 | - job: 'macOS_latest'
106 | pool:
107 | vmImage: 'macOS-latest'
108 | steps:
109 | - bash: echo "##vso[task.prependpath]$CONDA/bin"
110 | displayName: 'Add conda to PATH'
111 | - bash: sudo chown -R $USER $CONDA
112 | displayName: 'Take ownership of conda installation'
113 | - template: build_tools/azure/build_and_test.yml
114 | strategy:
115 | matrix:
116 | py36_tf115:
117 | PYTHON_VERSION: 3.6
118 | TF_VERSION: 1.15
119 | - job: 'macOS_mojave'
120 | pool:
121 | vmImage: 'macOS-10.14'
122 | steps:
123 | - bash: echo "##vso[task.prependpath]$CONDA/bin"
124 | displayName: 'Add conda to PATH'
125 | - bash: sudo chown -R $USER $CONDA
126 | displayName: 'Take ownership of conda installation'
127 | - template: build_tools/azure/build_and_test.yml
128 | strategy:
129 | matrix:
130 | py36_tf19:
131 | PYTHON_VERSION: 3.6
132 | TF_VERSION: 1.9
133 |
134 | - stage: 'Deploy'
135 | dependsOn: 'Build'
136 | condition: and(succeeded('Build'), eq(variables['Build.SourceBranch'], 'refs/heads/master'), startsWith(variables['Build.SourceBranch'], 'refs/tags/'))
137 | jobs:
138 | - job: 'deploy_to_pypi'
139 | pool:
140 | vmImage: 'ubuntu-latest'
141 | steps:
142 | - task: DownloadPipelineArtifact@2
143 | displayName: 'Collect wheels'
144 | inputs:
145 | source: 'specific'
146 | project: 'sktime-dl'
147 | pipeline: 'alan-turing-institute.sktime-dl'
148 | runVersion: 'latestFromBranch'
149 | runBranch: 'refs/heads/master'
150 | tags: '^v[0-9]\.[0-9]\.[0-9]$'
151 | patterns: 'wheels_*/*.whl'
152 | path: 'download/'
153 | - script: |
154 | mkdir dist
155 | cp download/wheels_*/*.whl dist/
156 | ls -lh dist/
157 | displayName: 'Select and list wheels'
158 | - script: |
159 | pip install --upgrade twine
160 | displayName: 'Install twine'
161 | # - task: TwineAuthenticate@1
162 | # displayName: 'Twine Authenticate'
163 | # inputs:
164 | # # configured in https://dev.azure.com///_settings/adminservices
165 | # pythonUploadServiceConnection: PyPI
166 | # - script: |
167 | # ls -lh dist/*.whl
168 | # # twine upload -r pypi --config-file $(PYPIRC_PATH) --skip-existing --verbose dist/*.whl
169 | # displayName: 'Upload wheels to PyPI'
170 |
--------------------------------------------------------------------------------
/build_tools/azure/build_and_test.yml:
--------------------------------------------------------------------------------
1 | # adapted from:
2 | # - https://github.com/scikit-hep/azure-wheel-helpers/blob/master/azure-publish-dist.yml
3 |
4 | steps:
5 | - bash: build_tools/build.sh
6 | displayName: 'Build'
7 | - bash: build_tools/test.sh
8 | displayName: 'Test'
9 | - template: publish.yml
10 |
11 |
12 |
--------------------------------------------------------------------------------
/build_tools/azure/publish.yml:
--------------------------------------------------------------------------------
1 | # adapted from:
2 | # - https://github.com/scikit-hep/azure-wheel-helpers/blob/master/azure-publish-dist.yml
3 |
4 | steps:
5 | - script: |
6 | ls -lh dist/
7 | displayName: 'List wheels'
8 |
9 | # publish wheels on azure: https://dev.azure.com/mloning/sktime-dl/_build
10 | - task: PublishPipelineArtifact@0
11 | condition: succeeded()
12 | displayName: 'Publish wheels'
13 | inputs:
14 | artifactName: 'wheels_$(Agent.OS)_$(Agent.JobName)_$(PYTHON_VERSION)'
15 | targetPath: 'dist'
16 |
17 | # publish test results on azure: https://dev.azure.com/mloning/sktime-dl/_build
18 | - task: PublishTestResults@2
19 | condition: succeededOrFailed()
20 | displayName: 'Publish test results'
21 | inputs:
22 | testResultsFiles: '**/test-*.xml'
23 | testRunTitle: 'pytest_$(Agent.OS)_$(Agent.JobName)_$(PYTHON_VERSION)'
24 |
25 | - task: PublishCodeCoverageResults@1
26 | condition: succeededOrFailed()
27 | displayName: 'Publish coverage results'
28 | inputs:
29 | codeCoverageTool: Cobertura
30 | summaryFileLocation: '$(System.DefaultWorkingDirectory)/**/coverage.xml'
31 | reportDirectory: '$(System.DefaultWorkingDirectory)/**/htmlcov'
32 |
--------------------------------------------------------------------------------
/build_tools/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # adapted from https://github.com/scikit-learn/scikit-learn/blob/master/build_tools/travis/install.sh
4 |
5 | set -e
6 |
7 | echo "Setting up conda env ..."
8 | echo "Python version: " "$PYTHON_VERSION"
9 | echo "TF version: " "$TF_VERSION"
10 |
11 | # Deactivate any previously set virtual environment and set up a
12 | # conda-based environment instead
13 | deactivate || :
14 |
15 | # Configure conda
16 | conda config --set always_yes true
17 | conda update --quiet conda
18 |
19 | # Set up test environment
20 | conda create --name testenv python="$PYTHON_VERSION"
21 |
22 | # Activate environment
23 | source activate testenv
24 |
25 | # Install tensorflow via pip, tests fail when installed via conda
26 | pip install tensorflow=="$TF_VERSION"
27 |
28 | # Install requirements from inside conda environment
29 | pip install cython # only needed until we provide sktime wheels
30 | pip install -r "$REQUIREMENTS"
31 |
32 | # Build sktime-dl
33 | # invokes build_ext -i to compile files
34 | # builds universal wheel, as specified in setup.cfg
35 | python setup.py bdist_wheel
36 |
37 | # Install from built wheels
38 | pip install --pre --no-index --no-deps --find-links dist/ sktime-dl
39 |
40 | # now need to install keras-contrib for tf.keras instead of standalone keras
41 | # not needed for the tf 2.1 environment, but does not hurt either.
42 | # TODO: investigate conditional installation
43 | echo "Installing keras-contrib ..."
44 | git clone https://www.github.com/keras-team/keras-contrib.git
45 | cd keras-contrib/
46 | python convert_to_tf_keras.py
47 | USE_TF_KERAS=1 python setup.py install
48 | cd ..
49 |
50 | set +e
51 |
--------------------------------------------------------------------------------
/build_tools/requirements.txt:
--------------------------------------------------------------------------------
1 | sktime==0.6.1
2 | tensorflow==2.5.1
3 | tensorflow_addons==0.9.*
4 | keras==2.5.0rc0
5 | h5py>=3.1.0
6 | pytest
7 | pytest-cov
8 | flaky
9 | coveralls
10 | numpy==1.19.2
11 | matplotlib
12 | seaborn
13 | keras-self-attention
14 |
--------------------------------------------------------------------------------
/build_tools/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # adapted from https://github.com/scikit-learn/scikit-learn/blob/master/build_tools/travis/test_script.sh
4 |
5 | # Exit the script if any statement returns a non-true return value
6 | set -e
7 |
8 | # Activate conda environment
9 | deactivate || :
10 | source activate testenv
11 |
12 | # Print test environment
13 | conda list
14 |
15 | # Get into a temp directory to run tests from the installed package and
16 | # check that we do not leave artifacts
17 | mkdir -p "$TEST_DIR"
18 |
19 | # We need to copy the setup.cfg for the pytest settings
20 | cp setup.cfg "$TEST_DIR"
21 |
22 | # Change directory
23 | cd "$TEST_DIR"
24 |
25 | # Define test command
26 | TEST_CMD="pytest"
27 | TEST_ARGS=(--verbose --showlocals "--durations=20" --cov-report html
28 | --cov-report xml "--junitxml=junit/test-results.xml" "--cov=sktime_dl"
29 | --pyargs)
30 |
31 | if [[ "$TEST_SLOW" == "false" ]]; then
32 | TEST_ARGS+=("-m=not slow")
33 | fi
34 |
35 | # Print command before executing
36 | set -o xtrace
37 |
38 | # Run tests
39 | "$TEST_CMD" "${TEST_ARGS[@]}" "../sktime_dl/"
40 |
41 | set +o xtrace
42 | set +e
43 |
--------------------------------------------------------------------------------
/maint_tools/linting.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # author: Markus Löning
4 | # code quality check using flake8
5 |
6 | set -e -x
7 | set -o pipefail
8 |
9 | if ! flake8 --verbose --filename=*.py sktime_dl/; then
10 | echo 'Linting failed.'
11 | # exit with a non-zero status so that CI fails when linting fails
12 | exit 1
13 | fi
14 |
--------------------------------------------------------------------------------
/maint_tools/requirements.txt:
--------------------------------------------------------------------------------
1 | colorama
2 | twine
3 | flake8
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [aliases]
2 | test = pytest
3 |
4 | [tool:pytest]
5 | # ignore certain folders and pytest warnings
6 | addopts =
7 | --ignore build_tools
8 | --ignore maint_tools
9 | --ignore examples
10 | # --disable-pytest-warnings
11 |
12 | filterwarnings =
13 | # Warnings that we raise:
14 | ignore::UserWarning
15 |
16 | [flake8]
17 | # Default flake8 3.5 ignored flags
18 | ignore=E121,E123,E126,E226,E24,E704,W503,W504
19 | exclude=sktime_dl/experimental/*
20 |
21 | [metadata]
22 | description-file = README.rst
23 |
24 | [bdist_wheel]
25 | universal=1
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | """Install script for sktime-dl"""
3 |
4 | import codecs
5 | import os
6 | import platform
7 | import re
8 | import sys
9 |
10 | from pkg_resources import Requirement, parse_version
11 | from pkg_resources import working_set
12 | from setuptools import find_packages
13 | from setuptools import setup
14 |
15 | # raise early warning for incompatible Python versions
16 | if sys.version_info < (3, 6) or sys.version_info >= (3, 8):
17 | raise RuntimeError(
18 | "sktime-dl requires Python 3.6 or 3.7 (only with tensorflow>=1.13.1). "
19 | "The current Python version is %s installed in %s."
20 | % (platform.python_version(), sys.executable))
21 |
22 | HERE = os.path.abspath(os.path.dirname(__file__))
23 |
24 |
25 | def read(*parts):
26 | # intentionally *not* adding an encoding option to open, See:
27 | # https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
28 | with codecs.open(os.path.join(HERE, *parts), 'r') as fp:
29 | return fp.read()
30 |
31 |
32 | def find_version(*file_paths):
33 | version_file = read(*file_paths)
34 | version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
35 | version_file, re.M)
36 | if version_match:
37 | return version_match.group(1)
38 | else:
39 | raise RuntimeError("Unable to find version string.")
40 |
41 |
42 | def find_install_requires():
43 | """Return a list of dependencies and non-pypi dependency links.
44 |
45 | A supported version of tensorflow and/or tensorflow-gpu is required. If not
46 | found, then tensorflow is added to the install_requires list.
47 |
48 | Depending on the version of tensorflow found or installed, either
49 | keras-contrib or tensorflow-addons needs to be installed as well.
50 | """
51 |
52 | install_requires = [
53 | 'sktime==0.7.0',
54 | 'h5py>=3.1.0',
55 | 'matplotlib',
56 | 'seaborn',
57 | 'keras-self-attention',
58 | 'keras==2.5.0rc0'
59 | ]
60 |
61 | # tensorflow version requirements
62 | # by default, pin the supported stable release; if a GPU build is
63 | # already installed instead, just require it to meet the same
64 | # minimum version
65 | MINIMUM_TF_VERSION = '2.5.0'
66 | tf_requires = 'tensorflow==' + MINIMUM_TF_VERSION
67 |
68 | has_tf_gpu = False
69 | has_tf = False
70 | tf = working_set.find(Requirement.parse('tensorflow'))
71 | tf_gpu = working_set.find(Requirement.parse('tensorflow-gpu'))
72 |
73 | if tf is not None:
74 | has_tf = True
75 | tf_version = tf.version
76 |
77 | if tf_gpu is not None:
78 | has_tf_gpu = True
79 | tf_gpu_version = tf_gpu.version
80 |
81 | if has_tf_gpu and not has_tf:
82 | # only the -gpu package is installed; require it to meet the
83 | # minimum version rather than adding a second tensorflow pin
84 | tf_requires = 'tensorflow-gpu>=' + MINIMUM_TF_VERSION
85 |
86 | install_requires.append(tf_requires)
87 |
88 | # tensorflow itself handled, now find out what add-on package to use
89 | if (not has_tf and not has_tf_gpu) or (has_tf and parse_version(tf_version) >= parse_version('2.1.0')):
90 | # tensorflow will be up-to-date enough to use most recent
91 | # tensorflow-addons, the replacement for keras-contrib
92 | install_requires.append('tensorflow-addons')
93 | else:
94 | # fall back to keras-contrib, which is not on PyPI, so it has to
95 | # be installed separately; print a pointer for the user
96 | print(
97 | 'Existing version of tensorflow older than version 2.1.0 '
98 | 'detected. You will need to install keras-contrib (for tf.keras) '
99 | 'in order to use all the features of sktime-dl. '
100 | 'See https://github.com/keras-team/keras-contrib#install-keras_contrib-for-tensorflowkeras')
101 |
102 | return install_requires
103 |
104 |
105 | DISTNAME = 'sktime-dl'  # distribution name; the module is named sktime_dl so it is a valid import path
106 | DESCRIPTION = 'Deep learning extension package for sktime, a scikit-learn ' \
107 | 'compatible toolbox for learning with time series data'
108 | with codecs.open('README.rst', encoding='utf-8-sig') as f:
109 | LONG_DESCRIPTION = f.read()
110 | MAINTAINER = 'F. Király'
111 | MAINTAINER_EMAIL = 'f.kiraly@ucl.ac.uk'
112 | URL = 'https://github.com/sktime/sktime-dl'
113 | LICENSE = 'BSD-3-Clause'
114 | DOWNLOAD_URL = 'https://pypi.org/project/sktime-dl/#files'
115 | PROJECT_URLS = {
116 | 'Issue Tracker': 'https://github.com/sktime/sktime-dl/issues',
117 | 'Documentation': 'https://sktime.github.io/sktime-dl/',
118 | 'Source Code': 'https://github.com/sktime/sktime-dl'
119 | }
120 | VERSION = find_version('sktime_dl', '__init__.py')
121 | INSTALL_REQUIRES = find_install_requires()
122 | CLASSIFIERS = ['Intended Audience :: Science/Research',
123 | 'Intended Audience :: Developers',
124 | 'License :: OSI Approved',
125 | 'Programming Language :: Python',
126 | 'Topic :: Software Development',
127 | 'Topic :: Scientific/Engineering',
128 | 'Operating System :: Microsoft :: Windows',
129 | 'Operating System :: POSIX',
130 | 'Operating System :: Unix',
131 | 'Operating System :: MacOS',
132 | 'Programming Language :: Python :: 3.6',
133 | 'Programming Language :: Python :: 3.7']
134 |
135 | EXTRAS_REQUIRE = {
136 | 'tests': [
137 | 'pytest',
138 | 'pytest-cov',
139 | 'flaky'],
140 | 'docs': [
141 | 'sphinx',
142 | 'sphinx-gallery',
143 | 'sphinx_rtd_theme',
144 | 'numpydoc',
145 | 'matplotlib'
146 | ]
147 | }
148 |
149 | setup(name=DISTNAME,
150 | maintainer=MAINTAINER,
151 | maintainer_email=MAINTAINER_EMAIL,
152 | description=DESCRIPTION,
153 | license=LICENSE,
154 | url=URL,
155 | version=VERSION,
156 | download_url=DOWNLOAD_URL,
157 | long_description=LONG_DESCRIPTION,
158 | zip_safe=False,  # the package cannot run directly out of a zipped .egg
159 | classifiers=CLASSIFIERS,
160 | packages=find_packages(),
161 | include_package_data=True,
162 | install_requires=INSTALL_REQUIRES,
163 | extras_require=EXTRAS_REQUIRE,
164 | )
165 |
--------------------------------------------------------------------------------
/sktime_dl/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | source = sktime_dl/*
3 | omit = sktime_dl/classifiers/deeplearning/tests/*
4 |     sktime_dl/meta/tests/*
5 |
6 | [report]
7 | include = sktime_dl/*
8 | omit = sktime_dl/classifiers/deeplearning/tests/*
9 |     sktime_dl/meta/tests/*
10 |
--------------------------------------------------------------------------------
/sktime_dl/.dockerignore:
--------------------------------------------------------------------------------
1 | **/*.pyc
2 |
--------------------------------------------------------------------------------
/sktime_dl/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.2.0"
2 |
--------------------------------------------------------------------------------
/sktime_dl/classification/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = [
2 | "CNNClassifier",
3 | "FCNClassifier",
4 | "InceptionTimeClassifier",
5 | "LSTMFCNClassifier",
6 | "CNTCClassifier",
7 | "EncoderClassifier",
8 | "MCDCNNClassifier",
9 | "MCNNClassifier",
10 | "MLPClassifier",
11 | "ResNetClassifier",
12 | "TLENETClassifier",
13 | "TWIESNClassifier",
14 | "TapNetClassifier",
15 | "MACNNClassifier"
16 | ]
17 |
18 | from sktime_dl.classification._cnn import CNNClassifier
19 | from sktime_dl.classification._fcn import FCNClassifier
20 | from sktime_dl.classification._inceptiontime import InceptionTimeClassifier
21 | from sktime_dl.classification._lstmfcn import LSTMFCNClassifier
22 | from sktime_dl.classification._cntc import CNTCClassifier
23 | from sktime_dl.classification._encoder import EncoderClassifier
24 | from sktime_dl.classification._mcdcnn import MCDCNNClassifier
25 | from sktime_dl.classification._mcnn import MCNNClassifier
26 | from sktime_dl.classification._mlp import MLPClassifier
27 | from sktime_dl.classification._resnet import ResNetClassifier
28 | from sktime_dl.classification._tlenet import TLENETClassifier
29 | from sktime_dl.classification._twiesn import TWIESNClassifier
30 | from sktime_dl.classification._tapnet import TapNetClassifier
31 | from sktime_dl.classification._macnn import MACNNClassifier
32 |
--------------------------------------------------------------------------------
/sktime_dl/classification/_classifier.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Base class for the Keras neural network classifiers adapted from Fawaz et al.
4 | # https://github.com/hfawaz/dl-4-tsc
5 | """
6 | __author__ = "James Large, Aaron Bostrom"
7 | __all__ = ["BaseDeepClassifier"]
8 |
9 | import numpy as np
10 | from sklearn.preprocessing import LabelEncoder
11 | from sklearn.preprocessing import OneHotEncoder
12 | # from sktime.classifiers.base import BaseClassifier
13 | from sktime.classification.base import BaseClassifier
14 |
15 | from sktime_dl.utils import check_and_clean_data
16 | from sktime_dl.utils import check_is_fitted
17 | from sktime_dl.utils import save_trained_model
18 |
19 |
20 | class BaseDeepClassifier(BaseClassifier):
21 | def __init__(self, model_name=None, model_save_directory=None):
22 | self.classes_ = None
23 | self.nb_classes = None
24 | self.model_save_directory = model_save_directory
25 | self.model = None
26 | self.model_name = model_name
27 |
28 | def build_model(self, input_shape, nb_classes, **kwargs):
29 | """
30 | Construct a compiled, un-trained, keras model that is ready for
31 | training
32 |
33 | Parameters
34 | ----------
35 | input_shape : tuple
36 | The shape of the data fed into the input layer
37 | nb_classes: int
38 | The number of classes, which shall become the size of the output
39 | layer
40 | Returns
41 | -------
42 | output : a compiled Keras Model
43 | """
44 | raise NotImplementedError("this is an abstract method")
45 |
46 | def predict_proba(self, X, input_checks=True, **kwargs):
47 | """
48 | Find probability estimates for each class for all cases in X.
49 | Parameters
50 | ----------
51 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
52 | shape = (n_instances, series_length, n_dimensions)
53 | The training input samples. If a 2D array-like is passed,
54 | n_dimensions is assumed to be 1.
55 | input_checks: boolean
56 | whether to check the X parameter
57 | Returns
58 | -------
59 | output : array of shape = [n_instances, n_classes] of probabilities
60 | """
61 | check_is_fitted(self)
62 |
63 | X = check_and_clean_data(X, input_checks=input_checks)
64 |
65 | probs = self.model.predict(X, batch_size=40, **kwargs)
66 |
67 | # check if binary classification
68 | if probs.shape[1] == 1:
69 | # first column is probability of class 0 and second is of class 1
70 | probs = np.hstack([1 - probs, probs])
71 |
72 | return probs
73 |
74 | def save_trained_model(self):
75 | save_trained_model(
76 | self.model, self.model_save_directory, self.model_name
77 | )
78 |
79 | def convert_y(self, y, label_encoder=None, onehot_encoder=None):
80 | if (label_encoder is None) and (onehot_encoder is None):
81 | # make the encoders and store in self
82 | self.label_encoder = LabelEncoder()
83 | self.onehot_encoder = OneHotEncoder(sparse=False,
84 | categories="auto")
85 | # categories='auto' to get rid of FutureWarning
86 |
87 | y = self.label_encoder.fit_transform(y)
88 | self.classes_ = self.label_encoder.classes_
89 | self.nb_classes = len(self.classes_)
90 |
91 | y = y.reshape(len(y), 1)
92 | y = self.onehot_encoder.fit_transform(y)
93 | else:
94 | # encoders given, just transform using those. used for e.g.
95 | # validation data, where the train data has already been converted
96 | y = label_encoder.transform(y)
97 | y = y.reshape(len(y), 1)
98 | y = onehot_encoder.transform(y)
99 |
100 | return y
101 |
--------------------------------------------------------------------------------
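For intuition on the binary-classification branch of `predict_proba` above: a one-unit output layer yields a single column of class-1 probabilities, and the `np.hstack` call reconstructs the usual two-column probability matrix. A toy numeric sketch:

```python
import numpy as np

# Single-column output of a one-unit network: P(class 1) per instance
probs = np.array([[0.9], [0.2], [0.5]])

# Same stacking as in BaseDeepClassifier.predict_proba:
# column 0 becomes P(class 0), column 1 keeps P(class 1)
probs = np.hstack([1 - probs, probs])
print(probs)
# [[0.1 0.9]
#  [0.8 0.2]
#  [0.5 0.5]]
```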
/sktime_dl/classification/_cnn.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Time Convolutional Neural Network (CNN) for classification"""
3 |
4 | __author__ = "James Large"
5 | __all__ = ["CNNClassifier"]
6 |
7 | from sktime_dl.classification._classifier import BaseDeepClassifier
8 | from sktime_dl.networks._cnn import CNNNetwork
9 | from sktime_dl.utils import check_and_clean_data, \
10 | check_and_clean_validation_data
11 | from sklearn.utils import check_random_state
12 | from tensorflow import keras
13 |
14 |
15 | class CNNClassifier(BaseDeepClassifier, CNNNetwork):
16 | """Time Convolutional Neural Network (CNN).
17 |
18 | Parameters
19 | ----------
20 | nb_epochs: int, the number of epochs to train the model
21 | batch_size: int, the number of samples per gradient update.
22 | kernel_size: int, specifying the length of the 1D convolution
23 | window
24 | avg_pool_size: int, size of the average pooling windows
25 | nb_conv_layers: int, the number of convolutional plus average
26 | pooling layers
27 | filter_sizes: array of int, shape = (nb_conv_layers,)
28 | callbacks: list of tf.keras.callbacks.Callback objects
29 | random_state: int, or numpy RandomState instance
30 | verbose: boolean, whether to output extra information
31 | model_name: string, the name of this model for printing and
32 | file writing purposes
33 | model_save_directory: string, if not None; location to save
34 | the trained keras model in hdf5 format
35 |
36 | Notes
37 | -----
38 | ..[1] Zhao et al., Convolutional neural networks for
39 | time series classification, Journal of
40 | Systems Engineering and Electronics, 28(1):2017.
41 |
42 | Adapted from the implementation from Fawaz et al.
43 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/cnn.py
44 | """
45 |
46 | def __init__(
47 | self,
48 | nb_epochs=2000,
49 | batch_size=16,
50 | kernel_size=7,
51 | avg_pool_size=3,
52 | nb_conv_layers=2,
53 | filter_sizes=[6, 12],
54 | callbacks=None,
55 | random_state=0,
56 | verbose=False,
57 | model_name="cnn",
58 | model_save_directory=None,
59 | ):
60 | super(CNNClassifier, self).__init__(
61 | model_save_directory=model_save_directory,
62 | model_name=model_name)
63 | self.filter_sizes = filter_sizes
64 | self.nb_conv_layers = nb_conv_layers
65 | self.avg_pool_size = avg_pool_size
66 | self.random_state = random_state
67 | self.kernel_size = kernel_size
68 | self.verbose = verbose
69 | self.callbacks = callbacks
70 | self.nb_epochs = nb_epochs
71 | self.batch_size = batch_size
72 |
73 | self._is_fitted = False
74 |
75 | def build_model(self, input_shape, nb_classes, **kwargs):
76 | """
77 | Construct a compiled, un-trained, keras model that is ready for
78 | training
79 |
80 | Parameters
81 | ----------
82 | input_shape : tuple
83 | The shape of the data fed into the input layer
84 | nb_classes: int
85 | The number of classes, which shall become the size of the output
86 | layer
87 |
88 | Returns
89 | -------
90 | output : a compiled Keras Model
91 | """
92 | input_layer, output_layer = self.build_network(input_shape, **kwargs)
93 |
94 | output_layer = keras.layers.Dense(
95 | units=nb_classes, activation="sigmoid"
96 | )(output_layer)
97 |
98 | model = keras.models.Model(inputs=input_layer, outputs=output_layer)
99 | model.compile(
100 | loss="mean_squared_error",
101 | optimizer=keras.optimizers.Adam(),
102 | metrics=["accuracy"],
103 | )
104 |
105 | return model
106 |
107 | def fit(self, X, y, input_checks=True, validation_X=None,
108 | validation_y=None, **kwargs):
109 | """
110 | Fit the classifier on the training set (X, y)
111 |
112 | Parameters
113 | ----------
114 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
115 | shape = (n_instances, series_length, n_dimensions)
116 | The training input samples. If a 2D array-like is passed,
117 | n_dimensions is assumed to be 1.
118 | y : array-like, shape = [n_instances]
119 | The training data class labels.
120 | input_checks : boolean
121 | whether to check the X and y parameters
122 | validation_X : a nested pd.Dataframe, or array-like of shape =
123 | (n_instances, series_length, n_dimensions)
124 | The validation samples. If a 2D array-like is passed,
125 | n_dimensions is assumed to be 1.
126 | Unless strictly defined by the user via callbacks (such as
127 | EarlyStopping), the presence or state of the validation
128 | data does not alter training in any way. Predictions at each epoch
129 | are stored in the model's fit history.
130 | validation_y : array-like, shape = [n_instances]
131 | The validation class labels.
132 |
133 | Returns
134 | -------
135 | self : object
136 | """
137 | self.random_state = check_random_state(self.random_state)
138 |
139 | if self.callbacks is None:
140 | self.callbacks = []
141 |
142 | X = check_and_clean_data(X, y, input_checks=input_checks)
143 | y_onehot = self.convert_y(y)
144 |
145 | validation_data = \
146 | check_and_clean_validation_data(validation_X, validation_y,
147 | self.label_encoder,
148 | self.onehot_encoder)
149 |
150 | # ignore the number of instances, X.shape[0],
151 | # just want the shape of each instance
152 | self.input_shape = X.shape[1:]
153 |
154 | self.model = self.build_model(self.input_shape, self.nb_classes)
155 |
156 | if self.verbose:
157 | self.model.summary()
158 |
159 | self.history = self.model.fit(
160 | X,
161 | y_onehot,
162 | batch_size=self.batch_size,
163 | epochs=self.nb_epochs,
164 | verbose=self.verbose,
165 | callbacks=self.callbacks,
166 | validation_data=validation_data,
167 | )
168 |
169 | self._is_fitted = True
170 | self.save_trained_model()
171 |
172 | return self
173 |
--------------------------------------------------------------------------------
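A minimal end-to-end sketch of the fit/predict API documented above, assuming the `load_italy_power_demand` loader from the `sktime` versions this repo pins (argument names may differ across sktime releases; treat as illustrative):

```python
from sklearn.metrics import accuracy_score
from sktime.datasets import load_italy_power_demand
from sktime_dl.classification import CNNClassifier

X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)

# Few epochs to keep the example fast; the default trains for 2000
clf = CNNClassifier(nb_epochs=5)
clf.fit(X_train, y_train)
print("accuracy:", accuracy_score(y_test, clf.predict(X_test)))
```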
/sktime_dl/classification/_encoder.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large"
2 |
3 | from sktime_dl.classification._classifier import BaseDeepClassifier
4 | from sktime_dl.networks._encoder import EncoderNetwork
5 | from sktime_dl.utils import check_and_clean_data, \
6 | check_and_clean_validation_data
7 | from tensorflow import keras
8 | from sklearn.utils import check_random_state
9 |
10 |
11 | class EncoderClassifier(BaseDeepClassifier, EncoderNetwork):
12 | """Encoder
13 |
14 | Adapted from the implementation from Fawaz et al.
15 |
16 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/encoder.py
17 |
18 | Network originally defined in:
19 |
20 | @article{serra2018towards,
21 | title={Towards a universal neural network encoder for time series},
22 | author={Serrà, J and Pascual, S and Karatzoglou, A},
23 | journal={Artif Intell Res Dev Curr Chall New Trends Appl},
24 | volume={308},
25 | pages={120},
26 | year={2018}
27 | }
28 |
29 | :param nb_epochs: int, the number of epochs to train the model
30 | :param batch_size: int, the number of samples per
31 | gradient update
32 | :param callbacks: list of tf.keras.callbacks.Callback objects
33 | :param random_state: int, seed to any needed random actions
34 | :param verbose: boolean, whether to output extra information
35 | :param model_name: string, the name of this model for printing and
36 | file writing purposes
37 | :param model_save_directory: string, if not None; location to save
38 | the trained keras model in hdf5 format
39 | """
40 |
41 | def __init__(
42 | self,
43 | nb_epochs=100,
44 | batch_size=12,
45 | callbacks=None,
46 | random_state=0,
47 | verbose=False,
48 | model_name="encoder",
49 | model_save_directory=None,
50 | ):
51 | super(EncoderClassifier, self).__init__(
52 | model_name=model_name, model_save_directory=model_save_directory
53 | )
54 |
55 | self.nb_epochs = nb_epochs
56 | self.batch_size = batch_size
57 |
58 | self.callbacks = callbacks
59 | self.random_state = random_state
60 | self.verbose = verbose
61 |
62 | self._is_fitted = False
63 |
64 | def build_model(self, input_shape, nb_classes, **kwargs):
65 | """
66 | Construct a compiled, un-trained, keras model that is ready for
67 | training
68 | ----------
69 | input_shape : tuple
70 | The shape of the data fed into the input layer
71 | nb_classes: int
72 | The number of classes, which shall become the size of the output
73 | layer
74 | Returns
75 | -------
76 | output : a compiled Keras Model
77 | """
78 | input_layer, output_layer = self.build_network(input_shape, **kwargs)
79 | output_layer = keras.layers.Dense(nb_classes, activation="softmax")(
80 | output_layer
81 | )
82 |
83 | model = keras.models.Model(inputs=input_layer, outputs=output_layer)
84 |
85 | model.compile(
86 | loss="categorical_crossentropy",
87 | optimizer=keras.optimizers.Adam(0.00001),
88 | metrics=["accuracy"],
89 | )
90 |
91 | return model
92 |
93 | def fit(self, X, y, input_checks=True, validation_X=None,
94 | validation_y=None, **kwargs):
95 | """
96 | Fit the classifier on the training set (X, y)
97 | ----------
98 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
99 | shape = (n_instances, series_length, n_dimensions)
100 | The training input samples. If a 2D array-like is passed,
101 | n_dimensions is assumed to be 1.
102 | y : array-like, shape = [n_instances]
103 | The training data class labels.
104 | input_checks : boolean
105 | whether to check the X and y parameters
106 | validation_X : a nested pd.Dataframe, or array-like of shape =
107 | (n_instances, series_length, n_dimensions)
108 | The validation samples. If a 2D array-like is passed,
109 | n_dimensions is assumed to be 1.
110 | Unless strictly defined by the user via callbacks (such as
111 | EarlyStopping), the presence or state of the validation
112 | data does not alter training in any way. Predictions at each epoch
113 | are stored in the model's fit history.
114 | validation_y : array-like, shape = [n_instances]
115 | The validation class labels.
116 | Returns
117 | -------
118 | self : object
119 | """
120 | self.random_state = check_random_state(self.random_state)
121 |
122 | if self.callbacks is None:
123 | self.callbacks = []
124 |
125 | X = check_and_clean_data(X, y, input_checks=input_checks)
126 | y_onehot = self.convert_y(y)
127 |
128 | validation_data = \
129 | check_and_clean_validation_data(validation_X, validation_y,
130 | self.label_encoder,
131 | self.onehot_encoder)
132 |
133 | # ignore the number of instances, X.shape[0],
134 | # just want the shape of each instance
135 | self.input_shape = X.shape[1:]
136 |
137 | self.model = self.build_model(self.input_shape, self.nb_classes)
138 |
139 | if self.verbose:
140 | self.model.summary()
141 |
142 | self.history = self.model.fit(
143 | X,
144 | y_onehot,
145 | batch_size=self.batch_size,
146 | epochs=self.nb_epochs,
147 | verbose=self.verbose,
148 | callbacks=self.callbacks,
149 | validation_data=validation_data,
150 | )
151 |
152 | self.save_trained_model()
153 | self._is_fitted = True
154 |
155 | return self
156 |
--------------------------------------------------------------------------------
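The fit docstring above notes that validation data only affects training through callbacks such as EarlyStopping; a minimal sketch of wiring that up (dataset loader and argument values as in the sktime versions this repo pins; treat as illustrative):

```python
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sktime.datasets import load_italy_power_demand
from sktime_dl.classification import EncoderClassifier

X, y = load_italy_power_demand(split="train", return_X_y=True)
X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=0)

# Stop training once validation loss stops improving
early_stop = keras.callbacks.EarlyStopping(monitor="val_loss", patience=5)

clf = EncoderClassifier(nb_epochs=100, callbacks=[early_stop])
clf.fit(X_train, y_train, validation_X=X_val, validation_y=y_val)
```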
/sktime_dl/classification/_fcn.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | __author__ = "James Large"
3 | __all__ = ["FCNClassifier"]
4 |
5 | from tensorflow import keras
6 |
7 | from sktime_dl.classification._classifier import BaseDeepClassifier
8 | from sktime_dl.networks._fcn import FCNNetwork
9 | from sktime_dl.utils import check_and_clean_data, \
10 | check_and_clean_validation_data
11 | from sklearn.utils import check_random_state
12 |
13 |
14 | class FCNClassifier(BaseDeepClassifier, FCNNetwork):
15 | """Fully convolutional neural network (FCN).
16 |
17 | Parameters
18 | ----------
19 | nb_epochs: int, the number of epochs to train the model
20 | batch_size: int, the number of samples per gradient update during
21 | training
22 | callbacks: list of tf.keras.callbacks.Callback objects
23 | random_state: int, seed to any needed random actions
24 | verbose: boolean, whether to output extra information
25 | model_name: string, the name of this model for printing and
26 | file writing purposes
27 | model_save_directory: string, if not None; location to save
28 | the trained keras model in hdf5 format
29 |
30 |
31 | Notes
32 | -----
33 | .. [1] Z. Wang et al., Time series classification from scratch with deep neural
34 | networks: A strong baseline, IJCNN, 2017
35 |
36 | Adapted from the implementation of Fawaz et al.
37 |
38 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/fcn.py
39 | """
40 |
41 | def __init__(
42 | self,
43 | nb_epochs=2000,
44 | batch_size=16,
45 | callbacks=None,
46 | random_state=0,
47 | verbose=False,
48 | model_name="fcn",
49 | model_save_directory=None,
50 | ):
51 | super(FCNClassifier, self).__init__(
52 | model_name=model_name, model_save_directory=model_save_directory
53 | )
54 |
55 | self.nb_epochs = nb_epochs
56 | self.batch_size = batch_size
57 |
58 | self.callbacks = callbacks
59 | self.random_state = random_state
60 | self.verbose = verbose
61 |
62 | self._is_fitted = False
63 |
64 | def build_model(self, input_shape, nb_classes, **kwargs):
65 | """
66 | Construct a compiled, un-trained, keras model that is ready for training
67 |
68 | Parameters
69 | ----------
70 | input_shape : tuple
71 | The shape of the data fed into the input layer
72 | nb_classes: int
73 | The number of classes, which shall become the size of the output
74 | layer
75 |
76 | Returns
77 | -------
78 | output : a compiled Keras Model
79 | """
80 | input_layer, output_layer = self.build_network(input_shape, **kwargs)
81 |
82 | output_layer = keras.layers.Dense(nb_classes, activation="softmax")(
83 | output_layer
84 | )
85 |
86 | model = keras.models.Model(inputs=input_layer, outputs=output_layer)
87 |
88 | model.compile(
89 | loss="categorical_crossentropy",
90 | optimizer=keras.optimizers.Adam(),
91 | metrics=["accuracy"],
92 | )
93 |
94 | # if user hasn't provided a custom ReduceLROnPlateau via
95 | # init already, add the default from literature
96 | if self.callbacks is None:
97 | self.callbacks = []
98 |
99 | if not any(
100 | isinstance(callback, keras.callbacks.ReduceLROnPlateau)
101 | for callback in self.callbacks
102 | ):
103 | reduce_lr = keras.callbacks.ReduceLROnPlateau(
104 | monitor="loss", factor=0.5, patience=50, min_lr=0.0001
105 | )
106 | self.callbacks.append(reduce_lr)
107 |
108 | return model
109 |
110 | def fit(self, X, y, input_checks=True, validation_X=None,
111 | validation_y=None, **kwargs):
112 | """
113 | Fit the classifier on the training set (X, y)
114 |
115 | Parameters
116 | ----------
117 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
118 | shape = (n_instances, series_length, n_dimensions)
119 | The training input samples. If a 2D array-like is passed,
120 | n_dimensions is assumed to be 1.
121 | y : array-like, shape = [n_instances]
122 | The training data class labels.
123 | input_checks : boolean
124 | whether to check the X and y parameters
125 | validation_X : a nested pd.Dataframe, or array-like of shape =
126 | (n_instances, series_length, n_dimensions)
127 | The validation samples. If a 2D array-like is passed,
128 | n_dimensions is assumed to be 1.
129 | Unless strictly defined by the user via callbacks (such as
130 | EarlyStopping), the presence or state of the validation
131 | data does not alter training in any way. Predictions at each epoch
132 | are stored in the model's fit history.
133 | validation_y : array-like, shape = [n_instances]
134 | The validation class labels.
135 |
136 | Returns
137 | -------
138 | self : object
139 | """
140 | self.random_state = check_random_state(self.random_state)
141 |
142 | X = check_and_clean_data(X, y, input_checks=input_checks)
143 | y_onehot = self.convert_y(y)
144 |
145 | validation_data = \
146 | check_and_clean_validation_data(validation_X, validation_y,
147 | self.label_encoder,
148 | self.onehot_encoder)
149 |
150 | # ignore the number of instances, X.shape[0],
151 | # just want the shape of each instance
152 | self.input_shape = X.shape[1:]
153 |
154 | self.batch_size = int(min(X.shape[0] / 10, self.batch_size))
155 |
156 | self.model = self.build_model(self.input_shape, self.nb_classes)
157 |
158 | if self.verbose:
159 | self.model.summary()
160 |
161 | self.history = self.model.fit(
162 | X,
163 | y_onehot,
164 | batch_size=self.batch_size,
165 | epochs=self.nb_epochs,
166 | verbose=self.verbose,
167 | callbacks=self.callbacks,
168 | validation_data=validation_data,
169 | )
170 |
171 | self.save_trained_model()
172 | self._is_fitted = True
173 |
174 | return self
175 |
--------------------------------------------------------------------------------
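A minimal usage sketch for FCNClassifier, mirroring the package's own tests; it assumes FCNClassifier is exported from sktime_dl.classification like its sibling classifiers, and trims nb_epochs from the default 2000 purely to keep the run short:

from sktime.datasets import load_italy_power_demand
from sktime_dl.classification import FCNClassifier

X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)

clf = FCNClassifier(nb_epochs=100, verbose=False)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))  # accuracy on the held-out split
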
/sktime_dl/classification/_mlp.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.classification._classifier import BaseDeepClassifier
6 | from sktime_dl.networks._mlp import MLPNetwork
7 | from sktime_dl.utils import check_and_clean_data, \
8 | check_and_clean_validation_data
9 | from sklearn.utils import check_random_state
10 |
11 |
12 | class MLPClassifier(BaseDeepClassifier, MLPNetwork):
13 | """Multi Layer Perceptron (MLP).
14 |
15 | Adapted from the implementation from Fawaz et. al
16 |
17 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/mlp.py
18 |
19 | Network originally defined in:
20 |
21 | @inproceedings{wang2017time, title={Time series classification from
22 | scratch with deep neural networks: A strong baseline}, author={Wang,
23 | Zhiguang and Yan, Weizhong and Oates, Tim}, booktitle={2017
24 | International joint conference on neural networks (IJCNN)}, pages={
25 | 1578--1585}, year={2017}, organization={IEEE} }
26 | """
27 |
28 | def __init__(self,
29 | nb_epochs=5000,
30 | batch_size=16,
31 | callbacks=None,
32 | random_state=0,
33 | verbose=False,
34 | model_name="mlp",
35 | model_save_directory=None):
36 | """
37 | :param nb_epochs: int, the number of epochs to train the model
38 | :param batch_size: int, the number of samples per gradient update
39 | during training
40 | :param callbacks: list of tf.keras.callbacks.Callback objects
41 | :param random_state: int, seed to any needed random actions
42 | :param verbose: boolean, whether to output extra information
43 | :param model_name: string, the name of this model for printing and
44 | file writing purposes
45 | :param model_save_directory: string, if not None; location to save
46 | the trained keras model in hdf5 format
47 | """
48 | super(MLPClassifier, self).__init__(
49 | model_save_directory=model_save_directory,
50 | model_name=model_name
51 | )
52 | self.nb_epochs = nb_epochs
53 | self.batch_size = batch_size
54 | self.callbacks = callbacks
55 | self.random_state = random_state
56 | self.verbose = verbose
57 |
58 | self._is_fitted = False
59 |
60 | def build_model(self, input_shape, nb_classes, **kwargs):
61 | """
62 | Construct a compiled, un-trained, keras model that is ready for
63 | training
64 | ----------
65 | input_shape : tuple
66 | The shape of the data fed into the input layer
67 | nb_classes: int
68 | The number of classes, which shall become the size of the output
69 | layer
70 | Returns
71 | -------
72 | output : a compiled Keras Model
73 | """
74 | input_layer, output_layer = self.build_network(input_shape, **kwargs)
75 | output_layer = keras.layers.Dense(nb_classes, activation="softmax")(
76 | output_layer
77 | )
78 |
79 | model = keras.models.Model(inputs=input_layer, outputs=output_layer)
80 |
81 | model.compile(
82 | loss="categorical_crossentropy",
83 | optimizer=keras.optimizers.Adadelta(),
84 | metrics=["accuracy"],
85 | )
86 |
87 | # if user hasn't provided a custom ReduceLROnPlateau via init already,
88 | # add the default from literature
89 | if self.callbacks is None:
90 | self.callbacks = []
91 |
92 | if not any(
93 | isinstance(callback, keras.callbacks.ReduceLROnPlateau)
94 | for callback in self.callbacks
95 | ):
96 | reduce_lr = keras.callbacks.ReduceLROnPlateau(
97 | monitor="loss", factor=0.5, patience=50, min_lr=0.0001
98 | )
99 | self.callbacks.append(reduce_lr)
100 |
101 | return model
102 |
103 | def fit(self, X, y, input_checks=True, validation_X=None,
104 | validation_y=None, **kwargs):
105 | """
106 | Fit the classifier on the training set (X, y)
107 | ----------
108 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
109 | shape = (n_instances, series_length, n_dimensions)
110 | The training input samples. If a 2D array-like is passed,
111 | n_dimensions is assumed to be 1.
112 | y : array-like, shape = [n_instances]
113 | The training data class labels.
114 | input_checks : boolean
115 | whether to check the X and y parameters
116 | validation_X : a nested pd.Dataframe, or array-like of shape =
117 | (n_instances, series_length, n_dimensions)
118 | The validation samples. If a 2D array-like is passed,
119 | n_dimensions is assumed to be 1.
120 | Unless strictly defined by the user via callbacks (such as
121 | EarlyStopping), the presence or state of the validation
122 | data does not alter training in any way. Predictions at each epoch
123 | are stored in the model's fit history.
124 | validation_y : array-like, shape = [n_instances]
125 | The validation class labels.
126 | Returns
127 | -------
128 | self : object
129 | """
130 | self.random_state = check_random_state(self.random_state)
131 |
132 | X = check_and_clean_data(X, y, input_checks=input_checks)
133 | y_onehot = self.convert_y(y)
134 |
135 | validation_data = \
136 | check_and_clean_validation_data(validation_X, validation_y,
137 | self.label_encoder,
138 | self.onehot_encoder)
139 |
140 | # ignore the number of instances, X.shape[0], just want the shape of
141 | # each instance
142 | self.input_shape = X.shape[1:]
143 |
144 | self.batch_size = int(min(X.shape[0] / 10, self.batch_size))
145 |
146 | self.model = self.build_model(self.input_shape, self.nb_classes)
147 |
148 | if self.verbose:
149 | self.model.summary()
150 |
151 | self.history = self.model.fit(
152 | X,
153 | y_onehot,
154 | batch_size=self.batch_size,
155 | epochs=self.nb_epochs,
156 | verbose=self.verbose,
157 | callbacks=self.callbacks,
158 | validation_data=validation_data,
159 | )
160 |
161 | self.save_trained_model()
162 | self._is_fitted = True
163 |
164 | return self
165 |
--------------------------------------------------------------------------------
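Because build_model only appends the literature-default ReduceLROnPlateau when none is supplied, callers can swap in their own schedule through the callbacks argument. A sketch, assuming MLPClassifier is exported from sktime_dl.classification; the factor/patience values here are illustrative, not from the paper:

from tensorflow import keras
from sktime_dl.classification import MLPClassifier

# build_model detects the ReduceLROnPlateau below and skips adding its own
reduce_lr = keras.callbacks.ReduceLROnPlateau(
    monitor="loss", factor=0.7, patience=25, min_lr=1e-5
)
clf = MLPClassifier(nb_epochs=500, callbacks=[reduce_lr])
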
/sktime_dl/classification/_resnet.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.classification._classifier import BaseDeepClassifier
6 | from sktime_dl.networks._resnet import ResNetNetwork
7 | from sktime_dl.utils import check_and_clean_data, \
8 | check_and_clean_validation_data
9 | from sklearn.utils import check_random_state
10 |
11 |
12 | class ResNetClassifier(BaseDeepClassifier, ResNetNetwork):
13 | """Residual Network (ResNet).
14 |
15 | Adapted from the implementation from Fawaz et. al
16 |
17 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/resnet.py
18 |
19 | Network originally defined in:
20 |
21 | @inproceedings{wang2017time,
22 | title={Time series classification from scratch with deep neural networks:
23 | A strong baseline},
24 | author={Wang, Zhiguang and Yan, Weizhong and Oates, Tim},
25 | booktitle={2017 International joint conference on neural networks
26 | (IJCNN)},
27 | pages={1578--1585},
28 | year={2017},
29 | organization={IEEE}
30 | }
31 | """
32 |
33 | def __init__(self,
34 | nb_epochs=1500,
35 | batch_size=16,
36 | callbacks=None,
37 | random_state=0,
38 | verbose=False,
39 | model_name="resnet",
40 | model_save_directory=None):
41 | """
42 | :param nb_epochs: int, the number of epochs to train the model
43 | :param batch_size: int, the number of samples per gradient update
44 | during training
45 | :param callbacks: list of tf.keras.callbacks.Callback objects
46 | :param random_state: int, seed to any needed random actions
47 | :param verbose: boolean, whether to output extra information
48 | :param model_name: string, the name of this model for printing and
49 | file writing purposes
50 | :param model_save_directory: string, if not None; location to save
51 | the trained keras model in hdf5 format
52 | """
53 |
54 | super(ResNetClassifier, self).__init__(
55 | model_name=model_name, model_save_directory=model_save_directory
56 | )
57 |
58 | self.nb_epochs = nb_epochs
59 | self.batch_size = batch_size
60 |
61 | self.callbacks = callbacks
62 | self.random_state = random_state
63 | self.verbose = verbose
64 |
65 | # calced in fit
66 | self.input_shape = None
67 | self.history = None
68 |
69 | self._is_fitted = False
70 |
71 |
72 |
73 |
74 | def build_model(self, input_shape, nb_classes, **kwargs):
75 | """
76 | Construct a compiled, un-trained, keras model that is ready for
77 | training
78 | ----------
79 | input_shape : tuple
80 | The shape of the data fed into the input layer
81 | nb_classes: int
82 | The number of classes, which shall become the size of the output
83 | layer
84 | Returns
85 | -------
86 | output : a compiled Keras Model
87 | """
88 | input_layer, output_layer = self.build_network(input_shape, **kwargs)
89 |
90 | output_layer = keras.layers.Dense(nb_classes, activation="softmax")(
91 | output_layer
92 | )
93 |
94 | model = keras.models.Model(inputs=input_layer, outputs=output_layer)
95 |
96 | model.compile(
97 | loss="categorical_crossentropy",
98 | optimizer=keras.optimizers.Adam(),
99 | metrics=["accuracy"],
100 | )
101 |
102 | # if user hasn't provided a custom ReduceLROnPlateau via init already,
103 | # add the default from literature
104 | if self.callbacks is None:
105 | self.callbacks = []
106 |
107 | if not any(
108 | isinstance(callback, keras.callbacks.ReduceLROnPlateau)
109 | for callback in self.callbacks
110 | ):
111 | reduce_lr = keras.callbacks.ReduceLROnPlateau(
112 | monitor="loss", factor=0.5, patience=50, min_lr=0.0001
113 | )
114 | self.callbacks.append(reduce_lr)
115 |
116 | return model
117 |
118 | def fit(self, X, y, input_checks=True, validation_X=None,
119 | validation_y=None, **kwargs):
120 | """
121 | Fit the classifier on the training set (X, y)
122 | ----------
123 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
124 | shape = (n_instances, series_length, n_dimensions)
125 | The training input samples. If a 2D array-like is passed,
126 | n_dimensions is assumed to be 1.
127 | y : array-like, shape = [n_instances]
128 | The training data class labels.
129 | input_checks : boolean
130 | whether to check the X and y parameters
131 | validation_X : a nested pd.Dataframe, or array-like of shape =
132 | (n_instances, series_length, n_dimensions)
133 | The validation samples. If a 2D array-like is passed,
134 | n_dimensions is assumed to be 1.
135 | Unless strictly defined by the user via callbacks (such as
136 | EarlyStopping), the presence or state of the validation
137 | data does not alter training in any way. Predictions at each epoch
138 | are stored in the model's fit history.
139 | validation_y : array-like, shape = [n_instances]
140 | The validation class labels.
141 | Returns
142 | -------
143 | self : object
144 | """
145 | self.random_state = check_random_state(self.random_state)
146 |
147 | X = check_and_clean_data(X, y, input_checks=input_checks)
148 | y_onehot = self.convert_y(y)
149 |
150 | validation_data = \
151 | check_and_clean_validation_data(validation_X, validation_y,
152 | self.label_encoder,
153 | self.onehot_encoder)
154 |
155 | # ignore the number of instances, X.shape[0],
156 | # just want the shape of each instance
157 | self.input_shape = X.shape[1:]
158 |
159 | self.batch_size = int(min(X.shape[0] / 10, self.batch_size))
160 |
161 | self.model = self.build_model(self.input_shape, self.nb_classes)
162 |
163 | if self.verbose:
164 | self.model.summary()
165 |
166 | self.history = self.model.fit(
167 | X,
168 | y_onehot,
169 | batch_size=self.batch_size,
170 | epochs=self.nb_epochs,
171 | verbose=self.verbose,
172 | callbacks=self.callbacks,
173 | validation_data=validation_data,
174 | )
175 |
176 | self.save_trained_model()
177 | self._is_fitted = True
178 |
179 | return self
180 |
--------------------------------------------------------------------------------
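As the fit docstring notes, validation data only changes training when a callback consumes it. A sketch pairing validation_X/validation_y with keras EarlyStopping; it assumes ResNetClassifier is exported from sktime_dl.classification, and reuses the test split as validation data purely for brevity:

from tensorflow import keras
from sktime.datasets import load_italy_power_demand
from sktime_dl.classification import ResNetClassifier

X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
X_val, y_val = load_italy_power_demand(split="test", return_X_y=True)

# without a callback like this, validation results are only recorded in history
early_stop = keras.callbacks.EarlyStopping(monitor="val_loss", patience=20)
clf = ResNetClassifier(nb_epochs=500, callbacks=[early_stop])
clf.fit(X_train, y_train, validation_X=X_val, validation_y=y_val)
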
/sktime_dl/experimental/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sktime/sktime-dl/1cc7e1d3ce98ccfd1165386fd5ccbd87b6c814e9/sktime_dl/experimental/__init__.py
--------------------------------------------------------------------------------
/sktime_dl/experimental/reduction_examples.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import numpy as np
4 | from numpy.lib.stride_tricks import sliding_window_view
5 | import pandas as pd
6 | from sktime.datasets import load_airline
7 | from sktime.utils.plotting import plot_series
8 | from sktime_dl.regression import CNNRegressor
9 | from sktime_dl.classification import CNNClassifier
10 | from sktime.forecasting.naive import NaiveForecaster
11 | from sktime.classification.interval_based import RandomIntervalSpectralForest
12 |
13 | def forecasting_example():
14 | name = "C:\\Users\\Tony\\OneDrive - University of East Anglia\\Research\\Alex " \
15 | "Mcgregor Grant\\randomNoise.csv"
16 |
17 |
18 | y = pd.read_csv(name, index_col=0, squeeze=True, dtype={1: np.float64})
19 | forecast_horizon = np.arange(1, 2)
20 | forecaster = NaiveForecaster(strategy="last")
21 | forecaster.fit(y)
22 | y_pred = forecaster.predict(forecast_horizon)
23 | print("Next predicted value = ",y_pred)
24 | # https://github.com/alan-turing-institute/sktime/blob/main/examples/01_forecasting.ipynb
25 | # Reduce to a regression problem through windowing:
26 | # transform forecasting into regression
27 |
28 | np_y = y.to_numpy()
29 | v = sliding_window_view(np_y, 100)
30 | print("Window shape =", v.shape)
31 | v_3d = np.expand_dims(v, axis=1)
32 | print("3D window shape =", v_3d.shape)
33 | z = v[:, 2]
34 | print("Target shape =", z.shape)
35 |
36 | regressor = CNNRegressor()
37 | classifier = CNNClassifier()
38 | regressor.fit(v_3d,z)
39 | p = regressor.predict(v_3d)
40 | #print(p)
41 | d = np.array([0.0])
42 | c = np.digitize(z,d)
43 | classifier = RandomIntervalSpectralForest()
44 | classifier.fit(v_3d,c)
45 | cls = classifier.predict(v_3d)
46 | print(cls)
47 |
48 | if __name__ == "__main__":
49 | forecasting_example()
50 |
51 |
52 |
--------------------------------------------------------------------------------
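The example above depends on a local CSV; here is a self-contained sketch of the same forecasting-to-regression reduction on a synthetic series. The window length and epoch count are arbitrary choices (and nb_epochs assumes the regressor mirrors the classifiers' constructor argument):

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view
from sktime_dl.regression import CNNRegressor

rng = np.random.default_rng(0)
y = rng.standard_normal(500).cumsum()  # synthetic random-walk series

window = 20
v = sliding_window_view(y, window)[:-1]  # each row is a length-20 history
targets = y[window:]                     # target: the value right after each window
v_3d = np.expand_dims(v, axis=1)         # same reshape as the example above

regressor = CNNRegressor(nb_epochs=50)
regressor.fit(v_3d, targets)
preds = regressor.predict(v_3d)
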
/sktime_dl/experimental/regression_experiments.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Experiments: code to run experiments as an alternative to orchestration.
3 |
4 | This file is configured for runs of the main method with command line arguments, or for
5 | single debugging runs. Results are written in a standard format
6 | todo: Tidy up this file!
7 | """
8 |
9 | import os
10 |
11 | import sklearn.preprocessing
12 | import sklearn.utils
13 |
14 |
15 |
16 | os.environ["MKL_NUM_THREADS"] = "1" # must be done before numpy import!!
17 | os.environ["NUMEXPR_NUM_THREADS"] = "1" # must be done before numpy import!!
18 | os.environ["OMP_NUM_THREADS"] = "1" # must be done before numpy import!!
19 |
20 | import sys
21 | import time
22 | import numpy as np
23 | import pandas as pd
24 |
25 |
26 | from sklearn import preprocessing
27 | from sklearn.metrics import accuracy_score
28 | from sklearn.model_selection import cross_val_predict
29 | from sktime.contrib.experiments import run_experiment
30 | from sktime.datasets.base import load_UCR_UEA_dataset
31 | from sktime.contrib.experiments import write_results_to_uea_format
32 |
33 |
34 | from sktime_dl.regression import (
35 | CNNRegressor,
36 | EncoderRegressor,
37 | FCNRegressor,
38 | InceptionTimeRegressor,
39 | LSTMRegressor,
40 | MCDCNNRegressor,
41 | MLPRegressor,
42 | ResNetRegressor,
43 | SimpleRNNRegressor,
44 | TLENETRegressor,
45 | )
46 | # the classifiers live in sktime_dl.classification, not sktime_dl.regression
47 | from sktime_dl.classification import (
48 | EncoderClassifier, FCNClassifier, InceptionTimeClassifier,
49 | MCDCNNClassifier, MCNNClassifier, MLPClassifier,
50 | ResNetClassifier, TLENETClassifier, TWIESNClassifier,
51 | )
52 |
53 |
54 |
55 |
56 | __author__ = ["Tony Bagnall"]
57 |
58 | """Prototype mechanism for testing classifiers on the UCR format. This mirrors the
59 | mechanism used in Java,
60 | https://github.com/TonyBagnall/uea-tsc/tree/master/src/main/java/experiments
61 | but is not yet as engineered. However, if you generate results using the method
62 | recommended here, they can be directly and automatically compared to the results
63 | generated in java
64 |
65 | """
66 |
67 | regressor_list = [
68 | "CNNRegressor",
69 | "EncoderRegressor",
70 | "FCNRegressor",
71 | "InceptionTimeRegressor",
72 | "LSTMRegressor",
73 | "MCDCNNRegressor",
74 | "MLPRegressor",
75 | "ResNetRegressor",
76 | "SimpleRNNRegressor",
77 | "TLENETRegressor",
78 | ]
79 |
80 | def set_regressor(cls, resampleId=None):
81 | """Construct a classifier.
82 |
83 | Basic way of creating the classifier to build using the default settings. This
84 | set up is to help with batch jobs for multiple problems to facilitate easy
85 | reproducability. You can set up bespoke classifier in many other ways.
86 |
87 | Parameters
88 | ----------
89 | cls: String indicating which classifier you want
90 | resampleId: classifier random seed
91 |
92 | Return
93 | ------
94 | A classifier.
95 | """
96 | name = cls.lower()
97 | # Convolutional
98 | if name == "cnn" or name == "cnnregressor":
99 | return CNNRegressor(random_state=resampleId)
100 | elif name == "encode":
101 | return EncoderRegressor()
102 | elif name == "fcn":
103 | return FCNRegressor()
104 | elif name == "inceptiontime":
105 | return InceptionTimeRegressor()
106 | elif name == "mcdcnn":
107 | return MCDCNNRegressor()
108 | elif name == "mcnn":
109 | return MCNNClassifier()  # no MCNN regressor is implemented; fall back to the classifier
110 | elif name == "mlp":
111 | return MLPRegressor()
112 | elif name == "resnet":
113 | return ResNetRegressor()
114 | elif name == "tlenet":
115 | return TLENETRegressor()
116 | elif name == "twiesn":
117 | return TWIESNClassifier()  # no TWIESN regressor is implemented; fall back to the classifier
118 | else:
119 | raise Exception("UNKNOWN CLASSIFIER")
120 |
121 |
122 |
--------------------------------------------------------------------------------
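A quick sketch of driving the factory above (name matching is lower-cased, so several spellings reach the same branch; note that only the CNN branch currently forwards the seed):

regressor = set_regressor("CNN", resampleId=1)           # CNNRegressor(random_state=1)
regressor = set_regressor("cnnregressor", resampleId=1)  # same branch
regressor = set_regressor("resnet")                      # ResNetRegressor(), no seed forwarded
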
/sktime_dl/meta/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = [
2 | "DeepLearnerEnsembleClassifier",
3 | "TunedDeepLearningClassifier",
4 | "EnsembleFromFileClassifier"
5 | ]
6 |
7 | from sktime_dl.meta._dlensemble import DeepLearnerEnsembleClassifier
8 | from sktime_dl.meta._dlensemble import EnsembleFromFileClassifier
9 | from sktime_dl.meta._dltuner import TunedDeepLearningClassifier
10 |
--------------------------------------------------------------------------------
/sktime_dl/meta/_dltuner.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large"
2 |
3 | import numpy as np
4 | from sklearn.model_selection import GridSearchCV
5 | from sklearn.model_selection import RandomizedSearchCV
6 |
7 | from sktime_dl.classification import CNNClassifier
8 | from sktime_dl.classification._classifier import BaseDeepClassifier
9 |
10 |
11 | class TunedDeepLearningClassifier(BaseDeepClassifier):
12 | """
13 | A basic tuning framework for the deep learning classifiers. Defaults to a
14 | grid search with 5-fold cross-validation over the param_grid given for
15 | the specified base_model.
16 |
17 | TODO provide example param_grids for each deep learner
18 | """
19 |
20 | def __init__(
21 | self,
22 | base_model=CNNClassifier(),
23 | param_grid=dict(
24 | kernel_size=[3, 7], avg_pool_size=[2, 3],
25 | nb_conv_layers=[1, 2],
26 | ),
27 | search_method="grid",
28 | cv_folds=5,
29 | random_state=0,
30 | verbose=False,
31 | model_name=None,
32 | model_save_directory=None,
33 | ):
34 | """
35 | :param base_model: an implementation of BaseDeepLearner, the model
36 | to tune :param param_grid: dict, parameter names corresponding to
37 | parameters of the base_model, mapped to values to search over :param
38 | search_method: string out of ['grid', 'random'], how to search over
39 | the param_grid :param cv_folds: int, number of cross validation
40 | folds to use in evaluation of each parameter set :param random_state:
41 | int, seed to any needed random actions :param verbose: boolean,
42 | whether to output extra information :param model_name: string,
43 | the name of this model for printing and file writing purposes. if
44 | None, will default to 'tuned_' + base_model.model_name :param
45 | model_save_directory: string, if not None; location to save the
46 | tuned, trained keras model in hdf5 format
47 | """
48 |
49 | self.verbose = verbose
50 |
51 | if model_name is None:
52 | self.model_name = "tuned_" + base_model.model_name
53 | else:
54 | self.model_name = model_name
55 |
56 | self.model_save_directory = model_save_directory
57 |
58 | self.random_state = random_state
59 | self.random_state = np.random.RandomState(self.random_state)
60 | self._is_fitted = False
61 |
62 | self.base_model = base_model
63 |
64 | # search parameters
65 | self.param_grid = param_grid
66 | self.cv_folds = cv_folds
67 | self.search_method = search_method
68 | self.n_jobs = 1 # assuming networks themselves are threaded/on gpu,
69 | # not providing this option for now
70 |
71 | # search results (computed in fit)
72 | self.grid_history = None
73 | self.grid = None
74 | self.model = (
75 | None # the best _keras model_, not the sktime classifier object
76 | )
77 | self.tuned_params = None
78 |
79 | def build_model(self, input_shape, nb_classes, **kwargs):
80 | if self.tuned_params is None:
81 | return self.base_model.build_model(
82 | input_shape, nb_classes, **kwargs
83 | )
84 | else:
85 | return self.base_model.build_model(
86 | input_shape, nb_classes, **self.tuned_params
87 | )
88 |
89 | def fit(self, X, y, **kwargs):
90 | """
91 | Searches the best parameters for and fits classifier on the training
92 | set (X, y)
93 | ----------
94 | X : array-like or sparse matrix of shape = [n_instances, n_columns]
95 | The training input samples. If a Pandas data frame is passed,
96 | column 0 is extracted.
97 | y : array-like, shape = [n_instances]
98 | The class labels.
99 | input_checks: boolean
100 | whether to check the X and y parameters
101 | Returns
102 | -------
103 | self : object
104 | """
105 | if self.search_method == "grid":
106 | self.grid = GridSearchCV(
107 | estimator=self.base_model,
108 | param_grid=self.param_grid,
109 | refit=True,
110 | cv=self.cv_folds,
111 | n_jobs=self.n_jobs,
112 | )
113 | elif self.search_method == "random":
114 | self.grid = RandomizedSearchCV(
115 | estimator=self.base_model,
116 | param_distributions=self.param_grid,
117 | refit=True,
118 | cv=self.cv_folds,
119 | n_jobs=self.n_jobs,
120 | random_state=self.random_state,
121 | )
122 | else:
123 | # todo expand, give options etc
124 | raise Exception(
125 | "Unrecognised search method provided: {}".format(
126 | self.search_method
127 | )
128 | )
129 |
130 | self.grid_history = self.grid.fit(X, y)
131 | self.model = self.grid.best_estimator_.model
132 | self.tuned_params = self.grid.best_params_
133 |
134 | # copying data-wrangling info up
135 | self.label_encoder = self.grid.best_estimator_.label_encoder
136 | self.classes_ = self.grid.best_estimator_.classes_
137 | self.nb_classes = self.grid.best_estimator_.nb_classes
138 |
139 | if self.verbose:
140 | self.print_search_summary()
141 |
142 | self.save_trained_model()
143 | self._is_fitted = True
144 |
145 | return self
146 |
147 | def get_tuned_model(self):
148 | return self.model
149 |
150 | def get_tuned_params(self):
151 | return self.tuned_params
152 |
153 | def print_search_summary(self):
154 | print(
155 | "Best: %f using %s"
156 | % (self.grid_history.best_score_, self.grid_history.best_params_)
157 | )
158 | means = self.grid_history.cv_results_["mean_test_score"]
159 | stds = self.grid_history.cv_results_["std_test_score"]
160 | params = self.grid_history.cv_results_["params"]
161 | for mean, stdev, param in zip(means, stds, params):
162 | print("%f (%f) with: %r" % (mean, stdev, param))
163 |
--------------------------------------------------------------------------------
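Toward the TODO in the class docstring above, a sketch of a small param_grid for the default CNN base model, sized to run quickly; the grid values are illustrative, and any constructor argument of the base model could be searched instead:

from sktime.datasets import load_italy_power_demand
from sktime_dl.classification import CNNClassifier
from sktime_dl.meta import TunedDeepLearningClassifier

X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)

tuner = TunedDeepLearningClassifier(
    base_model=CNNClassifier(nb_epochs=50),
    param_grid=dict(kernel_size=[3, 7], nb_conv_layers=[1, 2]),
    search_method="grid",
    cv_folds=3,
)
tuner.fit(X_train, y_train)
print(tuner.get_tuned_params())  # best parameter combination found
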
/sktime_dl/meta/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sktime/sktime-dl/1cc7e1d3ce98ccfd1165386fd5ccbd87b6c814e9/sktime_dl/meta/tests/__init__.py
--------------------------------------------------------------------------------
/sktime_dl/meta/tests/test_ensembling.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | from sktime.datasets import load_italy_power_demand
4 |
5 | from sktime_dl.classification import CNNClassifier
6 | from sktime_dl.meta import DeepLearnerEnsembleClassifier
7 |
8 |
9 | def test_basic_inmem(
10 | network=DeepLearnerEnsembleClassifier(
11 | base_model=CNNClassifier(nb_epochs=50),
12 | nb_iterations=2,
13 | keep_in_memory=True,
14 | model_save_directory=None,
15 | verbose=True,
16 | )
17 | ):
18 | """
19 | just a super basic test with ItalyPowerDemand,
20 | load data,
21 | construct classifier,
22 | fit,
23 | score
24 | """
25 |
26 | print("Start test_basic()")
27 |
28 | X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
29 | X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
30 |
31 | network.fit(X_train[:10], y_train[:10])
32 |
33 | print(network.score(X_test[:10], y_test[:10]))
34 | print("End test_basic()")
35 |
36 |
37 | def test_basic_saving(
38 | network=DeepLearnerEnsembleClassifier(
39 | base_model=CNNClassifier(nb_epochs=50),
40 | nb_iterations=2,
41 | keep_in_memory=False,
42 | model_save_directory="testResultsDELETE",
43 | verbose=True,
44 | )
45 | ):
46 | """
47 | just a super basic test with ItalyPowerDemand,
48 | load data,
49 | construct classifier,
50 | fit,
51 | score
52 | """
53 |
54 | print("Start test_basic()")
55 |
56 | path = Path(network.model_save_directory)
57 | # if the directory doesn't get cleaned up because of error in testing
58 | if not path.exists():
59 | path.mkdir()
60 |
61 | X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
62 | X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
63 |
64 | network.fit(X_train[:10], y_train[:10])
65 |
66 | print(network.score(X_test[:10], y_test[:10]))
67 |
68 | (
69 | path / (network.base_model.model_name + "_0.hdf5")
70 | ).unlink() # delete file
71 | (
72 | path / (network.base_model.model_name + "_1.hdf5")
73 | ).unlink() # delete file
74 | path.rmdir() # directory should now be empty, fails if not
75 |
76 | print("End test_basic()")
77 |
78 |
79 | if __name__ == "__main__":
80 | test_basic_inmem()
81 | test_basic_saving()
82 |
--------------------------------------------------------------------------------
/sktime_dl/meta/tests/test_tuning.py:
--------------------------------------------------------------------------------
1 | from sktime.datasets import load_italy_power_demand
2 |
3 | from sktime_dl.classification import CNNClassifier
4 | from sktime_dl.meta import TunedDeepLearningClassifier
5 |
6 |
7 | def test_basic_tuning(
8 | network=TunedDeepLearningClassifier(
9 | base_model=CNNClassifier(),
10 | param_grid=dict(nb_epochs=[50, 100], ),
11 | cv_folds=3,
12 | )
13 | ):
14 | """
15 | just a super basic test of the tuner
16 | """
17 |
18 | print("Start test_basic_tuning()")
19 |
20 | X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
21 | X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
22 |
23 | network.fit(X_train[:10], y_train[:10])
24 |
25 | print(network.score(X_test[:10], y_test[:10]))
26 | print("End test_basic_tuning()")
27 |
28 |
29 | if __name__ == "__main__":
30 | test_basic_tuning()
31 |
--------------------------------------------------------------------------------
/sktime_dl/networks/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = [
2 | "CNNNetwork",
3 | "FCNNetwork",
4 | "InceptionTimeNetwork",
5 | "LSTMNetwork",
6 | "LSTMFCNNetwork",
7 | "CNTCNetwork",
8 | "MCDCNNNetwork",
9 | "MLPNetwork",
10 | "ResNetNetwork",
11 | "TLENETNetwork",
12 | ]
13 |
14 | from sktime_dl.networks._cnn import CNNNetwork
15 | from sktime_dl.networks._fcn import FCNNetwork
16 | from sktime_dl.networks._inceptiontime import InceptionTimeNetwork
17 | from sktime_dl.networks._lstm import LSTMNetwork
18 | from sktime_dl.networks._lstmfcn import LSTMFCNNetwork
19 | from sktime_dl.networks._cntc import CNTCNetwork
20 | from sktime_dl.networks._mcdcnn import MCDCNNNetwork
21 | from sktime_dl.networks._mlp import MLPNetwork
22 | from sktime_dl.networks._resnet import ResNetNetwork
23 | from sktime_dl.networks._tlenet import TLENETNetwork
24 |
--------------------------------------------------------------------------------
/sktime_dl/networks/_cnn.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Time Convolutional Neural Network (CNN) (minus the final output layer)."""
3 |
4 | __author__ = "James Large, Withington, Tony Bagnall"
5 |
6 | from tensorflow import keras
7 |
8 | from sktime_dl.networks._network import BaseDeepNetwork
9 |
10 |
11 | class CNNNetwork(BaseDeepNetwork):
12 | """
13 | Adapted from the implementation from Fawaz et. al
14 |
15 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/cnn.py
16 | Network originally defined in:
17 |
18 | @article{zhao2017convolutional,
19 | title={Convolutional neural networks for time series classification},
20 | author={Zhao, Bendong and Lu, Huanzhang and Chen, Shangfeng and Liu,
21 | Junliang and Wu, Dongya},
22 | journal={Journal of Systems Engineering and Electronics},
23 | volume={28},
24 | number={1},
25 | pages={162--169},
26 | year={2017},
27 | publisher={BIAI}
28 | }
29 | """
30 |
31 | def __init__(
32 | self,
33 | kernel_size=7,
34 | avg_pool_size=3,
35 | nb_conv_layers=2,
36 | filter_sizes=[6, 12],
37 | random_state=0,
38 | ):
39 | """
40 | :param kernel_size: int, specifying the length of the 1D convolution
41 | window
42 | :param avg_pool_size: int, size of the average pooling windows
43 | :param nb_conv_layers: int, the number of convolutional plus average
44 | pooling layers
45 | :param filter_sizes: list of ints, of length nb_conv_layers, the number of filters in each conv layer
46 | :param random_state: int, seed to any needed random actions
47 | """
48 |
49 | self.random_state = random_state
50 | self.kernel_size = kernel_size
51 | self.avg_pool_size = avg_pool_size
52 | self.nb_conv_layers = nb_conv_layers
53 | self.filter_sizes = filter_sizes
54 |
55 | def build_network(self, input_shape, **kwargs):
56 | """
57 | Construct a network and return its input and output layers
58 |
59 | Arguments
60 | ---------
61 | input_shape : tuple
62 | The shape of the data fed into the input layer
63 |
64 | Returns
65 | -------
66 | input_layer : a keras layer
67 | output_layer : a keras layer
68 | """
69 | padding = "valid"
70 | input_layer = keras.layers.Input(input_shape)
71 |
72 | if input_shape[0] < 60: # for ItalyPowerDemand dataset
73 | padding = "same"
74 |
75 | if len(self.filter_sizes) > self.nb_conv_layers:
76 | self.filter_sizes = self.filter_sizes[: self.nb_conv_layers]
77 | elif len(self.filter_sizes) < self.nb_conv_layers:
78 | self.filter_sizes = self.filter_sizes + [self.filter_sizes[-1]] * (
79 | self.nb_conv_layers - len(self.filter_sizes)
80 | )
81 |
82 | conv = keras.layers.Conv1D(
83 | filters=self.filter_sizes[0],
84 | kernel_size=self.kernel_size,
85 | padding=padding,
86 | activation="sigmoid",
87 | )(input_layer)
88 | conv = keras.layers.AveragePooling1D(pool_size=self.avg_pool_size)(
89 | conv
90 | )
91 |
92 | for i in range(1, self.nb_conv_layers):
93 | conv = keras.layers.Conv1D(
94 | filters=self.filter_sizes[i],
95 | kernel_size=self.kernel_size,
96 | padding=padding,
97 | activation="sigmoid",
98 | )(conv)
99 | conv = keras.layers.AveragePooling1D(pool_size=self.avg_pool_size)(
100 | conv
101 | )
102 |
103 | flatten_layer = keras.layers.Flatten()(conv)
104 |
105 | return input_layer, flatten_layer
106 |
--------------------------------------------------------------------------------
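The networks in this module deliberately stop before the output layer; a sketch of attaching a classification head, mirroring what build_model does in sktime_dl.classification (the 3-class head and series length 100 are arbitrary):

from tensorflow import keras
from sktime_dl.networks import CNNNetwork

network = CNNNetwork(kernel_size=7, nb_conv_layers=2)
input_layer, output_layer = network.build_network(input_shape=(100, 1))

# attach a task-specific head to the headless network
head = keras.layers.Dense(3, activation="softmax")(output_layer)
model = keras.models.Model(inputs=input_layer, outputs=head)
model.compile(loss="categorical_crossentropy", optimizer=keras.optimizers.Adam())
model.summary()
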
/sktime_dl/networks/_encoder.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large, Withington"
2 |
3 | import tensorflow
4 | import tensorflow.keras as keras
5 |
6 | from sktime_dl.networks._network import BaseDeepNetwork
7 |
8 | if tensorflow.__version__ >= "1.15" and tensorflow.__version__ <= "2":
9 | keras.__name__ = "tensorflow.keras"
10 | # InstanceNormalization lives in keras_contrib for TF < 2.1 and in tensorflow_addons after
11 | if tensorflow.__version__ < "2.1.0":
12 | import keras_contrib as ADDONS
13 | else:
14 | import tensorflow_addons as ADDONS
15 |
16 |
17 | class EncoderNetwork(BaseDeepNetwork):
18 | """Encoder
19 |
20 | Adapted from the implementation from Fawaz et. al
21 |
22 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/encoder.py
23 |
24 | Network originally defined in:
25 |
26 | @article{serra2018towards,
27 | title={Towards a universal neural network encoder for time series},
28 | author={Serrà, J and Pascual, S and Karatzoglou, A},
29 | journal={Artif Intell Res Dev Curr Chall New Trends Appl},
30 | volume={308},
31 | pages={120},
32 | year={2018}
33 | }
34 | """
35 |
36 | def __init__(self, random_state=0):
37 | """
38 | :param random_state: int, seed to any needed random actions
39 | """
40 | self.random_state = random_state
41 |
42 | def build_network(self, input_shape, **kwargs):
43 | """
44 | Construct a network and return its input and output layers
45 | ----------
46 | input_shape : tuple
47 | The shape of the data fed into the input layer
48 | Returns
49 | -------
50 | input_layer : a keras layer
51 | output_layer : a keras layer
52 | """
53 | input_layer = keras.layers.Input(input_shape)
54 |
55 | # conv block -1
56 | conv1 = keras.layers.Conv1D(
57 | filters=128, kernel_size=5, strides=1, padding="same"
58 | )(input_layer)
59 | conv1 = ADDONS.layers.InstanceNormalization()(conv1)
60 | conv1 = keras.layers.PReLU(shared_axes=[1])(conv1)
61 | conv1 = keras.layers.Dropout(rate=0.2)(conv1)
62 | conv1 = keras.layers.MaxPooling1D(pool_size=2, padding='same')(conv1)
63 | # conv block -2
64 | conv2 = keras.layers.Conv1D(
65 | filters=256, kernel_size=11, strides=1, padding="same"
66 | )(conv1)
67 | conv2 = ADDONS.layers.InstanceNormalization()(conv2)
68 | conv2 = keras.layers.PReLU(shared_axes=[1])(conv2)
69 | conv2 = keras.layers.Dropout(rate=0.2)(conv2)
70 | conv2 = keras.layers.MaxPooling1D(pool_size=2, padding='same')(conv2)
71 | # conv block -3
72 | conv3 = keras.layers.Conv1D(
73 | filters=512, kernel_size=21, strides=1, padding="same"
74 | )(conv2)
75 | conv3 = ADDONS.layers.InstanceNormalization()(conv3)
76 | conv3 = keras.layers.PReLU(shared_axes=[1])(conv3)
77 | conv3 = keras.layers.Dropout(rate=0.2)(conv3)
78 | # split for attention
79 | attention_data = keras.layers.Lambda(lambda x: x[:, :, :256])(conv3)
80 | attention_softmax = keras.layers.Lambda(lambda x: x[:, :, 256:])(conv3)
81 | # attention mechanism
82 | attention_softmax = keras.layers.Softmax()(attention_softmax)
83 | multiply_layer = keras.layers.Multiply()(
84 | [attention_softmax, attention_data]
85 | )
86 | # last layer
87 | dense_layer = keras.layers.Dense(units=256, activation="sigmoid")(
88 | multiply_layer
89 | )
90 | dense_layer = ADDONS.layers.InstanceNormalization()(dense_layer)
91 | # output layer
92 | flatten_layer = keras.layers.Flatten()(dense_layer)
93 |
94 | return input_layer, flatten_layer
95 |
--------------------------------------------------------------------------------
/sktime_dl/networks/_fcn.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large, Withington"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.networks._network import BaseDeepNetwork
6 |
7 |
8 | class FCNNetwork(BaseDeepNetwork):
9 | """Fully convolutional neural network (FCN).
10 |
11 | Adapted from the implementation from Fawaz et. al
12 |
13 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/fcn.py
14 |
15 | Network originally defined in:
16 |
17 | @inproceedings{wang2017time,
18 | title={Time series classification from scratch with deep neural networks:
19 | A strong baseline},
20 | author={Wang, Zhiguang and Yan, Weizhong and Oates, Tim},
21 | booktitle={2017 International joint conference on neural networks
22 | (IJCNN)},
23 | pages={1578--1585},
24 | year={2017},
25 | organization={IEEE}
26 | }
27 |
28 | :param random_state: int, seed to any needed random actions
29 | """
30 |
31 | def __init__(self, random_state=0):
32 | self.random_state = random_state
33 |
34 | def build_network(self, input_shape, **kwargs):
35 | """
36 | Construct a network and return its input and output layers
37 | ----------
38 | input_shape : tuple
39 | The shape of the data fed into the input layer
40 | Returns
41 | -------
42 | input_layer : a keras layer
43 | output_layer : a keras layer
44 | """
45 | input_layer = keras.layers.Input(input_shape)
46 |
47 | conv1 = keras.layers.Conv1D(
48 | filters=128, kernel_size=8, padding="same"
49 | )(input_layer)
50 | conv1 = keras.layers.BatchNormalization()(conv1)
51 | conv1 = keras.layers.Activation(activation="relu")(conv1)
52 |
53 | conv2 = keras.layers.Conv1D(
54 | filters=256, kernel_size=5, padding="same"
55 | )(conv1)
56 | conv2 = keras.layers.BatchNormalization()(conv2)
57 | conv2 = keras.layers.Activation("relu")(conv2)
58 |
59 | conv3 = keras.layers.Conv1D(128, kernel_size=3, padding="same")(conv2)
60 | conv3 = keras.layers.BatchNormalization()(conv3)
61 | conv3 = keras.layers.Activation("relu")(conv3)
62 |
63 | gap_layer = keras.layers.GlobalAveragePooling1D()(conv3)
64 |
65 | return input_layer, gap_layer
66 |
--------------------------------------------------------------------------------
/sktime_dl/networks/_inceptiontime.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large, Withington"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.networks._network import BaseDeepNetwork
6 |
7 |
8 | class InceptionTimeNetwork(BaseDeepNetwork):
9 | """InceptionTime
10 |
11 | Adapted from the implementation from Fawaz et. al
12 |
13 | https://github.com/hfawaz/InceptionTime/blob/master/classifiers/
14 | inception.py
15 |
16 | Network originally defined in:
17 |
18 | @article{IsmailFawaz2019inceptionTime, Title = {
19 | InceptionTime: Finding AlexNet for Time Series Classification}, Author
20 | = {Ismail Fawaz, Hassan and Lucas, Benjamin and
21 | Forestier, Germain and Pelletier, Charlotte and Schmidt,
22 | Daniel F. and Weber, Jonathan and Webb, Geoffrey I. and
23 | Idoumghar, Lhassane and Muller, Pierre-Alain and
24 | Petitjean, François}, journal = {
25 | ArXiv}, Year = {2019} }
26 | """
27 |
28 | def __init__(
29 | self,
30 | nb_filters=32,
31 | use_residual=True,
32 | use_bottleneck=True,
33 | bottleneck_size=32,
34 | depth=6,
35 | kernel_size=41 - 1,
36 | random_state=0,
37 | ):
38 | """
39 | :param nb_filters: int,
40 | :param use_residual: boolean,
41 | :param use_bottleneck: boolean,
42 | :param depth: int
43 | :param kernel_size: int, specifying the length of the 1D convolution
44 | window
45 | :param bottleneck_size: int,
46 | :param random_state: int, seed to any needed random actions
47 | """
48 |
49 | self.nb_filters = nb_filters
50 | self.use_residual = use_residual
51 | self.use_bottleneck = use_bottleneck
52 | self.depth = depth
53 | self.kernel_size = kernel_size
54 | self.bottleneck_size = bottleneck_size
55 |
56 | self.random_state = random_state
57 |
58 | def _inception_module(self, input_tensor, stride=1, activation="linear"):
59 |
60 | if self.use_bottleneck and int(input_tensor.shape[-1]) > 1:
61 | input_inception = keras.layers.Conv1D(
62 | filters=self.bottleneck_size,
63 | kernel_size=1,
64 | padding="same",
65 | activation=activation,
66 | use_bias=False,
67 | )(input_tensor)
68 | else:
69 | input_inception = input_tensor
70 |
71 | # kernel_size_s = [3, 5, 8, 11, 17]
72 | kernel_size_s = [self.kernel_size // (2 ** i) for i in range(3)]
73 |
74 | conv_list = []
75 |
76 | for i in range(len(kernel_size_s)):
77 | conv_list.append(
78 | keras.layers.Conv1D(
79 | filters=self.nb_filters,
80 | kernel_size=kernel_size_s[i],
81 | strides=stride,
82 | padding="same",
83 | activation=activation,
84 | use_bias=False,
85 | )(input_inception)
86 | )
87 |
88 | max_pool_1 = keras.layers.MaxPool1D(
89 | pool_size=3, strides=stride, padding="same"
90 | )(input_tensor)
91 |
92 | conv_6 = keras.layers.Conv1D(
93 | filters=self.nb_filters,
94 | kernel_size=1,
95 | padding="same",
96 | activation=activation,
97 | use_bias=False,
98 | )(max_pool_1)
99 |
100 | conv_list.append(conv_6)
101 |
102 | x = keras.layers.Concatenate(axis=2)(conv_list)
103 | x = keras.layers.BatchNormalization()(x)
104 | x = keras.layers.Activation(activation="relu")(x)
105 | return x
106 |
107 | def _shortcut_layer(self, input_tensor, out_tensor):
108 | shortcut_y = keras.layers.Conv1D(
109 | filters=int(out_tensor.shape[-1]),
110 | kernel_size=1,
111 | padding="same",
112 | use_bias=False,
113 | )(input_tensor)
114 | shortcut_y = keras.layers.BatchNormalization()(shortcut_y)
115 |
116 | x = keras.layers.Add()([shortcut_y, out_tensor])
117 | x = keras.layers.Activation("relu")(x)
118 | return x
119 |
120 | def build_network(self, input_shape, **kwargs):
121 | """
122 | Construct a network and return its input and output layers
123 | ----------
124 | input_shape : tuple
125 | The shape of the data fed into the input layer
126 | Returns
127 | -------
128 | input_layer : a keras layer
129 | output_layer : a keras layer
130 | """
131 | input_layer = keras.layers.Input(input_shape)
132 |
133 | x = input_layer
134 | input_res = input_layer
135 |
136 | for d in range(self.depth):
137 | x = self._inception_module(x)
138 |
139 | if self.use_residual and d % 3 == 2:
140 | x = self._shortcut_layer(input_res, x)
141 | input_res = x
142 |
143 | gap_layer = keras.layers.GlobalAveragePooling1D()(x)
144 |
145 | return input_layer, gap_layer
146 |
--------------------------------------------------------------------------------
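To make the module geometry concrete: with the default kernel_size=40, each inception module runs three parallel convolutions of length kernel_size // 2**i, plus the bottlenecked max-pool branch. A small sketch:

from sktime_dl.networks import InceptionTimeNetwork

net = InceptionTimeNetwork(depth=6, kernel_size=40)
print([net.kernel_size // (2 ** i) for i in range(3)])  # [40, 20, 10]
input_layer, gap = net.build_network((128, 1))
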
/sktime_dl/networks/_lstm.py:
--------------------------------------------------------------------------------
1 | __author__ = "Withington"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.networks._network import BaseDeepNetwork
6 |
7 |
8 | class LSTMNetwork(BaseDeepNetwork):
9 | """ Long Short-Term Memory (LSTM)
10 |
11 | Adapted from the implementation of Brownlee, J. (2018)
12 |
13 | https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/
14 | """
15 |
16 | def __init__(self):
17 | self.random_state = None
18 | self.units = None  # list of two ints; must be set by the owning estimator before build_network is called
19 |
20 | def build_network(self, input_shape, **kwargs):
21 | """
22 | Construct a network and return its input and output layers
23 | ----------
24 | input_shape : tuple
25 | The shape of the data fed into the input layer
26 | Returns
27 | -------
28 | input_layer : a keras layer
29 | output_layer : a keras layer
30 | """
31 | input_layer = keras.layers.Input(input_shape)
32 | output_layer = keras.layers.LSTM(
33 | units=self.units[0],
34 | activation='relu',
35 | return_sequences=True)(input_layer)
36 | output_layer = keras.layers.LSTM(
37 | units=self.units[1],
38 | activation='relu')(output_layer)
39 | return input_layer, output_layer
40 |
--------------------------------------------------------------------------------
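Note that build_network indexes self.units, which __init__ leaves as None; the owning estimator (e.g. the LSTM regressor) is expected to set it before building. A sketch of driving the bare network directly, with illustrative cell counts:

from sktime_dl.networks import LSTMNetwork

net = LSTMNetwork()
net.units = [50, 50]  # two stacked LSTM layers; must be set before building
input_layer, output_layer = net.build_network((100, 1))
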
/sktime_dl/networks/_lstmfcn.py:
--------------------------------------------------------------------------------
1 | __author__ = "Jack Russon"
2 |
3 | from tensorflow import keras
4 | from sktime_dl.utils.layer_utils import AttentionLSTM
5 |
6 | from sktime_dl.networks._network import BaseDeepNetwork
7 |
8 |
9 | class LSTMFCNNetwork(BaseDeepNetwork):
10 | """
11 | """
12 |
13 | def __init__(
14 | self,
15 | kernel_sizes=[8, 5, 3],
16 | filter_sizes=[128, 256, 128],
17 | NUM_CELLS=8,
18 | random_state=0,
19 | dropout=0.8,
20 | attention=False
21 | ):
22 | """
23 | :param kernel_sizes: list of ints, specifying the length of the 1D convolution
24 | windows
25 | :param filter_sizes: int, array of shape = 3, size of filter for each
26 | conv layer
27 | :param random_state: int, seed to any needed random actions
28 | """
29 |
30 | self.random_state = random_state
31 | self.kernel_sizes = kernel_sizes
32 | self.filter_sizes = filter_sizes
33 | self.NUM_CELLS=NUM_CELLS
34 | self.dropout=dropout
35 | self.attention=attention
36 |
37 | def build_network(self, input_shape, **kwargs):
38 | """
39 | Construct a network and return its input and output layers
40 | ----------
41 | input_shape : tuple
42 | The shape of the data fed into the input layer
43 | Returns
44 | -------
45 | input_layer : a keras layer
46 | output_layer : a keras layer
47 | """
48 | input_layer = keras.layers.Input(shape=input_shape)
49 |
50 | x = keras.layers.Permute((2, 1))(input_layer)
51 | if self.attention:
52 |
53 | x = AttentionLSTM(self.NUM_CELLS)(x)
54 | else:
55 | x = keras.layers.LSTM(self.NUM_CELLS)(x)
56 | x = keras.layers.Dropout(self.dropout)(x)
57 |
58 | y = keras.layers.Conv1D(self.filter_sizes[0], self.kernel_sizes[0], padding='same', kernel_initializer='he_uniform')(input_layer)
59 | y = keras.layers.BatchNormalization()(y)
60 | y = keras.layers.Activation('relu')(y)
61 |
62 | y = keras.layers.Conv1D(self.filter_sizes[1], self.kernel_sizes[1], padding='same', kernel_initializer='he_uniform')(y)
63 | y = keras.layers.BatchNormalization()(y)
64 | y = keras.layers.Activation('relu')(y)
65 |
66 | y = keras.layers.Conv1D(self.filter_sizes[2], self.kernel_sizes[2], padding='same', kernel_initializer='he_uniform')(y)
67 | y = keras.layers.BatchNormalization()(y)
68 | y = keras.layers.Activation('relu')(y)
69 |
70 | y = keras.layers.GlobalAveragePooling1D()(y)
71 |
72 | output_layer = keras.layers.concatenate([x, y])
73 |
74 | return input_layer, output_layer
75 |
76 |
--------------------------------------------------------------------------------
/sktime_dl/networks/_macnn.py:
--------------------------------------------------------------------------------
1 | __author__ = "Jack Russon"
2 | import tensorflow as tf
3 | from tensorflow import keras
4 |
5 | from sktime_dl.networks._network import BaseDeepNetwork
6 |
7 |
8 | class MACNNNetwork(BaseDeepNetwork):
9 | """
10 | """
11 |
12 | def __init__(
13 | self,
14 | random_state=1,
15 | padding='same',
16 | pool_size=3,
17 | stride=2,
18 | repeats=2,
19 | filters=[64, 128, 256],
20 | kernel_sizes=[3, 6, 12],
21 | reduction=16
22 | ):
23 | """
24 | :param random_state: int, seed to any needed random actions
25 | :param pool_size: int, size of the max pooling windows
26 | :param stride: int, stride of the max pooling windows
27 | :param repeats: int, number of MACNN blocks per stack
28 | :param filters: list of 3 ints, conv filters used by the three stacks
29 | :param kernel_sizes: list of 3 ints, kernel sizes of the parallel convolutions
30 | :param reduction: int, reduction ratio of the attention bottleneck
31 | """
32 | super(MACNNNetwork, self).__init__()
33 | self.random_state = random_state
34 | self.padding = padding
35 | self.pool_size = pool_size
36 | self.stride = stride
37 | self.repeats = repeats
38 | self.filters = filters
39 | self.kernel_sizes, self.reduction = kernel_sizes, reduction
40 | def __MACNN_block(self, x, kernels, reduce):
41 | cov1 = keras.layers.Conv1D(kernels, self.kernel_sizes[0], padding='same')(x)
42 |
43 | cov2 = keras.layers.Conv1D(kernels, self.kernel_sizes[1], padding='same')(x)
44 |
45 | cov3 = keras.layers.Conv1D(kernels, self.kernel_sizes[2], padding='same')(x)
46 |
47 | x = keras.layers.Concatenate(axis=2)([cov1, cov2, cov3])
48 | x = keras.layers.BatchNormalization()(x)
49 | x = keras.layers.Activation('relu')(x)
50 | y = tf.math.reduce_mean(x, 1)
51 | y = keras.layers.Dense(int(kernels * 3 / reduce), use_bias=False, activation='relu')(y)
52 | y = keras.layers.Dense(int(kernels * 3), use_bias=False, activation='relu')(y)
53 | y = tf.reshape(y, [-1, 1, kernels * 3])
54 | return x * y
55 |
56 | def __stack(self, x, loop_num, kernels, reduce=16):
57 | for i in range(loop_num):
58 | x = self.__MACNN_block(x, kernels, reduce)
59 | return x
60 |
61 |
62 | def build_network(self, input_shape, **kwargs):
63 | """
64 | Construct a network and return its input and output layers
65 | ----------
66 | input_shape : tuple
67 | The shape of the data fed into the input layer
68 | Returns
69 | -------
70 | input_layer : a keras layer
71 | output_layer : a keras layer
72 | """
73 | input_layer = keras.layers.Input(shape=input_shape)
74 | x=self.__stack(input_layer,self.repeats,self.filters[0],self.reduction)
75 | x = keras.layers.MaxPooling1D( self.pool_size, self.stride, padding='same')(x)
76 | x = self.__stack(x, self.repeats, self.filters[1], self.reduction)
77 | x = keras.layers.MaxPooling1D( self.pool_size, self.stride, padding='same')(x)
78 | x = self.__stack(x, self.repeats, self.filters[2], self.reduction)
79 |
80 | fc = tf.reduce_mean(x, 1)
81 |
82 | return input_layer, fc
83 |
84 |
85 |
--------------------------------------------------------------------------------
/sktime_dl/networks/_mcdcnn.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large, Withington"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.networks._network import BaseDeepNetwork
6 |
7 |
8 | class MCDCNNNetwork(BaseDeepNetwork):
9 | """Multi Channel Deep Convolutional Neural Network (MCDCNN).
10 |
11 | Adapted from the implementation from Fawaz et. al
12 |
13 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/mcdcnn.py
14 |
15 | Network originally defined in:
16 |
17 | @inproceedings{zheng2014time, title={Time series classification using
18 | multi-channels deep convolutional neural networks}, author={Zheng,
19 | Yi and Liu, Qi and Chen, Enhong and Ge, Yong and Zhao, J Leon},
20 | booktitle={International Conference on Web-Age Information Management},
21 | pages={298--310}, year={2014}, organization={Springer} }
22 | """
23 |
24 | def __init__(
25 | self,
26 | kernel_size=5,
27 | pool_size=2,
28 | filter_sizes=[8, 8],
29 | dense_units=732,
30 | random_state=0,
31 | ):
32 | """
33 | :param kernel_size: int, specifying the length of the 1D convolution
34 | window
35 | :param pool_size: int, size of the max pooling windows
36 | :param filter_sizes: int, array of shape = 2, size of filter for each
37 | conv layer
38 | :param dense_units: int, number of units in the penultimate dense layer
39 | :param random_state: int, seed to any needed random actions
40 | """
41 |
42 | self.random_state = random_state
43 |
44 | self.kernel_size = kernel_size
45 | self.pool_size = pool_size
46 | self.filter_sizes = filter_sizes
47 | self.dense_units = dense_units
48 |
49 | def build_network(self, input_shape, **kwargs):
50 | """
51 | Construct a network and return its input and output layers
52 | ----------
53 | input_shape : tuple
54 | The shape of the data fed into the input layer
55 | Returns
56 | -------
57 | input_layers : keras layers
58 | output_layer : a keras layer
59 | """
60 | n_t = input_shape[0]
61 | n_vars = input_shape[1]
62 |
63 | padding = "valid"
64 |
65 | if n_t < 60:  # for ItalyPowerDemand
66 | padding = "same"
67 |
68 | input_layers = []
69 | conv2_layers = []
70 |
71 | for n_var in range(n_vars):
72 | input_layer = keras.layers.Input((n_t, 1))
73 | input_layers.append(input_layer)
74 |
75 | conv1_layer = keras.layers.Conv1D(
76 | self.filter_sizes[0],
77 | kernel_size=self.kernel_size,
78 | activation="relu",
79 | padding=padding,
80 | )(input_layer)
81 | conv1_layer = keras.layers.MaxPooling1D(
82 | pool_size=self.pool_size,
83 | padding='same',
84 | )(conv1_layer)
85 |
86 | conv2_layer = keras.layers.Conv1D(
87 | self.filter_sizes[1],
88 | kernel_size=self.kernel_size,
89 | activation="relu",
90 | padding=padding,
91 | )(conv1_layer)
92 | conv2_layer = keras.layers.MaxPooling1D(
93 | pool_size=self.pool_size,
94 | padding='same',
95 | )(conv2_layer)
96 | conv2_layer = keras.layers.Flatten()(conv2_layer)
97 |
98 | conv2_layers.append(conv2_layer)
99 |
100 | if n_vars == 1:
101 | # to work with univariate time series
102 | concat_layer = conv2_layers[0]
103 | else:
104 | concat_layer = keras.layers.Concatenate(axis=-1)(conv2_layers)
105 |
106 | fully_connected = keras.layers.Dense(
107 | units=self.dense_units, activation="relu"
108 | )(concat_layer)
109 |
110 | return input_layers, fully_connected
111 |
112 | def prepare_input(self, x):
113 | new_x = []
114 | n_vars = x.shape[2]
115 |
116 | for i in range(n_vars):
117 | new_x.append(x[:, :, i:i + 1])
118 |
119 | return new_x
120 |
--------------------------------------------------------------------------------
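The per-channel splitting performed by prepare_input above is easiest to see on a concrete array. A minimal sketch (not part of the library; assumes numpy, tensorflow and sktime-dl are installed):

    import numpy as np
    from sktime_dl.networks._mcdcnn import MCDCNNNetwork

    # 10 instances, series length 60, 3 channels
    X = np.random.rand(10, 60, 3)
    inputs = MCDCNNNetwork().prepare_input(X)

    assert len(inputs) == 3                # one array per channel
    assert inputs[0].shape == (10, 60, 1)  # channel axis kept with size 1

Each array in the returned list feeds one of the per-variable input layers created in build_network.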
/sktime_dl/networks/_mlp.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large, Withington"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.networks._network import BaseDeepNetwork
6 |
7 |
8 | class MLPNetwork(BaseDeepNetwork):
9 | """Multi Layer Perceptron (MLP) (minus the final output layer).
10 |
11 | Adapted from the implementation of Fawaz et al.
12 |
13 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/mlp.py
14 |
15 | Network originally defined in:
16 |
17 | @inproceedings{wang2017time, title={Time series classification from
18 | scratch with deep neural networks: A strong baseline}, author={Wang,
19 | Zhiguang and Yan, Weizhong and Oates, Tim}, booktitle={2017
20 | International joint conference on neural networks (IJCNN)}, pages={
21 | 1578--1585}, year={2017}, organization={IEEE} }
22 | """
23 |
24 | def __init__(self, random_state=0):
25 | """
26 | :param random_state: int, seed to any needed random actions
27 | """
28 | self.random_state = random_state
29 |
30 | def build_network(self, input_shape, **kwargs):
31 | """
32 | Construct a network and return its input and output layers
33 | ----------
34 | input_shape : tuple
35 | The shape of the data fed into the input layer
36 | Returns
37 | -------
38 | input_layer : a keras layer
39 | output_layer : a keras layer
40 | """
41 | input_layer = keras.layers.Input(input_shape)
42 |
43 | # flatten/reshape because when multivariate all should be on the
44 | # same axis
45 | input_layer_flattened = keras.layers.Flatten()(input_layer)
46 |
47 | layer_1 = keras.layers.Dropout(0.1)(input_layer_flattened)
48 | layer_1 = keras.layers.Dense(500, activation="relu")(layer_1)
49 |
50 | layer_2 = keras.layers.Dropout(0.2)(layer_1)
51 | layer_2 = keras.layers.Dense(500, activation="relu")(layer_2)
52 |
53 | layer_3 = keras.layers.Dropout(0.2)(layer_2)
54 | layer_3 = keras.layers.Dense(500, activation="relu")(layer_3)
55 |
56 | output_layer = keras.layers.Dropout(0.3)(layer_3)
57 |
58 | return input_layer, output_layer
59 |
--------------------------------------------------------------------------------
/sktime_dl/networks/_network.py:
--------------------------------------------------------------------------------
1 | # Base class for networks - partially built neural networks. A final
2 | # output layer can be added to a BaseDeepNetwork to create a classifier
3 | # or a regressor.
4 |
5 | __author__ = "Withington"
6 |
7 |
8 | class BaseDeepNetwork:
9 |
10 | def build_network(self, input_shape, **kwargs):
11 | """
12 | Construct a network and return its input and output layers
13 | ----------
14 | input_shape : tuple
15 | The shape of the data fed into the input layer
16 | Returns
17 | -------
18 | input_layer : a keras layer
19 | output_layer : a keras layer
20 | """
21 | raise NotImplementedError("this is an abstract method")
22 |
--------------------------------------------------------------------------------
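The comment at the top of _network.py describes the intended pattern: build_network returns a headless graph, and a task-specific output layer completes it. A minimal sketch of that pattern (illustrative, assuming tensorflow is installed), mirroring what the regressors below do in build_model:

    from tensorflow import keras
    from sktime_dl.networks._mlp import MLPNetwork

    # build the partial network, then add a single-unit head for regression
    input_layer, output_layer = MLPNetwork().build_network((60, 1))
    head = keras.layers.Dense(units=1)(output_layer)

    model = keras.models.Model(inputs=input_layer, outputs=head)
    model.compile(loss="mean_squared_error", optimizer=keras.optimizers.Adam())

A classifier would instead end in a softmax Dense layer sized to the number of classes.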
/sktime_dl/networks/_resnet.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large, Withington"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.networks._network import BaseDeepNetwork
6 |
7 |
8 | class ResNetNetwork(BaseDeepNetwork):
9 | """Residual Network (ResNet).
10 |
11 | Adapted from the implementation of Fawaz et al.
12 |
13 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/resnet.py
14 |
15 | Network originally defined in:
16 |
17 | @inproceedings{wang2017time, title={Time series classification from
18 | scratch with deep neural networks: A strong baseline}, author={Wang,
19 | Zhiguang and Yan, Weizhong and Oates, Tim}, booktitle={2017
20 | International joint conference on neural networks (IJCNN)}, pages={
21 | 1578--1585}, year={2017}, organization={IEEE} }
22 | """
23 |
24 | def __init__(self, random_state=0):
25 | """
26 | :param random_state: int, seed to any needed random actions
27 | """
28 | self.random_state = random_state
29 |
30 | def build_network(self, input_shape, **kwargs):
31 | """
32 | Construct a network and return its input and output layers
33 | ----------
34 | input_shape : tuple
35 | The shape of the data fed into the input layer
36 | Returns
37 | -------
38 | input_layer : a keras layer
39 | output_layer : a keras layer
40 | """
41 | n_feature_maps = 64
42 |
43 | input_layer = keras.layers.Input(input_shape)
44 |
45 | # BLOCK 1
46 |
47 | conv_x = keras.layers.Conv1D(
48 | filters=n_feature_maps, kernel_size=8, padding="same"
49 | )(input_layer)
50 | conv_x = keras.layers.BatchNormalization()(conv_x)
51 | conv_x = keras.layers.Activation("relu")(conv_x)
52 |
53 | conv_y = keras.layers.Conv1D(
54 | filters=n_feature_maps, kernel_size=5, padding="same"
55 | )(conv_x)
56 | conv_y = keras.layers.BatchNormalization()(conv_y)
57 | conv_y = keras.layers.Activation("relu")(conv_y)
58 |
59 | conv_z = keras.layers.Conv1D(
60 | filters=n_feature_maps, kernel_size=3, padding="same"
61 | )(conv_y)
62 | conv_z = keras.layers.BatchNormalization()(conv_z)
63 |
64 | # expand channels for the sum
65 | shortcut_y = keras.layers.Conv1D(
66 | filters=n_feature_maps, kernel_size=1, padding="same"
67 | )(input_layer)
68 | shortcut_y = keras.layers.BatchNormalization()(shortcut_y)
69 |
70 | output_block_1 = keras.layers.add([shortcut_y, conv_z])
71 | output_block_1 = keras.layers.Activation("relu")(output_block_1)
72 |
73 | # BLOCK 2
74 |
75 | conv_x = keras.layers.Conv1D(
76 | filters=n_feature_maps * 2, kernel_size=8, padding="same"
77 | )(output_block_1)
78 | conv_x = keras.layers.BatchNormalization()(conv_x)
79 | conv_x = keras.layers.Activation("relu")(conv_x)
80 |
81 | conv_y = keras.layers.Conv1D(
82 | filters=n_feature_maps * 2, kernel_size=5, padding="same"
83 | )(conv_x)
84 | conv_y = keras.layers.BatchNormalization()(conv_y)
85 | conv_y = keras.layers.Activation("relu")(conv_y)
86 |
87 | conv_z = keras.layers.Conv1D(
88 | filters=n_feature_maps * 2, kernel_size=3, padding="same"
89 | )(conv_y)
90 | conv_z = keras.layers.BatchNormalization()(conv_z)
91 |
92 | # expand channels for the sum
93 | shortcut_y = keras.layers.Conv1D(
94 | filters=n_feature_maps * 2, kernel_size=1, padding="same"
95 | )(output_block_1)
96 | shortcut_y = keras.layers.BatchNormalization()(shortcut_y)
97 |
98 | output_block_2 = keras.layers.add([shortcut_y, conv_z])
99 | output_block_2 = keras.layers.Activation("relu")(output_block_2)
100 |
101 | # BLOCK 3
102 |
103 | conv_x = keras.layers.Conv1D(
104 | filters=n_feature_maps * 2, kernel_size=8, padding="same"
105 | )(output_block_2)
106 | conv_x = keras.layers.BatchNormalization()(conv_x)
107 | conv_x = keras.layers.Activation("relu")(conv_x)
108 |
109 | conv_y = keras.layers.Conv1D(
110 | filters=n_feature_maps * 2, kernel_size=5, padding="same"
111 | )(conv_x)
112 | conv_y = keras.layers.BatchNormalization()(conv_y)
113 | conv_y = keras.layers.Activation("relu")(conv_y)
114 |
115 | conv_z = keras.layers.Conv1D(
116 | filters=n_feature_maps * 2, kernel_size=3, padding="same"
117 | )(conv_y)
118 | conv_z = keras.layers.BatchNormalization()(conv_z)
119 |
120 | # no need to expand channels because they are equal
121 | shortcut_y = keras.layers.BatchNormalization()(output_block_2)
122 |
123 | output_block_3 = keras.layers.add([shortcut_y, conv_z])
124 | output_block_3 = keras.layers.Activation("relu")(output_block_3)
125 |
126 | # FINAL
127 |
128 | gap_layer = keras.layers.GlobalAveragePooling1D()(output_block_3)
129 |
130 | return input_layer, gap_layer
131 |
--------------------------------------------------------------------------------
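A quick sanity check on the architecture above (a sketch, assuming tensorflow is available): the third block keeps n_feature_maps * 2 = 128 filters and ends in GlobalAveragePooling1D, so the headless output has 128 features regardless of series length:

    from sktime_dl.networks._resnet import ResNetNetwork

    input_layer, gap_layer = ResNetNetwork().build_network((100, 1))
    print(gap_layer.shape)  # (None, 128): the time axis is pooled away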
/sktime_dl/regression/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = [
2 | "CNNRegressor",
3 | "FCNRegressor",
4 | "InceptionTimeRegressor",
5 | "LSTMRegressor",
6 | "LSTMFCNRegressor",
7 | "EncoderRegressor",
8 | "CNTCRegressor",
9 | "MCDCNNRegressor",
10 | "MLPRegressor",
11 | "ResNetRegressor",
12 | "SimpleRNNRegressor",
13 | "TLENETRegressor",
14 | ]
15 |
16 | from sktime_dl.regression._cnn import CNNRegressor
17 | from sktime_dl.regression._fcn import FCNRegressor
18 | from sktime_dl.regression._inceptiontime import InceptionTimeRegressor
19 | from sktime_dl.regression._lstm import LSTMRegressor
20 | from sktime_dl.regression._lstmfcn import LSTMFCNRegressor
21 | from sktime_dl.regression._encoder import EncoderRegressor
22 | from sktime_dl.regression._cntc import CNTCRegressor
23 | from sktime_dl.regression._mcdcnn import MCDCNNRegressor
24 | from sktime_dl.regression._mlp import MLPRegressor
25 | from sktime_dl.regression._resnet import ResNetRegressor
26 | from sktime_dl.regression._rnn import SimpleRNNRegressor
27 | from sktime_dl.regression._tlenet import TLENETRegressor
28 |
--------------------------------------------------------------------------------
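The __all__ list above defines the public regression API; user code imports from the subpackage rather than the private modules, e.g.:

    from sktime_dl.regression import CNNRegressor, ResNetRegressor

    cnn = CNNRegressor(nb_epochs=50)
    resnet = ResNetRegressor(nb_epochs=50, verbose=True)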
/sktime_dl/regression/_cnn.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Time Convolutional Neural Network (CNN) for regression"""
3 |
4 | __author__ = "James Large, Withington"
5 |
6 | from tensorflow import keras
7 |
8 | from sktime_dl.regression._regressor import BaseDeepRegressor
9 | from sktime_dl.networks._cnn import CNNNetwork
10 | from sktime_dl.utils import check_and_clean_data, \
11 | check_and_clean_validation_data
12 |
13 |
14 | class CNNRegressor(BaseDeepRegressor, CNNNetwork):
15 | """Time Convolutional Neural Network (CNN).
16 |
17 | Adapted from the implementation of Fawaz et al.
18 |
19 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/cnn.py
20 |
21 | Network originally defined in:
22 |
23 | @article{zhao2017convolutional, title={Convolutional neural networks for
24 | time series classification}, author={Zhao, Bendong and Lu, Huanzhang and
25 | Chen, Shangfeng and Liu, Junliang and Wu, Dongya}, journal={Journal of
26 | Systems Engineering and Electronics}, volume={28}, number={1}, pages={
27 | 162--169}, year={2017}, publisher={BIAI} }
28 |
29 | :param nb_epochs: int, the number of epochs to train the model
30 | :param batch_size: int, the number of samples per gradient update.
31 | :param kernel_size: int, specifying the length of the 1D convolution
32 | window
33 | :param avg_pool_size: int, size of the average pooling windows
34 | :param nb_conv_layers: int, the number of convolutional plus average
35 | pooling layers
36 | :param filter_sizes: list of int, length nb_conv_layers, filters per conv layer
37 | :param callbacks: list of tf.keras.callbacks.Callback objects
38 | :param random_state: int, seed to any needed random actions
39 | :param verbose: boolean, whether to output extra information
40 | :param model_name: string, the name of this model for printing and file
41 | writing purposes
42 | :param model_save_directory: string, if not None; location to save the
43 | trained keras model in hdf5 format
44 | """
45 |
46 | def __init__(
47 | self,
48 | nb_epochs=2000,
49 | batch_size=16,
50 | kernel_size=7,
51 | avg_pool_size=3,
52 | nb_conv_layers=2,
53 | filter_sizes=[6, 12],
54 | callbacks=None,
55 | random_state=0,
56 | verbose=False,
57 | model_name="cnn_regressor",
58 | model_save_directory=None,
59 | ):
60 | super(CNNRegressor, self).__init__(
61 | model_save_directory=model_save_directory,
62 | model_name=model_name,
63 | )
64 | self.filter_sizes = filter_sizes
65 | self.nb_conv_layers = nb_conv_layers
66 | self.avg_pool_size = avg_pool_size
67 | self.random_state = random_state
68 | self.kernel_size = kernel_size
69 | self.verbose = verbose
70 | self.callbacks = callbacks
71 | self.nb_epochs = nb_epochs
72 | self.batch_size = batch_size
73 |
74 | self._is_fitted = False
75 |
76 | def build_model(self, input_shape, **kwargs):
77 | """
78 | Construct a compiled, un-trained, keras model that is ready for
79 | training
80 | ----------
81 | input_shape : tuple
82 | The shape of the data fed into the input layer
83 | Returns
84 | -------
85 | output : a compiled Keras Model
86 | """
87 | input_layer, output_layer = self.build_network(input_shape, **kwargs)
88 |
89 | output_layer = keras.layers.Dense(units=1)(output_layer)
90 |
91 | model = keras.models.Model(inputs=input_layer, outputs=output_layer)
92 | model.compile(
93 | loss="mean_squared_error",
94 | optimizer=keras.optimizers.Adam(),
95 | metrics=["mean_squared_error"],
96 | )
97 |
98 | return model
99 |
100 | def fit(self, X, y, input_checks=True, validation_X=None,
101 | validation_y=None, **kwargs):
102 | """
103 | Fit the regressor on the training set (X, y)
104 | ----------
105 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
106 | shape = (n_instances, series_length, n_dimensions)
107 | The training input samples. If a 2D array-like is passed,
108 | n_dimensions is assumed to be 1.
109 | y : array-like, shape = [n_instances]
110 | The training data target values.
111 | input_checks : boolean
112 | whether to check the X and y parameters
113 | validation_X : a nested pd.Dataframe, or array-like of shape =
114 | (n_instances, series_length, n_dimensions)
115 | The validation samples. If a 2D array-like is passed,
116 | n_dimensions is assumed to be 1.
117 | Unless strictly defined by the user via callbacks (such as
118 | EarlyStopping), the presence or state of the validation
119 | data does not alter training in any way. Predictions at each epoch
120 | are stored in the model's fit history.
121 | validation_y : array-like, shape = [n_instances]
122 | The validation target values.
123 | Returns
124 | -------
125 | self : object
126 | """
127 | if self.callbacks is None:
128 | self.callbacks = []
129 |
130 | X = check_and_clean_data(X, y, input_checks=input_checks)
131 |
132 | validation_data = \
133 | check_and_clean_validation_data(validation_X, validation_y)
134 |
135 | # ignore the number of instances, X.shape[0],
136 | # just want the shape of each instance
137 | self.input_shape = X.shape[1:]
138 |
139 | self.model = self.build_model(self.input_shape)
140 |
141 | if self.verbose:
142 | self.model.summary()
143 |
144 | self.history = self.model.fit(
145 | X,
146 | y,
147 | batch_size=self.batch_size,
148 | epochs=self.nb_epochs,
149 | verbose=self.verbose,
150 | callbacks=self.callbacks,
151 | validation_data=validation_data,
152 | )
153 |
154 | self.save_trained_model()
155 | self._is_fitted = True
156 |
157 | return self
158 |
--------------------------------------------------------------------------------
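As the fit docstring above notes, a plain 3D array is accepted when input_checks=False. A minimal end-to-end sketch on synthetic data (illustrative only; assumes numpy and tensorflow are installed):

    import numpy as np
    from sktime_dl.regression import CNNRegressor

    # (n_instances, series_length, n_dimensions)
    X = np.random.rand(40, 60, 1)
    y = np.random.rand(40)

    reg = CNNRegressor(nb_epochs=10, batch_size=8)
    reg.fit(X, y, input_checks=False)
    y_pred = reg.predict(X, input_checks=False)

Passing validation_X/validation_y the same way records per-epoch validation scores in reg.history without altering training itself.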
/sktime_dl/regression/_encoder.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large, Withington"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.regression._regressor import BaseDeepRegressor
6 | from sktime_dl.networks._encoder import EncoderNetwork
7 | from sktime_dl.utils import check_and_clean_data, \
8 | check_and_clean_validation_data
9 |
10 |
11 | class EncoderRegressor(BaseDeepRegressor, EncoderNetwork):
12 | """Encoder
13 |
14 | Adapted from the implementation of Fawaz et al.
15 |
16 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/encoder.py
17 |
18 | Network originally defined in:
19 |
20 | @article{serra2018towards,
21 | title={Towards a universal neural network encoder for time series},
22 | author={Serrà, J and Pascual, S and Karatzoglou, A},
23 | journal={Artif Intell Res Dev Curr Chall New Trends Appl},
24 | volume={308},
25 | pages={120},
26 | year={2018}
27 | }
28 | :param nb_epochs: int, the number of epochs to train the model
29 | :param batch_size: int, the number of samples per gradient
30 | update
31 | :param callbacks: list of tf.keras.callbacks.Callback objects
32 | :param random_state: int, seed to any needed random actions
33 | :param verbose: boolean, whether to output extra information
34 | :param model_name: string, the name of this model for printing and
35 | file writing purposes
36 | :param model_save_directory: string, if not None; location to save the
37 | trained keras model in hdf5 format
38 | """
39 |
40 | def __init__(
41 | self,
42 | nb_epochs=2000,
43 | batch_size=16,
44 | callbacks=None,
45 | random_state=0,
46 | verbose=False,
47 | model_name="encoder_regressor",
48 | model_save_directory=None,
49 | ):
50 | super(EncoderRegressor, self).__init__(
51 | model_name=model_name, model_save_directory=model_save_directory
52 | )
53 |
54 | self.verbose = verbose
55 | self._is_fitted = False
56 |
57 | # calculated in fit
58 | self.input_shape = None
59 | self.history = None
60 |
61 | # predefined
62 | self.nb_epochs = nb_epochs
63 | self.batch_size = batch_size
64 |
65 | self.callbacks = callbacks
66 | self.random_state = random_state
67 | self.verbose = verbose
68 |
69 | self._is_fitted = False
70 |
71 | def build_model(self, input_shape, **kwargs):
72 | """
73 | Construct a compiled, un-trained, keras model that is ready for
74 | training
75 | ----------
76 | input_shape : tuple
77 | The shape of the data fed into the input layer
78 | Returns
79 | -------
80 | output : a compiled Keras Model
81 | """
82 | input_layer, output_layer = self.build_network(input_shape, **kwargs)
83 | output_layer = keras.layers.Dense(units=1)(output_layer)
84 |
85 | model = keras.models.Model(inputs=input_layer, outputs=output_layer)
86 | model.compile(
87 | loss="mean_squared_error",
88 | optimizer=keras.optimizers.Adam(0.00001),
89 | metrics=["mean_squared_error"],
90 | )
91 |
92 | self.callbacks = self.callbacks or []  # keep any user-supplied callbacks
93 |
94 | return model
95 |
96 | def fit(self, X, y, input_checks=True, validation_X=None,
97 | validation_y=None, **kwargs):
98 | """
99 | Fit the regressor on the training set (X, y)
100 | ----------
101 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
102 | shape = (n_instances, series_length, n_dimensions)
103 | The training input samples. If a 2D array-like is passed,
104 | n_dimensions is assumed to be 1.
105 | y : array-like, shape = [n_instances]
106 | The training data target values.
107 | input_checks : boolean
108 | whether to check the X and y parameters
109 | validation_X : a nested pd.Dataframe, or array-like of shape =
110 | (n_instances, series_length, n_dimensions)
111 | The validation samples. If a 2D array-like is passed,
112 | n_dimensions is assumed to be 1.
113 | Unless strictly defined by the user via callbacks (such as
114 | EarlyStopping), the presence or state of the validation
115 | data does not alter training in any way. Predictions at each epoch
116 | are stored in the model's fit history.
117 | validation_y : array-like, shape = [n_instances]
118 | The validation target values.
119 | Returns
120 | -------
121 | self : object
122 | """
123 | if self.callbacks is None:
124 | self.callbacks = []
125 |
126 | X = check_and_clean_data(X, y, input_checks=input_checks)
127 |
128 | validation_data = \
129 | check_and_clean_validation_data(validation_X, validation_y)
130 |
131 | # ignore the number of instances, X.shape[0],
132 | # just want the shape of each instance
133 | self.input_shape = X.shape[1:]
134 |
135 | self.model = self.build_model(self.input_shape)
136 |
137 | if self.verbose:
138 | self.model.summary()
139 |
140 | self.history = self.model.fit(
141 | X,
142 | y,
143 | batch_size=self.batch_size,
144 | epochs=self.nb_epochs,
145 | verbose=self.verbose,
146 | callbacks=self.callbacks,
147 | validation_data=validation_data,
148 | )
149 |
150 | self.save_trained_model()
151 | self._is_fitted = True
152 |
153 | return self
154 |
--------------------------------------------------------------------------------
/sktime_dl/regression/_fcn.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large, Withington"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.regression._regressor import BaseDeepRegressor
6 | from sktime_dl.networks._fcn import FCNNetwork
7 | from sktime_dl.utils import check_and_clean_data, \
8 | check_and_clean_validation_data
9 |
10 |
11 | class FCNRegressor(BaseDeepRegressor, FCNNetwork):
12 | """Fully convolutional neural network (FCN).
13 |
14 | Adapted from the implementation of Fawaz et al.
15 |
16 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/fcn.py
17 |
18 | Network originally defined in:
19 |
20 | @inproceedings{wang2017time, title={Time series classification from
21 | scratch with deep neural networks: A strong baseline}, author={Wang,
22 | Zhiguang and Yan, Weizhong and Oates, Tim}, booktitle={2017
23 | International joint conference on neural networks (IJCNN)}, pages={
24 | 1578--1585}, year={2017}, organization={IEEE} }
25 |
26 | :param nb_epochs: int, the number of epochs to train the model
27 | :param batch_size: int, the number of samples per gradient
28 | update
29 | :param callbacks: list of tf.keras.callbacks.Callback objects
30 | :param random_state: int, seed to any needed random actions
31 | :param verbose: boolean, whether to output extra information
32 | :param model_name: string, the name of this model for printing and
33 | file writing purposes
34 | :param model_save_directory: string, if not None; location to save
35 | the trained keras model in hdf5 format
36 | """
37 |
38 | def __init__(
39 | self,
40 | nb_epochs=2000,
41 | batch_size=16,
42 | callbacks=None,
43 | random_state=0,
44 | verbose=False,
45 | model_name="fcn_regressor",
46 | model_save_directory=None,
47 | ):
48 | super(FCNRegressor, self).__init__(
49 | model_name=model_name, model_save_directory=model_save_directory
50 | )
51 |
52 | self.verbose = verbose
53 | self._is_fitted = False
54 |
55 | # calculated in fit
56 | self.input_shape = None
57 | self.history = None
58 |
59 | # predefined
60 | self.nb_epochs = nb_epochs
61 | self.batch_size = batch_size
62 |
63 | self.callbacks = callbacks
64 | self.random_state = random_state
65 | self.verbose = verbose
66 | self._is_fitted = False
67 |
68 | def build_model(self, input_shape, **kwargs):
69 | """
70 | Construct a compiled, un-trained, keras model that is ready for
71 | training
72 | ----------
73 | input_shape : tuple
74 | The shape of the data fed into the input layer
75 | Returns
76 | -------
77 | output : a compiled Keras Model
78 | """
79 | input_layer, output_layer = self.build_network(input_shape, **kwargs)
80 |
81 | output_layer = keras.layers.Dense(units=1)(output_layer)
82 |
83 | model = keras.models.Model(inputs=input_layer, outputs=output_layer)
84 |
85 | model.compile(
86 | loss="mean_squared_error",
87 | optimizer=keras.optimizers.Adam(),
88 | metrics=["mean_squared_error"],
89 | )
90 |
91 | # if user hasn't provided a custom ReduceLROnPlateau via init
92 | # already, add the default from literature
93 | if self.callbacks is None:
94 | self.callbacks = []
95 |
96 | if not any(
97 | isinstance(callback, keras.callbacks.ReduceLROnPlateau)
98 | for callback in self.callbacks
99 | ):
100 | reduce_lr = keras.callbacks.ReduceLROnPlateau(
101 | monitor="loss", factor=0.5, patience=50, min_lr=0.0001
102 | )
103 | self.callbacks.append(reduce_lr)
104 |
105 | return model
106 |
107 | def fit(self, X, y, input_checks=True, validation_X=None,
108 | validation_y=None, **kwargs):
109 | """
110 | Fit the regressor on the training set (X, y)
111 | ----------
112 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
113 | shape = (n_instances, series_length, n_dimensions)
114 | The training input samples. If a 2D array-like is passed,
115 | n_dimensions is assumed to be 1.
116 | y : array-like, shape = [n_instances]
117 | The training data target values.
118 | input_checks : boolean
119 | whether to check the X and y parameters
120 | validation_X : a nested pd.Dataframe, or array-like of shape =
121 | (n_instances, series_length, n_dimensions)
122 | The validation samples. If a 2D array-like is passed,
123 | n_dimensions is assumed to be 1.
124 | Unless strictly defined by the user via callbacks (such as
125 | EarlyStopping), the presence or state of the validation
126 | data does not alter training in any way. Predictions at each epoch
127 | are stored in the model's fit history.
128 | validation_y : array-like, shape = [n_instances]
129 | The validation target values.
130 | Returns
131 | -------
132 | self : object
133 | """
134 | X = check_and_clean_data(X, y, input_checks=input_checks)
135 |
136 | validation_data = \
137 | check_and_clean_validation_data(validation_X, validation_y)
138 |
139 | # ignore the number of instances, X.shape[0],
140 | # just want the shape of each instance
141 | self.input_shape = X.shape[1:]
142 |
143 | self.batch_size = int(max(1, min(X.shape[0] / 10, self.batch_size)))
144 |
145 | self.model = self.build_model(self.input_shape)
146 |
147 | if self.verbose:
148 | self.model.summary()
149 |
150 | self.history = self.model.fit(
151 | X,
152 | y,
153 | batch_size=self.batch_size,
154 | epochs=self.nb_epochs,
155 | verbose=self.verbose,
156 | callbacks=self.callbacks,
157 | validation_data=validation_data,
158 | )
159 |
160 | self.save_trained_model()
161 | self._is_fitted = True
162 |
163 | return self
164 |
--------------------------------------------------------------------------------
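Because build_model only appends the literature-default ReduceLROnPlateau when none is present, a schedule supplied through the constructor takes precedence. A sketch (the factor/patience values here are arbitrary, not from the library):

    from tensorflow import keras
    from sktime_dl.regression import FCNRegressor

    # a more aggressive schedule than the default added in build_model
    reduce_lr = keras.callbacks.ReduceLROnPlateau(
        monitor="loss", factor=0.2, patience=20, min_lr=0.00001
    )
    reg = FCNRegressor(nb_epochs=100, callbacks=[reduce_lr])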
/sktime_dl/regression/_lstm.py:
--------------------------------------------------------------------------------
1 | __author__ = "Withington"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.regression._regressor import BaseDeepRegressor
6 | from sktime_dl.networks._lstm import LSTMNetwork
7 | from sktime_dl.utils import check_and_clean_data, \
8 | check_and_clean_validation_data
9 |
10 |
11 | class LSTMRegressor(BaseDeepRegressor, LSTMNetwork):
12 | """ Long Short-Term Memory (LSTM)
13 |
14 | Adapted from the implementation of Brownlee, J. (2018)
15 |
16 | https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/
17 | """
18 |
19 | def __init__(
20 | self,
21 | nb_epochs=200,
22 | batch_size=16,
23 | units=[50, 50],
24 | random_state=0,
25 | verbose=False,
26 | model_name="lstm_regressor",
27 | model_save_directory=None
28 | ):
29 | """
30 | :param nb_epochs: int, the number of epochs to train the model
31 | :param batch_size: int, the number of samples per gradient update.
32 | :param units: list of int, length 2, the number of units in each LSTM layer
33 | :param random_state: int, seed to any needed random actions
34 | """
35 | super(LSTMRegressor, self).__init__(
36 | model_save_directory=model_save_directory,
37 | model_name=model_name
38 | )
39 | self.nb_epochs = nb_epochs
40 | self.batch_size = batch_size
41 | self.units = units
42 | self.random_state = random_state
43 | self.verbose = verbose
44 |
45 | self._is_fitted = False
46 |
47 | def build_model(self, input_shape, **kwargs):
48 | """
49 | Construct a compiled, un-trained, keras model
50 | ----------
51 | input_shape : tuple
52 | The shape of the data fed into the input layer
53 | Returns
54 | -------
55 | output : a compiled Keras Model
56 | """
57 | input_layer, output_layer = self.build_network(input_shape, **kwargs)
58 | output_layer = keras.layers.Dense(units=1)(output_layer)
59 | model = keras.models.Model(inputs=input_layer, outputs=output_layer)
60 |
61 | model.compile(
62 | loss='mean_squared_error',
63 | optimizer=keras.optimizers.Adam(),
64 | metrics=['mean_squared_error'])
65 | return model
66 |
67 | def fit(self, X, y, input_checks=True, validation_X=None,
68 | validation_y=None, **kwargs):
69 | """
70 | Fit the regressor on the training set (X, y)
71 | ----------
72 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
73 | shape = (n_instances, series_length, n_dimensions)
74 | The training input samples. If a 2D array-like is passed,
75 | n_dimensions is assumed to be 1.
76 | y : array-like, shape = [n_instances]
77 | The training data target values.
78 | input_checks : boolean
79 | whether to check the X and y parameters
80 | validation_X : a nested pd.Dataframe, or array-like of shape =
81 | (n_instances, series_length, n_dimensions)
82 | The validation samples. If a 2D array-like is passed,
83 | n_dimensions is assumed to be 1.
84 | Unless strictly defined by the user via callbacks (such as
85 | EarlyStopping), the presence or state of the validation
86 | data does not alter training in any way. Predictions at each epoch
87 | are stored in the model's fit history.
88 | validation_y : array-like, shape = [n_instances]
89 | The validation target values.
90 | Returns
91 | -------
92 | self : object
93 | """
94 | X = check_and_clean_data(X, y, input_checks=input_checks)
95 |
96 | validation_data = \
97 | check_and_clean_validation_data(validation_X, validation_y)
98 |
99 | self.input_shape = X.shape[1:]
100 |
101 | self.model = self.build_model(self.input_shape)
102 |
103 | if self.verbose:
104 | self.model.summary()
105 |
106 | self.history = self.model.fit(
107 | X,
108 | y,
109 | batch_size=self.batch_size,
110 | epochs=self.nb_epochs,
111 | verbose=self.verbose,
112 | validation_data=validation_data,
113 | )
114 |
115 | self.save_trained_model()
116 | self._is_fitted = True
117 |
118 | return self
119 |
--------------------------------------------------------------------------------
/sktime_dl/regression/_lstmfcn.py:
--------------------------------------------------------------------------------
1 | __author__ = "Jack Russon"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.regression._regressor import BaseDeepRegressor
6 | from sktime_dl.networks._lstmfcn import LSTMFCNNetwork
7 | from sktime_dl.utils import check_and_clean_data, \
8 | check_and_clean_validation_data
9 |
10 |
11 | class LSTMFCNRegressor(BaseDeepRegressor, LSTMFCNNetwork):
12 | """
13 |
14 | """
15 |
16 | def __init__(
17 | self,
18 | nb_epochs=120,
19 | batch_size=16,
20 | kernel_sizes=[8, 5, 3],
21 | filter_sizes=[128, 256, 128],
22 | NUM_CELLS=8,
23 | Attention=False,
24 | dropout=0.8,
25 | callbacks=[],
26 | random_state=0,
27 | verbose=False,
28 | model_name="lstmfcn_regressor",
29 | model_save_directory=None,
30 | ):
31 | """
32 | :param nb_epochs: int, the number of epochs to train the model
33 | :param batch_size: int, the number of samples per gradient update
34 | :param kernel_sizes: list of ints, specifying the length of the 1D convolution
35 | windows
36 | :param filter_sizes: list of int, length 3, the number of filters for
37 | each conv layer
38 | :param NUM_CELLS: int, the number of LSTM cells
39 | :param Attention: boolean, whether to use an attention LSTM layer
40 | :param dropout: float, dropout rate applied after the LSTM block
41 | :param callbacks: not used
39 | :param random_state: int, seed to any needed random actions
40 | :param verbose: boolean, whether to output extra information
41 | :param model_name: string, the name of this model for printing and file
42 | writing purposes
43 | :param model_save_directory: string, if not None; location to save the
44 | trained keras model in hdf5 format
45 | """
46 | super(LSTMFCNRegressor, self).__init__(
47 | model_name=model_name, model_save_directory=model_save_directory
48 | )
49 |
50 | self.verbose = verbose
51 | self._is_fitted = False
52 |
53 | # calculated in fit
54 | self.input_shape = None
55 | self.model = None
56 | self.history = None
57 |
58 | # predefined
59 | self.nb_epochs = nb_epochs
60 | self.batch_size = batch_size
61 | self.kernel_sizes = kernel_sizes
62 | self.filter_sizes = filter_sizes
63 | self.NUM_CELLS = NUM_CELLS
64 | self.dropout = dropout
65 | self.attention = Attention
66 |
67 | self.callbacks = callbacks
68 | self.random_state = random_state
69 | self.verbose = verbose
70 | self._is_fitted = False
71 |
72 | def build_model(self, input_shape, **kwargs):
73 | """
74 | Construct a compiled, un-trained, keras model that is ready for
75 | training
76 | ----------
77 | input_shape : tuple
78 | The shape of the data fed into the input layer
79 | Returns
80 | -------
81 | output : a compiled Keras Model
82 | """
83 | input_layers, output_layer = self.build_network(input_shape, **kwargs)
84 |
85 | output_layer = keras.layers.Dense(units=1)(output_layer)
86 |
87 | model = keras.models.Model(inputs=input_layers, outputs=output_layer)
88 |
89 | model.compile(
90 | loss="mean_squared_error",
91 | optimizer=keras.optimizers.SGD(
92 | lr=0.01, momentum=0.9, decay=0.0005
93 | ),
94 | metrics=["mean_squared_error"],
95 | )
96 |
97 | # file_path = self.output_directory + 'best_model.hdf5'
98 | # model_checkpoint = keras.callbacks.ModelCheckpoint(
99 | # filepath=file_path,
100 | # monitor='val_loss',
101 | # save_best_only=True)
102 | # self.callbacks = [model_checkpoint]
103 | self.callbacks = []
104 |
105 | return model
106 |
107 | def fit(self, X, y, input_checks=True, validation_X=None,
108 | validation_y=None, **kwargs):
109 | """
110 | Fit the regressor on the training set (X, y)
111 | ----------
112 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
113 | shape = (n_instances, series_length, n_dimensions)
114 | The training input samples. If a 2D array-like is passed,
115 | n_dimensions is assumed to be 1.
116 | y : array-like, shape = [n_instances]
117 | The training data target values.
118 | input_checks : boolean
119 | whether to check the X and y parameters
120 | validation_X : a nested pd.Dataframe, or array-like of shape =
121 | (n_instances, series_length, n_dimensions)
122 | The validation samples. If a 2D array-like is passed,
123 | n_dimensions is assumed to be 1.
124 | Unless strictly defined by the user via callbacks (such as
125 | EarlyStopping), the presence or state of the validation
126 | data does not alter training in any way. Predictions at each epoch
127 | are stored in the model's fit history.
128 | validation_y : array-like, shape = [n_instances]
129 | The validation target values.
130 | Returns
131 | -------
132 | self : object
133 | """
134 | X = check_and_clean_data(X, y, input_checks=input_checks)
135 |
136 | validation_data = \
137 | check_and_clean_validation_data(validation_X, validation_y)
138 |
139 | # ignore the number of instances, X.shape[0],
140 | # just want the shape of each instance
141 | self.input_shape = X.shape[1:]
142 |
143 |
144 | # note: unlike MCDCNNRegressor below, the LSTM-FCN network takes a
145 | # single input tensor, so neither the training nor the validation
146 | # data is split into per-channel arrays here (the network defines
147 | # no prepare_input method)
148 |
149 |
150 | self.model = self.build_model(self.input_shape)
151 |
152 | if self.verbose:
153 | self.model.summary()
154 |
155 | self.history = self.model.fit(
156 | X,
157 | y,
158 | batch_size=self.batch_size,
159 | epochs=self.nb_epochs,
160 | verbose=self.verbose,
161 | validation_data=validation_data,
162 | callbacks=self.callbacks,
163 | )
164 |
165 | self.save_trained_model()
166 | self._is_fitted = True
167 |
168 | return self
169 |
--------------------------------------------------------------------------------
/sktime_dl/regression/_mcdcnn.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large, Withington"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.regression._regressor import BaseDeepRegressor
6 | from sktime_dl.networks._mcdcnn import MCDCNNNetwork
7 | from sktime_dl.utils import check_and_clean_data, \
8 | check_and_clean_validation_data
9 |
10 |
11 | class MCDCNNRegressor(BaseDeepRegressor, MCDCNNNetwork):
12 | """Multi Channel Deep Convolutional Neural Network (MCDCNN).
13 |
14 | Adapted from the implementation of Fawaz et al.
15 |
16 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/mcdcnn.py
17 |
18 | Network originally defined in:
19 |
20 | @inproceedings{zheng2014time, title={Time series classification using
21 | multi-channels deep convolutional neural networks}, author={Zheng,
22 | Yi and Liu, Qi and Chen, Enhong and Ge, Yong and Zhao, J Leon},
23 | booktitle={International Conference on Web-Age Information Management},
24 | pages={298--310}, year={2014}, organization={Springer} }
25 | """
26 |
27 | def __init__(
28 | self,
29 | nb_epochs=120,
30 | batch_size=16,
31 | kernel_size=5,
32 | pool_size=2,
33 | filter_sizes=[8, 8],
34 | dense_units=732,
35 | callbacks=[],
36 | random_state=0,
37 | verbose=False,
38 | model_name="mcdcnn_regressor",
39 | model_save_directory=None,
40 | ):
41 | """
42 | :param nb_epochs: int, the number of epochs to train the model
43 | :param batch_size: int, the number of samples per gradient update
44 | :param kernel_size: int, specifying the length of the 1D convolution
45 | window
46 | :param pool_size: int, size of the max pooling windows
47 | :param filter_sizes: list of int, length 2, the number of filters for
48 | each conv layer
49 | :param dense_units: int, number of units in the penultimate dense layer
50 | :param callbacks: not used
51 | :param random_state: int, seed to any needed random actions
52 | :param verbose: boolean, whether to output extra information
53 | :param model_name: string, the name of this model for printing and file
54 | writing purposes
55 | :param model_save_directory: string, if not None; location to save the
56 | trained keras model in hdf5 format
57 | """
58 | super(MCDCNNRegressor, self).__init__(
59 | model_name=model_name, model_save_directory=model_save_directory
60 | )
61 |
62 | self.verbose = verbose
63 | self._is_fitted = False
64 |
65 | # calculated in fit
66 | self.input_shape = None
67 | self.model = None
68 | self.history = None
69 |
70 | # predefined
71 | self.nb_epochs = nb_epochs
72 | self.batch_size = batch_size
73 | self.kernel_size = kernel_size
74 | self.pool_size = pool_size
75 | self.filter_sizes = filter_sizes
76 | self.dense_units = dense_units
77 |
78 | self.callbacks = callbacks
79 | self.random_state = random_state
80 | self.verbose = verbose
81 | self._is_fitted = False
82 |
83 | def build_model(self, input_shape, **kwargs):
84 | """
85 | Construct a compiled, un-trained, keras model that is ready for
86 | training
87 | ----------
88 | input_shape : tuple
89 | The shape of the data fed into the input layer
90 | Returns
91 | -------
92 | output : a compiled Keras Model
93 | """
94 | input_layers, output_layer = self.build_network(input_shape, **kwargs)
95 |
96 | output_layer = keras.layers.Dense(units=1)(output_layer)
97 |
98 | model = keras.models.Model(inputs=input_layers, outputs=output_layer)
99 |
100 | model.compile(
101 | loss="mean_squared_error",
102 | optimizer=keras.optimizers.SGD(
103 | lr=0.01, momentum=0.9, decay=0.0005
104 | ),
105 | metrics=["mean_squared_error"],
106 | )
107 |
108 | # file_path = self.output_directory + 'best_model.hdf5'
109 | # model_checkpoint = keras.callbacks.ModelCheckpoint(
110 | # filepath=file_path,
111 | # monitor='val_loss',
112 | # save_best_only=True)
113 | # self.callbacks = [model_checkpoint]
114 | self.callbacks = []
115 |
116 | return model
117 |
118 | def fit(self, X, y, input_checks=True, validation_X=None,
119 | validation_y=None, **kwargs):
120 | """
121 | Fit the regressor on the training set (X, y)
122 | ----------
123 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
124 | shape = (n_instances, series_length, n_dimensions)
125 | The training input samples. If a 2D array-like is passed,
126 | n_dimensions is assumed to be 1.
127 | y : array-like, shape = [n_instances]
128 | The training data target values.
129 | input_checks : boolean
130 | whether to check the X and y parameters
131 | validation_X : a nested pd.Dataframe, or array-like of shape =
132 | (n_instances, series_length, n_dimensions)
133 | The validation samples. If a 2D array-like is passed,
134 | n_dimensions is assumed to be 1.
135 | Unless strictly defined by the user via callbacks (such as
136 | EarlyStopping), the presence or state of the validation
137 | data does not alter training in any way. Predictions at each epoch
138 | are stored in the model's fit history.
139 | validation_y : array-like, shape = [n_instances]
140 | The validation target values.
141 | Returns
142 | -------
143 | self : object
144 | """
145 | X = check_and_clean_data(X, y, input_checks=input_checks)
146 |
147 | validation_data = \
148 | check_and_clean_validation_data(validation_X, validation_y)
149 |
150 | # ignore the number of instances, X.shape[0],
151 | # just want the shape of each instance
152 | self.input_shape = X.shape[1:]
153 |
154 | X = self.prepare_input(X)
155 | if validation_data is not None:
156 | validation_data = (
157 | self.prepare_input(validation_data[0]),
158 | validation_data[1]
159 | )
160 |
161 | self.model = self.build_model(self.input_shape)
162 |
163 | if self.verbose:
164 | self.model.summary()
165 |
166 | self.history = self.model.fit(
167 | X,
168 | y,
169 | batch_size=self.batch_size,
170 | epochs=self.nb_epochs,
171 | verbose=self.verbose,
172 | validation_data=validation_data,
173 | callbacks=self.callbacks,
174 | )
175 |
176 | self.save_trained_model()
177 | self._is_fitted = True
178 |
179 | return self
180 |
--------------------------------------------------------------------------------
/sktime_dl/regression/_mlp.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large, Withington"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.regression._regressor import BaseDeepRegressor
6 | from sktime_dl.networks._mlp import MLPNetwork
7 | from sktime_dl.utils import check_and_clean_data, \
8 | check_and_clean_validation_data
9 |
10 |
11 | class MLPRegressor(BaseDeepRegressor, MLPNetwork):
12 | """Multi Layer Perceptron (MLP).
13 |
14 | Adapted from the implementation of Fawaz et al.
15 |
16 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/mlp.py
17 |
18 | Network originally defined in:
19 |
20 | @inproceedings{wang2017time,
21 | title={Time series classification from scratch with deep neural networks:
22 | A strong baseline},
23 | author={Wang, Zhiguang and Yan, Weizhong and Oates, Tim},
24 | booktitle={2017 International joint conference on neural networks
25 | (IJCNN)},
26 | pages={1578--1585},
27 | year={2017},
28 | organization={IEEE}
29 | }
30 | """
31 |
32 | def __init__(self,
33 | nb_epochs=2000,
34 | batch_size=16,
35 | callbacks=None,
36 | random_state=0,
37 | verbose=False,
38 | model_name="mlp_regressor",
39 | model_save_directory=None):
40 | """
41 | :param nb_epochs: int, the number of epochs to train the model
42 | :param batch_size: int, the number of samples per gradient update.
43 | :param callbacks: list of tf.keras.callbacks.Callback objects
44 | :param random_state: int, seed to any needed random actions
45 | :param verbose: boolean, whether to output extra information
46 | :param model_name: string, the name of this model for printing and file
47 | writing purposes
48 | :param model_save_directory: string, if not None; location to save the
49 | trained keras model in hdf5 format
50 | """
51 | super(MLPRegressor, self).__init__(
52 | model_save_directory=model_save_directory,
53 | model_name=model_name
54 | )
55 | self.verbose = verbose
56 | self._is_fitted = False
57 |
58 | # calculated in fit
59 | self.input_shape = None
60 | self.history = None
61 |
62 | # predefined
63 | self.nb_epochs = nb_epochs
64 | self.batch_size = batch_size
65 | self.callbacks = callbacks
66 | self.random_state = random_state
67 | self.verbose = verbose
68 |
69 | self._is_fitted = False
70 |
71 | def build_model(self, input_shape, **kwargs):
72 | """
73 | Construct a compiled, un-trained, keras model that is ready for
74 | training
75 | ----------
76 | input_shape : tuple
77 | The shape of the data fed into the input layer
78 | Returns
79 | -------
80 | output : a compiled Keras Model
81 | """
82 | input_layer, output_layer = self.build_network(input_shape, **kwargs)
83 | output_layer = keras.layers.Dense(units=1)(output_layer)
84 |
85 | model = keras.models.Model(inputs=input_layer, outputs=output_layer)
86 | model.compile(
87 | loss="mean_squared_error",
88 | optimizer=keras.optimizers.Adadelta(),
89 | metrics=["mean_squared_error"],
90 | )
91 |
92 | # if user hasn't provided a custom ReduceLROnPlateau via init already,
93 | # add the default from literature
94 | if self.callbacks is None:
95 | self.callbacks = []
96 |
97 | if not any(
98 | isinstance(callback, keras.callbacks.ReduceLROnPlateau)
99 | for callback in self.callbacks
100 | ):
101 | reduce_lr = keras.callbacks.ReduceLROnPlateau(
102 | monitor="loss", factor=0.5, patience=50, min_lr=0.0001
103 | )
104 | self.callbacks.append(reduce_lr)
105 |
106 | return model
107 |
108 | def fit(self, X, y, input_checks=True, validation_X=None,
109 | validation_y=None, **kwargs):
110 | """
111 | Fit the regressor on the training set (X, y)
112 | ----------
113 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
114 | shape = (n_instances, series_length, n_dimensions)
115 | The training input samples. If a 2D array-like is passed,
116 | n_dimensions is assumed to be 1.
117 | y : array-like, shape = [n_instances]
118 | The training data target values.
119 | input_checks : boolean
120 | whether to check the X and y parameters
121 | validation_X : a nested pd.Dataframe, or array-like of shape =
122 | (n_instances, series_length, n_dimensions)
123 | The validation samples. If a 2D array-like is passed,
124 | n_dimensions is assumed to be 1.
125 | Unless strictly defined by the user via callbacks (such as
126 | EarlyStopping), the presence or state of the validation
127 | data does not alter training in any way. Predictions at each epoch
128 | are stored in the model's fit history.
129 | validation_y : array-like, shape = [n_instances]
130 | The validation target values.
131 | Returns
132 | -------
133 | self : object
134 | """
135 | X = check_and_clean_data(X, y, input_checks=input_checks)
136 |
137 | validation_data = \
138 | check_and_clean_validation_data(validation_X, validation_y)
139 |
140 | # ignore the number of instances, X.shape[0], just want the shape of
141 | # each instance
142 | self.input_shape = X.shape[1:]
143 |
144 | self.batch_size = int(max(1, min(X.shape[0] / 10, self.batch_size)))
145 |
146 | self.model = self.build_model(self.input_shape)
147 |
148 | if self.verbose:
149 | self.model.summary()
150 |
151 | self.history = self.model.fit(
152 | X,
153 | y,
154 | batch_size=self.batch_size,
155 | epochs=self.nb_epochs,
156 | verbose=self.verbose,
157 | callbacks=self.callbacks,
158 | validation_data=validation_data,
159 | )
160 |
161 | self.save_trained_model()
162 | self._is_fitted = True
163 |
164 | return self
165 |
--------------------------------------------------------------------------------
/sktime_dl/regression/_regressor.py:
--------------------------------------------------------------------------------
1 | # Base class for regressors
2 |
3 | __author__ = "Withington, James Large"
4 |
5 | from sklearn.base import RegressorMixin
6 | from sktime.regression.base import BaseRegressor
7 |
8 | from sktime_dl.utils import check_and_clean_data
9 | from sktime_dl.utils import check_is_fitted
10 | from sktime_dl.utils import save_trained_model
11 |
12 |
13 | class BaseDeepRegressor(BaseRegressor, RegressorMixin):
14 | def __init__(self, model_name=None, model_save_directory=None):
15 | self.model_save_directory = model_save_directory
16 | self.model = None
17 | self.model_name = model_name
18 |
19 | def build_model(self, input_shape, **kwargs):
20 | """
21 | Construct a compiled, un-trained, keras model that is ready for
22 | training
23 |
24 | Parameters
25 | ----------
26 | input_shape : tuple
27 | The shape of the data fed into the input layer
28 |
29 | Returns
30 | -------
31 | output : a compiled Keras Model
32 | """
33 | raise NotImplementedError("this is an abstract method")
34 |
35 | def predict(self, X, input_checks=True, **kwargs):
36 | """
37 | Find regression estimate for all cases in X.
38 | Parameters
39 | ----------
40 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
41 | shape = (n_instances, series_length, n_dimensions)
42 | The input samples. If a 2D array-like is passed,
43 | n_dimensions is assumed to be 1.
44 | input_checks: boolean
45 | whether to check the X parameter
46 | Returns
47 | -------
48 | predictions : 1d numpy array
49 | array of predictions of each instance
50 | """
51 | check_is_fitted(self)
52 |
53 | X = check_and_clean_data(X, input_checks=input_checks)
54 |
55 | y_pred = self.model.predict(X, **kwargs)
56 |
57 | # flatten keras' (n_instances, 1) output to the documented 1d array
58 | y_pred = y_pred.ravel()
59 | return y_pred
60 |
61 | def save_trained_model(self):
62 | save_trained_model(
63 | self.model, self.model_save_directory, self.model_name
64 | )
65 |
--------------------------------------------------------------------------------
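BaseDeepRegressor leaves build_model abstract, and the concrete regressors in this package all pair it with a network mixin. A skeletal illustration of that structure (hypothetical class, not part of the library; a fit method in the style of the regressors above would still be required):

    from tensorflow import keras

    from sktime_dl.networks._mlp import MLPNetwork
    from sktime_dl.regression._regressor import BaseDeepRegressor

    class TinyMLPRegressor(BaseDeepRegressor, MLPNetwork):
        def build_model(self, input_shape, **kwargs):
            # headless network from the mixin, plus a single-unit head
            input_layer, output_layer = self.build_network(input_shape)
            output_layer = keras.layers.Dense(units=1)(output_layer)
            model = keras.models.Model(inputs=input_layer, outputs=output_layer)
            model.compile(loss="mean_squared_error",
                          optimizer=keras.optimizers.Adam())
            return model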
/sktime_dl/regression/_resnet.py:
--------------------------------------------------------------------------------
1 | __author__ = "James Large, Withington"
2 |
3 | from tensorflow import keras
4 |
5 | from sktime_dl.regression._regressor import BaseDeepRegressor
6 | from sktime_dl.networks._resnet import ResNetNetwork
7 | from sktime_dl.utils import check_and_clean_data, \
8 | check_and_clean_validation_data
9 |
10 |
11 | class ResNetRegressor(BaseDeepRegressor, ResNetNetwork):
12 | """Residual Network (ResNet).
13 |
14 | Adapted from the implementation of Fawaz et al.
15 |
16 | https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/resnet.py
17 |
18 | Network originally defined in:
19 |
20 | @inproceedings{wang2017time, title={Time series classification from
21 | scratch with deep neural networks: A strong baseline}, author={Wang,
22 | Zhiguang and Yan, Weizhong and Oates, Tim}, booktitle={2017
23 | International joint conference on neural networks (IJCNN)}, pages={
24 | 1578--1585}, year={2017}, organization={IEEE} }
25 | """
26 |
27 | def __init__(self,
28 | nb_epochs=1500,
29 | batch_size=16,
30 | callbacks=None,
31 | random_state=0,
32 | verbose=False,
33 | model_name="resnet_regressor",
34 | model_save_directory=None):
35 | """
36 | :param nb_epochs: int, the number of epochs to train the model
37 | :param batch_size: int, the number of samples per gradient
38 | update
39 | :param callbacks: list of tf.keras.callbacks.Callback objects
40 | :param random_state: int, seed to any needed random actions
41 | :param verbose: boolean, whether to output extra information
42 | :param model_name: string, the name of this model for printing and
43 | file writing purposes
44 | :param model_save_directory: string, if not None; location to save the
45 | trained keras model in hdf5 format
46 | """
47 |
48 | super(ResNetRegressor, self).__init__(
49 | model_name=model_name, model_save_directory=model_save_directory
50 | )
51 |
52 | self.nb_epochs = nb_epochs
53 | self.batch_size = batch_size
54 |
55 | self.callbacks = callbacks
56 | self.random_state = random_state
57 | self.verbose = verbose
58 |
59 | self._is_fitted = False
60 |
61 | def build_model(self, input_shape, **kwargs):
62 | """
63 | Construct a compiled, un-trained, keras model that is ready for
64 | training
65 | ----------
66 | input_shape : tuple
67 | The shape of the data fed into the input layer
68 | Returns
69 | -------
70 | output : a compiled Keras Model
71 | """
72 | save_best_model = False
73 |
74 | input_layer, output_layer = self.build_network(input_shape, **kwargs)
75 |
76 | output_layer = keras.layers.Dense(units=1)(output_layer)
77 |
78 | model = keras.models.Model(inputs=input_layer, outputs=output_layer)
79 |
80 | model.compile(
81 | loss="mean_squared_error",
82 | optimizer=keras.optimizers.Adam(),
83 | metrics=["mean_squared_error"],
84 | )
85 |
86 | # if user hasn't provided a custom ReduceLROnPlateau via init already,
87 | # add the default from literature
88 | if self.callbacks is None:
89 | self.callbacks = []
90 |
91 | if not any(
92 | isinstance(callback, keras.callbacks.ReduceLROnPlateau)
93 | for callback in self.callbacks
94 | ):
95 | reduce_lr = keras.callbacks.ReduceLROnPlateau(
96 | monitor="loss", factor=0.5, patience=50, min_lr=0.0001
97 | )
98 | self.callbacks.append(reduce_lr)
99 |
100 | # todo: could be moved out now that things are passed via init?
101 | # raises argument of defining common/generic callbacks
102 | # in base classes
103 | if save_best_model:
104 | file_path = self.model_save_directory + "best_model.hdf5"
105 | model_checkpoint = keras.callbacks.ModelCheckpoint(
106 | filepath=file_path, monitor="loss", save_best_only=True
107 | )
108 | self.callbacks.append(model_checkpoint)
109 |
110 | return model
111 |
112 | def fit(self, X, y, input_checks=True, validation_X=None,
113 | validation_y=None, **kwargs):
114 | """
115 | Fit the regressor on the training set (X, y)
116 | ----------
117 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
118 | shape = (n_instances, series_length, n_dimensions)
119 | The training input samples. If a 2D array-like is passed,
120 | n_dimensions is assumed to be 1.
121 | y : array-like, shape = [n_instances]
122 | The training data target values.
123 | input_checks : boolean
124 | whether to check the X and y parameters
125 | validation_X : a nested pd.Dataframe, or array-like of shape =
126 | (n_instances, series_length, n_dimensions)
127 | The validation samples. If a 2D array-like is passed,
128 | n_dimensions is assumed to be 1.
129 | Unless strictly defined by the user via callbacks (such as
130 | EarlyStopping), the presence or state of the validation
131 | data does not alter training in any way. Predictions at each epoch
132 | are stored in the model's fit history.
133 | validation_y : array-like, shape = [n_instances]
134 | The validation target values.
135 | Returns
136 | -------
137 | self : object
138 | """
139 | X = check_and_clean_data(X, y, input_checks=input_checks)
140 |
141 | validation_data = \
142 | check_and_clean_validation_data(validation_X, validation_y)
143 |
144 | # ignore the number of instances, X.shape[0], just want the shape of
145 | # each instance
146 | self.input_shape = X.shape[1:]
147 |
148 | self.batch_size = int(max(1, min(X.shape[0] / 10, self.batch_size)))
149 |
150 | self.model = self.build_model(self.input_shape)
151 |
152 | if self.verbose:
153 | self.model.summary()
154 |
155 | self.history = self.model.fit(
156 | X,
157 | y,
158 | batch_size=self.batch_size,
159 | epochs=self.nb_epochs,
160 | verbose=self.verbose,
161 | callbacks=self.callbacks,
162 | validation_data=validation_data,
163 | )
164 |
165 | self.save_trained_model()
166 | self._is_fitted = True
167 |
168 | return self
169 |
--------------------------------------------------------------------------------
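All the regressors above accept model_save_directory; when it is set, save_trained_model writes the fitted keras model to that directory in hdf5 format, keyed by model_name. A sketch (the exact file name is an assumption here; check sktime_dl.utils.save_trained_model for the convention used):

    from tensorflow import keras
    from sktime_dl.regression import ResNetRegressor

    reg = ResNetRegressor(nb_epochs=10, model_save_directory="./models/")
    # ... after reg.fit(X, y), the trained network can be reloaded directly:
    model = keras.models.load_model("./models/resnet_regressor.hdf5")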
/sktime_dl/regression/_rnn.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3 -u
2 | # coding: utf-8
3 |
4 | __author__ = ["Markus Löning"]
5 | __all__ = ["SimpleRNNRegressor"]
6 |
7 | from tensorflow.keras.callbacks import ReduceLROnPlateau
8 | from tensorflow.keras.layers import Dense
9 | from tensorflow.keras.layers import SimpleRNN
10 | from tensorflow.keras.models import Sequential
11 | from tensorflow.keras.optimizers import RMSprop
12 |
13 | from sktime_dl.networks._network import BaseDeepNetwork
14 | from sktime_dl.regression._regressor import BaseDeepRegressor
15 | from sktime_dl.utils import check_and_clean_data, \
16 | check_and_clean_validation_data
17 |
18 |
19 | class SimpleRNNRegressor(BaseDeepRegressor, BaseDeepNetwork):
20 | """Simple recurrent neural network
21 |
22 | References
23 | ----------
24 | ..[1] benchmark forecaster in M4 forecasting competition:
25 | https://github.com/Mcompetitions/M4-methods
26 | """
27 |
28 | def __init__(
29 | self,
30 | nb_epochs=100,
31 | batch_size=1,
32 | units=6,
33 | callbacks=None,
34 | random_state=0,
35 | verbose=0,
36 | model_name="simple_rnn_regressor",
37 | model_save_directory=None,
38 | ):
39 | self.nb_epochs = nb_epochs
40 | self.batch_size = batch_size
41 | self.verbose = verbose
42 | self.units = units
43 | self.callbacks = callbacks
44 | self.random_state = random_state
45 | super(SimpleRNNRegressor, self).__init__(
46 | model_name=model_name,
47 | model_save_directory=model_save_directory
48 | )
49 |
50 | def build_model(self, input_shape, **kwargs):
51 | model = Sequential(
52 | [
53 | SimpleRNN(
54 | self.units,
55 | input_shape=input_shape,
56 | activation="linear",
57 | use_bias=False,
58 | kernel_initializer="glorot_uniform",
59 | recurrent_initializer="orthogonal",
60 | bias_initializer="zeros",
61 | dropout=0.0,
62 | recurrent_dropout=0.0,
63 | ),
64 | Dense(1, use_bias=True, activation="linear"),
65 | ]
66 | )
67 | model.compile(loss="mean_squared_error", optimizer=RMSprop(lr=0.001))
68 |
69 | if self.callbacks is None:
70 | self.callbacks = []
71 |
72 | if not any(
73 | isinstance(callback, ReduceLROnPlateau)
74 | for callback in self.callbacks
75 | ):
76 | reduce_lr = ReduceLROnPlateau(
77 | monitor="loss", factor=0.5, patience=50, min_lr=0.0001
78 | )
79 | self.callbacks.append(reduce_lr)
80 | return model
81 |
82 | def fit(self, X, y, input_checks=True, validation_X=None,
83 | validation_y=None, **kwargs):
84 | """
85 | Fit the regressor on the training set (X, y)
86 | ----------
87 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
88 | shape = (n_instances, series_length, n_dimensions)
89 | The training input samples. If a 2D array-like is passed,
90 | n_dimensions is assumed to be 1.
91 | y : array-like, shape = [n_instances]
92 | The training data class labels.
93 | input_checks : boolean
94 | whether to check the X and y parameters
95 | validation_X : a nested pd.Dataframe, or array-like of shape =
96 | (n_instances, series_length, n_dimensions)
97 | The validation samples. If a 2D array-like is passed,
98 | n_dimensions is assumed to be 1.
99 | Unless strictly defined by the user via callbacks (such as
100 | EarlyStopping), the presence or state of the validation
101 | data does not alter training in any way. Predictions at each epoch
102 | are stored in the model's fit history.
103 | validation_y : array-like, shape = [n_instances]
104 | The validation class labels.
105 | Returns
106 | -------
107 | self : object
108 | """
109 | X = check_and_clean_data(X, y, input_checks=input_checks)
110 |
111 | validation_data = \
112 | check_and_clean_validation_data(validation_X, validation_y)
113 |
114 | self.input_shape = X.shape[1:]
115 | self.batch_size = int(max(1, min(X.shape[0] / 10, self.batch_size)))
116 |
117 | self.model = self.build_model(self.input_shape)
118 |
119 | if self.verbose:
120 | self.model.summary()
121 |
122 | self.history = self.model.fit(
123 | X,
124 | y,
125 | batch_size=self.batch_size,
126 | epochs=self.nb_epochs,
127 | verbose=self.verbose,
128 | callbacks=self.callbacks,
129 | validation_data=validation_data,
130 | )
131 | self.save_trained_model()
132 | self._is_fitted = True
133 | return self
134 |
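A short usage sketch for SimpleRNNRegressor, highlighting two behaviours visible in the code above: fit() clamps batch_size to at most a tenth of the training-set size, and build_model() appends a default ReduceLROnPlateau unless one was already supplied. Dataset and target construction follow this repository's test idiom.

import numpy as np
from tensorflow.keras.callbacks import ReduceLROnPlateau
from sktime.datasets import load_italy_power_demand
from sktime_dl.regression import SimpleRNNRegressor

X, _ = load_italy_power_demand(split="train", return_X_y=True)
X = X[:30]
# synthetic regression target: the first value of each series
y = np.array([X.iloc[i].iloc[0].iloc[0] for i in range(len(X))])

# supplying a ReduceLROnPlateau up front suppresses the default one
lr_schedule = ReduceLROnPlateau(monitor="loss", factor=0.5, patience=10)
reg = SimpleRNNRegressor(nb_epochs=20, batch_size=4, callbacks=[lr_schedule])
reg.fit(X, y)  # batch_size is clamped to int(min(30 / 10, 4)) == 3
print("fitted:", reg._is_fitted)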
--------------------------------------------------------------------------------
/sktime_dl/regression/_tapnet.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """Time Convolutional Neural Network (CNN) for classification"""
3 |
4 |
5 | __author__ = "Jack Russon"
6 |
7 | from sktime_dl.regression._regressor import BaseDeepRegressor
8 | from sktime_dl.networks._tapnet import TapNetNetwork
9 | from sktime_dl.utils import check_and_clean_data, \
10 | check_and_clean_validation_data
11 | from sklearn.utils import check_random_state
12 | from tensorflow import keras
13 |
14 |
15 | class TapNetRegressor(BaseDeepRegressor, TapNetNetwork):
16 | """
17 | Implentation of TapNet found at https://github.com/kdd2019-tapnet/tapnet
18 | Currently does not implement custom distance matrix loss function or class based self attention.
19 |
20 | @inproceedings{zhang2020tapnet,
21 | title={Tapnet: Multivariate time series classification with attentional prototypical network},
22 | author={Zhang, Xuchao and Gao, Yifeng and Lin, Jessica and Lu, Chang-Tien},
23 | booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},
24 | volume={34},
25 | number={04},
26 | pages={6845--6852},
27 | year={2020}
28 | }
29 | """
30 |
31 | def __init__(
32 | self,
33 | batch_size=16,
34 | dropout=0.5,
35 | filter_sizes=[256, 256, 128],
36 | kernel_size=[8, 5, 3],
37 | dilation=1,
38 | layers=[500, 300],
39 | use_rp=True,
40 | rp_params=[-1, 3],
41 | use_att=True,
42 | use_ss=False,
43 | use_metric=False,
44 | use_muse=False,
45 | use_lstm=True,
46 | use_cnn=True,
47 | random_state=1,
48 | padding='same',
49 | callbacks=None,
50 | verbose=False,
51 | nb_epochs=2000,
52 | model_name="TapNet",
53 | model_save_directory=None,
54 | is_fitted=False
55 | ):
56 | """
57 | :param kernel_size: int, specifying the length of the 1D convolution
58 | window
59 | :param avg_pool_size: int, size of the average pooling windows
60 | :param layers: int, size of dense layers
61 | :param filter_sizes: int, array of shape = (nb_conv_layers)
62 | :param random_state: int, seed to any needed random actions
63 | :param rp_params: array of ints, parameters for random permutation
64 | :param dropout: dropout rate
65 | """
66 | super(TapNetRegressor, self).__init__(
67 | model_save_directory=model_save_directory,
68 | model_name=model_name)
69 | self.batch_size = batch_size
70 | self.random_state = random_state
71 | self.kernel_size = kernel_size
72 | self.layers = layers
73 | self.filter_sizes = filter_sizes
74 | # Attention is not yet supported: the use_att argument is
75 | # currently ignored and self.use_att is forced to False
76 | self.use_att = False
77 | self.use_ss = use_ss
78 | self.dilation = dilation
79 | self.padding = padding
80 | self.nb_epochs = nb_epochs
81 | self.callbacks = callbacks
82 | self.verbose = verbose
83 |
84 | self._is_fitted = False
85 |
86 | self.dropout = dropout
87 | self.use_metric = use_metric
88 | self.use_muse = use_muse
89 | self.use_lstm = use_lstm
90 | self.use_cnn = use_cnn
91 |
92 | # parameters for random projection
93 | self.use_rp = use_rp
94 | self.rp_params = rp_params
95 |
96 | def build_model(self, input_shape, **kwargs):
97 | """
98 | Construct a compiled, un-trained, keras model that is ready for
99 | training
100 | Parameters
101 | ----------
102 | input_shape : tuple
103 | The shape of the data fed into the input layer
104 |
105 | Returns
106 | -------
107 | output : a compiled Keras Model, ending in a single linear
108 | output unit for regression
109 | """
110 | input_layer, output_layer = self.build_network(input_shape, **kwargs)
111 |
112 | output_layer = keras.layers.Dense(units=1)(output_layer)
113 |
114 | model = keras.models.Model(inputs=input_layer, outputs=output_layer)
115 | model.compile(
116 | loss="mean_squared_error",
117 | optimizer=keras.optimizers.Adam(),
118 | metrics=["mean_squared_error"]
119 | )
120 |
121 | return model
122 |
123 | def fit(self, X, y, input_checks=True, validation_X=None,
124 | validation_y=None, **kwargs):
125 | """
126 | Fit the classifier on the training set (X, y)
127 | ----------
128 | X : a nested pd.Dataframe, or (if input_checks=False) array-like of
129 | shape = (n_instances, series_length, n_dimensions)
130 | The training input samples. If a 2D array-like is passed,
131 | n_dimensions is assumed to be 1.
132 | y : array-like, shape = [n_instances]
133 | The training data class labels.
134 | input_checks : boolean
135 | whether to check the X and y parameters
136 | validation_X : a nested pd.Dataframe, or array-like of shape =
137 | (n_instances, series_length, n_dimensions)
138 | The validation samples. If a 2D array-like is passed,
139 | n_dimensions is assumed to be 1.
140 | Unless strictly defined by the user via callbacks (such as
141 | EarlyStopping), the presence or state of the validation
142 | data does not alter training in any way. Predictions at each epoch
143 | are stored in the model's fit history.
144 | validation_y : array-like, shape = [n_instances]
145 | The validation class labels.
146 | Returns
147 | -------
148 | self : object
149 | """
150 | self.random_state = check_random_state(self.random_state)
151 |
152 | if self.callbacks is None:
153 | self.callbacks = []
154 |
155 | X = check_and_clean_data(X, y, input_checks=input_checks)
156 |
157 |
158 | validation_data = \
159 | check_and_clean_validation_data(validation_X, validation_y)
160 |
161 | # ignore the number of instances, X.shape[0],
162 | # just want the shape of each instance
163 | self.input_shape = X.shape[1:]
164 |
165 | self.model = self.build_model(self.input_shape)
166 |
167 | if self.verbose:
168 | self.model.summary()
169 |
170 | self.history = self.model.fit(
171 | X,
172 | y,
173 | batch_size=self.batch_size,
174 | epochs=self.nb_epochs,
175 | verbose=self.verbose,
176 | callbacks=self.callbacks,
177 | validation_data=validation_data
178 | )
179 |
180 | self._is_fitted = True
181 | self.save_trained_model()
182 |
183 | return self
184 |
185 |
186 |
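A minimal multivariate sketch for TapNetRegressor, assuming it is exported from sktime_dl.regression like the other regressors in this package. The default nb_epochs of 2000 is deliberately overridden for a quick smoke run; the target is synthetic, mirroring the repository's test idiom.

import numpy as np
from sktime.datasets import load_basic_motions
from sktime_dl.regression import TapNetRegressor

X, _ = load_basic_motions(split="train", return_X_y=True)
# synthetic regression target: first value of the first dimension
y = np.array([X.iloc[i].iloc[0].iloc[0] for i in range(len(X))])

reg = TapNetRegressor(nb_epochs=3, batch_size=4)
reg.fit(X, y)
print(reg.predict(X).shape)  # one prediction per instance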
--------------------------------------------------------------------------------
/sktime_dl/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sktime/sktime-dl/1cc7e1d3ce98ccfd1165386fd5ccbd87b6c814e9/sktime_dl/tests/__init__.py
--------------------------------------------------------------------------------
/sktime_dl/tests/test_accuracy.py:
--------------------------------------------------------------------------------
1 | """
2 | Compare accuracy of the classifiers against results published at
3 | https://github.com/hfawaz/dl-4-tsc/blob/master/README.md
4 | Test that accuracy is higher than the published accuracy minus three
5 | standard deviations (also published) on the ItalyPowerDemand dataset.
6 | """
7 |
8 | import os
9 |
10 | import pytest
11 | from flaky import flaky
12 | from sktime.datasets import load_italy_power_demand
13 |
14 | from sktime_dl.classification import CNNClassifier
15 | from sktime_dl.classification import EncoderClassifier
16 | from sktime_dl.classification import FCNClassifier
17 | from sktime_dl.classification import InceptionTimeClassifier
18 | from sktime_dl.classification import MCDCNNClassifier
19 | from sktime_dl.classification import MCNNClassifier
20 | from sktime_dl.classification import MLPClassifier
21 | from sktime_dl.classification import ResNetClassifier
22 | from sktime_dl.classification import TLENETClassifier
23 | from sktime_dl.classification import TWIESNClassifier
24 |
25 |
26 | def is_not_value_error(err, *args):
27 | return not issubclass(err[0], ValueError)
28 |
29 |
30 | # number of std deviations of reported results to allow, where available
31 | ACCURACY_DEVIATION_THRESHOLD = 3
32 |
33 | # number of times to retry the test in case of catastrophic initialisation
34 | MAX_RUNS = 5
35 |
36 |
37 | def accuracy_test(network, lower=0.94, upper=1.0):
38 | """
39 | Test the classifier accuracy against expected lower and upper bounds.
40 | """
41 | print("Start accuracy_test:", network.__class__.__name__)
42 |
43 | X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
44 | X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
45 |
46 | network.fit(X_train, y_train)
47 |
48 | accuracy = network.score(X_test, y_test)
49 | print(network.__class__.__name__, "accuracy:", accuracy)
50 | assert lower < accuracy <= upper
51 |
52 |
53 | @pytest.mark.slow
54 | @flaky(max_runs=MAX_RUNS, rerun_filter=is_not_value_error)
55 | def test_cnn_accuracy():
56 | accuracy_test(
57 | network=CNNClassifier(),
58 | lower=0.955 - ACCURACY_DEVIATION_THRESHOLD * 0.004,
59 | )
60 |
61 |
62 | @pytest.mark.slow
63 | @flaky(max_runs=MAX_RUNS, rerun_filter=is_not_value_error)
64 | def test_encoder_accuracy():
65 | accuracy_test(
66 | network=EncoderClassifier(),
67 | lower=0.965 - ACCURACY_DEVIATION_THRESHOLD * 0.005,
68 | )
69 |
70 |
71 | @pytest.mark.slow
72 | @flaky(max_runs=MAX_RUNS, rerun_filter=is_not_value_error)
73 | def test_fcn_accuracy():
74 | accuracy_test(
75 | network=FCNClassifier(),
76 | lower=0.961 - ACCURACY_DEVIATION_THRESHOLD * 0.003,
77 | )
78 |
79 |
80 | @pytest.mark.slow
81 | @flaky(max_runs=MAX_RUNS, rerun_filter=is_not_value_error)
82 | def test_mcdcnn_accuracy():
83 | accuracy_test(
84 | network=MCDCNNClassifier(),
85 | lower=0.955 - ACCURACY_DEVIATION_THRESHOLD * 0.019,
86 | )
87 |
88 |
89 | @pytest.mark.skipif("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
90 | reason="Very slow running, causes Travis to time out.")
91 | @pytest.mark.slow
92 | @flaky(max_runs=MAX_RUNS, rerun_filter=is_not_value_error)
93 | def test_mcnn_accuracy():
94 | # Low accuracy is consistent with published results
95 | # https://github.com/hfawaz/dl-4-tsc/blob/master/README.md
96 | accuracy_test(
97 | network=MCNNClassifier(),
98 | lower=0.5 - ACCURACY_DEVIATION_THRESHOLD * 0.002,
99 | upper=0.5 + ACCURACY_DEVIATION_THRESHOLD * 0.002,
100 | )
101 |
102 |
103 | @pytest.mark.slow
104 | @pytest.mark.skipif(
105 | ("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true") and
106 | os.environ["TF_VERSION"] == "1.15" and
107 | os.environ["PYTHON_VERSION"] == "3.6",
108 | reason="Very slow running, causes Travis to time out."
109 | )
110 | @flaky(max_runs=MAX_RUNS, rerun_filter=is_not_value_error)
111 | def test_mlp_accuracy():
112 | accuracy_test(
113 | network=MLPClassifier(),
114 | lower=0.954 - ACCURACY_DEVIATION_THRESHOLD * 0.002,
115 | )
116 |
117 |
118 | @pytest.mark.slow
119 | @flaky(max_runs=MAX_RUNS, rerun_filter=is_not_value_error)
120 | def test_resnet_accuracy():
121 | accuracy_test(
122 | network=ResNetClassifier(),
123 | lower=0.963 - ACCURACY_DEVIATION_THRESHOLD * 0.004,
124 | )
125 |
126 |
127 | @pytest.mark.slow
128 | @flaky(max_runs=MAX_RUNS, rerun_filter=is_not_value_error)
129 | def test_tlenet_accuracy():
130 | # Accuracy is higher than the 0.490 in the published results
131 | # https://github.com/hfawaz/dl-4-tsc/blob/master/README.md
132 | accuracy_test(network=TLENETClassifier(), lower=0.90)
133 |
134 |
135 | @pytest.mark.slow
136 | @flaky(max_runs=MAX_RUNS, rerun_filter=is_not_value_error)
137 | def test_twiesn_accuracy():
138 | accuracy_test(
139 | network=TWIESNClassifier(),
140 | lower=0.88 - ACCURACY_DEVIATION_THRESHOLD * 0.022,
141 | )
142 |
143 |
144 | @pytest.mark.slow
145 | @flaky(max_runs=MAX_RUNS, rerun_filter=is_not_value_error)
146 | def test_inception_accuracy():
147 | accuracy_test(network=InceptionTimeClassifier(), lower=0.96)
148 |
149 |
150 | if __name__ == "__main__":
151 | test_cnn_accuracy()
152 |
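Every lower bound above follows the same published-mean-minus-k-sigma arithmetic; spelling out the CNN case:

# CNN on ItalyPowerDemand: published mean accuracy 0.955, std 0.004
published_mean, published_std = 0.955, 0.004
k = 3  # ACCURACY_DEVIATION_THRESHOLD above
lower = published_mean - k * published_std
print(lower)  # ~0.943: a run scoring below this fails, and flaky retries it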
--------------------------------------------------------------------------------
/sktime_dl/tests/test_classifiers.py:
--------------------------------------------------------------------------------
1 | from sktime.datasets import load_basic_motions
2 | from sktime.datasets import load_italy_power_demand
3 |
4 | from sktime_dl.classification import LSTMFCNClassifier
5 | from sktime_dl.classification import CNTCClassifier
6 | from sktime_dl.utils.model_lists import SMALL_NB_EPOCHS
7 | from sktime_dl.utils.model_lists import construct_all_classifiers
8 |
9 |
10 | def test_basic_univariate(network=CNTCClassifier(nb_epochs=SMALL_NB_EPOCHS)):
11 | """
12 | just a super basic test with ItalyPowerDemand,
13 | load data,
14 | construct classifier,
15 | fit,
16 | score
17 | """
18 |
19 | print("Start test_basic()")
20 |
21 | X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
22 | X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
23 |
24 | network.fit(X_train[:10], y_train[:10])
25 |
26 | print(network.score(X_test[:10], y_test[:10]))
27 | print("End test_basic()")
28 |
29 |
30 | def test_pipeline(network=CNTCClassifier(nb_epochs=SMALL_NB_EPOCHS)):
31 | """
32 | slightly more generalised test with sktime pipelines
33 | load data,
34 | construct pipeline with classifier,
35 | fit,
36 | score
37 | """
38 |
39 | print("Start test_pipeline()")
40 |
41 | from sklearn.pipeline import Pipeline
42 |
43 | # just a simple (useless) pipeline for the purposes of testing
44 | # that the keras network is compatible with that system
45 | steps = [("clf", network)]
46 | clf = Pipeline(steps)
47 |
48 | X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
49 | X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
50 |
51 | clf.fit(X_train[:10], y_train[:10])
52 |
53 | print(clf.score(X_test[:10], y_test[:10]))
54 | print("End test_pipeline()")
55 |
56 |
57 | def test_highLevelsktime(network=LSTMFCNClassifier(nb_epochs=SMALL_NB_EPOCHS)):
58 | """
59 | truly generalised test with sktime tasks/strategies
60 | load data, build task
61 | construct classifier, build strategy
62 | fit,
63 | score
64 | """
65 |
66 | print("start test_highLevelsktime()")
67 |
68 | from sktime.benchmarking.tasks import TSCTask
69 | from sktime.benchmarking.strategies import TSCStrategy
70 | from sklearn.metrics import accuracy_score
71 |
72 | train = load_italy_power_demand(split="train")
73 | test = load_italy_power_demand(split="test")
74 | task = TSCTask(target="class_val", metadata=train)
75 |
76 | strategy = TSCStrategy(network)
77 | strategy.fit(task, train.iloc[:10])
78 |
79 | y_pred = strategy.predict(test.iloc[:10])
80 | y_test = test.iloc[:10][task.target]
81 | print(accuracy_score(y_test, y_pred))
82 |
83 | print("End test_highLevelsktime()")
84 |
85 |
86 | def test_basic_multivariate(network=CNTCClassifier(nb_epochs=SMALL_NB_EPOCHS)):
87 | """
88 | just a super basic test with basicmotions,
89 | load data,
90 | construct classifier,
91 | fit,
92 | score
93 | """
94 | print("Start test_multivariate()")
95 | X_train, y_train = load_basic_motions(split="train", return_X_y=True)
96 | X_test, y_test = load_basic_motions(split="test", return_X_y=True)
97 |
98 | network.fit(X_train, y_train)
99 |
100 | print(network.score(X_test, y_test))
101 | print("End test_multivariate()")
102 |
103 |
104 | def test_all_networks():
105 | for name, network in construct_all_classifiers(SMALL_NB_EPOCHS).items():
106 | print("\n\t\t" + name + " testing started")
107 | # test_basic_univariate(network)
108 | test_basic_multivariate(network)
109 | # test_pipeline(network)
110 | test_highLevelsktime(network)
111 | print("\t\t" + name + " testing finished")
112 |
113 |
114 | if __name__ == "__main__":
115 | test_all_networks()
116 |
--------------------------------------------------------------------------------
/sktime_dl/tests/test_is_fitted.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | from sktime.exceptions import NotFittedError
4 | from sktime.datasets import load_italy_power_demand
5 | from sktime.regression.base import BaseRegressor
6 |
7 | from sktime_dl.classification import CNNClassifier
8 | from sktime_dl.utils.model_lists import SMALL_NB_EPOCHS
9 | from sktime_dl.utils.model_lists import construct_all_classifiers
10 | from sktime_dl.utils.model_lists import construct_all_regressors
11 |
12 |
13 | def test_is_fitted(network=CNNClassifier()):
14 | """
15 | testing that the networks correctly recognise when they are not fitted
16 | """
17 |
18 | X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
19 |
20 | if isinstance(network, BaseRegressor):
21 | # Create some regression values, taken from test_regressor
22 | y_train = np.zeros(len(y_train))
23 | for i in range(len(X_train)):
24 | y_train[i] = X_train.iloc[i].iloc[0].iloc[0]
25 |
26 | # try to predict without fitting: SHOULD fail
27 | with pytest.raises(NotFittedError):
28 | network.predict(X_train[:10])
29 |
30 |
31 | def test_all_networks():
32 | networks = {
33 | **construct_all_classifiers(SMALL_NB_EPOCHS),
34 | **construct_all_regressors(SMALL_NB_EPOCHS),
35 | }
36 |
37 | for name, network in networks.items():
38 | print("\n\t\t" + name + " is_fitted testing started")
39 | test_is_fitted(network)
40 | print("\t\t" + name + " is_fitted testing finished")
41 |
42 |
43 | if __name__ == "__main__":
44 | test_all_networks()
45 |
--------------------------------------------------------------------------------
/sktime_dl/tests/test_regressors.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | from sklearn.metrics import mean_squared_error
4 | from sktime.datasets import load_airline
5 | from sktime.datasets import load_italy_power_demand
6 | from sktime.forecasting.compose import RecursiveTimeSeriesRegressionForecaster
7 | from sktime.forecasting.model_selection import temporal_train_test_split
8 |
9 | from sktime_dl.regression import MLPRegressor, MCDCNNRegressor, CNTCRegressor,\
10 | LSTMFCNRegressor
11 |
12 | from sktime_dl.utils.model_lists import (SMALL_NB_EPOCHS,
13 | construct_all_regressors)
14 |
15 |
16 | def test_regressor(estimator=CNTCRegressor(nb_epochs=SMALL_NB_EPOCHS)):
17 | """
18 | test a regressor
19 | """
20 | print("Start test_regressor()")
21 | X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
22 | X_test, y_test = load_italy_power_demand(split="test", return_X_y=True)
23 |
24 | # Create some regression values
25 | y_train = np.zeros(len(y_train))
26 | for i in range(len(X_train)):
27 | y_train[i] = X_train.iloc[i].iloc[0].iloc[0]
28 | y_test = np.zeros(len(y_test))
29 | for i in range(len(X_test)):
30 | y_test[i] = X_test.iloc[i].iloc[0].iloc[0]
31 |
32 | estimator.fit(X_train[:10], y_train[:10])
33 | estimator.predict(X_test[:10])
34 | score = estimator.score(X_test[:10], y_test[:10])
35 |
36 | print("Estimator score:", score)
37 | print("End test_regressor()")
38 |
39 |
40 | def test_regressor_forecasting(
41 | regressor=CNTCRegressor(nb_epochs=SMALL_NB_EPOCHS), window_length=4
42 | ):
43 | """
44 | test a regressor used for forecasting
45 | """
46 | print("Start test_regressor_forecasting()")
47 |
48 | if isinstance(regressor, MCDCNNRegressor):
49 | regressor.nb_epochs = regressor.nb_epochs * 2
50 |
51 | # load univariate time series data
52 | y = load_airline()
53 | y_train, y_test = temporal_train_test_split(y, test_size=5)
54 | y_train = y_train[:window_length * 2]
55 |
56 | # specify forecasting horizon
57 | fh = np.arange(len(y_test)) + 1
58 |
59 | # solve forecasting task via reduction to time series regression
60 | forecaster = RecursiveTimeSeriesRegressionForecaster(
61 | estimator=regressor, window_length=window_length
62 | )
63 | forecaster.fit(y_train)
64 | y_pred = forecaster.predict(fh)
65 |
66 | try:
67 | rmse = np.sqrt(mean_squared_error(y_test, y_pred))
68 | print("RMSE:", rmse)
69 | except ValueError:
70 | if isinstance(regressor, MCDCNNRegressor):
71 | print(
72 | "Warning: MCDCNNRegressor produced NaN predictions. This is a "
73 | "known problem brought about by insufficient data/learning. "
74 | "For now, we accept that this particular network produced "
75 | "predictions at all (even NaNs) as passing for this "
76 | "particular test. Providing more data/epochs risks slowing "
77 | "down tests too much.")
78 | else:
79 | # unexpected error in all other cases
80 | raise
81 |
82 | print("End test_regressor_forecasting()")
83 |
84 |
85 | def test_all_regressors():
86 | for name, network in construct_all_regressors(SMALL_NB_EPOCHS).items():
87 | print("\n\t\t" + name + " testing started")
88 | test_regressor(network)
89 | print("\t\t" + name + " testing finished")
90 |
91 |
92 | @pytest.mark.parametrize(
93 | "name, network",
94 | construct_all_regressors(SMALL_NB_EPOCHS).items()
95 | )
96 | def test_all_forecasters(name, network):
97 | window_length = 8
98 | print("\n\t\t" + name + " forecasttesting \
99 | started")
100 | test_regressor_forecasting(network, window_length=window_length)
101 | print("\t\t" + name + " forecasttesting \
102 | finished")
103 |
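test_regressor_forecasting works by reduction: RecursiveTimeSeriesRegressionForecaster slides a window over the series, trains the regressor to map each window to the next value, then forecasts recursively. A hand-rolled sketch of the windowing step, for illustration only; the real implementation lives in sktime:

import numpy as np

def sliding_window_reduction(y, window_length):
    # each window of `window_length` past values becomes one training
    # instance whose target is the value immediately after the window
    X, targets = [], []
    for i in range(len(y) - window_length):
        X.append(y[i:i + window_length])
        targets.append(y[i + window_length])
    return np.array(X), np.array(targets)

y = np.arange(10.0)  # stand-in for load_airline()
X, t = sliding_window_reduction(y, window_length=4)
print(X[0], "->", t[0])  # [0. 1. 2. 3.] -> 4.0
print(X.shape, t.shape)  # (6, 4) (6,)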
--------------------------------------------------------------------------------
/sktime_dl/tests/test_validation.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import numpy as np
4 | import pytest
5 | from sktime.datasets import load_italy_power_demand
6 | from sktime.regression.base import BaseRegressor
7 |
8 | from sktime_dl.classification import MLPClassifier
9 | from sktime_dl.utils.model_lists import SMALL_NB_EPOCHS
10 | from sktime_dl.utils.model_lists import construct_all_classifiers
11 | from sktime_dl.utils.model_lists import construct_all_regressors
12 |
13 |
14 | @pytest.mark.skipif("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
15 | reason="Frequently causes travis to time out")
16 | @pytest.mark.slow
17 | def test_validation(network=MLPClassifier()):
18 | """
19 | testing that the networks log validation predictions in the history object
20 | """
21 |
22 | X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)
23 |
24 | X_train = X_train[:10]
25 | y_train = y_train[:10]
26 |
27 | if isinstance(network, BaseRegressor):
28 | # Create some regression values, taken from test_regressor
29 | y_train = np.zeros(len(y_train))
30 | for i in range(len(X_train)):
31 | y_train[i] = X_train.iloc[i].iloc[0].iloc[0]
32 |
33 | network.fit(X_train, y_train, validation_X=X_train, validation_y=y_train)
34 | hist = network.history.history
35 |
36 | assert 'val_loss' in hist
37 | assert isinstance(hist['val_loss'][0],
38 | (float, np.floating))
39 |
40 |
41 | @pytest.mark.skipif("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
42 | reason="Frequently causes travis to time out")
43 | @pytest.mark.slow
44 | def test_all_networks():
45 | networks = {
46 | **construct_all_classifiers(SMALL_NB_EPOCHS),
47 | **construct_all_regressors(SMALL_NB_EPOCHS),
48 | }
49 |
50 | # these networks do not support validation data as yet
51 | networks.pop('MCNNClassifier_quick')
52 | networks.pop('TWIESNClassifier_quick')
53 |
54 | # networks = [
55 | # MLPClassifier(nb_epochs=SMALL_NB_EPOCHS),
56 | # ResNetClassifier(nb_epochs=SMALL_NB_EPOCHS),
57 | # InceptionTimeClassifier(nb_epochs=SMALL_NB_EPOCHS),
58 | # ]
59 |
60 | for name, network in networks.items():
61 | print("\n\t\t" + name + " validation testing started")
62 | test_validation(network)
63 | print("\t\t" + name + " validation testing finished")
64 |
65 |
66 | if __name__ == "__main__":
67 | test_all_networks()
68 |
--------------------------------------------------------------------------------
/sktime_dl/utils/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = [
2 | "check_and_clean_data",
3 | "check_and_clean_validation_data",
4 | "check_is_fitted",
5 | "save_trained_model"
6 | ]
7 |
8 | from sktime_dl.utils._data import check_and_clean_data
9 | from sktime_dl.utils._data import check_and_clean_validation_data
10 | from sktime_dl.utils._models import check_is_fitted
11 | from sktime_dl.utils._models import save_trained_model
12 |
--------------------------------------------------------------------------------
/sktime_dl/utils/_data.py:
--------------------------------------------------------------------------------
1 | # Utility functions for data handling
2 |
3 | __author__ = "James Large"
4 |
5 | import pandas as pd
6 | from sktime.utils.data_processing import from_nested_to_3d_numpy
7 | from sktime.utils.validation.panel import check_X, check_X_y
8 |
9 |
10 | def check_and_clean_data(X, y=None, input_checks=True):
11 | '''
12 | Performs basic sktime data checks and prepares the train data for input to
13 | Keras models.
14 |
15 | Parameters
16 | ----------
17 | X: the train data
18 | y: the train labels
19 | input_checks: whether to perform the basic sktime checks
20 |
21 | Returns
22 | -------
23 | X
24 | '''
25 | if input_checks:
26 | if y is None:
27 | check_X(X)
28 | else:
29 | check_X_y(X, y)
30 |
31 | # want data in form: [instances = n][timepoints = m][dimensions = d]
32 | if isinstance(X, pd.DataFrame):
33 | if _is_nested_dataframe(X):
34 | if X.shape[1] > 1:
35 | # we have multiple columns, AND each cell contains a series,
36 | # so this is a multidimensional problem
37 | X = _multivariate_nested_df_to_array(X)
38 | else:
39 | # we have a single column containing a series, treat this as
40 | # a univariate problem
41 | X = _univariate_nested_df_to_array(X)
42 | else:
43 | # we have multiple columns each containing a primitive, treat as
44 | # univariate series
45 | X = _univariate_df_to_array(X)
46 |
47 | if len(X.shape) == 2:
48 | # add a dimension to make it multivariate with one dimension
49 | X = X.reshape(  # X is already a numpy array here; DataFrames were converted above
50 | X.shape[0], X.shape[1], 1
51 | ) # go from [n][m] to [n][m][d=1]
52 | # return transposed data to conform with current model formats
53 | return X.transpose(0, 2, 1)
54 |
55 |
56 | def check_and_clean_validation_data(validation_X, validation_y,
57 | label_encoder=None,
58 | onehot_encoder=None, input_checks=True):
59 | '''
60 | Performs basic sktime data checks and prepares the validation data for
61 | input to Keras models. Also provides functionality to encode the y labels
62 | using label encoders that should have already been fit to the train data.
63 |
64 | :param validation_X: required, validation data
65 | :param validation_y: optional, y labels for categorical conversion if
66 | needed
67 | :param label_encoder: if validation_y has been given,
68 | the encoder that has already been fit to the train data
69 | :param onehot_encoder: if validation_y has been given,
70 | the encoder that has already been fit to the train data
71 | :param input_checks: whether to perform the basic input structure checks
72 | :return: ( validation_X, validation_y ), or None if no data given
73 | '''
74 | if validation_X is not None:
75 | validation_X = check_and_clean_data(validation_X, validation_y,
76 | input_checks=input_checks)
77 | else:
78 | return None
79 |
80 | if label_encoder is not None and onehot_encoder is not None:
81 | validation_y = label_encoder.transform(validation_y)
82 | validation_y = validation_y.reshape(
83 | len(validation_y), 1)
84 | # transform only: the onehot encoder was already fit on the train data
85 | validation_y = onehot_encoder.transform(validation_y)
86 |
87 | return (validation_X, validation_y)
88 |
89 |
90 | def _is_nested_dataframe(X):
91 | return isinstance(X.iloc[0, 0], pd.Series)
92 |
93 |
94 | def _univariate_nested_df_to_array(X):
95 | return from_nested_to_3d_numpy(X)
96 |
97 |
98 | def _univariate_df_to_array(X):
99 | return X.to_numpy()
100 |
101 |
102 | def _multivariate_nested_df_to_array(X):
103 | return from_nested_to_3d_numpy(X)
104 |
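To make the shape contract concrete, a small sketch of what check_and_clean_data returns for a univariate nested DataFrame. The printed shape assumes from_nested_to_3d_numpy returns (n_instances, n_columns, n_timepoints), which the final transpose turns into [instances][timepoints][dimensions]:

import numpy as np
import pandas as pd
from sktime_dl.utils import check_and_clean_data

# 3 instances, one nested column, each cell holding a series of length 5
X_nested = pd.DataFrame(
    {"dim_0": [pd.Series(np.arange(5.0)) for _ in range(3)]}
)

X = check_and_clean_data(X_nested, input_checks=False)
print(X.shape)  # (3, 5, 1)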
--------------------------------------------------------------------------------
/sktime_dl/utils/_models.py:
--------------------------------------------------------------------------------
1 | # Model utility functions
2 |
3 | __author__ = "Withington, James Large"
4 |
5 | from inspect import isclass
6 | from pathlib import Path
7 |
8 | from sktime.exceptions import NotFittedError
9 |
10 |
11 | def save_trained_model(
12 | model, model_save_directory, model_name, save_format="h5"
13 | ):
14 | """
15 | Saves the model to an HDF file.
16 |
17 | Saved models can be reinstantiated via `keras.models.load_model`.
18 |
19 | Parameters
20 | ----------
21 | save_format: string
22 | 'h5'. Defaults to 'h5' currently but future releases
23 | will default to 'tf', the TensorFlow SavedModel format.
24 | """
25 | if save_format != "h5":
26 | raise ValueError(
27 | "save_format must be 'h5'. This is the only format "
28 | "currently supported."
29 | )
30 | if model_save_directory is not None:
31 | if model_name is None:
32 | file_name = "trained_model.hdf5"
33 | else:
34 | file_name = model_name + ".hdf5"
35 | path = Path(model_save_directory) / file_name
36 | model.save(
37 | path
38 | ) # Add save_format here upon migration from keras to tf.keras
39 |
40 |
41 | def check_is_fitted(estimator, msg=None):
42 | """Perform is_fitted validation for estimator.
43 |
44 | Adapted from sklearn.utils.validation.check_is_fitted
45 |
46 | Checks if the estimator is fitted by verifying the presence and
47 | truthiness of self._is_fitted
48 |
49 | Parameters
50 | ----------
51 | msg : string
52 | The default error message is, "This %(name)s instance is not fitted
53 | yet. Call 'fit' with appropriate arguments before using this
54 | estimator."
55 |
56 | For custom messages if "%(name)s" is present in the message string,
57 | it is substituted for the estimator name.
58 |
59 | Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
60 |
61 | Returns
62 | -------
63 | None
64 |
65 | Raises
66 | ------
67 | NotFittedError
68 | If the attributes are not found.
69 | """
70 | if isclass(estimator):
71 | raise TypeError("{} is a class, not an instance.".format(estimator))
72 | if msg is None:
73 | msg = (
74 | "This %(name)s instance is not fitted yet. Call 'fit' with "
75 | "appropriate arguments before using this estimator."
76 | )
77 |
78 | if not hasattr(estimator, "fit"):
79 | raise TypeError("%s is not an estimator instance." % (estimator))
80 |
81 | if not hasattr(estimator, "_is_fitted") or not estimator.is_fitted:
82 | raise NotFittedError(msg % {"name": type(estimator).__name__})
83 |
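save_trained_model writes an HDF5 file that round-trips through keras.models.load_model. A minimal sketch with a stand-in model; the directory and file name are illustrative:

from pathlib import Path
from tensorflow import keras
from sktime_dl.utils import save_trained_model

# a trivial stand-in for a fitted estimator's .model attribute
model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(4,))])
model.compile(loss="mean_squared_error", optimizer="adam")

save_dir = Path("/tmp/models")
save_dir.mkdir(parents=True, exist_ok=True)  # model.save needs the directory
save_trained_model(model, save_dir, "my_network")  # writes my_network.hdf5

reloaded = keras.models.load_model(save_dir / "my_network.hdf5")
reloaded.summary()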
--------------------------------------------------------------------------------
/sktime_dl/utils/model_lists.py:
--------------------------------------------------------------------------------
1 | from sktime_dl.classification import (CNNClassifier,
2 | EncoderClassifier,
3 | FCNClassifier,
4 | InceptionTimeClassifier,
5 | MCDCNNClassifier,
6 | TLENETClassifier,
7 | TWIESNClassifier,
8 | MCNNClassifier,
9 | MLPClassifier,
10 | ResNetClassifier,
11 | MACNNClassifier
12 | )
13 |
14 | from sktime_dl.regression import (CNNRegressor,
15 | EncoderRegressor,
16 | FCNRegressor,
17 | InceptionTimeRegressor,
18 | LSTMRegressor,
19 | MCDCNNRegressor,
20 | MLPRegressor,
21 | ResNetRegressor,
22 | SimpleRNNRegressor,
23 | TLENETRegressor
24 | )
25 |
26 | SMALL_NB_EPOCHS = 3
27 |
28 |
29 | def construct_all_classifiers(nb_epochs=None):
30 | """
31 | Creates a dictionary of all classification networks ready for testing
32 |
33 | Parameters
34 | ----------
35 | nb_epochs: int, if not None, value shall be set for all networks that accept it
36 |
37 | Returns
38 | -------
39 | dict mapping strings to sktime_dl BaseDeepClassifier implementations
40 | """
41 | if nb_epochs is not None:
42 | # potentially quicker versions for tests
43 | return {
44 | 'CNNClassifier_quick': CNNClassifier(nb_epochs=nb_epochs),
45 | 'EncoderClassifier_quick': EncoderClassifier(nb_epochs=nb_epochs),
46 | 'FCNClassifier_quick': FCNClassifier(nb_epochs=nb_epochs),
47 | 'MCDCNNClassifier_quick': MCDCNNClassifier(nb_epochs=nb_epochs),
48 | 'MCNNClassifier_quick': MCNNClassifier(nb_epochs=nb_epochs),
49 | 'MLPClassifier_quick': MLPClassifier(nb_epochs=nb_epochs),
50 | 'ResNetClassifier_quick': ResNetClassifier(nb_epochs=nb_epochs),
51 | 'TLENETClassifier_quick': TLENETClassifier(nb_epochs=nb_epochs),
52 | 'TWIESNClassifier_quick': TWIESNClassifier(),
53 | 'InceptionTimeClassifier_quick': InceptionTimeClassifier(
54 | nb_epochs=nb_epochs),
55 | "MACNNClassifier_quick": MACNNClassifier(nb_epochs=nb_epochs)
56 | }
57 | else:
58 | # the 'literature-conforming' versions
59 | return {
60 | 'CNNClassifier': CNNClassifier(),
61 | 'EncoderClassifier': EncoderClassifier(),
62 | 'FCNClassifier': FCNClassifier(),
63 | 'MCDCNNClassifier': MCDCNNClassifier(),
64 | 'MCNNClassifier': MCNNClassifier(),
65 | 'MLPClassifier': MLPClassifier(),
66 | 'ResNetClassifier': ResNetClassifier(),
67 | 'TLENETClassifier': TLENETClassifier(),
68 | 'TWIESNClassifier': TWIESNClassifier(),
69 | 'InceptionTimeClassifier': InceptionTimeClassifier(),
70 | "MACNNClassifier": MACNNClassifier()
71 | }
72 |
73 |
74 | def construct_all_regressors(nb_epochs=None):
75 | """
76 | Creates a dictionary of all regression networks ready for testing
77 |
78 | :param nb_epochs: int, if not None, value shall be set for all networks
79 | that accept it
80 | :return: dict mapping strings to sktime_dl BaseDeepRegressor implementations
81 | """
82 | if nb_epochs is not None:
83 | # potentially quicker versions for tests
84 | return {
85 | 'CNNRegressor_quick': CNNRegressor(nb_epochs=nb_epochs,
86 | kernel_size=3,
87 | avg_pool_size=1),
88 | 'EncoderRegressor_quick': EncoderRegressor(nb_epochs=nb_epochs),
89 | 'FCNRegressor_quick': FCNRegressor(nb_epochs=nb_epochs),
90 | 'LSTMRegressor_quick': LSTMRegressor(nb_epochs=nb_epochs),
91 | 'MLPRegressor_quick': MLPRegressor(nb_epochs=nb_epochs),
92 | 'MCDCNNRegressor_quick': MCDCNNRegressor(nb_epochs=nb_epochs,
93 | dense_units=1),
94 | 'ResNetRegressor_quick': ResNetRegressor(nb_epochs=nb_epochs),
95 | 'TLENETRegressor_quick': TLENETRegressor(nb_epochs=nb_epochs),
96 | 'InceptionTimeRegressor_quick': InceptionTimeRegressor(
97 | nb_epochs=nb_epochs),
98 | 'SimpleRNNRegressor_quick': SimpleRNNRegressor(
99 | nb_epochs=nb_epochs),
100 | }
101 | else:
102 | # the 'literature-conforming' versions
103 | return {
104 | 'CNNRegressor': CNNRegressor(),
105 | 'EncoderRegressor': EncoderRegressor(),
106 | 'FCNRegressor': FCNRegressor(),
107 | 'LSTMRegressor': LSTMRegressor(),
108 | 'MCDCNNRegressor': MCDCNNRegressor(),
109 | 'MLPRegressor': MLPRegressor(),
110 | 'ResNetRegressor': ResNetRegressor(),
111 | 'TLENETRegressor': TLENETRegressor(),
112 | 'InceptionTimeRegressor': InceptionTimeRegressor(),
113 | 'SimpleRNNRegressor': SimpleRNNRegressor(),
114 | }
115 |
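The two factories above back the repository's smoke tests; the intended usage is a fit loop over the quick variants. A sketch for the classifier side, on a small slice of ItalyPowerDemand:

from sktime.datasets import load_italy_power_demand
from sktime_dl.utils.model_lists import (SMALL_NB_EPOCHS,
                                         construct_all_classifiers)

X_train, y_train = load_italy_power_demand(split="train", return_X_y=True)

for name, clf in construct_all_classifiers(SMALL_NB_EPOCHS).items():
    clf.fit(X_train[:10], y_train[:10])
    print(name, "fitted OK")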
--------------------------------------------------------------------------------