├── .github └── workflows │ ├── python-publish-pypi.yml │ └── python-test-push.yml ├── .gitignore ├── .jupyter └── custom │ ├── Logo_TU_Dresden.svg │ └── custom.css ├── .readthedocs.yml ├── LICENSE ├── README.md ├── docs ├── Makefile ├── make.bat ├── requirements.txt └── source │ ├── _static │ └── img │ │ ├── Logo-STKS.jpg │ │ ├── Logo_IDLab_White.png │ │ ├── Logo_TU_Dresden.svg │ │ ├── SMWA_EFRE-ESF_Sachsen_Logokombi_quer_03.jpg │ │ ├── TUD_Logo_HKS41_114.png │ │ ├── echo_state_network.svg │ │ ├── getting_started_mackey_glass.svg │ │ ├── getting_started_mackey_glass_predicted.svg │ │ ├── logo_UGent_EN_RGB_2400_color-on-white.png │ │ ├── pyrcn_logo.pdf │ │ └── pyrcn_logo.svg │ ├── api │ ├── api.rst │ ├── pyrcn.base.rst │ ├── pyrcn.datasets.rst │ ├── pyrcn.echo_state_network.rst │ ├── pyrcn.extreme_learning_machine.rst │ ├── pyrcn.linear_model.rst │ └── pyrcn.rst │ ├── citation.rst │ ├── conf.py │ ├── development.rst │ ├── getting_started.rst │ ├── index.rst │ ├── installation.rst │ ├── introduction.rst │ └── tutorial.rst ├── environment.yml ├── examples ├── .gitignore ├── MNIST_regressors.ipynb ├── PyRCN_Intro.ipynb ├── PyRCN_Intro.py ├── Video_Door_state_Classification.ipynb ├── Video_Door_state_Classification.py ├── Video_Door_state_Classification_randomized_search.py ├── bip.py ├── digits-kmeans.py ├── digits.ipynb ├── digits.py ├── esn_impulse_responses.ipynb ├── experiments.py ├── f0_extraction.py ├── input-to-node.py ├── mackey-glass-t17.ipynb ├── mackey-glass-t17.py ├── mnist-elm.py ├── mnist_regressors.py ├── multipitch_tracking.ipynb ├── multipitch_tracking.py ├── musical_note_prediction.ipynb ├── musical_note_prediction.py ├── preprocessing-mnist.py ├── setup_local.ipynb ├── sklearn_autoencoder.ipynb ├── spoken_digit_recognition.ipynb ├── spoken_digit_recognition_auto_encoder.ipynb ├── stock-price-prediction.ipynb └── whitening.py ├── mypy.ini ├── pyproject.toml ├── pytest.ini ├── requirements.txt ├── setup.cfg ├── src └── pyrcn │ ├── __init__.py │ ├── _version.py │ ├── base │ ├── __init__.py │ ├── _activations.py │ ├── _base.py │ └── blocks │ │ ├── __init__.py │ │ ├── _input_to_node.py │ │ └── _node_to_node.py │ ├── datasets │ ├── __init__.py │ └── _base.py │ ├── echo_state_network │ ├── __init__.py │ └── _esn.py │ ├── extreme_learning_machine │ ├── __init__.py │ └── _elm.py │ ├── linear_model │ ├── __init__.py │ └── _incremental_regression.py │ ├── metrics │ ├── __init__.py │ ├── _classification.py │ └── _regression.py │ ├── model_selection │ ├── __init__.py │ └── _search.py │ ├── nn │ ├── __init__.py │ ├── _activations.py │ ├── _forward_layers.py │ ├── _recurrent_layers.py │ └── init.py │ ├── postprocessing │ ├── __init__.py │ └── _normal_distribution.py │ ├── preprocessing │ ├── __init__.py │ └── _coates.py │ ├── projection │ ├── __init__.py │ └── _value_projection.py │ └── util │ ├── __init__.py │ ├── _feature_extractor.py │ └── _util.py └── tests ├── test_activations.py ├── test_classification_metrics.py ├── test_coates.py ├── test_elm.py ├── test_esn.py ├── test_incremental_regression.py ├── test_input_to_node.py ├── test_model_selection.py ├── test_node_to_node.py ├── test_regression_metrics.py ├── test_util.py └── test_value_projection.py /.github/workflows/python-publish-pypi.yml: -------------------------------------------------------------------------------- 1 | # This workflow uses actions that are not certified by GitHub. 2 | # They are provided by a third-party and are governed by 3 | # separate terms of service, privacy policy, and support 4 | # documentation. 
5 | 6 | name: Upload Python Package 7 | 8 | on: 9 | release: 10 | types: [published] 11 | 12 | jobs: 13 | deploy: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v2 17 | - name: Set up Python 18 | uses: actions/setup-python@v2 19 | with: 20 | python-version: '3.x' 21 | - name: Install dependencies 22 | run: | 23 | python -m pip install --upgrade pip 24 | pip install build 25 | - name: Build package 26 | run: python -m build 27 | - name: Publish package 28 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 29 | with: 30 | user: __token__ 31 | password: ${{ secrets.PYPI_API_TOKEN }} 32 | -------------------------------------------------------------------------------- /.github/workflows/python-test-push.yml: -------------------------------------------------------------------------------- 1 | name: Python Check Push 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | 8 | runs-on: ubuntu-latest 9 | strategy: 10 | matrix: 11 | python-version: [3.9, 3.11, 3.12] # 3 12 | steps: 13 | - uses: actions/checkout@v2 14 | - name: Set up Python ${{ matrix.python-version }} 15 | uses: actions/setup-python@v2 16 | with: 17 | python-version: ${{ matrix.python-version }} 18 | - name: Install dependencies 19 | run: | 20 | python -m pip install --upgrade pip 21 | pip install pytest flake8 pytest-flake8-v2 mypy pytest-mypy pytest-cov \ 22 | types-setuptools 23 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi 24 | - name: Lint with flake8 25 | run: | 26 | # stop the build if there are Python syntax errors or undefined names 27 | flake8 . --count --exit-zero --show-source --statistics # --max-complexity=10 28 | - name: Test with pytest 29 | run: | 30 | pytest 31 | - name: Test installed PyRCN 32 | run: | 33 | pip install . 34 | pytest tests 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Created by https://www.toptal.com/developers/gitignore/api/jupyternotebooks,python 3 | # Edit at https://www.toptal.com/developers/gitignore?templates=jupyternotebooks,python 4 | 5 | ### JupyterNotebooks ### 6 | # gitignore template for Jupyter Notebooks 7 | # website: http://jupyter.org/ 8 | 9 | .ipynb_checkpoints 10 | */.ipynb_checkpoints/* 11 | 12 | # IPython 13 | profile_default/ 14 | ipython_config.py 15 | 16 | # Remove previous ipynb_checkpoints 17 | # git rm -r .ipynb_checkpoints/ 18 | 19 | ### Python ### 20 | # Byte-compiled / optimized / DLL files 21 | __pycache__/ 22 | *.py[cod] 23 | *$py.class 24 | 25 | # C extensions 26 | *.so 27 | 28 | # Distribution / packaging 29 | .Python 30 | build/ 31 | develop-eggs/ 32 | dist/ 33 | downloads/ 34 | eggs/ 35 | .eggs/ 36 | lib/ 37 | lib64/ 38 | parts/ 39 | sdist/ 40 | var/ 41 | wheels/ 42 | pip-wheel-metadata/ 43 | share/python-wheels/ 44 | *.egg-info/ 45 | .installed.cfg 46 | *.egg 47 | .pypirc 48 | MANIFEST 49 | 50 | # PyInstaller 51 | # Usually these files are written by a python script from a template 52 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
53 | *.manifest 54 | *.spec 55 | 56 | # Installer logs 57 | pip-log.txt 58 | pip-delete-this-directory.txt 59 | 60 | # Unit tests / coverage reports 61 | htmlcov/ 62 | .tox/ 63 | .nox/ 64 | .coverage 65 | .coverage.* 66 | .cache 67 | nosetests.xml 68 | coverage.xml 69 | *.cover 70 | *.py,cover 71 | .hypothesis/ 72 | .pytest_cache/ 73 | 74 | # Translations 75 | *.mo 76 | *.pot 77 | 78 | # Django stuff: 79 | *.log 80 | local_settings.py 81 | db.sqlite3 82 | db.sqlite3-journal 83 | 84 | # Flask stuff: 85 | instance/ 86 | .webassets-cache 87 | 88 | # Scrapy stuff: 89 | .scrapy 90 | 91 | # Sphinx documentation 92 | docs/_build/ 93 | 94 | # PyBuilder 95 | target/ 96 | 97 | # Jupyter Notebook 98 | 99 | # IPython 100 | 101 | # pyenv 102 | .python-version 103 | 104 | # pipenv 105 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 106 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 107 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 108 | # install all needed dependencies. 109 | #Pipfile.lock 110 | 111 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 112 | __pypackages__/ 113 | 114 | # Celery stuff 115 | celerybeat-schedule 116 | celerybeat.pid 117 | 118 | # SageMath parsed files 119 | *.sage.py 120 | 121 | # Environments 122 | .env 123 | .venv 124 | env/ 125 | venv/ 126 | venv-hpc-compat/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # End of https://www.toptal.com/developers/gitignore/api/jupyternotebooks,python 153 | /.vs 154 | /MusicNet/models 155 | /MusicNet 156 | /onset_detection 157 | /.idea 158 | 159 | .joblib 160 | /multidataset 161 | /RICSyN2015 162 | /f0 163 | /sequential_search.joblib 164 | /multi_dataset_inertias.pdf 165 | /inertias_normalized.csv 166 | /inertias.csv 167 | /TIMIT 168 | /sequential_search_speech_timit_random.joblib 169 | /sequential_search_speech_timit_kmeans_rec_rho.joblib 170 | /sequential_search_speech_timit_kmeans_rec.joblib 171 | /sequential_search_speech_timit_kmeans.joblib 172 | /sequential_search_speech_command_v0.01_digits_random.joblib 173 | /sequential_search_fsdd_random.joblib 174 | /sequential_search_fsdd_kmeans_rec_eigs.joblib 175 | /sequential_search_fsdd_kmeans_rec.joblib 176 | /sequential_search_fsdd_kmeans.joblib 177 | /docs/source/_build 178 | /junit 179 | /pyrcn/junit 180 | /tests/junit 181 | *.npz 182 | /examples/junit 183 | -------------------------------------------------------------------------------- /.jupyter/custom/custom.css: -------------------------------------------------------------------------------- 1 | @import url('https://fonts.googleapis.com/css?family=Open+Sans'); 2 | 3 | 4 | /* Change code font */ 5 | .CodeMirror pre { 6 | font-size: 9pt; 7 | } 8 | 9 | div.output pre{ 10 | font-size: 9pt; 11 | } 12 | 13 | div.output_html td{ 14 | font-size: 8pt; 15 | } 16 | 17 | div.prompt{ 18 | font-size: 8pt; 19 | } 20 | 21 | div.completions select{ 22 | font-size: 9pt; 23 | } 24 | 25 | div.container pre{ 26 | font-size: 9pt; 27 | } 28 | 29 | div.tooltiptext pre{ 30 | font-size: 8pt; 31 | } 32 | 33 | div.input_area { 34 | border-color: 
rgba(0,0,0,0.10); 35 | background: rbga(0,0,0,0.5); 36 | } 37 | 38 | div.text_cell { 39 | max-width: 100%; 40 | } 41 | 42 | div.text_cell_render { 43 | font-family: Open Sans; 44 | color: rgb(0, 48, 93); 45 | font-size: 11pt; 46 | line-height: 145%; /* added for some line spacing of text. */ 47 | } 48 | 49 | div.text_cell_render code{ 50 | font-size: 9pt; 51 | } 52 | 53 | div.text_cell_render h1, 54 | div.text_cell_render h2, 55 | div.text_cell_render h3, 56 | div.text_cell_render h4, 57 | div.text_cell_render h5, 58 | div.text_cell_render h6 { 59 | font-family: Open Sans, 'HelveticaNeue-Light'; 60 | font-weight: 300; 61 | } 62 | 63 | div.text_cell_render h1 { 64 | font-size: 24pt; 65 | } 66 | 67 | div.text_cell_render h2 { 68 | font-size: 18pt; 69 | } 70 | 71 | div.text_cell_render h3 { 72 | font-size: 14pt; 73 | } 74 | 75 | .rendered_html pre, 76 | .rendered_html code { 77 | font-size: medium; 78 | } 79 | 80 | .rendered_html ol { 81 | list-style:decimal; 82 | margin: 1em 2em; 83 | } 84 | 85 | .prompt.input_prompt { 86 | color: rgba(0,0,0,0.5); 87 | } 88 | 89 | .cell.command_mode.selected { 90 | border-color: rgba(0,0,0,0.1); 91 | } 92 | 93 | .cell.edit_mode.selected { 94 | border-color: rgba(0,0,0,0.15); 95 | box-shadow: 0px 0px 5px #f0f0f0; 96 | -webkit-box-shadow: 0px 0px 5px #f0f0f0; 97 | } 98 | 99 | div.output_scroll { 100 | -webkit-box-shadow: inset 0 2px 8px rgba(0,0,0,0.1); 101 | box-shadow: inset 0 2px 8px rgba(0,0,0,0.1); 102 | border-radious: 2px; 103 | } 104 | 105 | #menubar .navbar-inner { 106 | background: #fff; 107 | -webkit-box-shadow: none; 108 | box-shadow: none; 109 | border-radius: 0; 110 | border: none; 111 | font-family: Open Sans; 112 | font-weight: 400; 113 | } 114 | 115 | .navbar-fixed-top .navbar-inner, 116 | .navbar-static-top .navbar-inner { 117 | box-shadow: none; 118 | -webkit-box-shadow: none; 119 | border: none; 120 | } 121 | 122 | div#notebook_panel { 123 | box-shadow: none; 124 | -webkit-box-shadow: none; 125 | border-top: none; 126 | } 127 | 128 | div#notebook { 129 | border-top: 1px solid rgba(0,0,0,0.15); 130 | } 131 | 132 | #menubar .navbar .navbar-inner, 133 | .toolbar-inner { 134 | padding-left: 0; 135 | padding-right: 0; 136 | } 137 | 138 | #checkpoint_status, 139 | #autosave_status { 140 | color: rgba(0,0,0,0.5); 141 | } 142 | 143 | #header { 144 | font-family: Open Sans; 145 | } 146 | 147 | #notebook_name { 148 | font-weight: 200; 149 | } 150 | 151 | 152 | #ipython_notebook img{ 153 | display:block; 154 | background: url(./Logo_TU_Dresden.svg); 155 | background-repeat: no-repeat; 156 | background-size: contain; 157 | width: 233px; 158 | height: 33px; 159 | padding-left: 233px; 160 | -moz-box-sizing: border-box; 161 | box-sizing: border-box; 162 | } 163 | 164 | #notebook { 165 | background-color: rgb(0, 48, 93); 166 | /* 167 | background-repeat: no-repeat; 168 | background-attachment: fixed; 169 | background-position: right center; 170 | background-size: 390px; 171 | */ 172 | } 173 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-20.04 11 | tools: 12 | python: "3.9" 13 | # You can also specify other tool versions: 14 | # nodejs: "16" 15 | # rust: 
"1.55" 16 | # golang: "1.17" 17 | 18 | # Build documentation in the docs/ directory with Sphinx 19 | sphinx: 20 | configuration: docs/source/conf.py 21 | 22 | # If using Sphinx, optionally build your docs in additional formats such as PDF 23 | # formats: 24 | # - pdf 25 | 26 | # Optionally declare the Python requirements required to build your docs 27 | python: 28 | install: 29 | - requirements: docs/requirements.txt 30 | - method: pip 31 | path: . -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2020, Chair of Speech Technology and Cognitive Systems, TU Dresden 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | docutils<0.18 2 | sphinx-rtd-theme==0.5.1 3 | sphinx-copybutton==0.3.1 4 | pyrcn 5 | scikit-learn 6 | -------------------------------------------------------------------------------- /docs/source/_static/img/Logo-STKS.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PlasmaControl/PyRCN/453475145995958dcf24d674319825b35a65e778/docs/source/_static/img/Logo-STKS.jpg -------------------------------------------------------------------------------- /docs/source/_static/img/Logo_IDLab_White.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PlasmaControl/PyRCN/453475145995958dcf24d674319825b35a65e778/docs/source/_static/img/Logo_IDLab_White.png -------------------------------------------------------------------------------- /docs/source/_static/img/SMWA_EFRE-ESF_Sachsen_Logokombi_quer_03.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PlasmaControl/PyRCN/453475145995958dcf24d674319825b35a65e778/docs/source/_static/img/SMWA_EFRE-ESF_Sachsen_Logokombi_quer_03.jpg -------------------------------------------------------------------------------- /docs/source/_static/img/TUD_Logo_HKS41_114.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PlasmaControl/PyRCN/453475145995958dcf24d674319825b35a65e778/docs/source/_static/img/TUD_Logo_HKS41_114.png -------------------------------------------------------------------------------- /docs/source/_static/img/logo_UGent_EN_RGB_2400_color-on-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PlasmaControl/PyRCN/453475145995958dcf24d674319825b35a65e778/docs/source/_static/img/logo_UGent_EN_RGB_2400_color-on-white.png -------------------------------------------------------------------------------- /docs/source/_static/img/pyrcn_logo.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PlasmaControl/PyRCN/453475145995958dcf24d674319825b35a65e778/docs/source/_static/img/pyrcn_logo.pdf -------------------------------------------------------------------------------- /docs/source/api/api.rst: -------------------------------------------------------------------------------- 1 | PyRCN API 2 | ========= 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | pyrcn 8 | pyrcn.base 9 | pyrcn.echo_state_network 10 | pyrcn.extreme_learning_machine 11 | pyrcn.linear_model 12 | pyrcn.cluster 13 | -------------------------------------------------------------------------------- /docs/source/api/pyrcn.base.rst: -------------------------------------------------------------------------------- 1 | .. _`pyrcn.base`: 2 | 3 | pyrcn.base 4 | ========== 5 | 6 | .. automodule:: pyrcn.base 7 | 8 | pyrcn.base.blocks.InputToNode 9 | ----------------------------- 10 | 11 | .. autoclass:: pyrcn.base.blocks.InputToNode 12 | :members: 13 | :undoc-members: 14 | :show-inheritance: 15 | 16 | .. autoclass:: pyrcn.base.blocks.PredefinedWeightsInputToNode 17 | :members: 18 | :undoc-members: 19 | :show-inheritance: 20 | 21 | .. autoclass:: pyrcn.base.blocks.BatchIntrinsicPlasticity 22 | :members: 23 | :undoc-members: 24 | :show-inheritance: 25 | 26 | pyrcn.base.blocks.NodeToNode 27 | ---------------------------- 28 | 29 | .. autoclass:: pyrcn.base.blocks.NodeToNode 30 | :members: 31 | :undoc-members: 32 | :show-inheritance: 33 | 34 | .. autoclass:: pyrcn.base.blocks.PredefinedWeightsNodeToNode 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | .. autoclass:: pyrcn.base.blocks.HebbianNodeToNode 40 | :members: 41 | :undoc-members: 42 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/api/pyrcn.datasets.rst: -------------------------------------------------------------------------------- 1 | .. _`pyrcn.datasets`: 2 | 3 | pyrcn.datasets 4 | ================== 5 | 6 | .. automodule:: pyrcn.datasets 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/api/pyrcn.echo_state_network.rst: -------------------------------------------------------------------------------- 1 | .. _`pyrcn.echo_state_network`: 2 | 3 | pyrcn.echo_state_network 4 | ======================== 5 | 6 | .. automodule:: pyrcn.echo_state_network 7 | 8 | Echo State Network 9 | ------------------ 10 | 11 | .. autoclass:: pyrcn.echo_state_network.ESNRegressor 12 | :members: 13 | :undoc-members: 14 | :show-inheritance: 15 | 16 | .. autoclass:: pyrcn.echo_state_network.ESNClassifier 17 | :members: 18 | :undoc-members: 19 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/api/pyrcn.extreme_learning_machine.rst: -------------------------------------------------------------------------------- 1 | .. _`pyrcn.extreme_learning_machine`: 2 | 3 | pyrcn.extreme_learning_machine 4 | ============================== 5 | 6 | .. automodule:: pyrcn.extreme_learning_machine 7 | 8 | .. autoclass:: pyrcn.extreme_learning_machine.ELMRegressor 9 | :members: 10 | :undoc-members: 11 | :show-inheritance: 12 | 13 | .. 
autoclass:: pyrcn.extreme_learning_machine.ELMClassifier 14 | :members: 15 | :undoc-members: 16 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/api/pyrcn.linear_model.rst: -------------------------------------------------------------------------------- 1 | .. _`pyrcn.linear_model`: 2 | 3 | pyrcn.linear_model 4 | ================== 5 | 6 | .. automodule:: pyrcn.linear_model 7 | 8 | .. autoclass:: pyrcn.linear_model.IncrementalRegression 9 | :members: 10 | :undoc-members: 11 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/api/pyrcn.rst: -------------------------------------------------------------------------------- 1 | PyRCN package 2 | ============= 3 | 4 | Submodules 5 | ---------- 6 | 7 | .. list-table:: 8 | :widths: 50 50 9 | :header-rows: 0 10 | 11 | * - :ref:`pyrcn.base` 12 | - Building blocks for Reservoir Computing Networks 13 | * - :ref:`pyrcn.echo_state_network` 14 | - Objects for Echo State Networks 15 | * - :ref:`pyrcn.extreme_learning_machine` 16 | - Objects for Extreme Learning Machines 17 | * - :ref:`pyrcn.linear_model` 18 | - Incremental linear (ridge) regression 19 | 20 | Subpackages 21 | ----------- 22 | 23 | .. list-table:: 24 | :widths: 50 50 25 | :header-rows: 0 26 | 27 | * - :ref:`pyrcn.datasets` 28 | - Ready-to-use datasets for model evaluation 29 | -------------------------------------------------------------------------------- /docs/source/citation.rst: -------------------------------------------------------------------------------- 1 | Citation 2 | ======== 3 | 4 | If you use PyRCN, please cite the following publication: 5 | 6 | .. code-block:: latex 7 | 8 | @misc{steiner2021pyrcn, 9 | title={PyRCN: A Toolbox for Exploration and Application of Reservoir Computing Networks}, 10 | author={Peter Steiner and Azarakhsh Jalalvand and Simon Stone and Peter Birkholz}, 11 | year={2021}, 12 | eprint={2103.04807}, 13 | archivePrefix={arXiv}, 14 | primaryClass={cs.LG} 15 | } 16 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | from pyrcn import __version__ 14 | import os 15 | import sys 16 | sys.path.insert(0, os.path.abspath('..')) 17 | 18 | 19 | # -- Project information ----------------------------------------------------- 20 | 21 | project = 'PyRCN' 22 | copyright = '2021, Peter Steiner, Azarakhsh Jalalvand, Simon Stone,' \ 23 | 'Peter Birkholz' 24 | author = 'Peter Steiner, Azarakhsh Jalalvand, Simon Stone, Peter Birkholz' 25 | 26 | # The full version, including alpha/beta/rc tags 27 | release = __version__ 28 | 29 | 30 | # -- General configuration --------------------------------------------------- 31 | 32 | # Add any Sphinx extension module names here, as strings. 
They can be 33 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 34 | # ones. 35 | extensions = [ 36 | 'sphinx.ext.napoleon', 37 | 'sphinx.ext.autodoc', 38 | 'sphinx.ext.doctest', 39 | 'sphinx_copybutton' 40 | ] 41 | master_doc = 'index' 42 | 43 | # Add any paths that contain templates here, relative to this directory. 44 | templates_path = ['_templates'] 45 | 46 | # List of patterns, relative to source directory, that match files and 47 | # directories to ignore when looking for source files. 48 | # This pattern also affects html_static_path and html_extra_path. 49 | exclude_patterns: list = [] 50 | 51 | 52 | # -- Options for HTML output ------------------------------------------------- 53 | 54 | # The theme to use for HTML and HTML Help pages. See the documentation for 55 | # a list of builtin themes. 56 | # 57 | html_theme = 'sphinx_rtd_theme' 58 | 59 | # Add any paths that contain custom static files (such as style sheets) here, 60 | # relative to this directory. They are copied after the builtin static files, 61 | # so a file named "default.css" will overwrite the builtin "default.css". 62 | html_static_path = ['_static'] 63 | -------------------------------------------------------------------------------- /docs/source/development.rst: -------------------------------------------------------------------------------- 1 | Development 2 | =========== 3 | 4 | As this open-source project is on an early stage, we highly encourage any contribution! 5 | 6 | How can you contribute: 7 | ----------------------- 8 | 9 | Give us feedback 10 | ~~~~~~~~~~~~~~~~ 11 | 12 | For general feedback, questions or ideas for improvement, please send a mail to 13 | Peter Steiner `peter.steiner@pyrcn.net `_. 14 | 15 | Report bugs 16 | ~~~~~~~~~~~ 17 | 18 | Please report any bugs at the `issue tracker on GitHub`_. 19 | 20 | If you are unsure whether the experienced behaviour is intended or a bug, please just 21 | send a mail to Peter Steiner `peter.steiner@pyrcn.net `_ 22 | first. 23 | 24 | 25 | Improve the documentation 26 | ~~~~~~~~~~~~~~~~~~~~~~~~~ 27 | 28 | Whenever you find something not explained well, please inform Peter Steiner 29 | `peter.steiner@pyrcn.net `_. 30 | 31 | 32 | .. _GitHub: https://github.com/TUD-STKS/PyRCN 33 | .. _issue tracker on GitHub: https://github.com/TUD-STKS/PyRCN/issues 34 | -------------------------------------------------------------------------------- /docs/source/getting_started.rst: -------------------------------------------------------------------------------- 1 | =============== 2 | Getting started 3 | =============== 4 | 5 | Before going further, please make sure that you have installed **PyRCN** as recommended 6 | in the :ref:`installation guide` in a new virtual environment. Afterwards, you can 7 | continue with this getting started guide. 8 | 9 | PyRCN is an open-source project which aims to provide a framework for developing 10 | Reservoir Computing Networks (RCNs) easily and transparently. 11 | 12 | To learn more about the theoretical aspects of reservoir computing, you can read the 13 | page :ref:`whats rc`. 14 | 15 | We also recommend you to have a look at the introduction page of `ReservoirPy 16 | `_, which is another great 17 | resource, in particular for Echo State Networks. 18 | 19 | Playing with blocks -- Building blocks of Reservoir Computing 20 | ============================================================= 21 | 22 | In the recent years, several groups have built toolboxes for Reservoir Computing. 
23 | However, they were mostly able to implement one specific type of RCNs from 24 | - Echo State Networks 25 | - Extreme Learning Machines 26 | - Liquid State Machines 27 | 28 | Problem is that, despite the similarities of different RCN architectures, nobody has 29 | decomposed RCNs in building blocks. Together with PyRCN, we aim to do that and provide 30 | building blocks with which almost any RCN structure can be composed. 31 | 32 | Building your first Reservoir Computing Network 33 | ----------------------------------------------- 34 | 35 | Essentially, with only one command, an Echo State Network can be defined 36 | using the :py:class:`pyrcn.echo_state_network.ESNRegressor` or 37 | :py:class:`pyrcn.echo_state_network.ESNClassifier` class: 38 | 39 | .. doctest:: 40 | 41 | >>> from pyrcn.echo_state_network import ESNRegressor, ESNClassifier 42 | >>> esn = ESNRegressor() 43 | >>> esn 44 | ESNRegressor(input_to_node=InputToNode(), node_to_node=NodeToNode(), 45 | regressor=IncrementalRegression()) 46 | 47 | As we can see, the ``esn`` consists of different building blocks, e.g. 48 | :py:class:`pyrcn.base.blocks.InputToNode`, :py:class:`pyrcn.base.blocks 49 | .NodeToNode` and :py:class:`pyrcn.linear_model.IncrementalRegression`. 50 | 51 | The first block is used to connect the input features to the hidden neurons, 52 | the second building block defines how the connections inside the 53 | hidden neurons are organized. By default, all connections are randomly initialized 54 | and fixed. 55 | 56 | In case one would like to customize the building blocks, you can have a look at the 57 | included modules of :py:module:``pyrcn.base.blocks``. 58 | 59 | .. doctest:: 60 | 61 | >>> import pyrcn.base.blocks as blocks 62 | >>> from inspect import getmembers, isclass 63 | >>> getmembers(blocks, isclass) 64 | [('BatchIntrinsicPlasticity', ), 65 | ('HebbianNodeToNode', ), 66 | ('InputToNode', ), 67 | ('NodeToNode', ), 68 | ('PredefinedWeightsInputToNode', ), 69 | ('PredefinedWeightsNodeToNode', )] 70 | 71 | Obviously, there are a lot of derived modules from the basic building blocks available. 72 | Look their functions up in the documentation or in examples! 73 | 74 | Training a RCN 75 | -------------- 76 | 77 | RCNs can be trained on different kinds of data. In particular, ESNs can then be 78 | trained on sequential data, such as timeseries, especially chaotic ones. In PyRCN, 79 | we have re-implemented the Mackey-Glass time-series from `ReservoirPy 80 | `_, which is a common 81 | demonstration for ESNs: 82 | 83 | .. doctest:: 84 | 85 | >>> from pyrcn.datasets import mackey_glass 86 | >>> X, y = mackey_glass(n_timesteps=8000) 87 | 88 | If we visualize the Mackey-Glass time-series, we can see that it is a 89 | quasi-periodic time-series. 90 | We now use an :py:class:``pyrcn.echo_state_network.ESNRegressor`` to do a one-step 91 | ahead prediction of this time-series. 92 | 93 | .. image:: _static/img/getting_started_mackey_glass.svg 94 | 95 | To train the ESN, only three steps are required: 96 | 97 | 1. Randomly distribute the time-series to each reservoir neuron (**Input-to-Node**). 98 | 2. Compute the state of each neuron based on the current input and the previous 99 | state. 100 | 2. Compute a linear regression between the reservoir states and the target output. 101 | 102 | These steps are handled via :py:func:`pyrcn.echo_state_network.ESNRegressor.fit`, 103 | which is the most important function to train the ESN model: 104 | 105 | .. 
doctest:: 106 | 107 | >>> # Fit the ESN model 108 | >>> esn.fit(X[:4000].reshape(-1, 1), y[:4000]) 109 | ESNRegressor(input_to_node=InputToNode(), node_to_node=NodeToNode(), 110 | regressor=IncrementalRegression(), requires_sequence=False) 111 | 112 | You can see that the ESN can be fitted using only one command. Afterwards, it is 113 | ready to use! 114 | 115 | Testing and predict using the ESN 116 | --------------------------------- 117 | 118 | Finally, we use the :py:func:`pyrcn.echo_state_network.ESNRegressor.predict` function 119 | to use the trained ESN to predict the test data: 120 | 121 | .. doctest:: 122 | 123 | >>> y_pred = esn.predict(X[:4000]) 124 | 125 | .. image:: _static/img/getting_started_mackey_glass_predicted.svg 126 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. PyRCN documentation master file, created by 2 | sphinx-quickstart on Tue Oct 26 11:53:37 2021. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | ===== 7 | PyRCN 8 | ===== 9 | 10 | **A Python 3 framework for building Reservoir Computing Networks (RCNs).** 11 | 12 | .. image:: https://badge.fury.io/py/PyRCN.svg 13 | :target: https://badge.fury.io/py/PyRCN 14 | 15 | 16 | PyRCN ("Python Reservoir Computing Networks") is a light-weight and transparent Python 3 17 | framework for Reservoir Computing and is based on widely used scientific Python packages, 18 | such as numpy or scipy. 19 | 20 | The API is fully `scikit-learn `_-compatible, so that 21 | users of scikit-learn do not need to refactor their code in order to use the estimators 22 | implemented by this framework. 23 | Scikit-learn's built-in parameter optimization methods and example datasets can also be 24 | used in the usual way. PyRCN is used by the `Chair of Speech Technology and Cognitive 25 | Systems, Institute for Acoustics and Speech Communications, Technische Universität 26 | Dresden, Dresden, Germany `_ 27 | and `IDLab (Internet and Data Lab), Ghent University, Ghent, Belgium `_ 28 | 29 | Currently, it implements Echo State Networks (ESNs) by Herbert Jaeger and Extreme 30 | Learning Machines (ELMs) by Guang-Bin Huang in different flavors, 31 | e.g. Classifier and Regressor. 32 | It is actively developed to be extended into several directions: 33 | 34 | * Interaction with `sktime `_ 35 | * Interaction with `hmmlearn `_ 36 | * More towards future work: Related architectures, such as Liquid State Machines (LSMs) 37 | and Perturbative Neural Networks (PNNs) 38 | 39 | PyRCN has successfully been used for several tasks: 40 | 41 | * Music Information Retrieval (MIR) 42 | 43 | * Multipitch tracking 44 | 45 | * Onset detection 46 | 47 | * *f*\ :sub:`0`\ analysis of spoken language 48 | 49 | * GCI detection in raw audio signals 50 | 51 | * Time Series Prediction 52 | 53 | * Mackey-Glass benchmark test 54 | 55 | * Stock price prediction 56 | 57 | * Ongoing research tasks: 58 | 59 | * Beat tracking in music signals 60 | 61 | * Pattern recognition in sensor data 62 | 63 | * Phoneme recognition 64 | 65 | * Unsupervised pre-training of RCNs and optimization of ESNs 66 | 67 | .. 
toctree:: 68 | :maxdepth: 2 69 | :caption: Contents: 70 | 71 | installation 72 | introduction 73 | getting_started 74 | tutorial 75 | development 76 | citation 77 | api/api 78 | 79 | Indices and tables 80 | ================== 81 | 82 | * :ref:`genindex` 83 | * :ref:`modindex` 84 | * :ref:`search` 85 | 86 | Citation 87 | ======== 88 | 89 | If you use PyRCN, please cite the following publication: 90 | 91 | .. code-block:: latex 92 | 93 | @misc{steiner2021pyrcn, 94 | title={PyRCN: A Toolbox for Exploration and Application of Reservoir Computing Networks}, 95 | author={Peter Steiner and Azarakhsh Jalalvand and Simon Stone and Peter Birkholz}, 96 | year={2021}, 97 | eprint={2103.04807}, 98 | archivePrefix={arXiv}, 99 | primaryClass={cs.LG} 100 | } 101 | 102 | 103 | Acknowledgements 104 | ---------------- 105 | 106 | This research was funded by the European Social Fund (Application number: 100327771) and co-financed by tax funds based on the budget approved by the members of the Saxon State Parliament, and by Ghent University. 107 | 108 | .. image:: _static/img/SMWA_EFRE-ESF_Sachsen_Logokombi_quer_03.jpg 109 | :height: 90 110 | :alt: Europäischer Sozialfonds 111 | 112 | .. image:: _static/img/Logo_IDLab_White.png 113 | :height: 70 114 | :alt: IDLab 115 | 116 | .. image:: _static/img/logo_UGent_EN_RGB_2400_color-on-white.png 117 | :height: 70 118 | :alt: Ghent University 119 | 120 | .. image:: _static/img/Logo-STKS.jpg 121 | :height: 70 122 | :alt: Kognitive Systeme und Sprachtechnologie 123 | 124 | .. image:: _static/img/TUD_Logo_HKS41_114.png 125 | :height: 70 126 | :alt: Ghent University 127 | -------------------------------------------------------------------------------- /docs/source/installation.rst: -------------------------------------------------------------------------------- 1 | ================== 2 | Installation guide 3 | ================== 4 | 5 | Before installing PyRCN, make sure that you have at least **Python 3.7** installed on 6 | your system. **PyRCN is developed and supposed to be used only with Python 3.8 and 7 | higher**. 8 | 9 | Yout can check out the version of your Python distribution, you can run the following 10 | command in a PowerShell or CommandLine in Windows, or in a shell in Linux/MacOS: 11 | 12 | .. code-block:: bash 13 | 14 | python --version 15 | 16 | As any package, **PyRCN** has several dependencies as listed in the `requirements.txt 17 | `_. To avoid any 18 | unexpected interaction with the basic system as installed on your computer, we highly 19 | recommend using a virtual environment 20 | 21 | You can find more information about virtual environments, by checking the `Python 22 | documentation on virtual environments and packages 23 | `_. 24 | 25 | Installation using `pip` 26 | ------------------------ 27 | 28 | We uploaded PyRCN to the `PyPI index for Python packages `_. Thus, 29 | you can simply download and install `PyRCN `_ using 30 | the `pip` command in your terminal: 31 | 32 | .. code-block:: bash 33 | 34 | pip install pyrcn 35 | 36 | You can, of course, also upgrade an existing version of PyRCN using `pip` on your command 37 | line: 38 | 39 | .. code-block:: bash 40 | 41 | pip install --upgrade pyrcn 42 | 43 | 44 | To check your installation of PyRCN, run: 45 | 46 | .. code-block:: bash 47 | 48 | pip show pyrcn 49 | 50 | In case of any problems, please report any bugs at the `issue tracker on GitHub`_ or just 51 | send a mail to Peter Steiner `peter.steiner@pyrcn.net `_. 
52 | 53 | Installation from source 54 | ------------------------ 55 | 56 | We only recommend you the installation of PyRCN from source if you would like to 57 | contribute to PyRCN. Therefore, please find the source code of **PyRCN** on `GitHub 58 | `_. 59 | 60 | You can download the latest stable version from the ``main`` branch. To work with older 61 | or unstable versions of **PyRCN**,. you can checkout the ``dev`` branch or any other 62 | branch you would like **PyRCN** from. 63 | 64 | For the actual installation, please unzip the downloaded file or clone the repository. 65 | The installation then work similar as before using `pip` in your command line: 66 | 67 | .. code-block:: bash 68 | 69 | pip install -e /path/to/pyrcn 70 | 71 | .. _issue tracker on GitHub: https://github.com/TUD-STKS/PyRCN/issues 72 | -------------------------------------------------------------------------------- /docs/source/introduction.rst: -------------------------------------------------------------------------------- 1 | Definition of Reservoir Computing 2 | ================================= 3 | 4 | Reservoir Computing (RC) is a paradigm for a fast one-shot training of Recurrent Neural 5 | Networks (RNNs). Probably the best known RC architecture is the Echo State Network 6 | (ESN) by Herbert Jaeger. 7 | 8 | The `Scholarpedia article about Echo State Networks `_ 9 | gives an excellent introduction to the topic of Echo State Networks. 10 | 11 | In a broader sense, the Extreme Learning Machine (ELM) as a closely related architecture 12 | also counts to RC, although it is a feed-forward network. The training paradigm is 13 | similar, and PyRCN aims to unify the development of ESNs and ELMs. 14 | 15 | 16 | Many examples can be found in the PyRCN repository. Some useful examples can also be 17 | found in the `PyRCN repository `_ 18 | with many Jupyter notebooks. 19 | 20 | PyRCN is inspired by `ReservoirPy `_, another RC toolbox 21 | with a different scope. We recommend to check their examples and tutorials. 22 | -------------------------------------------------------------------------------- /docs/source/tutorial.rst: -------------------------------------------------------------------------------- 1 | Tutorials 2 | ========= 3 | 4 | We have prepared a collection of Jupyter (IPython) notebooks that show how to use the 5 | package. 6 | 7 | You can view them online or download Python scripts: 8 | 9 | https://github.com/TUD-STKS/PyRCN/tree/main/examples 10 | 11 | The notebook 12 | `PyRCN_Intro `_ 13 | or its corresponding 14 | `Python script `_ 15 | show how to construct different RCNs with building blocks. 16 | 17 | .. image:: https://mybinder.org/badge_logo.svg 18 | :target: https://mybinder.org/v2/gh/TUD-STKS/PyRCN/main?filepath=examples%2FPyRCN_Intro.ipynb 19 | 20 | 21 | The notebook 22 | `Impulse responses 23 | `_ 24 | is an interactive tool to demonstrate the impact of different hyper-parameters on the 25 | impulse responses of an ESN. 26 | 27 | .. image:: https://mybinder.org/badge_logo.svg 28 | :target: https://mybinder.org/v2/gh/TUD-STKS/PyRCN/main?filepath=examples%2Fesn_impulse_responses.ipynb 29 | 30 | The Jupyter notebook 31 | `digits `_ 32 | or its corresponding 33 | `Python script `_ 34 | show how to set up an ESN for a small hand-written digit recognition experiment. 35 | 36 | .. 
image:: https://mybinder.org/badge_logo.svg 37 | :target: https://mybinder.org/v2/gh/TUD-STKS/PyRCN/main?filepath=examples%2Fdigits.ipynb 38 | 39 | Fore more advanced examples, please have a look at our 40 | `Automatic Music Transcription Repository 41 | `_, 42 | in which we provide an entire feature extraction, 43 | training and test pipeline for multipitch tracking and for note onset detection using 44 | PyRCN. This is currently transferred to this repository. 45 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: pyrcn 2 | 3 | channels: 4 | - conda-forge 5 | 6 | dependencies: 7 | - python 8 | - pip 9 | - pip: 10 | - . 11 | - scikit-learn 12 | - ipywidgets 13 | - ipympl 14 | - pandas 15 | - seaborn 16 | - tqdm>=4.33.0 17 | -------------------------------------------------------------------------------- /examples/.gitignore: -------------------------------------------------------------------------------- 1 | *.md 2 | *.png 3 | *.log 4 | *.csv 5 | *.txt 6 | *.pdf 7 | *.pgf 8 | *.npz 9 | *.joblib 10 | *.pickle 11 | 12 | .ipynb_checkpoints/ 13 | /dataset/ 14 | /mnist-elm* 15 | /plots* 16 | /preprocessing-mnist* 17 | /misc.py 18 | -------------------------------------------------------------------------------- /examples/MNIST_regressors.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# MNIST classification using Extreme Learning Machines\n" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np\n", 17 | "import time\n", 18 | "from scipy.stats import uniform\n", 19 | "from sklearn.base import clone\n", 20 | "from sklearn.datasets import fetch_openml\n", 21 | "from sklearn.linear_model import Ridge\n", 22 | "from sklearn.preprocessing import MinMaxScaler\n", 23 | "from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, StratifiedKFold, ParameterGrid, cross_validate\n", 24 | "from sklearn.utils.fixes import loguniform\n", 25 | "from sklearn.metrics import accuracy_score\n", 26 | "\n", 27 | "from pyrcn.model_selection import SequentialSearchCV\n", 28 | "from pyrcn.extreme_learning_machine import ELMClassifier" 29 | ] 30 | }, 31 | { 32 | "cell_type": "markdown", 33 | "metadata": {}, 34 | "source": [ 35 | "# Load the dataset" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": null, 41 | "metadata": {}, 42 | "outputs": [], 43 | "source": [ 44 | "X, y = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame=False)" 45 | ] 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "metadata": {}, 50 | "source": [ 51 | "# Train test split. \n", 52 | "Normalize to a range between [-1, 1]." 
53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "X = MinMaxScaler(feature_range=(-1,1)).fit_transform(X=X)\n", 62 | "X_train, X_test = X[:60000], X[60000:]\n", 63 | "y_train, y_test = y[:60000].astype(int), y[60000:].astype(int)" 64 | ] 65 | }, 66 | { 67 | "cell_type": "markdown", 68 | "metadata": {}, 69 | "source": [ 70 | "# Prepare sequential hyperparameter tuning" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": null, 76 | "metadata": {}, 77 | "outputs": [], 78 | "source": [ 79 | "initially_fixed_params = {'hidden_layer_size': 500,\n", 80 | " 'input_activation': 'tanh',\n", 81 | " 'k_in': 10,\n", 82 | " 'bias_scaling': 0.0,\n", 83 | " 'alpha': 1e-5,\n", 84 | " 'random_state': 42}\n", 85 | "\n", 86 | "step1_params = {'input_scaling': loguniform(1e-5, 1e1)}\n", 87 | "kwargs1 = {'random_state': 42,\n", 88 | " 'verbose': 1,\n", 89 | " 'n_jobs': -1,\n", 90 | " 'n_iter': 50,\n", 91 | " 'scoring': 'accuracy'}\n", 92 | "step2_params = {'bias_scaling': np.linspace(0.0, 1.6, 16)}\n", 93 | "kwargs2 = {'verbose': 5,\n", 94 | " 'n_jobs': -1,\n", 95 | " 'scoring': 'accuracy'}\n", 96 | "\n", 97 | "elm = ELMClassifier(regressor=Ridge(), **initially_fixed_params)\n", 98 | "\n", 99 | "# The searches are defined similarly to the steps of a sklearn.pipeline.Pipeline:\n", 100 | "searches = [('step1', RandomizedSearchCV, step1_params, kwargs1),\n", 101 | " ('step2', GridSearchCV, step2_params, kwargs2)]" 102 | ] 103 | }, 104 | { 105 | "cell_type": "markdown", 106 | "metadata": {}, 107 | "source": [ 108 | "# Perform the sequential search" 109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": null, 114 | "metadata": {}, 115 | "outputs": [], 116 | "source": [ 117 | "sequential_search = SequentialSearchCV(elm, searches=searches).fit(X_train, y_train)" 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": null, 123 | "metadata": {}, 124 | "outputs": [], 125 | "source": [ 126 | "best_params = sequential_search.best_estimator_.get_params()" 127 | ] 128 | }, 129 | { 130 | "cell_type": "markdown", 131 | "metadata": {}, 132 | "source": [ 133 | "# Test\n", 134 | "Increase reservoir size and compare different regression methods. Make sure that you have enough RAM for that, because all regression types without chunk size require a lot of memory. This is the reason why, especially for large datasets, the incremental regression is recommeded." 
135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": null, 140 | "metadata": {}, 141 | "outputs": [], 142 | "source": [ 143 | "base_elm_ridge = ELMClassifier(regressor=Ridge(), **best_params)\n", 144 | "base_elm_inc = ELMClassifier(**best_params)\n", 145 | "base_elm_inc_chunk = clone(base_elm_inc).set_params(chunk_size=6000)\n", 146 | "\n", 147 | "param_grid = {'hidden_layer_size': [500, 1000, 2000, 4000, 8000, 16000]}\n", 148 | "\n", 149 | "print(\"CV results\\tFit time\\tInference time\\tAccuracy score\\tSize[Bytes]\")\n", 150 | "for params in ParameterGrid(param_grid):\n", 151 | " elm_ridge_cv = cross_validate(clone(base_elm_ridge).set_params(**params), X=X_train, y=y_train)\n", 152 | " t1 = time.time()\n", 153 | " elm_ridge = clone(base_elm_ridge).set_params(**params).fit(X_train, y_train)\n", 154 | " t_fit = time.time() - t1\n", 155 | " mem_size = elm_ridge.__sizeof__()\n", 156 | " t1 = time.time()\n", 157 | " acc_score = accuracy_score(y_test, elm_ridge.predict(X_test))\n", 158 | " t_inference = time.time() - t1\n", 159 | " print(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\".format(elm_ridge_cv, t_fit, t_inference, acc_score, mem_size))\n", 160 | " elm_inc_cv = cross_validate(clone(base_elm_inc).set_params(**params), X=X_train, y=y_train)\n", 161 | " t1 = time.time()\n", 162 | " elm_inc = clone(base_elm_inc).set_params(**params).fit(X_train, y_train)\n", 163 | " t_fit = time.time() - t1\n", 164 | " mem_size = elm_inc.__sizeof__()\n", 165 | " t1 = time.time()\n", 166 | " acc_score = accuracy_score(y_test, elm_inc.predict(X_test))\n", 167 | " t_inference = time.time() - t1\n", 168 | " print(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\".format(elm_inc_cv, t_fit, t_inference, acc_score, mem_size))\n", 169 | " elm_inc_chunk_cv = cross_validate(clone(base_elm_inc_chunk).set_params(**params), X=X_train, y=y_train)\n", 170 | " t1 = time.time()\n", 171 | " elm_inc_chunk = clone(base_elm_inc_chunk).set_params(**params).fit(X_train, y_train)\n", 172 | " t_fit = time.time() - t1\n", 173 | " mem_size = elm_inc_chunk.__sizeof__()\n", 174 | " t1 = time.time()\n", 175 | " acc_score = accuracy_score(y_test, elm_inc_chunk.predict(X_test))\n", 176 | " t_inference = time.time() - t1\n", 177 | " print(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\".format(elm_inc_chunk_cv, t_fit, t_inference, acc_score, mem_size))" 178 | ] 179 | }, 180 | { 181 | "cell_type": "code", 182 | "execution_count": null, 183 | "metadata": {}, 184 | "outputs": [], 185 | "source": [] 186 | } 187 | ], 188 | "metadata": { 189 | "kernelspec": { 190 | "display_name": "Python 3 (ipykernel)", 191 | "language": "python", 192 | "name": "python3" 193 | }, 194 | "language_info": { 195 | "codemirror_mode": { 196 | "name": "ipython", 197 | "version": 3 198 | }, 199 | "file_extension": ".py", 200 | "mimetype": "text/x-python", 201 | "name": "python", 202 | "nbconvert_exporter": "python", 203 | "pygments_lexer": "ipython3", 204 | "version": "3.10.4" 205 | } 206 | }, 207 | "nbformat": 4, 208 | "nbformat_minor": 4 209 | } 210 | -------------------------------------------------------------------------------- /examples/Video_Door_state_Classification.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from tqdm import tqdm 3 | 4 | from pyrcn.base.blocks import PredefinedWeightsInputToNode 5 | from pyrcn.echo_state_network import ESNClassifier 6 | from pyrcn.metrics import accuracy_score 7 | from pyrcn.model_selection import SequentialSearchCV 8 | 9 | from sklearn.base import clone 10 | from sklearn.metrics 
import make_scorer 11 | from sklearn.model_selection import ( 12 | RandomizedSearchCV, GridSearchCV, 13 | ParameterGrid) 14 | from sklearn.cluster import MiniBatchKMeans 15 | from sklearn.utils.fixes import loguniform 16 | from scipy.stats import uniform 17 | from joblib import dump, load 18 | 19 | 20 | def read_file(fname, Nfr=-1): 21 | tmp = open(fname + '.txt', 'rb') 22 | a = tmp.read() 23 | tmp.close() 24 | T = len(a) # Just to know how many frames (T) are there in the file 25 | if Nfr != -1: 26 | T = np.min((T, Nfr)) 27 | dim = [30, 30] # Dimension of each frame 28 | N_fr = dim[0] * dim[1] # size of the input vector 29 | yuvfile = open(fname + '.yuv', 'rb') # Opening the video file 30 | door_state_file = open(fname + '.txt', 'rb') # Opening the annotation file 31 | TARGET = np.zeros((T, 3)) 32 | FRAMES = np.zeros((T, N_fr)) 33 | for t in tqdm(range(T)): # for each frame 34 | fr2 = np.zeros(N_fr) 35 | frame = yuvfile.read(N_fr) 36 | for i in range(N_fr): 37 | fr2[i] = frame[i] 38 | # ---------------------------------- 39 | fr2 = fr2 / 255.0 # Normalizing the pixel values to [0,1] 40 | FRAMES[t, :] = fr2 41 | TARGET[t, int(door_state_file.read( 42 | 1))] = 1 # setting the desired output class to 1 43 | return FRAMES, TARGET 44 | 45 | 46 | try: 47 | X_train, X_test, y_train, y_test = load( 48 | r"E:\RCN_CICSyN2015\Seq_video_dataset_large.joblib") 49 | except FileNotFoundError: 50 | n_files = 3 51 | 52 | X_total = [None] * n_files 53 | y_total = [None] * n_files 54 | n_sequences_total = [None] * n_files 55 | for k in range(n_files): 56 | X_total[k], y_total[k] = read_file( 57 | r"E:\RCN_CICSyN2015\Seq_" + str(k + 1)) 58 | 59 | X_train_list = [] 60 | y_train_list = [] 61 | X_test_list = [] 62 | y_test_list = [] 63 | 64 | for k in range(n_files): 65 | n_sequences_total[k] = int(len(X_total[k]) / 5400) 66 | X_total[k] = np.array_split(X_total[k], n_sequences_total[k]) 67 | y_total[k] = np.array_split(y_total[k], n_sequences_total[k]) 68 | for m, (X, y) in enumerate(zip(X_total[k], y_total[k])): 69 | if m < int(.5 * n_sequences_total[k]): 70 | X_train_list.append(X) 71 | y_train_list.append(y) 72 | else: 73 | X_test_list.append(X) 74 | y_test_list.append(y) 75 | 76 | X_train = np.empty(shape=(len(X_train_list),), dtype=object) 77 | y_train = np.empty(shape=(len(y_train_list),), dtype=object) 78 | X_test = np.empty(shape=(len(X_test_list),), dtype=object) 79 | y_test = np.empty(shape=(len(y_test_list),), dtype=object) 80 | 81 | for k, (X, y) in enumerate(zip(X_train_list, y_train_list)): 82 | X_train[k] = X.astype(float) 83 | y_train[k] = y.astype(int) 84 | 85 | for k, (X, y) in enumerate(zip(X_test_list, y_test_list)): 86 | X_test[k] = X.astype(float) 87 | y_test[k] = y.astype(int) 88 | 89 | dump([X_train, X_test, y_train, y_test], 90 | r"E:\RCN_CICSyN2015\Seq_video_dataset_large.joblib") 91 | 92 | print(X_train.shape, X_train[0].shape, y_train.shape, y_train[0].shape) 93 | print(X_test.shape, X_test[0].shape, y_test.shape, y_test[0].shape) 94 | 95 | kmeans = MiniBatchKMeans(n_clusters=50, n_init=200, reassignment_ratio=0, 96 | max_no_improvement=50, init='k-means++', verbose=2, 97 | random_state=0) 98 | kmeans.fit(X=np.concatenate(np.concatenate((X_train, X_test)))) 99 | w_in = np.divide(kmeans.cluster_centers_, 100 | np.linalg.norm(kmeans.cluster_centers_, axis=1)[:, None]) 101 | 102 | initially_fixed_params = { 103 | 'hidden_layer_size': 50, 104 | 'k_in': 10, 105 | 'input_scaling': 0.4, 106 | 'input_activation': 'identity', 107 | 'bias_scaling': 0.0, 108 | 'spectral_radius': 0.0, 109 | 
'leakage': 0.1, 110 | 'k_rec': 10, 111 | 'reservoir_activation': 'tanh', 112 | 'bidirectional': False, 113 | 'wash_out': 0, 114 | 'continuation': False, 115 | 'alpha': 1e-3, 116 | 'random_state': 42 117 | } 118 | 119 | step1_esn_params = { 120 | 'input_scaling': uniform(loc=1e-2, scale=1), 121 | 'spectral_radius': uniform(loc=0, scale=2) 122 | } 123 | 124 | step2_esn_params = {'leakage': loguniform(1e-5, 1e0)} 125 | step3_esn_params = {'bias_scaling': np.linspace(0.0, 1.0, 11)} 126 | step4_esn_params = {'alpha': loguniform(1e-5, 1e1)} 127 | 128 | kwargs_step1 = { 129 | 'n_iter': 200, 'random_state': 42, 'verbose': 1, 'n_jobs': -1, 130 | 'scoring': make_scorer(accuracy_score) 131 | } 132 | kwargs_step2 = { 133 | 'n_iter': 50, 'random_state': 42, 'verbose': 1, 'n_jobs': -1, 134 | 'scoring': make_scorer(accuracy_score) 135 | } 136 | kwargs_step3 = { 137 | 'verbose': 1, 'n_jobs': -1, 'scoring': make_scorer(accuracy_score) 138 | } 139 | kwargs_step4 = { 140 | 'n_iter': 50, 'random_state': 42, 'verbose': 1, 'n_jobs': -1, 141 | 'scoring': make_scorer(accuracy_score) 142 | } 143 | 144 | searches = [('step1', RandomizedSearchCV, step1_esn_params, kwargs_step1), 145 | ('step2', RandomizedSearchCV, step2_esn_params, kwargs_step2), 146 | ('step3', GridSearchCV, step3_esn_params, kwargs_step3), 147 | ('step4', RandomizedSearchCV, step4_esn_params, kwargs_step4)] 148 | 149 | base_km_esn = ESNClassifier( 150 | input_to_node=PredefinedWeightsInputToNode( 151 | predefined_input_weights=w_in.T), 152 | **initially_fixed_params) 153 | 154 | try: 155 | sequential_search = load("../sequential_search_RICSyN2015_km_large.joblib") 156 | except FileNotFoundError: 157 | sequential_search = SequentialSearchCV(base_km_esn, searches=searches).fit( 158 | X_train, y_train) 159 | dump(sequential_search, "../sequential_search_RICSyN2015_km_large.joblib") 160 | 161 | base_esn = clone(sequential_search.best_estimator_) 162 | 163 | param_grid = { 164 | 'hidden_layer_size': [50, 100, 200, 400, 800, 1600], 165 | 'random_state': range(1, 11) 166 | } 167 | 168 | for params in ParameterGrid(param_grid): 169 | esn = clone(base_esn).set_params(**params).fit(X=X_train, y=y_train, 170 | n_jobs=8) 171 | y_pred = esn.predict_proba(X_test) 172 | score = accuracy_score(np.argmax(np.concatenate(y_test), axis=1), 173 | np.argmax(np.concatenate(y_pred), axis=1)) 174 | print("ESN with params {0} achieved score of {1}".format(params, score)) 175 | -------------------------------------------------------------------------------- /examples/Video_Door_state_Classification_randomized_search.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from tqdm import tqdm 3 | 4 | from pyrcn.echo_state_network import ESNClassifier 5 | from pyrcn.metrics import mean_squared_error 6 | 7 | from sklearn.metrics import make_scorer 8 | from sklearn.model_selection import RandomizedSearchCV 9 | from sklearn.utils.fixes import loguniform 10 | from scipy.stats import uniform 11 | from joblib import dump, load 12 | 13 | 14 | def read_file(fname, Nfr=-1): 15 | tmp = open(fname + '.txt', 'rb') 16 | a = tmp.read() 17 | tmp.close() 18 | T = len(a) # Just to know how many frames (T) are there in the file 19 | if Nfr != -1: 20 | T = np.min((T, Nfr)) 21 | dim = [30, 30] # Dimension of each frame 22 | N_fr = dim[0] * dim[1] # size of the input vector 23 | yuvfile = open(fname + '.yuv', 'rb') # Opening the video file 24 | door_state_file = open(fname + '.txt', 'rb') # Opening the annotation file 25 | TARGET = 
np.zeros((T, 3)) 26 | FRAMES = np.zeros((T, N_fr)) 27 | for t in tqdm(range(T)): # for each frame 28 | fr2 = np.zeros(N_fr) 29 | frame = yuvfile.read(N_fr) 30 | for i in range(N_fr): 31 | fr2[i] = frame[i] 32 | # ---------------------------------- 33 | fr2 = fr2 / 255.0 # Normalizing the pixel values to [0,1] 34 | FRAMES[t, :] = fr2 35 | TARGET[t, int(door_state_file.read( 36 | 1))] = 1 # setting the desired output class to 1 37 | return FRAMES, TARGET 38 | 39 | 40 | try: 41 | X_train, X_test, y_train, y_test = load( 42 | r"E:\RCN_CICSyN2015\Seq_video_dataset_large.joblib") 43 | except FileNotFoundError: 44 | n_files = 3 45 | 46 | X_total = [None] * n_files 47 | y_total = [None] * n_files 48 | n_sequences_total = [None] * n_files 49 | for k in range(n_files): 50 | X_total[k], y_total[k] = read_file( 51 | r"E:\RCN_CICSyN2015\Seq_" + str(k + 1)) 52 | 53 | X_train_list = [] 54 | y_train_list = [] 55 | X_test_list = [] 56 | y_test_list = [] 57 | 58 | for k in range(n_files): 59 | n_sequences_total[k] = int(len(X_total[k]) / 5400) 60 | X_total[k] = np.array_split(X_total[k], n_sequences_total[k]) 61 | y_total[k] = np.array_split(y_total[k], n_sequences_total[k]) 62 | for m, (X, y) in enumerate(zip(X_total[k], y_total[k])): 63 | if m < int(.5 * n_sequences_total[k]): 64 | X_train_list.append(X) 65 | y_train_list.append(y) 66 | else: 67 | X_test_list.append(X) 68 | y_test_list.append(y) 69 | 70 | X_train = np.empty(shape=(len(X_train_list),), dtype=object) 71 | y_train = np.empty(shape=(len(y_train_list),), dtype=object) 72 | X_test = np.empty(shape=(len(X_test_list),), dtype=object) 73 | y_test = np.empty(shape=(len(y_test_list),), dtype=object) 74 | 75 | for k, (X, y) in enumerate(zip(X_train_list, y_train_list)): 76 | X_train[k] = X.astype(float) 77 | y_train[k] = y.astype(int) 78 | 79 | for k, (X, y) in enumerate(zip(X_test_list, y_test_list)): 80 | X_test[k] = X.astype(float) 81 | y_test[k] = y.astype(int) 82 | 83 | dump([X_train, X_test, y_train, y_test], 84 | r"E:\RCN_CICSyN2015\Seq_video_dataset_large.joblib") 85 | 86 | initially_fixed_params = {'hidden_layer_size': 50, 87 | 'k_in': 10, 88 | 'input_activation': 'identity', 89 | 'k_rec': 10, 90 | 'reservoir_activation': 'tanh', 91 | 'bidirectional': False, 92 | 'wash_out': 0, 93 | 'continuation': False, 94 | 'alpha': 1e-3, 95 | 'random_state': 42} 96 | 97 | step0_esn_params = {'input_scaling': uniform(loc=1e-2, scale=1), 98 | 'spectral_radius': uniform(loc=0, scale=2), 99 | 'leakage': loguniform(1e-5, 1e0), 100 | 'bias_scaling': uniform(loc=0, scale=2)} 101 | 102 | kwargs_step0 = {'n_iter': 1000, 'random_state': 42, 'verbose': 1, 'n_jobs': 1, 103 | 'scoring': make_scorer(mean_squared_error, 104 | greater_is_better=False, 105 | needs_proba=True)} 106 | 107 | base_esn = ESNClassifier(**initially_fixed_params) 108 | 109 | try: 110 | random_search_basic = load("../random_search_RICSyN2015_basic.joblib") 111 | except FileNotFoundError: 112 | random_search_basic = RandomizedSearchCV( 113 | estimator=base_esn, param_distributions=step0_esn_params, 114 | **kwargs_step0).fit(X_train, y_train) 115 | dump(random_search_basic, "../random_search_RICSyN2015_basic.joblib") 116 | -------------------------------------------------------------------------------- /examples/bip.py: -------------------------------------------------------------------------------- 1 | """Example for BatchIntrinsicPlasticity.""" 2 | import os 3 | import numpy as np 4 | from pyrcn.base.blocks import BatchIntrinsicPlasticity 5 | 6 | import matplotlib.pyplot as plt 7 | import seaborn 
as sns 8 | 9 | 10 | sns.set_theme() 11 | 12 | 13 | directory = os.path.join(os.getcwd(), 'bip') 14 | 15 | 16 | def main(): 17 | if not os.path.exists(directory): 18 | os.makedirs(directory) 19 | 20 | rs = np.random.RandomState(42) 21 | 22 | algorithm = 'dresden' 23 | sample_size = (1000, 1) 24 | 25 | i2n_uniform = BatchIntrinsicPlasticity( 26 | hidden_layer_size=1, input_activation='tanh', random_state=rs, 27 | distribution='uniform', algorithm=algorithm) 28 | i2n_exponential = BatchIntrinsicPlasticity( 29 | hidden_layer_size=1, input_activation='tanh', random_state=rs, 30 | distribution='exponential', algorithm=algorithm) 31 | i2n_normal = BatchIntrinsicPlasticity( 32 | hidden_layer_size=1, input_activation='tanh', random_state=rs, 33 | distribution='normal', algorithm=algorithm) 34 | 35 | X_uniform = rs.uniform(size=sample_size) 36 | X_exponential = rs.exponential(size=sample_size) 37 | X_normal = rs.normal(size=sample_size) 38 | 39 | def exponential(x, lam): 40 | return lam * np.exp(-lam * x) 41 | 42 | def gaussian(x, mu, sig): 43 | return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.))) \ 44 | / np.sqrt(2. * np.pi) / sig 45 | 46 | # X_uniform = np.linspace(start=-1., stop=1., num=1000).reshape(-1, 1) 47 | # X_exponential = exponential(X_uniform + 1., 1) 48 | # X_normal = gaussian(X_uniform, 0, 1) 49 | 50 | """ 51 | y_uni_exp = i2n_exponential.fit_transform(X_uniform) 52 | y_exp_exp = i2n_exponential.fit_transform(X_exponential) 53 | y_norm_exp = i2n_exponential.fit_transform(X_normal) 54 | y_uni_uni = i2n_uniform.fit_transform(X_uniform) 55 | y_exp_uni = i2n_uniform.fit_transform(X_exponential) 56 | y_norm_uni = i2n_uniform.fit_transform(X_normal) 57 | y_uni_norm = i2n_normal.fit_transform(X_uniform) 58 | y_exp_norm = i2n_normal.fit_transform(X_exponential) 59 | y_norm_norm = i2n_normal.fit_transform(X_normal) 60 | """ 61 | 62 | # display distributions 63 | fig, axs = plt.subplots(3, 4) 64 | # plt.ylabel('f_x') 65 | # plt.xlabel('f_y') 66 | # fig.suptitle('BIP transformations') 67 | bins = 20 68 | sns.histplot(data=i2n_exponential.fit_transform(X_exponential), bins=bins, 69 | stat="density", ax=axs[0, 0], legend=False) 70 | axs[0, 0].set_xlim((-1., 1.)) 71 | axs[0, 0].set_ylim((0., 3.)) 72 | sns.histplot(data=i2n_normal.fit_transform(X_exponential), bins=bins, 73 | stat="density", ax=axs[0, 1], legend=False) 74 | axs[0, 1].set_xlim((-1., 1.)) 75 | axs[0, 1].set_ylim((0., 3.)) 76 | sns.histplot(data=i2n_uniform.fit_transform(X_exponential), bins=bins, 77 | stat="density", ax=axs[0, 2], legend=False) 78 | axs[0, 2].set_xlim((-1., 1.)) 79 | axs[0, 2].set_ylim((0., 3.)) 80 | 81 | sns.histplot(data=i2n_exponential.fit_transform(X_normal), bins=bins, 82 | stat="density", ax=axs[1, 0], legend=False) 83 | axs[1, 0].set_xlim((-1., 1.)) 84 | axs[1, 0].set_ylim((0., 1.5)) 85 | sns.histplot(data=i2n_normal.fit_transform(X_normal), bins=bins, 86 | stat="density", ax=axs[1, 1], legend=False) 87 | axs[1, 1].set_xlim((-1., 1.)) 88 | axs[1, 1].set_ylim((0., 1.5)) 89 | sns.histplot(data=i2n_uniform.fit_transform(X_normal), bins=bins, 90 | stat="density", ax=axs[1, 2], legend=False) 91 | axs[1, 2].set_xlim((-1., 1.)) 92 | axs[1, 2].set_ylim((0., 1.5)) 93 | 94 | sns.histplot(data=i2n_exponential.fit_transform(X_uniform), bins=bins, 95 | stat="density", ax=axs[2, 0], legend=False) 96 | axs[2, 0].set_xlim((-1., 1.)) 97 | axs[2, 0].set_ylim((0., 2.5)) 98 | sns.histplot(data=i2n_normal.fit_transform(X_uniform), bins=bins, 99 | stat="density", ax=axs[2, 1], legend=False) 100 | axs[2, 1].set_xlim((-1., 
1.)) 101 | axs[2, 1].set_ylim((0., 2.5)) 102 | sns.histplot(data=i2n_uniform.fit_transform(X_uniform), bins=bins, 103 | stat="density", ax=axs[2, 2], legend=False) 104 | axs[2, 2].set_xlim((-1., 1.)) 105 | axs[2, 2].set_ylim((0., 2.5)) 106 | 107 | sns.histplot(data=X_exponential, bins=bins, ax=axs[0, 3], legend=False) 108 | axs[0, 3].set_title('exponential') 109 | sns.histplot(data=X_normal, bins=bins, ax=axs[1, 3], legend=False) 110 | axs[1, 3].set_title('normal') 111 | sns.histplot(data=X_uniform, bins=bins, ax=axs[2, 3], legend=False) 112 | axs[2, 3].set_title('uniform') 113 | 114 | plt.tight_layout() 115 | plt.show() 116 | 117 | 118 | if __name__ == "__main__": 119 | main() 120 | -------------------------------------------------------------------------------- /examples/digits-kmeans.py: -------------------------------------------------------------------------------- 1 | """ 2 | An example of the Coates Idea on the digits dataset. 3 | """ 4 | import numpy as np 5 | import time 6 | 7 | import matplotlib 8 | 9 | import matplotlib.gridspec as gridspec 10 | import matplotlib.pyplot as plt 11 | 12 | import seaborn as sns 13 | 14 | from sklearn.decomposition import PCA 15 | from sklearn.cluster import KMeans 16 | 17 | from src.pyrcn.util import get_mnist, tud_colors 18 | 19 | 20 | matplotlib.rc('image', cmap='binary') 21 | sns.set_theme() 22 | 23 | 24 | # define norm 25 | def p2norm(x): 26 | return np.linalg.norm(x, axis=1, ord=2) 27 | 28 | 29 | def get_unique(X, y): 30 | labels = np.unique(y) 31 | 32 | # find first occurrences 33 | idx = np.ones((len(labels), 2)) * -1 34 | cnt = 0 35 | while np.any(idx[:, 0] == -1): 36 | if idx[int(y[cnt]), 0] == -1.0: 37 | idx[int(y[cnt]), :] = int(cnt), y[cnt] 38 | cnt += 1 39 | 40 | # return sorted array 41 | sorted_index = idx[np.argsort(idx[:, 1]).astype(int), 0].astype(int) 42 | return X[sorted_index, ...], y[sorted_index, ...] 43 | 44 | 45 | def main(): 46 | runtime = [time.time()] 47 | X, y = get_mnist() 48 | runtime.append(time.time()) 49 | print('fetch: {0} s'.format(np.diff(runtime[-2:]))) 50 | 51 | whitener = PCA(50, random_state=42) 52 | 53 | X /= 255. 
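    # Added note: further below, the cosine similarity between each class
    # exemplar and each cluster centre is computed with a broadcast division.
    # An explicit sketch of that computation (a hypothetical helper, not part
    # of the original example): entry [i, j] is the cosine between sample i
    # and centre j, with the pairwise denominator spelled out via np.outer
    # (a plain element-wise product of the two norm vectors would broadcast
    # along a single axis only).
    def cosine_similarity_matrix(A, B):
        # A: (n_samples, n_features), B: (n_centres, n_features)
        return np.dot(A, B.T) / np.outer(p2norm(A), p2norm(B))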
54 | 55 | X_preprocessed = whitener.fit_transform(X) 56 | runtime.append(time.time()) 57 | print('preprocessing: {0} s'.format(np.diff(runtime[-2:]))) 58 | 59 | cls = KMeans(n_clusters=10, random_state=42).fit(X_preprocessed) 60 | runtime.append(time.time()) 61 | print('clustering: {0} s'.format(np.diff(runtime[-2:]))) 62 | 63 | samples, values = get_unique(X_preprocessed, y) 64 | 65 | # reconstruct cluster centers 66 | cluster_centers = whitener.inverse_transform(cls.cluster_centers_) 67 | 68 | # normed_samples = (samples.T / np.linalg.norm(samples, axis=1, ord=2)).T 69 | # calculate distance 70 | cos_similarity = np.divide(np.dot(samples, cls.cluster_centers_.T), 71 | p2norm(samples) * p2norm(cls.cluster_centers_)) 72 | 73 | runtime.append(time.time()) 74 | print('calculations: {0} s'.format(np.diff(runtime[-2:]))) 75 | 76 | # display digits 77 | fig = plt.figure(figsize=(6, 3)) 78 | gs_cyphers = gridspec.GridSpec(2, 5, figure=fig, wspace=.4, hspace=.3, 79 | top=.97, bottom=.1, left=.07, right=.95) 80 | 81 | for i in range(10): 82 | gs_cypher = gridspec.GridSpecFromSubplotSpec( 83 | 2, 1, subplot_spec=gs_cyphers[i], height_ratios=[1., .6], 84 | hspace=.05) 85 | 86 | ax_centroid = fig.add_subplot(gs_cypher[0, 0]) 87 | ax_barchart = fig.add_subplot(gs_cypher[1, 0]) 88 | 89 | ax_centroid.imshow( 90 | cluster_centers[i, :].reshape(28, 28), interpolation='none') 91 | ax_centroid.tick_params(left=False, bottom=False, labelleft=False, 92 | labelbottom=False) 93 | 94 | ax_barchart.bar(list(map(int, values)), cos_similarity[:, i], 95 | tick_label=values, color=tud_colors['lightblue']) 96 | # ax_barchart.set_xlim([0, 9]) 97 | ax_barchart.grid(which='both', axis='y') 98 | ax_barchart.set_yticks([-1., 0., 1.], minor=False) 99 | ax_barchart.set_yticks([-.5, .5], minor=True) 100 | ax_barchart.set_ylim([-1., 1.]) 101 | 102 | # plt.tight_layout() 103 | plt.savefig('mnist-kmeans-centroids-cos-similarity-pca50.pdf') 104 | # plt.savefig(os.path.join(os.environ['PGFPATH'], 105 | # 'mnist-pca50-kmeans-centroids-cos-similarity.pgf'), format='pgf') 106 | runtime.append(time.time()) 107 | print('plotting: {0} s'.format(np.diff(runtime[-2:]))) 108 | 109 | 110 | if __name__ == '__main__': 111 | main() 112 | -------------------------------------------------------------------------------- /examples/digits.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | """ 5 | Recognizing hand-written digits 6 | ------------------------------- 7 | 8 | This notebook adapts the existing example of applying support vector 9 | classification from scikit-learn to PyRCN to demonstrate, how PyRCN can be used 10 | to classify hand-written digits. 11 | 12 | The tutorial is based on numpy, scikit-learn and PyRCN. 13 | """ 14 | import numpy as np 15 | import time 16 | from sklearn.base import clone 17 | from sklearn.model_selection import train_test_split 18 | from sklearn.model_selection import ( 19 | ParameterGrid, RandomizedSearchCV, cross_validate) 20 | from scipy.stats import uniform, loguniform 21 | from sklearn.metrics import make_scorer 22 | 23 | from pyrcn.model_selection import SequentialSearchCV 24 | from pyrcn.echo_state_network import ESNClassifier 25 | from pyrcn.metrics import accuracy_score 26 | from pyrcn.datasets import load_digits 27 | 28 | 29 | # Load the dataset (part of scikit-learn) and consists of 1797 8x8 images. 
30 | # We are using our dataloader that is derived from scikit-learn's dataloader
31 | # and returns arrays of 8x8 sequences and the corresponding labels.
32 | X, y = load_digits(return_X_y=True, as_sequence=True)
33 | print("Number of digits: {0}".format(len(X)))
34 | print("Shape of digits: {0}".format(X[0].shape))
35 | 
36 | # Split the dataset into training and test sets
37 | #
38 | # We train the ESN using 80% of the digits and test it using the remaining
39 | # images.
40 | stratify = np.asarray([np.unique(yt) for yt in y]).flatten()
41 | X_train, X_test, y_train, y_test = train_test_split(
42 |     X, y, test_size=0.2, stratify=stratify, random_state=42)
43 | X_tr = np.copy(X_train)
44 | y_tr = np.copy(y_train)
45 | X_te = np.copy(X_test)
46 | y_te = np.copy(y_test)
47 | for k, _ in enumerate(y_tr):
48 |     y_tr[k] = np.repeat(y_tr[k], 8, 0)
49 | for k, _ in enumerate(y_te):
50 |     y_te[k] = np.repeat(y_te[k], 8, 0)
51 | 
52 | print("Number of digits in training set: {0}".format(len(X_train)))
53 | print("Shape of digits in training set: {0}".format(X_train[0].shape))
54 | print("Number of digits in test set: {0}".format(len(X_test)))
55 | print("Shape of digits in test set: {0}".format(X_test[0].shape))
56 | 
57 | # Set up an ESN
58 | # To develop an ESN model for digit recognition, we need to tune several
59 | # hyper-parameters, e.g., input_scaling, spectral_radius, bias_scaling and
60 | # leaky integration.
61 | #
62 | # We follow the way proposed in the introductory paper of PyRCN to optimize
63 | # hyper-parameters sequentially.
64 | #
65 | # We define the search spaces for each step together with the type of search
66 | # (randomized searches in this context).
67 | #
68 | # Finally, we initialize an ESNClassifier with the desired output strategy
69 | # and with the initially fixed parameters.
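# Added note: each search step below is a tuple of the form
# (name, SearchClass, param_space, search_kwargs); SequentialSearchCV runs
# the steps in order and carries the best parameters of each step over to
# the next one. A minimal sketch with made-up values (illustrative only):
#
#     searches = [('scaling', RandomizedSearchCV,
#                  {'input_scaling': uniform(loc=1e-2, scale=1)},
#                  {'n_iter': 10, 'scoring': make_scorer(accuracy_score)})]
#     SequentialSearchCV(ESNClassifier(), searches=searches).fit(X_tr, y_tr)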
70 | 
71 | 
72 | initially_fixed_params = {
73 |     'hidden_layer_size': 50, 'input_activation': 'identity', 'k_in': 5,
74 |     'bias_scaling': 0.0, 'reservoir_activation': 'tanh', 'leakage': 1.0,
75 |     'bidirectional': False, 'k_rec': 10, 'continuation': False, 'alpha': 1e-5,
76 |     'random_state': 42, 'decision_strategy': "winner_takes_all"}
77 | 
78 | step1_esn_params = {'input_scaling': uniform(loc=1e-2, scale=1),
79 |                     'spectral_radius': uniform(loc=0, scale=2)}
80 | 
81 | step2_esn_params = {'leakage': loguniform(1e-5, 1e0)}
82 | step3_esn_params = {'bias_scaling': uniform(loc=0, scale=2)}
83 | step4_esn_params = {'alpha': loguniform(1e-5, 1e0)}
84 | 
85 | kwargs_step1 = {'n_iter': 200, 'random_state': 42, 'verbose': 1, 'n_jobs': 1,
86 |                 'scoring': make_scorer(accuracy_score)}
87 | kwargs_step2 = {'n_iter': 50, 'random_state': 42, 'verbose': 1, 'n_jobs': -1,
88 |                 'scoring': make_scorer(accuracy_score)}
89 | kwargs_step3 = {'verbose': 1, 'n_jobs': -1,
90 |                 'scoring': make_scorer(accuracy_score)}
91 | kwargs_step4 = {'n_iter': 50, 'random_state': 42, 'verbose': 1, 'n_jobs': -1,
92 |                 'scoring': make_scorer(accuracy_score)}
93 | 
94 | # The searches are defined similarly to the steps of a
95 | # sklearn.pipeline.Pipeline:
96 | searches = [('step1', RandomizedSearchCV, step1_esn_params, kwargs_step1),
97 |             ('step2', RandomizedSearchCV, step2_esn_params, kwargs_step2),
98 |             ('step3', RandomizedSearchCV, step3_esn_params, kwargs_step3),
99 |             ('step4', RandomizedSearchCV, step4_esn_params, kwargs_step4)]
100 | 
101 | base_esn = ESNClassifier(**initially_fixed_params)
102 | 
103 | 
104 | # Optimization
105 | # We provide a SequentialSearchCV that iterates through the list of
106 | # searches that we have defined before. It can be combined with any model
107 | # selection tool from
108 | # scikit-learn.
109 | sequential_search = SequentialSearchCV(base_esn, searches=searches).fit(
110 |     X_tr, y_tr)
111 | 
112 | 
113 | # Use the ESN with final hyper-parameters
114 | #
115 | # After the optimization, we extract the ESN with the final hyper-parameters
116 | # as the result of the optimization.
117 | base_esn = sequential_search.best_estimator_
118 | 
119 | 
120 | # Test the ESN
121 | # Finally, we increase the reservoir size and compare the impact of uni- and
122 | # bidirectional ESNs. Notice that the ESN strongly benefits both from
123 | # increasing the reservoir size and from the bi-directional working mode.
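# Added note: conceptually, a bidirectional reservoir processes each sequence
# once forwards and once backwards and concatenates the two state sequences,
# doubling the number of features seen by the linear readout. A rough sketch
# (an illustration of the idea, not the actual PyRCN implementation):
#
#     states_fw = reservoir(X)                # forward pass over the frames
#     states_bw = reservoir(X[::-1])[::-1]    # backward pass, re-aligned
#     states = np.concatenate((states_fw, states_bw), axis=1)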
124 | param_grid = {'hidden_layer_size': [50, 100, 200, 400, 500], 125 | 'bidirectional': [False, True]} 126 | 127 | print("CV results\tFit time\tInference time\tAccuracy score\tSize[Bytes]") 128 | for params in ParameterGrid(param_grid): 129 | esn_cv = cross_validate(clone(base_esn).set_params(**params), X=X_train, 130 | y=y_train, scoring=make_scorer(accuracy_score), 131 | n_jobs=-1) 132 | t1 = time.time() 133 | esn = clone(base_esn).set_params(**params).fit(X_train, y_train) 134 | t_fit = time.time() - t1 135 | t1 = time.time() 136 | esn_par = clone(base_esn).set_params(**params).fit(X_train, y_train, 137 | n_jobs=-1) 138 | t_fit_par = time.time() - t1 139 | mem_size = esn.__sizeof__() 140 | t1 = time.time() 141 | acc_score = accuracy_score(y_test, esn.predict(X_test)) 142 | t_inference = time.time() - t1 143 | print(f"{esn_cv}\t{t_fit}\t{t_inference}\t{acc_score}\t{mem_size}") 144 | -------------------------------------------------------------------------------- /examples/experiments.py: -------------------------------------------------------------------------------- 1 | #!/bin/python 2 | 3 | """ 4 | This file contains several functions testing ELMs in different configurations, 5 | optimize them and save the results in data files and pickles 6 | """ 7 | 8 | import sys 9 | import os 10 | import numpy as np 11 | from scipy.signal import convolve2d 12 | 13 | import matplotlib.pyplot as plt 14 | from matplotlib.colors import ListedColormap 15 | 16 | from pyrcn.util import new_logger, argument_parser, get_mnist, tud_colors 17 | 18 | train_size = 60000 19 | 20 | 21 | def images_filter(images, kernel, stride=1): 22 | filtered = np.zeros(images.shape) 23 | for idx in range(images.shape[0]): 24 | filtered[idx, ...] = convolve2d(images[idx, ...], kernel, mode='same') 25 | return filtered 26 | 27 | 28 | def picture_gradient(directory): 29 | self_name = 'picture_gradient' 30 | logger = new_logger(self_name, directory=directory) 31 | X, y = get_mnist(directory) 32 | logger.info('Loaded MNIST successfully with {0} records' 33 | .format(X.shape[0])) 34 | # scale X so X in [0, 1] 35 | X /= 255. 
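    # Added note: the images_filter helper defined above convolves every
    # image in a stack with a 2-D kernel. A quick usage sketch with the
    # Laplace kernel that is also defined in list_kernels below
    # (illustrative only):
    #
    #     laplace = np.array([[-1., -1., -1.],
    #                         [-1., 8., -1.],
    #                         [-1., -1., -1.]])
    #     X_filtered = images_filter(X_images, laplace)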
36 | # reshape X 37 | X_images = X.reshape((X.shape[0], 28, 28)) 38 | 39 | list_kernels = [ 40 | {'name': 'laplace', 'kernel': np.array([[-1., -1., -1.], 41 | [-1., 8, -1.], 42 | [-1., -1., -1.]])}, 43 | {'name': 'mexicanhat', 'kernel': np.array([[0., 0., -1., 0., 0.], 44 | [0., -1., -2., -1., 0.], 45 | [-1., -2., 16, -2., -1.], 46 | [0., -1., -2., -1., 0.], 47 | [0., 0., -1., 0., 0.]])}, 48 | {'name': 'v_prewitt', 'kernel': np.array([[-1., -1., -1.], 49 | [0., 0., 0.], 50 | [1., 1., 1.]])}, 51 | {'name': 'h_prewitt', 'kernel': np.array([[-1., -1., -1.], 52 | [0., 0., 0.], 53 | [1., 1., 1.]]).T}, 54 | {'name': 'v_sobel', 'kernel': np.array([[-1., -2., -1.], 55 | [0., 0., 0.], 56 | [1., 2., 1.]])}, 57 | {'name': 'h_sobel', 'kernel': np.array([[-1., -2., -1.], 58 | [0., 0., 0.], 59 | [1., 2., 1.]]).T}] 60 | example_image_idx = 5 61 | 62 | fig, axs = plt.subplots(1, 4, figsize=(6, 2)) 63 | axs[0].imshow(X_images[example_image_idx], cmap=plt.cm.gray_r, 64 | interpolation='none') 65 | axs[0].set_title('no filter') 66 | axs[1].imshow(convolve2d(X_images[example_image_idx], 67 | list_kernels[0]['kernel'], mode='same'), 68 | cmap=plt.cm.gray_r, interpolation='none') 69 | axs[1].set_title('laplace') 70 | axs[2].imshow(convolve2d(X_images[example_image_idx], 71 | list_kernels[2]['kernel'], mode='same'), 72 | cmap=plt.cm.gray_r, interpolation='none') 73 | axs[2].set_title('vertical\nprewitt') 74 | axs[3].imshow(convolve2d(X_images[example_image_idx], 75 | list_kernels[5]['kernel'], mode='same'), 76 | cmap=plt.cm.gray_r, interpolation='none') 77 | axs[3].set_title('horizontal\nsobel') 78 | 79 | for ax in axs: 80 | ax.set_xticks([0, 27]) 81 | ax.set_xticklabels([0, 27]) 82 | ax.set_yticks([0, 27]) 83 | ax.set_yticklabels([0, 27]) 84 | 85 | fig.tight_layout() 86 | fig.savefig( 87 | os.path.join(directory, 'mnist-image-filters.pdf'), format='pdf') 88 | fig.savefig( 89 | os.path.join(os.environ['PGFPATH'], 'mnist-image-filters.pgf'), 90 | format='pgf') 91 | 92 | 93 | def plot_confusion(directory): 94 | filepath = os.path.join(os.environ['DATAPATH'], '/coates20210310/est_' 95 | 'coates-minibatch-pca50+kmeans16000_matrix-' 96 | 'predicted.npz') 97 | 98 | npzfile = np.load(filepath, allow_pickle=True) 99 | X_test = np.array(npzfile['X_test']) 100 | y_test = np.array(npzfile['y_test']).astype(int) 101 | y_pred = np.array(npzfile['y_pred']).astype(int) 102 | 103 | conf_matrix = np.zeros((10, 10)) 104 | 105 | X_example = X_test[(y_pred == 5) & (y_test == 6), ...] 106 | img_example = X_example[3, ...] 107 | imgpath = os.path.join(directory, 'confused6for5.png') 108 | plt.imsave( 109 | imgpath, img_example.reshape(28, 28), format='png', cmap=plt.cm.gray_r) 110 | 111 | for pred_idx in range(conf_matrix.shape[0]): 112 | for test_idx in range(conf_matrix.shape[1]): 113 | conf_matrix[pred_idx, test_idx] = \ 114 | int(np.sum((y_pred == pred_idx) & (y_test == test_idx))) 115 | 116 | tpr = np.zeros(10) 117 | tnr = np.zeros(10) 118 | 119 | for idx in range(10): 120 | tpr[idx] = conf_matrix[idx, idx] / np.sum(conf_matrix[idx, :]) 121 | tnr[idx] = conf_matrix[idx, idx] / np.sum(conf_matrix[:, idx]) 122 | 123 | conf_matrix_norm = np.zeros((10, 10)) 124 | 125 | # norm row by row! 
=> TPR 126 | for idx in range(10): 127 | conf_matrix_norm[idx, :] =\ 128 | conf_matrix[idx, :] / np.sum(conf_matrix[idx, :]) 129 | conf_matrix_norm[idx, idx] = 1 - conf_matrix_norm[idx, idx] 130 | 131 | # colormap 132 | n_colorsteps = 255 # in promille 133 | color_array = np.zeros((n_colorsteps, 4)) 134 | lower_margin = 255 135 | color_array[:lower_margin, :] += np.linspace( 136 | start=tud_colors['lightgreen'], stop=tud_colors['red'], 137 | num=lower_margin) 138 | 139 | cm = ListedColormap(color_array) 140 | 141 | fig = plt.figure(figsize=(4, 3)) 142 | 143 | ax = fig.add_axes([.15, .15, .75, .75]) 144 | img = ax.imshow(conf_matrix_norm * 100, interpolation='none', cmap=cm, 145 | origin='lower', alpha=.7) 146 | ax.set_xticks(np.arange(10)) 147 | ax.set_xticklabels(['{0:.0f}' 148 | .format(pred_idx) for pred_idx in np.arange(10)]) 149 | ax.set_xlabel('true') 150 | ax.set_yticks(np.arange(10)) 151 | ax.set_yticklabels(['{0:.0f}' 152 | .format(pred_idx) for pred_idx in np.arange(10)]) 153 | ax.set_ylabel('predicted') 154 | 155 | plt.colorbar(img, ax=ax, shrink=1., label='deviation from ideal TPR [%]') 156 | 157 | for pred_idx in range(conf_matrix.shape[0]): 158 | for test_idx in range(conf_matrix.shape[1]): 159 | ax.text(x=pred_idx, y=test_idx, s='{0:.0f}' 160 | .format(conf_matrix.T[pred_idx][test_idx]), 161 | fontsize='xx-small', verticalalignment='center', 162 | horizontalalignment='center') 163 | 164 | # plt.show() 165 | plt.savefig(os.path.join('./experiments/', 'confusion_matrix.pdf'), 166 | format='pdf') 167 | plt.savefig(os.path.join(os.environ['PGFPATH'], 'confusion_matrix.pgf'), 168 | format='pgf') 169 | 170 | 171 | def main(directory, params): 172 | if not os.path.isdir(directory): 173 | try: 174 | os.mkdir(directory) 175 | except PermissionError as e: 176 | print('mkdir failed due to missing privileges: {0}'.format(e)) 177 | exit(1) 178 | 179 | # subfolder for results 180 | file_dir = os.path.join(directory, 'experiments') 181 | if not os.path.isdir(file_dir): 182 | os.mkdir(file_dir) 183 | 184 | logger = new_logger('main', directory=file_dir) 185 | logger.info('Started main with directory={0} and params={1}' 186 | .format(directory, params)) 187 | 188 | # register parameters 189 | experiment_names = { 190 | 'picture_gradient': picture_gradient, 191 | 'plot_confusion': plot_confusion, 192 | } 193 | 194 | # run specified programs 195 | for param in params: 196 | if param in experiment_names: 197 | experiment_names[param](file_dir) 198 | else: 199 | logger.warning('Parameter {0} invalid/not found.'.format(param)) 200 | 201 | 202 | if __name__ == '__main__': 203 | parsed_args = argument_parser.parse_args(sys.argv[1:]) 204 | if os.path.isdir(parsed_args.out): 205 | main(parsed_args.out, parsed_args.params) 206 | else: 207 | main(parsed_args.params) 208 | exit(0) 209 | -------------------------------------------------------------------------------- /examples/f0_extraction.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import librosa 3 | 4 | from joblib import dump, load 5 | 6 | from sklearn.preprocessing import StandardScaler 7 | from sklearn.utils import shuffle 8 | from sklearn.utils.fixes import loguniform 9 | from scipy.stats import uniform 10 | from sklearn.base import clone 11 | from sklearn.pipeline import Pipeline 12 | from sklearn.model_selection import (ParameterGrid, RandomizedSearchCV, 13 | GridSearchCV) 14 | from sklearn.metrics import make_scorer, zero_one_loss 15 | from pyrcn.model_selection import 
SequentialSearchCV 16 | from pyrcn.util import FeatureExtractor 17 | from pyrcn.datasets import fetch_ptdb_tug_dataset 18 | from pyrcn.echo_state_network import ESNRegressor 19 | from pyrcn.base.blocks import PredefinedWeightsInputToNode 20 | 21 | 22 | def create_feature_extraction_pipeline(sr=16000): 23 | audio_loading = Pipeline([("load_audio", 24 | FeatureExtractor(func=librosa.load, 25 | kw_args={"sr": sr, 26 | "mono": True})), 27 | ("normal", 28 | FeatureExtractor(func=librosa.util.normalize, 29 | kw_args={"norm": np.inf}))]) 30 | 31 | feature_extractor = Pipeline([("mel_spectrogram", 32 | FeatureExtractor( 33 | func=librosa.feature.melspectrogram, 34 | kw_args={"sr": sr, "n_fft": 1024, 35 | "hop_length": 160, 36 | "window": 'hann', 37 | "center": False, "power": 2.0, 38 | "n_mels": 80, "fmin": 40, 39 | "fmax": 4000, "htk": True})), 40 | ("power_to_db", 41 | FeatureExtractor(func=librosa.power_to_db, 42 | kw_args={"ref": 1}))]) 43 | 44 | feature_extraction_pipeline = Pipeline( 45 | [("audio_loading", audio_loading), 46 | ("feature_extractor", feature_extractor)]) 47 | return feature_extraction_pipeline 48 | 49 | 50 | # Load and preprocess the dataset 51 | feature_extraction_pipeline = create_feature_extraction_pipeline() 52 | 53 | X_train, X_test, y_train, y_test = fetch_ptdb_tug_dataset( 54 | data_origin="/scratch/ws/1/s2575425-CSTR_VCTK_Corpus/SPEECH_DATA", 55 | data_home="/scratch/ws/1/s2575425-pyrcn/f0_estimation/dataset/6", 56 | preprocessor=feature_extraction_pipeline, force_preprocessing=False, 57 | augment=6) 58 | X_train, y_train = shuffle(X_train, y_train, random_state=0) 59 | 60 | scaler = StandardScaler().fit(np.concatenate(X_train)) 61 | for k, X in enumerate(X_train): 62 | X_train[k] = scaler.transform(X=X) 63 | for k, X in enumerate(X_test): 64 | X_test[k] = scaler.transform(X=X) 65 | 66 | 67 | # Define several error functions for $f_{0}$ extraction 68 | def gpe(y_true, y_pred): 69 | """ 70 | Gross pitch error 71 | ----------------- 72 | 73 | All frames that are considered voiced by both pitch tracker and ground 74 | truth, for which the relative pitch error is higher than a certain 75 | threshold (20 percent). 76 | """ 77 | idx = np.nonzero(y_true*y_pred)[0] 78 | if idx.size == 0: 79 | return np.inf 80 | else: 81 | return np.sum(np.abs(y_true[idx] - y_pred[idx]) > 0.2 * y_true[idx])\ 82 | / len(np.nonzero(y_true)[0]) 83 | 84 | 85 | def vde(y_true, y_pred): 86 | """ 87 | Voicing Decision Error 88 | ---------------------- 89 | 90 | Proportion of frames for which an incorrect voiced/unvoiced decision is 91 | made. 92 | """ 93 | return zero_one_loss(y_true, y_pred) 94 | 95 | 96 | def fpe(y_true, y_pred): 97 | """ 98 | Fine Pitch Error 99 | ---------------- 100 | 101 | Standard deviation of the distribution of relative error values (in cents) 102 | from the frames that do not have gross pitch errors. 103 | """ 104 | idx_voiced = np.nonzero(y_true * y_pred)[0] 105 | idx_correct = np.argwhere(np.abs(y_true - y_pred) <= 0.2 * y_true).ravel() 106 | idx = np.intersect1d(idx_voiced, idx_correct) 107 | if idx.size == 0: 108 | return 0 109 | else: 110 | return 100 * np.std(np.log2(y_pred[idx] / y_true[idx])) 111 | 112 | 113 | def ffe(y_true, y_pred): 114 | """ 115 | $f_{0}$ Frame Error 116 | ------------------- 117 | 118 | Proportion of frames for which an error (either according to the GPE or the 119 | VDE criterion) is made. 120 | 121 | FFE can be seen as a single measure for assessing the overall performance 122 | of a pitch tracker. 
123 | """ 124 | idx_correct = np.argwhere(np.abs(y_true - y_pred) <= 0.2 * y_true).ravel() 125 | return 1 - len(idx_correct) / len(y_true) 126 | 127 | 128 | def custom_scorer(y_true, y_pred): 129 | gross_pitch_error = np.zeros(shape=(len(y_true),)) 130 | for k, (y_t, y_p) in enumerate(zip(y_true, y_pred)): 131 | gross_pitch_error[k] = gpe(y_true=y_t[:, 0]*y_t[:, 1], 132 | y_pred=y_p[:, 0]*(y_p[:, 1] >= .5)) 133 | return np.mean(gross_pitch_error) 134 | 135 | 136 | gpe_scorer = make_scorer(custom_scorer, greater_is_better=False) 137 | 138 | kmeans = load("../f0/kmeans_500.joblib") 139 | w_in = np.divide(kmeans.cluster_centers_, 140 | np.linalg.norm(kmeans.cluster_centers_, axis=1)[:, None]) 141 | 142 | input_to_node = PredefinedWeightsInputToNode( 143 | predefined_input_weights=w_in.T, 144 | ) 145 | 146 | # Set up a ESN 147 | # To develop an ESN model for f0 estimation, we need to tune several hyper- 148 | # parameters, e.g., input_scaling, spectral_radius, bias_scaling and leaky 149 | # integration. 150 | # We follow the way proposed in the paper for multipitch tracking and for 151 | # acoustic modeling of piano music to optimize hyper-parameters sequentially. 152 | # We define the search spaces for each step together with the type of search 153 | # (a grid search in this context). 154 | # At last, we initialize an ESNRegressor with the desired output strategy and 155 | # with the initially fixed parameters. 156 | 157 | initially_fixed_params = {'hidden_layer_size': 500, 158 | 'k_in': 10, 159 | 'input_scaling': 0.4, 160 | 'input_activation': 'identity', 161 | 'bias_scaling': 0.0, 162 | 'spectral_radius': 0.0, 163 | 'leakage': 1.0, 164 | 'k_rec': 10, 165 | 'reservoir_activation': 'tanh', 166 | 'bidirectional': False, 167 | 'alpha': 1e-3, 168 | 'random_state': 42} 169 | 170 | step1_esn_params = {'input_scaling': uniform(loc=1e-2, scale=1), 171 | 'spectral_radius': uniform(loc=0, scale=2)} 172 | 173 | step2_esn_params = {'leakage': uniform(1e-5, 1e0)} 174 | step3_esn_params = {'bias_scaling': uniform(loc=0, scale=3)} 175 | step4_esn_params = {'alpha': loguniform(1e-5, 1e1)} 176 | 177 | kwargs_step1 = {'n_iter': 200, 'random_state': 42, 'verbose': 1, 'n_jobs': -1, 178 | 'scoring': gpe_scorer} 179 | kwargs_step2 = {'n_iter': 50, 'random_state': 42, 'verbose': 1, 'n_jobs': -1, 180 | 'scoring': gpe_scorer} 181 | kwargs_step3 = {'n_iter': 50, 'random_state': 42, 'verbose': 1, 'n_jobs': -1, 182 | 'scoring': gpe_scorer} 183 | kwargs_step4 = {'n_iter': 50, 'random_state': 42, 'verbose': 1, 'n_jobs': -1, 184 | 'scoring': gpe_scorer} 185 | 186 | searches = [('step1', RandomizedSearchCV, step1_esn_params, kwargs_step1), 187 | ('step2', RandomizedSearchCV, step2_esn_params, kwargs_step2), 188 | ('step3', RandomizedSearchCV, step3_esn_params, kwargs_step3), 189 | ('step4', RandomizedSearchCV, step4_esn_params, kwargs_step4)] 190 | 191 | base_esn = ESNRegressor(input_to_node=input_to_node).set_params( 192 | **initially_fixed_params) 193 | 194 | try: 195 | sequential_search = load( 196 | "../f0/sequential_search_f0_mel_km_500.joblib") 197 | except FileNotFoundError: 198 | print(FileNotFoundError) 199 | sequential_search = SequentialSearchCV( 200 | base_esn, searches=searches).fit(X_train, y_train) 201 | dump(sequential_search, 202 | "../f0/sequential_search_f0_mel_km_500.joblib") 203 | 204 | print(sequential_search) 205 | 206 | param_grid = { 207 | 'hidden_layer_size': [50, 100, 200, 400, 500, 800, 1000, 208 | 1600, 2000, 3200, 4000, 6400, 8000, 16000], 209 | } 210 | for params in ParameterGrid(param_grid): 
211 | estimator = clone(sequential_search.best_estimator_).set_params(**params) 212 | kmeans = load("../f0/kmeans_" + str(params["hidden_layer_size"]) 213 | + ".joblib") 214 | w_in = np.divide(kmeans.cluster_centers_, 215 | np.linalg.norm(kmeans.cluster_centers_, axis=1)[:, None]) 216 | estimator.input_to_node.predefined_input_weights = w_in.T 217 | try: 218 | cv = load("../f0/speech_ptdb_tug_kmeans_esn_" 219 | + str(params["hidden_layer_size"]) + "_0_6.joblib") 220 | except FileNotFoundError: 221 | cv = GridSearchCV(estimator=estimator, param_grid={}, 222 | scoring=gpe_scorer, n_jobs=5, verbose=10).fit( 223 | X=X_train, y=y_train) 224 | dump(cv, "../f0/speech_ptdb_tug_kmeans_esn_" 225 | + str(params["hidden_layer_size"]) + "_0_6.joblib") 226 | print(cv.cv_results_) 227 | -------------------------------------------------------------------------------- /examples/input-to-node.py: -------------------------------------------------------------------------------- 1 | """ 2 | An example of the Coates Idea on the digits dataset. 3 | """ 4 | import os 5 | import numpy as np 6 | 7 | from sklearn.decomposition import PCA 8 | from pyrcn.base.blocks import InputToNode 9 | 10 | from pyrcn.util import tud_colors, get_mnist 11 | 12 | import matplotlib.pyplot as plt 13 | import seaborn as sns 14 | 15 | 16 | sns.set_theme() 17 | example_image_idx = 5 18 | min_var = 3088.6875 19 | 20 | # EXPERIMENTS 21 | 22 | 23 | def input2node_distribution(directory): 24 | X, y = get_mnist(directory) 25 | 26 | X /= 255. 27 | 28 | pca = PCA(n_components=784).fit(X) 29 | X_pca = np.matmul(X, pca.components_.T) 30 | 31 | list_activation = ['tanh', 'relu', 'bounded_relu'] 32 | list_train = [X, X_pca] 33 | 34 | fig, axs = plt.subplots(nrows=2, ncols=3) 35 | 36 | for idx_activation in range(len(list_activation)): 37 | activation = list_activation[idx_activation] 38 | 39 | for idx_train in range(len(list_train)): 40 | ax = axs[idx_train, idx_activation] 41 | train = list_train[idx_train] 42 | 43 | if activation in ['tanh', '']: 44 | i2n = InputToNode(hidden_layer_size=1, random_state=82, 45 | input_scaling=50/784, bias_scaling=0., 46 | activation=activation) 47 | elif activation in ['relu', 'bounded_relu']: 48 | i2n = InputToNode(hidden_layer_size=1, random_state=82, 49 | input_scaling=1., bias_scaling=0., 50 | activation=activation) 51 | 52 | node_out = i2n.fit_transform(train, y) 53 | hist, bin_edges = np.histogram(node_out, bins=20, density=True) 54 | 55 | np.delete(bin_edges[:-1], hist <= 1e-3) 56 | np.delete(hist, hist <= 1e-3) 57 | 58 | if activation == 'bounded_relu': 59 | ax.hist(node_out, label=activation, density=True, 60 | bins=[.0, .1, .9, 1.], color=tud_colors['lightblue']) 61 | else: 62 | ax.hist(node_out, label=activation, density=True, bins=20, 63 | color=tud_colors['lightblue']) 64 | 65 | ax.grid(axis='y') 66 | ax.set_yscale('log') 67 | 68 | x_ticks = np.min(node_out), np.max(node_out) 69 | ax.set_xlim(x_ticks) 70 | 71 | if activation == 'tanh': 72 | x_ticks += (0.0, ) 73 | ax.set_xticks(x_ticks) 74 | ax.set_xticklabels( 75 | ['{0:.1f}'.format(x_tick) for x_tick in x_ticks]) 76 | 77 | axs[0, 0].set_title('tanh, orig.') 78 | axs[0, 1].set_title('relu, orig.') 79 | axs[0, 2].set_title('b. relu, orig.') 80 | axs[1, 0].set_title('tanh, pca') 81 | axs[1, 1].set_title('relu, pca') 82 | axs[1, 2].set_title('b. 
relu, pca') 83 | 84 | # plt.tight_layout() 85 | fig.tight_layout() 86 | fig.savefig(os.path.join(directory, 'node-out.pdf'), format='pdf') 87 | fig.savefig(os.path.join(directory, 'node-out.eps'), format='eps') 88 | plt.rc('pgf', texsystem='pdflatex') 89 | 90 | 91 | if __name__ == "__main__": 92 | directory = os.path.abspath('./examples/input-to-node/') 93 | input2node_distribution(directory=directory) 94 | -------------------------------------------------------------------------------- /examples/mackey-glass-t17.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # Timeseries prediction of the Mackey-Glass Equation with ESNs 5 | import numpy as np 6 | from sklearn.linear_model import Ridge 7 | from sklearn.metrics import mean_squared_error, make_scorer 8 | from sklearn.preprocessing import MinMaxScaler 9 | from sklearn.model_selection import TimeSeriesSplit, GridSearchCV 10 | 11 | from matplotlib import pyplot as plt 12 | import seaborn as sns 13 | 14 | 15 | from pyrcn.echo_state_network import ESNRegressor 16 | from pyrcn.extreme_learning_machine import ELMRegressor 17 | from pyrcn.model_selection import SequentialSearchCV 18 | from pyrcn.datasets import mackey_glass 19 | 20 | 21 | sns.set_theme() 22 | # Load the dataset and rescale it to a range of [-1, 1] 23 | X, y = mackey_glass(n_timesteps=20000) 24 | scaler = MinMaxScaler(feature_range=(-1, 1)).fit(X=X.reshape(-1, 1)) 25 | X = scaler.transform(X=X.reshape(-1, 1)) 26 | y = scaler.transform(y.reshape(-1, 1)).ravel() 27 | 28 | 29 | # Define Train/Test lengths 30 | trainLen = 1900 # number of time steps during which we train the network 31 | testLen = 2000 # number of time steps during which we test/run the network 32 | 33 | X_train = X[:trainLen] 34 | y_train = y[:trainLen] 35 | X_test = X[trainLen:trainLen+testLen] 36 | y_test = y[trainLen:trainLen+testLen] 37 | 38 | 39 | # Visualization 40 | fix, axs = plt.subplots() 41 | sns.lineplot(data=X_train.ravel(), ax=axs) 42 | sns.lineplot(data=y_train.ravel(), ax=axs) 43 | axs.set_xlim([0, 1900]) 44 | axs.set_xlabel('n') 45 | axs.set_ylabel('u[n]') 46 | plt.legend(["Input", "Target"]) 47 | plt.show() 48 | 49 | 50 | # Training and Prediction using vanilla ESNs and ELMs 51 | 52 | # In[ ]: 53 | 54 | 55 | # initialize an ESNRegressor 56 | esn = ESNRegressor() 57 | 58 | # initialize an ELMRegressor 59 | elm = ELMRegressor(regressor=Ridge()) 60 | 61 | # train a model 62 | esn.fit(X=X_train.reshape(-1, 1), y=y_train) 63 | elm.fit(X=X_train.reshape(-1, 1), y=y_train) 64 | 65 | # evaluate the models 66 | y_test_pred = esn.predict(X=X_test) 67 | print(mean_squared_error(y_test, y_test_pred)) 68 | y_test_pred = elm.predict(X=X_test) 69 | print(mean_squared_error(y_test, y_test_pred)) 70 | 71 | 72 | # Hyperparameter optimization ESN 73 | initially_fixed_params = {'hidden_layer_size': 100, 74 | 'input_activation': 'identity', 75 | 'bias_scaling': 0.0, 76 | 'reservoir_activation': 'tanh', 77 | 'leakage': 1.0, 78 | 'bidirectional': False, 79 | 'k_rec': 10, 80 | 'wash_out': 0, 81 | 'continuation': False, 82 | 'alpha': 1e-5, 83 | 'random_state': 42, 84 | 'requires_sequence': False} 85 | 86 | step1_esn_params = {'input_scaling': np.linspace(0.1, 5.0, 50), 87 | 'spectral_radius': np.linspace(0.0, 1.5, 16)} 88 | step2_esn_params = {'leakage': np.linspace(0.1, 1.0, 10)} 89 | step3_esn_params = {'bias_scaling': np.linspace(0.0, 1.5, 16)} 90 | 91 | scorer = make_scorer(score_func=mean_squared_error, greater_is_better=False) 92 
| 93 | kwargs = {'verbose': 5, 94 | 'scoring': scorer, 95 | 'n_jobs': -1, 96 | 'cv': TimeSeriesSplit()} 97 | 98 | esn = ESNRegressor(regressor=Ridge(), **initially_fixed_params) 99 | 100 | searches = [('step1', GridSearchCV, step1_esn_params, kwargs), 101 | ('step2', GridSearchCV, step2_esn_params, kwargs), 102 | ('step3', GridSearchCV, step3_esn_params, kwargs)] 103 | 104 | 105 | sequential_search_esn = SequentialSearchCV(esn, 106 | searches=searches).fit( 107 | X_train.reshape(-1, 1), y_train) 108 | 109 | # Hyperparameter optimization ELM 110 | initially_fixed_elm_params = {'hidden_layer_size': 100, 111 | 'activation': 'tanh', 112 | 'k_in': 1, 113 | 'alpha': 1e-5, 114 | 'random_state': 42} 115 | 116 | step1_elm_params = {'input_scaling': np.linspace(0.1, 5.0, 50)} 117 | step2_elm_params = {'bias_scaling': np.linspace(0.0, 1.5, 16)} 118 | 119 | scorer = make_scorer(score_func=mean_squared_error, greater_is_better=False) 120 | 121 | kwargs = {'verbose': 5, 122 | 'scoring': scorer, 123 | 'n_jobs': -1, 124 | 'cv': TimeSeriesSplit()} 125 | 126 | elm = ELMRegressor(regressor=Ridge(), **initially_fixed_elm_params) 127 | 128 | searches = [('step1', GridSearchCV, step1_elm_params, kwargs), 129 | ('step2', GridSearchCV, step2_elm_params, kwargs)] 130 | 131 | sequential_search_elm = SequentialSearchCV(elm, 132 | searches=searches).fit( 133 | X_train.reshape(-1, 1), y_train) 134 | 135 | 136 | # Final prediction and visualization 137 | print(sequential_search_esn.all_best_score_) 138 | print(sequential_search_elm.all_best_score_) 139 | 140 | esn = sequential_search_esn.best_estimator_ 141 | elm = sequential_search_elm.best_estimator_ 142 | 143 | y_train_pred_esn = esn.predict(X=X_train) 144 | y_train_pred_elm = elm.predict(X=X_train) 145 | y_test_pred_esn = esn.predict(X=X_test) 146 | y_test_pred_elm = elm.predict(X=X_test) 147 | 148 | test_err_esn = mean_squared_error(y_true=y_test, y_pred=y_test_pred_esn) 149 | test_err_elm = mean_squared_error(y_true=y_test, y_pred=y_test_pred_elm) 150 | 151 | print("Test MSE ESN:\t{0}".format(test_err_esn)) 152 | print("Test MSE ELM:\t{0}".format(test_err_elm)) 153 | 154 | # Prediction of the test set. 
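# Added note: this is one-step-ahead prediction, i.e. the true input signal
# X_test is fed to the model at every time step. A free-running (generative)
# forecast would feed each prediction back as the next input, roughly as in
# the following sketch (conceptual only; since 'continuation' is set to False
# above, the reservoir state is not carried over between predict calls, so a
# faithful version would require continuation=True):
#
#     u = X_test[0]
#     predictions = []
#     for n in range(len(X_test)):
#         u = esn.predict(X=u.reshape(1, -1))
#         predictions.append(u)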
155 | fig, axs = plt.subplots() 156 | sns.lineplot(data=y_test_pred_esn, ax=axs) 157 | sns.lineplot(data=y_test_pred_elm, ax=axs) 158 | axs.set_xlim([0, 1900]) 159 | axs.set_xlabel('n') 160 | axs.set_ylabel('u[n]') 161 | plt.legend(["ESN prediction", "ELM prediction"]) 162 | 163 | 164 | fig, axs = plt.subplots(1, 2, sharey=True) 165 | sns.heatmap(data=esn.hidden_layer_state[:100, :].T, ax=axs[0], cbar=False) 166 | axs[0].set_xlabel("Time Step") 167 | axs[0].set_ylabel("Neuron Index") 168 | sns.heatmap(data=elm.hidden_layer_state[:100, :].T, ax=axs[1]) 169 | axs[1].set_xlabel("Time Step") 170 | plt.show() 171 | -------------------------------------------------------------------------------- /examples/mnist_regressors.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | # MNIST classification using Extreme Learning Machines 4 | import numpy as np 5 | import time 6 | from sklearn.base import clone 7 | from sklearn.datasets import fetch_openml 8 | from sklearn.linear_model import Ridge 9 | from sklearn.preprocessing import MinMaxScaler 10 | from sklearn.model_selection import ( 11 | RandomizedSearchCV, GridSearchCV, ParameterGrid, 12 | cross_validate) 13 | from sklearn.utils.fixes import loguniform 14 | from sklearn.metrics import accuracy_score 15 | 16 | from pyrcn.model_selection import SequentialSearchCV 17 | from pyrcn.extreme_learning_machine import ELMClassifier 18 | 19 | 20 | # Load the dataset 21 | X, y = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame=False) 22 | 23 | # Train test split and normalize to a range between [-1, 1]. 24 | X = MinMaxScaler(feature_range=(-1, 1)).fit_transform(X=X) 25 | X_train, X_test = X[:60000], X[60000:] 26 | y_train, y_test = y[:60000].astype(int), y[60000:].astype(int) 27 | 28 | # # Prepare sequential hyperparameter tuning 29 | initially_fixed_params = { 30 | 'hidden_layer_size': 500, 31 | 'input_activation': 'tanh', 32 | 'k_in': 10, 33 | 'bias_scaling': 0.0, 34 | 'alpha': 1e-5, 35 | 'random_state': 42 36 | } 37 | 38 | step1_params = {'input_scaling': loguniform(1e-5, 1e1)} 39 | kwargs1 = { 40 | 'random_state': 42, 'verbose': 1, 'n_jobs': -1, 'n_iter': 5, 41 | 'scoring': 'accuracy' 42 | } 43 | step2_params = {'bias_scaling': np.linspace(0.0, 1.6, 16)} 44 | kwargs2 = {'verbose': 5, 'n_jobs': -1, 'scoring': 'accuracy'} 45 | 46 | elm = ELMClassifier(regressor=Ridge(), **initially_fixed_params) 47 | 48 | searches = [('step1', RandomizedSearchCV, step1_params, kwargs1), 49 | ('step2', GridSearchCV, step2_params, kwargs2)] 50 | 51 | # # Perform the sequential search 52 | sequential_search = SequentialSearchCV(elm, searches=searches).fit(X_train, 53 | y_train) 54 | best_params = sequential_search.best_estimator_.get_params() 55 | 56 | # # Test 57 | # Increase reservoir size and compare different regression methods. 58 | # Make sure that you have enough RAM for that, because all regression types 59 | # without chunk size require a lot of memory. This is the reason why, 60 | # especially for large datasets, the incremental regression is recommended. 
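# Added note: conceptually, the incremental regression accumulates the normal
# equations chunk by chunk instead of materializing the full hidden-state
# matrix, roughly as in this sketch (an illustration of the idea, not the
# actual PyRCN internals):
#
#     K += H_chunk.T @ H_chunk    # (n_hidden, n_hidden)
#     P += H_chunk.T @ Y_chunk    # (n_hidden, n_outputs)
#     W_out = np.linalg.solve(K + alpha * np.eye(K.shape[0]), P)
#
# Memory consumption then scales with the hidden layer size rather than with
# the number of training samples.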
61 | 62 | base_elm_ridge = ELMClassifier(regressor=Ridge(), **best_params) 63 | base_elm_inc = ELMClassifier(**best_params) 64 | base_elm_inc_chunk = clone(base_elm_inc).set_params(chunk_size=6000) 65 | 66 | param_grid = {'hidden_layer_size': [500, 1000, 2000, 4000, 8000, 16000]} 67 | 68 | print("CV results\tFit time\tInference time\tAccuracy score\tSize[Bytes]") 69 | for params in ParameterGrid(param_grid): 70 | elm_ridge_cv = cross_validate(clone(base_elm_ridge).set_params(**params), 71 | X=X_train, y=y_train) 72 | t1 = time.time() 73 | elm_ridge = clone(base_elm_ridge).set_params(**params).fit(X_train, 74 | y_train) 75 | t_fit = time.time() - t1 76 | mem_size = elm_ridge.__sizeof__() 77 | t1 = time.time() 78 | acc_score = accuracy_score(y_test, elm_ridge.predict(X_test)) 79 | t_inference = time.time() - t1 80 | print("{0}\t{1}\t{2}\t{3}\t{4}" 81 | .format(elm_ridge_cv, t_fit, t_inference, acc_score, mem_size)) 82 | elm_inc_cv = cross_validate(clone(base_elm_inc).set_params(**params), 83 | X=X_train, y=y_train) 84 | t1 = time.time() 85 | elm_inc = clone(base_elm_inc).set_params(**params).fit(X_train, y_train) 86 | t_fit = time.time() - t1 87 | mem_size = elm_inc.__sizeof__() 88 | t1 = time.time() 89 | acc_score = accuracy_score(y_test, elm_inc.predict(X_test)) 90 | t_inference = time.time() - t1 91 | print("{0}\t{1}\t{2}\t{3}\t{4}" 92 | .format(elm_inc_cv, t_fit, t_inference, acc_score, mem_size)) 93 | elm_inc_chunk_cv = cross_validate( 94 | clone(base_elm_inc_chunk).set_params(**params), 95 | X=X_train, y=y_train) 96 | t1 = time.time() 97 | elm_inc_chunk = clone(base_elm_inc_chunk).set_params(**params)\ 98 | .fit(X_train, y_train) 99 | t_fit = time.time() - t1 100 | mem_size = elm_inc_chunk.__sizeof__() 101 | t1 = time.time() 102 | acc_score = accuracy_score(y_test, elm_inc_chunk.predict(X_test)) 103 | t_inference = time.time() - t1 104 | print("{0}\t{1}\t{2}\t{3}\t{4}" 105 | .format(elm_inc_chunk_cv, t_fit, t_inference, acc_score, mem_size)) 106 | 107 | # In[ ]: 108 | -------------------------------------------------------------------------------- /examples/musical_note_prediction.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # # Prediction of musical notes 5 | # 6 | # ## Introduction 7 | # 8 | # This notebook adapts one reference experiment for note prediction using ESNs 9 | # from ([https://arxiv.org/abs/1812.11527](https://arxiv.org/abs/1812.11527)) 10 | # to PyRCN and shows that introducing bidirectional ESNs significantly 11 | # improves the results 12 | # in terms of Accuracy, already for rather small networks. 13 | # 14 | # The tutorial is based on numpy, scikit-learn, joblib and PyRCN. 15 | # We are using the ESNRegressor, because we further process the outputs of the 16 | # ESN. 17 | # Note that the same can also be done using the ESNClassifier. 
18 | import numpy as np
19 | import os
20 | from joblib import load
21 | from sklearn.base import clone
22 | from sklearn.model_selection import (ParameterGrid, RandomizedSearchCV,
23 |                                      GridSearchCV)
24 | from sklearn.metrics import make_scorer
25 | from sklearn.preprocessing import MultiLabelBinarizer
26 | 
27 | from sklearn.utils.fixes import loguniform
28 | from scipy.stats import uniform
29 | 
30 | from pyrcn.echo_state_network import ESNClassifier
31 | from pyrcn.model_selection import SequentialSearchCV
32 | from pyrcn.metrics import mean_squared_error, accuracy_score
33 | 
34 | 
35 | # ## Load the dataset, which is available at
36 | # http://www-etud.iro.umontreal.ca/~boulanni/icml2012.
37 | dataset_path = os.path.normpath(r"E:\MusicPrediction\Piano-midi.de.pickle")
38 | dataset = load(dataset_path)
39 | X_train = np.empty(shape=(len(dataset['train']) + len(dataset['valid']),),
40 |                    dtype=object)
41 | y_train = np.empty(shape=(len(dataset['train']) + len(dataset['valid']),),
42 |                    dtype=object)
43 | 
44 | X_test = np.empty(shape=(len(dataset['test']),), dtype=object)
45 | y_test = np.empty(shape=(len(dataset['test']),), dtype=object)
46 | print("Number of sequences in the training and test set: {0}, {1}"
47 |       .format(len(X_train), len(X_test)))
48 | 
49 | # Prepare the dataset
50 | #
51 | # We use the MultiLabelBinarizer to transform the sequences of MIDI pitches
52 | # into multi-hot vectors over all 128 MIDI pitch numbers (a piano uses 88).
53 | mlb = MultiLabelBinarizer(classes=range(128))
54 | for k, X in enumerate(dataset['train'] + dataset['valid']):
55 |     X_train[k] = mlb.fit_transform(X[:-1])
56 |     y_train[k] = mlb.fit_transform(X[1:])
57 | for k, X in enumerate(dataset['test']):
58 |     X_test[k] = mlb.transform(X[:-1])
59 |     y_test[k] = mlb.transform(X[1:])
60 | print("Shape of first sequences in the training and test set: {0}, {1}"
61 |       .format(X_train[0].shape, X_test[0].shape))
62 | 
63 | # ## Set up a basic ESN
64 | #
65 | # To develop an ESN model, we need to tune several hyper-parameters,
66 | # e.g., input_scaling, spectral_radius, bias_scaling and leaky integration.
67 | #
68 | # We follow the way proposed in the introductory paper of PyRCN
69 | # to optimize hyper-parameters sequentially.
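# Added note: the searches below score with the mean squared error on the
# predicted note probabilities (needs_proba=True); greater_is_better=False
# makes make_scorer negate the error, so that maximizing the score minimizes
# the error.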
70 | initially_fixed_params = { 71 | 'hidden_layer_size': 50, 72 | 'input_activation': 'identity', 73 | 'k_in': 10, 74 | 'input_scaling': 0.4, 75 | 'bias_scaling': 0.0, 76 | 'spectral_radius': 0.0, 77 | 'reservoir_activation': 'tanh', 78 | 'leakage': 1.0, 79 | 'bidirectional': False, 80 | 'k_rec': 10, 81 | 'alpha': 1e-3, 82 | 'random_state': 42 83 | } 84 | 85 | step1_esn_params = { 86 | 'input_scaling': uniform(loc=1e-2, scale=1), 87 | 'spectral_radius': uniform(loc=0, scale=2) 88 | } 89 | step2_esn_params = {'leakage': loguniform(1e-5, 1e0)} 90 | step3_esn_params = {'bias_scaling': np.linspace(0.0, 1.0, 11)} 91 | step4_esn_params = {'alpha': loguniform(1e-5, 1e1)} 92 | 93 | kwargs_step1 = { 94 | 'n_iter': 200, 'random_state': 42, 'verbose': 1, 'n_jobs': -1, 95 | 'scoring': make_scorer(mean_squared_error, greater_is_better=False, 96 | needs_proba=True) 97 | } 98 | kwargs_step2 = { 99 | 'n_iter': 50, 'random_state': 42, 'verbose': 1, 'n_jobs': -1, 100 | 'scoring': make_scorer(mean_squared_error, greater_is_better=False, 101 | needs_proba=True) 102 | } 103 | kwargs_step3 = { 104 | 'verbose': 1, 'n_jobs': -1, 105 | 'scoring': make_scorer(mean_squared_error, greater_is_better=False, 106 | needs_proba=True) 107 | } 108 | kwargs_step4 = { 109 | 'n_iter': 50, 'random_state': 42, 'verbose': 1, 'n_jobs': -1, 110 | 'scoring': make_scorer(mean_squared_error, greater_is_better=False, 111 | needs_proba=True) 112 | } 113 | 114 | searches = [('step1', RandomizedSearchCV, step1_esn_params, kwargs_step1), 115 | ('step2', RandomizedSearchCV, step2_esn_params, kwargs_step2), 116 | ('step3', GridSearchCV, step3_esn_params, kwargs_step3), 117 | ('step4', RandomizedSearchCV, step4_esn_params, kwargs_step4)] 118 | 119 | base_esn = ESNClassifier(**initially_fixed_params) 120 | sequential_search = \ 121 | SequentialSearchCV(base_esn, searches=searches).fit(X_train, y_train) 122 | 123 | # ## Test the ESN 124 | # 125 | # In the test case, we train the ESN using the entire training set as seen 126 | # before. Next, we compute the predicted outputs on the training and test set 127 | # and fix a threshold of 0.5, above a note is assumed to be predicted. 128 | # 129 | # We report the accuracy score for each frame in order to follow the reference 130 | # paper. 131 | param_grid = {'hidden_layer_size': [500, 1000, 2000, 4000, 5000]} 132 | base_esn = sequential_search.best_estimator_ 133 | 134 | for params in ParameterGrid(param_grid): 135 | print(params) 136 | esn = clone(base_esn).set_params(**params) 137 | esn.fit(X_train, y_train) 138 | training_score = accuracy_score(y_train, esn.predict(X_train)) 139 | test_score = accuracy_score(y_test, esn.predict(X_test)) 140 | print('{0}\t{1}'.format(training_score, test_score)) 141 | -------------------------------------------------------------------------------- /examples/setup_local.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# setup_local.py\n", 8 | "\n", 9 | "In this notebook, we will show how to use a virtual environment on ipython and jupyter notebooks and we will load a local directory library into our code.\n", 10 | "\n", 11 | "## Why use a virtual environment?\n", 12 | "\n", 13 | "Python is a very mighty programming language. The major strength of python are its partly user maintained, intuitively usable and vast libraries. 
Before you import them into your code, you first have to install these libraries and their dependencies for your interpreter. If you use `pip` to install them into your global interpreter, it will soon be clogged with loads of libraries and dependencies that need to be maintained and kept up to date. For this and many other reasons, we strongly recommend using virtual environments with the required libraries bundled with your project. There are mainly three steps:\n",
14 |     "\n",
15 |     "    1. Set up a virtual environment\n",
16 |     "    2. Set up the new virtual environment for IPython and add the kernel\n",
17 |     "    3. Start a Jupyter notebook and select the custom kernel\n",
18 |     "\n",
19 |     "As there is already [useful documentation](https://anbasile.github.io/posts/2017-06-25-jupyter-venv/) on how to do this, we will skip ahead to importing a local library.\n",
20 |     "\n",
21 |     "## Import local library\n",
22 |     "\n",
23 |     "To use local code with Jupyter notebooks, you need to add the local directory to Python's import path. First, let's have a look at what the current import path looks like."
24 |    ]
25 |   },
26 |   {
27 |    "cell_type": "code",
28 |    "execution_count": null,
29 |    "metadata": {},
30 |    "outputs": [],
31 |    "source": [
32 |     "import os, sys\n",
33 |     "\n",
34 |     "print(sys.path)"
35 |    ]
36 |   },
37 |   {
38 |    "cell_type": "markdown",
39 |    "metadata": {},
40 |    "source": [
41 |     "If we have set up the venv correctly, our venv path is now present in the `sys.path` variable. Now let's add our module path, which is one directory above the current working directory."
42 |    ]
43 |   },
44 |   {
45 |    "cell_type": "code",
46 |    "execution_count": null,
47 |    "metadata": {},
48 |    "outputs": [],
49 |    "source": [
50 |     "import os, sys\n",
51 |     "cwd = os.getcwd()\n",
52 |     "module_path = os.path.dirname(cwd) # target working directory\n",
53 |     "\n",
54 |     "sys.path = [item for item in sys.path if item != module_path] # remove module_path from sys.path\n",
55 |     "sys.path.append(module_path) # add module_path to sys.path\n",
56 |     "\n",
57 |     "print(sys.path)"
58 |    ]
59 |   },
60 |   {
61 |    "cell_type": "markdown",
62 |    "metadata": {},
63 |    "source": [
64 |     "Now that we have added the module path to the search paths of our environment, we can start importing our local module."
65 |    ]
66 |   },
67 |   {
68 |    "cell_type": "code",
69 |    "execution_count": null,
70 |    "metadata": {},
71 |    "outputs": [],
72 |    "source": [
73 |     "from pyrcn.extreme_learning_machine import ELMClassifier"
74 |    ]
75 |   },
76 |   {
77 |    "cell_type": "markdown",
78 |    "metadata": {},
79 |    "source": [
80 |     "Now it is time for a minimal working example."
81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": null, 86 | "metadata": {}, 87 | "outputs": [], 88 | "source": [ 89 | "from sklearn.datasets import load_iris, load_digits\n", 90 | "from sklearn.preprocessing import LabelBinarizer\n", 91 | "from sklearn.model_selection import train_test_split\n", 92 | "\n", 93 | "from pyrcn.extreme_learning_machine import ELMRegressor\n", 94 | "\n", 95 | "\n", 96 | "def test_iris():\n", 97 | " X, y = load_iris(return_X_y=True)\n", 98 | " X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05)\n", 99 | " lb = LabelBinarizer().fit(y)\n", 100 | " y_train_numeric = lb.transform(y_train)\n", 101 | " classifier = ELMClassifier(hidden_layer_size=10)\n", 102 | " classifier.fit(X_train, y_train_numeric)\n", 103 | " y_predicted_numeric = classifier.predict(X_test)\n", 104 | " y_predicted = lb.inverse_transform(y_predicted_numeric)\n", 105 | "\n", 106 | " for record in range(len(y_test)):\n", 107 | " print('predicted: {0} \\ttrue: {1}'.format(y_predicted[record], y_test[record]))\n", 108 | " \n", 109 | "\n", 110 | "test_iris()" 111 | ] 112 | } 113 | ], 114 | "metadata": { 115 | "kernelspec": { 116 | "display_name": "Python 3 (ipykernel)", 117 | "language": "python", 118 | "name": "python3" 119 | }, 120 | "language_info": { 121 | "codemirror_mode": { 122 | "name": "ipython", 123 | "version": 3 124 | }, 125 | "file_extension": ".py", 126 | "mimetype": "text/x-python", 127 | "name": "python", 128 | "nbconvert_exporter": "python", 129 | "pygments_lexer": "ipython3", 130 | "version": "3.8.6" 131 | } 132 | }, 133 | "nbformat": 4, 134 | "nbformat_minor": 4 135 | } 136 | -------------------------------------------------------------------------------- /examples/whitening.py: -------------------------------------------------------------------------------- 1 | #!/bin/python3 2 | 3 | import os 4 | 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | 8 | from sklearn.datasets import fetch_openml 9 | 10 | 11 | def image_whitening_cifar(): 12 | directory = os.path.abspath('./examples/image/') 13 | data_name = 'CIFAR_10_small' 14 | 15 | npzfilepath = os.path.join(directory, '{0}.npz'.format(data_name)) 16 | data_id = 40926 17 | 18 | if os.path.isfile(npzfilepath): 19 | npzfile = np.load(npzfilepath, allow_pickle=True) 20 | X, y = npzfile['X'], npzfile['y'] 21 | else: 22 | df = fetch_openml(data_id=data_id, as_frame=True) 23 | X, y = df.data, df.target 24 | np.savez(npzfilepath, X=X, y=y) 25 | 26 | print('{0} samples loaded'.format(X.shape[0])) 27 | 28 | def scale(X, scaling_range=(0., 1.)): 29 | return (X - np.min(X)) / (np.max(X) - np.min(X)) 30 | 31 | X /= 255. 
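    # ZCA whitening (a sketch of the math implemented below): with the
    # eigendecomposition of the covariance, cov = U diag(s) U^T, the
    # whitening matrix is
    #
    #     W = U diag(1 / sqrt(s)) U^T,
    #
    # so that the transformed data (W X^T)^T has approximately identity
    # covariance while staying as close as possible to the original pixel
    # space. Note that the scale helper defined above ignores its
    # scaling_range argument and always maps to [0, 1].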
32 | 
33 |     cov = np.cov(X.T)
34 |     u, s, v = np.linalg.svd(cov)
35 |     print(np.allclose(u.T, v))
36 |     W = np.matmul(u, np.matmul(np.diag(np.sqrt(np.reciprocal(
37 |         s, out=np.zeros_like(s), where=s != 0.))), u.T))
38 |     X_pca = np.matmul(W, X.T).T
39 | 
40 |     sample_image_idx = 195
41 |     sample_image = np.transpose(
42 |         X[sample_image_idx, :].reshape((3, 32, 32)), axes=(1, 2, 0))
43 |     sample_image_pca = np.transpose(
44 |         X_pca[sample_image_idx, :].reshape((3, 32, 32)), axes=(1, 2, 0))
45 | 
46 |     list_dict_filter = [
47 |         {
48 |             'offset': 0,
49 |             'color': 'r'
50 |         },
51 |         {
52 |             'offset': 32 * 32,
53 |             'color': 'g'
54 |         },
55 |         {
56 |             'offset': 32 * 32 * 2,
57 |             'color': 'b'
58 |         }
59 |     ]
60 | 
61 |     for dict_filter in list_dict_filter:
62 |         sample_filter_idx = 32 * 16 + 16 + dict_filter['offset']
63 |         sample_filter = np.transpose(
64 |             W[:, sample_filter_idx].reshape((3, 32, 32)), axes=(1, 2, 0))
65 |         plt.imsave(os.path.join(directory, 'cifar-filter-{0}-{1}x{2}.png'
66 |                    .format(dict_filter['color'],
67 |                            (sample_filter_idx % (32 ** 2)) // 32,
68 |                            (sample_filter_idx % (32 ** 2)) % 32)),
69 |                    scale(sample_filter))
70 | 
71 |     plt.imsave(os.path.join(directory, 'cifar-original.png'), sample_image)
72 |     plt.imsave(os.path.join(
73 |         directory, 'cifar-whitened.png'), scale(sample_image_pca))
74 | 
75 | 
76 | def image_whitening_mnist():
77 |     directory = os.path.abspath('./examples/image/')
78 |     data_name = 'mnist_784'
79 | 
80 |     npzfilepath = os.path.join(directory, '{0}.npz'.format(data_name))
81 |     data_id = 554
82 | 
83 |     if os.path.isfile(npzfilepath):
84 |         npzfile = np.load(npzfilepath, allow_pickle=True)
85 |         X, y = npzfile['X'], npzfile['y']
86 |     else:
87 |         df = fetch_openml(data_id=data_id, as_frame=True)
88 |         X, y = df.data, df.target
89 |         np.savez(npzfilepath, X=X, y=y)
90 | 
91 |     print('{0} samples loaded'.format(X.shape[0]))
92 | 
93 |     X /= 255.
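    # The same ZCA whitening construction as in image_whitening_cifar()
    # above, applied to single-channel 28x28 images instead of RGB images.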
94 | 
95 |     cov = np.cov(X.T)
96 |     u, s, v = np.linalg.svd(cov)
97 |     print(np.allclose(u.T, v))
98 |     W = np.matmul(u, np.matmul(np.diag(np.sqrt(np.reciprocal(
99 |         s, out=np.zeros_like(s), where=s != 0.))), u.T))
100 | 
101 |     X_pca = np.matmul(W, X.T).T
102 | 
103 |     sample_image_idx = 5
104 |     sample_image = X[sample_image_idx, :].reshape((28, 28))
105 |     sample_image_pca = X_pca[sample_image_idx, :].reshape((28, 28))
106 | 
107 |     sample_filter_idx = 28 * 14 + 14
108 |     sample_filter = W[:, sample_filter_idx].reshape((28, 28))
109 | 
110 |     plt.imsave(os.path.join(directory, 'mnist-original.png'), sample_image,
111 |                cmap=plt.cm.gray_r)
112 |     plt.imsave(os.path.join(directory, 'mnist-whitened.png'), sample_image_pca,
113 |                cmap=plt.cm.gray_r)
114 |     plt.imsave(os.path.join(directory, 'mnist-filter-{0}x{1}.png'
115 |                .format(sample_filter_idx // 28,
116 |                        sample_filter_idx % 28)),
117 |                sample_filter, cmap=plt.cm.gray_r)
118 | 
119 | 
120 | def main():
121 |     image_whitening_cifar()
122 |     # image_whitening_mnist()
123 |     return
124 | 
125 | 
126 | if __name__ == '__main__':
127 |     main()
--------------------------------------------------------------------------------
/mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]  # global options
2 | # add this to find packages without an __init__.py file
3 | namespace_packages = True
4 | strict_optional = True
5 | no_implicit_optional = True
6 | disallow_untyped_defs = True
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 |     "setuptools>=61.0",
4 |     "wheel"]
5 | build-backend = "setuptools.build_meta"
6 | 
7 | [project]
8 | name = "PyRCN"
9 | version = "0.0.18"
10 | authors = [
11 |     { name="Peter Steiner", email="peter.steiner@pyrcn.net" },
12 | ]
13 | readme = "README.md"
14 | requires-python = ">=3.9"
15 | classifiers = [
16 |     "Development Status :: 2 - Pre-Alpha",
17 |     "Intended Audience :: Science/Research",
18 |     "Programming Language :: Python :: 3",
19 |     "License :: OSI Approved :: BSD License",
20 |     "Operating System :: OS Independent",
21 | ]
22 | dependencies = [
23 |     "torch",
24 |     "torchvision",
25 |     "torchaudio",
26 |     "scikit-learn",
27 |     "pandas",
28 | ]
29 | 
30 | [tool.pytest.ini_options]
31 | minversion = "6.0"
32 | addopts = "-ra -q"
33 | testpaths = [
34 |     "tests",
35 | ]
36 | 
37 | [tool.setuptools.packages.find]
38 | where = ["src"]
39 | 
40 | [project.urls]
41 | Homepage = "https://pyrcn.net/"
42 | Documentation = "https://pyrcn.readthedocs.io/"
43 | Source = "https://github.com/TUD-STKS/PyRCN/"
44 | Issues = "https://github.com/TUD-STKS/PyRCN/issues"
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | # pytest.ini
2 | [pytest]
3 | minversion = 6.0
4 | addopts =
5 |     -ra -q -v
6 |     --doctest-modules
7 |     --junitxml=junit/test-results.xml
8 |     --cov=src/pyrcn
9 |     --cov-branch
10 |     --cov-report=xml
11 |     --cov-report=html
12 |     --cov-report=term-missing
13 | #    --pep257
14 | #    --flake8
15 |     --mypy
16 | 
17 | testpaths =
18 |     src/pyrcn
19 |     docs
20 |     tests
21 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | setuptools>=46.4.0
2 | scikit-learn>=1.4
3 | numpy>=1.18.1
4 | scipy>=1.4.0
5 | joblib>=0.13.2
6 | pandas>=1.0.0
7 | typing-extensions
8 | matplotlib
9 | seaborn
10 | ipywidgets
11 | ipympl
12 | tqdm
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = PyRCN
3 | version = 0.0.18
4 | author = Peter Steiner
5 | author_email = peter.steiner@pyrcn.net
6 | description = A scikit-learn-compatible framework for Reservoir Computing in Python
7 | long_description = file: README.md
8 | long_description_content_type = text/markdown
9 | url = https://github.com/TUD-STKS/PyRCN
10 | project_urls =
11 |     Documentation = https://pyrcn.readthedocs.io/
12 |     Funding = https://pyrcn.net/
13 |     Source = https://github.com/TUD-STKS/PyRCN/
14 |     Bug Tracker = https://github.com/TUD-STKS/PyRCN/issues
15 | classifiers =
16 |     Programming Language :: Python :: 3
17 |     Development Status :: 2 - Pre-Alpha
18 |     License :: OSI Approved :: BSD License
19 |     Operating System :: OS Independent
20 |     Intended Audience :: Science/Research
21 | 
22 | [options]
23 | package_dir =
24 |     = src
25 | packages = find:
26 | python_requires = >=3.9
27 | 
28 | [options.packages.find]
29 | where = src
30 | 
31 | [tool:pytest]
32 | testpaths = tests
--------------------------------------------------------------------------------
/src/pyrcn/__init__.py:
--------------------------------------------------------------------------------
1 | """The :mod:`pyrcn` module includes various reservoir computing algorithms."""
2 | 
3 | # Authors: Peter Steiner <peter.steiner@pyrcn.net>,
4 | # License: BSD 3 clause
5 | from ._version import __version__
6 | 
7 | from . import (base, echo_state_network, extreme_learning_machine,
8 |                linear_model, model_selection, nn, postprocessing,
9 |                preprocessing, projection, util)
10 | 
11 | 
12 | __all__ = ('__version__',
13 |            'base',
14 |            'echo_state_network',
15 |            'extreme_learning_machine',
16 |            'linear_model',
17 |            'model_selection',
18 |            'nn',
19 |            'postprocessing',
20 |            'preprocessing',
21 |            'projection',
22 |            'util')
--------------------------------------------------------------------------------
/src/pyrcn/_version.py:
--------------------------------------------------------------------------------
1 | """Version indicator for PyRCN."""
2 | __version__ = "0.0.17post1"
--------------------------------------------------------------------------------
/src/pyrcn/base/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | The :mod:`pyrcn.base` module provides base functionalities for PyRCN.
3 | 
4 | It contains activation functions and simple object-oriented implementations
5 | of the building blocks for Reservoir Computing Networks [#]_.
6 | 
7 | References
8 | ----------
9 | .. [#] P. Steiner et al., ‘PyRCN: A Toolbox for Exploration and Application
10 |     of Reservoir Computing Networks’, under review.
11 | """ 12 | 13 | # Authors: Peter Steiner , 14 | # License: BSD 3 clause 15 | 16 | from ._activations import (ACTIVATIONS, ACTIVATIONS_INVERSE, 17 | ACTIVATIONS_INVERSE_BOUNDS) 18 | from ._base import ( 19 | _uniform_random_input_weights, _uniform_random_weights, 20 | _normal_random_weights, _make_sparse, _antisymmetric_weights, 21 | _unitary_spectral_radius, _uniform_random_bias, 22 | _normal_random_recurrent_weights, _uniform_random_recurrent_weights) 23 | 24 | __all__ = ( 25 | 'ACTIVATIONS', 'ACTIVATIONS_INVERSE', 'ACTIVATIONS_INVERSE_BOUNDS', 26 | '_uniform_random_input_weights', '_uniform_random_weights', 27 | '_normal_random_weights', '_make_sparse', '_antisymmetric_weights', 28 | '_unitary_spectral_radius', '_uniform_random_bias', 29 | '_normal_random_recurrent_weights', '_uniform_random_recurrent_weights',) 30 | -------------------------------------------------------------------------------- /src/pyrcn/base/_activations.py: -------------------------------------------------------------------------------- 1 | """The :mod:`activations` contains various activation functions for PyRCN.""" 2 | 3 | # Authors: Peter Steiner 4 | # License: BSD 3 clause 5 | 6 | import numpy as np 7 | from sklearn.neural_network._base import ACTIVATIONS 8 | from typing import Dict, Callable 9 | 10 | 11 | def inplace_softplus(X: np.ndarray) -> None: 12 | """ 13 | Compute the softplux activation function inplace: 14 | .. math:: 15 | f(x) = \\mathrm{ln}(1 + e^{x}) 16 | 17 | Parameters 18 | ---------- 19 | X : numpy.ndarray 20 | 21 | beta : float, default=1 22 | Scaling factor 23 | """ 24 | np.log(1 + np.exp(X, out=X), out=X) 25 | 26 | 27 | def inplace_softmax(X: np.ndarray, beta: float = 1) -> None: 28 | """ 29 | Compute the softmax activation function inplace: 30 | .. math:: 31 | y_k = \\frac{e^{x_k}}{\\sum_{i=1}^{n} e^{x_i}} 32 | 33 | Parameters 34 | ---------- 35 | X : numpy.ndarray 36 | 37 | beta : float, default=1 38 | Scaling factor 39 | """ 40 | denominator = np.sum(np.exp(beta*X)) 41 | np.divide(np.exp(beta*X, out=X), denominator, out=X) 42 | 43 | 44 | def inplace_bounded_relu(X: np.ndarray) -> None: 45 | """ 46 | Compute the bounded rectified linear unit function inplace. 47 | 48 | Parameters 49 | ---------- 50 | X : ndarray 51 | The input data. 52 | """ 53 | np.minimum(np.maximum(X, 0, out=X), 1, out=X) 54 | 55 | 56 | def inplace_tanh_inverse(X: np.ndarray) -> None: 57 | """ 58 | Compute the tanh inverse function inplace. 59 | 60 | Parameters 61 | ---------- 62 | X : ndarray 63 | The input data. 64 | """ 65 | np.arctanh(X, out=X) 66 | 67 | 68 | def inplace_identity_inverse(X: np.ndarray) -> None: 69 | """ 70 | Compute the identity inverse function inplace. 71 | 72 | Parameters 73 | ---------- 74 | X : ndarray 75 | The input data. 76 | """ 77 | ACTIVATIONS['identity'](X) 78 | 79 | 80 | def inplace_logistic_inverse(X: np.ndarray) -> None: 81 | """ 82 | Compute the logistic inverse function inplace. 83 | 84 | Parameters 85 | ---------- 86 | X : ndarray 87 | The input data. 88 | """ 89 | np.negative(np.log(1 - X, out=X), out=X) 90 | 91 | 92 | def inplace_relu_inverse(X: np.ndarray) -> None: 93 | r""" 94 | Compute the relu inverse function inplace. 95 | 96 | The relu function is not invertible! 97 | This is an approximation assuming $x = f^{-1}(y=0) = 0$. 98 | It is valid in $x \in [0, \infty]$. 99 | 100 | Parameters 101 | ---------- 102 | X : ndarray 103 | The input data. 
104 | """ 105 | ACTIVATIONS['relu'](X) 106 | 107 | 108 | def inplace_bounded_relu_inverse(X: np.ndarray) -> None: 109 | r""" 110 | Compute the bounded relu inverse function inplace. 111 | 112 | The bounded relu function is not invertible! 113 | This is an approximation assuming 114 | $x = f^{-1}(y=0) = 0$ and $x = f^{-1}(y=1) = 1$. 115 | It is valid in $x \in [0, 1]$. 116 | 117 | Parameters 118 | ---------- 119 | X : ndarray 120 | The input data. 121 | """ 122 | ACTIVATIONS['bounded_relu'](X) 123 | 124 | 125 | ACTIVATIONS.update({'bounded_relu': inplace_bounded_relu, 126 | 'softmax': inplace_softmax, 127 | 'softplus': inplace_softplus}) 128 | 129 | ACTIVATIONS_INVERSE: Dict[str, Callable] = { 130 | 'tanh': inplace_tanh_inverse, 131 | 'identity': inplace_identity_inverse, 132 | 'logistic': inplace_logistic_inverse, 133 | 'relu': inplace_relu_inverse, 134 | 'bounded_relu': inplace_bounded_relu_inverse 135 | } 136 | 137 | ACTIVATIONS_INVERSE_BOUNDS: Dict[str, tuple] = { 138 | 'tanh': (-.99, .99), 139 | 'identity': (-np.inf, np.inf), 140 | 'logistic': (0.01, .99), 141 | 'relu': (0, np.inf), 142 | 'bounded_relu': (0, 1) 143 | } 144 | -------------------------------------------------------------------------------- /src/pyrcn/base/_base.py: -------------------------------------------------------------------------------- 1 | """The :mod:`autoencoder` contains base functionality for PyRCN.""" 2 | 3 | # Authors: Peter Steiner 4 | # License: BSD 3 clause 5 | 6 | import sys 7 | 8 | import numpy as np 9 | import scipy 10 | from scipy.sparse.linalg import eigs as eigens 11 | from scipy.sparse.linalg import ArpackNoConvergence 12 | 13 | if sys.version_info >= (3, 8): 14 | from typing import Union 15 | else: 16 | from typing import Union 17 | 18 | 19 | def _antisymmetric_weights( 20 | weights: Union[np.ndarray, scipy.sparse.csr_matrix]) \ 21 | -> Union[np.ndarray, scipy.sparse.csr_matrix]: 22 | """ 23 | Transform a given weight matrix to get antisymmetric, e.g., compute 24 | weights - weights.T 25 | 26 | Parameters 27 | ---------- 28 | weights : Union[np.ndarray, scipy.sparse.csr_matrix], 29 | shape=(hidden_layer_size, hidden_layer_size) 30 | The given square matrix with weight values. 31 | 32 | Returns 33 | ------- 34 | antisymmetric_weights : Union[np.ndarray,scipy.sparse.csr_matrix], 35 | shape=(hidden_layer_size, hidden_layer_size) 36 | The antisymmetric weight matrix. 37 | """ 38 | return weights - weights.transpose() 39 | 40 | 41 | def _unitary_spectral_radius( 42 | weights: Union[np.ndarray, scipy.sparse.csr_matrix], 43 | random_state: np.random.RandomState) \ 44 | -> Union[np.ndarray, scipy.sparse.csr_matrix]: 45 | """ 46 | Normalize a given weight matrix to the unitary spectral radius, e.g., 47 | the maximum absolute eigenvalue. 48 | 49 | Parameters 50 | ---------- 51 | weights : Union[np.ndarray, scipy.sparse.csr_matrix], 52 | shape=(hidden_layer_size, hidden_layer_size) 53 | The given square matrix with weight values. 54 | random_state : numpy.random.RandomState 55 | 56 | Returns 57 | ------- 58 | weights / np.amax(np.abs(eigenvalues)) : 59 | Union[np.ndarray,scipy.sparse.csr_matrix], 60 | shape=(hidden_layer_size, hidden_layer_size) 61 | The weight matrix, divided by its maximum eigenvalue. 62 | """ 63 | try: 64 | we = eigens( 65 | weights, k=np.minimum(10, weights.shape[0] - 2), which='LM', 66 | v0=random_state.normal(loc=0., scale=1., size=weights.shape[0]), 67 | return_eigenvectors=False) 68 | except ArpackNoConvergence as e: 69 | print("WARNING: No convergence! 
70 |         we = e.eigenvalues
71 |     return weights / np.amax(np.abs(we))
72 | 
73 | 
74 | def _make_sparse(k_in: int, dense_weights: np.ndarray,
75 |                  random_state: np.random.RandomState) \
76 |         -> scipy.sparse.csr_matrix:
77 |     """
78 |     Make a dense weight matrix sparse.
79 | 
80 |     Parameters
81 |     ----------
82 |     k_in : int
83 |         Determines how many inputs are mapped to one neuron.
84 |     dense_weights : np.ndarray, shape=(n_inputs, n_outputs)
85 |         The randomly initialized layer weights.
86 |     random_state : numpy.random.RandomState
87 | 
88 |     Returns
89 |     -------
90 |     sparse_weights : scipy.sparse.csr_matrix
91 |         The sparse layer weights
92 |     """
93 |     n_inputs, n_outputs = dense_weights.shape
94 | 
95 |     for neuron in range(n_outputs):
96 |         all_indices = np.arange(n_inputs)
97 |         keep_indices = random_state.choice(n_inputs, k_in, replace=False)
98 |         zero_indices = np.setdiff1d(all_indices, keep_indices)
99 |         dense_weights[zero_indices, neuron] = 0
100 | 
101 |     return scipy.sparse.csr_matrix(dense_weights, dtype='float64')
102 | 
103 | 
104 | def _normal_random_weights(
105 |         n_inputs: int, n_outputs: int, k_in: int,
106 |         random_state: np.random.RandomState) \
107 |         -> Union[np.ndarray, scipy.sparse.csr_matrix]:
108 |     """
109 |     Sparse or dense normal random weights.
110 | 
111 |     Parameters
112 |     ----------
113 |     n_inputs : int
114 |         Number of inputs to the layer (e.g., n_features).
115 |     n_outputs : int
116 |         Number of outputs of the layer (e.g., hidden_layer_size)
117 |     k_in : int
118 |         Determines how many inputs are mapped to one neuron.
119 |     random_state : numpy.random.RandomState
120 | 
121 |     Returns
122 |     -------
123 |     normal_random_weights : Union[np.ndarray,scipy.sparse.csr_matrix],
124 |         shape = (n_inputs, n_outputs)
125 |         The randomly initialized layer weights.
126 |     """
127 |     dense_weights = random_state.normal(
128 |         loc=0., scale=1., size=(n_inputs, n_outputs))
129 | 
130 |     if k_in < n_inputs:
131 |         return _make_sparse(k_in, dense_weights, random_state)
132 |     else:
133 |         return dense_weights
134 | 
135 | 
136 | def _uniform_random_weights(
137 |         n_inputs: int, n_outputs: int, k_in: int,
138 |         random_state: np.random.RandomState) \
139 |         -> Union[np.ndarray, scipy.sparse.csr_matrix]:
140 |     """
141 |     Sparse or dense uniform random weights in range [-1, 1].
142 | 
143 |     Parameters
144 |     ----------
145 |     n_inputs : int
146 |         Number of inputs to the layer (e.g., n_features).
147 |     n_outputs : int
148 |         Number of outputs of the layer (e.g., hidden_layer_size)
149 |     k_in : int
150 |         Determines how many inputs are mapped to one neuron.
151 |     random_state : numpy.random.RandomState
152 | 
153 |     Returns
154 |     -------
155 |     uniform_random_weights : Union[np.ndarray,scipy.sparse.csr_matrix],
156 |         shape = (n_inputs, n_outputs)
157 |         The randomly initialized layer weights.
158 |     """
159 |     dense_weights = random_state.uniform(
160 |         low=-1., high=1., size=(n_inputs, n_outputs))
161 | 
162 |     if k_in < n_inputs:
163 |         return _make_sparse(k_in, dense_weights, random_state)
164 |     else:
165 |         return dense_weights
166 | 
167 | 
168 | def _uniform_random_input_weights(
169 |         n_features_in: int, hidden_layer_size: int, fan_in: int,
170 |         random_state: np.random.RandomState) \
171 |         -> Union[np.ndarray, scipy.sparse.csr_matrix]:
172 |     """
173 |     Return uniform random input weights in range [-1, 1].
174 | 
175 |     Parameters
176 |     ----------
177 |     n_features_in : int
178 |     hidden_layer_size : int
179 |     fan_in : int
180 |         Determines how many features are mapped to one neuron.
181 |     random_state : numpy.random.RandomState
182 | 
183 |     Returns
184 |     -------
185 |     uniform_random_input_weights : Union[np.ndarray,
186 |         scipy.sparse.csr_matrix], shape = (n_features, hidden_layer_size)
187 |         The randomly initialized input weights.
188 |     """
189 |     input_weights = _uniform_random_weights(
190 |         n_inputs=n_features_in, n_outputs=hidden_layer_size, k_in=fan_in,
191 |         random_state=random_state)
192 |     return input_weights
193 | 
194 | 
195 | def _uniform_random_bias(
196 |         hidden_layer_size: int, random_state: np.random.RandomState) \
197 |         -> np.ndarray:
198 |     """
199 |     Return uniform random bias in range [-1, 1].
200 | 
201 |     Parameters
202 |     ----------
203 |     hidden_layer_size : int
204 |     random_state : numpy.random.RandomState
205 | 
206 |     Returns
207 |     -------
208 |     uniform_random_bias : ndarray of shape (hidden_layer_size, 1)
209 |     """
210 |     bias_weights = _uniform_random_weights(
211 |         n_inputs=hidden_layer_size, n_outputs=1, k_in=hidden_layer_size,
212 |         random_state=random_state)
213 |     return bias_weights
214 | 
215 | 
216 | def _uniform_random_recurrent_weights(
217 |         hidden_layer_size: int, fan_in: int,
218 |         random_state: np.random.RandomState) \
219 |         -> Union[np.ndarray, scipy.sparse.csr_matrix]:
220 |     """
221 |     Return antisymmetric, uniformly distributed random reservoir weights.
222 | 
223 |     Parameters
224 |     ----------
225 |     hidden_layer_size : Union[int, np.integer]
226 |     fan_in : Union[int, np.integer]
227 |         Determines how many features are mapped to one neuron.
228 |     random_state : numpy.random.RandomState
229 | 
230 |     Returns
231 |     -------
232 |     uniform_random_recurrent_weights : Union[np.ndarray,
233 |         scipy.sparse.csr_matrix], shape=(hidden_layer_size, hidden_layer_size)
234 |     """
235 |     recurrent_weights = _uniform_random_weights(
236 |         n_inputs=hidden_layer_size, n_outputs=hidden_layer_size, k_in=fan_in,
237 |         random_state=random_state)
238 |     return _antisymmetric_weights(recurrent_weights)
239 | 
240 | 
241 | def _normal_random_recurrent_weights(
242 |         hidden_layer_size: int, fan_in: int,
243 |         random_state: np.random.RandomState) \
244 |         -> Union[np.ndarray, scipy.sparse.csr_matrix]:
245 |     """
246 |     Return normal random reservoir weights with unit spectral radius.
247 | 
248 |     Parameters
249 |     ----------
250 |     hidden_layer_size : Union[int, np.integer]
251 |     fan_in : Union[int, np.integer]
252 |         Determines how many features are mapped to one neuron.
253 |     random_state : numpy.random.RandomState
254 | 
255 |     Returns
256 |     -------
257 |     normal_random_recurrent_weights : Union[np.ndarray,
258 |         scipy.sparse.csr_matrix], shape=(hidden_layer_size, hidden_layer_size)
259 |     """
260 |     recurrent_weights = _normal_random_weights(
261 |         n_inputs=hidden_layer_size, n_outputs=hidden_layer_size, k_in=fan_in,
262 |         random_state=random_state)
263 |     return _unitary_spectral_radius(
264 |         weights=recurrent_weights, random_state=random_state)
--------------------------------------------------------------------------------
/src/pyrcn/base/blocks/__init__.py:
--------------------------------------------------------------------------------
1 | """The :mod:`pyrcn.base.blocks` contains building blocks for Reservoir Computing."""
2 | 
3 | # Authors: Peter Steiner <peter.steiner@pyrcn.net>
4 | # License: BSD 3 clause
5 | 
6 | from ._input_to_node import (
7 |     InputToNode, PredefinedWeightsInputToNode, BatchIntrinsicPlasticity)
8 | from ._node_to_node import (
9 |     NodeToNode, EulerNodeToNode, PredefinedWeightsNodeToNode,
10 |     HebbianNodeToNode)
11 | 
12 | 
13 | __all__ = (
14 |     'InputToNode', 'PredefinedWeightsInputToNode', 'BatchIntrinsicPlasticity',
15 |     'NodeToNode', 'EulerNodeToNode', 'PredefinedWeightsNodeToNode',
16 |     'HebbianNodeToNode')
--------------------------------------------------------------------------------
/src/pyrcn/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | """The :mod:`pyrcn.datasets` includes datasets for reference experiments."""
2 | 
3 | # Authors: Peter Steiner <peter.steiner@pyrcn.net>
4 | # License: BSD 3 clause
5 | 
6 | from ._base import mackey_glass, lorenz, load_digits
7 | 
8 | 
9 | __all__ = ('mackey_glass', 'lorenz', 'load_digits')
--------------------------------------------------------------------------------
/src/pyrcn/echo_state_network/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | The :mod:`pyrcn.echo_state_network`.
3 | 
4 | It contains a simple object-oriented implementation of Echo State Networks
5 | [#]_ [#]_.
6 | 
7 | Separate implementations of Classifiers and Regressors as specified by
8 | scikit-learn.
9 | 
10 | References
11 | ----------
12 | .. [#] H. Jaeger, ‘The “echo state” approach to analysing
13 |     and training recurrent neural networks – with an
14 |     Erratum note’, GMD Report 148, 2001.
15 | .. [#] M. Lukoševičius, ‘A Practical Guide to Applying Echo
16 |     State Networks’, Jan. 2012, doi: 10.1007/978-3-642-35289-8_36.
17 | """
18 | 
19 | # Authors: Peter Steiner <peter.steiner@pyrcn.net>
20 | # License: BSD 3 clause
21 | 
22 | from ._esn import ESNClassifier, ESNRegressor
23 | 
24 | __all__ = ('ESNClassifier', 'ESNRegressor', )
--------------------------------------------------------------------------------
/src/pyrcn/extreme_learning_machine/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | The :mod:`pyrcn.extreme_learning_machine`.
3 | 
4 | It contains a simple object-oriented implementation of
5 | Extreme Learning Machines [#]_.
6 | 
7 | Separate implementations of Classifiers and Regressors as specified by
8 | scikit-learn.
9 | 
10 | References
11 | ----------
12 | .. [#] Guang-Bin Huang et al., ‘Extreme learning machine: Theory and
13 |     applications’, Neurocomputing, vol. 70, pp. 489-501, 2006,
14 |     doi: 10.1016/j.neucom.2005.12.126.
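Examples
--------
A minimal usage sketch; the hyperparameter value is illustrative only:

>>> from pyrcn.extreme_learning_machine import ELMClassifier
>>> clf = ELMClassifier(hidden_layer_size=50)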
14 | """ 15 | 16 | # Authors: Peter Steiner , 17 | # Michael Schindler 18 | # License: BSD 3 clause 19 | 20 | from ._elm import ELMClassifier, ELMRegressor 21 | 22 | __all__ = ('ELMClassifier', 'ELMRegressor') 23 | -------------------------------------------------------------------------------- /src/pyrcn/linear_model/__init__.py: -------------------------------------------------------------------------------- 1 | """The :mod:`pyrcn.linear_model` module has incremental linear regression.""" 2 | 3 | # Authors: Peter Steiner 4 | # License: BSD 3 clause 5 | 6 | from ._incremental_regression import IncrementalRegression 7 | 8 | __all__ = ('IncrementalRegression',) 9 | -------------------------------------------------------------------------------- /src/pyrcn/metrics/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | The :mod:`pyrcn.metrics` module includes score functions, performance metrics. 3 | 4 | Also, pairwise metrics and distance computations for sequence-to-sequence 5 | results. 6 | """ 7 | 8 | # Author: Peter Steiner 9 | # License: BSD 3 clause 10 | 11 | from ._classification import (accuracy_score, balanced_accuracy_score, 12 | classification_report, cohen_kappa_score, 13 | confusion_matrix, f1_score, fbeta_score, 14 | hamming_loss, hinge_loss, jaccard_score, 15 | log_loss, matthews_corrcoef, 16 | precision_recall_fscore_support, precision_score, 17 | recall_score, zero_one_loss, brier_score_loss, 18 | multilabel_confusion_matrix) 19 | from ..metrics._regression import (explained_variance_score, max_error, 20 | mean_absolute_error, mean_squared_error, 21 | mean_squared_log_error, 22 | median_absolute_error, 23 | mean_absolute_percentage_error, r2_score, 24 | mean_tweedie_deviance, 25 | mean_poisson_deviance, mean_gamma_deviance) 26 | 27 | 28 | __all__ = ('accuracy_score', 29 | 'balanced_accuracy_score', 30 | 'classification_report', 31 | 'cohen_kappa_score', 32 | 'confusion_matrix', 33 | 'f1_score', 34 | 'fbeta_score', 35 | 'hamming_loss', 36 | 'hinge_loss', 37 | 'jaccard_score', 38 | 'log_loss', 39 | 'matthews_corrcoef', 40 | 'precision_recall_fscore_support', 41 | 'precision_score', 42 | 'recall_score', 43 | 'zero_one_loss', 44 | 'brier_score_loss', 45 | 'multilabel_confusion_matrix', 46 | 'explained_variance_score', 47 | 'max_error', 48 | 'mean_absolute_error', 49 | 'mean_squared_error', 50 | 'mean_squared_log_error', 51 | 'median_absolute_error', 52 | 'mean_absolute_percentage_error', 53 | 'r2_score', 54 | 'mean_tweedie_deviance', 55 | 'mean_poisson_deviance', 56 | 'mean_gamma_deviance', 57 | ) 58 | -------------------------------------------------------------------------------- /src/pyrcn/model_selection/__init__.py: -------------------------------------------------------------------------------- 1 | """The :mod:`pyrcn.model_selection` to sequentially tune hyper-parameters.""" 2 | 3 | # Authors: Peter Steiner and 4 | # Simon Stone 5 | # License: BSD 3 clause 6 | 7 | from ._search import SequentialSearchCV, SHGOSearchCV 8 | 9 | __all__ = ('SequentialSearchCV', 'SHGOSearchCV') 10 | -------------------------------------------------------------------------------- /src/pyrcn/nn/__init__.py: -------------------------------------------------------------------------------- 1 | """These are the basic building blocks for RCNs in PyTorch.""" 2 | from ._forward_layers import ELM, LeakyELM 3 | from ._recurrent_layers import ( 4 | ESNCell, IdentityESNCell, IdentityEuSNCell, ESN, DelayLineReservoirCell, 5 | DelayLineReservoirESN, 
6 |     DelayLineReservoirWithFeedbackESN, SimpleCycleReservoirCell,
7 |     SimpleCycleReservoirESN)
8 | 
9 | 
10 | __all__ = [
11 |     "ELM", "LeakyELM", "ESNCell", "ESN", "IdentityESNCell", "IdentityEuSNCell",
12 |     "DelayLineReservoirCell", "DelayLineReservoirESN",
13 |     "DelayLineReservoirWithFeedbackCell", "DelayLineReservoirWithFeedbackESN",
14 |     "SimpleCycleReservoirCell", "SimpleCycleReservoirESN"]
--------------------------------------------------------------------------------
/src/pyrcn/nn/_activations.py:
--------------------------------------------------------------------------------
1 | """The :mod:`activations` contains various activation functions for PyRCN."""
2 | 
3 | # Authors: Peter Steiner <peter.steiner@pyrcn.net>
4 | # License: BSD 3 clause
5 | 
6 | from torch import nn
7 | 
8 | 
9 | ACTIVATIONS = {
10 |     "elu": nn.ELU,
11 |     "hard_shrink": nn.Hardshrink,
12 |     "hard_sigmoid": nn.Hardsigmoid,
13 |     "hard_tanh": nn.Hardtanh,
14 |     "hard_swish": nn.Hardswish,
15 |     "identity": nn.Identity,
16 |     "leaky_relu": nn.LeakyReLU,
17 |     "log_sigmoid": nn.LogSigmoid,
18 |     "relu": nn.ReLU,
19 |     "relu_6": nn.ReLU6,
20 |     "selu": nn.SELU,
21 |     "sigmoid": nn.Sigmoid,
22 |     "silu": nn.SiLU,
23 |     "mish": nn.Mish,
24 |     "tanh": nn.Tanh,
25 |     "tanh_shrink": nn.Tanhshrink,
26 | }
--------------------------------------------------------------------------------
/src/pyrcn/nn/init.py:
--------------------------------------------------------------------------------
1 | """Custom weight initialization methods."""
2 | import torch
3 | 
4 | 
5 | def antisymmetric_norm_(tensor: torch.Tensor) -> torch.Tensor:
6 |     """
7 |     Make a given square matrix antisymmetric, i.e., compute
8 |     weight - weight.T .
9 | 
10 |     Parameters
11 |     ----------
12 |     tensor : torch.Tensor, shape=(hidden_size, hidden_size)
13 |         Tensor (at least 2D) to be normalized.
14 | 
15 |     Returns
16 |     -------
17 |     tensor : torch.Tensor, shape=(hidden_size, hidden_size)
18 |         The normalized tensor (at least 2D).
19 |     """
20 |     with torch.no_grad():
21 |         tensor = tensor - tensor.T
22 |     return tensor
23 | 
24 | 
25 | def diffusion_norm_(tensor: torch.Tensor, gamma: float) -> torch.Tensor:
26 |     r"""
27 |     Shift a square matrix by a scaled identity, i.e., weight - \gamma I .
28 | 
29 |     Parameters
30 |     ----------
31 |     tensor : torch.Tensor, shape=(hidden_size, hidden_size)
32 |         Tensor (at least 2D) to be shifted.
33 |     gamma : float
34 |         The diffusion coefficient that scales the identity matrix.
35 | 
36 |     Returns
37 |     -------
38 |     tensor : torch.Tensor, the shifted tensor (at least 2D).
39 |     """
40 |     with torch.no_grad():
41 |         tensor = tensor - gamma * torch.eye(tensor.shape[0])
42 |     return tensor
43 | 
44 | 
45 | def spectral_norm_(tensor: torch.Tensor) -> torch.Tensor:
46 |     """
47 |     Normalize a given square matrix to a unit spectral radius, i.e., to a
48 |     maximum absolute eigenvalue of 1.
49 | 
50 |     Parameters
51 |     ----------
52 |     tensor : torch.Tensor, shape=(hidden_size, hidden_size)
53 |         Tensor (at least 2D) to be normalized.
54 | 
55 |     Returns
56 |     -------
57 |     tensor : torch.Tensor, shape=(hidden_size, hidden_size)
58 |         The normalized tensor (at least 2D).
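    Examples
    --------
    A minimal sketch, using a diagonal matrix whose eigenvalues are
    simply its diagonal entries:

    >>> w = torch.tensor([[2., 0.], [0., 1.]])
    >>> w = spectral_norm_(w)
    >>> float(w.abs().max())
    1.0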
59 | """ 60 | eigvals = torch.linalg.eigvals(tensor) 61 | with torch.no_grad(): 62 | tensor /= eigvals.abs().max() 63 | return tensor 64 | 65 | 66 | def dlr_weights_(tensor: torch.Tensor, forward_weight: float = 0.9) -> \ 67 | torch.Tensor: 68 | r""" 69 | Fills the 2D input `Tensor` such that the non-zero elements will be a 70 | delay line, and each non-zero element has exactly the same weight value, 71 | as described in `Minimum Complexity Echo State Network` - Rodan, A. (2010). 72 | 73 | Parameters 74 | ---------- 75 | tensor : torch.Tensor 76 | An n-dimensional `torch.Tensor`. 77 | forward_weight : float, default = 0.9 78 | The non-zero weight that is placed in the lower subdiagonal of the 79 | tensor. 80 | 81 | Returns 82 | ------- 83 | tensor : torch.Tensor 84 | An n-dimensional `torch.Tensor`, in which the lower subdiagonal is 85 | filled with always the same value. 86 | 87 | Examples 88 | -------- 89 | >>> w = torch.empty(3, 5) 90 | >>> dlr_weights_(w, forward_weight=0.9) 91 | """ 92 | if tensor.ndimension() != 2: 93 | raise ValueError("Only tensors with 2 dimensions are supported") 94 | 95 | rows, cols = tensor.shape 96 | 97 | with torch.no_grad(): 98 | tensor.zero_() 99 | for col_idx in range(1, cols): 100 | tensor[col_idx, col_idx-1] = forward_weight 101 | return tensor 102 | 103 | 104 | def dlrb_weights_(tensor, forward_weight: float = 0.9, 105 | feedback_weight: float = 0.1) -> torch.Tensor: 106 | r""" 107 | Fills the 2D input `Tensor` such that the non-zero elements will be a 108 | delay line with feedback connections, and each non-zero element has 109 | exactly the same weight value, as described in 110 | `Minimum Complexity Echo State Network` - Rodan, A. (2010). 111 | 112 | Parameters 113 | ---------- 114 | tensor : torch.Tensor 115 | An n-dimensional `torch.Tensor`. 116 | forward_weight : float, default = 0.9 117 | The non-zero weight that is placed in the lower subdiagonal of the 118 | tensor. 119 | feedback_weight : float, default = 0.1 120 | The non-zero weight that is placed in the upper subdiagonal of the 121 | tensor. 122 | 123 | Returns 124 | ------- 125 | tensor : torch.Tensor 126 | An n-dimensional `torch.Tensor`, in which the lower and upper 127 | subdiagonals are filled with always the same values. 128 | 129 | Examples 130 | -------- 131 | >>> w = torch.empty(3, 5) 132 | >>> dlrb_weights_(w, forward_weight=0.9, feedback_weight=0.1) 133 | """ 134 | if tensor.ndimension() != 2: 135 | raise ValueError("Only tensors with 2 dimensions are supported") 136 | 137 | rows, cols = tensor.shape 138 | 139 | with torch.no_grad(): 140 | tensor.zero_() 141 | for col_idx in range(1, cols): 142 | tensor[col_idx, col_idx-1] = forward_weight 143 | tensor[col_idx-1, col_idx] = feedback_weight 144 | return tensor 145 | 146 | 147 | def scr_weights_(tensor, forward_weight: float = 0.9) -> torch.Tensor: 148 | r""" 149 | Fills the 2D input `Tensor` such that the non-zero elements will be a 150 | cycle, and each non-zero element has exactly the same weight value, as 151 | described in `Minimum Complexity Echo State Network` - Rodan, A. (2010). 152 | 153 | Parameters 154 | ---------- 155 | tensor : torch.Tensor 156 | An n-dimensional `torch.Tensor`. 157 | forward_weight : float, default = 0.9 158 | The non-zero weight that is placed in the lower subdiagonal of the 159 | tensor. 160 | 161 | Returns 162 | ------- 163 | tensor : torch.Tensor 164 | An n-dimensional `torch.Tensor`, in which the lower and upper 165 | subdiagonals are filled with always the same values. 
166 | 
167 |     Examples
168 |     --------
169 |     >>> w = torch.empty(5, 5)
170 |     >>> w = scr_weights_(w, forward_weight=0.9)
171 |     """
172 |     if tensor.ndimension() != 2:
173 |         raise ValueError("Only tensors with 2 dimensions are supported")
174 | 
175 |     rows, cols = tensor.shape
176 | 
177 |     with torch.no_grad():
178 |         tensor.zero_()
179 |         for col_idx in range(cols):
180 |             tensor[col_idx, col_idx-1] = forward_weight
181 |     return tensor
182 | 
183 | 
184 | def bernoulli_(tensor: torch.Tensor, p: float = .5, std: float = 1.) \
185 |         -> torch.Tensor:
186 |     r"""
187 |     Fills the 2D input `Tensor` with a random sign matrix: binary numbers
188 |     (0 or 1) are drawn from a Bernoulli distribution
189 |     :math:`\text{Bernoulli}(\texttt{p})` and scaled to +/- ``std``, as
190 |     described in `Minimum Complexity Echo State Network` - Rodan, A. (2010).
191 | 
192 |     Parameters
193 |     ----------
194 |     tensor : torch.Tensor
195 |         An n-dimensional `torch.Tensor`.
196 |     p : float, default = 0.5
197 |         Probability of drawing a one when sampling from the
198 |         Bernoulli distribution.
199 |     std : float, default = 1.
200 |         The scaling factor for the weight matrix.
201 | 
202 |     Examples
203 |     --------
204 |     >>> w = torch.empty(3, 5)
205 |     >>> w = bernoulli_(w, p=.5, std=1.)
206 |     """
207 |     with torch.no_grad():
208 |         tensor.bernoulli_(p)
209 |         tensor *= 2*std
210 |         tensor -= std
211 |     return tensor
--------------------------------------------------------------------------------
/src/pyrcn/postprocessing/__init__.py:
--------------------------------------------------------------------------------
1 | """The :mod:`pyrcn.postprocessing` module provides postprocessing methods."""
2 | 
3 | # Authors: Peter Steiner <peter.steiner@pyrcn.net>,
4 | # License: BSD 3 clause
5 | 
6 | from ._normal_distribution import NormalDistribution
7 | 
8 | 
9 | __all__ = ('NormalDistribution',)
--------------------------------------------------------------------------------
/src/pyrcn/postprocessing/_normal_distribution.py:
--------------------------------------------------------------------------------
1 | """The :mod:`normal_distribution` contains a class for NormalDistribution."""
2 | 
3 | # Authors: Peter Steiner <peter.steiner@pyrcn.net>,
4 | # License: BSD 3 clause
5 | 
6 | from __future__ import annotations
7 | 
8 | from typing import Union, Any
9 | import scipy
10 | import scipy.stats
11 | import numpy as np
12 | 
13 | from sklearn.base import BaseEstimator, TransformerMixin
14 | 
15 | 
16 | class NormalDistribution(BaseEstimator, TransformerMixin):
17 |     """
18 |     Transform an input distribution to a normal distribution.
19 | 
20 |     Parameters
21 |     ----------
22 |     size : Union[int, np.integer], default=1
23 |         The number of random variates to draw in ``transform``.
24 |     """
25 | 
26 |     def __init__(self, size: Union[int, np.integer] = 1):
27 |         """Construct the NormalDistribution."""
28 |         self._transformer = scipy.stats.norm
29 |         self._mean = 0
30 |         self._std = 0
31 |         self._size = size
32 | 
33 |     def fit(self, X: np.ndarray, y: None = None) -> NormalDistribution:
34 |         """
35 |         Fit the NormalDistribution.
36 | 
37 |         Parameters
38 |         ----------
39 |         X : np.ndarray of shape (n_samples, n_features)
40 |             The input features
41 |         y : None
42 |             ignored
43 | 
44 |         Returns
45 |         -------
46 |         self : returns a trained NormalDistribution.
47 |         """
48 |         self._mean, self._std = self._transformer.fit(X)
49 |         return self
50 | 
51 |     def transform(self, X: np.ndarray, y: None = None) -> np.ndarray:
52 |         """
53 |         Draw random variates from the fitted normal distribution.
54 | 
55 |         Parameters
56 |         ----------
57 |         X : ndarray of size (n_samples, n_features)
58 | 
59 |         Returns
60 |         -------
61 |         y : ndarray of shape (size, )
62 |         """
63 |         return self._transformer.rvs(
64 |             loc=self._mean, scale=self._std, size=self._size)
65 | 
66 |     def fit_transform(self, X: np.ndarray, y: None = None, **fit_params: Any)\
67 |             -> np.ndarray:
68 |         """
69 |         Fit the estimator and transform the input matrix X.
70 | 
71 |         Parameters
72 |         ----------
73 |         X : ndarray of size (n_samples, n_features)
74 |         y : None
75 |             ignored
76 |         fit_params : Union[Dict, None]
77 |             ignored
78 | 
79 |         Returns
80 |         -------
81 |         y : ndarray of shape (size, )
82 |         """
83 |         self.fit(X=X, y=y)
84 |         return self.transform(X=X, y=y)
--------------------------------------------------------------------------------
/src/pyrcn/preprocessing/__init__.py:
--------------------------------------------------------------------------------
1 | """The :mod:`pyrcn.preprocessing` module provides preprocessing utilities."""
2 | 
3 | # Authors: Peter Steiner <peter.steiner@pyrcn.net>,
4 | # License: BSD 3 clause
5 | 
6 | from ._coates import Coates
7 | 
8 | __all__ = ('Coates',)
--------------------------------------------------------------------------------
/src/pyrcn/projection/__init__.py:
--------------------------------------------------------------------------------
1 | """The :mod:`pyrcn.projection` module implements value projections."""
2 | 
3 | # Authors: Peter Steiner <peter.steiner@pyrcn.net>,
4 | # License: BSD 3 clause
5 | 
6 | from ._value_projection import MatrixToValueProjection
7 | 
8 | 
9 | __all__ = ('MatrixToValueProjection',)
--------------------------------------------------------------------------------
/src/pyrcn/projection/_value_projection.py:
--------------------------------------------------------------------------------
1 | """The :mod:`value_projection` contains the MatrixToValueProjection."""
2 | 
3 | # Authors: Peter Steiner <peter.steiner@pyrcn.net>,
4 | # License: BSD 3 clause
5 | 
6 | from __future__ import annotations
7 | import sys
8 | 
9 | import numpy as np
10 | from sklearn.base import BaseEstimator, TransformerMixin
11 | 
12 | if sys.version_info >= (3, 8):
13 |     from typing import Literal, cast
14 | else:
15 |     from typing_extensions import Literal
16 |     from typing import cast
17 | 
18 | 
19 | class MatrixToValueProjection(BaseEstimator, TransformerMixin):
20 |     """
21 |     Project a matrix to a single value, e.g., the index of the maximum value.
22 | 
23 |     Parameters
24 |     ----------
25 |     output_strategy : Literal["winner_takes_all", "median", "last_value"],
26 |         default="winner_takes_all"
27 |         Strategy utilized to compute the index.
28 |     needs_proba : bool, default=False
29 |         Whether to return a probability estimate or the index.
30 |     """
31 | 
32 |     def __init__(self, output_strategy: Literal[
33 |             "winner_takes_all", "median", "last_value"] = "winner_takes_all",
34 |                  needs_proba: bool = False):
35 |         """Construct the MatrixToValueProjection."""
36 |         self._output_strategy = output_strategy
37 |         self._needs_proba = needs_proba
38 | 
39 |     def fit(self, X: np.ndarray, y: None = None) -> MatrixToValueProjection:
40 |         """
41 |         Fit the MatrixToValueProjection.
42 | 
43 |         Parameters
44 |         ----------
45 |         X : ndarray of shape (n_samples, n_features) or of shape (n_samples, )
46 |         y : None
47 |             Ignored.
48 | 
49 |         Returns
50 |         -------
51 |         self : Returns a trained MatrixToValueProjection model.
52 |         """
53 |         return self
54 | 
55 |     def transform(self, X: np.ndarray) -> np.ndarray:
56 |         """
57 |         Transform matrix to a value as defined.
58 | 
59 |         Parameters
60 |         ----------
61 |         X : ndarray of shape (n_samples, n_features) or of shape (n_samples, )
62 | 
63 |         Returns
64 |         -------
65 |         y : np.ndarray
66 |         """
67 |         if self._output_strategy == "winner_takes_all":
68 |             X = cast(np.ndarray, np.sum(X, axis=0))
69 |         elif self._output_strategy == "median":
70 |             X = cast(np.ndarray, np.median(X, axis=0))
71 |         elif self._output_strategy == "last_value":
72 |             X = X[-1, :]
73 |         if self._needs_proba:
74 |             return X
75 |         else:
76 |             return np.atleast_1d(np.argmax(X))
--------------------------------------------------------------------------------
/src/pyrcn/util/__init__.py:
--------------------------------------------------------------------------------
1 | """The :mod:`pyrcn.util` has utilities for running, testing and analyzing."""
2 | 
3 | # Author: Peter Steiner <peter.steiner@pyrcn.net> and
4 | #         Michael Schindler
5 | # License: BSD 3 clause
6 | 
7 | from ._util import (
8 |     new_logger, get_mnist, argument_parser, concatenate_sequences,
9 |     value_to_tuple, batched)
10 | from ._feature_extractor import FeatureExtractor
11 | 
12 | __all__ = ('new_logger', 'get_mnist', 'argument_parser', 'FeatureExtractor',
13 |            'concatenate_sequences', 'value_to_tuple', 'batched')
--------------------------------------------------------------------------------
/src/pyrcn/util/_feature_extractor.py:
--------------------------------------------------------------------------------
1 | """The :mod:`feature_extractor` contains a FeatureExtractor for audio files."""
2 | 
3 | # Authors: Peter Steiner <peter.steiner@pyrcn.net>,
4 | # License: BSD 3 clause
5 | 
6 | from __future__ import annotations
7 | 
8 | from typing import Union, Callable, Dict, Optional
9 | 
10 | import numpy as np
11 | from sklearn.preprocessing import FunctionTransformer
12 | 
13 | 
14 | class FeatureExtractor(FunctionTransformer):
15 |     """
16 |     Construct a transformer from an arbitrary callable.
17 | 
18 |     A FunctionTransformer forwards its X (and optionally y) arguments to a
19 |     user-defined function or function object and returns the result of this
20 |     function.
21 |     This is useful for stateless transformations such as taking the log of
22 |     frequencies, doing custom scaling, etc.
23 | 
24 |     Compared to sklearn.preprocessing.FunctionTransformer, it is possible to
25 |     pass a filename as X and process the underlying file.
26 | 
27 |     Note: If a lambda is used as the function, then the resulting transformer
28 |     will not be pickleable.
29 | 
30 |     Parameters
31 |     ----------
32 |     func : Union[Callable, None]
33 |         The callable to use for the transformation.
34 |         This will be passed the same arguments as transform,
35 |         with args and kwargs forwarded.
36 |         If func is None, then func will be the identity function.
37 |     kw_args : Union[Dict, None], default=None
38 |         Dictionary of additional keyword arguments to pass to func.
39 | 
40 |     """
41 | 
42 |     def __init__(self, func: Union[Callable, None],
43 |                  kw_args: Union[Dict, None] = None):
44 |         """Construct the FeatureExtractor."""
45 |         super().__init__(func=func, inverse_func=None, validate=False,
46 |                          accept_sparse=False, check_inverse=False,
47 |                          kw_args=kw_args, inv_kw_args=None)
48 | 
49 |     def fit(self, X: Union[str, np.ndarray], y: Optional[np.ndarray] = None)\
50 |             -> FeatureExtractor:
51 |         """
52 |         Fit transformer by checking X.
53 | 
54 |         Parameters
55 |         ----------
56 |         X : Union[str, np.ndarray]
57 |             Input that can either be a feature matrix or a filename.
58 |         y : Optional[np.ndarray], default=None
59 |             Target values (None for unsupervised transformations).
60 | """ 61 | super().fit(X=X, y=y) 62 | return self 63 | 64 | def transform(self, X: Union[str, np.ndarray]) -> np.ndarray: 65 | """ 66 | Transform X using the forward function. 67 | 68 | Parameters 69 | ---------- 70 | X : Union[str, np.ndarray] 71 | Input that can either be a feature matrix or a filename. 72 | 73 | Returns 74 | ------- 75 | X_out : array-like, shape (n_samples, n_features) 76 | Transformed input. 77 | 78 | """ 79 | X_out = self._transform(X=X, func=self.func, kw_args=self.kw_args) 80 | if type(X_out) is tuple: 81 | X_out = X_out[0] 82 | return X_out 83 | -------------------------------------------------------------------------------- /src/pyrcn/util/_util.py: -------------------------------------------------------------------------------- 1 | """The :mod:`pyrcn.util` has utilities for running, testing and analyzing.""" 2 | 3 | # Author: Peter Steiner , 4 | # Michael Schindler 5 | # License: BSD 3 clause 6 | 7 | import sys 8 | from typing import Union, Tuple, Iterable 9 | 10 | import random 11 | import os 12 | import torch 13 | import logging 14 | import argparse 15 | import numpy as np 16 | from itertools import islice 17 | 18 | from sklearn.utils import check_X_y, check_consistent_length 19 | from sklearn.datasets import fetch_openml 20 | 21 | 22 | argument_parser = argparse.ArgumentParser( 23 | description='Standard input parser for HPC on PyRCN.') 24 | argument_parser.add_argument('-o', '--out', metavar='outdir', nargs='?', 25 | help='output directory', dest='out', type=str) 26 | argument_parser.add_argument(dest='params', metavar='params', nargs='*', 27 | help='optional parameter for scripts') 28 | 29 | # noinspection PyArgumentList 30 | logging.basicConfig( 31 | level=logging.INFO, 32 | handlers=[logging.StreamHandler(sys.stdout)] 33 | ) 34 | 35 | 36 | def batched(iterable: Iterable, n: int) -> Tuple: 37 | """ 38 | Iterate over batches of size n. 39 | 40 | Parameters 41 | ---------- 42 | iterable : Iterable 43 | The object over which to be iterated. 44 | n : int 45 | The batch size 46 | 47 | Returns 48 | ------- 49 | batch : Tuple 50 | A batch from the iterable. 51 | 52 | Notes 53 | ----- 54 | Starting from Python 3.12, this is included in `itertools`_. 55 | 56 | .. _itertools: 57 | https://docs.python.org/3/library/itertools.html#itertools.batched 58 | """ 59 | # batched('ABCDEFG', 3) --> ABC DEF G 60 | if n < 1: 61 | raise ValueError('n must be at least one') 62 | it = iter(iterable) 63 | while batch := tuple(islice(it, n)): 64 | yield batch 65 | 66 | 67 | def value_to_tuple(value: Union[float, int], 68 | size: Union[float, int, Tuple[Union[float, int], ...]]) \ 69 | -> Tuple[Union[float, int], ...]: 70 | """ 71 | Convert a value to a tuple of values. 72 | 73 | Parameters 74 | ---------- 75 | value : Union[float, int, Tuple[Union[float, int], ...]] 76 | The value to be inserted in the tuple. 77 | size : int 78 | The length of the tuple. 79 | 80 | Returns 81 | ------- 82 | value : Tuple[Union[float, int], ...] 83 | Tuple of values. 84 | """ 85 | if isinstance(value, float) or isinstance(value, int): 86 | return (value, ) * size 87 | elif isinstance(value, Tuple): 88 | return value 89 | 90 | 91 | def seed_everything(seed: int = 42) -> None: 92 | """ 93 | Fix all random number generators to reproduce results. 94 | 95 | Parameters 96 | ---------- 97 | seed : int, default = 42 98 | The default seed for the random number generators. 
99 | """ 100 | random.seed(seed) 101 | os.environ['PYTHONHASHSEED'] = str(seed) 102 | np.random.seed(seed) 103 | torch.manual_seed(seed) 104 | torch.cuda.manual_seed(seed) 105 | torch.backends.cudnn.deterministic = True 106 | torch.backends.cudnn.benchmark = False 107 | 108 | 109 | def new_logger(name: str, directory: str = os.getcwd()) -> logging.Logger: 110 | """Register a new logger for logfiles.""" 111 | logger = logging.getLogger(name) 112 | logger.setLevel(logging.NOTSET) 113 | formatter = logging.Formatter( 114 | fmt='%(asctime)s %(levelname)s %(name)s %(message)s') 115 | handler = logging.FileHandler( 116 | os.path.join(directory, '{0}.log'.format(name))) 117 | handler.setFormatter(formatter) 118 | logger.addHandler(handler) 119 | return logger 120 | 121 | 122 | def get_mnist(directory: str = os.getcwd()) -> Tuple[np.ndarray, np.ndarray]: 123 | """Load the MNIST dataset from harddisk.""" 124 | npzfilepath = os.path.join(directory, 'MNIST.npz') 125 | 126 | if os.path.isfile(npzfilepath): 127 | npzfile = np.load(npzfilepath, allow_pickle=True) 128 | logging.info('Dataset loaded') 129 | return npzfile['X'], npzfile['y'] 130 | else: 131 | X, y = fetch_openml( 132 | data_id=554, return_X_y=True, cache=True, as_frame=False) 133 | logging.info('Fetched dataset') 134 | np.savez(npzfilepath, X=X, y=y) 135 | return X, y 136 | 137 | 138 | def concatenate_sequences(X: Union[list, np.ndarray], 139 | y: Union[list, np.ndarray], 140 | sequence_to_value: bool = False)\ 141 | -> Tuple[np.ndarray, np.ndarray, np.ndarray]: 142 | """ 143 | Concatenate multiple sequences to scikit-learn compatible numpy arrays. 144 | 145 | ´Parameters 146 | ----------- 147 | X : Union[list, np.ndarray] of shape=(n_sequences, ) 148 | All sequences. Note that all elements in ```X``` 149 | must have at least one equal dimension. 150 | y : Union[list, np.ndarray] of shape=(n_sequences, ) 151 | All sequences. Note that all elements in ```X``` 152 | must have at least one equal dimension. 
153 |     sequence_to_value : bool, default=False
154 |         If True, expand each element of y to the sequence length.
155 | 
156 |     Returns
157 |     -------
158 |     X : np.ndarray of shape=(n_samples, n_features)
159 |         Input data, where n_samples is the accumulated length of all sequences.
160 |     y : np.ndarray of shape=(n_samples, n_features) or shape=(n_samples, )
161 |         Target data, where n_samples is the accumulated length of all sequences.
162 |     sequence_ranges : Union[None, np.ndarray] of shape=(n_sequences, 2)
163 |         Sequence border indicator matrix.
164 |     """
165 |     if isinstance(X, list):
166 |         X = np.asarray(X)
167 |     if isinstance(y, list):
168 |         y = np.asarray(y)
169 |     X = np.array(X)
170 |     y = np.array(y)
171 |     if sequence_to_value:
172 |         for k, _ in enumerate(y):
173 |             y[k] = np.repeat(y[k], X[k].shape[0])
174 | 
175 |     check_consistent_length(X, y)
176 |     sequence_ranges: np.ndarray = np.ndarray([])
177 |     if X.ndim == 1:
178 |         sequence_ranges = np.zeros((X.shape[0], 2), dtype=int)
179 |         sequence_ranges[:, 1] = np.cumsum(
180 |             [X[k].shape[0] for k, _ in enumerate(X)])
181 |         sequence_ranges[1:, 0] = sequence_ranges[:-1, 1]
182 |         for k, _ in enumerate(X):
183 |             X[k], y[k] = check_X_y(X[k], y[k], multi_output=True)
184 |     return np.concatenate(X), np.concatenate(y), sequence_ranges
--------------------------------------------------------------------------------
/tests/test_activations.py:
--------------------------------------------------------------------------------
1 | """Testing for activation functions in the module (pyrcn.base)."""
2 | import numpy as np
3 | from scipy.special import expit as logistic_sigmoid
4 | from pyrcn.base import (ACTIVATIONS, ACTIVATIONS_INVERSE,
5 |                         ACTIVATIONS_INVERSE_BOUNDS)
6 | 
7 | 
8 | def test_all_activations_callable() -> None:
9 |     print('\ttest_all_activations_callable():')
10 |     assert any([not callable(func) for func in ACTIVATIONS.values()]) is False
11 | 
12 | 
13 | def test_all_inverse_activations_callable() -> None:
14 |     print('\ttest_all_inverse_activations_callable():')
15 |     assert any(
16 |         [not callable(func) for func in ACTIVATIONS_INVERSE.values()]) is False
17 | 
18 | 
19 | def test_inverse_subset_of_activations() -> None:
20 |     print('\ttest_inverse_subset_of_activations():')
21 |     assert set(ACTIVATIONS_INVERSE.keys()).issubset(set(ACTIVATIONS.keys()))
22 | 
23 | 
24 | def test_each_inverse_has_boundaries() -> None:
25 |     print('\ttest_each_inverse_has_boundaries():')
26 |     assert set(ACTIVATIONS_INVERSE.keys()) ==\
27 |         set(ACTIVATIONS_INVERSE_BOUNDS.keys())
28 | 
29 | 
30 | def test_each_inverse_boundary_tuple() -> None:
31 |     print('\ttest_each_inverse_boundary_tuple():')
32 |     assert any([not isinstance(bound, tuple)
33 |                 for bound in ACTIVATIONS_INVERSE_BOUNDS.values()]) is False
34 | 
35 | 
36 | def test_bounded_relu() -> None:
37 |     print('\ttest_bounded_relu():')
38 |     X = np.concatenate((np.full((1, ), -np.inf), np.arange(-5, 5),
39 |                         np.full((1, ), np.inf))).reshape(1, -1)
40 |     X_true = np.minimum(np.maximum(X, 0), 1)
41 |     ACTIVATIONS["bounded_relu"](X)
42 |     np.testing.assert_array_equal(X, X_true)
43 |     X_true = np.minimum(np.maximum(X, 0), 1)
44 |     ACTIVATIONS_INVERSE["bounded_relu"](X)
45 |     np.testing.assert_array_equal(X, X_true)
46 | 
47 | 
48 | def test_identity() -> None:
49 |     print('\ttest_identity():')
50 |     X = np.concatenate((np.full((1, ), -np.inf), np.arange(-5, 5),
51 |                         np.full((1, ), np.inf))).reshape(1, -1)
52 |     X_true = np.copy(X)
53 |     ACTIVATIONS["identity"](X)
54 |     np.testing.assert_array_equal(X, X_true)
55 |     ACTIVATIONS_INVERSE["identity"](X)
56 |     np.testing.assert_array_equal(X, X_true)
57 | 
58 | 
59 | def test_logistic() -> None:
60 |     print('\ttest_logistic():')
61 |     X = np.concatenate((np.full((1, ), -np.inf), np.arange(-5, 5),
62 |                         np.full((1, ), np.inf))).reshape(1, -1)
63 |     X_true = logistic_sigmoid(X)
64 |     ACTIVATIONS["logistic"](X)
65 |     np.testing.assert_array_equal(X, X_true)
66 |     X_true = np.negative(np.log(1 - X))
67 |     ACTIVATIONS_INVERSE["logistic"](X)
68 |     np.testing.assert_array_equal(X, X_true)
69 | 
70 | 
71 | def test_softplus() -> None:
72 |     print('\ttest_softplus():')
73 |     X = np.arange(-5, 5, dtype=float)
74 |     X_true = np.log(1 + np.exp(X))
75 |     ACTIVATIONS["softplus"](X)
76 |     np.testing.assert_array_equal(X, X_true)
77 | 
78 | 
79 | def test_relu() -> None:
80 |     print('\ttest_relu():')
81 |     X = np.concatenate((np.full((1, ), -np.inf), np.arange(-5, 5),
82 |                         np.full((1, ), np.inf))).reshape(1, -1)
83 |     X_true = np.maximum(X, 0)
84 |     ACTIVATIONS["relu"](X)
85 |     np.testing.assert_array_equal(X, X_true)
86 |     X_true = np.maximum(X, 0)
87 |     ACTIVATIONS_INVERSE["relu"](X)
88 |     np.testing.assert_array_equal(X, X_true)
89 | 
90 | 
91 | def test_softmax() -> None:
92 |     print('\ttest_softmax():')
93 |     X = np.arange(-5, 5, dtype=float)
94 |     X_true = np.exp(X) / np.exp(X).sum()
95 |     ACTIVATIONS["softmax"](X)
96 |     np.testing.assert_array_equal(X, X_true)
97 |     assert any([not X.sum() == 1.0, not X_true.sum() == 1.0,
98 |                 not X.sum() == X_true.sum()]) is False
99 | 
100 | 
101 | def test_tanh() -> None:
102 |     print('\ttest_tanh():')
103 |     X = np.concatenate((np.full((1, ), -np.inf), np.arange(-5, 5),
104 |                         np.full((1, ), np.inf))).reshape(1, -1)
105 |     X_true = np.tanh(X)
106 |     ACTIVATIONS["tanh"](X)
107 |     np.testing.assert_array_equal(X, X_true)
108 |     X_true = np.arctanh(X)
109 |     ACTIVATIONS_INVERSE["tanh"](X)
110 |     np.testing.assert_array_equal(X, X_true)
--------------------------------------------------------------------------------
/tests/test_coates.py:
--------------------------------------------------------------------------------
1 | """Testing for coates preprocessing module (pyrcn.preprocessing.coates)."""
2 | import numpy as np
3 | 
4 | from sklearn.datasets import load_digits
5 | from sklearn.cluster import KMeans
6 | 
7 | from pyrcn.preprocessing import Coates
8 | 
9 | 
10 | X_digits, y_digits = load_digits(return_X_y=True)
11 | 
12 | 
13 | def test_image_transform() -> None:
14 |     rs = np.random.RandomState(42)
15 |     test_image = rs.randint(0, 255, size=(2, 7, 11))
16 |     test_array = test_image.reshape((2, 7 * 11))
17 |     np.testing.assert_array_equal(Coates._reshape_images_to_arrays(
18 |         test_image, (7, 11))[0, :], test_array[0, :])
19 |     np.testing.assert_array_equal(Coates._reshape_arrays_to_images(
20 |         test_array, (7, 11))[0, :], test_image[0, :])
21 | 
22 | 
23 | def test_fit() -> None:
24 |     trf = Coates(
25 |         image_size=(8, 8),
26 |         patch_size=(3, 3),
27 |         n_patches=200,
28 |         clusterer=KMeans(n_clusters=20, random_state=42),
29 |         random_state=42)
30 |     trf.fit(X_digits)
31 |     assert len(trf.clusterer.cluster_centers_) == 20
--------------------------------------------------------------------------------
/tests/test_elm.py:
--------------------------------------------------------------------------------
1 | """Testing for Extreme Learning Machine module."""
2 | 
3 | import numpy as np
4 | import pytest
5 | from sklearn.datasets import load_iris, load_digits
6 | from sklearn.model_selection import train_test_split
7 | from sklearn.model_selection import GridSearchCV
8 | from sklearn.pipeline import FeatureUnion
9 | from sklearn.linear_model import Ridge
sklearn.linear_model import Ridge 10 | from sklearn.exceptions import NotFittedError 11 | from sklearn.base import clone 12 | 13 | from pyrcn.base.blocks import InputToNode 14 | from pyrcn.linear_model import IncrementalRegression 15 | from pyrcn.extreme_learning_machine import ELMClassifier, ELMRegressor 16 | 17 | 18 | X_iris, y_iris = load_iris(return_X_y=True) 19 | 20 | 21 | def test_elm_get_params() -> None: 22 | print('\ntest_elm_get_params():') 23 | elm = ELMClassifier() 24 | elm_params = elm.get_params() 25 | print(elm_params) 26 | 27 | 28 | def test_elm_classifier_fit() -> None: 29 | print('\ntest_elm_classifier_fit():') 30 | X, y = load_digits(return_X_y=True) 31 | elm = ELMClassifier(hidden_layer_size=50) 32 | elm.fit(X, y) 33 | 34 | 35 | def test_elm_classifier_partial_fit() -> None: 36 | print('\ntest_elm_classifier_partial_fit():') 37 | X, y = load_digits(return_X_y=True) 38 | elm = ELMClassifier(hidden_layer_size=50) 39 | for k in range(10): 40 | elm.partial_fit(X[k:k+1, :], y[k:k+1], classes=np.arange(10)) 41 | print(elm.__sizeof__()) 42 | print(elm.hidden_layer_state(X)) 43 | elm = ELMClassifier(hidden_layer_size=50, regressor=Ridge()) 44 | with pytest.raises(BaseException): 45 | for k in range(10): 46 | elm.partial_fit(X[k:k+1, :], y[k:k+1], classes=np.arange(10)) 47 | 48 | 49 | def test_elm_regressor_jobs() -> None: 50 | print('\ntest_elm_regressor_jobs():') 51 | X = np.linspace(0, 10, 2000) 52 | y = np.hstack((np.sin(X).reshape(-1, 1), np.cos(X).reshape(-1, 1))) 53 | X_train, X_test, y_train, y_test = train_test_split( 54 | X, y, test_size=10, random_state=42) 55 | param_grid = { 56 | 'input_to_node': [ 57 | InputToNode( 58 | bias_scaling=10., hidden_layer_size=20, random_state=42), 59 | InputToNode( 60 | bias_scaling=10., hidden_layer_size=50, random_state=42)], 61 | 'regressor': [IncrementalRegression(alpha=.0001), 62 | IncrementalRegression(alpha=.01)], 63 | 'random_state': [42]} 64 | elm = GridSearchCV(ELMRegressor(), param_grid) 65 | elm.fit(X_train.reshape(-1, 1), y_train, n_jobs=2) 66 | y_elm = elm.predict(X_test.reshape(-1, 1)) 67 | print("tests - elm:\n sin | cos \n {0}".format(y_test-y_elm)) 68 | print("best_params_: {0}".format(elm.best_params_)) 69 | print("best_score: {0}".format(elm.best_score_)) 70 | np.testing.assert_allclose(y_test, y_elm, atol=1e-1) 71 | 72 | 73 | def test_elm_regressor_chunk() -> None: 74 | print('\ntest_elm_regressor_chunk():') 75 | X = np.linspace(0, 10, 2000) 76 | y = np.hstack((np.sin(X).reshape(-1, 1), np.cos(X).reshape(-1, 1))) 77 | X_train, X_test, y_train, y_test = train_test_split( 78 | X, y, test_size=10, random_state=42) 79 | param_grid = { 80 | 'input_to_node__hidden_layer_size': [20, 50], 81 | 'input_to_node__input_scaling': [1.], 82 | 'input_to_node__bias_scaling': [10.], 83 | 'input_to_node__activation': ['tanh'], 84 | 'input_to_node__random_state': [42], 85 | 'chunk_size': [500], 86 | 'regressor__alpha': [1e-2, 1e-5], 87 | 'random_state': [42] 88 | } 89 | elm = GridSearchCV(ELMRegressor(), param_grid) 90 | elm.fit(X_train.reshape(-1, 1), y_train, n_jobs=2) 91 | y_elm = elm.predict(X_test.reshape(-1, 1)) 92 | print("tests - elm:\n sin | cos \n {0}".format(y_test-y_elm)) 93 | print("best_params_: {0}".format(elm.best_params_)) 94 | print("best_score: {0}".format(elm.best_score_)) 95 | np.testing.assert_allclose(y_test, y_elm, atol=1e-1) 96 | elm.fit(X_train.reshape(-1, 1), y_train) 97 | y_elm = elm.predict(X_test.reshape(-1, 1)) 98 | print("tests - elm:\n sin | cos \n {0}".format(y_test-y_elm)) 99 | print("best_params_: 
{0}".format(elm.best_params_)) 100 | print("best_score: {0}".format(elm.best_score_)) 101 | np.testing.assert_allclose(y_test, y_elm, atol=1e-1) 102 | with pytest.raises(ValueError): 103 | elm = clone(elm.best_estimator_).set_params(chunk_size=-1) 104 | elm.fit(X_train.reshape(-1, 1), y_train) 105 | 106 | 107 | def test_elm_classifier_not_fitted() -> None: 108 | X, y = load_digits(return_X_y=True) 109 | with pytest.raises(NotFittedError): 110 | ELMClassifier(hidden_layer_size=50, verbose=True).predict(X) 111 | 112 | 113 | def test_iris_ensemble_iterative_regression() -> None: 114 | print('\ntest_iris_ensemble_iterative_regression():') 115 | X_train, X_test, y_train, y_test = train_test_split( 116 | X_iris, y_iris, test_size=5, random_state=42) 117 | 118 | cls = ELMClassifier( 119 | input_to_node=FeatureUnion([ 120 | ('tanh', InputToNode(hidden_layer_size=10, random_state=42, 121 | input_activation='tanh')), 122 | ('bounded_relu', InputToNode(hidden_layer_size=10, random_state=42, 123 | input_activation='bounded_relu'))]), 124 | regressor=IncrementalRegression(alpha=.01), 125 | random_state=42) 126 | 127 | for samples in np.split(np.arange(0, X_train.shape[0]), 5): 128 | cls.partial_fit(X_train[samples, :], y_train[samples], 129 | classes=np.arange(3, dtype=int)) 130 | y_predicted = cls.predict(X_test) 131 | 132 | for record in range(len(y_test)): 133 | print('predicted: {0} \ttrue: {1}' 134 | .format(y_predicted[record], y_test[record])) 135 | 136 | print('score: {0}'.format(cls.score(X_test, y_test))) 137 | print('proba: {0}'.format(cls.predict_proba(X_test))) 138 | print('log_proba: {0}'.format(cls.predict_log_proba(X_test))) 139 | assert cls.score(X_test, y_test) >= 4./5. 140 | 141 | 142 | def test_elm_classifier_no_valid_params() -> None: 143 | X, y = load_digits(return_X_y=True) 144 | with pytest.raises(TypeError): 145 | ELMClassifier(input_to_node=ELMRegressor()).fit(X, y) 146 | with pytest.raises(TypeError): 147 | ELMClassifier(regressor=InputToNode()).fit(X, y) 148 | -------------------------------------------------------------------------------- /tests/test_esn.py: -------------------------------------------------------------------------------- 1 | """Testing for Echo State Network module.""" 2 | import numpy as np 3 | import pytest 4 | from pyrcn.datasets import mackey_glass, load_digits 5 | from sklearn.metrics import make_scorer 6 | from sklearn.model_selection import train_test_split 7 | from sklearn.model_selection import GridSearchCV, TimeSeriesSplit 8 | from sklearn.linear_model import Ridge 9 | from sklearn.exceptions import NotFittedError 10 | 11 | from pyrcn.base.blocks import InputToNode, NodeToNode 12 | from pyrcn.linear_model import IncrementalRegression 13 | from pyrcn.echo_state_network import ESNRegressor, ESNClassifier 14 | from pyrcn.metrics import mean_squared_error 15 | 16 | 17 | def test_esn_get_params() -> None: 18 | print('\ntest_esn_get_params():') 19 | esn = ESNClassifier() 20 | esn_params = esn.get_params() 21 | print(esn_params) 22 | 23 | 24 | def test_esn_regressor_jobs() -> None: 25 | print('\ntest_esn_regressor_jobs():') 26 | X, y = mackey_glass(n_timesteps=8000) 27 | X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False) 28 | param_grid = { 29 | "input_to_node": [ 30 | InputToNode( 31 | bias_scaling=.1, hidden_layer_size=10, 32 | input_activation='identity', random_state=42), 33 | InputToNode( 34 | bias_scaling=.1, hidden_layer_size=50, 35 | input_activation='identity', random_state=42)], 36 | "node_to_node": [ 37 | NodeToNode( 38 | 
spectral_radius=0., hidden_layer_size=10, random_state=42), 39 | NodeToNode( 40 | spectral_radius=1., hidden_layer_size=50, random_state=42)], 41 | "regressor": [ 42 | IncrementalRegression(alpha=.0001), 43 | IncrementalRegression(alpha=.01)], 44 | 'random_state': [42]} 45 | esn = GridSearchCV(estimator=ESNRegressor(), param_grid=param_grid) 46 | esn.fit(X_train.reshape(-1, 1), y_train, n_jobs=2) 47 | y_esn = esn.predict(X_test.reshape(-1, 1)) 48 | print("tests - esn:\n mackey-glass \n {0}".format(y_test-y_esn)) 49 | print("best_params_: {0}".format(esn.best_params_)) 50 | print("best_score: {0}".format(esn.best_score_)) 51 | np.testing.assert_allclose(1, esn.best_score_, atol=1e-1) 52 | 53 | 54 | def test_esn_regressor_requires_no_sequence() -> None: 55 | print('\ntest_esn_regressor_requires_no_sequence():') 56 | X, y = mackey_glass(n_timesteps=8000) 57 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=10, 58 | random_state=42) 59 | param_grid = {'hidden_layer_size': [20, 50], 60 | 'input_scaling': [1.], 61 | 'bias_scaling': [10.], 62 | 'input_activation': ['identity'], 63 | 'random_state': [42], 64 | 'spectral_radius': [0.], 65 | 'reservoir_activation': ['tanh'], 66 | 'alpha': [1e-2, 1e-5] 67 | } 68 | esn = GridSearchCV(ESNRegressor(), param_grid) 69 | esn.fit(X_train.reshape(-1, 1), y_train, n_jobs=2) 70 | np.testing.assert_equal(esn.best_estimator_.requires_sequence, False) 71 | 72 | 73 | def test_esn_regressor_requires_sequence() -> None: 74 | print('\ntest_esn_regressor_requires_sequence():') 75 | X, y = mackey_glass(n_timesteps=8000) 76 | X_train = np.empty(shape=(10, ), dtype=object) 77 | y_train = np.empty(shape=(10, ), dtype=object) 78 | X_test = np.empty(shape=(10, ), dtype=object) 79 | y_test = np.empty(shape=(10, ), dtype=object) 80 | splitter = TimeSeriesSplit(n_splits=10) 81 | for k, (train_index, test_index) in enumerate(splitter.split(X, y)): 82 | X_train[k] = X[train_index].reshape(-1, 1) 83 | y_train[k] = y[train_index] 84 | X_test[k] = X[test_index].reshape(-1, 1) 85 | y_test[k] = y[test_index] 86 | param_grid = {'hidden_layer_size': [20, 50], 87 | 'input_scaling': [1.], 88 | 'bias_scaling': [10.], 89 | 'input_activation': ['identity'], 90 | 'random_state': [42], 91 | 'spectral_radius': [0.], 92 | 'reservoir_activation': ['tanh'], 93 | 'alpha': [1e-2, 1e-5], 94 | } 95 | esn = GridSearchCV( 96 | ESNRegressor(), param_grid, 97 | scoring=make_scorer(mean_squared_error, greater_is_better=False)) 98 | esn.fit(X_train, y_train, n_jobs=2) 99 | np.testing.assert_equal(esn.best_estimator_.requires_sequence, True) 100 | 101 | 102 | def test_esn_regressor_wrong_sequence_format() -> None: 103 | print('\ntest_esn_regressor_wrong_sequence_format():') 104 | X, y = mackey_glass(n_timesteps=8000) 105 | X_train = np.empty(shape=(10, 1000, 1)) 106 | y_train = np.empty(shape=(10, 1000, 1)) 107 | splitter = TimeSeriesSplit(n_splits=10) 108 | for k, (train_index, test_index) in enumerate(splitter.split(X, y)): 109 | X_train[k, :, :] = X[:1000].reshape(-1, 1) 110 | y_train[k, :, :] = y[:1000].reshape(-1, 1) 111 | param_grid = {'hidden_layer_size': 50, 112 | 'input_scaling': 1., 113 | 'bias_scaling': 10., 114 | 'input_activation': 'identity', 115 | 'random_state': 42, 116 | 'spectral_radius': 0., 117 | 'reservoir_activation': 'tanh', 118 | 'alpha': 1e-5} 119 | with pytest.raises(ValueError): 120 | ESNRegressor(verbose=True, **param_grid)\ 121 | .fit(X_train, y_train, n_jobs=2) 122 | 123 | 124 | def test_esn_output_unchanged() -> None: 125 | X, y = load_digits(return_X_y=True, 
as_sequence=True) 126 | shape1 = y[0].shape 127 | esn = ESNClassifier(hidden_layer_size=50).fit(X, y) 128 | print(esn) 129 | shape2 = y[0].shape 130 | assert (shape1 == shape2) 131 | 132 | 133 | def test_esn_classifier_sequence_to_value() -> None: 134 | X, y = load_digits(return_X_y=True, as_sequence=True) 135 | esn = ESNClassifier(hidden_layer_size=50).fit(X, y) 136 | y_pred = esn.predict(X) 137 | assert (len(y) == len(y_pred)) 138 | assert (len(y_pred[0]) == 1) 139 | assert (esn.sequence_to_value is True) 140 | assert (esn.decision_strategy == "winner_takes_all") 141 | y_pred = esn.predict_proba(X) 142 | assert (y_pred[0].ndim == 1) 143 | y_pred = esn.predict_log_proba(X) 144 | assert (y_pred[0].ndim == 1) 145 | esn.sequence_to_value = False 146 | y_pred = esn.predict(X) 147 | assert (len(y_pred[0]) == 8) 148 | y_pred = esn.predict_proba(X) 149 | assert (y_pred[0].ndim == 2) 150 | y_pred = esn.predict_log_proba(X) 151 | assert (y_pred[0].ndim == 2) 152 | 153 | 154 | def test_esn_classifier_instance_fit() -> None: 155 | X, y = load_digits(return_X_y=True, as_sequence=True) 156 | esn = ESNClassifier(hidden_layer_size=50).fit(X[0], np.repeat(y[0], 8)) 157 | assert (esn.sequence_to_value is False) 158 | y_pred = esn.predict_proba(X[0]) 159 | assert (y_pred.ndim == 2) 160 | y_pred = esn.predict_log_proba(X[0]) 161 | assert (y_pred.ndim == 2) 162 | 163 | 164 | def test_esn_classifier_partial_fit() -> None: 165 | X, y = load_digits(return_X_y=True, as_sequence=True) 166 | esn = ESNClassifier(hidden_layer_size=50, verbose=True) 167 | for k in range(10): 168 | esn.partial_fit(X[k], np.repeat(y[k], 8), classes=np.arange(10), 169 | postpone_inverse=True) 170 | print(esn.__sizeof__()) 171 | print(esn.hidden_layer_state(X=X)) 172 | esn = ESNClassifier(hidden_layer_size=50, regressor=Ridge()) 173 | with pytest.raises(BaseException): 174 | for k in range(10): 175 | esn.partial_fit(X[k], np.repeat(y[k], 8), classes=np.arange(10), 176 | postpone_inverse=True) 177 | 178 | 179 | def test_esn_classifier_not_fitted() -> None: 180 | X, y = load_digits(return_X_y=True, as_sequence=True) 181 | with pytest.raises(NotFittedError): 182 | ESNClassifier(hidden_layer_size=50, verbose=True).predict(X) 183 | 184 | 185 | def test_esn_classifier_no_valid_params() -> None: 186 | X, y = load_digits(return_X_y=True, as_sequence=True) 187 | with pytest.raises(TypeError): 188 | ESNClassifier(input_to_node=ESNRegressor()).fit(X, y) 189 | with pytest.raises(TypeError): 190 | ESNClassifier(node_to_node=ESNRegressor()).fit(X, y) 191 | with pytest.raises(ValueError): 192 | ESNClassifier(requires_sequence="True").fit(X, y) 193 | with pytest.raises(TypeError): 194 | ESNClassifier(regressor=InputToNode()).fit(X, y) 195 | -------------------------------------------------------------------------------- /tests/test_incremental_regression.py: -------------------------------------------------------------------------------- 1 | """Testing for Linear model module.""" 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | from sklearn.base import is_regressor 7 | from sklearn.datasets import load_diabetes 8 | from sklearn.model_selection import train_test_split 9 | from sklearn.exceptions import NotFittedError 10 | 11 | from pyrcn.linear_model import IncrementalRegression 12 | from sklearn.linear_model import Ridge 13 | 14 | 15 | X_diabetes, y_diabetes = load_diabetes(return_X_y=True) 16 | 17 | 18 | def test_normalize() -> None: 19 | 
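    # normalize=True presumably standardizes the inputs before the ridge-style update (an inference from the parameter name); this test only checks that fitting succeeds, not the numeric effect.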
print('\ntest_normalize():') 20 | rs = np.random.RandomState(42) 21 | X = np.hstack((np.linspace(0., 10., 1000).reshape(-1, 1), 22 | np.linspace(-1., 1., 1000).reshape(-1, 1), 23 | rs.random(1000).reshape(-1, 1))) 24 | transformation = rs.random(size=(3, 2)) 25 | y = np.matmul(X, transformation) 26 | reg = IncrementalRegression(normalize=True) 27 | reg.fit(X, y) 28 | 29 | 30 | def test_postpone_inverse() -> None: 31 | print('\ntest_postpone_inverse():') 32 | rs = np.random.RandomState(42) 33 | index = range(1000) 34 | X = np.hstack((np.linspace(0., 10., 1000).reshape(-1, 1), 35 | np.linspace(-1., 1., 1000).reshape(-1, 1), 36 | rs.random(1000).reshape(-1, 1))) 37 | transformation = rs.random(size=(3, 2)) 38 | y = np.matmul(X, transformation) 39 | 40 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=10, 41 | random_state=42) 42 | reg = IncrementalRegression() 43 | assert is_regressor(reg) 44 | 45 | for prt in np.array_split(index, 3): 46 | reg.partial_fit(X[prt, :], y[prt, :], postpone_inverse=True) 47 | 48 | with pytest.raises(NotFittedError): 49 | y_reg = reg.predict(X_test) 50 | 51 | reg.partial_fit(X, y) 52 | y_reg = reg.predict(X_test) 53 | print("tests: {0}\nregr: {1}".format(y_test, y_reg)) 54 | np.testing.assert_allclose(y_reg, y_test, rtol=.01, atol=.15) 55 | 56 | 57 | def test_linear() -> None: 58 | print('\ntest_linear():') 59 | rs = np.random.RandomState(42) 60 | index = range(1000) 61 | X = np.hstack((np.linspace(0., 10., 1000).reshape(-1, 1), 62 | np.linspace(-1., 1., 1000).reshape(-1, 1), 63 | rs.random(1000).reshape(-1, 1))) 64 | transformation = rs.random(size=(3, 2)) 65 | y = np.matmul(X, transformation) 66 | 67 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=10, 68 | random_state=42) 69 | reg = IncrementalRegression() 70 | assert is_regressor(reg) 71 | 72 | for prt in np.array_split(index, 3): 73 | reg.partial_fit(X[prt, :], y[prt, :]) 74 | 75 | y_reg = reg.predict(X_test) 76 | print("tests: {0}\nregr: {1}".format(y_test, y_reg)) 77 | np.testing.assert_allclose(y_reg, y_test, rtol=.01, atol=.15) 78 | 79 | 80 | def test_compare_ridge() -> None: 81 | X_train, X_test, y_train, y_test = train_test_split( 82 | X_diabetes, y_diabetes, test_size=10, random_state=42) 83 | 84 | i_reg = IncrementalRegression(alpha=.01).fit(X_train, y_train) 85 | ridge = Ridge(alpha=.01, solver='svd').fit(X_train, y_train) 86 | 87 | print("incremental: {0} ridge: {1}".format(i_reg.coef_, ridge.coef_)) 88 | np.testing.assert_allclose(i_reg.coef_, ridge.coef_, rtol=.0001) 89 | -------------------------------------------------------------------------------- /tests/test_input_to_node.py: -------------------------------------------------------------------------------- 1 | """Testing for blocks.input_to_node module.""" 2 | import scipy 3 | import numpy as np 4 | import pytest 5 | from sklearn.utils.extmath import safe_sparse_dot 6 | 7 | from pyrcn.base.blocks import (InputToNode, PredefinedWeightsInputToNode, 8 | BatchIntrinsicPlasticity) 9 | 10 | 11 | def test_input_to_node_invalid_bias_scaling() -> None: 12 | print('\ntest_input_to_node_invalid_bias_scaling():') 13 | X = np.zeros(shape=(10, 500)) 14 | with pytest.raises(ValueError): 15 | i2n = InputToNode(bias_scaling=-1e-5) 16 | i2n.fit(X) 17 | 18 | 19 | def test_input_to_node_invalid_input_scaling() -> None: 20 | print('\ntest_input_to_node_invalid_input_scaling():') 21 | X = np.zeros(shape=(10, 500)) 22 | with pytest.raises(ValueError): 23 | i2n = InputToNode(input_scaling=0) 24 | i2n.fit(X) 25 | 26 | 27 | def 
test_input_to_node_invalid_activation() -> None: 28 | print('\ntest_input_to_node_invalid_activation():') 29 | X = np.zeros(shape=(10, 500)) 30 | with pytest.raises(ValueError): 31 | i2n = InputToNode(input_activation="test") 32 | i2n.fit(X) 33 | 34 | 35 | def test_input_to_node_invalid_hls() -> None: 36 | print('\ntest_input_to_node_invalid_hls():') 37 | X = np.zeros(shape=(10, 500)) 38 | with pytest.raises(ValueError): 39 | i2n = InputToNode(hidden_layer_size=0) 40 | X = np.zeros(shape=(10, 3)) 41 | i2n.fit(X) 42 | 43 | 44 | def test_input_to_node_invalid_sparsity() -> None: 45 | print('\ntest_input_to_node_invalid_sparsity():') 46 | X = np.zeros(shape=(10, 500)) 47 | with pytest.raises(ValueError): 48 | i2n = InputToNode(sparsity=1.1) 49 | i2n.fit(X) 50 | with pytest.raises(ValueError): 51 | i2n = InputToNode(sparsity=0.0) 52 | i2n.fit(X) 53 | with pytest.raises(ValueError): 54 | i2n = InputToNode(k_in=-1) 55 | i2n.fit(X) 56 | with pytest.raises(ValueError): 57 | i2n = InputToNode(k_in=500) 58 | i2n.fit(X) 59 | 60 | 61 | def test_predefined_weights_input_to_node() -> None: 62 | print('\ntest_predefined_weights_input_to_node():') 63 | X = np.zeros(shape=(10, 3)) 64 | input_weights = np.random.rand(5, 5) 65 | with pytest.raises(AssertionError): 66 | i2n = PredefinedWeightsInputToNode( 67 | predefined_input_weights=input_weights, input_activation='tanh', 68 | input_scaling=1., bias_scaling=1., random_state=42) 69 | i2n.fit(X) 70 | input_weights = np.random.rand(5, ) 71 | with pytest.raises(ValueError): 72 | i2n = PredefinedWeightsInputToNode( 73 | predefined_input_weights=input_weights, input_activation='tanh', 74 | input_scaling=1., bias_scaling=1., random_state=42) 75 | i2n.fit(X) 76 | input_weights = np.random.rand(3, 5) 77 | with pytest.raises(AssertionError): 78 | i2n = PredefinedWeightsInputToNode( 79 | predefined_input_weights=input_weights, input_activation='tanh', 80 | input_scaling=1., predefined_bias_weights=input_weights, 81 | bias_scaling=1., random_state=42) 82 | i2n.fit(X) 83 | input_weights = np.random.rand(3, 5) 84 | bias_weights = np.random.rand(3, ) 85 | with pytest.raises(AssertionError): 86 | i2n = PredefinedWeightsInputToNode( 87 | predefined_input_weights=input_weights, input_activation='tanh', 88 | input_scaling=1., predefined_bias_weights=bias_weights, 89 | bias_scaling=1., random_state=42) 90 | i2n.fit(X) 91 | input_weights = np.random.rand(3, 5) 92 | bias_weights = np.random.rand(5, 1) 93 | i2n = PredefinedWeightsInputToNode( 94 | predefined_input_weights=input_weights, input_activation='tanh', 95 | input_scaling=1., predefined_bias_weights=bias_weights, 96 | bias_scaling=1., random_state=42) 97 | i2n.fit(X) 98 | print(i2n._input_weights) 99 | assert i2n._input_weights.shape == (3, 5) 100 | assert i2n.__sizeof__() != 0 101 | assert i2n.input_weights is not None 102 | assert i2n.bias_weights is not None 103 | 104 | 105 | def test_bip_dresden() -> None: 106 | print('\ntest_bip_dresden()') 107 | rs = np.random.RandomState(42) 108 | i2n = BatchIntrinsicPlasticity( 109 | hidden_layer_size=1, input_activation='tanh', random_state=rs, 110 | distribution='uniform', algorithm='dresden') 111 | X = rs.normal(size=(1000, 1)) 112 | i2n.fit(X[:1000, :]) 113 | y = i2n.transform(X) 114 | y_test = y[(y > -.75) & (y < .75)] / 1.5 + .5 115 | 116 | statistic, pvalue = scipy.stats.ks_1samp(y_test, scipy.stats.uniform.cdf) 117 | assert statistic < pvalue 118 | print("Kolmogorov-Smirnov does not reject H_0:" 119 | "y is uniformly distributed in [-.75, .75]") 120 | 121 | 122 | def 
test_bip_run_neumann() -> None: 123 | print('\ntest_bip_run_neumann()') 124 | rs = np.random.RandomState(42) 125 | X = rs.normal(size=(1000, 1)) 126 | i2n = BatchIntrinsicPlasticity( 127 | hidden_layer_size=1, input_activation='tanh', random_state=rs, 128 | distribution='uniform', algorithm='neumann') 129 | i2n.fit(X.reshape(-1, 1)) 130 | i2n.transform(X.reshape(-1, 1)) 131 | 132 | 133 | def test_bip_invalid_params() -> None: 134 | print('\ntest_bip_invalid_params()') 135 | rs = np.random.RandomState(42) 136 | X = rs.normal(size=(1000, 1)) 137 | i2n = BatchIntrinsicPlasticity( 138 | hidden_layer_size=1, input_activation='tanh', random_state=rs, 139 | distribution='test', algorithm='neumann') 140 | with pytest.raises(ValueError): 141 | i2n.fit(X.reshape(-1, 1)) 142 | i2n = BatchIntrinsicPlasticity( 143 | hidden_layer_size=1, input_activation='relu', random_state=rs, 144 | distribution='uniform', algorithm='dresden') 145 | with pytest.raises(ValueError): 146 | i2n.fit(X.reshape(-1, 1)) 147 | i2n = BatchIntrinsicPlasticity( 148 | hidden_layer_size=1, input_activation='tanh', random_state=rs, 149 | distribution='uniform', algorithm='test') 150 | with pytest.raises(ValueError): 151 | i2n.fit(X.reshape(-1, 1)) 152 | 153 | 154 | def test_input_to_node_dense() -> None: 155 | print('\ntest_input_to_node_dense():') 156 | i2n = InputToNode( 157 | hidden_layer_size=5, sparsity=1., input_activation='tanh', 158 | input_scaling=1., bias_scaling=1., random_state=42) 159 | X = np.zeros(shape=(10, 3)) 160 | i2n.fit(X) 161 | print(i2n._input_weights) 162 | assert i2n._input_weights.shape == (3, 5) 163 | assert safe_sparse_dot(X, i2n._input_weights).shape == (10, 5) 164 | assert i2n.__sizeof__() != 0 165 | assert i2n.input_weights is not None 166 | assert i2n.bias_weights is not None 167 | 168 | 169 | def test_input_to_node_sparse() -> None: 170 | print('\ntest_input_to_node_sparse():') 171 | i2n = InputToNode( 172 | hidden_layer_size=5, sparsity=2/5, input_activation='tanh', 173 | input_scaling=1., bias_scaling=1., random_state=42) 174 | X = np.zeros(shape=(10, 3)) 175 | i2n.fit(X) 176 | assert i2n._input_weights.shape == (3, 5) 177 | assert safe_sparse_dot(X, i2n._input_weights).shape == (10, 5) 178 | i2n = InputToNode( 179 | hidden_layer_size=5, k_in=2, input_activation='tanh', 180 | input_scaling=1., bias_scaling=1., random_state=42) 181 | X = np.zeros(shape=(10, 3)) 182 | i2n.fit(X) 183 | assert i2n._input_weights.shape == (3, 5) 184 | assert safe_sparse_dot(X, i2n._input_weights).shape == (10, 5) 185 | assert i2n.__sizeof__() != 0 186 | assert i2n.input_weights is not None 187 | assert i2n.bias_weights is not None 188 | -------------------------------------------------------------------------------- /tests/test_model_selection.py: -------------------------------------------------------------------------------- 1 | """Testing for model selection module.""" 2 | 3 | from sklearn import datasets 4 | from sklearn.model_selection import KFold, GridSearchCV, RandomizedSearchCV 5 | from sklearn.svm import SVC 6 | from collections.abc import Iterable 7 | 8 | from pyrcn.model_selection import SequentialSearchCV, SHGOSearchCV 9 | import pytest 10 | 11 | 12 | def test_sequentialSearchCV_equivalence() -> None: 13 | """Test the equivalence of SequentialSearchCV to a manual sequence.""" 14 | iris = datasets.load_iris() 15 | X = iris.data[:, [0, 2]] 16 | y = iris.target 17 | cv = KFold(2, shuffle=True, random_state=42) 18 | svm1 = SVC(random_state=42) 19 | svm2 = SVC(random_state=42) 20 | param_grid1 = {'C': [1, 2], 
'kernel': ['rbf', 'linear']} 21 | param_grid2 = {'shrinking': [True, False]} 22 | gs1 = GridSearchCV(svm1, param_grid1, cv=cv).fit(X, y) 23 | gs2 = RandomizedSearchCV(gs1.best_estimator_, param_grid2, 24 | cv=cv, random_state=42).fit(X, y) 25 | 26 | ss = SequentialSearchCV( 27 | svm2, searches=[ 28 | ('gs1', GridSearchCV, param_grid1, {'cv': cv}), 29 | ('gs2', RandomizedSearchCV, param_grid2, 30 | {'cv': cv, 'random_state': 42, 'refit': True}), 31 | ('gs3', GridSearchCV, param_grid1)]).fit(X, y) 32 | assert gs1.best_params_ == ss.all_best_params_['gs1'] 33 | assert gs2.best_params_ == ss.all_best_params_['gs2'] 34 | assert (isinstance(ss.cv_results_, dict)) 35 | assert (ss.best_estimator_ is not None) 36 | assert (isinstance(ss.best_score_, float)) 37 | print(ss.best_index_) 38 | assert (isinstance(ss.n_splits_, int)) 39 | assert (isinstance(ss.refit_time_, float)) 40 | assert (isinstance(ss.multimetric, bool)) 41 | 42 | 43 | @pytest.mark.skip(reason="no way of currently testing this") 44 | def test_SHGOSearchCV() -> None: 45 | """Test the SHGO search.""" 46 | from sklearn.metrics import accuracy_score 47 | from sklearn.base import clone, BaseEstimator 48 | import numpy as np 49 | from sklearn.model_selection import StratifiedKFold 50 | iris = datasets.load_iris() 51 | X = iris.data[:, [0, 2]] 52 | y = iris.target 53 | cv = StratifiedKFold(n_splits=5) 54 | svm = SVC(random_state=42) 55 | 56 | def func(params: Iterable, param_names: Iterable, 57 | base_estimator: BaseEstimator, X: np.ndarray, y: np.ndarray, 58 | train: np.ndarray, test: np.ndarray) -> float: 59 | estimator = base_estimator 60 | for name, param in zip(param_names, params): 61 | estimator.set_params(**{name: param}) 62 | neg_scores = [] 63 | for tr, te in zip(train, test): 64 | est = clone(estimator).fit(X[tr], y[tr]) 65 | y_pred = est.predict(X[te]) 66 | neg_scores.append(-accuracy_score(y[te], y_pred)) 67 | return np.mean(neg_scores) 68 | 69 | params = {'max_iter': (1, 1000)} 70 | 71 | def fun(x: tuple) -> float: 72 | return max([x[0] - int(x[0])]) 73 | constraints = {'type': 'eq', 'fun': fun} 74 | search = SHGOSearchCV( 75 | estimator=svm, func=func, params=params, cv=cv, 76 | constraints=constraints).fit(X, y) 77 | y_pred = search.predict(X) 78 | print(accuracy_score(y_true=y, y_pred=svm.fit(X, y).predict(X))) 79 | print(accuracy_score(y_true=y, y_pred=y_pred)) 80 | 81 | 82 | if __name__ == '__main__': 83 | test_SHGOSearchCV() 84 | -------------------------------------------------------------------------------- /tests/test_node_to_node.py: -------------------------------------------------------------------------------- 1 | """Testing for blocks.node_to_node module.""" 2 | import numpy as np 3 | import pytest 4 | from sklearn.utils.extmath import safe_sparse_dot 5 | 6 | from pyrcn.base.blocks import ( 7 | InputToNode, NodeToNode, PredefinedWeightsNodeToNode, HebbianNodeToNode) 8 | 9 | 10 | def test_node_to_node_invalid_spectral_radius() -> None: 11 | print('\ntest_node_to_node_invalid_spectral_radius():') 12 | X = np.zeros(shape=(10, 500)) 13 | with pytest.raises(ValueError): 14 | n2n = NodeToNode(spectral_radius=-1e-5) 15 | n2n.fit(X) 16 | 17 | 18 | def test_node_to_node_invalid_activation() -> None: 19 | print('\ntest_node_to_node_invalid_activation():') 20 | X = np.zeros(shape=(10, 500)) 21 | with pytest.raises(ValueError): 22 | n2n = NodeToNode(reservoir_activation="test") 23 | n2n.fit(X) 24 | 25 | 26 | def test_node_to_node_invalid_hls() -> None: 27 | print('\ntest_node_to_node_invalid_hls():') 28 | X = np.zeros(shape=(10, 500)) 29 | 
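    # hidden_layer_size must be a positive integer, so fitting with 0 is expected to fail parameter validation with a ValueError.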
with pytest.raises(ValueError): 30 | n2n = NodeToNode(hidden_layer_size=0) 31 | X = np.zeros(shape=(10, 3)) 32 | n2n.fit(X) 33 | 34 | 35 | def test_node_to_node_invalid_sparsity() -> None: 36 | print('\ntest_node_to_node_invalid_sparsity():') 37 | X = np.zeros(shape=(10, 500)) 38 | with pytest.raises(ValueError): 39 | n2n = NodeToNode(sparsity=1.1) 40 | n2n.fit(X) 41 | with pytest.raises(ValueError): 42 | n2n = NodeToNode(sparsity=0.0) 43 | n2n.fit(X) 44 | with pytest.raises(ValueError): 45 | n2n = NodeToNode(k_rec=-1) 46 | n2n.fit(X) 47 | with pytest.raises(ValueError): 48 | n2n = NodeToNode(k_rec=500) 49 | n2n.fit(X) 50 | 51 | 52 | def test_predefined_weights_node_to_node() -> None: 53 | print('\ntest_predefined_weights_node_to_node():') 54 | X = np.zeros(shape=(10, 3)) 55 | weights = np.random.rand(3, 5) 56 | with pytest.raises(AssertionError): 57 | n2n = PredefinedWeightsNodeToNode( 58 | predefined_recurrent_weights=weights, reservoir_activation='tanh', 59 | spectral_radius=1.) 60 | n2n.fit(X) 61 | weights = np.random.rand(5, 3) 62 | with pytest.raises(AssertionError): 63 | n2n = PredefinedWeightsNodeToNode( 64 | predefined_recurrent_weights=weights, reservoir_activation='tanh', 65 | spectral_radius=1.) 66 | n2n.fit(X) 67 | weights = np.random.rand(5, ) 68 | with pytest.raises(ValueError): 69 | n2n = PredefinedWeightsNodeToNode( 70 | predefined_recurrent_weights=weights, reservoir_activation='tanh', 71 | spectral_radius=1.) 72 | n2n.fit(X) 73 | weights = np.random.rand(3, 3) 74 | n2n = PredefinedWeightsNodeToNode( 75 | predefined_recurrent_weights=weights, reservoir_activation='tanh', 76 | spectral_radius=1.) 77 | n2n.fit(X) 78 | print(n2n._recurrent_weights) 79 | assert n2n._recurrent_weights.shape == (3, 3) 80 | assert n2n.__sizeof__() != 0 81 | assert n2n.recurrent_weights is not None 82 | 83 | 84 | def test_node_to_node_dense() -> None: 85 | print('\ntest_node_to_node_dense():') 86 | n2n = NodeToNode( 87 | hidden_layer_size=5, sparsity=1., reservoir_activation='tanh', 88 | spectral_radius=1., random_state=42) 89 | X = np.zeros(shape=(10, 5)) 90 | n2n.fit(X) 91 | print(n2n._recurrent_weights) 92 | assert n2n._recurrent_weights.shape == (5, 5) 93 | assert n2n.__sizeof__() != 0 94 | assert n2n.recurrent_weights is not None 95 | 96 | 97 | def test_node_to_node_sparse() -> None: 98 | print('\ntest_node_to_node_sparse():') 99 | X = np.zeros(shape=(10, 5)) 100 | n2n = NodeToNode( 101 | hidden_layer_size=5, sparsity=2/5, reservoir_activation='tanh', 102 | spectral_radius=1., random_state=42) 103 | n2n.fit(X) 104 | assert n2n._recurrent_weights.shape == (5, 5) 105 | n2n = NodeToNode( 106 | hidden_layer_size=5, k_rec=2, reservoir_activation='tanh', 107 | spectral_radius=1., random_state=42) 108 | n2n.fit(X) 109 | assert n2n._recurrent_weights.shape == (5, 5) 110 | assert n2n.__sizeof__() != 0 111 | assert n2n.recurrent_weights is not None 112 | 113 | 114 | def test_node_to_node_bidirectional() -> None: 115 | print('\ntest_node_to_node_bidirectional():') 116 | X = np.zeros(shape=(10, 5)) 117 | with pytest.raises(ValueError): 118 | n2n = NodeToNode( 119 | hidden_layer_size=5, sparsity=2/5, reservoir_activation='tanh', 120 | spectral_radius=1., bidirectional="True", random_state=42) 121 | n2n.fit(X) 122 | n2n = NodeToNode( 123 | hidden_layer_size=5, sparsity=2/5, reservoir_activation='tanh', 124 | spectral_radius=1., bidirectional=True, random_state=42) 125 | n2n.fit(X) 126 | n2n.transform(X) 127 | assert n2n._recurrent_weights.shape == (5, 5) 128 | 129 | 130 | def 
test_node_to_node_invalid_leakage() -> None: 131 | print('\ntest_node_to_node_invalid_leakage():') 132 | X = np.zeros(shape=(10, 5)) 133 | with pytest.raises(ValueError): 134 | n2n = NodeToNode( 135 | hidden_layer_size=5, sparsity=2/5, reservoir_activation='tanh', 136 | spectral_radius=1., leakage=1.1, random_state=42) 137 | n2n.fit(X) 138 | with pytest.raises(ValueError): 139 | n2n = NodeToNode( 140 | hidden_layer_size=5, sparsity=2/5, reservoir_activation='tanh', 141 | spectral_radius=1., leakage=0, random_state=42) 142 | n2n.fit(X) 143 | 144 | 145 | def test_node_to_node_hebbian() -> None: 146 | print('\ntest_node_to_node_hebbian():') 147 | i2n = InputToNode(hidden_layer_size=5, sparsity=2/5, 148 | input_activation='tanh', input_scaling=1., 149 | bias_scaling=1., random_state=42) 150 | X = np.zeros(shape=(10, 3)) 151 | i2n.fit(X) 152 | n2n = HebbianNodeToNode(hidden_layer_size=5, sparsity=2/5, 153 | reservoir_activation='tanh', spectral_radius=1., 154 | random_state=42, learning_rate=0.01) 155 | n2n.fit(i2n.transform(X)) 156 | n2n = HebbianNodeToNode(hidden_layer_size=5, sparsity=2/5, 157 | reservoir_activation='tanh', spectral_radius=1., 158 | random_state=42, learning_rate=0.01, 159 | training_method="anti_hebbian") 160 | n2n.fit(i2n.transform(X)) 161 | n2n = HebbianNodeToNode(hidden_layer_size=5, sparsity=2/5, 162 | reservoir_activation='tanh', spectral_radius=1., 163 | random_state=42, learning_rate=0.01, 164 | training_method="oja") 165 | n2n.fit(i2n.transform(X)) 166 | n2n = HebbianNodeToNode(hidden_layer_size=5, sparsity=2/5, 167 | reservoir_activation='tanh', spectral_radius=1., 168 | random_state=42, learning_rate=0.01, 169 | training_method="anti_oja") 170 | n2n.fit(i2n.transform(X)) 171 | i2n_hidden = i2n.transform(X) 172 | print(n2n.transform(i2n_hidden)) 173 | print(n2n._recurrent_weights) 174 | assert n2n._recurrent_weights.shape == (5, 5) 175 | assert safe_sparse_dot( 176 | i2n.transform(X), n2n._recurrent_weights).shape == (10, 5) 177 | -------------------------------------------------------------------------------- /tests/test_util.py: -------------------------------------------------------------------------------- 1 | """Testing for the pyrcn.util module.""" 2 | import os 3 | import pytest 4 | from pyrcn.util import new_logger, argument_parser, get_mnist 5 | 6 | 7 | def test_new_logger() -> None: 8 | directory = os.getcwd() 9 | logger = new_logger(name='test_logger', directory=directory) 10 | logger.info('Test') 11 | assert os.path.isfile(os.path.join(directory, 'test_logger.log')) 12 | 13 | 14 | def test_argument_parser() -> None: 15 | args = argument_parser.parse_args(['-o', './', 'param0', 'param1']) 16 | assert os.path.isdir(args.out) 17 | assert 'param1' in args.params 18 | 19 | 20 | @pytest.mark.skip(reason="no way of currently testing this") 21 | def test_get_mnist() -> None: 22 | X, y = get_mnist(os.getcwd()) 23 | assert X.shape[0] == 70000 24 | -------------------------------------------------------------------------------- /tests/test_value_projection.py: -------------------------------------------------------------------------------- 1 | """Testing for projection module (pyrcn.projection).""" 2 | 3 | import numpy as np 4 | from pyrcn.projection import MatrixToValueProjection 5 | 6 | 7 | def test_matrix_to_value_projection() -> None: 8 | print('\ntest_matrix_to_value_projection():') 9 | r = np.random.RandomState(1234) 10 | X = r.rand(5, 3) 11 | idx_true = np.array([0, 0, 0, 1, 2]) 12 | X[range(5), idx_true] += 1 13 | y = MatrixToValueProjection().fit_transform(X) 
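    # Column 0 is boosted in three of the five rows, so the default projection (apparently a winner-takes-all vote over rows, cf. "winner_takes_all" in test_esn.py) should return class 0.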
14 | np.testing.assert_equal(y, 0) 15 | 16 | 17 | def test_matrix_to_value_projection_median() -> None: 18 | print('\ntest_matrix_to_value_projection_median():') 19 | trf = MatrixToValueProjection(output_strategy="median") 20 | r = np.random.RandomState(1234) 21 | X = r.rand(5, 3) 22 | idx_true = np.array([0, 0, 0, 1, 2]) 23 | X[range(5), idx_true] += 1 24 | y = trf.fit_transform(X) 25 | np.testing.assert_equal(y, np.argmax(np.median(X, axis=0))) 26 | 27 | 28 | def test_matrix_to_value_projection_lv() -> None: 29 | print('\ntest_matrix_to_value_projection_lv():') 30 | r = np.random.RandomState(1234) 31 | X = r.rand(5, 3) 32 | idx_true = np.array([0, 0, 0, 1, 2]) 33 | X[range(5), idx_true] += 1 34 | trf = MatrixToValueProjection(output_strategy="last_value").fit(X) 35 | y = trf.transform(X) 36 | np.testing.assert_equal(y, np.argmax(X[-1, :])) 37 | 38 | 39 | def test_matrix_to_value_projection_proba() -> None: 40 | print('\ntest_matrix_to_value_projection_predict_proba():') 41 | r = np.random.RandomState(1234) 42 | X = r.rand(5, 3) 43 | idx_true = np.array([0, 0, 0, 1, 2]) 44 | X[range(5), idx_true] += 1 45 | trf = MatrixToValueProjection( 46 | output_strategy="last_value", needs_proba=True).fit(X) 47 | y = trf.transform(X) 48 | np.testing.assert_equal(y, X[-1, :]) 49 | --------------------------------------------------------------------------------
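# A minimal usage sketch (not a file from the repository): it combines the
# APIs exercised by the tests above -- mackey_glass() from pyrcn.datasets and
# ESNRegressor from pyrcn.echo_state_network -- into one end-to-end run.
# hidden_layer_size=50 and random_state=42 mirror values used in the tests;
# everything else relies only on calls that appear above.
import numpy as np
from sklearn.model_selection import train_test_split
from pyrcn.datasets import mackey_glass
from pyrcn.echo_state_network import ESNRegressor

# Generate the Mackey-Glass series and split it without shuffling,
# preserving temporal order as in test_esn_regressor_jobs.
X, y = mackey_glass(n_timesteps=8000)
X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False)

# Fit a small reservoir and predict the held-out continuation.
esn = ESNRegressor(hidden_layer_size=50, random_state=42)
esn.fit(X_train.reshape(-1, 1), y_train)
y_pred = esn.predict(X_test.reshape(-1, 1))
print("test MSE: {0}".format(np.mean((y_test - y_pred) ** 2)))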