├── tests
│   ├── __init__.py
│   ├── detection.py
│   ├── misc.py
│   ├── preprocess.py
│   └── filters.py
├── docs
│   ├── source
│   │   ├── static
│   │   │   └── custom.css
│   │   ├── sources
│   │   │   ├── detection.rst
│   │   │   ├── filters
│   │   │   │   ├── ap.rst
│   │   │   │   ├── llncosh.rst
│   │   │   │   ├── lmf.rst
│   │   │   │   ├── lms.rst
│   │   │   │   ├── rls.rst
│   │   │   │   ├── nlmf.rst
│   │   │   │   ├── nlms.rst
│   │   │   │   ├── sslms.rst
│   │   │   │   ├── gngd.rst
│   │   │   │   ├── gmcc.rst
│   │   │   │   ├── nsslms.rst
│   │   │   │   ├── ocnlms.rst
│   │   │   │   ├── vslms_ang.rst
│   │   │   │   ├── vslms_mathews.rst
│   │   │   │   └── vslms_benveniste.rst
│   │   │   ├── detection
│   │   │   │   ├── le.rst
│   │   │   │   ├── ese.rst
│   │   │   │   └── elbnd.rst
│   │   │   ├── preprocess
│   │   │   │   ├── pca.rst
│   │   │   │   ├── standardize.rst
│   │   │   │   ├── lda.rst
│   │   │   │   ├── standardize_back.rst
│   │   │   │   └── input_from_history.rst
│   │   │   ├── misc
│   │   │   │   └── error_evaluation.rst
│   │   │   ├── filters.rst
│   │   │   ├── misc.rst
│   │   │   ├── preprocess.rst
│   │   │   └── changelog.rst
│   │   ├── index.rst
│   │   ├── ga_code.txt
│   │   ├── templates
│   │   │   └── layout.html
│   │   └── conf.py
│   ├── README.txt
│   └── Makefile
├── setup.cfg
├── MANIFEST
├── padasip
│   ├── ann
│   │   ├── __init__.py
│   │   └── mlp.py
│   ├── misc
│   │   ├── __init__.py
│   │   └── error_evaluation.py
│   ├── preprocess
│   │   ├── __init__.py
│   │   ├── standardize_back.py
│   │   ├── input_from_history.py
│   │   ├── standardize.py
│   │   ├── pca.py
│   │   └── lda.py
│   ├── detection
│   │   ├── __init__.py
│   │   ├── elbnd.py
│   │   ├── ese.py
│   │   └── le.py
│   ├── __init__.py
│   └── filters
│       ├── gmcc.py
│       ├── llncosh.py
│       ├── vslms_mathews.py
│       ├── gngd.py
│       ├── lmf.py
│       ├── sslms.py
│       ├── vslms_benveniste.py
│       ├── vslms_ang.py
│       ├── nlmf.py
│       ├── nsslms.py
│       ├── lms.py
│       ├── nlms.py
│       ├── ap.py
│       ├── ocnlms.py
│       ├── rls.py
│       ├── __init__.py
│       └── base_filter.py
├── runtests.py
├── LICENSE.txt
├── .gitignore
├── setup.py
└── README.rst
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/source/static/custom.css:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description-file = README.rst
3 |
--------------------------------------------------------------------------------
/MANIFEST:
--------------------------------------------------------------------------------
1 | # file GENERATED by distutils, do NOT edit
2 | setup.cfg
3 | setup.py
4 | padasip/__init__.py
5 | padasip/consts.py
6 | padasip/preprocess.py
7 |
--------------------------------------------------------------------------------
/docs/source/sources/detection.rst:
--------------------------------------------------------------------------------
1 | .. _detection:
2 |
3 | Detection Tools
4 | *****************
5 |
6 | .. automodule:: padasip.detection
7 | :members:
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/docs/source/sources/filters/ap.rst:
--------------------------------------------------------------------------------
1 | .. _filter-ap:
2 |
3 | Affine Projection (AP)
4 | ***************************
5 |
6 | .. automodule:: padasip.filters.ap
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/detection/le.rst:
--------------------------------------------------------------------------------
1 | .. _detection-le:
2 |
3 | Learning Entropy (LE)
4 | ============================
5 |
6 | .. automodule:: padasip.detection.le
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/preprocess/pca.rst:
--------------------------------------------------------------------------------
1 | .. _preprocess-pca:
2 |
3 | Principal Component Analysis (PCA)
4 | ***********************************
5 |
6 | .. automodule:: padasip.preprocess.pca
7 | :members:
8 |
--------------------------------------------------------------------------------
/docs/source/sources/preprocess/standardize.rst:
--------------------------------------------------------------------------------
1 | .. _preprocess-standardize:
2 |
3 | Data Standardization
4 | ************************
5 |
6 | .. automodule:: padasip.preprocess.standardize
7 | :members:
8 |
--------------------------------------------------------------------------------
/docs/source/sources/filters/llncosh.rst:
--------------------------------------------------------------------------------
1 | .. _filter-llncosh:
2 |
3 | Least Lncosh (Llncosh)
4 | ======================
5 |
6 | .. automodule:: padasip.filters.llncosh
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/misc/error_evaluation.rst:
--------------------------------------------------------------------------------
1 | .. _misc-error_evaluation:
2 |
3 | Error Evaluation
4 | ===================================
5 |
6 | .. automodule:: padasip.misc.error_evaluation
7 | :members:
8 |
--------------------------------------------------------------------------------
/docs/source/sources/preprocess/lda.rst:
--------------------------------------------------------------------------------
1 | .. _preprocess-lda:
2 |
3 | Linear Discriminant Analysis (LDA)
4 | **************************************
5 |
6 | .. automodule:: padasip.preprocess.lda
7 | :members:
8 |
--------------------------------------------------------------------------------
/padasip/ann/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | This sub-module contains neural networks and related functionality.
3 | So far, only the MLP (Multi-layer Perceptron) is implemented.
4 | """
5 | from padasip.ann.mlp import NetworkMLP
6 |
--------------------------------------------------------------------------------
/docs/source/sources/filters/lmf.rst:
--------------------------------------------------------------------------------
1 | .. _filter-lmf:
2 |
3 | Least-mean-fourth (LMF)
4 | =====================================
5 |
6 | .. automodule:: padasip.filters.lmf
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/filters/lms.rst:
--------------------------------------------------------------------------------
1 | .. _filter-lms:
2 |
3 | Least-mean-square (LMS)
4 | =====================================
5 |
6 | .. automodule:: padasip.filters.lms
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/README.txt:
--------------------------------------------------------------------------------
1 | ## preparation
2 |
3 | sudo apt-get install python3-sphinx
4 |
5 |
6 |
7 | ## build
8 | * sphinx-build -b html source/ build/
9 |
10 | or
11 |
12 | * python -m sphinx -b html source/ build/
13 |
--------------------------------------------------------------------------------
/docs/source/sources/detection/ese.rst:
--------------------------------------------------------------------------------
1 | .. _detection-ese:
2 |
3 | Extreme Seeking Entropy (ESE)
4 | =============================
5 |
6 | .. automodule:: padasip.detection.ese
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/filters/rls.rst:
--------------------------------------------------------------------------------
1 | .. _filter-rls:
2 |
3 | Recursive Least Squares (RLS)
4 | ======================================
5 |
6 | .. automodule:: padasip.filters.rls
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/filters.rst:
--------------------------------------------------------------------------------
1 | .. _filters:
2 |
3 | Adaptive Filters
4 | ******************
5 |
6 | .. automodule:: padasip.filters
7 | :members:
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/docs/source/sources/filters/nlmf.rst:
--------------------------------------------------------------------------------
1 | .. _filter-nlmf:
2 |
3 | Normalized Least-mean-fourth (NLMF)
4 | =====================================
5 |
6 | .. automodule:: padasip.filters.nlmf
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/filters/nlms.rst:
--------------------------------------------------------------------------------
1 | .. _filter-nlms:
2 |
3 | Normalized Least-mean-square (NLMS)
4 | ======================================
5 |
6 | .. automodule:: padasip.filters.nlms
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/filters/sslms.rst:
--------------------------------------------------------------------------------
1 | .. _filter-sslms:
2 |
3 | Sign-sign Least-mean-square (SSLMS)
4 | =====================================
5 |
6 | .. automodule:: padasip.filters.sslms
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/preprocess/standardize_back.rst:
--------------------------------------------------------------------------------
1 | .. _preprocess-standardize_back:
2 |
3 | Data De-standardization
4 | *******************************
5 |
6 | .. automodule:: padasip.preprocess.standardize_back
7 | :members:
8 |
--------------------------------------------------------------------------------
/docs/source/sources/preprocess/input_from_history.rst:
--------------------------------------------------------------------------------
1 | .. _preprocess-input_from_history:
2 |
3 | Input Matrix Construction
4 | ===================================
5 |
6 | .. automodule:: padasip.preprocess.input_from_history
7 | :members:
8 |
--------------------------------------------------------------------------------
/docs/source/sources/filters/gngd.rst:
--------------------------------------------------------------------------------
1 | .. _filter-gngd:
2 |
3 | Generalized Normalized Gradient Descent (GNGD)
4 | ================================================
5 |
6 | .. automodule:: padasip.filters.gngd
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/filters/gmcc.rst:
--------------------------------------------------------------------------------
1 | .. _filter-gmcc:
2 |
3 | Generalized maximum correntropy criterion (GMCC)
4 | ================================================
5 |
6 | .. automodule:: padasip.filters.gmcc
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/filters/nsslms.rst:
--------------------------------------------------------------------------------
1 | .. _filter-nsslms:
2 |
3 | Normalized Sign-sign Least-mean-square (NSSLMS)
4 | =================================================
5 |
6 | .. automodule:: padasip.filters.nsslms
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/detection/elbnd.rst:
--------------------------------------------------------------------------------
1 | .. _detection-elbnd:
2 |
3 | Error and Learning Based Novelty Detection (ELBND)
4 | ====================================================
5 |
6 | .. automodule:: padasip.detection.elbnd
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/filters/ocnlms.rst:
--------------------------------------------------------------------------------
1 | .. _filter-ocnlms:
2 |
3 | Online centered normalized Least-mean-square (OCNLMS)
4 | ======================================================
5 |
6 | .. automodule:: padasip.filters.ocnlms
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/misc.rst:
--------------------------------------------------------------------------------
1 | .. _misc:
2 |
3 | Miscellaneous
4 | ===================================
5 |
6 | .. automodule:: padasip.misc
7 |
8 | Content
9 |
10 | .. toctree::
11 | :glob:
12 | :maxdepth: 1
13 |
14 | misc/*
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/runtests.py:
--------------------------------------------------------------------------------
1 | """
2 | This file executes all the tests.
3 | """
4 | import unittest
5 | from tests.filters import TestFilters
6 | from tests.preprocess import TestPreprocess
7 | from tests.misc import TestErrorEval
8 | from tests.detection import TestDetection
9 |
10 | unittest.main()
11 |
--------------------------------------------------------------------------------
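A note on usage: ``unittest.main()`` collects the TestCase classes imported above,
so the whole suite can be run from the repository root (``-v`` for verbose output
is standard unittest behaviour):

    python runtests.py
    python runtests.py -v
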
/padasip/misc/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | This module contains miscellaneous helper functions.
3 |
4 | Currently, only error evaluation functions are implemented here.
5 | """
6 | from padasip.misc.error_evaluation import MSE, RMSE, MAE, logSE
7 | from padasip.misc.error_evaluation import get_mean_error
8 |
--------------------------------------------------------------------------------
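A minimal usage sketch for the error evaluation functions exported above; the
values mirror tests/misc.py, and each function accepts either two series or a
single error series:

    import numpy as np
    import padasip as pa

    x1 = np.array([1, 2, 3, 4, 5])  # target
    x2 = np.array([5, 4, 3, 2, 1])  # estimation

    print(pa.misc.MSE(x1, x2))   # 8.0 - mean squared error
    print(pa.misc.MSE(x1 - x2))  # 8.0 - same result from the error series
    print(pa.misc.RMSE(x1, x2))  # root of MSE (~2.828)
    print(pa.misc.MAE(x1, x2))   # mean absolute error (2.4)
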
/docs/source/sources/filters/vslms_ang.rst:
--------------------------------------------------------------------------------
1 | .. _filter-vslms_ang:
2 |
3 | Variable step-size least-mean-square (VSLMS) with Ang's adaptation
4 | ==================================================================
5 |
6 | .. automodule:: padasip.filters.vslms_ang
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/filters/vslms_mathews.rst:
--------------------------------------------------------------------------------
1 | .. _filter-vslms_mathews:
2 |
3 | Variable step-size least-mean-square (VSLMS) with Mathews's adaptation
4 | ======================================================================
5 |
6 | .. automodule:: padasip.filters.vslms_mathews
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/filters/vslms_benveniste.rst:
--------------------------------------------------------------------------------
1 | .. _filter-vslms_benveniste:
2 |
3 | Variable step-size least-mean-square (VSLMS) with Benveniste's adaptation
4 | =========================================================================
5 |
6 | .. automodule:: padasip.filters.vslms_benveniste
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/sources/preprocess.rst:
--------------------------------------------------------------------------------
1 | .. _preprocess:
2 |
3 | Data Preprocessing
4 | ===================================
5 |
6 | .. automodule:: padasip.preprocess
7 |
8 |
9 | Content
10 |
11 | .. toctree::
12 | :glob:
13 | :maxdepth: 1
14 |
15 | preprocess/*
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. Padasip documentation master file, created by
2 | sphinx-quickstart on Thu Sep 15 11:17:12 2016.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 |
7 |
8 | Padasip
9 | ===================
10 | *Python Adaptive Signal Processing*
11 |
12 | .. automodule:: padasip
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/padasip/preprocess/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | In this module are placed functions related to preprocessing of data.
3 | """
4 | from padasip.preprocess.standardize import standardize
5 | from padasip.preprocess.standardize_back import standardize_back
6 | from padasip.preprocess.input_from_history import input_from_history
7 | from padasip.preprocess.pca import PCA, PCA_components
8 | from padasip.preprocess.lda import LDA, LDA_discriminants
9 |
--------------------------------------------------------------------------------
/docs/source/ga_code.txt:
--------------------------------------------------------------------------------
1 |
10 |
--------------------------------------------------------------------------------
/padasip/detection/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 1.0.0
3 | .. versionchanged:: 1.2.0
4 |
5 | This module contains tools for detection tasks, such as:
6 |
7 | * Novelty detection
8 |
9 | * Fault Detection
10 |
11 | * Outlier Detection
12 |
13 | * System change point detection
14 |
15 | Content of this page:
16 |
17 | .. contents::
18 | :local:
19 | :depth: 1
20 |
21 |
22 | Implemented tools
23 | ========================
24 |
25 | .. toctree::
26 | :glob:
27 | :maxdepth: 1
28 |
29 | detection/*
30 |
31 | """
32 | from padasip.detection.le import learning_entropy
33 | from padasip.detection.elbnd import ELBND
34 |
35 | def ese_not_imported(*args, **kwargs):
36 | """
37 | Raise error if ESE is called without scipy installed.
38 | """
39 | raise ImportError('You have to install scipy package in order to use ESE.')
40 |
41 | try:
42 | from padasip.detection.ese import ESE
43 | except ImportError:
44 | ESE = ese_not_imported
45 |
--------------------------------------------------------------------------------
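A minimal detection sketch following the pattern used in tests/detection.py
(assumes numpy and padasip are installed; the change point at k = 1000 is
artificial):

    import numpy as np
    import padasip as pa

    # data with an artificial system change at k = 1000
    np.random.seed(100)
    x = np.random.normal(0, 1, (2000, 5))
    d = np.sum(x, axis=1) + np.random.normal(0, 0.1, 2000)
    d[1000] += 2.

    # track the system with an adaptive filter, then inspect weights and error
    f = pa.filters.FilterNLMS(5, mu=1., w=np.ones(5))
    y, e, w = f.run(d, x)
    le = pa.detection.learning_entropy(w, m=30, order=2)  # peaks near k = 1000
    elbnd = pa.detection.ELBND(w, e, function="max")      # peaks near k = 1000
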
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) Matous Cejnek
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | example.py
2 | *.png
3 | *.svg
4 | *.pdf
5 |
6 | .idea/
7 |
8 | to_ignore.py
9 |
10 | *~
11 |
12 | # Byte-compiled / optimized / DLL files
13 | __pycache__/
14 | *.py[cod]
15 | *$py.class
16 | *.pyc
17 |
18 | # C extensions
19 | *.so
20 |
21 | # Distribution / packaging
22 | .Python
23 | env/
24 | build/
25 | develop-eggs/
26 | dist/
27 | downloads/
28 | eggs/
29 | .eggs/
30 | lib/
31 | lib64/
32 | parts/
33 | sdist/
34 | var/
35 | *.egg-info/
36 | .installed.cfg
37 | *.egg
38 |
39 | # PyInstaller
40 | # Usually these files are written by a python script from a template
41 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
42 | *.manifest
43 | *.spec
44 |
45 | # Installer logs
46 | pip-log.txt
47 | pip-delete-this-directory.txt
48 |
49 | # Unit test / coverage reports
50 | htmlcov/
51 | .tox/
52 | .coverage
53 | .coverage.*
54 | .cache
55 | nosetests.xml
56 | coverage.xml
57 | *,cover
58 | .hypothesis/
59 |
60 | # Translations
61 | *.mo
62 | *.pot
63 |
64 | # Django stuff:
65 | *.log
66 |
67 | # Sphinx documentation
68 | docs/_build/
69 | docs/build
70 |
71 | # PyBuilder
72 | target/
73 |
74 | #Ipython Notebook
75 | .ipynb_checkpoints
76 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | from setuptools import find_packages
3 |
4 | def readme():
5 | try:
6 | with open('README.rst') as f:
7 | return f.read()
8 |     except IOError:
9 |         return ''
10 |
11 | setup(
12 | name = 'padasip',
13 | packages = find_packages(exclude=("tests",)),
14 | version = '1.2.2',
15 | description = 'Python Adaptive Signal Processing',
16 | long_description = readme(),
17 | author = 'Matous Cejnek',
18 | maintainer = "Matous Cejnek",
19 | author_email = 'matousc@gmail.com',
20 | license = 'MIT',
21 | url = 'http://matousc89.github.io/padasip/',
22 | download_url = 'https://github.com/matousc89/padasip/',
23 | keywords = ['signal-processing', 'adaptive filters'],
24 | install_requires=[
25 | 'numpy',
26 | ],
27 | bugtrack_url = "https://github.com/matousc89/padasip/issues",
28 | classifiers=[
29 | 'Development Status :: 3 - Alpha',
30 | 'License :: OSI Approved :: MIT License',
31 | 'Topic :: Adaptive Technologies',
32 | 'Topic :: Scientific/Engineering :: Artificial Intelligence',
33 | 'Topic :: Software Development',
34 | 'Intended Audience :: Science/Research',
35 | 'Intended Audience :: Developers',
36 | 'Intended Audience :: Education',
37 | 'Programming Language :: Python',
38 | ],
39 | )
40 |
--------------------------------------------------------------------------------
/padasip/preprocess/standardize_back.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 0.1
3 |
4 | This function transforms a series back to the original scale according to
5 | the equation:
6 |
7 | :math:`\\textbf{x} = \\textbf{x}_s \cdot b + a`
8 |
9 | where :math:`\\textbf{x}` is time series to de-standardize,
10 | :math:`a` is offset to add and :math:`b` desired scaling factor.
11 |
12 | .. contents::
13 | :local:
14 | :depth: 1
15 |
16 | See also: :ref:`preprocess-standardize`
17 |
18 | Usage Explanation
19 | ********************
20 |
21 | As simple as
22 |
23 | .. code-block:: python
24 |
25 |     x = pa.standardize_back(xs, offset=a, scale=b)
26 |
27 | Code Explanation
28 | *****************
29 | """
30 | from __future__ import division
31 | import numpy as np
32 |
33 | def standardize_back(xs, offset, scale):
34 | """
35 |     This is a function for de-standardization of the input series.
36 |
37 | **Args:**
38 |
39 | * `xs` : standardized input (1 dimensional array)
40 |
41 | * `offset` : offset to add (float).
42 |
43 | * `scale` : scale (float).
44 |
45 | **Returns:**
46 |
47 | * `x` : original (destandardised) series
48 |
49 | """
50 | try:
51 | offset = float(offset)
52 | except:
53 |         raise ValueError('The argument offset must be a float.')
54 | try:
55 | scale = float(scale)
56 | except:
57 |         raise ValueError('The argument scale must be a float.')
58 | try:
59 | xs = np.array(xs, dtype="float64")
60 | except:
61 | raise ValueError('The argument xs is not numpy array or similar.')
62 | return xs*scale + offset
63 |
--------------------------------------------------------------------------------
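A round-trip sketch for standardize/standardize_back (both are exported at the
package level for backward compatibility, see padasip/__init__.py):

    import numpy as np
    import padasip as pa

    x = np.random.normal(5, 2, 100)                      # original series
    a, b = x.mean(), x.std()                             # offset and scale
    xs = pa.standardize(x, offset=a, scale=b)            # z-score
    x_back = pa.standardize_back(xs, offset=a, scale=b)  # invert the transform
    print(np.allclose(x, x_back))                        # True
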
/docs/source/templates/layout.html:
--------------------------------------------------------------------------------
1 | {% extends '!layout.html' %}
2 |
3 | {% block extrahead %}
4 |
5 |
6 |
15 | {% endblock %}
16 |
17 | {% block header %}
18 |
19 | {% endblock %}
20 |
21 |
22 | {% block sidebarlogo %}
23 |
24 |
25 |
26 |
27 |
28 |
29 | {% endblock %}
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/tests/detection.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import sys
3 | import numpy as np
4 |
5 | sys.path.append('..')
6 | import padasip as pa
7 |
8 | class TestDetection(unittest.TestCase):
9 |
10 | def test_le_direct(self):
11 | """
12 | Learning entropy direct approach.
13 | """
14 | np.random.seed(100)
15 | n = 5
16 | N = 2000
17 | x = np.random.normal(0, 1, (N, n))
18 | d = np.sum(x, axis=1) + np.random.normal(0, 0.1, N)
19 | d[1000] += 2.
20 | f = pa.filters.FilterNLMS(n, mu=1., w=np.ones(n))
21 | y, e, w = f.run(d, x)
22 | le = pa.detection.learning_entropy(w, m=30, order=2)
23 | self.assertEqual(np.round(le.sum(), 3), 594.697)
24 |
25 | def test_le_multiscale(self):
26 | """
27 | Learning entropy multiscale approach.
28 | """
29 | np.random.seed(100)
30 | n = 5
31 | N = 2000
32 | x = np.random.normal(0, 1, (N, n))
33 | d = np.sum(x, axis=1) + np.random.normal(0, 0.1, N)
34 | d[1000] += 2.
35 | f = pa.filters.FilterNLMS(n, mu=1., w=np.ones(n))
36 | y, e, w = f.run(d, x)
37 | le = pa.detection.learning_entropy(w, m=30, order=2, alpha=[8., 9.])
38 | self.assertEqual(np.round(le.sum(), 3), 1.8)
39 |
40 | def test_elbnd(self):
41 | """
42 |         ELBND
43 | """
44 | np.random.seed(100)
45 | n = 5
46 | N = 2000
47 | x = np.random.normal(0, 1, (N, n))
48 | d = np.sum(x, axis=1) + np.random.normal(0, 0.1, N)
49 | d[1000] += 2.
50 | f = pa.filters.FilterNLMS(n, mu=1., w=np.ones(n))
51 | y, e, w = f.run(d, x)
52 | elbnd = pa.detection.ELBND(w, e, function="max")
53 | self.assertEqual(np.round(elbnd.sum(), 3), 18.539)
54 |
--------------------------------------------------------------------------------
/tests/misc.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import sys
3 | import numpy as np
4 |
5 | sys.path.append('..')
6 | import padasip as pa
7 |
8 | class TestErrorEval(unittest.TestCase):
9 |
10 | def test_MSE(self):
11 | """
12 | Test MSE
13 | """
14 | # two inputs
15 | x1 = np.array([1, 2, 3, 4, 5])
16 | x2 = np.array([5, 4, 3, 2, 1])
17 | mse = pa.misc.MSE(x1, x2)
18 | self.assertEqual(mse, 8.0)
19 | # one input
20 | e = x1 - x2
21 | mse = pa.misc.MSE(e)
22 | self.assertEqual(mse, 8.0)
23 |
24 | def test_RMSE(self):
25 | """
26 | Test RMSE
27 | """
28 | # two inputs
29 | x1 = np.array([1, 4, 4, 4])
30 | x2 = np.array([4, 1, 1, 1])
31 | rmse = pa.misc.RMSE(x1, x2)
32 | self.assertEqual(rmse, 3.0)
33 | # one input
34 | e = x1 - x2
35 | rmse = pa.misc.RMSE(e)
36 | self.assertEqual(rmse, 3.0)
37 |
38 | def test_MAE(self):
39 | """
40 | Test MAE
41 | """
42 | # two inputs
43 | x1 = np.array([2, 4, 4])
44 | x2 = np.array([4, 6, 2])
45 | mae = pa.misc.MAE(x1, x2)
46 | self.assertEqual(mae, 2.0)
47 | # one input
48 | e = x1 - x2
49 | mae = pa.misc.MAE(e)
50 | self.assertEqual(mae, 2.0)
51 |
52 | def test_logSE(self):
53 | """
54 | Test logSE
55 | """
56 | # two inputs
57 | x1 = np.array([2, 4, 4, 1, 2, 3, -1, 4])
58 | x2 = np.array([4, 6, 2, -1, 3, 4, 1, 3])
59 | logse = pa.misc.logSE(x1, x2)
60 | self.assertEqual(np.round(logse.sum(), 4), 30.1030)
61 | # one input
62 | e = x1 - x2
63 | logse = pa.misc.logSE(e)
64 | self.assertEqual(np.round(logse.sum(), 4), 30.1030)
65 |
66 |
67 |
68 |
69 |
70 |
--------------------------------------------------------------------------------
/padasip/preprocess/input_from_history.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 0.1
3 | .. versionchanged:: 1.2.0
4 |
5 | This function creates an input matrix from historical values.
6 |
7 | .. contents::
8 | :local:
9 | :depth: 1
10 |
11 | Minimal Working Example
12 | **************************
13 |
14 | An example how to create input matrix from historical values
15 |
16 | .. code-block:: python
17 |
18 | >>> import numpy as np
19 | >>> import padasip as pa
20 | >>> a = np.arange(1, 7, 1)
21 | >>> a
22 | array([1, 2, 3, 4, 5, 6])
23 | >>> pa.input_from_history(a,3)
24 | array([[1, 2, 3],
25 | [2, 3, 4],
26 | [3, 4, 5],
27 | [4, 5, 6]])
28 |
29 | Code Explanation
30 | *****************
31 | """
32 | from __future__ import division
33 | import numpy as np
34 |
35 | def input_from_history(a, n, bias=False):
36 | """
37 |     This is a function for the creation of an input matrix.
38 |
39 | **Args:**
40 |
41 | * `a` : series (1 dimensional array)
42 |
43 | * `n` : size of input matrix row (int). It means how many samples \
44 | of previous history you want to use \
45 | as the filter input. It also represents the filter length.
46 |
47 | **Kwargs:**
48 |
49 | * `bias` : decides if the bias is used (Boolean). If True, \
50 | array of all ones is appended as a last column to matrix `x`. \
51 | So matrix `x` has `n`+1 columns.
52 |
53 | **Returns:**
54 |
55 |     * `x` : input matrix (2 dimensional array) \
56 |       constructed from an array `a`. The number of rows of `x` \
57 |       is calculated as length of `a` - `n` + 1. \
58 |       If the `bias` is used, the number of columns is `n`+1; \
59 |       if not, the number of columns is `n`.
60 |
61 | """
62 | if not isinstance(n, int):
63 | raise ValueError('The argument n must be int.')
64 | if not n > 0:
65 | raise ValueError('The argument n must be greater than 0')
66 | try:
67 | a = np.array(a, dtype="float64")
68 | except:
69 | raise ValueError('The argument a is not numpy array or similar.')
70 | x = np.array([a[i:i+n] for i in range(len(a)-n+1)])
71 | if bias:
72 | x = np.vstack((x.T, np.ones(len(x)))).T
73 | return x
74 |
--------------------------------------------------------------------------------
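A sketch of the `bias` option described above: a column of ones is appended, so
the matrix gets `n`+1 columns (the output is float64 because of the cast inside
the function):

    >>> import numpy as np
    >>> import padasip as pa
    >>> a = np.arange(1, 7, 1)
    >>> pa.input_from_history(a, 3, bias=True)
    array([[1., 2., 3., 1.],
           [2., 3., 4., 1.],
           [3., 4., 5., 1.],
           [4., 5., 6., 1.]])
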
/padasip/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Current version: |version| (:ref:`changelog`)
3 |
4 | This library is designed to simplify adaptive signal
5 | processing tasks within Python
6 | (filtering, prediction, reconstruction, classification).
7 | For code optimisation,
8 | this library uses `Numpy <https://numpy.org/>`_ for array operations.
9 |
10 | This library also presents some new methods
11 | for adaptive signal processing.
12 | The library is designed to be used with datasets and also with
13 | real-time measuring (sample-after-sample feeding).
14 |
15 | .. toctree::
16 | :maxdepth: 2
17 |
18 | index
19 |
20 | License
21 | ===============
22 |
23 | This project is under the `MIT License <https://opensource.org/licenses/MIT>`_.
24 |
25 | Installation
26 | ====================
27 | With `pip <https://pypi.org/project/pip/>`_ from terminal: ``$ pip install padasip``
28 |
29 | Or you can download the source code from Github
30 | (`link <https://github.com/matousc89/padasip/>`_).
31 |
32 |
33 | Tutorials
34 | ===============
35 |
36 | All Padasip related tutorials are created as Jupyter notebooks. You can find
37 | them in the `Python Adaptive Signal Processing Handbook
38 | <https://github.com/matousc89/Python-Adaptive-Signal-Processing-Handbook>`_.
39 |
40 | The User Guide
41 | =====================
42 |
43 | If you need to know something that is not covered by the tutorials,
44 | check the complete documentation:
45 |
46 |
47 | .. toctree::
48 | :maxdepth: 2
49 | :titlesonly:
50 |
51 | sources/preprocess
52 | sources/filters
53 | sources/detection
54 | sources/misc
55 |
56 | Contact
57 | =====================
58 |
59 | By email: matousc@gmail.com
60 |
61 | Changelog
62 | ======================
63 |
64 | For information about versions and updates see :ref:`changelog`.
65 |
66 | Indices and tables
67 | ===========================
68 | * :ref:`genindex`
69 | * :ref:`modindex`
70 | * :ref:`search`
71 |
72 | """
73 | import padasip.filters
74 | import padasip.preprocess
75 | import padasip.misc
76 | import padasip.detection
77 |
78 | # will obsolete after 1.1.1
79 | import padasip.ann
80 |
81 | # back compatibility with v0.5
82 | from padasip.preprocess.standardize import standardize
83 | from padasip.preprocess.standardize_back import standardize_back
84 | from padasip.preprocess.input_from_history import input_from_history
85 |
--------------------------------------------------------------------------------
/tests/preprocess.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import sys
3 | import numpy as np
4 |
5 | sys.path.append('..')
6 | import padasip as pa
7 |
8 | class TestPreprocess(unittest.TestCase):
9 |
10 | def test_standardize(self):
11 | """
12 | Test standardization.
13 | """
14 | u = range(1000)
15 | x = pa.preprocess.standardize(u)
16 | self.assertAlmostEqual(x.std(), 1.0, 10)
17 | self.assertEqual(np.round(x.mean(), 3), -0.0)
18 |
19 | def test_standardize_back(self):
20 | """
21 | Test de-standardization.
22 | """
23 | x = range(1000)
24 | u = pa.preprocess.standardize_back(x, 2, 10)
25 | self.assertEqual(np.round(u.std(), 3), 2886.75)
26 | self.assertEqual(np.round(u.mean(), 3), 4997.0)
27 |
28 | def test_input_from_history(self):
29 | """
30 | Test input from history function.
31 | """
32 | u = range(1000)
33 | x = pa.preprocess.input_from_history(u, 4)
34 | self.assertEqual(x.shape, (997, 4))
35 | self.assertEqual(np.round(np.array(u).mean(), 5), np.round(x.mean(), 5))
36 |
37 | def test_pca(self):
38 | """
39 | Principal Component Analysis
40 | """
41 | np.random.seed(100)
42 | x = np.random.uniform(1, 10, (100, 3))
43 | # PCA components
44 | out = pa.preprocess.PCA_components(x)
45 | self.assertEqual(np.round(np.array(out).mean(), 5), 6.82133)
46 | # PCA analysis
47 | out = pa.preprocess.PCA(x, 2)
48 | self.assertEqual(out.shape, (100, 2))
49 | self.assertEqual(np.round(np.array(out).mean(), 5), 3.98888)
50 |
51 | def test_lda(self):
52 | """
53 |         Linear Discriminant Analysis
54 | """
55 | np.random.seed(100)
56 | N = 150
57 | classes = np.array(["1", "a", 3])
58 | cols = 4
59 | x = np.random.random((N, cols)) # random data
60 | labels = np.random.choice(classes, size=N) # random labels
61 | # LDA components
62 | out = pa.preprocess.LDA_discriminants(x, labels)
63 | self.assertEqual(np.round(np.array(out).mean(), 5), 0.01298)
64 | # LDA analysis
65 | new_x = pa.preprocess.LDA(x, labels, n=2)
66 | self.assertEqual(np.round(np.array(new_x).mean(), 5), -0.50907)
67 | self.assertEqual(new_x.shape, (150, 2))
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
--------------------------------------------------------------------------------
/padasip/filters/gmcc.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 1.2.0
3 |
4 | The generalized maximum correntropy criterion (GMCC)
5 | is implemented according to https://doi.org/10.1109/TSP.2016.2539127.
6 | The GMCC adaptive filter can be created as follows
7 |
8 | >>> import padasip as pa
9 | >>> pa.filters.FilterGMCC(n)
10 |
11 | where :code:`n` is the size (number of taps) of the filter.
12 |
13 | Content of this page:
14 |
15 | .. contents::
16 | :local:
17 | :depth: 1
18 |
19 | .. seealso:: :ref:`filters`
20 |
21 | Minimal Working Examples
22 | ======================================
23 |
24 | If you have measured data you may filter it as follows
25 |
26 | .. code-block:: python
27 |
28 | import numpy as np
29 | import matplotlib.pylab as plt
30 | import padasip as pa
31 |
32 | # creation of data
33 | N = 500
34 | x = np.random.normal(0, 1, (N, 4)) # input matrix
35 | v = np.random.normal(0, 0.1, N) # noise
36 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v # target
37 |
38 | # identification
39 | f = pa.filters.FilterGMCC(n=4, mu=0.1, w="random")
40 | y, e, w = f.run(d, x)
41 |
42 | # show results
43 | plt.figure(figsize=(15,9))
44 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
45 | plt.plot(d,"b", label="d - target")
46 | plt.plot(y,"g", label="y - output");plt.legend()
47 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
48 | plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
49 | plt.tight_layout()
50 | plt.show()
51 |
52 | Code Explanation
53 | ====================
54 | """
55 | import numpy as np
56 |
57 | from padasip.filters.base_filter import AdaptiveFilter
58 |
59 |
60 | class FilterGMCC(AdaptiveFilter):
61 | """
62 | This class represents an adaptive GMCC filter.
63 | """
64 | kind = "GMCC"
65 |
66 | def __init__(self, n, mu=0.01, lambd=0.03, alpha=2, **kwargs):
67 | """
68 | **Kwargs:**
69 |
70 | * `lambd` : kernel parameter (float) commonly known as lambda.
71 |
72 |         * `alpha` : shape parameter (float). `alpha = 2` makes the filter behave like LMS.
73 | """
74 | super().__init__(n, mu, **kwargs)
75 | self.lambd = lambd
76 | self.alpha = alpha
77 |
78 | def learning_rule(self, e, x):
79 | """
80 | Override the parent class.
81 | """
82 | return self.mu * self.lambd * self.alpha * \
83 | np.exp(-self.lambd * (np.abs(e) ** self.alpha)) * \
84 | (np.abs(e) ** (self.alpha - 1)) * np.sign(e) * x
85 |
--------------------------------------------------------------------------------
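A small numeric check of the GMCC learning rule above: for `alpha = 2` the update
reduces to an LMS-style term `e * x` scaled by the error-dependent gain
`2 * mu * lambd * exp(-lambd * e**2)` (standalone sketch, not library API):

    import numpy as np

    mu, lambd, alpha = 0.1, 0.03, 2
    e = 0.5                         # instantaneous error
    x = np.array([1.0, -2.0, 0.5])  # input vector

    # the rule as implemented in FilterGMCC.learning_rule
    dw = mu * lambd * alpha * np.exp(-lambd * np.abs(e) ** alpha) \
         * np.abs(e) ** (alpha - 1) * np.sign(e) * x

    # equivalent LMS-style form for alpha = 2
    dw_lms_like = 2 * mu * lambd * np.exp(-lambd * e ** 2) * e * x
    print(np.allclose(dw, dw_lms_like))  # True
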
/docs/source/sources/changelog.rst:
--------------------------------------------------------------------------------
1 | .. _changelog:
2 |
3 | Changelog
4 | ===========
5 |
6 | **Version 1.2.2** *Released: 2022-08-05*
7 | Added new adaptive filters: :ref:`filter-vslms_mathews`,
8 | :ref:`filter-vslms_benveniste`, :ref:`filter-vslms_ang`.
9 |
10 | **Version 1.2.1** *Released: 2022-02-07*
11 | Bugfix of the main adaptive filter class.
12 |
13 | **Version 1.2.0** *Released: 2022-01-28*
14 | All adaptive filters were significantly refactored.
15 | Added new adaptive filters :ref:`filter-gmcc`, :ref:`filter-llncosh`
16 | and :ref:`filter-ocnlms`.
17 | Added new detection tool :ref:`detection-ese`.
18 | ANN module is removed from docs, and will be removed totally in future -
19 | there are much better Python libraries for ANN utilization.
20 |
21 | **Version 1.1.1** *Released: 2017-08-06*
22 | Bugfix of adaptive filter helper function according to comments of
23 |     Patrick Bartels.
24 |
25 | **Version 1.1.0** *Released: 2017-05-19*
26 | Added new adaptive filters :ref:`filter-lmf`, :ref:`filter-nlmf`,
27 | :ref:`filter-sslms` and :ref:`filter-nsslms`.
28 |
29 | **Version 1.0.0** *Released: 2017-03-16*
30 | Added module :ref:`detection` containing :ref:`detection-le` and
31 | :ref:`detection-elbnd`.
32 | All implemented adaptive filters were updated. As a result,
33 | some obsolete helper functions for the adaptive filters were removed.
34 | Please use newer helper functions introduced in v0.7.
35 | Tutorials were updated and moved to `Python Adaptive Signal Processing Handbook
36 |     <https://github.com/matousc89/Python-Adaptive-Signal-Processing-Handbook>`_.
37 |
38 | **Version 0.7** *Released: 2017-01-07*
39 | Added new helper functions into :ref:`filters`. Furthermore, the
40 | documentation for adaptive filters module was updated.
41 | Added functions for error evaluation - MSE, MAE, RMSE and logSE
42 |     (:ref:`misc-error_evaluation`).
43 |
44 | **Version 0.6** *Released: 2016-12-15*
45 | Added :ref:`preprocess-pca` and :ref:`preprocess-lda`. The whole documentation
46 | for preprocess module was updated.
47 |
48 | **Version 0.5** *Released: 2016-11-16*
49 | Bugfix according to issue opened by https://github.com/lolpenguin
50 |
51 | **Version 0.4** *Released: 2016-09-29*
52 |     Added :ref:`filter-ap`. Also, the first unit tests were implemented.
53 |
54 | **Version 0.3** *Released: 2016-09-22*
55 | Added MLP into ANN module.
56 |
57 | **Version 0.2** *Released: 2016-09-02*
58 |     Added :ref:`filter-gngd`.
59 |
60 | **Version 0.1** *Released: 2016-03-18*
61 | Created
62 | :ref:`filter-lms`, :ref:`filter-nlms`,
63 | :ref:`filter-rls`, :ref:`preprocess-input_from_history`,
64 | :ref:`preprocess-standardize`, :ref:`preprocess-standardize_back`
65 |
--------------------------------------------------------------------------------
/padasip/filters/llncosh.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 1.2.0
3 |
4 | The least lncosh (Llncosh) algorithm (proposed in https://doi.org/10.1016/j.sigpro.2019.107348)
5 | is similar to the LMS adaptive filter.
6 |
7 | The Llncosh filter can be created as follows
8 |
9 | >>> import padasip as pa
10 | >>> pa.filters.FilterLlncosh(n)
11 |
12 | where :code:`n` is the size (number of taps) of the filter.
13 |
14 | Content of this page:
15 |
16 | .. contents::
17 | :local:
18 | :depth: 1
19 |
20 | .. seealso:: :ref:`filters`
21 |
22 | Algorithm Explanation
23 | ==========================
24 |
25 | The lncosh cost function is the natural logarithm of the hyperbolic cosine function,
26 | which behaves like a hybrid of the mean square error and mean absolute error
27 | criteria according to its positive parameter `lambd`.
28 |
29 | Minimal Working Examples
30 | ==============================
31 |
32 | If you have measured data you may filter it as follows
33 |
34 | .. code-block:: python
35 |
36 | import numpy as np
37 | import matplotlib.pylab as plt
38 | import padasip as pa
39 |
40 | # creation of data
41 | N = 500
42 | x = np.random.normal(0, 1, (N, 4)) # input matrix
43 | v = np.random.normal(0, 0.1, N) # noise
44 | d = 2 * x[:, 0] + 0.1 * x[:, 1] - 4 * x[:, 2] + 0.5 * x[:, 3] + v # target
45 |
46 | # identification
47 | f = pa.filters.FilterLlncosh(n=4, mu=0.1, lambd=0.1, w="random")
48 | y, e, w = f.run(d, x)
49 |
50 | # show results
51 | plt.figure(figsize=(15, 9))
52 | plt.subplot(211);
53 | plt.title("Adaptation");
54 | plt.xlabel("samples - k")
55 | plt.plot(d, "b", label="d - target")
56 | plt.plot(y, "g", label="y - output");
57 | plt.legend()
58 | plt.subplot(212);
59 | plt.title("Filter error");
60 | plt.xlabel("samples - k")
61 | plt.plot(10 * np.log10(e ** 2), "r", label="e - error [dB]");
62 | plt.legend()
63 | plt.tight_layout()
64 | plt.show()
65 |
66 | Code Explanation
67 | ====================
68 | """
69 | import numpy as np
70 |
71 | from padasip.filters.base_filter import AdaptiveFilter
72 |
73 |
74 | class FilterLlncosh(AdaptiveFilter):
75 | """
76 | This class represents an adaptive Llncosh filter.
77 | """
78 | kind = "Llncosh"
79 |
80 | def __init__(self, n, mu=0.01, lambd=3, **kwargs):
81 | """
82 | **Kwargs:**
83 |
84 | * `lambd` : lambda (float). Cost function shape parameter.
85 |
86 | """
87 | super().__init__(n, mu, **kwargs)
88 | self.lambd = lambd
89 |
90 | def learning_rule(self, e, x):
91 | """
92 | Override the parent class.
93 | """
94 | return self.mu * np.tanh(self.lambd * e) * x
95 |
--------------------------------------------------------------------------------
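The hybrid behaviour described in the docstring above follows directly from the
update term `tanh(lambd * e)`: for small `lambd * e` it is approximately
`lambd * e` (LMS-like), while for large errors it saturates to `sign(e)`
(sign-LMS-like). A short numeric sketch:

    import numpy as np

    lambd = 3.0
    print(np.tanh(lambd * 0.01))  # ~0.030, close to lambd * e -> LMS-like
    print(np.tanh(lambd * 10.0))  # ~1.000, close to sign(e) -> sign-LMS-like
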
/padasip/preprocess/standardize.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 0.1
3 | .. versionchanged:: 1.2.0
4 |
5 | This function standardizes (z-score) the series according to equation
6 |
7 | :math:`\\textbf{x}_s = \\frac{\\textbf{x} - a}{b}`
8 |
9 | where :math:`\\textbf{x}` is time series to standardize,
10 | :math:`a` is offset to remove and :math:`b` scale to remove
11 |
12 | .. contents::
13 | :local:
14 | :depth: 1
15 |
16 | See also: :ref:`preprocess-standardize_back`
17 |
18 | Usage Explanation
19 | ********************
20 |
21 | As simple as
22 |
23 | .. code-block:: python
24 |
25 | xs = pa.standardize(x, offset=a , scale=b)
26 |
27 | If the key arguments :code:`offset` and :code:`scale` are not provided
28 | (example below), the mean value and standard deviation of `x` are used.
29 |
30 | .. code-block:: python
31 |
32 | xs = pa.standardize(x)
33 |
34 | Minimal Working Example
35 | **************************
36 |
37 | An example of how to standardize (z-score) data:
38 |
39 | .. code-block:: python
40 |
41 | >>> import numpy as np
42 | >>> import padasip as pa
43 | >>> x = np.random.random(1000)
44 | >>> x.mean()
45 | 0.49755420774866677
46 | >>> x.std()
47 | 0.29015765297767376
48 | >>> xs = pa.standardize(x)
49 | >>> xs.mean()
50 | 1.4123424652012772e-16
51 | >>> xs.std()
52 | 0.99999999999999989
53 |
54 |
55 | Code Explanation
56 | *****************
57 | """
58 | from __future__ import division
59 | import numpy as np
60 |
61 | def standardize(x, offset=None, scale=None):
62 | """
63 |     This is a function for standardization of the input series.
64 |
65 | **Args:**
66 |
67 | * `x` : series (1 dimensional array)
68 |
69 | **Kwargs:**
70 |
71 | * `offset` : offset to remove (float). If not given, \
72 | the mean value of `x` is used.
73 |
74 | * `scale` : scale (float). If not given, \
75 | the standard deviation of `x` is used.
76 |
77 | **Returns:**
78 |
79 | * `xs` : standardized series
80 | """
81 | if offset is None:
82 | offset = np.array(x).mean()
83 | else:
84 | try:
85 | offset = float(offset)
86 | except:
87 | raise ValueError('The argument offset is not None or float')
88 | if scale is None:
89 | scale = np.array(x).std()
90 | else:
91 | try:
92 | scale = float(scale)
93 | except:
94 | raise ValueError('The argument scale is not None or float')
95 | try:
96 | x = np.array(x, dtype="float64")
97 | except:
98 | raise ValueError('The argument x is not numpy array or similar.')
99 | return (x - offset) / scale
100 |
--------------------------------------------------------------------------------
/padasip/filters/vslms_mathews.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 1.2.2
3 |
4 | The variable step-size least-mean-square (VSLMS) adaptive filter with Mathews's adaptation
5 | is implemented according to
6 | `DOI:10.1109/78.218137 `_.
7 |
8 |
9 | The VSLMS filter with Mathews adaptation can be created as follows
10 |
11 | >>> import padasip as pa
12 | >>> pa.filters.FilterVSLMS_Mathews(n)
13 |
14 | where `n` is the size (number of taps) of the filter.
15 |
16 | Content of this page:
17 |
18 | .. contents::
19 | :local:
20 | :depth: 1
21 |
22 | .. seealso:: :ref:`filters`
23 |
24 |
25 | Minimal Working Examples
26 | ======================================
27 |
28 | If you have measured data you may filter it as follows
29 |
30 | .. code-block:: python
31 |
32 | import numpy as np
33 | import matplotlib.pylab as plt
34 | import padasip as pa
35 |
36 | # creation of data
37 | N = 500
38 | x = np.random.normal(0, 1, (N, 4)) # input matrix
39 | v = np.random.normal(0, 0.1, N) # noise
40 | d = 2 * x[:, 0] + 0.1 * x[:, 1] - 4 * x[:, 2] + 0.5 * x[:, 3] + v # target
41 |
42 | # identification
43 | f = pa.filters.FilterVSLMS_Mathews(n=4, mu=0.1, ro=0.001, w="random")
44 | y, e, w = f.run(d, x)
45 |
46 | # show results
47 | plt.figure(figsize=(15, 9))
48 | plt.subplot(211);
49 | plt.title("Adaptation");
50 | plt.xlabel("samples - k")
51 | plt.plot(d, "b", label="d - target")
52 | plt.plot(y, "g", label="y - output");
53 | plt.legend()
54 | plt.subplot(212);
55 | plt.title("Filter error");
56 | plt.xlabel("samples - k")
57 | plt.plot(10 * np.log10(e ** 2), "r", label="e - error [dB]");
58 | plt.legend()
59 | plt.tight_layout()
60 | plt.show()
61 |
62 |
63 | Code Explanation
64 | ======================================
65 | """
66 | import numpy as np
67 |
68 | from padasip.filters.base_filter import AdaptiveFilter
69 |
70 |
71 | class FilterVSLMS_Mathews(AdaptiveFilter):
72 | """
73 | This class represents an adaptive VSLMS filter with Mathews's adaptation.
74 | """
75 | kind = "VSLMS_Mathews"
76 |
77 | def __init__(self, n, mu=1., ro=0.1, **kwargs):
78 | """
79 | **Kwargs:**
80 |
81 |         * `ro` : step size adaptation gain (float). It controls how fast
82 |           the step size `mu` adapts.
83 |
84 | """
85 | super().__init__(n, mu, **kwargs)
86 | self.ro = ro
87 | self.last_e = 0
88 | self.last_x = np.zeros(n)
89 | self.last_mu = mu
90 |
91 | def learning_rule(self, e, x):
92 | """
93 | Override the parent class.
94 | """
95 | mu = self.last_mu + (self.ro * e * self.last_e * np.dot(self.last_x, x))
96 | self.last_e, self.last_mu, self.last_x = e, mu, x
97 | return mu * e * x
98 |
--------------------------------------------------------------------------------
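The Mathews recursion implemented above adapts the step size with the correlation
of successive gradient terms: mu(k) = mu(k-1) + ro * e(k) * e(k-1) * x(k-1)^T x(k).
A tiny standalone sketch of one update (illustrative values only):

    import numpy as np

    ro, mu = 0.001, 0.1
    last_e, last_x = 0.2, np.array([1.0, 0.5])  # previous error and input
    e, x = 0.3, np.array([0.8, 0.4])            # current error and input

    mu = mu + ro * e * last_e * np.dot(last_x, x)  # step size update
    dw = mu * e * x                                # weight update
    print(mu)  # slightly above 0.1: consecutive gradients agree
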
/padasip/filters/gngd.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 0.2
3 | .. versionchanged:: 1.2.0
4 |
5 | The generalized normalized gradient descent (GNGD) adaptive filter
6 | is an extension of the NLMS adaptive filter (:ref:`filter-nlms`).
7 |
8 | The GNGD filter can be created as follows
9 |
10 | >>> import padasip as pa
11 | >>> pa.filters.FilterGNGD(n)
12 |
13 | where `n` is the size (number of taps) of the filter.
14 |
15 | Content of this page:
16 |
17 | .. contents::
18 | :local:
19 | :depth: 1
20 |
21 | .. seealso:: :ref:`filters`
22 |
23 |
24 | Minimal Working Examples
25 | ======================================
26 |
27 | If you have measured data you may filter it as follows
28 |
29 | .. code-block:: python
30 |
31 | import numpy as np
32 | import matplotlib.pylab as plt
33 | import padasip as pa
34 |
35 | # creation of data
36 | N = 500
37 | x = np.random.normal(0, 1, (N, 4)) # input matrix
38 | v = np.random.normal(0, 0.1, N) # noise
39 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v # target
40 |
41 | # identification
42 | f = pa.filters.FilterGNGD(n=4, mu=0.1, w="random")
43 | y, e, w = f.run(d, x)
44 |
45 | # show results
46 | plt.figure(figsize=(15,9))
47 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
48 | plt.plot(d,"b", label="d - target")
49 | plt.plot(y,"g", label="y - output");plt.legend()
50 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
51 | plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
52 | plt.tight_layout()
53 | plt.show()
54 |
55 | Code Explanation
56 | ======================================
57 | """
58 | import numpy as np
59 |
60 | from padasip.filters.base_filter import AdaptiveFilter
61 |
62 | class FilterGNGD(AdaptiveFilter):
63 | """
64 | Adaptive GNGD filter.
65 | """
66 | kind = "GNGD"
67 |
68 | def __init__(self, n, mu=1., eps=1., ro=0.1, **kwargs):
69 | """
70 | **Kwargs:**
71 |
72 | * `eps` : compensation term (float) at the beginning. It is an adaptive
73 | parameter.
74 |
75 | * `ro` : step size adaptation parameter (float) at the beginning.
76 | It is an adaptive parameter.
77 |
78 | """
79 | super().__init__(n, mu, **kwargs)
80 | self.eps = eps
81 | self.ro = ro
82 | self.last_e = 0
83 | self.last_x = np.zeros(n)
84 |
85 | def learning_rule(self, e, x):
86 | """
87 | Override the parent class.
88 | """
89 | self.eps = self.eps - self.ro * self.mu * e * self.last_e * \
90 | np.dot(x, self.last_x) / \
91 | (np.dot(self.last_x, self.last_x) + self.eps) ** 2
92 | nu = self.mu / (self.eps + np.dot(x, x))
93 | self.last_e, self.last_x = e, x
94 | return nu * e * x
95 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | This library is designed to simplify adaptive signal
2 | processing tasks within Python
3 | (filtering, prediction, reconstruction).
4 | For code optimisation, this library uses NumPy for array operations.
5 |
6 | This library also presents some new methods for adaptive signal processing.
7 | The library is designed to be used with datasets and also with
8 | real-time measuring (sample-after-sample feeding).
9 |
10 | ============================
11 | Tutorials and Documentation
12 | ============================
13 |
14 | Everything is on GitHub:
15 |
16 | http://matousc89.github.io/padasip/
17 |
18 | ================
19 | Current Features
20 | ================
21 |
22 | ********************
23 | Data Preprocessing
24 | ********************
25 |
26 | - Principal Component Analysis (PCA)
27 |
28 | - Linear Discriminant Analysis (LDA)
29 |
30 | ******************
31 | Adaptive Filters
32 | ******************
33 |
34 | The library features multiple adaptive filters. Input vectors for filters can be
35 | constructed manually or with the assistance of included functions.
36 | So far it is possible to use the following filters:
37 |
38 | - LMS (least-mean-squares) adaptive filter
39 |
40 | - NLMS (normalized least-mean-squares) adaptive filter
41 |
42 | - LMF (least-mean-fourth) adaptive filter
43 |
44 | - NLMF (normalized least-mean-fourth) adaptive filter
45 |
46 | - SSLMS (sign-sign least-mean-squares) adaptive filter
47 |
48 | - NSSLMS (normalized sign-sign least-mean-squares) adaptive filter
49 |
50 | - RLS (recursive-least-squares) adaptive filter
51 |
52 | - GNGD (generalized normalized gradient descent) adaptive filter
53 |
54 | - AP (affine projection) adaptive filter
55 |
56 | - GMCC (generalized maximum correntropy criterion) adaptive filter
57 |
58 | - OCNLMS (online centered normalized least-mean-squares) adaptive filter
59 |
60 | - Llncosh (least lncosh) adaptive filter
61 |
62 | - Variable step-size least-mean-square (VSLMS) with Ang’s adaptation
63 |
64 | - Variable step-size least-mean-square (VSLMS) with Benveniste’s adaptation
65 |
66 | - Variable step-size least-mean-square (VSLMS) with Mathews’s adaptation
67 |
68 |
69 | ******************
70 | Detection Tools
71 | ******************
72 |
73 | The library features three novelty/outlier detection tools:
74 |
75 | - Error and Learning Based Novelty Detection (ELBND)
76 |
77 | - Learning Entropy (LE)
78 |
79 | - Extreme Seeking Entropy (ESE)
80 |
81 | *************
82 | Cite Padasip
83 | *************
84 |
85 | .. code-block:: none
86 |
87 | @article{cejnek2022padasip,
88 | title={Padasip: An open-source Python toolbox for adaptive filtering},
89 | author={Cejnek, Matous and Vrba, Jan},
90 | journal={Journal of Computational Science},
91 | volume={65},
92 | pages={101887},
93 | year={2022},
94 | publisher={Elsevier}
95 | }
96 |
97 |
--------------------------------------------------------------------------------
/padasip/filters/lmf.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 1.1.0
3 | .. versionchanged:: 1.2.0
4 |
5 | The least-mean-fourth (LMF) adaptive filter is implemented according to the
6 | original LMF paper of Walach and Widrow (1984).
7 |
8 | The LMF filter can be created as follows
9 |
10 | >>> import padasip as pa
11 | >>> pa.filters.FilterLMF(n)
12 |
13 | where :code:`n` is the size (number of taps) of the filter.
14 |
15 | Content of this page:
16 |
17 | .. contents::
18 | :local:
19 | :depth: 1
20 |
21 | .. seealso:: :ref:`filters`
22 |
23 | Algorithm Explanation
24 | ==========================
25 |
26 | The LMF adaptive filter could be described as
27 |
28 | :math:`y(k) = w_1 \cdot x_{1}(k) + ... + w_n \cdot x_{n}(k)`,
29 |
30 | or in a vector form
31 |
32 | :math:`y(k) = \\textbf{x}^T(k) \\textbf{w}(k)`,
33 |
34 | where :math:`k` is discrete time index, :math:`(.)^T` denotes the transposition,
35 | :math:`y(k)` is filtered signal,
36 | :math:`\\textbf{w}` is vector of filter adaptive parameters and
37 | :math:`\\textbf{x}` is input vector (for a filter of size :math:`n`) as follows
38 |
39 | :math:`\\textbf{x}(k) = [x_1(k), ..., x_n(k)]`.
40 |
41 | The LMF weights adaptation could be described as follows
42 |
43 | :math:`\\textbf{w}(k+1) = \\textbf{w}(k) + \Delta \\textbf{w}(k)`,
44 |
45 | where :math:`\Delta \\textbf{w}(k)` is
46 |
47 | :math:`\Delta \\textbf{w}(k) = -\\frac{1}{4} \mu \\frac{\partial e^4(k)}
48 | { \partial \\textbf{w}(k)}\ = \mu \cdot e(k)^{3} \cdot \\textbf{x}(k)`,
49 |
50 | where :math:`\mu` is the learning rate (step size) and :math:`e(k)`
51 | is error defined as
52 |
53 | :math:`e(k) = d(k) - y(k)`.
54 |
55 |
56 | Minimal Working Examples
57 | ==============================
58 |
59 | If you have measured data you may filter it as follows
60 |
61 | .. code-block:: python
62 |
63 |     import numpy as np
64 |     import matplotlib.pylab as plt
65 |     import padasip as pa
66 | 
67 |     # creation of data
68 |     N = 500
69 |     x = np.random.normal(0, 1, (N, 4)) # input matrix
70 |     v = np.random.normal(0, 0.1, N) # noise
71 |     d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v # target
72 | 
73 |     # identification
74 |     f = pa.filters.FilterLMF(n=4, mu=0.01, w="random")
75 |     y, e, w = f.run(d, x)
76 | 
77 |     # show results
78 |     plt.figure(figsize=(15,9))
79 |     plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
80 |     plt.plot(d,"b", label="d - target")
81 |     plt.plot(y,"g", label="y - output");plt.legend()
82 |     plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
83 |     plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
84 |     plt.tight_layout()
85 |     plt.show()
86 | 
87 | 
88 | Code Explanation
89 | ====================
90 | """
91 | from padasip.filters.base_filter import AdaptiveFilter
92 | 
93 | class FilterLMF(AdaptiveFilter):
94 |     """
95 |     This class represents an adaptive LMF filter.
96 |     """
97 |     kind = "LMF"
98 | 
99 |     def learning_rule(self, e, x):
100 |         """
101 |         Override the parent class.
102 |         """
103 |         return self.mu * x * e ** 3
--------------------------------------------------------------------------------
/padasip/filters/sslms.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 1.1.0
3 | .. versionchanged:: 1.2.0
4 |
5 | The sign-sign least-mean-square (SSLMS) adaptive filter can be created as follows
6 |
7 | >>> import padasip as pa
8 | >>> pa.filters.FilterSSLMS(n)
9 |
10 | where :code:`n` is the size (number of taps) of the filter.
11 |
12 | Content of this page:
13 |
14 | .. contents::
15 | :local:
16 | :depth: 1
17 |
18 | .. seealso:: :ref:`filters`
19 |
20 | Algorithm Explanation
21 | ==========================
22 |
23 | The SSLMS adaptive filter could be described as
24 |
25 | :math:`y(k) = w_1 \cdot x_{1}(k) + ... + w_n \cdot x_{n}(k)`,
26 |
27 | or in a vector form
28 |
29 | :math:`y(k) = \\textbf{x}^T(k) \\textbf{w}(k)`,
30 |
31 | where :math:`k` is discrete time index, :math:`(.)^T` denotes the transposition,
32 | :math:`y(k)` is filtered signal,
33 | :math:`\\textbf{w}` is vector of filter adaptive parameters and
34 | :math:`\\textbf{x}` is input vector (for a filter of size :math:`n`) as follows
35 |
36 | :math:`\\textbf{x}(k) = [x_1(k), ..., x_n(k)]`.
37 |
38 | The SSLMS weights adaptation could be described as follows
39 |
40 | :math:`\\textbf{w}(k+1) = \\textbf{w}(k) + \Delta \\textbf{w}(k)`,
41 |
42 | where :math:`\Delta \\textbf{w}(k)` is
43 |
44 | :math:`\Delta \\textbf{w}(k) = \mu \cdot \\text{sgn}(e(k)) \cdot
45 | \\text{sgn}(\\textbf{x}(k))`,
46 |
47 | where :math:`\mu` is the learning rate (step size) and :math:`e(k)`
48 | is error defined as
49 |
50 | :math:`e(k) = d(k) - y(k)`.
51 |
52 |
53 | Minimal Working Examples
54 | ==============================
55 |
56 | If you have measured data you may filter it as follows
57 |
58 | .. code-block:: python
59 |
60 | import numpy as np
61 | import matplotlib.pylab as plt
62 | import padasip as pa
63 |
64 | # creation of data
65 | N = 500
66 | x = np.random.normal(0, 1, (N, 4)) # input matrix
67 | v = np.random.normal(0, 0.1, N) # noise
68 | d = 2*x[:,0] + 0.1*x[:,1] - 0.3*x[:,2] + 0.5*x[:,3] + v # target
69 |
70 | # identification
71 | f = pa.filters.FilterSSLMS(n=4, mu=0.01, w="random")
72 | y, e, w = f.run(d, x)
73 |
74 | # show results
75 | plt.figure(figsize=(15,9))
76 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
77 | plt.plot(d,"b", label="d - target")
78 | plt.plot(y,"g", label="y - output");plt.legend()
79 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
80 | plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
81 | plt.tight_layout()
82 | plt.show()
83 |
84 |
85 | Code Explanation
86 | ====================
87 | """
88 | import numpy as np
89 |
90 | from padasip.filters.base_filter import AdaptiveFilter
91 |
92 |
93 | class FilterSSLMS(AdaptiveFilter):
94 | """
95 | This class represents an adaptive SSLMS filter.
96 | """
97 | kind = "SSLMS"
98 |
99 | def learning_rule(self, e, x):
100 | """
101 | Override the parent class.
102 | """
103 | return self.mu * np.sign(x) * np.sign(e)
104 |
--------------------------------------------------------------------------------
/padasip/filters/vslms_benveniste.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 1.2.2
3 |
4 | The variable step-size least-mean-square (VSLMS) adaptive filter with Benveniste's adaptation
5 | is implemented according to
6 | `DOI:10.1109/EMBC.2013.6610622 <https://doi.org/10.1109/EMBC.2013.6610622>`_.
7 |
8 |
9 | The VSLMS filter with Benveniste adaptation can be created as follows
10 |
11 | >>> import padasip as pa
12 | >>> pa.filters.FilterVSLMS_Benveniste(n)
13 |
14 | where `n` is the size (number of taps) of the filter.
15 |
16 | Content of this page:
17 |
18 | .. contents::
19 | :local:
20 | :depth: 1
21 |
22 | .. seealso:: :ref:`filters`
23 |
24 |
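   | Algorithm Explanation
   | ======================================
   |
   | The following summary is reconstructed directly from the update rule
   | implemented below (see the :code:`learning_rule` method). The step size
   | :math:`\mu(k)` is adapted in every sample with help of an auxiliary
   | vector :math:`\phi(k)` as follows
   |
   | :math:`\phi(k) = [\\textbf{I} - \mu(k-1) \\textbf{x}(k-1) \\textbf{x}^T(k-1)]
   | \phi(k-1) + e(k-1) \\textbf{x}(k-1)`,
   |
   | :math:`\mu(k) = \mu(k-1) + \\rho \cdot e(k) \cdot \\textbf{x}^T(k-1) \phi(k)`,
   |
   | and the resulting weight increment is
   |
   | :math:`\Delta \\textbf{w}(k) = \mu(k) \cdot e(k) \cdot \\textbf{x}(k)`.
   |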
25 | Minimal Working Examples
26 | ======================================
27 |
28 | If you have measured data you may filter it as follows
29 |
30 | .. code-block:: python
31 |
32 | import numpy as np
33 | import matplotlib.pylab as plt
34 | import padasip as pa
35 |
36 | # creation of data
37 | N = 500
38 | x = np.random.normal(0, 1, (N, 4)) # input matrix
39 | v = np.random.normal(0, 0.1, N) # noise
40 | d = 2 * x[:, 0] + 0.1 * x[:, 1] - 4 * x[:, 2] + 0.5 * x[:, 3] + v # target
41 |
42 | # identification
43 | f = pa.filters.FilterVSLMS_Benveniste(n=4, mu=0.1, ro=0.0002, w="random")
44 | y, e, w = f.run(d, x)
45 |
46 | # show results
47 | plt.figure(figsize=(15, 9))
48 | plt.subplot(211);
49 | plt.title("Adaptation");
50 | plt.xlabel("samples - k")
51 | plt.plot(d, "b", label="d - target")
52 | plt.plot(y, "g", label="y - output");
53 | plt.legend()
54 | plt.subplot(212);
55 | plt.title("Filter error");
56 | plt.xlabel("samples - k")
57 | plt.plot(10 * np.log10(e ** 2), "r", label="e - error [dB]");
58 | plt.legend()
59 | plt.tight_layout()
60 | plt.show()
61 |
62 |
63 | Code Explanation
64 | ======================================
65 | """
66 | import numpy as np
67 |
68 | from padasip.filters.base_filter import AdaptiveFilter
69 |
70 |
71 | class FilterVSLMS_Benveniste(AdaptiveFilter):
72 | """
73 | This class represents an adaptive VSLMS filter with Benveniste's adaptation.
74 | """
75 | kind = "VSLMS_Benveniste"
76 |
77 | def __init__(self, n, mu=1., ro=0.1, **kwargs):
78 | """
79 | **Kwargs:**
80 |
81 |         * `ro` : step size adaptation parameter (float). It controls
82 |           how fast the step size `mu` is adapted.
83 |
84 | """
85 | super().__init__(n, mu, **kwargs)
86 | self.ro = ro
87 | self.last_e = 0
88 | self.last_x = np.zeros(n)
89 | self.last_fi = np.zeros(n)
90 | self.last_mu = mu
91 |
92 | def learning_rule(self, e, x):
93 | """
94 | Override the parent class.
95 | """
96 | fi_part = np.eye(self.n) - (self.last_mu * np.outer(self.last_x, self.last_x))
97 | fi = np.dot(fi_part, self.last_fi) + (self.last_e * self.last_x)
98 | mu = self.last_mu + (self.ro * e * np.dot(self.last_x, fi))
99 | self.last_e, self.last_mu, self.last_x, self.last_fi = e, mu, x, fi
100 | return mu * e * x
101 |
--------------------------------------------------------------------------------
/padasip/filters/vslms_ang.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 1.2.2
3 |
4 | The variable step-size least-mean-square (VSLMS) adaptive filter with Ang's adaptation
5 | is implemented according to
6 | `DOI:10.1109/78.912925 <https://doi.org/10.1109/78.912925>`_.
7 |
8 |
9 | The VSLMS filter with Ang's adaptation can be created as follows
10 |
11 | >>> import padasip as pa
12 | >>> pa.filters.FilterVSLMS_Ang(n)
13 |
14 | where `n` is the size (number of taps) of the filter.
15 |
16 | Content of this page:
17 |
18 | .. contents::
19 | :local:
20 | :depth: 1
21 |
22 | .. seealso:: :ref:`filters`
23 |
24 |
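   | Algorithm Explanation
   | ======================================
   |
   | The following summary is reconstructed directly from the update rule
   | implemented below. Ang's method replaces the matrix recursion of
   | Benveniste's algorithm with a simple leaky accumulator controlled
   | by the constant :math:`a`
   |
   | :math:`\phi(k) = a \cdot \phi(k-1) + e(k-1) \cdot \\textbf{x}(k-1)`,
   |
   | :math:`\mu(k) = \mu(k-1) + \\rho \cdot e(k) \cdot \\textbf{x}^T(k-1) \phi(k)`,
   |
   | and the resulting weight increment is
   |
   | :math:`\Delta \\textbf{w}(k) = \mu(k) \cdot e(k) \cdot \\textbf{x}(k)`.
   |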
25 | Minimal Working Examples
26 | ======================================
27 |
28 | If you have measured data you may filter it as follows
29 |
30 | .. code-block:: python
31 |
32 | import numpy as np
33 | import matplotlib.pylab as plt
34 | import padasip as pa
35 |
36 | # creation of data
37 | N = 500
38 | x = np.random.normal(0, 1, (N, 4)) # input matrix
39 | v = np.random.normal(0, 0.1, N) # noise
40 | d = 2 * x[:, 0] + 0.1 * x[:, 1] - 4 * x[:, 2] + 0.5 * x[:, 3] + v # target
41 |
42 | # identification
43 | f = pa.filters.FilterVSLMS_Ang(n=4, mu=0.1, ro=0.0002, w="random")
44 | y, e, w = f.run(d, x)
45 |
46 | # show results
47 | plt.figure(figsize=(15, 9))
48 | plt.subplot(211);
49 | plt.title("Adaptation");
50 | plt.xlabel("samples - k")
51 | plt.plot(d, "b", label="d - target")
52 | plt.plot(y, "g", label="y - output");
53 | plt.legend()
54 | plt.subplot(212);
55 | plt.title("Filter error");
56 | plt.xlabel("samples - k")
57 | plt.plot(10 * np.log10(e ** 2), "r", label="e - error [dB]");
58 | plt.legend()
59 | plt.tight_layout()
60 | plt.show()
61 |
62 |
63 | Code Explanation
64 | ======================================
65 | """
66 | import numpy as np
67 |
68 | from padasip.filters.base_filter import AdaptiveFilter
69 |
70 |
71 | class FilterVSLMS_Ang(AdaptiveFilter):
72 | """
73 | This class represents an adaptive VSLMS filter with Ang's adaptation.
74 | """
75 | kind = "VSLMS_Ang"
76 |
77 | def __init__(self, n, mu=1., ro=0.0002, a=0.95, **kwargs):
78 | """
79 | **Kwargs:**
80 |
81 |         * `ro` : step size adaptation parameter (float). It controls
82 |           how fast the step size `mu` is adapted.
83 |
84 |         * `a` : small constant close to 1 (but smaller). It works as a simplification
85 |           of Benveniste's algorithm.
86 |
87 | """
88 | super().__init__(n, mu, **kwargs)
89 | self.ro = ro
90 | self.a = a
91 | self.last_e = 0
92 | self.last_x = np.zeros(n)
93 | self.last_fi = np.zeros(n)
94 | self.last_mu = mu
95 |
96 | def learning_rule(self, e, x):
97 | """
98 | Override the parent class.
99 | """
100 | fi = (self.a * self.last_fi) + (self.last_e * self.last_x)
101 | mu = self.last_mu + (self.ro * e * np.dot(self.last_x, fi))
102 | self.last_e, self.last_mu, self.last_x, self.last_fi = e, mu, x, fi
103 | return mu * e * x
104 |
--------------------------------------------------------------------------------
/padasip/filters/nlmf.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 1.1.0
3 | .. versionchanged:: 1.2.0
4 |
5 | The normalized least-mean-fourth (NLMF) adaptive filter is an extension of the LMF
6 | adaptive filter (:ref:`filter-lmf`).
7 |
8 | The NLMF filter can be created as follows
9 |
10 | >>> import padasip as pa
11 | >>> pa.filters.FilterNLMF(n)
12 |
13 | where `n` is the size (number of taps) of the filter.
14 |
15 | Content of this page:
16 |
17 | .. contents::
18 | :local:
19 | :depth: 1
20 |
21 | .. seealso:: :ref:`filters`
22 |
23 | Algorithm Explanation
24 | ======================================
25 |
26 | The NLMF is an extension of the LMF filter. See :ref:`filter-lmf`
27 | for an explanation of the algorithm behind it.
28 |
29 | The extension is based on the normalization of the learning rate.
30 | The learning rate :math:`\mu` is replaced by the learning rate :math:`\eta(k)`
31 | normalized with every new sample according to the input power as follows
32 |
33 | :math:`\eta (k) = \\frac{\mu}{\epsilon + || \\textbf{x}(k) ||^2}`,
34 |
35 | where :math:`|| \\textbf{x}(k) ||^2` is norm of input vector and
36 | :math:`\epsilon` is a small positive constant (regularization term).
37 | This constant is introduced to preserve the stability in cases where
38 | the input is close to zero.
39 |
40 | Minimal Working Examples
41 | ======================================
42 |
43 | If you have measured data you may filter it as follows
44 |
45 | .. code-block:: python
46 |
47 | import numpy as np
48 | import matplotlib.pylab as plt
49 | import padasip as pa
50 |
51 | # creation of data
52 | N = 500
53 | x = np.random.normal(0, 1, (N, 4)) # input matrix
54 | v = np.random.normal(0, 0.1, N) # noise
55 | d = 2*x[:,0] + 0.1*x[:,1] - 0.3*x[:,2] + 0.5*x[:,3] + v # target
56 |
57 | # identification
58 | f = pa.filters.FilterNLMF(n=4, mu=0.1, w="random")
59 | y, e, w = f.run(d, x)
60 |
61 | # show results
62 | plt.figure(figsize=(15,9))
63 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
64 | plt.plot(d,"b", label="d - target")
65 | plt.plot(y,"g", label="y - output");plt.legend()
66 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
67 | plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
68 | plt.tight_layout()
69 | plt.show()
70 |
71 | Code Explanation
72 | ======================================
73 | """
74 | import numpy as np
75 |
76 | from padasip.filters.base_filter import AdaptiveFilter
77 |
78 | class FilterNLMF(AdaptiveFilter):
79 | """
80 | Adaptive NLMF filter.
81 | """
82 | kind = "NLMF"
83 |
84 | def __init__(self, n, mu=0.1, eps=0.001, **kwargs):
85 | """
86 | **Kwargs:**
87 |
88 | * `eps` : regularization term (float). It is introduced to preserve
89 | stability for close-to-zero input vectors
90 |
91 | """
92 | super().__init__(n, mu, **kwargs)
93 | self.eps = eps
94 |
95 | def learning_rule(self, e, x):
96 | """
97 | Override the parent class.
98 | """
99 | return self.mu / (self.eps + np.dot(x, x)) * x * e ** 3
100 |
--------------------------------------------------------------------------------
/padasip/filters/nsslms.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 1.1.0
3 | .. versionchanged:: 1.2.0
4 |
5 | The normalized sign-sign least-mean-square (NSSLMS) adaptive filter
6 | is an extension of the popular SSLMS adaptive filter (:ref:`filter-sslms`).
7 |
8 | The NSSLMS filter can be created as follows
9 |
10 | >>> import padasip as pa
11 | >>> pa.filters.FilterNSSLMS(n)
12 |
13 | where `n` is the size (number of taps) of the filter.
14 |
15 | Content of this page:
16 |
17 | .. contents::
18 | :local:
19 | :depth: 1
20 |
21 | .. seealso:: :ref:`filters`
22 |
23 | Algorithm Explanation
24 | ======================================
25 |
26 | The NSSLMS is an extension of the SSLMS filter. See :ref:`filter-sslms`
27 | for an explanation of the algorithm behind it.
28 |
29 | The extension is based on the normalization of the learning rate.
30 | The learning rate :math:`\mu` is replaced by the learning rate :math:`\eta(k)`
31 | normalized with every new sample according to the input power as follows
32 |
33 | :math:`\eta (k) = \\frac{\mu}{\epsilon + || \\textbf{x}(k) ||^2}`,
34 |
35 | where :math:`|| \\textbf{x}(k) ||^2` is norm of input vector and
36 | :math:`\epsilon` is a small positive constant (regularization term).
37 | This constant is introduced to preserve the stability in cases where
38 | the input is close to zero.
39 |
40 | Minimal Working Examples
41 | ======================================
42 |
43 | If you have measured data you may filter it as follows
44 |
45 | .. code-block:: python
46 |
47 | import numpy as np
48 | import matplotlib.pylab as plt
49 | import padasip as pa
50 |
51 | # creation of data
52 | N = 500
53 | x = np.random.normal(0, 1, (N, 4)) # input matrix
54 | v = np.random.normal(0, 0.1, N) # noise
55 | d = 2*x[:,0] + 0.1*x[:,1] - 0.3*x[:,2] + 0.5*x[:,3] + v # target
56 |
57 | # identification
58 | f = pa.filters.FilterNSSLMS(n=4, mu=0.1, w="random")
59 | y, e, w = f.run(d, x)
60 |
61 | # show results
62 | plt.figure(figsize=(15,9))
63 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
64 | plt.plot(d,"b", label="d - target")
65 | plt.plot(y,"g", label="y - output");plt.legend()
66 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
67 | plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
68 | plt.tight_layout()
69 | plt.show()
70 |
71 |
72 |
73 | Code Explanation
74 | ======================================
75 | """
76 | import numpy as np
77 |
78 | from padasip.filters.base_filter import AdaptiveFilter
79 |
80 | class FilterNSSLMS(AdaptiveFilter):
81 | """
82 | Adaptive NSSLMS filter.
83 | """
84 | kind = "NSSLMS"
85 |
86 | def __init__(self, n, mu=0.1, eps=0.001, **kwargs):
87 | """
88 | **Kwargs:**
89 |
90 | * `eps` : regularization term (float). It is introduced to preserve
91 | stability for close-to-zero input vectors
92 | """
93 | super().__init__(n, mu, **kwargs)
94 | self.eps = eps
95 |
96 | def learning_rule(self, e, x):
97 | """
98 | Override the parent class.
99 | """
100 | return self.mu / (self.eps + np.dot(x, x)) * np.sign(x) * np.sign(e)
101 |
--------------------------------------------------------------------------------
/padasip/detection/elbnd.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 1.0.0
3 | .. versionchanged:: 1.2.0
4 |
5 | The Error and Learning Based Novelty Detection (ELBND) is based on the
6 | evaluation of an adaptive model error and the change of its parameters.
7 |
8 | Content of this page:
9 |
10 | .. contents::
11 | :local:
12 | :depth: 1
13 |
14 | Algorithm Explanation
15 | ==========================
16 |
17 | The ELBND can describe every sample with a vector of values estimated from
18 | the adaptive increments of any adaptive model and the error of that model
19 | as follows
20 |
21 | :math:`\\textrm{ELBND}(k) = \Delta \\textbf{w}(k) e(k).`
22 |
23 | The output is a vector of values describing the novelty in a given sample.
24 | To obtain a single novelty value for every sample, it is possible to use
25 | various output functions, for example the maximum of absolute values
26 |
27 | :math:`\\textrm{elbnd}(k) = \max |\\textrm{ELBND}(k)|.`
28 |
29 | Another popular option is the sum of absolute values.
30 |
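   | For a single sample the formula can be evaluated by hand. The following
   | toy sketch (with made-up numbers, independent of the helper function
   | below) shows both reductions:
   |
   | .. code-block:: python
   |
   |     import numpy as np
   |
   |     dw_k = np.array([0.01, -0.20, 0.03])  # weight increments at sample k
   |     e_k = 0.5                             # error of the model at sample k
   |     elbnd_k = np.abs(dw_k * e_k)          # ELBND vector for sample k
   |     print(np.max(elbnd_k))                # maximum reduction -> 0.1
   |     print(np.sum(elbnd_k))                # sum reduction -> 0.12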
31 |
32 | Usage Instructions
33 | ========================
34 |
35 | The ELBND algorithm can be used as follows
36 |
37 | .. code-block:: python
38 |
39 | elbnd = pa.detection.ELBND(w, e, function="max")
40 |
41 | where `w` is the matrix of the adaptive parameters (changing in time; every row
42 | should represent one time index), `e` is the error of the adaptive model and
43 | `function` is the output function, in this case the maximum.
44 |
45 |
46 | Minimal Working Example
47 | ============================
48 |
49 | This example demonstrates how ELBND can highlight the position of
50 | a perturbation inserted into data. The :ref:`filter-nlms` adaptive filter
51 | is used as the adaptive model. The perturbation is manually inserted
52 | at the sample with index :math:`k=1000` (the length of data is 2000).
53 |
54 | .. code-block:: python
55 |
56 | import numpy as np
57 | import matplotlib.pylab as plt
58 | import padasip as pa
59 |
60 | # data creation
61 | n = 5
62 | N = 2000
63 | x = np.random.normal(0, 1, (N, n))
64 | d = np.sum(x, axis=1) + np.random.normal(0, 0.1, N)
65 |
66 | # perturbation insertion
67 | d[1000] += 2.
68 |
69 | # creation of learning model (adaptive filter)
70 | f = pa.filters.FilterNLMS(n, mu=1., w=np.ones(n))
71 | y, e, w = f.run(d, x)
72 |
73 |     # estimation of ELBND with weights from the learning model
74 | elbnd = pa.detection.ELBND(w, e, function="max")
75 |
76 | # ELBND plot
77 | plt.plot(elbnd)
78 | plt.show()
79 |
80 |
81 | Code Explanation
82 | ====================
83 |
84 | """
85 | import numpy as np
86 |
87 | def ELBND(w, e, function="max"):
88 | """
89 | This function estimates Error and Learning Based Novelty Detection measure
90 | from given data.
91 |
92 | **Args:**
93 |
94 | * `w` : history of adaptive parameters of an adaptive model (2d array),
95 | every row represents parameters in given time index.
96 |
97 | * `e` : error of adaptive model (1d array)
98 |
99 | **Kwargs:**
100 |
101 |     * `function` : output function (str). It defines how to produce a single
102 |       value for every sample (from all parameters)
103 |
104 | * `max` - maximal value
105 |
106 | * `sum` - sum of values
107 |
108 | **Returns:**
109 |
110 |     * ELBND values (1d array). This vector has the same length as `w`.
111 |
112 | """
113 | # check if the function is known
114 | if function not in ["max", "sum"]:
115 | raise ValueError('Unknown output function')
116 | # get abs dw from w
117 | dw = np.zeros(w.shape)
118 | dw[:-1] = np.abs(np.diff(w, axis=0))
119 | # absolute values of product of increments and error
120 | elbnd = np.abs((dw.T*e).T)
121 | # apply output function
122 | if function == "max":
123 | elbnd = np.max(elbnd, axis=1)
124 | elif function == "sum":
125 | elbnd = np.sum(elbnd, axis=1)
126 | # return output
127 | return elbnd
128 |
--------------------------------------------------------------------------------
/padasip/preprocess/pca.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 0.6
3 | .. versionchanged:: 1.2.0
4 |
5 | Principal component analysis (PCA) is a statistical method
6 | for converting a set of observations with possibly correlated
7 | variables into a data-set of linearly uncorrelated variables
8 | (principal components). The number of principal components
9 | is less than or equal to the number of original variables.
10 | This transformation is defined in such a way that the first
11 | principal component has the largest possible variance.
12 |
13 | .. contents::
14 | :local:
15 | :depth: 1
16 |
17 | See also: :ref:`preprocess-lda`
18 |
19 | Usage Explanation
20 | =======================
21 |
22 | For reduction of dataset :code:`x` to :code:`n` number of principal components
23 |
24 | .. code-block:: python
25 |
26 | new_x = pa.preprocess.PCA(x, n)
27 |
28 | If you want to see the ordered eigenvalues of principal components,
29 | you can do it as follows:
30 |
31 | .. code-block:: python
32 |
33 | eigenvalues = pa.preprocess.PCA_components(x)
34 |
35 | Minimal Working Example
36 | ===========================
37 |
38 | In this example random numbers are generated (100 samples, with 3 values each).
39 | After the PCA application the reduced data-set is produced
40 | (all samples, but only 2 values each)
41 |
42 | .. code-block:: python
43 |
44 | import numpy as np
45 | import padasip as pa
46 |
47 | np.random.seed(100)
48 | x = np.random.uniform(1, 10, (100, 3))
49 | new_x = pa.preprocess.PCA(x, 2)
50 |
51 | If you do not know how many principal components you should use,
52 | you can check the eigenvalues of the principal components according to
53 | the following example
54 |
55 | .. code-block:: python
56 |
57 | import numpy as np
58 | import padasip as pa
59 |
60 | np.random.seed(100)
61 | x = np.random.uniform(1, 10, (100, 3))
62 |     print(pa.preprocess.PCA_components(x))
63 |
64 | which prints
65 |
66 | >>> [ 8.02948402 7.09335781 5.34116273]
67 |
68 | Code Explanation
69 | ====================
70 | """
71 | from __future__ import division
72 | import numpy as np
73 |
74 |
75 | def PCA_components(x):
76 | """
77 | Principal Component Analysis helper to check out eigenvalues of components.
78 |
79 | **Args:**
80 |
81 | * `x` : input matrix (2d array), every row represents new sample
82 |
83 | **Returns:**
84 |
85 | * `components`: sorted array of principal components eigenvalues
86 |
87 | """
88 | # validate inputs
89 | try:
90 | x = np.array(x)
91 |     except Exception:
92 | raise ValueError('Impossible to convert x to a numpy array.')
93 | # eigen values and eigen vectors of data covariance matrix
94 | eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T))
95 |     # the eigenvectors are not needed here, only the eigenvalues
96 |     # sort the eigenvalues in descending order
97 |     # form output - ordered eigenvalues
98 |     return eigen_values[(-eigen_values).argsort()]
99 |
100 |
101 | def PCA(x, n=False):
102 | """
103 | Principal component analysis function.
104 |
105 | **Args:**
106 |
107 | * `x` : input matrix (2d array), every row represents new sample
108 |
109 | **Kwargs:**
110 |
111 | * `n` : number of features returned (integer) - how many columns
112 | should the output keep
113 |
114 | **Returns:**
115 |
116 | * `new_x` : matrix with reduced size (lower number of columns)
117 | """
118 | n = n if n else x.shape[1] - 1
119 | assert x.shape[1] > n, "The requested n is bigger than \
120 | number of features in x."
121 | # eigen values and eigen vectors of data covariance matrix
122 | eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T))
123 |     # sort the eigenvectors according to the biggest eigenvalue
124 | eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
125 | # form output - reduced x matrix
126 | return eigen_order[:n].dot(x.T).T
127 |
--------------------------------------------------------------------------------
/padasip/detection/ese.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 1.2.0
3 |
4 | The Extreme Seeking Entropy (ESE) introduced
5 | in https://doi.org/10.3390/e22010093 is based on the
6 | evaluation of a change of adaptive model parameters.
7 | This function requires `SciPy `_.
8 |
9 | Content of this page:
10 |
11 | .. contents::
12 | :local:
13 | :depth: 1
14 |
15 | Algorithm Explanation
16 | ==========================
17 |
18 | The ESE describes every sample with a value that is proportional
19 | to the improbability of the adaptive parameter increments.
20 | The probability that an adaptive increment exceeds some threshold
21 | value is estimated from the Generalized Pareto distribution.
22 | The ESE value is the negative logarithm of the product of these
   | tail probabilities across all adaptive parameters.
23 |
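   | The core of this computation can be sketched for a single series of
   | weight increments as follows (a simplified illustration of the
   | :code:`ESE` function below, assuming the 10% Peak-Over-Threshold rule):
   |
   | .. code-block:: python
   |
   |     import numpy as np
   |     from scipy.stats import genpareto
   |
   |     np.random.seed(1)
   |     dw = np.abs(np.random.normal(0, 1, 1000))  # |increments| of one weight
   |     peaks = np.sort(dw)[-100:]                 # 10% highest values (P-O-T)
   |     new_dw = 5.                                # new increment to evaluate
   |     if new_dw > peaks[0]:
   |         # fit the tail and estimate how improbable the new increment is
   |         c, loc, scale = genpareto.fit(peaks, floc=peaks[0])
   |         tail_prob = 1 - genpareto.cdf(new_dw, c, loc, scale)
   |         print(-np.log10(tail_prob + 1e-20))    # contribution to ESE
   |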
24 | Usage Instructions
25 | ========================
26 |
27 | The ESE algorithm can be used as follows
28 |
29 | .. code-block:: python
30 |
31 |     ese = pa.detection.ESE(w, window=1000)
32 |
33 | where `w` is the matrix of the adaptive parameters (changing in time; every row
34 | should represent one time index) and
35 | `window` is the size of the window used for the distribution estimation.
36 | The length of the provided data `w` has to be greater than `window`.
37 | The first `window` samples cannot be evaluated with ESE.
38 |
39 |
40 | Minimal Working Example
41 | ============================
42 |
43 | This example demonstrates how ESE can highlight the position of
44 | a perturbation inserted into data. The :ref:`filter-nlms` adaptive filter
45 | is used as the adaptive model. The perturbation is manually inserted
46 | at the sample with index :math:`k=2500` (the length of data is 5000).
47 |
48 | .. code-block:: python
49 |
50 | import numpy as np
51 | import matplotlib.pylab as plt
52 | import padasip as pa
53 |
54 | # data creation
55 | n = 5
56 | N = 5000
57 | x = np.random.normal(0, 1, (N, n))
58 | d = np.sum(x, axis=1) + np.random.normal(0, 0.1, N)
59 |
60 | # fake perturbation insertion
61 | d[2500] += 2.
62 |
63 | # creation of learning model (adaptive filter)
64 | f = pa.filters.FilterNLMS(n, mu=1., w=np.ones(n))
65 | y, e, w = f.run(d, x)
66 |
67 | # estimation of ESE with weights from learning model
68 | ese = pa.detection.ESE(w)
69 |
70 | # ese plot
71 | plt.plot(ese)
72 | plt.show()
73 |
74 | Code Explanation
75 | ====================
76 |
77 | """
78 | import numpy as np
79 | from scipy.stats import genpareto
80 |
81 | def pot(data, method):
82 | """
83 | Peak-Over-Threshold method.
84 | :param data: input data (n samples)
85 | :param method: method identifier
86 | :return: k highest values
87 | """
88 | sorted_data = -np.sort(-data)
89 | k = 0
90 | n = len(data)
91 | if method == "10%":
92 | k = max(int(0.1 * n), 1)
93 | elif method == "sqrt":
94 | k = max(int(np.sqrt(n)), 1)
95 | elif method == "log10log10":
96 | k = max(int((n ** (2/3))/np.log10(np.log10(n))), 1)
97 | elif method == "log10":
98 | k = max(int(np.log10(n)), 1)
99 | elif method == "35%":
100 | k = max(int(0.35 * n), 1)
101 | return sorted_data[:k]
102 |
103 | def ESE(w, window=1000, pot_method="10%"):
104 | """
105 | This function estimates Extreme Seeking Entropy measure
106 | from given data.
107 |
108 | **Args:**
109 |
110 | * `w` : history of adaptive parameters of an adaptive model (2d array),
111 | every row represents parameters in given time index.
112 |
113 | **Kwargs:**
114 |
115 |     * `window` : number of samples that are processed via the P-O-T method
116 |
117 |     * `pot_method` : identifier of the P-O-T method (str): 'sqrt', '10%', '35%', 'log10', 'log10log10'
118 |
119 | **Returns:**
120 |
121 |     * values of Extreme Seeking Entropy (1d array). This vector has the same length as `w`.
122 |
123 | """
124 | filter_len = w.shape[1]
125 | dw = np.copy(w)
126 | dw[1:] = np.abs(np.diff(dw, n=1, axis=0))
127 | dw_count = int(dw.shape[0])
128 |
129 | hpp = np.ones((dw_count - window, filter_len))
130 | for i in range(window, dw.shape[0]):
131 |         # evaluate the increment of every filter weight
132 |         # against the distribution of its recent history
133 | for j in range(filter_len):
134 | poted_values = pot(dw[i - window:i, j], pot_method)
135 | if dw[i, j] > poted_values[-1]:
136 | fit = genpareto.fit(poted_values, floc=[poted_values[-1]])
137 | if dw[i, j] >= fit[1]:
138 | hpp[i - window, j] = 1 - genpareto.cdf(dw[i, j], fit[0], fit[1], fit[2]) + 1e-20
139 |
140 | ese_value = -np.log10(np.prod(hpp, axis=1))
141 | return np.append(np.zeros(window), ese_value)
142 |
--------------------------------------------------------------------------------
/padasip/filters/lms.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 0.1
3 | .. versionchanged:: 1.2.0
4 |
5 | The least-mean-square (LMS) adaptive filter is the most popular adaptive filter.
6 |
7 | The LMS filter can be created as follows
8 |
9 | >>> import padasip as pa
10 | >>> pa.filters.FilterLMS(n)
11 |
12 | where :code:`n` is the size (number of taps) of the filter.
13 |
14 | Content of this page:
15 |
16 | .. contents::
17 | :local:
18 | :depth: 1
19 |
20 | .. seealso:: :ref:`filters`
21 |
22 | Algorithm Explanation
23 | ==========================
24 |
25 | The LMS adaptive filter could be described as
26 |
27 | :math:`y(k) = w_1 \cdot x_{1}(k) + ... + w_n \cdot x_{n}(k)`,
28 |
29 | or in a vector form
30 |
31 | :math:`y(k) = \\textbf{x}^T(k) \\textbf{w}(k)`,
32 |
33 | where :math:`k` is discrete time index, :math:`(.)^T` denotes the transposition,
34 | :math:`y(k)` is filtered signal,
35 | :math:`\\textbf{w}` is vector of filter adaptive parameters and
36 | :math:`\\textbf{x}` is input vector (for a filter of size :math:`n`) as follows
37 |
38 | :math:`\\textbf{x}(k) = [x_1(k), ..., x_n(k)]`.
39 |
40 | The LMS weights adaptation could be described as follows
41 |
42 | :math:`\\textbf{w}(k+1) = \\textbf{w}(k) + \Delta \\textbf{w}(k)`,
43 |
44 | where :math:`\Delta \\textbf{w}(k)` is
45 |
46 | :math:`\Delta \\textbf{w}(k) = \\frac{1}{2} \mu \\frac{\partial e^2(k)}
47 | { \partial \\textbf{w}(k)}\ = \mu \cdot e(k) \cdot \\textbf{x}(k)`,
48 |
49 | where :math:`\mu` is the learning rate (step size) and :math:`e(k)`
50 | is error defined as
51 |
52 | :math:`e(k) = d(k) - y(k)`.
53 |
54 |
55 | Stability and Optimal Performance
56 | ==================================
57 |
58 | The general stability criterion of the LMS stands as follows
59 |
60 | :math:`|1 - \mu \cdot ||\\textbf{x}(k)||^2 | \leq 1`.
61 |
62 | In practice the key argument :code:`mu` should be set to a really small number
63 | in most of the cases
64 | (the recommended value lies somewhere in the range from 0.1 to 0.00001).
65 | If you still have problems with the stability or performance of the filter,
66 | then try the normalized LMS (:ref:`filter-nlms`).
67 |
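   | The criterion above suggests a quick, data-driven sanity check for
   | :code:`mu` (a sketch only; the factor 2 follows from the inequality,
   | the average input power is used as an estimate of :math:`||\\textbf{x}(k)||^2`
   | and the safety margin 0.1 is an arbitrary choice):
   |
   | .. code-block:: python
   |
   |     import numpy as np
   |
   |     x = np.random.normal(0, 1, (500, 4))        # measured input matrix
   |     mean_power = np.mean(np.sum(x**2, axis=1))  # average of ||x(k)||^2
   |     mu_max = 2. / mean_power                    # stability bound
   |     mu = 0.1 * mu_max                           # conservative choice
   |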
68 | Minimal Working Examples
69 | ==============================
70 |
71 | If you have measured data you may filter it as follows
72 |
73 | .. code-block:: python
74 |
75 | import numpy as np
76 | import matplotlib.pylab as plt
77 | import padasip as pa
78 |
79 | # creation of data
80 | N = 500
81 | x = np.random.normal(0, 1, (N, 4)) # input matrix
82 | v = np.random.normal(0, 0.1, N) # noise
83 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v # target
84 |
85 | # identification
86 | f = pa.filters.FilterLMS(n=4, mu=0.1, w="random")
87 | y, e, w = f.run(d, x)
88 |
89 | # show results
90 | plt.figure(figsize=(15,9))
91 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
92 | plt.plot(d,"b", label="d - target")
93 | plt.plot(y,"g", label="y - output");plt.legend()
94 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
95 | plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
96 | plt.tight_layout()
97 | plt.show()
98 |
99 | An example how to filter data measured in real-time
100 |
101 | .. code-block:: python
102 |
103 | import numpy as np
104 | import matplotlib.pylab as plt
105 | import padasip as pa
106 |
107 |     # these two functions supplement your online measurement
108 | def measure_x():
109 | # it produces input vector of size 3
110 | x = np.random.random(3)
111 | return x
112 |
113 | def measure_d(x):
114 |         # measure system output
115 | d = 2*x[0] + 1*x[1] - 1.5*x[2]
116 | return d
117 |
118 | N = 100
119 | log_d = np.zeros(N)
120 | log_y = np.zeros(N)
121 | filt = pa.filters.FilterLMS(3, mu=1.)
122 | for k in range(N):
123 | # measure input
124 | x = measure_x()
125 | # predict new value
126 | y = filt.predict(x)
127 | # do the important stuff with prediction output
128 | pass
129 | # measure output
130 | d = measure_d(x)
131 | # update filter
132 | filt.adapt(d, x)
133 | # log values
134 | log_d[k] = d
135 | log_y[k] = y
136 |
137 | ### show results
138 | plt.figure(figsize=(15,9))
139 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
140 | plt.plot(log_d,"b", label="d - target")
141 | plt.plot(log_y,"g", label="y - output");plt.legend()
142 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
143 | plt.plot(10*np.log10((log_d-log_y)**2),"r", label="e - error [dB]")
144 | plt.legend(); plt.tight_layout(); plt.show()
145 |
146 |
147 |
148 | Code Explanation
149 | ====================
150 | """
151 | from padasip.filters.base_filter import AdaptiveFilter
152 |
153 |
154 | class FilterLMS(AdaptiveFilter):
155 | """
156 | This class represents an adaptive LMS filter.
157 | """
158 | kind = "LMS"
159 |
160 | def learning_rule(self, e, x):
161 | """
162 | Override the parent class.
163 | """
164 | return self.mu * x * e
165 |
--------------------------------------------------------------------------------
/padasip/filters/nlms.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 0.1
3 | .. versionchanged:: 1.2.0
4 |
5 | The normalized least-mean-square (NLMS) adaptive filter
6 | is an extension of the popular LMS adaptive filter (:ref:`filter-lms`).
7 |
8 | The NLMS filter can be created as follows
9 |
10 | >>> import padasip as pa
11 | >>> pa.filters.FilterNLMS(n)
12 |
13 | where `n` is the size (number of taps) of the filter.
14 |
15 | Content of this page:
16 |
17 | .. contents::
18 | :local:
19 | :depth: 1
20 |
21 | .. seealso:: :ref:`filters`
22 |
23 | Algorithm Explanation
24 | ======================================
25 |
26 | The NLMS is an extension of the LMS filter. See :ref:`filter-lms`
27 | for an explanation of the algorithm behind it.
28 |
29 | The extension is based on the normalization of the learning rate.
30 | The learning rate :math:`\mu` is replaced by the learning rate :math:`\eta(k)`
31 | normalized with every new sample according to the input power as follows
32 |
33 | :math:`\eta (k) = \\frac{\mu}{\epsilon + || \\textbf{x}(k) ||^2}`,
34 |
35 | where :math:`|| \\textbf{x}(k) ||^2` is norm of input vector and
36 | :math:`\epsilon` is a small positive constant (regularization term).
37 | This constant is introduced to preserve the stability in cases where
38 | the input is close to zero.
39 |
40 | Stability and Optimal Performance
41 | ======================================
42 |
43 | The stability of the NLMS filter is given as follows
44 |
45 | :math:`0 \le \mu \le 2 + \\frac{2\epsilon}{||\\textbf{x}(k)||^2}`,
46 |
47 | or in case without regularization term :math:`\epsilon`
48 |
49 | :math:`\mu \in [0, 2]`.
50 |
51 | In other words, if you use zero or only a small key argument `eps`,
52 | the key argument `mu` should be between 0 and 2. The best convergence
53 | should be produced by `mu=1.` according to theory. However, in practice
54 | the optimal value can be strongly case specific.
55 |
56 |
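   | The effect of the normalization can be illustrated in a few lines
   | (a sketch with NumPy only): the effective step
   | :math:`\eta(k) ||\\textbf{x}(k)||^2` never exceeds :code:`mu`,
   | regardless of the scale of the input.
   |
   | .. code-block:: python
   |
   |     import numpy as np
   |
   |     mu, eps = 1., 0.001
   |     for scale in [0.01, 1., 100.]:
   |         x = scale * np.random.normal(0, 1, 4)  # input vector x(k)
   |         eta = mu / (eps + np.dot(x, x))        # normalized learning rate
   |         print(eta * np.dot(x, x))              # effective step, at most mu
   |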
57 | Minimal Working Examples
58 | ======================================
59 |
60 | If you have measured data you may filter it as follows
61 |
62 | .. code-block:: python
63 |
64 | import numpy as np
65 | import matplotlib.pylab as plt
66 | import padasip as pa
67 |
68 | # creation of data
69 | N = 500
70 | x = np.random.normal(0, 1, (N, 4)) # input matrix
71 | v = np.random.normal(0, 0.1, N) # noise
72 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v # target
73 |
74 | # identification
75 | f = pa.filters.FilterNLMS(n=4, mu=0.1, w="random")
76 | y, e, w = f.run(d, x)
77 |
78 | # show results
79 | plt.figure(figsize=(15,9))
80 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
81 | plt.plot(d,"b", label="d - target")
82 | plt.plot(y,"g", label="y - output");plt.legend()
83 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
84 | plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
85 | plt.tight_layout()
86 | plt.show()
87 |
88 | An example how to filter data measured in real-time
89 |
90 | .. code-block:: python
91 |
92 | import numpy as np
93 | import matplotlib.pylab as plt
94 | import padasip as pa
95 |
96 |     # these two functions supplement your online measurement
97 | def measure_x():
98 | # it produces input vector of size 3
99 | x = np.random.random(3)
100 | return x
101 |
102 | def measure_d(x):
103 |         # measure system output
104 | d = 2*x[0] + 1*x[1] - 1.5*x[2]
105 | return d
106 |
107 | N = 100
108 | log_d = np.zeros(N)
109 | log_y = np.zeros(N)
110 | filt = pa.filters.FilterNLMS(3, mu=1.)
111 | for k in range(N):
112 | # measure input
113 | x = measure_x()
114 | # predict new value
115 | y = filt.predict(x)
116 | # do the important stuff with prediction output
117 | pass
118 | # measure output
119 | d = measure_d(x)
120 | # update filter
121 | filt.adapt(d, x)
122 | # log values
123 | log_d[k] = d
124 | log_y[k] = y
125 |
126 | ### show results
127 | plt.figure(figsize=(15,9))
128 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
129 | plt.plot(log_d,"b", label="d - target")
130 | plt.plot(log_y,"g", label="y - output");plt.legend()
131 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
132 | plt.plot(10*np.log10((log_d-log_y)**2),"r", label="e - error [dB]")
133 | plt.legend(); plt.tight_layout(); plt.show()
134 |
135 |
136 |
137 | Code Explanation
138 | ======================================
139 | """
140 | import numpy as np
141 |
142 | from padasip.filters.base_filter import AdaptiveFilter
143 |
144 | class FilterNLMS(AdaptiveFilter):
145 | """
146 | Adaptive NLMS filter.
147 | """
148 | kind = "NLMS"
149 |
150 | def __init__(self, n, mu=0.1, eps=0.001, **kwargs):
151 | """
152 | **Kwargs:**
153 |
154 | * `eps` : regularization term (float). It is introduced to preserve
155 | stability for close-to-zero input vectors
156 | """
157 | super().__init__(n, mu, **kwargs)
158 | self.eps = eps
159 |
160 | def learning_rule(self, e, x):
161 | """
162 | Override the parent class.
163 | """
164 | return self.mu / (self.eps + np.dot(x, x)) * x * e
165 |
--------------------------------------------------------------------------------
/padasip/filters/ap.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 0.4
3 | .. versionchanged:: 1.2.0
4 |
5 | The Affine Projection (AP) algorithm is implemented in this module.
6 | Usage of this filter should be beneficial especially
7 | when the input data is highly correlated.
8 | This filter is based on the LMS. The difference is that
9 | the AP uses multiple input vectors in every sample.
10 | The number of vectors is called projection order.
11 | In this implementation the historic input vectors from input matrix are used
12 | as the additional input vectors in every sample.
13 |
14 | The AP filter can be created as follows
15 |
16 | >>> import padasip as pa
17 | >>> pa.filters.FilterAP(n)
18 |
19 | where `n` is the size of the filter.
20 |
21 | Content of this page:
22 |
23 | .. contents::
24 | :local:
25 | :depth: 1
26 |
27 | .. seealso:: :ref:`filters`
28 |
29 | Algorithm Explanation
30 | ======================================
31 |
32 | The input for AP filter is created as follows
33 |
34 | :math:`\\textbf{X}_{AP}(k) = (\\textbf{x}(k), ..., \\textbf{x}(k-L))`,
35 |
36 | where :math:`\\textbf{X}_{AP}` is the filter input, :math:`L` is the projection order,
37 | :math:`k` is the discrete time index and :math:`\\textbf{x}(k)` is the input vector.
38 | The output of the filter is calculated as follows:
39 |
40 | :math:`\\textbf{y}_{AP}(k) = \\textbf{X}^{T}_{AP}(k) \\textbf{w}(k)`,
41 |
42 | where :math:`\\textbf{w}(k)` is the vector of filter adaptive parameters.
43 | The vector of targets is constructed as follows
44 |
45 | :math:`\\textbf{d}_{AP}(k) = (d(k), ..., d(k-L))^T`,
46 |
47 | where :math:`d(k)` is target in time :math:`k`.
48 |
49 | The error of the filter is estimated as
50 |
51 | :math:`\\textbf{e}_{AP}(k) = \\textbf{d}_{AP}(k) - \\textbf{y}_{AP}(k)`.
52 |
53 | And the adaptation of adaptive parameters is calculated according to equation
54 |
55 | :math:`\\textbf{w}_{AP}(k+1) =
56 | \\textbf{w}_{AP}(k) + \mu \\textbf{X}_{AP}(k) (\\textbf{X}_{AP}^{T}(k)
57 | \\textbf{X}_{AP}(k) + \epsilon \\textbf{I})^{-1} \\textbf{e}_{AP}(k)`.
58 |
59 | During the filtering we are interested just in output of filter :math:`y(k)`
60 | and the error :math:`e(k)`. These two values are the first elements in
61 | vectors: :math:`\\textbf{y}_{AP}(k)` for output and
62 | :math:`\\textbf{e}_{AP}(k)` for error.
63 |
64 |
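   | The construction of :math:`\\textbf{X}_{AP}(k)` and
   | :math:`\\textbf{d}_{AP}(k)` from the signal history can be sketched as
   | follows (an illustration only; the filter class below manages this
   | memory internally):
   |
   | .. code-block:: python
   |
   |     import numpy as np
   |
   |     n, L = 4, 5                                   # filter size, projection order
   |     x_history = np.random.normal(0, 1, (100, n))  # stored input vectors
   |     d_history = np.random.normal(0, 1, 100)       # stored targets
   |     k = 50
   |     X_AP = x_history[k-L:k+1][::-1].T  # columns: x(k), x(k-1), ..., x(k-L)
   |     d_AP = d_history[k-L:k+1][::-1]    # (d(k), ..., d(k-L))^T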
65 |
66 | Minimal Working Example
67 | ======================================
68 |
69 | If you have measured data you may filter it as follows
70 |
71 | .. code-block:: python
72 |
73 | import numpy as np
74 | import matplotlib.pylab as plt
75 | import padasip as pa
76 |
77 | # creation of data
78 | N = 500
79 | x = np.random.normal(0, 1, (N, 4)) # input matrix
80 | v = np.random.normal(0, 0.1, N) # noise
81 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v # target
82 |
83 | # identification
84 | f = pa.filters.FilterAP(n=4, order=5, mu=0.5, ifc=0.001, w="random")
85 | y, e, w = f.run(d, x)
86 |
87 | # show results
88 | plt.figure(figsize=(15,9))
89 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
90 | plt.plot(d,"b", label="d - target")
91 | plt.plot(y,"g", label="y - output");plt.legend()
92 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
93 | plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
94 | plt.tight_layout()
95 | plt.show()
96 |
97 | An example how to filter data measured in real-time
98 |
99 | .. code-block:: python
100 |
101 | import numpy as np
102 | import matplotlib.pylab as plt
103 | import padasip as pa
104 |
105 |     # these two functions supplement your online measurement
106 | def measure_x():
107 | # it produces input vector of size 3
108 | x = np.random.random(3)
109 | return x
110 |
111 | def measure_d(x):
112 |         # measure system output
113 | d = 2*x[0] + 1*x[1] - 1.5*x[2]
114 | return d
115 |
116 | N = 100
117 | log_d = np.zeros(N)
118 | log_y = np.zeros(N)
119 | filt = pa.filters.FilterAP(3, mu=1.)
120 | for k in range(N):
121 | # measure input
122 | x = measure_x()
123 | # predict new value
124 | y = filt.predict(x)
125 | # do the important stuff with prediction output
126 | pass
127 | # measure output
128 | d = measure_d(x)
129 | # update filter
130 | filt.adapt(d, x)
131 | # log values
132 | log_d[k] = d
133 | log_y[k] = y
134 |
135 | ### show results
136 | plt.figure(figsize=(15,9))
137 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
138 | plt.plot(log_d,"b", label="d - target")
139 | plt.plot(log_y,"g", label="y - output");plt.legend()
140 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
141 | plt.plot(10*np.log10((log_d-log_y)**2),"r", label="e - error [dB]")
142 | plt.legend(); plt.tight_layout(); plt.show()
143 |
144 | Code Explanation
145 | ======================================
146 | """
147 | import numpy as np
148 |
149 | from padasip.filters.base_filter import AdaptiveFilterAP
150 |
151 | class FilterAP(AdaptiveFilterAP):
152 | """
153 | This class represents an adaptive AP filter.
154 | """
155 | kind = "AP"
156 |
157 | def learning_rule(self, e_mem, x_mem):
158 | """
159 | Override the parent class.
160 | """
161 | dw_part1 = np.dot(x_mem.T, x_mem) + self.ide_ifc
162 | dw_part2 = np.linalg.solve(dw_part1, self.ide)
163 | return self.mu * np.dot(x_mem, np.dot(dw_part2, e_mem))
164 |
--------------------------------------------------------------------------------
/padasip/filters/ocnlms.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 1.2.0
3 |
4 | The online-centered normalized least-mean-square (OCNLMS) adaptive filter
5 | (proposed in https://doi.org/10.14311/nnw.2021.31.019)
6 | is an extension of the popular NLMS adaptive filter (:ref:`filter-nlms`).
7 |
8 | The OCNLMS filter can be created as follows
9 |
10 | >>> import padasip as pa
11 | >>> pa.filters.FilterOCNLMS(n, mem=100)
12 |
13 | where `n` is the size (number of taps) of the filter.
14 |
15 | Content of this page:
16 |
17 | .. contents::
18 | :local:
19 | :depth: 1
20 |
21 | .. seealso:: :ref:`filters`
22 |
23 | Algorithm Explanation
24 | ======================================
25 |
26 | The OCNLMS is an extension of the NLMS filter. See :ref:`filter-nlms`
27 | for an explanation of the algorithm behind it. As an extension of the
28 | normalized least mean square (NLMS), the OCNLMS algorithm
29 | features online input centering according to
30 | the introduced filter memory. This key feature can compensate for
31 | the effect of concept drift in data streams, because
32 | such centering makes the filter independent
33 | of the nonzero mean value of the signal.
34 |
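   | The idea of the centering can be illustrated with plain NumPy
   | (a sketch only; the class below keeps this memory and updates it
   | with every new sample):
   |
   | .. code-block:: python
   |
   |     import numpy as np
   |
   |     mem = 100
   |     x_history = np.random.normal(0, 1, (500, 4)) + 121  # inputs with offset
   |     k = 300
   |     m_x = np.mean(x_history[k-mem:k], axis=0)  # mean over the filter memory
   |     x_centered = x_history[k] - m_x            # offset removed
   |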
35 | Minimal Working Examples
36 | ======================================
37 |
38 | An example of unknown system identification from measured data follows.
39 | The memory size `mem` is defined during the construction of the filter.
40 |
41 | .. code-block:: python
42 |
43 | import numpy as np
44 | import matplotlib.pylab as plt
45 | import padasip as pa
46 |
47 | # creation of data
48 | N = 500
49 | x = np.random.normal(0, 1, (N, 4)) + 121 # input matrix with offset
50 | v = np.random.normal(0, 0.1, N) # noise
51 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v # target
52 |
53 | # identification, memory is set to 100 samples
54 | f = pa.filters.FilterOCNLMS(n=4, mu=0.1, w="random", mem=100)
55 | y, e, w = f.run(d, x)
56 |
57 | # show results
58 | plt.figure(figsize=(15,9))
59 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
60 | plt.plot(d,"b", label="d - target")
61 | plt.plot(y,"g", label="y - output");plt.legend()
62 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
63 | plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
64 | plt.tight_layout()
65 | plt.show()
66 |
67 |
68 | Code Explanation
69 | ======================================
70 | """
71 | import numpy as np
72 |
73 | from padasip.filters.base_filter import AdaptiveFilter
74 |
75 | class FilterOCNLMS(AdaptiveFilter):
76 | """
77 | Adaptive OCNLMS filter.
78 | """
79 | kind = "OCNLMS"
80 |
81 | def __init__(self, n, mu=0.1, eps=1., mem=100, **kwargs):
82 | """
83 |         **Kwargs:**
84 |
85 | * `eps` : regularization term (float). It is introduced to preserve
86 | stability for close-to-zero input vectors
87 |
88 | * `mem` : size of filter memory (int). This means how many last targets
89 | and input vectors will be used for centering of current input vector
90 | and target.
91 | """
92 | super().__init__(n, mu, **kwargs)
93 | self.eps = eps
94 | self.mem = mem
95 | self.clear_memory()
96 |
97 | def learning_rule(self, e, x):
98 | """
99 | Override the parent class.
100 | """
101 | self.update_memory_x(x)
102 | m_d, m_x = self.read_memory()
103 | y = np.dot(self.w, x-m_x) + m_d
104 | self.update_memory_d(e + y)
105 | return self.mu / (self.eps + np.dot(x - m_x, x - m_x)) * e * (x - m_x)
106 |
107 | def predict(self, x):
108 | """
109 | This function calculates OCNLMS specific output.
110 | The parent class `predict` function cannot be used.
111 |
112 | **Args:**
113 |
114 | * `x` : input vector (1 dimension array) in length of filter.
115 |
116 | **Returns:**
117 |
118 | * `y` : output value (float) calculated from input array.
119 |
120 | """
121 | m_d, m_x = self.read_memory()
122 | return np.dot(self.w, x - m_x) + m_d
123 |
124 | def clear_memory(self):
125 | """
126 |         Clear all data from memory and reset the memory index.
127 | """
128 | self.mem_empty = True
129 | self.mem_x = np.zeros((self.mem, self.n))
130 | self.mem_d = np.zeros(self.mem)
131 | self.mem_idx = 0
132 |
133 | def update_memory_d(self, d_k):
134 | """
135 |         This function updates the memory of the filter with a new target value `d`.
136 | """
137 | self.mem_d[self.mem_idx-1] = d_k
138 |
139 | def update_memory_x(self, x_k):
140 | """
141 |         This function updates the memory of the filter with a new input vector `x`.
142 | """
143 | self.mem_x[self.mem_idx, :] = x_k
144 |
145 | def read_memory(self):
146 | """
147 |         This function reads the mean value of the target `d`
148 |         and the input vector `x` from the history.
149 | """
150 |         if self.mem_empty:
151 | if self.mem_idx == 0:
152 | m_x = np.zeros(self.n)
153 | m_d = 0
154 | else:
155 | m_x = np.mean(self.mem_x[:self.mem_idx+1], axis=0)
156 | m_d = np.mean(self.mem_d[:self.mem_idx])
157 | else:
158 | m_x = np.mean(self.mem_x, axis=0)
159 | m_d = np.mean(np.delete(self.mem_d, self.mem_idx))
160 | self.mem_idx += 1
161 | if self.mem_idx > len(self.mem_x)-1:
162 | self.mem_idx = 0
163 | self.mem_empty = False
164 | return m_d, m_x
165 |
--------------------------------------------------------------------------------
/padasip/filters/rls.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 0.1
3 | .. versionchanged:: 1.2.0
4 |
5 | The Recursive Least Squares filter can be created as follows
6 |
7 | >>> import padasip as pa
8 | >>> pa.filters.FilterRLS(n)
9 |
10 | where `n` is the number of filter inputs (the size of the input vector).
11 |
12 | Content of this page:
13 |
14 | .. contents::
15 | :local:
16 | :depth: 1
17 |
18 | .. seealso:: :ref:`filters`
19 |
20 | Algorithm Explanation
21 | ======================================
22 |
23 | The RLS adaptive filter may be described as
24 |
25 | :math:`y(k) = w_1 \cdot x_{1}(k) + ... + w_n \cdot x_{n}(k)`,
26 |
27 | or in a vector form
28 |
29 | :math:`y(k) = \\textbf{x}^T(k) \\textbf{w}(k)`,
30 |
31 | where :math:`k` is discrete time index, :math:`(.)^T` denotes the transposition,
32 | :math:`y(k)` is filtered signal,
33 | :math:`\\textbf{w}` is vector of filter adaptive parameters and
34 | :math:`\\textbf{x}` is input vector (for a filter of size :math:`n`) as follows
35 |
36 | :math:`\\textbf{x}(k) = [x_1(k), ..., x_n(k)]`.
37 |
38 | The update is done as follows
39 |
40 | :math:`\\textbf{w}(k+1) = \\textbf{w}(k) + \Delta \\textbf{w}(k)`
41 |
42 | where :math:`\Delta \\textbf{w}(k)` is obtained as follows
43 |
44 | :math:`\Delta \\textbf{w}(k) = \\textbf{R}(k) \\textbf{x}(k) e(k)`,
45 |
46 | where :math:`e(k)` is error and it is estimated according to filter output
47 | and desired value :math:`d(k)` as follows
48 |
49 | :math:`e(k) = d(k) - y(k)`
50 |
51 | The :math:`\\textbf{R}(k)` is inverse of autocorrelation matrix
52 | and it is calculated as follows
53 |
54 | :math:`\\textbf{R}(k) = \\frac{1}{\\mu}(
55 | \\textbf{R}(k-1) -
56 | \\frac{\\textbf{R}(k-1)\\textbf{x}(k) \\textbf{x}(k)^{T} \\textbf{R}(k-1)}
57 | {\\mu + \\textbf{x}(k)^{T}\\textbf{R}(k-1)\\textbf{x}(k)}
58 | )`.
59 |
60 | The initial value of autocorrelation matrix should be set to
61 |
62 | :math:`\\textbf{R}(0) = \\frac{1}{\\delta} \\textbf{I}`,
63 |
64 | where :math:`\\textbf{I}` is the identity matrix and :math:`\delta`
65 | is a small positive constant.
66 |
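   | One step of the recursion above can be written directly in NumPy
   | (a sketch mirroring the equations; the :code:`FilterRLS` class below
   | implements the same step):
   |
   | .. code-block:: python
   |
   |     import numpy as np
   |
   |     n, mu, delta = 4, 0.99, 0.1
   |     R = np.identity(n) / delta     # R(0) = I / delta
   |     w = np.zeros(n)                # initial weights
   |     x = np.random.normal(0, 1, n)  # input vector x(k)
   |     d = 1.                         # target d(k)
   |     e = d - np.dot(x, w)           # error e(k)
   |     R = (R - (R @ np.outer(x, x) @ R) / (mu + x @ R @ x)) / mu
   |     w = w + R @ x * e              # w(k+1) = w(k) + R(k) x(k) e(k)
   |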
67 | Stability and Optimal Performance
68 | ======================================
69 |
70 | Making the RLS filter work correctly with real data can be tricky.
71 | The forgetting factor :math:`\\mu` should be in the range from 0 to 1.
72 | But in a lot of cases it works only with values close to 1
73 | (for example something like 0.99).
74 |
75 | Minimal Working Examples
76 | ======================================
77 |
78 | If you have measured data you may filter it as follows
79 |
80 | .. code-block:: python
81 |
82 | import numpy as np
83 | import matplotlib.pylab as plt
84 | import padasip as pa
85 |
86 | # creation of data
87 | N = 500
88 | x = np.random.normal(0, 1, (N, 4)) # input matrix
89 | v = np.random.normal(0, 0.1, N) # noise
90 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v # target
91 |
92 | # identification
93 | f = pa.filters.FilterRLS(n=4, mu=0.1, w="random")
94 | y, e, w = f.run(d, x)
95 |
96 | # show results
97 | plt.figure(figsize=(15,9))
98 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
99 | plt.plot(d,"b", label="d - target")
100 | plt.plot(y,"g", label="y - output");plt.legend()
101 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
102 | plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
103 | plt.tight_layout()
104 | plt.show()
105 |
106 |
107 | An example how to filter data measured in real-time
108 |
109 | .. code-block:: python
110 |
111 | import numpy as np
112 | import matplotlib.pylab as plt
113 | import padasip as pa
114 |
115 |     # these two functions supplement your online measurement
116 | def measure_x():
117 | # it produces input vector of size 3
118 | x = np.random.random(3)
119 | return x
120 |
121 | def measure_d(x):
122 |         # measure system output
123 | d = 2*x[0] + 1*x[1] - 1.5*x[2]
124 | return d
125 |
126 | N = 100
127 | log_d = np.zeros(N)
128 | log_y = np.zeros(N)
129 | filt = pa.filters.FilterRLS(3, mu=0.5)
130 | for k in range(N):
131 | # measure input
132 | x = measure_x()
133 | # predict new value
134 | y = filt.predict(x)
135 | # do the important stuff with prediction output
136 | pass
137 | # measure output
138 | d = measure_d(x)
139 | # update filter
140 | filt.adapt(d, x)
141 | # log values
142 | log_d[k] = d
143 | log_y[k] = y
144 |
145 | ### show results
146 | plt.figure(figsize=(15,9))
147 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
148 | plt.plot(log_d,"b", label="d - target")
149 | plt.plot(log_y,"g", label="y - output");plt.legend()
150 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
151 | plt.plot(10*np.log10((log_d-log_y)**2),"r", label="e - error [dB]")
152 | plt.legend(); plt.tight_layout(); plt.show()
153 |
154 |
155 | Code Explanation
156 | ======================================
157 | """
158 | import numpy as np
159 |
160 | from padasip.filters.base_filter import AdaptiveFilter
161 |
162 | class FilterRLS(AdaptiveFilter):
163 | """
164 | Adaptive RLS filter.
165 | """
166 | kind = "RLS"
167 |
168 | def __init__(self, n, mu=0.1, eps=0.001, **kwargs):
169 | """
170 | **Kwargs:**
171 |
172 | * `eps` : initialisation value (float). It is usually chosen
173 | between 0.1 and 1.
174 | """
175 | super().__init__(n, mu, **kwargs)
176 | self.eps = eps
177 | self.R = 1 / self.eps * np.identity(n)
178 |
179 | def learning_rule(self, e, x):
180 | """
181 | Override the parent class.
182 | """
183 | R1 = self.R @ (x[:, None] * x[None, :]) @ self.R
184 | R2 = self.mu + np.dot(np.dot(x, self.R), x.T)
185 | self.R = 1 / self.mu * (self.R - R1/R2)
186 | return np.dot(self.R, x.T) * e
187 |
--------------------------------------------------------------------------------
/padasip/preprocess/lda.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 0.6
3 |
4 | Linear discriminant analysis (LDA)
5 | is a method used to determine the features
6 | that separate some classes of items. The output of LDA may be used as
7 | a linear classifier, or for dimensionality reduction for purposes of
8 | classification.
9 |
10 | .. contents::
11 | :local:
12 | :depth: 1
13 |
14 | See also: :ref:`preprocess-pca`
15 |
16 | Usage Explanation
17 | ********************
18 |
19 | For reduction of data-set :code:`x` with labels stored in array (:code:`labels`)
20 | to a new dataset :code:`new_x` containing just :code:`n`
21 | columns
22 |
23 | .. code-block:: python
24 |
25 | new_x = pa.preprocess.LDA(x, labels, n)
26 |
27 | The sorted array of scattermatrix eigenvalues for dataset :code:`x` described
28 | with variable :code:`labels` can be obtained as follows
29 |
30 | .. code-block:: python
31 |
32 | eigenvalues = pa.preprocess.LDA_discriminants(x, labels)
33 |
34 |
35 | Minimal Working Examples
36 | *****************************
37 |
38 | In this example we create data-set :code:`x` of 150 random samples. Every sample
39 | is described by 4 values and a label. The labels are stored in
40 | array :code:`labels`.
41 |
42 | Firstly, it is good to see the eigenvalues of the scatter matrix to determine
43 | how many columns it is reasonable to keep
44 |
45 | .. code-block:: python
46 |
47 | import numpy as np
48 | import padasip as pa
49 |
50 | np.random.seed(100) # constant seed to keep the results consistent
51 |
52 | N = 150 # number of samples
53 | classes = np.array(["1", "a", 3]) # names of classes
54 | cols = 4 # number of features (columns in dataset)
55 |
56 | x = np.random.random((N, cols)) # random data
57 | labels = np.random.choice(classes, size=N) # random labels
58 |
59 |     print(pa.preprocess.LDA_discriminants(x, labels))
60 |
61 | which prints
62 |
63 | >>> [ 2.90863957e-02 2.28352079e-02 1.23545720e-18 -1.61163011e-18]
64 |
65 | From this output it is obvious that the reasonable number of columns to keep is 2.
66 | The following code reduces the number of features to 2.
67 |
68 | .. code-block:: python
69 |
70 | import numpy as np
71 | import padasip as pa
72 |
73 | np.random.seed(100) # constant seed to keep the results consistent
74 |
75 | N = 150 # number of samples
76 | classes = np.array(["1", "a", 3]) # names of classes
77 | cols = 4 # number of features (columns in dataset)
78 |
79 | x = np.random.random((N, cols)) # random data
80 | labels = np.random.choice(classes, size=N) # random labels
81 |
82 | new_x = pa.preprocess.LDA(x, labels, n=2)
83 |
84 | To check that the size of the new data-set is really correct, we can print the shapes
85 | as follows
86 |
87 | >>> print("Shape of original dataset: {}".format(x.shape))
88 | Shape of original dataset: (150, 4)
89 | >>> print("Shape of new dataset: {}".format(new_x.shape))
90 | Shape of new dataset: (150, 2)
91 |
92 |
93 | Code Explanation
94 | *****************
95 | """
96 | from __future__ import division
97 | import numpy as np
98 |
99 | def LDA_base(x, labels):
100 | """
101 | Base function used for Linear Discriminant Analysis.
102 |
103 | **Args:**
104 |
105 | * `x` : input matrix (2d array), every row represents new sample
106 |
107 | * `labels` : list of labels (iterable), every item should be label for \
108 | sample with corresponding index
109 |
110 | **Returns:**
111 |
112 | * `eigenvalues`, `eigenvectors` : eigenvalues and eigenvectors \
113 | from LDA analysis
114 |
115 | """
116 | classes = np.array(tuple(set(labels)))
117 | cols = x.shape[1]
118 | # mean values for every class
119 | means = np.zeros((len(classes), cols))
120 | for i, cl in enumerate(classes):
121 | means[i] = np.mean(x[labels == cl], axis=0)
122 | # scatter matrices
123 | scatter_within = np.zeros((cols, cols))
124 | for cl, mean in zip(classes, means):
125 | scatter_class = np.zeros((cols, cols))
126 | for row in x[labels == cl]:
127 | dif = row - mean
128 | scatter_class += np.dot(dif.reshape(cols, 1), dif.reshape(1, cols))
129 | scatter_within += scatter_class
130 | total_mean = np.mean(x, axis=0)
131 | scatter_between = np.zeros((cols, cols))
132 | for cl, mean in zip(classes, means):
133 | dif = mean - total_mean
134 | dif_product = np.dot(dif.reshape(cols, 1), dif.reshape(1, cols))
135 | scatter_between += x[labels == cl, :].shape[0] * dif_product
136 | # eigenvalues and eigenvectors from scatter matrices
137 | scatter_product = np.dot(np.linalg.inv(scatter_within), scatter_between)
138 | eigen_values, eigen_vectors = np.linalg.eig(scatter_product)
139 | return eigen_values, eigen_vectors
140 |
141 | def LDA(x, labels, n=False):
142 | """
143 | Linear Discriminant Analysis function.
144 |
145 | **Args:**
146 |
147 | * `x` : input matrix (2d array), every row represents new sample
148 |
149 | * `labels` : list of labels (iterable), every item should be label for \
150 | sample with corresponding index
151 |
152 | **Kwargs:**
153 |
154 | * `n` : number of features returned (integer) - how many columns
155 | should the output keep
156 |
157 | **Returns:**
158 |
159 |     * `new_x` : matrix with reduced size (the number of columns is equal to `n`)
160 | """
161 | n = n if n else x.shape[1] - 1
162 | assert x.shape[1] > n, "The requested n is bigger than \
163 | number of features in x."
164 | # make the LDA
165 | eigen_values, eigen_vectors = LDA_base(x, labels)
166 |     # sort the eigenvectors by eigenvalues in descending order
167 |     eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
168 |     return eigen_order[:n].dot(x.T).T  # project data onto the n best discriminants
169 |
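# Note on the sorting idiom used above: argsort of the negated eigenvalues
# yields indices in descending order of eigenvalue. A tiny illustrative
# sketch (toy values):
#
#     >>> import numpy as np
#     >>> vals = np.array([0.1, 0.9, 0.5])
#     >>> (-vals).argsort()
#     array([1, 2, 0])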
170 |
171 | def LDA_discriminants(x, labels):
172 | """
173 |     Linear Discriminant Analysis helper for determining how many columns of
174 |     the data should be reduced.
175 |
176 | **Args:**
177 |
178 | * `x` : input matrix (2d array), every row represents new sample
179 |
180 | * `labels` : list of labels (iterable), every item should be label for \
181 | sample with corresponding index
182 |
183 | **Returns:**
184 |
185 | * `discriminants` : array of eigenvalues sorted in descending order
186 |
187 | """
188 | # validate inputs
189 | try:
190 | x = np.array(x)
191 | except:
192 | raise ValueError('Impossible to convert x to a numpy array.')
193 | # make the LDA
194 | eigen_values, eigen_vectors = LDA_base(x, labels)
195 | return eigen_values[(-eigen_values).argsort()]
196 |
--------------------------------------------------------------------------------
/padasip/detection/le.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 1.0.0
3 |
4 | The Learning Entropy (LE) is a non-Shannon entropy, based on the conformity
5 | of individual data samples to the contemporary learned governing law
6 | of a learning system.
7 |
8 | Content of this page:
9 |
10 | .. contents::
11 | :local:
12 | :depth: 1
13 |
14 | Algorithm Explanation
15 | ==========================
16 |
17 | Two options for estimating the LE are implemented - the direct approach and
18 | the multiscale approach.
19 |
20 | .. rubric:: Direct approach
21 |
22 | With the direct approach the LE is evaluated for every sample as follows
23 | 
24 | :math:`\\textrm{LE}_d(k) = \\frac{ (|\\Delta \\textbf{w}(k)| -
25 | \\overline{| \\Delta \\textbf{w}_M(k) |}) }
26 | { (\\sigma({| \\Delta \\textbf{w}_M(k) |})+\\epsilon) }`
27 |
28 | where
29 |
30 | * :math:`|\\Delta \\textbf{w}(k)|` are the absolute values of the current weight
31 |   increments.
32 | 
33 | * :math:`\\overline{| \\Delta \\textbf{w}_M(k) |}` are the averages of the absolute
34 |   values over the window used for LE evaluation.
35 | 
36 | * :math:`\\sigma (| \\Delta \\textbf{w}_M(k) |)` are the standard deviations of the
37 |   absolute values over the window used for LE evaluation.
38 | 
39 | * :math:`\\epsilon` is a regularization term that preserves stability for small
40 |   values of the standard deviation.
41 |
42 | .. rubric:: Multiscale approach
43 |
44 | With the multiscale approach the value for every sample is defined as follows
45 | 
46 | :math:`\\textrm{LE}(k) = \\frac{1}{n \\cdot n_\\alpha}
47 | \\sum f(\\Delta w_{i}(k), \\alpha ),`
48 | 
49 | where :math:`\\Delta w_i(k)` stands for one weight from the vector
50 | :math:`\\Delta \\textbf{w}(k)`, :math:`n` is the number of weights and
51 | :math:`n_\\alpha` is the number of used detection sensitivities
52 | 
53 | :math:`\\alpha=[\\alpha_{1}, \\alpha_{2}, \\ldots, \\alpha_{n_{\\alpha}}].`
54 |
55 | The function :math:`f(\\Delta w_{i}(k), \\alpha)` is an indicator defined as
56 | 
57 | :math:`f(\\Delta w_{i}(k),\\alpha)=
58 | \\begin{cases} 1 & \\textrm{if} \\; \\vert \\Delta w_{i}(k) \\vert >
59 | \\alpha \\cdot \\overline{ \\vert \\Delta w_{Mi}(k) \\vert } \\\\
60 | 0 & \\textrm{otherwise} \\end{cases}`
61 |
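The implementation below evaluates this indicator for all weights at once
with a vectorized comparison. A minimal numpy sketch of the idea (toy values;
`dw` holds the absolute weight increments and `awd` their window averages,
matching the variable names used in the code below)

.. code-block:: python

    import numpy as np

    dw = np.array([0.5, 0.1, 0.9])      # |dw(k)| for three weights
    awd = np.array([0.2, 0.2, 0.2])     # window averages of |dw|
    alpha = 2.0                         # one detection sensitivity
    f = (dw > alpha * awd).astype(int)  # 1 where the increment is unusually large
    print(f)                            # [1 0 1]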
62 |
63 | Usage Instructions and Optimal Performance
64 | ==============================================
65 |
66 | The LE algorithm can be used as follows
67 |
68 | .. code-block:: python
69 |
70 | le = pa.detection.learning_entropy(w, m=30, order=1)
71 |
72 | in the case of the direct approach. For the multiscale approach an example follows
73 |
74 | .. code-block:: python
75 |
76 | le = pa.detection.learning_entropy(w, m=30, order=1, alpha=[8., 9., 10., 11., 12., 13.])
77 |
78 | where `w` is the matrix of the adaptive parameters (changing in time; every row
79 | represents one time index), `m` is the window size, `order` is the LE order and
80 | `alpha` is the vector of detection sensitivities.
81 |
82 | .. rubric:: Used adaptive models
83 |
84 | In general it is possible to use any adaptive model. The input of the LE
85 | algorithm is the matrix of the adaptive parameter history, where every row
86 | represents the parameters used at a particular time and every column represents
87 | one parameter over the whole adaptation history.
88 |
89 | .. rubric:: Selection of sensitivities
90 |
91 | The optimal number of detection sensitivities and their values
92 | depend on the task and the data. The sensitivities should be chosen in a range
93 | where the function :math:`LE(k)` returns a value lower than 1 for at
94 | least one sample in the data, and returns a value of 0 for at most one sample.
95 |
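A simple way to check a candidate set of sensitivities is to evaluate the LE
for each value separately and inspect the range of the returned values.
A minimal sketch (assuming `w` is an already obtained history of adaptive
parameters, as in the example below, and `pa` is the imported padasip)

.. code-block:: python

    for a in [4., 8., 16., 32.]:
        le = pa.detection.learning_entropy(w, m=30, order=1, alpha=[a])
        print(a, le.min(), le.max())
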
96 | Minimal Working Example
97 | ============================
98 |
99 | This example demonstrates how the multiscale approach of LE can highlight
100 | the position of a perturbation inserted into the data. The :ref:`filter-nlms`
101 | adaptive filter is used as the adaptive model. The perturbation is manually
102 | inserted at the sample with index :math:`k=1000` (the length of the data is 2000).
103 |
104 | .. code-block:: python
105 |
106 | import numpy as np
107 | import matplotlib.pylab as plt
108 | import padasip as pa
109 |
110 | # data creation
111 | n = 5
112 | N = 2000
113 | x = np.random.normal(0, 1, (N, n))
114 | d = np.sum(x, axis=1) + np.random.normal(0, 0.1, N)
115 |
116 | # perturbation insertion
117 | d[1000] += 2.
118 |
119 | # creation of learning model (adaptive filter)
120 | f = pa.filters.FilterNLMS(n, mu=1., w=np.ones(n))
121 | y, e, w = f.run(d, x)
122 |
123 | # estimation of LE with weights from learning model
124 | le = pa.detection.learning_entropy(w, m=30, order=2, alpha=[8., 9., 10., 11., 12., 13.])
125 |
126 | # LE plot
127 | plt.plot(le)
128 | plt.show()
129 |
130 |
131 | Code Explanation
132 | ====================
133 |
134 | """
135 | import numpy as np
136 |
137 |
138 | def learning_entropy(w, m=10, order=1, alpha=False):
139 | """
140 | This function estimates Learning Entropy.
141 |
142 | **Args:**
143 |
144 | * `w` : history of adaptive parameters of an adaptive model (2d array),
145 | every row represents parameters in given time index.
146 |
147 | **Kwargs:**
148 |
149 |     * `m` : window size (int) - how many last samples are used for
150 |     evaluation of every sample.
151 | 
152 |     * `order` : order of the LE (int) - the order of the weight differentiation
153 | 
154 |     * `alpha` : list of sensitivities (1d array). If not provided, the
155 |     direct approach of LE is used.
156 |
157 | **Returns:**
158 |
159 |     * Learning Entropy of the data (1d array) - one value for every sample
160 |
161 | """
162 | w = np.array(w)
163 | # get length of data and number of parameters
164 | N, n = w.shape
165 | # get abs dw from w
166 | dw = np.copy(w)
167 | dw[order:] = np.abs(np.diff(dw, n=order, axis=0))
168 |     # averaging floating window - the window is k-m ... k-1
169 | awd = np.zeros(w.shape)
170 |     if not alpha:
171 |         # direct approach - no detection sensitivities provided
172 | swd = np.zeros(w.shape)
173 | for k in range(m, N):
174 | awd[k] = np.mean(dw[k-m:k], axis=0)
175 | swd[k] = np.std(dw[k-m:k], axis=0)
176 | # estimate the points of entropy
177 | eps = 1e-10 # regularization term
178 | le = (dw - awd) / (swd+eps)
179 |     else:
180 |         # multiscale approach with the provided sensitivities ALPHA
181 | for k in range(m, N):
182 | awd[k] = np.mean(dw[k-m:k], axis=0)
183 | # estimate the points of entropy
184 | alphas = np.array(alpha)
185 | fh = np.zeros(N)
186 | for alpha in alphas:
187 | fh += np.sum(awd*alpha < dw, axis=1)
188 | le = fh / float(n*len(alphas))
189 |     # clear the unknown zone at the beginning
190 | le[:m] = 0
191 | # return output
192 | return le
193 |
--------------------------------------------------------------------------------
/padasip/misc/error_evaluation.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 0.7
3 |
4 | Implemented functions:
5 |
6 | * **Mean absolute error** (MAE, also known as MAD - mean absolute deviation)
7 | 
8 |     :math:`\\textrm{MAE}=\\frac{1}{n} \\sum _{i=1}^{n}|e_{i}|`.
9 |
10 | * **Mean squared error** (MSE, also known as MSD)
11 |
12 | :math:`\\textrm{MSE}=\\frac{1}{n} \sum _{i=1}^{n}(e_{i})^{2}`.
13 |
14 | * **Root-mean-square error** (RMSE, also known as RMSD)
15 |
16 | :math:`\\textrm{RMSE} = \\sqrt{\\textrm{MSE}}`.
17 |
18 | * **Logarithmic squared error** (returns a vector of values in dB!)
19 |
20 | :math:`\\textbf{logSE} = 10 \log_{10} (\\textbf{e}^{2})`
21 |
22 | All of these functions are often used for the evaluation of an error, rather
23 | than just the raw error itself or its mean value.
24 |
25 | Usage instructions
26 | ====================
27 |
28 | For MAE evaluation from two time series use
29 |
30 | .. code-block:: python
31 |
32 |     mae = pa.misc.MAE(x1, x2)
33 |
34 | If you have the error already calculated, then just
35 |
36 | .. code-block:: python
37 |
38 |     mae = pa.misc.MAE(e)
39 |
40 | The same instructions apply for the MSE, RMSE and logarithmic squared error
41 |
42 | .. code-block:: python
43 |
44 | mse = pa.misc.MSE(x1, x2)
45 | rmse = pa.misc.RMSE(x1, x2)
46 | logse = pa.misc.logSE(x1, x2)
47 |
48 | and from error
49 |
50 | .. code-block:: python
51 |
52 | mse = pa.misc.MSE(e)
53 | rmse = pa.misc.RMSE(e)
54 | logse = pa.misc.logSE(e)
55 |
56 |
57 | Minimal working examples
58 | ==========================
59 |
60 | In the following example the MSE is estimated for two series
61 | (:code:`x1` and :code:`x2`):
62 |
63 | .. code-block:: python
64 |
65 | import numpy as np
66 | import padasip as pa
67 |
68 | x1 = np.array([1, 2, 3, 4, 5])
69 | x2 = np.array([5, 4, 3, 2, 1])
70 | mse = pa.misc.MSE(x1, x2)
71 | print(mse)
72 |
73 | You can easily check that the printed result :code:`8.0` is the correct MSE for
74 | the given series.
75 |
76 | The following example shows that you can directly use the error series
77 | :code:`e` if you already have it.
78 |
79 | .. code-block:: python
80 |
81 | import numpy as np
82 | import padasip as pa
83 |
84 | # somewhere else in your project
85 | x1 = np.array([1, 2, 3, 4, 5])
86 | x2 = np.array([5, 4, 3, 2, 1])
87 | e = x1 - x2
88 | # you have just the error - e
89 | mse = pa.misc.MSE(e)
90 | print(mse)
91 |
92 | Again, you can check the correctness of the answer easily.
93 |
94 | Code Explanation
95 | ====================
96 | """
97 | import numpy as np
98 |
99 | def get_valid_error(x1, x2=-1):
100 | """
101 | Function that validates:
102 |
103 | * x1 is possible to convert to numpy array
104 |
105 | * x2 is possible to convert to numpy array (if exists)
106 |
107 | * x1 and x2 have the same length (if both exist)
108 | """
109 | # just error
110 | if isinstance(x2, int) and x2 == -1:
111 | try:
112 | e = np.array(x1)
113 | except:
114 | raise ValueError('Impossible to convert series to a numpy array')
115 | # two series
116 | else:
117 | try:
118 | x1 = np.array(x1)
119 | x2 = np.array(x2)
120 | except:
121 | raise ValueError('Impossible to convert one of series to a numpy array')
122 | if not len(x1) == len(x2):
123 | raise ValueError('The length of both series must agree.')
124 | e = x1 - x2
125 | return e
126 |
127 | def logSE(x1, x2=-1):
128 | """
129 |     Logarithmic squared error: 10 * log10(e**2).
130 |     This function accepts two series of data or directly
131 |     one series with error.
132 |
133 | **Args:**
134 |
135 | * `x1` - first data series or error (1d array)
136 |
137 | **Kwargs:**
138 |
139 | * `x2` - second series (1d array) if first series was not error directly,
140 | then this should be the second series
141 |
142 | **Returns:**
143 |
144 | * `e` - logSE of error (1d array) obtained directly from `x1`,
145 | or as a difference of `x1` and `x2`. The values are in dB!
146 |
147 | """
148 | e = get_valid_error(x1, x2)
149 | return 10*np.log10(e**2)
150 |
151 |
152 | def MAE(x1, x2=-1):
153 | """
154 | Mean absolute error - this function accepts two series of data or directly
155 | one series with error.
156 |
157 | **Args:**
158 |
159 | * `x1` - first data series or error (1d array)
160 |
161 | **Kwargs:**
162 |
163 | * `x2` - second series (1d array) if first series was not error directly,
164 | then this should be the second series
165 |
166 | **Returns:**
167 |
168 | * `e` - MAE of error (float) obtained directly from `x1`,
169 | or as a difference of `x1` and `x2`
170 |
171 | """
172 | e = get_valid_error(x1, x2)
173 | return np.sum(np.abs(e)) / float(len(e))
174 |
175 | def MSE(x1, x2=-1):
176 | """
177 | Mean squared error - this function accepts two series of data or directly
178 | one series with error.
179 |
180 | **Args:**
181 |
182 | * `x1` - first data series or error (1d array)
183 |
184 | **Kwargs:**
185 |
186 | * `x2` - second series (1d array) if first series was not error directly,
187 | then this should be the second series
188 |
189 | **Returns:**
190 |
191 | * `e` - MSE of error (float) obtained directly from `x1`,
192 | or as a difference of `x1` and `x2`
193 |
194 | """
195 | e = get_valid_error(x1, x2)
196 | return np.dot(e, e) / float(len(e))
197 |
198 | def RMSE(x1, x2=-1):
199 | """
200 | Root-mean-square error - this function accepts two series of data
201 | or directly one series with error.
202 |
203 | **Args:**
204 |
205 | * `x1` - first data series or error (1d array)
206 |
207 | **Kwargs:**
208 |
209 | * `x2` - second series (1d array) if first series was not error directly,
210 | then this should be the second series
211 |
212 | **Returns:**
213 |
214 | * `e` - RMSE of error (float) obtained directly from `x1`,
215 | or as a difference of `x1` and `x2`
216 |
217 | """
218 | e = get_valid_error(x1, x2)
219 | return np.sqrt(np.dot(e, e) / float(len(e)))
220 |
221 | def get_mean_error(x1, x2=-1, function="MSE"):
222 | """
223 | This function returns desired mean error. Options are: MSE, MAE, RMSE
224 |
225 | **Args:**
226 |
227 | * `x1` - first data series or error (1d array)
228 |
229 | **Kwargs:**
230 |
231 | * `x2` - second series (1d array) if first series was not error directly,
232 | then this should be the second series
233 |
234 | **Returns:**
235 |
236 | * `e` - mean error value (float) obtained directly from `x1`,
237 | or as a difference of `x1` and `x2`
238 | """
239 | if function == "MSE":
240 | return MSE(x1, x2)
241 | if function == "MAE":
242 | return MAE(x1, x2)
243 | if function == "RMSE":
244 | return RMSE(x1, x2)
245 | raise ValueError('The provided error function is not known')
246 |
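# A quick sanity check of the name-based selection (a minimal sketch with
# toy series):
#
#     >>> get_mean_error([1, 2, 3], [1, 2, 5], function="RMSE")
#     1.1547005383792515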
--------------------------------------------------------------------------------
/tests/filters.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import sys
3 | import numpy as np
4 |
5 |
6 | sys.path.append('..')
7 | import padasip as pa
8 |
9 |
10 | class TestFilters(unittest.TestCase):
11 |
12 | def test_base_filter_adapt(self):
13 | filt = pa.filters.FilterLMS(3, mu=1., w="zeros")
14 | x = np.array([2, 4, 3])
15 | filt.adapt(1, x)
16 | self.assertAlmostEqual(filt.w.sum(), 9.0)
17 |
18 | def test_filter_gngd(self):
19 | """
20 | Test of GNGD filter output.
21 | """
22 | np.random.seed(100)
23 | N = 100
24 | x = np.random.normal(0, 1, (N, 4))
25 | v = np.random.normal(0, 0.1, N)
26 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v
27 | f = pa.filters.FilterGNGD(n=4, mu=0.9, w="random")
28 | y, e, w = f.run(d, x)
29 | self.assertAlmostEqual(y.sum(), 16.622071160225627)
30 |
31 | def test_filter_vslms_mathews(self):
32 | """
33 |         Test of VSLMS with Mathews adaptation filter output.
34 | """
35 | np.random.seed(100)
36 | N = 100
37 | x = np.random.normal(0, 1, (N, 4))
38 | v = np.random.normal(0, 0.1, N)
39 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v
40 | f = pa.filters.FilterVSLMS_Mathews(n=4, mu=0.1, ro=0.001, w="random")
41 | y, e, w = f.run(d, x)
42 | self.assertAlmostEqual(y.sum(), 18.46303593650432)
43 |
44 | def test_filter_vslms_benveniste(self):
45 | """
46 |         Test of VSLMS with Benveniste adaptation filter output.
47 | """
48 | np.random.seed(100)
49 | N = 100
50 | x = np.random.normal(0, 1, (N, 4))
51 | v = np.random.normal(0, 0.1, N)
52 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v
53 | f = pa.filters.FilterVSLMS_Benveniste(n=4, mu=0.1, ro=0.0002, w="random")
54 | y, e, w = f.run(d, x)
55 | self.assertAlmostEqual(y.sum(), 18.048916937718058)
56 |
57 | def test_filter_vslms_ang(self):
58 | """
59 |         Test of VSLMS with Ang adaptation filter output.
60 | """
61 | np.random.seed(100)
62 | N = 100
63 | x = np.random.normal(0, 1, (N, 4))
64 | v = np.random.normal(0, 0.1, N)
65 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v
66 | f = pa.filters.FilterVSLMS_Ang(n=4, mu=0.1, ro=0.0002, w="random")
67 | y, e, w = f.run(d, x)
68 | self.assertAlmostEqual(y.sum(), 18.341053442007972)
69 |
70 | def test_filter_ap(self):
71 | """
72 | Test of AP filter output.
73 | """
74 | np.random.seed(100)
75 | N = 100
76 | x = np.random.normal(0, 1, (N, 4))
77 | v = np.random.normal(0, 0.1, N)
78 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v
79 | f = pa.filters.FilterAP(n=4, order=5, mu=0.5, ifc=0.001, w="random")
80 | y, e, w = f.run(d, x)
81 | self.assertAlmostEqual(y.sum(), 15.105550229065491)
82 |
83 | def test_filter_lms(self):
84 | """
85 | Test of LMS filter output.
86 | """
87 | np.random.seed(100)
88 | N = 100
89 | x = np.random.normal(0, 1, (N, 4))
90 | v = np.random.normal(0, 0.1, N)
91 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v
92 | f = pa.filters.FilterLMS(n=4, mu=0.1, w="random")
93 | y, e, w = f.run(d, x)
94 | self.assertAlmostEqual(y.sum(), 18.199308184867885)
95 |
96 | def test_filter_nlms(self):
97 | """
98 | Test of NLMS filter.
99 | """
100 | np.random.seed(100)
101 | N = 100
102 | x = np.random.normal(0, 1, (N, 4))
103 | v = np.random.normal(0, 0.1, N)
104 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v
105 | f = pa.filters.FilterNLMS(n=4, mu=0.5, eps=0.01, w="random")
106 | y, e, w = f.run(d, x)
107 | self.assertAlmostEqual(y.sum(), 14.246570369497373)
108 |
109 | def test_filter_ocnlms(self):
110 | """
111 | Test of OCNLMS filter.
112 | """
113 | np.random.seed(100)
114 | N = 100
115 | x = np.random.normal(0, 1, (N, 4))
116 | v = np.random.normal(0, 0.1, N)
117 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v
118 | f = pa.filters.FilterOCNLMS(n=4, mu=1., mem=100, w="random")
119 | y, e, w = f.run(d, x)
120 | self.assertAlmostEqual(y.sum(), 13.775034155354426)
121 |
122 | def test_filter_Llncosh(self):
123 | np.random.seed(100)
124 | N = 100
125 | x = np.random.normal(0, 1, (N, 4))
126 | v = np.random.normal(0, 0.1, N)
127 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v
128 | f = pa.filters.FilterLlncosh(n=4, mu=1., lambd=3, w="random")
129 | y, e, w = f.run(d, x)
130 | self.assertAlmostEqual(y.sum(), 18.74164638623726)
131 |
132 | def test_filter_rls(self):
133 | """
134 | Test of RLS filter.
135 | """
136 | np.random.seed(100)
137 | N = 100
138 | x = np.random.normal(0, 1, (N, 4))
139 | v = np.random.normal(0, 0.1, N)
140 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v
141 | f = pa.filters.FilterRLS(n=4, mu=0.9, w="random")
142 | y, e, w = f.run(d, x)
143 | self.assertAlmostEqual(y.sum(), 16.80842884997325)
144 |
145 | def test_filter_LMF(self):
146 | """
147 | Test of LMF filter.
148 | """
149 | np.random.seed(100)
150 | N = 100
151 | x = np.random.normal(0, 1, (N, 4))
152 | v = np.random.normal(0, 0.1, N)
153 | d = 2*x[:,0] + 0.1*x[:,1] - 1*x[:,2] + 0.5*x[:,3] + v
154 | f = pa.filters.FilterLMF(n=4, mu=0.01, w="random")
155 | y, e, w = f.run(d, x)
156 | self.assertAlmostEqual(y.sum(), 16.611322392961064)
157 |
158 | def test_filter_NLMF(self):
159 | """
160 | Test of NLMF filter.
161 | """
162 | np.random.seed(100)
163 | N = 100
164 | x = np.random.normal(0, 1, (N, 4))
165 | v = np.random.normal(0, 0.1, N)
166 | d = 2*x[:,0] + 0.1*x[:,1] - 1*x[:,2] + 0.5*x[:,3] + v
167 | f = pa.filters.FilterNLMF(n=4, mu=0.1, w="random")
168 | y, e, w = f.run(d, x)
169 | self.assertAlmostEqual(y.sum(), 13.989262305958494)
170 |
171 | def test_filter_SSLMS(self):
172 | """
173 | Test of SSLMS filter.
174 | """
175 | np.random.seed(100)
176 | N = 100
177 | x = np.random.normal(0, 1, (N, 4))
178 | v = np.random.normal(0, 0.1, N)
179 | d = 2*x[:,0] + 0.1*x[:,1] - 1*x[:,2] + 0.5*x[:,3] + v
180 | f = pa.filters.FilterSSLMS(n=4, mu=0.1, w="random")
181 | y, e, w = f.run(d, x)
182 | self.assertAlmostEqual(y.sum(), 12.579327704869938)
183 |
184 | def test_filter_NSSLMS(self):
185 | """
186 | Test of NSSLMS filter.
187 | """
188 | np.random.seed(100)
189 | N = 100
190 | x = np.random.normal(0, 1, (N, 4))
191 | v = np.random.normal(0, 0.1, N)
192 | d = 2*x[:,0] + 0.1*x[:,1] - 1*x[:,2] + 0.5*x[:,3] + v
193 | f = pa.filters.FilterNSSLMS(n=4, mu=0.3, eps=0.001, w="random")
194 | y, e, w = f.run(d, x)
195 | self.assertAlmostEqual(y.sum(), -23.49342599164458)
196 |
197 | def test_filter_GMCC(self):
198 | """
199 | Test of GMCC filter.
200 | """
201 | np.random.seed(100)
202 | N = 100
203 | x = np.random.normal(0, 1, (N, 4))
204 | v = np.random.normal(0, 0.1, N)
205 | d = 2*x[:,0] + 0.1*x[:,1] - 1*x[:,2] + 0.5*x[:,3] + v
206 | f = pa.filters.FilterGMCC(n=4, mu=0.3, lambd=0.03, alpha=2, w="random")
207 | y, e, w = f.run(d, x)
208 | self.assertAlmostEqual(y.sum(), 7.002285017142926)
209 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = build
9 |
10 | # Internal variables.
11 | PAPEROPT_a4 = -D latex_paper_size=a4
12 | PAPEROPT_letter = -D latex_paper_size=letter
13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
14 | # the i18n builder cannot share the environment and doctrees with the others
15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
16 |
17 | .PHONY: help
18 | help:
19 | @echo "Please use \`make <target>' where <target> is one of"
20 | @echo " html to make standalone HTML files"
21 | @echo " dirhtml to make HTML files named index.html in directories"
22 | @echo " singlehtml to make a single large HTML file"
23 | @echo " pickle to make pickle files"
24 | @echo " json to make JSON files"
25 | @echo " htmlhelp to make HTML files and a HTML help project"
26 | @echo " qthelp to make HTML files and a qthelp project"
27 | @echo " applehelp to make an Apple Help Book"
28 | @echo " devhelp to make HTML files and a Devhelp project"
29 | @echo " epub to make an epub"
30 | @echo " epub3 to make an epub3"
31 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
32 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
33 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
34 | @echo " text to make text files"
35 | @echo " man to make manual pages"
36 | @echo " texinfo to make Texinfo files"
37 | @echo " info to make Texinfo files and run them through makeinfo"
38 | @echo " gettext to make PO message catalogs"
39 | @echo " changes to make an overview of all changed/added/deprecated items"
40 | @echo " xml to make Docutils-native XML files"
41 | @echo " pseudoxml to make pseudoxml-XML files for display purposes"
42 | @echo " linkcheck to check all external links for integrity"
43 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
44 | @echo " coverage to run coverage check of the documentation (if enabled)"
45 | @echo " dummy to check syntax errors of document sources"
46 |
47 | .PHONY: clean
48 | clean:
49 | rm -rf $(BUILDDIR)/*
50 |
51 | .PHONY: html
52 | html:
53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
54 | @echo
55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
56 |
57 | .PHONY: dirhtml
58 | dirhtml:
59 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
60 | @echo
61 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
62 |
63 | .PHONY: singlehtml
64 | singlehtml:
65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
66 | @echo
67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
68 |
69 | .PHONY: pickle
70 | pickle:
71 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
72 | @echo
73 | @echo "Build finished; now you can process the pickle files."
74 |
75 | .PHONY: json
76 | json:
77 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
78 | @echo
79 | @echo "Build finished; now you can process the JSON files."
80 |
81 | .PHONY: htmlhelp
82 | htmlhelp:
83 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
84 | @echo
85 | @echo "Build finished; now you can run HTML Help Workshop with the" \
86 | ".hhp project file in $(BUILDDIR)/htmlhelp."
87 |
88 | .PHONY: qthelp
89 | qthelp:
90 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
91 | @echo
92 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
93 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
94 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Padasip.qhcp"
95 | @echo "To view the help file:"
96 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Padasip.qhc"
97 |
98 | .PHONY: applehelp
99 | applehelp:
100 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
101 | @echo
102 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
103 | @echo "N.B. You won't be able to view it unless you put it in" \
104 | "~/Library/Documentation/Help or install it in your application" \
105 | "bundle."
106 |
107 | .PHONY: devhelp
108 | devhelp:
109 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
110 | @echo
111 | @echo "Build finished."
112 | @echo "To view the help file:"
113 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Padasip"
114 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Padasip"
115 | @echo "# devhelp"
116 |
117 | .PHONY: epub
118 | epub:
119 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
120 | @echo
121 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
122 |
123 | .PHONY: epub3
124 | epub3:
125 | $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3
126 | @echo
127 | @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3."
128 |
129 | .PHONY: latex
130 | latex:
131 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
132 | @echo
133 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
134 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
135 | "(use \`make latexpdf' here to do that automatically)."
136 |
137 | .PHONY: latexpdf
138 | latexpdf:
139 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
140 | @echo "Running LaTeX files through pdflatex..."
141 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
142 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
143 |
144 | .PHONY: latexpdfja
145 | latexpdfja:
146 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
147 | @echo "Running LaTeX files through platex and dvipdfmx..."
148 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
149 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
150 |
151 | .PHONY: text
152 | text:
153 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
154 | @echo
155 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
156 |
157 | .PHONY: man
158 | man:
159 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
160 | @echo
161 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
162 |
163 | .PHONY: texinfo
164 | texinfo:
165 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
166 | @echo
167 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
168 | @echo "Run \`make' in that directory to run these through makeinfo" \
169 | "(use \`make info' here to do that automatically)."
170 |
171 | .PHONY: info
172 | info:
173 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
174 | @echo "Running Texinfo files through makeinfo..."
175 | make -C $(BUILDDIR)/texinfo info
176 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
177 |
178 | .PHONY: gettext
179 | gettext:
180 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
181 | @echo
182 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
183 |
184 | .PHONY: changes
185 | changes:
186 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
187 | @echo
188 | @echo "The overview file is in $(BUILDDIR)/changes."
189 |
190 | .PHONY: linkcheck
191 | linkcheck:
192 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
193 | @echo
194 | @echo "Link check complete; look for any errors in the above output " \
195 | "or in $(BUILDDIR)/linkcheck/output.txt."
196 |
197 | .PHONY: doctest
198 | doctest:
199 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
200 | @echo "Testing of doctests in the sources finished, look at the " \
201 | "results in $(BUILDDIR)/doctest/output.txt."
202 |
203 | .PHONY: coverage
204 | coverage:
205 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
206 | @echo "Testing of coverage in the sources finished, look at the " \
207 | "results in $(BUILDDIR)/coverage/python.txt."
208 |
209 | .PHONY: xml
210 | xml:
211 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
212 | @echo
213 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
214 |
215 | .PHONY: pseudoxml
216 | pseudoxml:
217 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
218 | @echo
219 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
220 |
221 | .PHONY: dummy
222 | dummy:
223 | $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy
224 | @echo
225 | @echo "Build finished. Dummy builder generates no files."
226 |
--------------------------------------------------------------------------------
/padasip/filters/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 0.1
3 | .. versionchanged:: 1.2.2
4 |
5 |
6 | An adaptive filter is a system that changes its adaptive parameters
7 | - the adaptive weights :math:`\\textbf{w}(k)` - according to an optimization algorithm.
8 | 
9 | An adaptive filter can be described as
10 |
11 | :math:`y(k) = w_1 \cdot x_{1}(k) + ... + w_n \cdot x_{n}(k)`,
12 |
13 | or in a vector form
14 |
15 | :math:`y(k) = \\textbf{x}^T(k) \\textbf{w}(k)`.
16 |
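As a quick numeric illustration of this weighted sum (a minimal sketch,
assuming :code:`numpy` is imported as :code:`np`)

.. code-block:: python

    w = np.array([0.5, -1.0, 2.0])  # adaptive weights
    x = np.array([1.0, 2.0, 3.0])   # one input sample
    y = np.dot(x, w)                # filter output: 4.5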
17 | The adaptation of adaptive parameters (weights) can be done with
18 | various algorithms.
19 |
20 | Content of this page:
21 |
22 | .. contents::
23 | :local:
24 | :depth: 1
25 |
26 | Usage instructions
27 | ================================================
28 |
29 | .. rubric:: Adaptive weights initial selection
30 |
31 | The parameters of all implemented adaptive filters can be initially set:
32 |
33 | * manually and passed to a filter as an array
34 |
35 | * :code:`w="random"` - set to random - this will produce a vector of
36 | random values (zero mean, 0.5 standard deviation)
37 |
38 | * :code:`w="zeros"` - set to zeros
39 |
40 | .. rubric:: Input data
41 |
42 | The adaptive filters need two inputs
43 |
44 | * input matrix :code:`x` where rows represent the samples. Every row (sample)
45 | should contain multiple values (features).
46 |
47 | * desired value (target) :code:`d`
48 |
49 | If you have only one signal and the historical values of this signal should
50 | be the input of the filter (data reconstruction/prediction task), you can use
51 | the helper function :ref:`preprocess-input_from_history` to build the input
52 | matrix from the historical values.
53 |
54 | .. rubric:: Creation of an adaptive filter
55 |
56 | If you want to create an adaptive filter (for example NLMS) with size :code:`n=4`,
57 | learning rate :code:`mu=0.1` and random initial parameters (weights), then use the
58 | following code
59 |
60 | .. code-block:: python
61 |
62 | f = pa.filters.AdaptiveFilter(model="NLMS", n=4, mu=0.1, w="random")
63 |
64 | where returned :code:`f` is the instance of class :code:`FilterNLMS`
65 | with given parameters.
66 |
67 | .. rubric:: Data filtering
68 |
69 | If you have already created an instance of an adaptive filter (:code:`f` in the
70 | previous example) you can use it for filtering
71 | data :code:`x` with desired value :code:`d` as simply as follows
72 |
73 | .. code-block:: python
74 |
75 | y, e, w = f.run(d, x)
76 |
77 | where :code:`y` is the output, :code:`e` is the error and :code:`w` is the set
78 | of parameters at the end of the simulation.
79 |
80 | In case you want to simply filter the data without creating and
81 | storing the filter instance manually, use the following function
82 |
83 | .. code-block:: python
84 |
85 | y, e, w = pa.filters.filter_data(d, x, model="NLMS", mu=0.9, w="random")
86 |
87 |
88 | .. rubric:: Search for optimal learning rate
89 |
90 | The search for an optimal filter setup (especially the learning rate) is a task
91 | of critical importance. Therefore a helper function for this task is
92 | implemented in Padasip. To use this function you need to specify
93 |
94 | * number of epochs (for training)
95 |
96 | * part of data used in training epochs - `ntrain` (0.5 stands for 50% of
97 | given data)
98 |
99 | * start and end of learning rate range you want to test (and number of
100 | steps in this range) - `mu_start`, `mu_end`, `steps`
101 |
102 | * testing criteria (MSE, RMSE, MAE)
103 |
104 | An example for `mu` in a range of 100 values from `[0.01, ..., 1]` follows.
105 | The example uses 50% of the data for training and the leftover data for testing
106 | with the MSE criteria. The returned arrays are a list of errors and a list of
107 | corresponding learning rates, so it is easy to plot and analyze the error as
108 | a function of the learning rate.
109 |
110 | .. code-block:: python
111 |
112 | errors_e, mu_range = f.explore_learning(d, x,
113 | mu_start=0.01,
114 | mu_end=1.,
115 | steps=100, ntrain=0.5, epochs=1,
116 | criteria="MSE")
117 |
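The learning rate with the lowest error can then be picked from the returned
arrays, for example (a minimal sketch, assuming :code:`numpy` is imported
as :code:`np`)

.. code-block:: python

    best_mu = mu_range[np.argmin(errors_e)]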
118 | Note: the optimal learning rate depends on the purpose and usage of the filter
119 | (amount of training, data characteristics, etc.).
120 |
121 |
122 | Full Working Example
123 | ===================================================
124 |
125 | Below is a full working example with visualisation of the results - the NLMS
126 | adaptive filter used for channel identification.
127 |
128 | .. code-block:: python
129 |
130 | import numpy as np
131 | import matplotlib.pylab as plt
132 | import padasip as pa
133 |
134 | # creation of data
135 | N = 500
136 | x = np.random.normal(0, 1, (N, 4)) # input matrix
137 | v = np.random.normal(0, 0.1, N) # noise
138 | d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v # target
139 |
140 | # identification
141 | f = pa.filters.AdaptiveFilter(model="NLMS", n=4, mu=0.1, w="random")
142 | y, e, w = f.run(d, x)
143 |
144 | ## show results
145 | plt.figure(figsize=(15,9))
146 | plt.subplot(211);plt.title("Adaptation");plt.xlabel("samples - k")
147 | plt.plot(d,"b", label="d - target")
148 | plt.plot(y,"g", label="y - output");plt.legend()
149 | plt.subplot(212);plt.title("Filter error");plt.xlabel("samples - k")
150 | plt.plot(10*np.log10(e**2),"r", label="e - error [dB]");plt.legend()
151 | plt.tight_layout()
152 | plt.show()
153 |
154 |
155 | Implemented filters
156 | ========================
157 |
158 | .. toctree::
159 | :glob:
160 | :maxdepth: 1
161 |
162 | filters/*
163 |
164 | Code explanation
165 | ==================
166 | """
167 | from padasip.filters.ap import FilterAP
168 | from padasip.filters.gmcc import FilterGMCC
169 | from padasip.filters.gngd import FilterGNGD
170 | from padasip.filters.llncosh import FilterLlncosh
171 | from padasip.filters.lmf import FilterLMF
172 | from padasip.filters.lms import FilterLMS
173 | from padasip.filters.nlmf import FilterNLMF
174 | from padasip.filters.nlms import FilterNLMS
175 | from padasip.filters.nsslms import FilterNSSLMS
176 | from padasip.filters.ocnlms import FilterOCNLMS
177 | from padasip.filters.rls import FilterRLS
178 | from padasip.filters.sslms import FilterSSLMS
179 | from padasip.filters.vslms_ang import FilterVSLMS_Ang
180 | from padasip.filters.vslms_benveniste import FilterVSLMS_Benveniste
181 | from padasip.filters.vslms_mathews import FilterVSLMS_Mathews
182 |
183 |
184 | def filter_data(d, x, model="lms", **kwargs):
185 | """
186 |     Function that filters data with the selected adaptive filter.
187 |
188 | **Args:**
189 |
190 | * `d` : desired value (1 dimensional array)
191 |
192 | * `x` : input matrix (2-dimensional array). Rows are samples, columns are
193 | input arrays.
194 |
195 | **Kwargs:**
196 |
197 | * Any key argument that can be accepted with selected filter model.
198 | For more information see documentation of desired adaptive filter.
199 |
200 | **Returns:**
201 |
202 | * `y` : output value (1 dimensional array).
203 | The size corresponds with the desired value.
204 |
205 | * `e` : filter error for every sample (1 dimensional array).
206 | The size corresponds with the desired value.
207 |
208 | * `w` : history of all weights (2 dimensional array).
209 | Every row is set of the weights for given sample.
210 |
211 | """
212 | # overwrite n with correct size
213 | kwargs["n"] = x.shape[1]
214 | # create filter according model
215 | if model.upper() not in FILTERS.keys():
216 | raise ValueError('Unknown model of filter {}'.format(model))
217 | return FILTERS[model.upper()](**kwargs).run(d, x)
218 |
219 | def AdaptiveFilter(model="lms", **kwargs):
220 | """
221 |     Function that creates an adaptive filter instance of the
222 |     selected model.
223 | 
224 |     **Args:**
225 | 
226 |     * `model` : name of the desired filter model (string),
227 |     for example "NLMS". The matching is case insensitive.
228 | 
229 |     **Kwargs:**
230 | 
231 |     * Any key argument that can be accepted with the selected
232 |     filter model. For more information see the documentation
233 |     of the desired adaptive filter.
234 | 
235 |     * At least the filter size `n` must be provided.
236 | 
237 |     **Returns:**
238 | 
239 |     * an instance of the filter class corresponding to the
240 |     selected model, for example :code:`FilterNLMS` for
241 |     :code:`model="NLMS"`.
242 | 
243 |     The returned instance can be used for filtering via its
244 |     :code:`run`, :code:`adapt` or :code:`pretrained_run`
245 |     methods, as shown in the usage instructions at the
246 |     top of this page.
247 | 
248 | """
249 | # check if the filter size was specified
250 | if "n" not in kwargs:
251 | raise ValueError('Filter size is not defined (n=?).')
252 | # create filter according model
253 | if model.upper() not in FILTERS.keys():
254 | raise ValueError('Unknown model of filter {}'.format(model))
255 | return FILTERS[model.upper()](**kwargs)
256 |
257 | def get_filter(name):
258 | """
259 |     This function returns the class of a filter according to the provided name.
260 | """
261 | try:
262 | return FILTERS[name.upper()]
263 | except:
264 | msg = 'Unknown model of filter {}, options are {}'
265 | raise ValueError(msg.format(name, list(FILTERS.keys())))
266 |
267 | FILTER_CLASSES = [
268 | FilterAP,
269 | FilterGMCC,
270 | FilterGNGD,
271 | FilterLlncosh,
272 | FilterLMF,
273 | FilterLMS,
274 | FilterNLMF,
275 | FilterNLMS,
276 | FilterNSSLMS,
277 | FilterOCNLMS,
278 | FilterRLS,
279 | FilterSSLMS,
280 | FilterVSLMS_Ang,
281 | FilterVSLMS_Benveniste,
282 | FilterVSLMS_Mathews,
283 | ]
284 |
285 | FILTERS = {f.kind.upper(): f for f in FILTER_CLASSES}
286 |
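# Example of the name-based lookup (a minimal sketch):
#
#     >>> get_filter("rls") is FilterRLS
#     True
#     >>> f = AdaptiveFilter(model="RLS", n=4)  # creates a FilterRLS instance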
--------------------------------------------------------------------------------
/padasip/filters/base_filter.py:
--------------------------------------------------------------------------------
1 | """
2 | .. versionadded:: 0.1
3 | .. versionchanged:: 1.2.0
4 |
5 | """
6 | import numpy as np
7 |
8 |
9 | class AdaptiveFilter():
10 | """
11 | Base class for adaptive filter classes. It puts together some functions
12 | used by all adaptive filters.
13 | """
14 | def __init__(self, n, mu, w="random"):
15 | """
16 |         This class represents a generic adaptive filter.
17 |
18 | **Args:**
19 |
20 |         * `n` : length of the filter (integer) - how many values are in one
21 |         input array (row of the input matrix)
22 |
23 | **Kwargs:**
24 |
25 |         * `mu` : learning rate (float). Also known as step size. If it is too small,
26 | the filter may have bad performance. If it is too high,
27 | the filter will be unstable. The default value can be unstable
28 | for ill-conditioned input data.
29 |
30 | * `w` : initial weights of filter. Possible values are:
31 |
32 | * array with initial weights (1 dimensional array) of filter size
33 |
34 | * "random" : create random weights
35 |
36 | * "zeros" : create zero value weights
37 | """
38 | self.w = self.init_weights(w, n)
39 | self.n = n
40 | self.w_history = False
41 | self.mu = mu
42 |
43 | def learning_rule(self, e, x):
44 | """
45 |         This function computes the increment of the adaptive weights.
46 |
47 | **Args:**
48 |
49 | * `e` : error of the adaptive filter (1d array)
50 |
51 | * `x` : input matrix (2d array)
52 |
53 | **Returns**
54 |
55 | * increments of adaptive weights - result of adaptation
56 | """
57 |         return np.zeros(len(x))  # placeholder - overridden by specific filters
58 |
59 | def init_weights(self, w, n=-1):
60 | """
61 | This function initialises the adaptive weights of the filter.
62 |
63 | **Args:**
64 |
65 | * `w` : initial weights of filter. Possible values are:
66 |
67 | * array with initial weights (1 dimensional array) of filter size
68 |
69 | * "random" : create random weights
70 |
71 | * "zeros" : create zero value weights
72 |
73 |
74 | **Kwargs:**
75 |
76 | * `n` : size of filter (int) - number of filter coefficients.
77 |
78 | **Returns:**
79 |
80 |         * `w` : initialized weights of the filter (1 dimensional array).
81 |
82 | """
83 | if n == -1:
84 | n = self.n
85 | if isinstance(w, str):
86 | if w == "random":
87 | w = np.random.normal(0, 0.5, n)
88 | elif w == "zeros":
89 | w = np.zeros(n)
90 | else:
91 | raise ValueError('Impossible to understand the w')
92 | elif len(w) == n:
93 | try:
94 | w = np.array(w, dtype="float64")
95 | except:
96 | raise ValueError('Impossible to understand the w')
97 | else:
98 | raise ValueError('Impossible to understand the w')
99 | return w
100 |
101 | def predict(self, x):
102 | """
103 | This function calculates the new output value `y` from input array `x`.
104 |
105 | **Args:**
106 |
107 | * `x` : input vector (1 dimension array) in length of filter.
108 |
109 | **Returns:**
110 |
111 | * `y` : output value (float) calculated from input array.
112 |
113 | """
114 | return np.dot(self.w, x)
115 |
116 | def pretrained_run(self, d, x, ntrain=0.5, epochs=1):
117 | """
118 |         This function sacrifices part of the data for a few epochs of learning.
119 |
120 | **Args:**
121 |
122 | * `d` : desired value (1 dimensional array)
123 |
124 | * `x` : input matrix (2-dimensional array). Rows are samples,
125 | columns are input arrays.
126 |
127 | **Kwargs:**
128 |
129 | * `ntrain` : train to test ratio (float), default value is 0.5
130 | (that means 50% of data is used for training)
131 |
132 | * `epochs` : number of training epochs (int), default value is 1.
133 | This number describes how many times the training will be repeated
134 | on dedicated part of data.
135 |
136 | **Returns:**
137 |
138 | * `y` : output value (1 dimensional array).
139 | The size corresponds with the desired value.
140 |
141 | * `e` : filter error for every sample (1 dimensional array).
142 | The size corresponds with the desired value.
143 |
144 | * `w` : vector of final weights (1 dimensional array).
145 | """
146 | Ntrain = int(len(d)*ntrain)
147 | # train
148 | for _ in range(epochs):
149 | self.run(d[:Ntrain], x[:Ntrain])
150 | # test
151 | y, e, w = self.run(d[Ntrain:], x[Ntrain:])
152 | return y, e, w
153 |
154 | def adapt(self, d, x):
155 | """
156 |         Adapt the weights according to one desired value and its input.
157 |
158 | **Args:**
159 |
160 | * `d` : desired value (float)
161 |
162 | * `x` : input array (1-dimensional array)
163 | """
164 | y = self.predict(x)
165 | e = d - y
166 | self.w += self.learning_rule(e, x)
167 |
168 | def run(self, d, x):
169 | """
170 | This function filters multiple samples in a row.
171 |
172 | **Args:**
173 |
174 | * `d` : desired value (1 dimensional array)
175 |
176 | * `x` : input matrix (2-dimensional array). Rows are samples,
177 | columns are input arrays.
178 |
179 | **Returns:**
180 |
181 | * `y` : output value (1 dimensional array).
182 | The size corresponds with the desired value.
183 |
184 | * `e` : filter error for every sample (1 dimensional array).
185 | The size corresponds with the desired value.
186 |
187 | * `w` : history of all weights (2 dimensional array).
188 | Every row is set of the weights for given sample.
189 | """
190 |         # measure the data and check if the dimensions agree
191 | N = len(x)
192 | if not len(d) == N:
193 | raise ValueError('The length of vector d and matrix x must agree.')
194 | self.n = len(x[0])
195 | # prepare data
196 | try:
197 | x = np.array(x)
198 | d = np.array(d)
199 | except:
200 | raise ValueError('Impossible to convert x or d to a numpy array')
201 | # create empty arrays
202 | y = np.zeros(N)
203 | e = np.zeros(N)
204 | self.w_history = np.zeros((N, self.n))
205 | # adaptation loop
206 | for k in range(N):
207 | self.w_history[k, :] = self.w
208 | y[k] = self.predict(x[k])
209 | e[k] = d[k] - y[k]
210 | self.w += self.learning_rule(e[k], x[k])
211 | return y, e, self.w_history
212 |
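# A concrete filter only needs to subclass AdaptiveFilter and provide its
# own learning rule. A minimal LMS-style sketch (hypothetical class name,
# for illustration only):
#
#     class FilterMyLMS(AdaptiveFilter):
#         kind = "MyLMS"
#
#         def learning_rule(self, e, x):
#             # steepest-descent increment: mu * e * x
#             return self.mu * e * x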
213 |
214 | class AdaptiveFilterAP(AdaptiveFilter):
215 | """
216 | This class modifies the AdaptiveFilter class
217 | to allow AP filtering.
218 | """
219 | def __init__(self, *args, order=5, ifc=0.001, **kwargs):
220 | """
221 | **Kwargs:**
222 |
223 | * `order` : projection order (integer) - how many input vectors
224 | are in one input matrix
225 |
226 | * `ifc` : initial offset covariance (float) - regularization term
227 | to prevent problems with inverse matrix
228 |
229 | """
230 | super().__init__(*args, **kwargs)
231 | self.order = order
232 | self.x_mem = np.zeros((self.n, self.order))
233 | self.d_mem = np.zeros(order)
234 | self.ide_ifc = ifc * np.identity(self.order)
235 | self.ide = np.identity(self.order)
236 | self.y_mem = False
237 | self.e_mem = False
238 |
239 | def learning_rule(self, e_mem, x_mem):
240 | """
241 |         This function computes the increment of the adaptive weights.
242 |
243 | **Args:**
244 |
245 | * `e_mem` : error of the adaptive filter (1d array)
246 |
247 | * `x_mem` : input matrix (2d array)
248 |
249 | **Returns**
250 |
251 | * increments of adaptive weights - result of adaptation
252 | """
253 |         return np.zeros(len(x_mem))  # placeholder - overridden by AP filters
254 |
255 | def adapt(self, d, x):
256 | """
257 |         Adapt the weights according to one desired value and its input.
258 |
259 | **Args:**
260 |
261 | * `d` : desired value (float)
262 |
263 | * `x` : input array (1-dimensional array)
264 | """
265 | # create input matrix and target vector
266 | self.x_mem[:, 1:] = self.x_mem[:, :-1]
267 | self.x_mem[:, 0] = x
268 | self.d_mem[1:] = self.d_mem[:-1]
269 | self.d_mem[0] = d
270 | # estimate output and error
271 | self.y_mem = np.dot(self.x_mem.T, self.w)
272 | self.e_mem = self.d_mem - self.y_mem
273 | # update
274 | dw_part1 = np.dot(self.x_mem.T, self.x_mem) + self.ide_ifc
275 | dw_part2 = np.linalg.solve(dw_part1, self.ide)
276 | dw = np.dot(self.x_mem, np.dot(dw_part2, self.e_mem))
277 | self.w += self.mu * dw
278 |
279 | def run(self, d, x):
280 | """
281 | This function filters multiple samples in a row.
282 |
283 | **Args:**
284 |
285 | * `d` : desired value (1 dimensional array)
286 |
287 | * `x` : input matrix (2-dimensional array). Rows are samples,
288 | columns are input arrays.
289 |
290 | **Returns:**
291 |
292 | * `y` : output value (1 dimensional array).
293 | The size corresponds with the desired value.
294 |
295 | * `e` : filter error for every sample (1 dimensional array).
296 | The size corresponds with the desired value.
297 |
298 | * `w` : history of all weights (2 dimensional array).
299 | Every row is set of the weights for given sample.
300 |
301 | """
302 |         # measure the data and check if the dimensions agree
303 | N = len(x)
304 | if not len(d) == N:
305 | raise ValueError('The length of vector d and matrix x must agree.')
306 | self.n = len(x[0])
307 | # prepare data
308 | try:
309 | x = np.array(x)
310 | d = np.array(d)
311 | except:
312 | raise ValueError('Impossible to convert x or d to a numpy array')
313 | # create empty arrays
314 | y = np.zeros(N)
315 | e = np.zeros(N)
316 | self.w_history = np.zeros((N, self.n))
317 | # adaptation loop
318 | for k in range(N):
319 | self.w_history[k, :] = self.w
320 | # create input matrix and target vector
321 | self.x_mem[:, 1:] = self.x_mem[:, :-1]
322 | self.x_mem[:, 0] = x[k]
323 | self.d_mem[1:] = self.d_mem[:-1]
324 | self.d_mem[0] = d[k]
325 | # estimate output and error
326 | self.y_mem = np.dot(self.x_mem.T, self.w)
327 | self.e_mem = self.d_mem - self.y_mem
328 | y[k] = self.y_mem[0]
329 | e[k] = self.e_mem[0]
330 | # update
331 | self.w += self.learning_rule(self.e_mem, self.x_mem)
332 | return y, e, self.w_history
333 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Padasip documentation build configuration file, created by
4 | # sphinx-quickstart on Thu Sep 15 11:17:12 2016.
5 | #
6 | # This file is execfile()d with the current directory set to its
7 | # containing dir.
8 | #
9 | # Note that not all possible configuration values are present in this
10 | # autogenerated file.
11 | #
12 | # All configuration values have a default; values that are commented out
13 | # serve to show the default.
14 |
15 | # If extensions (or modules to document with autodoc) are in another directory,
16 | # add these directories to sys.path here. If the directory is relative to the
17 | # documentation root, use os.path.abspath to make it absolute, like shown here.
18 | #
19 | import os
20 | import sys
21 | sys.path.insert(0, os.path.abspath('../..'))
22 |
23 | # -- General configuration ------------------------------------------------
24 |
25 | # If your documentation needs a minimal Sphinx version, state it here.
26 | #
27 | # needs_sphinx = '1.0'
28 |
29 | # Add any Sphinx extension module names here, as strings. They can be
30 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
31 | # ones.
32 | extensions = [
33 | 'sphinx.ext.autodoc',
34 | 'sphinx.ext.mathjax',
35 | 'sphinx.ext.ifconfig',
36 | 'sphinx.ext.viewcode',
37 | 'sphinx.ext.githubpages',
38 | ]
39 |
40 | # Add any paths that contain templates here, relative to this directory.
41 | templates_path = ['templates']
42 |
43 | # The suffix(es) of source filenames.
44 | # You can specify multiple suffix as a list of string:
45 | #
46 | # source_suffix = ['.rst', '.md']
47 | source_suffix = '.rst'
48 |
49 | # The encoding of source files.
50 | #
51 | # source_encoding = 'utf-8-sig'
52 |
53 | # The master toctree document.
54 | master_doc = 'index'
55 |
56 | # General information about the project.
57 | project = u'Padasip'
58 | copyright = u'2016-2022, Matous C'
59 | author = u'Matous C'
60 |
61 | # The version info for the project you're documenting, acts as replacement for
62 | # |version| and |release|, also used in various other places throughout the
63 | # built documents.
64 | #
65 | # The short X.Y version.
66 | version = u'1.2.2'
67 | # The full version, including alpha/beta/rc tags.
68 | release = version
69 |
70 | # The language for content autogenerated by Sphinx. Refer to documentation
71 | # for a list of supported languages.
72 | #
73 | # This is also used if you do content translation via gettext catalogs.
74 | # Usually you set "language" from the command line for these cases.
75 | language = None
76 |
77 | # There are two options for replacing |today|: either, you set today to some
78 | # non-false value, then it is used:
79 | #
80 | # today = ''
81 | #
82 | # Else, today_fmt is used as the format for a strftime call.
83 | #
84 | # today_fmt = '%B %d, %Y'
85 |
86 | # List of patterns, relative to source directory, that match files and
87 | # directories to ignore when looking for source files.
88 | # This patterns also effect to html_static_path and html_extra_path
89 | exclude_patterns = []
90 |
91 | # The reST default role (used for this markup: `text`) to use for all
92 | # documents.
93 | #
94 | # default_role = None
95 |
96 | # If true, '()' will be appended to :func: etc. cross-reference text.
97 | #
98 | # add_function_parentheses = True
99 |
100 | # If true, the current module name will be prepended to all description
101 | # unit titles (such as .. function::).
102 | #
103 | # add_module_names = True
104 |
105 | # If true, sectionauthor and moduleauthor directives will be shown in the
106 | # output. They are ignored by default.
107 | #
108 | # show_authors = False
109 |
110 | # The name of the Pygments (syntax highlighting) style to use.
111 | pygments_style = 'sphinx'
112 |
113 | # A list of ignored prefixes for module index sorting.
114 | # modindex_common_prefix = []
115 |
116 | # If true, keep warnings as "system message" paragraphs in the built documents.
117 | # keep_warnings = False
118 |
119 | # If true, `todo` and `todoList` produce output, else they produce nothing.
120 | todo_include_todos = False
121 |
122 |
123 | # -- Options for HTML output ----------------------------------------------
124 |
125 | # The theme to use for HTML and HTML Help pages. See the documentation for
126 | # a list of builtin themes.
127 | #
128 | html_theme = 'alabaster'
129 |
130 | # Theme options are theme-specific and customize the look and feel of a theme
131 | # further. For a list of options available for each theme, see the
132 | # documentation.
133 | #
134 | # html_theme_options = {}
135 |
136 | # Add any paths that contain custom themes here, relative to this directory.
137 | # html_theme_path = []
138 |
139 | # The name for this set of Sphinx documents.
140 | # " v documentation" by default.
141 | #
142 | # html_title = u'Padasip v0.2'
143 |
144 | # A shorter title for the navigation bar. Default is the same as html_title.
145 | #
146 | # html_short_title = None
147 |
148 | # The name of an image file (relative to this directory) to place at the top
149 | # of the sidebar.
150 | #
151 | # html_logo = None
152 |
153 | # The name of an image file (relative to this directory) to use as a favicon of
154 | # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
155 | # pixels large.
156 | #
157 | # html_favicon = None
158 |
159 | # Add any paths that contain custom static files (such as style sheets) here,
160 | # relative to this directory. They are copied after the builtin static files,
161 | # so a file named "default.css" will overwrite the builtin "default.css".
162 | html_static_path = ['static']
163 |
164 | # Add any extra paths that contain custom files (such as robots.txt or
165 | # .htaccess) here, relative to this directory. These files are copied
166 | # directly to the root of the documentation.
167 | #
168 | # html_extra_path = []
169 |
170 | # If not None, a 'Last updated on:' timestamp is inserted at every page
171 | # bottom, using the given strftime format.
172 | # The empty string is equivalent to '%b %d, %Y'.
173 | #
174 | # html_last_updated_fmt = None
175 |
176 | # If true, SmartyPants will be used to convert quotes and dashes to
177 | # typographically correct entities.
178 | #
179 | # html_use_smartypants = True
180 |
181 | # Custom sidebar templates, maps document names to template names.
182 | #
183 | html_sidebars = { '**': ['globaltoc.html', 'relations.html', 'searchbox.html'], }
184 |
185 | # Additional templates that should be rendered to pages, maps page names to
186 | # template names.
187 | #
188 | # html_additional_pages = {}
189 |
190 | # If false, no module index is generated.
191 | #
192 | # html_domain_indices = True
193 |
194 | # If false, no index is generated.
195 | #
196 | # html_use_index = True
197 |
198 | # If true, the index is split into individual pages for each letter.
199 | #
200 | # html_split_index = False
201 |
202 | # If true, links to the reST sources are added to the pages.
203 | #
204 | # html_show_sourcelink = True
205 |
206 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
207 | #
208 | # html_show_sphinx = True
209 |
210 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
211 | #
212 | # html_show_copyright = True
213 |
214 | # If true, an OpenSearch description file will be output, and all pages will
215 | # contain a <link> tag referring to it. The value of this option must be the
216 | # base URL from which the finished HTML is served.
217 | #
218 | # html_use_opensearch = ''
219 |
220 | # This is the file name suffix for HTML files (e.g. ".xhtml").
221 | # html_file_suffix = None
222 |
223 | # Language to be used for generating the HTML full-text search index.
224 | # Sphinx supports the following languages:
225 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
226 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
227 | #
228 | # html_search_language = 'en'
229 |
230 | # A dictionary with options for the search language support, empty by default.
231 | # 'ja' uses this config value.
232 | # 'zh' users can customize the `jieba` dictionary path.
233 | #
234 | # html_search_options = {'type': 'default'}
235 |
236 | # The name of a javascript file (relative to the configuration directory) that
237 | # implements a search results scorer. If empty, the default will be used.
238 | #
239 | # html_search_scorer = 'scorer.js'
240 |
241 | # Output file base name for HTML help builder.
242 | htmlhelp_basename = 'Padasipdoc'
243 |
244 | # -- Options for LaTeX output ---------------------------------------------
245 |
246 | latex_elements = {
247 | # The paper size ('letterpaper' or 'a4paper').
248 | #
249 | # 'papersize': 'letterpaper',
250 |
251 | # The font size ('10pt', '11pt' or '12pt').
252 | #
253 | # 'pointsize': '10pt',
254 |
255 | # Additional stuff for the LaTeX preamble.
256 | #
257 | # 'preamble': '',
258 |
259 | # Latex figure (float) alignment
260 | #
261 | # 'figure_align': 'htbp',
262 | }
263 |
264 | # Grouping the document tree into LaTeX files. List of tuples
265 | # (source start file, target name, title,
266 | # author, documentclass [howto, manual, or own class]).
267 | latex_documents = [
268 | (master_doc, 'Padasip.tex', u'Padasip Documentation',
269 | u'Matous C', 'manual'),
270 | ]
271 |
272 | # The name of an image file (relative to this directory) to place at the top of
273 | # the title page.
274 | #
275 | # latex_logo = None
276 |
277 | # For "manual" documents, if this is true, then toplevel headings are parts,
278 | # not chapters.
279 | #
280 | # latex_use_parts = False
281 |
282 | # If true, show page references after internal links.
283 | #
284 | # latex_show_pagerefs = False
285 |
286 | # If true, show URL addresses after external links.
287 | #
288 | # latex_show_urls = False
289 |
290 | # Documents to append as an appendix to all manuals.
291 | #
292 | # latex_appendices = []
293 |
294 | # If false, will not define \strong, \code, \titleref, \crossref ... but only
295 | # \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
296 | # packages.
297 | #
298 | # latex_keep_old_macro_names = True
299 |
300 | # If false, no module index is generated.
301 | #
302 | # latex_domain_indices = True
303 |
304 |
305 | # -- Options for manual page output ---------------------------------------
306 |
307 | # One entry per manual page. List of tuples
308 | # (source start file, name, description, authors, manual section).
309 | man_pages = [
310 | (master_doc, 'padasip', u'Padasip Documentation',
311 | [author], 1)
312 | ]
313 |
314 | # If true, show URL addresses after external links.
315 | #
316 | # man_show_urls = False
317 |
318 |
319 | # -- Options for Texinfo output -------------------------------------------
320 |
321 | # Grouping the document tree into Texinfo files. List of tuples
322 | # (source start file, target name, title, author,
323 | # dir menu entry, description, category)
324 | texinfo_documents = [
325 | (master_doc, 'Padasip', u'Padasip Documentation',
326 | author, 'Padasip', 'One line description of project.',
327 | 'Miscellaneous'),
328 | ]
329 |
330 | # Documents to append as an appendix to all manuals.
331 | #
332 | # texinfo_appendices = []
333 |
334 | # If false, no module index is generated.
335 | #
336 | # texinfo_domain_indices = True
337 |
338 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
339 | #
340 | # texinfo_show_urls = 'footnote'
341 |
342 | # If true, do not generate a @detailmenu in the "Top" node's menu.
346 | #
347 | # texinfo_no_detailmenu = False
348 |
349 | def setup(app):
350 | # app.add_stylesheet('custom.css') # may also be an URL
351 | app.add_css_file("custom.css")
352 |
--------------------------------------------------------------------------------
/padasip/ann/mlp.py:
--------------------------------------------------------------------------------
1 | r"""
2 | .. versionadded:: 0.3
3 |
4 | This module stores everything related to the Multi-layer perceptron (MLP).
5 | This neural network can be used for classification and regression.
6 |
7 |
8 | Minimal Working Example
9 | ************************
10 |
11 | .. code-block:: python
12 |
13 | import numpy as np
14 | import padasip as pa
15 |
16 | # data creation
17 | x = np.array([
18 | [0,0,0,0], [1,0,0,0], [0,1,0,0], [1,1,0,0],
19 | [0,0,1,0], [1,0,1,0], [0,1,1,0], [1,1,1,0],
20 | [0,0,0,1], [1,0,0,1], [0,1,0,1], [1,1,0,1],
21 | [0,0,1,1], [1,0,1,1], [0,1,1,1], [1,1,1,1]
22 | ])
23 | d = np.array([0,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0])
24 | N = len(d)
25 | n = 4
26 |
27 | # creation of neural network
28 | nn = pa.ann.NetworkMLP([5,6], n, outputs=1, activation="tanh", mu="auto")
29 |
30 | # training
31 | e, mse = nn.train(x, d, epochs=200, shuffle=True)
32 |
33 | # get results
34 | y = nn.run(x)
35 |
36 | And the result (pairs: target, output) can look like
37 |
38 | >>> for i in zip(d, y): print i
39 | ...
40 | (0, 0.0032477183193071906)
41 | (1, 1.0058082383308447)
42 | (1, 1.0047503447788306)
43 | (0, 0.0046026142618665845)
44 | (0, 0.0003037425037410007)
45 | (1, 1.0017672193832869)
46 | (0, 0.0015817734995124679)
47 | (0, 0.0019115885715706904)
48 | (1, 0.99342117275580499)
49 | (0, 0.00069114178424850147)
50 | (1, 1.0021789943501729)
51 | (0, 0.0021355836851727717)
52 | (1, 0.99809312951378826)
53 | (1, 1.0071488717506856)
54 | (1, 1.0067500768423701)
55 | (0, -0.0045962250501771244)
56 | >>>
57 |
58 |
59 |
60 | Learning Rate Selection
61 | **************************
62 |
63 | If you select the learning rate (:math:`\mu` in equations,
64 | or `mu` in code) manually, the same value is used for all nodes;
65 | otherwise it is selected automatically for every node as follows
66 |
67 | :math:`\mu_{ij} = m^{-0.5}`
68 |
69 | where :math:`m` is the number of inputs of the given node.
70 | The automatic selection is the recommended and default option.
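
A quick numeric check of this rule (the node size here is illustrative only):

.. code-block:: python

    m = 16          # number of inputs of a node
    mu = m ** -0.5  # automatically selected learning rate, here 0.25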
71 |
72 |
73 | Default Values of Weights
74 | ****************************
75 |
76 | The distribution from which the initial weights are drawn is chosen
77 | automatically; it has zero mean and its standard deviation is estimated as follows
78 |
79 | :math:`\sigma_{w} = m^{-0.5}`
80 |
81 | where :math:`m` is the number of inputs of the given node.
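
A minimal sketch of this initialization (mirroring what `Layer.__init__`
below does; the layer sizes are illustrative only):

.. code-block:: python

    import numpy as np

    n_layer, n_input = 5, 4
    sigma = n_input ** -0.5  # standard deviation of the initial weights
    w = np.random.normal(0, sigma, (n_layer, n_input + 1))  # +1 for the bias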
82 |
83 | Code Explanation
84 | ******************
85 | """
86 | import numpy as np
87 | import warnings
88 |
89 | class Layer():
90 | """
91 | This class represents a single hidden layer of the MLP.
92 |
93 | **Args:**
94 |
95 | * `n_layer` : size of the layer (int)
96 |
97 | * `n_input` : how many inputs the layer has (int)
98 | 
99 | * `activation_f` : which function is used as the activation function (str)
100 | 
101 | * `mu` : learning rate (float or str); it can be a float value used directly,
102 | or the string `"auto"` for automatic selection of the learning rate
103 |
104 | """
105 |
106 | def __init__(self, n_layer, n_input, activation_f, mu):
107 | sigma = n_input**(-0.5)
108 | if mu == "auto":
109 | self.mu = sigma
110 | else:
111 | self.mu = mu
112 | self.n_input = n_input
113 | self.w = np.random.normal(0, sigma, (n_layer, n_input+1))  # +1 for the bias weight
114 | self.x = np.ones(n_input+1)  # x[0] stays 1 and serves as the bias input
115 | self.y = np.zeros(n_input+1)
116 | self.f = activation_f
117 |
118 | def activation(self, x, f="sigmoid", der=False):
119 | """
120 | This function processes the layer outputs with the activation function.
121 |
122 | **Args:**
123 |
124 | * `x` : array to process (1-dimensional array)
125 |
126 | **Kwargs:**
127 |
128 | * `f` : activation function
129 |
130 | * `der` : if True, return the derivative instead of the normal output (bool)
131 |
132 | **Returns:**
133 |
134 | * values processed with activation function (1-dimensional array)
135 |
136 | """
137 | if f == "sigmoid":
138 | if der:
139 | return x * (1 - x)  # derivative in terms of the output: y*(1-y)
140 | return 1. / (1 + np.exp(-x))
141 | if f == "tanh":
142 | if der:
143 | return 1 - x**2  # derivative in terms of the output: 1-y**2
144 | return (2. / (1 + np.exp(-2*x))) - 1
145 |
146 | def predict(self, x):
147 | """
148 | This function makes a forward pass through this layer (no update).
149 |
150 | **Args:**
151 |
152 | * `x` : input vector (1-dimensional array)
153 |
154 | **Returns:**
155 |
156 | * `y` : output of this layer (1-dimensional array).
157 | Size depends on number of nodes in this layer.
158 |
159 | """
160 | self.x[1:] = x  # x[0] stays equal to 1 and acts as the bias input
161 | self.y = self.activation(np.sum(self.w*self.x, axis=1), f=self.f)
162 | return self.y
163 |
164 | def update(self, w, e):
165 | """
166 | This function updates the layer weights according to the error
167 | back-propagated from the following layer.
168 | 
169 | **Args:**
170 | 
171 | * `w` : weights of the following layer (1- or 2-dimensional array)
172 | 
173 | * `e` : error of the following layer (float or 1-dimensional array)
174 | 
175 | **Returns:**
176 | 
177 | * `w` : weights of this layer without the bias weights
178 | (2-dimensional array). Every row represents one node.
179 | 
180 | * `e` : error of this layer used for the update (1-dimensional array)
181 | """
182 | if len(w.shape) == 1:  # the following layer has a single output node
183 | e = self.activation(self.y, f=self.f, der=True) * e * w
184 | dw = self.mu * np.outer(e, self.x)
185 | else:  # the following layer is a 2-dimensional weight matrix
186 | e = self.activation(self.y, f=self.f, der=True) * (1 - self.y) * np.dot(e, w)
187 | dw = self.mu * np.outer(e, self.x)
188 | w = self.w[:, 1:]  # weights handed to the preceding layer (bias column dropped)
189 | self.w += dw
190 | return w, e
191 |
192 |
193 | class NetworkMLP():
194 | """
195 | This class represents a Multi-layer Perceptron neural network.
196 |
197 | **Args:**
198 |
199 | * `layers` : array describing hidden layers of network
200 | (1-dimensional array of integers). Every number in array represents
201 | one hidden layer. For example, [3, 6, 2] creates a
202 | network with three hidden layers. First layer will have 3 nodes,
203 | second layer will have 6 nodes and the last hidden layer
204 | will have 2 nodes.
205 |
206 | * `n_input` : number of network inputs (int).
207 |
208 | **Kwargs:**
209 |
210 | * `outputs` : number of network outputs (int). Default is 1.
211 |
212 | * `activation` : activation function (str)
213 |
214 | * "sigmoid" - sigmoid
215 |
216 | * "tanh" : hyperbolic tangens
217 |
218 | * `mu` : learning rate (float or str), it can be:
219 | * float value - the value is used directly as `mu`
220 |
221 | * "auto" - this will trigger automatic selection of the learning rate
222 |
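A minimal construction sketch matching the description of `layers` above
(the sizes are illustrative only):

.. code-block:: python

    # three hidden layers with 3, 6 and 2 nodes; 4 network inputs, 1 output
    nn = NetworkMLP([3, 6, 2], 4, outputs=1, activation="sigmoid", mu="auto")
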
223 | """
224 | def __init__(self, layers, n_input, outputs=1, activation="sigmoid", mu="auto"):
225 | warnings.warn(
226 | "MLP is deprecated, use different Python library instead",
227 | DeprecationWarning
228 | )
229 | sigma = layers[-1]**(-0.5)
230 | # set learning rate
231 | if mu == "auto":
232 | self.mu = sigma
233 | else:
234 | try:
235 | param = float(mu)
236 | except (TypeError, ValueError):
237 | raise ValueError(
238 | 'Parameter mu is not float or similar'
239 | )
240 | self.mu = param  # store the validated numeric value
241 | self.n_input = n_input
242 | # create output layer
243 | self.outputs = outputs
244 | if self.outputs == 1:
245 | self.w = np.random.normal(0, sigma, layers[-1]+1)
246 | else:
247 | self.w = np.random.normal(0, sigma, (outputs, layers[-1]+1))
248 | self.x = np.ones(layers[-1]+1)
249 | self.y = 0
250 | # create hidden layers
251 | self.n_layers = len(layers)
252 | self.layers = []
253 | for n in range(self.n_layers):
254 | if n == 0:
255 | l = Layer(layers[n], n_input, activation, mu)
256 | self.layers.append(l)
257 | else:
258 | l = Layer(layers[n], layers[n-1], activation, mu)
259 | self.layers.append(l)
260 |
261 | def train(self, x, d, epochs=10, shuffle=False):
262 | """
263 | Function for batch training of MLP.
264 |
265 | **Args:**
266 |
267 | * `x` : input array (2-dimensional array).
268 | Every row represents one input vector (features).
269 |
270 | * `d` : target array (n-dimensional array).
271 | Every row represents target for one input vector.
272 | Target can be one or more values (in case of multiple outputs).
273 |
274 | **Kwargs:**
275 |
276 | * `epochs` : amount of epochs (int). That means how many times
277 | the MLP will iterate over the passed set of data (`x`, `d`).
278 |
279 | * `shuffle` : if True, the order of the input-target pairs is shuffled (bool).
280 | That means the pairs are presented in a different order in every epoch.
281 |
282 | **Returns:**
283 |
284 | * `e`: error vector (m-dimensional array). Every row represents
285 | error (or errors) for an input and output in given epoch.
286 | The size of this array is length of provided data times
287 | amount of epochs (`N*epochs`).
288 |
289 | * `MSE` : mean squared error (1-dimensional array). Every value
290 | stands for MSE of one epoch.
291 |
292 | """
293 | # measure the data and check if the dimensions agree
294 | N = len(x)
295 | if not len(d) == N:
296 | raise ValueError('The length of vector d and matrix x must agree.')
297 | if not len(x[0]) == self.n_input:
298 | raise ValueError('The number of network inputs is not correct.')
299 | if self.outputs == 1:
300 | if not len(d.shape) == 1:
301 | raise ValueError('For one output MLP the d must have one dimension')
302 | else:
303 | if not d.shape[1] == self.outputs:
304 | raise ValueError('The number of outputs must agree with number of columns in d')
305 | try:
306 | x = np.array(x)
307 | d = np.array(d)
308 | except:
309 | raise ValueError('Impossible to convert x or d to a numpy array')
310 | # create empty arrays
311 | if self.outputs == 1:
312 | e = np.zeros(epochs*N)
313 | else:
314 | e = np.zeros((epochs*N, self.outputs))
315 | MSE = np.zeros(epochs)
316 | # shuffle data if demanded
317 | if shuffle:
318 | randomize = np.arange(len(x))
319 | np.random.shuffle(randomize)
320 | x = x[randomize]
321 | d = d[randomize]
322 | # adaptation loop
323 | for epoch in range(epochs):
324 | for k in range(N):
325 | self.predict(x[k])
326 | e[(epoch*N)+k] = self.update(d[k])
327 | MSE[epoch] = np.sum(e[epoch*N:(epoch+1)*N]**2) / N  # use all N errors of the epoch
328 | return e, MSE
329 |
330 | def run(self, x):
331 | """
332 | Function for batch usage of already trained and tested MLP.
333 |
334 | **Args:**
335 |
336 | * `x` : input array (2-dimensional array).
337 | Every row represents one input vector (features).
338 |
339 | **Returns:**
340 |
341 | * `y`: output vector (n-dimensional array). Every row represents
342 | output (outputs) for an input vector.
343 |
344 | """
345 | # measure the data and check if the dimensions agree
346 | try:
347 | x = np.array(x)
348 | except:
349 | raise ValueError('Impossible to convert x to a numpy array')
350 | N = len(x)
351 | # create empty arrays
352 | if self.outputs == 1:
353 | y = np.zeros(N)
354 | else:
355 | y = np.zeros((N, self.outputs))
356 | # predict data in loop
357 | for k in range(N):
358 | y[k] = self.predict(x[k])
359 | return y
360 |
361 | def test(self, x, d):
362 | """
363 | Function for batch test of already trained MLP.
364 |
365 | **Args:**
366 |
367 | * `x` : input array (2-dimensional array).
368 | Every row represents one input vector (features).
369 |
370 | * `d` : target array (n-dimensional array).
371 | Every row represents target for one input vector.
372 | Target can be one or more values (in case of multiple outputs).
373 |
374 | **Returns:**
375 |
376 | * `e`: error vector (n-dimensional array). Every row represents
377 | error (or errors) for an input and output.
378 |
379 | """
380 | # measure the data and check if the dimensions agree
381 | N = len(x)
382 | if not len(d) == N:
383 | raise ValueError('The length of vector d and matrix x must agree.')
384 | if not len(x[0]) == self.n_input:
385 | raise ValueError('The number of network inputs is not correct.')
386 | if self.outputs == 1:
387 | if not len(d.shape) == 1:
388 | raise ValueError('For one output MLP the d must have one dimension')
389 | else:
390 | if not d.shape[1] == self.outputs:
391 | raise ValueError('The number of outputs must agree with number of columns in d')
392 | try:
393 | x = np.array(x)
394 | d = np.array(d)
395 | except:
396 | raise ValueError('Impossible to convert x or d to a numpy array')
397 | # create empty arrays
398 | if self.outputs == 1:
399 | y = np.zeros(N)
400 | else:
401 | y = np.zeros((N, self.outputs))
402 | # measure in loop
403 | for k in range(N):
404 | y[k] = self.predict(x[k])
405 | return d - y
406 |
407 | def predict(self, x):
408 | """
409 | This function makes a forward pass through the MLP (no update).
410 |
411 | **Args:**
412 |
413 | * `x` : input vector (1-dimensional array)
414 |
415 | **Returns:**
416 |
417 | * `y` : output of MLP (float or 1-dimensional array).
418 | Size depends on number of MLP outputs.
419 |
420 | """
421 | # forward pass to hidden layers
422 | for l in self.layers:
423 | x = l.predict(x)
424 | self.x[1:] = x
425 | # forward pass to output layer
426 | if self.outputs == 1:
427 | self.y = np.dot(self.w, self.x)
428 | else:
429 | self.y = np.sum(self.w*self.x, axis=1)
430 | return self.y
431 |
432 | def update(self, d):
433 | """
434 | This function makes an update according to the provided target
435 | and the last used input vector.
436 |
437 | **Args:**
438 |
439 | * `d` : target (float or 1-dimensional array).
440 | Size depends on number of MLP outputs.
441 |
442 | **Returns:**
443 |
444 | * `e` : error used for update (float or 1-dimensional array).
445 | Size corresponds to the size of input `d`.
446 |
447 | """
448 | # update output layer
449 | e = d - self.y  # error of the output layer
450 | error = np.copy(e)  # keep an unmodified copy to return to the caller
451 | if self.outputs == 1:
452 | dw = self.mu * e * self.x
453 | w = np.copy(self.w)[1:]
454 | else:
455 | dw = self.mu * np.outer(e, self.x)
456 | w = np.copy(self.w)[:, 1:]
457 | self.w += dw
458 | # update hidden layers
459 | for l in reversed(self.layers):
460 | w, e = l.update(w, e)
461 | return error
462 |
--------------------------------------------------------------------------------