├── .github ├── ISSUE_TEMPLATE │ └── bug_report.md └── workflows │ ├── main.yml │ └── pypi.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yml ├── AUTHORS.rst ├── CONTRIBUTING.rst ├── LICENSE ├── MANIFEST.in ├── README.rst ├── doc ├── ChangeLog.rst ├── Makefile ├── _static │ ├── copybutton.js │ ├── css │ │ ├── buttons.dataTables.min.css │ │ ├── custom.css │ │ ├── jquery.dataTables.min.css │ │ ├── jquery.dropdown.min.css │ │ └── report.css │ ├── jquery.jcarousel.min.js │ └── jquery.js ├── _templates │ └── layout.html ├── autosum.rst ├── biblio.rst ├── concepts.rst ├── conf.py ├── contributions.rst ├── create_psd.py ├── gallery.rst ├── images │ ├── spectrum_logo_large.png │ ├── spectrum_logo_large.xcf │ ├── spectrum_logo_large2.png │ ├── spectrum_logo_medium.png │ └── spectrum_logo_small.png ├── index.rst ├── install.rst ├── joss │ ├── paper.bib │ ├── paper.md │ └── psd_all.png ├── logo.png ├── make.bat ├── overview.txt ├── psd_all.png ├── quickstart.rst ├── ref_fourier.rst ├── ref_mtm.rst ├── ref_others.rst ├── ref_param.rst ├── ref_psd_other.rst ├── requirements.txt ├── source │ ├── _static │ │ └── jquery.js │ └── tutorial_lpc.rst.temp ├── spectral_analysis.rst ├── tutorial_criteria.rst ├── tutorial_front_image.rst ├── tutorial_pburg.rst ├── tutorial_psd.rst ├── tutorial_variance_estimate.rst ├── tutorial_window.rst ├── tutorial_yulewalker.rst └── tutorials.rst ├── environment.yml ├── examples ├── README.txt ├── plot_allpsd.py ├── plot_spectrogram.py └── plot_yule.py ├── requirements.txt ├── setup.cfg ├── setup.py ├── src ├── cpp │ ├── __init__.py │ └── mydpss.c └── spectrum │ ├── __init__.py │ ├── arma.py │ ├── burg.py │ ├── cholesky.py │ ├── correlation.py │ ├── correlog.py │ ├── covar.py │ ├── criteria.py │ ├── data │ ├── DOLPHINS.wav │ ├── __init__.py │ ├── sunspot.dat │ └── sunspot_monthly.dat │ ├── datasets.py │ ├── eigen.py │ ├── eigenfre.py │ ├── errors.py │ ├── io.py │ ├── levinson.py │ ├── linalg.py │ ├── linear_prediction.py │ ├── lpc.py │ ├── minvar.py │ ├── modcovar.py │ ├── mtm.py │ ├── periodogram.py │ ├── psd.py │ ├── spectrogram.py │ ├── toeplitz.py │ ├── tools.py │ ├── transfer.py │ ├── waveform.py │ ├── window.py │ └── yulewalker.py └── test ├── __init__.py ├── test_arma.py ├── test_burg.py ├── test_cholesky.py ├── test_correlation.py ├── test_correlog.py ├── test_covar.py ├── test_criteria.py ├── test_datasets.py ├── test_eigen.py ├── test_eigenfre.py ├── test_errors.py ├── test_io.py ├── test_levinson.py ├── test_linalg.py ├── test_linear_prediction.py ├── test_lms.py ├── test_lpc.py ├── test_minvar.py ├── test_modcovar.py ├── test_mtm.py ├── test_periodogram.py ├── test_psd.py ├── test_spectrum.py ├── test_spetrogram.py ├── test_toeplitz.py ├── test_tools.py ├── test_transfer.py ├── test_waveform.py ├── test_window.py └── test_yulewalker.py /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **1. Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **2. To Reproduce** 14 | If you are facing a bug or installation problem, I cannot help yo without some information of way to reproduce the errors. Please complete the following information as much as possible 15 | - OS: [e.g. Fedora, windows version, mac version] 16 | - Python version. 
for example 3.7.2 (please provide X.Y.Z numbers) 17 | - Version of spectrum: 18 | 19 | you can figure out the version using this python code:: 20 | 21 | import spectrum 22 | spectrum.__version__ 23 | 24 | **3.Expected behavior** 25 | A clear and concise description of what you expected to happen. 26 | 27 | **4.Screenshots** 28 | If applicable, add screenshots to help explain your problem. 29 | 30 | **5.What you think may explain the pbl** 31 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | branches-ignore: [] 9 | 10 | jobs: 11 | build-linux: 12 | runs-on: ubuntu-latest 13 | strategy: 14 | max-parallel: 5 15 | matrix: 16 | python: [3.9, '3.10', '3.11'] 17 | fail-fast: false 18 | 19 | steps: 20 | - uses: actions/checkout@v2 21 | - name: Set up Python 3.X 22 | uses: actions/setup-python@v2 23 | with: 24 | python-version: ${{ matrix.python }} 25 | - name: Install the package itself 26 | run: | 27 | pip install .[testing] 28 | - name: Test with pytest 29 | run: | 30 | pytest --cov-report term --cov=spectrum 31 | 32 | - name: coveralls 33 | run: | 34 | pip install coveralls 35 | coveralls --service=github 36 | env: 37 | GITHUB_TOKEN: ${{ github.token }} 38 | -------------------------------------------------------------------------------- /.github/workflows/pypi.yml: -------------------------------------------------------------------------------- 1 | name: Publish to PyPI 2 | on: 3 | workflow_dispatch: 4 | push: 5 | tags: 6 | - '*' 7 | 8 | jobs: 9 | build-n-publish: 10 | name: Build and publish to PyPI and TestPyPI 11 | runs-on: ubuntu-20.04 12 | steps: 13 | - uses: actions/checkout@main 14 | - name: Set up Python 3.8 15 | uses: actions/setup-python@v2 16 | with: 17 | python-version: 3.8 18 | 19 | - name: Install package 20 | run: | 21 | pip install build 22 | 23 | - name: Build source tarball 24 | run: | 25 | rm -rf dist; 26 | python setup.py sdist 27 | 28 | - name: Publish distribution to Test PyPI 29 | uses: pypa/gh-action-pypi-publish@release/v1 30 | with: 31 | user: __token__ 32 | password: ${{ secrets.TEST_PYPI_API_TOKEN }} 33 | repository_url: https://test.pypi.org/legacy/ 34 | - name: Publish distribution to PyPI 35 | if: startsWith(github.ref, 'refs/tags') 36 | uses: pypa/gh-action-pypi-publish@release/v1 37 | with: 38 | user: __token__ 39 | password: ${{ secrets.PYPI_API_TOKEN }} 40 | 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | bin/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | eggs/ 16 | lib/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | *.egg-info/ 22 | .installed.cfg 23 | *.egg 24 | 25 | # Installer logs 26 | pip-log.txt 27 | pip-delete-this-directory.txt 28 | 29 | # Unit test / coverage reports 30 | htmlcov/ 31 | .tox/ 32 | .coverage 33 | .cache 34 | nosetests.xml 35 | coverage.xml 36 | 37 | # Translations 38 | *.mo 39 | 40 | # Mr Developer 41 | .mr.developer.cfg 42 | .project 43 | .pydevproject 44 | 45 | # Rope 46 | .ropeproject 47 | 48 | # Django stuff: 49 | *.log 50 | *.pot 51 | 52 | # Sphinx documentation 53 | docs/_build/ 54 | 55 | .idea 56 | 
-------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | 2 | files: '\.(py|rst|sh)$' 3 | fail_fast: false 4 | 5 | repos: 6 | - repo: https://github.com/pre-commit/pre-commit-hooks 7 | rev: v3.2.0 8 | hooks: 9 | - id: trailing-whitespace 10 | - id: end-of-file-fixer 11 | - id: check-yaml 12 | #- id: check-executables-have-shebangs 13 | - id: check-ast 14 | 15 | - repo: https://github.com/pycqa/flake8 16 | rev: 6.1.0 17 | hooks: 18 | - id: flake8 19 | args: ["-j8", "--ignore=E203,E501,W503,E722", "--max-line-length=120", "--exit-zero"] 20 | 21 | - repo: https://github.com/psf/black 22 | rev: 22.10.0 23 | hooks: 24 | - id: black 25 | args: ["--line-length=120"] 26 | exclude: E501 27 | 28 | - repo: https://github.com/pycqa/isort 29 | rev: 5.12.0 30 | hooks: 31 | - id: isort 32 | args: ["--profile", "black"] # solves conflicts between black and isort 33 | 34 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | sphinx: 3 | configuration: doc/conf.py 4 | builder: html 5 | 6 | # Set the version of Python and other tools you might need 7 | build: 8 | os: ubuntu-22.04 9 | tools: 10 | python: "3.10" 11 | 12 | 13 | formats: [] 14 | python: 15 | # install doc/requirements at the end to make sure pillow 9.5.0 is used 16 | # also needs sphinx <7 17 | install: 18 | - method: pip 19 | path: . 20 | - requirements: doc/requirements.txt 21 | 22 | -------------------------------------------------------------------------------- /AUTHORS.rst: -------------------------------------------------------------------------------- 1 | - Thomas Cokelaer (main author) 2 | - https://github.com/juhasch 3 | - https://github.com/anielsen001 4 | - https://github.com/carlkl 5 | - https://github.com/gozzilli 6 | - https://github.com/wanglongqi 7 | - https://github.com/alfredo-f 8 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | ================================== 2 | Spectrum contributing guidelines 3 | ================================== 4 | 5 | Issues 6 | ========== 7 | 8 | You have an issue ? You found a bug ? Please submit an issue with 9 | 10 | - a description of the problem 11 | - the version of Spectrum used 12 | - the error message if any 13 | 14 | 15 | pull requests 16 | =================== 17 | 18 | Pull requests are always welcome, and the Spectrum community appreciates 19 | any help you may give. 20 | 21 | When submitting a pull request, we ask you to check the following: 22 | 23 | 1. **Unit tests**, **documentation**, and **code style** are in order. 24 | 25 | It's also OK to submit work in progress if you're unsure of what 26 | this exactly means, in which case you'll likely be asked to make 27 | some further changes. 28 | 29 | 2. The contributed code will be **licensed under spectrum's license** (BSD3), 30 | If you did not write the code yourself, you ensure the existing 31 | license is compatible and include the license information in the 32 | contributed files, or obtain a permission from the original 33 | author to relicense the contributed code. 
34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2011-2017, Thomas Cokelaer 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * Neither the name of spectrum nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst L 2 | include requirements.txt AUTHORS.rst LICENSE requirements-dev.txt requirements.txt 3 | 4 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | SPECTRUM : Spectral Analysis in Python 2 | ========================================== 3 | 4 | .. image:: https://badge.fury.io/py/spectrum.svg 5 | :target: https://pypi.python.org/pypi/spectrum 6 | 7 | .. image:: https://github.com/cokelaer/spectrum/actions/workflows/main.yml/badge.svg?branch=master 8 | :target: https://github.com/cokelaer/spectrum/actions/workflows/main.yml 9 | 10 | .. image:: https://coveralls.io/repos/cokelaer/spectrum/badge.png?branch=master 11 | :target: https://coveralls.io/r/cokelaer/spectrum?branch=master 12 | 13 | .. image:: https://anaconda.org/conda-forge/spectrum/badges/license.svg 14 | :target: https://anaconda.org/conda-forge/spectrum 15 | 16 | .. image:: https://anaconda.org/conda-forge/spectrum/badges/version.svg 17 | :target: https://anaconda.org/conda-forge/spectrum/badges/version.svg 18 | 19 | .. image:: https://anaconda.org/conda-forge/spectrum/badges/downloads.svg 20 | :target: https://anaconda.org/conda-forge/spectrum 21 | 22 | .. 
image:: http://joss.theoj.org/papers/e4e34e78e4a670f2ca9a6a97ce9d3b8e/status.svg 23 | :target: http://joss.theoj.org/papers/e4e34e78e4a670f2ca9a6a97ce9d3b8e 24 | 25 | 26 | 27 | :contributions: Please join https://github.com/cokelaer/spectrum 28 | :contributors: https://github.com/cokelaer/spectrum/graphs/contributors 29 | :issues: Please use https://github.com/cokelaer/spectrum/issues 30 | :documentation: http://pyspectrum.readthedocs.io/ 31 | :Citation: Cokelaer et al., (2017), 'Spectrum': Spectral Analysis in Python, Journal of Open Source Software, 2(18), 348, doi:10.21105/joss.00348 32 | 33 | 34 | 35 | .. image:: http://www.thomas-cokelaer.info/software/spectrum/html/_images/psd_all.png 36 | :class: align-right 37 | :width: 50% 38 | 39 | **Spectrum** contains tools to estimate Power Spectral Densities using methods based on the Fourier transform, parametric methods or eigenvalue analysis: 40 | 41 | * The Fourier methods are based upon correlogram, periodogram and Welch estimates. Standard tapering windows (Hann, Hamming, Blackman) and more exotic ones are available (DPSS, Taylor, ...). 42 | * The parametric methods are based on Yule-Walker, BURG, MA and ARMA, covariance and modified covariance methods. 43 | * Non-parametric methods based on eigen analysis (e.g., MUSIC) and minimum variance analysis are also implemented. 44 | * Multitapering is also available. 45 | 46 | 47 | The targeted audience is diverse. Although the use of the power spectrum of a 48 | signal is fundamental in electrical engineering (e.g. radio communications, 49 | radar), it has a wide range of applications from cosmology (e.g., detection of 50 | gravitational waves in 2016), to music (pattern detection) or biology (mass 51 | spectroscopy). 52 | 53 | 54 | Quick Installation 55 | ===================== 56 | 57 | **spectrum** is available on PyPI:: 58 | 59 | pip install spectrum 60 | 61 | and **conda**:: 62 | 63 | conda config --append channels conda-forge 64 | conda install spectrum 65 | 66 | To install the **conda** executable itself, please see https://www.continuum.io/downloads. 67 | 68 | Contributions 69 | ================== 70 | 71 | Please see `github <https://github.com/cokelaer/spectrum/issues>`_ for any issues/bugs/comments/contributions. 72 | 73 | Changelog (summary) 74 | =================== 75 | 76 | ========== ============================================================ 77 | release description 78 | ========== ============================================================ 79 | 0.9.0 * handles new numpy API (keeping backward compatibility). 80 | * included https://github.com/cokelaer/spectrum/pull/73 81 | thanks to @butala contribution to speed up fft. 82 | * fix rho calculation in the Burg algorithm thanks to contribution 83 | https://github.com/cokelaer/spectrum/pull/82 from @cl445 84 | * remove warnings/deprecation related to pkgresources, numpy 85 | and scipy. 86 | * ran black through entire code.
87 | 0.8.1 * move CI to github actions 88 | * include python 3.9 support 89 | * include PR from tikuma-lshhsc contributor to speedup 90 | eigenfre module 91 | * fix deprecated warnings 92 | ========== ============================================================ 93 | 94 | 95 | 96 | Some notebooks (external contributions) 97 | ------------------------------------------- 98 | 99 | * http://nbviewer.ipython.org/gist/juhasch/5182528 100 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | 15 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest 16 | 17 | help: 18 | @echo "Please use \`make ' where is one of" 19 | @echo " html to make standalone HTML files" 20 | @echo " dirhtml to make HTML files named index.html in directories" 21 | @echo " singlehtml to make a single large HTML file" 22 | @echo " pickle to make pickle files" 23 | @echo " json to make JSON files" 24 | @echo " htmlhelp to make HTML files and a HTML help project" 25 | @echo " qthelp to make HTML files and a qthelp project" 26 | @echo " devhelp to make HTML files and a Devhelp project" 27 | @echo " epub to make an epub" 28 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 29 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 30 | @echo " text to make text files" 31 | @echo " man to make manual pages" 32 | @echo " changes to make an overview of all changed/added/deprecated items" 33 | @echo " linkcheck to check all external links for integrity" 34 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 35 | 36 | clean: 37 | -rm -rf $(BUILDDIR)/* 38 | 39 | html: 40 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 41 | @echo 42 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 43 | 44 | dirhtml: 45 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 46 | @echo 47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 48 | 49 | singlehtml: 50 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 51 | @echo 52 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 53 | 54 | pickle: 55 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 56 | @echo 57 | @echo "Build finished; now you can process the pickle files." 58 | 59 | json: 60 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 61 | @echo 62 | @echo "Build finished; now you can process the JSON files." 63 | 64 | htmlhelp: 65 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 66 | @echo 67 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 68 | ".hhp project file in $(BUILDDIR)/htmlhelp." 
69 | 70 | qthelp: 71 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 72 | @echo 73 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 74 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 75 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ebisoftware.qhcp" 76 | @echo "To view the help file:" 77 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ebisoftware.qhc" 78 | 79 | devhelp: 80 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 81 | @echo 82 | @echo "Build finished." 83 | @echo "To view the help file:" 84 | @echo "# mkdir -p $$HOME/.local/share/devhelp/ebisoftware" 85 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/ebisoftware" 86 | @echo "# devhelp" 87 | 88 | epub: 89 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 90 | @echo 91 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 92 | 93 | latex: 94 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 95 | @echo 96 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 97 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 98 | "(use \`make latexpdf' here to do that automatically)." 99 | 100 | latexpdf: 101 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 102 | @echo "Running LaTeX files through pdflatex..." 103 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 104 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 105 | 106 | text: 107 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 108 | @echo 109 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 110 | 111 | man: 112 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 113 | @echo 114 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 115 | 116 | changes: 117 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 118 | @echo 119 | @echo "The overview file is in $(BUILDDIR)/changes." 120 | 121 | linkcheck: 122 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 123 | @echo 124 | @echo "Link check complete; look for any errors in the above output " \ 125 | "or in $(BUILDDIR)/linkcheck/output.txt." 126 | 127 | doctest: 128 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 129 | @echo "Testing of doctests in the sources finished, look at the " \ 130 | "results in $(BUILDDIR)/doctest/output.txt." 131 | -------------------------------------------------------------------------------- /doc/_static/copybutton.js: -------------------------------------------------------------------------------- 1 | $(document).ready(function() { 2 | /* Add a [>>>] button on the top-right corner of code samples to hide 3 | * the >>> and ... prompts and the output and thus make the code 4 | * copyable. 
*/ 5 | var div = $('.highlight-python .highlight,' + 6 | '.highlight-python3 .highlight') 7 | var pre = div.find('pre'); 8 | 9 | // get the styles from the current theme 10 | pre.parent().parent().css('position', 'relative'); 11 | var hide_text = 'Hide the prompts and ouput'; 12 | var show_text = 'Show the prompts and ouput'; 13 | var border_width = pre.css('border-top-width'); 14 | var border_style = pre.css('border-top-style'); 15 | var border_color = pre.css('border-top-color'); 16 | var button_styles = { 17 | 'cursor':'pointer', 'position': 'absolute', 'top': '0', 'right': '0', 18 | 'border-color': border_color, 'border-style': border_style, 19 | 'border-width': border_width, 'color': border_color, 'text-size': '75%', 20 | 'font-family': 'monospace', 'padding-left': '0.2em', 'padding-right': '0.2em' 21 | } 22 | 23 | // create and add the button to all the code blocks that contain >>> 24 | div.each(function(index) { 25 | var jthis = $(this); 26 | if (jthis.find('.gp').length > 0) { 27 | var button = $('>>>'); 28 | button.css(button_styles) 29 | button.attr('title', hide_text); 30 | jthis.prepend(button); 31 | } 32 | // tracebacks (.gt) contain bare text elements that need to be 33 | // wrapped in a span to work with .nextUntil() (see later) 34 | jthis.find('pre:has(.gt)').contents().filter(function() { 35 | return ((this.nodeType == 3) && (this.data.trim().length > 0)); 36 | }).wrap(''); 37 | }); 38 | 39 | // define the behavior of the button when it's clicked 40 | $('.copybutton').toggle( 41 | function() { 42 | var button = $(this); 43 | button.parent().find('.go, .gp, .gt').hide(); 44 | button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'hidden'); 45 | button.css('text-decoration', 'line-through'); 46 | button.attr('title', show_text); 47 | }, 48 | function() { 49 | var button = $(this); 50 | button.parent().find('.go, .gp, .gt').show(); 51 | button.next('pre').find('.gt').nextUntil('.gp, .go').css('visibility', 'visible'); 52 | button.css('text-decoration', 'none'); 53 | button.attr('title', hide_text); 54 | }); 55 | }); 56 | 57 | 58 | -------------------------------------------------------------------------------- /doc/_static/css/buttons.dataTables.min.css: -------------------------------------------------------------------------------- 1 | div.dt-button-info{position:fixed;top:50%;left:50%;width:400px;margin-top:-100px;margin-left:-200px;background-color:white;border:2px solid #111;box-shadow:3px 3px 8px rgba(0,0,0,0.3);border-radius:3px;text-align:center;z-index:21}div.dt-button-info h2{padding:0.5em;margin:0;font-weight:normal;border-bottom:1px solid #ddd;background-color:#f3f3f3}div.dt-button-info>div{padding:1em}button.dt-button,div.dt-button,a.dt-button{position:relative;display:inline-block;box-sizing:border-box;margin-right:0.333em;padding:0.5em 1em;border:1px solid #999;border-radius:2px;cursor:pointer;font-size:0.88em;color:black;white-space:nowrap;overflow:hidden;background-color:#e9e9e9;background-image:-webkit-linear-gradient(top, #fff 0%, #e9e9e9 100%);background-image:-moz-linear-gradient(top, #fff 0%, #e9e9e9 100%);background-image:-ms-linear-gradient(top, #fff 0%, #e9e9e9 100%);background-image:-o-linear-gradient(top, #fff 0%, #e9e9e9 100%);background-image:linear-gradient(to bottom, #fff 0%, #e9e9e9 100%);filter:progid:DXImageTransform.Microsoft.gradient(GradientType=0,StartColorStr='white', 
EndColorStr='#e9e9e9');-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;text-decoration:none;outline:none}button.dt-button.disabled,div.dt-button.disabled,a.dt-button.disabled{color:#999;border:1px solid #d0d0d0;cursor:default;background-color:#f9f9f9;background-image:-webkit-linear-gradient(top, #fff 0%, #f9f9f9 100%);background-image:-moz-linear-gradient(top, #fff 0%, #f9f9f9 100%);background-image:-ms-linear-gradient(top, #fff 0%, #f9f9f9 100%);background-image:-o-linear-gradient(top, #fff 0%, #f9f9f9 100%);background-image:linear-gradient(to bottom, #fff 0%, #f9f9f9 100%);filter:progid:DXImageTransform.Microsoft.gradient(GradientType=0,StartColorStr='#ffffff', EndColorStr='#f9f9f9')}button.dt-button:active:not(.disabled),button.dt-button.active:not(.disabled),div.dt-button:active:not(.disabled),div.dt-button.active:not(.disabled),a.dt-button:active:not(.disabled),a.dt-button.active:not(.disabled){background-color:#e2e2e2;background-image:-webkit-linear-gradient(top, #f3f3f3 0%, #e2e2e2 100%);background-image:-moz-linear-gradient(top, #f3f3f3 0%, #e2e2e2 100%);background-image:-ms-linear-gradient(top, #f3f3f3 0%, #e2e2e2 100%);background-image:-o-linear-gradient(top, #f3f3f3 0%, #e2e2e2 100%);background-image:linear-gradient(to bottom, #f3f3f3 0%, #e2e2e2 100%);filter:progid:DXImageTransform.Microsoft.gradient(GradientType=0,StartColorStr='#f3f3f3', EndColorStr='#e2e2e2');box-shadow:inset 1px 1px 3px #999999}button.dt-button:active:not(.disabled):hover:not(.disabled),button.dt-button.active:not(.disabled):hover:not(.disabled),div.dt-button:active:not(.disabled):hover:not(.disabled),div.dt-button.active:not(.disabled):hover:not(.disabled),a.dt-button:active:not(.disabled):hover:not(.disabled),a.dt-button.active:not(.disabled):hover:not(.disabled){box-shadow:inset 1px 1px 3px #999999;background-color:#cccccc;background-image:-webkit-linear-gradient(top, #eaeaea 0%, #ccc 100%);background-image:-moz-linear-gradient(top, #eaeaea 0%, #ccc 100%);background-image:-ms-linear-gradient(top, #eaeaea 0%, #ccc 100%);background-image:-o-linear-gradient(top, #eaeaea 0%, #ccc 100%);background-image:linear-gradient(to bottom, #eaeaea 0%, #ccc 100%);filter:progid:DXImageTransform.Microsoft.gradient(GradientType=0,StartColorStr='#eaeaea', EndColorStr='#cccccc')}button.dt-button:hover,div.dt-button:hover,a.dt-button:hover{text-decoration:none}button.dt-button:hover:not(.disabled),div.dt-button:hover:not(.disabled),a.dt-button:hover:not(.disabled){border:1px solid #666;background-color:#e0e0e0;background-image:-webkit-linear-gradient(top, #f9f9f9 0%, #e0e0e0 100%);background-image:-moz-linear-gradient(top, #f9f9f9 0%, #e0e0e0 100%);background-image:-ms-linear-gradient(top, #f9f9f9 0%, #e0e0e0 100%);background-image:-o-linear-gradient(top, #f9f9f9 0%, #e0e0e0 100%);background-image:linear-gradient(to bottom, #f9f9f9 0%, #e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(GradientType=0,StartColorStr='#f9f9f9', EndColorStr='#e0e0e0')}button.dt-button:focus:not(.disabled),div.dt-button:focus:not(.disabled),a.dt-button:focus:not(.disabled){border:1px solid #426c9e;text-shadow:0 1px 0 #c4def1;outline:none;background-color:#79ace9;background-image:-webkit-linear-gradient(top, #bddef4 0%, #79ace9 100%);background-image:-moz-linear-gradient(top, #bddef4 0%, #79ace9 100%);background-image:-ms-linear-gradient(top, #bddef4 0%, #79ace9 100%);background-image:-o-linear-gradient(top, #bddef4 0%, #79ace9 100%);background-image:linear-gradient(to bottom, #bddef4 0%, #79ace9 
100%);filter:progid:DXImageTransform.Microsoft.gradient(GradientType=0,StartColorStr='#bddef4', EndColorStr='#79ace9')}.dt-button embed{outline:none}div.dt-buttons{position:relative;float:left}div.dt-buttons.buttons-right{float:right}div.dt-button-collection{position:absolute;top:0;left:0;width:150px;margin-top:3px;padding:8px 8px 4px 8px;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.4);background-color:white;overflow:hidden;z-index:2002;border-radius:5px;box-shadow:3px 3px 5px rgba(0,0,0,0.3);z-index:2002;-webkit-column-gap:8px;-moz-column-gap:8px;-ms-column-gap:8px;-o-column-gap:8px;column-gap:8px}div.dt-button-collection button.dt-button,div.dt-button-collection div.dt-button,div.dt-button-collection a.dt-button{position:relative;left:0;right:0;display:block;float:none;margin-bottom:4px;margin-right:0}div.dt-button-collection button.dt-button:active:not(.disabled),div.dt-button-collection button.dt-button.active:not(.disabled),div.dt-button-collection div.dt-button:active:not(.disabled),div.dt-button-collection div.dt-button.active:not(.disabled),div.dt-button-collection a.dt-button:active:not(.disabled),div.dt-button-collection a.dt-button.active:not(.disabled){background-color:#dadada;background-image:-webkit-linear-gradient(top, #f0f0f0 0%, #dadada 100%);background-image:-moz-linear-gradient(top, #f0f0f0 0%, #dadada 100%);background-image:-ms-linear-gradient(top, #f0f0f0 0%, #dadada 100%);background-image:-o-linear-gradient(top, #f0f0f0 0%, #dadada 100%);background-image:linear-gradient(to bottom, #f0f0f0 0%, #dadada 100%);filter:progid:DXImageTransform.Microsoft.gradient(GradientType=0,StartColorStr='#f0f0f0', EndColorStr='#dadada');box-shadow:inset 1px 1px 3px #666}div.dt-button-collection.fixed{position:fixed;top:50%;left:50%;margin-left:-75px;border-radius:0}div.dt-button-collection.fixed.two-column{margin-left:-150px}div.dt-button-collection.fixed.three-column{margin-left:-225px}div.dt-button-collection.fixed.four-column{margin-left:-300px}div.dt-button-collection>*{-webkit-column-break-inside:avoid;break-inside:avoid}div.dt-button-collection.two-column{width:300px;padding-bottom:1px;-webkit-column-count:2;-moz-column-count:2;-ms-column-count:2;-o-column-count:2;column-count:2}div.dt-button-collection.three-column{width:450px;padding-bottom:1px;-webkit-column-count:3;-moz-column-count:3;-ms-column-count:3;-o-column-count:3;column-count:3}div.dt-button-collection.four-column{width:600px;padding-bottom:1px;-webkit-column-count:4;-moz-column-count:4;-ms-column-count:4;-o-column-count:4;column-count:4}div.dt-button-background{position:fixed;top:0;left:0;width:100%;height:100%;background:rgba(0,0,0,0.7);background:-ms-radial-gradient(center, ellipse farthest-corner, rgba(0,0,0,0.3) 0%, rgba(0,0,0,0.7) 100%);background:-moz-radial-gradient(center, ellipse farthest-corner, rgba(0,0,0,0.3) 0%, rgba(0,0,0,0.7) 100%);background:-o-radial-gradient(center, ellipse farthest-corner, rgba(0,0,0,0.3) 0%, rgba(0,0,0,0.7) 100%);background:-webkit-gradient(radial, center center, 0, center center, 497, color-stop(0, rgba(0,0,0,0.3)), color-stop(1, rgba(0,0,0,0.7)));background:-webkit-radial-gradient(center, ellipse farthest-corner, rgba(0,0,0,0.3) 0%, rgba(0,0,0,0.7) 100%);background:radial-gradient(ellipse farthest-corner at center, rgba(0,0,0,0.3) 0%, rgba(0,0,0,0.7) 100%);z-index:2001}@media screen and (max-width: 640px){div.dt-buttons{float:none !important;text-align:center}} 2 | -------------------------------------------------------------------------------- /doc/_static/css/custom.css: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | /* This is a hack to fix */ 5 | div.linenodiv pre{ 6 | line-height:1.17; 7 | } 8 | 9 | 10 | 11 | div.body p.sphx-glr-script-out { 12 | margin: -.9ex 0ex; 13 | } 14 | 15 | 16 | /* ------- alternating colors in table rows -------------------------- */ 17 | table.docutils { 18 | border: 0; 19 | border-collapse: collapse; 20 | } 21 | 22 | table.docutils tr:nth-child(even) { 23 | background-color: #F3F3FF; 24 | } 25 | table.docutils tr:nth-child(odd) { 26 | background-color: #FFFFEE; 27 | } 28 | 29 | table.docutils tr { 30 | border-style: solid; 31 | border-width: 1px 0 1px 0; 32 | border-color: #AAAAAA; 33 | } 34 | 35 | table.docutils td { 36 | border-color: #ddd; 37 | } 38 | 39 | 40 | table.longtable.docutils { 41 | max-width: 100%; 42 | margin: 0; 43 | } 44 | 45 | .section.group h3 { 46 | background-color: transparent; 47 | padding: 0px; 48 | font-size: 170%; 49 | margin: 0px; 50 | border: none; 51 | padding:0em; 52 | } 53 | 54 | .section.group h3 a { 55 | color: #355F7C; 56 | font-size: 0.8em; 57 | } 58 | 59 | /* GRID OF TWO */ 60 | .span_1_of_2 { 61 | width: 20%; 62 | } 63 | .span_1_of_3 { 64 | width: 35%; 65 | float:left; 66 | } 67 | .span_2_of_3 { 68 | width: 60%; 69 | margin-left: 0%; 70 | float:left; 71 | } 72 | 73 | 74 | @media only screen and (max-width: 639px) { 75 | .col { 76 | margin: 1% 0 1% 0%; 77 | } 78 | } 79 | 80 | @media only screen and (max-width: 639px) { 81 | .span_1_of_2 { 82 | width: 100%; 83 | margin-top: 0; 84 | margin-bottom: 2em; 85 | } 86 | } 87 | 88 | @media (min-width: 801px) and (max-width: 1135px) { 89 | .span_1_of_2 { 90 | width: 45%; 91 | } 92 | } 93 | 94 | div.index-paragraph { 95 | border: 1px solid #FBEED5; 96 | border-radius: 5px; 97 | padding: 0px 5px 0px 10px; 98 | background-color: #FFFBE8; 99 | max-width: 48em; 100 | margin: 15px auto 0px; 101 | color: #2C2C28; 102 | } 103 | 104 | div.index-paragraph p { 105 | font-size: 115%; 106 | margin: .5em 0 .5em 0; 107 | } 108 | 109 | 110 | 111 | /************ The carrousel *******************/ 112 | div.body .jcarousel-wrapper { 113 | margin: 0px -40px 10px 20px; 114 | position: relative; 115 | border: 10px solid #fff; 116 | width: 420px; 117 | margin-top:20px; 118 | -webkit-border-radius: 30px; 119 | -moz-border-radius: 30px; 120 | border-radius: 30px; 121 | -webkit-box-shadow: 0 0 2px #999; 122 | -moz-box-shadow: 0 0 2px #999; 123 | box-shadow: 0 0 2px #999; 124 | } 125 | 126 | @media (max-width: 400px) { 127 | div.body .jcarousel-wrapper { 128 | display: none; 129 | } 130 | } 131 | 132 | div.body .jcarousel { 133 | position: relative; 134 | overflow: hidden; 135 | width: 420px; 136 | height: 280px; 137 | } 138 | 139 | div.body .jcarousel ul { 140 | width: 20000em; 141 | position: absolute; 142 | /*list-style: none;*/ 143 | margin: 0; 144 | padding: 0; 145 | } 146 | 147 | div.body .jcarousel li { 148 | float: left; 149 | width: 420px; 150 | line-height : 240px; 151 | text-align: center; 152 | } 153 | 154 | div.body div.jcarousel img { 155 | max-width: 380px; 156 | max-height: 250px; 157 | vertical-align: middle; 158 | } 159 | 160 | 161 | /** Carousel Controls **/ 162 | 163 | div.body .jcarousel-control-prev, 164 | div.body .jcarousel-control-next { 165 | position: absolute; 166 | top: 105px; 167 | width: 30px; 168 | height: 30px; 169 | text-align: center; 170 | background: #4E443C; 171 | color: #fff; 172 | text-decoration: none; 173 | text-shadow: 0 0 1px #000; 174 | font: 24px/27px Arial, sans-serif; 175 | 
-webkit-border-radius: 30px; 176 | -moz-border-radius: 30px; 177 | border-radius: 30px; 178 | -webkit-box-shadow: 0 0 2px #999; 179 | -moz-box-shadow: 0 0 2px #999; 180 | box-shadow: 0 0 2px #999; 181 | } 182 | 183 | div.body .jcarousel-control-prev { 184 | left: -5px; 185 | } 186 | 187 | div.body .jcarousel-control-next { 188 | right: -5px; 189 | } 190 | 191 | div.body .jcarousel-control-prev:hover span, 192 | div.body .jcarousel-control-next:hover span { 193 | display: block; 194 | } 195 | 196 | div.body .jcarousel-control-prev.inactive, 197 | div.body .jcarousel-control-next.inactive { 198 | opacity: .5; 199 | cursor: default; 200 | } 201 | 202 | /** Carousel Pagination **/ 203 | 204 | div.body .jcarousel-pagination { 205 | position: absolute; 206 | bottom: -10px; 207 | left: 156px; /* This value must be adjusted with the number of imgs */ 208 | } 209 | 210 | div.body .jcarousel-pagination a { 211 | text-decoration: none; 212 | display: inline-block; 213 | 214 | font-size: 11px; 215 | line-height: 14px; 216 | min-width: 14px; 217 | 218 | background: #fff; 219 | color: #4E443C; 220 | border-radius: 14px; 221 | padding: 3px; 222 | text-align: center; 223 | 224 | margin-right: 2px; 225 | 226 | opacity: .75; 227 | } 228 | 229 | div.body .jcarousel-pagination a.active { 230 | background: #4E443C; 231 | color: #fff; 232 | opacity: 1; 233 | } 234 | 235 | /* For the examples */ 236 | p.sphx-glr-horizontal { 237 | margin-top: 2em; 238 | } 239 | 240 | div.snakemake { 241 | border-style: solid; 242 | border-width: 1px 1px 1px 1px; 243 | border-color: black; 244 | border-radius: 2px; 245 | padding: 1em; 246 | background: #d0d8da; 247 | } 248 | 249 | 250 | div.snakemake .document .literal-block { 251 | padding:6px; 252 | font-size:0.9em; 253 | border: 2px solid orange; 254 | } 255 | 256 | 257 | 258 | 259 | -------------------------------------------------------------------------------- /doc/_static/css/jquery.dropdown.min.css: -------------------------------------------------------------------------------- 1 | .jq-dropdown{position:absolute;z-index:1039;display:none}.jq-dropdown .jq-dropdown-menu,.jq-dropdown .jq-dropdown-panel{min-width:160px;max-width:360px;list-style:none;background:#fff;border:solid 1px #ddd;border-radius:4px;box-shadow:0 5px 10px rgba(0,0,0,0.2);overflow:visible;padding:4px 0;margin:0}.jq-dropdown .jq-dropdown-panel{padding:10px}.jq-dropdown.jq-dropdown-tip{margin-top:8px}.jq-dropdown.jq-dropdown-tip:before{position:absolute;top:-6px;left:9px;content:"";border-left:7px solid transparent;border-right:7px solid transparent;border-bottom:7px solid #ddd;display:inline-block}.jq-dropdown.jq-dropdown-tip:after{position:absolute;top:-5px;left:10px;content:"";border-left:6px solid transparent;border-right:6px solid transparent;border-bottom:6px solid #fff;display:inline-block}.jq-dropdown.jq-dropdown-tip.jq-dropdown-anchor-right:before{left:auto;right:9px}.jq-dropdown.jq-dropdown-tip.jq-dropdown-anchor-right:after{left:auto;right:10px}.jq-dropdown.jq-dropdown-scroll .jq-dropdown-menu,.jq-dropdown.jq-dropdown-scroll .jq-dropdown-panel{max-height:180px;overflow:auto}.jq-dropdown .jq-dropdown-menu li{list-style:none;padding:0 0;margin:0;line-height:18px}.jq-dropdown .jq-dropdown-menu li>a,.jq-dropdown .jq-dropdown-menu label{display:block;color:inherit;text-decoration:none;line-height:18px;padding:3px 15px;margin:0;white-space:nowrap}.jq-dropdown .jq-dropdown-menu li>a:hover,.jq-dropdown .jq-dropdown-menu label:hover{background-color:#f2f2f2;color:inherit;cursor:pointer}.jq-dropdown 
.jq-dropdown-menu .jq-dropdown-divider{font-size:1px;border-top:solid 1px #e5e5e5;padding:0;margin:5px 0} 2 | -------------------------------------------------------------------------------- /doc/_templates/layout.html: -------------------------------------------------------------------------------- 1 | {% extends "!layout.html" %} 2 | {% block extrahead %} 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | {% endblock %} 65 | -------------------------------------------------------------------------------- /doc/autosum.rst: -------------------------------------------------------------------------------- 1 | Reference guide 2 | ############### 3 | 4 | .. currentmodule:: spectrum 5 | 6 | .. toctree:: 7 | 8 | ref_fourier.rst 9 | ref_mtm.rst 10 | ref_param.rst 11 | ref_psd_other.rst 12 | ref_others.rst 13 | 14 | 15 | 16 | .. toctree:: 17 | :hidden: 18 | 19 | biblio.rst 20 | -------------------------------------------------------------------------------- /doc/biblio.rst: -------------------------------------------------------------------------------- 1 | Bibliography 2 | ============= 3 | 4 | For those interested, here are some books and articles used to design this 5 | library. 6 | 7 | Books 8 | ------ 9 | 10 | .. [Hayes] Hayes, M.H. Statistical Digital Signal Processing and Modeling. New York: John Wiley & Sons, 1996. 11 | 12 | .. [Kay] Kay, S.M. Modern Spectral Estimation. Englewood Cliffs, NJ: Prentice Hall, 1988. 13 | 14 | .. [Marple] Marple, S.L. Digital Spectral Analysis. Englewood Cliffs, NJ: Prentice Hall, 1987. 15 | 16 | .. [Orfanidis] Orfanidis, S.J. Introduction to Signal Processing. Upper Saddle River, NJ: Prentice Hall, 1996. 17 | 18 | .. [Percival] Percival, D.B., and A.T. Walden. Spectral Analysis for Physical Applications: Multitaper and Conventional Univariate Techniques. Cambridge: Cambridge University Press, 1993. 19 | 20 | .. [Proakis] Proakis, J.G., and D.G. Manolakis. Digital Signal Processing: Principles, Algorithms, and Applications. Englewood Cliffs, NJ: Prentice Hall, 1996. 21 | 22 | .. [Stoica] Stoica, P., and R. Moses. Introduction to Spectral Analysis. Upper Saddle River, NJ: Prentice Hall, 1997. 23 | 24 | .. [octave] octave software 25 | 26 | 27 | Articles 28 | -------- 29 | 30 | .. [Harris] Harris, F.J. "On the Use of Windows for Harmonic Analysis with the Discrete Fourier Transform." 31 | Proceedings of the IEEE. Vol. 66, No. 1 (January 1978). 32 | 33 | .. [Nuttall] Nuttall, Albert H. "Some Windows with Very Good Sidelobe Behavior." 34 | IEEE Transactions on Acoustics, Speech, and Signal Processing. Vol. ASSP-29 (February 1981). pp. 84-91. 35 | 36 | .. [Wax] Wax, M. and Kailath, T. Detection of signals by information Theoretic criteria, IEEE Trans Acoust. Speech Signal Process, vol ASSP-33, pp 387-392, 1985. 37 | 38 | .. [Welch] Welch, P.D. "The Use of Fast Fourier Transform for the Estimation of Power Spectra: A Method Based on Time Averaging Over Short, Modified Periodograms." IEEE Trans. Audio Electroacoust. Vol. AU-15 (June 1967). Pgs.70-73. 39 | 40 | * John Parker Burg (1968) "A new analysis technique for time series data", NATO advanced study Institute on Signal Processing with Emphasis on Underwater Acoustics, Enschede, Netherlands, Aug. 12-23, 1968. 41 | * Steven M. 
Kay and Stanley Lawrence Marple Jr.: "Spectrum analysis -- a modern perspective", Proceedings of the IEEE, Vol 69, pp 1380-1419, Nov., 1981 42 | * Abd-Krim Seghouane and Maiza Bekara "A small sample model selection criterion based on Kullback's symmetric divergence", IEEE Transactions on Signal Processing, Vol. 52(12), pp 3314-3323, Dec. 2004 43 | 44 | 45 | .. * William H. Press and Saul A. Teukolsky and William T. Vetterling and Brian P. Flannery "Numerical recipes in C, The art of scientific computing", 2nd edition,Cambridge University Press, 2002 --- Section 13.7. 46 | -------------------------------------------------------------------------------- /doc/concepts.rst: -------------------------------------------------------------------------------- 1 | Concepts 2 | =========== 3 | 4 | Frequency range 5 | ------------------- 6 | 7 | Frequency range for the PSD estimate can one of 'onesided', 'twosided', 8 | 'centered'. The default is 'onesided' for real-valued signals and 'twosided' for 9 | complex signals. 10 | 11 | More details follows this convention: 12 | 13 | * 'onesided': returns the one-sided PSD estimate of a real-valued input signal, 14 | x. If NFFT is even, PSD has length NFFT/2 + 1 over the interval [0,pi]. If NFFT is odd, the length of 15 | PSD is (NFFT+1)/2 and the interval is [0, pi) 16 | * 'twosided' returns the two-sided PSD estimate for either real or complex 17 | values. PSD has the length NFFT and is computed over the interval [0,2pi). 18 | * 'centered' returns the centered two-sided PSD estimate for either real or complex values. PSD has length NFFT and is computed over (-pi,pi] for even length NFFT and (-pi,pi] for odd length NFFT. 19 | 20 | 21 | Padding 22 | ---------- 23 | NFFT is used to compute the fft with NFFT points. If the data is shorter, the 24 | data is padded with zeros so that the frequency signal has a length equal to 25 | NFFT. The PSD's length is not equal to NFFT (see above). 26 | 27 | 28 | PSD length for real data case 29 | ------------------------------ 30 | 31 | 32 | N = 8; FFT gives X0, X1,X2,X3, XN/2, X3,X2,X1 --> PSD length=2+6=5=NFFT/2+1 33 | N = 9; FFT gives X0, X1,X2,X3, X4,X4, X3,X2,X1 --> PSD length=1+3+1=5=(NFFT+1)/2 34 | 35 | if one sets the psd, the NFFT is set manually to 2+ (N - 2) * 2 36 | if one sets the psd, we assume that the original data is even. 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /doc/contributions.rst: -------------------------------------------------------------------------------- 1 | Contributions 2 | =============== 3 | 4 | Contributors 5 | ---------------- 6 | 7 | Here is a non-exhaustive list of contributors. If you have contributed and are 8 | not listed, please update the AUTHORS.rst file in the source repository. 9 | 10 | .. include:: ../AUTHORS.rst 11 | 12 | 13 | -- 14 | 15 | 16 | For an up-to-date page of `source code contributors 17 | `_, please visit the 18 | github repository pages. 19 | 20 | How to contribute ? 21 | ----------------------- 22 | 23 | If you have found a bug, have issues or contributions, please join the 24 | development on https://github.com/cokelaer/spectrum. 
25 | 26 | 27 | Some notebooks 28 | -------------------- 29 | * http://nbviewer.ipython.org/gist/juhasch/5182528 30 | -------------------------------------------------------------------------------- /doc/create_psd.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import spectrum 3 | from spectrum import tools 4 | #from spectrum.psd import * 5 | #from spectrum.errors import * 6 | from numpy.testing import assert_array_almost_equal 7 | import pylab 8 | data = spectrum.marple_data 9 | 10 | 11 | def create_all_psd(): 12 | 13 | 14 | f = pylab.linspace(0, 1, 4096) 15 | pylab.clf() 16 | 17 | pylab.figure(figsize=(12,8)) 18 | 19 | #MA 15 order 20 | b, rho = spectrum.ma(data, 15, 30) 21 | psd = spectrum.arma2psd(B=b, rho=rho) 22 | newpsd = tools.cshift(psd, len(psd)//2) # switch positive and negative freq 23 | pylab.plot(f, 10 * pylab.log10(newpsd/max(newpsd)), label='MA 15') 24 | 25 | #ARMA 15 order 26 | a, b, rho = spectrum.arma_estimate(data, 15,15, 30) 27 | psd = spectrum.arma2psd(A=a,B=b, rho=rho) 28 | newpsd = tools.cshift(psd, len(psd)//2) # switch positive and negative freq 29 | pylab.plot(f, 10 * pylab.log10(newpsd/max(newpsd)), label='ARMA 15,15') 30 | 31 | #yulewalker 32 | ar, P,c = spectrum.aryule(data, 15, norm='biased') 33 | psd = spectrum.arma2psd(A=ar, rho=P) 34 | newpsd = tools.cshift(psd, len(psd)//2) # switch positive and negative freq 35 | 36 | pylab.plot(f, 10 * pylab.log10(newpsd/max(newpsd)), label='YuleWalker 15') 37 | 38 | #burg method 39 | ar, P,k = spectrum.arburg(data, order=15) 40 | psd = spectrum.arma2psd(A=ar, rho=P) 41 | newpsd = tools.cshift(psd, len(psd)//2) # switch positive and negative freq 42 | pylab.plot(f, 10 * pylab.log10(newpsd/max(newpsd)), label='Burg 15') 43 | 44 | #covar method 45 | af, pf, ab, pb, pv = spectrum.arcovar_marple(data, 15) 46 | psd = spectrum.arma2psd(A=af, B=ab, rho=pf) 47 | newpsd = tools.cshift(psd, len(psd)//2) # switch positive and negative freq 48 | pylab.plot(f, 10 * pylab.log10(newpsd/max(newpsd)), label='covar 15') 49 | 50 | #modcovar method 51 | a, p, pv = spectrum.modcovar_marple(data, 15) 52 | psd = spectrum.arma2psd(A=a) 53 | newpsd = tools.cshift(psd, len(psd)//2) # switch positive and negative freq 54 | pylab.plot(f, 10 * pylab.log10(newpsd/max(newpsd)), label='modcovar 15') 55 | 56 | #correlogram 57 | psd = spectrum.CORRELOGRAMPSD(data, data, lag=15) 58 | newpsd = tools.cshift(psd, len(psd)/2) # switch positive and negative freq 59 | pylab.plot(f, 10 * pylab.log10(newpsd/max(newpsd)), label='correlogram 15') 60 | 61 | #minvar 62 | psd = spectrum.minvar(data, 15) 63 | #newpsd = tools.cshift(psd, len(psd)/2) # switch positive and negative freq 64 | pylab.plot(f, 10 * pylab.log10(newpsd/max(newpsd)), label='MINVAR 15') 65 | 66 | #music 67 | psd,db = spectrum.music(data, 15, 11) 68 | pylab.plot(f, 10 * pylab.log10(psd/max(psd)), '--',label='MUSIC 15') 69 | 70 | #ev music 71 | psd,db = spectrum.ev(data, 15, 11) 72 | pylab.plot(f, 10 * pylab.log10(psd/max(psd)), '--',label='EV 15') 73 | 74 | 75 | pylab.legend(loc='upper left', prop={'size':10}, ncol=2) 76 | pylab.ylim([-80,10]) 77 | pylab.savefig('psd_all.png') 78 | 79 | 80 | create_all_psd() 81 | -------------------------------------------------------------------------------- /doc/gallery.rst: -------------------------------------------------------------------------------- 1 | 2 | .. raw:: html 3 | 4 |
[raw HTML grid stripped during extraction; recoverable content: an "Installation" panel ("conda install spectrum") and an "Examples" panel ("Visit our example gallery or jump to the main documentation"), followed by a second stripped raw-HTML block]
22 | 23 | -------------------------------------------------------------------------------- /doc/images/spectrum_logo_large.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cokelaer/spectrum/080e5aa5722e5c9b79f5d438d996b899c94f47dc/doc/images/spectrum_logo_large.png -------------------------------------------------------------------------------- /doc/images/spectrum_logo_large.xcf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cokelaer/spectrum/080e5aa5722e5c9b79f5d438d996b899c94f47dc/doc/images/spectrum_logo_large.xcf -------------------------------------------------------------------------------- /doc/images/spectrum_logo_large2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cokelaer/spectrum/080e5aa5722e5c9b79f5d438d996b899c94f47dc/doc/images/spectrum_logo_large2.png -------------------------------------------------------------------------------- /doc/images/spectrum_logo_medium.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cokelaer/spectrum/080e5aa5722e5c9b79f5d438d996b899c94f47dc/doc/images/spectrum_logo_medium.png -------------------------------------------------------------------------------- /doc/images/spectrum_logo_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cokelaer/spectrum/080e5aa5722e5c9b79f5d438d996b899c94f47dc/doc/images/spectrum_logo_small.png -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | 2 | .. rubric:: Spectrum: a Spectral Analysis Library in Python 3 | 4 | .. image:: https://badge.fury.io/py/spectrum.svg 5 | :target: https://pypi.python.org/pypi/spectrum 6 | 7 | .. image:: https://github.com/cokelaer/spectrum/actions/workflows/main.yml/badge.svg?branch=master 8 | :target: http://travis-ci.com/cokelaer/spectrum 9 | 10 | .. image:: https://coveralls.io/repos/cokelaer/spectrum/badge.png?branch=master 11 | :target: https://coveralls.io/r/cokelaer/spectrum?branch=master 12 | 13 | .. image:: https://anaconda.org/conda-forge/spectrum/badges/license.svg 14 | :target: https://anaconda.org/conda-forge/spectrum 15 | 16 | .. image:: https://anaconda.org/conda-forge/spectrum/badges/installer/conda.svg 17 | :target: https://conda.anaconda.org/conda-forge/spectrum 18 | 19 | .. image:: https://anaconda.org/conda-forge/spectrum/badges/downloads.svg 20 | :target: https://anaconda.org/conda-forge/spectrum 21 | 22 | .. image:: http://joss.theoj.org/papers/e4e34e78e4a670f2ca9a6a97ce9d3b8e/status.svg 23 | :target: http://joss.theoj.org/papers/e4e34e78e4a670f2ca9a6a97ce9d3b8e 24 | 25 | 26 | :Citation: Cokelaer et al., (2017). 'Spectrum': Spectral Analysis in Python. Journal of Open Source Software, 2(18), 348, doi:10.21105/joss.00348 27 | :Contributions: Please join https://github.com/cokelaer/spectrum 28 | :Contributors: https://github.com/cokelaer/spectrum/graphs/contributors 29 | :Issues: Please use https://github.com/cokelaer/spectrum/issues 30 | :Documentation: http://pyspectrum.readthedocs.io/ 31 | 32 | .. .. include:: overview.txt 33 | 34 | .. |img1| image:: auto_examples/images/sphx_glr_plot_allpsd_001.png 35 | :target: auto_examples/plot_allpsd.html 36 | 37 | .. 
|img2| image:: auto_examples/images/sphx_glr_plot_spectrogram_001.png 38 | :target: auto_examples/plot_spectrogram.html 39 | 40 | .. |img3| image:: auto_examples/images/sphx_glr_plot_yule_001.png 41 | :target: auto_examples/plot_yule.html 42 | 43 | .. |img4| image:: psd_all.png 44 | :target: tutorial_front_image.html 45 | 46 | .. .. |img4| image:: psd_all.png 47 | :class: align-right 48 | :width: 50% 49 | 50 | 51 | 52 | **Spectrum** is a Python library that contains tools to estimate Power 53 | Spectral Densities based on Fourier transform, Parametric methods or 54 | eigenvalues analysis. The Fourier methods are based upon correlogram, periodogram and 55 | Welch estimates. Standard tapering windows (Hann, Hamming, Blackman) and more exotic 56 | ones are available (DPSS, Taylor, ...). The parametric methods are based on Yule-Walker, BURG, MA and 57 | ARMA, covariance and modified covariance methods. Non-parametric methods based on eigen analysis (e.g., MUSIC) and minimum variance analysis are also implemented. Finally, Multitapering combines several orthogonal tapering windows. 58 | 59 | 60 | 61 | 62 | .. raw:: html 63 | 64 |
[raw HTML grid stripped during extraction; recoverable content: a "Quick installation" panel ("conda install spectrum") and an "Examples" panel ("Visit our example gallery or jump to the main documentation")]
76 | 77 | * |img1| 78 | * |img2| 79 | * |img3| 80 | * |img4| 81 | 82 | .. raw:: html 83 | 84 |
[raw HTML panel stripped during extraction; recoverable content below]

Targeted audience: the audience is diverse. The use of the power spectrum of a signal is fundamental in electrical engineering (e.g. radio communications, radar), and it has a wide range of applications, from cosmology (e.g., detection of gravitational waves in 2016) to music (pattern detection) or biology (mass spectroscopy).
108 |
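A minimal usage sketch, adapted from ``doc/create_psd.py`` in this repository, showing how a parametric (Yule-Walker) PSD estimate can be computed on the built-in Marple data set; the plotting calls are illustrative only::

    import pylab
    import spectrum
    from spectrum import tools

    data = spectrum.marple_data                           # built-in example data set
    ar, P, c = spectrum.aryule(data, 15, norm='biased')   # fit an AR(15) model (Yule-Walker)
    psd = spectrum.arma2psd(A=ar, rho=P)                  # PSD of the fitted AR model
    psd = tools.cshift(psd, len(psd) // 2)                # centre negative/positive frequencies
    f = pylab.linspace(0, 1, len(psd))
    pylab.plot(f, 10 * pylab.log10(psd / max(psd)))       # normalised PSD in dB
    pylab.show()

The other estimators exercised in ``doc/create_psd.py`` (``arburg``, ``music``, ``minvar``, ``CORRELOGRAMPSD``, ...) follow the same pattern.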
109 | 110 | 111 | 112 | 113 | Documentation 114 | ============= 115 | 116 | .. toctree:: 117 | :maxdepth: 2 118 | :numbered: 119 | 120 | Installation 121 | Quick Start 122 | Overview of available PSD methods 123 | Tutorials 124 | Reference Guide 125 | ChangeLog 126 | Contributions 127 | 128 | 129 | .. toctree:: 130 | :hidden: 131 | 132 | biblio.rst 133 | 134 | License 135 | ======= 136 | 137 | **Spectrum** is released under a BSD3 license 138 | 139 | 140 | Indices and tables 141 | ================== 142 | * :ref:`genindex` 143 | * :ref:`modindex` 144 | * :ref:`search` 145 | 146 | -------------------------------------------------------------------------------- /doc/install.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ================= 3 | 4 | Using pip 5 | -------------- 6 | 7 | **Spectrum** is available on `PYPi `_, so you should be able to type:: 8 | 9 | pip install spectrum 10 | 11 | 12 | Since **spectrum** depends on other python packages such as Numpy, Matplotlib and Scipy they will be installed automatically (if not already installed). 13 | 14 | You can also install the dependencies yourself by typing:: 15 | 16 | pip install numpy matplotlib scipy 17 | 18 | **Spectrum** source code is available on Github https://github.com/cokelaer/spectrum 19 | 20 | 21 | Conda installation 22 | --------------------- 23 | 24 | **Spectrum** is now available on CONDA. For Linux and MAC users, if you prefer to use conda, please use:: 25 | 26 | conda config --append channels conda-forge 27 | conda install spectrum 28 | 29 | 30 | From source and notes for developers 31 | ----------------------------------------- 32 | 33 | Developpers who want to get the source code can clone the repository:: 34 | 35 | git clone git@github.com:cokelaer/spectrum.git 36 | cd spectrum 37 | python setup.py install 38 | 39 | 40 | Then, you can test the library using **pytest** or compile the documentation 41 | with Sphinx. To do so, install sphinx and other dependencies:: 42 | 43 | pip install --file requirements-dev.txt 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /doc/joss/paper.bib: -------------------------------------------------------------------------------- 1 | @book{marple:1987, 2 | address = {Australia, Sydney}, 3 | author = {Marple, S. L.}, 4 | publisher = {Prentice Hall}, 5 | title = {Digital Spectral Analysis With Applications}, 6 | year = 1987 7 | } 8 | 9 | @book{percival:1993, 10 | title={Spectral analysis for physical applications: Multitaper and Conventional Univariate Techniques.}, 11 | author={Percival, Donald B and Walden, Andrew T}, 12 | year={1993}, 13 | publisher={cambridge university press} 14 | } 15 | 16 | @article{harris:1978, 17 | author = {Harris, F. 
J.}, 18 | doi = {10.1109/PROC.1978.10837}, 19 | issn = {0018-9219}, 20 | journal = {Proceedings of the IEEE}, 21 | month = jan, 22 | number = 1, 23 | pages = {51--83}, 24 | publisher = {IEEE}, 25 | title = {On the use of windows for harmonic analysis with the discrete Fouriertransform}, 26 | url = {http://dx.doi.org/10.1109/PROC.1978.10837}, 27 | volume = 66, 28 | year = 1978 29 | } 30 | 31 | @article{welch:1967, 32 | author = {Welch, P.}, 33 | journal = {IEEE Transactions on Audio and Electroacoustics}, 34 | volume = 15, 35 | number = 2, 36 | pages = {70--73}, 37 | title = {The use of fast Fourier transform for the estimation of power spectra: a method based on time averaging over short, modified periodograms}, 38 | year = 1967 39 | } 40 | 41 | @Misc{scipy:2001, 42 | author = {Eric Jones and Travis Oliphant and Pearu Peterson and others}, 43 | title = {{SciPy}: Open source scientific tools for {Python}}, 44 | year = {2001--}, 45 | url = "http://www.scipy.org/", 46 | note = {[Online; accessed ]} 47 | } 48 | 49 | 50 | @article{matplotlib:2007, 51 | author = {John D. Hunter}, 52 | title = {Matplotlib: A 2D Graphics Environment}, 53 | year = {2007}, 54 | journal= {Computing in Science \& Engineering}, 55 | volume = {9}, 56 | pages = {90-95}, 57 | doi = {DOI:10.1109/MCSE.2007.55} 58 | } 59 | 60 | @article{numpy:2011, 61 | author = {St\'{e}fan van der Walt, S. Chris Colbert and Ga\"{e}l Varoquaux}, 62 | title = {"The NumPy Array: A Structure for Efficient Numerical Computation}, 63 | journal = {Computing in Science \& Engineering}, 64 | volume = {13}, 65 | pages = {22-30}, 66 | year = {2011}, 67 | doi = {DOI:10.1109/MCSE.2011.37} 68 | } 69 | -------------------------------------------------------------------------------- /doc/joss/paper.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "'Spectrum': Spectral Analysis in Python" 3 | tags: 4 | - spectral analysis 5 | - periodogram 6 | - yule-walker 7 | - multi-tapering 8 | - burg 9 | - ARMA 10 | - eigen-values 11 | - tapering windows 12 | authors: 13 | - name: Thomas Cokelaer 14 | orcid: 0000-0001-6286-1138 15 | affiliation: 1 16 | - name: Juergen Hasch 17 | orcid: 0000-0002-9457-1220 18 | affiliation: 2 19 | affiliations: 20 | - name: Institut Pasteur - Bioinformatics and Biostatistics Hub - C3BI, USR 3756 IP CNRS - Paris, France 21 | index: 1 22 | - name: "Robert Bosch GmbH: Renningen, Baden-Württemberg, Germany" 23 | index: 2 24 | date: 2 August 2017 25 | bibliography: paper.bib 26 | --- 27 | 28 | # Summary 29 | 30 | **Spectrum** is a Python library that includes tools to estimate Power Spectral Densities. Although the use of 31 | power spectrum of a signal is fundamental in electrical engineering (e.g. radio communications, radar), it has 32 | a wide range of applications from cosmology (e.g., detection of gravitational waves in 2016), to music 33 | (pattern detection) or biology (mass spectroscopy). 34 | 35 | Methods available are based on Fourier transform, parametric methods or eigenvalues analysis. Although standard methods such as 36 | periodogram are available, less common methods (e.g. multitapering) are also implemented: 37 | 38 | - The Fourier methods are based upon correlogram, periodogram and Welch estimates. Standard tapering windows (Hann, Hamming, Blackman) and more exotic ones are available (DPSS, Taylor, ...)[@harris:1978; @welch:1967; @marple:1987]. 
39 | - The parametric methods are based on Yule-Walker, BURG, MA and ARMA, covariance and modified covariance methods [@marple:1987; @percival:1993]. 40 | - Non-parametric methods based on eigen analysis (e.g., MUSIC) and minimum variance analysis are also implemented [@marple:1987]. 41 | - Multitapering method is also available [@percival:1993] 42 | - Classical tools useful to spectral analysis and more generally signal processing such as window tapering [@harris:1978] or transfer function are also available within the library. 43 | 44 | The following image shows the different methods of spectral estimation that are available in **Spectrum**. 45 | 46 | -![https://doi.org/10.6084/m9.figshare.5270866.v1](psd_all.png) 47 | 48 | **Spectrum** relies on Matplotlib [@matplotlib:2007] for the plotting. We also 49 | use Numpy [@numpy:2011] for fast array manipulation and Scipy [@scipy:2001] for 50 | linear algebra. 51 | 52 | # References 53 | -------------------------------------------------------------------------------- /doc/joss/psd_all.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cokelaer/spectrum/080e5aa5722e5c9b79f5d438d996b899c94f47dc/doc/joss/psd_all.png -------------------------------------------------------------------------------- /doc/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cokelaer/spectrum/080e5aa5722e5c9b79f5d438d996b899c94f47dc/doc/logo.png -------------------------------------------------------------------------------- /doc/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | set SPHINXBUILD=sphinx-build 6 | set BUILDDIR=_build 7 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source 8 | if NOT "%PAPER%" == "" ( 9 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 10 | ) 11 | 12 | if "%1" == "" goto help 13 | 14 | if "%1" == "help" ( 15 | :help 16 | echo.Please use `make ^` where ^ is one of 17 | echo. html to make standalone HTML files 18 | echo. dirhtml to make HTML files named index.html in directories 19 | echo. pickle to make pickle files 20 | echo. json to make JSON files 21 | echo. htmlhelp to make HTML files and a HTML help project 22 | echo. qthelp to make HTML files and a qthelp project 23 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 24 | echo. changes to make an overview over all changed/added/deprecated items 25 | echo. linkcheck to check all external links for integrity 26 | echo. doctest to run all doctests embedded in the documentation if enabled 27 | goto end 28 | ) 29 | 30 | if "%1" == "clean" ( 31 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 32 | del /q /s %BUILDDIR%\* 33 | goto end 34 | ) 35 | 36 | if "%1" == "html" ( 37 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 38 | echo. 39 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 40 | goto end 41 | ) 42 | 43 | if "%1" == "dirhtml" ( 44 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 45 | echo. 46 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 47 | goto end 48 | ) 49 | 50 | if "%1" == "pickle" ( 51 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 52 | echo. 53 | echo.Build finished; now you can process the pickle files. 
54 | goto end 55 | ) 56 | 57 | if "%1" == "json" ( 58 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 59 | echo. 60 | echo.Build finished; now you can process the JSON files. 61 | goto end 62 | ) 63 | 64 | if "%1" == "htmlhelp" ( 65 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 66 | echo. 67 | echo.Build finished; now you can run HTML Help Workshop with the ^ 68 | .hhp project file in %BUILDDIR%/htmlhelp. 69 | goto end 70 | ) 71 | 72 | if "%1" == "qthelp" ( 73 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 74 | echo. 75 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 76 | .qhcp project file in %BUILDDIR%/qthelp, like this: 77 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\MAppleT.qhcp 78 | echo.To view the help file: 79 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\MAppleT.ghc 80 | goto end 81 | ) 82 | 83 | if "%1" == "latex" ( 84 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 85 | echo. 86 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 87 | goto end 88 | ) 89 | 90 | if "%1" == "changes" ( 91 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 92 | echo. 93 | echo.The overview file is in %BUILDDIR%/changes. 94 | goto end 95 | ) 96 | 97 | if "%1" == "linkcheck" ( 98 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 99 | echo. 100 | echo.Link check complete; look for any errors in the above output ^ 101 | or in %BUILDDIR%/linkcheck/output.txt. 102 | goto end 103 | ) 104 | 105 | if "%1" == "doctest" ( 106 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 107 | echo. 108 | echo.Testing of doctests in the sources finished, look at the ^ 109 | results in %BUILDDIR%/doctest/output.txt. 110 | goto end 111 | ) 112 | 113 | :end 114 | -------------------------------------------------------------------------------- /doc/overview.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | .. image:: psd_all.png 4 | :class: align-right 5 | :width: 50% 6 | 7 | 8 | 9 | **Spectrum** is a Python library that contains tools to estimate Power Spectral Densities based on Fourier transform, Parametric methods or eigenvalues analysis: 10 | 11 | - The Fourier methods are based upon correlogram, periodogram and 12 | Welch estimates. Standard tapering windows (Hann, Hamming, Blackman) 13 | and more exotic ones are available (DPSS, Taylor, ...). 14 | - The parametric methods are based on Yule-Walker, BURG, MA and ARMA, 15 | covariance and modified covariance methods. 16 | - Non-parametric methods based on eigen analysis (e.g., MUSIC) and minimum 17 | variance analysis are also implemented. 18 | - Finally, Multitapering combines several orthogonal tapering windows. 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /doc/psd_all.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cokelaer/spectrum/080e5aa5722e5c9b79f5d438d996b899c94f47dc/doc/psd_all.png -------------------------------------------------------------------------------- /doc/quickstart.rst: -------------------------------------------------------------------------------- 1 | User Guide 2 | =========== 3 | 4 | **Spectrum** provides classes and functions to estimate Power Spectral Densities (PSD hereafter). This documentation will not describe PSD theoretical background, which can be found in many good books and references. 
Therefore, we consider that the reader is aware of some terminology used here below. 5 | 6 | QuickStart (Periodogram example) 7 | -------------------------------- 8 | 9 | **Spectrum** can be invoked from a python shell. No GUI interface is provided yet. We recommend to use `ipython `_, which should be started with the pylab option:: 10 | 11 | ipython --pylab 12 | 13 | 14 | Then, you can import tools from **Spectrum** as follows: 15 | 16 | .. doctest:: 17 | 18 | from spectrum import Periodogram, data_cosine 19 | 20 | Here we import a tool to compute a periodogram, and a tool to create some data. 21 | Indeed, we will use :func:`~spectrum.datasets.data_cosine` to generate a toy data sets:: 22 | 23 | data = data_cosine(N=1024, A=0.1, sampling=1024, freq=200) 24 | 25 | where `data` contains a cosine signal with a frequency of 200Hz buried in white noise (amplitude 0.1). The data has a length of N=1024 and the sampling is 1024Hz. 26 | 27 | We can analyse this data using one of the Power Spectrum Estimation method provided in spectrum. All methods can be found as functions or classes. Although we strongly recommend to use the object oriented approach, the functional approach may also be useful. For now, we will use the object approach because it provides more robustness and additional tools as compared to the functional approach (e.g., plotting). So, let us create a simple periodogram:: 28 | 29 | p = Periodogram(data, sampling=1024) 30 | 31 | Here, we have created an object Periodogram. No computation has been performed yet. To run the actual estimation, you can use either:: 32 | 33 | p() 34 | 35 | or:: 36 | 37 | p.run() 38 | 39 | and finally, you can plot the resulting PSD:: 40 | 41 | p.plot(marker='o') # standard matplotlib options are accepted 42 | 43 | .. warning:: 44 | 45 | .. versionchanged:: 0.6.7 you do not need to use p() or p.run() anymore. It will be 46 | called automatically when using p.plot() or when you access to the *psd* 47 | attribute. 48 | 49 | 50 | .. plot:: 51 | :width: 80% 52 | 53 | from spectrum import * 54 | data = data_cosine(N=1024, A=0.1, sampling=1024, freq=200) 55 | p = Periodogram(data, sampling=1024) #here you just define the PSD estimate 56 | p.plot(marker='o') 57 | 58 | Since the data is purely real, the PSD (stored in p.psd) is a onesided PSD, with positive frequencies only. If the data were complex, the two-sided PSD would have been computed and plotted. For the real case, you can still plot a two-sided PSD by setting the sides option manually:: 59 | 60 | p.plot(sides='twosided') 61 | 62 | 63 | .. plot:: 64 | :width: 80% 65 | 66 | from spectrum import * 67 | data = data_cosine(N=1024, A=0.1, sampling=1024, freq=200) 68 | p = Periodogram(data, sampling=1024) #here you just define the PSD estimate 69 | p.plot(marker='o', sides='twosided') 70 | 71 | 72 | You can also look at a centered PSD around the zero frequency:: 73 | 74 | p.plot(sides='centerdc') 75 | 76 | .. warning:: By convention, the :attr:`psd` attribute contains the default PSD (either one-sided for real data or two-sided for the complex data). 77 | 78 | Since **p** is an instance of Periodogram, you can introspect the object to obtain diverse information such as the original data, the sampling, the PSD itself and so on:: 79 | 80 | p.psd # contains the PSD values 81 | p.frequencies() returns a list of frequencies 82 | print(p) # prints some information about the PSD. 
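For instance, a minimal sketch (reusing the toy cosine signal defined above; the use of ``numpy.argmax`` to locate the peak is illustrative and not part of the library) that recovers the dominant frequency from these attributes::

    import numpy
    from spectrum import Periodogram, data_cosine

    data = data_cosine(N=1024, A=0.1, sampling=1024, freq=200)
    p = Periodogram(data, sampling=1024)
    p.run()                               # compute the PSD explicitly
    freqs = numpy.array(p.frequencies())  # one-sided frequency grid in Hz
    peak = freqs[numpy.argmax(p.psd)]     # frequency of the strongest bin
    print(peak)                           # should be close to 200 Hz for this toy signal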
83 | 84 | 85 | The object approach versus functional approach (ARMA example) 86 | -------------------------------------------------------------- 87 | 88 | Object approach 89 | ~~~~~~~~~~~~~~~~~~ 90 | In the previous section, we've already illustrated the object approach using a Fourier-based method with the simple periodogram method. In addition to the Fourier-based PSD estimates, **Spectrum** also provides parametric-based estimates. Let us use :func:`~spectrum.arma.parma` class as a second illustrative example of the object approach: 91 | 92 | .. doctest:: 93 | 94 | from spectrum import parma 95 | 96 | 97 | Many functionalities available in **Spectrum** are inspired by methods found in [Marple]_. The data sample used in most of the examples is also taken from this reference and can be imported as follows (this is a 64 complex data samples):: 98 | 99 | from spectrum import marple_data 100 | 101 | The class :class:`~spectrum.arma.parma` allows to create an ARMA model and to plot the PSD, similarly to the previous example (Periodogram). First, we need to create the object:: 102 | 103 | p = parma(marple_data, 15, 15, 30, NFFT=4096) 104 | 105 | where 15,15 and 30 are arguments of the ARMA model (see :class:`spectrum.parma`), and NFFT is the number of final points. 106 | 107 | Then, computation and plot can be performed:: 108 | 109 | p.plot(norm=True, color='red', linewidth=2) 110 | 111 | .. plot:: 112 | :width: 80% 113 | 114 | from spectrum import parma, marple_data 115 | p = parma(marple_data, 15, 15, 30, NFFT=4096) 116 | p.plot(norm=True, color='red', linewidth=2) # same options as pylab.plot 117 | 118 | Since the data is complex, the PSD (stored in p.psd) is a twosided PSD. Note also that all optional arguments accepted by matplotlib function are also available in this implementation. 119 | 120 | 121 | Functional approach 122 | ~~~~~~~~~~~~~~~~~~~~ 123 | The object-oriented approach can be replaced by a functional one if required. Nevertheless, as mentionned earlier, this approach required more expertise and could easily lead to errors. The following example is identical to the previous piece of code. 124 | 125 | In order to extract the autoregressive coefficients (AR) and Moving average coefficients (MA), the :func:`~spectrum.arma.arma_estimate` can be used:: 126 | 127 | from spectrum.arma import arma_estimate, arma2psd 128 | ar, ma, rho = arma_estimate(marple_data, 15, 15, 30) 129 | 130 | Once the AR and/or MA parameters are found, the :func:`~spectrum.arma.arma2psd` function creates a two-sided PSD for you and the PSD can be plotted as follows: 131 | 132 | .. plot:: 133 | :include-source: 134 | :width: 80% 135 | 136 | from spectrum import arma_estimate, arma2psd, marple_data 137 | from pylab import plot, axis, xlabel, ylabel, grid, log10 138 | ar, ma, rho = arma_estimate(marple_data, 15, 15, 30) 139 | psd = arma2psd(ar, ma, rho=rho, NFFT=4096) 140 | plot(10*log10(psd/max(psd))) 141 | axis([0, 4096, -80, 0]) 142 | xlabel('Frequency') 143 | ylabel('power (dB)') 144 | grid(True) 145 | 146 | .. note:: 147 | 148 | #. The parameter 30 (line 3) is the correlation lag that should be twice as much as the required AR and MA coefficient number (see reference guide for details). 149 | #. Then, we plot the PSD manually (line 5), and normalise it so as to use dB units (10*log10) 150 | #. Since the data are complex data, the default plot is a two-sided PSD. 151 | #. The frequency vector is not provided. 
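As the last note points out, the frequency vector is not provided by ``arma2psd``; it has to be built by hand. A minimal sketch, assuming the usual normalised two-sided convention (frequencies spanning [0, 1) of the sampling frequency, with 0.5 corresponding to Nyquist)::

    from numpy import linspace
    NFFT = 4096
    # one frequency value per PSD bin returned by arma2psd(..., NFFT=4096)
    freqs = linspace(0, 1, NFFT, endpoint=False)
    # plot(freqs, 10*log10(psd/max(psd))) would then give a labelled frequency axis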
152 | 153 | -------------------------------------------------------------------------------- /doc/ref_fourier.rst: -------------------------------------------------------------------------------- 1 | Fourier Methods 2 | ################################################## 3 | 4 | 5 | Power Spectrum Density based on Fourier Spectrum 6 | ================================================== 7 | 8 | .. automodule:: spectrum 9 | :show-inheritance: 10 | :members: 11 | :undoc-members: 12 | :synopsis: Periodogram methods to estimate PSDs 13 | 14 | 15 | .. module:: periodogram 16 | :synopsis: Periodogram methods to estimate PSDs 17 | .. automodule:: spectrum.periodogram 18 | :members: 19 | :undoc-members: 20 | :synopsis: Periodogram methods to estimate PSDs 21 | 22 | 23 | .. module:: correlog 24 | :synopsis: PSD estimate using Correlogram methods 25 | .. automodule:: spectrum.correlog 26 | :members: 27 | :undoc-members: 28 | :synopsis: PSD estimate using Correlogram methods 29 | 30 | 31 | 32 | Tapering Windows 33 | =================== 34 | 35 | .. module:: window 36 | .. automodule:: spectrum.window 37 | :members: 38 | :undoc-members: 39 | :synopsis: window 40 | 41 | 42 | -------------------------------------------------------------------------------- /doc/ref_mtm.rst: -------------------------------------------------------------------------------- 1 | Multitapering 2 | ################ 3 | 4 | .. module:: mtm 5 | .. automodule:: spectrum.mtm 6 | :members: 7 | :undoc-members: 8 | :synopsis: Multitapering methods and DPSS windows 9 | 10 | -------------------------------------------------------------------------------- /doc/ref_others.rst: -------------------------------------------------------------------------------- 1 | 2 | Tools and classes 3 | =================== 4 | 5 | 6 | .. contents:: 7 | 8 | 9 | Classes 10 | --------- 11 | .. module:: psd 12 | .. automodule:: spectrum.psd 13 | :members: 14 | :undoc-members: 15 | :synopsis: psd classes to ease test and manipulation of PSDs 16 | 17 | 18 | Correlation 19 | ------------- 20 | 21 | .. module:: correlation 22 | .. automodule:: spectrum.correlation 23 | :members: 24 | :undoc-members: 25 | :synopsis: Correlation methods 26 | 27 | Tools 28 | ---------- 29 | 30 | .. module:: tools 31 | :synopsis: utilities to manipulate PSDs 32 | .. automodule:: spectrum.tools 33 | :members: 34 | :undoc-members: 35 | :synopsis: utilities to manipulate PSDs 36 | 37 | datasets 38 | ----------- 39 | 40 | .. module:: datasets 41 | .. automodule:: spectrum.datasets 42 | :members: 43 | :undoc-members: 44 | :synopsis: data sets to play with Spectrum functionalities. 45 | 46 | Linear Algebra Tools 47 | --------------------- 48 | 49 | cholesky 50 | ~~~~~~~~~ 51 | .. automodule:: spectrum.cholesky 52 | :members: 53 | :undoc-members: 54 | :synopsis: Cholesky 55 | 56 | eigen 57 | ~~~~~~ 58 | .. automodule:: spectrum.eigen 59 | :members: 60 | :undoc-members: 61 | :synopsis: MINEIGVAL method 62 | 63 | 64 | levinson 65 | ~~~~~~~~ 66 | 67 | .. module:: levinson 68 | :synopsis: levinson recursion tools 69 | .. automodule:: spectrum.levinson 70 | :members: 71 | :undoc-members: 72 | :synopsis: levinson recursion tools 73 | 74 | 75 | toeplitz 76 | ~~~~~~~~ 77 | .. automodule:: spectrum.toeplitz 78 | :members: 79 | :undoc-members: 80 | :synopsis: Toeplitz 81 | 82 | linalg 83 | ~~~~~~~~~ 84 | 85 | .. module:: linalg 86 | :synopsis: Linear algebra tools 87 | .. 
automodule:: spectrum.linalg 88 | :members: 89 | :undoc-members: 90 | :synopsis: Linear algebra tools 91 | 92 | Transfer function 93 | ~~~~~~~~~~~~~~~~~~~~ 94 | .. automodule:: spectrum.transfer 95 | :members: 96 | :undoc-members: 97 | :synopsis: transfer function utilities 98 | 99 | Waveforms 100 | ------------- 101 | .. module:: waveform 102 | .. automodule:: spectrum.waveform 103 | :members: 104 | :undoc-members: 105 | :synopsis: transfer function utilities 106 | 107 | 108 | Linear prediction 109 | ------------------- 110 | .. automodule:: spectrum.linear_prediction 111 | :members: 112 | :undoc-members: 113 | :synopsis: linear prediction 114 | -------------------------------------------------------------------------------- /doc/ref_param.rst: -------------------------------------------------------------------------------- 1 | Parametric methods 2 | ##################### 3 | 4 | .. contents:: 5 | 6 | 7 | 8 | Power Spectrum Density based on Parametric Methods 9 | ====================================================== 10 | ARMA and MA estimates (yule-walker) 11 | ------------------------------------ 12 | 13 | 14 | .. module:: arma 15 | :synopsis: ARMA and MA parameters and PSD estimates using YuleWalker method 16 | 17 | .. automodule:: spectrum.arma 18 | :members: 19 | :undoc-members: 20 | :synopsis: ARMA and MA parameters and PSD estimates using YuleWalker method 21 | 22 | AR estimate based on Burg algorithm 23 | ------------------------------------- 24 | 25 | .. module:: burg 26 | :synopsis: AR parameters and PSD estimates using BURG algorithm. 27 | .. automodule:: spectrum.burg 28 | :members: 29 | :undoc-members: 30 | :synopsis: AR parameters and PSD estimates using BURG algorithm. 31 | 32 | AR estimate based on YuleWalker 33 | ------------------------------------ 34 | 35 | .. module:: yulewalker 36 | :synopsis: AR parameters estimate using Yule-Walker method 37 | .. automodule:: spectrum.yulewalker 38 | :members: 39 | :undoc-members: 40 | :synopsis: AR parameters estimate using Yule-Walker method 41 | 42 | Criteria 43 | ========= 44 | 45 | .. module:: criteria 46 | :synopsis: Criteria for ARMA models 47 | .. automodule:: spectrum.criteria 48 | :members: 49 | :undoc-members: 50 | :synopsis: Criteria for ARMA models 51 | 52 | -------------------------------------------------------------------------------- /doc/ref_psd_other.rst: -------------------------------------------------------------------------------- 1 | Other Power Spectral Density estimates 2 | ========================================== 3 | 4 | .. contents:: 5 | 6 | 7 | Covariance method 8 | --------------------- 9 | .. module:: covar 10 | :synopsis: AR estimate using covariance method 11 | .. automodule:: spectrum.covar 12 | :members: 13 | :undoc-members: 14 | :synopsis: AR estimate using covariance method 15 | 16 | Eigen-analysis methods 17 | ----------------------- 18 | .. module:: eigenfre 19 | :synopsis: PSDs estimate using eigen analysis method 20 | .. automodule:: spectrum.eigenfre 21 | :members: eigen, pev, pmusic 22 | :undoc-members: 23 | :synopsis: PSDs estimate using eigen analysis method 24 | 25 | Minimum Variance estimator 26 | ----------------------------- 27 | .. module:: minvar 28 | :synopsis: Minimum Variance Spectral Estimator 29 | .. automodule:: spectrum.minvar 30 | :members: 31 | :undoc-members: 32 | :synopsis: Minimum Variance Spectral Estimator 33 | 34 | Modified Covariance method 35 | --------------------------- 36 | 37 | .. 
module:: modcovar 38 | :synopsis: PSDs estimate using modified covariance analysis 39 | .. automodule:: spectrum.modcovar 40 | :members: 41 | :undoc-members: 42 | :synopsis: PSDs estimate using modified covariance analysis 43 | 44 | 45 | Spectrogram 46 | ----------------- 47 | .. module:: spectrogram 48 | :synopsis: 49 | .. automodule:: spectrum.spectrogram 50 | :members: 51 | :undoc-members: 52 | :synopsis: 53 | 54 | -------------------------------------------------------------------------------- /doc/requirements.txt: -------------------------------------------------------------------------------- 1 | numpydoc 2 | sphinx 3 | sphinx_rtd_theme 4 | sequana_sphinxext 5 | pillow 6 | sphinx_gallery 7 | sphinx_copybutton 8 | sphinxcontrib_googleanalytics 9 | -------------------------------------------------------------------------------- /doc/source/tutorial_lpc.rst.temp: -------------------------------------------------------------------------------- 1 | 2 | LPC Examples 3 | ============= 4 | 5 | :source: `http://www.mathworks.com/help/toolbox/signal/lpc.html `_ 6 | 7 | In this tutorial, we will estimate a data series using a third-order forward predictor, and compare to the original signal. 8 | 9 | First, we create a signal data as the output of an autoregressive process 10 | driven by white noise. 11 | 12 | :: 13 | 14 | from scipy.signal import lfilter 15 | from pylab import * 16 | n = randn(50000) # Normalized white Gaussian noise 17 | x = lfilter([1],[1., 1/2., 1/3., 1/4.],n); 18 | x = x[45904:50000] # get 2096 data samples 19 | 20 | note that we use the last 4096 samples of the AR process output to avoid start-up transients. 21 | 22 | Then, we can compute the predictor coefficients, estimated signal, prediction error, and autocorrelation sequence of the prediction error:: 23 | 24 | from spectrum.lpc import lpc 25 | [a, error_variance] = lpc(x,3) 26 | est_x = lfilter([0,-a[1], -a[2]],[1],x) # Estimated signal 27 | e = x - est_x; # Prediction error 28 | acs = xcorr(e,e,maxlags=2048); #% ACS of prediction error 29 | 30 | The error_variance should be around 1. 31 | 32 | 33 | 34 | Compare the predicted signal to the original signal: 35 | 36 | .. plot:: 37 | :width: 80% 38 | :include-source: 39 | 40 | from scipy.signal import lfilter 41 | from pylab import * 42 | n = randn(50000) # Normalized white Gaussian noise 43 | x = lfilter([1],[1., 1/2., 1/3., 1/4.],n) 44 | x = x[45904:50000] # get 2096 data samples 45 | 46 | from spectrum.lpc import lpc 47 | a,error_variance = lpc(x,3) 48 | est_x = lfilter([0,-a[1], -a[2]],[1],x) # Estimated signal 49 | e = x - est_x; # Prediction error 50 | acs = xcorr(e,e,maxlags=2048); #% ACS of prediction error 51 | 52 | figure() 53 | xr = arange(1,97) 54 | plot(xr,x[4000:],xr,est_x[4000:],'--') 55 | title('Original Signal vs. LPC Estimate') 56 | xlabel('Sample Number') 57 | ylabel('Amplitude') 58 | grid(True) 59 | legend(['Original Signal','LPC Estimate']) 60 | 61 | 62 | -------------------------------------------------------------------------------- /doc/spectral_analysis.rst: -------------------------------------------------------------------------------- 1 | 2 | Quick overview of spectral analysis methods 3 | ############################################### 4 | 5 | This section gives you a quick overview of the spectral analysis methods and classes that are available in **spectrum**. You will find the different classes associated to each PSD estimates. A functional approach is also possible but is not described here. See the reference guide for more details. 
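All of these estimator classes share the same usage pattern (instantiate, run, plot); a minimal sketch using two of the classes described below on the Marple data set shipped with the library (the order 15 and NFFT values are purely illustrative)::

    from spectrum import pburg, pminvar, marple_data

    for Estimator in (pburg, pminvar):
        p = Estimator(marple_data, 15, NFFT=4096)
        p()        # run the estimation
        p.plot()   # overlay the resulting PSDs on the same figure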
6 | 7 | 8 | Non-parametric classes 9 | ========================= 10 | 11 | The Fourier-based methods provide :class:`~spectrum.periodogram.Periodogram`, :class:`~spectrum.correlog.pcorrelogram`, the Welch estimate (not implemented; see pylab.psd instead) and the multitapering class :class:`~spectrum.mtm.pmtm`. 12 | 13 | In addition to the Fourier-based methods, there are 3 types of non-parametric methods: 14 | 15 | #. The minimum variance (MV, Capon) method is implemented in the class :class:`~spectrum.minvar.pminvar`. 16 | #. Two eigen-decomposition methods (MUSIC, eigenvalue) can be found in :class:`~spectrum.eigenfre.pev` and :class:`~spectrum.eigenfre.pmusic`. 17 | #. Maximum entropy (MEM) (not yet implemented) 18 | 19 | Autoregressive spectral estimation 20 | ======================================== 21 | 22 | There are essentially 3 methods to estimate the autoregressive (AR) parameters. 23 | The first one uses the autocorrelation sequence, as in the so-called 24 | **Yule-Walker** method (see :class:`~spectrum.yulewalker.pyule`). A second method uses the reflection coefficients, as in the **Burg** algorithm (see :class:`~spectrum.burg.pburg`). These methods minimise the forward (and backward) prediction error using the Levinson recursion. Finally, a third important category of AR parameter methods is based on least-squares linear prediction, which can be further decomposed into 2 categories: one that separates the minimization of the forward and backward linear prediction squared errors, such as the 25 | **autocorrelation** or **covariance** methods (see :class:`~spectrum.covar.pcovar`), and another that performs a 26 | combined minimization of the forward and backward prediction squared errors 27 | (**modified covariance**) (see :class:`~spectrum.modcovar.pmodcovar`). 28 | 29 | Spectrum also provides the :class:`~spectrum.arma.parma` and :class:`~spectrum.arma.pma` classes. 30 | 31 | -------------------------------------------------------------------------------- /doc/tutorial_criteria.rst: -------------------------------------------------------------------------------- 1 | Criteria for Parametric methods 2 | =================================== 3 | 4 | In order to estimate the order of a parametric model, one chooses a PSD method such as the :func:`~spectrum.yulewalker.aryule` function. This function (when given an order) returns a list of AR parameters. The order selected may not be optimal (too low or too high). One tricky question is then to find a criterion to select this order in an optimal way. Several criteria are available and the following examples illustrate their usage. 5 | 6 | 7 | Example 1 8 | ---------- 9 | Let us consider a data set (the Marple data already used earlier). We use the aryule function to estimate the AR parameters. This function also returns a parameter called `rho`. This parameter, together with the length of the data and the selected order, can be used by criteria functions such as the :func:`~spectrum.criteria.AIC` function to figure out the optimal order. 10 | 11 | .. plot:: 12 | :width: 80% 13 | :include-source: 14 | 15 | import spectrum 16 | from spectrum.datasets import marple_data 17 | import pylab 18 | 19 | order = pylab.arange(1, 25) 20 | rho = [spectrum.aryule(marple_data, i, norm='biased')[1] for i in order] 21 | pylab.plot(order, spectrum.AIC(len(marple_data), rho, order), label='AIC') 22 | 23 | 24 | The optimal order corresponds to the minimum of the plotted function.
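The minimum can also be extracted programmatically; a short sketch reusing the ``order`` and ``rho`` arrays from the example above (the use of ``numpy.argmin`` is illustrative)::

    import numpy
    aic = spectrum.AIC(len(marple_data), rho, order)
    best_order = order[numpy.argmin(aic)]  # order achieving the smallest AIC
    print(best_order)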
25 | 26 | Example 2 27 | ----------- 28 | We can look at another example that was look at earlier with a AR(4): 29 | 30 | .. plot:: 31 | :width: 80% 32 | :include-source: 33 | 34 | import spectrum 35 | from spectrum.datasets import marple_data 36 | import scipy.signal 37 | import pylab 38 | 39 | # Define AR filter coefficients and some data accordingly 40 | a = [1, -2.2137, 2.9403, -2.1697, 0.9606]; 41 | x = scipy.signal.lfilter([1], a, pylab.randn(1,256)) 42 | 43 | # study different order 44 | order = pylab.arange(1, 25) 45 | rho = [spectrum.aryule(x[0], i, norm='biased')[1] for i in order] 46 | pylab.plot(order, spectrum.AIC(len(x[0]), rho, order), label='AIC') 47 | 48 | 49 | Here, is appears that an order of 4 (at least) should be used, which correspond indeed to the original choice. 50 | -------------------------------------------------------------------------------- /doc/tutorial_front_image.rst: -------------------------------------------------------------------------------- 1 | All PSD methods 2 | ================ 3 | 4 | This example is used to generate the front image. It shows how to use the different PSD classes that can be found in **Spectrum**. 5 | 6 | .. plot:: 7 | :include-source: 8 | :width: 80% 9 | 10 | import spectrum 11 | from spectrum.datasets import marple_data 12 | from pylab import legend, ylim 13 | norm = True 14 | sides = 'centerdc' 15 | 16 | # MA method 17 | p = spectrum.pma(marple_data, 15, 30, NFFT=4096) 18 | p(); p.plot(label='MA (15, 30)', norm=norm, sides=sides) 19 | 20 | # ARMA method 21 | p = spectrum.parma(marple_data, 15, 15, 30, NFFT=4096) 22 | p(); p.plot(label='ARMA(15,15)', norm=norm, sides=sides) 23 | 24 | # yulewalker 25 | p = spectrum.pyule(marple_data, 15, norm='biased', NFFT=4096) 26 | p(); p.plot(label='YuleWalker(15)', norm=norm, sides=sides) 27 | 28 | #burg method 29 | p = spectrum.pburg(marple_data, order=15, NFFT=4096) 30 | p(); p.plot(label='Burg(15)', norm=norm, sides=sides) 31 | 32 | #covar method 33 | p = spectrum.pcovar(marple_data, 15, NFFT=4096) 34 | p(); p.plot(label='Covar(15)', norm=norm, sides=sides) 35 | 36 | #modcovar method 37 | p = spectrum.pmodcovar(marple_data, 15, NFFT=4096) 38 | p(); p.plot(label='Modcovar(15)', norm=norm, sides=sides) 39 | 40 | # correlagram 41 | p = spectrum.pcorrelogram(marple_data, lag=15, NFFT=4096) 42 | p(); p.plot(label='Correlogram(15)', norm=norm, sides=sides) 43 | 44 | #minvar 45 | p = spectrum.pminvar(marple_data, 15, NFFT=4096) 46 | p(); p.plot(label='minvar (15)', norm=norm, sides=sides) 47 | 48 | #music 49 | p = spectrum.pmusic(marple_data, 15, 11, NFFT=4096) 50 | p(); p.plot(label='music (15, 11)', norm=norm, sides=sides) 51 | 52 | #ev 53 | p = spectrum.pev(marple_data, 15, 11, NFFT=4096) 54 | p(); p.plot(label='ev (15, 11)', norm=norm, sides=sides) 55 | 56 | legend(loc='upper left', prop={'size':10}, ncol=2) 57 | ylim([-80,10]) 58 | 59 | -------------------------------------------------------------------------------- /doc/tutorial_pburg.rst: -------------------------------------------------------------------------------- 1 | PBURG example 2 | ================ 3 | 4 | Here is another method to estimate an AR model, based on :func:`~spectrum.burg.arburg` . 5 | 6 | This example is inspired by an example found in Marple book. This is very similar 7 | to the previous example, where you will find more explanation (see yule-Walker tutorial). 8 | 9 | 10 | .. 
doctest:: 11 | 12 | from pylab import log10, pi, plot, xlabel, randn 13 | import scipy.signal 14 | from spectrum import arma2psd, arburg 15 | 16 | # Define AR filter coefficients 17 | a = [1, -2.2137, 2.9403, -2.1697, 0.9606]; 18 | 19 | .. doctest:: 20 | 21 | [w,H] = scipy.signal.freqz(1, a, 256) 22 | Hp = plot(w/pi, 20*log10(2*abs(H)/(2.*pi)),'r') 23 | 24 | .. doctest:: 25 | 26 | x = scipy.signal.lfilter([1], a, randn(256)) 27 | AR, rho, ref = arburg(x, 4) 28 | 29 | .. doctest:: 30 | 31 | PSD = arma2psd(AR, rho=rho, NFFT=512) 32 | PSD = PSD[len(PSD):len(PSD)//2:-1] 33 | 34 | plot(linspace(0, 1, len(PSD)), 10*log10(abs(PSD)*2./(2.*pi))) 35 | xlabel('Normalized frequency (\times \pi rad/sample)') 36 | 37 | 38 | 39 | .. plot:: 40 | :width: 80% 41 | 42 | from pylab import legend, xlabel, ylabel, plot, log10, pi, linspace, randn 43 | import scipy.signal 44 | from spectrum import arma2psd, arburg 45 | 46 | # Define AR filter coefficients 47 | a = [1, -2.2137, 2.9403, -2.1697, 0.9606]; 48 | 49 | [w,H] = scipy.signal.freqz(1, a, 256) 50 | Hp = plot(w/pi, 20*log10(2*abs(H)/(2.*pi)),'r') 51 | x = scipy.signal.lfilter([1], a, randn(256)) 52 | AR, rho, ref = arburg(x, 4) 53 | PSD = arma2psd(AR, rho=rho, NFFT=512) 54 | PSD = PSD[len(PSD):len(PSD)//2:-1] 55 | plot(linspace(0, 1, len(PSD)), 10*log10(abs(PSD)*2./(2.*pi))) 56 | xlabel('Normalized frequency (\times \pi rad/sample)') 57 | ylabel('One-sided PSD (dB/rad/sample)') 58 | legend(['PSD of model output','PSD estimate of x']) 59 | 60 | 61 | .. plot:: 62 | :width: 80% 63 | 64 | from pylab import pi, log10, randn, plot 65 | import scipy.signal 66 | from spectrum import pburg 67 | 68 | # Define AR filter coefficients 69 | a = [1, -2.2137, 2.9403, -2.1697, 0.9606]; 70 | 71 | [w,H] = scipy.signal.freqz(1, a, 256) 72 | Hp = plot(w/pi, 20*log10(2*abs(H)/(2.*pi)),'r') 73 | x = scipy.signal.lfilter([1], a, randn(256)) 74 | p = pburg(x, 4, sampling=2) 75 | p() 76 | p.plot() 77 | 78 | -------------------------------------------------------------------------------- /doc/tutorial_psd.rst: -------------------------------------------------------------------------------- 1 | What is the Spectrum object ? 2 | =============================== 3 | 4 | .. module:: spectrum.psd 5 | 6 | 7 | Normally Users should not be bother by the classes used. For instance if you use the pburg class to compute a PSD estimate base on the Burg method, you just nee to use :class:`~spectrum.burg.pburg`. Indeed, the normal usage to estimate a PSD is to use the PSD estimate starting with the letter `p` such as parma, pminvar, pburg, (exception: use Periodogram instead of pPeriodogram). 8 | 9 | 10 | Yet, it may be useful for some advanced users and developers to know that all PSD estimates are based upon the :class:`Spectrum` class (used by specialised classes such as :class:`FourierSpectrum` and :class:`ParametricSpectrum`). 11 | 12 | The following example shows how to use :class:`Spectrum`. First, let us create a Spectrum instance (first argument is the time series/data):: 13 | 14 | from spectrum import Spectrum, data_cosine, speriodogram, minvar 15 | p = Spectrum(data_cosine(), sampling=1024) 16 | 17 | Some information are stored and can be retrieved later on:: 18 | 19 | p.N 20 | p.sampling 21 | 22 | 23 | However, for now it contains no information about the PSD estimation method. For instance, if you type:: 24 | 25 | p.psd 26 | 27 | it should return a warning message telling you that the PSD has not yet been computed. 
You can compute it either independantly, and set the `psd` attribute manually:: 28 | 29 | psd = speriodogram(p.data) 30 | 31 | or you can associate a function to the `method` attribute:: 32 | 33 | p.method = minvar 34 | 35 | and then call the function with the proper optional arguments:: 36 | 37 | p(15, NFFT=4096) 38 | 39 | In both cases, the PSD is now saved in the `psd` attribute. 40 | 41 | Of course, if you already know the method you want to use, then it is much simpler to call the appropriate class directly as shown in previous sections and examples:: 42 | 43 | p = pminvar(data_cosine(), 15) 44 | p() 45 | p.plot() 46 | 47 | 48 | .. plot:: 49 | :width: 80% 50 | 51 | from spectrum import * 52 | p = pminvar(data_cosine(), 15) 53 | p() 54 | p.plot() 55 | 56 | -------------------------------------------------------------------------------- /doc/tutorial_variance_estimate.rst: -------------------------------------------------------------------------------- 1 | Variance outputs 2 | =================== 3 | 4 | The :func:`~spectrum.burg.arburg` function returns the AR parameters but also an estimation of the variance. 5 | 6 | The following example plots the estimated variance (using arburg function) versus the true variance for different values of variance. In other words, we plot how accurately the variance can be estimated. 7 | 8 | 9 | .. plot:: 10 | :width: 80% 11 | :include-source: 12 | 13 | from pylab import plot, xlabel, ylabel, plot, axis, linspace, randn 14 | import scipy.signal 15 | from spectrum import arburg 16 | 17 | # Define AR filter coefficients 18 | a = [1, -2.2137, 2.9403, -2.1697, 0.9606]; 19 | 20 | # for different variance, 21 | true_variance = linspace(0.1, 1, 20) 22 | estimated_variance = [] 23 | for tv in true_variance: 24 | x = scipy.signal.lfilter([1], a, tv**0.5 * randn(1,256)) 25 | AR, v, k = arburg(x[0], 4) # we estimate the AR parameter and variance 26 | estimated_variance.append(v) 27 | 28 | plot(true_variance, estimated_variance, 'o') 29 | xlabel('true variance') 30 | ylabel('estimated variance') 31 | plot([0,0],[1,1]) 32 | axis([0,1,0,1]) 33 | 34 | -------------------------------------------------------------------------------- /doc/tutorial_window.rst: -------------------------------------------------------------------------------- 1 | Windowing 2 | ========== 3 | 4 | .. contents:: 5 | 6 | 7 | In spectral analysis, it is common practice to multiply the input data by a tapering window. 8 | 9 | Many windows are implemented and available in the :mod:`~spectrum.window` module as well as utilities 10 | to plot the window in time and frequency domains. Some windows that have been implemented are: 11 | 12 | .. autosummary:: 13 | 14 | spectrum.window.window_bartlett 15 | spectrum.window.window_blackman 16 | spectrum.window.window_gaussian 17 | spectrum.window.window_hamming 18 | spectrum.window.window_hann 19 | spectrum.window.window_kaiser 20 | spectrum.window.window_lanczos 21 | spectrum.window.window_nuttall 22 | spectrum.window.window_tukey 23 | 24 | 25 | 26 | See :mod:`~spectrum.window` module for a full list of windows. Note also that the :mod:`~spectrum.waveform` provides additional waveforms/windows. 27 | 28 | 29 | Window object 30 | -------------- 31 | 32 | There is a class :class:`~spectrum.window.Window` that ease the manipulation of the tapering windows. It works as follows: 33 | 34 | 35 | .. 
plot:: 36 | :width: 80% 37 | :include-source: 38 | 39 | from spectrum.window import Window 40 | N = 64 41 | w = Window(N, 'hamming') 42 | w.plot_time_freq() 43 | 44 | where `N` is the length of the desired window, and "hamming" is the name. There are a lot of different windows, some of them require arguments. For example, the blackman window require an `alpha` argument:: 45 | 46 | w = Window(64, 'blackman', alpha=1) 47 | 48 | From the object, you can easily access to the window data (`w.data`) and frequency (`w.frequencies`), as well as quantities such as the equivalent noise band width: 49 | 50 | .. doctest:: 51 | 52 | >>> from spectrum.window import Window 53 | >>> N = 64 54 | >>> w = Window(N, 'rectangular') 55 | >>> w.enbw 56 | 1.0 57 | 58 | To have a list of valid names, omit the name. It should raise an error with the list of valid names. Alternatively, type:: 59 | 60 | window_names.keys() 61 | 62 | Finally, when a window require arguments, you need to know their names (e.g., in the blackman example above, the `alpha` parameter is required). 63 | 64 | The only way to get this information is to look at the function `window_` (e.g. window_blackman) and type:: 65 | 66 | window_blackman? 67 | 68 | 69 | 70 | Simple window function call 71 | ---------------------------- 72 | 73 | You can explore the module to get the window function. For instance, if you look for the Hamming window, you should find a function called :func:`~spectrum.window.window_hamming`. You can look at it as follows: 74 | 75 | .. plot:: 76 | :width: 80% 77 | :include-source: 78 | 79 | from spectrum.window import window_hamming 80 | from pylab import plot 81 | 82 | N = 64 83 | w = window_hamming(N) 84 | plot(w) 85 | 86 | Window Visualisation 87 | --------------------- 88 | 89 | If you want to have a quick look at the window shape and its frequency behaviour, you can use the :func:`~spectrum.window.window_visu`: 90 | 91 | .. plot:: 92 | :width: 80% 93 | :include-source: 94 | 95 | from spectrum.window import window_visu 96 | N = 64 97 | window_visu(N, 'hamming') 98 | 99 | Window Factory 100 | ------------------ 101 | 102 | If you do not want the object approach, you may want to use the Factory function called :func:`~spectrum.window.create_window` (the :class:`Window` class relies on this function). The previous Hamming window can be called using: 103 | 104 | .. plot:: 105 | :width: 80% 106 | :include-source: 107 | 108 | from spectrum.window import create_window 109 | from pylab import plot 110 | 111 | N = 64 112 | w = create_window(N, 'hamming') 113 | plot(w) 114 | 115 | -------------------------------------------------------------------------------- /doc/tutorial_yulewalker.rst: -------------------------------------------------------------------------------- 1 | Yule Walker example 2 | ===================== 3 | 4 | The following example illustrate the usage of the :func:`~spectrum.yulewalker.aryule` function 5 | that allows you to estimate the autoregressive coefficients of a set of data. First, we need some 6 | packages: 7 | 8 | 9 | .. doctest:: 10 | 11 | import scipy.signal 12 | from spectrum import aryule 13 | 14 | Then, we define a list of AR filter coefficients:: 15 | 16 | a = [1, -2.2137, 2.9403, -2.1697, 0.9606] 17 | 18 | and create some noisy data with them:: 19 | 20 | y = scipy.signal.lfilter([1], a, randn(1, 1024)) 21 | 22 | This array will be our data to test the Yule-Walker function, namely :func:`aryule`. Our goal is to estimate the AR coefficients from `y`. 
Since, we do not know the order of the autoregressive estimate, we first start by setting the order to 20:: 23 | 24 | ar, variance, coeff_reflection = aryule(y[0], 20) 25 | 26 | By looking at the `coeff_reflection` output, it appears that the AR coefficient are rather small for order>4 (see following plot). From the plot, chosing an order 4 seems a reasonable choice. 27 | 28 | 29 | .. plot:: 30 | :width: 80% 31 | 32 | import pylab 33 | import scipy.signal 34 | from spectrum import aryule 35 | a = [1, -2.2137, 2.9403, -2.1697, 0.9606] 36 | y = scipy.signal.lfilter([1], a, pylab.randn(1, 1024)) 37 | ar, variance, coeff_reflection = aryule(y[0], 20) 38 | pylab.stem(range(1,21), ar) 39 | pylab.title('Evolution of the first AR parameters') 40 | 41 | 42 | It is possible to plot the PSD from the `ar` values using this: 43 | 44 | .. plot:: 45 | :width: 80% 46 | :include-source: 47 | 48 | from pylab import log10, linspace, plot, xlabel, ylabel, legend, randn, pi 49 | import scipy.signal 50 | from spectrum import aryule, Periodogram, arma2psd 51 | # Create a AR model 52 | a = [1, -2.2137, 2.9403, -2.1697, 0.9606] 53 | # create some data based on these AR parameters 54 | y = scipy.signal.lfilter([1], a, randn(1, 1024)) 55 | # if we know only the data, we estimate the PSD using Periodogram 56 | p = Periodogram(y[0], sampling=2) # y is a list of list hence the y[0] 57 | p.plot(label='Model ouput') 58 | 59 | # now, let us try to estimate the original AR parameters 60 | AR, P, k = aryule(y[0], 4) 61 | PSD = arma2psd(AR, NFFT=512) 62 | PSD = PSD[len(PSD):len(PSD)//2:-1] 63 | plot(linspace(0, 1, len(PSD)), 10*log10(abs(PSD)*2./(2.*pi)), 64 | label='Estimate of y using Yule-Walker AR(4)') 65 | xlabel(r'Normalized frequency (\times \pi rad/sample)') 66 | ylabel('One-sided PSD (dB/rad/sample)') 67 | legend() 68 | 69 | 70 | This example uses the functional approach. Again, it is recommended to use the object approach with an instance from :class:`~spectrum.yulewalker.pyule` (see quickstart section). The previous example would become even simpler: 71 | 72 | .. plot:: 73 | :width: 80% 74 | :include-source: 75 | 76 | from pylab import legend, randn 77 | import scipy.signal 78 | from spectrum import Periodogram, pyule 79 | a = [1, -2.2137, 2.9403, -2.1697, 0.9606] 80 | y = scipy.signal.lfilter([1], a, randn(1, 1024)) 81 | p = Periodogram(y[0], sampling=2) 82 | p.plot() 83 | p = pyule(y[0], 4, sampling=2, scale_by_freq=False) 84 | p.plot() 85 | legend(['PSD of model output','PSD estimate of x using Yule-Walker AR(4)']) 86 | -------------------------------------------------------------------------------- /doc/tutorials.rst: -------------------------------------------------------------------------------- 1 | Tutorials 2 | =========== 3 | 4 | **Spectrum** contains PSD estimates classes and methods but also many other functionalities that are useful in spectral estimation such as linear algebra tools (e.g., Levinson recursion), tapering windows, linear prediction and so on. This section provides several tutorials related to spectral estimation. They may require some expertise to fully understand them... 5 | 6 | 7 | 8 | 9 | .. 
toctree:: 10 | :maxdepth: 1 11 | 12 | tutorial_yulewalker.rst 13 | tutorial_pburg.rst 14 | tutorial_variance_estimate.rst 15 | tutorial_window.rst 16 | tutorial_front_image.rst 17 | tutorial_psd.rst 18 | tutorial_criteria.rst 19 | 20 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: spectrum_env 2 | 3 | channels: 4 | - bioconda 5 | - defaults 6 | - r 7 | - conda-forge 8 | 9 | dependencies: 10 | - numpy 11 | - pandas 12 | - matplotlib 13 | - scipy 14 | - easydev 15 | - numpydoc 16 | - sphinx 17 | - sphinx-gallery 18 | #- pip: 19 | # - easydev>=0.9.21 20 | -------------------------------------------------------------------------------- /examples/README.txt: -------------------------------------------------------------------------------- 1 | Gallery 2 | ========== 3 | 4 | General-purpose examples for spectrum library. 5 | -------------------------------------------------------------------------------- /examples/plot_allpsd.py: -------------------------------------------------------------------------------- 1 | """ 2 | Spectral analysis of a two frequencies signal 3 | ================================================== 4 | 5 | 6 | """ 7 | ########################################################### 8 | # Context 9 | # ---------- 10 | 11 | ############################################## 12 | # Example 13 | # -------- 14 | # 15 | # In the following example, we use most of the methods available to 16 | # analyse an input signal made of the addition of two sinus and an 17 | # additive gaussian noise 18 | import numpy 19 | import spectrum 20 | from spectrum import tools 21 | from numpy.testing import assert_array_almost_equal 22 | import pylab 23 | data = spectrum.marple_data 24 | 25 | from pylab import * 26 | nn = numpy.arange(200) 27 | xx = cos(0.257*pi*nn) + sin(0.2*pi*nn) + 0.01*randn(size(nn)); 28 | 29 | def create_all_psd(): 30 | 31 | f = pylab.linspace(0, 1, 4096) 32 | 33 | pylab.figure(figsize=(12,8)) 34 | 35 | # MA model 36 | p = spectrum.pma(xx, 64,128); p(); p.plot() 37 | """ 38 | #ARMA 15 order 39 | a, b, rho = spectrum.arma_estimate(data, 15,15, 30) 40 | psd = spectrum.arma2psd(A=a,B=b, rho=rho) 41 | newpsd = tools.cshift(psd, len(psd)//2) # switch positive and negative freq 42 | pylab.plot(f, 10 * pylab.log10(newpsd/max(newpsd)), label='ARMA 15,15') 43 | """ 44 | # YULE WALKER 45 | p = spectrum.pyule(xx, 7 , NFFT=4096, scale_by_freq=False); p.plot() 46 | # equivalent to 47 | # plot([x for x in p.frequencies()] , 10*log10(p.psd)); grid(True) 48 | 49 | #burg method 50 | p = spectrum.pburg(xx, 7, scale_by_freq=False); p.plot() 51 | 52 | #pcovar 53 | p = spectrum.pcovar(xx, 7, scale_by_freq=False); p.plot() 54 | 55 | #pmodcovar 56 | p = spectrum.pmodcovar(xx, 7, scale_by_freq=False); p.plot() 57 | 58 | # correlogram 59 | p = spectrum.pcorrelogram(xx, lag=60, NFFT=512, scale_by_freq=False); p.plot() 60 | 61 | # minvar 62 | p = spectrum.pminvar(xx, 7, NFFT=256, scale_by_freq=False); p.plot() 63 | 64 | # pmusic 65 | p = spectrum.pmusic(xx, 10,4, scale_by_freq=False); p.plot() 66 | 67 | # pmusic 68 | p = spectrum.pev(xx, 10, 4, scale_by_freq=False); p.plot() 69 | 70 | # periodogram 71 | p = spectrum.Periodogram(xx, scale_by_freq=False); p.plot() 72 | 73 | # 74 | legend( ["MA 32", "pyule 7", "pburg 7", "pcovar", "pmodcovar", "correlogram", 75 | "minvar", "pmusic", "pev", "periodgram"]) 76 | 77 | 78 | pylab.ylim([-80,80]) 79 | 80 | 
create_all_psd() 81 | -------------------------------------------------------------------------------- /examples/plot_spectrogram.py: -------------------------------------------------------------------------------- 1 | """ 2 | spectrogram 3 | ======================= 4 | 5 | 6 | """ 7 | ########################################################### 8 | # Context 9 | # ---------- 10 | 11 | ############################################## 12 | # Example 13 | # -------- 14 | # 15 | # In the following example, we compute and plot a spectrogram 16 | # from a signal emitted by a dolphin to see the time-frequency components 17 | # 18 | from spectrum import Spectrogram, dolphin_filename, readwav 19 | data, samplerate = readwav(dolphin_filename) 20 | 21 | p = Spectrogram(data, ws=128, W=4096, sampling=samplerate) 22 | p.periodogram() 23 | p.plot() 24 | 25 | -------------------------------------------------------------------------------- /examples/plot_yule.py: -------------------------------------------------------------------------------- 1 | """ 2 | pyule 3 | ======================= 4 | 5 | 6 | """ 7 | ########################################################### 8 | # Context 9 | # ---------- 10 | 11 | ############################################## 12 | # Example 13 | # -------- 14 | # 15 | # In the following example, we use pyule to analyse 1000 noisy signals. 16 | # We plot them in gray and the average in red. 17 | # 18 | from spectrum import pyule 19 | from scipy.signal import lfilter 20 | from pylab import plot, mean, log10, randn 21 | from numpy import array 22 | allpsds = [] 23 | a = array([1,-2.2137,2.9403,-2.1697,0.9606]) 24 | 25 | for this in range(1000): 26 | x = lfilter([1], a, randn(1,256)) 27 | p = pyule(x[0],4, scale_by_freq=False, NFFT=256) 28 | p.plot(color='gray', alpha=0.1) 29 | allpsds.append(p.psd) 30 | 31 | plot(p.frequencies(), 10*log10(mean(allpsds, axis=0)), "red") 32 | 33 | 34 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | easydev 2 | numpy 3 | scipy 4 | matplotlib 5 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | 2 | [upload_docs] 3 | upload_dir=doc/build/html/ 4 | 5 | 6 | [upload_sphinx] 7 | upload-dir = doc/build/html 8 | 9 | 10 | 11 | 12 | #[tool:pytest] 13 | # do not use --cov because it interfers with travis command 14 | #addopts= --durations=10 --verbose --cov spectrum --cov-report term-missing 15 | 16 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import sys 4 | 5 | pj = os.path.join 6 | 7 | from distutils.core import Extension 8 | 9 | from setuptools import find_packages, setup 10 | 11 | _MAJOR = 0 12 | _MINOR = 9 13 | _MICRO = 0 14 | version = "%d.%d.%d" % (_MAJOR, _MINOR, _MICRO) 15 | release = "%d.%d" % (_MAJOR, _MINOR) 16 | 17 | 18 | with open("README.rst") as f: 19 | readme = f.read() 20 | 21 | 22 | setup( 23 | name="spectrum", 24 | version=version, 25 | description="Spectrum Analysis Tools", 26 | long_description=readme, 27 | author="Thomas Cokelaer", 28 | author_email="cokelaer@gmail.com", 29 | url="http://github.com/cokelaer/spectrum", 30 | license="new BSD", 31 | ext_modules=[ 32 | Extension( 33 | "spectrum.mydpss", 34 | [ 35 | 
"src/cpp/mydpss.c", 36 | ], 37 | export_symbols=["multitap"], 38 | ) 39 | ], 40 | packages=find_packages("src"), 41 | package_dir={"": "src"}, 42 | # Dependencies 43 | install_requires=open("requirements.txt").read(), 44 | extras_require={ 45 | "plot": ["matplotlib"], 46 | "testing": [ 47 | "pytest", 48 | "pytest-cov", 49 | "pytest-xdist", 50 | "pytest-mock", 51 | "pytest-timeout", 52 | "pytest-runner", 53 | "coveralls", 54 | ], 55 | "doc": ["sphinx", "sphinx_rtd_theme"], 56 | }, 57 | package_data={ 58 | "spectrum.data": ["*"], 59 | }, 60 | platforms=["Linux"], 61 | classifiers=[ 62 | "Development Status :: 1 - Planning", 63 | "Environment :: Console", 64 | "Intended Audience :: Developers", 65 | "Intended Audience :: Education", 66 | "Intended Audience :: Financial and Insurance Industry", 67 | "Intended Audience :: Information Technology", 68 | "Intended Audience :: Science/Research", 69 | "Intended Audience :: Telecommunications Industry", 70 | "License :: OSI Approved :: BSD License", 71 | "Operating System :: POSIX :: Linux", 72 | "Operating System :: Unix", 73 | "Programming Language :: Python :: 2.7", 74 | "Programming Language :: Python :: 3.5", 75 | "Programming Language :: Python :: 3.7", 76 | "Topic :: Scientific/Engineering", 77 | ], 78 | ) 79 | -------------------------------------------------------------------------------- /src/cpp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cokelaer/spectrum/080e5aa5722e5c9b79f5d438d996b899c94f47dc/src/cpp/__init__.py -------------------------------------------------------------------------------- /src/spectrum/__init__.py: -------------------------------------------------------------------------------- 1 | from importlib import metadata 2 | 3 | 4 | def get_package_version(package_name): 5 | try: 6 | version = metadata.version(package_name) 7 | return version 8 | except metadata.PackageNotFoundError: # pragma no cover 9 | return f"{package_name} not found" 10 | 11 | 12 | version = get_package_version("versionix") 13 | 14 | import logging 15 | 16 | 17 | def spectrum_set_level(level): 18 | assert level in ["DEBUG", "INFO", "CRITICAL", "ERROR", "WARNING"] 19 | logging.getLogger().setLevel(level) 20 | 21 | 22 | #: default number of samples used to compute FFT 23 | default_NFFT = 4096 24 | 25 | from .arma import * 26 | from .burg import * 27 | from .cholesky import * 28 | from .correlation import * 29 | from .correlog import * 30 | from .covar import * 31 | from .criteria import * 32 | from .datasets import * 33 | from .eigen import * 34 | from .eigenfre import * 35 | from .io import * 36 | from .levinson import * 37 | from .linalg import * 38 | from .linear_prediction import * 39 | 40 | # from lms import * 41 | from .lpc import * 42 | from .minvar import * 43 | from .modcovar import * 44 | from .mtm import * 45 | from .periodogram import * 46 | from .psd import * 47 | from .spectrogram import * 48 | from .toeplitz import * 49 | from .tools import * 50 | from .transfer import * 51 | from .waveform import * 52 | from .window import * 53 | from .yulewalker import * 54 | -------------------------------------------------------------------------------- /src/spectrum/cholesky.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. topic:: Cholesky methods 3 | 4 | .. autosummary:: 5 | 6 | CHOLESKY 7 | 8 | .. 
codeauthor:: Thomas Cokelaer, 2011 9 | """ 10 | import numpy 11 | 12 | 13 | __all__ = ["CHOLESKY"] 14 | 15 | 16 | def _numpy_cholesky(A, B): 17 | """Solve Ax=B using the numpy Cholesky solver 18 | 19 | A = LU 20 | 21 | in the case where A is square and Hermitian, A = L.L* where L* is the 22 | transposed and conjugated matrix of L 23 | 24 | Ly = b 25 | 26 | where 27 | 28 | Ux=y 29 | 30 | so x = U^{-1} y 31 | where U = L* 32 | and y = L^{-1} B 33 | """ 34 | L = numpy.linalg.cholesky(A) 35 | # A=L*numpy.transpose(L).conjugate() 36 | # Ly = b 37 | y = numpy.linalg.solve(L,B) 38 | # Ux = y 39 | x = numpy.linalg.solve(L.transpose().conjugate(),y) 40 | return x, L 41 | 42 | def _numpy_solver(A, B): 43 | """This function solves Ax=B directly without taking advantage of the input 44 | matrix properties. 45 | """ 46 | x = numpy.linalg.solve(A, B) 47 | return x 48 | 49 | def CHOLESKY(A, B, method='scipy'): 50 | """Solve the linear system `AX=B` using the Cholesky method. 51 | 52 | :param A: an input Hermitian matrix 53 | :param B: an array 54 | :param str method: a choice of method in [numpy, scipy, numpy_solver] 55 | 56 | * `numpy_solver` relies entirely on numpy.linalg.solve (no Cholesky decomposition) 57 | * `numpy` relies on numpy.linalg.cholesky for the decomposition and 58 | numpy.linalg.solve for the inversion. 59 | * `scipy` uses scipy.linalg.cholesky for the decomposition and 60 | scipy.linalg.cho_solve for the inversion. 61 | 62 | .. rubric:: Description 63 | 64 | When a matrix is square and Hermitian (symmetric with lower part being 65 | the complex conjugate of the upper one), then the usual triangular 66 | factorization takes on the special form: 67 | 68 | .. math:: A = R R^H 69 | 70 | where :math:`R` is a lower triangular matrix with nonzero real principal 71 | diagonal elements. The input matrix can be made of complex data. Then, the 72 | inversion to find :math:`x` is made as follows: 73 | 74 | .. math:: Ry = B 75 | 76 | and 77 | 78 | .. math:: R^H x = y 79 | 80 | .. doctest:: 81 | 82 | >>> import numpy 83 | >>> from spectrum import CHOLESKY 84 | >>> A = numpy.array([[ 2.0+0.j , 0.5-0.5j, -0.2+0.1j], 85 | ... [ 0.5+0.5j, 1.0+0.j , 0.3-0.2j], 86 |
[-0.2-0.1j, 0.3+0.2j, 0.5+0.j ]]) 87 | >>> B = numpy.array([ 1.0+3.j , 2.0-1.j , 0.5+0.8j]) 88 | >>> CHOLESKY(A, B) 89 | array([ 0.95945946+5.25675676j, 4.41891892-7.04054054j, 90 | -5.13513514+6.35135135j]) 91 | 92 | """ 93 | if method == 'numpy_solver': 94 | X = _numpy_solver(A,B) 95 | return X 96 | elif method == 'numpy': 97 | X, _L = _numpy_cholesky(A, B) 98 | return X 99 | elif method == 'scipy': 100 | import scipy.linalg 101 | L = scipy.linalg.cholesky(A) 102 | X = scipy.linalg.cho_solve((L, False), B) 103 | else: 104 | raise ValueError('method must be numpy_solver, numpy_cholesky or cholesky_inplace') 105 | return X 106 | -------------------------------------------------------------------------------- /src/spectrum/data/DOLPHINS.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cokelaer/spectrum/080e5aa5722e5c9b79f5d438d996b899c94f47dc/src/spectrum/data/DOLPHINS.wav -------------------------------------------------------------------------------- /src/spectrum/data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cokelaer/spectrum/080e5aa5722e5c9b79f5d438d996b899c94f47dc/src/spectrum/data/__init__.py -------------------------------------------------------------------------------- /src/spectrum/data/sunspot.dat: -------------------------------------------------------------------------------- 1 | 1700 5 2 | 1701 11 3 | 1702 16 4 | 1703 23 5 | 1704 36 6 | 1705 58 7 | 1706 29 8 | 1707 20 9 | 1708 10 10 | 1709 8 11 | 1710 3 12 | 1711 0 13 | 1712 0 14 | 1713 2 15 | 1714 11 16 | 1715 27 17 | 1716 47 18 | 1717 63 19 | 1718 60 20 | 1719 39 21 | 1720 28 22 | 1721 26 23 | 1722 22 24 | 1723 11 25 | 1724 21 26 | 1725 40 27 | 1726 78 28 | 1727 122 29 | 1728 103 30 | 1729 73 31 | 1730 47 32 | 1731 35 33 | 1732 11 34 | 1733 5 35 | 1734 16 36 | 1735 34 37 | 1736 70 38 | 1737 81 39 | 1738 111 40 | 1739 101 41 | 1740 73 42 | 1741 40 43 | 1742 20 44 | 1743 16 45 | 1744 5 46 | 1745 11 47 | 1746 22 48 | 1747 40 49 | 1748 60 50 | 1749 80.9 51 | 1750 83.4 52 | 1751 47.7 53 | 1752 47.8 54 | 1753 30.7 55 | 1754 12.2 56 | 1755 9.6 57 | 1756 10.2 58 | 1757 32.4 59 | 1758 47.6 60 | 1759 54.0 61 | 1760 62.9 62 | 1761 85.9 63 | 1762 61.2 64 | 1763 45.1 65 | 1764 36.4 66 | 1765 20.9 67 | 1766 11.4 68 | 1767 37.8 69 | 1768 69.8 70 | 1769 106.1 71 | 1770 100.8 72 | 1771 81.6 73 | 1772 66.5 74 | 1773 34.8 75 | 1774 30.6 76 | 1775 7.0 77 | 1776 19.8 78 | 1777 92.5 79 | 1778 154.4 80 | 1779 125.9 81 | 1780 84.8 82 | 1781 68.1 83 | 1782 38.5 84 | 1783 22.8 85 | 1784 10.2 86 | 1785 24.1 87 | 1786 82.9 88 | 1787 132.0 89 | 1788 130.9 90 | 1789 118.1 91 | 1790 89.9 92 | 1791 66.6 93 | 1792 60.0 94 | 1793 46.9 95 | 1794 41.0 96 | 1795 21.3 97 | 1796 16.0 98 | 1797 6.4 99 | 1798 4.1 100 | 1799 6.8 101 | 1800 14.5 102 | 1801 34.0 103 | 1802 45.0 104 | 1803 43.1 105 | 1804 47.5 106 | 1805 42.2 107 | 1806 28.1 108 | 1807 10.1 109 | 1808 8.1 110 | 1809 2.5 111 | 1810 0.0 112 | 1811 1.4 113 | 1812 5.0 114 | 1813 12.2 115 | 1814 13.9 116 | 1815 35.4 117 | 1816 45.8 118 | 1817 41.1 119 | 1818 30.1 120 | 1819 23.9 121 | 1820 15.6 122 | 1821 6.6 123 | 1822 4.0 124 | 1823 1.8 125 | 1824 8.5 126 | 1825 16.6 127 | 1826 36.3 128 | 1827 49.6 129 | 1828 64.2 130 | 1829 67.0 131 | 1830 70.9 132 | 1831 47.8 133 | 1832 27.5 134 | 1833 8.5 135 | 1834 13.2 136 | 1835 56.9 137 | 1836 121.5 138 | 1837 138.3 139 | 1838 103.2 140 | 1839 85.7 141 | 1840 64.6 142 | 1841 36.7 143 | 1842 24.2 144 | 1843 
10.7 145 | 1844 15.0 146 | 1845 40.1 147 | 1846 61.5 148 | 1847 98.5 149 | 1848 124.7 150 | 1849 96.3 151 | 1850 66.6 152 | 1851 64.5 153 | 1852 54.1 154 | 1853 39.0 155 | 1854 20.6 156 | 1855 6.7 157 | 1856 4.3 158 | 1857 22.7 159 | 1858 54.8 160 | 1859 93.8 161 | 1860 95.8 162 | 1861 77.2 163 | 1862 59.1 164 | 1863 44.0 165 | 1864 47.0 166 | 1865 30.5 167 | 1866 16.3 168 | 1867 7.3 169 | 1868 37.6 170 | 1869 74.0 171 | 1870 139.0 172 | 1871 111.2 173 | 1872 101.6 174 | 1873 66.2 175 | 1874 44.7 176 | 1875 17.0 177 | 1876 11.3 178 | 1877 12.4 179 | 1878 3.4 180 | 1879 6.0 181 | 1880 32.3 182 | 1881 54.3 183 | 1882 59.7 184 | 1883 63.7 185 | 1884 63.5 186 | 1885 52.2 187 | 1886 25.4 188 | 1887 13.1 189 | 1888 6.8 190 | 1889 6.3 191 | 1890 7.1 192 | 1891 35.6 193 | 1892 73.0 194 | 1893 85.1 195 | 1894 78.0 196 | 1895 64.0 197 | 1896 41.8 198 | 1897 26.2 199 | 1898 26.7 200 | 1899 12.1 201 | 1900 9.5 202 | 1901 2.7 203 | 1902 5.0 204 | 1903 24.4 205 | 1904 42.0 206 | 1905 63.5 207 | 1906 53.8 208 | 1907 62.0 209 | 1908 48.5 210 | 1909 43.9 211 | 1910 18.6 212 | 1911 5.7 213 | 1912 3.6 214 | 1913 1.4 215 | 1914 9.6 216 | 1915 47.4 217 | 1916 57.1 218 | 1917 103.9 219 | 1918 80.6 220 | 1919 63.6 221 | 1920 37.6 222 | 1921 26.1 223 | 1922 14.2 224 | 1923 5.8 225 | 1924 16.7 226 | 1925 44.3 227 | 1926 63.9 228 | 1927 69.0 229 | 1928 77.8 230 | 1929 64.9 231 | 1930 35.7 232 | 1931 21.2 233 | 1932 11.1 234 | 1933 5.7 235 | 1934 8.7 236 | 1935 36.1 237 | 1936 79.7 238 | 1937 114.4 239 | 1938 109.6 240 | 1939 88.8 241 | 1940 67.8 242 | 1941 47.5 243 | 1942 30.6 244 | 1943 16.3 245 | 1944 9.6 246 | 1945 33.2 247 | 1946 92.6 248 | 1947 151.6 249 | 1948 136.3 250 | 1949 134.7 251 | 1950 83.9 252 | 1951 69.4 253 | 1952 31.5 254 | 1953 13.9 255 | 1954 4.4 256 | 1955 38.0 257 | 1956 141.7 258 | 1957 190.2 259 | 1958 184.8 260 | 1959 159.0 261 | 1960 112.3 262 | 1961 53.9 263 | 1962 37.6 264 | 1963 27.9 265 | 1964 10.2 266 | 1965 15.1 267 | 1966 47.0 268 | 1967 93.8 269 | 1968 105.9 270 | 1969 105.5 271 | 1970 104.5 272 | 1971 66.6 273 | 1972 68.9 274 | 1973 38.0 275 | 1974 34.5 276 | 1975 15.5 277 | 1976 12.6 278 | 1977 27.5 279 | 1978 92.5 280 | 1979 155.4 281 | 1980 154.6 282 | 1981 140.4 283 | 1982 115.9 284 | 1983 66.6 285 | 1984 45.9 286 | 1985 17.9 287 | 1986 13.4 288 | 1987 29.4 289 | 1988 100.2 290 | 1989 157.6 291 | 1990 142.6 292 | 1991 145.7 293 | 1992 94.3 294 | 1993 54.6 295 | 1994 29.9 296 | 1995 17.5 297 | 1996 8.6 298 | 1997 21.5 299 | 1998 64.3 300 | 1999 93.3 301 | 2000 119.6 302 | 2001 111.0 303 | 2002 104.0 304 | 2003 63.7 305 | 2004 40.4 306 | -------------------------------------------------------------------------------- /src/spectrum/datasets.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. topic:: The :mod:`datasets` module provides data sets to test the 3 | **Spectrum** functionalities. 4 | 5 | .. autosummary:: 6 | 7 | data_cosine 8 | marple_data 9 | TimeSeries 10 | 11 | .. 
codeauthor:: Thomas Cokelaer 2011 12 | 13 | :Reference: [Marple]_ 14 | """ 15 | import numpy as np 16 | from numpy import arange, cos, pi, sin 17 | from numpy.random import randn 18 | 19 | # example marple app 3.A 20 | A_cholesky = np.array( 21 | [[2 + 0.0j, 0.5 - 0.5j, -0.2 + 0.1j], [0.5 + 0.5j, 1, 0.3 - 0.2j], [-0.2 - 0.1j, 0.3 + 0.2j, 0.5]], dtype=complex 22 | ) 23 | a_cholesky = np.array([2 + 0.0j, 0.5 - 0.5j, 1.0, -0.2 + 0.1j, 0.3 - 0.2j, 0.5], dtype=complex) 24 | B_cholesky = np.array([1 + 3j, 2 - 1j, 0.5 + 0.8j], dtype=complex) 25 | # should return 26 | sol_cholesky = np.array([0.95945946 + 5.25675676j, 4.41891892 - 7.04054054j, -5.13513514 + 6.35135135j]) 27 | 28 | 29 | #: 64-complex data length from Marple reference [Marple]_ 30 | marple_data = [ 31 | 1.349839091 + 2.011167288j, 32 | -2.117270231 + 0.817693591j, 33 | -1.786421657 - 1.291698933j, 34 | 1.162236333 - 1.482598066j, 35 | 1.641072035 + 0.372950256j, 36 | 0.072213709 + 1.828492761j, 37 | -1.564284801 + 0.824533045j, 38 | -1.080565453 - 1.869776845j, 39 | 0.927129090 - 1.743406534j, 40 | 1.891979456 + 0.972347319j, 41 | -0.105391249 + 1.602209687j, 42 | -1.618367076 + 0.637513280j, 43 | -0.945704579 - 1.079569221j, 44 | 1.135566235 - 1.692269921j, 45 | 1.855816245 + 0.986030221j, 46 | -1.032083511 + 1.414613724j, 47 | -1.571600199 + 0.089229003j, 48 | -0.243143231 - 1.444692016j, 49 | 0.838980973 - 0.985756695j, 50 | 1.516003132 + 0.928058863j, 51 | 0.257979959 + 1.170676708j, 52 | -2.057927608 + 0.343388647j, 53 | -0.578682184 - 1.441192508j, 54 | 1.584011555 - 1.011150956j, 55 | 0.614114344 + 1.508176208j, 56 | -0.710567117 + 1.130144477j, 57 | -1.100205779 - 0.584209621j, 58 | 0.150702029 - 1.217450142j, 59 | 0.748856127 - 0.804411888j, 60 | 0.795235813 + 1.114466429j, 61 | -0.071512341 + 1.017092347j, 62 | -1.732939839 - 0.283070654j, 63 | 0.404945314 - 0.781708360j, 64 | 1.293794155 - 0.352723092j, 65 | -0.119905084 + 0.905150294j, 66 | -0.522588372 + 0.437393665j, 67 | -0.974838495 - 0.670074046j, 68 | 0.275279552 - 0.509659231j, 69 | 0.854210198 - 0.008278057j, 70 | 0.289598197 + 0.506233990j, 71 | -0.283553183 + 0.250371397j, 72 | -0.359602571 - 0.135261074j, 73 | 0.102775671 - 0.466086507j, 74 | -0.009722650 + 0.030377999j, 75 | 0.185930878 + 0.808869600j, 76 | -0.243692726 - 0.200126961j, 77 | -0.270986766 - 0.460243553j, 78 | 0.399368525 + 0.249096692j, 79 | -0.250714004 - 0.362990230j, 80 | 0.419116348 - 0.389185309j, 81 | -0.050458215 + 0.702862442j, 82 | -0.395043731 + 0.140808776j, 83 | 0.746575892 - 0.126762003j, 84 | -0.559076190 + 0.523169816j, 85 | -0.344389260 - 0.913451135j, 86 | 0.733228028 - 0.006237417j, 87 | -0.480273813 + 0.509469569j, 88 | 0.033316225 + 0.087501869j, 89 | -0.321229130 - 0.254548967j, 90 | -0.063007891 - 0.499800682j, 91 | 1.239739418 - 0.013479125j, 92 | 0.083303742 + 0.673984587j, 93 | -0.762731433 + 0.408971250j, 94 | -0.895898521 - 0.364855707j, 95 | ] 96 | 97 | 98 | def data_cosine(N=1024, A=0.1, sampling=1024.0, freq=200): 99 | r"""Return a noisy cosine at a given frequency. 100 | 101 | :param N: the final data size 102 | :param A: the strength of the noise 103 | :param float sampling: sampling frequency of the input :attr:`data`. 104 | :param float freq: the frequency :math:`f_0` of the cosine. 105 | 106 | .. math:: x[t] = cos(2\pi t * f_0) + A w[t] 107 | 108 | where w[t] is a white noise of variance 1. 109 | 110 | .. 
doctest:: 111 | 112 | >>> from spectrum import data_cosine 113 | >>> a = data_cosine(N=1024, sampling=1024, A=0.5, freq=100) 114 | 115 | """ 116 | t = arange(0, float(N) / sampling, 1.0 / sampling) 117 | x = cos(2.0 * pi * t * freq) + A * randn(t.size) 118 | return x 119 | 120 | 121 | def data_two_freqs(N=200): 122 | """A simple test example with two close frequencies""" 123 | nn = arange(N) 124 | xx = cos(0.257 * pi * nn) + sin(0.2 * pi * nn) + 0.01 * randn(nn.size) 125 | return xx 126 | 127 | 128 | def spectrum_data(filename): 129 | """Simple utilities to retrieve data sets from""" 130 | import inspect 131 | from pathlib import Path 132 | 133 | import spectrum 134 | 135 | spectrum_path = Path(inspect.getfile(spectrum)).parent 136 | 137 | filename = spectrum_path / "data" / filename 138 | if filename.exists(): 139 | return str(filename) 140 | else: 141 | raise Exception("unknown file %s" % filename) 142 | 143 | 144 | #: filename of a WAV data file 150,000 data points 145 | dolphin_filename = spectrum_data("DOLPHINS.wav") 146 | 147 | 148 | class TimeSeries(object): 149 | """A simple Base Class for various data sets. 150 | 151 | .. doctest:: 152 | 153 | >>> from spectrum import TimeSeries 154 | >>> data = [1, 2, 3, 4, 3, 2, 1, 0 ] 155 | >>> ts = TimeSeries(data, sampling=1) 156 | >>> ts.plot() 157 | >>> ts.dt 158 | 1.0 159 | 160 | """ 161 | 162 | def __init__(self, data, sampling=1): 163 | """ 164 | 165 | :param array data: input data (list or numpy.array) 166 | :param sampling: the sampling frequency of the data (default 1Hz) 167 | 168 | """ 169 | self.data = data 170 | self.N = len(data) 171 | self.sampling = sampling 172 | self.dt = 1.0 / sampling 173 | 174 | def plot(self, **kargs): 175 | """Plot the data set, using the sampling information to set the x-axis 176 | correctly.""" 177 | from pylab import grid, linspace, plot, xlabel, ylabel 178 | 179 | time = linspace(1 * self.dt, self.N * self.dt, self.N) 180 | plot(time, self.data, **kargs) 181 | xlabel("Time") 182 | ylabel("Amplitude") 183 | grid(True) 184 | -------------------------------------------------------------------------------- /src/spectrum/eigen.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from . import toeplitz 3 | 4 | __all__ = ["MINEIGVAL"] 5 | 6 | 7 | def MINEIGVAL(T0, T, TOL): 8 | """Finds the minimum eigenvalue of a Hermitian Toeplitz matrix 9 | 10 | The classical power method is used together with a fast Toeplitz 11 | equation solution routine. The eigenvector is normalized to unit length. 12 | 13 | :param T0: Scalar corresponding to real matrix element t(0) 14 | :param T: Array of M complex matrix elements t(1),...,t(M) C from the left column of the Toeplitz matrix 15 | :param TOL: Real scalar tolerance; routine exits when [ EVAL(k) - EVAL(k-1) ]/EVAL(k-1) < TOL , where the index k denotes the iteration number. 16 | 17 | :return: 18 | * EVAL - Real scalar denoting the minimum eigenvalue of matrix 19 | * EVEC - Array of M complex eigenvector elements associated 20 | 21 | 22 | .. note:: 23 | * External array T must be dimensioned >= M 24 | * array EVEC must be >= M+1 25 | * Internal array E must be dimensioned >= M+1 . 
26 | 27 | * **dependencies** 28 | * :meth:`spectrum.toeplitz.HERMTOEP` 29 | """ 30 | M = len(T) 31 | eigval = 10 32 | eigvalold = 1 33 | eigvec = numpy.zeros(M+1, dtype=complex) 34 | for k in range(0,M+1): 35 | eigvec[k] = 1+0j 36 | it=0 37 | #print 'initialisation',T0, T, eigval, eigvec 38 | maxit = 15 39 | while abs(eigvalold-eigval)>TOL*eigvalold and it m-1: 64 | #disp('Warning: zero-padding short input sequence') 65 | x.resize(N+1) 66 | #todo: check this zero-padding. 67 | 68 | X = fft(x, 2**nextpow2(2.*len(x)-1)) 69 | R = real(ifft(abs(X)**2)) 70 | R = R/(m-1.) #Biased autocorrelation estimate 71 | a, e, ref = LEVINSON(R, N) 72 | return a, e 73 | -------------------------------------------------------------------------------- /src/spectrum/minvar.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. topic:: Minimum Variance Spectral Estimators 3 | 4 | 5 | .. autosummary:: 6 | 7 | :mod:`spectrum.minvar` 8 | pminvar 9 | 10 | .. codeauthor:: Thomas Cokelaer, 2011 11 | """ 12 | import numpy as np 13 | from numpy.fft import fft 14 | 15 | from spectrum.burg import arburg 16 | from spectrum.psd import ParametricSpectrum 17 | from spectrum import default_NFFT 18 | from spectrum import errors 19 | 20 | __all__ = ["minvar", "pminvar"] 21 | 22 | 23 | def minvar(X, order, sampling=1., NFFT=default_NFFT): 24 | r"""Minimum Variance Spectral Estimation (MV) 25 | 26 | This function computes the minimum variance spectral estimate using 27 | the Musicus procedure. The Burg algorithm from :func:`~spectrum.burg.arburg` 28 | is used for the estimation of the autoregressive parameters. 29 | The MV spectral estimator is given by: 30 | 31 | .. math:: P_{MV}(f) = \frac{T}{e^H(f) R^{-1}_p e(f)} 32 | 33 | 34 | where :math:`R^{-1}_p` is the inverse of the estimated autocorrelation 35 | matrix (Toeplitz) and :math:`e(f)` is the complex sinusoid vector. 36 | 37 | :param X: Array of complex or real data samples (length N) 38 | :param int order: Dimension of correlation matrix (AR order = order - 1 ) 39 | :param float T: Sample interval (PSD scaling) 40 | :param int NFFT: length of the final PSD 41 | 42 | :return: 43 | * PSD - Power spectral density values (two-sided) 44 | * AR - AR coefficients (Burg algorithm) 45 | * k - Reflection coefficients (Burg algorithm) 46 | 47 | .. note:: The MV spectral estimator is not a true PSD function because the 48 | area under the MV estimate does not represent the total power in the 49 | measured process. MV minimises the variance of the output of a narrowband 50 | filter and adpats itself to the spectral content of the input data 51 | at each frequency. 52 | 53 | :Example: The following example computes a PSD estimate using :func:`minvar` 54 | The output PSD is transformed to a ``centerdc`` PSD and plotted. 55 | 56 | .. plot:: 57 | :width: 80% 58 | :include-source: 59 | 60 | from spectrum import * 61 | from pylab import plot, log10, linspace, xlim 62 | psd, A, k = minvar(marple_data, 15) 63 | psd = twosided_2_centerdc(psd) # switch positive and negative freq 64 | f = linspace(-0.5, 0.5, len(psd)) 65 | plot(f, 10 * log10(psd/max(psd))) 66 | xlim(-0.5, 0.5 ) 67 | 68 | .. seealso:: 69 | 70 | * External functions used are :meth:`~spectrum.burg.arburg` 71 | and numpy.fft.fft 72 | * :class:`pminvar`, a Class dedicated to MV method. 
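    A minimal numerical sketch (added here for illustration; it only uses the
    ``marple_data`` sample set shipped with **Spectrum**)::

        from spectrum import minvar, marple_data
        psd, A, k = minvar(marple_data, 15, NFFT=4096)
        # psd : two-sided PSD evaluated on NFFT points
        # A   : AR polynomial [1, a_1, ..., a_14] from the internal Burg fit
        # k   : the corresponding reflection coefficients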
73 | 74 | :Reference: [Marple]_ 75 | 76 | """ 77 | errors.is_positive_integer(order) 78 | errors.is_positive_integer(NFFT) 79 | 80 | psi = np.zeros(NFFT, dtype=complex) 81 | 82 | # First, we need to compute the AR values (note that order-1) 83 | A, P, k = arburg (X, order - 1) 84 | 85 | # add the order 0 86 | A = np.insert(A, 0, 1.+0j) 87 | 88 | # We cannot compare the output with those of MARPLE in a precise way. 89 | # Indeed the burg algorithm is only single precision in fortram code 90 | # So, the AR values are slightly differnt. 91 | # The followign values are those from Marple 92 | """A[1] = 2.62284255-0.701703191j 93 | A[2] = 4.97930574-2.32781982j 94 | A[3] = 6.78445101-5.02477741j 95 | A[4] =7.85207081-8.01284409j 96 | A[5] =7.39412165-10.7684202j 97 | A[6] =6.03175116-12.7067814j 98 | A[7] =3.80106878-13.6808891j 99 | A[8] =1.48207295-13.2265558j 100 | A[9] =-0.644280195-11.4574194j 101 | A[10] =-2.02386642-8.53268814j 102 | A[11] =-2.32437634-5.25636244j 103 | A[12] =-1.75356281-2.46820402j 104 | A[13] =-0.888899028-0.781434655j 105 | A[14] =-0.287197977-0.0918145925j 106 | P = 0.00636525545 107 | """ 108 | 109 | # if we use exactly the same AR coeff and P from Marple Burg output, then 110 | # we can compare the following code. This has been done and reveals that 111 | # the FFT in marple is also slightly different (precision) from this one. 112 | # However, the results are sufficiently close (when NFFT is small) that 113 | # we are confident the following code is correct. 114 | 115 | # Compute the psi coefficients 116 | for K in range(0, order): 117 | SUM = 0. 118 | MK = order-K 119 | 120 | # Correlate the autoregressive parameters 121 | for I in range(0, order - K): 122 | SUM = SUM + float(MK-2*I) * A[I].conjugate()*A[I+K] # Eq. (12.25) 123 | 124 | SUM = SUM/P 125 | if K != 0: 126 | psi[NFFT-K] = SUM.conjugate() 127 | psi[K] = SUM 128 | 129 | # Compute FFT of denominator 130 | psi = fft(psi, NFFT) 131 | 132 | # Invert the psi terms at this point to get PSD values 133 | PSD = sampling / np.real(psi) 134 | 135 | return PSD, A, k 136 | 137 | 138 | class pminvar(ParametricSpectrum): 139 | """Class to create PSD based on the Minimum variance spectral estimation 140 | 141 | See :func:`minvar` for description. 142 | 143 | .. plot:: 144 | :width: 80% 145 | :include-source: 146 | 147 | from spectrum import * 148 | p = pminvar(marple_data, 15, NFFT=4096) 149 | p.plot(sides='centerdc') 150 | 151 | 152 | """ 153 | def __init__(self, data, order, NFFT=None, sampling=1., scale_by_freq=False): 154 | """**Constructor** 155 | 156 | For a detailled description of the parameters, see :func:`minvar`. 157 | 158 | :param array data: input data (list or numpy.array) 159 | :param int order: 160 | :param int NFFT: total length of the final data sets (padded with 161 | zero if needed; default is 4096) 162 | :param float sampling: sampling frequency of the input :attr:`data`. 163 | 164 | """ 165 | super(pminvar, self).__init__(data, ar_order=order, sampling=sampling, 166 | NFFT=NFFT, scale_by_freq=scale_by_freq) 167 | 168 | def __call__(self): 169 | res = minvar(self.data, self.ar_order, sampling=self.sampling, 170 | NFFT=self.NFFT) 171 | psd = res[0] 172 | # save the AR and reflection coefficients. 
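        # minvar() returned (PSD, A, k): res[1] is the AR polynomial, res[2] the reflection coefficients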
173 | self.ar = res[1] 174 | self.reflection = res[2] 175 | 176 | # save the PSD 177 | if self.datatype == 'real': 178 | if self.NFFT % 2 == 0: 179 | newpsd = psd[0:int(self.NFFT/2+1)] * 2 180 | else: 181 | newpsd = psd[0:int((self.NFFT+1)/2)] * 2 182 | self.psd = newpsd 183 | else: 184 | self.psd = psd 185 | self.scale() 186 | 187 | def _str_title(self): 188 | return "Minimum Variance spectral estimation\n" 189 | 190 | def __str__(self): 191 | return super(pminvar, self).__str__() 192 | 193 | 194 | 195 | -------------------------------------------------------------------------------- /src/spectrum/spectrogram.py: -------------------------------------------------------------------------------- 1 | from spectrum import Periodogram, pmtm 2 | import numpy as np 3 | 4 | 5 | __all__ = ["Spectrogram"] 6 | 7 | 8 | class Spectrogram(object): 9 | """Simple example of spectrogram 10 | 11 | .. plot:: 12 | 13 | from spectrum import Spectrogram, dolphin_filename, readwav 14 | data, samplerate = readwav(dolphin_filename) 15 | 16 | p = Spectrogram(data, ws=128, W=4096, sampling=samplerate) 17 | p.periodogram() 18 | p.plot() 19 | 20 | .. warning:: this is a prototype and needs careful checking of the x/y axes 21 | 22 | 23 | """ 24 | def __init__(self, signal, ws=128, W=4096, sampling=1, channel=1): 25 | if len(signal.shape) == 1: 26 | self.signal = signal 27 | else: 28 | self.signal = signal[:,channel-1] 29 | self.W = W 30 | self.ws = ws 31 | self._start_y = 10 32 | self.sampling = sampling 33 | self.duration = len(self.signal) / float(self.sampling) 34 | 35 | def plot(self, filename=None, vmin=None, vmax=None, cmap='jet_r'): 36 | import pylab 37 | pylab.clf() 38 | pylab.imshow(-np.log10(self.results[self._start_y:,:]), 39 | origin="lower", 40 | aspect="auto", cmap=cmap, vmin=vmin, vmax=vmax) 41 | pylab.colorbar() 42 | 43 | # Fix xticks 44 | XMAX = float(self.results.shape[1]) # The max integer on xaxis 45 | xpos = list(range(0, int(XMAX), int(XMAX/5))) 46 | xx = [int(this*100)/100 for this in np.array(xpos) / XMAX * self.duration] 47 | pylab.xticks(xpos, xx, fontsize=16) 48 | 49 | # Fix yticks 50 | YMAX = float(self.results.shape[0]) # The max integer on yaxis 51 | ypos = list(range(0, int(YMAX), int(YMAX/5))) 52 | yy = [int(this) for this in np.array(ypos) / YMAX * self.sampling] 53 | pylab.yticks(ypos, yy, fontsize=16) 54 | 55 | #pylab.yticks([1000,2000,3000,4000], [5500,11000,16500,22000], fontsize=16) 56 | #pylab.title("%s echoes" % filename.replace(".png", ""), fontsize=25) 57 | pylab.xlabel("Time (seconds)", fontsize=25) 58 | pylab.ylabel("Frequency (Hz)", fontsize=25) 59 | pylab.tight_layout() 60 | if filename: 61 | pylab.savefig(filename) 62 | 63 | def periodogram(self): 64 | W = self.W 65 | ws = self.ws 66 | N = int(len(self.signal)/ws) 67 | self.results = np.zeros((W*2+1, N-8)) 68 | print("Duration: %s" % self.duration) 69 | print("W: %s" % W) 70 | print("ws: %s" % ws) 71 | print("Computing %s TFs" % N) 72 | for i in range(N-8): 73 | data = self.signal[i*ws:i*ws+W] 74 | p = Periodogram(data, sampling=self.sampling, NFFT=W*4) 75 | p() 76 | self.results[:,i] = p.psd 77 | print("done") 78 | 79 | def pmtm(self): 80 | W = self.W 81 | ws = self.ws 82 | N = int(len(self.signal) / ws) 83 | self.results = np.zeros((W+1, N-8)) 84 | for i in range(N-8): 85 | data = self.signal[i*ws:i*ws+W] 86 | a = pmtm(data, 4, NFFT=W*4, show=False) 87 | Sk = np.mean(abs(a[0].transpose())**2 * a[1], axis=1) 88 | self.results[:, i] = Sk[0:self.W+1] 89 | print(i, N) 90 | print("done") 91 | 92 | 93 |
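# --- illustrative usage sketch (not part of the original module) -------------
# Build a spectrogram of a synthetic noisy cosine. `data_cosine` is the sample
# generator shipped with Spectrum; the window length W and step ws below are
# arbitrary illustrative values, not recommended settings.
if __name__ == "__main__":  # pragma: no cover
    from spectrum import data_cosine

    signal = data_cosine(N=10000, A=0.5, sampling=1024, freq=200)
    sp = Spectrogram(signal, ws=64, W=512, sampling=1024)
    sp.periodogram()   # one Periodogram per sliding window, stored column-wise in sp.results
    sp.plot()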
-------------------------------------------------------------------------------- /src/spectrum/toeplitz.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. topic:: Toeplitz module 3 | 4 | These functions are not yet used by other functions, which explains the 5 | lack of documentation, test, examples. 6 | 7 | Nevertheless, they can be used for production. 8 | 9 | .. autosummary:: 10 | 11 | HERMTOEP 12 | 13 | .. codeauthor:: Thomas Cokelaer, 2011 14 | """ 15 | 16 | import numpy 17 | 18 | __all__ = [ "HERMTOEP"] 19 | 20 | 21 | def TOEPLITZ(T0, TC, TR, Z): 22 | """solve the general toeplitz linear equations 23 | 24 | Solve TX=Z 25 | 26 | :param T0: zero lag value 27 | :param TC: r1 to rN 28 | :param TR: r1 to rN 29 | 30 | returns X 31 | 32 | requires 3M^2+M operations instead of M^3 with gaussian elimination 33 | 34 | .. warning:: not used right now 35 | """ 36 | assert len(TC)>0 37 | assert len(TC)==len(TR) 38 | M = len(TC) 39 | X = numpy.zeros(M+1,dtype=complex) 40 | A = numpy.zeros(M,dtype=complex) 41 | B = numpy.zeros(M,dtype=complex) 42 | P = T0 43 | if P == 0: raise ValueError("P must be different from zero") 44 | if P == 0: raise ValueError("P must be different from zero") 45 | X[0] = Z[0]/T0 46 | for k in range(0, M): 47 | save1 = TC[k] 48 | save2 = TR[k] 49 | beta = X[0]*TC[k] 50 | if k == 0: 51 | temp1 = -save1 / P 52 | temp2 = -save2 / P 53 | else: 54 | for j in range(0, k): 55 | save1 = save1 + A[j] * TC[k-j-1] 56 | save2 = save2 + B[j] * TR[k-j-1] 57 | beta = beta + X[j+1] * TC[k-j-1] 58 | temp1 = -save1 / P 59 | temp2 = -save2/P 60 | P = P * (1. - (temp1*temp2)) 61 | if P <= 0: 62 | raise ValueError("singular matrix") 63 | A[k] = temp1 64 | B[k] = temp2 65 | alpha = (Z[k+1]-beta)/P 66 | if k == 0: 67 | X[k+1] = alpha 68 | for j in range(0,k+1): 69 | X[j] = X[j] + alpha * B[k-j] 70 | continue 71 | 72 | for j in range(0, k): 73 | kj = k-j-1 74 | save1 = A[j] 75 | A[j] = save1 + temp1 * B[kj] 76 | B[kj] = B[kj] + temp2*save1 77 | 78 | X[k+1] = alpha 79 | for j in range(0,k+1): 80 | X[j] = X[j] + alpha*B[k-j] 81 | return X 82 | 83 | 84 | def HERMTOEP(T0, T, Z): 85 | """solve Tx=Z by a variation of Levinson algorithm where T 86 | is a complex hermitian toeplitz matrix 87 | 88 | :param T0: zero lag value 89 | :param T: r1 to rN 90 | 91 | :return: X 92 | 93 | used by eigen PSD method 94 | """ 95 | assert len(T)>0 96 | M = len(T) 97 | X = numpy.zeros(M+1,dtype=complex) 98 | A = numpy.zeros(M,dtype=complex) 99 | P = T0 100 | if P == 0: raise ValueError("P must be different from zero") 101 | X[0] = Z[0]/T0 102 | for k in range(0, M): 103 | save = T[k] 104 | beta = X[0]*T[k] 105 | if k == 0: 106 | temp = -save / P 107 | else: 108 | for j in range(0, k): 109 | save = save + A[j] * T[k-j-1] 110 | beta = beta + X[j+1] * T[k-j-1] 111 | temp = -save / P 112 | P = P * (1. 
- (temp.real**2+temp.imag**2)) 113 | if P <= 0: 114 | raise ValueError("singular matrix") 115 | A[k] = temp 116 | alpha = (Z[k+1]-beta)/P 117 | 118 | if k == 0: 119 | #print 'skipping code for k=0' 120 | X[k+1] = alpha 121 | for j in range(0,k+1): 122 | X[j] = X[j] + alpha * A[k-j].conjugate() 123 | continue 124 | khalf = (k+1)//2 125 | for j in range(0, khalf): 126 | kj = k-j-1 127 | save=A[j] 128 | A[j] = save+temp*A[kj].conjugate() 129 | if j != kj: 130 | A[kj] = A[kj] + temp*save.conjugate() 131 | X[k+1] = alpha 132 | for j in range(0,k+1): 133 | X[j] = X[j] + alpha * A[k-j].conjugate() 134 | return X 135 | 136 | 137 | -------------------------------------------------------------------------------- /src/spectrum/tools.py: -------------------------------------------------------------------------------- 1 | """ 2 | .. topic:: Tools module 3 | 4 | .. autosummary:: 5 | 6 | db2mag 7 | db2pow 8 | mag2db 9 | nextpow2 10 | pow2db 11 | onesided_2_twosided 12 | twosided_2_onesided 13 | centerdc_2_twosided 14 | twosided_2_centerdc 15 | 16 | .. codeauthor:: Thomas Cokelaer, 2011 17 | """ 18 | import numpy as np 19 | from numpy import ceil, log2 20 | from collections import deque 21 | 22 | 23 | #__all__ = ["cshift", "pow2dB", "nextpow2", "twosided", "twosided_zerolag", 24 | # "fftshift"] 25 | 26 | 27 | def fftshift(x): 28 | """wrapper to numpy.fft.fftshift 29 | 30 | .. doctest:: 31 | 32 | >>> from spectrum import fftshift 33 | >>> x = [100, 2, 3, 4, 5] 34 | >>> fftshift(x) 35 | array([ 4, 5, 100, 2, 3]) 36 | """ 37 | return np.fft.fftshift(x) 38 | 39 | 40 | def _swapsides(data): 41 | """todo is it really useful ? 42 | 43 | Swap sides 44 | 45 | .. doctest:: 46 | 47 | >>> from spectrum.tools import _swapsides 48 | >>> x = [-2, -1, 1, 2] 49 | >>> _swapsides(x) 50 | array([ 2, -2, -1]) 51 | 52 | """ 53 | N = len(data) 54 | return np.concatenate((data[N//2+1:], data[0:N//2])) 55 | 56 | 57 | def twosided_2_onesided(data): 58 | """Convert a two-sided PSD to a one-sided PSD 59 | 60 | In order to keep the power in the onesided PSD the same 61 | as in the twosided version, the onesided values are twice 62 | as much as in the input data (except for the zero-lag and N-lag values). 63 | 64 | :: 65 | 66 | >>> twosided_2_onesided([10, 2,3,3,2,8]) 67 | array([ 10., 4., 6., 8.]) 68 | 69 | """ 70 | assert len(data) % 2 == 0 71 | N = len(data) 72 | psd = np.array(data[0:N//2+1]) * 2. 73 | psd[0] /= 2. 74 | psd[-1] = data[-1] 75 | return psd 76 | 77 | 78 | def onesided_2_twosided(data): 79 | """Convert a one-sided PSD to a two-sided PSD 80 | 81 | In order to keep the power in the twosided PSD the same 82 | as in the onesided version, the twosided values are 2 times 83 | lower than the input data (except for the zero-lag and N-lag 84 | values). 85 | 86 | :: 87 | 88 | >>> onesided_2_twosided([10, 4, 6, 8]) 89 | array([ 10., 2., 3., 3., 2., 8.]) 90 | 91 | """ 92 | psd = np.concatenate((data[0:-1], cshift(data[-1:0:-1], -1)))/2. 93 | psd[0] *= 2. 94 | psd[-1] *= 2.
95 | return psd 96 | 97 | 98 | def twosided_2_centerdc(data): 99 | """Convert a two-sided PSD to a center-dc PSD""" 100 | N = len(data) 101 | # could us int() or // in python 3 102 | newpsd = np.concatenate((cshift(data[N//2:], 1), data[0:N//2])) 103 | newpsd[0] = data[-1] 104 | return newpsd 105 | 106 | 107 | def centerdc_2_twosided(data): 108 | """Convert a center-dc PSD to a twosided PSD""" 109 | N = len(data) 110 | newpsd = np.concatenate((data[N//2:], (cshift(data[0:N//2], -1)))) 111 | return newpsd 112 | 113 | 114 | def twosided(data): 115 | """return a twosided vector with non-duplication of the first element 116 | 117 | .. doctest:: 118 | 119 | >>> from spectrum import twosided 120 | >>> a = [1,2,3] 121 | >>> twosided(a) 122 | array([3, 2, 1, 2, 3]) 123 | 124 | """ 125 | twosided = np.concatenate((data[::-1], data[1:])) 126 | return twosided #remove the first element to have a power of 2 and compatiable with pylab.psd 127 | 128 | 129 | def _twosided_zerolag(data, zerolag): 130 | """Build a symmetric vector out of stricly positive lag vector and zero-lag 131 | 132 | .. doctest:: 133 | 134 | >>> data = [3,2,1] 135 | >>> zerolag = 4 136 | >>> twosided_zerolag(data, zerolag) 137 | array([1, 2, 3, 4, 3, 2, 1]) 138 | 139 | .. seealso:: Same behaviour as :func:`twosided_zerolag` 140 | """ 141 | res = twosided(np.insert(data, 0, zerolag)) 142 | return res 143 | 144 | 145 | def cshift(data, offset): 146 | """Circular shift to the right (within an array) by a given offset 147 | 148 | :param array data: input data (list or numpy.array) 149 | :param int offset: shift the array with the offset 150 | 151 | .. doctest:: 152 | 153 | >>> from spectrum import cshift 154 | >>> cshift([0, 1, 2, 3, -2, -1], 2) 155 | array([-2, -1, 0, 1, 2, 3]) 156 | 157 | """ 158 | # the deque method is suppose to be optimal when using rotate to shift the 159 | # data that playing with the data to build a new list. 160 | if isinstance(offset, float): 161 | offset = int(offset) 162 | a = deque(data) 163 | a.rotate(offset) 164 | return np.array(a) #convert back to an array. Is it necessary? 165 | 166 | 167 | def pow2db(x): 168 | """returns the corresponding decibel (dB) value for a power value x. 169 | 170 | The relationship between power and decibels is: 171 | 172 | .. math:: X_{dB} = 10 * \log_{10}(x) 173 | 174 | .. doctest:: 175 | 176 | >>> from spectrum import pow2db 177 | >>> x = pow2db(0.1) 178 | >>> x 179 | -10.0 180 | """ 181 | return 10 * log10(x) 182 | 183 | 184 | def db2pow(xdb): 185 | """Convert decibels (dB) to power 186 | 187 | .. doctest:: 188 | 189 | >>> from spectrum import db2pow 190 | >>> p = db2pow(-10) 191 | >>> p 192 | 0.1 193 | 194 | .. seealso:: :func:`pow2db` 195 | """ 196 | return 10.**(xdb/10.) 197 | 198 | 199 | def nextpow2(x): 200 | """returns the smallest power of two that is greater than or equal to the 201 | absolute value of x. 202 | 203 | This function is useful for optimizing FFT operations, which are 204 | most efficient when sequence length is an exact power of two. 205 | 206 | :Example: 207 | 208 | .. doctest:: 209 | 210 | >>> from spectrum import nextpow2 211 | >>> x = [255, 256, 257] 212 | >>> nextpow2(x) 213 | array([8, 8, 9]) 214 | 215 | """ 216 | res = ceil(log2(x)) 217 | return res.astype('int') #we want integer values only but ceil gives float 218 | 219 | 220 | def db2mag(xdb): 221 | """Convert decibels (dB) to magnitude 222 | 223 | .. doctest:: 224 | 225 | >>> from spectrum import db2mag 226 | >>> db2mag(-20) 227 | 0.1 228 | 229 | .. 
seealso:: :func:`pow2db` 230 | """ 231 | return 10.**(xdb/20.) 232 | 233 | 234 | def mag2db(x): 235 | """Convert magnitude to decibels (dB) 236 | 237 | The relationship between magnitude and decibels is: 238 | 239 | .. math:: X_{dB} = 20 * \log_{10}(x) 240 | 241 | .. doctest:: 242 | 243 | >>> from spectrum import mag2db 244 | >>> mag2db(0.1) 245 | -20.0 246 | 247 | .. seealso:: :func:`db2mag` 248 | """ 249 | return 20. * log10(x) 250 | 251 | 252 | def log10(data): 253 | np.seterr(divide='ignore') 254 | data = np.log10(data) 255 | np.seterr(divide='warn') 256 | return data 257 | 258 | 259 | -------------------------------------------------------------------------------- /src/spectrum/waveform.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import pi 3 | 4 | __all__ = ['morlet', 'chirp', 'mexican', 'meyeraux'] 5 | 6 | 7 | def morlet(lb, ub, n): 8 | r"""Generate the Morlet waveform 9 | 10 | 11 | The Morlet waveform is defined as follows: 12 | 13 | .. math:: w[x] = \cos{5x} \exp^{-x^2/2} 14 | 15 | :param lb: lower bound 16 | :param ub: upper bound 17 | :param int n: waveform data samples 18 | 19 | 20 | .. plot:: 21 | :include-source: 22 | :width: 80% 23 | 24 | from spectrum import morlet 25 | from pylab import plot 26 | plot(morlet(0,10,100)) 27 | 28 | """ 29 | if n <= 0: 30 | raise ValueError("n must be strictly positive") 31 | 32 | x = numpy.linspace(lb, ub, n) 33 | psi = numpy.cos(5*x) * numpy.exp(-x**2/2.) 34 | return psi 35 | 36 | 37 | def chirp(t, f0=0., t1=1., f1=100., form='linear', phase=0): 38 | r"""Evaluate a chirp signal at time t. 39 | 40 | A chirp signal is a frequency swept cosine wave. 41 | 42 | .. math:: a = \pi (f_1 - f_0) / t_1 43 | .. math:: b = 2 \pi f_0 44 | .. math:: y = \cos\left( \pi\frac{f_1-f_0}{t_1} t^2 + 2\pi f_0 t + \rm{phase} \right) 45 | 46 | :param array t: times at which to evaluate the chirp signal 47 | :param float f0: frequency at time t=0 (Hz) 48 | :param float t1: time t1 49 | :param float f1: frequency at time t=t1 (Hz) 50 | :param str form: shape of frequency sweep in ['linear', 'quadratic', 'logarithmic'] 51 | :param float phase: phase shift at t=0 52 | 53 | The parameter **form** can be: 54 | 55 | * 'linear' :math:`f(t) = (f_1-f_0)(t/t_1) + f_0` 56 | * 'quadratic' :math:`f(t) = (f_1-f_0)(t/t_1)^2 + f_0` 57 | * 'logarithmic' :math:`f(t) = (f_1-f_0)^{(t/t_1)} + f_0` 58 | 59 | Example: 60 | 61 | .. plot:: 62 | :include-source: 63 | :width: 80% 64 | 65 | from spectrum import chirp 66 | from pylab import linspace, plot 67 | t = linspace(0, 1, 1000) 68 | y = chirp(t, form='linear') 69 | plot(y) 70 | y = chirp(t, form='quadratic') 71 | plot(y, 'r') 72 | 73 | """ 74 | valid_forms = ['linear', 'quadratic', 'logarithmic'] 75 | if form not in valid_forms: 76 | raise ValueError("Invalid form. Valid form are %s" 77 | % valid_forms) 78 | t = numpy.array(t) 79 | phase = 2. * pi * phase / 360. 80 | if form == "linear": 81 | a = pi * (f1 - f0)/t1 82 | b = 2. * pi * f0 83 | y = numpy.cos(a * t**2 + b*t + phase) 84 | elif form == "quadratic": 85 | a = (2/3. * pi * (f1-f0)/t1/t1) 86 | b = 2. * pi * f0 87 | y = numpy.cos(a*t**3 + b * t + phase) 88 | elif form == "logarithmic": 89 | a = 2. * pi * t1/numpy.log(f1-f0) 90 | b = 2. * pi * f0 91 | x = (f1-f0)**(1./t1) 92 | y = numpy.cos(a * x**t + b * t + phase) 93 | 94 | return y 95 | 96 | 97 | def mexican(lb, ub, n): 98 | r"""Generate the mexican hat wavelet 99 | 100 | The Mexican wavelet is: 101 | 102 | .. 
math:: w[x] = \frac{2}{\sqrt{3} \pi^{1/4}} (1-x^2) \exp^{-x^2/2} 103 | 104 | :param lb: lower bound 105 | :param ub: upper bound 106 | :param int n: waveform data samples 107 | :return: the waveform 108 | 109 | .. plot:: 110 | :include-source: 111 | :width: 80% 112 | 113 | from spectrum import mexican 114 | from pylab import plot 115 | plot(mexican(0, 10, 100)) 116 | 117 | """ 118 | if n <= 0: 119 | raise ValueError("n must be strictly positive") 120 | 121 | x = numpy.linspace(lb, ub, n) 122 | psi = (1.-x**2.) * (2./(numpy.sqrt(3.)*pi**0.25)) * numpy.exp(-x**2/2.) 123 | return psi 124 | 125 | 126 | def meyeraux(x): 127 | r"""Compute the Meyer auxiliary function 128 | 129 | The Meyer function is 130 | 131 | .. math:: y = 35 x^4-84 x^5+70 x^6-20 x^7 132 | 133 | :param array x: input values 134 | :return: the waveform 135 | 136 | .. plot:: 137 | :include-source: 138 | :width: 80% 139 | 140 | from spectrum import meyeraux 141 | from pylab import linspace, plot 142 | t = linspace(0, 1, 1000) 143 | plot(t, meyeraux(t)) 144 | 145 | """ 146 | 147 | return 35*x**4-84.*x**5+70.*x**6-20.*x**7 148 | 149 | -------------------------------------------------------------------------------- /src/spectrum/yulewalker.py: -------------------------------------------------------------------------------- 1 | """Yule Walker method to estimate AR values. 2 | 3 | .. topic:: Estimation of AR values using Yule-Walker method 4 | 5 | .. autosummary:: 6 | 7 | aryule 8 | pyule 9 | 10 | .. codeauthor:: Thomas Cokelaer 2011 11 | 12 | """ 13 | 14 | from .correlation import CORRELATION 15 | from .levinson import LEVINSON 16 | from .psd import ParametricSpectrum 17 | from spectrum import tools 18 | import numpy as np 19 | 20 | __all__ = ['aryule', 'pyule'] 21 | 22 | 23 | def aryule(X, order, norm='biased', allow_singularity=True): 24 | r"""Compute AR coefficients using Yule-Walker method 25 | 26 | :param X: Array of complex data values, X(1) to X(N) 27 | :param int order: Order of autoregressive process to be fitted (integer) 28 | :param str norm: Use a biased or unbiased correlation. 29 | :param bool allow_singularity: 30 | 31 | :return: 32 | * AR coefficients (complex) 33 | * variance of white noise (Real) 34 | * reflection coefficients for use in lattice filter 35 | 36 | .. rubric:: Description: 37 | 38 | The Yule-Walker method returns the polynomial A corresponding to the 39 | AR parametric signal model estimate of vector X using the Yule-Walker 40 | (autocorrelation) method. The autocorrelation may be computed using a 41 | **biased** or **unbiased** estimation. In practice, the biased estimate of 42 | the autocorrelation is used for the unknown true autocorrelation. Indeed, 43 | an unbiased estimate may result in a nonpositive-definite autocorrelation 44 | matrix. 45 | So, a biased estimate leads to a stable AR filter. 46 | The following matrix form represents the Yule-Walker equations. They are 47 | solved by means of the Levinson-Durbin recursion: 48 | 49 | .. math:: 50 | 51 | \left( \begin{array}{cccc} 52 | r(1) & r(2)^* & \dots & r(n)^*\\ 53 | r(2) & r(1)^* & \dots & r(n-1)^*\\ 54 | \dots & \dots & \dots & \dots\\ 55 | r(n) & \dots & r(2) & r(1) \end{array} \right) 56 | \left( \begin{array}{cccc} 57 | a(2)\\ 58 | a(3) \\ 59 | \dots \\ 60 | a(n+1) \end{array} \right) 61 | = 62 | \left( \begin{array}{cccc} 63 | -r(2)\\ 64 | -r(3) \\ 65 | \dots \\ 66 | -r(n+1) \end{array} \right) 67 | 68 | The outputs consist of the AR coefficients, the estimated variance of the 69 | white noise process, and the reflection coefficients.
These outputs can be 70 | used to estimate the optimal order by using :mod:`~spectrum.criteria`. 71 | 72 | .. rubric:: Examples: 73 | 74 | From a known AR process or order 4, we estimate those AR parameters using 75 | the aryule function. 76 | 77 | .. doctest:: 78 | 79 | >>> from scipy.signal import lfilter 80 | >>> from spectrum import * 81 | >>> from numpy.random import randn 82 | >>> A =[1, -2.7607, 3.8106, -2.6535, 0.9238] 83 | >>> noise = randn(1, 1024) 84 | >>> y = lfilter([1], A, noise); 85 | >>> #filter a white noise input to create AR(4) process 86 | >>> [ar, var, reflec] = aryule(y[0], 4) 87 | >>> # ar should contains values similar to A 88 | 89 | The PSD estimate of a data samples is computed and plotted as follows: 90 | 91 | .. plot:: 92 | :width: 80% 93 | :include-source: 94 | 95 | from spectrum import * 96 | from pylab import * 97 | 98 | ar, P, k = aryule(marple_data, 15, norm='biased') 99 | psd = arma2psd(ar) 100 | plot(linspace(-0.5, 0.5, 4096), 10 * log10(psd/max(psd))) 101 | axis([-0.5, 0.5, -60, 0]) 102 | 103 | .. note:: The outputs have been double checked against (1) octave outputs 104 | (octave has norm='biased' by default) and (2) Marple test code. 105 | 106 | .. seealso:: This function uses :func:`~spectrum.levinson.LEVINSON` and 107 | :func:`~spectrum.correlation.CORRELATION`. See the :mod:`~spectrum.criteria` 108 | module for criteria to automatically select the AR order. 109 | 110 | :References: [Marple]_ 111 | 112 | """ 113 | assert norm in ['biased', 'unbiased'] 114 | r = CORRELATION(X, maxlags=order, norm=norm) 115 | A, P, k = LEVINSON(r, allow_singularity=allow_singularity) 116 | return A, P, k 117 | 118 | 119 | class pyule(ParametricSpectrum): 120 | """Class to create PSD based on the Yule Walker method 121 | 122 | See :func:`aryule` for description. 123 | 124 | .. plot:: 125 | :width: 80% 126 | :include-source: 127 | 128 | from spectrum import * 129 | p = pyule(marple_data, 15, NFFT=4096) 130 | p.plot(sides='centerdc') 131 | 132 | """ 133 | def __init__(self, data, order, norm='biased', NFFT=None, sampling=1., 134 | scale_by_freq=True): 135 | """**Constructor** 136 | 137 | For a detailled description of the parameters, see :func:`aryule`. 138 | 139 | :param array data: input data (list or numpy.array) 140 | :param int order: 141 | :param int NFFT: total length of the final data sets (padded with 142 | zero if needed; default is 4096) 143 | :param float sampling: sampling frequency of the input :attr:`data` 144 | :param str norm: don't change if you do not know 145 | 146 | """ 147 | super(pyule, self).__init__(data, ar_order=order, NFFT=NFFT, 148 | scale_by_freq=scale_by_freq, 149 | sampling=sampling) 150 | self.sampling = sampling 151 | self._norm_aryule = norm 152 | 153 | def __call__(self): 154 | from . import arma 155 | ar, rho, k = aryule(self.data, self.ar_order, norm=self._norm_aryule) 156 | psd = arma.arma2psd(ar, NFFT=self.NFFT, rho=rho, T=self.sampling) 157 | # save the AR and reflection coefficients. 
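        # aryule() returned (ar, rho, k); rho (the noise variance) was already passed to arma2psd above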
158 | self.ar = ar 159 | self.reflection = k 160 | 161 | # save the PSD 162 | if self.datatype == 'real': 163 | # see doc/concepts.rst for details 164 | if self.NFFT % 2 == 0: 165 | newpsd = psd[0:int(self.NFFT/2 + 1)] * 2 166 | else: 167 | newpsd = psd[0:int((self.NFFT+1) / 2)] * 2 168 | self.psd = newpsd 169 | else: 170 | self.psd = psd 171 | if self.scale_by_freq is True: 172 | self.scale() 173 | return self 174 | 175 | def _str_title(self): 176 | return "PYule PSD estimate\n" 177 | 178 | def __str__(self): 179 | return super(pyule, self).__str__() 180 | 181 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cokelaer/spectrum/080e5aa5722e5c9b79f5d438d996b899c94f47dc/test/__init__.py -------------------------------------------------------------------------------- /test/test_arma.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import pytest 3 | from numpy.testing import assert_almost_equal, assert_array_almost_equal 4 | from pylab import linspace, log10, plot, savefig, ylim 5 | 6 | from spectrum import * 7 | from spectrum.arma import arma2psd, arma_estimate, ma, parma, pma 8 | 9 | 10 | def test_arma_values(): 11 | a, b, rho = arma_estimate(marple_data, 15, 15, 30) 12 | assert_almost_equal(rho, 0.20050144053393698, 1e-6) 13 | 14 | assert_array_almost_equal( 15 | a, 16 | numpy.array( 17 | [ 18 | 1.47857824 - 0.16358208j, 19 | 4.32139091 - 0.86231938j, 20 | 6.04115773 - 1.77030183j, 21 | 6.09285854 - 3.96367752j, 22 | 4.70699008 - 3.27199141j, 23 | 3.45467782 - 1.59183506j, 24 | 3.11230094 - 1.06510595j, 25 | 1.55237009 + 1.09800024j, 26 | 1.05148353 + 2.2720917j, 27 | 1.68042547 + 4.9737292j, 28 | 3.22899406 + 6.39981425j, 29 | 3.16557650 + 5.92783737j, 30 | 3.47120865 + 5.48246963j, 31 | 2.79508215 + 3.3238971j, 32 | 2.13174602 + 1.51034329j, 33 | ] 34 | ), 35 | decimal=4, 36 | ) 37 | 38 | try: 39 | ma(marple_data, 0, 15) 40 | assert False 41 | except: 42 | assert True 43 | 44 | 45 | def test_arma(): 46 | """arma, check that rho is correct (appendix 10.A )and reproduce figure 10.2""" 47 | a, b, rho = arma_estimate(marple_data, 20, 20, 40) 48 | psd = arma2psd(A=a, B=b, rho=rho, NFFT=None) 49 | psd = arma2psd(A=a, B=b, rho=rho) 50 | try: 51 | psd = arma2psd(A=None, B=None, rho=rho) 52 | assert False 53 | except: 54 | assert True 55 | 56 | 57 | def create_figure_arma(): 58 | psd = test_arma() 59 | psd = cshift(psd, len(psd) / 2) # switch positive and negative freq 60 | plot(linspace(-0.5, 0.5, 4096), 10 * log10(psd / max(psd))) 61 | ylim([-50, 0]) 62 | savefig("psd_arma.png") 63 | 64 | 65 | def create_figure_ma(): 66 | psd = test_ma() 67 | psd = cshift(psd, len(psd) / 2) # switch positive and negative freq 68 | plot(linspace(-0.5, 0.5, 4096), 10 * log10(psd / max(psd))) 69 | ylim([-50, 0]) 70 | savefig("psd_ma.png") 71 | 72 | 73 | def test_ma(): 74 | """ma PSD. 
check that rho is correct (appendix 10.A )and reproduce figure 10.2""" 75 | b, rho = ma(marple_data, 15, 30) 76 | assert_almost_equal(rho, 0.21432, decimal=4) 77 | 78 | assert_almost_equal(b[0], -0.25150803 + 0.67246418j, decimal=6) 79 | assert_almost_equal(b[1], -0.68612023 + 0.14571702j, decimal=6) 80 | 81 | """-0.25150803+0.67246418j, -0.68612023+0.14571702j, 82 | 0.02061484-0.52246411j, 0.11444091-0.19157961j, 83 | 0.36592370+0.09885689j, 0.00556917+0.4330789j , 84 | -0.40634639+0.04854752j, -0.10092740-0.42813962j, 85 | 0.26013726-0.16101382j, 0.25119793+0.16711825j, 86 | -0.13448885+0.21202256j, -0.16125290+0.10804393j, 87 | -0.03402254-0.18015694j, 0.08780647-0.1146388j , 88 | 0.02294750+0.08411391j 89 | """ 90 | psd = arma2psd(B=b, rho=rho) 91 | 92 | 93 | def test_arma2psd(): 94 | psd = arma2psd([0.5], NFFT=16, norm=True) * 4 95 | assert_array_almost_equal( 96 | psd, 97 | numpy.array( 98 | [ 99 | 0.44444444, 100 | 0.46000709, 101 | 0.51095832, 102 | 0.61248861, 103 | 0.8, 104 | 1.15298155, 105 | 1.84198285, 106 | 3.06635155, 107 | 4.0, 108 | 3.06635155, 109 | 1.84198285, 110 | 1.15298155, 111 | 0.8, 112 | 0.61248861, 113 | 0.51095832, 114 | 0.46000709, 115 | ] 116 | ), 117 | ) 118 | 119 | 120 | def test_parma(): 121 | p = parma(marple_data, 4, 4, 30, NFFT=4096) 122 | p.plot() 123 | print(p) 124 | p = parma(marple_data, 4, 4, 30, NFFT=4096, scale_by_freq=True) 125 | p() 126 | 127 | 128 | def test_moving_average_class(): 129 | p = pma(marple_data, 15, 30, NFFT=4096) 130 | p() 131 | print(p) 132 | 133 | 134 | def test_moving_average_class_real(): 135 | p = pma(data_cosine(N=1024), 15, 30, NFFT=4096) 136 | p() 137 | p = pma(data_cosine(N=1024), 15, 30, NFFT=4096, scale_by_freq=True) 138 | p() 139 | 140 | 141 | def create_figure_ma(): 142 | psd = test_ma() 143 | psd = cshift(psd, len(psd) / 2) # switch positive and negative freq 144 | plot(linspace(-0.5, 0.5, 4096), 10 * log10(psd / max(psd))) 145 | ylim([-50, 0]) 146 | savefig("psd_ma.png") 147 | 148 | 149 | if __name__ == "__main__": 150 | create_figure_ma() 151 | create_figure_arma() 152 | -------------------------------------------------------------------------------- /test/test_burg.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import pylab 3 | from numpy.testing import assert_almost_equal, assert_array_almost_equal 4 | 5 | from spectrum.arma import arma2psd 6 | from spectrum.burg import * 7 | from spectrum.criteria import * 8 | from spectrum.datasets import data_cosine, marple_data 9 | from spectrum.tools import cshift 10 | 11 | 12 | def test_arburg2(): 13 | from spectrum.burg import _arburg2 14 | 15 | ar, P, k = _arburg2(marple_data, order=15) 16 | try: 17 | ar, P, k = _arburg2(marple_data, order=0) 18 | assert False 19 | except: 20 | assert True 21 | 22 | 23 | def test_arburg_functional(): 24 | ar, P, k = arburg(marple_data, order=15) 25 | PSD = arma2psd(ar) 26 | newpsd = cshift(PSD, len(PSD) // 2) # switch positive and negative freq 27 | 28 | 29 | def test_arburg_real_output(): 30 | a, b, c = arburg([1, 2, 3, 4, 5, 6, 7, -8], 4) 31 | assert_array_almost_equal(a, [-3.2581e-01, 3.4571e-04, 3.3790e-02, 9.8853e-02], decimal=5) 32 | assert_almost_equal(b, 22.44687609887804) 33 | assert_array_almost_equal(c, [-0.326531, 0.022568, 0.066648, 0.098853], decimal=5) 34 | 35 | 36 | def test_arburg_imag_output(): 37 | a, b, c = arburg(marple_data, 15) 38 | 39 | a_e, b_e, c_e = ( 40 | numpy.array( 41 | [ 42 | 2.70936368 - 0.77610302j, 43 | 5.17482864 - 2.73293024j, 44 | 7.03527787 - 
6.15070038j, 45 | 7.89423853 - 10.20591369j, 46 | 6.84853701 - 14.07469247j, 47 | 4.56915619 - 16.84486008j, 48 | 1.32687590 - 18.13284671j, 49 | -1.87811360 - 17.49937286j, 50 | -4.64976221 - 15.05888331j, 51 | -6.22557823 - 11.25070227j, 52 | -6.28367510 - 6.93498375j, 53 | -4.89652279 - 3.24910899j, 54 | -2.99758653 - 0.8736847j, 55 | -1.32183647 + 0.04527281j, 56 | -0.35565856 + 0.14754881j, 57 | ] 58 | ), 59 | 0.0054379699760549929, 60 | numpy.array( 61 | [ 62 | -0.18570222 - 0.87179346j, 63 | 0.26402371 - 0.5190592j, 64 | 0.07162311 - 0.46372011j, 65 | 0.44463099 + 0.05080174j, 66 | -0.02634972 - 0.14691215j, 67 | 0.19255061 - 0.37032848j, 68 | -0.25994598 - 0.55924338j, 69 | -0.20237974 - 0.23641516j, 70 | -0.40546748 - 0.40598876j, 71 | -0.47824854 - 0.42553068j, 72 | -0.51507096 - 0.49435948j, 73 | -0.32530245 - 0.49134098j, 74 | -0.21950049 - 0.37261937j, 75 | -0.28613904 - 0.0921211j, 76 | -0.35565856 + 0.14754881j, 77 | ] 78 | ), 79 | ) 80 | 81 | assert_array_almost_equal(a, a_e) 82 | assert_almost_equal(b, b_e) 83 | assert_array_almost_equal(c, c_e) 84 | 85 | 86 | def test_burg_criteria(): 87 | ar, P, k = arburg(marple_data, order=15, criteria="AIC") 88 | ar, P, k = arburg(marple_data, order=15, criteria="AICc") 89 | ar, P, k = arburg(marple_data, order=15, criteria="KIC") 90 | ar, P, k = arburg(marple_data, order=15, criteria="MDL") 91 | ar, P, k = arburg(marple_data, order=15, criteria="FPE") 92 | ar, P, k = arburg(marple_data, order=15, criteria="AKICc") 93 | 94 | 95 | def test_pburg(): 96 | p = pburg(marple_data, 15, NFFT=4096) 97 | p.plot(sides="centerdc") 98 | print(p) 99 | # test real case 100 | p = pburg(data_cosine(), 15, NFFT=4096) 101 | p() 102 | p.plot() 103 | 104 | 105 | def create_figure(): 106 | psd = test_burg() 107 | pylab.plot(pylab.linspace(-0.5, 0.5, len(psd)), 10 * pylab.log10(psd / max(psd))) 108 | pylab.axis([-0.5, 0.5, -60, 0]) 109 | pylab.savefig("psd_burg.png") 110 | 111 | 112 | def test_burg_others(): 113 | ar, P, k = arburg(marple_data, order=30, criteria="AIC") 114 | 115 | # This raises an error by entering the arburg estimation and leading to a 116 | # negative rho 117 | try: 118 | a, rho, ref = arburg([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1], 20, None) 119 | assert False 120 | except: 121 | assert True 122 | 123 | # order is too large 124 | try: 125 | a, rho, ref = arburg([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 20) 126 | assert False 127 | except: 128 | assert True 129 | 130 | # order is zero or negative 131 | try: 132 | a, rho, ref = arburg([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0) 133 | assert False 134 | except: 135 | assert True 136 | -------------------------------------------------------------------------------- /test/test_cholesky.py: -------------------------------------------------------------------------------- 1 | from spectrum import * 2 | from numpy.testing import assert_array_almost_equal 3 | import numpy 4 | 5 | #TOEPLITZ 6 | #example marple app 3.A 7 | A = numpy.array([[2+0.j, .5-0.5j,-.2+.1j],[.5+.5j,1,.3-0.2j],[-.2-.1j,.3+.2j,.5]], dtype=complex) 8 | 9 | #a_cholesky = numpy.array([2+0.j, .5-0.5j, 1., -.2+.1j,.3-0.2j,.5], dtype=complex) 10 | B = numpy.array([1+3j,2-1j,.5+.8j], dtype=complex) 11 | #should return 12 | sol_cholesky = numpy.array([ 0.95945946+5.25675676j, 4.41891892-7.04054054j, 13 | -5.13513514+6.35135135j]) 14 | 15 | 16 | def test_cholesky(): 17 | a = CHOLESKY(A, B, 'numpy_solver') 18 | assert_array_almost_equal(a, sol_cholesky) 19 | 20 | a = CHOLESKY(A, B, 'numpy') 21 | assert_array_almost_equal(a, 
sol_cholesky) 22 | 23 | a = CHOLESKY(A, B, 'scipy') 24 | assert_array_almost_equal(a, sol_cholesky) 25 | 26 | try: 27 | a = CHOLESKY(A, B, 'dummyy') 28 | assert False 29 | except: 30 | assert True 31 | -------------------------------------------------------------------------------- /test/test_correlation.py: -------------------------------------------------------------------------------- 1 | from spectrum import CORRELATION, marple_data, xcorr 2 | from numpy.testing import assert_array_almost_equal, assert_almost_equal 3 | from numpy import array 4 | 5 | 6 | def test_CORRELATION(): 7 | R15 = CORRELATION(marple_data, maxlags=15, norm='biased') 8 | R15 = CORRELATION(marple_data, maxlags=15, norm='unbiased') 9 | R15 = CORRELATION(marple_data, maxlags=15, norm='coeff') 10 | R15 = CORRELATION(marple_data, maxlags=15, norm=None) 11 | 12 | def test_correlation_others(): 13 | CORRELATION(marple_data, y=array([1,2,3,4]), maxlags=5) 14 | CORRELATION(array([1,2,3,4]), marple_data, maxlags=5) 15 | CORRELATION(array([1,2,3,4]), marple_data) 16 | 17 | 18 | def test_CORRELATION_biased(): 19 | R15 = CORRELATION(marple_data, maxlags=15, norm='biased') 20 | 21 | assert_almost_equal(R15[0], 1.7804598944893049+0j) 22 | assert_almost_equal(R15[1], 0.32076613+1.50586147j) 23 | assert_almost_equal(R15[2], -1.29947785+0.74815755j) 24 | 25 | 26 | R30 = CORRELATION(marple_data, maxlags=30, norm='biased') 27 | assert_almost_equal(R30[0], 1.7804598944893049+0j) 28 | assert_almost_equal(R30[1], R15[1]) 29 | assert_almost_equal(R30[2], R15[2]) 30 | 31 | 32 | def test_CORRELATION_unbiased(): 33 | R15 = CORRELATION(marple_data, maxlags=15, norm='unbiased') 34 | assert_almost_equal(R15[0], 1.7804598944893049+0j) 35 | assert_almost_equal(R15[1], 0.32585765+1.52976403j) 36 | assert_almost_equal(R15[2], -1.34139649+0.77229166j) 37 | 38 | 39 | R30 = CORRELATION(marple_data, maxlags=30, norm='unbiased') 40 | assert_almost_equal(R30[0], 1.7804598944893049+0j ) 41 | assert_almost_equal(R30[1], R15[1]) 42 | assert_almost_equal(R30[2], R15[2]) 43 | 44 | 45 | def test_xcorr(): 46 | 47 | x = array([1,2,3,4,5]) 48 | corr,l = xcorr(x, x, maxlags=4, norm='coeff') 49 | corr_coeff = array([ 0.09090909, 0.25454545, 0.47272727, 0.72727273, 1. , 0.72727273, 0.47272727, 0.25454545, 0.09090909]) 50 | assert_array_almost_equal(corr, corr_coeff) 51 | 52 | corr,l = xcorr(x, x, maxlags=4, norm='biased') 53 | corr_biased = array([ 1. , 2.8, 5.2, 8. , 11. , 8. , 5.2, 2.8, 1. ]) 54 | assert_array_almost_equal(corr, corr_biased) 55 | 56 | corr,l = xcorr(x, x, maxlags=4, norm='unbiased') 57 | corr_unbiased = array([ 5. , 7. , 8.66666667, 10. , 11. , 10. , 8.66666667, 7. , 5. 
]) 58 | assert_array_almost_equal(corr, corr_unbiased) 59 | 60 | corr,l = xcorr(x, x, maxlags=4, norm=None) 61 | corr_none = array([ 5, 14, 26, 40, 55, 40, 26, 14, 5]) 62 | assert_array_almost_equal(corr, corr_none) 63 | 64 | # check default behaviour of maxlags 65 | corr1,l = xcorr(x, x) 66 | corr2,l = xcorr(x, x, maxlags=4) 67 | assert_array_almost_equal(corr1, corr2) 68 | 69 | # check default behaviour of maxlags 70 | corr1,l = xcorr(x) 71 | corr2,l = xcorr(x, x) 72 | assert_array_almost_equal(corr1, corr2) 73 | 74 | 75 | def test_xcorr_versus_CORRELATION_real_data(): 76 | from spectrum.tools import twosided as func 77 | x = array([1,2,3,4,5]) 78 | 79 | for norm in ['biased', 'unbiased', 'coeff',None]: 80 | corr1,l = xcorr(x, x, maxlags=4, norm=norm) 81 | corr2 = CORRELATION(x, x, maxlags=4, norm=norm) 82 | assert_array_almost_equal(corr1, func(corr2)) 83 | 84 | 85 | def _test_xcorr_versus_CORRELATION_imag_data(): 86 | from spectrum.tools import twosided as func 87 | x = array([1,2,3,4,5+1.j]) 88 | 89 | for norm in ['biased', 'unbiased', 'coeff',None]: 90 | corr1,l = xcorr(x, x, maxlags=4, norm=norm) 91 | corr2 = CORRELATION(x, x, maxlags=4, norm=norm) 92 | assert_array_almost_equal(corr1, func(corr2)) 93 | -------------------------------------------------------------------------------- /test/test_correlog.py: -------------------------------------------------------------------------------- 1 | from numpy.testing import assert_almost_equal, assert_array_almost_equal 2 | from pylab import linspace, log10, plot, savefig 3 | 4 | from spectrum import ( 5 | CORRELATION, 6 | CORRELOGRAMPSD, 7 | data_two_freqs, 8 | marple_data, 9 | pcorrelogram, 10 | ) 11 | 12 | 13 | def test_correlog(): 14 | psd = CORRELOGRAMPSD(marple_data, marple_data, lag=15) 15 | assert_almost_equal(psd[0], 0.138216970) 16 | assert_almost_equal(psd[1000 - 1], 7.900110787) 17 | assert_almost_equal(psd[2000 - 1], 0.110103858) 18 | assert_almost_equal(psd[3000 - 1], 0.222184134) 19 | assert_almost_equal(psd[4000 - 1], -0.036255277) 20 | assert_almost_equal(psd[4096 - 1], 0.1391839711) 21 | 22 | 23 | def test_correlog_auto_cross(): 24 | """Same as test_correlog but x and y provided""" 25 | psd1 = CORRELOGRAMPSD(marple_data, lag=16) 26 | psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16) 27 | assert_array_almost_equal(psd1, psd2) 28 | 29 | psd1 = CORRELOGRAMPSD(marple_data, lag=16, correlation_method="CORRELATION") 30 | psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16, correlation_method="CORRELATION") 31 | assert_array_almost_equal(psd1, psd2) 32 | 33 | 34 | def test_correlog_correlation_method(): 35 | """test correlogramPSD playing with method argument""" 36 | psd1 = CORRELOGRAMPSD(marple_data, lag=16, correlation_method="CORRELATION") 37 | psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16, correlation_method="xcorr") 38 | assert_array_almost_equal(psd1, psd2) 39 | 40 | 41 | def test_pcorrelogram_class(): 42 | p = pcorrelogram(marple_data, lag=16) 43 | p() 44 | print(p) 45 | p = pcorrelogram(data_two_freqs(), lag=16) 46 | p.plot() 47 | print(p) 48 | 49 | 50 | def test_CORRELOGRAMPSD_others(): 51 | p = CORRELOGRAMPSD(marple_data, marple_data, lag=16, NFFT=None) 52 | 53 | 54 | def create_figure(): 55 | psd = test_correlog() 56 | f = linspace(-0.5, 0.5, len(psd)) 57 | 58 | psd = cshift(psd, len(psd) / 2) 59 | plot(f, 10 * log10(psd / max(psd))) 60 | savefig("psd_corr.png") 61 | 62 | 63 | if __name__ == "__main__": 64 | create_figure() 65 | 
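A minimal standalone sketch of the plotting step that create_figure in test_correlog.py performs; it spells out the cshift import (which is not among the imports at the top of that file) and reuses only calls that already appear in these tests. The output file name is illustrative:

    # Centre and plot the correlogram PSD computed as in test_correlog above.
    from pylab import linspace, log10, plot, savefig
    from spectrum import CORRELOGRAMPSD, cshift, marple_data

    psd = CORRELOGRAMPSD(marple_data, marple_data, lag=15)  # same call as the test
    psd = cshift(psd, len(psd) // 2)  # move the zero frequency to the centre
    f = linspace(-0.5, 0.5, len(psd))
    plot(f, 10 * log10(psd / max(psd)))  # normalised PSD in dB
    savefig("psd_corr.png")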
-------------------------------------------------------------------------------- /test/test_covar.py: -------------------------------------------------------------------------------- 1 | import pylab 2 | from numpy import array 3 | from numpy.testing import assert_almost_equal, assert_array_almost_equal 4 | 5 | from spectrum import ( 6 | arcovar, 7 | arcovar_marple, 8 | arma2psd, 9 | cshift, 10 | data_cosine, 11 | marple_data, 12 | pcovar, 13 | ) 14 | 15 | 16 | def test_covar_null_ip(): 17 | af, pf, ab, pb, c = arcovar_marple(marple_data, 0) 18 | assert_almost_equal(pf, 1.7804598944893046) 19 | 20 | 21 | def test_covar_15_ip(): 22 | af, pf, ab, pb, pbv = arcovar_marple(marple_data, 15) 23 | assert_almost_equal(pf, 0.0031358526195905032) 24 | assert_almost_equal(pb, 0.0026095580050847235) 25 | assert_array_almost_equal( 26 | af[0:15], 27 | array( 28 | [ 29 | 3.14064291e00 - 0.53085796j, 30 | 6.71499124e00 - 2.02047795j, 31 | 1.06218919e01 - 4.91215366j, 32 | 1.40604378e01 - 8.88144555j, 33 | 1.56600743e01 - 13.2925649j, 34 | 1.52808636e01 - 17.26357445j, 35 | 1.29553371e01 - 20.19441487j, 36 | 9.56479043e00 - 21.35967801j, 37 | 5.76086019e00 - 20.39407074j, 38 | 2.35478080e00 - 17.25236853j, 39 | -1.39883911e-02 - 12.63099132j, 40 | -1.01307484e00 - 7.71542788j, 41 | -1.00735874e00 - 3.71449987j, 42 | -5.47782956e-01 - 1.24481265j, 43 | -1.63739470e-01 - 0.22820697j, 44 | ] 45 | ), 46 | ) 47 | assert_array_almost_equal( 48 | ab[0:15], 49 | array( 50 | [ 51 | 3.06854326 + 0.4396126j, 52 | 6.52836187 + 1.85223579j, 53 | 10.14250939 + 4.53484335j, 54 | 13.27104933 + 8.16295648j, 55 | 14.65282324 + 12.10370542j, 56 | 14.30283278 + 15.67072521j, 57 | 12.13984749 + 18.32533332j, 58 | 9.02885933 + 19.34952244j, 59 | 5.49933445 + 18.38815454j, 60 | 2.39313549 + 15.41172794j, 61 | 0.23240843 + 11.16952573j, 62 | -0.69430878 + 6.74812076j, 63 | -0.75349882 + 3.21552564j, 64 | -0.42710881 + 1.07407686j, 65 | -0.13625884 + 0.18990667j, 66 | ] 67 | ), 68 | ) 69 | assert_array_almost_equal( 70 | pbv, 71 | array( 72 | [ 73 | 23.002882564886164, 74 | 14.963158025030376, 75 | 11.46060060362683, 76 | 8.8047876198403294, 77 | 8.464718707735825, 78 | 6.7595928955003961, 79 | 3.9194229830412644, 80 | 3.4283223276191257, 81 | 2.2528330561384045, 82 | 1.174361182536527, 83 | 0.53260425403862111, 84 | 0.30138304540853789, 85 | 0.1893577453852136, 86 | 0.13685257356088598, 87 | ] 88 | ), 89 | ) 90 | 91 | 92 | def test_covar_simplified(): 93 | af, pf, ab, pb, pv = arcovar_marple(marple_data, 15) 94 | a2, e2 = arcovar(marple_data, 15) 95 | assert_array_almost_equal(af[0:15], a2) # af contains zeros after order=15 96 | 97 | 98 | def test_covar(): 99 | af, pf, ab, pb, pv = arcovar_marple(marple_data, 15) 100 | PSD = arma2psd(af) 101 | 102 | newpsd = cshift(PSD, len(PSD) // 2) # switch positive and negative freq 103 | 104 | 105 | def test_pcovar_plot(): 106 | p = pcovar([1, 2, 3, 4, 5, 6, 7, 8], 2) 107 | p.plot() 108 | assert len(p.psd) == 5 109 | p = pcovar([1, 2, 3, 4, 5, 6, 7, 8, 9], 2) 110 | p.plot() 111 | assert len(p.psd) == 5 112 | 113 | 114 | def test_pcovar(): 115 | p = pcovar(data_cosine(), 15, NFFT=4096, scale_by_freq=True) 116 | p() 117 | print(p) 118 | p = pcovar(marple_data, 15, NFFT=4096) 119 | p() 120 | print(p) 121 | print(p.get_converted_psd("centerdc")) 122 | 123 | 124 | def create_figure(): 125 | psd = test_pcovar() 126 | pylab.axis([-0.5, 0.5, -60, 0]) 127 | pylab.savefig("psd_covar.png") 128 | 129 | 130 | if __name__ == "__main__": 131 | create_figure() 132 | 
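The AR-to-PSD relationship exercised by test_covar above can also be run on its own; a short sketch, assuming nothing beyond the public calls already used in that file:

    # Covariance-method AR fit followed by conversion to a PSD, as in test_covar.
    from spectrum import arcovar, arma2psd, cshift, marple_data

    ar, rho = arcovar(marple_data, 15)  # order-15 AR coefficients and prediction error
    psd = arma2psd(ar)  # PSD evaluated on the default NFFT grid
    psd = cshift(psd, len(psd) // 2)  # swap positive and negative frequencies
    print(len(ar), rho)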
-------------------------------------------------------------------------------- /test/test_criteria.py: -------------------------------------------------------------------------------- 1 | from spectrum.criteria import * 2 | from spectrum import * 3 | 4 | 5 | 6 | def test_aic(): 7 | order = arange(1, 25) 8 | rho = [aryule(marple_data, i, norm='biased')[1] for i in order] 9 | AIC(len(marple_data), rho, order) 10 | 11 | AIC(len(marple_data), rho, order) 12 | KIC(len(marple_data), rho, order) 13 | AKICc(len(marple_data), rho, order) 14 | FPE(len(marple_data), rho, order) 15 | MDL(len(marple_data), rho, order) 16 | CAT(len(marple_data), rho, order) 17 | 18 | 19 | crit = Criteria("AIC", 20) 20 | try: 21 | crit = Criteria("dummy", 20) 22 | assert False 23 | except: 24 | assert True 25 | 26 | try: 27 | crit = Criteria("AIC", 20) 28 | crit.N = 0 29 | assert False 30 | except: 31 | assert True 32 | 33 | 34 | crit = Criteria("AIC", 20) 35 | crit.N = 40 36 | crit(10, 10) 37 | 38 | 39 | def test_mdl_eigen(): 40 | s = np.array([10, 8 ,6,4,2,1,0.9,0.8,.7,.6]) 41 | mdl_eigen(s, 4) 42 | 43 | -------------------------------------------------------------------------------- /test/test_datasets.py: -------------------------------------------------------------------------------- 1 | #from spectrum import datasets 2 | from spectrum.datasets import * 3 | 4 | def test_marple_data(): 5 | d = marple_data 6 | assert len(d) == 64 7 | 8 | 9 | def test_timeseries(): 10 | data = data_cosine(N=1024, A=0.1, sampling=1024, freq=200) 11 | ts = TimeSeries(data, sampling=1) 12 | #assert ts.N == 1024 13 | #assert ts.sampling == 1024 14 | ts.plot() 15 | 16 | 17 | def test_data_cosine(): 18 | data = data_cosine(N=1024, A=0.1, sampling=1024, freq=200) 19 | 20 | 21 | def test_datafile(): 22 | from spectrum.datasets import dolphin_filename 23 | try: 24 | spectrum_data("testdummy") 25 | assert False 26 | except: 27 | assert True 28 | -------------------------------------------------------------------------------- /test/test_eigen.py: -------------------------------------------------------------------------------- 1 | from spectrum import * 2 | import numpy 3 | #from spectrum import MINEIGVAL 4 | from numpy.testing import assert_almost_equal 5 | 6 | 7 | 8 | def test_mineigval(): 9 | tol = 1e-10 10 | T0=3 11 | T = numpy.array([-2+.5j, .7-1j],dtype=complex) 12 | eigval, eigvec = MINEIGVAL(T0 , T, tol) 13 | print('Eigenvalue=',eigval) 14 | print('Eigenvector=',eigvec) 15 | 16 | assert_almost_equal(eigval, .488694078106) 17 | 18 | expected_eigvec = numpy.array([ 0.13790622 -1.74155903e-02j , 0.21272177 -4.65701963e-18j, 0.13790622 +1.74155903e-02j]) 19 | 20 | # 21 | assert_almost_equal(eigvec, expected_eigvec) 22 | 23 | 24 | -------------------------------------------------------------------------------- /test/test_eigenfre.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy.testing import assert_array_almost_equal 3 | from pylab import linspace, log10, plot, savefig 4 | 5 | from spectrum import ( 6 | data_cosine, 7 | data_two_freqs, 8 | ev, 9 | marple_data, 10 | music, 11 | pev, 12 | pmusic, 13 | spectrum_set_level, 14 | ) 15 | from spectrum.eigenfre import eigen 16 | 17 | 18 | def test_pmusic(): 19 | p = pmusic(marple_data, 15, NSIG=11) 20 | p() 21 | p = pmusic(data_cosine(), 15, NSIG=11, verbose=True) 22 | p() 23 | print(p) 24 | 25 | # test verbosity of the _get_signal_space function 26 | spectrum_set_level("DEBUG") 27 | pmusic(data_two_freqs(), 15, threshold=1)() 28 | 
pmusic(data_two_freqs(), 15, NSIG=11, verbose=True)() 29 | pmusic(data_two_freqs(), 15, criteria="mdl")() 30 | 31 | # 32 | pmusic(data_two_freqs(), 15, NSIG=0)() 33 | 34 | 35 | def test_pev(): 36 | p = pev(marple_data, 15, NSIG=11) 37 | p() 38 | p = pev(data_cosine(), 15, NSIG=11, verbose=True) 39 | p() 40 | print(p) 41 | 42 | 43 | def test_eigen_constructor(): 44 | try: 45 | eigen(marple_data, 8, 4, method="dummy") 46 | assert False 47 | except: 48 | assert True 49 | 50 | # threshold and NSIG cannot be used together 51 | p = pmusic(marple_data, 15, NSIG=11, threshold=2) 52 | 53 | # NSIG must be less than IP and > 0 54 | try: 55 | p = pmusic(marple_data, 15, NSIG=110, threshold=2) 56 | assert False 57 | except: 58 | assert True 59 | p = pmusic(marple_data, 15, NSIG=-10) 60 | 61 | # NSIG and threshold cannot be provided together 62 | try: 63 | eigen(marple_data, 8, NSIG=4, threshold=2) 64 | assert False 65 | except: 66 | assert True 67 | 68 | # NSIG must be positive 69 | try: 70 | eigen(marple_data, 8, NSIG=-10) 71 | assert False 72 | except: 73 | assert True 74 | 75 | # NSIG must be less than P (8) 76 | try: 77 | eigen(marple_data, 8, NSIG=40) 78 | assert False 79 | except: 80 | assert True 81 | 82 | 83 | def test_eigenfre_music(): 84 | 85 | psd, s = music(marple_data, 15, NSIG=11) 86 | 87 | 88 | def test_eigenfre_ev(): 89 | psd, s = ev(marple_data, 15, NSIG=11) 90 | assert_array_almost_equal( 91 | s, 92 | numpy.array( 93 | [ 94 | 4.45510959e01, 95 | 1.01451096e01, 96 | 8.37309134e00, 97 | 7.17637043e00, 98 | 6.62545637e00, 99 | 5.83043837e00, 100 | 4.16284271e00, 101 | 3.69224764e00, 102 | 3.64345761e00, 103 | 3.07519938e00, 104 | 2.05618798e00, 105 | 1.53143913e00, 106 | 8.21242005e-01, 107 | 1.10463229e-01, 108 | 1.02225490e-02, 109 | ] 110 | ), 111 | ) 112 | 113 | 114 | def test_eigen_parameters(): 115 | psd, s = ev(data_cosine(), 15) 116 | psd, s = ev(data_cosine(), 15, NSIG=11) 117 | psd, s = ev(data_cosine(), 15, threshold=2) 118 | 119 | 120 | def create_figure(): 121 | psd = test_eigenfre_music() 122 | f = linspace(-0.5, 0.5, len(psd)) 123 | plot(f, 10 * log10(psd / max(psd)), "--", label="MUSIC 15") 124 | savefig("psd_eigenfre_music.png") 125 | 126 | psd = test_eigenfre_ev() 127 | f = linspace(-0.5, 0.5, len(psd)) 128 | plot(f, 10 * log10(psd / max(psd)), "--", label="EV 15") 129 | savefig("psd_eigenfre_ev.png") 130 | 131 | 132 | if __name__ == "__main__": 133 | create_figure() 134 | -------------------------------------------------------------------------------- /test/test_errors.py: -------------------------------------------------------------------------------- 1 | from spectrum.errors import * 2 | 3 | def test_is_positive(): 4 | assert True == is_positive_integer(1) 5 | 6 | try: 7 | is_positive_integer(-1) 8 | assert False 9 | except: 10 | assert True 11 | try: 12 | is_positive_integer(1.) 
13 | assert False 14 | except: 15 | assert True 16 | 17 | def test_errors(): 18 | 19 | a = SpectrumError() 20 | print(a) 21 | 22 | a = SpectrumChoiceError("dummy", ['valid']) 23 | print(a) 24 | 25 | a = SpectrumPSDError() 26 | print(a) 27 | 28 | a = SpectrumModifiedError() 29 | print(a) 30 | 31 | a = SpectrumARMAError() 32 | print(a) 33 | 34 | a = SpectrumMAError() 35 | print(a) 36 | 37 | a = SpectrumARError() 38 | print(a) 39 | a = SpectrumOrder() 40 | print(a) 41 | a = SpectrumNFFT() 42 | print(a) 43 | 44 | 45 | try: 46 | raise SpectrumModifiedError 47 | assert False 48 | except: 49 | assert True 50 | try: 51 | raise SpectrumError 52 | assert False 53 | except: 54 | assert True 55 | -------------------------------------------------------------------------------- /test/test_io.py: -------------------------------------------------------------------------------- 1 | from spectrum.io import readwav 2 | from spectrum.datasets import dolphin_filename 3 | 4 | def test_readwav(): 5 | data, fs = readwav(dolphin_filename) 6 | assert fs == 22050 7 | -------------------------------------------------------------------------------- /test/test_levinson.py: -------------------------------------------------------------------------------- 1 | from spectrum import * 2 | from spectrum.levinson import * 3 | import numpy 4 | from numpy.testing import assert_array_almost_equal, assert_almost_equal 5 | 6 | 7 | #LEVINSON 8 | T0 = 3 9 | T = numpy.array([-2+0.5j, .7-1j]) 10 | 11 | 12 | expected_P = 1.3221 13 | expected_A = numpy.array([.86316+0.03158j, .34737+0.21053j]) 14 | 15 | 16 | 17 | 18 | def test_levinson(): 19 | 20 | data = [T0] 21 | data.extend(T) 22 | A, P,k = LEVINSON(data) 23 | 24 | #use only 4 digits to compare float P 25 | assert_almost_equal(P, expected_P, 4) 26 | 27 | #compare element by element of vector A 28 | for a1, a2 in zip(A, expected_A): 29 | assert_almost_equal(a1, a2, 3) 30 | 31 | def test_levinson_real(): 32 | r = [5.0000, -1.5450, -3.9547, 3.9331, 1.4681, -4.7500] 33 | A, E, K = LEVINSON(r) 34 | assert_array_almost_equal(A, numpy.array([ 6.14739427e-01, 9.89813712e-01, 4.20968656e-04, 35 | 3.44472001e-03, -7.70967347e-03])) 36 | assert_almost_equal(0.1791451516, E) 37 | 38 | 39 | A, E,k = LEVINSON([1, 0.5,0.3],1) 40 | for a1, a2 in zip(A, [-0.5]): 41 | assert_almost_equal(a1, a2) 42 | assert_almost_equal(E, 0.75) 43 | 44 | 45 | A, E, K = LEVINSON([1,0.5,0.1,0.05]) 46 | assert_array_almost_equal(A, (array([-0.625, 0.275, -0.125]))) 47 | assert_array_almost_equal(E, 0.708749) 48 | assert_array_almost_equal(K, array([-0.5 , 0.2 , -0.125])) 49 | 50 | def test_levinson_others(): 51 | rlevinson([1,2,3], 0.1) 52 | try: 53 | rlevinson([1], 0.1) 54 | assert False 55 | except: 56 | assert True 57 | 58 | # test imag data 59 | rlevinson([1,2,3+1j], 0.1) 60 | 61 | # test singularity 62 | try: 63 | LEVINSON([1,2,3]) 64 | assert False 65 | except: 66 | assert True 67 | LEVINSON([1,2,3], allow_singularity=True) 68 | -------------------------------------------------------------------------------- /test/test_linalg.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from spectrum import corrmtx, csvd, pascal 3 | from numpy.testing import assert_array_almost_equal 4 | 5 | def test_pascal(): 6 | res = pascal(6) 7 | 8 | assert list(res[5]) == list([ 1., 6., 21., 56., 126., 252.]) 9 | try: 10 | pascal(0) 11 | assert False 12 | except: 13 | assert True 14 | 15 | 16 | def test_svd(): 17 | a = numpy.array([[2.8-.4j, 1.6],[3.6-1.2j, 2.4-1.8j],[2+.4j, 
2.8-4.4j]])
18 |     U, S, V = csvd(a)
19 |     assert_array_almost_equal(S, numpy.array([ 7.51711296, 2.96867189]))
20 |
21 |     csvd(a)
22 |
23 |
24 | def test_corrmtx():
25 |     C = corrmtx([1,2,3,4,5,6,7,8+1j], 2, method='modified')
26 |     assert_array_almost_equal(C, numpy.array([[ 3.+0.j, 2.+0.j, 1.+0.j],
27 |         [ 4.+0.j, 3.+0.j, 2.+0.j],
28 |         [ 5.+0.j, 4.+0.j, 3.+0.j],
29 |         [ 6.+0.j, 5.+0.j, 4.+0.j],
30 |         [ 7.+0.j, 6.+0.j, 5.+0.j],
31 |         [ 8.+1.j, 7.+0.j, 6.+0.j],
32 |         [ 1.-0.j, 2.-0.j, 3.-0.j],
33 |         [ 2.-0.j, 3.-0.j, 4.-0.j],
34 |         [ 3.-0.j, 4.-0.j, 5.-0.j],
35 |         [ 4.-0.j, 5.-0.j, 6.-0.j],
36 |         [ 5.-0.j, 6.-0.j, 7.-0.j],
37 |         [ 6.-0.j, 7.-0.j, 8.-1.j]]))
38 |
39 |     C = corrmtx([1,2,3,4,5,6,7,8+1j], 3, method='autocorrelation')
40 |     assert_array_almost_equal(C, numpy.array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
41 |         [ 2.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
42 |         [ 3.+0.j, 2.+0.j, 1.+0.j, 0.+0.j],
43 |         [ 4.+0.j, 3.+0.j, 2.+0.j, 1.+0.j],
44 |         [ 5.+0.j, 4.+0.j, 3.+0.j, 2.+0.j],
45 |         [ 6.+0.j, 5.+0.j, 4.+0.j, 3.+0.j],
46 |         [ 7.+0.j, 6.+0.j, 5.+0.j, 4.+0.j],
47 |         [ 8.+1.j, 7.+0.j, 6.+0.j, 5.+0.j],
48 |         [ 0.+0.j, 8.+1.j, 7.+0.j, 6.+0.j],
49 |         [ 0.+0.j, 0.+0.j, 8.+1.j, 7.+0.j],
50 |         [ 0.+0.j, 0.+0.j, 0.+0.j, 8.+1.j]]))
51 |
52 |
53 |     C = corrmtx([1,2,3,4,5,6,7,8+1j], 3, method='covariance')
54 |     assert_array_almost_equal(C, numpy.array([[ 4.+0.j, 3.+0.j, 2.+0.j, 1.+0.j],
55 |         [ 5.+0.j, 4.+0.j, 3.+0.j, 2.+0.j],
56 |         [ 6.+0.j, 5.+0.j, 4.+0.j, 3.+0.j],
57 |         [ 7.+0.j, 6.+0.j, 5.+0.j, 4.+0.j],
58 |         [ 8.+1.j, 7.+0.j, 6.+0.j, 5.+0.j]]))
59 |
60 |     C = corrmtx([1,2,3,4,5,6,7,8+1j], 3, method='postwindowed')
61 |     assert_array_almost_equal(C, numpy.array([[ 4.+0.j, 3.+0.j, 2.+0.j, 1.+0.j],
62 |         [ 5.+0.j, 4.+0.j, 3.+0.j, 2.+0.j],
63 |         [ 6.+0.j, 5.+0.j, 4.+0.j, 3.+0.j],
64 |         [ 7.+0.j, 6.+0.j, 5.+0.j, 4.+0.j],
65 |         [ 8.+1.j, 7.+0.j, 6.+0.j, 5.+0.j],
66 |         [ 0.+0.j, 8.+1.j, 7.+0.j, 6.+0.j],
67 |         [ 0.+0.j, 0.+0.j, 8.+1.j, 7.+0.j],
68 |         [ 0.+0.j, 0.+0.j, 0.+0.j, 8.+1.j]]))
69 |
70 |     C = corrmtx([1,2,3,4,5,6,7,8+1j], 3, method='prewindowed')
71 |     assert_array_almost_equal(C, numpy.array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
72 |         [ 2.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
73 |         [ 3.+0.j, 2.+0.j, 1.+0.j, 0.+0.j],
74 |         [ 4.+0.j, 3.+0.j, 2.+0.j, 1.+0.j],
75 |         [ 5.+0.j, 4.+0.j, 3.+0.j, 2.+0.j],
76 |         [ 6.+0.j, 5.+0.j, 4.+0.j, 3.+0.j],
77 |         [ 7.+0.j, 6.+0.j, 5.+0.j, 4.+0.j],
78 |         [ 8.+1.j, 7.+0.j, 6.+0.j, 5.+0.j]]))
79 |
80 |
81 |     for method in ['prewindowed','postwindowed','autocorrelation','modified','covariance']:
82 |         corrmtx([1,2,3,4,5,6,7,8], 3, method=method)
83 |
84 |
85 |     try:
86 |         corrmtx([1,2,3,4,5,6,7,8], 3, method='dummy')
87 |         assert False
88 |     except:
89 |         assert True
90 |
-------------------------------------------------------------------------------- /test/test_linear_prediction.py: --------------------------------------------------------------------------------
1 | from spectrum.linear_prediction import *
2 | import numpy
3 | from numpy.testing import assert_array_almost_equal, assert_almost_equal
4 |
5 | ri = [5.0000, -1.5450, -3.9547, 3.9331, 1.4681, -4.7500]
6 | ki = [0.3090 , 0.9800 ,0.0031 , 0.0082 , -0.0082];
7 | ai = [1, 0.6147, 0.9898 , 0.0004, 0.0034, -0.0077]
8 |
9 | def test_ac2poly():
10 |     a,efinal = ac2poly(ri)
11 |     assert_array_almost_equal(a, ai, decimal=4)
12 |     assert_almost_equal(efinal, 0.1791, decimal=4)
13 |
14 |
15 | def test_poly2ac():
16 |     a, efinal = ac2poly(ri)
17 |     r = poly2ac(a, efinal)
18 |     assert_array_almost_equal(a, ai, decimal=4)
19 |
20 |     assert_array_almost_equal(numpy.real(poly2ac(a, efinal)), ri)
21 |
assert sum(numpy.imag(poly2ac(a, efinal))) == 0 22 | 23 | 24 | 25 | def test_rc2poly(): 26 | a, e = rc2poly(ki, 1) 27 | assert_array_almost_equal(a , numpy.array([1.00000000e+00, 6.14816180e-01, 9.89881431e-01, 28 | 2.42604054e-05, 3.15795596e-03, -8.20000000e-03]), decimal=4) 29 | assert_almost_equal(e, 0.035813791455383194) 30 | 31 | 32 | def test_rc2ac(): 33 | R = rc2ac(ki, 1) 34 | assert_array_almost_equal(R, numpy.array([ 1.00000000+0.j, -0.30900000+0.j, -0.79094762-0.j, 0.78662653-0.j, 0.29362938-0.j, -0.95000010-0.j])) 35 | 36 | 37 | def test_ac2rc(): 38 | a, r0 = ac2rc(ri) 39 | assert_almost_equal(r0, 5) 40 | 41 | 42 | def test_poly2rc(): 43 | a, efinal = ac2poly(ri) 44 | k = poly2rc(numpy.insert(a, 0, 1), efinal) 45 | #assert_array_almost_equal(ki, k) 46 | #\numpy.array([ 0.309 , 0.97999158, 0.00302085, 0.00818465, -0.00770967])) 47 | 48 | 49 | def test_lar2rc(): 50 | lar2rc(.1) 51 | lar2rc([.1,.2]) 52 | 53 | def test_rc2lar(): 54 | rc2lar([0.5,0.9]) 55 | try: 56 | rc2lar([2]) 57 | assert False 58 | except: 59 | assert True 60 | 61 | def test_is2rc(): 62 | is2rc([1,2]) 63 | 64 | def test_rc2is(): 65 | #rc2is(0.5) 66 | rc2is([0.5,0.9]) 67 | try: 68 | rc2is([2]) 69 | assert False 70 | except: 71 | assert True 72 | 73 | 74 | def test_lsf2poly_poly2lsf(): 75 | lsf = [0.7842 , 1.5605 , 1.8776 , 1.8984, 2.3593] 76 | a = lsf2poly(lsf) 77 | assert_array_almost_equal(a, numpy.array([ 1.00000000e+00, 6.14837835e-01, 9.89884967e-01, 78 | 9.31594056e-05, 3.13713832e-03, -8.12002261e-03])) 79 | 80 | lsf2 = poly2lsf(a) 81 | assert_array_almost_equal(lsf, lsf2) 82 | 83 | -------------------------------------------------------------------------------- /test/test_lms.py: -------------------------------------------------------------------------------- 1 | #from spectrum import lms 2 | #from spectrum.lms import LMS 3 | 4 | 5 | 6 | def _test_lms(): 7 | U = 0.98 8 | x = [1,2,3,4,5] 9 | ar = [1,0.5,0.2] 10 | LMS(x, U, ar) 11 | -------------------------------------------------------------------------------- /test/test_lpc.py: -------------------------------------------------------------------------------- 1 | from spectrum import * 2 | import numpy 3 | from numpy.testing import assert_almost_equal, assert_array_almost_equal 4 | 5 | def test_lpc_real(): 6 | a, e = lpc([1,2,3,4,5,6,7,8], 3) 7 | assert_array_almost_equal(a, numpy.array([-0.88472690 -4.69340421e-17j, 0.01650407 +2.26160049e-17j, 0.07301958 +1.88056678e-17j])) 8 | assert_almost_equal(e, 9.2661256972730612) 9 | 10 | -------------------------------------------------------------------------------- /test/test_minvar.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import pylab 3 | from numpy.testing import assert_array_almost_equal 4 | 5 | from spectrum import * 6 | from spectrum import marple_data 7 | 8 | 9 | def test_minvar_values(): 10 | res = minvar(marple_data, 15, NFFT=16) 11 | assert_array_almost_equal( 12 | res[0], 13 | numpy.array( 14 | [ 15 | 9.34915901e-07, 16 | 9.86770648e-07, 17 | 1.38729656e-06, 18 | 1.74456323e-06, 19 | 4.68522681e-06, 20 | 8.93086143e-05, 21 | -4.43764511e-06, 22 | -3.07585044e-06, 23 | -3.05032082e-06, 24 | -3.91693034e-06, 25 | -5.80606841e-05, 26 | 6.18226003e-06, 27 | 1.83189816e-06, 28 | 1.51951850e-06, 29 | 9.63865479e-07, 30 | 1.00497139e-06, 31 | ] 32 | ), 33 | ) 34 | 35 | 36 | def test_minvar(): 37 | from spectrum import tools 38 | 39 | res = minvar(marple_data, 15, 1.0) 40 | psd = res[0] 41 | newpsd = tools.cshift(psd, len(psd) // 2) # switch 
positive and negative freq 42 | 43 | 44 | def test_pminvar(): 45 | psd = pminvar(marple_data, 15) 46 | psd() 47 | print(psd) 48 | psd = pminvar([1, 2, 3, 4, 5, 6, 7, -8], 2) 49 | psd() 50 | 51 | 52 | def create_figure(): 53 | res = test_minvar() 54 | psd = res[0] 55 | f = pylab.linspace(-0.5, 0.5, len(psd)) 56 | pylab.plot(f, 10 * pylab.log10(psd / max(psd)), label="minvar 15") 57 | pylab.savefig("psd_minvar.png") 58 | 59 | 60 | if __name__ == "__main__": 61 | create_figure() 62 | -------------------------------------------------------------------------------- /test/test_modcovar.py: -------------------------------------------------------------------------------- 1 | import pylab 2 | import pytest 3 | from numpy.testing import assert_almost_equal, assert_array_almost_equal 4 | 5 | from spectrum import * 6 | 7 | 8 | def test_modcovar_ip_null(): 9 | a, p, pv = modcovar_marple(marple_data, 0) 10 | assert_almost_equal(p, 1.780459894489305) 11 | assert len(a) == 0 12 | assert len(pv) == 0 13 | 14 | 15 | def test_modcovar(): 16 | a, p, pv = modcovar_marple(marple_data, 15) 17 | PSD = arma2psd(a, sides="centerdc") 18 | 19 | 20 | def test_covar_simplified(): 21 | a, b, c = modcovar_marple(marple_data, 15) 22 | a2, e2 = modcovar(marple_data, 15) 23 | assert pytest.approx(a[0:15]) == a2 # af contains zeros after order=15 24 | 25 | 26 | def test_pmodcovar2(): 27 | p = pmodcovar([1, 2, 3, 4, 5, 6, 7, 8], 2) 28 | p.plot() 29 | assert len(p.psd) == 5 30 | p = pmodcovar([1, 2, 3, 4, 5, 6, 7, 8, 9], 2) 31 | p.plot() 32 | assert len(p.psd) == 5 33 | 34 | 35 | def test_pmodcovar(): 36 | # test real data 37 | p = pmodcovar(data_cosine(), 15, scale_by_freq=True) 38 | p() 39 | print(p) 40 | 41 | # and complex data 42 | p = pmodcovar(marple_data, 15, NFFT=4096) 43 | p() 44 | 45 | # return psd for the create_figure function 46 | p.sides = "centerdc" 47 | 48 | 49 | def create_figure(): 50 | newpsd = test_modcovar() 51 | 52 | pylab.plot(pylab.linspace(-0.5, 0.5, 4096), 10 * pylab.log10(newpsd / max(newpsd))) 53 | pylab.axis([-0.5, 0.5, -60, 0]) 54 | pylab.savefig("psd_modcovar.png") 55 | 56 | 57 | if __name__ == "__main__": 58 | create_figure() 59 | -------------------------------------------------------------------------------- /test/test_mtm.py: -------------------------------------------------------------------------------- 1 | from spectrum import * 2 | from spectrum.mtm import dpss, pmtm 3 | from spectrum import data_cosine 4 | from spectrum import mtm 5 | 6 | def test_dpss(): 7 | dpss(64, 2.5, 4) 8 | 9 | 10 | def test_pmtm(): 11 | data = data_cosine(N=64, A=0.1, sampling=1024, freq=200) 12 | res = pmtm(data, 2.5, 4, show=False) 13 | res = pmtm(data, 2.5, show=False) 14 | 15 | 16 | 17 | res = pmtm(data, 2.5, show=False, method="eigen") 18 | res = pmtm(data, 2.5, show=False, method="unity") 19 | res = pmtm(data, 2.5, method="eigen", show=True) 20 | res = pmtm(data, 2.5, method="adapt", show=True) 21 | #res = pmtm(data, 2.5, show=False, method="eigen", show=True) 22 | 23 | # e and v must be provided together 24 | try: 25 | res = pmtm(data, 2.5, show=False, e=1, v=None) 26 | assert False 27 | except: 28 | assert True 29 | 30 | # provide v and e 31 | v,e = dpss(64,4,2) 32 | pmtm(marple_data, NW=4, k=2, v=v, e=e); 33 | 34 | 35 | try: 36 | pmtm(marple_data, NW=None, k=2); 37 | assert False 38 | except: 39 | assert True 40 | 41 | 42 | def test_fftconvolve(): 43 | from spectrum import mtm 44 | from pylab import randn 45 | mtm._fftconvolve(randn(128), randn(128),mode="full") 46 | mtm._fftconvolve(randn(128), 
randn(128), mode="same")
47 |     mtm._fftconvolve(randn(128), randn(128), mode="valid")
48 |
49 |
50 | def test_crosscorr():
51 |     from spectrum import mtm
52 |     from pylab import randn
53 |     mtm._crosscorr(randn(128), randn(128), all_lags=True)
54 |     mtm._crosscorr(randn(128), randn(128), all_lags=False)
55 |
56 |
57 | def test_mtm():
58 |     mtm._other_dpss_method(64,4,10)
59 |
60 |
61 | def test_Multitapering():
62 |     p = MultiTapering(data_two_freqs(), 4,2)
63 |     p()
64 |     p = MultiTapering(marple_data, 4,2, method="eigen")
65 |     p()
66 |     print(p)
67 |     p._str_title()
68 |
-------------------------------------------------------------------------------- /test/test_periodogram.py: --------------------------------------------------------------------------------
1 | from pylab import linspace, log10, plot, randn, savefig, ylim
2 |
3 | from spectrum import *
4 | from spectrum.periodogram import *
5 |
6 |
7 | def test_Periodogram():
8 |     p = Periodogram(marple_data)
9 |     p()
10 |     p.plot()
11 |     print(p)
12 |
13 |
14 | def test_periodogram():
15 |     """check that rho is correct (appendix 10.A) and reproduce figure 10.2"""
16 |     psd = speriodogram(marple_data)
17 |
18 |
19 | def test_daniell_periodogram():
20 |     """check that rho is correct (appendix 10.A) and reproduce figure 10.2"""
21 |     psd = DaniellPeriodogram(datasets.data_cosine(N=1024), 8, NFFT=1024)
22 |     psd = DaniellPeriodogram(marple_data, 8, NFFT=1024)
23 |     psd = DaniellPeriodogram(data_two_freqs(), 8, NFFT=1024)
24 |     p = pdaniell(data_two_freqs(), 8, NFFT=1024)
25 |     p()
26 |     print(p)
27 |     p._str_title()
28 |
29 |
30 | def test_speriodogram_2d():
31 |     data = randn(1024, 2)
32 |     speriodogram(data)
33 |
34 |     data = np.array([marple_data, marple_data]).reshape(64, 2)
35 |     speriodogram(data)
36 |
37 |
38 | def test_welch():
39 |     WelchPeriodogram(data_two_freqs())
40 |
41 |
42 | def test_Periodogram_two_freqs():
43 |     p = Periodogram(data_two_freqs())
44 |     p.plot()
45 |     p = Periodogram(data_two_freqs(), scale_by_freq=True)
46 |     p.plot()
47 |     print(p)
48 |     p._str_title()
49 |
50 |
51 | def test_periodogram_real_vs_octave():
52 |     # the periodogram is tested against the octave output that is "identical"
53 |     # for the following real example
54 |     import numpy as np
55 |
56 |     PSDs = []
57 |     for this in range(100):
58 |         xx = data_two_freqs()
59 |         p = Periodogram(xx, 4, window="hanning")
60 |         p()
61 |         PSDs.append(p.psd)
62 |     M = 10 * log10(np.mean(PSDs, axis=0))
63 |     assert max(M) > 10 # 10.939020375396096
64 |
65 |     assert np.mean(M[M < -35]) > -50
66 |     assert np.mean(M[M < -35]) < -40
67 |
68 |
69 | def create_figure():
70 |     psd = test_periodogram()
71 |     ylim([-50, 0])
72 |     savefig("psd_periodogram.png")
73 |
74 |
75 | if __name__ == "__main__":
76 |     create_figure()
77 |
-------------------------------------------------------------------------------- /test/test_spectrum.py: --------------------------------------------------------------------------------
1 | import spectrum
2 | from spectrum import *
3 |
4 |
5 | def test_spectrum():
6 |     assert spectrum.default_NFFT
7 |     spectrum_set_level("DEBUG")
8 |
-------------------------------------------------------------------------------- /test/test_spetrogram.py: --------------------------------------------------------------------------------
1 |
2 | from spectrum import Spectrogram, dolphin_filename, readwav
3 |
4 |
5 | def test_spectrogram():
6 |
7 |     data, samplerate = readwav(dolphin_filename)
8 |     p = Spectrogram(data, ws=128, W=4096, sampling=samplerate)
9 |     p.periodogram()
10 |     p.plot()
11 |
12 |
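test_spectrogram above only checks that the calls run; a sketch of the same steps as a standalone script that also saves the figure (the output file name is illustrative):

    # Spectrogram of the bundled dolphin recording, following test_spectrogram above.
    from pylab import savefig
    from spectrum import Spectrogram, dolphin_filename, readwav

    data, samplerate = readwav(dolphin_filename)  # 22050 Hz WAV, see test_io.py
    p = Spectrogram(data, ws=128, W=4096, sampling=samplerate)
    p.periodogram()  # compute the successive periodograms
    p.plot()
    savefig("spectrogram_dolphin.png")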
-------------------------------------------------------------------------------- /test/test_toeplitz.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import pytest 3 | 4 | from spectrum import toeplitz 5 | from spectrum.toeplitz import HERMTOEP, TOEPLITZ 6 | 7 | 8 | def test_toeplitz(): 9 | T0 = 3 + 0j 10 | TC = numpy.array([-2.0 + 0.5j, 0.7 - 1j]) 11 | TR = numpy.array([-0.2 - 0.4j, 0.3 - 0.6j]) 12 | Z = numpy.array([1.0 + 3.0j, 2.0 - 1.0j, 0.5 + 0.8j]) 13 | X = TOEPLITZ(T0, TC, TR, Z) 14 | expected_X = numpy.array([0.23518549 + 1.24372203j, 1.03025807 + 0.53575117j, 0.47334662 + 0.24031779j]) 15 | assert X == pytest.approx(expected_X) 16 | 17 | 18 | def test_hermtoep(): 19 | T0 = 3.0 20 | T = numpy.array([-2.0 + 0.5j, 0.7 - 1j]) 21 | Z = numpy.array([1.0 + 3.0j, 2.0 - 1.0j, 0.5 + 0.8j]) 22 | X = HERMTOEP(T0, T, Z) 23 | expected_X = numpy.array([2.29697452 + 1.69904459j, 3.31847134 + 1.29697452j, 1.49283439 + 0.94745223j]) 24 | assert X == pytest.approx(expected_X) 25 | -------------------------------------------------------------------------------- /test/test_tools.py: -------------------------------------------------------------------------------- 1 | from numpy.testing import assert_almost_equal, assert_array_almost_equal 2 | from numpy import * 3 | from spectrum.tools import * 4 | 5 | 6 | def test_fftshift(): 7 | fftshift([100,2,3,4,5]) 8 | 9 | def test_nextpow2(): 10 | res = nextpow2([255,256,257]) 11 | assert res[0] == 8. 12 | assert res[1] == 8. 13 | assert res[2] == 9. 14 | 15 | def test_swapsides(): 16 | from spectrum.tools import _swapsides 17 | assert all(_swapsides([-2,-1,1,2]) == array([2,-2,-1])) 18 | 19 | def test_twosided_zero(): 20 | from spectrum.tools import _twosided_zerolag 21 | assert all(_twosided_zerolag([3,2,1],4) == array([1,2,3,4,3,2,1])) 22 | 23 | def test_pow2db_db2pow(): 24 | p1 = 10. 25 | x = pow2db(p1) 26 | p2 = db2pow(x) 27 | assert_almost_equal(p1, p2) 28 | 29 | def test_mag2db_db2mag(): 30 | p1 = 10. 31 | x = mag2db(p1) 32 | p2 = db2mag(x) 33 | assert_almost_equal(p1, p2) 34 | 35 | def test_cshift(): 36 | a = [1,2,3,4] 37 | b = cshift(a, 2) 38 | assert_array_almost_equal([3,4,1,2], b) 39 | b = cshift(a, 2.) 
40 | assert_array_almost_equal([3,4,1,2], b) 41 | 42 | 43 | def _test_twosided_zerolag(): 44 | data = [3,2,1] 45 | zerolag = 4 46 | res = twosided_zerolag(data, zerolag) 47 | assert_array_almost_equal(array([1, 2, 3, 4, 3, 2, 1]), res) 48 | 49 | def test_twosided(): 50 | a = [1,2,3] 51 | b = twosided(a) 52 | assert_array_almost_equal(b, array([3, 2, 1, 2, 3])) 53 | 54 | def _test_swap_sides(): 55 | x = [-2, -1, 1, 2] 56 | b = swapsides(x) 57 | assert_array_almost_equal(b, array([2, -2, -1])) 58 | 59 | def _test_fftshift(): 60 | x = [1,2,3,4,5] 61 | y = fftshift([1,2,3,5,4]) 62 | assert_array_almost_equal(y, array([5, 4, 1, 2, 3])) 63 | 64 | 65 | def test_onesided_twosided(): 66 | x = [10, 2, 3, 4, 6] 67 | y = onesided_2_twosided(x) 68 | x2 = twosided_2_onesided(y) 69 | assert_array_almost_equal(x, x2) 70 | 71 | def test_centeddc_twosided(): 72 | x = [10, 2, 3, 4, 4, 3, 2, 20] 73 | y = centerdc_2_twosided(x) 74 | x2 = twosided_2_centerdc(y) 75 | assert_array_almost_equal(x, x2) 76 | -------------------------------------------------------------------------------- /test/test_transfer.py: -------------------------------------------------------------------------------- 1 | from spectrum import transfer 2 | from numpy.testing import assert_almost_equal 3 | import numpy as np 4 | 5 | def test_tf2zpk(): 6 | from scipy import signal 7 | [b,a] = signal.butter(3,.4); 8 | z,p,k = transfer.tf2zpk(b,a) 9 | assert_almost_equal(k, 0.09853, 4) 10 | assert_almost_equal(z[0], -1.00000293 +5.07009147e-06j, 4) 11 | assert_almost_equal(z[1], -1.00000293 -5.07009147e-06j, 4) 12 | assert_almost_equal(z[2], -0.99999415 +0.00000000e+00j, 4) 13 | assert_almost_equal(p[0], 0.20942804+0.55819948j) 14 | assert_almost_equal(p[1], 0.20942804-0.55819948j) 15 | assert_almost_equal(p[2], 0.15838444+0.j) 16 | 17 | 18 | def test_tf2zp(): 19 | b = [2, 3, 0] 20 | a = [1, 0.4, 1] 21 | [z,p,k] = transfer.tf2zp(b,a) 22 | assert all(z == np.array([-1.5,0])) 23 | assert k == 2 24 | assert_almost_equal(p[0], -0.2000 + 0.9798j,4) 25 | assert_almost_equal(p[1], -0.2000 - 0.9798j,4) 26 | 27 | transfer.zp2tf(z,p,k) 28 | 29 | def test_eqtlength(): 30 | a, b = transfer.eqtflength([1,2,3,4], np.array([1,2])) 31 | assert all(b == np.array([1,2,0,0])) 32 | a, b = transfer.eqtflength([1,2,3,4], [1,2]) 33 | assert b == [1,2,0,0] 34 | a, b = transfer.eqtflength(np.array([1,2]), [1,2,3,4]) 35 | assert all(a == np.array([1,2,0,0])) 36 | a, b = transfer.eqtflength([1,2], [1,2,3,4]) 37 | assert a == [1,2,0,0] 38 | a, b = transfer.eqtflength([1,2], [1,2]) 39 | assert a == b 40 | 41 | 42 | def test_latc2tf(): 43 | try: 44 | transfer.latc2tf() 45 | assert False 46 | except NotImplementedError: 47 | assert True 48 | except: 49 | assert False 50 | 51 | 52 | def test_latcfilt(): 53 | try: 54 | transfer.latcfilt() 55 | assert False 56 | except NotImplementedError: 57 | assert True 58 | except: 59 | assert False 60 | 61 | 62 | def test_tf2ss(): 63 | try: 64 | transfer.tf2ss() 65 | assert False 66 | except NotImplementedError: 67 | assert True 68 | except: 69 | assert False 70 | 71 | 72 | def test_tf2sos(): 73 | try: 74 | transfer.tf2sos() 75 | assert False 76 | except NotImplementedError: 77 | assert True 78 | except: 79 | assert False 80 | 81 | 82 | def test_ss2zpk(): 83 | z,p,k = [1.5,1], [-0.2,1], 2. 
84 | 85 | zp,pp,kp = transfer.ss2zpk(*transfer.zpk2ss(z,p,k)) 86 | #assert zp == z 87 | # FIXME: this fails 88 | #assert pp == p 89 | assert k == kp 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | -------------------------------------------------------------------------------- /test/test_waveform.py: -------------------------------------------------------------------------------- 1 | from spectrum import * 2 | from numpy import linspace 3 | 4 | 5 | def test_morlet(): 6 | x = morlet(0,1,100) 7 | try: 8 | x = morlet(0,1,-100) 9 | assert False 10 | except: 11 | assert True 12 | 13 | 14 | def test_chirp(): 15 | x = chirp(linspace(0,1,1000)) 16 | x = chirp(linspace(0,1,1000), form='linear') 17 | x = chirp(linspace(0,1,1000), form='quadratic') 18 | x = chirp(linspace(0,1,1000), form='logarithmic') 19 | try: 20 | x = chirp(linspace(0,1,1000), form='dummy') 21 | assert False 22 | except: 23 | assert True 24 | 25 | 26 | def test_mexican(): 27 | mexican(0,1,10) 28 | try: 29 | mexican(0,1,0) 30 | assert False 31 | except: 32 | assert True 33 | 34 | def test_meyeraux(): 35 | meyeraux(10) 36 | -------------------------------------------------------------------------------- /test/test_yulewalker.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from numpy.testing import assert_almost_equal 3 | from pylab import axis, linspace, log10, plot, savefig 4 | 5 | from spectrum import * 6 | 7 | 8 | # do not change. used to create figure 9 | def test_yule(): 10 | ar, P, c = aryule(marple_data, 15, norm="biased") 11 | psd = arma2psd(ar) 12 | 13 | 14 | # test functional checked versus octave 15 | def test_yule_data(): 16 | ar, v, c = aryule([1, -1, 1, 1, 1], 2, norm="biased") 17 | assert ar[0] == pytest.approx(0.0 + 0.0j) 18 | assert ar[1] == pytest.approx(-0.2 + 0.0j) 19 | assert v == pytest.approx(0.95999999999999996) 20 | assert c[0] == pytest.approx(0.0 + 0.0j) 21 | assert c[1] == pytest.approx(-0.2 + 0.0j) 22 | 23 | 24 | def test_pyule(): 25 | p = pyule(marple_data, 15) 26 | p() 27 | p.plot() 28 | print(p) 29 | 30 | 31 | # do not change. used to create figure 32 | def create_figure(): 33 | psd = test_yule() 34 | psd = cshift(psd, len(psd) / 2) # switch positive and negative freq 35 | 36 | plot(linspace(-0.5, 0.5, 4096), 10 * log10(psd / max(psd))) 37 | axis([-0.5, 0.5, -60, 0]) 38 | savefig("psd_yulewalker.png") 39 | 40 | 41 | if __name__ == "__main__": 42 | create_figure() 43 | --------------------------------------------------------------------------------
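The Yule-Walker tests above fix the model order at 15. Combining them with the criteria exercised in test_criteria.py gives a simple order-selection sketch; it assumes AIC returns one value per candidate order, as its use in test_criteria.py suggests:

    # Choose a Yule-Walker model order for marple_data with the AIC criterion,
    # reusing the calls from test_criteria.py and test_yule above.
    from numpy import arange, argmin
    from spectrum import aryule, marple_data
    from spectrum.criteria import AIC

    orders = arange(1, 25)
    rho = [aryule(marple_data, i, norm="biased")[1] for i in orders]
    aic = AIC(len(marple_data), rho, orders)  # assumed to return one AIC value per order
    print("suggested order:", orders[argmin(aic)])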