├── .github └── workflows │ ├── publish-to-test-pypi.yml │ ├── pytest.yml │ └── sphinx.yml ├── .gitignore ├── .nojekyll ├── .travis.yml ├── LICENCE ├── README.md ├── bispy ├── __init__.py ├── filters.py ├── qfft.py ├── signals.py ├── spectral.py ├── timefrequency.py └── utils.py ├── doc-requirements.txt ├── docs ├── .DS_Store ├── Makefile ├── conf.py ├── index.rst ├── make.bat ├── reference.filters.rst ├── reference.qfft.rst ├── reference.rst ├── reference.signals.rst ├── reference.spectral.rst ├── reference.timefrequency.rst ├── reference.utils.rst ├── requirements.txt └── tutorials │ ├── index.rst │ ├── spectralanalysis.rst │ ├── spectralanalysisTutorial_files │ ├── output_17_1.png │ ├── output_18_1.png │ ├── output_25_0.png │ ├── output_31_1.png │ ├── output_32_1.png │ ├── output_39_2.png │ ├── output_42_2.png │ ├── output_45_2.png │ └── output_9_0.png │ ├── timefrequency.rst │ └── timefrequencyTutorial_files │ ├── output_11_0.png │ ├── output_17_0.png │ ├── output_21_0.png │ ├── output_31_0.png │ ├── output_37_0.png │ └── output_41_0.png ├── notebooks ├── spectralanalysisTutorial.ipynb └── timefrequencyTutorial.ipynb ├── requirements.txt ├── scripts └── install.sh ├── setup.py └── tests ├── .DS_Store ├── test_plots.py └── test_timefrequency.py /.github/workflows/publish-to-test-pypi.yml: -------------------------------------------------------------------------------- 1 | name: Publish Python 🐍 distributions 📦 to PyPI and TestPyPI 2 | 3 | on: push 4 | 5 | jobs: 6 | build-n-publish: 7 | name: Build and publish Python 🐍 distributions 📦 to PyPI and TestPyPI 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v3 11 | - name: Set up Python 12 | uses: actions/setup-python@v4 13 | with: 14 | python-version: "3.x" 15 | 16 | - name: Install pypa/build 17 | run: >- 18 | python3 -m 19 | pip install 20 | build 21 | --user 22 | - name: Build a binary wheel and a source tarball 23 | run: >- 24 | python3 -m 25 | build 26 | --sdist 27 | --wheel 28 | --outdir dist/ 29 | . 30 | - name: Publish distribution 📦 to Test PyPI 31 | if: startsWith(github.ref, 'refs/tags') 32 | uses: pypa/gh-action-pypi-publish@release/v1 33 | with: 34 | password: ${{ secrets.TEST_PYPI_API_TOKEN }} 35 | repository-url: https://test.pypi.org/legacy/ 36 | 37 | - name: Publish distribution 📦 to PyPI 38 | if: startsWith(github.ref, 'refs/tags') 39 | uses: pypa/gh-action-pypi-publish@release/v1 40 | with: 41 | password: ${{ secrets.PYPI_API_TOKEN }} 42 | -------------------------------------------------------------------------------- /.github/workflows/pytest.yml: -------------------------------------------------------------------------------- 1 | name: Pytest 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | 8 | runs-on: ubuntu-latest 9 | strategy: 10 | matrix: 11 | python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] 12 | 13 | steps: 14 | - uses: actions/checkout@v3 15 | - name: Set up Python ${{ matrix.python-version }} 16 | uses: actions/setup-python@v4 17 | with: 18 | python-version: ${{ matrix.python-version }} 19 | - name: Install dependencies 20 | run: | 21 | python -m pip install --upgrade pip 22 | pip install ruff pytest 23 | pip install -e . 24 | # - name: Lint with ruff 25 | # run: | 26 | # # stop the build if there are Python syntax errors or undefined names 27 | # ruff --format=github --select=E9,F63,F7,F82 --target-version=py37 . 28 | # # default set of ruff rules with GitHub Annotations 29 | # ruff --format=github --target-version=py37 . 30 | - name: Test with pytest 31 | run: | 32 | pytest tests/ 33 | -------------------------------------------------------------------------------- /.github/workflows/sphinx.yml: -------------------------------------------------------------------------------- 1 | name: Docs 2 | on: 3 | push: 4 | branches: 5 | - main 6 | paths: 7 | - "src/**" 8 | - "docs/**" 9 | - "!**.md" 10 | - "!.github/workflows/ci.yml" 11 | pull_request: 12 | paths-ignore: 13 | - "**.md" 14 | 15 | # Allows you to run this workflow manually from the Actions tab 16 | workflow_dispatch: 17 | permissions: 18 | contents: write 19 | jobs: 20 | docs: 21 | runs-on: ubuntu-latest 22 | steps: 23 | - uses: actions/checkout@v3 24 | - uses: actions/setup-python@v3 25 | - name: Install dependencies 26 | run: | 27 | pip install numpydoc m2r2 28 | pip install -e . 29 | pip install sphinx sphinx_rtd_theme 30 | 31 | - name: Build docs with sphinx 32 | run: sphinx-build -b html docs docs/_build/html 33 | 34 | - name: Deploy documentation to gh-pages branch 35 | uses: s0/git-publish-subdir-action@develop 36 | env: 37 | REPO: self 38 | BRANCH: gh-pages 39 | FOLDER: docs/_build/html 40 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | *.pyc 3 | 4 | # Vim ignoring swap files 5 | 6 | *~ 7 | *.swp 8 | *.swo 9 | 10 | # doc files 11 | /docs/_build/ 12 | /docs/generated/ 13 | 14 | # notebooks 15 | *.ipynb_checkpoints 16 | /notebooks/datasets/ 17 | 18 | # misc 19 | /bispy.egg-info 20 | -------------------------------------------------------------------------------- /.nojekyll: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # The configurations you want to execute 2 | # That is, a combination of operating system and python version 3 | # Please add or remove for your needs. 4 | # Check all possible values for python in here: https://docs.travis-ci.com/user/languages/python/ 5 | # See how to further configure the matrix of combinations in here: https://docs.travis-ci.com/user/multi-os/ 6 | language: python 7 | 8 | cache: pip 9 | 10 | # Uncomment if you need super powers (e.g., for apt-get) 11 | # sudo: require 12 | 13 | # To use other python versions in osx, set the PYTHON variable with the version supported by pyenv. 14 | # Check available versions with `pyenv install --list'. 15 | matrix: 16 | include: 17 | - os: linux 18 | python: 3.5 19 | 20 | # Command to install dependencies for each configuration. 21 | install: 22 | # For OSX, you may want to call a script installing virtual env and so on. 23 | - ./scripts/install.sh 24 | - pip install -U pytest 25 | - CI=false pip install . 26 | 27 | # Command to run tests. These are run for each configuration 28 | script: 29 | - python --version 30 | - pytest 31 | -------------------------------------------------------------------------------- /LICENCE: -------------------------------------------------------------------------------- 1 | Attribution 4.0 International 2 | 3 | ======================================================================= 4 | 5 | Creative Commons Corporation ("Creative Commons") is not a law firm and 6 | does not provide legal services or legal advice. Distribution of 7 | Creative Commons public licenses does not create a lawyer-client or 8 | other relationship. Creative Commons makes its licenses and related 9 | information available on an "as-is" basis. Creative Commons gives no 10 | warranties regarding its licenses, any material licensed under their 11 | terms and conditions, or any related information. Creative Commons 12 | disclaims all liability for damages resulting from their use to the 13 | fullest extent possible. 14 | 15 | Using Creative Commons Public Licenses 16 | 17 | Creative Commons public licenses provide a standard set of terms and 18 | conditions that creators and other rights holders may use to share 19 | original works of authorship and other material subject to copyright 20 | and certain other rights specified in the public license below. The 21 | following considerations are for informational purposes only, are not 22 | exhaustive, and do not form part of our licenses. 23 | 24 | Considerations for licensors: Our public licenses are 25 | intended for use by those authorized to give the public 26 | permission to use material in ways otherwise restricted by 27 | copyright and certain other rights. Our licenses are 28 | irrevocable. Licensors should read and understand the terms 29 | and conditions of the license they choose before applying it. 30 | Licensors should also secure all rights necessary before 31 | applying our licenses so that the public can reuse the 32 | material as expected. Licensors should clearly mark any 33 | material not subject to the license. This includes other CC- 34 | licensed material, or material used under an exception or 35 | limitation to copyright. More considerations for licensors: 36 | wiki.creativecommons.org/Considerations_for_licensors 37 | 38 | Considerations for the public: By using one of our public 39 | licenses, a licensor grants the public permission to use the 40 | licensed material under specified terms and conditions. If 41 | the licensor's permission is not necessary for any reason--for 42 | example, because of any applicable exception or limitation to 43 | copyright--then that use is not regulated by the license. Our 44 | licenses grant only permissions under copyright and certain 45 | other rights that a licensor has authority to grant. Use of 46 | the licensed material may still be restricted for other 47 | reasons, including because others have copyright or other 48 | rights in the material. A licensor may make special requests, 49 | such as asking that all changes be marked or described. 50 | Although not required by our licenses, you are encouraged to 51 | respect those requests where reasonable. More_considerations 52 | for the public: 53 | wiki.creativecommons.org/Considerations_for_licensees 54 | 55 | ======================================================================= 56 | 57 | Creative Commons Attribution 4.0 International Public License 58 | 59 | By exercising the Licensed Rights (defined below), You accept and agree 60 | to be bound by the terms and conditions of this Creative Commons 61 | Attribution 4.0 International Public License ("Public License"). To the 62 | extent this Public License may be interpreted as a contract, You are 63 | granted the Licensed Rights in consideration of Your acceptance of 64 | these terms and conditions, and the Licensor grants You such rights in 65 | consideration of benefits the Licensor receives from making the 66 | Licensed Material available under these terms and conditions. 67 | 68 | 69 | Section 1 -- Definitions. 70 | 71 | a. Adapted Material means material subject to Copyright and Similar 72 | Rights that is derived from or based upon the Licensed Material 73 | and in which the Licensed Material is translated, altered, 74 | arranged, transformed, or otherwise modified in a manner requiring 75 | permission under the Copyright and Similar Rights held by the 76 | Licensor. For purposes of this Public License, where the Licensed 77 | Material is a musical work, performance, or sound recording, 78 | Adapted Material is always produced where the Licensed Material is 79 | synched in timed relation with a moving image. 80 | 81 | b. Adapter's License means the license You apply to Your Copyright 82 | and Similar Rights in Your contributions to Adapted Material in 83 | accordance with the terms and conditions of this Public License. 84 | 85 | c. Copyright and Similar Rights means copyright and/or similar rights 86 | closely related to copyright including, without limitation, 87 | performance, broadcast, sound recording, and Sui Generis Database 88 | Rights, without regard to how the rights are labeled or 89 | categorized. For purposes of this Public License, the rights 90 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 91 | Rights. 92 | 93 | d. Effective Technological Measures means those measures that, in the 94 | absence of proper authority, may not be circumvented under laws 95 | fulfilling obligations under Article 11 of the WIPO Copyright 96 | Treaty adopted on December 20, 1996, and/or similar international 97 | agreements. 98 | 99 | e. Exceptions and Limitations means fair use, fair dealing, and/or 100 | any other exception or limitation to Copyright and Similar Rights 101 | that applies to Your use of the Licensed Material. 102 | 103 | f. Licensed Material means the artistic or literary work, database, 104 | or other material to which the Licensor applied this Public 105 | License. 106 | 107 | g. Licensed Rights means the rights granted to You subject to the 108 | terms and conditions of this Public License, which are limited to 109 | all Copyright and Similar Rights that apply to Your use of the 110 | Licensed Material and that the Licensor has authority to license. 111 | 112 | h. Licensor means the individual(s) or entity(ies) granting rights 113 | under this Public License. 114 | 115 | i. Share means to provide material to the public by any means or 116 | process that requires permission under the Licensed Rights, such 117 | as reproduction, public display, public performance, distribution, 118 | dissemination, communication, or importation, and to make material 119 | available to the public including in ways that members of the 120 | public may access the material from a place and at a time 121 | individually chosen by them. 122 | 123 | j. Sui Generis Database Rights means rights other than copyright 124 | resulting from Directive 96/9/EC of the European Parliament and of 125 | the Council of 11 March 1996 on the legal protection of databases, 126 | as amended and/or succeeded, as well as other essentially 127 | equivalent rights anywhere in the world. 128 | 129 | k. You means the individual or entity exercising the Licensed Rights 130 | under this Public License. Your has a corresponding meaning. 131 | 132 | 133 | Section 2 -- Scope. 134 | 135 | a. License grant. 136 | 137 | 1. Subject to the terms and conditions of this Public License, 138 | the Licensor hereby grants You a worldwide, royalty-free, 139 | non-sublicensable, non-exclusive, irrevocable license to 140 | exercise the Licensed Rights in the Licensed Material to: 141 | 142 | a. reproduce and Share the Licensed Material, in whole or 143 | in part; and 144 | 145 | b. produce, reproduce, and Share Adapted Material. 146 | 147 | 2. Exceptions and Limitations. For the avoidance of doubt, where 148 | Exceptions and Limitations apply to Your use, this Public 149 | License does not apply, and You do not need to comply with 150 | its terms and conditions. 151 | 152 | 3. Term. The term of this Public License is specified in Section 153 | 6(a). 154 | 155 | 4. Media and formats; technical modifications allowed. The 156 | Licensor authorizes You to exercise the Licensed Rights in 157 | all media and formats whether now known or hereafter created, 158 | and to make technical modifications necessary to do so. The 159 | Licensor waives and/or agrees not to assert any right or 160 | authority to forbid You from making technical modifications 161 | necessary to exercise the Licensed Rights, including 162 | technical modifications necessary to circumvent Effective 163 | Technological Measures. For purposes of this Public License, 164 | simply making modifications authorized by this Section 2(a) 165 | (4) never produces Adapted Material. 166 | 167 | 5. Downstream recipients. 168 | 169 | a. Offer from the Licensor -- Licensed Material. Every 170 | recipient of the Licensed Material automatically 171 | receives an offer from the Licensor to exercise the 172 | Licensed Rights under the terms and conditions of this 173 | Public License. 174 | 175 | b. No downstream restrictions. You may not offer or impose 176 | any additional or different terms or conditions on, or 177 | apply any Effective Technological Measures to, the 178 | Licensed Material if doing so restricts exercise of the 179 | Licensed Rights by any recipient of the Licensed 180 | Material. 181 | 182 | 6. No endorsement. Nothing in this Public License constitutes or 183 | may be construed as permission to assert or imply that You 184 | are, or that Your use of the Licensed Material is, connected 185 | with, or sponsored, endorsed, or granted official status by, 186 | the Licensor or others designated to receive attribution as 187 | provided in Section 3(a)(1)(A)(i). 188 | 189 | b. Other rights. 190 | 191 | 1. Moral rights, such as the right of integrity, are not 192 | licensed under this Public License, nor are publicity, 193 | privacy, and/or other similar personality rights; however, to 194 | the extent possible, the Licensor waives and/or agrees not to 195 | assert any such rights held by the Licensor to the limited 196 | extent necessary to allow You to exercise the Licensed 197 | Rights, but not otherwise. 198 | 199 | 2. Patent and trademark rights are not licensed under this 200 | Public License. 201 | 202 | 3. To the extent possible, the Licensor waives any right to 203 | collect royalties from You for the exercise of the Licensed 204 | Rights, whether directly or through a collecting society 205 | under any voluntary or waivable statutory or compulsory 206 | licensing scheme. In all other cases the Licensor expressly 207 | reserves any right to collect such royalties. 208 | 209 | 210 | Section 3 -- License Conditions. 211 | 212 | Your exercise of the Licensed Rights is expressly made subject to the 213 | following conditions. 214 | 215 | a. Attribution. 216 | 217 | 1. If You Share the Licensed Material (including in modified 218 | form), You must: 219 | 220 | a. retain the following if it is supplied by the Licensor 221 | with the Licensed Material: 222 | 223 | i. identification of the creator(s) of the Licensed 224 | Material and any others designated to receive 225 | attribution, in any reasonable manner requested by 226 | the Licensor (including by pseudonym if 227 | designated); 228 | 229 | ii. a copyright notice; 230 | 231 | iii. a notice that refers to this Public License; 232 | 233 | iv. a notice that refers to the disclaimer of 234 | warranties; 235 | 236 | v. a URI or hyperlink to the Licensed Material to the 237 | extent reasonably practicable; 238 | 239 | b. indicate if You modified the Licensed Material and 240 | retain an indication of any previous modifications; and 241 | 242 | c. indicate the Licensed Material is licensed under this 243 | Public License, and include the text of, or the URI or 244 | hyperlink to, this Public License. 245 | 246 | 2. You may satisfy the conditions in Section 3(a)(1) in any 247 | reasonable manner based on the medium, means, and context in 248 | which You Share the Licensed Material. For example, it may be 249 | reasonable to satisfy the conditions by providing a URI or 250 | hyperlink to a resource that includes the required 251 | information. 252 | 253 | 3. If requested by the Licensor, You must remove any of the 254 | information required by Section 3(a)(1)(A) to the extent 255 | reasonably practicable. 256 | 257 | 4. If You Share Adapted Material You produce, the Adapter's 258 | License You apply must not prevent recipients of the Adapted 259 | Material from complying with this Public License. 260 | 261 | 262 | Section 4 -- Sui Generis Database Rights. 263 | 264 | Where the Licensed Rights include Sui Generis Database Rights that 265 | apply to Your use of the Licensed Material: 266 | 267 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 268 | to extract, reuse, reproduce, and Share all or a substantial 269 | portion of the contents of the database; 270 | 271 | b. if You include all or a substantial portion of the database 272 | contents in a database in which You have Sui Generis Database 273 | Rights, then the database in which You have Sui Generis Database 274 | Rights (but not its individual contents) is Adapted Material; and 275 | 276 | c. You must comply with the conditions in Section 3(a) if You Share 277 | all or a substantial portion of the contents of the database. 278 | 279 | For the avoidance of doubt, this Section 4 supplements and does not 280 | replace Your obligations under this Public License where the Licensed 281 | Rights include other Copyright and Similar Rights. 282 | 283 | 284 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 285 | 286 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 287 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 288 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 289 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 290 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, 291 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 292 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 293 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 294 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 295 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 296 | 297 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 298 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 299 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 300 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 301 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 302 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 303 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 304 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 305 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 306 | 307 | c. The disclaimer of warranties and limitation of liability provided 308 | above shall be interpreted in a manner that, to the extent 309 | possible, most closely approximates an absolute disclaimer and 310 | waiver of all liability. 311 | 312 | 313 | Section 6 -- Term and Termination. 314 | 315 | a. This Public License applies for the term of the Copyright and 316 | Similar Rights licensed here. However, if You fail to comply with 317 | this Public License, then Your rights under this Public License 318 | terminate automatically. 319 | 320 | b. Where Your right to use the Licensed Material has terminated under 321 | Section 6(a), it reinstates: 322 | 323 | 1. automatically as of the date the violation is cured, provided 324 | it is cured within 30 days of Your discovery of the 325 | violation; or 326 | 327 | 2. upon express reinstatement by the Licensor. 328 | 329 | For the avoidance of doubt, this Section 6(b) does not affect any 330 | right the Licensor may have to seek remedies for Your violations 331 | of this Public License. 332 | 333 | c. For the avoidance of doubt, the Licensor may also offer the 334 | Licensed Material under separate terms or conditions or stop 335 | distributing the Licensed Material at any time; however, doing so 336 | will not terminate this Public License. 337 | 338 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 339 | License. 340 | 341 | 342 | Section 7 -- Other Terms and Conditions. 343 | 344 | a. The Licensor shall not be bound by any additional or different 345 | terms or conditions communicated by You unless expressly agreed. 346 | 347 | b. Any arrangements, understandings, or agreements regarding the 348 | Licensed Material not stated herein are separate from and 349 | independent of the terms and conditions of this Public License. 350 | 351 | 352 | Section 8 -- Interpretation. 353 | 354 | a. For the avoidance of doubt, this Public License does not, and 355 | shall not be interpreted to, reduce, limit, restrict, or impose 356 | conditions on any use of the Licensed Material that could lawfully 357 | be made without permission under this Public License. 358 | 359 | b. To the extent possible, if any provision of this Public License is 360 | deemed unenforceable, it shall be automatically reformed to the 361 | minimum extent necessary to make it enforceable. If the provision 362 | cannot be reformed, it shall be severed from this Public License 363 | without affecting the enforceability of the remaining terms and 364 | conditions. 365 | 366 | c. No term or condition of this Public License will be waived and no 367 | failure to comply consented to unless expressly agreed to by the 368 | Licensor. 369 | 370 | d. Nothing in this Public License constitutes or may be interpreted 371 | as a limitation upon, or waiver of, any privileges and immunities 372 | that apply to the Licensor or You, including from the legal 373 | processes of any jurisdiction or authority. 374 | 375 | 376 | ======================================================================= 377 | 378 | Creative Commons is not a party to its public 379 | licenses. Notwithstanding, Creative Commons may elect to apply one of 380 | its public licenses to material it publishes and in those instances 381 | will be considered the “Licensor.” The text of the Creative Commons 382 | public licenses is dedicated to the public domain under the CC0 Public 383 | Domain Dedication. Except for the limited purpose of indicating that 384 | material is shared under a Creative Commons public license or as 385 | otherwise permitted by the Creative Commons policies published at 386 | creativecommons.org/policies, Creative Commons does not authorize the 387 | use of the trademark "Creative Commons" or any other trademark or logo 388 | of Creative Commons without its prior written consent including, 389 | without limitation, in connection with any unauthorized modifications 390 | to any of its public licenses or any other arrangements, 391 | understandings, or agreements concerning use of licensed material. For 392 | the avoidance of doubt, this paragraph does not form part of the 393 | public licenses. 394 | 395 | Creative Commons may be contacted at creativecommons.org. 396 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # BiSPy : Bivariate Signal Processing with Python 2 | 3 | [![docs-page](https://img.shields.io/badge/docs-latest-blue)](https://jflamant.github.io/bispy/) 4 | [![PyPI version](https://badge.fury.io/py/bispy-polar.svg)](https://badge.fury.io/py/bispy-polar) 5 | 6 | BiSPy is an open-source python framework for processing bivariate signals. It supports our papers on time-frequency analysis [1], spectral analysis [2] and linear time-invariant filtering [3] of bivariate signals. 7 | 8 | > [1] Julien Flamant, Nicolas Le Bihan, Pierre Chainais: “Time-frequency analysis of bivariate signals”, Applied and Computational Harmonic Analysis, 2017; [arXiv:1609.0246](http://arxiv.org/abs/1609.02463), [doi:10.1016/j.acha.2017.05.007](https://doi.org/10.1016/j.acha.2017.05.007) 9 | 10 | > [2] Julien Flamant, Nicolas Le Bihan, Pierre Chainais: “Spectral analysis of stationary random bivariate signals”, 2017, IEEE Transactions on Signal Processing; [arXiv:1703.06417](http://arxiv.org/abs/1703.06417), [doi:10.1109/TSP.2017.2736494](https://doi.org/10.1109/TSP.2017.2736494) 11 | 12 | > [3] Julien Flamant, Pierre Chainais, Nicolas Le Bihan: “A complete framework for linear filtering of bivariate signals”, 2018; IEEE Transactions on Signal Processing; [arXiv:1802.02469](https://arxiv.org/abs/1802.02469), [doi:10.1109/TSP.2018.2855659](https://doi.org/10.1109/TSP.2018.2855659) 13 | 14 | These papers contains theoretical results and several applications that can be reproduced with this toolbox. 15 | 16 | 17 | This python toolbox is currently under development and is hosted on GitHub. If you encounter a bug or something unexpected please let me know by [raising an issue](https://github.com/jflamant/bispy/issues) on the project page. 18 | 19 | ### Install from PyPi 20 | 21 | Due to name conflict the available version on PyPi is named ``bispy-polar''. To install from PyPi, simply type 22 | 23 | ``` 24 | pip install bispy-polar 25 | ``` 26 | 27 | It will automatically install dependencies (see also below). 28 | 29 | To get started, simply use 30 | ``` 31 | import bispy as bsp 32 | ``` 33 | 34 | Requirements 35 | ============ 36 | BiSPy works with python 3.5+. 37 | 38 | Dependencies: 39 | - [NumPy](http://www.numpy.org) 40 | - [SciPy](https://www.scipy.org) 41 | - [Matplotlib](http://matplotlib.org) 42 | - [numpy-quaternion](https://github.com/moble/quaternion) 43 | 44 | To install dependencies: 45 | ```shell 46 | pip install numpy scipy matplotlib numpy-quaternion 47 | ``` 48 | 49 | [numpy-quaternion](https://github.com/moble/quaternion) add quaternion dtype support to numpy. Implementation by [moble]. Since this python toolbox relies extensively on this module, you can check out first the nice introduction [here](https://github.com/moble). 50 | 51 | 52 | License 53 | ======= 54 | This software is distributed under the [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/) license. 55 | 56 | Cite this work 57 | ============== 58 | If you use this package for your own work, please consider citing it with this piece of BibTeX: 59 | 60 | ```bibtex 61 | @misc{BiSPy, 62 | title = {{BiSPy: an Open-Source Python project for processing bivariate signals}}, 63 | author = {Julien Flamant}, 64 | year = {2018}, 65 | url = {https://github.com/jflamant/bispy/}, 66 | howpublished = {Online at: \url{github.com/jflamant/bispy/}}, 67 | note = {Code at https://github.com/jflamant/bispy/, documentation at https://bispy.readthedocs.io/} 68 | } 69 | ``` 70 | -------------------------------------------------------------------------------- /bispy/__init__.py: -------------------------------------------------------------------------------- 1 | from . import utils 2 | from . import qfft 3 | from . import timefrequency 4 | from . import spectral 5 | from . import filters 6 | from . import signals 7 | -------------------------------------------------------------------------------- /bispy/filters.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # Copyright © 2018 Julien Flamant 4 | # 5 | # Distributed under terms of the CeCILL Free Software Licence Agreement 6 | 7 | ''' 8 | Module for the LTI filtering of bivariate signals. 9 | ''' 10 | 11 | import numpy as np 12 | import quaternion 13 | 14 | from . import qfft 15 | 16 | class Filter(object): 17 | def __init__(self, N, dt=1.0): 18 | 19 | self.f = np.fft.fftfreq(N, d=dt) 20 | self.N = N 21 | self.dt = dt 22 | 23 | # def plot(self): 24 | # # ''' Displays the spectral response of the filter for different input types''' 25 | # # fig, ax = plt.subplots() 26 | 27 | # # N = len(self.f) 28 | # # ax.plot(self.f, randn(N)) 29 | 30 | 31 | class HermitianFilter(Filter): 32 | ''' 33 | Hermitian filter for bivariate signals. 34 | The Hermitian filtering relations reads in the QFT spectral domain: 35 | Y(nu) = K(nu)*[X(nu) - eta(nu)*mu(nu)*X(nu)*qj] 36 | where K is the homogeneous gain of the filter, eta is the polarizing power 37 | and mu the axis of the filter. 38 | 39 | Parameters 40 | ---------- 41 | 42 | N : int 43 | length of the filter 44 | 45 | K : array_type or float 46 | homogeneous gain array (should be of size N). If K is a float, then a 47 | constant gain is assumed throughout frequencies. 48 | 49 | eta : array_type or float 50 | polarizing power array (should be of size N). If eta is a float, then a 51 | constant polarizing is assumed throughout frequencies. 52 | 53 | mu : array_type (quaternion) or quaternion 54 | diattenuation axis quaternion array (should be of size N and of dtype quaternion). 55 | 56 | dt : float (optional) 57 | time sampling step (default 1) 58 | 59 | Attributes 60 | ---------- 61 | 62 | N : int 63 | length of the filter 64 | 65 | f : array_type 66 | sampled frequencies 67 | 68 | dt : float 69 | time sampling step (default 1) 70 | 71 | K, eta, mu : array_types 72 | filter parameters 73 | ''' 74 | 75 | def __init__(self, N, K, eta, mu, dt=1.0): 76 | # initialize Filter 77 | Filter.__init__(self, N, dt=dt) 78 | 79 | # several tests to ensure proper feed 80 | for param in [K, eta, mu]: 81 | if np.size(param) != 1 and np.size(param) != N: 82 | raise ValueError('Parameters should be either scalar or of size N') 83 | 84 | 85 | if np.size(K) == 1: 86 | Kvec = np.ones(N)*K 87 | else: 88 | Kvec = K 89 | if np.size(eta) == 1: 90 | etavec = np.ones(N)*eta 91 | else: 92 | etavec = eta 93 | if np.size(mu) ==1: 94 | muvec = np.ones(N)*mu/np.abs(mu) 95 | else: 96 | muvec = np.zeros(N, dtype='quaternion') 97 | muvec[np.abs(mu) > 0] = mu[np.abs(mu) > 0]/np.abs(mu)[np.abs(mu) > 0] 98 | # ensure symmetry relations 99 | qi = quaternion.x 100 | 101 | Kvec[N//2 +1:] = Kvec[1:N//2][::-1] # K(-v) = K(v) 102 | etavec[N//2 +1:] = etavec[1:N//2][::-1] # eta(-v) = eta(v) 103 | 104 | # mu(-v) = conj_i(mu(v)) 105 | muvec[N//2 + 1:] = -qi*np.conj(muvec[1:N//2][::-1])*qi 106 | muvec[0] = .5*(muvec[1] + muvec[-1]) 107 | muvec[N//2] = .5*(muvec[N//2+1] + muvec[N//2-1]) 108 | 109 | 110 | # save 111 | self.K = Kvec 112 | self.eta = etavec 113 | self.mu = muvec 114 | 115 | def output(self, x): 116 | ''' returns the output of the filter given an input signal x 117 | 118 | ''' 119 | 120 | if np.size(x) != self.N: 121 | raise ValueError('Size of input array should be the same as the constructed filter') 122 | 123 | X = qfft.Qfft(x) 124 | 125 | qj = quaternion.y 126 | 127 | Y = self.K*(X - self.eta*(self.mu*X)*qj) 128 | y = qfft.iQfft(Y) 129 | 130 | return y 131 | 132 | class UnitaryFilter(Filter): 133 | ''' 134 | Unitary filter for bivariate signals. 135 | The Unitary filtering relation reads in the QFT spectral domain: 136 | Y(nu) = exp(mu(nu)*alpha(nu) / 2)*X(nu)exp(1j*phi(nu)) 137 | where phi is phase delay of the filter, mu its axis and alpha is the 138 | birefringence angle. 139 | 140 | Parameters 141 | ---------- 142 | 143 | N : int 144 | length of the filter 145 | 146 | mu : array_type (quaternion) 147 | birefringence axis quaternion array (should be of size N and of dtype quaternion). 148 | 149 | alpha : array_type 150 | birefringence angle array (should be of size N). If alpha is a float, then alpha is assumed constant throughout frequencies. 151 | 152 | phi : array_type or float 153 | phase delay array (should be of size N). If phi is a float, then a 154 | constant phase delay is assumed throughout frequencies. 155 | 156 | dt : float (optional) 157 | time sampling step (default 1) 158 | 159 | Attributes 160 | ---------- 161 | 162 | N : int 163 | length of the filter 164 | 165 | f : array_type 166 | sampled frequencies 167 | 168 | dt : float 169 | time sampling step (default 1) 170 | 171 | mu, alpha, phi : array_types 172 | filter parameters 173 | ''' 174 | def __init__(self, N, mu, alpha, phi, dt=1.0): 175 | # initialize Filter 176 | Filter.__init__(self, N, dt=dt) 177 | 178 | # several tests to ensure proper feed 179 | for param in [mu, alpha, phi]: 180 | if np.size(param) != 1 and np.size(param) != N: 181 | raise ValueError('Parameters should be either scalar or of size N') 182 | 183 | if np.size(mu) ==1: 184 | muvec = np.ones(N)*mu/np.abs(mu) 185 | else: 186 | muvec = np.zeros(N, dtype='quaternion') 187 | muvec[np.abs(mu) > 0] = mu[np.abs(mu) > 0]/np.abs(mu)[np.abs(mu) > 0] 188 | 189 | if np.size(alpha) == 1: 190 | alphavec = np.ones(N)*alpha 191 | else: 192 | alphavec = alpha 193 | 194 | if np.size(phi) == 1: 195 | phivec = np.ones(N)*phi 196 | else: 197 | phivec = phi 198 | # ensure symmetry relations 199 | qi = quaternion.x 200 | 201 | alphavec[N//2 +1:] = alphavec[1:N//2][::-1] # alpha(-v) = alpha(v) 202 | phivec[N//2 +1:] = -phivec[1:N//2][::-1] # phi(-v) = -phi(v) 203 | phivec[0] = 0 204 | phivec[N//2] = 0 205 | 206 | # mu(-v) = invol_i(mu(v)) 207 | muvec[N//2 + 1:] = -(qi*muvec[1:N//2][::-1])*qi 208 | muvec[0] = .5*(muvec[1] + muvec[-1]) 209 | muvec[N//2] = .5*(muvec[N//2+1] + muvec[N//2-1]) 210 | 211 | 212 | # save 213 | self.alpha = alphavec 214 | self.phi = phivec 215 | self.mu = muvec 216 | 217 | def output(self, x): 218 | ''' returns the output of the filter given an input signal x''' 219 | 220 | if np.size(x) != self.N: 221 | raise ValueError('Size of input array should be the same as the constructed filter') 222 | 223 | X = qfft.Qfft(x) 224 | 225 | qj = quaternion.y 226 | 227 | Y = (np.exp(self.mu*self.alpha/2)*X)*np.exp(qj*self.phi) 228 | y = qfft.iQfft(Y) 229 | 230 | return y 231 | -------------------------------------------------------------------------------- /bispy/qfft.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #-*- coding: utf-8 -*- 3 | 4 | 5 | """ 6 | This module contains QFT routines 7 | 8 | """ 9 | import numpy as np 10 | from .utils import sympSplit, sympSynth 11 | 12 | __all__ = ['Qfft', 'iQfft', 'Qfftshift', 'iQfftshift', 'Qfftfreq'] 13 | 14 | 15 | # QFTTs functions 16 | 17 | 18 | def Qfft(x, **kwargs): 19 | ''' Performs QFT using 2 ffts. 20 | 21 | Parameters 22 | ---------- 23 | x : array_type 24 | 25 | Returns 26 | ------- 27 | X : array_type 28 | ''' 29 | x_1, x_2 = sympSplit(np.ascontiguousarray(x)) # ascontiguous may be needed 30 | 31 | X_1 = np.fft.fft(x_1, **kwargs) 32 | X_2 = np.fft.fft(x_2, **kwargs) 33 | 34 | X = sympSynth(X_1, X_2) 35 | 36 | return X 37 | 38 | 39 | def iQfft(X, **kwargs): 40 | ''' Performs inverse QFT. 41 | 42 | Parameters 43 | ---------- 44 | X : array_type 45 | 46 | Returns 47 | ------- 48 | x : array_type 49 | ''' 50 | 51 | X_1, X_2 = sympSplit(X) 52 | 53 | x_1 = np.fft.ifft(X_1, **kwargs) 54 | x_2 = np.fft.ifft(X_2, **kwargs) 55 | 56 | x = sympSynth(x_1, x_2) 57 | 58 | return x 59 | 60 | 61 | # Qfft manipulations 62 | 63 | def Qfftshift(X): 64 | ''' Shifts the QFT array 65 | 66 | Parameters 67 | ---------- 68 | X : array_type 69 | 70 | Returns 71 | ------- 72 | Xshifted : array_type 73 | 74 | ''' 75 | 76 | return np.fft.fftshift(X) 77 | 78 | 79 | def iQfftshift(X): 80 | ''' Unshifts the QFFT array 81 | 82 | Parameters 83 | ---------- 84 | X : array_type 85 | 86 | Returns 87 | ------- 88 | Xunshifted : array_type 89 | ''' 90 | 91 | return np.fft.ifftshift(X) 92 | 93 | 94 | def Qfftfreq(N, dt=1.0): 95 | ''' Return the sampled frequencies, from time spacing dt. 96 | 97 | See numpy.fft.fftfreq for further reference. 98 | 99 | Parameters 100 | ---------- 101 | N : int 102 | length of the signal 103 | dt : float, optional 104 | time sampling step. Default 1.0 105 | 106 | Returns 107 | ------- 108 | f : array_type 109 | sampled frequencies 110 | ''' 111 | 112 | return np.fft.fftfreq(N, d=dt) 113 | -------------------------------------------------------------------------------- /bispy/signals.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | Module for the synthesis of prototype bivariate signals 6 | """ 7 | import numpy as np 8 | import quaternion 9 | 10 | from .utils import euler2quat, sympSynth, sympSplit 11 | from .filters import HermitianFilter 12 | from .spectral import quaternionPSD 13 | 14 | class stationaryBivariate(HermitianFilter): 15 | ''' 16 | Simulates realizations of a stationary Gaussian random bivariate sequence with specficied quaternion PSD. 17 | 18 | The simulation method relies on spectral synthesis and is approximate. 19 | The quality of the approximation increases with the size N (aka the number of frequency bins). 20 | 21 | 22 | Parameters 23 | ---------- 24 | targetPSD: quaternionPSD object 25 | target PSD of the signal to sample from 26 | 27 | Attributes 28 | ---------- 29 | simulation : array_type 30 | array of size (M, N) where M is the number of independent realizations of the signal and N is the length of the simulated sequence. 31 | 32 | ''' 33 | 34 | def __init__(self, targetPSD): 35 | # check input is a quaternionPSD object 36 | if isinstance(targetPSD, quaternionPSD) is False: 37 | raise ValueError("target PSD should be a quaternionPSD object") 38 | 39 | N = targetPSD.N 40 | dt = targetPSD.dt 41 | 42 | # filter parameters 43 | mu = targetPSD.mu 44 | 45 | eta = np.zeros_like(targetPSD.Phi) 46 | valid = targetPSD.Phi > 0 47 | eta[valid] = (1-np.sqrt(1-targetPSD.Phi[valid]**2))/targetPSD.Phi[valid] 48 | K = np.sqrt(targetPSD.S0/(1+eta**2)) 49 | 50 | super(stationaryBivariate, self).__init__(N, K, eta, mu, dt=dt) 51 | 52 | self.simulation = None 53 | 54 | def simulate(self, M): 55 | ''' 56 | Simulate realizations of the stationary Gaussian random bivariate signal with specified quaternion PSD. 57 | 58 | Parameters 59 | ---------- 60 | M : int 61 | number of independent realizations to simulate 62 | 63 | ''' 64 | self.simulation = np.zeros((M, self.N), dtype='quaternion') 65 | for m in range(M): 66 | w = bivariatewhiteNoise(self.N, 1) 67 | self.simulation[m, :] = self.output(w) 68 | 69 | 70 | 71 | 72 | def bivariateAMFM(a, theta, chi, phi, Hembedding=True, complexOutput=False): 73 | 74 | ''' Construct a bivariate AM-FM model with specified parameters. 75 | 76 | The output x[n] is constructed as:: 77 | 78 | x[n] = a[n] * np.exp(i * theta[n]) * np.exp(-k * chi[n]) * np.exp(j * phi[n]) 79 | 80 | Parameters 81 | ---------- 82 | a, theta, chi, phi : array_type 83 | These are instantaneous geometrical and phase parameters. 84 | 85 | Hembedding : bool, optional 86 | If `True`, returns the H-embedding signal of x, otherwise returns x 87 | (1, i)-complex (as a quaternion array). Default is `True`. 88 | 89 | complexOutput : bool, optional 90 | If `True`, output is a complex numpy array. Otherwise output is a 91 | quaternion numpy array. Default is `False`. 92 | 93 | Returns 94 | ------- 95 | x : array_type 96 | 97 | See also 98 | -------- 99 | euler2quat 100 | 101 | ''' 102 | 103 | # N = np.size(phi) 104 | 105 | # if np.size(theta) != N or np.size(chi) != N or np.size(phi) != N: 106 | # raise ValueError('All parameters should have same length!') 107 | 108 | x = euler2quat(a, theta, chi, phi) 109 | 110 | if Hembedding is True: 111 | return x 112 | else: 113 | x1, x2 = sympSplit(x) 114 | if complexOutput is True: 115 | return x1.real + 1j * x2.real 116 | else: 117 | return sympSynth(x1.real, x2.real) 118 | 119 | 120 | def bivariatewhiteNoise(N, S0, P=0, theta=0, complexOutput=False): 121 | 122 | ''' Generates a bivariate white noise with prescribed polarization 123 | properties using the Unpolarized/Polarized part decomposition. 124 | 125 | Parameters 126 | ---------- 127 | N : int 128 | length of the signal 129 | S0 : float 130 | white noise power 131 | P : float, optional 132 | degree of polarization, must be 0 <= P <= 1. Default is 0 133 | theta : float, optional 134 | angle of linear polarization. Default is 0 135 | complexOutput: bool, optional 136 | If `True`, output is a complex numpy array. Otherwise output is a 137 | quaternion numpy array. Default is `False`. 138 | returns 139 | ------- 140 | w : array_type 141 | bivariate white noise signal 142 | ''' 143 | # check value of P 144 | if (0 <= P <= 1) is False: 145 | raise ValueError('Degree of polarization P must be between 0 and 1 !') 146 | 147 | # unpolarized part 148 | wu = 1 / np.sqrt(2) * (np.random.randn(N) + 1j * np.random.randn(N)) 149 | 150 | # polarized part 151 | wp = np.random.randn(N) 152 | 153 | # use of the UP decomposition to construct the output 154 | w = (S0)**0.5 * (np.sqrt(1 - P) * wu + np.sqrt(P) * 155 | np.exp(1j * theta) * wp) 156 | 157 | if complexOutput is True: 158 | return w 159 | else: 160 | return sympSynth(w.real, w.imag) 161 | -------------------------------------------------------------------------------- /bispy/spectral.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | This file is part of BiSPy. 6 | This program contains several classes to perform spectral analysis of bivariate 7 | signals. 8 | """ 9 | # import modules and packages 10 | import numpy as np 11 | import quaternion 12 | import matplotlib.pyplot as plt 13 | from mpl_toolkits.mplot3d import Axes3D # required for 3D plot 14 | import matplotlib.gridspec as gridspec 15 | import scipy.signal as sg 16 | 17 | from . import qfft 18 | from . import utils 19 | 20 | class quaternionPSD(object): 21 | ''' 22 | Quaternion Power Spectral Density constructor of the form: 23 | Gamma_xx(nu) = S_0x(nu)*[1 + Phi_x(nu)*mu_x(nu)] 24 | 25 | where S_0x is the PSD of the signal x, Phi_x(nu) is the degree of polarization of the signal and mu_x is the polarization axis. 26 | 27 | Parameters 28 | ---------- 29 | N : int 30 | size of the frequencies array 31 | 32 | S0x : array_type 33 | PSD array of the signal 34 | 35 | Phix : array_type 36 | degree of polarization array 37 | 38 | mux : array_type (quaternion) 39 | polarization axis array 40 | 41 | dt : float (optional) 42 | time sampling size step 43 | 44 | Attributes 45 | ---------- 46 | S0, S1, S2, S3 : array_type 47 | Stokes Parameters array 48 | 49 | density : array_type 50 | quaternion PSD 51 | 52 | f : array_type 53 | sampled frequencies array 54 | 55 | Phi : array_type 56 | degree of polarization 57 | 58 | mu : array_type 59 | polarization axis 60 | ''' 61 | 62 | def __init__(self, N, S0x, Phix, mux, dt=1): 63 | 64 | if mux.dtype != 'quaternion': 65 | raise ValueError('polarization axis array should be of quaternion type.') 66 | # several tests to ensure proper feed 67 | for param in [S0x, Phix, mux]: 68 | if np.size(param) != 1 and np.size(param) != N: 69 | raise ValueError('Parameters should be either scalar or of size N') 70 | 71 | if np.size(S0x) == 1: 72 | S0xvec = np.ones(N)*S0x 73 | else: 74 | S0xvec = S0x 75 | if np.size(Phix) == 1: 76 | Phixvec = np.ones(N)*Phix 77 | else: 78 | Phixvec = Phix 79 | if np.size(mux) ==1: 80 | muxvec = np.ones(N)*mux/np.abs(mux) 81 | else: 82 | muxvec = np.zeros(N, dtype='quaternion') 83 | valid = np.abs(mux) > 0 84 | muxvec[valid] = mux[valid]/np.abs(mux)[valid] 85 | 86 | if np.max(np.abs(Phixvec)) > 1: 87 | raise ValueError('Degree of polarization shall not exceed 1') 88 | elif np.min(Phixvec) < 0: 89 | raise ValueError('Degree of polarization cannot be negative') 90 | # save 91 | self.N = N 92 | self.dt = dt 93 | self.f = np.fft.fftfreq(N, d=dt) 94 | self.S0, self.Phi, self.mu = self.__ensureSym(S0xvec, Phixvec, muxvec) 95 | 96 | self.density = self.S0*(1 + self.Phi*self.mu) 97 | 98 | __, self.S1, self.S2, self.S3 = self._getStokes() 99 | 100 | def __ensureSym(self, S0xvec, Phixvec, muxvec): 101 | ''' 102 | Ensure symmetry relation on PSD parameters 103 | Gamma_xx(-nu) =-i*conj(Gamma_xx(nu))*i 104 | ''' 105 | N = np.size(self.f) 106 | # 107 | qi = quaternion.x 108 | 109 | # SO(-v) = SO(v) 110 | S0 = np.zeros_like(S0xvec) 111 | S0[:N//2+1] = S0xvec[:N//2+1] 112 | S0[N//2 +1:] = S0xvec[1:N//2][::-1] 113 | 114 | # Phi(-v) = Phi(v) 115 | Phi = np.zeros_like(Phixvec) 116 | Phi[:N//2 +1] = Phixvec[:N//2+1] 117 | Phi[N//2 +1:] = Phixvec[1:N//2][::-1] 118 | 119 | # mu(-v) = conj_i(mu(v)) 120 | mu = np.zeros_like(muxvec) 121 | mu[1:N//2] = muxvec[1:N//2] 122 | mu[N//2 + 1:] = -qi*np.conj(muxvec[1:N//2][::-1])*qi 123 | mu[0] = .5*(mu[1] + mu[-1]) 124 | mu[N//2] = .5*(mu[N//2+1] + mu[N//2-1]) 125 | 126 | return S0, Phi, mu 127 | 128 | def _getStokes(self): 129 | ''' 130 | Low-level function. 131 | Extract extract Stokes parameters from the spectral density 132 | Recall that 133 | 134 | Gamma_{xx} = S0 + iS_3 + jS_1 + kS_2 135 | 136 | Returns 137 | ------- 138 | S0, S1, S2, S2: array_type 139 | Stokes parameters 140 | ''' 141 | g1, g2 = utils.sympSplit(self.density) 142 | 143 | S0 = g1.real 144 | S1 = g1.imag 145 | S3 = g2.real 146 | S2 = g2.imag 147 | 148 | return S0, S1, S2, S3 149 | 150 | def plotStokes(self, single_sided=True): 151 | ''' 152 | Displays Stokes Parameters S0, S1, S2, S3 153 | ''' 154 | 155 | f = np.fft.fftshift(self.f) 156 | 157 | # size of plot 158 | A = np.random.rand(1, 4) 159 | w, h = plt.figaspect(A) 160 | labelsize= 20 161 | 162 | fig, ax = plt.subplots(ncols=4, figsize=(w, h), sharey=True, gridspec_kw = {'width_ratios':[1, 1, 1, 1]}) 163 | 164 | im0 = ax[0].plot(f, np.fft.fftshift(self.S0)) 165 | im1 = ax[1].plot(f, np.fft.fftshift(self.S1)) 166 | im2 = ax[2].plot(f, np.fft.fftshift(self.S2)) 167 | im3 = ax[3].plot(f, np.fft.fftshift(self.S3)) 168 | 169 | label =[r'$S_0$', r'$S_1$', r'$S_2$', r'$S_3$'] 170 | for i, axis in enumerate(ax): 171 | if single_sided is True: 172 | axis.set_xlim(0, f.max()) 173 | axis.set_xlabel('Frequency [Hz]') 174 | axis.set_aspect(1./axis.get_data_ratio()) 175 | axis.set_adjustable('box-forced') 176 | axis.set_title(label[i], y = 0.85, size=labelsize) 177 | 178 | 179 | 180 | # set ylabls 181 | fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05, top=0.92, bottom=0.12) 182 | return fig, ax 183 | 184 | def plot(self, single_sided=True): 185 | ''' 186 | Displays the quaternion PSD 187 | ''' 188 | 189 | f = np.fft.fftshift(self.f) 190 | # get geometric Parameters 191 | __, theta, chi, Phi = utils.Stokes2geo(np.fft.fftshift(self.S0), np.fft.fftshift(self.S1), np.fft.fftshift(self.S2), np.fft.fftshift(self.S3)) 192 | 193 | # size of plot 194 | A = np.random.rand(1, 3) 195 | w, h = plt.figaspect(A) 196 | labelsize= 20 197 | 198 | fig, ax = plt.subplots(ncols=3, figsize=(w, h), gridspec_kw = {'width_ratios':[1, 1, 1]}) 199 | 200 | ax[0].plot(f, np.fft.fftshift(self.S0)) 201 | ax[1].plot(f, Phi) 202 | ax[1].set_ylim(0, 1.05) 203 | ax[1].set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0]) 204 | # angles 205 | ax[2].plot(f, theta, color='r') 206 | ax[2].tick_params('y', colors='r') 207 | ax[2].set_ylim(-np.pi/2, np.pi/2) 208 | 209 | 210 | axchi = ax[2].twinx() 211 | axchi.plot(f, chi, color='g') 212 | axchi.tick_params('y', colors='g') 213 | axchi.set_ylim(-np.pi/4, np.pi/4) 214 | axchi.set_xlim(-np.pi/4, np.pi/4) 215 | 216 | label =[r'$S_0$', r'$\Phi$', ''] 217 | 218 | if single_sided is True: 219 | axchi.set_xlim(0, f.max()) 220 | axchi.set_aspect(1./axchi.get_data_ratio()) 221 | axchi.set_adjustable('box-forced') 222 | 223 | for i, axis in enumerate(ax): 224 | if single_sided is True: 225 | axis.set_xlim(0, f.max()) 226 | axis.set_xlabel('Frequency [Hz]') 227 | axis.set_aspect(1./axis.get_data_ratio()) 228 | axis.set_adjustable('box-forced') 229 | axis.set_title(label[i], y = 0.85, size=labelsize) 230 | 231 | 232 | ax[2].set_title(r'$\theta$', size=labelsize, color='r', x=0.1, y=0.9) 233 | axchi.set_title(r'$\chi$', size=labelsize, color='g', x=0.9, y=0.9) 234 | 235 | fig.subplots_adjust(left=0.05, right=0.95, wspace=0.1, top=0.92, bottom=0.12) 236 | return fig, ax 237 | 238 | 239 | 240 | class Periodogram(object): 241 | ''' 242 | Compute the periodogram of bivariate 243 | signals taken as (1, i)-quaternion valued signals. 244 | 245 | Parameters 246 | ---------- 247 | t : array_type 248 | time samples array 249 | 250 | x : array_type 251 | input signal array (has to be of quaternion dtype) 252 | 253 | compute : bool, optional 254 | Flag activating computation of the estimate. Default is true. If False 255 | one has to run the compute() method manually. 256 | 257 | Attributes 258 | ---------- 259 | t : array_type 260 | time samples array 261 | 262 | signal : array_type 263 | input signal array 264 | 265 | f : array_type 266 | sampled frequencies array 267 | 268 | density : array_type 269 | spectral density quaternion array 270 | 271 | S0, S1, S2, S3 : array_type 272 | Stokes parameters, non-normalized [w.r.t. S0] 273 | 274 | S1n, S2n, S3n : array_type 275 | normalized Stokes parameters [w.r.t. S0] using the 276 | tolerance factor `tol`. They are not computed by 277 | default. See `normalize`. 278 | 279 | Phi : array_type 280 | Degree of polarization. Not computed by default; See `normalize`. 281 | 282 | ''' 283 | 284 | def __init__(self, t, x, computeFlag=True): 285 | 286 | if x.dtype != 'quaternion': 287 | raise ValueError('signal array should be of quaternion type.') 288 | 289 | # Store the signal and parameters 290 | self.t = t 291 | self.signal = x 292 | 293 | N = np.size(x, 0) 294 | dt = (t[1] - t[0]) 295 | self.f = np.fft.fftfreq(N) / dt 296 | 297 | self.density = np.zeros(N, dtype='quaternion') 298 | 299 | if computeFlag is True: 300 | self.compute() 301 | 302 | # and SO, S1, S2, S3 associated 303 | 304 | self.S0, self.S1, self.S2, self.S3 = self._getStokes() 305 | 306 | # initialize normalized Stokes and degree of polarization 307 | self.S1n = np.zeros_like(self.S0) 308 | self.S2n = np.zeros_like(self.S0) 309 | self.S3n = np.zeros_like(self.S0) 310 | 311 | self.Phi = np.zeros_like(self.S0) 312 | 313 | def compute(self): 314 | ''' 315 | Low-level function. Compute Periodogram estimate 316 | ''' 317 | # compute the QFT of x 318 | dt = (self.t[1] - self.t[0]) 319 | N = np.size(self.signal, 0) 320 | 321 | QFTx = qfft.Qfft(self.signal) 322 | 323 | # then the spectral density Gamma_{xx} 324 | 325 | self.density = dt / N * (np.norm(QFTx) + utils.StokesNorm(QFTx)) 326 | 327 | 328 | def __add__(self, other): 329 | if np.any(self.t != other.t) is True: 330 | raise ValueError('Cannot sum Periodograms with differents time \ 331 | arrays') 332 | 333 | new = Periodogram(self.t, self.signal, computeFlag=False) # keep self data 334 | 335 | # update density 336 | new.density = self.density + other.density 337 | 338 | # and SO, S1, S2, S3 associated 339 | new.S0, new.S1, new.S2, new.S3 = new._getStokes() 340 | 341 | return new 342 | 343 | def __mul__(self, scalar): 344 | if np.size(scalar) > 1: 345 | raise ValueError('Only scalar multiplication is supported') 346 | 347 | new = Periodogram(self.t, self.signal, computeFlag=False) # keep self data 348 | 349 | # update density 350 | new.density = scalar * self.density 351 | 352 | # and SO, S1, S2, S3 associated 353 | new.S0, new.S1, new.S2, new.S3 = new._getStokes() 354 | 355 | return new 356 | 357 | def __rmul__(self, scalar): 358 | return self * scalar 359 | 360 | def _getStokes(self): 361 | ''' 362 | Low-level function. 363 | Extract extract Stokes parameters from the spectral density 364 | Recall that 365 | 366 | Gamma_{xx} = S0 + iS_3 + jS_1 + kS_2 367 | 368 | Returns 369 | ------- 370 | S0, S1, S2, S2: array_type 371 | Stokes parameters 372 | ''' 373 | g1, g2 = utils.sympSplit(self.density) 374 | 375 | S0 = g1.real 376 | S1 = g1.imag 377 | S3 = g2.real 378 | S2 = g2.imag 379 | 380 | return S0, S1, S2, S3 381 | 382 | def normalize(self, tol=0.0): 383 | 384 | ''' Normalize Stokes parameters wrt S0. 385 | In addition, compute the degree of polarization Phi. 386 | 387 | Parameters 388 | ---------- 389 | tol : float, optional 390 | tolerance factor used in Stokes parameters normalization. 391 | Default is 0.0 392 | 393 | Returns 394 | ------- 395 | self.S1n, self.S2n, self.S3n : array_type 396 | normalized Stokes parameters 397 | 398 | self.Phi : array_type 399 | degree of polarization 400 | 401 | See also 402 | -------- 403 | utils.normalizeStokes 404 | ''' 405 | 406 | self.S1n, self.S2n, self.S3n = utils.normalizeStokes(self.S0, self.S1, 407 | self.S2, self.S3, tol=tol) 408 | 409 | self.Phi = np.sqrt(self.S1n**2 + self.S2n**2 + self.S3n**2) 410 | 411 | def plot(self): 412 | '''Generic plot of spectral estimates''' 413 | 414 | fig, axes = _plotResultSpectral(self.t, self.signal, self) 415 | fig.show() 416 | return fig, axes 417 | 418 | 419 | class Multitaper(object): 420 | ''' 421 | Compute a multitaper spectral estimate of the spectrum of bivariate 422 | signals taken as (1, i)-quaternion valued signals. 423 | The data tapers are chosen as discrete-prolate spheroidal sequences 424 | (dpss or Slepian tapers). 425 | 426 | Parameters 427 | ---------- 428 | t : array_type 429 | time samples array 430 | 431 | x : array_type 432 | input signal array (has to be of quaternion dtype) 433 | 434 | bw : float, optional 435 | spectral bandwidth. Default is 2.5 436 | 437 | computeFlag : bool, optional 438 | Flag activating computation of the estimate. Default is true. If False 439 | one has to run the compute() method manually. 440 | 441 | Attributes 442 | ---------- 443 | t : array_type 444 | time samples array 445 | 446 | signal : array_type 447 | input signal array 448 | 449 | f : array_type 450 | sampled frequencies array 451 | 452 | densities : array_type 453 | spectral density quaternion array for each taper 454 | 455 | density : array_type 456 | spectral density quaternion array 457 | 458 | dpss : array_type 459 | data tapers used 460 | 461 | S0, S1, S2, S3 : array_type 462 | Stokes parameters, non-normalized [w.r.t. S0] 463 | 464 | S1n, S2n, S3n : array_type 465 | normalized Stokes parameters [w.r.t. S0] using the 466 | tolerance factor `tol`. They are not computed by 467 | default. See `normalize`. 468 | 469 | Phi : array_type 470 | Degree of polarization. Not computed by default; See `normalize`. 471 | 472 | ''' 473 | 474 | def __init__(self, t, x, bw=2.5, computeFlag=True): 475 | 476 | if x.dtype != 'quaternion': 477 | raise ValueError('signal array should be of quaternion type.') 478 | 479 | # Store the signal and parameters 480 | self.t = t 481 | self.signal = x 482 | 483 | N = np.size(x, 0) 484 | dt = (t[1] - t[0]) 485 | self.f = np.fft.fftfreq(N) / dt 486 | 487 | # compute number of tapers 488 | Nmt = int(np.floor(2 * bw)) - 1 # add reference here 489 | 490 | # define multitaper array 491 | 492 | self.densities = np.zeros((N, Nmt), dtype='quaternion') 493 | self.dpss = np.zeros((N, Nmt)) 494 | 495 | if computeFlag is True: 496 | self.compute(bw=bw) 497 | 498 | # simple average (workaround needed since quaternion arrays cannot be 499 | # averaged simply) 500 | self.density = quaternion.as_quat_array(np.mean(quaternion.as_float_array(self.densities), axis=1)) 501 | 502 | # and SO, S1, S2, S3 associated 503 | self.S0, self.S1, self.S2, self.S3 = self._getStokes() 504 | 505 | # initialize normalized Stokes and degree of polarization 506 | self.S1n = np.zeros_like(self.S0) 507 | self.S2n = np.zeros_like(self.S0) 508 | self.S3n = np.zeros_like(self.S0) 509 | 510 | self.Phi = np.zeros_like(self.S0) 511 | 512 | def compute(self, bw=2.5): 513 | 514 | ''' Low-level method that computes the multitaper estimate 515 | ''' 516 | N = np.size(self.dpss, 0) 517 | Nmt = np.size(self.dpss, 1) 518 | dt = (self.t[1] - self.t[0]) 519 | # data tapers 520 | print('Number of data tapers: ' + str(Nmt)) 521 | self.dpss = sg.windows.dpss(N, bw, Kmax=Nmt).T 522 | 523 | # compute Nmt tapered periodograms 524 | for n in range(Nmt): 525 | 526 | QFTx = qfft.Qfft(self.signal * self.dpss[:, n]) # tapered QFT 527 | 528 | self.densities[:, n] = dt * (np.norm(QFTx) + 529 | utils.StokesNorm(QFTx)) 530 | 531 | def __add__(self, other): 532 | if np.any(self.t != other.t) is True: 533 | raise ValueError('Cannot sum Periodograms with differents time \ 534 | arrays') 535 | 536 | new = Multitaper(self.t, self.signal, computeFlag=False) # keep self data 537 | 538 | # update density 539 | new.density = self.density + other.density 540 | 541 | # and SO, S1, S2, S3 associated 542 | new.S0, new.S1, new.S2, new.S3 = new._getStokes() 543 | 544 | return new 545 | 546 | def __mul__(self, scalar): 547 | if np.size(scalar) > 1: 548 | raise ValueError('Only scalar multiplication is supported') 549 | 550 | new = Multitaper(self.t, self.signal, computeFlag=False) # keep self data 551 | 552 | # update density 553 | new.density = scalar * self.density 554 | 555 | # and SO, S1, S2, S3 associated 556 | new.S0, new.S1, new.S2, new.S3 = new._getStokes() 557 | 558 | return new 559 | 560 | def __rmul__(self, scalar): 561 | return self * scalar 562 | 563 | def _getStokes(self): 564 | r'''Low-level function. 565 | Extract extract Stokes parameters from the spectral density 566 | Recall that 567 | 568 | Gamma_{xx} = S0 + iS_3 + jS_1 + kS_2 569 | 570 | Returns 571 | ------- 572 | S0, S1, S2, S2: array_type 573 | Stokes parameters 574 | ''' 575 | g1, g2 = utils.sympSplit(self.density) 576 | 577 | S0 = g1.real 578 | S1 = g1.imag 579 | S3 = g2.real 580 | S2 = g2.imag 581 | 582 | return S0, S1, S2, S3 583 | 584 | def normalize(self, tol=0.0): 585 | 586 | ''' Normalize Stokes parameters wrt S0. 587 | In addition, compute the degree of polarization Phi. 588 | 589 | Parameters 590 | ---------- 591 | tol : float, optional 592 | tolerance factor used in Stokes parameters normalization. 593 | Default is 0.0 594 | 595 | Returns 596 | ------- 597 | self.S1n, self.S2n, self.S3n : array_type 598 | normalized Stokes parameters 599 | 600 | self.Phi : array_type 601 | degree of polarization 602 | 603 | See also 604 | -------- 605 | utils.normalizeStokes 606 | ''' 607 | 608 | self.S1n, self.S2n, self.S3n = utils.normalizeStokes(self.S0, self.S1, 609 | self.S2, self.S3, tol=tol) 610 | 611 | self.Phi = np.sqrt(self.S1n**2 + self.S2n**2 + self.S3n**2) 612 | 613 | def plot(self): 614 | '''Generic plot of spectral estimates''' 615 | 616 | fig, axes = _plotResultSpectral(self.t, self.signal, self) 617 | fig.show() 618 | return fig, axes 619 | 620 | 621 | def _plotResultSpectral(t, sig, spe): 622 | 623 | N = np.size(t) 624 | 625 | fig = plt.figure(figsize=(12, 8)) 626 | gs = gridspec.GridSpec(4, 2) 627 | gs.update(left=0.0, right=0.98, hspace=0, wspace=0.05) 628 | 629 | # axes 630 | ax_sig = plt.subplot(gs[0:3, 0], projection='3d') 631 | 632 | gs1 = gridspec.GridSpec(4, 2) 633 | gs1.update(left=0.05, right=0.92, hspace=0.0) 634 | ax_S0 = plt.subplot(gs1[3:4, 0]) 635 | 636 | gs2 = gridspec.GridSpec(4, 2) 637 | gs2.update(left=0.1, right=0.98, hspace=0) 638 | 639 | ax_s1 = plt.subplot(gs2[0, 1]) 640 | ax_s2 = plt.subplot(gs2[1, 1]) 641 | ax_s3 = plt.subplot(gs2[2, 1]) 642 | 643 | gs3 = gridspec.GridSpec(4, 2) 644 | gs3.update(left=0.1, right=0.98, hspace=0.2) 645 | ax_phi = plt.subplot(gs3[3, 1]) 646 | 647 | ######################################################### 648 | #ax_sig 649 | if sig.dtype == 'quaternion': 650 | x1, x2 = utils.sympSplit(sig) 651 | x = x1.real + 1j * x2.real 652 | else: 653 | x = sig 654 | 655 | ax_sig.plot(t, np.real(x), np.imag(x), color='k') 656 | 657 | tmin = ax_sig.get_xlim3d()[0] 658 | tmax = ax_sig.get_xlim3d()[1] 659 | xmin = min(ax_sig.get_ylim3d()[0], ax_sig.get_zlim3d()[0]) 660 | xmax = max(ax_sig.get_ylim3d()[1], ax_sig.get_zlim3d()[1]) 661 | ymin = min(ax_sig.get_ylim3d()[0], ax_sig.get_zlim3d()[0]) 662 | ymax = max(ax_sig.get_ylim3d()[1], ax_sig.get_zlim3d()[1]) 663 | 664 | # surfaces 665 | 666 | # complex plane 667 | xx_c, yy_c = np.meshgrid(np.linspace(xmin, xmax), np.linspace(ymin, ymax)) 668 | #ax_sig.plot_surface(-.05*(tmin+tmax), xx_c, yy_c, alpha=0.05, color='gray', rstride = 100, cstride=100) 669 | ax_sig.plot(x.real, x.imag, -.05*(tmin+tmax), zdir='x', color='gray') 670 | ax_sig.set_xlim([-.05*(tmin+tmax), tmax]) 671 | 672 | # real proj 673 | xx_r, yy_r = np.meshgrid(np.linspace(tmin, tmax), np.linspace(xmin, xmax)) 674 | #ax_sig.plot_surface(xx_r, yy_r, 1.05*ymin, alpha=0.05, color='gray', rstride = 100, cstride=100) 675 | ax_sig.plot(t, x.real, ymin*1.05, zdir='z', color='gray') 676 | ax_sig.set_zlim([1.05*ymin, ymax]) 677 | 678 | #imaginary proj 679 | xx_i, yy_i = np.meshgrid(np.linspace(tmin, tmax), np.linspace(ymin, ymax)) 680 | #ax_sig.plot_surface(xx_i, 1.05*xmax, yy_i, alpha=0.05, color='gray',rstride = 100, cstride=100) 681 | ax_sig.plot(t, x.imag, 1.05*xmax, zdir='y', color='gray') 682 | ax_sig.set_ylim([xmin, 1.05*xmax]) 683 | 684 | # replot to avoid 'overlays' 685 | ax_sig.plot(t, np.real(x), np.imag(x), color='k') 686 | #proj3d.persp_transformation = _orthogonal_proj 687 | ######################################################### 688 | # ax_S0 689 | end = N // 2 - 1 690 | line_per, = ax_S0.semilogy(spe.f[:end], spe.S0[:end], color='k') 691 | 692 | ax_S0.set_xlim([spe.f[0], spe.f[N // 2 -1]]) 693 | 694 | boundsS0min = np.min(spe.S0) 695 | boundsS0max = np.max(spe.S0) 696 | 697 | logBoundsmin = int(np.floor(np.log10(boundsS0min))) 698 | logBoundsmax = int(np.ceil(np.log10(boundsS0max))) 699 | 700 | ax_S0.spines['left'].set_bounds(0.6*10**(logBoundsmin), 1.4*10**(logBoundsmax)) 701 | 702 | # Hide the right and top spines 703 | ax_S0.spines['right'].set_visible(False) 704 | ax_S0.spines['top'].set_visible(False) 705 | ax_S0.yaxis.set_ticks_position('left') 706 | ax_S0.xaxis.set_ticks_position('bottom') 707 | 708 | ax_S0.set_ylim((0.2*10**(logBoundsmin), 1.2*10**(logBoundsmax))) 709 | ax_S0.set_yticks(np.logspace(logBoundsmin, logBoundsmax, 1 + logBoundsmax-logBoundsmin)) 710 | ax_S0.minorticks_off() 711 | #labels 712 | ax_S0.set_ylabel(r'$S_0(\nu)$') 713 | ax_S0.set_xlabel('Frequency '+ r'$\nu$' + ' [Hz]') 714 | 715 | #ax_s1 716 | ax_s1.axhline(0, color='gray', lw='1') 717 | ax_s1.plot(spe.f[:end], spe.S1n[:end], color='black', lw='2') 718 | 719 | ax_s1.set_xlim([spe.f[0], spe.f[N // 2-1]]) 720 | ax_s1.set_xticks([]) 721 | ax_s1.set_ylim((-1.2, 1.2)) 722 | ax_s1.set_yticks([-1, 0, 1]) 723 | # Only draw spine between the y-ticks 724 | ax_s1.spines['left'].set_bounds(-1.1, 1.1) 725 | # Hide the right and top spines 726 | ax_s1.spines['right'].set_visible(False) 727 | ax_s1.spines['top'].set_visible(False) 728 | ax_s1.spines['bottom'].set_visible(False) 729 | ax_s1.yaxis.set_ticks_position('left') 730 | ax_s1.xaxis.set_ticks_position('bottom') 731 | #labels 732 | ax_s1.set_ylabel(r'$s_1(\nu)$') 733 | 734 | #ax_s2 735 | ax_s2.axhline(0, color='gray', lw='1') 736 | ax_s2.plot(spe.f[:end], spe.S2n[:end], color='black', lw='2') 737 | 738 | ax_s2.set_xlim([spe.f[0], spe.f[N // 2 - 1]]) 739 | ax_s2.set_xticks([]) 740 | ax_s2.set_ylim((-1.2, 1.2)) 741 | ax_s2.set_yticks([-1, 0, 1]) 742 | # Only draw spine between the y-ticks 743 | ax_s2.spines['left'].set_bounds(-1.1, 1.1) 744 | # Hide the right and top spines 745 | ax_s2.spines['right'].set_visible(False) 746 | ax_s2.spines['top'].set_visible(False) 747 | ax_s2.spines['bottom'].set_visible(False) 748 | ax_s2.yaxis.set_ticks_position('left') 749 | ax_s2.xaxis.set_ticks_position('bottom') 750 | #labels 751 | ax_s2.set_ylabel(r'$s_2(\nu)$') 752 | 753 | #ax_s3 754 | ax_s3.axhline(0, color='gray', lw='1') 755 | 756 | ax_s3.plot(spe.f[:end], spe.S3n[:end], color='black', lw='2') 757 | 758 | ax_s3.set_xlim([spe.f[0], spe.f[N // 2 - 1]]) 759 | 760 | ax_s3.set_ylim((-1.2, 1.2)) 761 | ax_s3.set_yticks([-1, 0, 1]) 762 | # Only draw spine between the y-ticks 763 | ax_s3.spines['left'].set_bounds(-1.1, 1.1) 764 | # Hide the right and top spines 765 | ax_s3.spines['right'].set_visible(False) 766 | ax_s3.spines['top'].set_visible(False) 767 | ax_s3.spines['bottom'].set_visible(False) 768 | ax_s3.yaxis.set_ticks_position('left') 769 | ax_s3.xaxis.set_ticks_position('bottom') 770 | # Only show ticks on the left and bottom spines 771 | ax_s3.set_ylim((-1.6, 1.2)) 772 | # ax_s3.set_xticks([0, N/4, N/2]) 773 | ax_s3.spines['bottom'].set_visible(True) 774 | ax_s3.yaxis.set_ticks_position('left') 775 | ax_s3.xaxis.set_ticks_position('bottom') 776 | #labels 777 | ax_s3.set_ylabel(r'$s_3(\nu)$') 778 | #ax_s3.set_xlabel('Frequency '+ r'$\nu$') 779 | #ax_phi 780 | ax_phi.plot(spe.f[:end], spe.Phi[:end], color='black', lw='2') 781 | 782 | ax_phi.set_xlim([spe.f[0], spe.f[N // 2 - 1]]) 783 | ax_phi.set_yticks([0, 1]) 784 | # Only draw spine between the y-ticks 785 | ax_phi.spines['left'].set_bounds(-0.1, 1.1) 786 | # Hide the right and top spines 787 | ax_phi.spines['right'].set_visible(False) 788 | ax_phi.spines['top'].set_visible(False) 789 | ax_phi.yaxis.set_ticks_position('left') 790 | ax_phi.xaxis.set_ticks_position('bottom') 791 | # Only show ticks on the left and bottom spines 792 | ax_phi.set_ylim((-.2, 1.5)) 793 | #ax_phi.set_xticks([0, N/4, N/2]) 794 | #labels 795 | ax_phi.set_ylabel(r'$\Phi(\nu)$') 796 | ax_phi.set_xlabel('Frequency '+ r'$\nu$'+ ' [Hz]') 797 | 798 | axes = [ax_sig, ax_S0, ax_s1, ax_s2, ax_s3, ax_phi] 799 | return fig, axes 800 | 801 | # workaround orthographic projection (deprecated) 802 | # from mpl_toolkits.mplot3d import proj3d 803 | 804 | # def _orthogonal_proj(zfront, zback): 805 | # a = (zfront+zback)/(zfront-zback) 806 | # b = -2*(zfront*zback)/(zfront-zback) 807 | # # -0.0001 added for numerical stability as suggested in: 808 | # # http://stackoverflow.com/questions/23840756 809 | # return np.array([[1,0,0,0], 810 | # [0,1,0,0], 811 | # [0,0,a,b], 812 | # [0,0,-0.0001,zback]]) 813 | -------------------------------------------------------------------------------- /bispy/timefrequency.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # Copyright © 2018 Julien Flamant 4 | # 5 | # Distributed under terms of the CeCILL Free Software Licence Agreement 6 | 7 | ''' 8 | Module for the time-frequency analysis of bivariate signals. 9 | ''' 10 | # import modules and packages 11 | import numpy as np 12 | import quaternion 13 | 14 | import matplotlib.pyplot as plt 15 | import matplotlib.colors as col 16 | from mpl_toolkits.mplot3d import Axes3D # required for 3D plot 17 | from matplotlib.collections import LineCollection 18 | import scipy.signal as sg 19 | 20 | from . import qfft 21 | from . import utils 22 | 23 | 24 | class Hembedding(object): 25 | ''' H-embedding class. Computes quaternion embedding of complex-valued 26 | signals. 27 | 28 | Parameters 29 | ---------- 30 | q : array_type 31 | quaternion input signal 32 | 33 | Attributes 34 | ---------- 35 | signal : array_type 36 | original input signal 37 | 38 | Hembedding : array_type 39 | Quaternion-embedding of the input signal 40 | 41 | a : array_type 42 | instantaneous amplitude 43 | 44 | theta : array_type 45 | instantaneous orientation 46 | 47 | chi : array_type 48 | instantaneous ellipticity 49 | 50 | phi : array_type 51 | instantaneous phase 52 | ''' 53 | 54 | def __init__(self, q): 55 | 56 | if q.dtype != 'quaternion': 57 | raise ValueError('array should be of quaternion type') 58 | 59 | self.signal = q 60 | 61 | # compute H-extension of the signal 62 | N = np.size(q, 0) 63 | Q = qfft.Qfft(q) 64 | 65 | # filter frequencies 66 | h = np.zeros((N, 4)) 67 | h[0, 0] = 1 68 | h[N // 2, 0] = 1 69 | h[1: N // 2, 0] = 2 70 | hq = quaternion.as_quat_array(h) 71 | 72 | self.Hembedding = qfft.iQfft(Q * hq) 73 | 74 | # Compute Euler angle form 75 | a, theta, chi, phi = utils.quat2euler(self.Hembedding) 76 | 77 | self.a = a 78 | self.theta = theta 79 | self.chi = chi 80 | self.phi = phi 81 | 82 | # TODO: routine for plotting the extracted parameters? 83 | 84 | class TFPrepresentation(object): 85 | 86 | def __init__(self, x, **kwargs): 87 | """ 88 | Base time-frequency-polarization representation object. 89 | This is a low-level function, not meant to be used directly. 90 | """ 91 | # check dimension of input array 92 | if x.ndim > 1: 93 | x = x.ravel() 94 | # check dtype of signal x and convert if necessary 95 | if x.dtype != 'quaternion': 96 | x = utils.sympSynth(x.real, x.imag) 97 | self.x = x 98 | N = x.shape[0] 99 | 100 | # timestamps of the signal x 101 | t = kwargs.get('t') 102 | if t is None: 103 | t = np.arange(N) 104 | self.t = t 105 | 106 | # # number of frequency bins 107 | # NFFT = kwargs.get('NFFT') 108 | # if NFFT is None: 109 | # NFFT = nextpow2(N) 110 | # elif NFFT < 0: 111 | # raise ValueError('Nfft should be greater than 0.') 112 | # else: 113 | # NFFT = nextpow2(NFFT) 114 | # self.NFFT = NFFT 115 | # # sampled frequencies 116 | # self.f = np.fft.fftfreq(NFFT) / (t[1] - t[0]) 117 | 118 | # sampled instants (spacing) 119 | # spacing = kwargs.get('spacing') 120 | # if spacing is None: 121 | # spacing = 1 122 | # 123 | # self.sampled_index = np.arange(0, N, spacing) 124 | # self.sampled_time = t[::spacing] 125 | 126 | 127 | # init representation 128 | self.tfpr = None 129 | 130 | # init Stokes parameters 131 | self.S0 = None 132 | self.S1 = None 133 | self.S2 = None 134 | self.S3 = None 135 | 136 | self.S1n = None 137 | self.S2n = None 138 | self.S3n = None 139 | 140 | def normalizeStokes(self, tol=0.01): 141 | ''' Re-compute normalized Stokes parameters with a different normalization. 142 | 143 | Parameters 144 | ---------- 145 | tol : float 146 | tolerance parameter. Default is 0.01. 147 | ''' 148 | if self.S0 is not None: 149 | if np.sum(np.abs(self.S0)) > 0: 150 | self.S1n, self.S2n, self.S3n = utils.normalizeStokes(self.S0, self.S1, self.S2, self.S3, tol=tol) 151 | 152 | 153 | def plotSignal(self, kind='2D'): 154 | ''' 155 | Plot the bivariate signal x. 156 | 157 | Parameters 158 | ---------- 159 | kind : string, '2D' or '3D' 160 | type of plot. See `utils.visual`. 161 | ''' 162 | if kind == '2D': 163 | fig, ax = utils.visual.plot2D(self.t, self.x) 164 | elif kind == '3D': 165 | fig, ax = utils.visual.plot3D(self.t, self.x) 166 | return fig, ax 167 | 168 | def _plotStokes(self, t, f, S0_cmap='viridis', s_cmap='coolwarm', single_sided=True, affine=False): 169 | ''' Time-frequency plot of time-frequency energy map (S0) and time-frequency polarization parameters (normalized Stokes parameters S1n, S2n, S3n) 170 | 171 | Parameters 172 | ---------- 173 | t : array_type 174 | sampled times array 175 | f : array_type 176 | frequencies array (assuming unshifted) 177 | S0_cmap : colormap (sequential) 178 | to use for S0 time-frequency distribution 179 | s_cmap : colormap (diverging) 180 | to use for normalized Stokes time-frequency distribution 181 | 182 | Returns 183 | ------- 184 | fig, ax : figure and axis handles 185 | may be needed to tweak the plot 186 | ''' 187 | if affine: 188 | # size of plot 189 | A = np.random.rand(1, 4) 190 | w, h = plt.figaspect(A) 191 | labelsize= 20 192 | 193 | fig, ax = plt.subplots(ncols=4, figsize=(w, h), sharey=True, gridspec_kw = {'width_ratios':[1, 1, 1, 1]}) 194 | 195 | im0 = ax[0].pcolormesh(t, f, self.S0, cmap=S0_cmap) 196 | im1 = ax[1].pcolormesh(t, f, self.S1n, cmap=s_cmap, vmin=-1, vmax=+1) 197 | im2 = ax[2].pcolormesh(t, f, self.S2n, cmap=s_cmap, vmin=-1, vmax=+1) 198 | im3 = ax[3].pcolormesh(t, f, self.S3n, cmap=s_cmap, vmin=-1, vmax=+1) 199 | 200 | # adjust figure 201 | cbarax1 = fig.add_axes([0.96, 0.12, 0.01, 0.8]) 202 | cbar1 = fig.colorbar(im1, cax=cbarax1, orientation='vertical', ticks=[-1, 0, 1]) 203 | #cbar1.ax.set_xticklabels([-1, 0, 1]) 204 | #cbar1.ax.xaxis.set_ticks_position('top') 205 | 206 | label =[r'$S_0$', r'$s_1$', r'$s_2$', r'$s_3$'] 207 | for i, axis in enumerate(ax): 208 | axis.set_xlabel('Time') 209 | axis.set_aspect(1./axis.get_data_ratio()) 210 | axis.set_adjustable('box') 211 | axis.set_title(label[i], y = 0.85, size=labelsize) 212 | axis.set_xlim(self.t.min(), self.t.max()) 213 | 214 | # set ylabls 215 | ax[0].set_ylabel('Frequency') 216 | fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05, top=0.92, bottom=0.12) 217 | return fig, ax 218 | else: 219 | 220 | f = np.fft.fftshift(f) 221 | #tt, ff = np.meshgrid(t, np.fft.fftshift(f)) 222 | # size of plot 223 | A = np.random.rand(1, 4) 224 | w, h = plt.figaspect(A) 225 | labelsize= 20 226 | 227 | fig, ax = plt.subplots(ncols=4, figsize=(w, h), sharey=True, gridspec_kw = {'width_ratios':[1, 1, 1, 1]}) 228 | 229 | im0 = ax[0].imshow(np.fft.fftshift(self.S0, 0), cmap=S0_cmap, extent=[t.min(), t.max(), f.min(), f.max()], origin='lower') 230 | im1 = ax[1].imshow(np.fft.fftshift(self.S1n, axes=0), cmap=s_cmap, vmin=-1, vmax=+1, extent=[t.min(), t.max(), f.min(), f.max()], origin='lower') 231 | im2 = ax[2].imshow(np.fft.fftshift(self.S2n, axes=0), cmap=s_cmap, vmin=-1, vmax=+1, extent=[t.min(), t.max(), f.min(), f.max()], origin='lower') 232 | im3 = ax[3].imshow(np.fft.fftshift(self.S3n, axes=0), cmap=s_cmap, vmin=-1, vmax=+1, extent=[t.min(), t.max(), f.min(), f.max()], origin='lower') 233 | 234 | if single_sided is True: 235 | ax[0].set_ylim(0, f.max()) 236 | # adjust figure 237 | cbarax1 = fig.add_axes([0.96, 0.12, 0.01, 0.8]) 238 | cbar1 = fig.colorbar(im1, cax=cbarax1, orientation='vertical', ticks=[-1, 0, 1]) 239 | #cbar1.ax.set_xticklabels([-1, 0, 1]) 240 | #cbar1.ax.xaxis.set_ticks_position('top') 241 | 242 | label =[r'$S_0$', r'$s_1$', r'$s_2$', r'$s_3$'] 243 | for i, axis in enumerate(ax): 244 | axis.set_xlabel('Time') 245 | axis.set_aspect(1./axis.get_data_ratio()) 246 | axis.set_adjustable('box') 247 | axis.set_title(label[i], y = 0.85, size=labelsize) 248 | axis.set_xlim(self.t.min(), self.t.max()) 249 | 250 | # set ylabls 251 | ax[0].set_ylabel('Frequency') 252 | fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05, top=0.92, bottom=0.12) 253 | return fig, ax 254 | 255 | 256 | class QSTFT(TFPrepresentation): 257 | ''' Compute the Quaternion-Short Term Fourier Transform for bivariate 258 | signals taken as (1, i)-quaternion valued signals. 259 | 260 | Parameters 261 | ---------- 262 | x : array_type 263 | input signal array 264 | 265 | t : array_type (optional) 266 | time samples array. Default is t = np.arange(x.shape[0]) 267 | 268 | Attributes 269 | ---------- 270 | t : array_type 271 | time samples array 272 | 273 | x : array_type 274 | input signal array 275 | 276 | params : dict 277 | parameters used for the computation of the Q-STFT. 278 | 279 | sampled_time : array_type 280 | sampled times instants 281 | 282 | f : array_type 283 | sampled frequencies 284 | 285 | tfpr : array_type 286 | Q-STFT coefficients array 287 | 288 | S0, S1, S2, S3 : array_type 289 | Time-frequency Stokes parameters, non-normalized [w.r.t. S0] 290 | 291 | S1n, S2n, S3n : array_type 292 | normalized time-frequency Stokes parameters [w.r.t. S0] using the 293 | tolerance factor `tol`. See `utils.normalizeStokes`. 294 | 295 | ridges : list 296 | List of ridges index and values extracted from the time-frequency 297 | energy density S0. Requires call of `extractRidges` for ridges to 298 | be added. 299 | ''' 300 | 301 | def __init__(self, x, t=None): 302 | # init main base object 303 | super(QSTFT, self).__init__(x=x, t=t) 304 | # init frequencies, sampled times and params directly 305 | self.f = None 306 | self.sampled_times = None 307 | self.params = None 308 | 309 | #init ridges 310 | self.ridges = [] 311 | 312 | def compute(self, window='hamming', nperseg=128, noverlap=None, nfft=None, 313 | boundary='zeros', tol=0.01, ridges=False): 314 | ''' 315 | Compute the Q-STFT of the signal x. 316 | 317 | It takes advantages of the scipy.signal.stft function for greater 318 | flexibility. 319 | 320 | Parameters 321 | ---------- 322 | window, nperseg, noverlap, nfft, boundary : stft parameters 323 | See `scipy.signal.stft` 324 | tol : float, optional 325 | tolerance factor used in normalization of Stokes parameters. 326 | Default to 0.01 327 | ridges: bool, optional 328 | If True, compute also the ridges of the transform. 329 | Default to `False`. Ridges can be later computed using 330 | `extractRidges()`. 331 | ''' 332 | # parameters 333 | self.params = dict(fs = 1./(self.t[1]-self.t[0]), 334 | window=window, 335 | nperseg=nperseg, 336 | noverlap=noverlap, 337 | nfft=nfft, 338 | boundary=boundary, 339 | return_onesided=False, 340 | detrend=False, 341 | padded=True) 342 | 343 | # split x = x_1 + i x_2 344 | x1, x2 = utils.sympSplit(self.x) 345 | 346 | # Compute the Q-STFT using scipy.signal.stft on x1, x2 347 | f, sampled_times, temp1 = sg.stft(x1, **self.params ) 348 | _, _ , temp2 = sg.stft(x2, **self.params) 349 | 350 | # update Attributes 351 | self.f = f 352 | self.sampled_times = sampled_times 353 | # recombine 354 | self.tfpr = utils.sympSynth(temp1, temp2) 355 | 356 | # old version 357 | # sizewindow = np.size(self.window, 0) 358 | # # check size of window is odd 359 | # if sizewindow % 2 == 0: 360 | # raise ValueError('Window size must me odd.') 361 | # 362 | # Lh = (sizewindow - 1) // 2 # half size index 363 | # N = self.x.shape[0] 364 | # print('Computing Q-STFT coefficients') 365 | # temp = np.zeros_like(self.tfpr) 366 | # for ti, ts in enumerate(self.sampled_index): 367 | # 368 | # taumin = - min([round(self.NFFT / 2) - 1, Lh, ts]) 369 | # taumax = min([round(self.NFFT / 2) - 1, Lh, N - ts - 1]) 370 | # tau = np.arange(taumin, taumax + 1) 371 | # indices = ((self.NFFT + tau) % self.NFFT).astype(int) 372 | # 373 | # windowInd = self.window[(Lh + tau).astype(int)] 374 | # windowIndq = utils.sympSynth(np.conj(windowInd) / np.linalg.norm(windowInd), 0) 375 | # 376 | # temp[indices, ti] = self.x[(ts + tau).astype(int)] * windowIndq 377 | # 378 | # temp = qfft.Qfft(temp, axis=0) 379 | #self.tfpr = temp 380 | 381 | # Compute the Time-Frequency Stokes parameters S0, S1, S2, S3 382 | print('Computing Time-Frequency Stokes parameters') 383 | 384 | self.S0 = np.norm(self.tfpr) # already squared norm with this definition 385 | 386 | # compute the j-involution + conjugation 387 | qjq = utils.StokesNorm(self.tfpr) 388 | qjq_float = quaternion.as_float_array(qjq) 389 | 390 | self.S1 = qjq_float[..., 2] 391 | self.S2 = qjq_float[..., 3] 392 | self.S3 = qjq_float[..., 1] 393 | 394 | # normalized Stokes parameters 395 | self.S1n, self.S2n, self.S3n = utils.normalizeStokes(self.S0, self.S1, self.S2, self.S3, tol=tol) 396 | 397 | if ridges is True: 398 | self.extractRidges() 399 | 400 | def inverse(self, mask=None): 401 | '''Compute inverse Q-STFT 402 | 403 | Parameters 404 | ---------- 405 | mask: array_type 406 | mask applied to Q-STFT coefficients prior to inversion. 407 | If mask=None, no mask is employed. 408 | ''' 409 | 410 | # construct dict for inversion 411 | inversion_dict = dict(fs = self.params['fs'], 412 | window=self.params['window'], 413 | nperseg=self.params['nperseg'], 414 | noverlap=self.params['noverlap'], 415 | nfft=self.params['nfft'], 416 | boundary=self.params['boundary'], 417 | input_onesided=False) 418 | 419 | if mask is None: 420 | mask = np.ones(self.S0.shape, dtype=bool) 421 | 422 | tfp1, tfp2 = utils.sympSplit(self.tfpr*mask) 423 | t, x1 = sg.istft(tfp1, **inversion_dict) 424 | __, x2 = sg.istft(tfp2, **inversion_dict) 425 | 426 | xr = utils.sympSynth(x1, x2) 427 | return t, xr 428 | 429 | def extractRidges(self, parThresh=4, parMinD=3): 430 | ''' Extracts ridges from the time-frequency energy density S0. 431 | 432 | Parameters 433 | ---------- 434 | parThresh : float, optional 435 | Controls the threshold at which local maxima of S0 are accepted or 436 | rejected. Larger values of `parThresh`increase the number of 437 | eligible points. 438 | 439 | parMinD : float, optional 440 | Ridge smoothness parameter. Controls at which maximal distance 441 | can be located two eligible same ridge points. The smaller 442 | `parMinD`is the smoother ridges are. 443 | 444 | Returns 445 | ------- 446 | ridges : list 447 | list of detected ridges 448 | ''' 449 | nfft = self.params['nfft'] 450 | # Extract ridges 451 | print('Extracting ridges') 452 | self.ridges = _extractRidges(self.S0[:nfft//2, :], parThresh, parMinD) 453 | 454 | def plotRidges(self, quivertdecim=10): 455 | ''' Plot S0, the orientation and ellipticity recovered from the 456 | ridges in time-frequency domain 457 | 458 | If ridges are not extracted yet, it runs `extractRidges` method first. 459 | 460 | Parameters 461 | ---------- 462 | quivertdecim : int, optional 463 | time-decimation index (allows faster and cleaner visualization of 464 | orientation vector field) 465 | 466 | Returns 467 | ------- 468 | fig, ax : figure and axis handles 469 | may be needed to tweak the plot 470 | ''' 471 | 472 | # default colormaps 473 | cmap_S0 = 'Greys' 474 | cmap_theta = 'hsv' 475 | cmap_chi = 'coolwarm' 476 | 477 | # check whether ridges have been computed 478 | 479 | if len(self.ridges) == 0: 480 | print('No ridges detected, computing ridges.') 481 | self.extractRidges() 482 | 483 | # create ridge mask 484 | maskRidge = np.zeros(self.S0.shape, dtype=bool) 485 | for r in self.ridges: 486 | maskRidge[r[0], r[1]] = True 487 | 488 | # Compute orientation and ellipticity values 489 | 490 | S1mask = np.ma.masked_where(maskRidge == False, self.S1n) 491 | S2mask = np.ma.masked_where(maskRidge == False, self.S2n) 492 | S3mask = np.ma.masked_where(maskRidge == False, self.S3n) 493 | 494 | theta = .5 * np.arctan2(S2mask, S1mask) 495 | ori = np.exp(1j * theta) 496 | 497 | chi = 0.5 * np.arcsin(S3mask) 498 | 499 | N = np.size(self.t) 500 | 501 | # prepare meshgrid 502 | tt, ff = np.meshgrid(self.sampled_times, np.fft.fftshift(self.f)) 503 | # size of plot 504 | A = np.random.rand(1, 3) 505 | w, h = plt.figaspect(A) 506 | labelsize= 20 507 | 508 | fig, ax = plt.subplots(ncols=3, figsize=(w, h), sharey=True, gridspec_kw = {'width_ratios':[1, 1, 1]}) 509 | 510 | #im0 = ax[0].imshow(np.fft.fftshift(self.S0, 0), cmap=cmap_S0,origin='lower', extent=[self.sampled_times.min(), self.sampled_times.max(), 0, self.f.max()], aspect='auto') 511 | im0 = ax[0].imshow(np.fft.fftshift(self.S0, 0), cmap=cmap_S0, extent=[self.sampled_times.min(), self.sampled_times.max(), self.f.min(), self.f.max()], origin='lower') 512 | 513 | im1 = ax[1].quiver(self.sampled_times[::quivertdecim], self.f, np.real(ori[:, ::quivertdecim]), (np.imag(ori[:, ::quivertdecim])), theta[:, ::quivertdecim], clim=[-np.pi/2, np.pi/2], cmap=cmap_theta, headaxislength=0,headlength=0.001, pivot='middle',width=0.005, scale=15) 514 | 515 | for r in self.ridges: 516 | points = np.array([self.sampled_times[r[1]], self.f[r[0]]]).T.reshape(-1, 1, 2) 517 | segments = np.concatenate([points[:-1], points[1:]], axis=1) 518 | 519 | lc = LineCollection(segments, cmap=plt.get_cmap(cmap_chi), 520 | norm=plt.Normalize(-np.pi / 4, np.pi / 4)) 521 | lc.set_array(chi[(r[0], r[1])]) 522 | lc.set_linewidth(5) 523 | im2 = ax[2].add_collection(lc) 524 | 525 | #im2 = ax[2].imshow(chi, vmin=-np.pi/4, vmax=np.pi/4, interpolation='none', origin='lower', aspect='auto', cmap='coolwarm', extent=[self.t.min(), self.t.max(), 0, self.f[N/2-1]]) 526 | 527 | # adjust figure 528 | fig.subplots_adjust(left=0.05, top=0.8, right=0.99, wspace=0.05) 529 | 530 | for i, axis in enumerate(ax): 531 | axis.set_xlabel('Time') 532 | axis.set_ylim([0, self.f.max()]) 533 | axis.set_xlim(self.t.min(), self.t.max()) 534 | axis.set_aspect(1./axis.get_data_ratio()) 535 | axis.set_adjustable('box') 536 | 537 | 538 | cbarax0 = fig.add_axes([0.09, 0.83, 0.224, 0.03]) 539 | cbar0 = fig.colorbar(im0, cax=cbarax0, orientation='horizontal', ticks=[0, np.max(self.S0)]) 540 | cbar0.ax.set_xticklabels(['', '']) 541 | cbar0.ax.xaxis.set_ticks_position('top') 542 | 543 | cbarax1 = fig.add_axes([0.185+0.224, 0.83, 0.224, 0.03]) 544 | cbar1 = fig.colorbar(im1, cax=cbarax1, orientation='horizontal', ticks=[-np.pi/2, 0, np.pi/2]) 545 | cbar1.ax.set_xticklabels([r'$-\frac{\pi}{2}$', r'$0$', r'$\frac{\pi}{2}$']) 546 | cbar1.ax.xaxis.set_ticks_position('top') 547 | 548 | cbarax2 = fig.add_axes([0.725, 0.83, 0.224, 0.03]) 549 | cbar2 = fig.colorbar(im2, cax=cbarax2, ticks=[-np.pi/4, 0, np.pi/4], orientation='horizontal') 550 | cbar2.ax.set_xticklabels([r'$-\frac{\pi}{4}$', r'$0$', r'$\frac{\pi}{4}$']) 551 | cbar2.ax.xaxis.set_ticks_position('top') 552 | 553 | 554 | ax[0].set_ylabel('Frequency [Hz]') 555 | ax[0].set_title('Time-Frequency energy density', y=1.14) 556 | ax[1].set_title('Instantaneous orientation', y=1.14) 557 | ax[2].set_title('Instantaneous ellipticity', y=1.14) 558 | 559 | return fig, ax 560 | 561 | def plotStokes(self, S0_cmap='viridis', s_cmap='coolwarm', single_sided=True): 562 | ''' Time-frequency plot of time-frequency energy map (S0) and time-frequency polarization parameters (normalized Stokes parameters S1n, S2n, S3n) 563 | 564 | Parameters 565 | ---------- 566 | S0_cmap : colormap (sequential) 567 | to use for S0 time-frequency distribution 568 | s_cmap : colormap (diverging) 569 | to use for normalized Stokes time-frequency distribution 570 | 571 | Returns 572 | ------- 573 | fig, ax : figure and axis handles 574 | may be needed to tweak the plot 575 | ''' 576 | 577 | return self._plotStokes(self.sampled_times, self.f, S0_cmap=S0_cmap, s_cmap=s_cmap, single_sided=single_sided) 578 | 579 | 580 | #!---- Quaternion Continuous Wavelet Transform --------------------------!# 581 | class QCWT(TFPrepresentation): 582 | def __init__(self, x, t=None): 583 | ''' Compute the Quaternion-Continuous Wavelet Transform for bivariate 584 | signals taken as (1, i)-quaternion valued signals. 585 | 586 | Parameters 587 | ---------- 588 | x : array_type 589 | input signal array 590 | 591 | t : array_type (optional) 592 | time samples array. Default is t = np.arange(x.shape[0]) 593 | 594 | Attributes 595 | ---------- 596 | t : array_type 597 | time samples array 598 | 599 | x : array_type 600 | input signal array 601 | 602 | params : dict 603 | parameters used for the computation of the Q-CWT. 604 | 605 | sampled_frequencies : array_type 606 | sampled frequencies 607 | 608 | tfpr : array_type 609 | Q-STFT coefficients array 610 | 611 | S0, S1, S2, S3 : array_type 612 | Time-frequency Stokes parameters, non-normalized [w.r.t. S0] 613 | 614 | S1n, S2n, S3n : array_type 615 | normalized time-frequency Stokes parameters [w.r.t. S0] using the 616 | tolerance factor `tol`. See `utils.normalizeStokes`. 617 | 618 | ridges : list 619 | List of ridges index and values extracted from the time-frequency 620 | energy density S0. Requires call of `extractRidges` for ridges to 621 | be added. 622 | ''' 623 | 624 | # init main base object 625 | super(QCWT, self).__init__(x=x, t=t) 626 | # init frequencies, scales and params directly 627 | self.sampled_frequencies = None 628 | self.params = None 629 | 630 | #init ridges 631 | self.ridges = [] 632 | 633 | def _getWavelet(self, Nscales, **waveletParams): 634 | # construct wavelet array (len(x), Nscales) 635 | N = self.x.shape[0] 636 | W = np.zeros((Nscales, N), dtype='quaternion') 637 | Fs = 1./(self.t[1]-self.t[0]) 638 | f = np.fft.fftfreq(N, d=1./Fs) 639 | 640 | wType = waveletParams['type'] 641 | 642 | if wType not in ['Morse', 'Morlet']: 643 | raise ValueError("Unknown value for wavelet type %s, must be one of: " 644 | "{'Morse', 'Morlet'}" % wType) 645 | 646 | if wType == 'Morse': 647 | 648 | if waveletParams.get('beta') is None: 649 | beta = 3 650 | else: 651 | beta = waveletParams['beta'] 652 | 653 | if waveletParams.get('gamma') is None: 654 | gamma = 1 655 | else: 656 | gamma = waveletParams['gamma'] 657 | if waveletParams.get('norm') is None: 658 | mode = 'bandpass' 659 | else: 660 | mode = waveletParams['norm'] 661 | 662 | fc = (beta/gamma)**(1./gamma) # central frequency of Morse wavelets 663 | 664 | for fi, fsampled in enumerate(self.sampled_frequencies): 665 | fnorm = f[:N//2]*fc/(fsampled) 666 | temp = fnorm**beta*np.exp(-fnorm**gamma) 667 | norm = self._getNormalization(wType, beta, gamma, mode=mode, Fs=Fs, fc=fc, fsampled=fsampled) 668 | W[fi, :N//2] = utils.sympSynth(temp*norm, 0) 669 | 670 | elif wType == 'Morlet': 671 | if waveletParams.get('eta') is None: 672 | eta = 2*np.pi 673 | else: 674 | eta = waveletParams.get('eta') 675 | 676 | for fi, fsampled in enumerate(self.sampled_frequencies): 677 | s = eta /(2*np.pi*fsampled) # scale 678 | 679 | prefactor = (np.pi)**(-1/4)/(1+np.exp(-eta**2)-2*np.exp(-3/4*eta**2))**1/2*np.sqrt(s) 680 | 681 | temp = prefactor*(np.exp(-0.5*(s*2*np.pi*f - eta)**2) - np.exp(-0.5*((s*2*np.pi*f)**2 + eta**2))) 682 | W[fi, :] = utils.sympSynth(temp, 0) 683 | 684 | return W 685 | 686 | 687 | def _getNormalization(self, wType, beta=None, gamma=None, Fs=None, fc=None, fsampled=None, mode='bandpass'): 688 | if wType == 'Morse': 689 | ''' 690 | See e.g., 691 | Lilly, Jonathan M., and Sofia C. Olhede. 2009. “Higher-Order Properties of Analytic Wavelets.” IEEE Transactions on Signal Processing 57 (1): 146–60. doi:10.1109/TSP.2008.2007607. 692 | 693 | Olhede, Sofia C., and Andrew T. Walden. 2002. “Generalized Morse Wavelets.” IEEE Transactions on Signal Processing 50 (11): 2661–70. doi:10.1109/TSP.2002.804066. 694 | ''' 695 | import scipy.special as sp 696 | 697 | if mode == 'bandpass': 698 | a = 2*(np.exp(1)*gamma/beta)**(beta/gamma) 699 | elif mode == 'energy': 700 | r = (2*beta+1)/gamma 701 | a = (gamma*(2**r)/sp.gamma(r))**(0.5)*np.sqrt(Fs*fc/(fsampled)) 702 | return a 703 | 704 | def compute(self, fmin, fmax, waveletParams, Nscales=50, tol=0.01, ridges=False): 705 | ''' Compute the Q-CWT of x using a specified wavelet. 706 | 707 | Parameters 708 | ---------- 709 | fmin, fmax : float 710 | min and max frequencies 711 | waveletParams : dict 712 | dictionary containing wavelet features. Currently 2 types, 713 | 'Morlet' and 'Morse' are supported. 714 | Nscales : int 715 | number of scales to analyze. Controls the size of the 716 | sampled_frequencies array. 717 | tol : float, optional 718 | tolerance factor used in normalization of Stokes parameters. 719 | Default to 0.01 720 | ridges: bool, optional 721 | If True, compute also the ridges of the transform. 722 | Default to `False`. Ridges can be later computed using 723 | `extractRidges()`. 724 | 725 | ''' 726 | 727 | #deine sampled frequencies from fmin and fmax 728 | Fs = self.t[1]-self.t[0] 729 | N = self.x.shape[0] 730 | self.sampled_frequencies = np.logspace(np.log10(fmin), np.log10(fmax), Nscales) # note to self: allow other choices as well? 731 | 732 | W =self._getWavelet(Nscales=Nscales, **waveletParams) 733 | 734 | X = qfft.Qfft(self.x) # Precompute the QFT of signal sarray 735 | temp = np.zeros((Nscales, N), dtype='quaternion') 736 | for k in range(Nscales): 737 | temp[k, :] = qfft.iQfft(X * W[k, :]) 738 | 739 | self.tfpr = temp 740 | self.params = dict(fmin=fmin, fmax=fmax, wavelets = W) 741 | 742 | # Compute the Time-Frequency Stokes parameters S0, S1, S2, S3 743 | print('Computing Time-Frequency Stokes parameters') 744 | 745 | self.S0 = np.norm(self.tfpr) # already squared norm with this definition 746 | 747 | # compute the j-involution + conjugation 748 | qjq = utils.StokesNorm(self.tfpr) 749 | qjq_float = quaternion.as_float_array(qjq) 750 | 751 | self.S1 = qjq_float[..., 2] 752 | self.S2 = qjq_float[..., 3] 753 | self.S3 = qjq_float[..., 1] 754 | 755 | # normalized Stokes parameters 756 | self.S1n, self.S2n, self.S3n = utils.normalizeStokes(self.S0, self.S1, self.S2, self.S3, tol=tol) 757 | 758 | if ridges is True: 759 | self.extractRidges() 760 | 761 | def extractRidges(self, parThresh=4, parMinD=3): 762 | ''' Extracts ridges from the time-scale energy density S0. 763 | 764 | Parameters 765 | ---------- 766 | parThresh : float, optional 767 | Controls the threshold at which local maxima of S0 are accepted or 768 | rejected. Larger values of `parThresh`increase the number of 769 | eligible points. 770 | 771 | parMinD : float, optional 772 | Ridge smoothness parameter. Controls at which maximal distance 773 | can be located two eligible same ridge points. The smaller 774 | `parMinD`is the smoother ridges are. 775 | 776 | Returns 777 | ------- 778 | ridges : list 779 | list of detected ridges 780 | ''' 781 | 782 | print('Extracting ridges') 783 | self.ridges = _extractRidges(self.S0, parThresh, parMinD) 784 | 785 | 786 | def plotStokes(self, S0_cmap='viridis', s_cmap='coolwarm'): 787 | ''' Time-frequency plot of time-frequency energy map (S0) and time-frequency polarization parameters (normalized Stokes parameters S1n, S2n, S3n) 788 | 789 | Parameters 790 | ---------- 791 | S0_cmap : colormap (sequential) 792 | to use for S0 time-frequency distribution 793 | s_cmap : colormap (diverging) 794 | to use for normalized Stokes time-frequency distribution 795 | 796 | Returns 797 | ------- 798 | fig, ax : figure and axis handles 799 | may be needed to tweak the plot 800 | ''' 801 | 802 | return self._plotStokes(self.t, self.sampled_frequencies, S0_cmap=S0_cmap, s_cmap=s_cmap, affine=True) 803 | 804 | def plotRidges(self, quivertdecim=10): 805 | 806 | ''' Plot S0, and the orientation and ellipticity recovered from the 807 | ridges in time-scale domain 808 | 809 | If ridges are not extracted yet, it runs `extractRidges` method first. 810 | 811 | Parameters 812 | ---------- 813 | quivertdecim : int, optional 814 | time-decimation index (allows faster and cleaner visualization of 815 | orientation vector field) 816 | 817 | Returns 818 | ------- 819 | fig, ax : figure and axis handles 820 | may be needed to tweak the plot 821 | ''' 822 | # default colormaps 823 | cmap_S0 = 'Greys' 824 | cmap_theta = 'hsv' 825 | cmap_chi = 'coolwarm' 826 | 827 | # check whether ridges have been computed 828 | 829 | if len(self.ridges) == 0: 830 | print('No ridges detected, computing ridges.') 831 | self.extractRidges() 832 | 833 | # create ridge mask 834 | maskRidge = np.zeros(self.S0.shape, dtype=bool) 835 | for r in self.ridges: 836 | maskRidge[r[0], r[1]] = True 837 | 838 | # Compute orientation and ellipticity values 839 | 840 | S1mask = np.ma.masked_where(maskRidge == False, self.S1n) 841 | S2mask = np.ma.masked_where(maskRidge == False, self.S2n) 842 | S3mask = np.ma.masked_where(maskRidge == False, self.S3n) 843 | 844 | theta = .5*np.arctan2(S2mask, S1mask) 845 | ori = np.exp(1j * theta) 846 | 847 | chi = 0.5 * np.arcsin(S3mask) 848 | 849 | N = np.size(self.t) 850 | fig, ax = plt.subplots(ncols=3, figsize=(12, 5), sharey=True) 851 | im0 = ax[0].pcolormesh(self.t, self.sampled_frequencies, self.S0, cmap=cmap_S0) 852 | 853 | im1 = ax[1].quiver(self.t[::quivertdecim], self.sampled_frequencies, np.real(ori[:, ::quivertdecim]), (np.imag(ori[:, ::quivertdecim])), theta[:, ::quivertdecim], clim=[-np.pi/2, np.pi/2], cmap=cmap_theta, headaxislength=0,headlength=0.001, pivot='middle',width=0.005, scale=15) 854 | 855 | for r in self.ridges: 856 | points = np.array([self.t[r[1]], self.sampled_frequencies[r[0]]]).T.reshape(-1, 1, 2) 857 | segments = np.concatenate([points[:-1], points[1:]], axis=1) 858 | 859 | lc = LineCollection(segments, cmap=plt.get_cmap(cmap_chi), 860 | norm=plt.Normalize(-np.pi / 4, np.pi / 4)) 861 | lc.set_array(chi[(r[0], r[1])]) 862 | lc.set_linewidth(3) 863 | im2 = ax[2].add_collection(lc) 864 | 865 | #im2 = ax[2].imshow(chi, vmin=-np.pi/4, vmax=np.pi/4, interpolation='none', origin='lower', aspect='auto', cmap='coolwarm', extent=[self.t.min(), self.t.max(), 0, self.f[N/2-1]]) 866 | 867 | # adjust figure 868 | fig.subplots_adjust(left=0.05, top=0.8, right=0.99, wspace=0.05) 869 | 870 | cbarax0 = fig.add_axes([0.05, 0.83, 0.303, 0.03]) 871 | cbar0 = fig.colorbar(im0, cax=cbarax0, orientation='horizontal', ticks=[0, np.max(self.S0)]) 872 | cbar0.ax.set_xticklabels(['', '']) 873 | cbar0.ax.xaxis.set_ticks_position('top') 874 | 875 | cbarax1 = fig.add_axes([0.369, 0.83, 0.303, 0.03]) 876 | cbar1 = fig.colorbar(im1, cax=cbarax1, orientation='horizontal', ticks=[-np.pi/2, 0, np.pi/2]) 877 | cbar1.ax.set_xticklabels([r'$-\frac{\pi}{2}$', r'$0$', r'$\frac{\pi}{2}$']) 878 | cbar1.ax.xaxis.set_ticks_position('top') 879 | 880 | cbarax2 = fig.add_axes([0.686, 0.83, 0.303, 0.03]) 881 | cbar2 = fig.colorbar(im2, cax=cbarax2, ticks=[-np.pi/4, 0, np.pi/4], orientation='horizontal') 882 | cbar2.ax.set_xticklabels([r'$-\frac{\pi}{4}$', r'$0$', r'$\frac{\pi}{4}$']) 883 | cbar2.ax.xaxis.set_ticks_position('top') 884 | 885 | for i, axis in enumerate(ax): 886 | axis.set_xlim([self.t.min(), self.t.max()]) 887 | axis.set_ylim([self.sampled_frequencies.min(), self.sampled_frequencies.max()]) 888 | 889 | ax[0].set_xlabel('Time [s]') 890 | ax[0].set_ylabel('Frequency [Hz]') 891 | 892 | ax[0].set_title('Time-frequency energy density', y=1.14) 893 | ax[1].set_title('Instantaneous orientation', y=1.14) 894 | ax[2].set_title('Instantaneous ellipticity', y=1.14) 895 | 896 | return fig, ax 897 | 898 | # 899 | # Low-level functions 900 | # 901 | 902 | 903 | def _extractRidges(density, parThresh, parMinD): 904 | 905 | A, B = density.shape # A: len of frequency axis, B len of time axis 906 | 907 | # find all local maximas 908 | 909 | locMax = np.zeros((A, B), dtype=bool) 910 | thresh = np.max(density) / parThresh 911 | for ind in range(B): 912 | 913 | detectmax = sg.argrelextrema(density[:, ind], np.greater)[0] 914 | ismaxOK = density[detectmax, ind] > thresh 915 | locMax[detectmax, ind] = True * ismaxOK 916 | 917 | # chain the ridges 918 | ridges = [] 919 | 920 | currentRidget = [] 921 | currentRidgef = [] 922 | 923 | while np.any(locMax): 924 | 925 | freqMask, timeMask = np.where(locMax) 926 | 927 | currentRidget.append(timeMask[0]) 928 | currentRidgef.append(freqMask[0]) 929 | 930 | locMax[freqMask[0], timeMask[0]] = False 931 | 932 | freqMask, timeMask = np.where(locMax) 933 | 934 | FLAG = False # Avoid undifined FLAG if condition is false. 935 | if len(timeMask) > 1: 936 | FLAG = True 937 | while FLAG: 938 | 939 | distances = np.sqrt((timeMask-currentRidget[-1])**2 + (freqMask-currentRidgef[-1])**2) 940 | 941 | minD = np.where(distances == distances.min())[0][0] 942 | if (distances[minD] < parMinD) and (len(timeMask) > 1): 943 | currentRidget.append(timeMask[minD]) 944 | currentRidgef.append(freqMask[minD]) 945 | locMax[freqMask[minD], timeMask[minD]] = False 946 | freqMask, timeMask = np.where(locMax) 947 | else: 948 | FLAG = False 949 | if len(timeMask) == 1: 950 | currentRidget.append(timeMask[0]) 951 | currentRidgef.append(freqMask[0]) 952 | ridges.append((currentRidgef, currentRidget)) 953 | currentRidget = [] 954 | currentRidgef = [] 955 | print('Ridge added') 956 | 957 | print(str(len(ridges)) + ' ridges were recovered.') 958 | 959 | return ridges 960 | 961 | 962 | def log2(x): 963 | return np.log(x) / np.log(2) 964 | 965 | 966 | def nextpow2(i): 967 | n = 1 968 | while n < i: n *= 2 969 | return int(n) 970 | -------------------------------------------------------------------------------- /bispy/utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | This program contains utility tools. 6 | """ 7 | 8 | import numpy as np 9 | import quaternion 10 | 11 | import matplotlib.pyplot as plt 12 | from mpl_toolkits.mplot3d import Axes3D # required for 3D plot 13 | import matplotlib.gridspec as gridspec # required for 2D plot 14 | 15 | __all__ = ['sympSplit', 'sympSynth', 'StokesNorm', 'normalizeStokes', 16 | 'Stokes2geo', 'geo2Stokes', 'quat2euler', 'euler2quat'] 17 | 18 | 19 | def sympSplit(q): 20 | 21 | '''Splits a quaternion array into two complex arrays. 22 | 23 | The decomposition reads:: 24 | 25 | q = q_1 + i q_2 26 | 27 | where q_1, q_2 are complex (1, 1j) numpy arrays 28 | 29 | Parameters 30 | ---------- 31 | q : quaternion numpy array 32 | 33 | Returns 34 | ------- 35 | q_1, q_2 : complex numpy arrays 36 | 37 | See also 38 | -------- 39 | sympSynth 40 | 41 | Examples 42 | -------- 43 | >>> q 44 | array([[quaternion(0.3, 0.47, -0.86, -0.42), 45 | quaternion(0.24, -1.07, -2.11, 0.37), 46 | quaternion(-0.24, -1.36, -1.14, 1.69)], 47 | [quaternion(0.4, -0.61, 0.04, -0.03), 48 | quaternion(-1.58, -1.69, -1.18, -1.02), 49 | quaternion(0.78, -1.06, -1.05, -0.62)]], dtype=quaternion) 50 | >>> q_1, q_2 = sympSplit(q) 51 | >>> q_1 52 | array([[ 0.30-0.86j, 0.24-2.11j, -0.24-1.14j], 53 | [ 0.40+0.04j, -1.58-1.18j, 0.78-1.05j]]) 54 | >>> q_2 55 | array([[ 0.47-0.42j, -1.07+0.37j, -1.36+1.69j], 56 | [-0.61-0.03j, -1.69-1.02j, -1.06-0.62j]]) 57 | ''' 58 | 59 | if q.dtype != 'quaternion': 60 | raise ValueError('array should be of quaternion type') 61 | 62 | qfloat = quaternion.as_float_array(q) 63 | q_1 = qfloat[..., 0] + 1j * qfloat[..., 2] 64 | q_2 = qfloat[..., 1] + 1j * qfloat[..., 3] 65 | 66 | return q_1, q_2 67 | 68 | 69 | def sympSynth(q_1, q_2): 70 | 71 | '''Constructs a quaternion array from two complex arrays. 72 | 73 | The decomposition reads:: 74 | 75 | q = q_1 + i q_2 76 | 77 | where q_1, q_2 are complex (1, 1j) numpy arrays 78 | 79 | Parameters 80 | ---------- 81 | q_1, q_2 : complex numpy arrays 82 | 83 | Returns 84 | ------- 85 | q : quaternion numpy array 86 | 87 | See also 88 | -------- 89 | sympSplit 90 | 91 | Examples 92 | -------- 93 | >>> q_1 94 | array([[ 0.30-0.86j, 0.24-2.11j, -0.24-1.14j], 95 | [ 0.40+0.04j, -1.58-1.18j, 0.78-1.05j]]) 96 | >>> q_2 97 | array([[ 0.47-0.42j, -1.07+0.37j, -1.36+1.69j], 98 | [-0.61-0.03j, -1.69-1.02j, -1.06-0.62j]]) 99 | >>> sympSynth(q_1 q_2) 100 | array([[quaternion(0.3, 0.47, -0.86, -0.42), 101 | quaternion(0.24, -1.07, -2.11, 0.37), 102 | quaternion(-0.24, -1.36, -1.14, 1.69)], 103 | [quaternion(0.4, -0.61, 0.04, -0.03), 104 | quaternion(-1.58, -1.69, -1.18, -1.02), 105 | quaternion(0.78, -1.06, -1.05, -0.62)]], dtype=quaternion) 106 | 107 | ''' 108 | # construct correct dimension of float array (shape(q_1), 4) 109 | dimArray = list(q_1.shape) 110 | dimArray.append(4) 111 | 112 | qfloat = np.zeros(tuple(dimArray)) 113 | 114 | qfloat[..., 0] = np.real(q_1) 115 | qfloat[..., 1] = np.real(q_2) 116 | qfloat[..., 2] = np.imag(q_1) 117 | qfloat[..., 3] = np.imag(q_2) 118 | 119 | return quaternion.as_quat_array(qfloat) 120 | 121 | 122 | ''' Stokes related functions, and geometric parameters extraction ''' 123 | 124 | 125 | def StokesNorm(q): 126 | ''' Return the Stokes-Poincaré norm of a quaternion. 127 | 128 | The Stokes-Poincaré norm is defined by:: 129 | 130 | StokesNorm(q) = -q*j*np.conj(q) 131 | 132 | with j = quaternion(0, 0, 1, 0). 133 | 134 | Parameters 135 | ---------- 136 | q : quaternion numpy array 137 | 138 | Returns 139 | ------- 140 | q*j*np.conj(q) : Stokes-Poincaré norm of q 141 | 142 | See also 143 | -------- 144 | quat2euler 145 | 146 | ''' 147 | 148 | if q.dtype != 'quaternion': 149 | raise ValueError('array should be of quaternion type') 150 | 151 | # compute j-product 152 | jq = quaternion.y * np.conj(q) 153 | 154 | return q * jq 155 | 156 | 157 | def normalizeStokes(S0, S1, S2, S3, tol=0.0): 158 | ''' Normalize Stokes parameters S1, S2, S3 by S0. 159 | 160 | Normalization can be performed using a soft thresholding-like method, if 161 | regularization is needed:: 162 | 163 | Si = Si/(S0 + tol*np.max(S0)) 164 | 165 | where i = 1, 2, 3 and `tol` is the tolerance factor. This function assumes 166 | that the maximum value of S0 has a significance for the whole indices of 167 | the Si arrays. 168 | 169 | Parameters 170 | ---------- 171 | S0, S1, S2, S3 : array_type 172 | tol : float, optional 173 | 174 | Returns 175 | ------- 176 | S1n, S2n, S3n : array_type 177 | 178 | See also 179 | -------- 180 | quat2euler 181 | ''' 182 | 183 | epsilon = tol * np.max(S0) # soft thresholding 184 | 185 | S1n = S1 / (S0 + epsilon) 186 | S2n = S2 / (S0 + epsilon) 187 | S3n = S3 / (S0 + epsilon) 188 | 189 | return S1n, S2n, S3n 190 | 191 | 192 | def Stokes2geo(S0, S1, S2, S3, tol=0.0): 193 | ''' Return geometric parameters from Stokes parameters. 194 | 195 | It returns the decomposition in a, theta, chi and degree of polarization 196 | Phi. 197 | 198 | Parameters 199 | ---------- 200 | S0, S1, S2, S3 : array_type 201 | tol : float, optional 202 | 203 | Returns 204 | ------- 205 | 206 | a, theta, chi, Phi : array_type 207 | 208 | See also 209 | -------- 210 | quat2euler 211 | normalizeStokes 212 | geo2Stokes 213 | 214 | ''' 215 | 216 | # normalize 217 | S1n, S2n, S3n = normalizeStokes(S0, S1, S2, S3, tol=tol) 218 | Phi = np.sqrt(S1n**2 + S2n**2 + S3n**2) 219 | 220 | # estimate geometrical paramaters 221 | 222 | a = np.sqrt(Phi*S0) 223 | theta = 0.5 * np.arctan2(S2n, S1n) 224 | chi = 0.5 * np.arcsin(S3n/Phi) 225 | 226 | return a, theta, chi, Phi 227 | 228 | 229 | def geo2Stokes(a, theta, chi, Phi=1): 230 | ''' 231 | Compute Stokes parameters from geometric parameters. 232 | 233 | Parameters 234 | ---------- 235 | a, theta, chi : array_type 236 | Phi : array_type, optional 237 | 238 | Returns 239 | ------- 240 | S0, S1, S2, S3 : array_type 241 | 242 | See also 243 | -------- 244 | quat2euler 245 | Stokes2geo 246 | 247 | ''' 248 | 249 | S0 = np.abs(a)**2 250 | S1 = np.abs(a)**2 * Phi * np.cos(2 * theta) * np.cos(2 * chi) 251 | S2 = np.abs(a)**2 * Phi * np.sin(2 * theta) * np.cos(2 * chi) 252 | S3 = np.abs(a)**2 * Phi * np.sin(2 * chi) 253 | 254 | return S0, S1, S2, S3 255 | 256 | 257 | def quat2euler(q): 258 | '''Euler polar form of a quaternion array. 259 | 260 | The decomposition reads:: 261 | 262 | q = a * np.exp(i * theta) * np.exp(-k * chi) * np.exp(j * phi) 263 | 264 | with a > 0, -pi/2 < theta < pi/2, -pi/4 < chi < pi/4 and -pi < phi < pi . 265 | 266 | Parameters 267 | ---------- 268 | q : quaternion numpy array 269 | 270 | Returns 271 | ------- 272 | a, theta, chi, phi : array_type 273 | 274 | See also 275 | -------- 276 | euler2quat 277 | 278 | ''' 279 | 280 | S0 = np.norm(q) # squared modulus 281 | 282 | qjq = StokesNorm(q) 283 | qjq_float = quaternion.as_float_array(qjq) 284 | S1 = qjq_float[..., 2] 285 | S2 = qjq_float[..., 3] 286 | S3 = qjq_float[..., 1] 287 | 288 | a, theta, chi, Phi = Stokes2geo(S0, S1, S2, S3) 289 | 290 | qi = quaternion.x 291 | qk = quaternion.z 292 | 293 | prefactor = a * np.exp(qi * theta) * np.exp(-qk * chi) 294 | expjphi = quaternion.as_float_array(prefactor**(-1) * q) 295 | 296 | expjphi_cplx = expjphi[..., 0] + 1j * expjphi[..., 2] 297 | 298 | phi = np.angle(expjphi_cplx) 299 | 300 | return a, theta, chi, phi 301 | 302 | 303 | def euler2quat(a, theta, chi, phi): 304 | ''' Quaternion from Euler polar form. 305 | 306 | The decomposition reads:: 307 | 308 | q = a * np.exp(i * theta) * np.exp(-k * chi) * np.exp(j * phi) 309 | 310 | with a > 0, -pi/2 < theta < pi/2, -pi/4 < chi < pi/4 and -pi < phi < pi . 311 | 312 | Parameters 313 | ---------- 314 | a, theta, chi, phi : array_type 315 | 316 | Returns 317 | ------- 318 | q : quaternion numpy array 319 | 320 | See also 321 | -------- 322 | quat2euler 323 | 324 | ''' 325 | 326 | qi = quaternion.x 327 | qj = quaternion.y 328 | qk = quaternion.z 329 | 330 | q = a * np.exp(qi * theta) * np.exp(-qk * chi) * np.exp(qj * phi) 331 | 332 | return q 333 | 334 | 335 | ''' Windows related functions ''' 336 | 337 | 338 | class windows(object): 339 | 340 | ''' Windows functions static methods. 341 | 342 | These window functions are provided for convenience, and are meant to be 343 | used with the QSTFT class. 344 | ''' 345 | 346 | def __init__(self): 347 | pass 348 | 349 | @staticmethod 350 | def rectangle(N): 351 | ''' Rectangle window''' 352 | window = np.ones(N) 353 | return window 354 | 355 | @staticmethod 356 | def hamming(N): 357 | ''' Hamming window''' 358 | window = 0.54 - 0.46 * np.cos(2.0 * np.pi * np.arange(1, N + 1) / (N + 1)) 359 | return window 360 | 361 | @staticmethod 362 | def hanning(N): 363 | ''' Hanning window''' 364 | window = 0.50 - 0.50 * np.cos(2.0 * np.pi * np.arange(1, 365 | N + 1) / (N + 1)) 366 | return window 367 | 368 | @staticmethod 369 | def gaussian(N, sigma=0.005): 370 | '''Gaussian window''' 371 | if sigma > 0.5: 372 | raise ValueError('Sigma must be smaller than 0.5') 373 | else: 374 | window = np.exp(np.log(sigma) * np.linspace(-1, 1, N)**2) 375 | 376 | return window 377 | 378 | def polarizationEllipse(theta, chi, a=1, N=128): 379 | 380 | '''Returns the trace of the polarization ellipse given its orientation and ellipticity. 381 | 382 | Parameters 383 | ---------- 384 | theta : float 385 | Orientation of the ellipse, must be between -pi/2 and pi/2 386 | 387 | chi : float 388 | Ellipticity. It defines the shape of the ellipse, must be between -pi/4 and pi/4 389 | 390 | a : float, optional 391 | Scale parameter. Default is 1. 392 | 393 | N : int, optional 394 | Length of the complex trace. Default is 128. 395 | 396 | Returns 397 | ------- 398 | 399 | phi : array_type 400 | Curvilinear absciss of the polarization ellipe 401 | 402 | ell : array_type 403 | Complex trace of the polarization ellipse. 404 | ''' 405 | 406 | phi = np.linspace(0, 2*np.pi, N) 407 | 408 | ell = a*np.exp(1j*theta)*(np.cos(chi)*np.cos(phi)+1j*np.sin(chi)*np.sin(phi)) 409 | 410 | return phi, ell 411 | 412 | 413 | class visual(object): 414 | ''' 415 | Static methods for visualization of bivariate signals. 416 | ''' 417 | 418 | def __init__(self): 419 | pass 420 | 421 | @staticmethod 422 | def plot2D(t, q, labels=['u(t)', 'v(t)']): 423 | ''' 2D plot of a bivariate signal. 424 | 425 | Plots the 2D trace, and time evolution of each component. 426 | 427 | Parameters 428 | ---------- 429 | t, q : array_type 430 | time and signal arrays (signal array may be either complex or quaternion type) 431 | labels : [label1, label2] 432 | list of labels to display. 433 | 434 | Returns 435 | ------- 436 | fig, ax : figure and axis handles 437 | ''' 438 | 439 | fig = plt.figure(figsize=(10, 4)) 440 | 441 | N = np.size(t) 442 | q1, q2 = sympSplit(q) 443 | 444 | gs = gridspec.GridSpec(2, 5) 445 | gs.update(hspace=0.1, wspace=0.1, bottom=0.18, left=0.09, top=0.95, right=0.94) 446 | 447 | 448 | ax1 = plt.subplot(gs[0, 2:]) 449 | ax2 = plt.subplot(gs[1, 2:]) 450 | ax3 = plt.subplot(gs[:, :2]) 451 | 452 | # ax1 453 | ax1.spines['top'].set_visible(False) 454 | ax1.spines['left'].set_visible(False) 455 | ax1.spines['bottom'].set_visible(False) 456 | ax1.set_xticks([]) 457 | ax1.yaxis.set_ticks_position('right') 458 | ax1.spines['right'].set_position(('outward', 10)) 459 | #ax2 460 | ax2.spines['top'].set_visible(False) 461 | ax2.spines['left'].set_visible(False) 462 | ax2.yaxis.set_ticks_position('right') 463 | ax2.spines['right'].set_position(('outward', 10)) 464 | ax2.spines['bottom'].set_position(('outward', 10)) 465 | 466 | #ax3 467 | ax3.spines['top'].set_visible(False) 468 | ax3.spines['right'].set_visible(False) 469 | ax3.spines['left'].set_position(('outward', 10)) 470 | ax3.spines['bottom'].set_position(('outward', 10)) 471 | 472 | # plots 473 | ax1.plot(t, q1.real) 474 | ax2.plot(t, q2.real) 475 | ax3.plot(q1.real, q2.real) 476 | 477 | ax3.set_aspect('equal', 'box') 478 | # get limits 479 | lims = ax3.get_xlim() + ax3.get_ylim() 480 | li = np.max(np.abs(lims)) 481 | #set lims 482 | for ax in [ax1, ax2, ax3]: 483 | ax.set_ylim([-li, li]) 484 | ax3.set_xlim([-li, li]) 485 | ax3.set_xlabel(labels[0]) 486 | ax3.set_ylabel(labels[1]) 487 | 488 | ax1.set_title(labels[0]) 489 | ax2.set_title(labels[1]) 490 | ax2.set_xlabel('time') 491 | return fig, [ax1, ax2, ax3] 492 | 493 | @staticmethod 494 | def plot3D(t, q): 495 | ''' 3D plot of a bivariate signal 496 | 497 | Parameters 498 | ---------- 499 | t, q : array_type 500 | time and signal arrays (signal array may be either complex or quaternion type) 501 | 502 | Returns 503 | ------- 504 | fig, ax : figure and axis handles 505 | 506 | ''' 507 | 508 | if q.dtype == 'quaternion': 509 | u, v = sympSplit(q) 510 | x = u.real + 1j * v.real 511 | else: 512 | x = q # complex array 513 | 514 | if len(q.shape) > 2: 515 | raise ValueError('Data should be a vector to be 3D plotted') 516 | 517 | fig = plt.figure() 518 | ax_sig = fig.add_subplot(projection = '3d') 519 | # ax_sig 520 | ax_sig.plot(t, np.real(x), np.imag(x), color='k') 521 | 522 | tmin = ax_sig.get_xlim3d()[0] 523 | tmax = ax_sig.get_xlim3d()[1] 524 | xmin = min(ax_sig.get_ylim3d()[0], ax_sig.get_zlim3d()[0]) 525 | xmax = max(ax_sig.get_ylim3d()[1], ax_sig.get_zlim3d()[1]) 526 | ymin = min(ax_sig.get_ylim3d()[0], ax_sig.get_zlim3d()[0]) 527 | ymax = max(ax_sig.get_ylim3d()[1], ax_sig.get_zlim3d()[1]) 528 | 529 | # surfaces 530 | 531 | # complex plane 532 | xx_c, yy_c = np.meshgrid(np.linspace(xmin, xmax), np.linspace(ymin, ymax)) 533 | #ax_sig.plot_surface(-.05*(tmin+tmax), xx_c, yy_c, alpha=0.05, color='gray', rstride = 100, cstride=100) 534 | ax_sig.plot(x.real, x.imag, -.05*(tmin+tmax), zdir='x', color='gray') 535 | ax_sig.set_xlim([-.05*(tmin+tmax), tmax]) 536 | 537 | # real proj 538 | xx_r, yy_r = np.meshgrid(np.linspace(tmin, tmax), np.linspace(xmin, xmax)) 539 | #ax_sig.plot_surface(xx_r, yy_r, 1.05*ymin, alpha=0.05, color='gray', rstride = 100, cstride=100) 540 | ax_sig.plot(t, x.real, ymin*1.05, zdir='z', color='gray') 541 | ax_sig.set_zlim([1.05*ymin, ymax]) 542 | 543 | #imaginary proj 544 | xx_i, yy_i = np.meshgrid(np.linspace(tmin, tmax), np.linspace(ymin, ymax)) 545 | #ax_sig.plot_surface(xx_i, 1.05*xmax, yy_i, alpha=0.05, color='gray',rstride = 100, cstride=100) 546 | ax_sig.plot(t, x.imag, 1.05*xmax, zdir='y', color='gray') 547 | ax_sig.set_ylim([xmin, 1.05*xmax]) 548 | 549 | # replot to avoid 'overlays' 550 | ax_sig.plot(t, np.real(x), np.imag(x), color='k') 551 | #proj3d.persp_transformation = _orthogonal_proj 552 | fig.show() 553 | return fig, ax_sig 554 | 555 | 556 | # workaround orthographic projection (deprecated) 557 | # from mpl_toolkits.mplot3d import proj3d 558 | 559 | # def _orthogonal_proj(zfront, zback): 560 | # a = (zfront+zback)/(zfront-zback) 561 | # b = -2*(zfront*zback)/(zfront-zback) 562 | # # -0.0001 added for numerical stability as suggested in: 563 | # # http://stackoverflow.com/questions/23840756 564 | # return np.array([[1,0,0,0], 565 | # [0,1,0,0], 566 | # [0,0,a,b], 567 | # [0,0,-0.0001,zback]]) 568 | -------------------------------------------------------------------------------- /doc-requirements.txt: -------------------------------------------------------------------------------- 1 | numpydoc -------------------------------------------------------------------------------- /docs/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/.DS_Store -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " applehelp to make an Apple Help Book" 34 | @echo " devhelp to make HTML files and a Devhelp project" 35 | @echo " epub to make an epub" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | html: 55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 56 | @echo 57 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 58 | 59 | dirhtml: 60 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 61 | @echo 62 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 63 | 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | pickle: 70 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 71 | @echo 72 | @echo "Build finished; now you can process the pickle files." 73 | 74 | json: 75 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 76 | @echo 77 | @echo "Build finished; now you can process the JSON files." 78 | 79 | htmlhelp: 80 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 81 | @echo 82 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 83 | ".hhp project file in $(BUILDDIR)/htmlhelp." 84 | 85 | qthelp: 86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 87 | @echo 88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PyBiv.qhcp" 91 | @echo "To view the help file:" 92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyBiv.qhc" 93 | 94 | applehelp: 95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 96 | @echo 97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 98 | @echo "N.B. You won't be able to view it unless you put it in" \ 99 | "~/Library/Documentation/Help or install it in your application" \ 100 | "bundle." 101 | 102 | devhelp: 103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 104 | @echo 105 | @echo "Build finished." 106 | @echo "To view the help file:" 107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/PyBiv" 108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PyBiv" 109 | @echo "# devhelp" 110 | 111 | epub: 112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 113 | @echo 114 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 115 | 116 | latex: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo 119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 121 | "(use \`make latexpdf' here to do that automatically)." 122 | 123 | latexpdf: 124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 125 | @echo "Running LaTeX files through pdflatex..." 126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 128 | 129 | latexpdfja: 130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 131 | @echo "Running LaTeX files through platex and dvipdfmx..." 132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 134 | 135 | text: 136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 137 | @echo 138 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 139 | 140 | man: 141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 142 | @echo 143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 144 | 145 | texinfo: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo 148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 149 | @echo "Run \`make' in that directory to run these through makeinfo" \ 150 | "(use \`make info' here to do that automatically)." 151 | 152 | info: 153 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 154 | @echo "Running Texinfo files through makeinfo..." 155 | make -C $(BUILDDIR)/texinfo info 156 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 157 | 158 | gettext: 159 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 160 | @echo 161 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 162 | 163 | changes: 164 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 165 | @echo 166 | @echo "The overview file is in $(BUILDDIR)/changes." 167 | 168 | linkcheck: 169 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 170 | @echo 171 | @echo "Link check complete; look for any errors in the above output " \ 172 | "or in $(BUILDDIR)/linkcheck/output.txt." 173 | 174 | doctest: 175 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 176 | @echo "Testing of doctests in the sources finished, look at the " \ 177 | "results in $(BUILDDIR)/doctest/output.txt." 178 | 179 | coverage: 180 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 181 | @echo "Testing of coverage in the sources finished, look at the " \ 182 | "results in $(BUILDDIR)/coverage/python.txt." 183 | 184 | xml: 185 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 186 | @echo 187 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 188 | 189 | pseudoxml: 190 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 191 | @echo 192 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 193 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # BiSPy documentation build configuration file, created by 5 | # sphinx-quickstart on Tue Jan 31 16:28:23 2017. 6 | # 7 | # This file is execfile()d with the current directory set to its 8 | # containing dir. 9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | 16 | import sys 17 | import os 18 | import shlex 19 | 20 | from unittest.mock import MagicMock 21 | 22 | class Mock(MagicMock): 23 | @classmethod 24 | def __getattr__(cls, name): 25 | return MagicMock() 26 | 27 | MOCK_MODULES = ['numpy-quaternion', 'quaternion'] 28 | sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) 29 | 30 | # If extensions (or modules to document with autodoc) are in another directory, 31 | # add these directories to sys.path here. If the directory is relative to the 32 | # documentation root, use os.path.abspath to make it absolute, like shown here. 33 | sys.path.insert(0, os.path.abspath('../..')) 34 | 35 | # -- General configuration ------------------------------------------------ 36 | 37 | # If your documentation needs a minimal Sphinx version, state it here. 38 | #needs_sphinx = '1.0' 39 | 40 | # Add any Sphinx extension module names here, as strings. They can be 41 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 42 | # ones. 43 | extensions = [ 44 | 'sphinx.ext.autodoc', 45 | 'sphinx.ext.doctest', 46 | 'sphinx.ext.intersphinx', 47 | "sphinx.ext.githubpages", # Publish HTML docs in GitHub Pages 48 | 'sphinx.ext.coverage', 49 | 'sphinx.ext.imgmath', 50 | 'sphinx.ext.viewcode', 51 | 'sphinx.ext.autosummary', 52 | 'numpydoc', 53 | 'm2r2', 54 | ] 55 | 56 | # Add any paths that contain templates here, relative to this directory. 57 | templates_path = ['_templates'] 58 | 59 | # The suffix(es) of source filenames. 60 | # You can specify multiple suffix as a list of string: 61 | source_suffix = ['.rst', '.md'] 62 | #source_suffix = '.rst' 63 | 64 | # The encoding of source files. 65 | #source_encoding = 'utf-8-sig' 66 | 67 | # The master toctree document. 68 | master_doc = 'index' 69 | 70 | # General information about the project. 71 | project = 'BiSPy' 72 | copyright = '2017-2018, Julien Flamant' 73 | author = 'Julien Flamant' 74 | 75 | # The version info for the project you're documenting, acts as replacement for 76 | # |version| and |release|, also used in various other places throughout the 77 | # built documents. 78 | # 79 | # The short X.Y version. 80 | version = '1.0' 81 | # The full version, including alpha/beta/rc tags. 82 | release = '1.0' 83 | 84 | # The language for content autogenerated by Sphinx. Refer to documentation 85 | # for a list of supported languages. 86 | # 87 | # This is also used if you do content translation via gettext catalogs. 88 | # Usually you set "language" from the command line for these cases. 89 | language = None 90 | 91 | # There are two options for replacing |today|: either, you set today to some 92 | # non-false value, then it is used: 93 | #today = '' 94 | # Else, today_fmt is used as the format for a strftime call. 95 | #today_fmt = '%B %d, %Y' 96 | 97 | # List of patterns, relative to source directory, that match files and 98 | # directories to ignore when looking for source files. 99 | exclude_patterns = ['_build'] 100 | 101 | # The reST default role (used for this markup: `text`) to use for all 102 | # documents. 103 | #default_role = None 104 | 105 | # If true, '()' will be appended to :func: etc. cross-reference text. 106 | #add_function_parentheses = True 107 | 108 | # If true, the current module name will be prepended to all description 109 | # unit titles (such as .. function::). 110 | #add_module_names = True 111 | 112 | # If true, sectionauthor and moduleauthor directives will be shown in the 113 | # output. They are ignored by default. 114 | #show_authors = False 115 | 116 | # The name of the Pygments (syntax highlighting) style to use. 117 | pygments_style = 'sphinx' 118 | 119 | # A list of ignored prefixes for module index sorting. 120 | #modindex_common_prefix = [] 121 | 122 | # If true, keep warnings as "system message" paragraphs in the built documents. 123 | #keep_warnings = False 124 | 125 | # If true, `todo` and `todoList` produce output, else they produce nothing. 126 | todo_include_todos = False 127 | 128 | 129 | # -- Options for HTML output ---------------------------------------------- 130 | 131 | # The theme to use for HTML and HTML Help pages. See the documentation for 132 | # a list of builtin themes. 133 | html_theme = 'sphinx_rtd_theme' 134 | 135 | # Theme options are theme-specific and customize the look and feel of a theme 136 | # further. For a list of options available for each theme, see the 137 | # documentation. 138 | #html_theme_options = {} 139 | 140 | # Add any paths that contain custom themes here, relative to this directory. 141 | #html_theme_path = [] 142 | 143 | # The name for this set of Sphinx documents. If None, it defaults to 144 | # " v documentation". 145 | #html_title = None 146 | 147 | # A shorter title for the navigation bar. Default is the same as html_title. 148 | #html_short_title = None 149 | 150 | # The name of an image file (relative to this directory) to place at the top 151 | # of the sidebar. 152 | #html_logo = None 153 | 154 | # The name of an image file (within the static path) to use as favicon of the 155 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 156 | # pixels large. 157 | #html_favicon = None 158 | 159 | # Add any paths that contain custom static files (such as style sheets) here, 160 | # relative to this directory. They are copied after the builtin static files, 161 | # so a file named "default.css" will overwrite the builtin "default.css". 162 | html_static_path = ['_static'] 163 | 164 | # Add any extra paths that contain custom files (such as robots.txt or 165 | # .htaccess) here, relative to this directory. These files are copied 166 | # directly to the root of the documentation. 167 | #html_extra_path = [] 168 | 169 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 170 | # using the given strftime format. 171 | #html_last_updated_fmt = '%b %d, %Y' 172 | 173 | # If true, SmartyPants will be used to convert quotes and dashes to 174 | # typographically correct entities. 175 | #html_use_smartypants = True 176 | 177 | # Custom sidebar templates, maps document names to template names. 178 | #html_sidebars = {} 179 | 180 | # Additional templates that should be rendered to pages, maps page names to 181 | # template names. 182 | #html_additional_pages = {} 183 | 184 | # If false, no module index is generated. 185 | #html_domain_indices = True 186 | 187 | # If false, no index is generated. 188 | #html_use_index = True 189 | 190 | # If true, the index is split into individual pages for each letter. 191 | #html_split_index = False 192 | 193 | # If true, links to the reST sources are added to the pages. 194 | #html_show_sourcelink = True 195 | 196 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 197 | #html_show_sphinx = True 198 | 199 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 200 | #html_show_copyright = True 201 | 202 | # If true, an OpenSearch description file will be output, and all pages will 203 | # contain a tag referring to it. The value of this option must be the 204 | # base URL from which the finished HTML is served. 205 | #html_use_opensearch = '' 206 | 207 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 208 | #html_file_suffix = None 209 | 210 | # Language to be used for generating the HTML full-text search index. 211 | # Sphinx supports the following languages: 212 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' 213 | # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' 214 | #html_search_language = 'en' 215 | 216 | # A dictionary with options for the search language support, empty by default. 217 | # Now only 'ja' uses this config value 218 | #html_search_options = {'type': 'default'} 219 | 220 | # The name of a javascript file (relative to the configuration directory) that 221 | # implements a search results scorer. If empty, the default will be used. 222 | #html_search_scorer = 'scorer.js' 223 | 224 | # Output file base name for HTML help builder. 225 | htmlhelp_basename = 'BiSPydoc' 226 | 227 | # -- Options for LaTeX output --------------------------------------------- 228 | 229 | latex_elements = { 230 | # The paper size ('letterpaper' or 'a4paper'). 231 | #'papersize': 'letterpaper', 232 | 233 | # The font size ('10pt', '11pt' or '12pt'). 234 | #'pointsize': '10pt', 235 | 236 | # Additional stuff for the LaTeX preamble. 237 | #'preamble': '', 238 | 239 | # Latex figure (float) alignment 240 | #'figure_align': 'htbp', 241 | } 242 | 243 | # Grouping the document tree into LaTeX files. List of tuples 244 | # (source start file, target name, title, 245 | # author, documentclass [howto, manual, or own class]). 246 | latex_documents = [ 247 | (master_doc, 'BiSPy.tex', 'BiSPy Documentation', 248 | 'Julien Flamant', 'manual'), 249 | ] 250 | 251 | # The name of an image file (relative to this directory) to place at the top of 252 | # the title page. 253 | #latex_logo = None 254 | 255 | # For "manual" documents, if this is true, then toplevel headings are parts, 256 | # not chapters. 257 | #latex_use_parts = False 258 | 259 | # If true, show page references after internal links. 260 | #latex_show_pagerefs = False 261 | 262 | # If true, show URL addresses after external links. 263 | #latex_show_urls = False 264 | 265 | # Documents to append as an appendix to all manuals. 266 | #latex_appendices = [] 267 | 268 | # If false, no module index is generated. 269 | #latex_domain_indices = True 270 | 271 | 272 | # -- Options for manual page output --------------------------------------- 273 | 274 | # One entry per manual page. List of tuples 275 | # (source start file, name, description, authors, manual section). 276 | man_pages = [ 277 | (master_doc, 'BiSpy', 'BiSPy Documentation', 278 | [author], 1) 279 | ] 280 | 281 | # If true, show URL addresses after external links. 282 | #man_show_urls = False 283 | 284 | 285 | # -- Options for Texinfo output ------------------------------------------- 286 | 287 | # Grouping the document tree into Texinfo files. List of tuples 288 | # (source start file, target name, title, author, 289 | # dir menu entry, description, category) 290 | texinfo_documents = [ 291 | (master_doc, 'BiSPy', 'BiSPy Documentation', 292 | author, 'BiSPy', 'One line description of project.', 293 | 'Miscellaneous'), 294 | ] 295 | 296 | # Documents to append as an appendix to all manuals. 297 | #texinfo_appendices = [] 298 | 299 | # If false, no module index is generated. 300 | #texinfo_domain_indices = True 301 | 302 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 303 | #texinfo_show_urls = 'footnote' 304 | 305 | # If true, do not generate a @detailmenu in the "Top" node's menu. 306 | #texinfo_no_detailmenu = False 307 | 308 | 309 | # Example configuration for intersphinx: refer to the Python standard library. 310 | intersphinx_mapping = {'https://docs.python.org/': None} 311 | 312 | 313 | # ----------------------------------------------------------------------------- 314 | # Autosummary 315 | # ----------------------------------------------------------------------------- 316 | 317 | import glob 318 | autosummary_generate = glob.glob("*.rst") 319 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. BiSPy documentation master file, created by 2 | sphinx-quickstart on Tue Jan 31 16:28:23 2017. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | 7 | .. mdinclude:: ../README.md 8 | 9 | 10 | Documentation contents 11 | ====================== 12 | 13 | .. toctree:: 14 | :maxdepth: 2 15 | 16 | tutorials/index 17 | reference 18 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. xml to make Docutils-native XML files 37 | echo. pseudoxml to make pseudoxml-XML files for display purposes 38 | echo. linkcheck to check all external links for integrity 39 | echo. doctest to run all doctests embedded in the documentation if enabled 40 | echo. coverage to run coverage check of the documentation if enabled 41 | goto end 42 | ) 43 | 44 | if "%1" == "clean" ( 45 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 46 | del /q /s %BUILDDIR%\* 47 | goto end 48 | ) 49 | 50 | 51 | REM Check if sphinx-build is available and fallback to Python version if any 52 | %SPHINXBUILD% 2> nul 53 | if errorlevel 9009 goto sphinx_python 54 | goto sphinx_ok 55 | 56 | :sphinx_python 57 | 58 | set SPHINXBUILD=python -m sphinx.__init__ 59 | %SPHINXBUILD% 2> nul 60 | if errorlevel 9009 ( 61 | echo. 62 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 63 | echo.installed, then set the SPHINXBUILD environment variable to point 64 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 65 | echo.may add the Sphinx directory to PATH. 66 | echo. 67 | echo.If you don't have Sphinx installed, grab it from 68 | echo.http://sphinx-doc.org/ 69 | exit /b 1 70 | ) 71 | 72 | :sphinx_ok 73 | 74 | 75 | if "%1" == "html" ( 76 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 77 | if errorlevel 1 exit /b 1 78 | echo. 79 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 80 | goto end 81 | ) 82 | 83 | if "%1" == "dirhtml" ( 84 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 85 | if errorlevel 1 exit /b 1 86 | echo. 87 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 88 | goto end 89 | ) 90 | 91 | if "%1" == "singlehtml" ( 92 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 93 | if errorlevel 1 exit /b 1 94 | echo. 95 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 96 | goto end 97 | ) 98 | 99 | if "%1" == "pickle" ( 100 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 101 | if errorlevel 1 exit /b 1 102 | echo. 103 | echo.Build finished; now you can process the pickle files. 104 | goto end 105 | ) 106 | 107 | if "%1" == "json" ( 108 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 109 | if errorlevel 1 exit /b 1 110 | echo. 111 | echo.Build finished; now you can process the JSON files. 112 | goto end 113 | ) 114 | 115 | if "%1" == "htmlhelp" ( 116 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 117 | if errorlevel 1 exit /b 1 118 | echo. 119 | echo.Build finished; now you can run HTML Help Workshop with the ^ 120 | .hhp project file in %BUILDDIR%/htmlhelp. 121 | goto end 122 | ) 123 | 124 | if "%1" == "qthelp" ( 125 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 129 | .qhcp project file in %BUILDDIR%/qthelp, like this: 130 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\PyBiv.qhcp 131 | echo.To view the help file: 132 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\PyBiv.ghc 133 | goto end 134 | ) 135 | 136 | if "%1" == "devhelp" ( 137 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 138 | if errorlevel 1 exit /b 1 139 | echo. 140 | echo.Build finished. 141 | goto end 142 | ) 143 | 144 | if "%1" == "epub" ( 145 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 146 | if errorlevel 1 exit /b 1 147 | echo. 148 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 149 | goto end 150 | ) 151 | 152 | if "%1" == "latex" ( 153 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 154 | if errorlevel 1 exit /b 1 155 | echo. 156 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 157 | goto end 158 | ) 159 | 160 | if "%1" == "latexpdf" ( 161 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 162 | cd %BUILDDIR%/latex 163 | make all-pdf 164 | cd %~dp0 165 | echo. 166 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 167 | goto end 168 | ) 169 | 170 | if "%1" == "latexpdfja" ( 171 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 172 | cd %BUILDDIR%/latex 173 | make all-pdf-ja 174 | cd %~dp0 175 | echo. 176 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 177 | goto end 178 | ) 179 | 180 | if "%1" == "text" ( 181 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 182 | if errorlevel 1 exit /b 1 183 | echo. 184 | echo.Build finished. The text files are in %BUILDDIR%/text. 185 | goto end 186 | ) 187 | 188 | if "%1" == "man" ( 189 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 190 | if errorlevel 1 exit /b 1 191 | echo. 192 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 193 | goto end 194 | ) 195 | 196 | if "%1" == "texinfo" ( 197 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 198 | if errorlevel 1 exit /b 1 199 | echo. 200 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 201 | goto end 202 | ) 203 | 204 | if "%1" == "gettext" ( 205 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 206 | if errorlevel 1 exit /b 1 207 | echo. 208 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 209 | goto end 210 | ) 211 | 212 | if "%1" == "changes" ( 213 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 214 | if errorlevel 1 exit /b 1 215 | echo. 216 | echo.The overview file is in %BUILDDIR%/changes. 217 | goto end 218 | ) 219 | 220 | if "%1" == "linkcheck" ( 221 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 222 | if errorlevel 1 exit /b 1 223 | echo. 224 | echo.Link check complete; look for any errors in the above output ^ 225 | or in %BUILDDIR%/linkcheck/output.txt. 226 | goto end 227 | ) 228 | 229 | if "%1" == "doctest" ( 230 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 231 | if errorlevel 1 exit /b 1 232 | echo. 233 | echo.Testing of doctests in the sources finished, look at the ^ 234 | results in %BUILDDIR%/doctest/output.txt. 235 | goto end 236 | ) 237 | 238 | if "%1" == "coverage" ( 239 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage 240 | if errorlevel 1 exit /b 1 241 | echo. 242 | echo.Testing of coverage in the sources finished, look at the ^ 243 | results in %BUILDDIR%/coverage/python.txt. 244 | goto end 245 | ) 246 | 247 | if "%1" == "xml" ( 248 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 249 | if errorlevel 1 exit /b 1 250 | echo. 251 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 252 | goto end 253 | ) 254 | 255 | if "%1" == "pseudoxml" ( 256 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 257 | if errorlevel 1 exit /b 1 258 | echo. 259 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 260 | goto end 261 | ) 262 | 263 | :end 264 | -------------------------------------------------------------------------------- /docs/reference.filters.rst: -------------------------------------------------------------------------------- 1 | .. _reference.filters: 2 | 3 | .. currentmodule:: bispy.filters 4 | 5 | :mod:`filters` - LTI filters for bivariate signals 6 | ***************************************************** 7 | 8 | 9 | 10 | Unitary Filter 11 | -------------------- 12 | 13 | .. autoclass:: UnitaryFilter 14 | :members: 15 | 16 | HermitianFilter 17 | ------ 18 | 19 | .. autoclass:: HermitianFilter 20 | :members: 21 | -------------------------------------------------------------------------------- /docs/reference.qfft.rst: -------------------------------------------------------------------------------- 1 | .. _reference.qfft: 2 | 3 | .. currentmodule:: bispy.qfft 4 | 5 | :mod:`qfft` - Quaternion Fourier Transforms 6 | ************************************************* 7 | 8 | 9 | 10 | General 11 | ------- 12 | 13 | .. autosummary:: 14 | :toctree: generated/ 15 | 16 | Qfft 17 | iQfft 18 | Qfftshift 19 | iQfftshift 20 | Qfftfreq -------------------------------------------------------------------------------- /docs/reference.rst: -------------------------------------------------------------------------------- 1 | .. _reference: 2 | 3 | ################ 4 | Reference manual 5 | ################ 6 | 7 | .. module:: bispy 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | reference.qfft 13 | reference.utils 14 | reference.timefrequency 15 | reference.spectral 16 | reference.filters 17 | reference.signals 18 | -------------------------------------------------------------------------------- /docs/reference.signals.rst: -------------------------------------------------------------------------------- 1 | .. _reference.signals: 2 | 3 | .. currentmodule:: bispy.signals 4 | 5 | :mod:`signals` - generating bivariate signals 6 | ***************************************************** 7 | 8 | Prototype signals 9 | ----------------- 10 | 11 | .. autosummary:: 12 | :toctree: generated/ 13 | 14 | bivariateAMFM 15 | bivariatewhiteNoise 16 | 17 | .. autoclass:: stationaryBivariate 18 | :members: 19 | -------------------------------------------------------------------------------- /docs/reference.spectral.rst: -------------------------------------------------------------------------------- 1 | .. _reference.spectral: 2 | 3 | .. currentmodule:: bispy.spectral 4 | 5 | :mod:`spectral` - Spectral analysis routines 6 | ******************************************** 7 | 8 | 9 | Quaternion PSD 10 | -------------- 11 | 12 | .. autoclass:: quaternionPSD 13 | :members: 14 | 15 | Periodogram 16 | ----------- 17 | 18 | .. autoclass:: Periodogram 19 | :members: 20 | 21 | Multitaper 22 | ---------- 23 | 24 | .. autoclass:: Multitaper 25 | :members: 26 | -------------------------------------------------------------------------------- /docs/reference.timefrequency.rst: -------------------------------------------------------------------------------- 1 | .. _reference.timefrequency: 2 | 3 | .. currentmodule:: bispy.timefrequency 4 | 5 | :mod:`timefrequency` - Time-Frequency representations 6 | ***************************************************** 7 | 8 | 9 | 10 | Quaternion Embedding 11 | -------------------- 12 | 13 | .. autoclass:: Hembedding 14 | :members: 15 | 16 | Q-STFT 17 | ------ 18 | 19 | .. autoclass:: QSTFT 20 | :members: 21 | 22 | Q-CWT 23 | ------ 24 | 25 | .. autoclass:: QCWT 26 | :members: 27 | -------------------------------------------------------------------------------- /docs/reference.utils.rst: -------------------------------------------------------------------------------- 1 | .. _reference.utils: 2 | 3 | .. currentmodule:: bispy.utils 4 | 5 | :mod:`utils` - Utility functions 6 | ******************************** 7 | 8 | 9 | Quaternion specific operations 10 | ------------------------------ 11 | 12 | .. autosummary:: 13 | :toctree: generated/ 14 | 15 | sympSplit 16 | sympSynth 17 | StokesNorm 18 | normalizeStokes 19 | Stokes2geo 20 | geo2Stokes 21 | quat2euler 22 | euler2quat 23 | 24 | 25 | Windows functions 26 | ----------------- 27 | 28 | .. autoclass:: windows 29 | 30 | Graphical tools 31 | --------------- 32 | 33 | .. autoclass:: visual 34 | :members: 35 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | furo==2021.11.16 2 | -------------------------------------------------------------------------------- /docs/tutorials/index.rst: -------------------------------------------------------------------------------- 1 | .. _tutorials: 2 | 3 | ######### 4 | Tutorials 5 | ######### 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | timefrequency 11 | spectralanalysis 12 | -------------------------------------------------------------------------------- /docs/tutorials/spectralanalysis.rst: -------------------------------------------------------------------------------- 1 | 2 | Spectral analysis of bivariate signals: tutorial 3 | ================================================ 4 | 5 | This tutorial aims at demonstrating different tools available within the 6 | ``spectral`` module of ``BiSPy``. The examples provided here come along 7 | with the paper 8 | 9 | - Julien Flamant, Nicolas Le Bihan, Pierre Chainais: “Spectral analysis 10 | of stationary random bivariate signals”, IEEE Transactions on Signal 11 | Processing, 2017; 12 | `arXiv:1703.06417 `__, 13 | `doi:10.1109/TSP.2017.2736494 `__ 14 | 15 | The paper contains theoretical results and several applications that can 16 | be reproduced with the following tutorial. A completementary notebook 17 | version is available 18 | `here `__. 19 | 20 | Load ``bispy`` and necessary modules 21 | ------------------------------------ 22 | 23 | .. code:: ipython3 24 | 25 | import numpy as np 26 | import matplotlib.pyplot as plt 27 | import quaternion # load the quaternion module 28 | import bispy as bsp 29 | 30 | Synthetic examples 31 | ------------------ 32 | 33 | The following examples are presented in the aforementioned paper. The 34 | module ``bispy.signals`` gives useful functions to generate the synthetic 35 | signals presented. 36 | 37 | Example 1: Bivariate white noise only 38 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 39 | 40 | First let us define the constants defining the polarization properties 41 | of the bivariate white gaussian noise. 42 | 43 | .. code:: ipython3 44 | 45 | N = 1024 # length of the signal 46 | S0 = 1 # power of the bivariate WGN 47 | P0 = .5 # degree of polarization 48 | theta0 = np.pi/4 # angle of linear polarization 49 | 50 | t = np.arange(0, N) # time vector 51 | 52 | First simulate a realization of this bivariate WGN: 53 | 54 | .. code:: ipython3 55 | 56 | w = bsp.signals.bivariatewhiteNoise(N, S0, P=P0, theta=theta0) 57 | 58 | Now, display this signal 59 | 60 | .. code:: ipython3 61 | 62 | fig, ax = bsp.utils.visual.plot2D(t, w) 63 | 64 | 65 | 66 | .. image:: spectralanalysisTutorial_files/output_9_0.png 67 | 68 | 69 | The goal is now to compare 2 spectral density estimation methods: 70 | 71 | - an averaged polarization periodogram 72 | - an averaged multitaper estimate using Slepian tapers. 73 | 74 | To do so, we simulate ``M`` independent realization of this bivariate 75 | WGN, and average across realizations each method output. 76 | 77 | .. code:: ipython3 78 | 79 | M = 10 # number of independent realization of the WGN 80 | 81 | The periodogram and multitaper estimates are computed like: 82 | 83 | .. code:: ipython3 84 | 85 | w = bsp.signals.bivariatewhiteNoise(N, S0, P=P0, theta=theta0) 86 | # compute spectral estimates 87 | per = bsp.spectral.Periodogram(t, w) 88 | multi = bsp.spectral.Multitaper(t, w) 89 | 90 | # loop accros realizations 91 | for k in range(1, M): 92 | w = bsp.signals.bivariatewhiteNoise(N, S0, P=P0, theta=theta0) 93 | 94 | per2 = bsp.spectral.Periodogram(t, w) 95 | multi2 = bsp.spectral.Multitaper(t, w) 96 | per = per + per2 97 | multi = multi + multi2 98 | 99 | # normalize by M 100 | per = 1./M * per 101 | multi = 1./M * multi 102 | 103 | 104 | By default, the ``Multitaper`` class assumes a bandwidth ``bw`` of 2.5 105 | frequency samples, giving 4 Slepian tapers. 106 | 107 | The next step is to normalize the Stokes parameters 108 | :math:`S_1, S_2, S_3` by the intensity Stokes parameter :math:`S_0` 109 | 110 | .. code:: ipython3 111 | 112 | per.normalize() 113 | multi.normalize() 114 | 115 | We can now display the results for both methods 116 | 117 | .. code:: ipython3 118 | 119 | fig, axes = per.plot() 120 | 121 | 122 | 123 | .. image:: spectralanalysisTutorial_files/output_17_1.png 124 | 125 | 126 | .. code:: ipython3 127 | 128 | fig, ax = multi.plot() 129 | 130 | 131 | 132 | .. image:: spectralanalysisTutorial_files/output_18_1.png 133 | 134 | 135 | Both estimates permit to recover the main features of the bivariate WGN: 136 | power, degree of polarization and polarization state are recovered. 137 | 138 | Then the usual discussion between periodogram and multitaper estimates 139 | apply: the multitaper estimate exhibits reduced leakage bias and less 140 | variance than the periodogram estimate. 141 | 142 | Example 2: bivariate monochromatic signal in white noise 143 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 144 | 145 | We proceed similarly. First define the different parameters: 146 | 147 | .. code:: ipython3 148 | 149 | N = 1024 # length of the signal 150 | 151 | t = np.arange(0, N) # time vector 152 | dt = (t[1]-t[0]) 153 | 154 | # bivariate monochromatic signal parameters 155 | a = 1/np.sqrt(N*dt) # amplitude = 1 156 | theta = -np.pi/3 # polarization angle 157 | chi = np.pi/8 # ellipticity parameter 158 | f0 = 128/N/dt # frequency 159 | 160 | # bivariate WGN noise paramerters 161 | S0_w = 10**(-2) # power of the bivariate WGN 162 | Phi_w = .2 # degree of polarization 163 | theta_w = np.pi/8 # angle of linear polarization 164 | 165 | Now, simulate a bivariate monochromatic signal (note the use of the 166 | argument ``complexOutput`` which provides a complex output (useful for 167 | plots), rather than a quaternion-valued output (useful for computations) 168 | 169 | .. code:: ipython3 170 | 171 | x = bsp.signals.bivariateAMFM(a, theta, chi, 2*np.pi*f0*t) 172 | 173 | Let us have a look at the bivariate signal itself 174 | 175 | .. code:: ipython3 176 | 177 | fig, ax = bsp.utils.visual.plot2D(t, x) 178 | 179 | 180 | 181 | .. image:: spectralanalysisTutorial_files/output_25_0.png 182 | 183 | 184 | Again, we compare 2 spectral density estimation methods: 185 | 186 | - an averaged polarization periodogram 187 | - an averaged multitaper estimate using Slepian tapers. 188 | 189 | To do so, we simulate ``M`` independent realization of this bivariate 190 | WGN, and average across realizations each method output. 191 | 192 | .. code:: ipython3 193 | 194 | M = 20 # number of realizations 195 | y = np.zeros((N, M), dtype='quaternion') 196 | 197 | # generate the data 198 | for k in range(M): 199 | phi = 2*np.pi*np.random.rand() # random initial phase term 200 | x = bsp.signals.bivariateAMFM(a, theta, chi, 2*np.pi*f0*t+phi) # bivariate monochromatic signal 201 | w = bsp.signals.bivariatewhiteNoise(N, S0_w, Phi_w, theta_w) # bivariate WGN 202 | y[:, k] = x + w 203 | 204 | # compute spectral estimates 205 | per = bsp.spectral.Periodogram(t, y[:, 0]) 206 | multi = bsp.spectral.Multitaper(t, y[:, 0], bw=3) 207 | for k in range(1, M): 208 | per2 = bsp.spectral.Periodogram(t, y[:, k]) 209 | multi2 = bsp.spectral.Multitaper(t, y[:, k], bw=3) 210 | 211 | per = per + per2 212 | multi = multi + multi2 213 | 214 | 215 | per = 1./M * per 216 | multi = 1/M * multi 217 | 218 | 219 | 220 | Here the multitaper class is computed with a bandwidth ``bw = 3`` 221 | frequency samples, giving 5 Slepian tapers. 222 | 223 | The next step is to normalize the Stokes parameters 224 | :math:`S_1, S_2, S_3` by the intensity Stokes parameter :math:`S_0` 225 | 226 | .. code:: ipython3 227 | 228 | per.normalize() 229 | multi.normalize() 230 | 231 | We can now display the results for both methods 232 | 233 | .. code:: ipython3 234 | 235 | fig, ax = per.plot() 236 | 237 | 238 | 239 | 240 | .. image:: spectralanalysisTutorial_files/output_31_1.png 241 | 242 | 243 | .. code:: ipython3 244 | 245 | fig, ax = multi.plot() 246 | 247 | 248 | 249 | 250 | .. image:: spectralanalysisTutorial_files/output_32_1.png 251 | 252 | 253 | A real case example: spectral analysis of wind measurements 254 | ----------------------------------------------------------- 255 | 256 | We turn to a real-life example to illustrate the general relevance of 257 | the method. 258 | 259 | We consider a dataset of instantaneous wind measurements (east and 260 | northward velocities). The dataset is available for download at 261 | http://www.commsp.ee.ic.ac.uk/~mandic/research/WL\_Complex\_Stuff.htm. 262 | This dataset has been used by the authors in several publications, e.g. 263 | in 264 | 265 | :: 266 | 267 | S. L. Goh, M. Chen, D. H. Popovic, K. Aihara, D. Obradovic and D. P. Mandic, "Complex-Valued Forecasting of Wind Profile," Renewable Energy, vol. 31, pp. 1733-1750, 2006. 268 | 269 | 270 | Quoting the included Readme: >- Wind data for 'low', 'medium' and 'high' 271 | dynamics regions. - Data are recorded using the Gill Instruments 272 | WindMaster, the 2D ultrasonic anemometer - Wind was sampled at 32 Hz and 273 | resampled at 50Hz, and the two channels correspond to the the "north" 274 | and "east" direction - To make a complex-valued wind signal, combine 275 | z=v\_n + j v\_e, where 'v' is wind speed and 'n' and 'e' the north and 276 | east directions - Data length = 5000 samples 277 | 278 | Setting 1: low-wind 279 | ~~~~~~~~~~~~~~~~~~~ 280 | 281 | We start by loading the data 282 | 283 | .. code:: ipython3 284 | 285 | import scipy.io as scio 286 | windData = scio.loadmat('datasets/wind/low-wind.mat') 287 | 288 | u = windData['v_east'][:,0] 289 | v = windData['v_north'][:, 0] 290 | 291 | N = np.size(u) # should be 5000 292 | dt = 1./50 293 | 294 | Estimating polarization features in bivariate signals requires ideally 295 | multiple measurements/realizations. We will fake this out using an 296 | ergodic hypothesis. This thus split the signal into ``Nw`` subsignals, 297 | and compute for each a spectral estimate. By averaging out spectral 298 | estimates, one obtains a estimate of the spectral density of the 299 | underlying process. (Welch method with no overlap) 300 | 301 | Let's define a handy function: 302 | 303 | .. code:: ipython3 304 | 305 | def subsignal(u, v, Nx, k): 306 | '''subsamples u, v components and returns the associated quaternion signal''' 307 | uk = u[k*Nx:(k+1)*Nx] 308 | vk = v[k*Nx:(k+1)*Nx] 309 | 310 | # to make it zero-mean 311 | uk = uk - np.mean(uk) 312 | vk = vk - np.mean(vk) 313 | 314 | return bsp.utils.sympSynth(uk, vk) 315 | 316 | Then we compute the averaged multitaper estimate 317 | 318 | .. code:: ipython3 319 | 320 | # subsampling parameters 321 | Nw = 20 # number of subsamples 322 | Nx = N // Nw # length of one subsampled signal 323 | 324 | # time index for subsampled signals 325 | tx = np.arange(Nx)*dt 326 | 327 | xk = subsignal(u, v, Nx, 0) 328 | 329 | multi = bsp.spectral.Multitaper(tx, xk) 330 | # loop across subsamples 331 | for k in range(1, Nw): 332 | 333 | xk = subsignal(u, v, Nx, k) 334 | multi2 = bsp.spectral.Multitaper(tx, xk) 335 | multi = multi + multi2 336 | 337 | # normalize and plot multitaper estimate 338 | multi.normalize() 339 | fig, ax = multi.plot() 340 | 341 | 342 | 343 | .. image:: spectralanalysisTutorial_files/output_39_2.png 344 | 345 | 346 | The total power spectrum :math:`S_0(\nu)` exhibits a power-law like 347 | shape. 348 | 349 | Looking at the degree of polarization :math:`\Phi(\nu)`, we see that the 350 | signal is almost unpolarized at all frequencies, except for frequencies 351 | below 0.5 Hz, where we notice a small increase in the degree of 352 | polarization. 353 | 354 | Setting 2: moderate wind 355 | ~~~~~~~~~~~~~~~~~~~~~~~~ 356 | 357 | We follow the same procedure as above. 358 | 359 | .. code:: ipython3 360 | 361 | # load data 362 | windData = scio.loadmat('datasets/wind/medium-wind.mat') 363 | 364 | u = windData['v_east'][:,0] 365 | v = windData['v_north'][:, 0] 366 | 367 | N = np.size(u) 368 | 369 | # we use an ergodic argument and split the signal into "sub-signals" 370 | Nw = 20 371 | Nx = N // Nw 372 | tx = np.arange(Nx)*dt 373 | 374 | xk = subsignal(u, v, Nx, 0) 375 | 376 | # compute spectral estimate 377 | multi = bsp.spectral.Multitaper(tx, xk) 378 | for k in range(1, Nw): 379 | 380 | xk = subsignal(u, v, Nx, k) 381 | multi2 = bsp.spectral.Multitaper(tx, xk) 382 | 383 | multi = multi + multi2 384 | 385 | # normalize and plot multitaper estimate 386 | multi.normalize() 387 | fig, ax = multi.plot() 388 | 389 | 390 | 391 | .. image:: spectralanalysisTutorial_files/output_42_2.png 392 | 393 | 394 | We observe again power law - like shape in the total power 395 | :math:`S_0(\nu)`. The degree of polarization :math:`\Phi(\nu)` is close 396 | to zero for frequencies above 1 Hz; There is again a small "step" for 397 | frequencies below 1 Hz. 398 | 399 | Setting 3: high-wind 400 | ~~~~~~~~~~~~~~~~~~~~ 401 | 402 | Again, same procedure. 403 | 404 | .. code:: ipython3 405 | 406 | # load data 407 | windData = scio.loadmat('datasets/wind/high-wind.mat') 408 | 409 | u = windData['v_east'][:,0] 410 | v = windData['v_north'][:, 0] 411 | 412 | N = np.size(u) 413 | 414 | # we use an ergodic argument and split the signal into "sub-signals" 415 | Nw = 20 416 | Nx = N // Nw 417 | tx = np.arange(Nx) 418 | 419 | xk = subsignal(u, v, Nx, 0) 420 | 421 | # compute spectral estimate 422 | multi = bsp.spectral.Multitaper(tx, xk) 423 | for k in range(1, Nw): 424 | 425 | xk = subsignal(u, v, Nx, k) 426 | multi2 = bsp.spectral.Multitaper(tx, xk) 427 | 428 | multi = multi + multi2 429 | # normalize and plot multitaper estimate 430 | multi.normalize() 431 | fig, ax = multi.plot() 432 | 433 | 434 | 435 | .. image:: spectralanalysisTutorial_files/output_45_2.png 436 | 437 | 438 | Again :math:`S_0(\nu)` exhibits a power law shape. The degree of 439 | polarization is overall higher than in the low and moderate wind 440 | settings. The signal is strongly polarized (:math:`\Phi(0) \simeq 0.7`) 441 | at low frequencies. High frequencies show a relatively constant degree 442 | of polarization, around :math:`\Phi(\nu) \simeq 0.3` 443 | -------------------------------------------------------------------------------- /docs/tutorials/spectralanalysisTutorial_files/output_17_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/spectralanalysisTutorial_files/output_17_1.png -------------------------------------------------------------------------------- /docs/tutorials/spectralanalysisTutorial_files/output_18_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/spectralanalysisTutorial_files/output_18_1.png -------------------------------------------------------------------------------- /docs/tutorials/spectralanalysisTutorial_files/output_25_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/spectralanalysisTutorial_files/output_25_0.png -------------------------------------------------------------------------------- /docs/tutorials/spectralanalysisTutorial_files/output_31_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/spectralanalysisTutorial_files/output_31_1.png -------------------------------------------------------------------------------- /docs/tutorials/spectralanalysisTutorial_files/output_32_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/spectralanalysisTutorial_files/output_32_1.png -------------------------------------------------------------------------------- /docs/tutorials/spectralanalysisTutorial_files/output_39_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/spectralanalysisTutorial_files/output_39_2.png -------------------------------------------------------------------------------- /docs/tutorials/spectralanalysisTutorial_files/output_42_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/spectralanalysisTutorial_files/output_42_2.png -------------------------------------------------------------------------------- /docs/tutorials/spectralanalysisTutorial_files/output_45_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/spectralanalysisTutorial_files/output_45_2.png -------------------------------------------------------------------------------- /docs/tutorials/spectralanalysisTutorial_files/output_9_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/spectralanalysisTutorial_files/output_9_0.png -------------------------------------------------------------------------------- /docs/tutorials/timefrequency.rst: -------------------------------------------------------------------------------- 1 | 2 | Time-Frequency-Polarization analysis: tutorial 3 | ============================================== 4 | 5 | This tutorial aims at demonstrating different tools available within the 6 | ``timefrequency`` module of ``BiSPy``. The examples provided here come 7 | along with the paper 8 | 9 | - Julien Flamant, Nicolas Le Bihan, Pierre Chainais: “Time-frequency 10 | analysis of bivariate signals”, In press, Applied and Computational 11 | Harmonic Analysis, 2017; 12 | `arXiv:1609.0246 `__, 13 | `doi:10.1016/j.acha.2017.05.007 `__. 14 | 15 | The paper contains theoretical results and several applications that can 16 | be reproduced with the following tutorial. 17 | A Jupyter notebook version can be downloaded `here `__. 18 | 19 | Load ``bispy`` and necessary modules 20 | ------------------------------------ 21 | 22 | .. code:: ipython3 23 | 24 | import numpy as np 25 | import matplotlib.pyplot as plt 26 | import quaternion # load the quaternion module 27 | import bispy as bsp 28 | 29 | Quaternion Short-Term Fourier Transform (Q-STFT) example 30 | -------------------------------------------------------- 31 | 32 | To illustrate the behaviour of the Q-STFT, we construct a simple signal 33 | made of two linear chirps, each having its own instantaneous 34 | polarization properties. 35 | 36 | First, define some constants: 37 | 38 | .. code:: ipython3 39 | 40 | N = 1024 # length of the signal 41 | 42 | # linear chirps constants 43 | a = 250*np.pi 44 | b = 50*np.pi 45 | c = 150*np.pi 46 | 47 | Then define the instantaneous amplitudes, orientation, ellipticity and 48 | phase of each linear chirp. The amplitudes are taken equal - just a 49 | Hanning window. 50 | 51 | .. code:: ipython3 52 | 53 | # time vector 54 | t = np.linspace(0, 1, N) 55 | 56 | # first chirp 57 | theta1 = np.pi/4 # constant orientation 58 | chi1 = np.pi/6-t # reversing ellipticity 59 | phi1 = b*t+a*t**2 # linear chirp 60 | 61 | # second chirp 62 | theta2 = np.pi/4*10*t # rotating orientation 63 | chi2 = 0 # constant null ellipticity 64 | phi2 = c*t+a*t**2 # linear chirp 65 | 66 | # common amplitude -- simply a window 67 | env = bsp.utils.windows.hanning(N) 68 | 69 | We can now construct the two components and sum it. To do so, we use the 70 | function ``signals.bivariateAMFM`` to compute directly the quaternion 71 | embeddings of each linear chirp. 72 | 73 | .. code:: ipython3 74 | 75 | # define chirps x1 and x2 76 | x1 = bsp.signals.bivariateAMFM(env, theta1, chi1, phi1) 77 | x2 = bsp.signals.bivariateAMFM(env, theta2, chi2, phi2) 78 | 79 | # sum it 80 | x = x1 + x2 81 | 82 | Let us have a look at the signal ``x[t]`` 83 | 84 | .. code:: ipython3 85 | 86 | fig, ax = bsp.utils.visual.plot2D(t, x) 87 | 88 | 89 | 90 | .. image:: timefrequencyTutorial_files/output_11_0.png 91 | 92 | 93 | Now we can compute the Q-STFT. First initialize the object Q-STFT 94 | 95 | .. code:: ipython3 96 | 97 | S = bsp.timefrequency.QSTFT(x, t) 98 | 99 | And compute: 100 | 101 | .. code:: ipython3 102 | 103 | S.compute(window='hamming', nperseg=101, noverlap=100, nfft=N) 104 | 105 | 106 | .. parsed-literal:: 107 | 108 | Computing Time-Frequency Stokes parameters 109 | 110 | 111 | Let us have a look at Time-Frequency Stokes parameters S1, S2 and S3 112 | 113 | .. code:: ipython3 114 | 115 | fig, ax = S.plotStokes() 116 | 117 | 118 | 119 | .. image:: timefrequencyTutorial_files/output_17_0.png 120 | 121 | 122 | Alternatively, we can compute the instantaneous polarization properties 123 | from the ridges of the Q-STFT. 124 | 125 | Extract the ridges: 126 | 127 | .. code:: ipython3 128 | 129 | S.extractRidges() 130 | 131 | 132 | .. parsed-literal:: 133 | 134 | Extracting ridges 135 | Ridge added 136 | Ridge added 137 | 2 ridges were recovered. 138 | 139 | 140 | And plot (``quivertdecim`` controls the time-decimation of the quiver 141 | plot, for a cleaner view): 142 | 143 | .. code:: ipython3 144 | 145 | fig, ax = S.plotRidges(quivertdecim=30) 146 | 147 | 148 | 149 | .. image:: timefrequencyTutorial_files/output_21_0.png 150 | 151 | 152 | The two representations are equivalent and provide the same information: 153 | time, frequency and polarization properties of the bivariate signal. A 154 | direct inspection shows that instantaneous parameters of each components 155 | are recovered by both representations. 156 | 157 | Quaternion Continuous Wavelet Transform (Q-CWT) example 158 | ------------------------------------------------------- 159 | 160 | The Q-STFT method has the same limitations as the usual STFT, that is 161 | not the ideal tool to analyze signals spanning a wide range of 162 | frequencies over short time scales. We revisit here the classic two 163 | chirps example in its bivariate (polarized) version. 164 | 165 | As before, let us first define some constants: 166 | 167 | .. code:: ipython3 168 | 169 | N = 1024 # length of the signal 170 | 171 | # hyperbolic chirps parameters 172 | alpha = 15*np.pi 173 | beta = 5*np.pi 174 | tup = 0.8 # set blow-up time value 175 | 176 | Now, let us define the instantaneous amplitudes, orientation, 177 | ellipticity and phase of each linear chirp. The chirps are also 178 | windowed. 179 | 180 | .. code:: ipython3 181 | 182 | t = np.linspace(0, 1, N) # time vector 183 | 184 | # chirp 1 parameters 185 | theta1 = -np.pi/3 # constant orientation 186 | chi1 = np.pi/6 # constant ellipticity 187 | phi1 = alpha/(.8-t) # hyperbolic chirp 188 | 189 | # chirp 2 parameters 190 | theta2 = 5*t # rotating orientation 191 | chi2 = -np.pi/10 # constant ellipticity 192 | phi2 = beta/(.8-t) # hyperbolic chirp 193 | 194 | # envelope 195 | env = np.zeros(N) 196 | Nmin = int(0.1*N) # minimum value of N such that x is nonzero 197 | Nmax = int(0.75*N) # maximum value of N such that x is nonzero 198 | 199 | env[Nmin:Nmax] = bsp.utils.windows.hanning(Nmax-Nmin) 200 | 201 | Construct the two components and sum it. Again we use the function 202 | ``utils.bivariateAMFM`` to compute directly the quaternion embeddings of 203 | each linear chirp. 204 | 205 | .. code:: ipython3 206 | 207 | x1 = bsp.signals.bivariateAMFM(env, theta1, chi1, phi1) 208 | x2 = bsp.signals.bivariateAMFM(env, theta2, chi2, phi2) 209 | 210 | x = x1 + x2 211 | 212 | Let us visualize the resulting signal, ``x[t]`` 213 | 214 | .. code:: ipython3 215 | 216 | fig, ax = bsp.utils.visual.plot2D(t, x) 217 | 218 | 219 | 220 | .. image:: timefrequencyTutorial_files/output_31_0.png 221 | 222 | 223 | Now, we can compute its Q-CWT. First define the wavelet parameters and 224 | initialize the QCWT object: 225 | 226 | .. code:: ipython3 227 | 228 | waveletParams = dict(type='Morse', beta=12, gamma=3) 229 | S = bsp.timefrequency.QCWT(x, t) 230 | 231 | And compute: 232 | 233 | .. code:: ipython3 234 | 235 | fmin = 0.01 236 | fmax = 400 237 | S.compute(fmin, fmax, waveletParams, N) 238 | 239 | 240 | .. parsed-literal:: 241 | 242 | Computing Time-Frequency Stokes parameters 243 | 244 | 245 | Let us have a look at Time-Scale Stokes parameters S1, S2 and S3 246 | 247 | .. code:: ipython3 248 | 249 | fig, ax = S.plotStokes() 250 | 251 | 252 | 253 | .. image:: timefrequencyTutorial_files/output_37_0.png 254 | 255 | 256 | Similarly we can compute the instantaneous polarization attributes from 257 | the ridges of the Q-CWT. 258 | 259 | .. code:: ipython3 260 | 261 | S.extractRidges() 262 | 263 | 264 | .. parsed-literal:: 265 | 266 | Extracting ridges 267 | Ridge added 268 | Ridge added 269 | 2 ridges were recovered. 270 | 271 | 272 | And plot the results 273 | 274 | .. code:: ipython3 275 | 276 | fig, ax = S.plotRidges(quivertdecim=40) 277 | 278 | 279 | 280 | .. image:: timefrequencyTutorial_files/output_41_0.png 281 | 282 | 283 | Again, both representations are equivalent and provide the same 284 | information: time, scale and polarization properties of the bivariate 285 | signal. A direct inspection shows that instantaneous parameters of each 286 | components are recovered by both representations. 287 | -------------------------------------------------------------------------------- /docs/tutorials/timefrequencyTutorial_files/output_11_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/timefrequencyTutorial_files/output_11_0.png -------------------------------------------------------------------------------- /docs/tutorials/timefrequencyTutorial_files/output_17_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/timefrequencyTutorial_files/output_17_0.png -------------------------------------------------------------------------------- /docs/tutorials/timefrequencyTutorial_files/output_21_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/timefrequencyTutorial_files/output_21_0.png -------------------------------------------------------------------------------- /docs/tutorials/timefrequencyTutorial_files/output_31_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/timefrequencyTutorial_files/output_31_0.png -------------------------------------------------------------------------------- /docs/tutorials/timefrequencyTutorial_files/output_37_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/timefrequencyTutorial_files/output_37_0.png -------------------------------------------------------------------------------- /docs/tutorials/timefrequencyTutorial_files/output_41_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/docs/tutorials/timefrequencyTutorial_files/output_41_0.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | scipy 3 | matplotlib 4 | numpydoc 5 | spectrum 6 | m2r2 7 | ipython 8 | -------------------------------------------------------------------------------- /scripts/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ $TRAVIS_OS_NAME == 'osx' ]]; then 4 | # If in OSX, we need to install python by hand. 5 | # We do that using homebrew, pyenv and pyenv-virtualenv 6 | # You should normally not change anything in here 7 | brew update >/dev/null 8 | brew outdated pyenv || brew upgrade --quiet pyenv 9 | brew install homebrew/boneyard/pyenv-pip-rehash 10 | brew install pyenv 11 | eval "$(pyenv init -)" 12 | 13 | # Install some custom requirements on OS X 14 | # e.g. brew install pyenv-virtualenv 15 | 16 | # See all available PYTHON versions with `pyenv install --list'. 17 | pyenv install ${PYTHON} 18 | pyenv global ${PYTHON} 19 | 20 | export PYENV_VERSION=${PYTHON} 21 | export PATH="/Users/travis/.pyenv/shims:${PATH}" 22 | #pyenv-virtualenv venv 23 | #source venv/bin/activate 24 | python --version 25 | else 26 | # Additional installation instructions for UNIX 27 | # sudo apt-get install -qq gcc g++ 28 | echo 'not on OSX' 29 | fi 30 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """A setuptools based setup module. 2 | See: 3 | https://packaging.python.org/tutorials/distributing-packages/#configuring-your-project 4 | https://packaging.python.org/en/latest/distributing.html 5 | https://github.com/pypa/sampleproject 6 | """ 7 | 8 | # Always prefer setuptools over distutils 9 | from setuptools import setup, find_packages 10 | # To use a consistent encoding 11 | from codecs import open 12 | from os import path 13 | 14 | here = path.abspath(path.dirname(__file__)) 15 | 16 | # read the contents of your README file 17 | from pathlib import Path 18 | this_directory = Path(__file__).parent 19 | long_description = (this_directory / "README.md").read_text() 20 | 21 | setup( 22 | name='bispy-polar', 23 | 24 | # Versions should comply with PEP440. For a discussion on single-sourcing 25 | # the version across setup.py and the project code, see 26 | # https://packaging.python.org/en/latest/single_source_version.html 27 | version='0.9.4.dev', 28 | 29 | description='An open-source python framework for processing \ 30 | bivariate signals.', 31 | long_description=long_description, 32 | long_description_content_type='text/markdown', 33 | 34 | # The project's main homepage. 35 | url='https://github.com/jflamant/bispy', 36 | 37 | # Author details 38 | author='Julien Flamant', 39 | author_email='julien.flamant@cnrs.fr', 40 | 41 | # Choose your license 42 | license='CeCIll', 43 | 44 | # See https://pypi.python.org/pypi?%3Aaction=list_classifiers 45 | classifiers=[ 46 | # How mature is this project? Common values are 47 | # 3 - Alpha 48 | # 4 - Beta 49 | # 5 - Production/Stable 50 | 'Development Status :: 3 - Alpha', 51 | 52 | # Indicate who your project is intended for 53 | 'Intended Audience :: Science/Research', 54 | 55 | # Pick your license as you wish (should match "license" above) 56 | 'License :: OSI Approved :: MIT License', 57 | 58 | # Specify the Python versions you support here. In particular, ensure 59 | # that you indicate whether you support Python 2, Python 3 or both. 60 | 'Programming Language :: Python :: 3.5', 61 | ], 62 | 63 | # What does your project relate to? 64 | keywords='signal processing', 65 | 66 | # You can just specify the packages manually here if your project is 67 | # simple. Or you can use find_packages(). 68 | packages=find_packages(exclude=['contrib', 'docs', 'tests']), 69 | 70 | # Alternatively, if you want to distribute just a my_module.py, uncomment 71 | # this: 72 | # py_modules=["my_module"], 73 | 74 | # List run-time dependencies here. These will be installed by pip when 75 | # your project is installed. For an analysis of "install_requires" vs pip's 76 | # requirements files see: 77 | # https://packaging.python.org/en/latest/requirements.html 78 | install_requires=['numpy', 'scipy', 'matplotlib', 'numpy-quaternion'], 79 | 80 | # List additional groups of dependencies here (e.g. development 81 | # dependencies). You can install these using the following syntax, 82 | # for example: 83 | # $ pip install -e .[dev,test] 84 | extras_require={ 85 | 'dev': ['check-manifest'], 86 | 'test': ['coverage'], 87 | }, 88 | 89 | # If there are data files included in your packages that need to be 90 | # installed, specify them here. If using Python 2.6 or less, then these 91 | # have to be included in MANIFEST.in as well. 92 | #package_data={ 93 | # 'sample': ['package_data.dat'], 94 | #}, 95 | 96 | # Although 'package_data' is the preferred approach, in some case you may 97 | # need to place data files outside of your packages. See: 98 | # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa 99 | # In this case, 'data_file' will be installed into '/my_data' 100 | #data_files=[('my_data', ['data/data_file'])], 101 | 102 | # To provide executable scripts, use entry points in preference to the 103 | # "scripts" keyword. Entry points provide cross-platform support and allow 104 | # pip to create the appropriate form of executable for the target platform. 105 | # entry_points={ 106 | # 'console_scripts': [ 107 | # 'sample=sample:main', 108 | # ], 109 | #}, 110 | ) 111 | -------------------------------------------------------------------------------- /tests/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jflamant/bispy/3ceb2abc1d2e6d0dab7c6a43f92679954ad601ee/tests/.DS_Store -------------------------------------------------------------------------------- /tests/test_plots.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import quaternion # load the quaternion module 4 | import bispy as bsp 5 | 6 | 7 | 8 | def _create_data(): 9 | N = 1024 # length of the signal 10 | 11 | # linear chirps constants 12 | a = 250*np.pi 13 | b = 50*np.pi 14 | c = 150*np.pi 15 | 16 | # time vector 17 | t = np.linspace(0, 1, N) 18 | 19 | # first chirp 20 | theta1 = np.pi/4 # constant orientation 21 | chi1 = np.pi/6-t # reversing ellipticity 22 | phi1 = b*t+a*t**2 # linear chirp 23 | 24 | # second chirp 25 | theta2 = np.pi/4*10*t # rotating orientation 26 | chi2 = 0 # constant null ellipticity 27 | phi2 = c*t+a*t**2 # linear chirp 28 | 29 | # common amplitude -- simply a window 30 | env = bsp.utils.windows.hanning(N) 31 | 32 | 33 | # define chirps x1 and x2 34 | x1 = bsp.signals.bivariateAMFM(env, theta1, chi1, phi1) 35 | x2 = bsp.signals.bivariateAMFM(env, theta2, chi2, phi2) 36 | 37 | # sum it 38 | x = x1 + x2 39 | 40 | return t, x 41 | 42 | def test_plot2D(): 43 | 44 | t, x = _create_data() 45 | fig, ax = bsp.utils.visual.plot2D(t, x) 46 | 47 | def test_plot3D(): 48 | t, x = _create_data() 49 | fig, ax = bsp.utils.visual.plot3D(t, x) 50 | -------------------------------------------------------------------------------- /tests/test_timefrequency.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | import quaternion # load the quaternion module 4 | import bispy as bsp 5 | 6 | N = 1024 # length of the signal 7 | 8 | # linear chirps constants 9 | a = 250*np.pi 10 | b = 50*np.pi 11 | c = 150*np.pi 12 | 13 | # time vector 14 | t = np.linspace(0, 1, N) 15 | 16 | # first chirp 17 | theta1 = np.pi/4 # constant orientation 18 | chi1 = np.pi/6-t # reversing ellipticity 19 | phi1 = b*t+a*t**2 # linear chirp 20 | 21 | # second chirp 22 | theta2 = np.pi/4*10*t # rotating orientation 23 | chi2 = 0 # constant null ellipticity 24 | phi2 = c*t+a*t**2 # linear chirp 25 | 26 | # common amplitude -- simply a window 27 | env = bsp.utils.windows.hanning(N) 28 | 29 | 30 | # define chirps x1 and x2 31 | x1 = bsp.signals.bivariateAMFM(env, theta1, chi1, phi1) 32 | x2 = bsp.signals.bivariateAMFM(env, theta2, chi2, phi2) 33 | 34 | # sum it 35 | x = x1 + x2 36 | 37 | # plot 2D and 3D 38 | fig, ax = bsp.utils.visual.plot2D(t, x) 39 | fig, ax = bsp.utils.visual.plot3D(t, x) 40 | 41 | S = bsp.timefrequency.QSTFT(x, t) 42 | 43 | S.compute(window='hamming', nperseg=101, noverlap=100, nfft=N) 44 | fig, ax = S.plotStokes() 45 | 46 | S.extractRidges() 47 | 48 | fig, ax = S.plotRidges(quivertdecim=30) 49 | 50 | N = 1024 # length of the signal 51 | 52 | # hyperbolic chirps parameters 53 | alpha = 15*np.pi 54 | beta = 5*np.pi 55 | tup = 0.8 # set blow-up time value 56 | 57 | t = np.linspace(0, 1, N) # time vector 58 | 59 | # chirp 1 parameters 60 | theta1 = -np.pi/3 # constant orientation 61 | chi1 = np.pi/6 # constant ellipticity 62 | phi1 = alpha/(.8-t) # hyperbolic chirp 63 | 64 | # chirp 2 parameters 65 | theta2 = 5*t # rotating orientation 66 | chi2 = -np.pi/10 # constant ellipticity 67 | phi2 = beta/(.8-t) # hyperbolic chirp 68 | 69 | # envelope 70 | env = np.zeros(N) 71 | Nmin = int(0.1*N) # minimum value of N such that x is nonzero 72 | Nmax = int(0.75*N) # maximum value of N such that x is nonzero 73 | 74 | env[Nmin:Nmax] = bsp.utils.windows.hanning(Nmax-Nmin) 75 | 76 | x1 = bsp.signals.bivariateAMFM(env, theta1, chi1, phi1) 77 | x2 = bsp.signals.bivariateAMFM(env, theta2, chi2, phi2) 78 | 79 | x = x1 + x2 80 | 81 | fig, ax = bsp.utils.visual.plot2D(t, x) 82 | 83 | waveletParams = dict(type='Morse', beta=12, gamma=3) 84 | S = bsp.timefrequency.QCWT(x, t) 85 | 86 | fmin = 0.01 87 | fmax = 400 88 | S.compute(fmin, fmax, waveletParams, N) 89 | 90 | fig, ax = S.plotStokes() 91 | 92 | 93 | S.extractRidges() 94 | 95 | fig, ax = S.plotRidges(quivertdecim=40) --------------------------------------------------------------------------------