├── .gitignore ├── .travis.yml ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── data └── kuiper_table.npy ├── docs ├── Makefile ├── clustering.rst ├── conf.py ├── decorators.rst ├── descriptive.rst ├── distributions.rst ├── event_series.rst ├── index.rst ├── iterators.rst ├── make.bat ├── regression.rst └── tests.rst ├── pycircstat ├── __init__.py ├── clustering.py ├── data.py ├── decorators.py ├── descriptive.py ├── distributions.py ├── event_series.py ├── iterators.py ├── regression.py ├── tests.py └── utils.py ├── setup.py └── tests ├── __init__.py ├── test_clustering.py ├── test_decorators.py ├── test_descriptive.py ├── test_distributions.py ├── test_event_series.py ├── test_regression.py ├── test_tests.py └── test_utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | lib/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | *.egg-info/ 22 | .installed.cfg 23 | *.egg 24 | 25 | # PyInstaller 26 | # Usually these files are written by a python script from a template 27 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
28 | *.manifest 29 | *.spec 30 | 31 | # Installer logs 32 | pip-log.txt 33 | pip-delete-this-directory.txt 34 | 35 | # Unit test / coverage reports 36 | htmlcov/ 37 | .tox/ 38 | .coverage 39 | .cache 40 | nosetests.xml 41 | coverage.xml 42 | 43 | # Translations 44 | *.mo 45 | *.pot 46 | 47 | # Django stuff: 48 | *.log 49 | 50 | # Sphinx documentation 51 | docs/_build/ 52 | 53 | # PyBuilder 54 | target/ 55 | .idea 56 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: xenial 2 | language: python 3 | python: 4 | - "3.5" 5 | - "3.6" 6 | - "3.7" 7 | - "2.7" 8 | before_install: 9 | - sudo apt-get -y update 10 | - sudo apt-get -y install -qq libatlas-dev libatlas-base-dev liblapack-dev gfortran 11 | # You may want to periodically update this, although the conda update 12 | # conda line below will keep everything up-to-date. We do this 13 | # conditionally because it saves us some downloading if the version is 14 | # the same (updated to new paths). 15 | - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then 16 | wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh; 17 | else 18 | wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; 19 | fi 20 | - bash miniconda.sh -b -p $HOME/miniconda 21 | - export PATH="$HOME/miniconda/bin:$PATH" 22 | - hash -r 23 | - conda update -q --yes conda 24 | - conda config --set always_yes yes --set changeps1 no 25 | # Useful for debugging any issues with conda 26 | - conda info -a 27 | install: 28 | # Replace dep1 dep2 ... 
with your dependencies 29 | - conda create -n test-environment python=$TRAVIS_PYTHON_VERSION numpy scipy setuptools pip 30 | - source activate test-environment 31 | - pip install nose nose-cov python-coveralls decorator pandas 32 | - conda info -a 33 | # command to run tests 34 | script: 35 | - nosetests --with-coverage --cover-package=pycircstat 36 | after_success: 37 | - coveralls 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 circstat 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | @echo 'MakeFile for pycircstat packaging ' 3 | @echo ' ' 4 | @echo 'make sdist Creates source distribution ' 5 | @echo 'make wheel Creates Wheel distribution ' 6 | @echo 'make pypi Package and upload to PyPI ' 7 | @echo 'make pypitest Package and upload to PyPI test server' 8 | @echo 'make purge Remove all build related directories ' 9 | 10 | 11 | sdist: 12 | python3 setup.py sdist 13 | 14 | wheel: 15 | python3 setup.py bdist_wheel 16 | 17 | pypi:purge sdist wheel 18 | twine upload dist/* 19 | 20 | pypitest: purge sdist wheel 21 | twine upload -r pypitest dist/* 22 | 23 | purge: 24 | rm -rf dist && rm -rf build && rm -rf pycircstat.egg-info 25 | 26 | 27 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | pycircstat 2 | ========== 3 | 4 | **PyCircStat: circular statistics with Python** 5 | 6 | This toolbox contains descriptive and inferential statistics for circular data. 7 | 8 | Note: The toolbox is currently in development. Many functions are working, but many are yet to come. We encourage you to start using the toolbox and provide feedback on bugs, features you would like and so on. 9 | 10 | Until there is a dedicated article describing the toolbox, please credit this web page. 
11 | 12 | Team: Philipp Berens, Fabian Sinz 13 | 14 | Installation 15 | ------------ 16 | 17 | The package can be installed via pip (`pip` might need to be replaced by `pip3`) 18 | 19 | pip install pycircstat 20 | 21 | You can also install it via 22 | 23 | python setup.py install 24 | 25 | To generate the documentation, run 26 | 27 | python setup.py build_sphinx 28 | 29 | An online documentation can be found on http://pycircstat.readthedocs.org/. 30 | 31 | Note that the call to sphinx' apidoc has still to be included. 32 | -------------------------------------------------------------------------------- /data/kuiper_table.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/circstat/pycircstat/dae8ecad8d594c402ea72fcc03dc3dd9172a66cf/data/kuiper_table.npy -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make <target>' where <target> is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
56 | 57 | dirhtml: 58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 61 | 62 | singlehtml: 63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 64 | @echo 65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PyCircStat.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyCircStat.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/PyCircStat" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PyCircStat" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 105 | 106 | latex: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo 109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 
110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 111 | "(use \`make latexpdf' here to do that automatically)." 112 | 113 | latexpdf: 114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 115 | @echo "Running LaTeX files through pdflatex..." 116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 118 | 119 | latexpdfja: 120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 121 | @echo "Running LaTeX files through platex and dvipdfmx..." 122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 124 | 125 | text: 126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 127 | @echo 128 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 129 | 130 | man: 131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 132 | @echo 133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 134 | 135 | texinfo: 136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 137 | @echo 138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 139 | @echo "Run \`make' in that directory to run these through makeinfo" \ 140 | "(use \`make info' here to do that automatically)." 141 | 142 | info: 143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 144 | @echo "Running Texinfo files through makeinfo..." 145 | make -C $(BUILDDIR)/texinfo info 146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 147 | 148 | gettext: 149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 150 | @echo 151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 
157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 173 | 174 | pseudoxml: 175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 176 | @echo 177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 178 | -------------------------------------------------------------------------------- /docs/clustering.rst: -------------------------------------------------------------------------------- 1 | Machine Learning 2 | ================ 3 | 4 | .. automodule:: pycircstat.clustering 5 | :members: 6 | :inherited-members: 7 | 8 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # PyCircStat documentation build configuration file, created by 4 | # sphinx-quickstart on Tue Sep 23 13:08:52 2014. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 
14 | 15 | import sys 16 | import mock 17 | import os 18 | on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 19 | 20 | 21 | 22 | MOCK_MODULES = ['numpy', 'scipy', 'matplotlib', 'matplotlib.pyplot', 'decorator', 'pandas', "nose.tools"] 23 | for mod_name in MOCK_MODULES: 24 | sys.modules[mod_name] = mock.Mock() 25 | 26 | sys.modules['scipy.stats'] = mock.Mock(rv_continuous=object) 27 | 28 | 29 | # If extensions (or modules to document with autodoc) are in another directory, 30 | # add these directories to sys.path here. If the directory is relative to the 31 | # documentation root, use os.path.abspath to make it absolute, like shown here. 32 | sys.path.insert(0, os.path.abspath('../')) 33 | 34 | # -- General configuration ------------------------------------------------ 35 | 36 | # If your documentation needs a minimal Sphinx version, state it here. 37 | #needs_sphinx = '1.0' 38 | 39 | # Add any Sphinx extension module names here, as strings. They can be 40 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 41 | # ones. 42 | extensions = [ 43 | 'sphinx.ext.autodoc', 44 | 'sphinx.ext.doctest', 45 | 'sphinx.ext.intersphinx', 46 | 'sphinx.ext.todo', 47 | 'sphinx.ext.coverage', 48 | 'sphinx.ext.mathjax', 49 | 'sphinx.ext.ifconfig', 50 | 'sphinx.ext.viewcode', 51 | ] 52 | 53 | # Add any paths that contain templates here, relative to this directory. 54 | templates_path = ['_templates'] 55 | 56 | # The suffix of source filenames. 57 | source_suffix = '.rst' 58 | 59 | # The encoding of source files. 60 | #source_encoding = 'utf-8-sig' 61 | 62 | # The master toctree document. 63 | master_doc = 'index' 64 | 65 | # General information about the project. 
66 | project = u'pycircstat' 67 | copyright = u'2014, Philipp Berens, Matthias Kümmerer, Fabian Sinz, Tom Wallis, ' 68 | 69 | # The version info for the project you're documenting, acts as replacement for 70 | # |version| and |release|, also used in various other places throughout the 71 | # built documents. 72 | # 73 | # The short X.Y version. 74 | version = '0.0.1' 75 | # The full version, including alpha/beta/rc tags. 76 | release = '0.0.1' 77 | 78 | # The language for content autogenerated by Sphinx. Refer to documentation 79 | # for a list of supported languages. 80 | #language = None 81 | 82 | # There are two options for replacing |today|: either, you set today to some 83 | # non-false value, then it is used: 84 | #today = '' 85 | # Else, today_fmt is used as the format for a strftime call. 86 | #today_fmt = '%B %d, %Y' 87 | 88 | # List of patterns, relative to source directory, that match files and 89 | # directories to ignore when looking for source files. 90 | exclude_patterns = ['_build'] 91 | 92 | # The reST default role (used for this markup: `text`) to use for all 93 | # documents. 94 | #default_role = None 95 | 96 | # If true, '()' will be appended to :func: etc. cross-reference text. 97 | #add_function_parentheses = True 98 | 99 | # If true, the current module name will be prepended to all description 100 | # unit titles (such as .. function::). 101 | #add_module_names = True 102 | 103 | # If true, sectionauthor and moduleauthor directives will be shown in the 104 | # output. They are ignored by default. 105 | #show_authors = False 106 | 107 | # The name of the Pygments (syntax highlighting) style to use. 108 | pygments_style = 'sphinx' 109 | 110 | # A list of ignored prefixes for module index sorting. 111 | #modindex_common_prefix = [] 112 | 113 | # If true, keep warnings as "system message" paragraphs in the built documents. 
114 | #keep_warnings = False 115 | 116 | 117 | # -- Options for HTML output ---------------------------------------------- 118 | 119 | # The theme to use for HTML and HTML Help pages. See the documentation for 120 | # a list of builtin themes. 121 | if not on_rtd: # only import and set the theme if we're building docs locally 122 | import sphinx_rtd_theme 123 | html_theme = 'sphinx_rtd_theme' 124 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 125 | 126 | # Add any paths that contain custom themes here, relative to this directory. 127 | #html_theme_path = [] 128 | 129 | # The name for this set of Sphinx documents. If None, it defaults to 130 | # " v documentation". 131 | #html_title = None 132 | 133 | # A shorter title for the navigation bar. Default is the same as html_title. 134 | #html_short_title = None 135 | 136 | # The name of an image file (relative to this directory) to place at the top 137 | # of the sidebar. 138 | #html_logo = None 139 | 140 | # The name of an image file (within the static path) to use as favicon of the 141 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 142 | # pixels large. 143 | #html_favicon = None 144 | 145 | # Add any paths that contain custom static files (such as style sheets) here, 146 | # relative to this directory. They are copied after the builtin static files, 147 | # so a file named "default.css" will overwrite the builtin "default.css". 148 | html_static_path = ['_static'] 149 | 150 | # Add any extra paths that contain custom files (such as robots.txt or 151 | # .htaccess) here, relative to this directory. These files are copied 152 | # directly to the root of the documentation. 153 | #html_extra_path = [] 154 | 155 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 156 | # using the given strftime format. 
157 | #html_last_updated_fmt = '%b %d, %Y' 158 | 159 | # If true, SmartyPants will be used to convert quotes and dashes to 160 | # typographically correct entities. 161 | #html_use_smartypants = True 162 | 163 | # Custom sidebar templates, maps document names to template names. 164 | #html_sidebars = {} 165 | 166 | # Additional templates that should be rendered to pages, maps page names to 167 | # template names. 168 | #html_additional_pages = {} 169 | 170 | # If false, no module index is generated. 171 | #html_domain_indices = True 172 | 173 | # If false, no index is generated. 174 | #html_use_index = True 175 | 176 | # If true, the index is split into individual pages for each letter. 177 | #html_split_index = False 178 | 179 | # If true, links to the reST sources are added to the pages. 180 | #html_show_sourcelink = True 181 | 182 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 183 | #html_show_sphinx = True 184 | 185 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 186 | #html_show_copyright = True 187 | 188 | # If true, an OpenSearch description file will be output, and all pages will 189 | # contain a tag referring to it. The value of this option must be the 190 | # base URL from which the finished HTML is served. 191 | #html_use_opensearch = '' 192 | 193 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 194 | #html_file_suffix = None 195 | 196 | # Output file base name for HTML help builder. 197 | htmlhelp_basename = 'PyCircStatdoc' 198 | 199 | 200 | # -- Options for LaTeX output --------------------------------------------- 201 | 202 | latex_elements = { 203 | # The paper size ('letterpaper' or 'a4paper'). 204 | #'papersize': 'letterpaper', 205 | 206 | # The font size ('10pt', '11pt' or '12pt'). 207 | #'pointsize': '10pt', 208 | 209 | # Additional stuff for the LaTeX preamble. 210 | #'preamble': '', 211 | } 212 | 213 | # Grouping the document tree into LaTeX files. 
List of tuples 214 | # (source start file, target name, title, 215 | # author, documentclass [howto, manual, or own class]). 216 | latex_documents = [ 217 | ('index', 'pycircstat.tex', u'pycircstat Documentation', 218 | u'Philipp Berens et al.', 'manual'), 219 | ] 220 | 221 | # The name of an image file (relative to this directory) to place at the top of 222 | # the title page. 223 | #latex_logo = None 224 | 225 | # For "manual" documents, if this is true, then toplevel headings are parts, 226 | # not chapters. 227 | #latex_use_parts = False 228 | 229 | # If true, show page references after internal links. 230 | #latex_show_pagerefs = False 231 | 232 | # If true, show URL addresses after external links. 233 | #latex_show_urls = False 234 | 235 | # Documents to append as an appendix to all manuals. 236 | #latex_appendices = [] 237 | 238 | # If false, no module index is generated. 239 | #latex_domain_indices = True 240 | 241 | 242 | # -- Options for manual page output --------------------------------------- 243 | 244 | # One entry per manual page. List of tuples 245 | # (source start file, name, description, authors, manual section). 246 | man_pages = [ 247 | ('index', 'pycircstat', u'pycircstat Documentation', 248 | [u'Philipp Berens et al.'], 1) 249 | ] 250 | 251 | # If true, show URL addresses after external links. 252 | #man_show_urls = False 253 | 254 | 255 | # -- Options for Texinfo output ------------------------------------------- 256 | 257 | # Grouping the document tree into Texinfo files. List of tuples 258 | # (source start file, target name, title, author, 259 | # dir menu entry, description, category) 260 | texinfo_documents = [ 261 | ('index', 'pycircstat', u'pycircstat Documentation', 262 | u'Philipp Berens et al.', 'pycircstat', 'One line description of project.', 263 | 'Miscellaneous'), 264 | ] 265 | 266 | # Documents to append as an appendix to all manuals. 267 | #texinfo_appendices = [] 268 | 269 | # If false, no module index is generated.
270 | #texinfo_domain_indices = True 271 | 272 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 273 | #texinfo_show_urls = 'footnote' 274 | 275 | # If true, do not generate a @detailmenu in the "Top" node's menu. 276 | #texinfo_no_detailmenu = False 277 | 278 | 279 | # Example configuration for intersphinx: refer to the Python standard library. 280 | intersphinx_mapping = {'http://docs.python.org/': None} 281 | -------------------------------------------------------------------------------- /docs/decorators.rst: -------------------------------------------------------------------------------- 1 | Decorators 2 | ========== 3 | 4 | .. automodule:: pycircstat.decorators 5 | :members: 6 | 7 | -------------------------------------------------------------------------------- /docs/descriptive.rst: -------------------------------------------------------------------------------- 1 | Descriptive Circular Statistics 2 | =============================== 3 | 4 | .. automodule:: pycircstat.descriptive 5 | :members: 6 | 7 | -------------------------------------------------------------------------------- /docs/distributions.rst: -------------------------------------------------------------------------------- 1 | Circular Distributions 2 | ====================== 3 | 4 | .. automodule:: pycircstat.distributions 5 | :members: 6 | :inherited-members: 7 | 8 | -------------------------------------------------------------------------------- /docs/event_series.rst: -------------------------------------------------------------------------------- 1 | Tools for event series 2 | ====================== 3 | 4 | .. automodule:: pycircstat.event_series 5 | :members: 6 | 7 | 8 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. pycircstat documentation master file, created by 2 | sphinx-quickstart on Tue Sep 23 13:08:52 2014. 
3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to pycircstat's documentation! 7 | ====================================== 8 | 9 | All functions take arguments in radians. 10 | For a detailed description of arguments and outputs consult the help text of the respective functions. 11 | 12 | The implementation follows in most cases 'Biostatistical Analysis' and all referenced equations and tables 13 | are taken from this book, if not otherwise noted. In some cases, the other books were preferred because the implementation 14 | was more straightforward for the solutions presented there. 15 | 16 | If you have suggestions, bugs or feature requests or want to contribute code, please email us. 17 | 18 | 19 | 20 | Contents: 21 | ========= 22 | 23 | .. toctree:: 24 | :maxdepth: 2 25 | 26 | descriptive.rst 27 | tests.rst 28 | distributions.rst 29 | clustering.rst 30 | regression.rst 31 | event_series.rst 32 | iterators.rst 33 | decorators.rst 34 | 35 | Disclaimer: 36 | =========== 37 | 38 | All functions in this toolbox were implemented with care and tested on the examples presented in 39 | 'Biostatistical Analysis' where possible. Nevertheless, they may contain errors or bugs, which may 40 | affect the outcome of your analysis. We do not take responsibility for any harm coming from using 41 | this toolbox, neither if it is caused by errors in the software nor if it is caused by its improper 42 | application. Please email us any bugs you find. 43 | 44 | 45 | Indices and tables 46 | ================== 47 | 48 | * :ref:`genindex` 49 | * :ref:`modindex` 50 | * :ref:`search` 51 | 52 | .. [Fisher1995] Fisher, Nicholas I. Statistical analysis of circular data. Cambridge University Press, 1995. 53 | .. [Jammalamadaka2001] Jammalamadaka, S. Rao, and Ambar Sengupta. Topics in circular statistics. Vol. 5. World Scientific, 2001. 54 | .. [Zar2009] Zar, Jerrold H. Biostatistical analysis.
Pearson Education India, 2009. 55 | .. [Batschelet1981] Batschelet, E. Circular statistics in biology, Academic Press, London, 1981. ISBN 0-12-081050-6. 56 | .. [RusselLevitin1995] Russell, G. S., and D. J. Levitin. "An expanded table of probability values for Rao's Spacing Test." Communications in Statistics-Simulation and Computation 24.4 (1995): 879-888. 57 | .. [Pewsey2004] Pewsey, A. The large-sample joint distribution of key circular statistics, Metrika, July 2004, Volume 60, Issue 1, pp 25-32 58 | -------------------------------------------------------------------------------- /docs/iterators.rst: -------------------------------------------------------------------------------- 1 | Iterators and Generators 2 | ======================== 3 | 4 | .. automodule:: pycircstat.iterators 5 | :members: 6 | 7 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. 
epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. xml to make Docutils-native XML files 37 | echo. pseudoxml to make pseudoxml-XML files for display purposes 38 | echo. linkcheck to check all external links for integrity 39 | echo. doctest to run all doctests embedded in the documentation if enabled 40 | goto end 41 | ) 42 | 43 | if "%1" == "clean" ( 44 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 45 | del /q /s %BUILDDIR%\* 46 | goto end 47 | ) 48 | 49 | 50 | %SPHINXBUILD% 2> nul 51 | if errorlevel 9009 ( 52 | echo. 53 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 54 | echo.installed, then set the SPHINXBUILD environment variable to point 55 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 56 | echo.may add the Sphinx directory to PATH. 57 | echo. 58 | echo.If you don't have Sphinx installed, grab it from 59 | echo.http://sphinx-doc.org/ 60 | exit /b 1 61 | ) 62 | 63 | if "%1" == "html" ( 64 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 65 | if errorlevel 1 exit /b 1 66 | echo. 67 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 68 | goto end 69 | ) 70 | 71 | if "%1" == "dirhtml" ( 72 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 73 | if errorlevel 1 exit /b 1 74 | echo. 75 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 76 | goto end 77 | ) 78 | 79 | if "%1" == "singlehtml" ( 80 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 81 | if errorlevel 1 exit /b 1 82 | echo. 83 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 
84 | goto end 85 | ) 86 | 87 | if "%1" == "pickle" ( 88 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 89 | if errorlevel 1 exit /b 1 90 | echo. 91 | echo.Build finished; now you can process the pickle files. 92 | goto end 93 | ) 94 | 95 | if "%1" == "json" ( 96 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 97 | if errorlevel 1 exit /b 1 98 | echo. 99 | echo.Build finished; now you can process the JSON files. 100 | goto end 101 | ) 102 | 103 | if "%1" == "htmlhelp" ( 104 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 105 | if errorlevel 1 exit /b 1 106 | echo. 107 | echo.Build finished; now you can run HTML Help Workshop with the ^ 108 | .hhp project file in %BUILDDIR%/htmlhelp. 109 | goto end 110 | ) 111 | 112 | if "%1" == "qthelp" ( 113 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 114 | if errorlevel 1 exit /b 1 115 | echo. 116 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 117 | .qhcp project file in %BUILDDIR%/qthelp, like this: 118 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\PyCircStat.qhcp 119 | echo.To view the help file: 120 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\PyCircStat.ghc 121 | goto end 122 | ) 123 | 124 | if "%1" == "devhelp" ( 125 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished. 129 | goto end 130 | ) 131 | 132 | if "%1" == "epub" ( 133 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 134 | if errorlevel 1 exit /b 1 135 | echo. 136 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 137 | goto end 138 | ) 139 | 140 | if "%1" == "latex" ( 141 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 142 | if errorlevel 1 exit /b 1 143 | echo. 144 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 
145 | goto end 146 | ) 147 | 148 | if "%1" == "latexpdf" ( 149 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 150 | cd %BUILDDIR%/latex 151 | make all-pdf 152 | cd %BUILDDIR%/.. 153 | echo. 154 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 155 | goto end 156 | ) 157 | 158 | if "%1" == "latexpdfja" ( 159 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 160 | cd %BUILDDIR%/latex 161 | make all-pdf-ja 162 | cd %BUILDDIR%/.. 163 | echo. 164 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 165 | goto end 166 | ) 167 | 168 | if "%1" == "text" ( 169 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 170 | if errorlevel 1 exit /b 1 171 | echo. 172 | echo.Build finished. The text files are in %BUILDDIR%/text. 173 | goto end 174 | ) 175 | 176 | if "%1" == "man" ( 177 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 178 | if errorlevel 1 exit /b 1 179 | echo. 180 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 181 | goto end 182 | ) 183 | 184 | if "%1" == "texinfo" ( 185 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 186 | if errorlevel 1 exit /b 1 187 | echo. 188 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 189 | goto end 190 | ) 191 | 192 | if "%1" == "gettext" ( 193 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 194 | if errorlevel 1 exit /b 1 195 | echo. 196 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 197 | goto end 198 | ) 199 | 200 | if "%1" == "changes" ( 201 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 202 | if errorlevel 1 exit /b 1 203 | echo. 204 | echo.The overview file is in %BUILDDIR%/changes. 205 | goto end 206 | ) 207 | 208 | if "%1" == "linkcheck" ( 209 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 210 | if errorlevel 1 exit /b 1 211 | echo. 212 | echo.Link check complete; look for any errors in the above output ^ 213 | or in %BUILDDIR%/linkcheck/output.txt. 
214 | goto end 215 | ) 216 | 217 | if "%1" == "doctest" ( 218 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 219 | if errorlevel 1 exit /b 1 220 | echo. 221 | echo.Testing of doctests in the sources finished, look at the ^ 222 | results in %BUILDDIR%/doctest/output.txt. 223 | goto end 224 | ) 225 | 226 | if "%1" == "xml" ( 227 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 228 | if errorlevel 1 exit /b 1 229 | echo. 230 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 231 | goto end 232 | ) 233 | 234 | if "%1" == "pseudoxml" ( 235 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 236 | if errorlevel 1 exit /b 1 237 | echo. 238 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 239 | goto end 240 | ) 241 | 242 | :end 243 | -------------------------------------------------------------------------------- /docs/regression.rst: -------------------------------------------------------------------------------- 1 | Regression Models 2 | ================= 3 | 4 | Example for circular-linear regression 5 | 6 | .. code-block:: python 7 | 8 | # generate toy data 9 | alpha = np.random.rand(200)*np.pi*2 10 | a0 = np.random.rand()*2*np.pi 11 | A0 = np.abs(np.random.randn()) 12 | m0 = np.random.randn()*10 13 | x = m0 + A0*np.cos(alpha - a0) 14 | 15 | # generate regressor 16 | reg = CircularLinearRegression() 17 | 18 | # train regressor 19 | reg.train(alpha, x) 20 | 21 | # predict 22 | x2 = reg(alpha) 23 | 24 | # look at coefficients 25 | print(reg[:]) 26 | 27 | .. automodule:: pycircstat.regression 28 | :members: 29 | -------------------------------------------------------------------------------- /docs/tests.rst: -------------------------------------------------------------------------------- 1 | Tests 2 | ===== 3 | 4 | .. 
class AggCluster1D(object):

    """
    Performs a simple agglomerative clustering of angular data.

    :param numclust: number of clusters desired, default: 2
    """

    def __init__(self, numclust=2):
        self.numclust = numclust

    def train(self, alpha):
        """
        Finds the agglomerative clustering on the data alpha.

        Repeatedly merges the pair of clusters whose circular means are
        closest until only ``numclust`` clusters remain.

        :param alpha: angles in radians (1d array)
        :returns: data, cluster ids
        """
        assert len(alpha.shape) == 1, 'Clustering works only for 1d data'
        n = len(alpha)
        cid = np.arange(n, dtype=int)

        nu = n

        while nu > self.numclust:
            # circular mean of every still-existing cluster; slots of merged
            # (dead) clusters get inf so their pairwise circular distances
            # become nan and are skipped by nanargmin below
            # (np.inf: np.Inf was removed in NumPy 2.0)
            mu = np.asarray([descr.mean(alpha[cid == j])
                             if j in cid else np.inf for j in range(n)])
            D = np.abs(descr.pairwise_cdiff(mu))
            idx = np.triu_indices(n, 1)
            # 'best' rather than shadowing the builtin 'min'
            best = np.nanargmin(D[idx])
            # merge the closest pair of clusters
            cid[cid == cid[idx[0][best]]] = cid[idx[1][best]]
            nu -= 1

        # relabel the surviving cluster ids to 0..numclust-1
        cid2 = np.empty_like(cid)
        for i, j in enumerate(np.unique(cid)):
            cid2[cid == j] = i
        ucid = np.unique(cid2)
        self.centroids = np.asarray([descr.mean(alpha[cid2 == i])
                                     for i in ucid])
        self.cluster_ids = ucid
        self.r = np.asarray([descr.resultant_vector_length(alpha[cid2 == i])
                             for i in ucid])

        return alpha, cid2

    def test(self, alpha):
        """
        Finds closest centroids to the data and returns their ids.

        :param alpha: angles in radians
        :return: data, cluster ids
        """
        D = np.abs(descr.pairwise_cdiff(self.centroids, alpha))
        idx = np.argmin(D, axis=0)
        return alpha, np.asarray([self.cluster_ids[i] for i in idx])
15 | """ 16 | def wrapper(f, *args, **kwargs): 17 | ret = f(*args, **kwargs) 18 | 19 | if isinstance(ret, tuple): 20 | ret2 = [] 21 | for r in ret: 22 | if isinstance(r, np.ndarray) or np.isscalar(r): 23 | ret2.append(r % (2 * np.pi)) 24 | elif isinstance(r, CI): 25 | ret2.append( 26 | CI(r.lower % (2 * np.pi), r.upper % (2 * np.pi))) 27 | else: 28 | raise TypeError("Type not known!") 29 | return tuple(ret2) 30 | elif isinstance(ret, np.ndarray) or np.isscalar(ret): 31 | return ret % (2 * np.pi) 32 | else: 33 | raise TypeError("Type not known!") 34 | 35 | return decorator(wrapper, f) 36 | 37 | 38 | def get_var(f, varnames, args, kwargs): 39 | fvarnames = f.__code__.co_varnames 40 | 41 | var_idx = [] 42 | kwar_keys = [] 43 | for varname in varnames: 44 | if varname in fvarnames: 45 | var_pos = fvarnames.index(varname) 46 | else: 47 | raise ValueError('Function %s does not have variable %s.' 48 | % (f.__name__, varnames)) 49 | if len(args) >= var_pos + 1: 50 | var_idx.append(var_pos) 51 | elif varname in kwargs: 52 | kwar_keys.append(varname) 53 | else: 54 | raise ValueError('%s was not specified in %s.' 55 | % (varnames, f.__name__)) 56 | 57 | return var_idx, kwar_keys 58 | 59 | 60 | class swap2zeroaxis: 61 | 62 | """ 63 | This decorator is best explained by an example:: 64 | 65 | @swap2zeroaxis(['x','y'], [0, 1]) 66 | def dummy(x,y,z, axis=None): 67 | return np.mean(x[::2,...], axis=0), np.mean(y[::2, ...], axis=0), z 68 | 69 | This creates a new function that 70 | 71 | - either swaps the axes axis to zero for the arguments x and y if axis 72 | is specified in dummy or ravels x and y 73 | - swaps back the axes from the output arguments 0 and 1. Here it is 74 | assumed that the outputs lost one dimension during the function 75 | (e.g. like numpy.mean(x, axis=1) looses one axis). 
76 | """ 77 | 78 | def __init__(self, inputs, out_idx): 79 | self.inputs = inputs 80 | self.out_idx = out_idx 81 | 82 | def __call__(self, f): 83 | 84 | def _deco(f, *args, **kwargs): 85 | 86 | to_swap_idx, to_swap_keys = get_var(f, self.inputs, args, kwargs) 87 | args = list(args) 88 | 89 | # extract axis parameter 90 | try: 91 | axis_idx, axis_kw = get_var(f, ['axis'], args, kwargs) 92 | if len(axis_idx) == 0 and len(axis_kw) == 0: 93 | axis = None 94 | else: 95 | if len(axis_idx) > 0: 96 | axis, args[axis_idx[0]] = args[axis_idx[0]], 0 97 | else: 98 | axis, kwargs[axis_kw[0]] = kwargs[axis_kw[0]], 0 99 | except ValueError: 100 | axis = None 101 | 102 | # adjust axes or flatten 103 | if axis is not None: 104 | for i in to_swap_idx: 105 | if args[i] is not None: 106 | args[i] = args[i].swapaxes(0, axis) 107 | for k in to_swap_keys: 108 | if kwargs[k] is not None: 109 | kwargs[k] = kwargs[k].swapaxes(0, axis) 110 | else: 111 | for i in to_swap_idx: 112 | if args[i] is not None: 113 | args[i] = args[i].ravel() 114 | for k in to_swap_keys: 115 | if kwargs[k] is not None: 116 | kwargs[k] = kwargs[k].ravel() 117 | 118 | # compute function 119 | outputs = f(*args, **kwargs) 120 | 121 | # swap everything back into place 122 | if len(self.out_idx) > 0 and axis is not None: 123 | if isinstance(outputs, tuple): 124 | outputs = list(outputs) 125 | for i in self.out_idx: 126 | outputs[i] = outputs[i][np.newaxis, ...].\ 127 | swapaxes(0, axis).squeeze() 128 | 129 | return tuple(outputs) 130 | else: 131 | if self.out_idx != [0]: 132 | raise ValueError("Single output argument and out_idx \ 133 | != [0] are inconsistent!") 134 | return outputs[np.newaxis, ...].swapaxes(0, axis).squeeze() 135 | else: 136 | return outputs 137 | 138 | return decorator(_deco, f) 139 | -------------------------------------------------------------------------------- /pycircstat/descriptive.py: -------------------------------------------------------------------------------- 1 | """ 2 | Descriptive 
class bootstrap:

    """
    Decorator to implement bootstrapping. It looks for the arguments ci, axis,
    and bootstrap_iter to determine the proper parameters for bootstrapping.
    The argument scale determines whether the percentile is taken on a circular
    scale or on a linear scale.

    :param no_bootstrap: the number of arguments that are bootstrapped
                         (e.g. for correlation it would be two, for median it
                         would be one)
    :param scale: linear or circular scale (default is 'linear')
    """

    def __init__(self, no_bootstrap, scale='linear'):
        # NOTE(review): the attribute keeps the historical misspelling
        # "no_boostrap"; left unchanged because it is read below.
        self.no_boostrap = no_bootstrap
        self.scale = scale

    def _get_var(self, f, what, default, args, kwargs, remove=False):
        # Fetch the value of parameter `what` from the positional args of a
        # call to f (the decorator module passes everything positionally);
        # if remove=True, replace it in-place with `default`.
        varnames = f.__code__.co_varnames

        if what in varnames:
            what_idx = varnames.index(what)
        else:
            raise ValueError('Function %s does not have variable %s.' %
                             (f.__name__, what))

        if len(args) >= what_idx + 1:
            val = args[what_idx]
            if remove:
                args[what_idx] = default
        # this part is actually never called since decocator seems to convert everything
        # positional arguments. Therefore, I just commented, but did not remove this piece
        # of code since it might be called with keyword arguments under certain circumstances.
        # elif what in kwargs:
        #     if remove:
        #         val = kwargs.pop(what, default)
        #     else:
        #         val = kwargs[what]
        else:
            val = default

        return val

    def __call__(self, f):

        def wrapper(f, *args, **kwargs):
            # strip the bootstrap-control parameters from the call so the
            # wrapped function never sees ci/bootstrap_iter itself
            args = list(args)
            ci = self._get_var(f, 'ci', None, args, kwargs, remove=True)
            bootstrap_iter = self._get_var(f, 'bootstrap_iter', None,
                                           args, kwargs, remove=True)
            axis = self._get_var(f, 'axis', None, args, kwargs)

            # first no_boostrap arguments are the data that get resampled
            alpha = args[:self.no_boostrap]
            args0 = args[self.no_boostrap:]

            # default: one bootstrap iteration per sample along axis
            if bootstrap_iter is None:
                bootstrap_iter = alpha[0].shape[axis] if axis is not None \
                    else alpha[0].size

            # point estimate on the original (non-resampled) data
            r0 = f(*(alpha + args0), **kwargs)
            if ci is not None:
                # re-evaluate f on each bootstrap resample
                r = np.asarray([f(*(list(a) + args0), **kwargs) for a in
                                nd_bootstrap(alpha, bootstrap_iter, axis=axis,
                                             strip_tuple_if_one=False)])

                # percentile CI of the bootstrap distribution, either on a
                # linear scale or on the circle (anchored opposite to r0)
                if self.scale == 'linear':
                    ci_low, ci_high = np.percentile(r, [(1 - ci) / 2 * 100,
                                                        (1 + ci) / 2 * 100],
                                                    axis=0)
                elif self.scale == 'circular':
                    ci_low, ci_high = percentile(r, [(1 - ci) / 2 * 100,
                                                     (1 + ci) / 2 * 100],
                                                 q0=(r0 + np.pi) % (2 * np.pi),
                                                 axis=0)
                else:
                    raise ValueError('Scale %s not known!' % (self.scale, ))
                return r0, CI(ci_low, ci_high)
            else:
                return r0

        return decorator(wrapper, f)
def center_angle(angle):
    """Wrap *angle* (radians) onto the half-open interval [-pi, pi)."""
    return (angle + np.pi) % (2 * np.pi) - np.pi


def cdiff(alpha, beta):
    """
    Element-wise circular difference :math:`x_i-y_i`.

    :param alpha: sample of circular random variable
    :param beta: sample of circular random variable
    :return: distance between the pairs, wrapped to [-pi, pi)
    """
    return center_angle(alpha - beta)


def pairwise_cdiff(alpha, beta=None):
    """
    All pairwise circular differences :math:`x_i-y_j`, computed efficiently
    via broadcasting.

    :param alpha: sample of circular random variable
    :param beta: sample of circular random variable (defaults to alpha,
                 giving all self-differences)
    :return: array of shape alpha.shape + beta.shape with the pairwise
             differences wrapped to [-pi, pi)

    References: [Zar2009]_, p. 651
    """
    if beta is None:
        beta = alpha

    # append one broadcast axis per beta dimension to alpha, and prepend
    # one per alpha dimension to beta, so subtraction broadcasts to
    # alpha.shape + beta.shape
    alpha_view = alpha[(Ellipsis,) + (np.newaxis,) * len(beta.shape)]
    beta_view = beta[(np.newaxis,) * len(alpha.shape) + (Ellipsis,)]

    return center_angle(alpha_view - beta_view)
def mean_ci_limits(alpha, ci=0.95, w=None, d=None, axis=None):
    """
    Computes the confidence limits on the mean for circular data.

    :param alpha: sample of angles in radians
    :param ci: ci-confidence limits are computed, default 0.95
    :param w: number of incidences in case of binned angle data
    :param d: spacing of bin centers for binned data, if supplied
              correction factor is used to correct for bias in
              estimation of r, in radians (!)
    :param axis: compute along this dimension, default is None
                 (across all dimensions)

    :return: confidence limit width d; mean +- d yields upper/lower
             (1-xi)% confidence limit

    References: [Fisher1995]_, [Jammalamadaka2001]_, [Zar2009]_
    """

    if w is None:
        w = np.ones_like(alpha)

    assert alpha.shape == w.shape, "Dimensions of data and w do not match!"

    r = np.atleast_1d(resultant_vector_length(alpha, w=w, d=d, axis=axis))
    n = np.atleast_1d(np.sum(w, axis=axis))

    R = n * r
    c2 = stats.chi2.ppf(ci, df=1)

    # start from all-nan; entries remain nan where neither branch below
    # applies.  (np.nan replaces np.NaN, which was removed in NumPy 2.0;
    # full_like avoids multiplying uninitialized memory from empty_like.)
    t = np.full_like(r, np.nan)

    # moderate concentration: eq. 26.24 of [Zar2009]_
    idx = (r < .9) & (r > np.sqrt(c2 / 2 / n))
    t[idx] = np.sqrt((2 * n[idx] * (2 * R[idx] ** 2 - n[idx] * c2))
                     / (4 * n[idx] - c2))  # eq. 26.24

    # high concentration: eq. 26.25 of [Zar2009]_
    idx2 = (r >= .9)
    t[idx2] = np.sqrt(n[idx2] ** 2 - (n[idx2] ** 2 - R[idx2] ** 2)
                      * np.exp(c2 / n[idx2]))  # equ. 26.25

    if not np.all(idx | idx2):
        # NOTE(review): raising UserWarning (a Warning subclass) as an
        # exception is unusual but kept for backward compatibility.
        raise UserWarning("""Requirements for confidence levels not met:
            CI limits require a certain concentration of the data around the mean""")

    return np.squeeze(np.arccos(t / R))
26.16) 323 | if d is not None: 324 | if axial_correction > 1: 325 | warnings.warn("Axial correction ignored for bias correction.") 326 | r *= d / 2 / np.sin(d / 2) 327 | return r 328 | 329 | # defines synonym for resultant_vector_length 330 | vector_strength = resultant_vector_length 331 | 332 | 333 | def _complex_mean(alpha, w=None, axis=None, axial_correction=1): 334 | if w is None: 335 | w = np.ones_like(alpha) 336 | alpha = np.asarray(alpha) 337 | 338 | assert w.shape == alpha.shape, "Dimensions of data " + str(alpha.shape) \ 339 | + " and w " + \ 340 | str(w.shape) + " do not match!" 341 | 342 | return ((w * np.exp(1j * alpha * axial_correction)).sum(axis=axis) / 343 | np.sum(w, axis=axis)) 344 | 345 | 346 | @mod2pi 347 | def center(*args, **kwargs): 348 | """ 349 | Centers the data on its circular mean. 350 | 351 | Each non-keyword argument is another data array that is centered. 352 | 353 | :param axis: the mean is computed along this dimension (default axis=None). 354 | **Must be used as a keyword argument!** 355 | :return: tuple of centered data arrays 356 | 357 | """ 358 | 359 | axis = kwargs.pop('axis', None) 360 | if axis is None: 361 | axis = 0 362 | args = [a.ravel() for a in args] 363 | 364 | reshaper = tuple(slice(None, None) if i != axis else np.newaxis 365 | for i in range(len(args[0].shape))) 366 | if len(args) == 1: 367 | return args[0] - mean(args[0], axis=axis) 368 | else: 369 | return tuple([a - mean(a, axis=axis)[reshaper] 370 | for a in args if isinstance(a, np.ndarray)]) 371 | 372 | 373 | @mod2pi 374 | @bootstrap(1, 'circular') 375 | def percentile(alpha, q, q0, axis=None, ci=None, bootstrap_iter=None): 376 | """ 377 | Computes circular percentiles 378 | 379 | :param alpha: array with circular samples 380 | :param q: percentiles in [0,100] (single number or iterable) 381 | :param q0: value of the 0 percentile 382 | :param axis: percentiles will be computed along this axis. 
@bootstrap(1, 'linear')
def var(alpha, w=None, d=None, axis=None, ci=None, bootstrap_iter=None):
    """
    Computes circular variance for circular data (equ. 26.17/18, Zar).

    :param alpha: sample of angles in radian
    :param w: number of incidences in case of binned angle data
    :param d: spacing of bin centers for binned data, if supplied
              correction factor is used to correct for bias in
              estimation of r
    :param axis: compute along this dimension,
                 default is None (across all dimensions)
    :param bootstrap_iter: number of bootstrap iterations
                           (number of samples if None)
    :param ci: if not None, confidence level is bootstrapped
    :return: circular variance 1 - resultant vector length

    References: [Zar2009]_
    """

    if axis is None:
        # no axis given: flatten and treat the data as one sample
        alpha = alpha.ravel()
        w = None if w is None else w.ravel()
        axis = 0

    if w is None:
        w = np.ones_like(alpha)

    assert w.shape == alpha.shape, "Dimensions of alpha and w must match"

    # circular variance is one minus the mean resultant vector length
    rbar = resultant_vector_length(alpha, w=w, d=d, axis=axis)
    return 1 - rbar
@bootstrap(1, 'linear')
def avar(alpha, w=None, d=None, axis=None, ci=None, bootstrap_iter=None):
    """
    Computes angular variance for circular data (equ. 26.17/18, Zar),
    i.e. twice the circular variance.

    :param alpha: sample of angles in radian
    :param w: number of incidences in case of binned angle data
    :param d: spacing of bin centers for binned data, if supplied
              correction factor is used to correct for bias in
              estimation of r
    :param axis: compute along this dimension,
                 default is None (across all dimensions)
    :param bootstrap_iter: number of bootstrap iterations
                           (number of samples if None)
    :param ci: if not None, confidence level is bootstrapped
    :return: 2 * circular variance

    References: [Zar2009]_
    """

    if axis is None:
        # no axis given: flatten and treat the data as one sample
        alpha = alpha.ravel()
        w = None if w is None else w.ravel()
        axis = 0

    if w is None:
        w = np.ones_like(alpha)

    return 2 * var(alpha, w=w, d=d, axis=axis, ci=None)


@bootstrap(1, 'linear')
def astd(alpha, w=None, d=None, axis=None, ci=None, bootstrap_iter=None):
    """
    Computes angular standard deviation for circular data, i.e. the
    square root of the angular variance.

    :param alpha: sample of angles in radian
    :param w: number of incidences in case of binned angle data
    :param d: spacing of bin centers for binned data, if supplied
              correction factor is used to correct for bias in
              estimation of r
    :param axis: compute along this dimension,
                 default is None (across all dimensions)
    :param bootstrap_iter: number of bootstrap iterations
                           (number of samples if None)
    :param ci: if not None, confidence level is bootstrapped
    :return: Square root of angular variance

    References: [Zar2009]_
    """

    if axis is None:
        # no axis given: flatten and treat the data as one sample
        alpha = alpha.ravel()
        w = None if w is None else w.ravel()
        axis = 0

    if w is None:
        w = np.ones_like(alpha)

    return np.sqrt(avar(alpha, w=w, d=d, axis=axis, ci=None))
def _corr(x, y, axis=0):
    # plain Pearson correlation along `axis`, normalized by the population
    # standard deviations and the number of samples
    dx = x - x.mean(axis=axis, keepdims=True)
    dy = y - y.mean(axis=axis, keepdims=True)
    return np.sum(dx * dy, axis=axis) \
        / np.std(x, axis=axis) / np.std(y, axis=axis) / x.shape[axis]


@bootstrap(1, 'linear')
def corrcl(alpha, x, axis=None, ci=None, bootstrap_iter=None):
    """
    Correlation coefficient between one circular and one linear random
    variable.

    :param alpha: sample of angles in radians
    :param x: sample of linear random variable
    :param axis: compute along this dimension,
                 default is None (across all dimensions)
    :param bootstrap_iter: number of bootstrap iterations
                           (number of samples if None)
    :param ci: if not None, confidence level is bootstrapped
    :return: correlation coefficient
    """
    assert alpha.shape == x.shape, "Dimensions of alpha and x must match"

    if axis is None:
        alpha, x, axis = alpha.ravel(), x.ravel(), 0

    # correlate the linear variable with the sin and cos components separately
    r_xs = _corr(x, np.sin(alpha), axis=axis)
    r_xc = _corr(x, np.cos(alpha), axis=axis)
    r_cs = _corr(np.sin(alpha), np.cos(alpha), axis=axis)

    # combine into the angular-linear correlation (equ. 27.47)
    return np.sqrt(
        (r_xc ** 2 + r_xs ** 2 - 2 * r_xc * r_xs * r_cs) / (1 - r_cs ** 2))


@bootstrap(2, 'linear')
def corrcc(alpha1, alpha2, ci=None, axis=None, bootstrap_iter=None):
    """
    Circular correlation coefficient for two circular random variables.

    If a confidence level is specified, confidence limits are bootstrapped.
    The number of bootstrapping iterations is min(number of data points
    along axis, bootstrap_max_iter).

    :param alpha1: sample of angles in radians
    :param alpha2: sample of angles in radians
    :param axis: correlation coefficient is computed along this dimension
                 (default axis=None, across all dimensions)
    :param ci: if not None, confidence level is bootstrapped
    :param bootstrap_iter: number of bootstrap iterations
                           (number of samples if None)
    :return: correlation coefficient if ci=None, otherwise correlation
             coefficient with lower and upper confidence limits

    References: [Jammalamadaka2001]_
    """
    assert alpha1.shape == alpha2.shape, 'Input dimensions do not match.'

    # center both samples on their circular means (p. 176)
    alpha1, alpha2 = center(alpha1, alpha2, axis=axis)

    s1, s2 = np.sin(alpha1), np.sin(alpha2)
    num = np.sum(s1 * s2, axis=axis)
    den = np.sqrt(np.sum(s1 ** 2, axis=axis) * np.sum(s2 ** 2, axis=axis))
    return num / den


@bootstrap(1, 'linear')
@swap2zeroaxis(['alpha'], [0])
def moment(alpha, p=1, cent=False,
           w=None, d=None, axis=None,
           ci=None, bootstrap_iter=None):
    """
    Computes the complex p-th centred or non-centred moment of the angular
    data in alpha.

    :param alpha: sample of angles in radian
    :param p: the p-th moment to be computed; default is 1.
    :param cent: if True, compute central moments. Default False.
    :param w: number of incidences in case of binned angle data
    :param d: spacing of bin centers for binned data, if supplied
              correction factor is used to correct for bias in
              estimation of r
    :param axis: compute along this dimension,
                 default is None (across all dimensions)
    :param ci: if not None, confidence level is bootstrapped
    :param bootstrap_iter: number of bootstrap iterations
                           (number of samples if None)
    :return: the complex p-th moment; its magnitude rho_p is np.abs(mp)
             and its angle mu_p is np.angle(mp). Bootstrap confidence
             intervals are returned as well when ci is given, e.g.
             mp, (ci_l, ci_u) = circ.moment(data, ci=0.95)

    References: [Fisher1995]_ p. 33/34
    """
    if w is None:
        w = np.ones_like(alpha)

    assert w.shape == alpha.shape, "Dimensions of alpha and w must match"

    if cent:
        # subtract the circular mean, broadcast along the first axis
        theta = mean(alpha, w=w, d=d, axis=axis)
        theta_bc = np.tile(theta, (alpha.shape[0],) + len(theta.shape) * (1,))
        alpha = cdiff(alpha, theta_bc)

    n = alpha.shape[axis]
    c_p = np.sum(np.cos(p * alpha) * w, axis) / n
    s_p = np.sum(np.sin(p * alpha) * w, axis) / n
    return c_p + 1j * s_p


@bootstrap(1, 'linear')
@swap2zeroaxis(['alpha'], [0])
def kurtosis(alpha, w=None, axis=None, mode='pewsey',
             ci=None, bootstrap_iter=None):
    """
    Calculates a measure of angular kurtosis.

    :param alpha: sample of angles
    :param w: weightings in case of binned angle data
    :param axis: statistic computed along this dimension
    :param mode: which kurtosis to compute (options are 'pewsey' or
                 'fisher'; 'pewsey' is default)
    :param ci: if not None, confidence level is bootstrapped
    :param bootstrap_iter: number of bootstrap iterations
    :return: the kurtosis
    :raise ValueError: if the mode is not 'pewsey' or 'fisher'

    References: [Pewsey2004]_, [Fisher1995]_ p. 34
    """
    if w is None:
        w = np.ones_like(alpha)
    else:
        assert w.shape == alpha.shape, "Dimensions of alpha and w must match"

    theta = mean(alpha, w=w, axis=axis)

    if mode == 'pewsey':
        # mean cosine of twice the deviation from the circular mean
        theta_bc = np.tile(theta, (alpha.shape[0],) + len(theta.shape) * (1,))
        deviations = cdiff(alpha, theta_bc)
        return np.sum(w * (np.cos(2 * deviations)), axis=0) / np.sum(w, axis=0)
    if mode == 'fisher':
        mom2 = moment(alpha, p=2, w=w, axis=axis, cent=False)
        mu2, rho2 = np.angle(mom2), np.abs(mom2)
        R = resultant_vector_length(alpha, w=w, axis=axis)
        # (formula 2.30)
        return (rho2 * np.cos(cdiff(mu2, 2 * theta)) - R ** 4) / (1 - R) ** 2
    raise ValueError("Mode %s not known!" % (mode, ))
756 | 757 | :param alpha: sample of angles 758 | :param w: weightings in case of binned angle data 759 | :param axis: statistic computed along this dimension (default None, collapse dimensions) 760 | :param ci: if not None, confidence level is bootstrapped 761 | :param bootstrap_iter: number of bootstrap iterations 762 | :param mode: which skewness to compute (options are 'pewsey' or 'fisher'; 'pewsey' is default) 763 | :return: the skewness 764 | :raise ValueError: 765 | 766 | References: [Pewsey2004]_, [Fisher1995]_ p. 34 767 | """ 768 | if w is None: 769 | w = np.ones_like(alpha) 770 | else: 771 | assert w.shape == alpha.shape, "Dimensions of alpha and w must match" 772 | 773 | # compute neccessary values 774 | theta = mean(alpha, w=w, axis=axis) 775 | 776 | # compute skewness 777 | if mode == 'pewsey': 778 | theta2 = np.tile(theta, (alpha.shape[0],) + len(theta.shape) * (1,)) 779 | return np.sum( 780 | w * np.sin(2 * cdiff(alpha, theta2)), axis=axis) / np.sum(w, axis=axis) 781 | elif mode == 'fisher': 782 | mom = moment(alpha, p=2, w=w, axis=axis, cent=False) 783 | mu2, rho2 = np.angle(mom), np.abs(mom) 784 | R = resultant_vector_length(alpha, w=w, axis=axis) 785 | return rho2 * np.sin(cdiff(mu2, 2 * theta)) / \ 786 | (1 - R)**(3. / 2) # (formula 2.29) 787 | else: 788 | raise ValueError("Mode %s not known!" 
@swap2zeroaxis(['alpha'], [0])
def kappa(alpha, w=None, axis=None):
    """
    Computes an approximation to the ML estimate of the concentration
    parameter kappa of the von Mises distribution.


    :param alpha: angles in radians OR alpha is length resultant
    :param w: number of incidences in case of binned angle data
    :param axis: kappa is computed along this axis
    :return: estimated value of kappa

    References: [Fisher1995]_ p. 88
    """

    if w is None:
        w = np.ones_like(alpha)
    else:
        assert w.shape == alpha.shape, "Dimensions of alpha and w must match"

    n = alpha.shape[axis]

    if n > 1:
        R = resultant_vector_length(alpha, w, axis=axis)
    else:
        # a single value is interpreted as a precomputed resultant length
        R = alpha
    R = np.atleast_1d(R)

    kappa = np.asarray(0 * R)

    # piecewise approximation of the ML estimate (Fisher 1995, p. 88)
    idx = R < 0.53
    kappa[idx] = 2. * R[idx] + R[idx]**3. + 5 * R[idx]**5. / 6

    idx = (R >= 0.53) & (R < 0.85)
    kappa[idx] = -.4 + 1.39 * R[idx] + 0.43 / (1. - R[idx])

    # BUGFIX: must be >= so that R == 0.85 falls into this branch instead of
    # silently keeping kappa = 0 (the reference implementation uses an
    # unconditional else here)
    idx = R >= 0.85
    kappa[idx] = 1. / (R[idx]**3. - 4. * R[idx]**2. + 3. * R[idx])

    # small-sample bias correction, only meaningful when R was estimated
    # from data (n > 1)
    if n < 15 and n > 1:
        idx = kappa < 2.
        kappa[idx] = kappa[idx] - 2 * (n * kappa[idx])**-1.
        idx0 = kappa < 0
        # clamp corrected estimates that became negative
        kappa[idx & idx0] = 0

        kappa[~idx] = (n - 1)**3 * kappa[~idx] / (n**3. + n)

    return kappa
math:: 69 | 70 | p(x) = \\frac{1 + 2 \\rho \\cos(x - \\mu)}{2\\pi} 71 | 72 | 73 | :param mu: mean (in [0, 2*pi]) 74 | :param rho: concentration parameter (in [-0.5,0.5]) 75 | 76 | 77 | 78 | **Note:** 79 | 80 | - To use the distribution, use *cardioid*, not *cardioid_gen*. 81 | - See scipy.stats how to use distributions. 82 | 83 | **Example:** 84 | 85 | :: 86 | 87 | from pycircstat.distributions import cardioid 88 | import matplotlib.pyplot as plt 89 | import numpy as np 90 | mu, rho = np.pi, .2 91 | t = np.linspace(0,2*np.pi,1000) 92 | x = cardioid.rvs(mu, rho, size=5000) 93 | plt.plot(t, cardioid.pdf(t, mu, rho)) 94 | plt.plot(t, cardioid.cdf(t, mu, rho)) 95 | plt.hist(x, bins=50, normed=True) 96 | plt.show() 97 | 98 | References: [Jammalamadaka2001]_ 99 | 100 | """ 101 | 102 | def _argcheck(self, mu, rho): 103 | return (-.5 <= rho <= .5) and (0 <= mu <= 2. * np.pi) 104 | 105 | def _stats(self, mu, rho): 106 | return mu, None, None, None 107 | 108 | def _pdf(self, x, mu, rho): 109 | x = x % (2 * np.pi) 110 | return (1 + 2 * rho * np.cos(x - mu)) / 2. / np.pi 111 | 112 | def _cdf(self, x, mu, rho): 113 | # x = (x - mu - np.pi) % (2*np.pi) 114 | # return (-mu + 2*rho*np.sin(x-mu) + x + np.pi)/2/np.pi 115 | x = x % (2 * np.pi) 116 | return (2 * rho * np.sin(x - mu) + x + 2 * rho * np.sin(mu)) / \ 117 | 2 / np.pi 118 | 119 | # hack for problems with numpy missing in readthedocs and mock 120 | if not 'sphinx' in sys.modules: 121 | cardioid = cardioid_gen(name='cardioid', shapes="mu, rho") 122 | 123 | 124 | class triangular_gen(rv_continuous): 125 | 126 | """ 127 | 128 | triangular distribution of a single random variable. 129 | 130 | .. math:: 131 | 132 | p(x) = \\frac{1}{8\\pi} (4-\\pi^2\\rho + 2\\pi\\rho |\\pi - x|) 133 | 134 | 135 | :param rho: concentration parameter (in [-0.5,0.5]) 136 | 137 | 138 | 139 | **Note:** 140 | 141 | - To use the distribution, use *triangular*, not *triangular_gen*. 142 | - See scipy.stats how to use distributions. 
143 | 144 | **Example:** 145 | 146 | :: 147 | 148 | from pycircstat.distributions import triangular 149 | import matplotlib.pyplot as plt 150 | import numpy as np 151 | rho = .4 152 | t = np.linspace(0,2*np.pi,1000) 153 | x = triangular.rvs(rho, size=5000) 154 | plt.plot(t, triangular.pdf(t, rho)) 155 | plt.plot(t, triangular.cdf(t, rho)) 156 | plt.hist(x, bins=50, normed=True) 157 | plt.show() 158 | 159 | 160 | References: [Jammalamadaka2001]_ 161 | 162 | """ 163 | 164 | def _argcheck(self, rho): 165 | return 0 <= rho <= 4. / np.pi ** 2. 166 | 167 | def _stats(self, rho): 168 | return 0, None, None, None 169 | 170 | def _pdf(self, x, rho): 171 | x = x % (2 * np.pi) 172 | return ((4 - np.pi ** 2. * rho + 2. * np.pi * rho * np.abs(np.pi - x)) 173 | / 8. / np.pi) 174 | 175 | def _cdf(self, x, rho): 176 | x = x % (2 * np.pi) 177 | ret = 0 * x 178 | idx = (x < np.pi) 179 | ret[idx] = -0.125 * rho[idx] * x[idx] ** 2 + x[idx] * \ 180 | (0.125 * np.pi ** 2 * rho[idx] + 0.5) / np.pi 181 | ret[~idx] = 0.125 * rho[~idx] * x[~idx] ** 2 + 0.25 * \ 182 | np.pi ** 2 * rho[~idx] - x[~idx] * \ 183 | (0.375 * np.pi ** 2 * rho[~idx] - 0.5) / np.pi 184 | return ret 185 | 186 | # hack for problems with numpy missing in readthedocs and mock 187 | if not 'sphinx' in sys.modules: 188 | triangular = triangular_gen(name='triangular', shapes="rho") 189 | 190 | # wrapper for von Mises 191 | vonmises = stats.vonmises 192 | -------------------------------------------------------------------------------- /pycircstat/event_series.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | import numpy as np 3 | from scipy import stats 4 | from pycircstat import var 5 | 6 | 7 | def convolve_dirac_gauss(t, trial, sigma=1.): 8 | """ 9 | Convolves event series represented as time points of Dirac deltas with 10 | the pdf of a Gaussian 11 | 12 | :param t: time points at which the convolution will be computed 13 | :param trial: array of event times 14 | 
:param sigma: std of the Gaussian convolution filter 15 | :returns: convolved event train 16 | """ 17 | ret = 0 * t 18 | for st in trial: 19 | ret[:] += stats.norm.pdf(t, loc=st, scale=sigma) 20 | return ret 21 | 22 | 23 | def vector_strength_spectrum(event_times, sampling_rate, time=None): 24 | """ 25 | Computes the vector strength (resultant vector length) between a series of events and a 26 | sinusoid of many frequencies. The resolution in frequency space is determines by the 27 | sampling rate. 28 | 29 | :param event_times: event times in seconds 30 | :param sampling_rate: sampling rate in Hz 31 | :param time: np.array of time points or two values that denote a (right open) time range 32 | :return: frequencies and vector strength between the events and sinusoids at these frequencies 33 | 34 | **Example** 35 | 36 | :: 37 | 38 | T = 3 # three seconds 39 | n = 20 40 | sampling_rate = 10000. # sampling rate in Hz 41 | events = T*np.random.rand(n) 42 | w, vs_spec = es.vector_strength_spectrum(events, sampling_rate) 43 | 44 | """ 45 | dt = 1. / sampling_rate 46 | if time is not None: 47 | if len(time) == 2: 48 | t = np.arange(time[0], time[1], dt) 49 | else: 50 | assert np.abs( 51 | dt - (time[1] - time[0])) < 1e-6, "Sampling rate and dt in time do not agree." 52 | t = time 53 | else: 54 | t = np.arange( 55 | np.amin(event_times) - 56 | 50. / 57 | sampling_rate, 58 | np.amax(event_times) + 59 | 50. / 60 | sampling_rate, 61 | dt) 62 | 63 | w = np.fft.fftfreq(len(t), d=dt) 64 | sigma = 1. / 2. 
def _vector_strength(param):
    """
    Vector strength of event times locked to a single frequency.

    The single tuple argument keeps the function usable as a map/pool
    worker (presumably the reason for the packed signature -- keep it).

    :param param: tuple (event_times, w) with event times in seconds and
                  a locking frequency w in Hz
    :return: vector strength, i.e. 1 - circular variance of the phases
    """
    event_times, w = param
    # map each event onto its phase within one period and measure dispersion
    return 1 - var((event_times % (1. / w)) * w * 2 * np.pi)


def direct_vector_strength_spectrum(event_times, frequencies):
    """
    Computes the direct vector strength spectrum for the given frequencies.

    :param event_times: event times in seconds
    :param frequencies: locking frequencies in Hz
    :return: vector strength spectrum, one value per frequency
    """
    # reuse _vector_strength instead of duplicating its formula inline so
    # both code paths cannot drift apart
    return np.asarray([_vector_strength((event_times, w)) for w in frequencies])
class BaseRegressor(object):
    """
    Basic regressor object. Mother class to all other regressors.

    Regressors support indexing which is passed to the coefficients.

    Regressors also support calling. In this case the prediction function is called.
    """

    def __init__(self):
        # coefficient container; None means "not trained yet"
        self._coef = None

    def istrained(self):
        """
        Returns whether the regressor is trained or not.

        :return: True if trained
        """
        return self._coef is not None

    def train(self, *args, **kwargs):
        # must be implemented by concrete subclasses
        raise NotImplementedError(u"{0:s}.train not implemented".format(self.__class__.__name__))

    def test(self, *args, **kwargs):
        # must be implemented by concrete subclasses
        raise NotImplementedError(u"{0:s}.test not implemented".format(self.__class__.__name__))

    def loss(self, x, y, lossfunc, ci=None, bootstrap_iter=1000):
        """
        Computes loss function between the predictions f(x) and the true y.

        :param x: inputs in radians. If multidimensional, each row must
                  be a specimen and each column a feature.
        :param y: desired outputs in radians. If multidimensional, each
                  row must be a specimen and each column a feature.
        :param lossfunc: loss function, must take an array of input and outputs and compute the loss.
        :param ci: confidence interval in [0,1]. If not None, bootstrapping is performed.
        :param bootstrap_iter: number of bootstrap iterations if ci is not None
        :return: loss as computed by the loss function; with ci, a tuple of
                 the mean loss and a CI of bootstrapped percentiles
        """
        if ci is not None:
            yhat = self.predict(x)
            l = [lossfunc(y[idx], yhat[idx]) for idx in index_bootstrap(x.shape[0], bootstrap_iter)]
            mu = np.mean(l)
            q = 1 - ci
            # BUGFIX: np.percentile expects values in [0, 100]; the upper
            # limit was previously computed as 1 - q/2*100, which is
            # negative for any ci and raised a ValueError
            return mu, CI(np.percentile(l, q / 2. * 100),
                          np.percentile(l, (1 - q / 2.) * 100))
        return lossfunc(y, self.predict(x))

    def predict(self, *args, **kwargs):
        # must be implemented by concrete subclasses
        raise NotImplementedError(u"{0:s}.predict not implemented".format(self.__class__.__name__))

    def __getitem__(self, item):
        # indexing is delegated to the coefficient container
        return self._coef.__getitem__(item)

    def __setitem__(self, key, value):
        # BUGFIX: this previously delegated to __getitem__(key, value), so
        # any item assignment raised a TypeError instead of setting the
        # coefficient
        return self._coef.__setitem__(key, value)

    def __call__(self, *args, **kwargs):
        assert self.istrained(), "Regressor must be trained first."
        return self.predict(*args, **kwargs)
    def __init__(self):
        super(CL1stOrderRegression, self).__init__()

    def train(self, alpha, x):
        """
        Estimates the regression coefficients. Only works for 1D data.

        :param alpha: independent variable, angles in radians
        :param x: dependent variable
        """
        assert alpha.shape == x.shape, "x and alpha need to have the same shape"
        assert len(alpha.shape) == 1, "regression only implemented for 1D data"
        assert len(x.shape) == 1, "regression only implemented for 1D data"

        # design matrix with columns cos(alpha), sin(alpha) and an intercept:
        # x = c1*cos(alpha) + c2*sin(alpha) + m is linear in (c1, c2, m)
        X = np.c_[np.cos(alpha), np.sin(alpha), np.ones_like(alpha)]
        # least-squares solution via the Moore-Penrose pseudoinverse
        c = np.dot(np.linalg.pinv(X), x)
        self._coef = c

    def predict(self, alpha):
        """
        Predicts linear values from the angles.

        :param alpha: inputs, angles in radians
        :return: predictions
        """
        # same design matrix layout as in train: cos, sin, intercept
        X = np.c_[np.cos(alpha), np.sin(alpha), np.ones_like(alpha)]

        return np.dot(X, self._coef)
122 | 123 | :param alpha: independent variable, angles in radians 124 | :param x: dependent variable 125 | :return: test results of Shapiro-Wilk and Liddell-Ord test 126 | :rtype: pandas.DataFrame 127 | 128 | References: [Jammalamadaka2001]_ 129 | """ 130 | w, psw = stats.shapiro(x) 131 | if psw < 0.05: 132 | warnings.warn("This test requires Gaussian distributed x") 133 | 134 | rxc, rxs, rcs = np.corrcoef(x, np.cos(alpha))[0,1], np.corrcoef(x, np.sin(alpha))[0,1], \ 135 | np.corrcoef(np.cos(alpha), np.sin(alpha))[0,1] 136 | n = len(alpha) 137 | r2 = (rxc**2 + rxs**2 - 2*rxc*rxs*rcs)/(1 - rcs**2) 138 | f = (n-3)*r2/(1-r2) 139 | p = stats.f.sf(f, 2, n-3) 140 | 141 | df = pd.DataFrame(dict( 142 | test = ['Shapiro-Wilk','Liddell-Ord'], 143 | statistics = [w, f], 144 | p = [psw, p], 145 | dof = [None, (2, n-3)] 146 | )).set_index('test') 147 | return df 148 | 149 | class CCTrigonometricPolynomialRegression(BaseRegressor): 150 | """ 151 | Implements a circular circular regression model of the form 152 | 153 | .. math:: 154 | \\cos(\\beta) = a_0 + \\sum_{k=1}^d a_k \\cos(k\\alpha) + b_k \\sin(k\\alpha) 155 | 156 | \\sin(\\beta) = c_0 + \\sum_{k=1}^d c_k \\cos(k\\alpha) + d_k \\sin(k\\alpha) 157 | 158 | The angles :math:`\\beta` are estimated via :math:`\\hat\\beta = atan2(\\sin(\\beta), \\cos(\\beta))` 159 | 160 | 161 | 162 | :param degree: degree d of the trigonometric polynomials 163 | 164 | References: [Jammalamadaka2001]_ 165 | """ 166 | 167 | def __init__(self, degree=3): 168 | super(CCTrigonometricPolynomialRegression, self).__init__() 169 | self.degree = degree 170 | 171 | def train(self, alpha, beta): 172 | """ 173 | Estimates the regression coefficients. Only works for 1D data. 
174 | 175 | :param alpha: independent variable, angles in radians 176 | :param beta: dependent variable, angles in radians 177 | """ 178 | X = np.vstack([np.ones_like(alpha)] + [np.cos(alpha*k) for k in np.arange(1., self.degree+1)] \ 179 | + [np.sin(alpha*k) for k in np.arange(1., self.degree+1)]).T 180 | self._coef = np.c_[np.dot(np.linalg.pinv(X), np.cos(beta)), 181 | np.dot(np.linalg.pinv(X), np.sin(beta))] 182 | 183 | def predict(self, alpha): 184 | """ 185 | Predicts linear values from the angles. 186 | 187 | :param alpha: inputs, angles in radians 188 | :return: predictions, angles in radians 189 | """ 190 | X = np.vstack([np.ones_like(alpha)] + [np.cos(alpha*k) for k in np.arange(1., self.degree+1)] \ 191 | + [np.sin(alpha*k) for k in np.arange(1., self.degree+1)]).T 192 | beta = np.dot(X, self._coef) 193 | return np.arctan2(beta[:,1], beta[:,0]) 194 | 195 | -------------------------------------------------------------------------------- /pycircstat/tests.py: -------------------------------------------------------------------------------- 1 | """ 2 | Statistical tests 3 | """ 4 | from __future__ import absolute_import, division 5 | import warnings 6 | from nose.tools import nottest 7 | 8 | import numpy as np 9 | from scipy import stats 10 | # import warnings 11 | from . import descriptive, swap2zeroaxis 12 | from . import utils 13 | from .distributions import kappa 14 | import pandas as pd 15 | from pycircstat.data import load_kuiper_table 16 | from scipy import special 17 | 18 | @swap2zeroaxis(['alpha', 'w'], [0, 1]) 19 | def rayleigh(alpha, w=None, d=None, axis=None): 20 | """ 21 | Computes Rayleigh test for non-uniformity of circular data. 22 | 23 | H0: the population is uniformly distributed around the circle 24 | HA: the populatoin is not distributed uniformly around the circle 25 | 26 | Assumption: the distribution has maximally one mode and the data is 27 | sampled from a von Mises distribution! 
@swap2zeroaxis(['alpha', 'w'], [0, 1])
def omnibus(alpha, w=None, sz=np.radians(1), axis=None):
    """
    Computes omnibus test for non-uniformity of circular data. The test is also
    known as Hodges-Ajne test.

    H0: the population is uniformly distributed around the circle
    HA: the population is not distributed uniformly around the circle

    Alternative to the Rayleigh and Rao's test. Works well for unimodal,
    bimodal or multimodal data. If requirements of the Rayleigh test are
    met, the latter is more powerful.

    :param alpha: sample of angles in radian
    :param w: number of incidences in case of binned angle data
    :param sz: step size for evaluating distribution, default 1 deg
    :param axis: compute along this dimension, default is None
                 if axis=None, array is raveled
    :return pval: two-tailed p-value
    :return m: minimum number of samples falling in one half of the circle

    References: [Fisher1995]_, [Jammalamadaka2001]_, [Zar2009]_
    """

    if w is None:
        w = np.ones_like(alpha)

    assert w.shape == alpha.shape, "Dimensions of alpha and w must match"

    alpha = alpha % (2 * np.pi)
    n = np.sum(w, axis=axis)

    # BUGFIX: the step size parameter sz was ignored before and the
    # distribution was always evaluated in steps of 1 degree
    dg = np.arange(0, np.pi, sz)

    m1 = np.zeros((len(dg),) + alpha.shape[1:])
    m2 = np.zeros((len(dg),) + alpha.shape[1:])

    # count the weighted samples in each half circle while the dividing
    # diameter rotates through dg
    for i, dg_val in enumerate(dg):
        m1[i, ...] = np.sum(
            w * ((alpha > dg_val) & (alpha < np.pi + dg_val)), axis=axis)
        m2[i, ...] = n - m1[i, ...]

    m = np.concatenate((m1, m2), axis=0).min(axis=axis)

    n = np.atleast_1d(n)
    m = np.atleast_1d(m)
    A = np.empty_like(n)
    pval = np.empty_like(n)
    idx50 = (n > 50)

    # large-sample approximation for n > 50
    if np.any(idx50):
        A[idx50] = np.pi * np.sqrt(n[idx50]) / 2 / (n[idx50] - 2 * m[idx50])
        pval[idx50] = np.sqrt(2 * np.pi) / A[idx50] * \
            np.exp(-np.pi ** 2 / 8 / A[idx50] ** 2)

    # exact formula for small samples
    if np.any(~idx50):
        pval[~idx50] = 2 ** (1 - n[~idx50]) * (n[~idx50] -
                2 * m[~idx50]) * special.comb(n[~idx50], m[~idx50])

    return pval.squeeze(), m


@swap2zeroaxis(['alpha'], [0, 1, 2])
def raospacing(alpha, axis=None):
    """
    Calculates Rao's spacing test by comparing distances between points on
    a circle to those expected from a uniform distribution.

    H0: Data is distributed uniformly around the circle.
    H1: Data is not uniformly distributed around the circle.

    Alternative to the Rayleigh test and the Omnibus test. Does not assume
    a unimodal distribution as alternative. Less powerful than the Rayleigh
    test when the distribution is unimodal on a global scale but uniform
    locally.

    Due to the complexity of the distribution of the test statistic, we
    resort to the tables published by Russel and Levitin (references below).

    Therefore the reported p-value is the smallest alpha level at which the
    test would still be significant. If the test is not significant at the
    alpha=0.1 level, we return the critical value for alpha = 0.05 and p =
    0.5.

    :param alpha: sample of angles in radian
    :param axis: compute along this dimension, default is 0
                 if axis=None, array is raveled
    :return pval: smallest p-value at which test is significant
    :return U: test statistic
    :return Uc: critical value at the p-value returned

    References: [Batschelet1981]_, [RusselLevitin1995]_
    """

    alpha = np.degrees(alpha)
    alpha = np.sort(alpha, axis=axis)

    n = alpha.shape[axis]
    assert n >= 4, 'Rao spacing test requires at least 4 samples'

    # expected spacing under uniformity; local name renamed from `kappa`
    # to avoid shadowing the kappa() imported from .distributions
    expected = 360 / n

    # compute test statistic along 0 dimension (swap2zeroaxis)
    U = 0.
    for j in range(0, n - 1):
        ti = alpha[j + 1, ...] - alpha[j, ...]
        U = U + np.abs(ti - expected)

    # closing gap between the last and the first sample on the circle
    tn = 360 - alpha[-1, ...] + alpha[0, ...]
    U = U + abs(tn - expected)

    U = .5 * U

    # get critical value from table
    pval, Uc = _critical_value_raospacing(n, U)

    return pval, U, Uc
138.23, 136.91, 228 | 700, 142.66, 140.09, 137.80, 136.59, 229 | 800, 142.00, 139.60, 137.46, 136.33, 230 | 900, 141.45, 139.19, 137.18, 136.11, 231 | 1000, 140.99, 138.84, 136.94, 135.92]) 232 | table = table.reshape((-1, 5)) 233 | 234 | if not hasattr(U, 'shape'): 235 | U = np.array(U) 236 | 237 | old_shape = U.shape 238 | U = U.ravel() 239 | Uc, p = 0 * U, 0 * U 240 | 241 | for i, loop_u in enumerate(U): 242 | ridx = (table[:, 0] >= n).argmax() 243 | cidx = (table[ridx, 1:] < loop_u).argmax() 244 | 245 | if (cidx > 0) | ((cidx == 0) & (table[ridx, cidx + 1] < loop_u)): 246 | Uc[i] = table[ridx, cidx + 1] 247 | p[i] = alpha_level[cidx] 248 | else: 249 | Uc[i] = table[ridx, -1] 250 | p[i] = .5 251 | 252 | return p.reshape(old_shape), Uc.reshape(old_shape) 253 | 254 | 255 | @swap2zeroaxis(['alpha', 'w'], [0, 1]) 256 | def vtest(alpha, mu, w=None, d=None, axis=None): 257 | """ 258 | Computes V test for nonuniformity of circular data with a known mean 259 | direction of dir. 260 | 261 | H0: the population is uniformly distributed around the circle 262 | HA: the populatoin is not distributed uniformly around the circle but 263 | has a mean of mu 264 | 265 | Note: Not rejecting H0 may mean that the population is uniformly 266 | distributed around the circle OR that it has a mode but that this mode 267 | is not centered at dir. 268 | 269 | The V test has more power than the Rayleigh test and is preferred if 270 | there is reason to believe (before seeing the data!) in a specific 271 | mean direction. 
@swap2zeroaxis(['alpha', 'w'], [0, 1])
def vtest(alpha, mu, w=None, d=None, axis=None):
    """
    Computes V test for nonuniformity of circular data with a known mean
    direction mu.

    H0: the population is uniformly distributed around the circle
    HA: the population is not distributed uniformly around the circle but
        has a mean of mu

    Note: Not rejecting H0 may mean that the population is uniformly
    distributed around the circle OR that it has a mode but that this mode
    is not centered at mu.

    The V test has more power than the Rayleigh test and is preferred if
    there is reason to believe (before seeing the data!) in a specific
    mean direction.

    :param alpha: sample of angles in radian
    :param mu: suspected mean direction
    :param w: number of incidences in case of binned angle data
    :param d: spacing of bin centers for binned data, if supplied
              correction factor is used to correct for bias in
              estimation of r
    :param axis: compute along this dimension, default is None
                 if axis=None, array is raveled
    :return pval: p-value (upper tail of the normal approximation)
    :return v: value of the v-statistic

    References: [Zar2009]_
    """

    if w is None:
        # float weights: an integer-typed alpha would otherwise give integer
        # counts, and on Python 2 make 2 / n below an integer division
        w = np.ones_like(alpha, dtype=float)
    assert w.shape == alpha.shape, "Dimensions of alpha and w must match"

    r = descriptive.resultant_vector_length(alpha, w=w, d=d, axis=axis)
    m = descriptive.mean(alpha, w=w, d=d, axis=axis)
    n = np.sum(w, axis=axis)

    # compute Rayleigh's R (equ. 27.1)
    R = n * r

    # compute V and u (equ. 27.5); the float literal keeps the quotient
    # exact regardless of the dtype of n
    V = R * np.cos(m - mu)
    u = V * np.sqrt(2. / n)
    # compute p value using approximation in Zar, p. 617
    pval = 1 - stats.norm.cdf(u)

    return pval, V
@swap2zeroaxis(['alpha'], [0, 1])
def symtest(alpha, axis=None):
    """
    Non-parametric test for symmetry around the median, implemented as a
    Wilcoxon signed-rank test on the circular differences to the median.

    H0: the population is symmetrical around the median
    HA: the population is not symmetrical around the median


    :param alpha: sample of angles in radian
    :param axis: compute along this dimension, default is None
                 if axis=None, array is raveled
    :return pval: two-tailed p-value
    :return T: test statistics of underlying wilcoxon test


    References: [Zar2009]_
    """

    med = descriptive.median(alpha, axis=axis)

    # signed circular distance of every observation to the median
    diffs = np.angle(np.exp(1j * med[np.newaxis]) / np.exp(1j * alpha))

    if axis is None:
        stat, pval = stats.wilcoxon(diffs)
    else:
        # run one signed-rank test per slice along the remaining axes,
        # then restore the original trailing shape
        out_shape = diffs.shape[1:]
        columns = diffs.reshape((diffs.shape[0], int(np.prod(out_shape)))).T
        results = [stats.wilcoxon(col) for col in columns]
        stat, pval = (np.asarray(vals).reshape(out_shape)
                      for vals in zip(*results))

    return pval, stat
@nottest
def watson_williams(*args, **kwargs):
    """
    Parametric Watson-Williams multi-sample test for equal means. Can be
    used as a one-way ANOVA test for circular data.

    H0: the s populations have equal means
    HA: the s populations have unequal means

    Note:
    Use with binned data is only advisable if binning is finer than 10 deg.
    In this case, alpha is assumed to correspond
    to bin centers.

    The Watson-Williams two-sample test assumes underlying von-Mises
    distributions. All groups are assumed to have a common concentration
    parameter k.

    :param args: number of arrays containing the data; angles in radians
    :param w: list the same size as the number of args containing the number of
              incidences for each arg. Must be passed as keyword argument.
    :param axis: the test will be performed along this axis. Must be passed as keyword
                 argument.

    :return pval, table: p-value and pandas dataframe containing the ANOVA table

    """

    axis = kwargs.get('axis', None)
    w = kwargs.get('w', None)

    # argument checking: one weight array per sample, shapes must agree
    if w is not None:
        assert len(w) == len(
            args), "w must have the same length as number of arrays"
        for i, (ww, alpha) in enumerate(zip(w, args)):
            assert ww.shape == alpha.shape, "w[%i] and argument %i must have same shape" % (
                i, i)
    else:
        w = [np.ones_like(a) for a in args]

    if axis is None:
        # flatten every sample so the whole computation is 1-d
        alpha = list(map(np.ravel, args))
        w = list(map(np.ravel, w))
    else:
        alpha = args

    # k: number of groups
    k = len(args)

    # np.asarray(list())
    # per-group sample sizes and (weighted) resultant vector lengths
    ni = list(map(lambda x: np.sum(x, axis=axis), w))
    ri = np.asarray([descriptive.resultant_vector_length(
        a, ww, axis=axis) for a, ww in zip(alpha, w)])

    # pooled resultant vector length over all groups
    r = descriptive.resultant_vector_length(
        np.concatenate(
            alpha, axis=axis), np.concatenate(
            w, axis=axis), axis=axis)
    # this must not be the numpy sum since the arrays are to be summed
    n = sum(ni)

    # average (sample-size weighted) resultant length and the common
    # concentration parameter estimated from it
    rw = sum([rii * nii / n for rii, nii in zip(ri, ni)])
    kk = kappa(rw[None, ...], axis=0)

    # beta: correction factor for the F statistic
    beta = 1 + 3. / (8 * kk)
    # A: between-groups sum of squares, B: within-groups sum of squares
    A = sum([rii * nii for rii, nii in zip(ri, ni)]) - r * n
    B = n - sum([rii * nii for rii, nii in zip(ri, ni)])

    F = (beta * (n - k) * A / (k - 1) / B).squeeze()
    pval = stats.f.sf(F, k - 1, n - k).squeeze()

    # applicability checks: the approximation degrades for small samples
    # and weakly concentrated data
    if np.any((n >= 11) & (rw < .45)):
        warnings.warn(
            'Test not applicable. Average resultant vector length < 0.45.')
    elif np.any((n < 11) & (n >= 7) & (rw < .5)):
        warnings.warn(
            'Test not applicable. Average number of samples per population 6 < x < 11 '
            'and average resultant vector length < 0.5.')
    elif np.any((n >= 5) & (n < 7) & (rw < .55)):
        warnings.warn(
            'Test not applicable. Average number of samples per population 4 < x < 7 and '
            'average resultant vector length < 0.55.')
    elif np.any(n < 5):
        warnings.warn(
            'Test not applicable. Average number of samples per population < 5.')

    if np.prod(pval.shape) > 1:
        # vectorized case: build one ANOVA table per element of pval
        T = np.zeros_like(pval, dtype=object)
        for idx, p in np.ndenumerate(pval):
            T[idx] = pd.DataFrame({'Source': ['Columns', 'Residual', 'Total'],
                                   'df': [k - 1, n[idx] - k, n[idx] - 1],
                                   'SS': [A[idx], B[idx], A[idx] + B[idx]],
                                   'MS': [A[idx] / (k - 1), B[idx] / (n[idx] - k), np.NaN],
                                   'F': [F[idx], np.NaN, np.NaN],
                                   'p-value': [p, np.NaN, np.NaN]}).set_index('Source')

    else:
        T = pd.DataFrame({'Source': ['Columns', 'Residual', 'Total'],
                          'df': [k - 1, n - k, n - 1],
                          'SS': [A, B, A + B],
                          'MS': [A / (k - 1), B / (n - k), np.NaN],
                          'F': [F, np.NaN, np.NaN],
                          'p-value': [pval, np.NaN, np.NaN]}).set_index('Source')

    return pval, T
@swap2zeroaxis(['alpha1', 'alpha2'], [0, 1])
def kuiper(alpha1, alpha2, res=100, axis=None):
    """
    The Kuiper two-sample test tests whether the two samples differ
    significantly. The difference can be in any property, such as mean
    location and dispersion. It is a circular analogue of the
    Kolmogorov-Smirnov test.

    H0: The two distributions are identical.
    HA: The two distributions are different.

    :param alpha1: first sample (in radians)
    :param alpha2: second sample (in radians)
    :param res: resolution at which the cdf is evaluated (default 100)
    :returns: p-value and test statistic
              p-value is the smallest of .10, .05, .02, .01, .005, .002,
              .001, for which the test statistic is still higher
              than the respective critical value. this is due to
              the use of tabulated values. if p>.1, pval is set to 1.

    References: [Batschelet1980]_ p. 112

    """

    if axis is not None:
        assert alpha1.shape[1:] == alpha2.shape[1:], \
            "Shapes of alphas not consistent with computation along axis."
    # NOTE(review): relies on swap2zeroaxis having moved the target axis to
    # position 0; behavior for axis=None assumes the decorator normalizes
    # the axis after raveling -- confirm against decorators.py
    n, m = alpha1.shape[axis], alpha2.shape[axis]

    _, cdf1 = _sample_cdf(alpha1, res, axis=axis)
    _, cdf2 = _sample_cdf(alpha2, res, axis=axis)

    # one-sided maximal deviations between the two empirical cdfs
    dplus = np.atleast_1d((cdf1 - cdf2).max(axis=axis))
    dplus[dplus < 0] = 0.
    dminus = np.atleast_1d((cdf2 - cdf1).max(axis=axis))
    dminus[dminus < 0] = 0.

    # Kuiper statistic and its normalization for the table lookup
    k = n * m * (dplus + dminus)
    mi = np.min([m, n])
    fac = np.sqrt(n * m * (n + m))
    pval = np.asarray([_kuiper_lookup(mi, kk / fac)
                       for kk in k.ravel()]).reshape(k.shape)
    return pval, k


def _kuiper_lookup(n, k):
    """Look up the tabulated p-value for the Kuiper two-sample statistic.

    :param n: sample size used to select the table row
    :param k: normalized test statistic
    :returns: smallest tabulated alpha whose critical value is exceeded by
              ``k``, or 1.0 if ``k`` is below all critical values
    :raises ValueError: if ``n`` is smaller than the tabulated range
    """
    ktable = load_kuiper_table()

    alpha = np.asarray([.10, .05, .02, .01, .005, .002, .001])
    nn = ktable[:, 0]

    isin = (nn == n)
    if np.any(isin):
        # exact match: use that row directly and do not warn
        row = np.where(isin)[0]
    else:
        # nearest tabulated N below n; row <= 0 also covers n smaller than
        # the table's first entry (which would otherwise wrap to -1 and
        # silently index the last row)
        row = len(nn) - np.sum(n < nn) - 1
        if row <= 0:
            raise ValueError('N too small.')
        # only warn when we actually had to substitute a neighbouring N
        warnings.warn(
            'N=%d not found in table, using closest N=%d present.' %
            (n, nn[row]))

    idx = (ktable[row, 1:] < k).squeeze()
    if np.any(idx):
        return alpha[idx].min()
    else:
        return 1.
@swap2zeroaxis(['alpha'], [1])
def _sample_cdf(alpha, resolution=100., axis=None):
    """

    Helper function for circ_kuipertest.
    Evaluates the empirical CDF of a sample of angles.

    :param alpha: sample (in radians)
    :param resolution: number of bins on [0, 2pi) (default 100)
    :param axis: axis along which the cdf is computed
    :returns: points at which cdf is evaluated, cdf values

    """

    if axis is None:
        alpha = alpha.ravel()
        axis = 0
    # int(...) because np.linspace requires an integral bin count on modern
    # numpy (the default resolution is the float 100.)
    bins = np.linspace(0, 2 * np.pi, int(resolution) + 1)
    old_shape = alpha.shape
    alpha = alpha % (2 * np.pi)

    # flatten trailing axes, histogram each column, cumulate, renormalize by
    # the sample size, then restore the trailing shape
    alpha = alpha.reshape((alpha.shape[0], int(np.prod(alpha.shape[1:])))).T
    cdf = np.array([np.histogram(a, bins=bins)[0]
                    for a in alpha]).cumsum(axis=1) / float(alpha.shape[1])
    cdf = cdf.T.reshape((len(bins) - 1,) + old_shape[1:])

    return bins[:-1], cdf


@nottest
def cmtest(*args, **kwargs):
    """
    Non parametric multi-sample test for equal medians. Similar to a
    Kruskal-Wallis test for linear data.

    H0: the s populations have equal medians
    HA: the s populations have unequal medians

    :param alpha1: angles in radians
    :param alpha2: angles in radians
    :returns: p-value and test statistic of the common median test


    References: [Fisher1995]_

    """
    axis = kwargs.get('axis', None)
    if axis is None:
        alpha = list(map(np.ravel, args))
    else:
        alpha = args

    # s: number of groups; n: per-group sample sizes; N: total
    s = len(alpha)
    n = [(0 * a + 1).sum(axis=axis) for a in alpha]
    N = sum(n)

    # pooled median over all groups
    med = descriptive.median(np.concatenate(alpha, axis=axis), axis=axis)
    if axis is not None:
        med = np.expand_dims(med, axis=axis)

    # number of observations below the pooled median, per group
    m = [np.sum(descriptive.cdiff(a, med) < 0, axis=axis) for a in alpha]
    if np.any([nn < 10 for nn in n]):
        warnings.warn('Test not applicable. Sample size in at least one group too small.')
    M = sum(m)
    # common-median chi^2 statistic; the float factor keeps the last term an
    # exact division even for integer-typed counts (Python 2)
    P = (N ** 2. / (M * (N - M))) * sum([mm ** 2.
                                         / nn for mm, nn in zip(m, n)]) - 1. * N * M / (N - M)
    pval = stats.chi2.sf(P, df=s - 1)
    return pval, P
@nottest
def mtest(alpha, dir, xi=0.05, w=None, d=None, axis=None):
    """
    One-Sample test for the mean angle.

    H0: the population has mean dir.
    HA: the population has not mean dir.

    Note: This is the equivalent to a one-sample t-test with specified
    mean direction.

    :param alpha: sample of angles in radians
    :param dir: assumed mean direction
    :param xi: significance level of the underlying confidence interval
    :param w: number of incidences in case of binned angle data
    :param d: spacing of bin centers for binned data, if supplied
              correction factor is used to correct for bias in
              estimation of r, in radians (!)
    :param axis: test is computed along this axis
    :returns: 0 if H0 can not be rejected, 1 otherwise, mean, confidence interval

    References: [Zar2009]_
    """

    if w is None:
        w = np.ones_like(alpha, dtype=float)
    else:
        assert alpha.shape == w.shape, "Shape of w and alpha must match"

    dir = np.atleast_1d(dir)

    # (1 - xi) confidence interval around the sample mean direction
    mu, ci = descriptive.mean(alpha, w=w, d=d, axis=axis, ci=1. - xi)
    # half-width of the CI; H0 is rejected iff dir lies outside it
    t = np.abs(descriptive.cdiff(mu, ci.lower))
    h = np.abs(descriptive.cdiff(mu, dir)) > t

    return h, mu, ci


@nottest
def medtest(alpha, md, axis=None):
    """
    Tests for difference in the median against a fixed value.

    H0: the population has median angle md
    HA: the population has not median angle md

    :param alpha: sample of angles in radians
    :param md: median to test for
    :param axis: test is performed along this axis
    :returns: p-value (a valid probability, clipped at 1)
    """

    md = np.atleast_1d(md)

    n = alpha.shape[axis] if axis is not None else len(alpha)

    d = descriptive.cdiff(alpha, md)

    # counts of observations on either side of the hypothesized median
    n1 = np.atleast_1d(np.sum(d < 0, axis=axis))
    n2 = np.atleast_1d(np.sum(d > 0, axis=axis))

    # compute p-value with a two-sided binomial test
    n_min = np.array(n1)
    n_min[n1 > n2] = n2[n1 > n2]

    n_max = np.array(n1)
    n_max[n1 < n2] = n2[n1 < n2]
    # the two tails overlap when n_min == n_max, which can push the sum
    # above 1; clip so a valid probability is always returned
    pval = stats.binom.cdf(n_min, n, 0.5) + 1 - stats.binom.cdf(n_max - 1, n, 0.5)
    return np.minimum(pval, 1.)
@nottest
def hktest(alpha, idp, idq, inter=True, fn=None):
    """
    Two-factor ANOVA for circular data (appears to implement the
    Harrison-Kanji test -- TODO confirm the reference).

    :param alpha: dependent sample of angles in radians
    :param idp: factor levels of the first factor, one per observation
    :param idq: factor levels of the second factor, one per observation
    :param inter: whether to model the interaction term (large-kappa branch)
    :param fn: list with the two factor names (default ['A', 'B'])
    :returns: tuple (p1, p2, pI) of p-values for factor 1, factor 2 and the
              interaction, and a pandas DataFrame with the ANOVA table

    NOTE(review): with inter=False and estimated kappa > 2, pI is set to the
    plain float np.NaN, so ``pI.squeeze()`` below raises AttributeError --
    latent bug, left unchanged here.
    """
    if fn is None:
        fn = ['A', 'B']
    # number of levels per factor
    p = len(np.unique(idp))
    q = len(np.unique(idq))
    df = pd.DataFrame({fn[0]: idp, fn[1]: idq, 'dependent': alpha})
    n = len(df)
    # total resultant length and the concentration estimated from it;
    # kappa decides which approximation branch is used below
    tr = n * descriptive.resultant_vector_length(df['dependent'])
    kk = kappa(tr / n)

    # both factors: per-cell counts and resultant lengths
    gr = df.groupby(fn)
    cn = gr.count()
    cr = gr.agg(descriptive.resultant_vector_length) * cn
    cn = cn.unstack(fn[1])
    cr = cr.unstack(fn[1])

    # factor A: per-level counts, resultant lengths and means
    gr = df.groupby(fn[0])
    pn = gr.count()['dependent']
    pr = gr.agg(descriptive.resultant_vector_length)['dependent'] * pn
    pm = gr.agg(descriptive.mean)['dependent']
    # factor B
    gr = df.groupby(fn[1])
    qn = gr.count()['dependent']
    qr = gr.agg(descriptive.resultant_vector_length)['dependent'] * qn
    qm = gr.agg(descriptive.mean)['dependent']

    if kk > 2:  # large kappa
        # effect of factor 1
        eff_1 = sum(pr ** 2 / cn.sum(axis=1)) - tr ** 2 / n
        df_1 = p - 1
        ms_1 = eff_1 / df_1

        # effect of factor 2
        eff_2 = sum(qr ** 2. / cn.sum(axis=0)) - tr ** 2 / n
        df_2 = q - 1
        ms_2 = eff_2 / df_2

        # total effect
        eff_t = n - tr ** 2 / n
        df_t = n - 1
        # m: average cell count
        m = cn.values.mean()

        if inter:
            # correction factor for improved F statistic
            beta = 1 / (1 - 1 / (5 * kk) - 1 / (10 * (kk ** 2)))
            # residual effects
            eff_r = n - (cr**2./cn).values.sum()
            df_r = p*q*(m-1)
            ms_r = eff_r / df_r

            # interaction effects
            eff_i = (cr**2./cn).values.sum() - sum(qr**2./qn) - sum(pr**2./pn) + tr**2/n
            df_i = (p-1)*(q-1)
            ms_i = eff_i/df_i;

            # interaction test statistic
            FI = ms_i / ms_r
            pI = 1 - stats.f.cdf(FI,df_i,df_r)
        else:
            # residual effect (interaction folded into the residual)
            eff_r = n - sum(qr**2./qn)- sum(pr**2./pn) + tr**2/n
            df_r = (p-1)*(q-1)
            ms_r = eff_r / df_r

            # interaction effects
            eff_i = None
            df_i = None
            ms_i = None

            # interaction test statistic
            FI = None
            pI = np.NaN
            beta = 1


        F1 = beta * ms_1 / ms_r
        p1 = 1 - stats.f.cdf(F1,df_1,df_r)

        F2 = beta * ms_2 / ms_r
        p2 = 1 - stats.f.cdf(F2,df_2,df_r)

    else:  #small kappa
        # correction factor
        # special.iv is Modified Bessel function of the first kind of real order
        rr = special.iv(1,kk) / special.iv(0,kk)
        f = 2/(1-rr**2)

        chi1 = f * (sum(pr**2./pn)- tr**2/n)
        df_1 = 2*(p-1)
        p1 = 1 - stats.chi2.cdf(chi1, df=df_1)

        chi2 = f * (sum(qr**2./qn)- tr**2/n)
        df_2 = 2*(q-1)
        p2 = 1 - stats.chi2.cdf(chi2, df=df_2)

        chiI = f * ( (cr**2./cn).values.sum() - sum(pr**2./pn) - sum(qr**2./qn) + tr**2/n)
        df_i = (p-1) * (q-1)
        pI = stats.chi2.sf(chiI, df=df_i)



    pval = (p1.squeeze(), p2.squeeze(), pI.squeeze())

    if kk>2:
        # F-table for the large-kappa (ANOVA) branch
        table = pd.DataFrame({
            'Source': fn + ['Interaction', 'Residual', 'Total'],
            'DoF': [df_1, df_2, df_i, df_r, df_t],
            'SS': [eff_1, eff_2, eff_i, eff_r, eff_t],
            'MS': [ms_1, ms_2, ms_i, ms_r, np.NaN],
            'F': [F1.squeeze(), F2.squeeze(), FI, np.NaN, np.NaN],
            'p': list(pval) + [np.NaN, np.NaN]
        })
        table = table.set_index('Source')
    else:
        # chi-square table for the small-kappa branch
        table = pd.DataFrame({
            'Source': fn + ['Interaction'],
            'DoF': [df_1, df_2, df_i],
            'chi2': [chi1.squeeze(), chi2.squeeze(), chiI.squeeze()],
            'p': pval
        })
        table = table.set_index('Source')

    return pval, table


# --- content below belongs to setup.py in the concatenated repository dump ---
from setuptools import setup

setup(
    name="pycircstat",
    version="0.0.2",
    author="Fabian Sinz, Philipp Berens, Matthias Kuemmerer, Tom Wallis",
    author_email="philipp.berens@uni-tuebingen.de",
    description=("Toolbox for circular statistics with Python"),
    # license="MIT",
    keywords="statistics",
    # url="http://packages.python.org/pycircstat",
    packages=['pycircstat', 'tests'],
    data_files=[('data', ['data/kuiper_table.npy'])],
    # long_description=read('README'),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Utilities",
        # "License :: OSI Approved :: MIT License",
    ],
    install_requires=['nose>=1.0', 'mock', 'sphinx_rtd_theme', "decorator", "pandas", "scipy"],
)
# --- tests/test_clustering.py ---
from __future__ import absolute_import

import numpy as np

from numpy.testing import assert_allclose
from nose.tools import assert_equal, assert_true, assert_raises, raises

import pycircstat


def test_clustering():
    """Agglomerative 1-d clustering: train recovers fixed centroids, test reproduces ids."""
    x = np.asarray([6.10599459, 0.14824723, 3.11272268, 3.45703846, 5.88211171, 3.53760218,
                    4.00392159, 2.76326071, 4.52222361, 4.05978276])
    # x = np.asarray([6.10599459, 0.14824723, 3.11272268, 3.45703846, 5.88211171])

    cl = pycircstat.clustering.AggCluster1D(numclust=4)
    _, ids = cl.train(x)

    # centroid order is not guaranteed, hence the sorted comparison
    assert_allclose(sorted(cl.centroids), sorted([6.139414042024, 2.937991695000, 3.497320320000, 4.193866918144]),
                    atol=1e-4, rtol=1e-4)
    _, testids = cl.test(x)
    assert_allclose(ids, testids)
if __name__ == "__main__":
    test_clustering()


# --- tests/test_decorators.py ---
from __future__ import absolute_import

import numpy as np

from numpy.testing import assert_allclose
from nose.tools import assert_equal, assert_true

from pycircstat.decorators import swap2zeroaxis


def test_swap2zeroaxis():
    """swap2zeroaxis must route the requested axis to position 0 and back."""

    @swap2zeroaxis(['x', 'y'], [0, 1])
    def dummy(x, y, z, axis=None):
        # operates along axis 0; the decorator is responsible for the swap
        return np.mean(x[::2, ...], axis=0), np.mean(y[::2, ...], axis=0), z

    x = np.random.randn(3, 5, 7, 9)
    y = np.random.randn(3, 5, 7, 9)
    z = np.random.randn(3, 5, 7, 9)

    # axis passed as keyword
    xx, yy, zz = dummy(x, y, z, axis=1)
    assert_allclose(xx, np.mean(x[:, ::2, ...], axis=1))
    assert_allclose(yy, np.mean(y[:, ::2, ...], axis=1))
    assert_allclose(zz, z)

    # axis passed positionally
    xx, yy, zz = dummy(x, y, z, 2)
    assert_allclose(xx, np.mean(x[:, :, ::2, ...], axis=2))
    assert_allclose(yy, np.mean(y[:, :, ::2, ...], axis=2))
    assert_allclose(zz, z)

    # axis=None ravels the swapped arguments
    xx, yy, zz = dummy(x, y, z, axis=None)
    assert_allclose(xx, np.mean(x.ravel()[::2]))
    assert_allclose(yy, np.mean(y.ravel()[::2]))
    assert_allclose(zz, z)

    # default axis behaves like axis=None
    xx, yy, zz = dummy(x, y, z)
    assert_allclose(xx, np.mean(x.ravel()[::2]))
    assert_allclose(yy, np.mean(y.ravel()[::2]))
    assert_allclose(zz, z)

    @swap2zeroaxis(['x'], [0])
    def dummy(x, axis=None):
        return np.mean(x[::2, ...], axis=0)

    xx = dummy(x, axis=0)
    assert_allclose(xx, np.mean(x[::2, ...], axis=0))


if __name__ == "__main__":
    test_swap2zeroaxis()


# --- tests/test_descriptive.py (module header) ---
from __future__ import absolute_import

import numpy as np

from numpy.testing import assert_allclose
from nose.tools import assert_equal, assert_true, assert_raises, raises

import pycircstat

# descriptive statistics taking a single sample; exercised generically below
axis_1arg_test_funcs = [pycircstat.astd,
                        pycircstat.avar,
                        pycircstat.mean,
                        pycircstat.median,
                        pycircstat.resultant_vector_length,
                        pycircstat.std,
                        pycircstat.var,
                        pycircstat.skewness,
                        pycircstat.kurtosis,
                        pycircstat.moment]

# statistics taking two samples
axis_2arg_test_funcs = [pycircstat.corrcc,
                        pycircstat.corrcl]


# shared fixtures: fixed angle samples reused across the tests below
test_data_2d = np.array([
    [0.58429, 0.88333],
    [1.14892, 2.22854],
    [2.87128, 3.06369],
    [1.07677, 1.49836],
    [2.96969, 1.51748],
])

test_data_1d = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057,
                         1.54256458, 5.19290675, 2.18474784,
                         4.77054777, 1.51736933, 0.72727580])
def test_axis_1arg():
    """Every single-sample statistic must honour the axis keyword."""
    data = np.random.rand(2, 3, 5) * np.pi
    for f in axis_1arg_test_funcs:
        for a in [None, 0, 1, 2]:
            ret = f(data, axis=a)

            if a is None:
                assert_true(isinstance(ret, np.ndarray) or np.isscalar(ret))
            else:
                assert_equal(ret.shape, data.shape[:a] + data.shape[a + 1:])


def test_axis_2arg():
    """Every two-sample statistic must honour the axis keyword."""
    data = np.random.rand(2, 3, 5) * np.pi
    for f in axis_2arg_test_funcs:
        for a in [None, 0, 1, 2]:
            ret = f(data, data, axis=a)
            if a is None:
                assert_true(isinstance(ret, np.ndarray) or np.isscalar(ret))
            else:
                assert_equal(ret.shape, data.shape[:a] + data.shape[a + 1:])


# NOTE: these three were all named test_bootstrap, so only the last
# definition was ever collected and run; renamed so each one executes.
@raises(ValueError)
def test_bootstrap_wrong_scale():
    """Tests whether wrong scale raises ValueError"""
    @pycircstat.descriptive.bootstrap(1, 'wrongscale')
    def testfunc(alpha, axis=None, ci=None, bootstrap_iter=100):
        return np.array(0)

    testfunc(np.array([0, 1, 2.3, 3]), ci=.8)


@raises(ValueError)
def test_bootstrap_missing_bootstrap_iter():
    """Tests whether missing bootstrap_iter raises ValueError"""
    @pycircstat.descriptive.bootstrap(1, 'circular')
    def testfunc(alpha, axis=None, ci=None):
        return np.array(0)

    testfunc(np.array([0, 1, 2.3, 3]), ci=.8)


@raises(ValueError)
def test_bootstrap_missing_axis():
    """Tests whether missing axis raises ValueError"""
    @pycircstat.descriptive.bootstrap(1, 'circular')
    def testfunc(alpha, bootstrap_iter=100, ci=None):
        return np.array(0)

    testfunc(np.array([0, 1, 2.3, 3]), ci=.8)


def test_var():
    """Circular variance against a reference value."""
    data = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057,
                     1.54256458, 5.19290675, 2.18474784,
                     4.77054777, 1.51736933, 0.72727580])
    s = pycircstat.var(data)
    assert_allclose(0.65842, s, atol=0.001, rtol=0.001)


def test_avar():
    """Angular variance against a reference value."""
    data = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057,
                     1.54256458, 5.19290675, 2.18474784,
                     4.77054777, 1.51736933, 0.72727580])
    s = pycircstat.avar(data)
    assert_allclose(1.3168, s, atol=0.001, rtol=0.001)
def test_std():
    # circular standard deviation against a reference value
    data = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057,
                     1.54256458, 5.19290675, 2.18474784,
                     4.77054777, 1.51736933, 0.72727580])
    s = pycircstat.std(data)
    assert_allclose(1.4657, s, atol=0.001, rtol=0.001)


def test_astd():
    # angular standard deviation against a reference value
    data = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057,
                     1.54256458, 5.19290675, 2.18474784,
                     4.77054777, 1.51736933, 0.72727580])
    s = pycircstat.astd(data)
    assert_allclose(1.1475, s, atol=0.001, rtol=0.001)


def test_percentile():
    # the 5th circular percentile of a uniform grid, relative to a random
    # reference direction q0, must sit 5% of the circle beyond q0
    alpha = np.linspace(0, 2 * np.pi, int(1. / 0.0001))
    s = np.random.rand() * 2 * np.pi
    q = pycircstat.percentile(alpha, 5, q0=s)
    #print(q, s+0.05*np.pi*2)
    assert_allclose(q, (s + 0.05 * np.pi * 2) %
                    (2 * np.pi), atol=0.001, rtol=0.001)


def test_percentile_2q():
    # same, with two percentiles requested at once
    alpha = np.linspace(0, 2 * np.pi, int(1. / 0.0001))
    s = np.random.rand() * 2 * np.pi
    q = pycircstat.percentile(alpha, [5, 10], q0=s)
    #print(q, s+np.array([0.05,0.1])*np.pi*2)
    assert_allclose(q, (s + np.array([0.05, 0.1]) * np.pi * 2)
                    % (2 * np.pi), atol=0.001, rtol=0.001)


def test_percentile_2d():
    # per-row percentiles with a per-row reference direction
    alpha = np.linspace(0, 2 * np.pi, int(1. / 0.0001))[None, :] * np.ones((2, 1))
    s = np.random.rand(2) * 2 * np.pi
    q = pycircstat.percentile(alpha, 5, q0=s, axis=1)
    #print(q, (s+0.05*np.pi*2) % (2*np.pi))
    assert_allclose(q, (s + 0.05 * np.pi * 2) %
                    (2 * np.pi), atol=0.001, rtol=0.001)


def test_percentile_2d_2q():
    # per-row computation combined with multiple percentiles
    alpha = np.linspace(0, 2 * np.pi, int(1.
                                          / 0.0001))[None, :] * np.ones((2, 1))
    s = np.random.rand(2) * 2 * np.pi
    q = pycircstat.percentile(alpha, [5, 10], q0=s, axis=1)
    #print(q, s[None,:]+np.array([[0.05,0.1]]).T*np.pi*2)
    assert_allclose(q, (s[None, :] + np.array([[0.05, 0.1]]).T * np.pi * 2)
                    % (2 * np.pi), atol=0.001, rtol=0.001)


def test_median():
    # circular median along both axes and over the raveled array;
    # reference values are wrapped into [0, 2pi)
    alpha = np.array([
        [3.73153000, 1.63904879, 4.03175622, 3.90422402, 4.61029613,
         4.04117818, 5.79313473, 5.50863002, 5.81530225, 2.44973903],
        [2.12868554, 0.09073566, 0.05581025, 5.10673712, 1.68712454,
         3.72915575, 4.45439608, 4.70694685, 3.58470730, 2.49742028]
    ])
    m0 = np.array([2.93010777, 0.86489223, -1.09780942, -1.77770474,
                   -3.13447497, -2.39801834, -1.15941990, -1.17539688,
                   -1.58318053, 2.47357966]) % (2 * np.pi)
    m1 = np.array([-2.24671810, -1.24910966]) % (2 * np.pi)
    m11 = np.array([-2.24200713, -1.82878923]) % (2 * np.pi)
    mall = -2.2467 % (2 * np.pi)
    assert_allclose(pycircstat.median(alpha, axis=1), m1)
    assert_allclose(pycircstat.median(alpha[:, :-1], axis=1), m11)
    assert_allclose(pycircstat.median(alpha, axis=0), m0)
    assert_allclose(pycircstat.median(alpha), mall, atol=1e-4)


def test_median_ci():
    # for constant data the median and both CI bounds collapse to the value
    alpha = np.ones((2, 10))
    m1 = np.ones(2)
    m0 = np.ones(10)
    mout1, ci_1 = pycircstat.median(alpha, axis=1, ci=.8)
    mout0, ci_0 = pycircstat.median(alpha, axis=0, ci=.8)
    moutall, ci_all = pycircstat.median(alpha, axis=0, ci=.8)

    assert_allclose(mout1, m1)
    assert_allclose(mout0, m0)
    assert_allclose(moutall, 1.)
    assert_allclose(ci_0.lower, m0)
    assert_allclose(ci_0.upper, m0)
    assert_allclose(ci_1.lower, m1)
    assert_allclose(ci_1.upper, m1)
    assert_allclose(ci_all.lower, 1.)
    assert_allclose(ci_all.upper, 1.)


def test_circular_distance():
    # distance of a sample to itself is zero
    a = np.array([4.85065953, 0.79063862, 1.35698570])
    assert_allclose(pycircstat.cdiff(a, a), np.zeros_like(a))


def test_pairwise_circular_distance():
    # all pairwise signed circular differences against reference values
    a = np.array([4.85065953, 0.79063862, 1.35698570])
    b = np.array([5.77091494, 2.02426471])
    ret = np.array([
        [-0.92025541, 2.82639482, ],
        [1.30290899, -1.23362610, ],
        [1.86925607, -0.66727901, ]
    ])
    assert_allclose(pycircstat.pairwise_cdiff(a, b), ret)


def test_mean_constant_data():
    data = np.ones(10)

    # We cannot use `assert_equal`, due to numerical rounding errors.
    assert_allclose(pycircstat.mean(data), 1.0)


def test_mean():
    data = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057,
                     1.54256458, 5.19290675, 2.18474784,
                     4.77054777, 1.51736933, 0.72727580])

    # We cannot use `assert_equal`, due to numerical rounding errors.
    assert_allclose(pycircstat.mean(data), 1.35173983)


def test_mean_axial():
    # axial correction folds the data before averaging
    data = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057,
                     1.54256458, 5.19290675, 2.18474784,
                     4.77054777, 1.51736933, 0.72727580])
    assert_allclose(pycircstat.mean(data, axial_correction=3), 0.95902619)


def test_resultant_vector_length():
    # identical angles yield a resultant of length 1
    data = np.ones(10)
    assert_allclose(pycircstat.resultant_vector_length(data), 1.0)


def test_resultant_vector_length_axis():
    data = np.ones((10, 2))
    assert_allclose(pycircstat.resultant_vector_length(data, axis=1),
                    np.ones(10))
def test_mean_ci_limits():
    # half-widths of the 80% mean confidence interval along both axes
    data = np.array([
        [0.58429, 0.88333],
        [1.14892, 2.22854],
        [2.87128, 3.06369],
        [1.07677, 1.49836],
        [2.96969, 1.51748],
    ])
    out1 = np.array([0.76976, 0.50149])
    out2 = np.array([0.17081, 0.72910, 0.10911, 0.24385, 0.95426])
    assert_allclose(pycircstat.mean_ci_limits(data, ci=0.8, axis=0),
                    out1, rtol=1e-4)
    assert_allclose(pycircstat.mean_ci_limits(data, ci=0.8, axis=1),
                    out2, rtol=1e-4)


def test_mean_ci_2d_warning():
    # NOTE(review): expects pycircstat.mean to literally raise UserWarning
    # for an undefined CI; muplus/muminus/mu are unused here
    data = np.array([
        [0.58429, 0.88333],
        [1.14892, 2.22854],
        [2.87128, 3.06369],
        [1.07677, 1.49836],
        [2.96969, 1.51748],
    ])
    muplus = np.array([np.NaN, 2.7003])
    muminus = np.array([np.NaN, 0.89931])
    mu = np.array([1.6537, 1.7998])

    assert_raises(UserWarning, pycircstat.mean, data, ci=0.95, axis=0)


def test_mean_ci_2d():
    # mean with CI on 2-d data; a raised UserWarning is tolerated
    data = np.array([
        [0.58429, 0.88333],
        [1.14892, 2.22854],
        [2.87128, 3.06369],
        [1.07677, 1.49836],
        [2.96969, 1.51748],
    ])
    muplus = np.array([np.NaN, 2.7003])
    muminus = np.array([np.NaN, 0.89931])
    mu = np.array([1.6537, 1.7998])

    try:
        mu_tmp, (muminus_tmp, muplus_tmp) = pycircstat.mean(
            data, ci=0.95, axis=0)
        assert_allclose(muplus, muplus_tmp, rtol=1e-4)
        assert_allclose(muminus, muminus_tmp, rtol=1e-4)
        assert_allclose(mu, mu_tmp, rtol=1e-4)
    except UserWarning:
        pass


def test_mean_ci_1d():
    # mean with 95% CI on 1-d data against reference values
    data = np.array([0.88333, 2.22854, 3.06369, 1.49836, 1.51748])
    muplus = 2.7003
    muminus = 0.89931
    mu = 1.7998

    mu_tmp, (muminus_tmp, muplus_tmp) = pycircstat.mean(data, ci=0.95)
    assert_allclose(muplus, muplus_tmp, rtol=1e-4)
    assert_allclose(muminus, muminus_tmp, rtol=1e-4)
    assert_allclose(mu, mu_tmp, rtol=1e-4)


def test_center():
    # centered data has mean 0 (or, by wrapping, 2*pi)
    # NOTE(review): the bare except silently retries the wrapped comparison
    data = np.random.rand(1000) * 2 * np.pi
    try:
        assert_allclose(pycircstat.mean(pycircstat.center(data)),
                        0, rtol=1e-3, atol=1e-3)
    except:
        assert_allclose(pycircstat.mean(pycircstat.center(data)),
                        2 * np.pi, rtol=1e-3, atol=1e-3)


def test_corrcc():
    # two independent uniform samples are uncorrelated (statistical test,
    # hence the loose tolerance)
    data1 = np.random.rand(50000) * 2 * np.pi
    data2 = np.random.rand(50000) * 2 * np.pi
    assert_allclose(pycircstat.corrcc(data1, data2),
                    0., rtol=3 * 1e-2, atol=3 * 1e-2)


def test_corrcc_ci():
    # perfectly correlated data: correlation and both CI bounds are 1
    data1 = np.random.rand(200) * 2 * np.pi
    data2 = np.asarray(data1)
    exp = (1., pycircstat.CI(1., 1.))
    assert_equal(pycircstat.corrcc(data1, data2, ci=0.95), exp)


def test_corrcc_ci_2d():
    data1 = np.random.rand(2, 200) * np.pi
    data2 = np.asarray(data1)

    out1, (out2, out3) = pycircstat.corrcc(data1, data2, ci=0.95, axis=1)
    exp1, (exp2, exp3) = (np.ones(2), pycircstat.CI(np.ones(2), np.ones(2)))
    assert_allclose(out1, exp1)
    assert_allclose(out2, exp2)
    assert_allclose(out3, exp3)


def test_corrcl():
    # circular sample vs. independent linear sample: correlation near 0
    data1 = np.random.rand(50000) * 2 * np.pi
    data2 = np.random.randn(50000)
    assert_allclose(pycircstat.corrcl(data1, data2),
                    0., rtol=3 * 1e-2, atol=3 * 1e-2)


def test_moment_basic():
    """ circ.moment: test basic call... """
    data = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057,
                     1.54256458, 5.19290675, 2.18474784,
                     4.77054777, 1.51736933, 0.72727580])
    mp = pycircstat.moment(data)
    assert_allclose(mp, 0.074229066428146 + 0.333420553996661j, rtol=1e-6)


def test_kurtosis_basic():
    """ circ.kurtosis: test basic call... """
    data = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057,
                     1.54256458, 5.19290675, 2.18474784,
                     4.77054777, 1.51736933, 0.72727580])
    mp = pycircstat.kurtosis(data)
    assert_allclose(mp, 0.55260, rtol=1e-5)
""" 374 | data = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057, 375 | 1.54256458, 5.19290675, 2.18474784, 376 | 4.77054777, 1.51736933, 0.72727580]) 377 | mp = pycircstat.moment(data, cent=True) 378 | assert_allclose(mp, 3.415834014267002e-01, rtol=1e-7) 379 | 380 | 381 | def test_moment_second_order(): 382 | """ circ.moment: test second order... """ 383 | data = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057, 384 | 1.54256458, 5.19290675, 2.18474784, 385 | 4.77054777, 1.51736933, 0.72727580]) 386 | mp = pycircstat.moment(data, p=2) 387 | assert_allclose(mp, -6.729059729506420e-01 - 1.337676350865910e-01j, 388 | rtol=1e-7) 389 | 390 | 391 | def test_moment_2d_data_axis0(): 392 | """circ.moment: test 2D data (axis=0)...""" 393 | data = np.array([ 394 | [0.58429, 0.88333], 395 | [1.14892, 2.22854], 396 | [2.87128, 3.06369], 397 | [1.07677, 1.49836], 398 | [2.96969, 1.51748], 399 | ]) 400 | mp = pycircstat.moment(data, axis=0) 401 | assert_allclose(mp, [-0.046239398678727 + 0.556490077122954j, 402 | -0.169610962142131 + 0.727602093024094j], rtol=1e-7) 403 | 404 | 405 | def test_moment_2d_data_axisNone(): 406 | """circ.moment: test 2D data (axis=0)...""" 407 | data = np.array([ 408 | [0.58429, 0.88333], 409 | [1.14892, 2.22854], 410 | [2.87128, 3.06369], 411 | [1.07677, 1.49836], 412 | [2.96969, 1.51748], 413 | ]) 414 | mp = pycircstat.moment(data) 415 | assert_allclose(mp, -0.10793 + 0.64205 * 1j, rtol=1e-5) 416 | 417 | 418 | def test_kurtosis_2d_data_axis0(): 419 | """circ.kurtosis: test 2D data (axis=0)...""" 420 | data = np.array([ 421 | [0.58429, 0.88333], 422 | [1.14892, 2.22854], 423 | [2.87128, 3.06369], 424 | [1.07677, 1.49836], 425 | [2.96969, 1.51748], 426 | ]) 427 | mp = pycircstat.kurtosis(data, axis=0) 428 | assert_allclose(mp, [-0.24688, 0.24924], rtol=1e-4) 429 | 430 | 431 | def test_kurtosis_2d_data_axisNone(): 432 | """circ.kurtosis: test 2D data (axis=0)...""" 433 | data = np.array([ 434 | [0.58429, 0.88333], 435 | [1.14892, 
2.22854], 436 | [2.87128, 3.06369], 437 | [1.07677, 1.49836], 438 | [2.96969, 1.51748], 439 | ]) 440 | mp = pycircstat.kurtosis(data) 441 | assert_allclose(mp, -0.013320, rtol=1e-4) 442 | 443 | 444 | def test_kurtosis_2d_data_axisNone_fiser(): 445 | data = np.array([ 446 | [0.58429, 0.88333], 447 | [1.14892, 2.22854], 448 | [2.87128, 3.06369], 449 | [1.07677, 1.49836], 450 | [2.96969, 1.51748], 451 | ]) 452 | mp = pycircstat.kurtosis(data, mode='fisher') 453 | assert_allclose(mp, -1.5849, rtol=1e-4) 454 | 455 | 456 | def test_kurtosis_2d_data_axisNone_bootstrap(): 457 | "basically only test whether boostrapping does not throw an error" 458 | data = np.array([ 459 | [0.58429, 0.88333], 460 | [1.14892, 2.22854], 461 | [2.87128, 3.06369], 462 | [1.07677, 1.49836], 463 | [2.96969, 1.51748], 464 | ]) 465 | mp, (low, high) = pycircstat.kurtosis(data, ci=0.95) 466 | assert_allclose(mp, -0.013320, rtol=1e-4) 467 | 468 | 469 | def test_kurtosis_2d_data_axisNone_bootstrap_kw(): 470 | "basically only test whether boostrapping does not throw an error" 471 | data = np.array([ 472 | [0.58429, 0.88333], 473 | [1.14892, 2.22854], 474 | [2.87128, 3.06369], 475 | [1.07677, 1.49836], 476 | [2.96969, 1.51748], 477 | ]) 478 | mp, (low, high) = pycircstat.kurtosis(alpha=data, ci=0.95) 479 | assert_allclose(mp, -0.013320, rtol=1e-4) 480 | 481 | 482 | def test_kurtosis_2d_data_axis1(): 483 | data = np.array([ 484 | [0.58429, 0.88333], 485 | [1.14892, 2.22854], 486 | [2.87128, 3.06369], 487 | [1.07677, 1.49836], 488 | [2.96969, 1.51748], 489 | ]) 490 | mp = pycircstat.kurtosis(data, axis=1) 491 | assert_allclose( 492 | mp, [ 493 | 0.95562, 0.47166, 0.98155, 0.91244, 0.11831], rtol=1e-4) 494 | 495 | 496 | def test_kurtosis_2d_data_axis0_fisher(): 497 | data = np.array([ 498 | [0.58429, 0.88333], 499 | [1.14892, 2.22854], 500 | [2.87128, 3.06369], 501 | [1.07677, 1.49836], 502 | [2.96969, 1.51748], 503 | ]) 504 | mp = pycircstat.kurtosis(data, axis=0, mode='fisher') 505 | assert_allclose(mp, 
[-1.76462, -0.97441], rtol=1e-4) 506 | 507 | 508 | def test_moment_2d_data_axis1(): 509 | """circ.moment: test 2D data (axis=1)...""" 510 | data = np.array([ 511 | [0.58429, 0.88333], 512 | [1.14892, 2.22854], 513 | [2.87128, 3.06369], 514 | [1.07677, 1.49836], 515 | [2.96969, 1.51748], 516 | ]) 517 | mp = pycircstat.moment(data, axis=1) 518 | assert_allclose(mp, [0.734342496552133 + 0.662231811292906j, 519 | -0.100929912318249 + 0.851847926481377j, 520 | -0.980327288650584 + 0.172428315666164j, 521 | 0.273273810931458 + 0.938904227222693j, 522 | -0.465985008566902 + 0.584818144704480j], rtol=1e-7) 523 | 524 | 525 | def test_moment_2d_data_axis1_centering(): 526 | """circ.moment: test 2D data (axis=1) with centring...""" 527 | data = np.array([ 528 | [0.58429, 0.88333], 529 | [1.14892, 2.22854], 530 | [2.87128, 3.06369], 531 | [1.07677, 1.49836], 532 | [2.96969, 1.51748], 533 | ]) 534 | mp = pycircstat.moment(data, axis=1, cent=True) 535 | assert_allclose(mp, [0.988842694330449 - 0.000000000000000j, 536 | 0.857806351720009 + 0.000000000000000j, 537 | 0.995375867155958 - 0.000000000000000j, 538 | 0.977864880051249 + 0.000000000000000j, 539 | 0.747766200482936 - 0.000000000000000j], rtol=1e-7) 540 | 541 | 542 | def test_moment_7(): 543 | """ circ.moment: test bootstrapping... """ 544 | data = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057, 545 | 1.54256458, 5.19290675, 2.18474784, 546 | 4.77054777, 1.51736933, 0.72727580]) 547 | mp, (lo, hi) = pycircstat.moment(data, ci=0.8) 548 | assert_allclose(mp, 0.074229066428146 + 0.333420553996661j, rtol=1e-6) 549 | # assert_allclose(lo, -0.0871916735424+0.71239443351j, rtol=1e-3) 550 | # assert_allclose(hi, 0.238513834062+0.140762896499j, rtol=1e-3) 551 | # not sure of a good way to do tests for bootstraps. 552 | #--------------------- 553 | 554 | 555 | def test_skewness_basic(): 556 | """ circ.skewness: test basic call... 
""" 557 | data = np.array([1.80044838, 2.02938314, 1.03534016, 4.84225057, 558 | 1.54256458, 5.19290675, 2.18474784, 559 | 4.77054777, 1.51736933, 0.72727580]) 560 | mp = pycircstat.skewness(data) 561 | assert_allclose(mp, 0.40660, rtol=1e-5) 562 | 563 | 564 | def test_skewness_2d_data_axis0(): 565 | """circ.skewness: test 2D data (axis=0)...""" 566 | data = np.array([ 567 | [0.58429, 0.88333], 568 | [1.14892, 2.22854], 569 | [2.87128, 3.06369], 570 | [1.07677, 1.49836], 571 | [2.96969, 1.51748], 572 | ]) 573 | mp = pycircstat.skewness(data, axis=0) 574 | assert_allclose(mp, [-0.29339, -0.14716], rtol=1e-4) 575 | 576 | 577 | def test_skewness_2d_data_axisNone(): 578 | """circ.skewness: test 2D data (axis=0)...""" 579 | data = np.array([ 580 | [0.58429, 0.88333], 581 | [1.14892, 2.22854], 582 | [2.87128, 3.06369], 583 | [1.07677, 1.49836], 584 | [2.96969, 1.51748], 585 | ]) 586 | mp = pycircstat.skewness(data) 587 | assert_allclose(mp, -0.18157, rtol=1e-4) 588 | 589 | 590 | def test_skewness_2d_data_axisNone_fisher(): 591 | data = np.array([ 592 | [0.58429, 0.88333], 593 | [1.14892, 2.22854], 594 | [2.87128, 3.06369], 595 | [1.07677, 1.49836], 596 | [2.96969, 1.51748], 597 | ]) 598 | mp = pycircstat.skewness(data, mode='fisher') 599 | assert_allclose(mp, -0.88086, rtol=1e-4) 600 | 601 | 602 | def test_skewness_2d_data_axisNone_bootstrap(): 603 | "basically only test whether boostrapping does not throw an error" 604 | data = np.array([ 605 | [0.58429, 0.88333], 606 | [1.14892, 2.22854], 607 | [2.87128, 3.06369], 608 | [1.07677, 1.49836], 609 | [2.96969, 1.51748], 610 | ]) 611 | mp, (low, high) = pycircstat.skewness(data, ci=0.95) 612 | assert_allclose(mp, -0.18157, rtol=1e-4) 613 | 614 | 615 | def test_skewness_2d_data_axis1(): 616 | data = np.array([ 617 | [0.58429, 0.88333], 618 | [1.14892, 2.22854], 619 | [2.87128, 3.06369], 620 | [1.07677, 1.49836], 621 | [2.96969, 1.51748], 622 | ]) 623 | mp = pycircstat.skewness(data, axis=1) 624 | assert_allclose( 625 | mp, 
[-8.3267e-17, 0.0000e+00, -1.3878e-17, 1.3878e-16, 0.0000e+00], atol=1e-4) 626 | 627 | 628 | def test_skewness_2d_data_axis0_fisher(): 629 | data = np.array([ 630 | [0.58429, 0.88333], 631 | [1.14892, 2.22854], 632 | [2.87128, 3.06369], 633 | [1.07677, 1.49836], 634 | [2.96969, 1.51748], 635 | ]) 636 | mp = pycircstat.skewness(data, axis=0, mode='fisher') 637 | assert_allclose(mp, [-0.999794, -1.157187], rtol=1e-4) 638 | 639 | -------------------------------------------------------------------------------- /tests/test_distributions.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import numpy as np 4 | 5 | from numpy.testing import assert_allclose 6 | from nose.tools import assert_equal, assert_true, raises 7 | 8 | import pycircstat as circ 9 | 10 | test_data_2d = np.array([ 11 | [0.58429, 0.88333], 12 | [1.14892, 2.22854], 13 | [2.87128, 3.06369], 14 | [1.07677, 1.49836], 15 | [2.96969, 1.51748], 16 | ]) 17 | 18 | 19 | def test_kappa_basic(): 20 | """circ.kappa: test basic functionality of circ.distributions.kappa""" 21 | kappa = circ.distributions.kappa(test_data_2d) 22 | assert_allclose(kappa, 1.6221, rtol=1e-4) 23 | 24 | 25 | @raises(Exception) 26 | def test_kappa_assert(): 27 | """circ.kappa: test basic functionality of circ.distributions.kappa""" 28 | kappa = circ.distributions.kappa( 29 | test_data_2d, 30 | np.ones_like(test_data_2d)[ 31 | :, 32 | :-1]) 33 | 34 | 35 | def test_kappa_axis0(): 36 | """circ.kappa: test functionality of circ.distributions.kappa along axis=0""" 37 | kappa = circ.distributions.kappa(test_data_2d, axis=0) 38 | assert_allclose(kappa, [1.0536, 1.1514], rtol=1e-4) 39 | 40 | 41 | def test_kappa_axis0(): 42 | """circ.kappa: test functionality of circ.distributions.kappa along axis=1""" 43 | kappa = circ.distributions.kappa(test_data_2d, axis=1) 44 | assert_allclose( 45 | kappa, [ 46 | 4.50679, 0.38271, 10.83801, 2.28470, 0.23442], rtol=1e-4) 47 | 48 | 
49 | def test_triangular_pdf(): 50 | a = np.linspace(0, np.pi * 2, 10000) 51 | loc = np.random.rand() * 2 * np.pi 52 | p = circ.distributions.triangular.pdf(a, .2, loc=loc) 53 | assert_allclose(loc, a[np.argmax(p)], atol=1e-3, rtol=1e-3) 54 | 55 | 56 | def test_triangular_cdf(): 57 | a = np.linspace(0, np.pi * 2, 10000) 58 | loc = np.random.rand() * 2 * np.pi 59 | p = circ.distributions.triangular.cdf(a, .2, loc=loc) 60 | assert_allclose(1., np.max(p), atol=1e-3, rtol=1e-3) 61 | 62 | 63 | def test_cardioid_pdf(): 64 | a = np.linspace(0, np.pi * 2, 10000) 65 | loc = np.random.rand() * 2 * np.pi 66 | p = circ.distributions.cardioid.pdf(a, loc, .1) 67 | assert_allclose(loc, a[np.argmax(p)], atol=1e-3, rtol=1e-3) 68 | 69 | 70 | def test_cardioid_cdf(): 71 | a = np.linspace(0, np.pi * 2, 10000) 72 | loc = np.random.rand() * 2 * np.pi 73 | p = circ.distributions.cardioid.cdf(a, loc, .1) 74 | assert_allclose(1., np.max(p), atol=1e-3, rtol=1e-3) 75 | -------------------------------------------------------------------------------- /tests/test_event_series.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import numpy as np 4 | 5 | from numpy.testing import assert_allclose 6 | from nose.tools import assert_equal, assert_true 7 | 8 | import pycircstat 9 | from pycircstat import event_series as es 10 | 11 | 12 | def test_vector_strength_spectrum(): 13 | T = 3 # 2s 14 | sampling_rate = 10000. 
15 | firing_rate = 10 # 1000Hz 16 | 17 | s = T * np.random.rand(np.random.poisson(firing_rate * T)) 18 | 19 | w, vs_spec = es.vector_strength_spectrum(s, sampling_rate) 20 | 21 | F0 = [] 22 | R = [] 23 | lowcut, highcut = 500, 550 24 | idx = (w >= lowcut) & (w <= highcut) 25 | for i in np.where(idx)[0]: 26 | f0 = w[i] 27 | p0 = 1 / f0 28 | rho = pycircstat.resultant_vector_length((s % p0) / p0 * 2 * np.pi) 29 | 30 | F0.append(f0) 31 | R.append(rho) 32 | assert_allclose(R, vs_spec[idx]) 33 | 34 | def test_direct_vector_strength_spectrum(): 35 | T = 3 # 2s 36 | sampling_rate = 10000. 37 | firing_rate = 10 # 1000Hz 38 | 39 | s = T * np.random.rand(np.random.poisson(firing_rate * T)) 40 | 41 | w, vs_spec = es.vector_strength_spectrum(s, sampling_rate) 42 | lowcut, highcut = 500, 550 43 | idx = (w >= lowcut) & (w <= highcut) 44 | vs_2 = es.direct_vector_strength_spectrum(s, w[idx]) 45 | assert_allclose(vs_2, vs_spec[idx]) 46 | 47 | def test_direct_vector_strength_spectrum_parallel(): 48 | T = 3 # 2s 49 | sampling_rate = 10000. 
50 | firing_rate = 10 # 1000Hz 51 | 52 | s = T * np.random.rand(np.random.poisson(firing_rate * T)) 53 | 54 | w, vs_spec = es.vector_strength_spectrum(s, sampling_rate) 55 | lowcut, highcut = 1, 1400 56 | idx = (w >= lowcut) & (w <= highcut) 57 | vs_2 = es.direct_vector_strength_spectrum(s, w[idx]) 58 | assert_allclose(vs_2, vs_spec[idx], rtol=1e-4, atol=1e-4) 59 | 60 | -------------------------------------------------------------------------------- /tests/test_regression.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division 2 | 3 | import numpy as np 4 | 5 | from numpy.testing import assert_allclose 6 | from nose.tools import assert_equal, assert_true 7 | from pycircstat.regression import CL1stOrderRegression, CCTrigonometricPolynomialRegression 8 | 9 | 10 | def test_circlinregression(): 11 | alpha = np.random.rand(200)*np.pi*2 12 | a0 = np.random.rand()*2*np.pi 13 | A0 = np.abs(np.random.randn()) 14 | m0 = np.random.randn()*10 15 | 16 | x = m0 + A0*np.cos(alpha - a0) 17 | 18 | reg = CL1stOrderRegression() 19 | reg.train(alpha, x) 20 | m = reg._coef[-1] 21 | a = np.arctan2(reg._coef[1], reg._coef[0]) % (2*np.pi) 22 | A = np.sqrt(reg._coef[1]**2 + reg._coef[0]**2) 23 | 24 | 25 | assert_allclose(A,A0,err_msg="amplitudes do not match") 26 | assert_allclose(m,m0,err_msg="offsets do not match") 27 | assert_allclose(a,a0,err_msg="base angles do not match") 28 | 29 | 30 | def test_circlin_prediction(): 31 | alpha = np.random.rand(200)*np.pi*2 32 | a0 = np.random.rand()*2*np.pi 33 | A0 = np.abs(np.random.randn()) 34 | m0 = np.random.randn()*10 35 | 36 | x = m0 + A0*np.cos(alpha - a0) 37 | 38 | reg = CL1stOrderRegression() 39 | reg.train(alpha, x) 40 | 41 | x2 = reg(alpha) 42 | 43 | assert_allclose(x,x2,err_msg="predictions do not match") 44 | 45 | 46 | def test_circlin_test(): 47 | alpha = np.random.rand(200)*np.pi*2 48 | x = np.random.randn(200) 49 | 50 | reg = CL1stOrderRegression() 51 | 
res = reg.test(alpha, x) 52 | assert_true(res.loc['Liddell-Ord','p'] > 0.0001, 'p-value is smaller than 0.0001') 53 | 54 | 55 | 56 | def test_circcirc_regression(): 57 | alpha = np.random.rand(1000)*np.pi*2 58 | beta = np.cos(alpha + np.random.rand()*2*np.pi)*np.pi 59 | reg = CCTrigonometricPolynomialRegression(degree=10) 60 | reg.train(alpha, beta) 61 | beta2 = reg(alpha) 62 | 63 | assert_allclose(beta,beta2,err_msg="predictions do not match", atol=1e-4, rtol=1e-4) 64 | -------------------------------------------------------------------------------- /tests/test_tests.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import warnings 3 | 4 | import numpy as np 5 | 6 | from numpy.testing import assert_allclose 7 | from nose.tools import assert_equal, assert_true 8 | 9 | import pycircstat 10 | from pycircstat.tests import _sample_cdf 11 | 12 | 13 | def test_rayleigh(): 14 | data = np.array([ 15 | -0.94904375, 0.26575165, -0.03226759, 1.98389239, 2.0084918, 16 | 0.56965871, -0.19199522, -0.33080002, -0.03141245, 1.18560637, 17 | 4.31138236, 4.84776075, 2.10336819, 3.45465972, 4.64572697, 18 | 5.80976322, 6.27160926, 0.67979652, 1.33776004, 0.58059308]) 19 | p, z = pycircstat.tests.rayleigh(data) 20 | assert_allclose(0.0400878, p, atol=0.001, rtol=0.001) 21 | assert_allclose(3.1678, z, atol=0.001, rtol=0.001) 22 | 23 | 24 | def test_rayleightest2(): 25 | data = np.random.rand(10, 20, 5) * np.pi * 2. 26 | p, z = pycircstat.tests.rayleigh(data, axis=0) 27 | assert_true(p.shape == (20, 5)) 28 | for i in range(data.shape[1]): 29 | for j in range(data.shape[2]): 30 | p2, z2 = pycircstat.tests.rayleigh(data[:, i, j]) 31 | assert_allclose(p[i, j], p2, atol=0.001, rtol=0.001) 32 | assert_allclose(z[i, j], z2, atol=0.001, rtol=0.001) 33 | 34 | 35 | def test_rayleightest3(): 36 | data = np.random.rand(10, 20, 5) * np.pi * 2. 
37 | p, z = pycircstat.tests.rayleigh(data, axis=1) 38 | assert_true(p.shape == (10, 5)) 39 | for i in range(data.shape[0]): 40 | for j in range(data.shape[2]): 41 | p2, z2 = pycircstat.tests.rayleigh(data[i, :, j]) 42 | assert_allclose(p[i, j], p2, atol=0.001, rtol=0.001) 43 | assert_allclose(z[i, j], z2, atol=0.001, rtol=0.001) 44 | 45 | 46 | def test_omnibus(): 47 | data = np.array([ 48 | -1.78277804, 0.20180845, -1.51291097, 0.57329272, 0.72195959, 49 | 1.59947271, 1.4666837, -0.36532379, 1.4455209, 0.77365236, 50 | 5.8678466, 2.58327349, 0.25429634, 1.74902778, 4.13215085, 51 | 0.20612467, 1.38484181, 1.72546928, 3.33570062, 3.96191276]) 52 | p, m = pycircstat.tests.omnibus(data) 53 | assert_allclose(0.295715, p, atol=0.001, rtol=0.001) 54 | assert_equal(5, m) 55 | 56 | 57 | def test_omnibus2(): 58 | data = np.random.rand(10, 20, 5) * np.pi * 2. 59 | p, m = pycircstat.tests.omnibus(data, axis=0) 60 | assert_true(p.shape == (20, 5)) 61 | assert_true(m.shape == (20, 5)) 62 | for i in range(data.shape[1]): 63 | for j in range(data.shape[2]): 64 | p2, m2 = pycircstat.tests.omnibus(data[:, i, j]) 65 | assert_allclose(p[i, j], p2, atol=0.001, rtol=0.001) 66 | assert_allclose(m[i, j], m2, atol=0.001, rtol=0.001) 67 | 68 | 69 | def test_omnibus3(): 70 | data = np.random.rand(10, 20, 5) * np.pi * 2. 
71 | p, m = pycircstat.tests.omnibus(data, axis=1) 72 | assert_true(p.shape == (10, 5)) 73 | assert_true(m.shape == (10, 5)) 74 | for i in range(data.shape[0]): 75 | for j in range(data.shape[2]): 76 | p2, m2 = pycircstat.tests.omnibus(data[i, :, j]) 77 | assert_allclose(p[i, j], p2, atol=0.001, rtol=0.001) 78 | assert_allclose(m[i, j], m2, atol=0.001, rtol=0.001) 79 | 80 | 81 | def test_raospacing(): 82 | data = np.array([ 83 | -1.78277804, 0.20180845, -1.51291097, 0.57329272, 0.72195959, 84 | 1.59947271, 1.4666837, -0.36532379, 1.4455209, 0.77365236, 85 | 5.8678466, 2.58327349, 0.25429634, 1.74902778, 4.13215085, 86 | 0.20612467, 1.38484181, 1.72546928, 3.33570062, 3.96191276]) 87 | p, U, Uc = pycircstat.tests.raospacing(data) 88 | assert_allclose(0.001, p, atol=0.0001, rtol=0.001) 89 | assert_allclose(233.7789, U, atol=0.001, rtol=0.001) 90 | assert_allclose(192.17, Uc, atol=0.001, rtol=0.001) 91 | 92 | 93 | def test_raospacing2(): 94 | data = np.random.rand(10, 20, 5) * np.pi * 2. 95 | p, U, Uc = pycircstat.tests.raospacing(data, axis=0) 96 | assert_true(p.shape == (20, 5)) 97 | assert_true(U.shape == (20, 5)) 98 | assert_true(Uc.shape == (20, 5)) 99 | for i in range(data.shape[1]): 100 | for j in range(data.shape[2]): 101 | p2, U2, Uc2 = pycircstat.tests.raospacing(data[:, i, j]) 102 | assert_allclose(p[i, j], p2, atol=0.001, rtol=0.001) 103 | assert_allclose(U[i, j], U2, atol=0.001, rtol=0.001) 104 | assert_allclose(Uc[i, j], Uc2, atol=0.001, rtol=0.001) 105 | 106 | 107 | def test_raospacing3(): 108 | data = np.random.rand(10, 20, 5) * np.pi * 2. 
109 | p, U, Uc = pycircstat.tests.raospacing(data, axis=1) 110 | assert_true(p.shape == (10, 5)) 111 | assert_true(U.shape == (10, 5)) 112 | assert_true(Uc.shape == (10, 5)) 113 | for i in range(data.shape[0]): 114 | for j in range(data.shape[2]): 115 | p2, U2, Uc2 = pycircstat.tests.raospacing(data[i, :, j]) 116 | assert_allclose(p[i, j], p2, atol=0.001, rtol=0.001) 117 | assert_allclose(U[i, j], U2, atol=0.001, rtol=0.001) 118 | assert_allclose(Uc[i, j], Uc2, atol=0.001, rtol=0.001) 119 | 120 | 121 | def test_vtest(): 122 | data = np.array([ 123 | -1.78277804, 0.20180845, -1.51291097, 0.57329272, 0.72195959, 124 | 1.59947271, 1.4666837, -0.36532379, 1.4455209, 0.77365236, 125 | 5.8678466, 2.58327349, 0.25429634, 1.74902778, 4.13215085, 126 | 0.20612467, 1.38484181, 1.72546928, 3.33570062, 3.96191276]) 127 | p, V = pycircstat.tests.vtest(data, 0) 128 | assert_allclose(0.1074, p, atol=0.001, rtol=0.001) 129 | assert_allclose(3.9230, V, atol=0.001, rtol=0.001) 130 | 131 | 132 | def test_vtest2(): 133 | data = np.random.rand(10, 20, 5) * np.pi * 2. 134 | p, V = pycircstat.tests.vtest(data, 0, axis=0) 135 | assert_true(p.shape == (20, 5)) 136 | for i in range(data.shape[1]): 137 | for j in range(data.shape[2]): 138 | p2, V2 = pycircstat.tests.vtest(data[:, i, j], 0) 139 | assert_allclose(p[i, j], p2, atol=0.001, rtol=0.001) 140 | assert_allclose(V[i, j], V2, atol=0.001, rtol=0.001) 141 | 142 | 143 | def test_vtest3(): 144 | data = np.random.rand(10, 20, 5) * np.pi * 2. 
145 | p, V = pycircstat.tests.vtest(data, 0, axis=1) 146 | assert_true(p.shape == (10, 5)) 147 | for i in range(data.shape[0]): 148 | for j in range(data.shape[2]): 149 | p2, V2 = pycircstat.tests.vtest(data[i, :, j], 0) 150 | assert_allclose(p[i, j], p2, atol=0.001, rtol=0.001) 151 | assert_allclose(V[i, j], V2, atol=0.001, rtol=0.001) 152 | 153 | 154 | def test_symtest(): 155 | data = np.array([ 156 | -1.78277804, 0.20180845, -1.51291097, 0.57329272, 0.72195959, 157 | 1.59947271, 1.4666837, -0.36532379, 1.4455209, 0.77365236, 158 | 5.8678466, 2.58327349, 0.25429634, 1.74902778, 4.13215085, 159 | 0.20612467, 1.38484181, 1.72546928, 3.33570062, 3.96191276]) 160 | p, T = pycircstat.tests.symtest(data) 161 | assert_allclose(0.295877, p, atol=0.001, rtol=0.001) 162 | 163 | 164 | def test_symtest2(): 165 | data = np.random.rand(10, 20, 5) * np.pi * 2. 166 | p, T = pycircstat.tests.symtest(data, axis=0) 167 | assert_true(p.shape == (20, 5)) 168 | for i in range(data.shape[1]): 169 | for j in range(data.shape[2]): 170 | p2, T2 = pycircstat.tests.symtest(data[:, i, j]) 171 | assert_equal(p[i, j], p2) 172 | assert_equal(T[i, j], T2) 173 | 174 | 175 | def test_symtest3(): 176 | data = np.random.rand(10, 20, 5) * np.pi * 2. 
177 | p, T = pycircstat.tests.symtest(data, axis=1) 178 | assert_true(p.shape == (10, 5)) 179 | for i in range(data.shape[0]): 180 | for j in range(data.shape[2]): 181 | p2, T2 = pycircstat.tests.symtest(data[i, :, j]) 182 | assert_equal(p[i, j], p2) 183 | assert_equal(T[i, j], T2) 184 | 185 | 186 | def test_watson_williams(): 187 | dat1 = np.radians([135., 145, 125, 140, 165, 170]) 188 | dat2 = np.radians([150, 130, 175, 190, 180, 220]) 189 | dat3 = np.radians([140, 165, 185, 180, 125, 175, 140]) 190 | p, T = pycircstat.watson_williams(dat1, dat2, dat3) 191 | assert_allclose(p, 0.1870637, atol=0.001, rtol=0.001) 192 | 193 | 194 | def test_watson_williams_nd(): 195 | dat1 = np.tile(np.radians([135., 145, 125, 140, 165, 170]), (3, 4, 1)) 196 | dat2 = np.tile(np.radians([150, 130, 175, 190, 180, 220]), (3, 4, 1)) 197 | dat3 = np.tile(np.radians([140, 165, 185, 180, 125, 175, 140]), (3, 4, 1)) 198 | p, T = pycircstat.watson_williams(dat1, dat2, dat3, axis=2) 199 | assert_true( 200 | p.shape == ( 201 | 3, 202 | 4), 203 | "return pvalue array does not have right shape") 204 | assert_allclose(p, 0.1870637, atol=0.0001, rtol=0.0001) 205 | 206 | 207 | def test_sample_cdf(): 208 | alpha = np.asarray([3.427109860970, 209 | 0.649035328217, 210 | 0.478105054362, 211 | 3.585976113724, 212 | 2.436564305605, 213 | 2.397389764489, 214 | 0.223860727758, 215 | 3.810441709350, 216 | 2.194680923763, 217 | 5.423136274255]) 218 | 219 | pos, cdf = _sample_cdf(alpha, resolution=5) 220 | pos0 = np.asarray([0.000000000000, 221 | 1.256637061436, 222 | 2.513274122872, 223 | 3.769911184308, 224 | 5.026548245744]) 225 | cdf0 = np.asarray([0.300000000000, 226 | 0.600000000000, 227 | 0.800000000000, 228 | 0.900000000000, 229 | 1.000000000000]) 230 | 231 | assert_allclose( 232 | pos, 233 | pos0, 234 | rtol=1e-4, 235 | atol=1e-4, 236 | err_msg="Error in evaluated positions.") 237 | assert_allclose( 238 | cdf, 239 | cdf0, 240 | rtol=1e-4, 241 | atol=1e-4, 242 | err_msg="Error in evaluated cdfs.") 
243 | 244 | 245 | def test_kuiper_warning(): 246 | unknown_N = 26 247 | alpha1 = np.random.rand(unknown_N)*2.*np.pi 248 | alpha2 = np.random.rand(unknown_N)*2.*np.pi 249 | 250 | with warnings.catch_warnings(record=True) as w: 251 | warnings.simplefilter("always") 252 | p, k = pycircstat.kuiper(alpha1, alpha2) 253 | 254 | assert len(w) == 1 255 | assert issubclass(w[-1].category, UserWarning) 256 | assert "N=%d not found in table" % unknown_N in str(w[-1].message) 257 | 258 | 259 | def test_kuiper(): 260 | alpha1 = np.asarray([0.291662278945, 261 | 5.899415544666, 262 | 5.402236718096, 263 | 3.728212505263, 264 | 5.303188109786, 265 | 3.737946900082, 266 | 3.850015526787, 267 | 4.902154536516, 268 | 3.631621444982, 269 | 5.341562525096]) 270 | alpha2 = np.asarray([0.613650458799, 271 | 2.109660249330, 272 | 3.617555161298, 273 | 6.196794760548, 274 | 1.856071575830, 275 | 2.991480015107, 276 | 1.789200626487, 277 | 4.835921843822, 278 | 2.767491245457, 279 | 1.744565591973]) 280 | p0 = 0.1 281 | k0 = 70 282 | K0 = 67.395 283 | with warnings.catch_warnings(): 284 | warnings.simplefilter("ignore") 285 | p, k = pycircstat.kuiper(alpha1, alpha2) 286 | assert_allclose(p, p0, rtol=1e-4, atol=1e-4, err_msg="Error in p-values.") 287 | assert_allclose( 288 | k, 289 | k0, 290 | rtol=1e-4, 291 | atol=1e-4, 292 | err_msg="Error in statistic k.") 293 | 294 | 295 | def test_kuiper_anglerange(): 296 | alpha1 = np.asarray([0.291662278945, 297 | 5.899415544666, 298 | 5.402236718096, 299 | 3.728212505263, 300 | 5.303188109786, 301 | 3.737946900082, 302 | 3.850015526787, 303 | 4.902154536516, 304 | 3.631621444982, 305 | 5.341562525096]) 306 | alpha2 = np.asarray([0.613650458799, 307 | 2.109660249330, 308 | 3.617555161298, 309 | 6.196794760548, 310 | 1.856071575830, 311 | 2.991480015107, 312 | 1.789200626487, 313 | 4.835921843822, 314 | 2.767491245457, 315 | 1.744565591973]) 316 | alpha1b = alpha1 - np.pi 317 | alpha2b = alpha2 - np.pi 318 | 319 | with warnings.catch_warnings(): 320 | 
warnings.simplefilter("ignore") 321 | p, k = pycircstat.kuiper(alpha1, alpha2) 322 | pb, kb = pycircstat.kuiper(alpha1b, alpha2b) 323 | 324 | assert_allclose(p, 325 | pb, 326 | rtol=1e-4, 327 | atol=1e-4, 328 | err_msg="P-values are not equal" 329 | ) 330 | assert_allclose(k, 331 | kb, 332 | rtol=1e-4, 333 | atol=1e-4, 334 | err_msg="K statistics not equal" 335 | ) 336 | 337 | 338 | def test_kuiper2(): 339 | data1 = np.random.rand(10, 20, 30) * np.pi * 2. 340 | data2 = np.random.rand(10, 20, 30) * np.pi * 2. 341 | with warnings.catch_warnings(): 342 | warnings.simplefilter("ignore") 343 | p, k = pycircstat.tests.kuiper(data1, data2, axis=0) 344 | assert_true(p.shape == (20, 30)) 345 | for i in range(data1.shape[1]): 346 | for j in range(data1.shape[2]): 347 | p2, k2 = pycircstat.tests.kuiper( 348 | data1[ 349 | :, i, j], data2[ 350 | :, i, j]) 351 | assert_equal(p[i, j], p2) 352 | assert_equal(k[i, j], k2) 353 | 354 | 355 | def test_kuiper3(): 356 | data1 = np.random.rand(15, 20, 30) * np.pi * 2. 357 | data2 = np.random.rand(15, 20, 30) * np.pi * 2. 
358 | with warnings.catch_warnings(): 359 | warnings.simplefilter("ignore") 360 | p, k = pycircstat.tests.kuiper(data1, data2, axis=1) 361 | assert_true(p.shape == (15, 30)) 362 | for i in range(data1.shape[0]): 363 | for j in range(data1.shape[2]): 364 | p2, k2 = pycircstat.tests.kuiper( 365 | data1[ 366 | i, :, j], data2[ 367 | i, :, j]) 368 | assert_equal(p[i, j], p2) 369 | assert_equal(k[i, j], k2) 370 | 371 | 372 | def test_cmtest(): 373 | dat1 = np.asarray([0.024891365256, 1.154163238164, 4.733014717534, 5.826436742709, 1.824161057257, 374 | 3.104228689593, 2.295866347225, 2.940906167843, 1.651790079113, 2.158209295126]) 375 | dat2 = np.asarray([1.794568099336, 0.773881541697, 3.937284518835, 6.227193753275, 5.824618281411, 376 | 1.244981875150, 4.016281148318, 5.767162249130, 1.546435573244, 2.879568308187, 377 | 4.359455634689, 3.048152268480, 5.747615761469, 2.118080736064, 1.604702366088]) 378 | 379 | p0 = 0.51329 380 | P0 = 0.42735 381 | p, P = pycircstat.cmtest(dat1, dat2) 382 | assert_allclose(p, p0, atol=1e-4, rtol=1e-4) 383 | assert_allclose(P, P0, atol=1e-4, rtol=1e-4) 384 | 385 | def test_cmtest2(): 386 | data1 = np.random.rand(3, 2, 15) * np.pi * 2. 387 | data2 = np.random.rand(3, 2, 15) * np.pi * 2. 388 | p, P = pycircstat.tests.cmtest(data1, data2, axis=2) 389 | assert_true(p.shape == (3, 2)) 390 | assert_true(P.shape == (3, 2)) 391 | for i in range(data1.shape[0]): 392 | for j in range(data1.shape[1]): 393 | p2, P2 = pycircstat.tests.cmtest( 394 | data1[i, j, :], 395 | data2[i, j, :]) 396 | assert_equal(p[i, j], p2) 397 | assert_equal(P[i, j], P2) 398 | 399 | def test_cmtest3(): 400 | data1 = np.random.rand(3, 15, 2) * np.pi * 2. 401 | data2 = np.random.rand(3, 15, 2) * np.pi * 2. 
402 | p, P = pycircstat.tests.cmtest(data1, data2, axis=1) 403 | assert_true(p.shape == (3, 2)) 404 | assert_true(P.shape == (3, 2)) 405 | for i in range(data1.shape[0]): 406 | for j in range(data1.shape[2]): 407 | p2, P2 = pycircstat.tests.cmtest( 408 | data1[i, :, j], 409 | data2[i, :, j]) 410 | assert_equal(p[i, j], p2) 411 | assert_equal(P[i, j], P2) 412 | 413 | def test_mtest(): 414 | data = np.array([ 415 | [0.58429, 0.88333], 416 | [1.14892, 2.22854], 417 | [2.87128, 3.06369], 418 | [1.07677, 1.49836], 419 | [2.96969, 1.51748], 420 | ]) 421 | h, mu, ci = pycircstat.tests.mtest(data, [np.pi/2., np.pi], xi=.2, axis=0) 422 | out1 = np.array([0.76976, 0.50149]) 423 | assert_allclose(pycircstat.mean_ci_limits(data, ci=0.8, axis=0), 424 | out1, rtol=1e-4) 425 | assert_true(np.all(h == [False, True])) 426 | 427 | h, mu, ci = pycircstat.tests.mtest(data, np.pi/2., xi=.2, axis=1) 428 | out2 = np.array([0.17081, 0.72910, 0.10911, 0.24385, 0.95426]) 429 | assert_allclose(pycircstat.mean_ci_limits(data, ci=0.8, axis=1), 430 | out2, rtol=1e-4) 431 | assert_true(np.all(h == [True, False, True, True, False])) 432 | 433 | out3 = np.array([1.0577, 2.4170]) 434 | h, mu, ci = pycircstat.tests.mtest(data, np.pi/2., xi=.05, axis=None) 435 | assert_allclose(mu + pycircstat.mean_ci_limits(data, ci=0.95, axis=None), 436 | out3[1], rtol=1e-4) 437 | assert_allclose(mu - pycircstat.mean_ci_limits(data, ci=0.95, axis=None), 438 | out3[0], rtol=1e-4) 439 | assert_true(~h) 440 | assert_allclose(mu, 1.737335083370) 441 | 442 | 443 | def test_medtest(): 444 | x = np.asarray([3.315758071345, 3.142617638587, 2.997993073470, 3.100895432237, 2.943782900228, 445 | 2.986354128533, 3.101125456006, 3.338854852289, 3.266087254291, 3.075897304454]) 446 | p = pycircstat.medtest(x, np.pi) 447 | assert_allclose(p, 0.753906250000) 448 | 449 | 450 | def test_medtest2(): 451 | data1 = np.random.rand(3, 15, 2) * np.pi * 2. 
452 | 453 | p = pycircstat.tests.medtest(data1, np.pi, axis=1) 454 | assert_true(p.shape == (3, 2)) 455 | 456 | for i in range(data1.shape[0]): 457 | for j in range(data1.shape[2]): 458 | p2 = pycircstat.tests.medtest( 459 | data1[i, :, j], 460 | np.pi) 461 | assert_equal(p[i, j], p2) 462 | 463 | 464 | 465 | def test_medtest3(): 466 | data1 = np.random.rand(4, 3, 2, 15) * np.pi * 2. 467 | 468 | p = pycircstat.tests.medtest(data1, np.pi, axis=3) 469 | assert_true(p.shape == (4, 3, 2)) 470 | 471 | for k in range(data1.shape[0]): 472 | for i in range(data1.shape[1]): 473 | for j in range(data1.shape[2]): 474 | p2 = pycircstat.tests.medtest( 475 | data1[k, i, j, :], 476 | np.pi) 477 | assert_equal(p[k, i, j], p2) 478 | 479 | 480 | def test_hktest_small_kk(): 481 | i1 = np.array([0,0,0,0,0,0,1,1,1,1,1,1], dtype=float) 482 | i2 = np.array([0,0,0,1,1,1,0,0,0,1,1,1], dtype=float) 483 | #x = i1 + i2 + pycircstat.distributions.vonmises.rvs(1., size=(12,)) 484 | x = np.asarray([0.99337167, 1.3048058, -0.564455, -0.58595483, 1.86393676, 1.64968293, 485 | 0.91179427, 1.39659711, -0.39081637, 4.14584743, 3.56588686, 0.21249064]) 486 | 487 | 488 | p, t = pycircstat.hktest(x,i1, i2) 489 | assert_allclose(p, (0.55048471,0.19841072, 0.22443191),rtol=1e-4, atol=1e-4) 490 | assert_allclose(t.loc['A','DoF'], 2) 491 | assert_allclose(t.loc['B','DoF'], 2) 492 | assert_allclose(t.loc['Interaction','DoF'], 1) 493 | assert_allclose(t.loc['A','chi2'], 1.19391219,rtol=1e-4, atol=1e-4) 494 | assert_allclose(t.loc['B','chi2'], 3.23483213,rtol=1e-4, atol=1e-4) 495 | assert_allclose(t.loc['Interaction','chi2'], 1.47580575,rtol=1e-4, atol=1e-4) 496 | assert_allclose(t.loc['A','p'], 0.55048471,rtol=1e-4, atol=1e-4) 497 | assert_allclose(t.loc['B','p'], 0.19841072,rtol=1e-4, atol=1e-4) 498 | assert_allclose(t.loc['Interaction','p'], 0.22443191,rtol=1e-4, atol=1e-4) 499 | 500 | def test_hktest_large_kk(): 501 | i1 = np.array([0,0,0,0,0,0,1,1,1,1,1,1], dtype=float) 502 | i2 = 
np.array([0,0,0,1,1,1,0,0,0,1,1,1], dtype=float) 503 | x = np.asarray([-0.31144765, 0.17687973, -0.08186773, 1.40032699, 1.51165367, 0.86895995, 504 | 1.43558624, 0.93851377, 1.06855936, 2.07143408, 1.80262882, 2.14141867]) 505 | 506 | 507 | p, t = pycircstat.hktest(x,i1, i2) 508 | assert_allclose(p, (0.00049360, 0.00021577, 0.01092744),rtol=1e-4, atol=1e-4) 509 | assert_allclose(t.loc['A','DoF'], 1) 510 | assert_allclose(t.loc['B','DoF'], 1) 511 | assert_allclose(t.loc['Interaction','DoF'], 1) 512 | assert_allclose(t.loc['Residual','DoF'], 8) 513 | 514 | assert_allclose(t.loc['A','SS'], 1.90311078,rtol=1e-4, atol=1e-4) 515 | assert_allclose(t.loc['A','MS'], 1.90311078,rtol=1e-4, atol=1e-4) 516 | assert_allclose(t.loc['A','F'], 31.67984670,rtol=1e-4, atol=1e-4) 517 | assert_allclose(t.loc['A','p'], 0.00049360,rtol=1e-4, atol=1e-4) 518 | 519 | assert_allclose(t.loc['B','SS'], 2.43814058,rtol=1e-4, atol=1e-4) 520 | assert_allclose(t.loc['B','MS'], 2.43814058,rtol=1e-4, atol=1e-4) 521 | assert_allclose(t.loc['B','F'], 40.58613970,rtol=1e-4, atol=1e-4) 522 | assert_allclose(t.loc['B','p'], 0.00021577,rtol=1e-4, atol=1e-4) 523 | 524 | assert_allclose(t.loc['Interaction','SS'], 0.73433912,rtol=1e-4, atol=1e-4) 525 | assert_allclose(t.loc['Interaction','MS'], 0.73433912,rtol=1e-4, atol=1e-4) 526 | assert_allclose(t.loc['Interaction','F'], 10.86215049,rtol=1e-4, atol=1e-4) 527 | assert_allclose(t.loc['Interaction','p'], 0.01092744,rtol=1e-4, atol=1e-4) 528 | 529 | assert_allclose(t.loc['Residual','SS'], 0.54084253,rtol=1e-4, atol=1e-4) 530 | assert_allclose(t.loc['Residual','MS'], 0.06760532,rtol=1e-4, atol=1e-4) 531 | 532 | assert_allclose(t.loc['Total','SS'], 5.616433,rtol=1e-4, atol=1e-4) 533 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import numpy as np 4 | 5 | from numpy.testing 
import assert_allclose 6 | 7 | from pycircstat import utils 8 | --------------------------------------------------------------------------------