├── .codeclimate.yml ├── .github └── workflows │ ├── black.yml │ ├── check-test-coverage.yml │ ├── python-publish-pypi.yml │ ├── python-publish-testpypi.yml │ ├── python-runtests-basic.yml │ └── python-runtests-img-comp.yml ├── .gitignore ├── .pre-commit-config.yaml ├── LICENSE ├── check_paramnormal.py ├── conda.recipe ├── dev │ └── meta.yaml └── stable │ └── meta.yaml ├── docs ├── Makefile ├── api.rst ├── api │ ├── activity.rst │ ├── paramnormal.rst │ └── utils.rst ├── conf.py ├── examples │ └── index.rst ├── includeme.rst ├── index.rst ├── make.bat ├── sphinxext │ ├── ipython_console_highlighting.py │ ├── ipython_directive.py │ ├── plot_directive.py │ ├── plot_generator.py │ └── requirements.txt └── tutorial │ ├── Makefile │ ├── activities.ipynb │ ├── fitting.ipynb │ ├── overview.ipynb │ └── tools │ ├── nb_to_doc.py │ └── nbstripout ├── paramnormal ├── __init__.py ├── activity.py ├── dist.py ├── tests │ ├── __init__.py │ ├── baseline_images │ │ ├── __init__.py │ │ └── test_activity │ │ │ ├── __init__.py │ │ │ ├── test_plot_cdf_basic.png │ │ │ ├── test_plot_cdf_fit.png │ │ │ ├── test_plot_cdf_xlog.png │ │ │ ├── test_plot_pdf_basic.png │ │ │ ├── test_plot_pdf_fit.png │ │ │ ├── test_plot_pdf_xlog.png │ │ │ ├── test_plot_sf_basic.png │ │ │ ├── test_plot_sf_fit.png │ │ │ └── test_plot_sf_xlog.png │ ├── test_activity.py │ ├── test_dist.py │ └── test_utils.py └── utils.py ├── readme.rst ├── requirements.txt ├── requirements_dev.txt ├── ruff.toml ├── setup.cfg └── setup.py /.codeclimate.yml: -------------------------------------------------------------------------------- 1 | languages: 2 | Python: true 3 | exclude_paths: 4 | - "docs/*" 5 | - "docs/*/*" 6 | - "paramnormal/tests/*" 7 | - "conda.recipe/*" 8 | -------------------------------------------------------------------------------- /.github/workflows/black.yml: -------------------------------------------------------------------------------- 1 | name: ruff 'n' stuff 2 | on: [pull_request] 3 | 4 | jobs: 5 | build: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - uses: actions/checkout@v3 9 | - name: Install Python 10 | uses: actions/setup-python@v4 11 | with: 12 | python-version: "3.12" 13 | - name: Install dependencies 14 | run: | 15 | python -m pip install --upgrade pip 16 | pip install ruff 17 | # Update output format to enable automatic inline annotations. 18 | - name: Run Ruff 19 | run: ruff check --output-format=github . 
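      # The same check can be reproduced locally before pushing -- a minimal
      # sketch, assuming ruff is installed in the active environment
      # (e.g. `pip install ruff`):
      #
      #   ruff check --output-format=full .
      #   ruff format --check .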
20 | -------------------------------------------------------------------------------- /.github/workflows/check-test-coverage.yml: -------------------------------------------------------------------------------- 1 | name: Run coverage via codecov 2 | on: 3 | push: 4 | branches: [ main ] 5 | pull_request: 6 | branches: [ main ] 7 | 8 | jobs: 9 | run: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v2 13 | - name: Setup Python 14 | uses: actions/setup-python@v2 15 | with: 16 | python-version: 3.11 17 | - name: Generate coverage report 18 | run: | 19 | python -m pip install --upgrade pip 20 | pip install flake8 pytest 21 | if [ -f requirements_dev.txt ]; then pip install -r requirements_dev.txt; fi 22 | coverage run --source paramnormal check_paramnormal.py --doctest-modules --cov-report=xml 23 | - name: Upload coverage to Codecov 24 | uses: codecov/codecov-action@v1 25 | with: 26 | # directory: ./coverage/reports/ 27 | flags: unittests 28 | name: codecov-umbrella 29 | fail_ci_if_error: true 30 | path_to_write_report: ./codecov_report.gz 31 | -------------------------------------------------------------------------------- /.github/workflows/python-publish-pypi.yml: -------------------------------------------------------------------------------- 1 | name: Publish Python 🐍 distribution 📦 to PyPI 2 | on: 3 | push: 4 | # Pattern matched against refs/tags 5 | tags: 6 | - 'v*/release' # Push events to every that ends in /release (only 1 /-level) 7 | jobs: 8 | build: 9 | name: Build distribution 📦 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - uses: actions/checkout@v4 14 | - name: Set up Python 15 | uses: actions/setup-python@v4 16 | with: 17 | python-version: "3.x" 18 | - name: Install pypa/build 19 | run: >- 20 | python3 -m 21 | pip install 22 | build 23 | --user 24 | - name: Build a binary wheel and a source tarball 25 | run: python3 -m build 26 | - name: Store the distribution packages 27 | uses: actions/upload-artifact@v3 28 | with: 29 | name: python-package-distributions 30 | path: dist/ 31 | 32 | publish-to-pypi: 33 | name: >- 34 | Publish Python 🐍 distribution 📦 to PyPI 35 | if: startsWith(github.ref, 'refs/tags/') # only publish to PyPI on tag pushes 36 | needs: 37 | - build 38 | runs-on: ubuntu-latest 39 | environment: 40 | name: release 41 | url: https://pypi.org/p/paramnormal # Replace with your PyPI project name 42 | permissions: 43 | id-token: write # IMPORTANT: mandatory for trusted publishing 44 | 45 | steps: 46 | - name: Download all the dists 47 | uses: actions/download-artifact@v4.1.7 48 | with: 49 | name: python-package-distributions 50 | path: dist/ 51 | - name: Publish distribution 📦 to PyPI 52 | uses: pypa/gh-action-pypi-publish@release/v1 53 | 54 | github-release: 55 | name: >- 56 | Sign the Python 🐍 distribution 📦 with Sigstore 57 | and upload them to GitHub Release 58 | needs: 59 | - publish-to-pypi 60 | runs-on: ubuntu-latest 61 | 62 | permissions: 63 | contents: write # IMPORTANT: mandatory for making GitHub Releases 64 | id-token: write # IMPORTANT: mandatory for sigstore 65 | 66 | steps: 67 | - name: Download all the dists 68 | uses: actions/download-artifact@v4.1.7 69 | with: 70 | name: python-package-distributions 71 | path: dist/ 72 | - name: Sign the dists with Sigstore 73 | uses: sigstore/gh-action-sigstore-python@v1.2.3 74 | with: 75 | inputs: >- 76 | ./dist/*.tar.gz 77 | ./dist/*.whl 78 | - name: Create GitHub Release 79 | env: 80 | GITHUB_TOKEN: ${{ github.token }} 81 | run: >- 82 | gh release create 83 | '${{ github.ref_name }}' 84 | 
--repo '${{ github.repository }}' 85 | --notes "" 86 | - name: Upload artifact signatures to GitHub Release 87 | env: 88 | GITHUB_TOKEN: ${{ github.token }} 89 | # Upload to GitHub Release using the `gh` CLI. 90 | # `dist/` contains the built packages, and the 91 | # sigstore-produced signatures and certificates. 92 | run: >- 93 | gh release upload 94 | '${{ github.ref_name }}' dist/** 95 | --repo '${{ github.repository }}' 96 | -------------------------------------------------------------------------------- /.github/workflows/python-publish-testpypi.yml: -------------------------------------------------------------------------------- 1 | name: Publish Python 🐍 distribution 📦 to TestPyPI 2 | 3 | on: 4 | push: 5 | # Pattern matched against refs/tags 6 | tags: 7 | - 'v**test*' # Push events to every that ends in "test[something]" (can have many /-levels) 8 | 9 | jobs: 10 | build: 11 | name: Build distribution 📦 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - uses: actions/checkout@v4 16 | - name: Set up Python 17 | uses: actions/setup-python@v4 18 | with: 19 | python-version: "3.x" 20 | - name: Install pypa/build 21 | run: >- 22 | python3 -m 23 | pip install 24 | build 25 | --user 26 | - name: Build a binary wheel and a source tarball 27 | run: python3 -m build 28 | - name: Store the distribution packages 29 | uses: actions/upload-artifact@v3 30 | with: 31 | name: python-package-distributions 32 | path: dist/ 33 | 34 | 35 | publish-to-testpypi: 36 | name: Publish Python 🐍 distribution 📦 to TestPyPI 37 | needs: 38 | - build 39 | runs-on: ubuntu-latest 40 | 41 | environment: 42 | name: test 43 | url: https://test.pypi.org/p/paramnormal 44 | 45 | permissions: 46 | id-token: write # IMPORTANT: mandatory for trusted publishing 47 | 48 | steps: 49 | - name: Download all the dists 50 | uses: actions/download-artifact@v4.1.7 51 | with: 52 | name: python-package-distributions 53 | path: dist/ 54 | - name: Publish distribution 📦 to TestPyPI 55 | uses: pypa/gh-action-pypi-publish@release/v1 56 | with: 57 | repository-url: https://test.pypi.org/legacy/ 58 | -------------------------------------------------------------------------------- /.github/workflows/python-runtests-basic.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Run basic unit tests 5 | 6 | on: 7 | push: 8 | branches: [ main ] 9 | pull_request: 10 | branches: [ main ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | strategy: 17 | matrix: 18 | python-version: ["3.9", "3.10", "3.11", "3.12"] 19 | 20 | steps: 21 | - uses: actions/checkout@v2 22 | - name: Set up Python ${{ matrix.python-version }} 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | pip install pytest 30 | if [ -f requirements_dev.txt ]; then pip install -r requirements_dev.txt; fi 31 | - name: Test with pytest 32 | run: | 33 | python check_paramnormal.py --doctest-modules 34 | -------------------------------------------------------------------------------- /.github/workflows/python-runtests-img-comp.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run 
image comparison tests against the baseline images on a single Python version 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Run image comparison tests 5 | 6 | on: 7 | push: 8 | branches: [ main ] 9 | pull_request: 10 | branches: [ main ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v2 18 | - name: Set up Python 3.12 19 | uses: actions/setup-python@v2 20 | with: 21 | python-version: 3.12 22 | - name: Install dependencies 23 | run: | 24 | python -m pip install --upgrade pip 25 | pip install flake8 pytest 26 | if [ -f requirements_dev.txt ]; then pip install -r requirements_dev.txt; fi 27 | - name: Test with pytest 28 | run: | 29 | export MPL_IMGCOMP_TOLERANCE=20 30 | python check_paramnormal.py --strict --verbose 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled source # 2 | ################### 3 | *.com 4 | *.class 5 | *.dll 6 | *.exe 7 | *.o 8 | *.so 9 | *.pyc 10 | *.egg-info 11 | .ruff_cache 12 | build 13 | 14 | # test output 15 | .noseids 16 | .coverage 17 | .cache 18 | result_images 19 | 20 | # Packages # 21 | ############ 22 | # it's better to unpack these files and commit the raw source 23 | # git has its own built in compression methods 24 | *.7z 25 | *.dmg 26 | *.gz 27 | *.iso 28 | *.jar 29 | *.rar 30 | *.tar 31 | *.zip 32 | build 33 | dist 34 | 35 | # Logs and databases # 36 | ###################### 37 | *.log 38 | *.sql 39 | *.sqlite 40 | 41 | # OS generated files # 42 | ###################### 43 | .DS_Store* 44 | ehthumbs.db 45 | Icon? 46 | Thumbs.db 47 | 48 | # VIM scratch files # 49 | ##################### 50 | *~ 51 | *.swp 52 | 53 | # Visual Studio stuff # 54 | ####################### 55 | *.pyproj 56 | *.sln 57 | *.suo 58 | 59 | # Sphinx Docs # 60 | ############### 61 | docs/_build 62 | docs/_static 63 | docs/_templates 64 | docs/tutorial/*.rst 65 | docs/tutorial/*_files 66 | scratch 67 | scratch.* 68 | .ipynb_checkpoints 69 | 70 | 71 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/astral-sh/ruff-pre-commit 3 | # Ruff version. 4 | rev: v0.2.0 5 | hooks: 6 | # Run the linter. 7 | - id: ruff 8 | args: [ --fix ] 9 | # Run the formatter. 
10 | - id: ruff-format 11 | 12 | - repo: https://github.com/pycqa/isort 13 | rev: 5.13.2 14 | hooks: 15 | - id: isort 16 | language_version: python3 17 | 18 | - repo: https://github.com/asottile/pyupgrade 19 | rev: v3.15.0 20 | hooks: 21 | - id: pyupgrade 22 | args: 23 | - --py310-plus 24 | 25 | - repo: https://github.com/MarcoGorelli/absolufy-imports 26 | rev: v0.3.1 27 | hooks: 28 | - id: absolufy-imports 29 | name: absolufy-imports 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Paul Hobson 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /check_paramnormal.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import matplotlib 4 | 5 | matplotlib.use("agg") 6 | 7 | import paramnormal 8 | 9 | status = paramnormal.test(*sys.argv[1:]) 10 | sys.exit(status) 11 | -------------------------------------------------------------------------------- /conda.recipe/dev/meta.yaml: -------------------------------------------------------------------------------- 1 | package: 2 | name: paramnormal 3 | version: v0.2.dev 4 | 5 | source: 6 | path: ../.. 7 | 8 | build: 9 | number: 1 10 | script: python setup.py install 11 | 12 | requirements: 13 | build: 14 | - python 15 | - setuptools 16 | 17 | run: 18 | - python 19 | - numpy 20 | - scipy 21 | - matplotlib 22 | - nose 23 | 24 | test: 25 | imports: 26 | - paramnormal 27 | 28 | commands: 29 | - python -c "import paramnormal; paramnormal.test()" 30 | 31 | requires: 32 | - nose 33 | 34 | about: 35 | home: http://phobson.github.io/paramnormal/ 36 | license: MIT License 37 | summary: 'Conventionally parameterized probability distributions.' 
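# A rough sketch of building and testing this dev recipe locally (assuming
# conda-build is installed in the base environment):
#
#   conda build conda.recipe/dev
#   conda install --use-local paramnormal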
38 | 39 | # See 40 | # http://docs.continuum.io/conda/build.html for 41 | # more information about meta.yaml/configure 42 | -------------------------------------------------------------------------------- /conda.recipe/stable/meta.yaml: -------------------------------------------------------------------------------- 1 | package: 2 | name: paramnormal 3 | version: 0.4.4 4 | 5 | source: 6 | git_url: https://github.com/phobson/paramnormal.git 7 | git_tag: v0.4.4 8 | 9 | build: 10 | number: 1 11 | script: python setup.py install 12 | 13 | requirements: 14 | build: 15 | - python 16 | - setuptools 17 | 18 | run: 19 | - python 20 | - numpy 21 | - scipy 22 | - matplotlib 23 | - nose 24 | 25 | test: 26 | imports: 27 | - paramnormal 28 | 29 | commands: 30 | - python -c "import paramnormal; paramnormal.test()" 31 | 32 | requires: 33 | - nose 34 | 35 | about: 36 | home: http://phobson.github.io/paramnormal/ 37 | license: MIT License 38 | summary: 'Conventionally parameterized probability distributions.' 39 | 40 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = ../../paramnormal-docs 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
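# Typical local usage (assuming Sphinx and the documentation dependencies
# are installed):
#
#   make clean html
#
# The rendered pages end up in $(BUILDDIR)/html.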
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " applehelp to make an Apple Help Book" 34 | @echo " devhelp to make HTML files and a Devhelp project" 35 | @echo " epub to make an epub" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | html: 55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 56 | @echo 57 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 58 | 59 | dirhtml: 60 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 61 | @echo 62 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 63 | 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | pickle: 70 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 71 | @echo 72 | @echo "Build finished; now you can process the pickle files." 73 | 74 | json: 75 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 76 | @echo 77 | @echo "Build finished; now you can process the JSON files." 78 | 79 | htmlhelp: 80 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 81 | @echo 82 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 83 | ".hhp project file in $(BUILDDIR)/htmlhelp." 84 | 85 | qthelp: 86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 87 | @echo 88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/paramnormal.qhcp" 91 | @echo "To view the help file:" 92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/paramnormal.qhc" 93 | 94 | applehelp: 95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 96 | @echo 97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 98 | @echo "N.B. You won't be able to view it unless you put it in" \ 99 | "~/Library/Documentation/Help or install it in your application" \ 100 | "bundle." 
101 | 102 | devhelp: 103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 104 | @echo 105 | @echo "Build finished." 106 | @echo "To view the help file:" 107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/paramnormal" 108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/paramnormal" 109 | @echo "# devhelp" 110 | 111 | epub: 112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 113 | @echo 114 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 115 | 116 | latex: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo 119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 121 | "(use \`make latexpdf' here to do that automatically)." 122 | 123 | latexpdf: 124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 125 | @echo "Running LaTeX files through pdflatex..." 126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 128 | 129 | latexpdfja: 130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 131 | @echo "Running LaTeX files through platex and dvipdfmx..." 132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 134 | 135 | text: 136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 137 | @echo 138 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 139 | 140 | man: 141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 142 | @echo 143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 144 | 145 | texinfo: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo 148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 149 | @echo "Run \`make' in that directory to run these through makeinfo" \ 150 | "(use \`make info' here to do that automatically)." 151 | 152 | info: 153 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 154 | @echo "Running Texinfo files through makeinfo..." 155 | make -C $(BUILDDIR)/texinfo info 156 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 157 | 158 | gettext: 159 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 160 | @echo 161 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 162 | 163 | changes: 164 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 165 | @echo 166 | @echo "The overview file is in $(BUILDDIR)/changes." 167 | 168 | linkcheck: 169 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 170 | @echo 171 | @echo "Link check complete; look for any errors in the above output " \ 172 | "or in $(BUILDDIR)/linkcheck/output.txt." 173 | 174 | doctest: 175 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 176 | @echo "Testing of doctests in the sources finished, look at the " \ 177 | "results in $(BUILDDIR)/doctest/output.txt." 178 | 179 | coverage: 180 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 181 | @echo "Testing of coverage in the sources finished, look at the " \ 182 | "results in $(BUILDDIR)/coverage/python.txt." 183 | 184 | xml: 185 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 186 | @echo 187 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 188 | 189 | pseudoxml: 190 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 191 | @echo 192 | @echo "Build finished. 
The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 193 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | API Reference 2 | ------------- 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | api/activity.rst 8 | api/paramnormal.rst 9 | api/utils.rst 10 | -------------------------------------------------------------------------------- /docs/api/activity.rst: -------------------------------------------------------------------------------- 1 | .. _act_auto: 2 | 3 | Paramnormal Activity 4 | ==================== 5 | 6 | .. automodule:: paramnormal.activity 7 | :members: 8 | -------------------------------------------------------------------------------- /docs/api/paramnormal.rst: -------------------------------------------------------------------------------- 1 | .. _paramnormal_auto: 2 | 3 | Probability Distributions 4 | ========================= 5 | 6 | .. automodule:: paramnormal.dist 7 | :members: 8 | :undoc-members: 9 | :show-inheritance: 10 | -------------------------------------------------------------------------------- /docs/api/utils.rst: -------------------------------------------------------------------------------- 1 | .. _utils_auto: 2 | 3 | Miscellaneous Utilities 4 | ======================= 5 | 6 | .. automodule:: paramnormal.utils 7 | :members: 8 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # paramnormal documentation build configuration file, created by 4 | # sphinx-quickstart on Wed Dec 2 17:36:54 2015. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import os 16 | import sys 17 | 18 | import seaborn 19 | 20 | clean_bkgd = {"axes.facecolor": "none", "figure.facecolor": "none"} 21 | seaborn.set(style="ticks", rc=clean_bkgd) 22 | 23 | source_suffix = [".rst"] 24 | 25 | numpydoc_show_class_members = False 26 | autodoc_member_order = "bysource" 27 | html_theme = "sphinx_rtd_theme" 28 | 29 | # Add any Sphinx extension module names here, as strings. They can be 30 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 31 | # ones. 32 | sys.path.insert(0, os.path.abspath("sphinxext")) 33 | extensions = [ 34 | "sphinx.ext.autodoc", 35 | "sphinx.ext.doctest", 36 | "sphinx.ext.intersphinx", 37 | "sphinx.ext.todo", 38 | "sphinx.ext.mathjax", 39 | "sphinx.ext.viewcode", 40 | "plot_generator", 41 | "plot_directive", 42 | "numpydoc", 43 | "ipython_directive", 44 | "ipython_console_highlighting", 45 | ] 46 | 47 | # Add any paths that contain templates here, relative to this directory. 48 | templates_path = ["_templates"] 49 | 50 | # The suffix(es) of source filenames. 51 | # You can specify multiple suffix as a list of string: 52 | # source_suffix = ['.rst', '.md'] 53 | source_suffix = ".rst" 54 | 55 | # The encoding of source files. 
56 | # source_encoding = 'utf-8-sig' 57 | 58 | # Include the example source for plots in API docs 59 | plot_include_source = True 60 | plot_formats = [("png", 90)] 61 | plot_html_show_formats = False 62 | plot_html_show_source_link = False 63 | 64 | # The master toctree document. 65 | master_doc = "index" 66 | 67 | # General information about the project. 68 | project = "paramnormal" 69 | copyright = "2015 - 2016, Paul Hobson" 70 | author = "Paul Hobson" 71 | 72 | # The version info for the project you're documenting, acts as replacement for 73 | # |version| and |release|, also used in various other places throughout the 74 | # built documents. 75 | # 76 | # The short X.Y version. 77 | version = "0.4.4" 78 | # The full version, including alpha/beta/rc tags. 79 | release = "0.4.4" 80 | 81 | # The language for content autogenerated by Sphinx. Refer to documentation 82 | # for a list of supported languages. 83 | # 84 | # This is also used if you do content translation via gettext catalogs. 85 | # Usually you set "language" from the command line for these cases. 86 | language = None 87 | 88 | # There are two options for replacing |today|: either, you set today to some 89 | # non-false value, then it is used: 90 | # today = '' 91 | # Else, today_fmt is used as the format for a strftime call. 92 | # today_fmt = '%B %d, %Y' 93 | 94 | # List of patterns, relative to source directory, that match files and 95 | # directories to ignore when looking for source files. 96 | exclude_patterns = ["_build"] 97 | 98 | # The reST default role (used for this markup: `text`) to use for all 99 | # documents. 100 | # default_role = None 101 | 102 | # If true, '()' will be appended to :func: etc. cross-reference text. 103 | # add_function_parentheses = True 104 | 105 | # If true, the current module name will be prepended to all description 106 | # unit titles (such as .. function::). 107 | # add_module_names = True 108 | 109 | # If true, sectionauthor and moduleauthor directives will be shown in the 110 | # output. They are ignored by default. 111 | # show_authors = False 112 | 113 | # The name of the Pygments (syntax highlighting) style to use. 114 | pygments_style = "sphinx" 115 | 116 | # A list of ignored prefixes for module index sorting. 117 | # modindex_common_prefix = [] 118 | 119 | # If true, keep warnings as "system message" paragraphs in the built documents. 120 | # keep_warnings = False 121 | 122 | # If true, `todo` and `todoList` produce output, else they produce nothing. 123 | todo_include_todos = True 124 | 125 | 126 | # -- Options for HTML output ---------------------------------------------- 127 | 128 | # The theme to use for HTML and HTML Help pages. See the documentation for 129 | # a list of builtin themes. 130 | html_theme = "sphinx_rtd_theme" 131 | 132 | # Theme options are theme-specific and customize the look and feel of a theme 133 | # further. For a list of options available for each theme, see the 134 | # documentation. 135 | # html_theme_options = {} 136 | 137 | # Add any paths that contain custom themes here, relative to this directory. 138 | # html_theme_path = [] 139 | 140 | # The name for this set of Sphinx documents. If None, it defaults to 141 | # " v documentation". 142 | # html_title = None 143 | 144 | # A shorter title for the navigation bar. Default is the same as html_title. 145 | # html_short_title = None 146 | 147 | # The name of an image file (relative to this directory) to place at the top 148 | # of the sidebar. 
149 | # html_logo = None 150 | 151 | # The name of an image file (within the static path) to use as favicon of the 152 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 153 | # pixels large. 154 | # html_favicon = None 155 | 156 | # Add any paths that contain custom static files (such as style sheets) here, 157 | # relative to this directory. They are copied after the builtin static files, 158 | # so a file named "default.css" will overwrite the builtin "default.css". 159 | html_static_path = ["_static"] 160 | 161 | # Add any extra paths that contain custom files (such as robots.txt or 162 | # .htaccess) here, relative to this directory. These files are copied 163 | # directly to the root of the documentation. 164 | # html_extra_path = [] 165 | 166 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 167 | # using the given strftime format. 168 | # html_last_updated_fmt = '%b %d, %Y' 169 | 170 | # If true, SmartyPants will be used to convert quotes and dashes to 171 | # typographically correct entities. 172 | # html_use_smartypants = True 173 | 174 | # Custom sidebar templates, maps document names to template names. 175 | # html_sidebars = {} 176 | 177 | # Additional templates that should be rendered to pages, maps page names to 178 | # template names. 179 | # html_additional_pages = {} 180 | 181 | # If false, no module index is generated. 182 | # html_domain_indices = True 183 | 184 | # If false, no index is generated. 185 | # html_use_index = True 186 | 187 | # If true, the index is split into individual pages for each letter. 188 | # html_split_index = False 189 | 190 | # If true, links to the reST sources are added to the pages. 191 | # html_show_sourcelink = True 192 | 193 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 194 | # html_show_sphinx = True 195 | 196 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 197 | # html_show_copyright = True 198 | 199 | # If true, an OpenSearch description file will be output, and all pages will 200 | # contain a tag referring to it. The value of this option must be the 201 | # base URL from which the finished HTML is served. 202 | # html_use_opensearch = '' 203 | 204 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 205 | # html_file_suffix = None 206 | 207 | # Language to be used for generating the HTML full-text search index. 208 | # Sphinx supports the following languages: 209 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' 210 | # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' 211 | # html_search_language = 'en' 212 | 213 | # A dictionary with options for the search language support, empty by default. 214 | # Now only 'ja' uses this config value 215 | # html_search_options = {'type': 'default'} 216 | 217 | # The name of a javascript file (relative to the configuration directory) that 218 | # implements a search results scorer. If empty, the default will be used. 219 | # html_search_scorer = 'scorer.js' 220 | 221 | # Output file base name for HTML help builder. 222 | htmlhelp_basename = "paramnormaldoc" 223 | 224 | # -- Options for LaTeX output --------------------------------------------- 225 | 226 | latex_elements = { 227 | # The paper size ('letterpaper' or 'a4paper'). 228 | #'papersize': 'letterpaper', 229 | # The font size ('10pt', '11pt' or '12pt'). 230 | #'pointsize': '10pt', 231 | # Additional stuff for the LaTeX preamble. 
232 | #'preamble': '', 233 | # Latex figure (float) alignment 234 | #'figure_align': 'htbp', 235 | } 236 | 237 | # Grouping the document tree into LaTeX files. List of tuples 238 | # (source start file, target name, title, 239 | # author, documentclass [howto, manual, or own class]). 240 | latex_documents = [ 241 | (master_doc, "paramnormal.tex", "paramnormal Documentation", "Paul Hobson", "manual"), 242 | ] 243 | 244 | # The name of an image file (relative to this directory) to place at the top of 245 | # the title page. 246 | # latex_logo = None 247 | 248 | # For "manual" documents, if this is true, then toplevel headings are parts, 249 | # not chapters. 250 | # latex_use_parts = False 251 | 252 | # If true, show page references after internal links. 253 | # latex_show_pagerefs = False 254 | 255 | # If true, show URL addresses after external links. 256 | # latex_show_urls = False 257 | 258 | # Documents to append as an appendix to all manuals. 259 | # latex_appendices = [] 260 | 261 | # If false, no module index is generated. 262 | # latex_domain_indices = True 263 | 264 | 265 | # -- Options for manual page output --------------------------------------- 266 | 267 | # One entry per manual page. List of tuples 268 | # (source start file, name, description, authors, manual section). 269 | man_pages = [(master_doc, "paramnormal", "paramnormal Documentation", [author], 1)] 270 | 271 | # If true, show URL addresses after external links. 272 | # man_show_urls = False 273 | 274 | 275 | # -- Options for Texinfo output ------------------------------------------- 276 | 277 | # Grouping the document tree into Texinfo files. List of tuples 278 | # (source start file, target name, title, author, 279 | # dir menu entry, description, category) 280 | texinfo_documents = [ 281 | ( 282 | master_doc, 283 | "paramnormal", 284 | "paramnormal Documentation", 285 | author, 286 | "paramnormal", 287 | "One line description of project.", 288 | "Miscellaneous", 289 | ), 290 | ] 291 | 292 | # Documents to append as an appendix to all manuals. 293 | # texinfo_appendices = [] 294 | 295 | # If false, no module index is generated. 296 | # texinfo_domain_indices = True 297 | 298 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 299 | # texinfo_show_urls = 'footnote' 300 | 301 | # If true, do not generate a @detailmenu in the "Top" node's menu. 302 | # texinfo_no_detailmenu = False 303 | 304 | 305 | # Example configuration for intersphinx: refer to the Python standard library. 306 | intersphinx_mapping = {"https://docs.python.org/": None} 307 | -------------------------------------------------------------------------------- /docs/examples/index.rst: -------------------------------------------------------------------------------- 1 | 2 | 3 | .. raw:: html 4 | 5 | 66 | 67 | .. _example_gallery: 68 | 69 | Example gallery 70 | =============== 71 | 72 | 73 | 74 | .. toctree:: 75 | :hidden: 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | .. raw:: html 84 | 85 |
86 | -------------------------------------------------------------------------------- /docs/includeme.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../readme.rst 2 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../readme.rst 2 | 3 | 4 | Tutorials 5 | ========= 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | tutorial/overview 11 | tutorial/fitting 12 | tutorial/activities 13 | 14 | API Reference 15 | ============= 16 | 17 | .. toctree:: 18 | :maxdepth: 2 19 | 20 | api.rst 21 | 22 | 23 | Indices and tables 24 | ================== 25 | 26 | * :ref:`genindex` 27 | * :ref:`modindex` 28 | * :ref:`search` 29 | 30 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=../../paramnormal-docs 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. xml to make Docutils-native XML files 37 | echo. pseudoxml to make pseudoxml-XML files for display purposes 38 | echo. linkcheck to check all external links for integrity 39 | echo. doctest to run all doctests embedded in the documentation if enabled 40 | echo. coverage to run coverage check of the documentation if enabled 41 | goto end 42 | ) 43 | 44 | if "%1" == "clean" ( 45 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 46 | del /q /s %BUILDDIR%\* 47 | goto end 48 | ) 49 | 50 | 51 | REM Check if sphinx-build is available and fallback to Python version if any 52 | %SPHINXBUILD% 2> nul 53 | if errorlevel 9009 goto sphinx_python 54 | goto sphinx_ok 55 | 56 | :sphinx_python 57 | 58 | set SPHINXBUILD=python -m sphinx.__init__ 59 | %SPHINXBUILD% 2> nul 60 | if errorlevel 9009 ( 61 | echo. 62 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 63 | echo.installed, then set the SPHINXBUILD environment variable to point 64 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 65 | echo.may add the Sphinx directory to PATH. 66 | echo. 
67 | echo.If you don't have Sphinx installed, grab it from 68 | echo.http://sphinx-doc.org/ 69 | exit /b 1 70 | ) 71 | 72 | :sphinx_ok 73 | 74 | 75 | if "%1" == "html" ( 76 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 77 | if errorlevel 1 exit /b 1 78 | echo. 79 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 80 | goto end 81 | ) 82 | 83 | if "%1" == "dirhtml" ( 84 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 85 | if errorlevel 1 exit /b 1 86 | echo. 87 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 88 | goto end 89 | ) 90 | 91 | if "%1" == "singlehtml" ( 92 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 93 | if errorlevel 1 exit /b 1 94 | echo. 95 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 96 | goto end 97 | ) 98 | 99 | if "%1" == "pickle" ( 100 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 101 | if errorlevel 1 exit /b 1 102 | echo. 103 | echo.Build finished; now you can process the pickle files. 104 | goto end 105 | ) 106 | 107 | if "%1" == "json" ( 108 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 109 | if errorlevel 1 exit /b 1 110 | echo. 111 | echo.Build finished; now you can process the JSON files. 112 | goto end 113 | ) 114 | 115 | if "%1" == "htmlhelp" ( 116 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 117 | if errorlevel 1 exit /b 1 118 | echo. 119 | echo.Build finished; now you can run HTML Help Workshop with the ^ 120 | .hhp project file in %BUILDDIR%/htmlhelp. 121 | goto end 122 | ) 123 | 124 | if "%1" == "qthelp" ( 125 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 129 | .qhcp project file in %BUILDDIR%/qthelp, like this: 130 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\paramnormal.qhcp 131 | echo.To view the help file: 132 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\paramnormal.ghc 133 | goto end 134 | ) 135 | 136 | if "%1" == "devhelp" ( 137 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 138 | if errorlevel 1 exit /b 1 139 | echo. 140 | echo.Build finished. 141 | goto end 142 | ) 143 | 144 | if "%1" == "epub" ( 145 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 146 | if errorlevel 1 exit /b 1 147 | echo. 148 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 149 | goto end 150 | ) 151 | 152 | if "%1" == "latex" ( 153 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 154 | if errorlevel 1 exit /b 1 155 | echo. 156 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 157 | goto end 158 | ) 159 | 160 | if "%1" == "latexpdf" ( 161 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 162 | cd %BUILDDIR%/latex 163 | make all-pdf 164 | cd %~dp0 165 | echo. 166 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 167 | goto end 168 | ) 169 | 170 | if "%1" == "latexpdfja" ( 171 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 172 | cd %BUILDDIR%/latex 173 | make all-pdf-ja 174 | cd %~dp0 175 | echo. 176 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 177 | goto end 178 | ) 179 | 180 | if "%1" == "text" ( 181 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 182 | if errorlevel 1 exit /b 1 183 | echo. 184 | echo.Build finished. The text files are in %BUILDDIR%/text. 
185 | goto end 186 | ) 187 | 188 | if "%1" == "man" ( 189 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 190 | if errorlevel 1 exit /b 1 191 | echo. 192 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 193 | goto end 194 | ) 195 | 196 | if "%1" == "texinfo" ( 197 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 198 | if errorlevel 1 exit /b 1 199 | echo. 200 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 201 | goto end 202 | ) 203 | 204 | if "%1" == "gettext" ( 205 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 206 | if errorlevel 1 exit /b 1 207 | echo. 208 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 209 | goto end 210 | ) 211 | 212 | if "%1" == "changes" ( 213 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 214 | if errorlevel 1 exit /b 1 215 | echo. 216 | echo.The overview file is in %BUILDDIR%/changes. 217 | goto end 218 | ) 219 | 220 | if "%1" == "linkcheck" ( 221 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 222 | if errorlevel 1 exit /b 1 223 | echo. 224 | echo.Link check complete; look for any errors in the above output ^ 225 | or in %BUILDDIR%/linkcheck/output.txt. 226 | goto end 227 | ) 228 | 229 | if "%1" == "doctest" ( 230 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 231 | if errorlevel 1 exit /b 1 232 | echo. 233 | echo.Testing of doctests in the sources finished, look at the ^ 234 | results in %BUILDDIR%/doctest/output.txt. 235 | goto end 236 | ) 237 | 238 | if "%1" == "coverage" ( 239 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage 240 | if errorlevel 1 exit /b 1 241 | echo. 242 | echo.Testing of coverage in the sources finished, look at the ^ 243 | results in %BUILDDIR%/coverage/python.txt. 244 | goto end 245 | ) 246 | 247 | if "%1" == "xml" ( 248 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 249 | if errorlevel 1 exit /b 1 250 | echo. 251 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 252 | goto end 253 | ) 254 | 255 | if "%1" == "pseudoxml" ( 256 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 257 | if errorlevel 1 exit /b 1 258 | echo. 259 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 260 | goto end 261 | ) 262 | 263 | :end 264 | -------------------------------------------------------------------------------- /docs/sphinxext/ipython_console_highlighting.py: -------------------------------------------------------------------------------- 1 | """reST directive for syntax-highlighting ipython interactive sessions. 2 | 3 | XXX - See what improvements can be made based on the new (as of Sept 2009) 4 | 'pycon' lexer for the python console. At the very least it will give better 5 | highlighted tracebacks. 
6 | """ 7 | 8 | # ----------------------------------------------------------------------------- 9 | # Needed modules 10 | 11 | # Standard library 12 | import re 13 | 14 | # Third party 15 | from pygments.lexer import Lexer, do_insertions 16 | from pygments.lexers.agile import PythonLexer, PythonTracebackLexer 17 | from pygments.token import Comment, Generic 18 | from sphinx import highlighting 19 | 20 | # ----------------------------------------------------------------------------- 21 | # Global constants 22 | line_re = re.compile(".*?\n") 23 | 24 | # ----------------------------------------------------------------------------- 25 | # Code begins - classes and functions 26 | 27 | 28 | class IPythonConsoleLexer(Lexer): 29 | 30 | """ 31 | For IPython console output or doctests, such as: 32 | 33 | .. sourcecode:: ipython 34 | 35 | In [1]: a = 'foo' 36 | 37 | In [2]: a 38 | Out[2]: 'foo' 39 | 40 | In [3]: print(a) 41 | foo 42 | 43 | In [4]: 1 / 0 44 | 45 | Notes: 46 | 47 | - Tracebacks are not currently supported. 48 | 49 | - It assumes the default IPython prompts, not customized ones. 50 | """ 51 | 52 | name = "IPython console session" 53 | aliases = ["ipython"] 54 | mimetypes = ["text/x-ipython-console"] 55 | input_prompt = re.compile(r"(In \[[0-9]+\]: )|( \.\.\.+:)") 56 | output_prompt = re.compile(r"(Out\[[0-9]+\]: )|( \.\.\.+:)") 57 | continue_prompt = re.compile(r" \.\.\.+:") 58 | tb_start = re.compile(r"\-+") 59 | 60 | def get_tokens_unprocessed(self, text): 61 | pylexer = PythonLexer(**self.options) 62 | PythonTracebackLexer(**self.options) 63 | 64 | curcode = "" 65 | insertions = [] 66 | for match in line_re.finditer(text): 67 | line = match.group() 68 | input_prompt = self.input_prompt.match(line) 69 | continue_prompt = self.continue_prompt.match(line.rstrip()) 70 | output_prompt = self.output_prompt.match(line) 71 | if line.startswith("#"): 72 | insertions.append((len(curcode), [(0, Comment, line)])) 73 | elif input_prompt is not None: 74 | insertions.append((len(curcode), [(0, Generic.Prompt, input_prompt.group())])) 75 | curcode += line[input_prompt.end() :] 76 | elif continue_prompt is not None: 77 | insertions.append((len(curcode), [(0, Generic.Prompt, continue_prompt.group())])) 78 | curcode += line[continue_prompt.end() :] 79 | elif output_prompt is not None: 80 | # Use the 'error' token for output. We should probably make 81 | # our own token, but error is typicaly in a bright color like 82 | # red, so it works fine for our output prompts. 83 | insertions.append((len(curcode), [(0, Generic.Error, output_prompt.group())])) 84 | curcode += line[output_prompt.end() :] 85 | else: 86 | if curcode: 87 | for item in do_insertions(insertions, pylexer.get_tokens_unprocessed(curcode)): 88 | yield item 89 | curcode = "" 90 | insertions = [] 91 | yield match.start(), Generic.Output, line 92 | if curcode: 93 | for item in do_insertions(insertions, pylexer.get_tokens_unprocessed(curcode)): 94 | yield item 95 | 96 | 97 | def setup(app): 98 | """Setup as a sphinx extension.""" 99 | 100 | # This is only a lexer, so adding it below to pygments appears sufficient. 101 | # But if somebody knows that the right API usage should be to do that via 102 | # sphinx, by all means fix it here. At least having this setup.py 103 | # suppresses the sphinx warning we'd get without it. 
104 | pass 105 | 106 | 107 | # ----------------------------------------------------------------------------- 108 | # Register the extension as a valid pygments lexer 109 | highlighting.lexers["ipython"] = IPythonConsoleLexer() 110 | -------------------------------------------------------------------------------- /docs/sphinxext/ipython_directive.py: -------------------------------------------------------------------------------- 1 | r""" 2 | Sphinx directive to support embedded IPython code. 3 | 4 | This directive allows pasting of entire interactive IPython sessions, prompts 5 | and all, and their code will actually get re-executed at doc build time, with 6 | all prompts renumbered sequentially. It also allows you to input code as a pure 7 | python input by giving the argument python to the directive. The output looks 8 | like an interactive ipython section. 9 | 10 | To enable this directive, simply list it in your Sphinx ``conf.py`` file 11 | (making sure the directory where you placed it is visible to sphinx, as is 12 | needed for all Sphinx directives). For example, to enable syntax highlighting 13 | and the IPython directive:: 14 | 15 | extensions = ['IPython.sphinxext.ipython_console_highlighting', 16 | 'IPython.sphinxext.ipython_directive'] 17 | 18 | The IPython directive outputs code-blocks with the language 'ipython'. So 19 | if you do not have the syntax highlighting extension enabled as well, then 20 | all rendered code-blocks will be uncolored. By default this directive assumes 21 | that your prompts are unchanged IPython ones, but this can be customized. 22 | The configurable options that can be placed in conf.py are: 23 | 24 | ipython_savefig_dir: 25 | The directory in which to save the figures. This is relative to the 26 | Sphinx source directory. The default is `html_static_path`. 27 | ipython_rgxin: 28 | The compiled regular expression to denote the start of IPython input 29 | lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You 30 | shouldn't need to change this. 31 | ipython_rgxout: 32 | The compiled regular expression to denote the start of IPython output 33 | lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You 34 | shouldn't need to change this. 35 | ipython_promptin: 36 | The string to represent the IPython input prompt in the generated ReST. 37 | The default is 'In [%d]:'. This expects that the line numbers are used 38 | in the prompt. 39 | ipython_promptout: 40 | The string to represent the IPython prompt in the generated ReST. The 41 | default is 'Out [%d]:'. This expects that the line numbers are used 42 | in the prompt. 43 | ipython_mplbackend: 44 | The string which specifies if the embedded Sphinx shell should import 45 | Matplotlib and set the backend. The value specifies a backend that is 46 | passed to `matplotlib.use()` before any lines in `ipython_execlines` are 47 | executed. If not specified in conf.py, then the default value of 'agg' is 48 | used. To use the IPython directive without matplotlib as a dependency, set 49 | the value to `None`. It may end up that matplotlib is still imported 50 | if the user specifies so in `ipython_execlines` or makes use of the 51 | @savefig pseudo decorator. 52 | ipython_execlines: 53 | A list of strings to be exec'd in the embedded Sphinx shell. Typical 54 | usage is to make certain packages always available. Set this to an empty 55 | list if you wish to have no imports always available. If specified in 56 | conf.py as `None`, then it has the effect of making no imports available. 
57 | If omitted from conf.py altogether, then the default value of 58 | ['import numpy as np', 'import matplotlib.pyplot as plt'] is used. 59 | ipython_holdcount 60 | When the @suppress pseudo-decorator is used, the execution count can be 61 | incremented or not. The default behavior is to hold the execution count, 62 | corresponding to a value of `True`. Set this to `False` to increment 63 | the execution count after each suppressed command. 64 | 65 | As an example, to use the IPython directive when `matplotlib` is not available, 66 | one sets the backend to `None`:: 67 | 68 | ipython_mplbackend = None 69 | 70 | An example usage of the directive is: 71 | 72 | .. code-block:: rst 73 | 74 | .. ipython:: 75 | 76 | In [1]: x = 1 77 | 78 | In [2]: y = x**2 79 | 80 | In [3]: print(y) 81 | 82 | See http://matplotlib.org/sampledoc/ipython_directive.html for additional 83 | documentation. 84 | 85 | ToDo 86 | ---- 87 | 88 | - Turn the ad-hoc test() function into a real test suite. 89 | - Break up ipython-specific functionality from matplotlib stuff into better 90 | separated code. 91 | 92 | Authors 93 | ------- 94 | 95 | - John D Hunter: orignal author. 96 | - Fernando Perez: refactoring, documentation, cleanups, port to 0.11. 97 | - VáclavŠmilauer : Prompt generalizations. 98 | - Skipper Seabold, refactoring, cleanups, pure python addition 99 | """ 100 | 101 | # ----------------------------------------------------------------------------- 102 | # Imports 103 | # ----------------------------------------------------------------------------- 104 | 105 | import ast 106 | import contextlib 107 | 108 | # Stdlib 109 | import os 110 | import re 111 | import sys 112 | import tempfile 113 | import warnings 114 | 115 | from pandas.compat import cStringIO as StringIO 116 | from pandas.compat import range 117 | 118 | # To keep compatibility with various python versions 119 | with contextlib.suppress(ImportError): 120 | pass 121 | 122 | # Third-party 123 | from docutils.parsers.rst import directives 124 | from IPython import InteractiveShell 125 | from IPython.core.profiledir import ProfileDir 126 | from IPython.utils import io 127 | from IPython.utils.py3compat import PY3 128 | from sphinx.util.compat import Directive 129 | 130 | # Our own 131 | from traitlets.config import Config 132 | 133 | if PY3: 134 | from io import StringIO 135 | 136 | text_type = str 137 | else: 138 | from StringIO import StringIO 139 | 140 | # ----------------------------------------------------------------------------- 141 | # Globals 142 | # ----------------------------------------------------------------------------- 143 | # for tokenizing blocks 144 | COMMENT, INPUT, OUTPUT = range(3) 145 | 146 | # ----------------------------------------------------------------------------- 147 | # Functions and class declarations 148 | # ----------------------------------------------------------------------------- 149 | 150 | 151 | def block_parser(part, rgxin, rgxout, fmtin, fmtout): 152 | """ 153 | part is a string of ipython text, comprised of at most one 154 | input, one ouput, comments, and blank lines. The block parser 155 | parses the text into a list of:: 156 | 157 | blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...] 
158 | 159 | where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and 160 | data is, depending on the type of token:: 161 | 162 | COMMENT : the comment string 163 | 164 | INPUT: the (DECORATOR, INPUT_LINE, REST) where 165 | DECORATOR: the input decorator (or None) 166 | INPUT_LINE: the input as string (possibly multi-line) 167 | REST : any stdout generated by the input line (not OUTPUT) 168 | 169 | OUTPUT: the output string, possibly multi-line 170 | 171 | """ 172 | block = [] 173 | lines = part.split("\n") 174 | N = len(lines) 175 | i = 0 176 | decorator = None 177 | while 1: 178 | if i == N: 179 | # nothing left to parse -- the last line 180 | break 181 | 182 | line = lines[i] 183 | i += 1 184 | line_stripped = line.strip() 185 | if line_stripped.startswith("#"): 186 | block.append((COMMENT, line)) 187 | continue 188 | 189 | if line_stripped.startswith("@"): 190 | # we're assuming at most one decorator -- may need to 191 | # rethink 192 | decorator = line_stripped 193 | continue 194 | 195 | # does this look like an input line? 196 | matchin = rgxin.match(line) 197 | if matchin: 198 | lineno, inputline = int(matchin.group(1)), matchin.group(2) 199 | 200 | # the ....: continuation string 201 | continuation = " %s:" % "".join(["."] * (len(str(lineno)) + 2)) 202 | Nc = len(continuation) 203 | # input lines can continue on for more than one line, if 204 | # we have a '\' line continuation char or a function call 205 | # echo line 'print'. The input line can only be 206 | # terminated by the end of the block or an output line, so 207 | # we parse out the rest of the input line if it is 208 | # multiline as well as any echo text 209 | 210 | rest = [] 211 | while i < N: 212 | # look ahead; if the next line is blank, or a comment, or 213 | # an output line, we're done 214 | 215 | nextline = lines[i] 216 | matchout = rgxout.match(nextline) 217 | 218 | if matchout or nextline.startswith("#"): 219 | break 220 | elif nextline.startswith(continuation): 221 | nextline = nextline[Nc:] 222 | if nextline and nextline[0] == " ": 223 | nextline = nextline[1:] 224 | 225 | inputline += "\n" + nextline 226 | 227 | else: 228 | rest.append(nextline) 229 | i += 1 230 | 231 | block.append((INPUT, (decorator, inputline, "\n".join(rest)))) 232 | continue 233 | 234 | # if it looks like an output line grab all the text to the end 235 | # of the block 236 | matchout = rgxout.match(line) 237 | if matchout: 238 | lineno, output = int(matchout.group(1)), matchout.group(2) 239 | if i < N - 1: 240 | output = "\n".join([output] + lines[i:]) 241 | 242 | block.append((OUTPUT, output)) 243 | break 244 | 245 | return block 246 | 247 | 248 | class DecodingStringIO(StringIO): 249 | def __init__(self, buf="", encodings=("utf8",), *args, **kwds): 250 | super().__init__(buf, *args, **kwds) 251 | self.set_encodings(encodings) 252 | 253 | def set_encodings(self, encodings): 254 | self.encodings = encodings 255 | 256 | def write(self, data): 257 | if isinstance(data, text_type): 258 | return super().write(data) 259 | else: 260 | for enc in self.encodings: 261 | try: 262 | data = data.decode(enc) 263 | return super().write(data) 264 | except Exception: 265 | pass 266 | # default to brute utf8 if no encoding succeded 267 | return super().write(data.decode("utf8", "replace")) 268 | 269 | 270 | class EmbeddedSphinxShell: 271 | """An embedded IPython instance to run inside Sphinx""" 272 | 273 | def __init__(self, exec_lines=None, state=None): 274 | self.cout = DecodingStringIO("") 275 | 276 | if exec_lines is None: 277 | exec_lines = [] 278 | 
279 | self.state = state 280 | 281 | # Create config object for IPython 282 | config = Config() 283 | config.InteractiveShell.autocall = False 284 | config.InteractiveShell.autoindent = False 285 | config.InteractiveShell.colors = "NoColor" 286 | 287 | # create a profile so instance history isn't saved 288 | tmp_profile_dir = tempfile.mkdtemp(prefix="profile_") 289 | profname = "auto_profile_sphinx_build" 290 | pdir = os.path.join(tmp_profile_dir, profname) 291 | profile = ProfileDir.create_profile_dir(pdir) 292 | 293 | # Create and initialize global ipython, but don't start its mainloop. 294 | # This will persist across different EmbededSphinxShell instances. 295 | IP = InteractiveShell.instance(config=config, profile_dir=profile) 296 | 297 | # io.stdout redirect must be done after instantiating InteractiveShell 298 | io.stdout = self.cout 299 | io.stderr = self.cout 300 | 301 | # For debugging, so we can see normal output, use this: 302 | # from IPython.utils.io import Tee 303 | # io.stdout = Tee(self.cout, channel='stdout') # dbg 304 | # io.stderr = Tee(self.cout, channel='stderr') # dbg 305 | 306 | # Store a few parts of IPython we'll need. 307 | self.IP = IP 308 | self.user_ns = self.IP.user_ns 309 | self.user_global_ns = self.IP.user_global_ns 310 | 311 | self.input = "" 312 | self.output = "" 313 | 314 | self.is_verbatim = False 315 | self.is_doctest = False 316 | self.is_suppress = False 317 | 318 | # Optionally, provide more detailed information to shell. 319 | self.directive = None 320 | 321 | # on the first call to the savefig decorator, we'll import 322 | # pyplot as plt so we can make a call to the plt.gcf().savefig 323 | self._pyplot_imported = False 324 | 325 | # Prepopulate the namespace. 326 | for line in exec_lines: 327 | self.process_input_line(line, store_history=False) 328 | 329 | def clear_cout(self): 330 | self.cout.seek(0) 331 | self.cout.truncate(0) 332 | 333 | def process_input_line(self, line, store_history=True): 334 | """process the input, capturing stdout""" 335 | 336 | stdout = sys.stdout 337 | splitter = self.IP.input_splitter 338 | try: 339 | sys.stdout = self.cout 340 | splitter.push(line) 341 | more = splitter.push_accepts_more() 342 | if not more: 343 | try: 344 | source_raw = splitter.source_raw_reset()[1] 345 | except Exception: 346 | # recent ipython #4504 347 | source_raw = splitter.raw_reset() 348 | self.IP.run_cell(source_raw, store_history=store_history) 349 | finally: 350 | sys.stdout = stdout 351 | 352 | def process_image(self, decorator): 353 | """ 354 | # build out an image directive like 355 | # .. image:: somefile.png 356 | # :width 4in 357 | # 358 | # from an input like 359 | # savefig somefile.png width=4in 360 | """ 361 | savefig_dir = self.savefig_dir 362 | source_dir = self.source_dir 363 | saveargs = decorator.split(" ") 364 | filename = saveargs[1] 365 | # insert relative path to image file in source 366 | outfile = os.path.relpath(os.path.join(savefig_dir, filename), source_dir) 367 | 368 | imagerows = [".. image:: %s" % outfile] 369 | 370 | for kwarg in saveargs[2:]: 371 | arg, val = kwarg.split("=") 372 | arg = arg.strip() 373 | val = val.strip() 374 | imagerows.append(f" :{arg}: {val}") 375 | 376 | image_file = os.path.basename(outfile) # only return file name 377 | image_directive = "\n".join(imagerows) 378 | return image_file, image_directive 379 | 380 | # Callbacks for each type of token 381 | def process_input(self, data, input_prompt, lineno): 382 | """ 383 | Process data block for INPUT token. 
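        Blocks may carry one of the pseudo-decorators handled below
        (@verbatim, @doctest, @suppress, @okexcept, @okwarning, @savefig),
        for example (illustrative)::

            @savefig my_figure.png width=4in
            In [1]: plt.plot([1, 2, 3])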
384 | 385 | """ 386 | decorator, input, rest = data 387 | image_file = None 388 | image_directive = None 389 | 390 | is_verbatim = decorator == "@verbatim" or self.is_verbatim 391 | is_doctest = (decorator is not None and decorator.startswith("@doctest")) or self.is_doctest 392 | is_suppress = decorator == "@suppress" or self.is_suppress 393 | is_okexcept = decorator == "@okexcept" or self.is_okexcept 394 | is_okwarning = decorator == "@okwarning" or self.is_okwarning 395 | is_savefig = decorator is not None and decorator.startswith("@savefig") 396 | 397 | # set the encodings to be used by DecodingStringIO 398 | # to convert the execution output into unicode if 399 | # needed. this attrib is set by IpythonDirective.run() 400 | # based on the specified block options, defaulting to ['ut 401 | self.cout.set_encodings(self.output_encoding) 402 | 403 | input_lines = input.split("\n") 404 | 405 | if len(input_lines) > 1 and input_lines[-1] != "": 406 | input_lines.append("") # make sure there's a blank line 407 | # so splitter buffer gets reset 408 | 409 | continuation = " %s:" % "".join(["."] * (len(str(lineno)) + 2)) 410 | 411 | if is_savefig: 412 | image_file, image_directive = self.process_image(decorator) 413 | 414 | ret = [] 415 | is_semicolon = False 416 | 417 | # Hold the execution count, if requested to do so. 418 | store_history = not (is_suppress and self.hold_count) 419 | 420 | # Note: catch_warnings is not thread safe 421 | with warnings.catch_warnings(record=True) as ws: 422 | for i, line in enumerate(input_lines): 423 | if line.endswith(";"): 424 | is_semicolon = True 425 | 426 | if i == 0: 427 | # process the first input line 428 | if is_verbatim: 429 | self.process_input_line("") 430 | self.IP.execution_count += 1 # increment it anyway 431 | else: 432 | # only submit the line in non-verbatim mode 433 | self.process_input_line(line, store_history=store_history) 434 | formatted_line = f"{input_prompt} {line}" 435 | else: 436 | # process a continuation line 437 | if not is_verbatim: 438 | self.process_input_line(line, store_history=store_history) 439 | 440 | formatted_line = f"{continuation} {line}" 441 | 442 | if not is_suppress: 443 | ret.append(formatted_line) 444 | 445 | if not is_suppress and len(rest.strip()) and is_verbatim: 446 | # the "rest" is the standard output of the 447 | # input, which needs to be added in 448 | # verbatim mode 449 | ret.append(rest) 450 | 451 | self.cout.seek(0) 452 | output = self.cout.read() 453 | if not is_suppress and not is_semicolon: 454 | ret.append(output) 455 | elif is_semicolon: # get spacing right 456 | ret.append("") 457 | 458 | # context information 459 | filename = self.state.document.current_source 460 | lineno = self.state.document.current_line 461 | 462 | # output any exceptions raised during execution to stdout 463 | # unless :okexcept: has been specified. 464 | if not is_okexcept and "Traceback" in output: 465 | s = f"\nException in {filename} at block ending on line {lineno}\n" 466 | s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n" 467 | sys.stdout.write("\n\n>>>" + ("-" * 73)) 468 | sys.stdout.write(s) 469 | sys.stdout.write(output) 470 | sys.stdout.write("<<<" + ("-" * 73) + "\n\n") 471 | 472 | # output any warning raised during execution to stdout 473 | # unless :okwarning: has been specified. 
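        # For example, a block that intentionally raises a warning can opt
        # out of this report with something like (illustrative)::
        #
        #   .. ipython::
        #      :okwarning:
        #
        #      In [1]: warnings.warn("this warning is expected")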
474 | if not is_okwarning: 475 | for w in ws: 476 | s = f"\nWarning in {filename} at block ending on line {lineno}\n" 477 | s += ( 478 | "Specify :okwarning: as an option in " 479 | "the ipython:: block to suppress this message\n" 480 | ) 481 | sys.stdout.write("\n\n>>>" + ("-" * 73)) 482 | sys.stdout.write(s) 483 | sys.stdout.write("-" * 76 + "\n") 484 | s = warnings.formatwarning(w.message, w.category, w.filename, w.lineno, w.line) 485 | sys.stdout.write(s) 486 | sys.stdout.write("<<<" + ("-" * 73) + "\n") 487 | 488 | self.cout.truncate(0) 489 | return (ret, input_lines, output, is_doctest, decorator, image_file, image_directive) 490 | 491 | def process_output( 492 | self, data, output_prompt, input_lines, output, is_doctest, decorator, image_file 493 | ): 494 | """ 495 | Process data block for OUTPUT token. 496 | 497 | """ 498 | TAB = " " * 4 499 | 500 | if is_doctest and output is not None: 501 | found = output 502 | found = found.strip() 503 | submitted = data.strip() 504 | 505 | if self.directive is None: 506 | source = "Unavailable" 507 | content = "Unavailable" 508 | else: 509 | source = self.directive.state.document.current_source 510 | content = self.directive.content 511 | # Add tabs and join into a single string. 512 | content = "\n".join([TAB + line for line in content]) 513 | 514 | # Make sure the output contains the output prompt. 515 | ind = found.find(output_prompt) 516 | if ind < 0: 517 | e = ( 518 | "output does not contain output prompt\n\n" 519 | "Document source: {0}\n\n" 520 | "Raw content: \n{1}\n\n" 521 | "Input line(s):\n{TAB}{2}\n\n" 522 | "Output line(s):\n{TAB}{3}\n\n" 523 | ) 524 | e = e.format(source, content, "\n".join(input_lines), repr(found), TAB=TAB) 525 | raise RuntimeError(e) 526 | found = found[len(output_prompt) :].strip() 527 | 528 | # Handle the actual doctest comparison. 529 | if decorator.strip() == "@doctest": 530 | # Standard doctest 531 | if found != submitted: 532 | e = ( 533 | "doctest failure\n\n" 534 | "Document source: {0}\n\n" 535 | "Raw content: \n{1}\n\n" 536 | "On input line(s):\n{TAB}{2}\n\n" 537 | "we found output:\n{TAB}{3}\n\n" 538 | "instead of the expected:\n{TAB}{4}\n\n" 539 | ) 540 | e = e.format( 541 | source, 542 | content, 543 | "\n".join(input_lines), 544 | repr(found), 545 | repr(submitted), 546 | TAB=TAB, 547 | ) 548 | raise RuntimeError(e) 549 | else: 550 | self.custom_doctest(decorator, input_lines, found, submitted) 551 | 552 | def process_comment(self, data): 553 | """Process data fPblock for COMMENT token.""" 554 | if not self.is_suppress: 555 | return [data] 556 | 557 | def save_image(self, image_file): 558 | """ 559 | Saves the image file to disk. 
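        The file name comes from the @savefig pseudo-decorator; for
        ``@savefig myplot.png`` the command run in the embedded shell is
        roughly::

            plt.gcf().savefig("myplot.png", bbox_inches="tight", dpi=100)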
560 | """ 561 | self.ensure_pyplot() 562 | command = 'plt.gcf().savefig("%s", bbox_inches="tight", ' "dpi=100)" % image_file 563 | 564 | # print 'SAVEFIG', command # dbg 565 | self.process_input_line("bookmark ipy_thisdir", store_history=False) 566 | self.process_input_line("cd -b ipy_savedir", store_history=False) 567 | self.process_input_line(command, store_history=False) 568 | self.process_input_line("cd -b ipy_thisdir", store_history=False) 569 | self.process_input_line("bookmark -d ipy_thisdir", store_history=False) 570 | self.clear_cout() 571 | 572 | def process_block(self, block): 573 | """ 574 | process block from the block_parser and return a list of processed lines 575 | """ 576 | ret = [] 577 | output = None 578 | input_lines = None 579 | lineno = self.IP.execution_count 580 | 581 | input_prompt = self.promptin % lineno 582 | output_prompt = self.promptout % lineno 583 | image_file = None 584 | image_directive = None 585 | 586 | for token, data in block: 587 | if token == COMMENT: 588 | out_data = self.process_comment(data) 589 | elif token == INPUT: 590 | ( 591 | out_data, 592 | input_lines, 593 | output, 594 | is_doctest, 595 | decorator, 596 | image_file, 597 | image_directive, 598 | ) = self.process_input(data, input_prompt, lineno) 599 | elif token == OUTPUT: 600 | out_data = self.process_output( 601 | data, output_prompt, input_lines, output, is_doctest, decorator, image_file 602 | ) 603 | if out_data: 604 | ret.extend(out_data) 605 | 606 | # save the image files 607 | if image_file is not None: 608 | self.save_image(image_file) 609 | 610 | return ret, image_directive 611 | 612 | def ensure_pyplot(self): 613 | """ 614 | Ensures that pyplot has been imported into the embedded IPython shell. 615 | 616 | Also, makes sure to set the backend appropriately if not set already. 617 | 618 | """ 619 | # We are here if the @figure pseudo decorator was used. Thus, it's 620 | # possible that we could be here even if python_mplbackend were set to 621 | # `None`. That's also strange and perhaps worthy of raising an 622 | # exception, but for now, we just set the backend to 'agg'. 623 | 624 | if not self._pyplot_imported: 625 | if "matplotlib.backends" not in sys.modules: 626 | # Then ipython_matplotlib was set to None but there was a 627 | # call to the @figure decorator (and ipython_execlines did 628 | # not set a backend). 629 | # raise Exception("No backend was set, but @figure was used!") 630 | import matplotlib 631 | 632 | matplotlib.use("agg") 633 | 634 | # Always import pyplot into embedded shell. 635 | self.process_input_line("import matplotlib.pyplot as plt", store_history=False) 636 | self._pyplot_imported = True 637 | 638 | def process_pure_python(self, content): 639 | """ 640 | content is a list of strings. 
it is unedited directive content 641 | 642 | This runs it line by line in the InteractiveShell, prepends 643 | prompts as needed capturing stderr and stdout, then returns 644 | the content as a list as if it were ipython code 645 | """ 646 | output = [] 647 | savefig = False # keep up with this to clear figure 648 | multiline = False # to handle line continuation 649 | multiline_start = None 650 | fmtin = self.promptin 651 | 652 | ct = 0 653 | 654 | for lineno, line in enumerate(content): 655 | line_stripped = line.strip() 656 | if not len(line): 657 | output.append(line) 658 | continue 659 | 660 | # handle decorators 661 | if line_stripped.startswith("@"): 662 | output.extend([line]) 663 | if "savefig" in line: 664 | savefig = True # and need to clear figure 665 | continue 666 | 667 | # handle comments 668 | if line_stripped.startswith("#"): 669 | output.extend([line]) 670 | continue 671 | 672 | # deal with lines checking for multiline 673 | continuation = " %s:" % "".join(["."] * (len(str(ct)) + 2)) 674 | if not multiline: 675 | modified = f"{fmtin % ct} {line_stripped}" 676 | output.append(modified) 677 | ct += 1 678 | try: 679 | ast.parse(line_stripped) 680 | output.append("") 681 | except Exception: # on a multiline 682 | multiline = True 683 | multiline_start = lineno 684 | else: # still on a multiline 685 | modified = f"{continuation} {line}" 686 | output.append(modified) 687 | 688 | # if the next line is indented, it should be part of multiline 689 | if len(content) > lineno + 1: 690 | nextline = content[lineno + 1] 691 | if len(nextline) - len(nextline.lstrip()) > 3: 692 | continue 693 | try: 694 | mod = ast.parse("\n".join(content[multiline_start : lineno + 1])) 695 | if isinstance(mod.body[0], ast.FunctionDef): 696 | # check to see if we have the whole function 697 | for element in mod.body[0].body: 698 | if isinstance(element, ast.Return): 699 | multiline = False 700 | else: 701 | output.append("") 702 | multiline = False 703 | except Exception: 704 | pass 705 | 706 | if savefig: # clear figure if plotted 707 | self.ensure_pyplot() 708 | self.process_input_line("plt.clf()", store_history=False) 709 | self.clear_cout() 710 | savefig = False 711 | 712 | return output 713 | 714 | def custom_doctest(self, decorator, input_lines, found, submitted): 715 | """ 716 | Perform a specialized doctest. 
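        The decorator names the handler to dispatch to, e.g. ``@doctest float``
        looks up the ``float`` entry in ``custom_doctests.doctests``::

            @doctest float
            In [1]: 0.1 + 0.2
            Out[1]: 0.3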
717 | 718 | """ 719 | from docs.sphinxext.custom_doctests import doctests 720 | 721 | args = decorator.split() 722 | doctest_type = args[1] 723 | if doctest_type in doctests: 724 | doctests[doctest_type](self, args, input_lines, found, submitted) 725 | else: 726 | e = f"Invalid option to @doctest: {doctest_type}" 727 | raise Exception(e) 728 | 729 | 730 | class IPythonDirective(Directive): 731 | has_content = True 732 | required_arguments = 0 733 | optional_arguments = 4 # python, suppress, verbatim, doctest 734 | final_argumuent_whitespace = True 735 | option_spec = { 736 | "python": directives.unchanged, 737 | "suppress": directives.flag, 738 | "verbatim": directives.flag, 739 | "doctest": directives.flag, 740 | "okexcept": directives.flag, 741 | "okwarning": directives.flag, 742 | "output_encoding": directives.unchanged_required, 743 | } 744 | 745 | shell = None 746 | 747 | seen_docs = set() 748 | 749 | def get_config_options(self): 750 | # contains sphinx configuration variables 751 | config = self.state.document.settings.env.config 752 | 753 | # get config variables to set figure output directory 754 | confdir = self.state.document.settings.env.app.confdir 755 | savefig_dir = config.ipython_savefig_dir 756 | source_dir = os.path.dirname(self.state.document.current_source) 757 | if savefig_dir is None: 758 | savefig_dir = config.html_static_path 759 | if isinstance(savefig_dir, list): 760 | savefig_dir = savefig_dir[0] # safe to assume only one path? 761 | savefig_dir = os.path.join(confdir, savefig_dir) 762 | 763 | # get regex and prompt stuff 764 | rgxin = config.ipython_rgxin 765 | rgxout = config.ipython_rgxout 766 | promptin = config.ipython_promptin 767 | promptout = config.ipython_promptout 768 | mplbackend = config.ipython_mplbackend 769 | exec_lines = config.ipython_execlines 770 | hold_count = config.ipython_holdcount 771 | 772 | return ( 773 | savefig_dir, 774 | source_dir, 775 | rgxin, 776 | rgxout, 777 | promptin, 778 | promptout, 779 | mplbackend, 780 | exec_lines, 781 | hold_count, 782 | ) 783 | 784 | def setup(self): 785 | # Get configuration values. 786 | ( 787 | savefig_dir, 788 | source_dir, 789 | rgxin, 790 | rgxout, 791 | promptin, 792 | promptout, 793 | mplbackend, 794 | exec_lines, 795 | hold_count, 796 | ) = self.get_config_options() 797 | 798 | if self.shell is None: 799 | # We will be here many times. However, when the 800 | # EmbeddedSphinxShell is created, its interactive shell member 801 | # is the same for each instance. 802 | 803 | if mplbackend: 804 | import matplotlib 805 | 806 | # Repeated calls to use() will not hurt us since `mplbackend` 807 | # is the same each time. 808 | matplotlib.use(mplbackend) 809 | 810 | # Must be called after (potentially) importing matplotlib and 811 | # setting its backend since exec_lines might import pylab. 812 | self.shell = EmbeddedSphinxShell(exec_lines, self.state) 813 | 814 | # Store IPython directive to enable better error messages 815 | self.shell.directive = self 816 | 817 | # reset the execution count if we haven't processed this doc 818 | # NOTE: this may be borked if there are multiple seen_doc tmp files 819 | # check time stamp? 
820 | if self.state.document.current_source not in self.seen_docs: 821 | self.shell.IP.history_manager.reset() 822 | self.shell.IP.execution_count = 1 823 | self.shell.IP.prompt_manager.width = 0 824 | self.seen_docs.add(self.state.document.current_source) 825 | 826 | # and attach to shell so we don't have to pass them around 827 | self.shell.rgxin = rgxin 828 | self.shell.rgxout = rgxout 829 | self.shell.promptin = promptin 830 | self.shell.promptout = promptout 831 | self.shell.savefig_dir = savefig_dir 832 | self.shell.source_dir = source_dir 833 | self.shell.hold_count = hold_count 834 | 835 | # setup bookmark for saving figures directory 836 | self.shell.process_input_line("bookmark ipy_savedir %s" % savefig_dir, store_history=False) 837 | self.shell.clear_cout() 838 | 839 | return rgxin, rgxout, promptin, promptout 840 | 841 | def teardown(self): 842 | # delete last bookmark 843 | self.shell.process_input_line("bookmark -d ipy_savedir", store_history=False) 844 | self.shell.clear_cout() 845 | 846 | def run(self): 847 | debug = False 848 | 849 | # TODO, any reason block_parser can't be a method of embeddable shell 850 | # then we wouldn't have to carry these around 851 | rgxin, rgxout, promptin, promptout = self.setup() 852 | 853 | options = self.options 854 | self.shell.is_suppress = "suppress" in options 855 | self.shell.is_doctest = "doctest" in options 856 | self.shell.is_verbatim = "verbatim" in options 857 | self.shell.is_okexcept = "okexcept" in options 858 | self.shell.is_okwarning = "okwarning" in options 859 | 860 | self.shell.output_encoding = [options.get("output_encoding", "utf8")] 861 | 862 | # handle pure python code 863 | if "python" in self.arguments: 864 | content = self.content 865 | self.content = self.shell.process_pure_python(content) 866 | 867 | parts = "\n".join(self.content).split("\n\n") 868 | 869 | lines = [".. code-block:: ipython", ""] 870 | figures = [] 871 | 872 | for part in parts: 873 | block = block_parser(part, rgxin, rgxout, promptin, promptout) 874 | if len(block): 875 | rows, figure = self.shell.process_block(block) 876 | for row in rows: 877 | lines.extend([" %s" % line for line in row.split("\n")]) 878 | 879 | if figure is not None: 880 | figures.append(figure) 881 | 882 | for figure in figures: 883 | lines.append("") 884 | lines.extend(figure.split("\n")) 885 | lines.append("") 886 | 887 | if len(lines) > 2: 888 | if debug: 889 | print("\n".join(lines)) 890 | else: 891 | # This has to do with input, not output. But if we comment 892 | # these lines out, then no IPython code will appear in the 893 | # final output. 894 | self.state_machine.insert_input(lines, self.state_machine.input_lines.source(0)) 895 | 896 | # cleanup 897 | self.teardown() 898 | 899 | return [] 900 | 901 | 902 | # Enable as a proper Sphinx directive 903 | def setup(app): 904 | setup.app = app 905 | 906 | app.add_directive("ipython", IPythonDirective) 907 | app.add_config_value("ipython_savefig_dir", None, "env") 908 | app.add_config_value("ipython_rgxin", re.compile(r"In \[(\d+)\]:\s?(.*)\s*"), "env") 909 | app.add_config_value("ipython_rgxout", re.compile(r"Out\[(\d+)\]:\s?(.*)\s*"), "env") 910 | app.add_config_value("ipython_promptin", "In [%d]:", "env") 911 | app.add_config_value("ipython_promptout", "Out[%d]:", "env") 912 | 913 | # We could just let matplotlib pick whatever is specified as the default 914 | # backend in the matplotlibrc file, but this would cause issues if the 915 | # backend didn't work in headless environments. 
For this reason, 'agg' 916 | # is a good default backend choice. 917 | app.add_config_value("ipython_mplbackend", "agg", "env") 918 | 919 | # If the user sets this config value to `None`, then EmbeddedSphinxShell's 920 | # __init__ method will treat it as []. 921 | execlines = ["import numpy as np", "import matplotlib.pyplot as plt"] 922 | app.add_config_value("ipython_execlines", execlines, "env") 923 | 924 | app.add_config_value("ipython_holdcount", True, "env") 925 | 926 | 927 | # Simple smoke test, needs to be converted to a proper automatic test. 928 | def test(): 929 | examples = [ 930 | r""" 931 | In [9]: pwd 932 | Out[9]: '/home/jdhunter/py4science/book' 933 | 934 | In [10]: cd bookdata/ 935 | /home/jdhunter/py4science/book/bookdata 936 | 937 | In [2]: from pylab import * 938 | 939 | In [2]: ion() 940 | 941 | In [3]: im = imread('stinkbug.png') 942 | 943 | @savefig mystinkbug.png width=4in 944 | In [4]: imshow(im) 945 | Out[4]: 946 | 947 | """, 948 | r""" 949 | 950 | In [1]: x = 'hello world' 951 | 952 | # string methods can be 953 | # used to alter the string 954 | @doctest 955 | In [2]: x.upper() 956 | Out[2]: 'HELLO WORLD' 957 | 958 | @verbatim 959 | In [3]: x.st 960 | x.startswith x.strip 961 | """, 962 | r""" 963 | 964 | In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\ 965 | .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv' 966 | 967 | In [131]: print url.split('&') 968 | ['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 969 | 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv'] 970 | 971 | In [60]: import urllib 972 | 973 | """, 974 | r"""\ 975 | 976 | In [133]: import numpy.random 977 | 978 | @suppress 979 | In [134]: numpy.random.seed(2358) 980 | 981 | @doctest 982 | In [135]: numpy.random.rand(10,2) 983 | Out[135]: 984 | array([[ 0.64524308, 0.59943846], 985 | [ 0.47102322, 0.8715456 ], 986 | [ 0.29370834, 0.74776844], 987 | [ 0.99539577, 0.1313423 ], 988 | [ 0.16250302, 0.21103583], 989 | [ 0.81626524, 0.1312433 ], 990 | [ 0.67338089, 0.72302393], 991 | [ 0.7566368 , 0.07033696], 992 | [ 0.22591016, 0.77731835], 993 | [ 0.0072729 , 0.34273127]]) 994 | 995 | """, 996 | r""" 997 | In [106]: print x 998 | jdh 999 | 1000 | In [109]: for i in range(10): 1001 | .....: print i 1002 | .....: 1003 | .....: 1004 | 0 1005 | 1 1006 | 2 1007 | 3 1008 | 4 1009 | 5 1010 | 6 1011 | 7 1012 | 8 1013 | 9 1014 | """, 1015 | r""" 1016 | 1017 | In [144]: from pylab import * 1018 | 1019 | In [145]: ion() 1020 | 1021 | # use a semicolon to suppress the output 1022 | @savefig test_hist.png width=4in 1023 | In [151]: hist(np.random.randn(10000), 100); 1024 | 1025 | 1026 | @savefig test_plot.png width=4in 1027 | In [151]: plot(np.random.randn(10000), 'o'); 1028 | """, 1029 | r""" 1030 | # use a semicolon to suppress the output 1031 | In [151]: plt.clf() 1032 | 1033 | @savefig plot_simple.png width=4in 1034 | In [151]: plot([1,2,3]) 1035 | 1036 | @savefig hist_simple.png width=4in 1037 | In [151]: hist(np.random.randn(10000), 100); 1038 | 1039 | """, 1040 | r""" 1041 | # update the current fig 1042 | In [151]: ylabel('number') 1043 | 1044 | In [152]: title('normal distribution') 1045 | 1046 | 1047 | @savefig hist_with_text.png 1048 | In [153]: grid(True) 1049 | 1050 | @doctest float 1051 | In [154]: 0.1 + 0.2 1052 | Out[154]: 0.3 1053 | 1054 | @doctest float 1055 | In [155]: np.arange(16).reshape(4,4) 1056 | Out[155]: 1057 | array([[ 0, 1, 2, 3], 1058 | [ 4, 5, 6, 7], 1059 | [ 8, 9, 10, 11], 1060 | [12, 13, 14, 15]]) 1061 | 1062 | In [1]: x = 
np.arange(16, dtype=float).reshape(4,4) 1063 | 1064 | In [2]: x[0,0] = np.inf 1065 | 1066 | In [3]: x[0,1] = np.nan 1067 | 1068 | @doctest float 1069 | In [4]: x 1070 | Out[4]: 1071 | array([[ inf, nan, 2., 3.], 1072 | [ 4., 5., 6., 7.], 1073 | [ 8., 9., 10., 11.], 1074 | [ 12., 13., 14., 15.]]) 1075 | 1076 | 1077 | """, 1078 | ] 1079 | # skip local-file depending first example: 1080 | examples = examples[1:] 1081 | 1082 | # ipython_directive.DEBUG = True # dbg 1083 | # options = dict(suppress=True) # dbg 1084 | options = dict() 1085 | for example in examples: 1086 | content = example.split("\n") 1087 | IPythonDirective( 1088 | "debug", 1089 | arguments=None, 1090 | options=options, 1091 | content=content, 1092 | lineno=0, 1093 | content_offset=None, 1094 | block_text=None, 1095 | state=None, 1096 | state_machine=None, 1097 | ) 1098 | 1099 | 1100 | # Run test suite as a script 1101 | if __name__ == "__main__": 1102 | if not os.path.isdir("_static"): 1103 | os.mkdir("_static") 1104 | test() 1105 | print("All OK? Check figures in _static/") 1106 | -------------------------------------------------------------------------------- /docs/sphinxext/plot_directive.py: -------------------------------------------------------------------------------- 1 | # noqa 2 | """ 3 | A directive for including a matplotlib plot in a Sphinx document. 4 | 5 | By default, in HTML output, `plot` will include a .png file with a 6 | link to a high-res .png and .pdf. In LaTeX output, it will include a 7 | .pdf. 8 | 9 | The source code for the plot may be included in one of three ways: 10 | 11 | 1. **A path to a source file** as the argument to the directive:: 12 | 13 | .. plot:: path/to/plot.py 14 | 15 | When a path to a source file is given, the content of the 16 | directive may optionally contain a caption for the plot:: 17 | 18 | .. plot:: path/to/plot.py 19 | 20 | This is the caption for the plot 21 | 22 | Additionally, one my specify the name of a function to call (with 23 | no arguments) immediately after importing the module:: 24 | 25 | .. plot:: path/to/plot.py plot_function1 26 | 27 | 2. Included as **inline content** to the directive:: 28 | 29 | .. plot:: 30 | 31 | import matplotlib.pyplot as plt 32 | import matplotlib.image as mpimg 33 | import numpy as np 34 | img = mpimg.imread('_static/stinkbug.png') 35 | imgplot = plt.imshow(img) 36 | 37 | 3. Using **doctest** syntax:: 38 | 39 | .. plot:: 40 | A plotting example: 41 | >>> import matplotlib.pyplot as plt 42 | >>> plt.plot([1,2,3], [4,5,6]) 43 | 44 | Options 45 | ------- 46 | 47 | The ``plot`` directive supports the following options: 48 | 49 | format : {'python', 'doctest'} 50 | Specify the format of the input 51 | 52 | include-source : bool 53 | Whether to display the source code. The default can be changed 54 | using the `plot_include_source` variable in conf.py 55 | 56 | encoding : str 57 | If this source file is in a non-UTF8 or non-ASCII encoding, 58 | the encoding must be specified using the `:encoding:` option. 59 | The encoding will not be inferred using the ``-*- coding -*-`` 60 | metacomment. 61 | 62 | context : bool or str 63 | If provided, the code will be run in the context of all 64 | previous plot directives for which the `:context:` option was 65 | specified. This only applies to inline code plot directives, 66 | not those run from files. If the ``:context: reset`` option is 67 | specified, the context is reset for this and future plots, and 68 | previous figures are closed prior to running the code. 
69 | ``:context:close-figs`` keeps the context but closes previous figures 70 | before running the code. 71 | 72 | nofigs : bool 73 | If specified, the code block will be run, but no figures will 74 | be inserted. This is usually useful with the ``:context:`` 75 | option. 76 | 77 | Additionally, this directive supports all of the options of the 78 | `image` directive, except for `target` (since plot will add its own 79 | target). These include `alt`, `height`, `width`, `scale`, `align` and 80 | `class`. 81 | 82 | Configuration options 83 | --------------------- 84 | 85 | The plot directive has the following configuration options: 86 | 87 | plot_include_source 88 | Default value for the include-source option 89 | 90 | plot_html_show_source_link 91 | Whether to show a link to the source in HTML. 92 | 93 | plot_pre_code 94 | Code that should be executed before each plot. 95 | 96 | plot_basedir 97 | Base directory, to which ``plot::`` file names are relative 98 | to. (If None or empty, file names are relative to the 99 | directory where the file containing the directive is.) 100 | 101 | plot_formats 102 | File formats to generate. List of tuples or strings:: 103 | 104 | [(suffix, dpi), suffix, ...] 105 | 106 | that determine the file format and the DPI. For entries whose 107 | DPI was omitted, sensible defaults are chosen. 108 | 109 | plot_html_show_formats 110 | Whether to show links to the files in HTML. 111 | 112 | plot_rcparams 113 | A dictionary containing any non-standard rcParams that should 114 | be applied before each plot. 115 | 116 | plot_apply_rcparams 117 | By default, rcParams are applied when `context` option is not used in 118 | a plot directive. This configuration option overrides this behavior 119 | and applies rcParams before each plot. 120 | 121 | plot_working_directory 122 | By default, the working directory will be changed to the directory of 123 | the example, so the code can get at its data files, if any. Also its 124 | path will be added to `sys.path` so it can import any helper modules 125 | sitting beside it. This configuration option can be used to specify 126 | a central directory (also added to `sys.path`) where data files and 127 | helper modules for all code are located. 128 | 129 | plot_template 130 | Provide a customized template for preparing restructured text. 
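A minimal conf.py configuration might look like this (the values shown are
illustrative only)::

    plot_include_source = True
    plot_html_show_formats = False
    plot_formats = [("png", 100), "pdf"]
    plot_rcparams = {"figure.figsize": (6, 4)}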
131 | """ 132 | 133 | import io 134 | import os 135 | import re 136 | import shutil 137 | import sys 138 | import textwrap 139 | import traceback 140 | from os.path import relpath 141 | 142 | import sphinx 143 | from docutils.parsers.rst import directives 144 | from docutils.parsers.rst.directives.images import Image 145 | 146 | align = Image.align 147 | 148 | sphinx_version = sphinx.__version__.split(".") 149 | # The split is necessary for sphinx beta versions where the string is 150 | # '6b1' 151 | sphinx_version = tuple([int(re.split("[^0-9]", x)[0]) for x in sphinx_version[:2]]) 152 | 153 | try: 154 | # Sphinx depends on either Jinja or Jinja2 155 | import jinja2 156 | 157 | def format_template(template, **kw): 158 | return jinja2.Template(template).render(**kw) 159 | except ImportError: 160 | import jinja 161 | 162 | def format_template(template, **kw): 163 | return jinja.from_string(template, **kw) 164 | 165 | 166 | import matplotlib # noqa 167 | import matplotlib.cbook as cbook # noqa 168 | 169 | matplotlib.use("Agg") 170 | import matplotlib.pyplot as plt # noqa 171 | from matplotlib import _pylab_helpers # noqa 172 | 173 | __version__ = 2 174 | 175 | # ------------------------------------------------------------------------------ 176 | # Registration hook 177 | # ------------------------------------------------------------------------------ 178 | 179 | 180 | def plot_directive( 181 | name, arguments, options, content, lineno, content_offset, block_text, state, state_machine 182 | ): 183 | return run(arguments, content, options, state_machine, state, lineno) 184 | 185 | 186 | plot_directive.__doc__ = __doc__ 187 | 188 | 189 | def _option_boolean(arg): 190 | if not arg or not arg.strip(): 191 | # no argument given, assume used as a flag 192 | return True 193 | elif arg.strip().lower() in ("no", "0", "false"): 194 | return False 195 | elif arg.strip().lower() in ("yes", "1", "true"): 196 | return True 197 | else: 198 | raise ValueError('"%s" unknown boolean' % arg) 199 | 200 | 201 | def _option_context(arg): 202 | if arg in [None, "reset", "close-figs"]: 203 | return arg 204 | raise ValueError("argument should be None or 'reset' or 'close-figs'") 205 | 206 | 207 | def _option_format(arg): 208 | return directives.choice(arg, ("python", "doctest")) 209 | 210 | 211 | def _option_align(arg): 212 | return directives.choice(arg, ("top", "middle", "bottom", "left", "center", "right")) 213 | 214 | 215 | def mark_plot_labels(app, document): 216 | """ 217 | To make plots referenceable, we need to move the reference from 218 | the "htmlonly" (or "latexonly") node to the actual figure node 219 | itself. 
220 | """ 221 | for name, explicit in document.nametypes.items(): 222 | if not explicit: 223 | continue 224 | labelid = document.nameids[name] 225 | if labelid is None: 226 | continue 227 | node = document.ids[labelid] 228 | if node.tagname in ("html_only", "latex_only"): 229 | for n in node: 230 | if n.tagname == "figure": 231 | sectname = name 232 | for c in n: 233 | if c.tagname == "caption": 234 | sectname = c.astext() 235 | break 236 | 237 | node["ids"].remove(labelid) 238 | node["names"].remove(name) 239 | n["ids"].append(labelid) 240 | n["names"].append(name) 241 | document.settings.env.labels[name] = ( 242 | document.settings.env.docname, 243 | labelid, 244 | sectname, 245 | ) 246 | break 247 | 248 | 249 | def setup(app): 250 | setup.app = app 251 | setup.config = app.config 252 | setup.confdir = app.confdir 253 | 254 | options = { 255 | "alt": directives.unchanged, 256 | "height": directives.length_or_unitless, 257 | "width": directives.length_or_percentage_or_unitless, 258 | "scale": directives.nonnegative_int, 259 | "align": _option_align, 260 | "class": directives.class_option, 261 | "include-source": _option_boolean, 262 | "format": _option_format, 263 | "context": _option_context, 264 | "nofigs": directives.flag, 265 | "encoding": directives.encoding, 266 | } 267 | 268 | app.add_directive("plot", plot_directive, True, (0, 2, False), **options) 269 | app.add_config_value("plot_pre_code", None, True) 270 | app.add_config_value("plot_include_source", False, True) 271 | app.add_config_value("plot_html_show_source_link", True, True) 272 | app.add_config_value("plot_formats", ["png", "hires.png", "pdf"], True) 273 | app.add_config_value("plot_basedir", None, True) 274 | app.add_config_value("plot_html_show_formats", True, True) 275 | app.add_config_value("plot_rcparams", {}, True) 276 | app.add_config_value("plot_apply_rcparams", False, True) 277 | app.add_config_value("plot_working_directory", None, True) 278 | app.add_config_value("plot_template", None, True) 279 | 280 | app.connect("doctree-read", mark_plot_labels) 281 | 282 | 283 | # ------------------------------------------------------------------------------ 284 | # Doctest handling 285 | # ------------------------------------------------------------------------------ 286 | 287 | 288 | def contains_doctest(text): 289 | try: 290 | # check if it's valid Python as-is 291 | compile(text, "", "exec") 292 | return False 293 | except SyntaxError: 294 | pass 295 | r = re.compile(r"^\s*>>>", re.M) 296 | m = r.search(text) 297 | return bool(m) 298 | 299 | 300 | def unescape_doctest(text): 301 | """ 302 | Extract code from a piece of text, which contains either Python code 303 | or doctests. 304 | 305 | """ 306 | if not contains_doctest(text): 307 | return text 308 | 309 | code = "" 310 | for line in text.split("\n"): 311 | m = re.match(r"^\s*(>>>|\.\.\.) 
(.*)$", line) 312 | if m: 313 | code += m.group(2) + "\n" 314 | elif line.strip(): 315 | code += "# " + line.strip() + "\n" 316 | else: 317 | code += "\n" 318 | return code 319 | 320 | 321 | def split_code_at_show(text): 322 | """ 323 | Split code at plt.show() 324 | 325 | """ 326 | 327 | parts = [] 328 | is_doctest = contains_doctest(text) 329 | 330 | part = [] 331 | for line in text.split("\n"): 332 | if (not is_doctest and line.strip() == "plt.show()") or ( 333 | is_doctest and line.strip() == ">>> plt.show()" 334 | ): 335 | part.append(line) 336 | parts.append("\n".join(part)) 337 | part = [] 338 | else: 339 | part.append(line) 340 | if "\n".join(part).strip(): 341 | parts.append("\n".join(part)) 342 | return parts 343 | 344 | 345 | def remove_coding(text): 346 | """ 347 | Remove the coding comment, which six.exec_ doesn't like. 348 | """ 349 | sub_re = re.compile(r"^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE) 350 | return sub_re.sub("", text) 351 | 352 | 353 | # ------------------------------------------------------------------------------ 354 | # Template 355 | # ------------------------------------------------------------------------------ 356 | 357 | 358 | TEMPLATE = """ 359 | {{ source_code }} 360 | 361 | {{ only_html }} 362 | 363 | {% if source_link or (html_show_formats and not multi_image) %} 364 | ( 365 | {%- if source_link -%} 366 | `Source code <{{ source_link }}>`__ 367 | {%- endif -%} 368 | {%- if html_show_formats and not multi_image -%} 369 | {%- for img in images -%} 370 | {%- for fmt in img.formats -%} 371 | {%- if source_link or not loop.first -%}, {% endif -%} 372 | `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ 373 | {%- endfor -%} 374 | {%- endfor -%} 375 | {%- endif -%} 376 | ) 377 | {% endif %} 378 | 379 | {% for img in images %} 380 | .. figure:: {{ build_dir }}/{{ img.basename }}.png 381 | {% for option in options -%} 382 | {{ option }} 383 | {% endfor %} 384 | 385 | {% if html_show_formats and multi_image -%} 386 | ( 387 | {%- for fmt in img.formats -%} 388 | {%- if not loop.first -%}, {% endif -%} 389 | `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ 390 | {%- endfor -%} 391 | ) 392 | {%- endif -%} 393 | 394 | {{ caption }} 395 | {% endfor %} 396 | 397 | {{ only_latex }} 398 | 399 | {% for img in images %} 400 | {% if 'pdf' in img.formats -%} 401 | .. image:: {{ build_dir }}/{{ img.basename }}.pdf 402 | {% endif -%} 403 | {% endfor %} 404 | 405 | {{ only_texinfo }} 406 | 407 | {% for img in images %} 408 | .. image:: {{ build_dir }}/{{ img.basename }}.png 409 | {% for option in options -%} 410 | {{ option }} 411 | {% endfor %} 412 | 413 | {% endfor %} 414 | 415 | """ 416 | 417 | exception_template = """ 418 | .. htmlonly:: 419 | 420 | [`source code <%(linkdir)s/%(basename)s.py>`__] 421 | 422 | Exception occurred rendering plot. 423 | 424 | """ 425 | 426 | # the context of the plot for all directives specified with the 427 | # :context: option 428 | plot_context = dict() 429 | 430 | 431 | class ImageFile: 432 | def __init__(self, basename, dirname): 433 | self.basename = basename 434 | self.dirname = dirname 435 | self.formats = [] 436 | 437 | def filename(self, format): 438 | return os.path.join(self.dirname, f"{self.basename}.{format}") 439 | 440 | def filenames(self): 441 | return [self.filename(fmt) for fmt in self.formats] 442 | 443 | 444 | def out_of_date(original, derived): 445 | """ 446 | Returns True if derivative is out-of-date wrt original, 447 | both of which are full file paths. 
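    For example, ``out_of_date("plot.py", "plot.png")`` is True when
    plot.png is missing or older than plot.py.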
448 | """ 449 | return not os.path.exists(derived) or ( 450 | os.path.exists(original) and os.stat(derived).st_mtime < os.stat(original).st_mtime 451 | ) 452 | 453 | 454 | class PlotError(RuntimeError): 455 | pass 456 | 457 | 458 | def run_code(code, code_path, ns=None, function_name=None): 459 | """ 460 | Import a Python module from a path, and run the function given by 461 | name, if function_name is not None. 462 | """ 463 | 464 | # Change the working directory to the directory of the example, so 465 | # it can get at its data files, if any. Add its path to sys.path 466 | # so it can import any helper modules sitting beside it. 467 | pwd = os.getcwd() 468 | old_sys_path = list(sys.path) 469 | if setup.config.plot_working_directory is not None: 470 | try: 471 | os.chdir(setup.config.plot_working_directory) 472 | except OSError as err: 473 | raise OSError( 474 | str(err) + "\n`plot_working_directory` option in" 475 | "Sphinx configuration file must be a valid " 476 | "directory path" 477 | ) 478 | except TypeError as err: 479 | raise TypeError( 480 | str(err) + "\n`plot_working_directory` option in " 481 | "Sphinx configuration file must be a string or " 482 | "None" 483 | ) 484 | sys.path.insert(0, setup.config.plot_working_directory) 485 | elif code_path is not None: 486 | dirname = os.path.abspath(os.path.dirname(code_path)) 487 | os.chdir(dirname) 488 | sys.path.insert(0, dirname) 489 | 490 | # Reset sys.argv 491 | old_sys_argv = sys.argv 492 | sys.argv = [code_path] 493 | 494 | # Redirect stdout 495 | stdout = sys.stdout 496 | sys.stdout = io.StringIO() 497 | 498 | # Assign a do-nothing print function to the namespace. There 499 | # doesn't seem to be any other way to provide a way to (not) print 500 | # that works correctly across Python 2 and 3. 501 | def _dummy_print(*arg, **kwarg): 502 | pass 503 | 504 | try: 505 | try: 506 | code = unescape_doctest(code) 507 | if ns is None: 508 | ns = {} 509 | if not ns: 510 | if setup.config.plot_pre_code is None: 511 | exec( 512 | str("import numpy as np\n" + "from matplotlib import pyplot as plt\n"), 513 | ns, 514 | ) 515 | else: 516 | exec(str(setup.config.plot_pre_code), ns) 517 | ns["print"] = _dummy_print 518 | if "__main__" in code: 519 | exec("__name__ = '__main__'", ns) 520 | code = remove_coding(code) 521 | exec(code, ns) 522 | if function_name is not None: 523 | exec(function_name + "()", ns) 524 | except (Exception, SystemExit): 525 | raise PlotError(traceback.format_exc()) 526 | finally: 527 | os.chdir(pwd) 528 | sys.argv = old_sys_argv 529 | sys.path[:] = old_sys_path 530 | sys.stdout = stdout 531 | return ns 532 | 533 | 534 | def clear_state(plot_rcparams, close=True): 535 | if close: 536 | plt.close("all") 537 | matplotlib.rc_file_defaults() 538 | matplotlib.rcParams.update(plot_rcparams) 539 | 540 | 541 | def render_figures( 542 | code, 543 | code_path, 544 | output_dir, 545 | output_base, 546 | context, 547 | function_name, 548 | config, 549 | context_reset=False, 550 | close_figs=False, 551 | ): 552 | """ 553 | Run a pyplot script and save the low and high res PNGs and a PDF 554 | in *output_dir*. 
555 | 556 | Save the images under *output_dir* with file names derived from 557 | *output_base* 558 | """ 559 | # -- Parse format list 560 | default_dpi = {"png": 80, "hires.png": 200, "pdf": 200} 561 | formats = [] 562 | plot_formats = config.plot_formats 563 | if isinstance(plot_formats, str): 564 | plot_formats = eval(plot_formats) 565 | for fmt in plot_formats: 566 | if isinstance(fmt, str): 567 | formats.append((fmt, default_dpi.get(fmt, 80))) 568 | elif type(fmt) in (tuple, list) and len(fmt) == 2: 569 | formats.append((str(fmt[0]), int(fmt[1]))) 570 | else: 571 | raise PlotError('invalid image format "%r" in plot_formats' % fmt) 572 | 573 | # -- Try to determine if all images already exist 574 | 575 | code_pieces = split_code_at_show(code) 576 | 577 | # Look for single-figure output files first 578 | all_exists = True 579 | img = ImageFile(output_base, output_dir) 580 | for format, dpi in formats: 581 | if out_of_date(code_path, img.filename(format)): 582 | all_exists = False 583 | break 584 | img.formats.append(format) 585 | 586 | if all_exists: 587 | return [(code, [img])] 588 | 589 | # Then look for multi-figure output files 590 | results = [] 591 | all_exists = True 592 | for i, code_piece in enumerate(code_pieces): 593 | images = [] 594 | for j in range(1000): 595 | if len(code_pieces) > 1: 596 | img = ImageFile("%s_%02d_%02d" % (output_base, i, j), output_dir) 597 | else: 598 | img = ImageFile("%s_%02d" % (output_base, j), output_dir) 599 | for format, dpi in formats: 600 | if out_of_date(code_path, img.filename(format)): 601 | all_exists = False 602 | break 603 | img.formats.append(format) 604 | 605 | # assume that if we have one, we have them all 606 | if not all_exists: 607 | all_exists = j > 0 608 | break 609 | images.append(img) 610 | if not all_exists: 611 | break 612 | results.append((code_piece, images)) 613 | 614 | if all_exists: 615 | return results 616 | 617 | # We didn't find the files, so build them 618 | 619 | results = [] 620 | ns = plot_context if context else {} 621 | 622 | if context_reset: 623 | clear_state(config.plot_rcparams) 624 | plot_context.clear() 625 | 626 | close_figs = not context or close_figs 627 | 628 | for i, code_piece in enumerate(code_pieces): 629 | if not context or config.plot_apply_rcparams: 630 | clear_state(config.plot_rcparams, close_figs) 631 | elif close_figs: 632 | plt.close("all") 633 | 634 | run_code(code_piece, code_path, ns, function_name) 635 | 636 | images = [] 637 | fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() 638 | for j, figman in enumerate(fig_managers): 639 | if len(fig_managers) == 1 and len(code_pieces) == 1: 640 | img = ImageFile(output_base, output_dir) 641 | elif len(code_pieces) == 1: 642 | img = ImageFile("%s_%02d" % (output_base, j), output_dir) 643 | else: 644 | img = ImageFile("%s_%02d_%02d" % (output_base, i, j), output_dir) 645 | images.append(img) 646 | for format, dpi in formats: 647 | try: 648 | figman.canvas.figure.savefig(img.filename(format), dpi=dpi, bbox_inches="tight") 649 | except Exception: 650 | raise PlotError(traceback.format_exc()) 651 | img.formats.append(format) 652 | 653 | results.append((code_piece, images)) 654 | 655 | if not context or config.plot_apply_rcparams: 656 | clear_state(config.plot_rcparams, close=not context) 657 | 658 | return results 659 | 660 | 661 | def run(arguments, content, options, state_machine, state, lineno): 662 | # The user may provide a filename *or* Python code content, but not both 663 | if arguments and content: 664 | raise RuntimeError("plot:: 
directive can't have both args and content") 665 | 666 | document = state_machine.document 667 | config = document.settings.env.config 668 | nofigs = "nofigs" in options 669 | 670 | options.setdefault("include-source", config.plot_include_source) 671 | keep_context = "context" in options 672 | context_opt = None if not keep_context else options["context"] 673 | 674 | rst_file = document.attributes["source"] 675 | rst_dir = os.path.dirname(rst_file) 676 | 677 | if len(arguments): 678 | if not config.plot_basedir: 679 | source_file_name = os.path.join(setup.app.builder.srcdir, directives.uri(arguments[0])) 680 | else: 681 | source_file_name = os.path.join( 682 | setup.confdir, config.plot_basedir, directives.uri(arguments[0]) 683 | ) 684 | 685 | # If there is content, it will be passed as a caption. 686 | caption = "\n".join(content) 687 | 688 | # If the optional function name is provided, use it 689 | function_name = arguments[1] if len(arguments) == 2 else None 690 | 691 | with open(source_file_name, encoding="utf-8") as fd: 692 | code = fd.read() 693 | output_base = os.path.basename(source_file_name) 694 | else: 695 | source_file_name = rst_file 696 | code = textwrap.dedent("\n".join(map(str, content))) 697 | counter = document.attributes.get("_plot_counter", 0) + 1 698 | document.attributes["_plot_counter"] = counter 699 | base, ext = os.path.splitext(os.path.basename(source_file_name)) 700 | output_base = "%s-%d.py" % (base, counter) 701 | function_name = None 702 | caption = "" 703 | 704 | base, source_ext = os.path.splitext(output_base) 705 | if source_ext in (".py", ".rst", ".txt"): 706 | output_base = base 707 | else: 708 | source_ext = "" 709 | 710 | # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames 711 | output_base = output_base.replace(".", "-") 712 | 713 | # is it in doctest format? 714 | is_doctest = contains_doctest(code) 715 | if "format" in options: 716 | is_doctest = options["format"] != "python" 717 | 718 | # determine output directory name fragment 719 | source_rel_name = relpath(source_file_name, setup.confdir) 720 | source_rel_dir = os.path.dirname(source_rel_name) 721 | while source_rel_dir.startswith(os.path.sep): 722 | source_rel_dir = source_rel_dir[1:] 723 | 724 | # build_dir: where to place output files (temporarily) 725 | build_dir = os.path.join( 726 | os.path.dirname(setup.app.doctreedir), "plot_directive", source_rel_dir 727 | ) 728 | # get rid of .. in paths, also changes pathsep 729 | # see note in Python docs for warning about symbolic links on Windows. 
730 | # need to compare source and dest paths at end 731 | build_dir = os.path.normpath(build_dir) 732 | 733 | if not os.path.exists(build_dir): 734 | os.makedirs(build_dir) 735 | 736 | # output_dir: final location in the builder's directory 737 | dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, source_rel_dir)) 738 | if not os.path.exists(dest_dir): 739 | os.makedirs(dest_dir) # no problem here for me, but just use built-ins 740 | 741 | # how to link to files from the RST file 742 | dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir), source_rel_dir).replace( 743 | os.path.sep, "/" 744 | ) 745 | build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, "/") 746 | source_link = dest_dir_link + "/" + output_base + source_ext 747 | 748 | # make figures 749 | try: 750 | results = render_figures( 751 | code, 752 | source_file_name, 753 | build_dir, 754 | output_base, 755 | keep_context, 756 | function_name, 757 | config, 758 | context_reset=context_opt == "reset", 759 | close_figs=context_opt == "close-figs", 760 | ) 761 | errors = [] 762 | except PlotError as err: 763 | reporter = state.memo.reporter 764 | sm = reporter.system_message( 765 | 2, 766 | f"Exception occurred in plotting {output_base}\n from {source_file_name}:\n{err}", 767 | line=lineno, 768 | ) 769 | results = [(code, [])] 770 | errors = [sm] 771 | 772 | # Properly indent the caption 773 | caption = "\n".join(" " + line.strip() for line in caption.split("\n")) 774 | 775 | # generate output restructuredtext 776 | total_lines = [] 777 | for j, (code_piece, images) in enumerate(results): 778 | if options["include-source"]: 779 | if is_doctest: 780 | lines = [""] 781 | lines += [row.rstrip() for row in code_piece.split("\n")] 782 | else: 783 | lines = [".. code-block:: python", ""] 784 | lines += [" %s" % row.rstrip() for row in code_piece.split("\n")] 785 | source_code = "\n".join(lines) 786 | else: 787 | source_code = "" 788 | 789 | if nofigs: 790 | images = [] 791 | 792 | opts = [ 793 | f":{key}: {val}" 794 | for key, val in options.items() 795 | if key in ("alt", "height", "width", "scale", "align", "class") 796 | ] 797 | 798 | only_html = ".. only:: html" 799 | only_latex = ".. only:: latex" 800 | only_texinfo = ".. 
only:: texinfo" 801 | 802 | # Not-None src_link signals the need for a source link in the generated 803 | # html 804 | src_link = source_link if j == 0 and config.plot_html_show_source_link else None 805 | 806 | result = format_template( 807 | config.plot_template or TEMPLATE, 808 | dest_dir=dest_dir_link, 809 | build_dir=build_dir_link, 810 | source_link=src_link, 811 | multi_image=len(images) > 1, 812 | only_html=only_html, 813 | only_latex=only_latex, 814 | only_texinfo=only_texinfo, 815 | options=opts, 816 | images=images, 817 | source_code=source_code, 818 | html_show_formats=config.plot_html_show_formats and not nofigs, 819 | caption=caption, 820 | ) 821 | 822 | total_lines.extend(result.split("\n")) 823 | total_lines.extend("\n") 824 | 825 | if total_lines: 826 | state_machine.insert_input(total_lines, source=source_file_name) 827 | 828 | # copy image files to builder's output directory, if necessary 829 | if not os.path.exists(dest_dir): 830 | cbook.mkdirs(dest_dir) 831 | 832 | for code_piece, images in results: 833 | for img in images: 834 | for fn in img.filenames(): 835 | destimg = os.path.join(dest_dir, os.path.basename(fn)) 836 | if fn != destimg: 837 | shutil.copyfile(fn, destimg) 838 | 839 | # copy script (if necessary) 840 | target_name = os.path.join(dest_dir, output_base + source_ext) 841 | with open(target_name, "w", encoding="utf-8") as f: 842 | code_escaped = unescape_doctest(code) if source_file_name == rst_file else code 843 | f.write(code_escaped) 844 | 845 | return errors 846 | -------------------------------------------------------------------------------- /docs/sphinxext/plot_generator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Sphinx plugin to run example scripts and create a gallery page. 3 | 4 | Lightly modified from the mpld3 project. 5 | 6 | """ 7 | import glob 8 | import os 9 | import os.path as op 10 | import re 11 | import shutil 12 | import token 13 | import tokenize 14 | 15 | import matplotlib 16 | 17 | matplotlib.use("Agg") 18 | import matplotlib.pyplot as plt 19 | from matplotlib import image 20 | 21 | RST_TEMPLATE = """ 22 | .. _{sphinx_tag}: 23 | 24 | {docstring} 25 | 26 | .. image:: {img_file} 27 | 28 | **Python source code:** :download:`[download source: {fname}]<{fname}>` 29 | 30 | .. literalinclude:: {fname} 31 | :lines: {end_line}- 32 | """ 33 | 34 | 35 | INDEX_TEMPLATE = """ 36 | 37 | .. raw:: html 38 | 39 | 100 | 101 | .. _{sphinx_tag}: 102 | 103 | Example gallery 104 | =============== 105 | 106 | {toctree} 107 | 108 | {contents} 109 | 110 | .. raw:: html 111 | 112 |
113 | """ 114 | 115 | 116 | def create_thumbnail(infile, thumbfile, width=300, height=300, cx=0.5, cy=0.5, border=4): 117 | baseout, extout = op.splitext(thumbfile) 118 | 119 | im = image.imread(infile) 120 | rows, cols = im.shape[:2] 121 | x0 = int(cx * cols - 0.5 * width) 122 | y0 = int(cy * rows - 0.5 * height) 123 | xslice = slice(x0, x0 + width) 124 | yslice = slice(y0, y0 + height) 125 | thumb = im[yslice, xslice] 126 | thumb[:border, :, :3] = thumb[-border:, :, :3] = 0 127 | thumb[:, :border, :3] = thumb[:, -border:, :3] = 0 128 | 129 | dpi = 100 130 | fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi) 131 | 132 | ax = fig.add_axes([0, 0, 1, 1], aspect="auto", frameon=False, xticks=[], yticks=[]) 133 | ax.imshow(thumb, aspect="auto", resample=True, interpolation="bilinear") 134 | fig.savefig(thumbfile, dpi=dpi) 135 | return fig 136 | 137 | 138 | def indent(s, N=4): 139 | """indent a string""" 140 | return s.replace("\n", "\n" + N * " ") 141 | 142 | 143 | class ExampleGenerator: 144 | """Tools for generating an example page from a file""" 145 | 146 | def __init__(self, filename, target_dir): 147 | self.filename = filename 148 | self.target_dir = target_dir 149 | self.thumbloc = 0.5, 0.5 150 | self.extract_docstring() 151 | with open(filename) as fid: 152 | self.filetext = fid.read() 153 | 154 | outfilename = op.join(target_dir, self.rstfilename) 155 | 156 | # Only actually run it if the output RST file doesn't 157 | # exist or it was modified less recently than the example 158 | if not op.exists(outfilename) or (op.getmtime(outfilename) < op.getmtime(filename)): 159 | self.exec_file() 160 | else: 161 | print(f"skipping {self.filename}") 162 | 163 | @property 164 | def dirname(self): 165 | return op.split(self.filename)[0] 166 | 167 | @property 168 | def fname(self): 169 | return op.split(self.filename)[1] 170 | 171 | @property 172 | def modulename(self): 173 | return op.splitext(self.fname)[0] 174 | 175 | @property 176 | def pyfilename(self): 177 | return self.modulename + ".py" 178 | 179 | @property 180 | def rstfilename(self): 181 | return self.modulename + ".rst" 182 | 183 | @property 184 | def htmlfilename(self): 185 | return self.modulename + ".html" 186 | 187 | @property 188 | def pngfilename(self): 189 | pngfile = self.modulename + ".png" 190 | return "_images/" + pngfile 191 | 192 | @property 193 | def thumbfilename(self): 194 | pngfile = self.modulename + "_thumb.png" 195 | return pngfile 196 | 197 | @property 198 | def sphinxtag(self): 199 | return self.modulename 200 | 201 | @property 202 | def pagetitle(self): 203 | return self.docstring.strip().split("\n")[0].strip() 204 | 205 | @property 206 | def plotfunc(self): 207 | match = re.search(r"sns\.(.+plot)\(", self.filetext) 208 | if match: 209 | return match.group(1) 210 | match = re.search(r"sns\.(.+map)\(", self.filetext) 211 | if match: 212 | return match.group(1) 213 | match = re.search(r"sns\.(.+Grid)\(", self.filetext) 214 | if match: 215 | return match.group(1) 216 | return "" 217 | 218 | def extract_docstring(self): 219 | """Extract a module-level docstring""" 220 | with open(self.filename) as f: 221 | lines = f.readlines() 222 | start_row = 0 223 | if lines[0].startswith("#!"): 224 | lines.pop(0) 225 | start_row = 1 226 | 227 | docstring = "" 228 | first_par = "" 229 | tokens = tokenize.generate_tokens(lines.__iter__().next) 230 | for tok_type, tok_content, _, (erow, _), _ in tokens: 231 | tok_type = token.tok_name[tok_type] 232 | if tok_type in ("NEWLINE", "COMMENT", "NL", "INDENT", "DEDENT"): 233 | 
continue 234 | elif tok_type == "STRING": 235 | docstring = eval(tok_content) 236 | # If the docstring is formatted with several paragraphs, 237 | # extract the first one: 238 | paragraphs = "\n".join(line.rstrip() for line in docstring.split("\n")).split( 239 | "\n\n" 240 | ) 241 | if len(paragraphs) > 0: 242 | first_par = paragraphs[0] 243 | break 244 | 245 | thumbloc = None 246 | for i, line in enumerate(docstring.split("\n")): 247 | m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line) 248 | if m: 249 | thumbloc = float(m.group(1)), float(m.group(2)) 250 | break 251 | if thumbloc is not None: 252 | self.thumbloc = thumbloc 253 | docstring = "\n".join( 254 | [ # noqa 255 | word 256 | for word in docstring.split("\n") 257 | if not word.startswith("_thumb") # noqa 258 | ] 259 | ) # noqa 260 | 261 | self.docstring = docstring 262 | self.short_desc = first_par 263 | self.end_line = erow + 1 + start_row 264 | 265 | def exec_file(self): 266 | print(f"running {self.filename}") 267 | 268 | plt.close("all") 269 | my_globals = {"pl": plt, "plt": plt} # noqa 270 | # execfile(self.filename, my_globals) # noqa 271 | 272 | fig = plt.gcf() 273 | fig.canvas.draw() 274 | pngfile = op.join(self.target_dir, self.pngfilename) 275 | thumbfile = op.join("example_thumbs", self.thumbfilename) 276 | self.html = "" % self.pngfilename 277 | fig.savefig(pngfile, dpi=75, bbox_inches="tight") 278 | 279 | cx, cy = self.thumbloc 280 | create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy) 281 | 282 | def toctree_entry(self): 283 | return " ./%s\n\n" % op.splitext(self.htmlfilename)[0] 284 | 285 | def contents_entry(self): 286 | return ( 287 | ".. raw:: html\n\n" 288 | " \n\n" 296 | "\n\n" 297 | "" 298 | ) 299 | 300 | 301 | def main(app): 302 | static_dir = op.join(app.builder.srcdir, "_static") 303 | target_dir = op.join(app.builder.srcdir, "examples") 304 | image_dir = op.join(app.builder.srcdir, "examples/_images") 305 | thumb_dir = op.join(app.builder.srcdir, "example_thumbs") 306 | source_dir = op.abspath(op.join(app.builder.srcdir, "..", "examples")) 307 | if not op.exists(static_dir): 308 | os.makedirs(static_dir) 309 | 310 | if not op.exists(target_dir): 311 | os.makedirs(target_dir) 312 | 313 | if not op.exists(image_dir): 314 | os.makedirs(image_dir) 315 | 316 | if not op.exists(thumb_dir): 317 | os.makedirs(thumb_dir) 318 | 319 | if not op.exists(source_dir): 320 | os.makedirs(source_dir) 321 | 322 | banner_data = [] 323 | 324 | toctree = "\n\n" ".. 
toctree::\n" " :hidden:\n\n" 325 | contents = "\n\n" 326 | 327 | # Write individual example files 328 | for filename in glob.glob(op.join(source_dir, "*.py")): 329 | ex = ExampleGenerator(filename, target_dir) 330 | 331 | banner_data.append( 332 | { 333 | "title": ex.pagetitle, 334 | "url": op.join("examples", ex.htmlfilename), 335 | "thumb": op.join(ex.thumbfilename), 336 | } 337 | ) 338 | shutil.copyfile(filename, op.join(target_dir, ex.pyfilename)) 339 | output = RST_TEMPLATE.format( 340 | sphinx_tag=ex.sphinxtag, 341 | docstring=ex.docstring, 342 | end_line=ex.end_line, 343 | fname=ex.pyfilename, 344 | img_file=ex.pngfilename, 345 | ) 346 | with open(op.join(target_dir, ex.rstfilename), "w") as f: 347 | f.write(output) 348 | 349 | toctree += ex.toctree_entry() 350 | contents += ex.contents_entry() 351 | 352 | if len(banner_data) < 10: 353 | banner_data = (4 * banner_data)[:10] 354 | 355 | # write index file 356 | index_file = op.join(target_dir, "index.rst") 357 | with open(index_file, "w") as index: 358 | index.write( 359 | INDEX_TEMPLATE.format(sphinx_tag="example_gallery", toctree=toctree, contents=contents) 360 | ) 361 | 362 | 363 | def setup(app): 364 | app.connect("builder-inited", main) 365 | -------------------------------------------------------------------------------- /docs/sphinxext/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.8 2 | numpydoc 3 | -------------------------------------------------------------------------------- /docs/tutorial/Makefile: -------------------------------------------------------------------------------- 1 | notebooks: 2 | 3 | tools/nb_to_doc.py overview 4 | tools/nb_to_doc.py fitting 5 | tools/nb_to_doc.py activities -------------------------------------------------------------------------------- /docs/tutorial/activities.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Paramnormal Activity\n", 8 | "\n", 9 | "Perhaps the most convenient way to access the functionality of `paramnormal` is through the `activity` module.\n", 10 | "\n", 11 | "Random number generation, distribution fitting, and basic plotting are exposed through `activity`." 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "metadata": { 18 | "collapsed": true 19 | }, 20 | "outputs": [], 21 | "source": [ 22 | "%matplotlib inline" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "metadata": { 29 | "collapsed": false 30 | }, 31 | "outputs": [], 32 | "source": [ 33 | "import warnings\n", 34 | "warnings.simplefilter('ignore')\n", 35 | "\n", 36 | "from numpy.random import seed\n", 37 | "from scipy import stats\n", 38 | "from matplotlib import pyplot\n", 39 | "import seaborn\n", 40 | "\n", 41 | "import paramnormal\n", 42 | "\n", 43 | "clean_bkgd = {'axes.facecolor':'none', 'figure.facecolor':'none'}\n", 44 | "seaborn.set(style='ticks', rc=clean_bkgd)" 45 | ] 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "metadata": {}, 50 | "source": [ 51 | "### Random number generation\n", 52 | "Through the top-level API, you could do the following to generate lognormal random numbers." 
53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "metadata": { 59 | "collapsed": false 60 | }, 61 | "outputs": [], 62 | "source": [ 63 | "seed(0)\n", 64 | "paramnormal.lognormal(mu=0.75, sigma=1.25).rvs(5)" 65 | ] 66 | }, 67 | { 68 | "cell_type": "markdown", 69 | "metadata": {}, 70 | "source": [ 71 | "What's happening here is that `paramnormal.lognormal(mu=0.75, sigma=1.25)` translates the arguments, passes them to `scipy.stats.lognorm`, and returns scipy's distribution object. Then we call the `rvs` method of that object to generate five random numbers in an array.\n", 72 | "\n", 73 | "Through the `activity` API, that equivalent to:" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "metadata": { 80 | "collapsed": false 81 | }, 82 | "outputs": [], 83 | "source": [ 84 | "seed(0)\n", 85 | "paramnormal.activity.random('lognormal', mu=0.75, sigma=1.25, shape=5)" 86 | ] 87 | }, 88 | { 89 | "cell_type": "markdown", 90 | "metadata": {}, 91 | "source": [ 92 | "And of course, Greek letters are still supported." 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": null, 98 | "metadata": { 99 | "collapsed": false 100 | }, 101 | "outputs": [], 102 | "source": [ 103 | "seed(0)\n", 104 | "paramnormal.activity.random('lognormal', μ=0.75, σ=1.25, shape=5)" 105 | ] 106 | }, 107 | { 108 | "cell_type": "markdown", 109 | "metadata": {}, 110 | "source": [ 111 | "Lastly, you can reuse an already full-specified distribution and the `shape` parameter can take a tuple to return *N*-dimensional arrays." 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": null, 117 | "metadata": { 118 | "collapsed": false 119 | }, 120 | "outputs": [], 121 | "source": [ 122 | "seed(0)\n", 123 | "my_dist = paramnormal.lognormal(μ=0.75, σ=1.25)\n", 124 | "paramnormal.activity.random(my_dist, shape=(2, 4))" 125 | ] 126 | }, 127 | { 128 | "cell_type": "markdown", 129 | "metadata": {}, 130 | "source": [ 131 | "### Fitting distributions\n", 132 | "Fitting distributions to data follows a similar pattern." 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": null, 138 | "metadata": { 139 | "collapsed": false 140 | }, 141 | "outputs": [], 142 | "source": [ 143 | "data = paramnormal.activity.random('beta', α=3, β=2, shape=37)\n", 144 | "paramnormal.activity.fit('beta', data)" 145 | ] 146 | }, 147 | { 148 | "cell_type": "markdown", 149 | "metadata": {}, 150 | "source": [ 151 | "Equivalent command to perform the same fits in raw scipy is shown below:" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": null, 157 | "metadata": { 158 | "collapsed": false 159 | }, 160 | "outputs": [], 161 | "source": [ 162 | "# constrained loc and scale\n", 163 | "stats.beta.fit(data, floc=0, fscale=1)" 164 | ] 165 | }, 166 | { 167 | "cell_type": "markdown", 168 | "metadata": {}, 169 | "source": [ 170 | "You can still fix the primary parameters and unconstrain the defaults." 
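One more option before the next cell (a minimal sketch based on the `as_params` argument defined in `paramnormal/activity.py`): `activity.fit` can hand back a frozen scipy distribution directly instead of the parameter `namedtuple`, which is handy when the next step is calling `rvs`, `pdf`, or `ppf` on the fitted result. Reusing the `data` array generated above:

fitted = paramnormal.activity.fit('beta', data, as_params=False)
fitted.ppf([0.25, 0.5, 0.75])  # quartiles of the fitted beta distribution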
171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": null, 176 | "metadata": { 177 | "collapsed": false 178 | }, 179 | "outputs": [], 180 | "source": [ 181 | "paramnormal.activity.fit('beta', data, β=2, loc=None)" 182 | ] 183 | }, 184 | { 185 | "cell_type": "markdown", 186 | "metadata": {}, 187 | "source": [ 188 | "And again in raw scipy:" 189 | ] 190 | }, 191 | { 192 | "cell_type": "code", 193 | "execution_count": null, 194 | "metadata": { 195 | "collapsed": false 196 | }, 197 | "outputs": [], 198 | "source": [ 199 | "# constrained beta and scale, unconstrained loc\n", 200 | "stats.beta.fit(data, f1=2, fscale=1)" 201 | ] 202 | }, 203 | { 204 | "cell_type": "markdown", 205 | "metadata": {}, 206 | "source": [ 207 | "### Plotting\n", 208 | "There is very limited plotting functionality built into paramnormal.\n", 209 | "The probability distribution function (PDF) is plotted by default, but any other method of the distributions can be plotted by specifying the ``which`` parameters." 210 | ] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "execution_count": null, 215 | "metadata": { 216 | "collapsed": false 217 | }, 218 | "outputs": [], 219 | "source": [ 220 | "ax = paramnormal.activity.plot('beta', α=3, β=2)\n", 221 | "paramnormal.activity.plot('beta', α=3, β=2, ax=ax, which='CDF')\n", 222 | "ax.legend()\n" 223 | ] 224 | }, 225 | { 226 | "cell_type": "markdown", 227 | "metadata": {}, 228 | "source": [ 229 | "You can plot on an existing figure through the `ax` argument and control the line style through `line_opts`." 230 | ] 231 | }, 232 | { 233 | "cell_type": "code", 234 | "execution_count": null, 235 | "metadata": { 236 | "collapsed": false 237 | }, 238 | "outputs": [], 239 | "source": [ 240 | "fig, (ax, ax2) = pyplot.subplots(nrows=2, sharex=True, sharey=True)\n", 241 | "paramnormal.activity.plot('beta', α=6, β=2, ax=ax, line_opts=dict(color='firebrick', lw=3))\n", 242 | "paramnormal.activity.plot('beta', α=2, β=6, ax=ax2, line_opts=dict(color='forestgreen', lw=1.25))\n", 243 | "ax.set_ylabel('α=6, β=2')\n", 244 | "ax2.set_ylabel('α=2, β=6')\n", 245 | "seaborn.despine(fig)" 246 | ] 247 | }, 248 | { 249 | "cell_type": "markdown", 250 | "metadata": {}, 251 | "source": [ 252 | "Of course, you can create a fully-specified distribtion and omit the distribution parameters." 253 | ] 254 | }, 255 | { 256 | "cell_type": "code", 257 | "execution_count": null, 258 | "metadata": { 259 | "collapsed": false 260 | }, 261 | "outputs": [], 262 | "source": [ 263 | "beta = paramnormal.beta(α=3, β=2)\n", 264 | "ax = paramnormal.activity.plot(beta)" 265 | ] 266 | }, 267 | { 268 | "cell_type": "markdown", 269 | "metadata": {}, 270 | "source": [ 271 | "And finally, you can pass an array of data and an unfrozen distribution, and a new distribution will be fit to your data." 
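The final example below does this with a normal distribution fit to a fresh sample; as a smaller sketch first, the same mechanism works with the beta `data` sample from the fitting section, and it combines freely with the `which` option (here the CDF and survival function of the fitted distribution):

ax = paramnormal.activity.plot('beta', data=data, which='CDF')
paramnormal.activity.plot('beta', data=data, which='SF', ax=ax)
ax.legend()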
272 | ] 273 | }, 274 | { 275 | "cell_type": "code", 276 | "execution_count": null, 277 | "metadata": { 278 | "collapsed": false 279 | }, 280 | "outputs": [], 281 | "source": [ 282 | "data = paramnormal.activity.random('beta', α=2, β=6, shape=37) + \\\n", 283 | "       paramnormal.activity.random('normal', μ=5, σ=1, shape=37)\n", 284 | "ax = paramnormal.activity.plot('normal', data=data, line_opts=dict(label='Empirical Fit'))\n", 285 | "ax = paramnormal.activity.plot('normal', μ=5, σ=1, line_opts=dict(label='Theoretical'))\n", 286 | "ax.legend()" 287 | ] 288 | } 289 | ], 290 | "metadata": { 291 | "kernelspec": { 292 | "display_name": "Python 3", 293 | "language": "python", 294 | "name": "python3" 295 | }, 296 | "language_info": { 297 | "codemirror_mode": { 298 | "name": "ipython", 299 | "version": 3 300 | }, 301 | "file_extension": ".py", 302 | "mimetype": "text/x-python", 303 | "name": "python", 304 | "nbconvert_exporter": "python", 305 | "pygments_lexer": "ipython3", 306 | "version": "3.5.1" 307 | } 308 | }, 309 | "nbformat": 4, 310 | "nbformat_minor": 0 311 | } 312 | -------------------------------------------------------------------------------- /docs/tutorial/fitting.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Fitting distributions to data with `paramnormal`.\n", 8 | "\n", 9 | "In addition to explicitly creating distributions from known parameters, `paramnormal.[dist].fit` provides a similar interface to `scipy.stats` maximum-likelihood estimation methods.\n", 10 | "\n", 11 | "Again, we'll demonstrate with a lognormal distribution and compare parameter estimation with scipy." 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "metadata": { 18 | "collapsed": true 19 | }, 20 | "outputs": [], 21 | "source": [ 22 | "%matplotlib inline" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "metadata": { 29 | "collapsed": false 30 | }, 31 | "outputs": [], 32 | "source": [ 33 | "import warnings\n", 34 | "warnings.simplefilter('ignore')\n", 35 | "\n", 36 | "import numpy as np\n", 37 | "import matplotlib.pyplot as plt\n", 38 | "import seaborn\n", 39 | "\n", 40 | "import paramnormal\n", 41 | "\n", 42 | "clean_bkgd = {'axes.facecolor':'none', 'figure.facecolor':'none'}\n", 43 | "seaborn.set(style='ticks', rc=clean_bkgd)" 44 | ] 45 | }, 46 | { 47 | "cell_type": "markdown", 48 | "metadata": {}, 49 | "source": [ 50 | "Let's start by generating a reasonably-sized random dataset and plotting a histogram.\n", 51 | "\n", 52 | "The primary method of creating a distribution from named parameters is shown below.\n", 53 | "\n", 54 | "The call to `paramnormal.lognormal` translates the parameters to be compatible with scipy. We then chain a call to the `rvs` (random variates) method of the returned scipy distribution." 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "execution_count": null, 60 | "metadata": { 61 | "collapsed": false 62 | }, 63 | "outputs": [], 64 | "source": [ 65 | "np.random.seed(0)\n", 66 | "x = paramnormal.lognormal(mu=1.75, sigma=0.75).rvs(370)" 67 | ] 68 | }, 69 | { 70 | "cell_type": "markdown", 71 | "metadata": {}, 72 | "source": [ 73 | "Here's a histogram to illustrate the distribution." 
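As a quick sanity check before the histogram below: because the logarithm of lognormal data is normally distributed, the log of `x` should have a mean and standard deviation close to the `mu` and `sigma` used above. A minimal sketch:

np.log(x).mean(), np.log(x).std()  # expect values near 1.75 and 0.75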
74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "metadata": { 80 | "collapsed": false 81 | }, 82 | "outputs": [], 83 | "source": [ 84 | "bins = np.logspace(-0.5, 1.75, num=25)\n", 85 | "fig, ax = plt.subplots()\n", 86 | "_ = ax.hist(x, bins=bins, normed=True)\n", 87 | "ax.set_xscale('log')\n", 88 | "ax.set_xlabel('$X$')\n", 89 | "ax.set_ylabel('Probability')\n", 90 | "seaborn.despine()\n", 91 | "fig" 92 | ] 93 | }, 94 | { 95 | "cell_type": "markdown", 96 | "metadata": {}, 97 | "source": [ 98 | "Pretending for a moment that we didn't generate this dataset with explicit distribution parameters, how would we go about estimating them?\n", 99 | "\n", 100 | "Scipy provides a maximum-likelihood estimation for estimating parameters:" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": null, 106 | "metadata": { 107 | "collapsed": false 108 | }, 109 | "outputs": [], 110 | "source": [ 111 | "from scipy import stats\n", 112 | "print(stats.lognorm.fit(x))" 113 | ] 114 | }, 115 | { 116 | "cell_type": "markdown", 117 | "metadata": {}, 118 | "source": [ 119 | "Unfortunately those parameters don't really make any sense based on what we know about our articifical dataset.\n", 120 | "\n", 121 | "That's where paramnormal comes in:" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": null, 127 | "metadata": { 128 | "collapsed": false 129 | }, 130 | "outputs": [], 131 | "source": [ 132 | "params = paramnormal.lognormal.fit(x)\n", 133 | "print(params)" 134 | ] 135 | }, 136 | { 137 | "cell_type": "markdown", 138 | "metadata": {}, 139 | "source": [ 140 | "This matches well with our understanding of the distribution.\n", 141 | "\n", 142 | "The returned `params` variable is a `namedtuple` that we can easily use to create a distribution via the `.from_params` methods. From there, we can create a nice plot of the probability distribution function with our histogram." 
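Since `params` is an ordinary `namedtuple` (built from `paramnormal.lognormal.param_template`), its fields can also be read directly or converted to a dictionary before moving on to the plot below. A minimal sketch:

print(params.mu, params.sigma, params.offset)
params._asdict()  # convenient for passing the fitted parameters around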
143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": null, 148 | "metadata": { 149 | "collapsed": false 150 | }, 151 | "outputs": [], 152 | "source": [ 153 | "dist = paramnormal.lognormal.from_params(params)\n", 154 | "\n", 155 | "# theoretical PDF\n", 156 | "x_hat = np.logspace(-0.5, 1.75, num=100)\n", 157 | "y_hat = dist.pdf(x_hat)\n", 158 | "\n", 159 | "bins = np.logspace(-0.5, 1.75, num=25)\n", 160 | "fig, ax = plt.subplots()\n", 161 | "_ = ax.hist(x, bins=bins, normed=True, alpha=0.375)\n", 162 | "ax.plot(x_hat, y_hat, zorder=2, color='g')\n", 163 | "ax.set_xscale('log')\n", 164 | "ax.set_xlabel('$X$')\n", 165 | "ax.set_ylabel('Probability')\n", 166 | "seaborn.despine()" 167 | ] 168 | }, 169 | { 170 | "cell_type": "markdown", 171 | "metadata": {}, 172 | "source": [ 173 | "### Recap\n", 174 | "#### Fitting data" 175 | ] 176 | }, 177 | { 178 | "cell_type": "code", 179 | "execution_count": null, 180 | "metadata": { 181 | "collapsed": false 182 | }, 183 | "outputs": [], 184 | "source": [ 185 | "params = paramnormal.lognormal.fit(x)\n", 186 | "print(params)" 187 | ] 188 | }, 189 | { 190 | "cell_type": "markdown", 191 | "metadata": {}, 192 | "source": [ 193 | "#### Creating distributions \n", 194 | "The manual way:" 195 | ] 196 | }, 197 | { 198 | "cell_type": "code", 199 | "execution_count": null, 200 | "metadata": { 201 | "collapsed": false 202 | }, 203 | "outputs": [], 204 | "source": [ 205 | "paramnormal.lognormal(mu=1.75, sigma=0.75, offset=0)" 206 | ] 207 | }, 208 | { 209 | "cell_type": "markdown", 210 | "metadata": {}, 211 | "source": [ 212 | "From fit parameters:" 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": null, 218 | "metadata": { 219 | "collapsed": false 220 | }, 221 | "outputs": [], 222 | "source": [ 223 | "paramnormal.lognormal.from_params(params)" 224 | ] 225 | } 226 | ], 227 | "metadata": { 228 | "kernelspec": { 229 | "display_name": "Python 3", 230 | "language": "python", 231 | "name": "python3" 232 | }, 233 | "language_info": { 234 | "codemirror_mode": { 235 | "name": "ipython", 236 | "version": 3 237 | }, 238 | "file_extension": ".py", 239 | "mimetype": "text/x-python", 240 | "name": "python", 241 | "nbconvert_exporter": "python", 242 | "pygments_lexer": "ipython3", 243 | "version": "3.5.1" 244 | } 245 | }, 246 | "nbformat": 4, 247 | "nbformat_minor": 0 248 | } -------------------------------------------------------------------------------- /docs/tutorial/overview.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Why `paramnormal` ?\n", 8 | "### The current state of the ecosystem", 9 | "\n", 10 | "Both in `numpy` and `scipy.stats` and in the field of statistics in general, you can refer to the *location* (`loc`) and scale (`scale`) parameters of a distribution. Roughly speaking, they refer to the position and spread of the distribution, respectively. For normal distributions `loc` refers to the mean (symbolized as $\mu$) and `scale` refers to the standard deviation (a.k.a. $\sigma$).\n", 11 | "\n", 12 | "The main problem that `paramnormal` is trying to solve is that sometimes, creating a probability distribution using these parameters (and others) in `scipy.stats` can be confusing. Also the parameters in `numpy.random` can be inconsistently named (admittedly, just a minor inconvenience). 
" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": { 19 | "collapsed": true 20 | }, 21 | "outputs": [], 22 | "source": [ 23 | "%matplotlib inline" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "metadata": { 30 | "collapsed": true 31 | }, 32 | "outputs": [], 33 | "source": [ 34 | "import numpy as np\n", 35 | "from scipy import stats" 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "metadata": {}, 41 | "source": [ 42 | "Consider the lognormal distribution. \n", 43 | "\n", 44 | "> In probability theory, a log-normal (or lognormal) distribution is a continuous probability distribution of a random variable whose logarithm is normally distributed. Thus, if the random variable $X$ is log-normally distributed, then $Y = \ln(X)$ has a normal distribution. Likewise, if $Y$ has a normal distribution, then $X = \exp(Y)$ has a log-normal distribution. [(from wikipedia)](https://en.wikipedia.org/wiki/Log-normal_distribution)\n", 45 | "\n", 46 | "\n", 47 | "In numpy, you specify the \"mean\" and \"sigma\" of the underlying normal distribution. A lot of scientific programmers know what that would mean. But `mean` and `standard_deviation`, `loc` and `scale` or `mu` and `sigma` would have been better choices.\n", 48 | "\n", 49 | "Still, generating random numbers is pretty straight-forward:" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": null, 55 | "metadata": { 56 | "collapsed": false 57 | }, 58 | "outputs": [], 59 | "source": [ 60 | "np.random.seed(0)\n", 61 | "mu = 0\n", 62 | "sigma = 1\n", 63 | "N = 3\n", 64 | "np.random.lognormal(mean=mu, sigma=sigma, size=N)" 65 | ] 66 | }, 67 | { 68 | "cell_type": "markdown", 69 | "metadata": {}, 70 | "source": [ 71 | "In scipy, you need an additional shape parameter (`s`), plus the usual `loc` and `scale`. Aside from the mystery behind what `s` might be, that seems straight-forward enough.\n", 72 | "\n", 73 | "Except it's not.\n", 74 | "\n", 75 | "That shape parameter is actually the standard deviation ($\sigma$) of the underlying normal distribution. The `scale` should be set to the exponentiated location parameter of the raw distribution ($e ^ \mu$). Finally, `loc` actually refers to a sort of offset that can be applied to the entire distribution. In other words, you can translate the distribution up and down to e.g., negative values.\n", 76 | "\n", 77 | "In my field (civil/environmental engineering) variables that are often assumed to be lognormally distributed (e.g., pollutant concentration) can never have values less than or equal to zero. So in that sense, the `loc` parameter in scipy's lognormal distribution **nearly always should be set to zero**.\n", 78 | "\n", 79 | "With that out of the way, recreating the three numbers above in scipy is done as follows: " 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": null, 85 | "metadata": { 86 | "collapsed": false 87 | }, 88 | "outputs": [], 89 | "source": [ 90 | "np.random.seed(0)\n", 91 | "stats.lognorm(sigma, loc=0, scale=np.exp(mu)).rvs(size=N)" 92 | ] 93 | }, 94 | { 95 | "cell_type": "markdown", 96 | "metadata": {}, 97 | "source": [ 98 | "### A new challenger appears\n", 99 | "\n", 100 | "\n", 101 | "`paramnormal` really just hopes to take away some of this friction. 
Consider the following:" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": null, 107 | "metadata": { 108 | "collapsed": false 109 | }, 110 | "outputs": [], 111 | "source": [ 112 | "import paramnormal\n", 113 | "\n", 114 | "np.random.seed(0)\n", 115 | "paramnormal.lognormal(mu=mu, sigma=sigma).rvs(size=N)" 116 | ] 117 | }, 118 | { 119 | "cell_type": "markdown", 120 | "metadata": {}, 121 | "source": [ 122 | "Hopefully that's much more readable and straight-forward." 123 | ] 124 | }, 125 | { 126 | "cell_type": "markdown", 127 | "metadata": {}, 128 | "source": [ 129 | "### Greek-letter support\n", 130 | "\n", 131 | "Tom Augspurger added a [lovely little decorator](https://github.com/phobson/paramnormal/commit/b859e601f7ef45acc1d90b5e8cbf14028c715bf1#diff-9486966ced6b55cf6eb6d3aac274249eR14) to let you use greek letters in the function signature." 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": null, 137 | "metadata": { 138 | "collapsed": false 139 | }, 140 | "outputs": [], 141 | "source": [ 142 | "np.random.seed(0)\n", 143 | "paramnormal.lognormal(μ=mu, σ=sigma).rvs(size=N)" 144 | ] 145 | }, 146 | { 147 | "cell_type": "markdown", 148 | "metadata": {}, 149 | "source": [ 150 | "### Other distributions \n", 151 | "As of now, we provide a convenient interface for the following distributions in `scipy.stats`:" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": null, 157 | "metadata": { 158 | "collapsed": false 159 | }, 160 | "outputs": [], 161 | "source": [ 162 | "for d in paramnormal.dist.__all__:\n", 163 | " print(d)" 164 | ] 165 | }, 166 | { 167 | "cell_type": "markdown", 168 | "metadata": {}, 169 | "source": [ 170 | "Feel free to submit a pull request at [Github](https://github.com/phobson/paramnormal) to add new distributions." 171 | ] 172 | } 173 | ], 174 | "metadata": { 175 | "kernelspec": { 176 | "display_name": "Python 3", 177 | "language": "python", 178 | "name": "python3" 179 | }, 180 | "language_info": { 181 | "codemirror_mode": { 182 | "name": "ipython", 183 | "version": 3 184 | }, 185 | "file_extension": ".py", 186 | "mimetype": "text/x-python", 187 | "name": "python", 188 | "nbconvert_exporter": "python", 189 | "pygments_lexer": "ipython3", 190 | "version": "3.5.1" 191 | } 192 | }, 193 | "nbformat": 4, 194 | "nbformat_minor": 0 195 | } 196 | -------------------------------------------------------------------------------- /docs/tutorial/tools/nb_to_doc.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | """ 3 | Convert empty IPython notebook to a sphinx doc page. 
4 | 5 | """ 6 | import sys 7 | from subprocess import check_call as sh 8 | 9 | 10 | def convert_nb(nbname): 11 | 12 | # Execute the notebook 13 | sh(["jupyter", "nbconvert", "--to", "notebook", 14 | "--execute", "--inplace", nbname + ".ipynb"]) 15 | 16 | # Convert to .rst for Sphinx 17 | sh(["jupyter", "nbconvert", "--to", "rst", nbname + ".ipynb"]) 18 | 19 | # Clear notebook output 20 | sh(["jupyter", "nbconvert", "--to", "notebook", "--inplace", 21 | "--ClearOutputPreprocessor.enabled=True", nbname + ".ipynb"]) 22 | 23 | 24 | if __name__ == "__main__": 25 | 26 | for nbname in sys.argv[1:]: 27 | convert_nb(nbname) 28 | -------------------------------------------------------------------------------- /docs/tutorial/tools/nbstripout: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """strip outputs from an IPython Notebook 3 | 4 | Opens a notebook, strips its output, and writes the outputless version to the original file. 5 | 6 | Useful mainly as a git pre-commit hook for users who don't want to track output in VCS. 7 | 8 | This does mostly the same thing as the `Clear All Output` command in the notebook UI. 9 | """ 10 | 11 | import io 12 | import sys 13 | 14 | from IPython.nbformat import current 15 | 16 | def strip_output(nb): 17 | """strip the outputs from a notebook object""" 18 | for cell in nb.worksheets[0].cells: 19 | if 'outputs' in cell: 20 | cell['outputs'] = [] 21 | if 'prompt_number' in cell: 22 | cell['prompt_number'] = None 23 | return nb 24 | 25 | if __name__ == '__main__': 26 | filename = sys.argv[1] 27 | with io.open(filename, 'r', encoding='utf8') as f: 28 | nb = current.read(f, 'json') 29 | nb = strip_output(nb) 30 | with io.open(filename, 'w', encoding='utf8') as f: 31 | current.write(nb, f, 'json') 32 | -------------------------------------------------------------------------------- /paramnormal/__init__.py: -------------------------------------------------------------------------------- 1 | from paramnormal.dist import * # noqa 2 | from paramnormal.tests import test # noqa 3 | -------------------------------------------------------------------------------- /paramnormal/activity.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from matplotlib import pyplot 3 | 4 | from paramnormal import dist, utils 5 | 6 | 7 | def _check_distro(distro, **params): 8 | # check if we're returning the class definition or an instance 9 | as_class = params.pop("as_class", False) 10 | if hasattr(distro, "pdf"): 11 | return distro 12 | elif hasattr(distro, "from_params"): 13 | return _check_distro(distro.name, as_class=as_class, **params) 14 | else: 15 | try: 16 | distro = getattr(dist, distro) 17 | except (AttributeError, TypeError): 18 | raise ValueError(f"{distro} is not a valid paramnormal distribution") 19 | 20 | _params = utils._remove_nones(**params) 21 | if as_class: 22 | return distro 23 | else: 24 | return distro(**_params) 25 | 26 | 27 | def _check_ax(ax): 28 | if ax is None: 29 | ax = pyplot.gca() 30 | fig = ax.figure 31 | else: 32 | fig = ax.figure 33 | 34 | return fig, ax 35 | 36 | 37 | def random(distro, **params): 38 | """ 39 | Generate random data from a probability distribution. 40 | 41 | Parameters 42 | ---------- 43 | distro : str or paramnormal class 44 | The distribution from which the random data will be generated. 45 | params : keyword arguments of floats 46 | The parameters required to define the distribution. 
See each 47 | distribution's docstring for more info. 48 | shape : int or tuple of ints, optional 49 | The shape of the array into which the generated data will be 50 | placed. If `None`, a scalar will be retured. 51 | 52 | Returns 53 | ------- 54 | random : numpy.array or scalar 55 | The random array (or scalar) generated. 56 | 57 | Examples 58 | -------- 59 | >>> import numpy 60 | >>> import paramnormal 61 | >>> numpy.random.seed(0) 62 | >>> # define dist with a string 63 | >>> paramnormal.activity.random('normal', mu=5, sigma=1.75, shape=1) 64 | array([ 8.08709161]) 65 | >>> # or you can specify the actual class 66 | >>> paramnormal.activity.random(paramnormal.normal, mu=5, sigma=1.75, shape=None) 67 | 5.700275114642641 68 | >>> # greek letters still work 69 | >>> paramnormal.activity.random('beta', α=2.5, β=1.2, shape=(3,3)) 70 | array([[ 0.43771761, 0.84131634, 0.4390664 ], 71 | [ 0.7037142 , 0.88282672, 0.09080825], 72 | [ 0.98747135, 0.63227551, 0.98108498]]) 73 | 74 | """ 75 | 76 | shape = params.pop("shape", None) 77 | distro = _check_distro(distro, **params) 78 | return distro.rvs(size=shape) 79 | 80 | 81 | def fit(distro, data, as_params=True, **guesses): 82 | """ 83 | Estimate the distribution parameters of sample data. 84 | 85 | Parameters 86 | ---------- 87 | distro : str or paramnormal class 88 | The distribution from which the random data will be generated. 89 | data : array-like 90 | The data from which the distribution will be fit. 91 | guesses : named arguments of floats 92 | Inital guess values for certain parameters of the 93 | distribution. See the class docstring for more information 94 | on the parameters. 95 | 96 | Returns 97 | ------- 98 | params : namedtuple 99 | A namedtuple containing all of the paramaters of the 100 | distribution. 101 | 102 | Examples 103 | -------- 104 | >>> import numpy 105 | >>> import paramnormal 106 | >>> numpy.random.seed(0) 107 | >>> x = numpy.random.normal(loc=5.75, scale=2.25, size=37) 108 | >>> paramnormal.activity.fit('normal', x) 109 | params(mu=6.4790576880446782, sigma=2.4437818960405617) 110 | 111 | >>> paramnormal.activity.fit('normal', x, sigma=2) 112 | params(mu=6.4790576880446782, sigma=2) 113 | """ 114 | 115 | distro = _check_distro(distro, as_class=True) 116 | params = distro.fit(data, **guesses) 117 | if as_params: 118 | return params 119 | else: 120 | return distro.from_params(params) 121 | 122 | 123 | def plot( 124 | distro, 125 | which="PDF", 126 | data=None, 127 | fit_dist=True, 128 | ax=None, 129 | pad=0.05, 130 | xscale="linear", 131 | line_opts=None, 132 | **guesses, 133 | ): 134 | """ 135 | Plot the PDF of a dataset and other representations of the 136 | distribution (histogram, kernel density estimate, and rug plot). 137 | 138 | Parameters 139 | ---------- 140 | distro : str or distribution 141 | The (name of) the distribution to be plotted. 142 | data : array-like, optional 143 | An array-like object that can be passed to 144 | :func:`~seaborn.distplot` and :func:`~fit`. 145 | fit_dist : bool, optional 146 | Toggles fitting ``distro`` to ``data``. If False, ``distro`` 147 | must be a fully specified distribution so that the PDF can be 148 | plotted. 149 | ax : matplotlib.Axes, optional 150 | Axes on which the everything gets drawn. If not provided, a new 151 | one is created. 152 | pad : float, optional 153 | The fraction of beyond min and max values of data where the PDF 154 | will be drawn. 155 | xscale : str, optional 156 | Specfifies a `'log'` or `'linear'` scale on the plot. 
157 | line_opts : dict, optional 158 | Plotting options passed to :meth:`~ax.plot` when drawing the PDF. 159 | distplot : bool, Optional 160 | Toggles the use of :func:`~seaborn.distplot`. The default is 161 | `False`. 162 | 163 | .. note: 164 | ``data`` must not be `None` for this to have an effect. 165 | 166 | distplot_opts : dict, optional 167 | Dictionary of parameters to be passed to 168 | :func:`~seaborn.distplot`. 169 | guesses : keyword arguments, optional 170 | Additional parameters for specifying the distribution. 171 | 172 | Returns 173 | ------- 174 | ax : matplotlib.Axes 175 | 176 | See Also 177 | -------- 178 | seaborn.distplot 179 | 180 | Examples 181 | -------- 182 | 183 | Plot a simple PDF of a fully-specified normal distribution. 184 | 185 | .. plot:: 186 | :context: close-figs 187 | 188 | >>> import numpy 189 | >>> import seaborn 190 | >>> import paramnormal 191 | >>> clean_bkgd = {'axes.facecolor': 'none', 'figure.facecolor': 'none'} 192 | >>> seaborn.set(style='ticks', rc=clean_bkgd) 193 | >>> norm_dist = paramnormal.normal(μ=5.4, σ=2.5) 194 | >>> ax = paramnormal.activity.plot(norm_dist) 195 | 196 | Pass a data sample to fit the distribution on-the-fly. 197 | 198 | .. plot:: 199 | :context: close-figs 200 | 201 | >>> paramnormal.utils.seed(0) 202 | >>> data = paramnormal.activity.random('normal', μ=5.4, σ=2.5, shape=(37)) 203 | >>> ax = paramnormal.activity.plot('normal', data=data) 204 | 205 | Use seaborn to show other representations of the distribution of 206 | real data: 207 | 208 | .. plot:: 209 | :context: close-figs 210 | 211 | >>> ax = paramnormal.activity.plot('normal', data=data, distplot=True) 212 | >>> ax.legend(loc='upper left') 213 | 214 | Use ``line_opts`` and ``distplot_opts`` to customize more complex 215 | plots. 216 | 217 | .. plot:: 218 | :context: close-figs 219 | 220 | >>> paramnormal.utils.seed(0) 221 | >>> data = paramnormal.activity.random('lognormal', μ=0.75, σ=1.2, shape=125) 222 | >>> logdata = numpy.log10(data) 223 | >>> line_opts = dict(color='firebrick', lw=3.5, label='Fit PDF') 224 | >>> distplot_opts = dict(rug=True, kde=False, norm_hist=True) 225 | >>> ax = paramnormal.activity.plot('lognormal', data=data, distplot=True, 226 | ... xscale='log', pad=0.01, 227 | ... line_opts=line_opts, 228 | ... distplot_opts=distplot_opts) 229 | 230 | 231 | 232 | Notice that the bins in log-space don't work so well. We can 233 | compute them outselves. 234 | 235 | .. plot:: 236 | :context: close-figs 237 | 238 | >>> paramnormal.utils.seed(0) 239 | >>> data = paramnormal.activity.random('lognormal', μ=0.75, σ=1.2, shape=125) 240 | >>> logdata = numpy.log10(data) 241 | >>> bins = numpy.logspace(logdata.min(), logdata.max(), num=30) 242 | >>> distplot_opts = dict(rug=True, kde=False, norm_hist=True, bins=bins) 243 | >>> ax = paramnormal.activity.plot('lognormal', data=data, distplot=True, 244 | ... xscale='log', pad=0.01, 245 | ... line_opts=line_opts, 246 | ... 
distplot_opts=distplot_opts) 247 | 248 | """ 249 | 250 | # validate the axes and distribution function (`which`) 251 | fig, ax = _check_ax(ax) 252 | if data is not None: 253 | distro = fit(distro, data, as_params=False, **guesses) 254 | else: 255 | distro = _check_distro(distro, **guesses) 256 | fxn = getattr(distro, which.lower()) 257 | 258 | # determine and set the xlimits of the plot 259 | xlimits = distro.ppf([pad / 100, 1 - pad / 100]) 260 | 261 | # determine the x-values 262 | if xscale == "log": 263 | # xlimits = numpy.log10(xlimits) 264 | x_hat = numpy.logspace(*numpy.log10(xlimits), num=100) 265 | else: 266 | x_hat = numpy.linspace(*xlimits, num=100) 267 | 268 | # compute y-values 269 | y_hat = fxn(x_hat) 270 | 271 | line_opts = dict() if line_opts is None else line_opts 272 | line_opts["label"] = line_opts.pop("label", which) 273 | 274 | (line,) = ax.plot(x_hat, y_hat, **line_opts) 275 | ax.set_xscale(xscale) 276 | 277 | return ax 278 | -------------------------------------------------------------------------------- /paramnormal/dist.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | 3 | import numpy 4 | from scipy import stats 5 | 6 | from paramnormal import utils 7 | 8 | 9 | class BaseDist_Mixin: 10 | def __new__(cls, **params): 11 | dist_params = cls._process_args(fit=False, **params) 12 | return cls.dist(**dist_params) 13 | 14 | @classmethod 15 | def _fit(cls, data, **guesses): 16 | args = utils._remove_nones(**cls._process_args(fit=True, **guesses)) 17 | _sp_params = cls.dist.fit(data, **args) 18 | return _sp_params 19 | 20 | @classmethod 21 | def fit(cls, data, **guesses): 22 | """Fit a distribution to sample using scipy's maximum 23 | likelihood estimation methods. 24 | 25 | Parameters 26 | ---------- 27 | data : array-like 28 | A sample whose distribution parameters will be estimated. 29 | guesses : named arguments of floats 30 | Inital guess values for certain parameters of the 31 | distribution. See the class docstring for more information 32 | on the parameters. 33 | 34 | Returns 35 | ------- 36 | params : namedtuple 37 | A namedtuple containing all of the paramaters of the 38 | distribution. 39 | 40 | """ 41 | 42 | return cls.param_template(*cls._fit(data, **guesses)) 43 | 44 | @classmethod 45 | def from_params(cls, params): 46 | """Create a distribution from the ``namedtuple`` 47 | result of the :meth:`~fit` method. 48 | 49 | Examples 50 | -------- 51 | >>> import numpy 52 | >>> import paramnormal 53 | >>> # silly fake data 54 | >>> x = numpy.random.normal(size=37) 55 | >>> params = paramnormal.normal.fit(x) 56 | >>> dist = paramnormal.normal.from_params(params) 57 | 58 | """ 59 | 60 | kwargs = dict(zip(params._fields, params)) 61 | return cls(**kwargs) 62 | 63 | 64 | class normal(BaseDist_Mixin): 65 | """ 66 | Create and fit data to a normal distribution. 67 | 68 | Methods 69 | ------- 70 | fit 71 | Use scipy's maximum likelihood estimation methods to estimate 72 | the parameters of the data's distribution. 73 | from_params 74 | Create a new distribution instances from the ``namedtuple`` 75 | result of the :meth:`~fit` method. 76 | 77 | Parameters 78 | ---------- 79 | mu : float 80 | The expected value (mean) of the underlying normal distribution. 81 | Acts as the location parameter of the distribution. 82 | sigma : float 83 | The standard deviation of the underlying normal distribution. 84 | Also acts as the scale parameter of distribution. 
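# A minimal sketch (for illustration) of what the mu/sigma-to-loc/scale translation in
# `_process_args` amounts to for this class: `paramnormal.normal(mu=..., sigma=...)`
# builds the same frozen distribution as `scipy.stats.norm(loc=..., scale=...)`, so
# draws from the two match seed-for-seed.
import numpy
from scipy import stats
import paramnormal as pn

numpy.random.seed(0)
a = pn.normal(mu=5, sigma=2).rvs(size=3)
numpy.random.seed(0)
b = stats.norm(loc=5, scale=2).rvs(size=3)  # the same frozen distribution
assert numpy.allclose(a, b)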
85 | 86 | Examples 87 | -------- 88 | >>> import numpy 89 | >>> import paramnormal as pn 90 | >>> numpy.random.seed(0) 91 | >>> pn.normal(mu=5, sigma=2).rvs(size=3) 92 | array([ 8.52810469, 5.80031442, 6.95747597]) 93 | 94 | >>> # english names and greek symbols are interchangeable 95 | >>> numpy.random.seed(0) 96 | >>> pn.normal(μ=5, σ=2).rvs(size=3) 97 | array([ 8.52810469, 5.80031442, 6.95747597]) 98 | 99 | >>> # silly fake data 100 | >>> numpy.random.seed(0) 101 | >>> data = numpy.random.normal(5, 2, size=37) 102 | >>> # pretend `data` is unknown and we want to fit a dist. to it 103 | >>> pn.normal.fit(data) 104 | params(mu=5.6480512782619359, sigma=2.1722505742582769) 105 | 106 | >>> # estimate sigma when mu is fixed a known value: 107 | >>> pn.normal.fit(data, mu=4.75) 108 | params(mu=4.75, sigma=2.3505677305181645) 109 | 110 | References 111 | ---------- 112 | http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norm.html 113 | https://en.wikipedia.org/wiki/normal_distribution 114 | 115 | See Also 116 | -------- 117 | scipy.stats.norm 118 | numpy.random.normal 119 | 120 | """ 121 | 122 | dist = stats.norm 123 | param_template = namedtuple("params", ["mu", "sigma"]) 124 | name = "normal" 125 | 126 | @staticmethod 127 | @utils.greco_deco 128 | def _process_args(mu=None, sigma=None, fit=False): 129 | loc_key, scale_key = utils._get_loc_scale_keys(fit=fit) 130 | return {loc_key: mu, scale_key: sigma} 131 | 132 | 133 | class lognormal(BaseDist_Mixin): 134 | """ 135 | Create and fit data to a lognormal distribution. 136 | 137 | Methods 138 | ------- 139 | fit 140 | Use scipy's maximum likelihood estimation methods to estimate 141 | the parameters of the data's distribution. By default, `offset` 142 | is fixed at 0. Thus, only `mu` and `sigma` are estimated unless 143 | the `offset` is explicitly set to `None`. 144 | from_params 145 | Create a new distribution instances from the ``namedtuple`` 146 | result of the :meth:`~fit` method. 147 | 148 | Parameters 149 | ---------- 150 | mu : float 151 | The expected value (mean) of the underlying normal distribution. 152 | Acts as the scale parameter of the distribution. 153 | sigma : float 154 | The standard deviation of the underlying normal distribution. 155 | Also acts as the shape parameter of distribution. 156 | offset : float, optional 157 | The location parameter of the distribution. It's effectively 158 | the lower bound of the distribution. In other works, if you're 159 | investigating some quantity that cannot go below zero (e.g., 160 | pollutant concentrations), leave this as the default (zero). 161 | 162 | .. note :: 163 | When fitting a lognormal distribution to a dataset, this will 164 | be fixed at its default value unless you explicitly set 165 | it to another value. Set it to `None` if wish that it be 166 | estimated entirely from scratch. 167 | 168 | Examples 169 | -------- 170 | >>> import numpy 171 | >>> import paramnormal as pn 172 | >>> numpy.random.seed(0) 173 | >>> pn.lognormal(mu=5, sigma=2).rvs(size=3) 174 | array([ 5054.85624027, 330.40342795, 1050.97750604]) 175 | 176 | >>> # you can also use greek letters 177 | >>> numpy.random.seed(0) 178 | >>> pn.lognormal(μ=5, σ=2).rvs(size=3) 179 | array([ 5054.85624027, 330.40342795, 1050.97750604]) 180 | 181 | >>> # silly fake data 182 | >>> numpy.random.seed(0) 183 | >>> data = numpy.random.lognormal(5, 2, size=37) 184 | >>> # pretend `data` is unknown and we want to fit a dist. 
to it 185 | >>> pn.lognormal.fit(data) 186 | params(mu=5.6480511731060181, sigma=2.172250571711877, offset=0) 187 | 188 | >>> # estimate sigma when mu is fixed a known value: 189 | >>> pn.lognormal.fit(data, mu=4.75) 190 | params(mu=4.75, sigma=2.3505859375000036, offset=0) 191 | 192 | >>> # include `offset` in the estimate 193 | >>> pn.lognormal.fit(data, offset=None) 194 | params(mu=5.6538159643068386, sigma=2.1596452081058795, offset=-0.12039282461824304) 195 | 196 | References 197 | ---------- 198 | http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.lognorm.html 199 | https://en.wikipedia.org/wiki/lognormal_distribution 200 | 201 | See Also 202 | -------- 203 | scipy.stats.lognorm 204 | numpy.random.lognormal 205 | 206 | """ 207 | 208 | dist = stats.lognorm 209 | param_template = namedtuple("params", ["mu", "sigma", "offset"]) 210 | name = "lognormal" 211 | 212 | @staticmethod 213 | @utils.greco_deco 214 | def _process_args(mu=None, sigma=None, offset=0, fit=False): 215 | loc_key, scale_key = utils._get_loc_scale_keys(fit=fit) 216 | key = "f0" if fit else "s" 217 | if offset is None and not fit: 218 | raise ValueError("`offset` parameter is required. Recommended value is 0.") 219 | return {key: sigma, scale_key: numpy.exp(mu) if mu is not None else mu, loc_key: offset} 220 | 221 | @classmethod 222 | def fit(cls, data, **guesses): 223 | params = cls._fit(data, **guesses) 224 | return cls.param_template(mu=numpy.log(params[2]), sigma=params[0], offset=params[1]) 225 | 226 | 227 | class weibull(BaseDist_Mixin): 228 | """ 229 | Create and fit data to a weibull distribution. 230 | 231 | Methods 232 | ------- 233 | fit 234 | Use scipy's maximum likelihood estimation methods to estimate 235 | the parameters of the data's distribution. By default, `loc` 236 | and `scale` are fixed at 0 and 1, respectively. Thus, only `k` 237 | is estimated unless `loc` or `scale` are explicitly set to 238 | `None`. 239 | from_params 240 | Create a new distribution instances from the ``namedtuple`` 241 | result of the :meth:`~fit` method. 242 | 243 | Parameters 244 | ---------- 245 | k : float 246 | The shape parameter of the distribution. 247 | 248 | .. note :: 249 | Strictly speaking, the weibull distribution has a second 250 | shape parameter, lambda. However, it seems to be always 251 | set to 1. So much so that scipy doesn't give you any other 252 | option. 253 | 254 | loc, scale : floats, optional 255 | Location and scale parameters of the distribution. These 256 | default to, and should probably be left at, 0 and 1, 257 | respectively. 258 | 259 | .. note :: 260 | When fitting a weibull distribution to a dataset, these will 261 | be fixed at their default values unless you explicitly set 262 | them to other values. Set them to `None` if you wish that 263 | they be estimated entirely from scratch. 264 | 265 | Examples 266 | -------- 267 | >>> import numpy 268 | >>> import paramnormal as pn 269 | >>> numpy.random.seed(0) 270 | >>> pn.weibull(k=5).rvs(size=3) 271 | array([ 0.9553641 , 1.04662991, 0.98415009]) 272 | 273 | >>> # silly fake data 274 | >>> numpy.random.seed(0) 275 | >>> data = numpy.random.weibull(5, size=37) 276 | >>> # pretend `data` is unknown and we want to fit a dist. 
to it 277 | >>> pn.weibull.fit(data) 278 | params(k=5.4158203125000091, loc=0, scale=1) 279 | 280 | >>> # include `loc` and `scale` in the estimate 281 | >>> pn.weibull.fit(data, loc=None, scale=None) 282 | params(k=14.120107702486127, loc=-1.389856535577052, scale=2.4320324339845572) 283 | 284 | References 285 | ---------- 286 | http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.weibull_min.html 287 | https://en.wikipedia.org/wiki/weibull_distribution 288 | 289 | See Also 290 | -------- 291 | scipy.stats.weibull_min 292 | scipy.stats.frechet_min 293 | numpy.random.weibull 294 | 295 | """ 296 | 297 | dist = stats.weibull_min 298 | param_template = namedtuple("params", ["k", "loc", "scale"]) 299 | name = "weibull" 300 | 301 | @staticmethod 302 | @utils.greco_deco 303 | def _process_args(k=None, loc=0, scale=1, fit=False): 304 | loc_key, scale_key = utils._get_loc_scale_keys(fit=fit) 305 | key = "f0" if fit else "c" 306 | return {key: k, loc_key: loc, scale_key: scale} 307 | 308 | 309 | class alpha(BaseDist_Mixin): 310 | """ 311 | Create and fit data to a alpha distribution. 312 | 313 | Methods 314 | ------- 315 | fit 316 | Use scipy's maximum likelihood estimation methods to estimate 317 | the parameters of the data's distribution. By default, `loc` 318 | and `scale` are fixed at 0 and 1, respectively. Thus, only 319 | `alpha` is estimated unless `loc` or `scale` are explicitly set 320 | to `None`. 321 | from_params 322 | Create a new distribution instances from the ``namedtuple`` 323 | result of the :meth:`~fit` method. 324 | 325 | Parameters 326 | ---------- 327 | alpha : float 328 | The shape parameter of the distribution. 329 | 330 | loc, scale : floats, optional 331 | Location and scale parameters of the distribution. These 332 | default to, and should probably be left at, 0 and 1, 333 | respectively. 334 | 335 | .. note :: 336 | When fitting a alpha distribution to a dataset, these will 337 | be fixed at their default values unless you explicitly set 338 | them to other values. Set them to `None` if you wish that 339 | they be estimated entirely from scratch. 340 | 341 | Examples 342 | -------- 343 | >>> import numpy 344 | >>> from scipy import stats 345 | >>> import paramnormal as pn 346 | >>> numpy.random.seed(0) 347 | >>> pn.alpha(alpha=5).rvs(size=3) 348 | array([ 0.20502995, 0.22566277, 0.21099298]) 349 | 350 | >>> # you can also use greek letters 351 | >>> numpy.random.seed(0) 352 | >>> pn.alpha(α=5).rvs(size=3) 353 | array([ 0.20502995, 0.22566277, 0.21099298]) 354 | 355 | >>> # silly fake data 356 | >>> numpy.random.seed(0) 357 | >>> data = stats.alpha.rvs(5, size=37) 358 | >>> # pretend `data` is unknown and we want to fit a dist. 
to it 359 | >>> pn.alpha.fit(data) 360 | params(alpha=4.8356445312500096, loc=0, scale=1) 361 | 362 | >>> # include `loc` and `scale` in the estimate 363 | >>> pn.alpha.fit(data, loc=None, scale=None) 364 | params(alpha=8.6781299501492342, loc=-0.15002784429644306, scale=3.1262971852456447) 365 | 366 | References 367 | ---------- 368 | http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.alpha.html 369 | 370 | See Also 371 | -------- 372 | scipy.stats.alpha 373 | 374 | """ 375 | 376 | dist = stats.alpha 377 | param_template = namedtuple("params", ["alpha", "loc", "scale"]) 378 | name = "alpha" 379 | 380 | @staticmethod 381 | @utils.greco_deco 382 | def _process_args(alpha=None, loc=0, scale=1, fit=False): 383 | loc_key, scale_key = utils._get_loc_scale_keys(fit=fit) 384 | alpha_key = "f0" if fit else "a" 385 | return {alpha_key: alpha, loc_key: loc, scale_key: scale} 386 | 387 | 388 | class beta(BaseDist_Mixin): 389 | """ 390 | Create and fit data to a beta distribution. 391 | 392 | Methods 393 | ------- 394 | fit 395 | Use scipy's maximum likelihood estimation methods to estimate 396 | the parameters of the data's distribution. By default, `loc` 397 | and `scale` are fixed at 0 and 1, respectively. Thus, only 398 | `alpha` and `beta` are estimated unless `loc` or `scale` are 399 | explicitly set to `None`. 400 | from_params 401 | Create a new distribution instances from the ``namedtuple`` 402 | result of the :meth:`~fit` method. 403 | 404 | Parameters 405 | ---------- 406 | alpha, beta : float 407 | The (positive) shape parameters of the distribution. 408 | loc, scale : floats, optional 409 | Location and scale parameters of the distribution. These 410 | default to, and should probably be left at, 0 and 1, 411 | respectively. 412 | 413 | .. note :: 414 | When fitting a beta distribution to a dataset, these will 415 | be fixed at their default values unless you explicitly set 416 | them to other values. Set them to `None` if you wish that 417 | they be estimated entirely from scratch. 418 | 419 | Examples 420 | -------- 421 | >>> import numpy 422 | >>> import paramnormal as pn 423 | >>> numpy.random.seed(0) 424 | >>> pn.beta(alpha=2, beta=5).rvs(size=3) 425 | array([ 0.47917138, 0.6550558 , 0.21501632]) 426 | 427 | >>> # you can also use greek letters 428 | >>> numpy.random.seed(0) 429 | >>> pn.beta(α=2, β=5).rvs(size=3) 430 | array([ 0.47917138, 0.6550558 , 0.21501632]) 431 | 432 | >>> # silly fake data 433 | >>> numpy.random.seed(0) 434 | >>> data = pn.beta(alpha=2, beta=5).rvs(size=37) 435 | >>> # pretend `data` is unknown and we want to fit a dist. 
to it 436 | >>> pn.beta.fit(data) 437 | params(alpha=1.6784891179355115, beta=4.2459121691279398, loc=0, scale=1) 438 | 439 | >>> # just estimate beta with a known alpha 440 | >>> pn.beta.fit(data, alpha=2) 441 | params(alpha=2, beta=4.9699264393421139, loc=0, scale=1) 442 | 443 | >>> # include `loc` and `scale` in the estimate 444 | >>> pn.beta.fit(data, loc=None, scale=None) 445 | params( 446 | alpha=1.8111139255547926, 447 | beta=4.6972775768688697, 448 | loc=-0.0054013993799938431, 449 | scale=1.0388376932132561 450 | ) 451 | 452 | References 453 | ---------- 454 | http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html 455 | https://en.wikipedia.org/wiki/beta_distribution 456 | 457 | See Also 458 | -------- 459 | scipy.stats.beta 460 | numpy.random.beta 461 | 462 | """ 463 | 464 | dist = stats.beta 465 | param_template = namedtuple("params", ["alpha", "beta", "loc", "scale"]) 466 | name = "beta" 467 | 468 | @staticmethod 469 | @utils.greco_deco 470 | def _process_args(alpha=None, beta=None, loc=0, scale=1, fit=False): 471 | loc_key, scale_key = utils._get_loc_scale_keys(fit=fit) 472 | alpha_key = "f0" if fit else "a" 473 | beta_key = "f1" if fit else "b" 474 | return {alpha_key: alpha, beta_key: beta, loc_key: loc, scale_key: scale} 475 | 476 | 477 | class gamma(BaseDist_Mixin): 478 | """ 479 | Create and fit data to a gamma distribution. 480 | 481 | Methods 482 | ------- 483 | fit 484 | Use scipy's maximum likelihood estimation methods to estimate 485 | the parameters of the data's distribution. By default, `loc` 486 | and `scale` are fixed at 0 and 1, respectively. Thus, only 487 | `alpha` and `beta` are estimated unless `loc` or `scale` are 488 | explicitly set to `None`. 489 | from_params 490 | Create a new distribution instances from the ``namedtuple`` 491 | result of the :meth:`~fit` method. 492 | 493 | Parameters 494 | ---------- 495 | k, theta : float 496 | The shape and scale parameters of the distribution, 497 | respectively. 498 | loc : float, optional 499 | Location parameter of the distribution. This defaults to, and 500 | should probably be left at, 0. 501 | 502 | .. note :: 503 | When fitting a beta distribution to a dataset, this will 504 | be fixed at its default value unless you explicitly set 505 | it to other values. Set to `None` if you wish that it be 506 | estimated entirely from scratch. 507 | 508 | Examples 509 | -------- 510 | >>> import numpy 511 | >>> import paramnormal as pn 512 | >>> numpy.random.seed(0) 513 | >>> pn.gamma(k=2, theta=5).rvs(size=3) 514 | array([ 25.69414788, 11.19240456, 27.13566137]) 515 | 516 | >>> # you can also use greek letters 517 | >>> numpy.random.seed(0) 518 | >>> pn.gamma(k=2, θ=5).rvs(size=3) 519 | array([ 25.69414788, 11.19240456, 27.13566137]) 520 | 521 | >>> # silly fake data 522 | >>> numpy.random.seed(0) 523 | >>> data = pn.gamma(k=2, θ=5).rvs(size=37) 524 | >>> # pretend `data` is unknown and we want to fit a dist. 
to it
525 | >>> pn.gamma.fit(data)
526 | params(k=1.3379069223213471, loc=0, theta=7.5830062081633622)
527 | 
528 | >>> # just estimate theta with a known k
529 | >>> pn.gamma.fit(data, theta=5)
530 | params(k=1.8060453251225814, loc=0, theta=5)
531 | 
532 | >>> # include `loc` in the estimate
533 | >>> pn.gamma.fit(data, loc=None)
534 | params(k=1.0996117768860174, loc=0.29914735266576881, theta=8.9542450315590756)
535 | 
536 | References
537 | ----------
538 | http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gamma.html
539 | https://en.wikipedia.org/wiki/gamma_distribution
540 | 
541 | See Also
542 | --------
543 | scipy.stats.gamma
544 | numpy.random.gamma
545 | 
546 | """
547 | 
548 | dist = stats.gamma
549 | param_template = namedtuple("params", ["k", "loc", "theta"])
550 | name = "gamma"
551 | 
552 | @staticmethod
553 | @utils.greco_deco
554 | def _process_args(k=None, theta=None, loc=0, fit=False):
555 | loc_key, scale_key = utils._get_loc_scale_keys(fit=fit)
556 | key = "f0" if fit else "a"
557 | return {key: k, loc_key: loc, scale_key: theta}
558 | 
559 | 
560 | class chi_squared(BaseDist_Mixin):
561 | """
562 | Create and fit data to a chi-squared distribution.
563 | 
564 | Methods
565 | -------
566 | fit
567 | Use scipy's maximum likelihood estimation methods to estimate
568 | the parameters of the data's distribution. By default, `loc`
569 | and `scale` are fixed at 0 and 1, respectively. Thus, only
570 | `k` is estimated unless `loc` or `scale` are
571 | explicitly set to `None`.
572 | from_params
573 | Create a new distribution instance from the ``namedtuple``
574 | result of the :meth:`~fit` method.
575 | 
576 | Parameters
577 | ----------
578 | k : float
579 | The degrees of freedom of the
580 | distribution.
581 | loc, scale : floats, optional
582 | Location and scale parameters of the distribution. These
583 | default to, and should probably be left at, 0 and 1,
584 | respectively.
585 | 
586 | .. note ::
587 | When fitting a chi-squared distribution to a dataset, these
588 | will be fixed at their default values unless you explicitly
589 | set them to other values. Set them to `None` if you wish that
590 | they be estimated entirely from scratch.
591 | 
592 | Examples
593 | --------
594 | >>> import numpy
595 | >>> import paramnormal as pn
596 | >>> numpy.random.seed(0)
597 | >>> pn.chi_squared(k=2).rvs(size=3)
598 | array([ 1.59174902, 2.51186153, 1.84644629])
599 | 
600 | >>> # silly fake data
601 | >>> numpy.random.seed(0)
602 | >>> data = pn.chi_squared(k=2).rvs(size=37)
603 | >>> # pretend `data` is unknown and we want to fit a dist.
to it
604 | >>> pn.chi_squared.fit(data)
605 | params(k=2.2668945312500028, loc=0, scale=1)
606 | 
607 | >>> # include `loc` in the estimate
608 | >>> pn.chi_squared.fit(data, loc=None)
609 | params(k=1.9361813889429524, loc=0.037937143324767775, scale=1)
610 | 
611 | References
612 | ----------
613 | http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chi2.html
614 | https://en.wikipedia.org/wiki/Chi-squared_distribution
615 | 
616 | See Also
617 | --------
618 | scipy.stats.chi2
619 | numpy.random.chisquare
620 | 
621 | """
622 | 
623 | dist = stats.chi2
624 | param_template = namedtuple("params", ["k", "loc", "scale"])
625 | name = "chi_squared"
626 | 
627 | @staticmethod
628 | @utils.greco_deco
629 | def _process_args(k=None, loc=0, scale=1, fit=False):
630 | loc_key, scale_key = utils._get_loc_scale_keys(fit=fit)
631 | key = "f0" if fit else "df"
632 | return {key: k, loc_key: loc, scale_key: scale}
633 | 
634 | 
635 | class pareto(BaseDist_Mixin):
636 | """
637 | Create and fit data to a Pareto distribution.
638 | 
639 | Methods
640 | -------
641 | fit
642 | Use scipy's maximum likelihood estimation methods to estimate
643 | the parameters of the data's distribution. By default, `loc`
644 | and `scale` are fixed at 0 and 1, respectively. Thus, only
645 | `alpha` is estimated unless `loc` or `scale` are explicitly
646 | set to `None`.
647 | from_params
648 | Create a new distribution instance from the ``namedtuple``
649 | result of the :meth:`~fit` method.
650 | 
651 | Parameters
652 | ----------
653 | alpha : float
654 | The shape parameter of the distribution.
655 | loc, scale : floats, optional
656 | Location and scale parameters of the distribution. These
657 | default to, and should probably be left at, 0 and 1,
658 | respectively.
659 | 
660 | .. note ::
661 | When fitting a Pareto distribution to a dataset, these will
662 | be fixed at their default values unless you explicitly set
663 | them to other values. Set them to `None` if you wish that they
664 | be estimated entirely from scratch.
665 | 
666 | Examples
667 | --------
668 | >>> import numpy
669 | >>> import paramnormal as pn
670 | >>> numpy.random.seed(0)
671 | >>> pn.pareto(alpha=2).rvs(size=3)
672 | array([ 1.48875061, 1.87379424, 1.58662889])
673 | 
674 | >>> # you can also use greek letters
675 | >>> numpy.random.seed(0)
676 | >>> pn.pareto(α=2).rvs(size=3)
677 | array([ 1.48875061, 1.87379424, 1.58662889])
678 | 
679 | >>> # silly fake data
680 | >>> numpy.random.seed(0)
681 | >>> data = pn.pareto(alpha=2).rvs(size=37)
682 | >>> # pretend `data` is unknown and we want to fit a dist.
to it 683 | >>> pn.pareto.fit(data) 684 | params(alpha=1.7850585937500019, loc=0, scale=1) 685 | 686 | >>> # include `loc` in the estimate 687 | >>> pn.pareto.fit(data, loc=None) 688 | params(alpha=1.8040853559635659, loc=0.009529403810858695, scale=1) 689 | 690 | References 691 | ---------- 692 | http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pareto.html 693 | https://en.wikipedia.org/wiki/pareto_distribution 694 | 695 | See Also 696 | -------- 697 | scipy.stats.pareto 698 | numpy.random.pareto 699 | 700 | """ 701 | 702 | dist = stats.pareto 703 | param_template = namedtuple("params", ["alpha", "loc", "scale"]) 704 | name = "pareto" 705 | 706 | @staticmethod 707 | @utils.greco_deco 708 | def _process_args(alpha=None, loc=0, scale=1, fit=False): 709 | loc_key, scale_key = utils._get_loc_scale_keys(fit=fit) 710 | key = "f0" if fit else "b" 711 | return {key: alpha, loc_key: loc, scale_key: scale} 712 | 713 | 714 | class exponential(BaseDist_Mixin): 715 | """ 716 | Create and fit data to an exponential distribution. 717 | 718 | Methods 719 | ------- 720 | fit 721 | Use scipy's maximum likelihood estimation methods to estimate 722 | the parameters of the data's distribution. By default, `loc` 723 | is fixed at 0. Thus, only `lamda` is estimated unless `loc` is 724 | explicitly set to `None`. 725 | from_params 726 | Create a new distribution instances from the ``namedtuple`` 727 | result of the :meth:`~fit` method. 728 | 729 | Parameters 730 | ---------- 731 | lamda : float 732 | The shape parameter of the distribution. 733 | 734 | .. note :: 735 | For our purposes, we spell `lambda` as `lamda` to avoid 736 | conflicting with the python keyword ``lambda``. 737 | 738 | loc : float, optional 739 | Location parameter of the distribution. This defaults to, and 740 | should probably be left at, 0. 741 | 742 | .. note :: 743 | When fitting an exponential distribution to a dataset, this 744 | will be fixed at its default value unless you explicitly set 745 | it to other values. Set to `None` if you wish that it be 746 | estimated entirely from scratch. 747 | 748 | Examples 749 | -------- 750 | >>> import numpy 751 | >>> import paramnormal as pn 752 | >>> numpy.random.seed(0) 753 | >>> pn.exponential(lamda=2).rvs(size=3) 754 | array([ 0.39793725, 0.62796538, 0.46161157]) 755 | 756 | >>> # you can also use greek letters 757 | >>> numpy.random.seed(0) 758 | >>> pn.exponential(λ=2).rvs(size=3) 759 | array([ 0.39793725, 0.62796538, 0.46161157]) 760 | 761 | >>> # silly fake data 762 | >>> numpy.random.seed(0) 763 | >>> data = pn.exponential(λ=2).rvs(size=37) 764 | >>> # pretend `data` is unknown and we want to fit a dist. 
to it 765 | >>> pn.exponential.fit(data) 766 | params(lamda=1.7849050026146085, loc=0) 767 | 768 | >>> # include `loc` in the estimate 769 | >>> pn.exponential.fit(data, loc=None) 770 | params(lamda=1.8154701618164411, loc=0.0094842718426853996) 771 | 772 | References 773 | ---------- 774 | http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.expon.html 775 | https://en.wikipedia.org/wiki/exponential_distribution 776 | 777 | See Also 778 | -------- 779 | scipy.stats.expon 780 | numpy.random.exponential 781 | 782 | """ 783 | 784 | dist = stats.expon 785 | param_template = namedtuple("params", ["lamda", "loc"]) 786 | name = "exponential" 787 | 788 | @staticmethod 789 | @utils.greco_deco 790 | def _process_args(lamda=None, loc=0, fit=False): 791 | loc_key, scale_key = utils._get_loc_scale_keys(fit=fit) 792 | return {loc_key: loc, scale_key: lamda**-1 if lamda is not None else lamda} 793 | 794 | @classmethod 795 | def fit(cls, data, **guesses): 796 | params = cls._fit(data, **guesses) 797 | return cls.param_template(loc=params[0], lamda=params[1] ** -1) 798 | 799 | 800 | class rice(BaseDist_Mixin): 801 | """ 802 | Create and fit data to a Rice distribution. 803 | 804 | Methods 805 | ------- 806 | fit 807 | Use scipy's maximum likelihood estimation methods to estimate 808 | the parameters of the data's distribution. By default, `loc` 809 | is fixed at 0. Thus, only `R` and `sigma` are estimated unless 810 | `loc` is explicitly set to `None`. 811 | from_params 812 | Create a new distribution instances from the ``namedtuple`` 813 | result of the :meth:`~fit` method. 814 | 815 | Parameters 816 | ---------- 817 | R : float 818 | The shape parameter of the distribution. 819 | sigma : float 820 | The standard deviate of the distribution. 821 | loc : float, optional 822 | Location parameter of the distribution. This defaults to, and 823 | should probably be left at, 0. 824 | 825 | .. note :: 826 | When fitting an Rice distribution to a dataset, this 827 | will be fixed at its default value unless you explicitly set 828 | it to other values. Set to `None` if you wish that it be 829 | estimated entirely from scratch. 830 | 831 | Examples 832 | -------- 833 | >>> import numpy 834 | >>> import paramnormal as pn 835 | >>> numpy.random.seed(0) 836 | >>> pn.rice(R=10, sigma=2).rvs(size=3) 837 | array([ 15.67835764, 13.36907874, 10.37753817]) 838 | 839 | >>> # you can also use greek letters 840 | >>> numpy.random.seed(0) 841 | >>> pn.rice(R=10, σ=2).rvs(size=3) 842 | array([ 15.67835764, 13.36907874, 10.37753817]) 843 | 844 | >>> # silly fake data 845 | >>> numpy.random.seed(0) 846 | >>> data = pn.rice(R=10, sigma=2).rvs(size=37) 847 | >>> # pretend `data` is unknown and we want to fit a dist. 
to it 848 | >>> pn.rice.fit(data) 849 | params(R=10.100674084593422, sigma=1.759817171541185, loc=0) 850 | 851 | >>> # include `loc` in the estimate (bad idea) 852 | >>> pn.rice.fit(data, loc=None) 853 | params(R=4.249154300734, sigma=1.862167512728, loc=5.570921659394) 854 | 855 | References 856 | ---------- 857 | http://scipy.github.io/devdocs/generated/scipy.stats.rice 858 | https://en.wikipedia.org/wiki/Rice_distribution 859 | 860 | See Also 861 | -------- 862 | scipy.stats.rice 863 | numpy.random.exponential 864 | 865 | """ 866 | 867 | dist = stats.rice 868 | param_template = namedtuple("params", ["R", "sigma", "loc"]) 869 | name = "rice" 870 | 871 | @staticmethod 872 | @utils.greco_deco 873 | def _process_args(R=None, loc=0, sigma=None, fit=False): 874 | loc_key, scale_key = utils._get_loc_scale_keys(fit=fit) 875 | bkey = "fb" if fit else "b" 876 | 877 | b = None 878 | if R is not None and sigma is not None: 879 | b = R / sigma 880 | return {loc_key: loc, scale_key: sigma, bkey: b} 881 | 882 | @classmethod 883 | def fit(cls, data, **guesses): 884 | b, loc, sigma = cls._fit(data, **guesses) 885 | return cls.param_template(R=b * sigma, loc=loc, sigma=sigma) 886 | 887 | 888 | __all__ = [ 889 | "normal", 890 | "lognormal", 891 | "weibull", 892 | "alpha", 893 | "beta", 894 | "gamma", 895 | "chi_squared", 896 | "pareto", 897 | "exponential", 898 | "rice", 899 | ] 900 | -------------------------------------------------------------------------------- /paramnormal/tests/__init__.py: -------------------------------------------------------------------------------- 1 | from pkg_resources import resource_filename 2 | 3 | 4 | def test(*args): 5 | try: 6 | import pytest 7 | except ImportError: 8 | raise ImportError("`pytest` is required to run the test suite") 9 | 10 | options = [resource_filename("paramnormal", "tests")] 11 | options.extend(list(args)) 12 | return pytest.main(options) 13 | -------------------------------------------------------------------------------- /paramnormal/tests/baseline_images/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/phobson/paramnormal/a6dfba494c6a88bc9c0f5bf62f107f7d68c31399/paramnormal/tests/baseline_images/__init__.py -------------------------------------------------------------------------------- /paramnormal/tests/baseline_images/test_activity/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/phobson/paramnormal/a6dfba494c6a88bc9c0f5bf62f107f7d68c31399/paramnormal/tests/baseline_images/test_activity/__init__.py -------------------------------------------------------------------------------- /paramnormal/tests/baseline_images/test_activity/test_plot_cdf_basic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/phobson/paramnormal/a6dfba494c6a88bc9c0f5bf62f107f7d68c31399/paramnormal/tests/baseline_images/test_activity/test_plot_cdf_basic.png -------------------------------------------------------------------------------- /paramnormal/tests/baseline_images/test_activity/test_plot_cdf_fit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/phobson/paramnormal/a6dfba494c6a88bc9c0f5bf62f107f7d68c31399/paramnormal/tests/baseline_images/test_activity/test_plot_cdf_fit.png -------------------------------------------------------------------------------- 
/paramnormal/tests/baseline_images/test_activity/test_plot_cdf_xlog.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/phobson/paramnormal/a6dfba494c6a88bc9c0f5bf62f107f7d68c31399/paramnormal/tests/baseline_images/test_activity/test_plot_cdf_xlog.png -------------------------------------------------------------------------------- /paramnormal/tests/baseline_images/test_activity/test_plot_pdf_basic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/phobson/paramnormal/a6dfba494c6a88bc9c0f5bf62f107f7d68c31399/paramnormal/tests/baseline_images/test_activity/test_plot_pdf_basic.png -------------------------------------------------------------------------------- /paramnormal/tests/baseline_images/test_activity/test_plot_pdf_fit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/phobson/paramnormal/a6dfba494c6a88bc9c0f5bf62f107f7d68c31399/paramnormal/tests/baseline_images/test_activity/test_plot_pdf_fit.png -------------------------------------------------------------------------------- /paramnormal/tests/baseline_images/test_activity/test_plot_pdf_xlog.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/phobson/paramnormal/a6dfba494c6a88bc9c0f5bf62f107f7d68c31399/paramnormal/tests/baseline_images/test_activity/test_plot_pdf_xlog.png -------------------------------------------------------------------------------- /paramnormal/tests/baseline_images/test_activity/test_plot_sf_basic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/phobson/paramnormal/a6dfba494c6a88bc9c0f5bf62f107f7d68c31399/paramnormal/tests/baseline_images/test_activity/test_plot_sf_basic.png -------------------------------------------------------------------------------- /paramnormal/tests/baseline_images/test_activity/test_plot_sf_fit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/phobson/paramnormal/a6dfba494c6a88bc9c0f5bf62f107f7d68c31399/paramnormal/tests/baseline_images/test_activity/test_plot_sf_fit.png -------------------------------------------------------------------------------- /paramnormal/tests/baseline_images/test_activity/test_plot_sf_xlog.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/phobson/paramnormal/a6dfba494c6a88bc9c0f5bf62f107f7d68c31399/paramnormal/tests/baseline_images/test_activity/test_plot_sf_xlog.png -------------------------------------------------------------------------------- /paramnormal/tests/test_activity.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import pytest 3 | from matplotlib import pyplot 4 | from scipy import stats 5 | 6 | from paramnormal import activity, dist 7 | from paramnormal.utils import seed 8 | 9 | BASELINE_DIR = "baseline_images/test_activity" 10 | TOLERANCE = 15 11 | 12 | 13 | def assert_dists_are_equivalent(dist1, dist2): 14 | numpy.random.seed(0) 15 | x1 = dist1.rvs(3) 16 | 17 | numpy.random.seed(0) 18 | x2 = dist2.rvs(3) 19 | 20 | assert numpy.all((x1 - x2) < 0.0001) 21 | 22 | 23 | def check_params(*value_pairs): 24 | for result, expected in value_pairs: 25 | assert (result - expected) < 0.00001 26 | 27 | 28 | def 
test_string_bad(): 29 | with pytest.raises(ValueError): 30 | activity._check_distro("junk") 31 | 32 | 33 | def test_number(): 34 | with pytest.raises(ValueError): 35 | activity._check_distro(45) 36 | 37 | 38 | def test_pndist_as_class(): 39 | assert activity._check_distro(dist.normal, as_class=True) == dist.normal 40 | 41 | 42 | def test_string_good_as_class(): 43 | assert activity._check_distro("normal", as_class=True) == dist.normal 44 | 45 | 46 | def test_pndist(): 47 | assert_dists_are_equivalent( 48 | activity._check_distro(dist.normal, mu=0, sigma=1), stats.norm(0, 1) 49 | ) 50 | 51 | 52 | def test_string(): 53 | assert_dists_are_equivalent(activity._check_distro("normal", mu=0, sigma=1), stats.norm(0, 1)) 54 | 55 | 56 | def test_scipy_dist(): 57 | assert_dists_are_equivalent(activity._check_distro(stats.lognorm(s=2)), stats.lognorm(s=2)) 58 | 59 | 60 | @pytest.mark.parametrize("ax", [None, pyplot.gca(), "junk"]) 61 | def test__check_ax(ax): 62 | if ax == "junk": 63 | with pytest.raises(AttributeError): 64 | activity._check_ax(ax) 65 | else: 66 | fig, ax1 = activity._check_ax(ax) 67 | 68 | assert isinstance(fig, pyplot.Figure) 69 | assert isinstance(ax1, pyplot.Axes) 70 | if ax is not None: 71 | assert ax == ax1 72 | 73 | 74 | def test_random_normal(): 75 | numpy.random.seed(0) 76 | x1 = activity.random("normal", mu=0, sigma=1, shape=(3, 4)) 77 | 78 | numpy.random.seed(0) 79 | x2 = numpy.random.normal(0, 1, size=(3, 4)) 80 | assert numpy.all((x1 - x2) < 0.0001) 81 | 82 | 83 | def test_random_beta(): 84 | numpy.random.seed(0) 85 | x1 = activity.random("beta", alpha=2, beta=3, shape=(5, 2)) 86 | 87 | numpy.random.seed(0) 88 | x2 = numpy.random.beta(2, 3, size=(5, 2)) 89 | assert numpy.all((x1 - x2) < 0.0001) 90 | 91 | 92 | @seed 93 | def test_create_normal(): 94 | data = numpy.random.normal(loc=2.0, scale=6.7, size=37) 95 | params = activity.fit("normal", data) 96 | dist = activity.fit("normal", data, as_params=False) 97 | 98 | check_params( 99 | (params.mu, 4.1709713618), 100 | (params.sigma, 7.2770395662), 101 | ) 102 | 103 | assert_dists_are_equivalent(dist, stats.norm(params.mu, params.sigma)) 104 | 105 | 106 | @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, tolerance=TOLERANCE) 107 | @seed 108 | def test_plot_pdf_basic(): 109 | # first 110 | fig, ax1 = pyplot.subplots() 111 | norm_dist = dist.normal(μ=5.4, σ=2.5) 112 | ax1 = activity.plot(norm_dist, ax=ax1) 113 | return fig 114 | 115 | 116 | @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, tolerance=TOLERANCE) 117 | @seed 118 | def test_plot_pdf_fit(): 119 | # second 120 | fig2, ax2 = pyplot.subplots() 121 | norm_dist = dist.normal(μ=5.4, σ=2.5) 122 | data = activity.random("normal", μ=5.4, σ=2.5, shape=37) 123 | ax2 = activity.plot(norm_dist, ax=ax2, line_opts=dict(label="Theoretical PDF")) 124 | ax2 = activity.plot("normal", data=data, ax=ax2, line_opts=dict(label="Fit PDF")) 125 | ax2.legend() 126 | return fig2 127 | 128 | 129 | @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, tolerance=TOLERANCE) 130 | @seed 131 | def test_plot_pdf_xlog(): 132 | # first 133 | fig, ax1 = pyplot.subplots() 134 | loc_dist = dist.lognormal(μ=1.25, σ=0.75) 135 | ax1 = activity.plot(loc_dist, ax=ax1, xscale="log") 136 | return fig 137 | 138 | 139 | @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, tolerance=TOLERANCE) 140 | @seed 141 | def test_plot_cdf_basic(): 142 | # first 143 | fig, ax1 = pyplot.subplots() 144 | norm_dist = dist.normal(μ=5.4, σ=2.5) 145 | ax1 = activity.plot(norm_dist, ax=ax1, which="cdf") 146 
| return fig 147 | 148 | 149 | @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, tolerance=TOLERANCE) 150 | @seed 151 | def test_plot_cdf_fit(): 152 | # second 153 | fig2, ax2 = pyplot.subplots() 154 | norm_dist = dist.normal(μ=5.4, σ=2.5) 155 | data = activity.random("normal", μ=5.4, σ=2.5, shape=37) 156 | ax2 = activity.plot(norm_dist, ax=ax2, line_opts=dict(label="Theoretical CDF"), which="cdf") 157 | ax2 = activity.plot("normal", data=data, ax=ax2, line_opts=dict(label="Fit CDF"), which="cdf") 158 | ax2.legend() 159 | return fig2 160 | 161 | 162 | @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, tolerance=TOLERANCE) 163 | @seed 164 | def test_plot_cdf_xlog(): 165 | # first 166 | fig, ax1 = pyplot.subplots() 167 | loc_dist = dist.lognormal(μ=1.25, σ=0.75) 168 | ax1 = activity.plot(loc_dist, ax=ax1, xscale="log", which="CDF") 169 | ax1.legend() 170 | return fig 171 | 172 | 173 | @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, tolerance=TOLERANCE) 174 | @seed 175 | def test_plot_sf_basic(): 176 | # first 177 | fig, ax1 = pyplot.subplots() 178 | norm_dist = dist.normal(μ=5.4, σ=2.5) 179 | ax1 = activity.plot(norm_dist, ax=ax1, which="sf") 180 | return fig 181 | 182 | 183 | @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, tolerance=TOLERANCE) 184 | @seed 185 | def test_plot_sf_fit(): 186 | # second 187 | fig2, ax2 = pyplot.subplots() 188 | norm_dist = dist.normal(μ=5.4, σ=2.5) 189 | data = activity.random("normal", μ=5.4, σ=2.5, shape=37) 190 | ax2 = activity.plot(norm_dist, ax=ax2, line_opts=dict(label="Theoretical sf"), which="sf") 191 | ax2 = activity.plot("normal", data=data, ax=ax2, line_opts=dict(label="Fit sf"), which="sf") 192 | ax2.legend() 193 | return fig2 194 | 195 | 196 | @pytest.mark.mpl_image_compare(baseline_dir=BASELINE_DIR, tolerance=TOLERANCE) 197 | @seed 198 | def test_plot_sf_xlog(): 199 | # first 200 | fig, ax1 = pyplot.subplots() 201 | loc_dist = dist.lognormal(μ=1.25, σ=0.75) 202 | ax1 = activity.plot(loc_dist, ax=ax1, xscale="log", which="sf") 203 | ax1.legend() 204 | return fig 205 | 206 | 207 | def test_plot_bad_attribute(): 208 | with pytest.raises(AttributeError): 209 | loc_dist = dist.lognormal(μ=1.25, σ=0.75) 210 | activity.plot(loc_dist, xscale="log", which="JUNK") 211 | -------------------------------------------------------------------------------- /paramnormal/tests/test_dist.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import numpy.testing as nptest 3 | import pytest 4 | from scipy import stats 5 | 6 | from paramnormal import dist 7 | from paramnormal.utils import seed 8 | 9 | 10 | @seed 11 | def generate_knowns(np_rand_fxn, size, *args, **kwargs): 12 | # numpy.random.pareto is actually a Lomax and needs 13 | # to be shifted by 1 14 | shift = kwargs.pop("shift", 0) 15 | kwargs.update(dict(size=size)) 16 | return np_rand_fxn(*args, **kwargs) + shift 17 | 18 | 19 | @seed 20 | def generate_test_dist(distro, size, *cargs, **ckwargs): 21 | return distro(*cargs, **ckwargs).rvs(size=size) 22 | 23 | 24 | def check_params(*value_pairs): 25 | for result, expected in value_pairs: 26 | assert (result - expected) < 0.0001 27 | 28 | 29 | @pytest.mark.parametrize( 30 | "distro, cargs, ckwds, np_rand_fxn, npargs, npkwds", 31 | [ 32 | (dist.normal, [], dict(mu=4, sigma=1.75), numpy.random.normal, [], dict(loc=4, scale=1.75)), 33 | ( 34 | dist.lognormal, 35 | [], 36 | dict(mu=4, sigma=1.75), 37 | numpy.random.lognormal, 38 | [], 39 | dict(mean=4, sigma=1.75), 40 | ), 41 | 
(dist.weibull, [], dict(k=2), numpy.random.weibull, [2], dict()), 42 | (dist.beta, [], dict(alpha=2, beta=3), numpy.random.beta, [2, 3], dict()), 43 | (dist.gamma, [], dict(k=2, theta=1), numpy.random.gamma, [2, 1], dict()), 44 | (dist.chi_squared, [], dict(k=2), numpy.random.chisquare, [2], dict()), 45 | (dist.pareto, [], dict(alpha=2), numpy.random.pareto, [2], dict(shift=1)), 46 | (dist.rice, [], dict(R=10, sigma=2), stats.rice.rvs, [5], dict(loc=0, scale=2)), 47 | ], 48 | ) 49 | @pytest.mark.parametrize("size", [10, 37, 100, 3737]) 50 | def test_random(size, distro, cargs, ckwds, np_rand_fxn, npargs, npkwds): 51 | result = generate_test_dist(distro, size, *cargs, **ckwds) 52 | known = generate_knowns(np_rand_fxn, size, *npargs, **npkwds) 53 | nptest.assert_array_almost_equal(result, known) 54 | 55 | 56 | @pytest.mark.parametrize( 57 | "distro, cargs, ckwds", 58 | [ 59 | (dist.normal, [], dict(mu=4, sigma=1.75)), 60 | (dist.lognormal, [], dict(mu=4, sigma=1.75)), 61 | (dist.weibull, [], dict(k=2)), 62 | (dist.alpha, [], dict(alpha=2)), 63 | (dist.beta, [], dict(alpha=2, beta=3)), 64 | (dist.gamma, [], dict(k=2, theta=1)), 65 | (dist.chi_squared, [], dict(k=2)), 66 | (dist.pareto, [], dict(alpha=2)), 67 | (dist.rice, [], dict(R=10, sigma=2)), 68 | ], 69 | ) 70 | def test_from_params(distro, cargs, ckwds): 71 | data = generate_test_dist(distro, 37, *cargs, **ckwds) 72 | params = distro.fit(data) 73 | newdist = distro.from_params(params) 74 | assert isinstance(newdist, stats._distn_infrastructure.rv_frozen) 75 | 76 | 77 | @pytest.mark.parametrize( 78 | "distro, cargs, ckwds", 79 | [ 80 | (dist.normal, [], dict(mu=4, sigma=1.75)), 81 | (dist.lognormal, [], dict(mu=4, sigma=1.75)), 82 | (dist.weibull, [], dict(k=2)), 83 | (dist.alpha, [], dict(alpha=2)), 84 | (dist.beta, [], dict(alpha=2, beta=3)), 85 | (dist.gamma, [], dict(k=2, theta=1)), 86 | (dist.chi_squared, [], dict(k=2)), 87 | (dist.pareto, [], dict(alpha=2)), 88 | (dist.rice, [], dict(R=10, sigma=2)), 89 | ], 90 | ) 91 | def test_xxx(distro, cargs, ckwds): 92 | distro(*cargs, **ckwds) 93 | 94 | 95 | @pytest.mark.parametrize( 96 | "distro, ckwds, fit, expected", 97 | [ 98 | (dist.normal, dict(mu=2, sigma=2.45), False, dict(loc=2, scale=2.45)), 99 | (dist.normal, dict(mu=2, sigma=2.45), True, dict(floc=2, fscale=2.45)), 100 | (dist.lognormal, dict(mu=2, sigma=2.45), False, dict(scale=numpy.exp(2), s=2.45, loc=0)), 101 | (dist.lognormal, dict(mu=2, sigma=2.45), True, dict(fscale=numpy.exp(2), f0=2.45, floc=0)), 102 | (dist.weibull, dict(k=2), False, dict(c=2, loc=0, scale=1)), 103 | (dist.weibull, dict(k=2), True, dict(f0=2, floc=0, fscale=1)), 104 | (dist.alpha, dict(alpha=2), False, dict(a=2, loc=0, scale=1)), 105 | (dist.alpha, dict(alpha=2), True, dict(f0=2, floc=0, fscale=1)), 106 | (dist.beta, dict(alpha=2, beta=5), False, dict(a=2, b=5, loc=0, scale=1)), 107 | (dist.beta, dict(alpha=2, beta=5), True, dict(f0=2, f1=5, floc=0, fscale=1)), 108 | (dist.gamma, dict(k=1, theta=2), False, dict(a=1, loc=0, scale=2)), 109 | (dist.gamma, dict(k=1, theta=2), True, dict(f0=1, floc=0, fscale=2)), 110 | (dist.chi_squared, dict(k=5), False, dict(df=5, loc=0, scale=1)), 111 | (dist.chi_squared, dict(k=5), True, dict(f0=5, floc=0, fscale=1)), 112 | (dist.pareto, dict(alpha=4.78), False, dict(b=4.78, loc=0, scale=1)), 113 | (dist.pareto, dict(alpha=4.78), True, dict(f0=4.78, floc=0, fscale=1)), 114 | (dist.exponential, dict(lamda=2.0), False, dict(loc=0, scale=0.5)), 115 | (dist.exponential, dict(lamda=2.0), True, dict(floc=0, fscale=0.5)), 116 | 
(dist.rice, dict(R=10, sigma=2), False, dict(b=5, loc=0, scale=2)), 117 | (dist.rice, dict(R=10, sigma=2), True, dict(fb=5, floc=0, fscale=2)), 118 | ], 119 | ) 120 | def test_processargs(distro, ckwds, fit, expected): 121 | result = distro._process_args(**ckwds, fit=fit) 122 | assert result == expected 123 | 124 | 125 | def test_process_args_no_offset(): 126 | with pytest.raises(ValueError): 127 | dist.lognormal._process_args(offset=None) 128 | 129 | 130 | @seed 131 | def test_fit_normal(): 132 | data = numpy.random.normal(loc=2.0, scale=6.7, size=37) 133 | params = dist.normal.fit(data) 134 | check_params( 135 | (params.mu, 4.1709713618), 136 | (params.sigma, 7.2770395662), 137 | ) 138 | 139 | 140 | @seed 141 | def test_fit_lognormal(): 142 | data = numpy.random.lognormal(mean=2.0, sigma=6.7, size=37) 143 | params = dist.lognormal.fit(data) 144 | check_params((params.mu, 4.1709713618), (params.sigma, 7.2770395662), (params.offset, 0.0)) 145 | 146 | 147 | @seed 148 | def test_fit_weibull(): 149 | data = numpy.random.weibull(2, size=37) 150 | params = dist.weibull.fit(data) 151 | check_params( 152 | (params.k, 2.1663085937500024), 153 | (params.loc, 0), 154 | (params.scale, 1), 155 | ) 156 | 157 | 158 | @seed 159 | def test_fit_alpha(): 160 | data = stats.alpha(5).rvs(size=37) 161 | params = dist.alpha.fit(data) 162 | check_params( 163 | (params.alpha, 4.8356445312500096), 164 | (params.loc, 0), 165 | (params.scale, 1), 166 | ) 167 | 168 | 169 | @seed 170 | def test_fit_beta(): 171 | data = numpy.random.beta(2, 7, size=37) 172 | 173 | no_guesses = dist.beta.fit(data) 174 | check_params( 175 | (no_guesses.alpha, 1.65675833325), 176 | (no_guesses.beta, 5.78176888942), 177 | (no_guesses.loc, 0), 178 | (no_guesses.scale, 1), 179 | ) 180 | 181 | guess_alpha = dist.beta.fit(data, alpha=2) 182 | check_params( 183 | (guess_alpha.alpha, 2), 184 | (guess_alpha.beta, 6.8812340590409891), 185 | (guess_alpha.loc, 0), 186 | (guess_alpha.scale, 1), 187 | ) 188 | 189 | guess_beta = dist.beta.fit(data, beta=7) 190 | check_params( 191 | (guess_beta.alpha, 1.91476622934291), 192 | (guess_beta.beta, 7), 193 | (guess_beta.loc, 0), 194 | (guess_beta.scale, 1), 195 | ) 196 | 197 | 198 | @seed 199 | def test_fit_gamma(): 200 | data = numpy.random.gamma(2, 5, size=37) 201 | params = dist.gamma.fit(data) 202 | check_params( 203 | (params.k, 1.3379069223213478), 204 | (params.loc, 0), 205 | (params.theta, 7.5830062081633587), 206 | ) 207 | 208 | 209 | @seed 210 | def test_fit_chi_squareed(): 211 | data = numpy.random.chisquare(2, size=37) 212 | params = dist.chi_squared.fit(data) 213 | check_params( 214 | (params.k, 2.2668945312500028), 215 | (params.loc, 0), 216 | (params.scale, 1), 217 | ) 218 | 219 | 220 | @seed 221 | def test_fit_pareto(): 222 | data = numpy.random.pareto(a=2, size=37) + 1 223 | params = dist.pareto.fit(data) 224 | check_params( 225 | (params.alpha, 1.7850585937500019), 226 | (params.loc, 0), 227 | (params.scale, 1), 228 | ) 229 | 230 | 231 | @seed 232 | def test_fit_exponential(): 233 | data = numpy.random.exponential(0.5, size=37) 234 | params = dist.exponential.fit(data) 235 | check_params( 236 | (params.lamda, 1.785060162078026), 237 | (params.loc, 0), 238 | ) 239 | 240 | 241 | @seed 242 | def test_fit_rice(): 243 | data = stats.rice(5, loc=0, scale=2).rvs(size=37) 244 | params = dist.rice.fit(data) 245 | check_params( 246 | (params.R, 10.100674084593422), 247 | (params.sigma, 1.759817171541185), 248 | (params.loc, 0), 249 | ) 250 | 
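
# A minimal, illustrative round-trip sketch that assumes only the ``fit`` and
# ``from_params`` API already exercised above: parameters fitted from data
# should rebuild an equivalent frozen scipy distribution.
@seed
def test_fit_from_params_roundtrip_normal():
    data = numpy.random.normal(loc=2.0, scale=6.7, size=37)
    params = dist.normal.fit(data)
    rebuilt = dist.normal.from_params(params)
    nptest.assert_array_almost_equal(
        [rebuilt.mean(), rebuilt.std()],
        [params.mu, params.sigma],
    )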
-------------------------------------------------------------------------------- /paramnormal/tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from paramnormal import dist, utils 4 | 5 | 6 | @pytest.mark.parametrize( 7 | ("distro", "distargs", "expected"), 8 | [ 9 | (dist.normal, dict(mu=1, sigma=2), dict(loc=1, scale=2)), 10 | (dist.normal, dict(μ=1, σ=2), dict(loc=1, scale=2)), 11 | (dist.beta, dict(alpha=1, beta=2), dict(a=1, b=2, loc=0, scale=1)), 12 | (dist.beta, dict(α=1, β=2), dict(a=1, b=2, loc=0, scale=1)), 13 | (dist.gamma, dict(k=1, theta=2), dict(a=1, loc=0, scale=2)), 14 | (dist.gamma, dict(k=1, θ=2), dict(a=1, loc=0, scale=2)), 15 | ], 16 | ) 17 | def test_greco_deco(distro, distargs, expected): 18 | result = distro._process_args(**distargs) 19 | assert result == expected 20 | 21 | 22 | @pytest.mark.parametrize( 23 | ("a", "b", "c", "expected"), 24 | [ 25 | (1, 2, 3, dict(a=1, b=2, c=3)), 26 | (1, None, 3, dict(a=1, c=3)), 27 | (None, None, None, dict()), 28 | ], 29 | ) 30 | def test__remove_nones(a, b, c, expected): 31 | result = utils._remove_nones(a=a, b=b, c=c) 32 | assert result == expected 33 | 34 | 35 | @pytest.mark.parametrize( 36 | ("fit", "expected"), 37 | [ 38 | (True, ("floc", "fscale")), 39 | (False, ("loc", "scale")), 40 | ], 41 | ) 42 | def test__get_loc_scale_keys(fit, expected): 43 | result = utils._get_loc_scale_keys(fit=fit) 44 | assert result == expected 45 | -------------------------------------------------------------------------------- /paramnormal/utils.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | from inspect import signature 3 | 4 | import numpy 5 | 6 | SYMBOLS = { 7 | "μ": "mu", 8 | "σ": "sigma", 9 | "α": "alpha", 10 | "β": "beta", 11 | "γ": "gamma", 12 | "λ": "lamda", 13 | "θ": "theta", 14 | } 15 | 16 | 17 | def greco_deco(func): 18 | """Decorator to let you use greek characters for fxn kwargs.""" 19 | 20 | @wraps(func) 21 | def wrapper(*args, **kwargs): 22 | sig = signature(func) 23 | kwargs = {SYMBOLS.get(k, k): v for k, v in kwargs.items()} 24 | bound = sig.bind(*args, **kwargs) 25 | return func(**bound.arguments) 26 | 27 | return wrapper 28 | 29 | 30 | def seed(func): 31 | """Decorator to seed the RNG before any function.""" 32 | 33 | @wraps(func) 34 | def wrapper(*args, **kwargs): 35 | numpy.random.seed(0) 36 | return func(*args, **kwargs) 37 | 38 | return wrapper 39 | 40 | 41 | def _get_loc_scale_keys(fit=False): 42 | if fit: 43 | return "floc", "fscale" 44 | else: 45 | return "loc", "scale" 46 | 47 | 48 | def _remove_nones(**kwargs): 49 | """ 50 | Removes any kwargs whose values are `None`. 51 | """ 52 | 53 | final = kwargs.copy() 54 | for k in kwargs: 55 | if kwargs[k] is None: 56 | final.pop(k) 57 | return final 58 | -------------------------------------------------------------------------------- /readme.rst: -------------------------------------------------------------------------------- 1 | paramnormal: Conventionally parameterized probability distributions 2 | =================================================================== 3 | 4 | .. image:: https://coveralls.io/repos/phobson/paramnormal/badge.svg?branch=master&service=github 5 | :target: https://coveralls.io/github/phobson/paramnormal?branch=master 6 | 7 | 8 | Scipy distributions are weird. 9 | Maybe these will be less weird. 
10 | 
11 | 
12 | The problem this is solving
13 | ---------------------------
14 | 
15 | Let's look at the lognormal distribution.
16 | The `wikipedia article <https://en.wikipedia.org/wiki/Log-normal_distribution>`__ states that it is parameterized by μ and σ, the mean and standard deviation of the underlying normal distribution.
17 | In this case, μ and σ can also be known as the location and scale parameters, respectively.
18 | 
19 | However, to create a lognormal distribution in scipy, you need three parameters: location, scale, and shape.
20 | The tricky part, however, is that "location" in scipy refers to an offset from zero, "shape" refers to σ, and "scale" refers to :math:`e^\mu`.
21 | This is all explained in the scipy documentation, but it took me a couple of readings and bad mistakes to figure it out.
22 | It's also never really explicitly stated that scipy's location parameter should be zero 99.999% of the time.
23 | That's a very important point to understand when you're fitting lognormal parameters to a sample and you end up with three crazy numbers that don't make any sense and a distribution with values less than zero, despite the fact that a lognormal distribution shouldn't have values less than zero.
24 | 
25 | The point of all of this is that *paramnormal* is trying to make easy what scipy sometimes makes tricky (a short sketch of the fitting workflow appears at the end of this readme).
26 | 
27 | So whereas in scipy, you would do this:
28 | 
29 | .. code:: python
30 | 
31 | import numpy
32 | from scipy import stats
33 | mu = 0.75
34 | sigma = 1.25
35 | dist = stats.lognorm(sigma, loc=0, scale=numpy.exp(mu))
36 | 
37 | In paramnormal, you can do this:
38 | 
39 | .. code:: python
40 | 
41 | import paramnormal
42 | dist = paramnormal.lognormal(mu=0.75, sigma=1.25)
43 | 
44 | You can even use Greek letters:
45 | 
46 | .. code:: python
47 | 
48 | dist = paramnormal.lognormal(μ=0.75, σ=1.25)
49 | 
50 | All three snippets return the same scipy distribution objects and have the same numerical methods (e.g., ``cdf``, ``pdf``, ``rvs``, ``ppf``).
51 | Paramnormal just provides a shortcut that lets you specify only the traditional distribution parameters you read about in your textbook.
52 | 
53 | Documentation
54 | -------------
55 | We have `HTML docs built with sphinx <http://phobson.github.io/paramnormal/>`_.
56 | 
57 | Installation
58 | ------------
59 | Binaries are available through my conda channel:
60 | 
61 | ``conda install --channel=phobson paramnormal``
62 | 
63 | This is a pure python package, so installation from source should be as easy as running
64 | ``pip install .`` from the source directory if you've cloned the repo.
65 | 
66 | Otherwise, I think ``pip install git+https://github.com/phobson/paramnormal.git`` will work.
67 | (I'll upload to PyPI after this has sat around for a while.)
68 | 
69 | Development status
70 | ------------------
71 | From my perspective this is now feature complete, meaning it has all of the distributions that I use somewhat regularly.
72 | If you want to add a new distribution, get in touch.
73 | Otherwise, I'll just be fixing bugs and typos/omissions in the documentation.
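
A quick fitting sketch
----------------------
The snippet below is only a rough sketch of the fitting workflow described above.
It leans on nothing beyond the ``fit`` and ``from_params`` methods shown in the API docstrings; the seed and the fake data are arbitrary, so treat the fitted numbers as illustrative only.

.. code:: python

    import numpy
    import paramnormal

    # make some fake lognormal data
    numpy.random.seed(0)
    data = paramnormal.lognormal(mu=0.75, sigma=1.25).rvs(size=37)

    # the offset (scipy's ``loc``) is held at zero, so the fit returns
    # only the mu and sigma you actually care about
    params = paramnormal.lognormal.fit(data)
    print(params.mu, params.sigma, params.offset)

    # rebuild a frozen scipy distribution from the fitted parameters
    fitted = paramnormal.lognormal.from_params(params)
    fitted.ppf([0.25, 0.5, 0.75])

Because ``from_params`` hands back an ordinary frozen scipy distribution, everything downstream (``cdf``, ``pdf``, ``ppf``, plotting) behaves exactly as it would had you built the distribution with scipy directly.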
74 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.6 2 | scipy>=0.14 3 | matplotlib>=1.5 -------------------------------------------------------------------------------- /requirements_dev.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.6 2 | scipy>=0.14 3 | matplotlib>=1.5 4 | pytest 5 | pytest-cov 6 | pytest-mpl 7 | coverage 8 | docopt 9 | requests 10 | pyyaml 11 | ruff 12 | pre-commit 13 | isort -------------------------------------------------------------------------------- /ruff.toml: -------------------------------------------------------------------------------- 1 | line-length = 100 2 | 3 | [lint] 4 | select = [ 5 | # pycodestyle 6 | "E", 7 | # Pyflakes 8 | "F", 9 | # pyupgrade 10 | "UP", 11 | ## flake8-bugbear 12 | # "B", 13 | # flake8-simplify 14 | "SIM", 15 | ] 16 | 17 | [format] 18 | indent-style = "space" 19 | 20 | exclude = [ 21 | ".bzr", 22 | ".direnv", 23 | ".eggs", 24 | ".git", 25 | ".git-rewrite", 26 | ".hg", 27 | ".ipynb_checkpoints", 28 | ".mypy_cache", 29 | ".nox", 30 | ".pants.d", 31 | ".pyenv", 32 | ".pytest_cache", 33 | ".pytype", 34 | ".ruff_cache", 35 | ".svn", 36 | ".tox", 37 | ".venv", 38 | ".vscode", 39 | "__pypackages__", 40 | "_build", 41 | "buck-out", 42 | "build", 43 | "dist", 44 | "node_modules", 45 | "site-packages", 46 | "venv", 47 | "docs", 48 | "docs/sphinxext", 49 | ] 50 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = readme.rst 3 | 4 | markers = 5 | mpl_image_compare 6 | 7 | [tool:pytest] 8 | pep8ignore = 9 | E501 10 | E131 11 | check_paramnormal.py ALL 12 | paramnormal/examples/* ALL 13 | paramnormal/tests/* E501 E241 14 | docs/* ALL 15 | 16 | [pep8] 17 | max-line-length = 100 18 | 19 | [isort] 20 | profile=black 21 | src_paths=paramnormal -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Setup script for the paramnormal package 2 | # 3 | # Usage: python setup.py install 4 | 5 | from setuptools import find_packages, setup 6 | 7 | DESCRIPTION = "paramnormal: Conventionally parameterized probability distributions" 8 | LONG_DESCRIPTION = DESCRIPTION 9 | NAME = "paramnormal" 10 | VERSION = "v0.4.4" 11 | AUTHOR = "Paul Hobson" 12 | AUTHOR_EMAIL = "pmhobson@gmail.com" 13 | URL = "http://phobson.github.io/paramnormal/" 14 | DOWNLOAD_URL = "https://github.com/phobson/paramnormal/archive/master.zip" 15 | LICENSE = "BSD 3-clause" 16 | PACKAGES = find_packages() 17 | PLATFORMS = "Python 3.10 and later." 
18 | CLASSIFIERS = [ 19 | "Programming Language :: Python", 20 | "Operating System :: OS Independent", 21 | "License :: OSI Approved :: BSD License", 22 | "Development Status :: 5 - Production/Stable", 23 | "Intended Audience :: Science/Research", 24 | "Topic :: Scientific/Engineering :: Mathematics", 25 | "Programming Language :: Python :: 3.10", 26 | "Programming Language :: Python :: 3.11", 27 | "Programming Language :: Python :: 3.12", 28 | ] 29 | INSTALL_REQUIRES = ["numpy", "scipy", "matplotlib"] 30 | PACKAGE_DATA = { 31 | "paramnormal.tests.baseline_images.test_activity": ["*png"], 32 | } 33 | DATA_FILES = [] 34 | 35 | 36 | if __name__ == "__main__": 37 | setup( 38 | name=NAME, 39 | version=VERSION, 40 | author=AUTHOR, 41 | author_email=AUTHOR_EMAIL, 42 | url=URL, 43 | description=DESCRIPTION, 44 | long_description=LONG_DESCRIPTION, 45 | download_url=DOWNLOAD_URL, 46 | license=LICENSE, 47 | packages=PACKAGES, 48 | package_data=PACKAGE_DATA, 49 | data_files=DATA_FILES, 50 | platforms=PLATFORMS, 51 | classifiers=CLASSIFIERS, 52 | install_requires=INSTALL_REQUIRES, 53 | zip_safe=False, 54 | ) 55 | --------------------------------------------------------------------------------