├── .clang-format ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ └── bug_report.md ├── bottleneck-action │ └── action.yml ├── renovate.json └── workflows │ └── ci.yml ├── .gitignore ├── LICENSE ├── LICENSES ├── NUMPY_LICENSE ├── PANDAS_LICENSE ├── SCIPY_LICENSE └── SETUPTOOLS_LICENSE ├── MANIFEST.in ├── Makefile ├── README.rst ├── RELEASE.rst ├── asv_bench ├── asv.conf.json └── benchmarks │ ├── __init__.py │ ├── memory.py │ ├── move.py │ ├── nonreduce.py │ ├── nonreduce_axis.py │ └── reduce.py ├── bottleneck ├── .gitignore ├── __init__.py ├── _pytesttester.py ├── _version.py ├── benchmark │ ├── __init__.py │ ├── autotimeit.py │ ├── bench.py │ └── bench_detailed.py ├── conftest.py ├── include │ ├── bottleneck.h │ └── iterators.h ├── slow │ ├── __init__.py │ ├── move.py │ ├── nonreduce.py │ ├── nonreduce_axis.py │ └── reduce.py ├── src │ ├── .gitignore │ ├── __init__.py │ ├── bn_config.py │ ├── bn_template.py │ ├── bottleneck.h │ ├── iterators.h │ ├── move_median │ │ ├── .gitignore │ │ ├── makefile │ │ ├── move_median.c │ │ ├── move_median.h │ │ └── move_median_debug.c │ ├── move_template.c │ ├── nonreduce_axis_template.c │ ├── nonreduce_template.c │ └── reduce_template.c └── tests │ ├── __init__.py │ ├── common.py │ ├── data │ └── template_test │ │ ├── test_template.c │ │ └── truth.c │ ├── docker │ ├── centos_7_min_deps │ │ └── Dockerfile │ ├── centos_8_min_deps │ │ └── Dockerfile │ ├── release_tests.sh │ ├── ubuntu_devel_min_deps │ │ └── Dockerfile │ └── ubuntu_lts_min_deps │ │ └── Dockerfile │ ├── input_modification_test.py │ ├── list_input_test.py │ ├── memory_test.py │ ├── move_test.py │ ├── nonreduce_axis_test.py │ ├── nonreduce_test.py │ ├── reduce_test.py │ ├── scalar_input_test.py │ ├── test_template.py │ └── util.py ├── codecov.yml ├── doc ├── doc_howto ├── image │ ├── icon.png │ ├── icon.xcf │ └── icon14.png ├── source │ ├── _templates │ │ └── layout.html │ ├── bottleneck.benchmark.rst │ ├── bottleneck.move.rst │ ├── bottleneck.nonreduce.rst │ ├── bottleneck.nonreduce_axis.rst │ ├── bottleneck.reduce.rst │ ├── bottleneck.rst │ ├── bottleneck.slow.rst │ ├── bottleneck.src.rst │ ├── bottleneck.tests.rst │ ├── conf.py │ ├── index.rst │ ├── installing.rst │ ├── intro.rst │ ├── license.rst │ ├── reference.rst │ ├── release.rst │ └── releases │ │ ├── RELEASE.rst │ │ ├── v0.1.0.rst │ │ ├── v0.2.0.rst │ │ ├── v0.3.0.rst │ │ ├── v0.4.0.rst │ │ ├── v0.4.1.rst │ │ ├── v0.4.2.rst │ │ ├── v0.4.3.rst │ │ ├── v0.5.0.rst │ │ ├── v0.6.0.rst │ │ ├── v0.7.0.rst │ │ ├── v0.8.0.rst │ │ ├── v1.0.0.rst │ │ ├── v1.1.0.rst │ │ ├── v1.2.0.rst │ │ ├── v1.2.1.rst │ │ ├── v1.3.0.rst │ │ ├── v1.3.1.rst │ │ ├── v1.3.2.rst │ │ └── v1.4.0.rst └── sphinxext │ ├── announce.py │ └── contributors.py ├── pyproject.toml ├── readthedocs.yml ├── setup.cfg ├── setup.py ├── tools ├── appveyor │ ├── conda_setup.py │ ├── conda_wrapper.py │ └── windows_sdk.cmd ├── test-installed-bottleneck.py ├── travis │ ├── bn_setup.sh │ ├── conda_install.sh │ └── conda_setup.sh └── update_readme.py └── versioneer.py /.clang-format: -------------------------------------------------------------------------------- 1 | 2 | BasedOnStyle: LLVM 3 | UseTab: Never 4 | IndentWidth: 4 5 | TabWidth: 4 6 | AllowShortIfStatementsOnASingleLine: true 7 | IndentCaseLabels: true 8 | ColumnLimit: 0 9 | # AccessModifierOffset: 0 10 | NamespaceIndentation: All 11 | AlignConsecutiveMacros: true 12 | 13 | Language: Cpp 14 | DerivePointerAlignment: true 15 | PointerAlignment: Right 16 | AlignEscapedNewlines: Right 17 | AlignConsecutiveDeclarations: true 18 | 
ReflowComments: true 19 | SpacesBeforeTrailingComments: 2 20 | IndentPPDirectives: BeforeHash 21 | AllowAllArgumentsOnNextLine: false 22 | AllowAllParametersOfDeclarationOnNextLine: false 23 | BinPackParameters: false 24 | BinPackArguments: false 25 | ForEachMacros: ['WHILE', 'FOR', 'WHILE0', 'WHILE1', 'WHILE2', 'REDUCE_SPECIALIZE'] 26 | 27 | BreakBeforeBraces: Custom 28 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | bottleneck/_version.py export-subst 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: bug 6 | --- 7 | 8 | **Describe the bug** 9 | A clear and concise description of what the bug is. 10 | 11 | **To Reproduce** 12 | To assist in reproducing the bug, please include the following: 13 | 14 | 1. Command/code being executed 15 | 2. Python version and OS 16 | 3. `pip` version 17 | 4. Output of `pip list` or `conda list` 18 | 19 | **Expected behavior** 20 | A clear and concise description of what you expected to happen. 21 | 22 | **Additional context** 23 | Add any other context about the problem here. 24 | -------------------------------------------------------------------------------- /.github/bottleneck-action/action.yml: -------------------------------------------------------------------------------- 1 | name: build-test-bottleneck 2 | description: "checkout repo, build, and test bottleneck" 3 | runs: 4 | using: composite 5 | steps: 6 | - name: Set up Python ${{ matrix.python-version }} 7 | uses: actions/setup-python@v5 8 | with: 9 | python-version: ${{ matrix.python-version }} 10 | - name: Install 11 | shell: bash 12 | run: | 13 | pip install . -v 14 | 15 | - name: Test with pytest 16 | shell: bash 17 | run: | 18 | pip install pytest 19 | cd doc # avoid picking up bottleneck from the source dir 20 | pytest --pyargs bottleneck 21 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:base" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Github Actions 2 | 3 | on: ["push", "pull_request"] 4 | 5 | jobs: 6 | test-linux-macos: 7 | runs-on: ${{ matrix.os }} 8 | strategy: 9 | matrix: 10 | python-version: ["3.9", "3.13"] 11 | os: 12 | # Note that macos-13 is x86-64 (deprecated already), 13 | # and macos-latest is arm64.
14 | [ubuntu-22.04, ubuntu-24.04-arm, macos-13, macos-latest] 15 | steps: 16 | - uses: actions/checkout@v4 17 | - uses: ./.github/bottleneck-action 18 | 19 | test-windows: 20 | runs-on: ${{ matrix.os }} 21 | strategy: 22 | matrix: 23 | python-version: ["3.9", "3.13", "3.13t"] 24 | architecture: [x86, x64] 25 | os: [windows-latest, windows-2019] 26 | steps: 27 | - uses: actions/checkout@v4 28 | - uses: ./.github/bottleneck-action 29 | 30 | test-pyversions: 31 | runs-on: ubuntu-latest 32 | strategy: 33 | matrix: 34 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13", "3.13t"] 35 | steps: 36 | - uses: actions/checkout@v4 37 | - uses: ./.github/bottleneck-action 38 | 39 | check: 40 | # This job is here is the "Required" one for merging PRs, and 41 | # it only runs after all the `test-*` jobs above have run. Hence 42 | # it serves as a check that CI actually ran before a PR gets merged. 43 | needs: [test-linux-macos, test-windows] 44 | runs-on: ubuntu-latest 45 | steps: 46 | - name: Placeholder for CI checks in PRs 47 | run: echo "Done" 48 | 49 | build_wheels: 50 | needs: [test-linux-macos, test-windows] 51 | if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') 52 | runs-on: ${{ matrix.os }} 53 | strategy: 54 | matrix: 55 | os: [macos-latest, windows-latest, ubuntu-latest, ubuntu-24.04-arm] 56 | steps: 57 | - uses: actions/checkout@v4 58 | with: 59 | fetch-depth: 0 60 | 61 | - name: Build wheels 62 | uses: pypa/cibuildwheel@v2.23.2 63 | env: 64 | CIBW_SKIP: "pp* *_i686" 65 | CIBW_ENABLE: cpython-freethreading 66 | 67 | - name: Store wheel artifacts 68 | uses: actions/upload-artifact@v4 69 | with: 70 | name: cibw-wheels-${{ matrix.os }}-${{ strategy.job-index }} 71 | path: ./wheelhouse/*.whl 72 | 73 | build_sdist: 74 | needs: [test-linux-macos, test-windows] 75 | if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') 76 | runs-on: ubuntu-latest 77 | steps: 78 | - uses: actions/checkout@v4 79 | with: 80 | fetch-depth: 0 81 | 82 | - name: Build sdist 83 | run: pipx run build --sdist 84 | 85 | - uses: actions/upload-artifact@v4 86 | with: 87 | path: dist/*.tar.gz 88 | 89 | release: 90 | needs: [build_wheels, build_sdist] 91 | if: github.event_name == 'push' 92 | runs-on: ubuntu-latest 93 | steps: 94 | - uses: actions/download-artifact@v4 95 | with: 96 | path: dist 97 | merge-multiple: true 98 | 99 | - uses: pypa/gh-action-pypi-publish@release/v1 100 | with: 101 | user: __token__ 102 | password: ${{ secrets.PYPI_API_TOKEN}} 103 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | build 3 | dist 4 | MANIFEST 5 | *.so 6 | .tox 7 | .coverage 8 | *.egg-info 9 | .*.swp 10 | *~ 11 | \#*# 12 | bottleneck/src/bn_config.h 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2010-2019 Keith Goodman 2 | Copyright (c) 2019 Bottleneck Developers 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright notice, 9 | this list of conditions and the following disclaimer. 
10 | 11 | * Redistributions in binary form must reproduce the above copyright 12 | notice, this list of conditions and the following disclaimer in the 13 | documentation and/or other materials provided with the distribution. 14 | 15 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 19 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 | POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /LICENSES/NUMPY_LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2005-2019, NumPy Developers. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are 6 | met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | * Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | * Neither the name of the NumPy Developers nor the names of any 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /LICENSES/PANDAS_LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2008-2012, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 
11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /LICENSES/SCIPY_LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2001-2002 Enthought, Inc. 2003-2019, SciPy Developers. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions 6 | are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | 2. Redistributions in binary form must reproduce the above 12 | copyright notice, this list of conditions and the following 13 | disclaimer in the documentation and/or other materials provided 14 | with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
31 | -------------------------------------------------------------------------------- /LICENSES/SETUPTOOLS_LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2016 Jason R Coombs 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 7 | of the Software, and to permit persons to whom the Software is furnished to do 8 | so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst RELEASE.rst 2 | include LICENSE 3 | include Makefile 4 | include pyproject.toml 5 | 6 | graft LICENSES 7 | 8 | recursive-include bottleneck/src *.c *.h 9 | exclude bottleneck/src/reduce.c 10 | exclude bottleneck/src/move.c 11 | exclude bottleneck/src/nonreduce.c 12 | exclude bottleneck/src/nonreduce_axis.c 13 | exclude bottleneck/src/bn_config.h 14 | 15 | recursive-include doc * 16 | recursive-exclude doc/build * 17 | include versioneer.py 18 | include bottleneck/_version.py 19 | 20 | global-exclude __pycache__ 21 | global-exclude *.pyc 22 | global-exclude *~ 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Bottleneck Makefile 2 | 3 | PYTHON=python 4 | 5 | srcdir := bottleneck 6 | 7 | help: 8 | @echo "Available tasks:" 9 | @echo "help --> This help page" 10 | @echo "all --> clean, build, flake8, test" 11 | @echo "build --> Build the Python C extensions" 12 | @echo "clean --> Remove all the build files for a fresh start" 13 | @echo "test --> Run unit tests" 14 | @echo "flake8 --> Check for pep8 errors" 15 | @echo "readme --> Update benchmark results in README.rst" 16 | @echo "bench --> Run performance benchmark" 17 | @echo "detail --> Detailed benchmarks for all functions" 18 | @echo "sdist --> Make source distribution" 19 | @echo "doc --> Build Sphinx manual" 20 | @echo "pypi --> Upload to pypi" 21 | 22 | all: clean build test flake8 23 | 24 | build: 25 | ${PYTHON} setup.py build_ext --inplace 26 | 27 | test: 28 | ${PYTHON} -c "import bottleneck;bottleneck.test()" 29 | 30 | flake8: 31 | flake8 32 | 33 | black: 34 | black . 
--exclude "(build/|dist/|\.git/|\.mypy_cache/|\.tox/|\.venv/\.asv/|env|\.eggs)" 35 | 36 | readme: 37 | PYTHONPATH=`pwd`:PYTHONPATH ${PYTHON} tools/update_readme.py 38 | 39 | bench: 40 | ${PYTHON} -c "import bottleneck; bottleneck.bench()" 41 | 42 | detail: 43 | ${PYTHON} -c "import bottleneck; bottleneck.bench_detailed('all')" 44 | 45 | sdist: clean 46 | ${PYTHON} setup.py sdist 47 | git status 48 | 49 | pypi: clean 50 | ${PYTHON} setup.py sdist upload -r pypi 51 | 52 | # doc directory exists so use phony 53 | .PHONY: doc 54 | doc: clean build 55 | rm -rf build/sphinx 56 | ${PYTHON} setup.py build_sphinx 57 | 58 | clean: 59 | rm -rf build dist Bottleneck.egg-info 60 | find . -name \*.pyc -delete 61 | rm -f MANIFEST 62 | rm -rf ${srcdir}/*.html ${srcdir}/build 63 | rm -rf ${srcdir}/*.c 64 | rm -rf ${srcdir}/*.so 65 | rm -rf ${srcdir}/bn_config.h 66 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | .. image:: https://github.com/pydata/bottleneck/workflows/Github%20Actions/badge.svg 2 | :target: https://github.com/pydata/bottleneck/actions 3 | 4 | ========== 5 | Bottleneck 6 | ========== 7 | 8 | Bottleneck is a collection of fast NumPy array functions written in C. 9 | 10 | Let's give it a try. Create a NumPy array: 11 | 12 | .. code-block:: pycon 13 | 14 | >>> import numpy as np 15 | >>> a = np.array([1, 2, np.nan, 4, 5]) 16 | 17 | Find the nanmean: 18 | 19 | .. code-block:: pycon 20 | 21 | >>> import bottleneck as bn 22 | >>> bn.nanmean(a) 23 | 3.0 24 | 25 | Moving window mean: 26 | 27 | .. code-block:: pycon 28 | 29 | >>> bn.move_mean(a, window=2, min_count=1) 30 | array([ 1. , 1.5, 2. , 4. , 4.5]) 31 | 32 | Benchmark 33 | ========= 34 | 35 | Bottleneck comes with a benchmark suite: 36 | 37 | .. code-block:: pycon 38 | 39 | >>> bn.bench() 40 | Bottleneck performance benchmark 41 | Bottleneck 1.3.0.dev0+122.gb1615d7; Numpy 1.16.4 42 | Speed is NumPy time divided by Bottleneck time 43 | NaN means approx one-fifth NaNs; float64 used 44 | 45 | no NaN no NaN NaN no NaN NaN 46 | (100,) (1000,1000)(1000,1000)(1000,1000)(1000,1000) 47 | axis=0 axis=0 axis=0 axis=1 axis=1 48 | nansum 29.7 1.4 1.6 2.0 2.1 49 | nanmean 99.0 2.0 1.8 3.2 2.5 50 | nanstd 145.6 1.8 1.8 2.7 2.5 51 | nanvar 138.4 1.8 1.8 2.8 2.5 52 | nanmin 27.6 0.5 1.7 0.7 2.4 53 | nanmax 26.6 0.6 1.6 0.7 2.5 54 | median 120.6 1.3 4.9 1.1 5.7 55 | nanmedian 117.8 5.0 5.7 4.8 5.5 56 | ss 13.2 1.2 1.3 1.5 1.5 57 | nanargmin 66.8 5.5 4.8 3.5 7.1 58 | nanargmax 57.6 2.9 5.1 2.5 5.3 59 | anynan 10.2 0.3 52.3 0.8 41.6 60 | allnan 15.1 196.0 156.3 135.8 111.2 61 | rankdata 45.9 1.2 1.2 2.1 2.1 62 | nanrankdata 50.5 1.4 1.3 2.4 2.3 63 | partition 3.3 1.1 1.6 1.0 1.5 64 | argpartition 3.4 1.2 1.5 1.1 1.6 65 | replace 9.0 1.5 1.5 1.5 1.5 66 | push 1565.6 5.9 7.0 13.0 10.9 67 | move_sum 2159.3 31.1 83.6 186.9 182.5 68 | move_mean 6264.3 66.2 111.9 361.1 246.5 69 | move_std 8653.6 86.5 163.7 232.0 317.7 70 | move_var 8856.0 96.3 171.6 267.9 332.9 71 | move_min 1186.6 13.4 30.9 23.5 45.0 72 | move_max 1188.0 14.6 29.9 23.5 46.0 73 | move_argmin 2568.3 33.3 61.0 49.2 86.8 74 | move_argmax 2475.8 30.9 58.6 45.0 82.8 75 | move_median 2236.9 153.9 151.4 171.3 166.9 76 | move_rank 847.1 1.2 1.4 2.3 2.6 77 | 78 | You can also run a detailed benchmark for a single function using, for 79 | example, the command: 80 | 81 | .. 
code-block:: pycon 82 | 83 | >>> bn.bench_detailed("move_median", fraction_nan=0.3) 84 | 85 | Only arrays with data type (dtype) int32, int64, float32, and float64 are 86 | accelerated. All other dtypes result in calls to slower, unaccelerated 87 | functions. In the rare case of a byte-swapped input array (e.g. a big-endian 88 | array on a little-endian operating system) the function will not be 89 | accelerated regardless of dtype. 90 | 91 | Where 92 | ===== 93 | 94 | =================== ======================================================== 95 | download https://pypi.python.org/pypi/Bottleneck 96 | docs https://bottleneck.readthedocs.io 97 | code https://github.com/pydata/bottleneck 98 | mailing list https://groups.google.com/group/bottle-neck 99 | =================== ======================================================== 100 | 101 | License 102 | ======= 103 | 104 | Bottleneck is distributed under a Simplified BSD license. See the LICENSE file 105 | and LICENSES directory for details. 106 | 107 | Install 108 | ======= 109 | 110 | Bottleneck provides binary wheels on PyPI for all the most common platforms. 111 | Binary packages are also available in conda-forge. We recommend installing binaries 112 | with ``pip``, ``uv``, ``conda`` or similar - it's faster and easier than building 113 | from source. 114 | 115 | Installing from source 116 | ---------------------- 117 | 118 | Requirements: 119 | 120 | ======================== ============================================================================ 121 | Bottleneck Python >=3.9; NumPy 1.16.0+ 122 | Compile gcc, clang, MinGW or MSVC 123 | Unit tests pytest 124 | Documentation sphinx, numpydoc 125 | ======================== ============================================================================ 126 | 127 | To install Bottleneck on Linux, Mac OS X, et al.: 128 | 129 | .. code-block:: console 130 | 131 | $ pip install . 132 | 133 | To install bottleneck on Windows, first install MinGW and add it to your 134 | system path. Then install Bottleneck with the command: 135 | 136 | .. code-block:: console 137 | 138 | $ python setup.py install --compiler=mingw32 139 | 140 | Unit tests 141 | ========== 142 | 143 | After you have installed Bottleneck, run the suite of unit tests: 144 | 145 | .. code-block:: pycon 146 | 147 | In [1]: import bottleneck as bn 148 | 149 | In [2]: bn.test() 150 | ============================= test session starts ============================= 151 | platform linux -- Python 3.7.4, pytest-4.3.1, py-1.8.0, pluggy-0.12.0 152 | hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/home/chris/code/bottleneck/.hypothesis/examples') 153 | rootdir: /home/chris/code/bottleneck, inifile: setup.cfg 154 | plugins: openfiles-0.3.2, remotedata-0.3.2, doctestplus-0.3.0, mock-1.10.4, forked-1.0.2, cov-2.7.1, hypothesis-4.32.2, xdist-1.26.1, arraydiff-0.3 155 | collected 190 items 156 | 157 | bottleneck/tests/input_modification_test.py ........................... [ 14%] 158 | .. [ 15%] 159 | bottleneck/tests/list_input_test.py ............................. [ 30%] 160 | bottleneck/tests/move_test.py ................................. [ 47%] 161 | bottleneck/tests/nonreduce_axis_test.py .................... [ 58%] 162 | bottleneck/tests/nonreduce_test.py .......... [ 63%] 163 | bottleneck/tests/reduce_test.py ....................................... [ 84%] 164 | ............ [ 90%] 165 | bottleneck/tests/scalar_input_test.py .................. 
[100%] 166 | 167 | ========================= 190 passed in 46.42 seconds ========================= 168 | Out[2]: True 169 | 170 | If developing in the git repo, simply run ``py.test`` 171 | -------------------------------------------------------------------------------- /asv_bench/asv.conf.json: -------------------------------------------------------------------------------- 1 | { 2 | // The version of the config file format. Do not change, unless 3 | // you know what you are doing. 4 | "version": 1, 5 | 6 | // The name of the project being benchmarked 7 | "project": "bottleneck", 8 | 9 | // The project's homepage 10 | "project_url": "http://github.com/pydata/bottleneck", 11 | 12 | // The URL or local path of the source code repository for the 13 | // project being benchmarked 14 | "repo": "..", 15 | 16 | // The Python project's subdirectory in your repo. If missing or 17 | // the empty string, the project is assumed to be located at the root 18 | // of the repository. 19 | // "repo_subdir": "", 20 | 21 | // Customizable commands for building, installing, and 22 | // uninstalling the project. See asv.conf.json documentation. 23 | // 24 | // "install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"], 25 | // "uninstall_command": ["return-code=any python -mpip uninstall -y {project}"], 26 | // "build_command": [ 27 | // "python setup.py bdist_wheel" 28 | // "PIP_NO_BUILD_ISOLATION=false python -mpip wheel --no-deps --no-index -w {build_cache_dir} {build_dir}" 29 | // ], 30 | 31 | // List of branches to benchmark. If not provided, defaults to "master" 32 | // (for git) or "default" (for mercurial). 33 | // "branches": ["master"], // for git 34 | // "branches": ["default"], // for mercurial 35 | 36 | // The DVCS being used. If not set, it will be automatically 37 | // determined from "repo" by looking at the protocol in the URL 38 | // (if remote), or by looking for special directories, such as 39 | // ".git" (if local). 40 | "dvcs": "git", 41 | 42 | // The tool to use to create environments. May be "conda", 43 | // "virtualenv" or other value depending on the plugins in use. 44 | // If missing or the empty string, the tool will be automatically 45 | // determined by looking for tools on the PATH environment 46 | // variable. 47 | "environment_type": "conda", 48 | 49 | // timeout in seconds for installing any dependencies in environment 50 | // defaults to 10 min 51 | //"install_timeout": 600, 52 | 53 | // the base URL to show a commit for the project. 54 | "show_commit_url": "http://github.com/pydata/bottleneck/commit/", 55 | 56 | // The Pythons you'd like to test against. If not provided, defaults 57 | // to the current version of Python used to run `asv`. 58 | "pythons": ["2.7", "3.7"], 59 | 60 | // The list of conda channel names to be searched for benchmark 61 | // dependency packages in the specified order 62 | "conda_channels": ["defaults"], 63 | 64 | // A conda environment file that is used for environment creation. 65 | // "conda_environment_file": "environment.yml", 66 | 67 | // The matrix of dependencies to test. Each key of the "req" 68 | // requirements dictionary is the name of a package (in PyPI) and 69 | // the values are version numbers. An empty list or empty string 70 | // indicates to just test against the default (latest) 71 | // version. null indicates that the package is to not be 72 | // installed. 
If the package to be tested is only available from 73 | // PyPi, and the 'environment_type' is conda, then you can preface 74 | // the package name by 'pip+', and the package will be installed 75 | // via pip (with all the conda available packages installed first, 76 | // followed by the pip installed packages). 77 | // 78 | // The ``@env`` and ``@env_nobuild`` keys contain the matrix of 79 | // environment variables to pass to build and benchmark commands. 80 | // An environment will be created for every combination of the 81 | // cartesian product of the "@env" variables in this matrix. 82 | // Variables in "@env_nobuild" will be passed to every environment 83 | // during the benchmark phase, but will not trigger creation of 84 | // new environments. A value of ``null`` means that the variable 85 | // will not be set for the current combination. 86 | // 87 | "matrix": { 88 | "req": { 89 | "numpy": ["1.16"], 90 | }, 91 | "env": { 92 | "CC": ["gcc"], 93 | "CXX": ["g++"], 94 | } 95 | }, 96 | 97 | // Combinations of libraries/python versions can be excluded/included 98 | // from the set to test. Each entry is a dictionary containing additional 99 | // key-value pairs to include/exclude. 100 | // 101 | // An exclude entry excludes entries where all values match. The 102 | // values are regexps that should match the whole string. 103 | // 104 | // An include entry adds an environment. Only the packages listed 105 | // are installed. The 'python' key is required. The exclude rules 106 | // do not apply to includes. 107 | // 108 | // In addition to package names, the following keys are available: 109 | // 110 | // - python 111 | // Python version, as in the *pythons* variable above. 112 | // - environment_type 113 | // Environment type, as above. 114 | // - sys_platform 115 | // Platform, as in sys.platform. Possible values for the common 116 | // cases: 'linux2', 'win32', 'cygwin', 'darwin'. 117 | // - req 118 | // Required packages 119 | // - env 120 | // Environment variables 121 | // - env_nobuild 122 | // Non-build environment variables 123 | // 124 | // "exclude": [ 125 | // {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows 126 | // {"environment_type": "conda", "req": {"six": null}}, // don't run without six on conda 127 | // {"env": {"ENV_VAR_1": "val2"}}, // skip val2 for ENV_VAR_1 128 | // ], 129 | // 130 | "include": [ 131 | { 132 | "req": { 133 | "numpy": "1.16", 134 | }, 135 | "env": { 136 | "CC": "clang", 137 | "CXX": "clang++", 138 | }, 139 | "python": "3.7" 140 | } 141 | ], 142 | 143 | // The directory (relative to the current directory) that benchmarks are 144 | // stored in. If not provided, defaults to "benchmarks" 145 | // "benchmark_dir": "benchmarks", 146 | 147 | // The directory (relative to the current directory) to cache the Python 148 | // environments in. If not provided, defaults to "env" 149 | "env_dir": "env", 150 | 151 | // The directory (relative to the current directory) that raw benchmark 152 | // results are stored in. If not provided, defaults to "results". 153 | "results_dir": "results", 154 | 155 | // The directory (relative to the current directory) that the html tree 156 | // should be written to. If not provided, defaults to "html". 157 | "html_dir": "html", 158 | 159 | // The number of characters to retain in the commit hashes. 160 | // "hash_length": 8, 161 | 162 | // `asv` will cache results of the recent builds in each 163 | // environment, making them faster to install next time. 
This is 164 | // the number of builds to keep, per environment. 165 | "build_cache_size": 8, 166 | 167 | // The commits after which the regression search in `asv publish` 168 | // should start looking for regressions. Dictionary whose keys are 169 | // regexps matching to benchmark names, and values corresponding to 170 | // the commit (exclusive) after which to start looking for 171 | // regressions. The default is to start from the first commit 172 | // with results. If the commit is `null`, regression detection is 173 | // skipped for the matching benchmark. 174 | // 175 | // "regressions_first_commits": { 176 | // "some_benchmark": "352cdf", // Consider regressions only after this commit 177 | // "another_benchmark": null, // Skip regression detection altogether 178 | // }, 179 | 180 | // The thresholds for relative change in results, after which `asv 181 | // publish` starts reporting regressions. Dictionary of the same 182 | // form as in ``regressions_first_commits``, with values 183 | // indicating the thresholds. If multiple entries match, the 184 | // maximum is taken. If no entry matches, the default is 5%. 185 | // 186 | // "regressions_thresholds": { 187 | // "some_benchmark": 0.01, // Threshold of 1% 188 | // "another_benchmark": 0.5, // Threshold of 50% 189 | // }, 190 | } 191 | -------------------------------------------------------------------------------- /asv_bench/benchmarks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pydata/bottleneck/7a0110dff7e14012fb2924ba50668b2f1f83008c/asv_bench/benchmarks/__init__.py -------------------------------------------------------------------------------- /asv_bench/benchmarks/memory.py: -------------------------------------------------------------------------------- 1 | import bottleneck as bn 2 | import numpy as np 3 | 4 | 5 | class Memory: 6 | def peakmem_nanmedian(self): 7 | arr = np.arange(1).reshape((1, 1)) 8 | for i in range(1000000): 9 | bn.nanmedian(arr) 10 | -------------------------------------------------------------------------------- /asv_bench/benchmarks/move.py: -------------------------------------------------------------------------------- 1 | import bottleneck as bn 2 | from .reduce import get_cached_rand_array 3 | 4 | 5 | class Time1DMove: 6 | params = [ 7 | ["int32", "int64", "float32", "float64"], 8 | [(10 ** 3,), (10 ** 5,), (10 ** 7,)], 9 | [10], 10 | ] 11 | param_names = ["dtype", "shape", "window"] 12 | 13 | def setup(self, dtype, shape, window): 14 | self.arr = get_cached_rand_array(shape, dtype, "C") 15 | 16 | def time_move_sum(self, dtype, shape, window): 17 | bn.move_sum(self.arr, window) 18 | 19 | def time_move_mean(self, dtype, shape, window): 20 | bn.move_mean(self.arr, window) 21 | 22 | def time_move_std(self, dtype, shape, window): 23 | bn.move_std(self.arr, window) 24 | 25 | def time_move_var(self, dtype, shape, window): 26 | bn.move_var(self.arr, window) 27 | 28 | def time_move_min(self, dtype, shape, window): 29 | bn.move_min(self.arr, window) 30 | 31 | def time_move_max(self, dtype, shape, window): 32 | bn.move_max(self.arr, window) 33 | 34 | def time_move_argmin(self, dtype, shape, window): 35 | bn.move_argmin(self.arr, window) 36 | 37 | def time_move_argmax(self, dtype, shape, window): 38 | bn.move_argmax(self.arr, window) 39 | 40 | def time_move_median(self, dtype, shape, window): 41 | bn.move_median(self.arr, window) 42 | 43 | def time_move_rank(self, dtype, shape, window): 44 | bn.move_rank(self.arr, window) 45 
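# The 2D variant below runs the same moving-window benchmarks on a
# 1000 x 1000 array, additionally sweeping memory order (C vs. F) and the
# axis the window moves along.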
| 46 | 47 | class Time2DMove: 48 | params = [ 49 | ["int32", "int64", "float32", "float64"], 50 | [(10 ** 3, 10 ** 3)], 51 | ["C", "F"], 52 | [0, 1], 53 | [10], 54 | ] 55 | param_names = ["dtype", "shape", "order", "axis", "window"] 56 | 57 | def setup(self, dtype, shape, order, axis, window): 58 | self.arr = get_cached_rand_array(shape, dtype, order) 59 | 60 | def time_move_sum(self, dtype, shape, order, axis, window): 61 | bn.move_sum(self.arr, window, axis=axis) 62 | 63 | def time_move_mean(self, dtype, shape, order, axis, window): 64 | bn.move_mean(self.arr, window, axis=axis) 65 | 66 | def time_move_std(self, dtype, shape, order, axis, window): 67 | bn.move_std(self.arr, window, axis=axis) 68 | 69 | def time_move_var(self, dtype, shape, order, axis, window): 70 | bn.move_var(self.arr, window, axis=axis) 71 | 72 | def time_move_min(self, dtype, shape, order, axis, window): 73 | bn.move_min(self.arr, window, axis=axis) 74 | 75 | def time_move_max(self, dtype, shape, order, axis, window): 76 | bn.move_max(self.arr, window, axis=axis) 77 | 78 | def time_move_argmin(self, dtype, shape, order, axis, window): 79 | bn.move_argmin(self.arr, window, axis=axis) 80 | 81 | def time_move_argmax(self, dtype, shape, order, axis, window): 82 | bn.move_argmax(self.arr, window, axis=axis) 83 | 84 | def time_move_median(self, dtype, shape, order, axis, window): 85 | bn.move_median(self.arr, window, axis=axis) 86 | 87 | def time_move_rank(self, dtype, shape, order, axis, window): 88 | bn.move_rank(self.arr, window, axis=axis) 89 | -------------------------------------------------------------------------------- /asv_bench/benchmarks/nonreduce.py: -------------------------------------------------------------------------------- 1 | import bottleneck as bn 2 | import numpy as np 3 | 4 | 5 | class TimeReplace2D: 6 | params = [ 7 | ["int32", "int64", "float32", "float64"], 8 | [(10 ** 3, 10 ** 3)], 9 | ["C", "F"], 10 | ] 11 | param_names = ["dtype", "shape", "order"] 12 | 13 | def setup(self, dtype, shape, order): 14 | self.arr = np.full(shape, 0, dtype=dtype, order=order) 15 | 16 | assert self.arr.flags[order + "_CONTIGUOUS"] 17 | 18 | self.old = 0 19 | self.new = 1 20 | 21 | def time_replace(self, dtype, shape, order): 22 | bn.replace(self.arr, self.old, self.new) 23 | -------------------------------------------------------------------------------- /asv_bench/benchmarks/nonreduce_axis.py: -------------------------------------------------------------------------------- 1 | import bottleneck as bn 2 | from .reduce import get_cached_rand_array 3 | 4 | 5 | class Time1DNonreduceAxis: 6 | params = [ 7 | ["int32", "int64", "float32", "float64"], 8 | [(10 ** 3,), (10 ** 5,), (10 ** 7,)], 9 | ] 10 | param_names = ["dtype", "shape"] 11 | 12 | def setup(self, dtype, shape): 13 | self.arr = get_cached_rand_array(shape, dtype, "C") 14 | self.half = shape[0] // 2 15 | 16 | def time_partition(self, dtype, shape): 17 | bn.partition(self.arr, self.half) 18 | 19 | def time_argpartition(self, dtype, shape): 20 | bn.argpartition(self.arr, self.half) 21 | 22 | def time_rankdata(self, dtype, shape): 23 | bn.rankdata(self.arr) 24 | 25 | def time_nanrankdata(self, dtype, shape): 26 | bn.nanrankdata(self.arr) 27 | 28 | def time_push(self, dtype, shape): 29 | bn.push(self.arr) 30 | -------------------------------------------------------------------------------- /asv_bench/benchmarks/reduce.py: -------------------------------------------------------------------------------- 1 | import bottleneck as bn 2 | import numpy as np 3 | 4 | 5 | 
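# Module-level cache of random input arrays, keyed by (shape, dtype, order).
# get_cached_rand_array() below fills it lazily and returns a copy on every
# call, so benchmarks that modify their input cannot contaminate later timings.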
RAND_ARRAY_CACHE = {} 6 | 7 | 8 | def get_cached_rand_array(shape, dtype, order): 9 | key = (shape, dtype, order) 10 | if key not in RAND_ARRAY_CACHE: 11 | assert order in ["C", "F"] 12 | random_state = np.random.RandomState(1234) 13 | if "int" in dtype: 14 | dtype_info = np.iinfo(dtype) 15 | arr = random_state.randint( 16 | dtype_info.min, dtype_info.max, size=shape, dtype=dtype 17 | ) 18 | else: 19 | arr = 10000 * random_state.standard_normal(shape).astype(dtype) 20 | 21 | if order == "F": 22 | arr = np.asfortranarray(arr) 23 | 24 | assert arr.flags[order + "_CONTIGUOUS"] 25 | 26 | RAND_ARRAY_CACHE[key] = arr 27 | 28 | return RAND_ARRAY_CACHE[key].copy(order=order) 29 | 30 | 31 | class Time1DReductions: 32 | params = [ 33 | ["int32", "int64", "float32", "float64"], 34 | [(10 ** 3,), (10 ** 5,), (10 ** 7,)], 35 | ] 36 | param_names = ["dtype", "shape"] 37 | 38 | def setup(self, dtype, shape): 39 | self.arr = get_cached_rand_array(shape, dtype, "C") 40 | 41 | def time_nanmin(self, dtype, shape): 42 | bn.nanmin(self.arr) 43 | 44 | def time_nanmax(self, dtype, shape): 45 | bn.nanmax(self.arr) 46 | 47 | def time_nanargmin(self, dtype, shape): 48 | bn.nanargmin(self.arr) 49 | 50 | def time_nanargmax(self, dtype, shape): 51 | bn.nanargmax(self.arr) 52 | 53 | def time_nansum(self, dtype, shape): 54 | bn.nansum(self.arr) 55 | 56 | def time_nanmean(self, dtype, shape): 57 | bn.nanmean(self.arr) 58 | 59 | def time_nanstd(self, dtype, shape): 60 | bn.nanstd(self.arr) 61 | 62 | def time_nanvar(self, dtype, shape): 63 | bn.nanvar(self.arr) 64 | 65 | def time_median(self, dtype, shape): 66 | bn.median(self.arr) 67 | 68 | def time_nanmedian(self, dtype, shape): 69 | bn.nanmedian(self.arr) 70 | 71 | def time_ss(self, dtype, shape): 72 | bn.ss(self.arr) 73 | 74 | 75 | class Time2DReductions: 76 | params = [ 77 | ["int32", "int64", "float32", "float64"], 78 | [(10 ** 3, 10 ** 3)], 79 | ["C", "F"], 80 | [None, 0, 1], 81 | ] 82 | param_names = ["dtype", "shape", "order", "axis"] 83 | 84 | def setup(self, dtype, shape, order, axis): 85 | self.arr = get_cached_rand_array(shape, dtype, order) 86 | 87 | def time_nanmin(self, dtype, shape, order, axis): 88 | bn.nanmin(self.arr, axis=axis) 89 | 90 | def time_nanmax(self, dtype, shape, order, axis): 91 | bn.nanmax(self.arr, axis=axis) 92 | 93 | def time_nanargmin(self, dtype, shape, order, axis): 94 | bn.nanargmin(self.arr, axis=axis) 95 | 96 | def time_nanargmax(self, dtype, shape, order, axis): 97 | bn.nanargmax(self.arr, axis=axis) 98 | 99 | def time_nansum(self, dtype, shape, order, axis): 100 | bn.nansum(self.arr, axis=axis) 101 | 102 | def time_nanmean(self, dtype, shape, order, axis): 103 | bn.nanmean(self.arr, axis=axis) 104 | 105 | def time_nanstd(self, dtype, shape, order, axis): 106 | bn.nanstd(self.arr, axis=axis) 107 | 108 | def time_nanvar(self, dtype, shape, order, axis): 109 | bn.nanvar(self.arr, axis=axis) 110 | 111 | def time_median(self, dtype, shape, order, axis): 112 | bn.median(self.arr, axis=axis) 113 | 114 | def time_nanmedian(self, dtype, shape, order, axis): 115 | bn.nanmedian(self.arr, axis=axis) 116 | 117 | def time_ss(self, dtype, shape, order, axis): 118 | bn.ss(self.arr, axis=axis) 119 | 120 | 121 | class TimeAnyNan2D: 122 | params = [ 123 | ["int32", "int64", "float32", "float64"], 124 | [(10 ** 3, 10 ** 3)], 125 | ["C", "F"], 126 | [None, 0, 1], 127 | ["fast", "slow"], 128 | ] 129 | param_names = ["dtype", "shape", "order", "axis", "case"] 130 | 131 | def setup(self, dtype, shape, order, axis, case): 132 | self.arr = np.full(shape,
0, dtype=dtype, order=order) 133 | 134 | if "float" in dtype: 135 | if case == "fast": 136 | self.arr[:] = np.nan 137 | 138 | assert self.arr.flags[order + "_CONTIGUOUS"] 139 | 140 | def time_anynan(self, dtype, shape, order, axis, case): 141 | bn.anynan(self.arr, axis=axis) 142 | 143 | 144 | class TimeAllNan2D: 145 | params = [ 146 | ["int32", "int64", "float32", "float64"], 147 | [(10 ** 3, 10 ** 3)], 148 | ["C", "F"], 149 | [None, 0, 1], 150 | ["fast", "slow"], 151 | ] 152 | param_names = ["dtype", "shape", "order", "axis", "case"] 153 | 154 | def setup(self, dtype, shape, order, axis, case): 155 | self.arr = np.full(shape, 0, dtype=dtype, order=order) 156 | 157 | if "float" in dtype: 158 | if case == "slow": 159 | self.arr[:] = np.nan 160 | 161 | assert self.arr.flags[order + "_CONTIGUOUS"] 162 | 163 | def time_allnan(self, dtype, shape, order, axis, case): 164 | bn.allnan(self.arr, axis=axis) 165 | -------------------------------------------------------------------------------- /bottleneck/.gitignore: -------------------------------------------------------------------------------- 1 | /reduce.* 2 | /nonreduce.* 3 | /nonreduce_axis.* 4 | /move.* 5 | -------------------------------------------------------------------------------- /bottleneck/__init__.py: -------------------------------------------------------------------------------- 1 | from bottleneck.benchmark.bench import bench 2 | from bottleneck.benchmark.bench_detailed import bench_detailed 3 | from bottleneck.tests.util import get_functions 4 | 5 | from . import slow 6 | from ._pytesttester import PytestTester 7 | from .move import (move_argmax, move_argmin, move_max, move_mean, move_median, 8 | move_min, move_rank, move_std, move_sum, move_var) 9 | from .nonreduce import replace 10 | from .nonreduce_axis import (argpartition, nanrankdata, partition, push, 11 | rankdata) 12 | from .reduce import (allnan, anynan, median, nanargmax, nanargmin, nanmax, 13 | nanmean, nanmedian, nanmin, nanstd, nansum, nanvar, ss) 14 | 15 | test = PytestTester(__name__) 16 | del PytestTester 17 | 18 | from ._version import get_versions # noqa: E402 19 | 20 | __version__ = get_versions()["version"] 21 | del get_versions 22 | 23 | from . import _version 24 | __version__ = _version.get_versions()['version'] 25 | -------------------------------------------------------------------------------- /bottleneck/_pytesttester.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generic test utilities. 3 | 4 | Based on scipy._libs._testutils 5 | """ 6 | 7 | from __future__ import division, print_function, absolute_import 8 | 9 | import os 10 | import sys 11 | 12 | 13 | __all__ = ["PytestTester"] 14 | 15 | 16 | class PytestTester(object): 17 | """ 18 | Pytest test runner entry point. 
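
    A minimal usage sketch, mirroring the README's test instructions
    (the call returns True when the suite passes)::

        import bottleneck as bn
        bn.test()        # default "fast" label skips tests marked slow
        bn.test("full")  # run the complete test suite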
19 | """ 20 | 21 | def __init__(self, module_name): 22 | self.module_name = module_name 23 | 24 | def __call__( 25 | self, 26 | label="fast", 27 | verbose=1, 28 | extra_argv=None, 29 | doctests=False, 30 | coverage=False, 31 | tests=None, 32 | parallel=None, 33 | ): 34 | import pytest 35 | 36 | module = sys.modules[self.module_name] 37 | module_path = os.path.abspath(module.__path__[0]) 38 | 39 | pytest_args = ["-l"] 40 | 41 | if doctests: 42 | raise ValueError("Doctests not supported") 43 | 44 | if extra_argv: 45 | pytest_args += list(extra_argv) 46 | 47 | if verbose and int(verbose) > 1: 48 | pytest_args += ["-" + "v" * (int(verbose) - 1)] 49 | 50 | if coverage: 51 | pytest_args += ["--cov=" + module_path] 52 | 53 | if label == "fast": 54 | pytest_args += ["-m", "not slow"] 55 | elif label != "full": 56 | pytest_args += ["-m", label] 57 | 58 | if tests is None: 59 | tests = [self.module_name] 60 | 61 | if parallel is not None and parallel > 1: 62 | if _pytest_has_xdist(): 63 | pytest_args += ["-n", str(parallel)] 64 | else: 65 | import warnings 66 | 67 | warnings.warn( 68 | "Could not run tests in parallel because " 69 | "pytest-xdist plugin is not available." 70 | ) 71 | 72 | pytest_args += ["--pyargs"] + list(tests) 73 | 74 | try: 75 | code = pytest.main(pytest_args) 76 | except SystemExit as exc: 77 | code = exc.code 78 | 79 | return code == 0 80 | 81 | 82 | def _pytest_has_xdist(): 83 | """ 84 | Check if the pytest-xdist plugin is installed, providing parallel tests 85 | """ 86 | # Check xdist exists without importing, otherwise pytests emits warnings 87 | from importlib.util import find_spec 88 | 89 | return find_spec("xdist") is not None 90 | -------------------------------------------------------------------------------- /bottleneck/benchmark/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pydata/bottleneck/7a0110dff7e14012fb2924ba50668b2f1f83008c/bottleneck/benchmark/__init__.py -------------------------------------------------------------------------------- /bottleneck/benchmark/autotimeit.py: -------------------------------------------------------------------------------- 1 | import timeit 2 | 3 | 4 | def autotimeit(stmt, setup="pass", repeat=3, mintime=0.2): 5 | timer = timeit.Timer(stmt, setup) 6 | number, time1 = autoscaler(timer, mintime) 7 | time2 = timer.repeat(repeat=repeat - 1, number=number) 8 | return min(time2 + [time1]) / number 9 | 10 | 11 | def autoscaler(timer, mintime): 12 | number = 1 13 | for i in range(12): 14 | time = timer.timeit(number) 15 | if time > mintime: 16 | return number, time 17 | number *= 10 18 | raise RuntimeError("function is too fast to test") 19 | -------------------------------------------------------------------------------- /bottleneck/benchmark/bench.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import bottleneck as bn 3 | from .autotimeit import autotimeit 4 | 5 | __all__ = ["bench"] 6 | 7 | 8 | def bench( 9 | shapes=[(100,), (1000, 1000), (1000, 1000), (1000, 1000), (1000, 1000)], 10 | axes=[0, 0, 0, 1, 1], 11 | nans=[False, False, True, False, True], 12 | dtype="float64", 13 | order="C", 14 | functions=None, 15 | ): 16 | """ 17 | Bottleneck benchmark. 18 | 19 | Parameters 20 | ---------- 21 | shapes : list, optional 22 | A list of tuple shapes of input arrays to use in the benchmark. 
23 | axes : list, optional 24 | List of axes along which to perform the calculations that are being 25 | benchmarked. 26 | nans : list, optional 27 | A list of the bools (True or False), one for each tuple in the 28 | `shapes` list, that tells whether the input arrays should be randomly 29 | filled with one-fifth NaNs. 30 | dtype : str, optional 31 | Data type string such as 'float64', which is the default. 32 | order : {'C', 'F'}, optional 33 | Whether to store multidimensional data in C- or Fortran-contiguous 34 | (row- or column-wise) order in memory. 35 | functions : {list, None}, optional 36 | A list of strings specifying which functions to include in the 37 | benchmark. By default (None) all functions are included in the 38 | benchmark. 39 | 40 | Returns 41 | ------- 42 | A benchmark report is printed to stdout. 43 | 44 | """ 45 | 46 | if len(shapes) != len(nans): 47 | raise ValueError("`shapes` and `nans` must have the same length") 48 | if len(shapes) != len(axes): 49 | raise ValueError("`shapes` and `axes` must have the same length") 50 | 51 | # Header 52 | print("Bottleneck performance benchmark") 53 | print(" Bottleneck %s; Numpy %s" % (bn.__version__, np.__version__)) 54 | print(" Speed is NumPy time divided by Bottleneck time") 55 | print(" NaN means approx one-fifth NaNs; %s used" % str(dtype)) 56 | 57 | print("") 58 | header = [" " * 11] 59 | for nan in nans: 60 | if nan: 61 | header.append("NaN".center(11)) 62 | else: 63 | header.append("no NaN".center(11)) 64 | print("".join(header)) 65 | header = ["".join(str(shape).split(" ")).center(11) for shape in shapes] 66 | header = [" " * 12] + header 67 | print("".join(header)) 68 | header = ["".join(("axis=" + str(axis)).split(" ")).center(11) for axis in axes] 69 | header = [" " * 12] + header 70 | print("".join(header)) 71 | 72 | suite = benchsuite(shapes, dtype, nans, axes, order, functions) 73 | for test in suite: 74 | name = test["name"].ljust(12) 75 | fmt = name + "%7.1f" + "%11.1f" * (len(shapes) - 1) 76 | speed = timer(test["statements"], test["setups"]) 77 | print(fmt % tuple(speed)) 78 | 79 | 80 | def timer(statements, setups): 81 | speed = [] 82 | if len(statements) != 2: 83 | raise ValueError("Two statements needed.") 84 | for setup in setups: 85 | with np.errstate(invalid="ignore"): 86 | t0 = autotimeit(statements[0], setup) 87 | t1 = autotimeit(statements[1], setup) 88 | speed.append(t1 / t0) 89 | return speed 90 | 91 | 92 | def getarray(shape, dtype, nans, order): 93 | a = np.arange(np.prod(shape), dtype=dtype) 94 | if nans and issubclass(a.dtype.type, np.inexact): 95 | a[::5] = np.nan 96 | rs = np.random.RandomState(shape) 97 | rs.shuffle(a) 98 | return np.array(a.reshape(*shape), order=order) 99 | 100 | 101 | def benchsuite(shapes, dtype, nans, axes, order, functions): 102 | 103 | suite = [] 104 | 105 | def getsetups(setup, shapes, nans, axes, dtype, order): 106 | template = """ 107 | from bottleneck.benchmark.bench import getarray 108 | a = getarray(%s, '%s', %s, '%s') 109 | axis=%s 110 | %s""" 111 | setups = [] 112 | for shape, axis, nan in zip(shapes, axes, nans): 113 | s = template % ( 114 | str(shape), 115 | str(dtype), 116 | str(nan), 117 | str(order), 118 | str(axis), 119 | setup, 120 | ) 121 | s = "\n".join([line.strip() for line in s.split("\n")]) 122 | setups.append(s) 123 | return setups 124 | 125 | # non-moving window functions 126 | funcs = bn.get_functions("reduce", as_string=True) 127 | funcs += ["rankdata", "nanrankdata"] 128 | for func in funcs: 129 | if functions is not None and func not in 
functions: 130 | continue 131 | run = {} 132 | run["name"] = func 133 | run["statements"] = ["bn_func(a, axis)", "sl_func(a, axis)"] 134 | setup = """ 135 | from bottleneck import %s as bn_func 136 | try: from numpy import %s as sl_func 137 | except ImportError: from bottleneck.slow import %s as sl_func 138 | if "%s" == "median": from bottleneck.slow import median as sl_func 139 | """ % ( 140 | func, 141 | func, 142 | func, 143 | func, 144 | ) 145 | run["setups"] = getsetups(setup, shapes, nans, axes, dtype, order) 146 | suite.append(run) 147 | 148 | # partition, argpartition 149 | funcs = ["partition", "argpartition"] 150 | for func in funcs: 151 | if functions is not None and func not in functions: 152 | continue 153 | run = {} 154 | run["name"] = func 155 | run["statements"] = ["bn_func(a, n, axis)", "sl_func(a, n, axis)"] 156 | setup = """ 157 | from bottleneck import %s as bn_func 158 | from bottleneck.slow import %s as sl_func 159 | if axis is None: n = a.size 160 | else: n = a.shape[axis] - 1 161 | n = max(n // 2, 0) 162 | """ % ( 163 | func, 164 | func, 165 | ) 166 | run["setups"] = getsetups(setup, shapes, nans, axes, dtype, order) 167 | suite.append(run) 168 | 169 | # replace, push 170 | funcs = ["replace", "push"] 171 | for func in funcs: 172 | if functions is not None and func not in functions: 173 | continue 174 | run = {} 175 | run["name"] = func 176 | if func == "replace": 177 | run["statements"] = ["bn_func(a, nan, 0)", "slow_func(a, nan, 0)"] 178 | elif func == "push": 179 | run["statements"] = ["bn_func(a, 5, axis)", "slow_func(a, 5, axis)"] 180 | else: 181 | raise ValueError("Unknow function name") 182 | setup = """ 183 | from numpy import nan 184 | from bottleneck import %s as bn_func 185 | from bottleneck.slow import %s as slow_func 186 | """ % ( 187 | func, 188 | func, 189 | ) 190 | run["setups"] = getsetups(setup, shapes, nans, axes, dtype, order) 191 | suite.append(run) 192 | 193 | # moving window functions 194 | funcs = bn.get_functions("move", as_string=True) 195 | for func in funcs: 196 | if functions is not None and func not in functions: 197 | continue 198 | run = {} 199 | run["name"] = func 200 | run["statements"] = ["bn_func(a, w, 1, axis)", "sw_func(a, w, 1, axis)"] 201 | setup = """ 202 | from bottleneck.slow.move import %s as sw_func 203 | from bottleneck import %s as bn_func 204 | w = a.shape[axis] // 5 205 | """ % ( 206 | func, 207 | func, 208 | ) 209 | run["setups"] = getsetups(setup, shapes, nans, axes, dtype, order) 210 | suite.append(run) 211 | 212 | return suite 213 | -------------------------------------------------------------------------------- /bottleneck/benchmark/bench_detailed.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import bottleneck as bn 3 | from .autotimeit import autotimeit 4 | 5 | __all__ = ["bench_detailed"] 6 | 7 | 8 | def bench_detailed(function="nansum", fraction_nan=0.0): 9 | """ 10 | Benchmark a single function in detail or, optionally, all functions. 11 | 12 | Parameters 13 | ---------- 14 | function : str, optional 15 | Name of function, as a string, to benchmark. Default ('nansum') is 16 | to benchmark bn.nansum. If `function` is 'all' then detailed 17 | benchmarks are run on all bottleneck functions. 18 | fraction_nan : float, optional 19 | Fraction of array elements that should, on average, be NaN. The 20 | default (0.0) is not to set any elements to NaN. 21 | 22 | Returns 23 | ------- 24 | A benchmark report is printed to stdout. 
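
    Examples
    --------
    A typical call, as shown in the README (30% of the array elements set
    to NaN on average)::

        >>> import bottleneck as bn
        >>> bn.bench_detailed("move_median", fraction_nan=0.3)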
25 | 26 | """ 27 | 28 | if function == "all": 29 | # benchmark all bottleneck functions 30 | funcs = bn.get_functions("all", as_string=True) 31 | funcs.sort() 32 | for func in funcs: 33 | bench_detailed(func, fraction_nan) 34 | 35 | if fraction_nan < 0 or fraction_nan > 1: 36 | raise ValueError("`fraction_nan` must be between 0 and 1, inclusive") 37 | 38 | tab = " " 39 | 40 | # Header 41 | print("%s benchmark" % function) 42 | print("%sBottleneck %s; Numpy %s" % (tab, bn.__version__, np.__version__)) 43 | print("%sSpeed is NumPy time divided by Bottleneck time" % tab) 44 | if fraction_nan == 0: 45 | print("%sNone of the array elements are NaN" % tab) 46 | else: 47 | print( 48 | "%s%.1f%% of the array elements are NaN (on average)" 49 | % (tab, fraction_nan * 100) 50 | ) 51 | print("") 52 | 53 | print(" Speed Call Array") 54 | suite = benchsuite(function, fraction_nan) 55 | for test in suite: 56 | name = test["name"] 57 | speed = timer(test["statements"], test["setup"], test["repeat"]) 58 | print("%8.1f %s %s" % (speed, name[0].ljust(27), name[1])) 59 | 60 | 61 | def timer(statements, setup, repeat): 62 | if len(statements) != 2: 63 | raise ValueError("Two statements needed.") 64 | with np.errstate(invalid="ignore"): 65 | t0 = autotimeit(statements[0], setup, repeat=repeat) 66 | t1 = autotimeit(statements[1], setup, repeat=repeat) 67 | speed = t1 / t0 68 | return speed 69 | 70 | 71 | def benchsuite(function, fraction_nan): 72 | 73 | # setup is called before each run of each function 74 | setup = """ 75 | from bottleneck import %s as bn_fn 76 | try: from numpy import %s as sl_fn 77 | except ImportError: from bottleneck.slow import %s as sl_fn 78 | 79 | # avoid all-nan slice warnings from np.median and np.nanmedian 80 | if "%s" == "median": from bottleneck.slow import median as sl_fn 81 | if "%s" == "nanmedian": from bottleneck.slow import nanmedian as sl_fn 82 | 83 | from numpy import array, nan 84 | from numpy.random import RandomState 85 | rand = RandomState(123).rand 86 | 87 | a = %s 88 | if %s != 0: a[a < %s] = nan 89 | """ 90 | setup = "\n".join([s.strip() for s in setup.split("\n")]) 91 | 92 | # what kind of function signature do we need to use? 
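    # `index` picks which column of each get_instructions() tuple holds the
    # call signature for this function family. Each tuple is laid out as
    #     (array, reduce_sig, move_sig, partition_sig, replace_sig, repeat)
    # so `instruction[index + 1]` below selects, for example, "(a)" for reduce
    # and (nan)rankdata functions, "(a, 20)"-style windows for moving-window
    # functions, "(a, 20)" counts for partition/argpartition/push, and
    # "(a, np.nan, 0)" for replace.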
93 | if function in bn.get_functions("reduce", as_string=True): 94 | index = 0 95 | elif function in ["rankdata", "nanrankdata"]: 96 | index = 0 97 | elif function in bn.get_functions("move", as_string=True): 98 | index = 1 99 | elif function in ["partition", "argpartition", "push"]: 100 | index = 2 101 | elif function == "replace": 102 | index = 3 103 | else: 104 | raise ValueError("`function` (%s) not recognized" % function) 105 | 106 | # create benchmark suite 107 | instructions = get_instructions() 108 | f = function 109 | suite = [] 110 | for instruction in instructions: 111 | signature = instruction[index + 1] 112 | if signature is None: 113 | continue 114 | array = instruction[0] 115 | repeat = instruction[-1] 116 | run = {} 117 | run["name"] = [f + signature, array] 118 | run["statements"] = ["bn_fn" + signature, "sl_fn" + signature] 119 | run["setup"] = setup % (f, f, f, f, f, array, fraction_nan, fraction_nan) 120 | run["repeat"] = repeat 121 | suite.append(run) 122 | 123 | return suite 124 | 125 | 126 | def get_instructions(): 127 | 128 | instructions = [ 129 | # 1d input array 130 | ( 131 | "rand(1)", 132 | "(a)", # reduce + (nan)rankdata 133 | "(a, 1)", # move 134 | "(a, 0)", # (arg)partition 135 | "(a, np.nan, 0)", # replace 136 | 10, 137 | ), 138 | ("rand(10)", "(a)", "(a, 2)", "(a, 2)", "(a, np.nan, 0)", 10), 139 | ("rand(100)", "(a)", "(a, 20)", "(a, 20)", "(a, np.nan, 0)", 6), 140 | ("rand(1000)", "(a)", "(a, 200)", "(a, 200)", "(a, np.nan, 0)", 3), 141 | ("rand(1000000)", "(a)", "(a, 200)", "(a, 200)", "(a, np.nan, 0)", 2), 142 | # 2d input array 143 | ("rand(10, 10)", "(a)", "(a, 2)", "(a, 2)", "(a, np.nan, 0)", 6), 144 | ("rand(100, 100)", "(a)", "(a, 20)", "(a, 20)", "(a, np.nan, 0)", 3), 145 | ("rand(1000, 1000)", "(a)", "(a, 200)", "(a, 200)", "(a, np.nan, 0)", 2), 146 | ("rand(10, 10)", "(a, 1)", None, None, None, 6), 147 | ("rand(100, 100)", "(a, 1)", None, None, None, 3), 148 | ("rand(1000, 1000)", "(a, 1)", None, None, None, 2), 149 | ("rand(100000, 2)", "(a, 1)", "(a, 1)", "(a, 1)", None, 2), 150 | ("rand(10, 10)", "(a, 0)", None, None, None, 6), 151 | ("rand(100, 100)", "(a, 0)", "(a, 20, axis=0)", None, None, 3), 152 | ("rand(1000, 1000)", "(a, 0)", "(a, 200, axis=0)", None, None, 2), 153 | # 3d input array 154 | ( 155 | "rand(100, 100, 100)", 156 | "(a, 0)", 157 | "(a, 20, axis=0)", 158 | "(a, 20, axis=0)", 159 | None, 160 | 2, 161 | ), 162 | ( 163 | "rand(100, 100, 100)", 164 | "(a, 1)", 165 | "(a, 20, axis=1)", 166 | "(a, 20, axis=1)", 167 | None, 168 | 2, 169 | ), 170 | ( 171 | "rand(100, 100, 100)", 172 | "(a, 2)", 173 | "(a, 20, axis=2)", 174 | "(a, 20, axis=2)", 175 | "(a, np.nan, 0)", 176 | 2, 177 | ), 178 | # 0d input array 179 | ("array(1.0)", "(a)", None, None, "(a, 0, 2)", 10), 180 | ] 181 | 182 | return instructions 183 | -------------------------------------------------------------------------------- /bottleneck/conftest.py: -------------------------------------------------------------------------------- 1 | 2 | import pytest 3 | 4 | try: 5 | import pytest_run_parallel # noqa:F401 6 | 7 | PARALLEL_RUN_AVAILABLE = True 8 | except Exception: 9 | PARALLEL_RUN_AVAILABLE = False 10 | 11 | 12 | def pytest_configure(config): 13 | if not PARALLEL_RUN_AVAILABLE: 14 | config.addinivalue_line( 15 | 'markers', 16 | 'parallel_threads(n): run the given test function in parallel ' 17 | 'using `n` threads.', 18 | ) 19 | config.addinivalue_line( 20 | "markers", 21 | "thread_unsafe: mark the test function as single-threaded", 22 | ) 23 | config.addinivalue_line( 
24 | "markers", 25 | "iterations(n): run the given test function `n` times in each thread", 26 | ) 27 | 28 | 29 | if not PARALLEL_RUN_AVAILABLE: 30 | @pytest.fixture 31 | def num_parallel_threads(): 32 | return 1 33 | -------------------------------------------------------------------------------- /bottleneck/include/bottleneck.h: -------------------------------------------------------------------------------- 1 | // Copyright 2010-2019 Keith Goodman 2 | // Copyright 2019 Bottleneck Developers 3 | #ifndef BOTTLENECK_H_ 4 | #define BOTTLENECK_H_ 5 | 6 | #include 7 | #define NPY_NO_DEPRECATED_API NPY_1_11_API_VERSION 8 | #include 9 | #include 10 | 11 | /* THREADS=1 releases the GIL but increases function call 12 | * overhead. THREADS=0 does not release the GIL but keeps 13 | * function call overhead low. Curly brackets are for C89 14 | * support. */ 15 | #define THREADS 1 16 | #if THREADS 17 | #define BN_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS { 18 | #define BN_END_ALLOW_THREADS \ 19 | ; \ 20 | } \ 21 | Py_END_ALLOW_THREADS 22 | #else 23 | #define BN_BEGIN_ALLOW_THREADS { 24 | #define BN_END_ALLOW_THREADS } 25 | #endif 26 | 27 | /* for ease of dtype templating */ 28 | #define NPY_float64 NPY_FLOAT64 29 | #define NPY_float32 NPY_FLOAT32 30 | #define NPY_int64 NPY_INT64 31 | #define NPY_int32 NPY_INT32 32 | #define NPY_intp NPY_INTP 33 | #define NPY_long NPY_LONG 34 | #define NPY_MAX_int64 NPY_MAX_INT64 35 | #define NPY_MAX_int32 NPY_MAX_INT32 36 | #define NPY_MIN_int64 NPY_MIN_INT64 37 | #define NPY_MIN_int32 NPY_MIN_INT32 38 | 39 | #define VARKEY (((unsigned)METH_VARARGS) | ((unsigned)METH_KEYWORDS)) 40 | #define error_converting(x) (((x) == -1) && PyErr_Occurred()) 41 | 42 | #define VALUE_ERR(text) PyErr_SetString(PyExc_ValueError, text) 43 | #define TYPE_ERR(text) PyErr_SetString(PyExc_TypeError, text) 44 | #define MEMORY_ERR(text) PyErr_SetString(PyExc_MemoryError, text) 45 | #define RUNTIME_ERR(text) PyErr_SetString(PyExc_RuntimeError, text) 46 | 47 | /* `inline`, `opt_3`, and isnan copied from NumPy. */ 48 | #if HAVE_ATTRIBUTE_OPTIMIZE_OPT_3 49 | #define BN_OPT_3 __attribute__((optimize("O3"))) 50 | #else 51 | #define BN_OPT_3 52 | #endif 53 | 54 | #if HAVE___BUILTIN_ISNAN 55 | #define bn_isnan(x) __builtin_isnan(x) 56 | #elif HAVE_ISNAN 57 | #define bn_isnan(x) isnan(x) 58 | #elif HAVE__ISNAN 59 | #define bn_isnan(x) _isnan(x) 60 | #else 61 | #define bn_isnan(x) ((x) != (x)) 62 | #endif 63 | 64 | /* 65 | * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99 66 | * for INFINITY). Copied from NumPy. 67 | */ 68 | static inline float __bn_inff(void) { 69 | const union { 70 | npy_uint32 __i; 71 | float __f; 72 | } __bint = {0x7f800000UL}; 73 | return __bint.__f; 74 | } 75 | 76 | static inline float __bn_nanf(void) { 77 | const union { 78 | npy_uint32 __i; 79 | float __f; 80 | } __bint = {0x7fc00000UL}; 81 | return __bint.__f; 82 | } 83 | 84 | #define BN_INFINITYF __bn_inff() 85 | #define BN_NANF __bn_nanf() 86 | #define BN_INFINITY ((npy_double)BN_INFINITYF) 87 | #define BN_NAN ((npy_double)BN_NANF) 88 | 89 | /* WIRTH ----------------------------------------------------------------- */ 90 | 91 | /* 92 | WIRTH macro based on: 93 | Fast median search: an ANSI C implementation 94 | Nicolas Devillard - ndevilla AT free DOT fr 95 | July 1998 96 | which, in turn, took the algorithm from 97 | Wirth, Niklaus 98 | Algorithms + data structures = programs, p. 
366 99 | Englewood Cliffs: Prentice-Hall, 1976 100 | 101 | Adapted for Bottleneck: 102 | (C) 2016 Keith Goodman 103 | */ 104 | 105 | #define WIRTH(dtype) \ 106 | npy_##dtype x = B(dtype, k); \ 107 | npy_intp m = l; \ 108 | j = r; \ 109 | do { \ 110 | while (B(dtype, m) < x) \ 111 | m++; \ 112 | while (x < B(dtype, j)) \ 113 | j--; \ 114 | if (m <= j) { \ 115 | const npy_##dtype atmp = B(dtype, m); \ 116 | B(dtype, m) = B(dtype, j); \ 117 | B(dtype, j) = atmp; \ 118 | m++; \ 119 | j--; \ 120 | } \ 121 | } while (m <= j); \ 122 | if (j < k) l = m; \ 123 | if (k < m) r = j; 124 | 125 | /* partition ------------------------------------------------------------- */ 126 | 127 | #define PARTITION(dtype) \ 128 | while (l < r) { \ 129 | const npy_##dtype al = B(dtype, l); \ 130 | const npy_##dtype ak = B(dtype, k); \ 131 | const npy_##dtype ar = B(dtype, r); \ 132 | if (al > ak) { \ 133 | if (ak < ar) { \ 134 | if (al < ar) { \ 135 | B(dtype, k) = al; \ 136 | B(dtype, l) = ak; \ 137 | } else { \ 138 | B(dtype, k) = ar; \ 139 | B(dtype, r) = ak; \ 140 | } \ 141 | } \ 142 | } else { \ 143 | if (ak > ar) { \ 144 | if (al > ar) { \ 145 | B(dtype, k) = al; \ 146 | B(dtype, l) = ak; \ 147 | } else { \ 148 | B(dtype, k) = ar; \ 149 | B(dtype, r) = ak; \ 150 | } \ 151 | } \ 152 | } \ 153 | WIRTH(dtype) \ 154 | } 155 | 156 | /* slow ------------------------------------------------------------------ */ 157 | 158 | static PyObject *slow_module = NULL; 159 | 160 | static PyObject * 161 | slow(char *name, PyObject *args, PyObject *kwds) { 162 | PyObject *func = NULL; 163 | PyObject *out = NULL; 164 | 165 | if (slow_module == NULL) { 166 | /* bottleneck.slow has not been imported during the current 167 | * python session. Only import it once per session to save time */ 168 | slow_module = PyImport_ImportModule("bottleneck.slow"); 169 | if (slow_module == NULL) { 170 | PyErr_SetString(PyExc_RuntimeError, 171 | "Cannot import bottleneck.slow"); 172 | return NULL; 173 | } 174 | } 175 | 176 | func = PyObject_GetAttrString(slow_module, name); 177 | if (func == NULL) { 178 | PyErr_Format(PyExc_RuntimeError, 179 | "Cannot import %s from bottleneck.slow", 180 | name); 181 | return NULL; 182 | } 183 | if (PyCallable_Check(func)) { 184 | out = PyObject_Call(func, args, kwds); 185 | if (out == NULL) { 186 | Py_XDECREF(func); 187 | return NULL; 188 | } 189 | } else { 190 | Py_XDECREF(func); 191 | PyErr_Format(PyExc_RuntimeError, 192 | "bottleneck.slow.%s is not callable", 193 | name); 194 | return NULL; 195 | } 196 | Py_XDECREF(func); 197 | 198 | return out; 199 | } 200 | 201 | #endif // BOTTLENECK_H_ 202 | -------------------------------------------------------------------------------- /bottleneck/slow/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | 3 | from bottleneck.slow.reduce import * 4 | from bottleneck.slow.nonreduce import * 5 | from bottleneck.slow.nonreduce_axis import * 6 | from bottleneck.slow.move import * 7 | -------------------------------------------------------------------------------- /bottleneck/slow/nonreduce.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | __all__ = ["replace"] 4 | 5 | 6 | def replace(a, old, new): 7 | "Slow replace (inplace) used for unaccelerated dtypes." 
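    # replace() works in place and returns None; for example (an illustrative
    # sketch, assuming a float array since integer arrays cannot hold NaN):
    #
    #     >>> a = np.array([1.0, np.nan, 3.0])
    #     >>> replace(a, np.nan, 0.0)
    #     >>> a
    #     array([1., 0., 3.])
    #
    # For non-inexact (integer) input, replacing NaN is a no-op, and otherwise
    # `old` and `new` must be safely castable to int.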
8 | if type(a) is not np.ndarray: 9 | raise TypeError("`a` must be a numpy array.") 10 | if not issubclass(a.dtype.type, np.inexact): 11 | if old != old: 12 | # int arrays do not contain NaN 13 | return 14 | if int(old) != old: 15 | raise ValueError("Cannot safely cast `old` to int.") 16 | if int(new) != new: 17 | raise ValueError("Cannot safely cast `new` to int.") 18 | if old != old: 19 | mask = np.isnan(a) 20 | else: 21 | mask = a == old 22 | np.putmask(a, mask, new) 23 | -------------------------------------------------------------------------------- /bottleneck/slow/nonreduce_axis.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy import partition, argpartition 3 | 4 | __all__ = ["rankdata", "nanrankdata", "partition", "argpartition", "push"] 5 | 6 | 7 | def rankdata(a, axis=None): 8 | "Slow rankdata function used for unaccelerated dtypes." 9 | return _rank(scipy_rankdata, a, axis) 10 | 11 | 12 | def nanrankdata(a, axis=None): 13 | "Slow nanrankdata function used for unaccelerated dtypes." 14 | return _rank(_nanrankdata_1d, a, axis) 15 | 16 | 17 | def _rank(func1d, a, axis): 18 | a = np.asarray(a) 19 | if axis is None: 20 | a = a.ravel() 21 | axis = 0 22 | if a.size == 0: 23 | y = a.astype(np.float64, copy=True) 24 | else: 25 | y = np.apply_along_axis(func1d, axis, a) 26 | if a.dtype != np.float64: 27 | y = y.astype(np.float64) 28 | return y 29 | 30 | 31 | def _nanrankdata_1d(a): 32 | y = np.empty(a.shape, dtype=np.float64) 33 | y.fill(np.nan) 34 | idx = ~np.isnan(a) 35 | y[idx] = scipy_rankdata(a[idx]) 36 | return y 37 | 38 | 39 | def push(a, n=None, axis=-1): 40 | "Slow push used for unaccelerated dtypes." 41 | if n is None: 42 | n = np.inf 43 | y = np.array(a) 44 | ndim = y.ndim 45 | if axis != -1 or axis != ndim - 1: 46 | y = np.rollaxis(y, axis, ndim) 47 | if ndim == 1: 48 | y = y[None, :] 49 | elif ndim == 0: 50 | return y 51 | fidx = ~np.isnan(y) 52 | recent = np.empty(y.shape[:-1]) 53 | count = np.empty(y.shape[:-1]) 54 | recent.fill(np.nan) 55 | count.fill(np.nan) 56 | with np.errstate(invalid="ignore"): 57 | for i in range(y.shape[-1]): 58 | idx = (i - count) > n 59 | recent[idx] = np.nan 60 | idx = ~fidx[..., i] 61 | y[idx, i] = recent[idx] 62 | idx = fidx[..., i] 63 | count[idx] = i 64 | recent[idx] = y[idx, i] 65 | if axis != -1 or axis != ndim - 1: 66 | y = np.rollaxis(y, ndim - 1, axis) 67 | if ndim == 1: 68 | return y[0] 69 | return y 70 | 71 | 72 | # --------------------------------------------------------------------------- 73 | # 74 | # SciPy 75 | # 76 | # Local copy of SciPy's rankdata to avoid a SciPy dependency. The SciPy 77 | # license is included in the Bottleneck license file, which is distributed 78 | # with Bottleneck. 79 | # 80 | # Code taken from scipy master branch on Aug 31, 2016. 81 | 82 | 83 | def scipy_rankdata(a, method="average"): 84 | """ 85 | rankdata(a, method='average') 86 | Assign ranks to data, dealing with ties appropriately. 87 | Ranks begin at 1. The `method` argument controls how ranks are assigned 88 | to equal values. See [1]_ for further discussion of ranking methods. 89 | Parameters 90 | ---------- 91 | a : array_like 92 | The array of values to be ranked. The array is first flattened. 93 | method : str, optional 94 | The method used to assign ranks to tied elements. 95 | The options are 'average', 'min', 'max', 'dense' and 'ordinal'. 96 | 'average': 97 | The average of the ranks that would have been assigned to 98 | all the tied values is assigned to each value. 
99 | 'min': 100 | The minimum of the ranks that would have been assigned to all 101 | the tied values is assigned to each value. (This is also 102 | referred to as "competition" ranking.) 103 | 'max': 104 | The maximum of the ranks that would have been assigned to all 105 | the tied values is assigned to each value. 106 | 'dense': 107 | Like 'min', but the rank of the next highest element is assigned 108 | the rank immediately after those assigned to the tied elements. 109 | 'ordinal': 110 | All values are given a distinct rank, corresponding to the order 111 | that the values occur in `a`. 112 | The default is 'average'. 113 | Returns 114 | ------- 115 | ranks : ndarray 116 | An array of length equal to the size of `a`, containing rank 117 | scores. 118 | References 119 | ---------- 120 | .. [1] "Ranking", http://en.wikipedia.org/wiki/Ranking 121 | Examples 122 | -------- 123 | >>> from scipy.stats import rankdata 124 | >>> rankdata([0, 2, 3, 2]) 125 | array([ 1. , 2.5, 4. , 2.5]) 126 | >>> rankdata([0, 2, 3, 2], method='min') 127 | array([ 1, 2, 4, 2]) 128 | >>> rankdata([0, 2, 3, 2], method='max') 129 | array([ 1, 3, 4, 3]) 130 | >>> rankdata([0, 2, 3, 2], method='dense') 131 | array([ 1, 2, 3, 2]) 132 | >>> rankdata([0, 2, 3, 2], method='ordinal') 133 | array([ 1, 2, 4, 3]) 134 | """ 135 | if method not in ("average", "min", "max", "dense", "ordinal"): 136 | raise ValueError('unknown method "{0}"'.format(method)) 137 | 138 | a = np.ravel(np.asarray(a)) 139 | algo = "mergesort" if method == "ordinal" else "quicksort" 140 | sorter = np.argsort(a, kind=algo) 141 | 142 | inv = np.empty(sorter.size, dtype=np.intp) 143 | inv[sorter] = np.arange(sorter.size, dtype=np.intp) 144 | 145 | if method == "ordinal": 146 | return inv + 1 147 | 148 | a = a[sorter] 149 | obs = np.r_[True, a[1:] != a[:-1]] 150 | dense = obs.cumsum()[inv] 151 | 152 | if method == "dense": 153 | return dense 154 | 155 | # cumulative counts of each unique value 156 | count = np.r_[np.nonzero(obs)[0], len(obs)] 157 | 158 | if method == "max": 159 | return count[dense] 160 | 161 | if method == "min": 162 | return count[dense - 1] + 1 163 | 164 | # average method 165 | return 0.5 * (count[dense] + count[dense - 1] + 1) 166 | -------------------------------------------------------------------------------- /bottleneck/slow/reduce.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | import numpy as np 3 | from numpy import nanmean, nansum 4 | 5 | __all__ = [ 6 | "median", 7 | "nanmedian", 8 | "nansum", 9 | "nanmean", 10 | "nanvar", 11 | "nanstd", 12 | "nanmin", 13 | "nanmax", 14 | "nanargmin", 15 | "nanargmax", 16 | "ss", 17 | "anynan", 18 | "allnan", 19 | ] 20 | 21 | 22 | def nanargmin(a, axis=None): 23 | "Slow nanargmin function used for unaccelerated dtypes." 24 | with warnings.catch_warnings(): 25 | warnings.simplefilter("ignore") 26 | return np.nanargmin(a, axis=axis) 27 | 28 | 29 | def nanargmax(a, axis=None): 30 | "Slow nanargmax function used for unaccelerated dtypes." 31 | with warnings.catch_warnings(): 32 | warnings.simplefilter("ignore") 33 | return np.nanargmax(a, axis=axis) 34 | 35 | 36 | def nanvar(a, axis=None, ddof=0): 37 | "Slow nanvar function used for unaccelerated dtypes." 38 | with warnings.catch_warnings(): 39 | warnings.simplefilter("ignore") 40 | return np.nanvar(a, axis=axis, ddof=ddof) 41 | 42 | 43 | def nanstd(a, axis=None, ddof=0): 44 | "Slow nanstd function used for unaccelerated dtypes." 
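    # NumPy's nan-aware reductions can emit RuntimeWarnings (for example on
    # all-NaN slices, or when ddof is not smaller than the number of non-NaN
    # values); as in the other wrappers in this module, they are silenced so
    # the slow reference implementation runs quietly.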
45 | with warnings.catch_warnings(): 46 | warnings.simplefilter("ignore") 47 | return np.nanstd(a, axis=axis, ddof=ddof) 48 | 49 | 50 | def nanmin(a, axis=None): 51 | "Slow nanmin function used for unaccelerated dtypes." 52 | with warnings.catch_warnings(): 53 | warnings.simplefilter("ignore") 54 | return np.nanmin(a, axis=axis) 55 | 56 | 57 | def nanmax(a, axis=None): 58 | "Slow nanmax function used for unaccelerated dtypes." 59 | with warnings.catch_warnings(): 60 | warnings.simplefilter("ignore") 61 | return np.nanmax(a, axis=axis) 62 | 63 | 64 | def median(a, axis=None): 65 | "Slow median function used for unaccelerated dtypes." 66 | with warnings.catch_warnings(): 67 | warnings.simplefilter("ignore") 68 | return np.median(a, axis=axis) 69 | 70 | 71 | def nanmedian(a, axis=None): 72 | "Slow nanmedian function used for unaccelerated dtypes." 73 | with warnings.catch_warnings(): 74 | warnings.simplefilter("ignore") 75 | return np.nanmedian(a, axis=axis) 76 | 77 | 78 | def ss(a, axis=None): 79 | "Slow sum of squares used for unaccelerated dtypes." 80 | a = np.asarray(a) 81 | y = np.multiply(a, a).sum(axis) 82 | return y 83 | 84 | 85 | def anynan(a, axis=None): 86 | "Slow check for Nans used for unaccelerated dtypes." 87 | return np.isnan(a).any(axis) 88 | 89 | 90 | def allnan(a, axis=None): 91 | "Slow check for all Nans used for unaccelerated dtypes." 92 | return np.isnan(a).all(axis) 93 | -------------------------------------------------------------------------------- /bottleneck/src/.gitignore: -------------------------------------------------------------------------------- 1 | /reduce.c 2 | /move.c 3 | /nonreduce.c 4 | /nonreduce_axis.c 5 | -------------------------------------------------------------------------------- /bottleneck/src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pydata/bottleneck/7a0110dff7e14012fb2924ba50668b2f1f83008c/bottleneck/src/__init__.py -------------------------------------------------------------------------------- /bottleneck/src/bn_config.py: -------------------------------------------------------------------------------- 1 | """ Based on numpy's approach to exposing compiler features via a config header. 2 | Unfortunately that file is not exposed, so re-implement the portions we need. 3 | """ 4 | import os 5 | import textwrap 6 | 7 | OPTIONAL_FUNCTION_ATTRIBUTES = [ 8 | ("HAVE_ATTRIBUTE_OPTIMIZE_OPT_3", '__attribute__((optimize("O3")))') 9 | ] 10 | 11 | 12 | def _get_compiler_list(cmd): 13 | """ Return the compiler command as a list of strings. 
Distutils provides a 14 | wildly inconsistent API here: 15 | - UnixCCompiler returns a list 16 | - MSVCCompiler intentionally doesn't set this variable 17 | - CygwinCompiler returns a string 18 | 19 | As we are focused on identifying gcc vs clang right now, we ignore MSVC's 20 | bad result and convert all results into lists of strings 21 | """ 22 | compiler = getattr(cmd.compiler, "compiler", "") 23 | if isinstance(compiler, str): 24 | compiler = compiler.split() 25 | return compiler 26 | 27 | 28 | def is_gcc(cmd): 29 | return any("gcc" in x for x in _get_compiler_list(cmd)) 30 | 31 | 32 | def is_clang(cmd): 33 | return any("clang" in x for x in _get_compiler_list(cmd)) 34 | 35 | 36 | def check_inline(cmd): 37 | """Return the inline identifier (may be empty).""" 38 | cmd._check_compiler() 39 | body = textwrap.dedent( 40 | """ 41 | #ifndef __cplusplus 42 | static %(inline)s int static_func (void) 43 | { 44 | return 0; 45 | } 46 | %(inline)s int nostatic_func (void) 47 | { 48 | return 0; 49 | } 50 | #endif 51 | int main(void) { 52 | int r1 = static_func(); 53 | int r2 = nostatic_func(); 54 | return r1 + r2; 55 | } 56 | """ 57 | ) 58 | 59 | for kw in ["inline", "__inline__", "__inline"]: 60 | st = cmd.try_compile(body % {"inline": kw}, None, None) 61 | if st: 62 | return kw 63 | 64 | return "" 65 | 66 | 67 | def check_gcc_function_attribute(cmd, attribute, name): 68 | """Return True if the given function attribute is supported.""" 69 | cmd._check_compiler() 70 | if is_gcc(cmd): 71 | pragma = '#pragma GCC diagnostic error "-Wattributes"' 72 | elif is_clang(cmd): 73 | pragma = '#pragma clang diagnostic error "-Wattributes"' 74 | else: 75 | pragma = "" 76 | 77 | body = ( 78 | textwrap.dedent( 79 | """ 80 | %s 81 | 82 | int %s %s(void*); 83 | 84 | int main(void) 85 | { 86 | return 0; 87 | } 88 | """ 89 | ) 90 | % (pragma, attribute, name) 91 | ) 92 | return cmd.try_compile(body, None, None) != 0 93 | 94 | 95 | def create_config_h(config): 96 | dirname = os.path.dirname(__file__) 97 | config_h = os.path.join(dirname, "bn_config.h") 98 | 99 | if ( 100 | os.path.exists(config_h) 101 | and os.stat(__file__).st_mtime < os.stat(config_h).st_mtime 102 | ): 103 | return 104 | 105 | output = [] 106 | 107 | for config_attr, func_attr in OPTIONAL_FUNCTION_ATTRIBUTES: 108 | if check_gcc_function_attribute(config, func_attr, config_attr.lower()): 109 | output.append((config_attr, "1")) 110 | else: 111 | output.append((config_attr, "0")) 112 | 113 | inline_alias = check_inline(config) 114 | 115 | with open(config_h, "w") as f: 116 | for setting in output: 117 | f.write("#define {} {}\n".format(*setting)) 118 | 119 | if inline_alias == "inline": 120 | f.write("/* undef inline */\n") 121 | else: 122 | f.write("#define inline {}\n".format(inline_alias)) 123 | -------------------------------------------------------------------------------- /bottleneck/src/bn_template.py: -------------------------------------------------------------------------------- 1 | import ast 2 | import os 3 | import posixpath as path 4 | import re 5 | from typing import Dict, List, Optional, Pattern, Tuple 6 | 7 | 8 | def make_c_files( 9 | dirpath: Optional[str] = None, modules: Optional[List[str]] = None 10 | ) -> None: 11 | if modules is None: 12 | modules = ["reduce", "move", "nonreduce", "nonreduce_axis"] 13 | if dirpath is None: 14 | dirpath = os.path.dirname(__file__) 15 | for module in modules: 16 | template_file = os.path.join(dirpath, module + "_template.c") 17 | posix_template = path.relpath(path.join(dirpath, module + 
"_template.c")) 18 | target_file = os.path.join(dirpath, module + ".c") 19 | 20 | if ( 21 | os.path.exists(target_file) 22 | and os.stat(template_file).st_mtime < os.stat(target_file).st_mtime 23 | ): 24 | continue 25 | 26 | with open(template_file, "r") as f: 27 | src_str = f.read() 28 | src_str = '#line 1 "{}"\n'.format(posix_template) + template(src_str) 29 | if len(src_str) and src_str[-1] != "\n": 30 | src_str += "\n" 31 | with open(target_file, "w") as f: 32 | f.write(src_str) 33 | 34 | 35 | def template(src_str: str) -> str: 36 | src_list = src_str.splitlines() 37 | line_numbers = [] 38 | last_empty_ind = 0 39 | for i, l in enumerate(src_list): 40 | if l.strip().endswith("{") and not l.startswith(" "): 41 | line_numbers.append(last_empty_ind) 42 | 43 | if len(l.strip()) == 0 or "*/" in l: 44 | last_empty_ind = i + 1 45 | 46 | distinct_line_numbers = set(line_numbers) 47 | new_src_list = [] 48 | for i, l in enumerate(src_list): 49 | if i in distinct_line_numbers: 50 | new_src_list.append("#line {}".format(i + 1)) 51 | new_src_list.append(l) 52 | 53 | src_list = repeat_templating(new_src_list) 54 | src_list = dtype_templating(src_list) 55 | src_list = string_templating(src_list) 56 | src_str = "\n".join(src_list) 57 | src_str = re.sub(r"\n\s*\n\s*\n", r"\n\n", src_str) 58 | return src_str 59 | 60 | 61 | # repeat -------------------------------------------------------------------- 62 | 63 | REPEAT_BEGIN = re.compile(r"^/\*\s*repeat\s*=\s*") 64 | REPEAT_END = re.compile(r"^/\*\s*repeat end") 65 | COMMENT_END = re.compile(r".*\*\/.*") 66 | 67 | 68 | def repeat_templating(lines: List[str]) -> List[str]: 69 | index = 0 70 | while True: 71 | idx0, idx1 = next_block(lines, index, REPEAT_BEGIN, REPEAT_END) 72 | if idx0 is None or idx1 is None: 73 | break 74 | func_list = lines[idx0:idx1] 75 | func_list = expand_functions_repeat(func_list) 76 | # the +1 below is to skip the /* repeat end */ line 77 | lines = lines[:idx0] + func_list + lines[idx1 + 1 :] 78 | index = idx0 79 | return lines 80 | 81 | 82 | def expand_functions_repeat(lines: List[str]) -> List[str]: 83 | idx = first_occurence(COMMENT_END, lines) 84 | repeat_dict = repeat_info(lines[: idx + 1]) 85 | lines = lines[idx + 1 :] 86 | func_str = "\n".join(lines) 87 | func_list = expand_repeat(func_str, repeat_dict) 88 | return func_list 89 | 90 | 91 | def repeat_info(lines: List[str]) -> Dict[str, str]: 92 | line = "".join(lines) 93 | repeat = re.findall(r"\{.*\}", line) 94 | repeat_dict: Dict[str, str] = ast.literal_eval(repeat[0]) 95 | return repeat_dict 96 | 97 | 98 | def expand_repeat(func_str: str, repeat_dict: Dict[str, str]) -> List[str]: 99 | nrepeats = [len(repeat_dict[key]) for key in repeat_dict] 100 | if len(set(nrepeats)) != 1: 101 | raise ValueError("All repeat lists must be the same length") 102 | nrepeat = nrepeats[0] 103 | func_list = [] 104 | for i in range(nrepeat): 105 | f = func_str[:] 106 | for key in repeat_dict: 107 | f = f.replace(key, repeat_dict[key][i]) 108 | func_list.append("\n" + f) 109 | func_list = ("".join(func_list)).splitlines() 110 | return func_list 111 | 112 | 113 | # dtype --------------------------------------------------------------------- 114 | 115 | DTYPE_BEGIN = re.compile(r"^/\*\s*dtype\s*=\s*") 116 | DTYPE_END = re.compile(r"^/\*\s*dtype end") 117 | 118 | 119 | def dtype_templating(lines: List[str]) -> List[str]: 120 | index = 0 121 | while True: 122 | idx0, idx1 = next_block(lines, index, DTYPE_BEGIN, DTYPE_END) 123 | if idx0 is None or idx1 is None: 124 | break 125 | func_list = 
lines[idx0:idx1] 126 | func_list = expand_functions_dtype(func_list) 127 | # the +1 below is to skip the /* dtype end */ line 128 | lines = lines[:idx0] + func_list + lines[idx1 + 1 :] 129 | index = idx0 130 | return lines 131 | 132 | 133 | def expand_functions_dtype(lines: List[str]) -> List[str]: 134 | idx = first_occurence(COMMENT_END, lines) 135 | dtypes = dtype_info(lines[: idx + 1]) 136 | lines = lines[idx + 1 :] 137 | func_str = "\n".join(lines) 138 | func_list = expand_dtypes(func_str, dtypes) 139 | return func_list 140 | 141 | 142 | def dtype_info(lines: List[str]) -> List[str]: 143 | line = "".join(lines) 144 | dtypes = re.findall(r"\[.*\]", line) 145 | if len(dtypes) != 1: 146 | raise ValueError("expecting exactly one dtype specification") 147 | dtypes = ast.literal_eval(dtypes[0]) 148 | return dtypes 149 | 150 | 151 | def expand_dtypes(func_str: str, dtypes: List[str]) -> List[str]: 152 | if "DTYPE" not in func_str: 153 | raise ValueError("cannot find dtype marker") 154 | func_list = [] 155 | for dtype in dtypes: 156 | f = func_str[:] 157 | for i, dt in enumerate(dtype): 158 | f = f.replace("DTYPE%d" % i, dt) 159 | if i > 0: 160 | f = f + "\n" 161 | func_list.append("\n\n" + f) 162 | return func_list 163 | 164 | 165 | # multiline strings --------------------------------------------------------- 166 | 167 | STRING_BEGIN = re.compile(r".*MULTILINE STRING BEGIN.*") 168 | STRING_END = re.compile(r".*MULTILINE STRING END.*") 169 | 170 | 171 | def string_templating(lines: List[str]) -> List[str]: 172 | index = 0 173 | while True: 174 | idx0, idx1 = next_block(lines, index, STRING_BEGIN, STRING_END) 175 | if idx0 is None or idx1 is None: 176 | break 177 | str_list = lines[idx0 + 1 : idx1] 178 | str_list = quote_string(str_list) 179 | lines = lines[:idx0] + str_list + lines[idx1 + 1 :] 180 | index = idx0 181 | return lines 182 | 183 | 184 | def quote_string(lines: List[str]) -> List[str]: 185 | for i in range(len(lines)): 186 | lines[i] = '"' + lines[i] + r"\n" + '"' 187 | lines[-1] = lines[-1] + ";" 188 | return lines 189 | 190 | 191 | # utility ------------------------------------------------------------------- 192 | 193 | 194 | def first_occurence(pattern: Pattern[str], lines: List[str]) -> int: 195 | for i in range(len(lines)): 196 | if re.match(pattern, lines[i]): 197 | return i 198 | raise ValueError("`pattern` not found") 199 | 200 | 201 | def next_block( 202 | lines: List[str], index: int, begin_pattern: Pattern[str], end_pattern: Pattern[str] 203 | ) -> Tuple[Optional[int], Optional[int]]: 204 | idx = None 205 | for i in range(index, len(lines)): 206 | line = lines[i] 207 | if re.match(begin_pattern, line): 208 | idx = i 209 | elif re.match(end_pattern, line): 210 | if idx is None: 211 | raise ValueError("found end of function before beginning") 212 | return idx, i 213 | return None, None 214 | -------------------------------------------------------------------------------- /bottleneck/src/bottleneck.h: -------------------------------------------------------------------------------- 1 | // Copyright 2010-2019 Keith Goodman 2 | // Copyright 2019 Bottleneck Developers 3 | #ifndef BOTTLENECK_H_ 4 | #define BOTTLENECK_H_ 5 | 6 | #include 7 | #define NPY_NO_DEPRECATED_API NPY_1_11_API_VERSION 8 | #include 9 | #include 10 | 11 | /* THREADS=1 releases the GIL but increases function call 12 | * overhead. THREADS=0 does not release the GIL but keeps 13 | * function call overhead low. Curly brackets are for C89 14 | * support. 
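 * With THREADS set to 1, the two macros below expand to
 * Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS together with the braces;
 * with THREADS set to 0 they expand to bare braces only, so call sites can
 * use them unconditionally.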
*/ 15 | #define THREADS 1 16 | #if THREADS 17 | #define BN_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS { 18 | #define BN_END_ALLOW_THREADS ;} Py_END_ALLOW_THREADS 19 | #else 20 | #define BN_BEGIN_ALLOW_THREADS { 21 | #define BN_END_ALLOW_THREADS } 22 | #endif 23 | 24 | /* for ease of dtype templating */ 25 | #define NPY_float64 NPY_FLOAT64 26 | #define NPY_float32 NPY_FLOAT32 27 | #define NPY_int64 NPY_INT64 28 | #define NPY_int32 NPY_INT32 29 | #define NPY_intp NPY_INTP 30 | #define NPY_MAX_int64 NPY_MAX_INT64 31 | #define NPY_MAX_int32 NPY_MAX_INT32 32 | #define NPY_MIN_int64 NPY_MIN_INT64 33 | #define NPY_MIN_int32 NPY_MIN_INT32 34 | 35 | #define VARKEY METH_VARARGS | METH_KEYWORDS 36 | #define error_converting(x) (((x) == -1) && PyErr_Occurred()) 37 | 38 | #define VALUE_ERR(text) PyErr_SetString(PyExc_ValueError, text) 39 | #define TYPE_ERR(text) PyErr_SetString(PyExc_TypeError, text) 40 | #define MEMORY_ERR(text) PyErr_SetString(PyExc_MemoryError, text) 41 | #define RUNTIME_ERR(text) PyErr_SetString(PyExc_RuntimeError, text) 42 | 43 | /* `inline` and `opt_3` copied from NumPy. */ 44 | #if HAVE_ATTRIBUTE_OPTIMIZE_OPT_3 45 | #define BN_OPT_3 __attribute__((optimize("O3"))) 46 | #else 47 | #define BN_OPT_3 48 | #endif 49 | 50 | /* 51 | * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99 52 | * for INFINITY). Copied from NumPy. 53 | */ 54 | static inline float __bn_inff(void) 55 | { 56 | const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL}; 57 | return __bint.__f; 58 | } 59 | 60 | static inline float __bn_nanf(void) 61 | { 62 | const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL}; 63 | return __bint.__f; 64 | } 65 | 66 | #define BN_INFINITYF __bn_inff() 67 | #define BN_NANF __bn_nanf() 68 | #define BN_INFINITY ((npy_double)BN_INFINITYF) 69 | #define BN_NAN ((npy_double)BN_NANF) 70 | 71 | #define C_CONTIGUOUS(a) PyArray_CHKFLAGS(a, NPY_ARRAY_C_CONTIGUOUS) 72 | #define F_CONTIGUOUS(a) PyArray_CHKFLAGS(a, NPY_ARRAY_F_CONTIGUOUS) 73 | #define IS_CONTIGUOUS(a) (C_CONTIGUOUS(a) || F_CONTIGUOUS(a)) 74 | 75 | /* WIRTH ----------------------------------------------------------------- */ 76 | 77 | /* 78 | WIRTH macro based on: 79 | Fast median search: an ANSI C implementation 80 | Nicolas Devillard - ndevilla AT free DOT fr 81 | July 1998 82 | which, in turn, took the algorithm from 83 | Wirth, Niklaus 84 | Algorithms + data structures = programs, p. 
366 85 | Englewood Cliffs: Prentice-Hall, 1976 86 | 87 | Adapted for Bottleneck: 88 | (C) 2016 Keith Goodman 89 | */ 90 | 91 | #define WIRTH(dtype) \ 92 | x = B(dtype, k); \ 93 | i = l; \ 94 | j = r; \ 95 | do { \ 96 | while (B(dtype, i) < x) i++; \ 97 | while (x < B(dtype, j)) j--; \ 98 | if (i <= j) { \ 99 | npy_##dtype atmp = B(dtype, i); \ 100 | B(dtype, i) = B(dtype, j); \ 101 | B(dtype, j) = atmp; \ 102 | i++; \ 103 | j--; \ 104 | } \ 105 | } while (i <= j); \ 106 | if (j < k) l = i; \ 107 | if (k < i) r = j; 108 | 109 | /* partition ------------------------------------------------------------- */ 110 | 111 | #define PARTITION(dtype) \ 112 | while (l < r) { \ 113 | npy_##dtype x; \ 114 | npy_##dtype al = B(dtype, l); \ 115 | npy_##dtype ak = B(dtype, k); \ 116 | npy_##dtype ar = B(dtype, r); \ 117 | if (al > ak) { \ 118 | if (ak < ar) { \ 119 | if (al < ar) { \ 120 | B(dtype, k) = al; \ 121 | B(dtype, l) = ak; \ 122 | } else { \ 123 | B(dtype, k) = ar; \ 124 | B(dtype, r) = ak; \ 125 | } \ 126 | } \ 127 | } else { \ 128 | if (ak > ar) { \ 129 | if (al > ar) { \ 130 | B(dtype, k) = al; \ 131 | B(dtype, l) = ak; \ 132 | } else { \ 133 | B(dtype, k) = ar; \ 134 | B(dtype, r) = ak; \ 135 | } \ 136 | } \ 137 | } \ 138 | WIRTH(dtype) \ 139 | } 140 | 141 | /* slow ------------------------------------------------------------------ */ 142 | 143 | static PyObject *slow_module = NULL; 144 | 145 | static PyObject * 146 | slow(char *name, PyObject *args, PyObject *kwds) 147 | { 148 | PyObject *func = NULL; 149 | PyObject *out = NULL; 150 | 151 | if (slow_module == NULL) { 152 | /* bottleneck.slow has not been imported during the current 153 | * python session. Only import it once per session to save time */ 154 | slow_module = PyImport_ImportModule("bottleneck.slow"); 155 | if (slow_module == NULL) { 156 | PyErr_SetString(PyExc_RuntimeError, 157 | "Cannot import bottleneck.slow"); 158 | return NULL; 159 | } 160 | } 161 | 162 | func = PyObject_GetAttrString(slow_module, name); 163 | if (func == NULL) { 164 | PyErr_Format(PyExc_RuntimeError, 165 | "Cannot import %s from bottleneck.slow", name); 166 | return NULL; 167 | } 168 | if (PyCallable_Check(func)) { 169 | out = PyObject_Call(func, args, kwds); 170 | if (out == NULL) { 171 | Py_XDECREF(func); 172 | return NULL; 173 | } 174 | } else { 175 | Py_XDECREF(func); 176 | PyErr_Format(PyExc_RuntimeError, 177 | "bottleneck.slow.%s is not callable", name); 178 | return NULL; 179 | } 180 | Py_XDECREF(func); 181 | 182 | return out; 183 | } 184 | 185 | #endif // BOTTLENECK_H_ 186 | -------------------------------------------------------------------------------- /bottleneck/src/move_median/.gitignore: -------------------------------------------------------------------------------- 1 | *.out 2 | -------------------------------------------------------------------------------- /bottleneck/src/move_median/makefile: -------------------------------------------------------------------------------- 1 | 2 | all: clean 3 | @gcc move_median.c move_median_debug.c -DBINARY_TREE=1 -lm -Wall -Wextra 4 | @./a.out 5 | 6 | gdb: clean 7 | @gcc move_median.c move_median_debug.c -DBINARY_TREE=1 -lm -g -Wall -Wextra 8 | @gdb ./a.out 9 | 10 | valgrind: clean 11 | @gcc move_median.c move_median_debug.c -DBINARY_TREE=1 -lm -g -Wall -Wextra 12 | @valgrind --tool=memcheck --leak-check=yes --show-reachable=yes \ 13 | --num-callers=20 ./a.out 14 | 15 | clean: 16 | @rm -rf a.out 17 | -------------------------------------------------------------------------------- 
/bottleneck/src/move_median/move_median.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | typedef size_t idx_t; 10 | typedef double ai_t; 11 | 12 | #if BINARY_TREE==1 13 | #define NUM_CHILDREN 2 14 | #else 15 | /* maximum of 8 due to the manual loop-unrolling used in the code */ 16 | #define NUM_CHILDREN 8 17 | #endif 18 | 19 | /* Find indices of parent and first child */ 20 | #define P_IDX(i) ((i) - 1) / NUM_CHILDREN 21 | #define FC_IDX(i) NUM_CHILDREN * (i) + 1 22 | 23 | /* are we in the small heap (SM), large heap (LH) or NaN array (NA)? */ 24 | #define SH 0 25 | #define LH 1 26 | #define NA 2 27 | 28 | #define FIRST_LEAF(n) (idx_t) ceil((n - 1) / (double)NUM_CHILDREN) 29 | 30 | struct _mm_node { 31 | int region; /* SH small heap, LH large heap, NA nan array */ 32 | ai_t ai; /* The node's value */ 33 | idx_t idx; /* The node's index in the heap or nan array */ 34 | struct _mm_node *next; /* The next node in order of insertion */ 35 | }; 36 | typedef struct _mm_node mm_node; 37 | 38 | struct _mm_handle { 39 | idx_t window; /* window size */ 40 | int odd; /* is window even (0) or odd (1) */ 41 | idx_t min_count; /* Same meaning as in bn.move_median */ 42 | idx_t n_s; /* Number of nodes in the small heap */ 43 | idx_t n_l; /* Number of nodes in the large heap */ 44 | idx_t n_n; /* Number of nodes in the nan array */ 45 | mm_node **s_heap; /* The max heap of small ai */ 46 | mm_node **l_heap; /* The min heap of large ai */ 47 | mm_node **n_array; /* The nan array */ 48 | mm_node **nodes; /* s_heap and l_heap point into this array */ 49 | mm_node *node_data; /* Pointer to memory location where nodes live */ 50 | mm_node *oldest; /* The oldest node */ 51 | mm_node *newest; /* The newest node (most recent insert) */ 52 | idx_t s_first_leaf; /* All nodes this index or greater are leaf nodes */ 53 | idx_t l_first_leaf; /* All nodes this index or greater are leaf nodes */ 54 | }; 55 | typedef struct _mm_handle mm_handle; 56 | 57 | /* non-nan functions */ 58 | mm_handle *mm_new(const idx_t window, idx_t min_count); 59 | ai_t mm_update_init(mm_handle *mm, ai_t ai); 60 | ai_t mm_update(mm_handle *mm, ai_t ai); 61 | 62 | /* nan functions */ 63 | mm_handle *mm_new_nan(const idx_t window, idx_t min_count); 64 | ai_t mm_update_init_nan(mm_handle *mm, ai_t ai); 65 | ai_t mm_update_nan(mm_handle *mm, ai_t ai); 66 | 67 | /* functions common to non-nan and nan cases */ 68 | void mm_reset(mm_handle *mm); 69 | void mm_free(mm_handle *mm); 70 | 71 | /* Copied from Cython ---------------------------------------------------- */ 72 | 73 | /* NaN */ 74 | #ifdef NAN 75 | #define MM_NAN() ((float) NAN) 76 | #else 77 | static inline float MM_NAN(void) { 78 | float value; 79 | memset(&value, 0xFF, sizeof(value)); 80 | return value; 81 | } 82 | #endif 83 | -------------------------------------------------------------------------------- /bottleneck/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pydata/bottleneck/7a0110dff7e14012fb2924ba50668b2f1f83008c/bottleneck/tests/__init__.py -------------------------------------------------------------------------------- /bottleneck/tests/common.py: -------------------------------------------------------------------------------- 1 | from hypothesis.extra.numpy import array_shapes 2 | from hypothesis.extra.numpy import arrays as hy_arrays 3 | from 
hypothesis.extra.numpy import floating_dtypes, integer_dtypes 4 | from hypothesis.strategies import one_of 5 | 6 | hy_array_gen = hy_arrays( 7 | dtype=one_of(integer_dtypes(sizes=(32, 64)), floating_dtypes(sizes=(32, 64))), 8 | shape=array_shapes(), 9 | ) 10 | 11 | hy_int_array_gen = hy_arrays( 12 | dtype=integer_dtypes(sizes=(32, 64)), 13 | shape=array_shapes(), 14 | ) 15 | -------------------------------------------------------------------------------- /bottleneck/tests/data/template_test/test_template.c: -------------------------------------------------------------------------------- 1 | // Copyright 2010-2019 Keith Goodman 2 | // Copyright 2019 Bottleneck Developers 3 | #include "bottleneck.h" 4 | #include "iterators.h" 5 | 6 | 7 | /* nanmin, nanmax -------------------------------------------------------- */ 8 | 9 | /* repeat = {'NAME': ['nanmin', 'nanmax'], 10 | 'COMPARE': ['<=', '>='], 11 | 'BIG_FLOAT': ['BN_INFINITY', '-BN_INFINITY'], 12 | 'BIG_INT': ['NPY_MAX_DTYPE0', 'NPY_MIN_DTYPE0']} */ 13 | /* dtype = [['float64'], ['float32']] */ 14 | FOO(NAME, DTYPE0) { 15 | npy_DTYPE0 bar = BIG_FLOAT; 16 | if (bar COMPARE 0) { 17 | bar = 0; 18 | } 19 | return PyFloat_FromDouble(bar); 20 | } 21 | /* dtype end */ 22 | 23 | /* dtype = [['int64'], ['int32']] */ 24 | FOO(NAME, DTYPE0) { 25 | npy_DTYPE0 bar = BIG_FLOAT; 26 | if (bar COMPARE 0) { 27 | bar = 0; 28 | } 29 | return PyFloat_FromDouble(bar); 30 | } 31 | /* dtype end */ 32 | 33 | REDUCE_MAIN(NAME, 0) 34 | /* repeat end */ 35 | -------------------------------------------------------------------------------- /bottleneck/tests/data/template_test/truth.c: -------------------------------------------------------------------------------- 1 | #line 1 "{DIRPATH}/test_template.c" 2 | // Copyright 2010-2019 Keith Goodman 3 | // Copyright 2019 Bottleneck Developers 4 | #include "bottleneck.h" 5 | #include "iterators.h" 6 | 7 | /* nanmin, nanmax -------------------------------------------------------- */ 8 | 9 | #line 14 10 | FOO(nanmin, float64) { 11 | npy_float64 bar = BN_INFINITY; 12 | if (bar <= 0) { 13 | bar = 0; 14 | } 15 | return PyFloat_FromDouble(bar); 16 | } 17 | 18 | #line 14 19 | FOO(nanmin, float32) { 20 | npy_float32 bar = BN_INFINITY; 21 | if (bar <= 0) { 22 | bar = 0; 23 | } 24 | return PyFloat_FromDouble(bar); 25 | } 26 | 27 | #line 24 28 | FOO(nanmin, int64) { 29 | npy_int64 bar = BN_INFINITY; 30 | if (bar <= 0) { 31 | bar = 0; 32 | } 33 | return PyFloat_FromDouble(bar); 34 | } 35 | 36 | #line 24 37 | FOO(nanmin, int32) { 38 | npy_int32 bar = BN_INFINITY; 39 | if (bar <= 0) { 40 | bar = 0; 41 | } 42 | return PyFloat_FromDouble(bar); 43 | } 44 | 45 | REDUCE_MAIN(nanmin, 0) 46 | 47 | #line 14 48 | FOO(nanmax, float64) { 49 | npy_float64 bar = -BN_INFINITY; 50 | if (bar >= 0) { 51 | bar = 0; 52 | } 53 | return PyFloat_FromDouble(bar); 54 | } 55 | 56 | #line 14 57 | FOO(nanmax, float32) { 58 | npy_float32 bar = -BN_INFINITY; 59 | if (bar >= 0) { 60 | bar = 0; 61 | } 62 | return PyFloat_FromDouble(bar); 63 | } 64 | 65 | #line 24 66 | FOO(nanmax, int64) { 67 | npy_int64 bar = -BN_INFINITY; 68 | if (bar >= 0) { 69 | bar = 0; 70 | } 71 | return PyFloat_FromDouble(bar); 72 | } 73 | 74 | #line 24 75 | FOO(nanmax, int32) { 76 | npy_int32 bar = -BN_INFINITY; 77 | if (bar >= 0) { 78 | bar = 0; 79 | } 80 | return PyFloat_FromDouble(bar); 81 | } 82 | 83 | REDUCE_MAIN(nanmax, 0) 84 | -------------------------------------------------------------------------------- /bottleneck/tests/docker/centos_7_min_deps/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM centos:7 2 | RUN yum update -y 3 | RUN yum install -y gcc python3-devel python3-pip 4 | RUN pip3 install --upgrade pip 5 | WORKDIR /tmp 6 | CMD ["pip3", "install", "/bottleneck_src"] -------------------------------------------------------------------------------- /bottleneck/tests/docker/centos_8_min_deps/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:8 2 | RUN yum update -y 3 | RUN yum install -y gcc python3-devel python3-pip 4 | RUN pip3 install --upgrade pip 5 | WORKDIR /tmp 6 | CMD ["pip3", "install", "/bottleneck_src"] -------------------------------------------------------------------------------- /bottleneck/tests/docker/release_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | rm -rf temp_clone 5 | git clone ../../.. temp_clone 6 | 7 | cases=(centos_7_min_deps centos_8_min_deps ubuntu_lts_min_deps ubuntu_devel_min_deps) 8 | for case in ${cases[@]}; do 9 | echo $case 10 | docker build -t $case $case 11 | docker run --mount type=bind,source=$(pwd)/temp_clone,destination=/bottleneck_src,readonly $case 12 | done 13 | 14 | rm -rf temp_clone -------------------------------------------------------------------------------- /bottleneck/tests/docker/ubuntu_devel_min_deps/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:devel 2 | RUN apt-get update 3 | RUN apt-get install -y gcc python3-dev python3-pip 4 | RUN pip3 install --upgrade pip 5 | WORKDIR /tmp 6 | CMD ["pip3", "install", "/bottleneck_src"] -------------------------------------------------------------------------------- /bottleneck/tests/docker/ubuntu_lts_min_deps/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | RUN apt-get update 3 | RUN apt-get install -y gcc python3-dev python3-pip 4 | RUN pip3 install --upgrade pip 5 | WORKDIR /tmp 6 | CMD ["pip3", "install", "/bottleneck_src"] -------------------------------------------------------------------------------- /bottleneck/tests/input_modification_test.py: -------------------------------------------------------------------------------- 1 | """Test functions.""" 2 | 3 | import warnings 4 | 5 | import numpy as np 6 | from numpy.testing import assert_equal 7 | import bottleneck as bn 8 | from .util import DTYPES 9 | import pytest 10 | 11 | 12 | def arrays(dtypes): 13 | """Iterator that yield arrays to use for unit testing.""" 14 | ss = {} 15 | ss[1] = {"size": 4, "shapes": [(4,)]} 16 | ss[2] = {"size": 6, "shapes": [(2, 3)]} 17 | ss[3] = {"size": 6, "shapes": [(1, 2, 3)]} 18 | rs = np.random.RandomState([1, 2, 3]) 19 | for ndim in ss: 20 | size = ss[ndim]["size"] 21 | shapes = ss[ndim]["shapes"] 22 | for dtype in dtypes: 23 | a = np.arange(size, dtype=dtype) 24 | if issubclass(a.dtype.type, np.inexact): 25 | idx = rs.rand(*a.shape) < 0.2 26 | a[idx] = np.inf 27 | idx = rs.rand(*a.shape) < 0.2 28 | a[idx] = np.nan 29 | idx = rs.rand(*a.shape) < 0.2 30 | a[idx] *= -1 31 | for shape in shapes: 32 | a = a.reshape(shape) 33 | yield a 34 | 35 | 36 | @pytest.mark.parametrize("func", bn.get_functions("all"), ids=lambda x: x.__name__) 37 | def test_modification(func): 38 | """Test that bn.xxx gives the same output as np.xxx.""" 39 | name = func.__name__ 40 | if name == "replace": 41 | return 42 | msg = "\nInput array modified by %s.\n\n" 43 | 
msg += "input array before:\n%s\nafter:\n%s\n" 44 | for i, a in enumerate(arrays(DTYPES)): 45 | axes = list(range(-a.ndim, a.ndim)) 46 | if all(x not in name for x in ["push", "move", "sort", "partition"]): 47 | axes += [None] 48 | 49 | second_arg = 1 50 | if "partition" in name: 51 | second_arg = 0 52 | 53 | for axis in axes: 54 | with np.errstate(invalid="ignore"): 55 | a1 = a.copy() 56 | a2 = a.copy() 57 | if any(x in name for x in ["move", "sort", "partition"]): 58 | with warnings.catch_warnings(): 59 | warnings.simplefilter("ignore") 60 | func(a1, second_arg, axis=axis) 61 | else: 62 | try: 63 | with warnings.catch_warnings(): 64 | warnings.simplefilter("ignore") 65 | func(a1, axis=axis) 66 | except ValueError as e: 67 | if name.startswith( 68 | "nanarg" 69 | ) and "All-NaN slice encountered" in str(e): 70 | continue 71 | assert_equal(a1, a2, msg % (name, a1, a2)) 72 | -------------------------------------------------------------------------------- /bottleneck/tests/list_input_test.py: -------------------------------------------------------------------------------- 1 | """Check that functions can handle list input""" 2 | 3 | import warnings 4 | 5 | import numpy as np 6 | from numpy.testing import assert_array_almost_equal 7 | import bottleneck as bn 8 | from .util import DTYPES 9 | import pytest 10 | 11 | 12 | def lists(dtypes=DTYPES): 13 | """Iterator that yields lists to use for unit testing.""" 14 | ss = {} 15 | ss[1] = {"size": 4, "shapes": [(4,)]} 16 | ss[2] = {"size": 6, "shapes": [(1, 6), (2, 3)]} 17 | ss[3] = {"size": 6, "shapes": [(1, 2, 3)]} 18 | ss[4] = {"size": 24, "shapes": [(1, 2, 3, 4)]} 19 | for ndim in ss: 20 | size = ss[ndim]["size"] 21 | shapes = ss[ndim]["shapes"] 22 | a = np.arange(size) 23 | for shape in shapes: 24 | a = a.reshape(shape) 25 | for dtype in dtypes: 26 | yield a.astype(dtype).tolist() 27 | 28 | 29 | @pytest.mark.parametrize("func", bn.get_functions("all"), ids=lambda x: x.__name__) 30 | def test_list_input(func): 31 | """Test that bn.xxx gives the same output as bn.slow.xxx for list input.""" 32 | msg = "\nfunc %s | input %s (%s) | shape %s\n" 33 | msg += "\nInput array:\n%s\n" 34 | name = func.__name__ 35 | if name == "replace": 36 | return 37 | func0 = eval("bn.slow.%s" % name) 38 | for i, a in enumerate(lists()): 39 | with warnings.catch_warnings(): 40 | warnings.simplefilter("ignore") 41 | try: 42 | actual = func(a) 43 | desired = func0(a) 44 | except TypeError: 45 | actual = func(a, 2) 46 | desired = func0(a, 2) 47 | a = np.array(a) 48 | tup = (name, "a" + str(i), str(a.dtype), str(a.shape), a) 49 | err_msg = msg % tup 50 | assert_array_almost_equal(actual, desired, err_msg=err_msg) 51 | -------------------------------------------------------------------------------- /bottleneck/tests/memory_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sys 3 | import bottleneck as bn 4 | import pytest 5 | 6 | 7 | @pytest.mark.thread_unsafe 8 | @pytest.mark.skipif( 9 | sys.platform.startswith("win"), reason="resource module not available on windows" 10 | ) 11 | def test_memory_leak(): 12 | import resource 13 | 14 | arr = np.arange(1).reshape((1, 1)) 15 | 16 | starting = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss 17 | 18 | for i in range(1000): 19 | for axis in [None, 0, 1]: 20 | bn.nansum(arr, axis=axis) 21 | bn.nanargmax(arr, axis=axis) 22 | bn.nanargmin(arr, axis=axis) 23 | bn.nanmedian(arr, axis=axis) 24 | bn.nansum(arr, axis=axis) 25 | bn.nanmean(arr, axis=axis) 26 | 
bn.nanmin(arr, axis=axis) 27 | bn.nanmax(arr, axis=axis) 28 | bn.nanvar(arr, axis=axis) 29 | 30 | ending = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss 31 | 32 | diff = ending - starting 33 | diff_bytes = diff * resource.getpagesize() 34 | print(diff_bytes) 35 | # For 1.3.0 release, this had value of ~100kB 36 | assert diff_bytes == 0 37 | -------------------------------------------------------------------------------- /bottleneck/tests/move_test.py: -------------------------------------------------------------------------------- 1 | """Test moving window functions.""" 2 | 3 | import numpy as np 4 | from numpy.testing import assert_equal, assert_array_almost_equal, assert_raises 5 | import bottleneck as bn 6 | from .util import arrays, array_order 7 | import pytest 8 | 9 | 10 | @pytest.mark.parametrize("func", bn.get_functions("move"), ids=lambda x: x.__name__) 11 | def test_move(func): 12 | """Test that bn.xxx gives the same output as a reference function.""" 13 | fmt = ( 14 | "\nfunc %s | window %d | min_count %s | input %s (%s) | shape %s | " 15 | "axis %s | order %s\n" 16 | ) 17 | fmt += "\nInput array:\n%s\n" 18 | aaae = assert_array_almost_equal 19 | func_name = func.__name__ 20 | func0 = eval("bn.slow.%s" % func_name) 21 | if func_name == "move_var": 22 | decimal = 3 23 | else: 24 | decimal = 5 25 | for i, a in enumerate(arrays(func_name)): 26 | axes = range(-1, a.ndim) 27 | for axis in axes: 28 | windows = range(1, a.shape[axis]) 29 | for window in windows: 30 | min_counts = list(range(1, window + 1)) + [None] 31 | for min_count in min_counts: 32 | actual = func(a, window, min_count, axis=axis) 33 | desired = func0(a, window, min_count, axis=axis) 34 | tup = ( 35 | func_name, 36 | window, 37 | str(min_count), 38 | "a" + str(i), 39 | str(a.dtype), 40 | str(a.shape), 41 | str(axis), 42 | array_order(a), 43 | a, 44 | ) 45 | err_msg = fmt % tup 46 | aaae(actual, desired, decimal, err_msg) 47 | err_msg += "\n dtype mismatch %s %s" 48 | da = actual.dtype 49 | dd = desired.dtype 50 | assert_equal(da, dd, err_msg % (da, dd)) 51 | 52 | 53 | # --------------------------------------------------------------------------- 54 | # Test argument parsing 55 | 56 | 57 | @pytest.mark.parametrize("func", bn.get_functions("move"), ids=lambda x: x.__name__) 58 | def test_arg_parsing(func, decimal=5): 59 | """test argument parsing.""" 60 | 61 | name = func.__name__ 62 | func0 = eval("bn.slow.%s" % name) 63 | 64 | a = np.array([1.0, 2, 3]) 65 | 66 | fmt = "\n%s" % func 67 | fmt += "%s\n" 68 | fmt += "\nInput array:\n%s\n" % a 69 | 70 | actual = func(a, 2) 71 | desired = func0(a, 2) 72 | err_msg = fmt % "(a, 2)" 73 | assert_array_almost_equal(actual, desired, decimal, err_msg) 74 | 75 | actual = func(a, 2, 1) 76 | desired = func0(a, 2, 1) 77 | err_msg = fmt % "(a, 2, 1)" 78 | assert_array_almost_equal(actual, desired, decimal, err_msg) 79 | 80 | actual = func(a, window=2) 81 | desired = func0(a, window=2) 82 | err_msg = fmt % "(a, window=2)" 83 | assert_array_almost_equal(actual, desired, decimal, err_msg) 84 | 85 | actual = func(a, window=2, min_count=1) 86 | desired = func0(a, window=2, min_count=1) 87 | err_msg = fmt % "(a, window=2, min_count=1)" 88 | assert_array_almost_equal(actual, desired, decimal, err_msg) 89 | 90 | actual = func(a, window=2, min_count=1, axis=0) 91 | desired = func0(a, window=2, min_count=1, axis=0) 92 | err_msg = fmt % "(a, window=2, min_count=1, axis=0)" 93 | assert_array_almost_equal(actual, desired, decimal, err_msg) 94 | 95 | actual = func(a, min_count=1, window=2, 
axis=0) 96 | desired = func0(a, min_count=1, window=2, axis=0) 97 | err_msg = fmt % "(a, min_count=1, window=2, axis=0)" 98 | assert_array_almost_equal(actual, desired, decimal, err_msg) 99 | 100 | actual = func(a, axis=-1, min_count=None, window=2) 101 | desired = func0(a, axis=-1, min_count=None, window=2) 102 | err_msg = fmt % "(a, axis=-1, min_count=None, window=2)" 103 | assert_array_almost_equal(actual, desired, decimal, err_msg) 104 | 105 | actual = func(a=a, axis=-1, min_count=None, window=2) 106 | desired = func0(a=a, axis=-1, min_count=None, window=2) 107 | err_msg = fmt % "(a=a, axis=-1, min_count=None, window=2)" 108 | assert_array_almost_equal(actual, desired, decimal, err_msg) 109 | 110 | if name in ("move_std", "move_var"): 111 | actual = func(a, 2, 1, -1, ddof=1) 112 | desired = func0(a, 2, 1, -1, ddof=1) 113 | err_msg = fmt % "(a, 2, 1, -1, ddof=1)" 114 | assert_array_almost_equal(actual, desired, decimal, err_msg) 115 | 116 | # regression test: make sure len(kwargs) == 0 doesn't raise 117 | args = (a, 1, 1, -1) 118 | kwargs = {} 119 | func(*args, **kwargs) 120 | 121 | 122 | @pytest.mark.parametrize("func", bn.get_functions("move"), ids=lambda x: x.__name__) 123 | def test_arg_parse_raises(func): 124 | """test argument parsing raises in move""" 125 | a = np.array([1.0, 2, 3]) 126 | assert_raises(TypeError, func) 127 | assert_raises(TypeError, func, axis=a) 128 | assert_raises(TypeError, func, a, 2, axis=0, extra=0) 129 | assert_raises(TypeError, func, a, 2, axis=0, a=a) 130 | assert_raises(TypeError, func, a, 2, 2, 0, 0, 0) 131 | assert_raises(TypeError, func, a, 2, axis="0") 132 | assert_raises(TypeError, func, a, 1, min_count="1") 133 | if func.__name__ not in ("move_std", "move_var"): 134 | assert_raises(TypeError, func, a, 2, ddof=0) 135 | 136 | 137 | # --------------------------------------------------------------------------- 138 | # move_median.c is complicated. Let's do some more testing. 139 | # 140 | # If you make changes to move_median.c then do lots of tests by increasing 141 | # range(100) in the two functions below to range(10000). And for extra credit 142 | # increase size to 30. With those two changes the unit tests will take a 143 | # LONG time to run. 
144 | 145 | 146 | def test_move_median_with_nans(): 147 | """test move_median.c with nans""" 148 | fmt = "\nfunc %s | window %d | min_count %s\n\nInput array:\n%s\n" 149 | aaae = assert_array_almost_equal 150 | min_count = 1 151 | size = 10 152 | func = bn.move_median 153 | func0 = bn.slow.move_median 154 | rs = np.random.RandomState([1, 2, 3]) 155 | for i in range(100): 156 | a = np.arange(size, dtype=np.float64) 157 | idx = rs.rand(*a.shape) < 0.1 158 | a[idx] = np.inf 159 | idx = rs.rand(*a.shape) < 0.2 160 | a[idx] = np.nan 161 | rs.shuffle(a) 162 | for window in range(2, size + 1): 163 | actual = func(a, window=window, min_count=min_count) 164 | desired = func0(a, window=window, min_count=min_count) 165 | err_msg = fmt % (func.__name__, window, min_count, a) 166 | aaae(actual, desired, decimal=5, err_msg=err_msg) 167 | 168 | 169 | def test_move_median_without_nans(): 170 | """test move_median.c without nans""" 171 | fmt = "\nfunc %s | window %d | min_count %s\n\nInput array:\n%s\n" 172 | aaae = assert_array_almost_equal 173 | min_count = 1 174 | size = 10 175 | func = bn.move_median 176 | func0 = bn.slow.move_median 177 | rs = np.random.RandomState([1, 2, 3]) 178 | for i in range(100): 179 | a = np.arange(size, dtype=np.int64) 180 | rs.shuffle(a) 181 | for window in range(2, size + 1): 182 | actual = func(a, window=window, min_count=min_count) 183 | desired = func0(a, window=window, min_count=min_count) 184 | err_msg = fmt % (func.__name__, window, min_count, a) 185 | aaae(actual, desired, decimal=5, err_msg=err_msg) 186 | 187 | 188 | # ---------------------------------------------------------------------------- 189 | # Regression test for square roots of negative numbers 190 | 191 | 192 | def test_move_std_sqrt(): 193 | """Test move_std for neg sqrt.""" 194 | 195 | a = [ 196 | 0.0011448196318903589, 197 | 0.00028718669878572767, 198 | 0.00028718669878572767, 199 | 0.00028718669878572767, 200 | 0.00028718669878572767, 201 | ] 202 | err_msg = "Square root of negative number. 
ndim = %d" 203 | b = bn.move_std(a, window=3) 204 | assert np.isfinite(b[2:]).all(), err_msg % 1 205 | 206 | a2 = np.array([a, a]) 207 | b = bn.move_std(a2, window=3, axis=1) 208 | assert np.isfinite(b[:, 2:]).all(), err_msg % 2 209 | 210 | a3 = np.array([[a, a], [a, a]]) 211 | b = bn.move_std(a3, window=3, axis=2) 212 | assert np.isfinite(b[:, :, 2:]).all(), err_msg % 3 213 | -------------------------------------------------------------------------------- /bottleneck/tests/nonreduce_axis_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.testing import ( 3 | assert_equal, 4 | assert_array_equal, 5 | assert_array_almost_equal, 6 | assert_raises, 7 | ) 8 | 9 | import bottleneck as bn 10 | from .reduce_test import ( 11 | unit_maker as reduce_unit_maker, 12 | unit_maker_argparse as unit_maker_parse_rankdata, 13 | ) 14 | from .util import arrays, array_order, DTYPES 15 | import pytest 16 | 17 | # --------------------------------------------------------------------------- 18 | # partition, argpartition 19 | 20 | 21 | @pytest.mark.parametrize( 22 | "func", (bn.partition, bn.argpartition), ids=lambda x: x.__name__ 23 | ) 24 | def test_partition_and_argpartition(func): 25 | """test partition or argpartition""" 26 | 27 | msg = "\nfunc %s | input %s (%s) | shape %s | n %d | axis %s | order %s\n" 28 | msg += "\nInput array:\n%s\n" 29 | 30 | name = func.__name__ 31 | func0 = eval("bn.slow.%s" % name) 32 | 33 | rs = np.random.RandomState([1, 2, 3]) 34 | for i, a in enumerate(arrays(name)): 35 | if a.ndim == 0 or a.size == 0 or a.ndim > 3: 36 | continue 37 | for axis in list(range(-1, a.ndim)) + [None]: 38 | if axis is None: 39 | nmax = a.size - 1 40 | else: 41 | nmax = a.shape[axis] - 1 42 | if nmax < 1: 43 | continue 44 | n = rs.randint(nmax) 45 | s0 = func0(a, n, axis) 46 | s1 = func(a, n, axis) 47 | if name == "argpartition": 48 | s0 = complete_the_argpartition(s0, a, n, axis) 49 | s1 = complete_the_argpartition(s1, a, n, axis) 50 | else: 51 | s0 = complete_the_partition(s0, n, axis) 52 | s1 = complete_the_partition(s1, n, axis) 53 | tup = ( 54 | name, 55 | "a" + str(i), 56 | str(a.dtype), 57 | str(a.shape), 58 | n, 59 | str(axis), 60 | array_order(a), 61 | a, 62 | ) 63 | err_msg = msg % tup 64 | assert_array_equal(s1, s0, err_msg) 65 | 66 | 67 | def complete_the_partition(a, n, axis): 68 | def func1d(a, n): 69 | a[:n] = np.sort(a[:n]) 70 | a[n + 1 :] = np.sort(a[n + 1 :]) 71 | return a 72 | 73 | a = a.copy() 74 | ndim = a.ndim 75 | if axis is None: 76 | if ndim != 1: 77 | raise ValueError("`a` must be 1d when axis is None") 78 | axis = 0 79 | elif axis < 0: 80 | axis += ndim 81 | if axis < 0: 82 | raise ValueError("`axis` out of range") 83 | a = np.apply_along_axis(func1d, axis, a, n) 84 | return a 85 | 86 | 87 | def complete_the_argpartition(index, a, n, axis): 88 | a = a.copy() 89 | ndim = a.ndim 90 | if axis is None: 91 | if index.ndim != 1: 92 | raise ValueError("`index` must be 1d when axis is None") 93 | axis = 0 94 | ndim = 1 95 | a = a.reshape(-1) 96 | elif axis < 0: 97 | axis += ndim 98 | if axis < 0: 99 | raise ValueError("`axis` out of range") 100 | if ndim == 1: 101 | a = a[index] 102 | elif ndim == 2: 103 | if axis == 0: 104 | for i in range(a.shape[1]): 105 | a[:, i] = a[index[:, i], i] 106 | elif axis == 1: 107 | for i in range(a.shape[0]): 108 | a[i] = a[i, index[i]] 109 | else: 110 | raise ValueError("`axis` out of range") 111 | elif ndim == 3: 112 | if axis == 0: 113 | for i in range(a.shape[1]): 114 | 
for j in range(a.shape[2]): 115 | a[:, i, j] = a[index[:, i, j], i, j] 116 | elif axis == 1: 117 | for i in range(a.shape[0]): 118 | for j in range(a.shape[2]): 119 | a[i, :, j] = a[i, index[i, :, j], j] 120 | elif axis == 2: 121 | for i in range(a.shape[0]): 122 | for j in range(a.shape[1]): 123 | a[i, j, :] = a[i, j, index[i, j, :]] 124 | else: 125 | raise ValueError("`axis` out of range") 126 | else: 127 | raise ValueError("`a.ndim` must be 1, 2, or 3") 128 | a = complete_the_partition(a, n, axis) 129 | return a 130 | 131 | 132 | def test_transpose(): 133 | """partition transpose test""" 134 | a = np.arange(12).reshape(4, 3) 135 | actual = bn.partition(a.T, 2, -1).T 136 | desired = bn.slow.partition(a.T, 2, -1).T 137 | assert_equal(actual, desired, "partition transpose test") 138 | 139 | 140 | # --------------------------------------------------------------------------- 141 | # rankdata, nanrankdata, push 142 | 143 | 144 | @pytest.mark.parametrize( 145 | "func", (bn.rankdata, bn.nanrankdata, bn.push), ids=lambda x: x.__name__ 146 | ) 147 | def test_nonreduce_axis(func): 148 | """Test nonreduce axis functions""" 149 | return reduce_unit_maker(func) 150 | 151 | 152 | def test_push(): 153 | """Test push""" 154 | ns = (0, 1, 2, 3, 4, 5, None) 155 | a = np.array([np.nan, 1, 2, np.nan, np.nan, np.nan, np.nan, 3, np.nan]) 156 | for n in ns: 157 | actual = bn.push(a.copy(), n=n) 158 | desired = bn.slow.push(a.copy(), n=n) 159 | assert_array_equal(actual, desired, "failed on n=%s" % str(n)) 160 | 161 | 162 | # --------------------------------------------------------------------------- 163 | # Test argument parsing 164 | 165 | 166 | @pytest.mark.parametrize( 167 | "func", bn.get_functions("nonreduce_axis"), ids=lambda x: x.__name__ 168 | ) 169 | def test_arg_parsing(func): 170 | """test argument parsing in nonreduce_axis""" 171 | name = func.__name__ 172 | if name in ("partition", "argpartition"): 173 | return unit_maker_parse(func) 174 | elif name in ("push"): 175 | return unit_maker_parse(func) 176 | elif name in ("rankdata", "nanrankdata"): 177 | return unit_maker_parse_rankdata(func) 178 | else: 179 | fmt = "``%s` is an unknown nonreduce_axis function" 180 | raise ValueError(fmt % name) 181 | return unit_maker_raises(func) 182 | 183 | 184 | def unit_maker_parse(func, decimal=5): 185 | """test argument parsing.""" 186 | 187 | name = func.__name__ 188 | func0 = eval("bn.slow.%s" % name) 189 | 190 | a = np.array([1.0, 2, 3]) 191 | 192 | fmt = "\n%s" % func 193 | fmt += "%s\n" 194 | fmt += "\nInput array:\n%s\n" % a 195 | 196 | actual = func(a, 1) 197 | desired = func0(a, 1) 198 | err_msg = fmt % "(a, 1)" 199 | assert_array_almost_equal(actual, desired, decimal, err_msg) 200 | 201 | actual = func(a, 1, axis=0) 202 | desired = func0(a, 1, axis=0) 203 | err_msg = fmt % "(a, 1, axis=0)" 204 | assert_array_almost_equal(actual, desired, decimal, err_msg) 205 | 206 | if name != "push": 207 | 208 | actual = func(a, 2, None) 209 | desired = func0(a, 2, None) 210 | err_msg = fmt % "(a, 2, None)" 211 | assert_array_almost_equal(actual, desired, decimal, err_msg) 212 | 213 | actual = func(a, 1, axis=None) 214 | desired = func0(a, 1, axis=None) 215 | err_msg = fmt % "(a, 1, axis=None)" 216 | assert_array_almost_equal(actual, desired, decimal, err_msg) 217 | 218 | # regression test: make sure len(kwargs) == 0 doesn't raise 219 | args = (a, 1, -1) 220 | kwargs = {} 221 | func(*args, **kwargs) 222 | 223 | else: 224 | 225 | # regression test: make sure len(kwargs) == 0 doesn't raise 226 | args = (a, 1) 227 | 
kwargs = {} 228 | func(*args, **kwargs) 229 | 230 | 231 | def unit_maker_raises(func): 232 | """test argument parsing raises in nonreduce_axis""" 233 | a = np.array([1.0, 2, 3]) 234 | assert_raises(TypeError, func) 235 | assert_raises(TypeError, func, axis=a) 236 | assert_raises(TypeError, func, a, axis=0, extra=0) 237 | assert_raises(TypeError, func, a, axis=0, a=a) 238 | if func.__name__ in ("partition", "argpartition"): 239 | assert_raises(TypeError, func, a, 0, 0, 0, 0, 0) 240 | assert_raises(TypeError, func, a, axis="0") 241 | 242 | 243 | @pytest.mark.parametrize("dtype", DTYPES) 244 | @pytest.mark.parametrize( 245 | "func", (bn.partition, bn.argpartition), ids=lambda x: x.__name__ 246 | ) 247 | def test_out_of_bounds_raises(func, dtype): 248 | array = np.ones((10, 10), dtype=dtype) 249 | for axis in [None, 0, 1, -1]: 250 | with pytest.raises(ValueError, match="must be between"): 251 | func(array, 1000, axis=axis) 252 | 253 | with pytest.raises(ValueError, match="must be between"): 254 | func(array, -1, axis=axis) 255 | -------------------------------------------------------------------------------- /bottleneck/tests/nonreduce_test.py: -------------------------------------------------------------------------------- 1 | """Test replace().""" 2 | 3 | import warnings 4 | 5 | import numpy as np 6 | from numpy.testing import assert_equal, assert_array_equal, assert_raises 7 | import bottleneck as bn 8 | from .util import arrays, array_order, DTYPES, INT_DTYPES 9 | import pytest 10 | 11 | 12 | @pytest.mark.parametrize( 13 | "func", bn.get_functions("nonreduce"), ids=lambda x: x.__name__ 14 | ) 15 | def test_nonreduce(func): 16 | """Test that bn.xxx gives the same output as np.xxx.""" 17 | msg = "\nfunc %s | input %s (%s) | shape %s | old %f | new %f | order %s\n" 18 | msg += "\nInput array:\n%s\n" 19 | name = func.__name__ 20 | func0 = eval("bn.slow.%s" % name) 21 | rs = np.random.RandomState([1, 2, 3]) 22 | news = [1, 0, np.nan, -np.inf] 23 | for i, arr in enumerate(arrays(name)): 24 | for idx in range(2): 25 | if arr.size == 0: 26 | old = 0 27 | else: 28 | idx = rs.randint(max(arr.size, 1)) 29 | old = arr.flat[idx] 30 | for new in news: 31 | if not issubclass(arr.dtype.type, np.inexact): 32 | if not np.isfinite(old): 33 | # Cannot safely cast to int 34 | continue 35 | if not np.isfinite(new): 36 | # Cannot safely cast to int 37 | continue 38 | actual = arr.copy() 39 | with warnings.catch_warnings(): 40 | warnings.simplefilter("ignore") 41 | func(actual, old, new) 42 | desired = arr.copy() 43 | with warnings.catch_warnings(): 44 | warnings.simplefilter("ignore") 45 | func0(desired, old, new) 46 | tup = ( 47 | name, 48 | "a" + str(i), 49 | str(arr.dtype), 50 | str(arr.shape), 51 | old, 52 | new, 53 | array_order(arr), 54 | arr, 55 | ) 56 | err_msg = msg % tup 57 | assert_array_equal(actual, desired, err_msg=err_msg) 58 | err_msg += "\n dtype mismatch %s %s" 59 | if hasattr(actual, "dtype") or hasattr(desired, "dtype"): 60 | da = actual.dtype 61 | dd = desired.dtype 62 | assert_equal(da, dd, err_msg % (da, dd)) 63 | 64 | 65 | # --------------------------------------------------------------------------- 66 | # Check that exceptions are raised 67 | 68 | 69 | def test_replace_unsafe_cast(): 70 | """Test replace for unsafe casts""" 71 | dtypes = INT_DTYPES 72 | for dtype in dtypes: 73 | a = np.zeros(3, dtype=dtype) 74 | assert_raises(ValueError, bn.replace, a.copy(), 0.1, 0) 75 | assert_raises(ValueError, bn.replace, a.copy(), 0, 0.1) 76 | assert_raises(ValueError, bn.slow.replace, a.copy(), 
0.1, 0) 77 | assert_raises(ValueError, bn.slow.replace, a.copy(), 0, 0.1) 78 | 79 | 80 | def test_non_array(): 81 | """Test that non-array input raises""" 82 | a = [1, 2, 3] 83 | assert_raises(TypeError, bn.replace, a, 0, 1) 84 | a = (1, 2, 3) 85 | assert_raises(TypeError, bn.replace, a, 0, 1) 86 | 87 | 88 | # --------------------------------------------------------------------------- 89 | # Make sure bn.replace and bn.slow.replace can handle int arrays where 90 | # user wants to replace nans 91 | 92 | 93 | @pytest.mark.parametrize("dtype", INT_DTYPES) 94 | def test_replace_nan_int(dtype): 95 | """Test replace, int array, old=nan, new=0""" 96 | a = np.arange(2 * 3 * 4, dtype=dtype).reshape(2, 3, 4) 97 | actual = a.copy() 98 | bn.replace(actual, np.nan, 0) 99 | desired = a.copy() 100 | msg = "replace failed on int input looking for nans" 101 | assert_array_equal(actual, desired, err_msg=msg) 102 | actual = a.copy() 103 | bn.slow.replace(actual, np.nan, 0) 104 | msg = "slow.replace failed on int input looking for nans" 105 | assert_array_equal(actual, desired, err_msg=msg) 106 | 107 | 108 | def test_replace_bad_args(): 109 | array = np.ones((10, 10)) 110 | bad_vals = [None, "", [0], "0"] 111 | for bad_val in bad_vals: 112 | with pytest.raises(TypeError, match="`old` must be a number"): 113 | bn.replace(array, bad_val, 0) 114 | 115 | with pytest.raises(TypeError, match="`new` must be a number"): 116 | bn.replace(array, 0, bad_val) 117 | 118 | with pytest.raises(TypeError, match="Cannot find `a` keyword input"): 119 | bn.replace(foo=array) 120 | 121 | with pytest.raises(TypeError, match="Cannot find `old` keyword input"): 122 | bn.replace(a=array) 123 | 124 | with pytest.raises(TypeError, match="Cannot find `new` keyword input"): 125 | bn.replace(a=array, old=0) 126 | 127 | with pytest.raises(TypeError, match="wrong number of arguments 4"): 128 | bn.replace(array, 0) 129 | 130 | with pytest.raises(TypeError, match="wrong number of arguments 4"): 131 | bn.replace(array, 0, 0, 0) 132 | 133 | 134 | @pytest.mark.parametrize("dtype", DTYPES) 135 | def test_replace_newaxis(dtype): 136 | array = np.ones((2, 2), dtype=dtype)[..., np.newaxis] 137 | result = bn.replace(array, 1, 2) 138 | assert (result == 2).all().all() 139 | -------------------------------------------------------------------------------- /bottleneck/tests/scalar_input_test.py: -------------------------------------------------------------------------------- 1 | """Check that functions can handle scalar input""" 2 | 3 | from numpy.testing import assert_array_almost_equal 4 | import bottleneck as bn 5 | import pytest 6 | 7 | 8 | @pytest.mark.parametrize( 9 | "func", 10 | bn.get_functions("reduce") + bn.get_functions("nonreduce_axis"), # noqa: W504 11 | ids=lambda x: x.__name__, 12 | ) 13 | def test_scalar_input(func, args=tuple()): 14 | """Test that bn.xxx gives the same output as bn.slow.xxx for scalar input.""" 15 | if func.__name__ in ("partition", "argpartition", "push"): 16 | return 17 | func0 = eval("bn.slow.%s" % func.__name__) 18 | msg = "\nfunc %s | input %s\n" 19 | a = -9 20 | argsi = [a] + list(args) 21 | actual = func(*argsi) 22 | desired = func0(*argsi) 23 | err_msg = msg % (func.__name__, a) 24 | assert_array_almost_equal(actual, desired, err_msg=err_msg) 25 | -------------------------------------------------------------------------------- /bottleneck/tests/test_template.py: -------------------------------------------------------------------------------- 1 | import os 2 | import posixpath as path 3 | import pytest 4 | 5 | 
from ..src.bn_template import make_c_files 6 | 7 | 8 | @pytest.mark.thread_unsafe 9 | def test_make_c_files() -> None: 10 | dirpath = os.path.join(os.path.dirname(__file__), "data/template_test/") 11 | modules = ["test"] 12 | test_input = os.path.join(dirpath, "test.c") 13 | if os.path.exists(test_input): 14 | os.remove(test_input) 15 | 16 | make_c_files(dirpath=dirpath, modules=modules) 17 | 18 | with open(os.path.join(dirpath, "truth.c")) as f: 19 | truth = f.read() 20 | 21 | with open(os.path.join(dirpath, "test.c")) as f: 22 | test = f.read() 23 | test = test.replace(path.relpath(dirpath), "{DIRPATH}") 24 | 25 | assert truth == test 26 | 27 | os.remove(test_input) 28 | -------------------------------------------------------------------------------- /bottleneck/tests/util.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import bottleneck as bn 3 | 4 | INT_DTYPES = [np.int64, np.int32] 5 | FLOAT_DTYPES = [np.float64, np.float32] 6 | DTYPES = tuple(FLOAT_DTYPES + INT_DTYPES) 7 | 8 | 9 | def get_functions(module_name, as_string=False): 10 | """Returns a list of functions, optionally as string function names""" 11 | if module_name == "all": 12 | funcs = [] 13 | funcs_in_dict = func_dict() 14 | for key in funcs_in_dict: 15 | for func in funcs_in_dict[key]: 16 | funcs.append(func) 17 | else: 18 | funcs = func_dict()[module_name] 19 | if as_string: 20 | funcs = [f.__name__ for f in funcs] 21 | return funcs 22 | 23 | 24 | def func_dict(): 25 | d = {} 26 | d["reduce"] = [ 27 | bn.nansum, 28 | bn.nanmean, 29 | bn.nanstd, 30 | bn.nanvar, 31 | bn.nanmin, 32 | bn.nanmax, 33 | bn.median, 34 | bn.nanmedian, 35 | bn.ss, 36 | bn.nanargmin, 37 | bn.nanargmax, 38 | bn.anynan, 39 | bn.allnan, 40 | ] 41 | d["move"] = [ 42 | bn.move_sum, 43 | bn.move_mean, 44 | bn.move_std, 45 | bn.move_var, 46 | bn.move_min, 47 | bn.move_max, 48 | bn.move_argmin, 49 | bn.move_argmax, 50 | bn.move_median, 51 | bn.move_rank, 52 | ] 53 | d["nonreduce"] = [bn.replace] 54 | d["nonreduce_axis"] = [ 55 | bn.partition, 56 | bn.argpartition, 57 | bn.rankdata, 58 | bn.nanrankdata, 59 | bn.push, 60 | ] 61 | return d 62 | 63 | 64 | # --------------------------------------------------------------------------- 65 | 66 | 67 | def arrays(func_name, dtypes=DTYPES): 68 | return array_iter(array_generator, func_name, dtypes) 69 | 70 | 71 | def array_iter(arrays_func, *args): 72 | for a in arrays_func(*args): 73 | if a.ndim < 2: 74 | yield a 75 | # this is good for an extra check but in everyday development it 76 | # is a pain because it doubles the unit test run time 77 | # elif a.ndim == 3: 78 | # for axes in permutations(range(a.ndim)): 79 | # yield np.transpose(a, axes) 80 | else: 81 | yield a 82 | yield a.T 83 | 84 | 85 | def array_generator(func_name, dtypes): 86 | """Iterator that yields arrays to use for unit testing.""" 87 | 88 | f_dtypes = list(set(dtypes) & set(FLOAT_DTYPES)) 89 | 90 | # define nan and inf 91 | if func_name in ("partition", "argpartition"): 92 | nan = 0 93 | else: 94 | nan = np.nan 95 | if func_name in ("move_sum", "move_mean", "move_std", "move_var"): 96 | # these functions can't handle inf 97 | inf = 8 98 | else: 99 | inf = np.inf 100 | 101 | # nan and inf 102 | for dtype in f_dtypes: 103 | yield np.array([inf, nan], dtype=dtype) 104 | yield np.array([inf, -inf], dtype=dtype) 105 | yield np.array([nan, 2, 3], dtype=dtype) 106 | yield np.array([-inf, 2, 3], dtype=dtype) 107 | if func_name != "nanargmin": 108 | yield np.array([nan, inf], dtype=dtype) 109 
| 110 | # byte swapped 111 | yield np.array([1, 2, 3], dtype=">f4") 112 | yield np.array([1, 2, 3], dtype=" 5 | 6 | 13 | {% endblock %} 14 | -------------------------------------------------------------------------------- /doc/source/bottleneck.benchmark.rst: -------------------------------------------------------------------------------- 1 | bottleneck.benchmark package 2 | ============================ 3 | 4 | Submodules 5 | ---------- 6 | 7 | bottleneck.benchmark.autotimeit module 8 | -------------------------------------- 9 | 10 | .. automodule:: bottleneck.benchmark.autotimeit 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | bottleneck.benchmark.bench module 16 | --------------------------------- 17 | 18 | .. automodule:: bottleneck.benchmark.bench 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | bottleneck.benchmark.bench\_detailed module 24 | ------------------------------------------- 25 | 26 | .. automodule:: bottleneck.benchmark.bench_detailed 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | 32 | Module contents 33 | --------------- 34 | 35 | .. automodule:: bottleneck.benchmark 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | -------------------------------------------------------------------------------- /doc/source/bottleneck.move.rst: -------------------------------------------------------------------------------- 1 | bottleneck.move module 2 | ============================ 3 | 4 | Module contents 5 | --------------- 6 | 7 | .. automodule:: bottleneck.move 8 | :members: 9 | :undoc-members: 10 | :show-inheritance: 11 | -------------------------------------------------------------------------------- /doc/source/bottleneck.nonreduce.rst: -------------------------------------------------------------------------------- 1 | bottleneck.nonreduce module 2 | ============================ 3 | 4 | Module contents 5 | --------------- 6 | 7 | .. automodule:: bottleneck.nonreduce 8 | :members: 9 | :undoc-members: 10 | :show-inheritance: 11 | -------------------------------------------------------------------------------- /doc/source/bottleneck.nonreduce_axis.rst: -------------------------------------------------------------------------------- 1 | bottleneck.nonreduce_axis module 2 | ================================ 3 | 4 | Module contents 5 | --------------- 6 | 7 | .. automodule:: bottleneck.nonreduce_axis 8 | :members: 9 | :undoc-members: 10 | :show-inheritance: 11 | -------------------------------------------------------------------------------- /doc/source/bottleneck.reduce.rst: -------------------------------------------------------------------------------- 1 | bottleneck.reduce module 2 | ============================ 3 | 4 | Module contents 5 | --------------- 6 | 7 | .. automodule:: bottleneck.reduce 8 | :members: 9 | :undoc-members: 10 | :show-inheritance: 11 | -------------------------------------------------------------------------------- /doc/source/bottleneck.rst: -------------------------------------------------------------------------------- 1 | bottleneck package 2 | ================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | bottleneck.benchmark 10 | bottleneck.move 11 | bottleneck.nonreduce 12 | bottleneck.nonreduce_axis 13 | bottleneck.reduce 14 | bottleneck.slow 15 | bottleneck.src 16 | bottleneck.tests 17 | 18 | Module contents 19 | --------------- 20 | 21 | .. 
automodule:: bottleneck 22 | :members: 23 | :undoc-members: 24 | :show-inheritance: 25 | -------------------------------------------------------------------------------- /doc/source/bottleneck.slow.rst: -------------------------------------------------------------------------------- 1 | bottleneck.slow package 2 | ======================= 3 | 4 | Submodules 5 | ---------- 6 | 7 | bottleneck.slow.move module 8 | --------------------------- 9 | 10 | .. automodule:: bottleneck.slow.move 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | bottleneck.slow.nonreduce module 16 | -------------------------------- 17 | 18 | .. automodule:: bottleneck.slow.nonreduce 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | bottleneck.slow.nonreduce\_axis module 24 | -------------------------------------- 25 | 26 | .. automodule:: bottleneck.slow.nonreduce_axis 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | bottleneck.slow.reduce module 32 | ----------------------------- 33 | 34 | .. automodule:: bottleneck.slow.reduce 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | 40 | Module contents 41 | --------------- 42 | 43 | .. automodule:: bottleneck.slow 44 | :members: 45 | :undoc-members: 46 | :show-inheritance: 47 | -------------------------------------------------------------------------------- /doc/source/bottleneck.src.rst: -------------------------------------------------------------------------------- 1 | bottleneck.src package 2 | ====================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | bottleneck.src.bn\_config module 8 | -------------------------------- 9 | 10 | .. automodule:: bottleneck.src.bn_config 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | bottleneck.src.bn\_template module 16 | ---------------------------------- 17 | 18 | .. automodule:: bottleneck.src.bn_template 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | 24 | -------------------------------------------------------------------------------- /doc/source/bottleneck.tests.rst: -------------------------------------------------------------------------------- 1 | bottleneck.tests package 2 | ======================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | bottleneck.tests.input\_modification\_test module 8 | ------------------------------------------------- 9 | 10 | .. automodule:: bottleneck.tests.input_modification_test 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | bottleneck.tests.list\_input\_test module 16 | ----------------------------------------- 17 | 18 | .. automodule:: bottleneck.tests.list_input_test 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | bottleneck.tests.move\_test module 24 | ---------------------------------- 25 | 26 | .. automodule:: bottleneck.tests.move_test 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | bottleneck.tests.nonreduce\_axis\_test module 32 | --------------------------------------------- 33 | 34 | .. automodule:: bottleneck.tests.nonreduce_axis_test 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | bottleneck.tests.nonreduce\_test module 40 | --------------------------------------- 41 | 42 | .. automodule:: bottleneck.tests.nonreduce_test 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | bottleneck.tests.reduce\_test module 48 | ------------------------------------ 49 | 50 | .. 
automodule:: bottleneck.tests.reduce_test 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | bottleneck.tests.scalar\_input\_test module 56 | ------------------------------------------- 57 | 58 | .. automodule:: bottleneck.tests.scalar_input_test 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | bottleneck.tests.util module 64 | ---------------------------- 65 | 66 | .. automodule:: bottleneck.tests.util 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | 72 | Module contents 73 | --------------- 74 | 75 | .. automodule:: bottleneck.tests 76 | :members: 77 | :undoc-members: 78 | :show-inheritance: 79 | -------------------------------------------------------------------------------- /doc/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # la documentation build configuration file, created by 4 | # sphinx-quickstart on Thu Jan 14 16:31:34 2010. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys, os 15 | 16 | # If extensions (or modules to document with autodoc) are in another directory, 17 | # add these directories to sys.path here. If the directory is relative to the 18 | # documentation root, use os.path.abspath to make it absolute, like shown here. 19 | # sys.path.append(os.path.abspath('.')) 20 | sys.path.insert(0, os.path.abspath("../sphinxext")) 21 | if "READTHEDOCS" not in os.environ or not os.environ["READTHEDOCS"]: 22 | sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../..")) 23 | import bottleneck 24 | 25 | # -- General configuration ----------------------------------------------------- 26 | 27 | # Add any Sphinx extension module names here, as strings. They can be extensions 28 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 29 | extensions = [ 30 | "sphinx.ext.autodoc", 31 | "sphinx.ext.githubpages", 32 | "sphinx.ext.extlinks", 33 | "sphinx.ext.intersphinx", 34 | "numpydoc", 35 | "contributors", 36 | ] 37 | 38 | # Add any paths that contain templates here, relative to this directory. 39 | templates_path = ["_templates"] 40 | 41 | # The suffix of source filenames. 42 | source_suffix = ".rst" 43 | 44 | # The encoding of source files. 45 | # source_encoding = 'utf-8' 46 | 47 | # The master toctree document. 48 | master_doc = "index" 49 | 50 | # General information about the project. 51 | project = u"Bottleneck" 52 | copyright = u"2010-2019 Keith Goodman, 2019 Bottleneck Developers" 53 | 54 | # The version info for the project you're documenting, acts as replacement for 55 | # |version| and |release|, also used in various other places throughout the 56 | # built documents. 57 | # 58 | # The short X.Y version. 59 | version = bottleneck.__version__ 60 | # The full version, including alpha/beta/rc tags. 61 | release = bottleneck.__version__ 62 | 63 | # JP: added from sphinxdocs 64 | # autosummary_generate = True 65 | 66 | # The language for content autogenerated by Sphinx. Refer to documentation 67 | # for a list of supported languages. 
68 | # language = None 69 | 70 | # There are two options for replacing |today|: either, you set today to some 71 | # non-false value, then it is used: 72 | # today = '' 73 | # Else, today_fmt is used as the format for a strftime call. 74 | # today_fmt = '%B %d, %Y' 75 | 76 | # List of documents that shouldn't be included in the build. 77 | # unused_docs = [] 78 | 79 | # List of directories, relative to source directory, that shouldn't be searched 80 | # for source files. 81 | exclude_trees = [] 82 | 83 | # The reST default role (used for this markup: `text`) to use for all documents. 84 | # default_role = None 85 | 86 | # If true, '()' will be appended to :func: etc. cross-reference text. 87 | # add_function_parentheses = True 88 | 89 | # If true, the current module name will be prepended to all description 90 | # unit titles (such as .. function::). 91 | # add_module_names = True 92 | 93 | # If true, sectionauthor and moduleauthor directives will be shown in the 94 | # output. They are ignored by default. 95 | # show_authors = False 96 | 97 | # The name of the Pygments (syntax highlighting) style to use. 98 | pygments_style = "sphinx" 99 | 100 | # A list of ignored prefixes for module index sorting. 101 | # modindex_common_prefix = [] 102 | 103 | 104 | # -- Options for HTML output --------------------------------------------------- 105 | 106 | # The theme to use for HTML and HTML Help pages. Major themes that come with 107 | # Sphinx are currently 'default' and 'sphinxdoc'. 108 | html_theme = "default" 109 | 110 | # Theme options are theme-specific and customize the look and feel of a theme 111 | # further. For a list of options available for each theme, see the 112 | # documentation. 113 | html_theme_options = { 114 | "headtextcolor": "#333333", 115 | "sidebarbgcolor": "#dddddd", 116 | "footerbgcolor": "#cccccc", 117 | "footertextcolor": "black", 118 | "headbgcolor": "#cccccc", 119 | "sidebartextcolor": "#333333", 120 | "sidebarlinkcolor": "default", 121 | "relbarbgcolor": "#cccccc", 122 | "relbartextcolor": "default", 123 | "relbarlinkcolor": "default", 124 | "codebgcolor": "#ffffff", 125 | "textcolor": "#333333", 126 | "bgcolor": "#f5f5f5", 127 | } 128 | 129 | # Add any paths that contain custom themes here, relative to this directory. 130 | # html_theme_path = [] 131 | 132 | # The name for this set of Sphinx documents. If None, it defaults to 133 | # " v documentation". 134 | # html_title = None 135 | 136 | # A shorter title for the navigation bar. Default is the same as html_title. 137 | # html_short_title = None 138 | 139 | # The name of an image file (relative to this directory) to place at the top 140 | # of the sidebar. 141 | html_logo = "../image/icon.png" 142 | 143 | # The name of an image file (within the static path) to use as favicon of the 144 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 145 | # pixels large. 146 | # html_favicon = None 147 | 148 | # Add any paths that contain custom static files (such as style sheets) here, 149 | # relative to this directory. They are copied after the builtin static files, 150 | # so a file named "default.css" will overwrite the builtin "default.css". 151 | # html_static_path = ['_static'] 152 | 153 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 154 | # using the given strftime format. 155 | # html_last_updated_fmt = '%b %d, %Y' 156 | 157 | # If true, SmartyPants will be used to convert quotes and dashes to 158 | # typographically correct entities. 
159 | # html_use_smartypants = True 160 | 161 | # Custom sidebar templates, maps document names to template names. 162 | # html_sidebars = {} 163 | 164 | # Additional templates that should be rendered to pages, maps page names to 165 | # template names. 166 | # html_additional_pages = {} 167 | 168 | # If false, no module index is generated. 169 | # html_use_modindex = True 170 | 171 | # If false, no index is generated. 172 | # html_use_index = True 173 | 174 | # If true, the index is split into individual pages for each letter. 175 | # html_split_index = False 176 | 177 | # If true, links to the reST sources are added to the pages. 178 | # html_show_sourcelink = True 179 | 180 | # If true, an OpenSearch description file will be output, and all pages will 181 | # contain a tag referring to it. The value of this option must be the 182 | # base URL from which the finished HTML is served. 183 | # html_use_opensearch = '' 184 | 185 | # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 186 | # html_file_suffix = '' 187 | 188 | # Output file base name for HTML help builder. 189 | htmlhelp_basename = "bottleneckdoc" 190 | 191 | 192 | # -- Options for LaTeX output -------------------------------------------------- 193 | 194 | # The paper size ('letter' or 'a4'). 195 | # latex_paper_size = 'letter' 196 | 197 | # The font size ('10pt', '11pt' or '12pt'). 198 | # latex_font_size = '10pt' 199 | 200 | # Grouping the document tree into LaTeX files. List of tuples 201 | # (source start file, target name, title, author, documentclass [howto/manual]). 202 | latex_documents = [ 203 | ( 204 | "index", 205 | "bottleneck.tex", 206 | u"bottleneck Documentation", 207 | u"Keith Goodman and Bottleneck Developers", 208 | "manual", 209 | ) 210 | ] 211 | 212 | # The name of an image file (relative to this directory) to place at the top of 213 | # the title page. 214 | # latex_logo = None 215 | 216 | # For "manual" documents, if this is true, then toplevel headings are parts, 217 | # not chapters. 218 | # latex_use_parts = False 219 | 220 | # Additional stuff for the LaTeX preamble. 221 | # latex_preamble = '' 222 | 223 | # Documents to append as an appendix to all manuals. 224 | # latex_appendices = [] 225 | 226 | # If false, no module index is generated. 227 | # latex_use_modindex = True 228 | 229 | extlinks = {"issue": ("https://github.com/pydata/bottleneck/issues/%s", "#")} 230 | intersphinx_mapping = { 231 | "numpy": ("https://docs.scipy.org/doc/numpy/", None), 232 | "numpydoc": ("https://numpydoc.readthedocs.io/en/latest/", None), 233 | } 234 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | Bottleneck 3 | ========== 4 | 5 | Fast NumPy array functions written in C. 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | intro 11 | reference 12 | release 13 | license 14 | 15 | Indices and tables 16 | ================== 17 | 18 | * :ref:`genindex` 19 | * :ref:`modindex` 20 | * :ref:`search` 21 | -------------------------------------------------------------------------------- /doc/source/installing.rst: -------------------------------------------------------------------------------- 1 | .. 
_installing: 2 | 3 | Installing Bottleneck 4 | ===================== 5 | 6 | As bottleneck aims to provide high-performance, optimized numerical functions 7 | to all users, it is distributed as a source package (except via Anaconda) so 8 | that local compilers can perform the relevant optimizations. Accordingly, 9 | installation may take some additional steps compared to packages like numpy. 10 | 11 | Anaconda 12 | ~~~~~~~~ 13 | 14 | If you wish to avoid additional steps, we recommend using Anaconda or 15 | Miniconda. A pre-compiled version of bottleneck is installed by default. 16 | Users looking for optimal performance may benefit from uninstalling the 17 | pre-compiled version and following the steps below. 18 | 19 | Build dependencies 20 | ~~~~~~~~~~~~~~~~~~ 21 | 22 | Debian & Ubuntu 23 | --------------- 24 | 25 | The following build packages must be installed prior to installing bottleneck: 26 | 27 | .. code-block:: 28 | 29 | sudo apt install gcc python3-dev 30 | 31 | The Python development headers can be excluded if using Anaconda. 32 | 33 | RHEL, Fedora & CentOS 34 | --------------------- 35 | 36 | .. code-block:: 37 | 38 | sudo yum install gcc python3-devel 39 | 40 | Windows 41 | ------- 42 | 43 | The Python Wiki maintains detailed instructions on which Visual Studio 44 | version to install here: https://wiki.python.org/moin/WindowsCompilers 45 | 46 | 47 | pip & setuptools 48 | ~~~~~~~~~~~~~~~~ 49 | 50 | bottleneck leverages :pep:`517` and thus we generally recommend updating 51 | pip and setuptools before installing to leverage recent improvements. 52 | 53 | With Anaconda: 54 | 55 | .. code-block:: 56 | 57 | conda update setuptools pip 58 | 59 | And with pip: 60 | 61 | .. code-block:: 62 | 63 | pip install --upgrade setuptools pip 64 | 65 | 66 | Installation 67 | ~~~~~~~~~~~~ 68 | 69 | Finally, simply install with: 70 | 71 | .. code-block:: 72 | 73 | pip install bottleneck 74 | 75 | If you encounter any errors, please open an issue on our GitHub 76 | page: https://github.com/pydata/bottleneck/issues 77 | -------------------------------------------------------------------------------- /doc/source/intro.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../../README.rst 2 | :start-line: 4 3 | -------------------------------------------------------------------------------- /doc/source/license.rst: -------------------------------------------------------------------------------- 1 | Licenses 2 | ======== 3 | Bottleneck is distributed under a Simplified BSD license. Parts of NumPy and 4 | SciPy, which have BSD licenses, are included in Bottleneck. The setuptools project has a MIT license and is used for configuration and installation. 5 | 6 | Bottleneck License 7 | ~~~~~~~~~~~~~~~~~~ 8 | .. include:: ../../LICENSE 9 | 10 | 11 | Other licenses 12 | ~~~~~~~~~~~~~~ 13 | 14 | NumPy License 15 | ------------- 16 | .. include:: ../../LICENSES/NUMPY_LICENSE 17 | 18 | 19 | SciPy License 20 | ------------- 21 | .. include:: ../../LICENSES/SCIPY_LICENSE 22 | 23 | 24 | Setuptools License 25 | ------------------ 26 | .. 
include:: ../../LICENSES/SETUPTOOLS_LICENSE 27 | -------------------------------------------------------------------------------- /doc/source/reference.rst: -------------------------------------------------------------------------------- 1 | ================== 2 | Function reference 3 | ================== 4 | 5 | Bottleneck provides the following functions: 6 | 7 | ================================= ============================================================================================== 8 | reduce :meth:`nansum `, :meth:`nanmean `, 9 | :meth:`nanstd `, :meth:`nanvar `, 10 | :meth:`nanmin `, :meth:`nanmax `, 11 | :meth:`median `, :meth:`nanmedian `, 12 | :meth:`ss `, :meth:`nanargmin `, 13 | :meth:`nanargmax `, :meth:`anynan `, 14 | :meth:`allnan ` 15 | 16 | non-reduce :meth:`replace ` 17 | 18 | non-reduce with axis :meth:`rankdata `, :meth:`nanrankdata `, 19 | :meth:`partition `, :meth:`argpartition `, 20 | :meth:`push ` 21 | 22 | moving window :meth:`move_sum `, :meth:`move_mean `, 23 | :meth:`move_std `, :meth:`move_var `, 24 | :meth:`move_min `, :meth:`move_max `, 25 | :meth:`move_argmin `, :meth:`move_argmax `, 26 | :meth:`move_median `, :meth:`move_rank ` 27 | 28 | ================================= ============================================================================================== 29 | 30 | 31 | Reduce 32 | ------ 33 | 34 | Functions that reduce the input array along the specified axis. 35 | 36 | ------------ 37 | 38 | .. autofunction:: bottleneck.nansum 39 | 40 | ------------ 41 | 42 | .. autofunction:: bottleneck.nanmean 43 | 44 | ------------ 45 | 46 | .. autofunction:: bottleneck.nanstd 47 | 48 | ------------ 49 | 50 | .. autofunction:: bottleneck.nanvar 51 | 52 | ------------ 53 | 54 | .. autofunction:: bottleneck.nanmin 55 | 56 | ------------ 57 | 58 | .. autofunction:: bottleneck.nanmax 59 | 60 | ------------ 61 | 62 | .. autofunction:: bottleneck.median 63 | 64 | ------------ 65 | 66 | .. autofunction:: bottleneck.nanmedian 67 | 68 | ------------ 69 | 70 | .. autofunction:: bottleneck.ss 71 | 72 | ------------ 73 | 74 | .. autofunction:: bottleneck.nanargmin 75 | 76 | ------------ 77 | 78 | .. autofunction:: bottleneck.nanargmax 79 | 80 | 81 | ------------ 82 | 83 | .. autofunction:: bottleneck.anynan 84 | 85 | ------------ 86 | 87 | .. autofunction:: bottleneck.allnan 88 | 89 | 90 | Non-reduce 91 | ---------- 92 | 93 | Functions that do not reduce the input array and do not take `axis` as input. 94 | 95 | ------------ 96 | 97 | .. autofunction:: bottleneck.replace 98 | 99 | 100 | Non-reduce with axis 101 | -------------------- 102 | 103 | Functions that do not reduce the input array but operate along a specified 104 | axis. 105 | 106 | ------------ 107 | 108 | .. autofunction:: bottleneck.rankdata 109 | 110 | ------------ 111 | 112 | .. autofunction:: bottleneck.nanrankdata 113 | 114 | ------------ 115 | 116 | .. autofunction:: bottleneck.partition 117 | 118 | ------------ 119 | 120 | .. autofunction:: bottleneck.argpartition 121 | 122 | ------------ 123 | 124 | .. autofunction:: bottleneck.push 125 | 126 | 127 | Moving window functions 128 | ----------------------- 129 | 130 | Functions that operate along a (1d) moving window. 131 | 132 | ------------ 133 | 134 | .. autofunction:: bottleneck.move_sum 135 | 136 | ------------ 137 | 138 | .. autofunction:: bottleneck.move_mean 139 | 140 | ------------ 141 | 142 | .. autofunction:: bottleneck.move_std 143 | 144 | ------------ 145 | 146 | .. 
autofunction:: bottleneck.move_var 147 | 148 | ------------ 149 | 150 | .. autofunction:: bottleneck.move_min 151 | 152 | ------------ 153 | 154 | .. autofunction:: bottleneck.move_max 155 | 156 | ------------ 157 | 158 | .. autofunction:: bottleneck.move_argmin 159 | 160 | ------------ 161 | 162 | .. autofunction:: bottleneck.move_argmax 163 | 164 | ------------ 165 | 166 | .. autofunction:: bottleneck.move_median 167 | 168 | ------------ 169 | 170 | .. autofunction:: bottleneck.move_rank 171 | 172 | -------------------------------------------------------------------------------- /doc/source/release.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../../RELEASE.rst 2 | -------------------------------------------------------------------------------- /doc/source/releases/RELEASE.rst: -------------------------------------------------------------------------------- 1 | 2 | ============= 3 | Release Notes 4 | ============= 5 | 6 | :titlesonly: 7 | 8 | These are the major changes made in each release. For details of the changes 9 | see the commit log at https://github.com/pydata/bottleneck 10 | 11 | .. contents:: 12 | :depth: 1 13 | :local: 14 | 15 | .. include:: v1.4.0.rst 16 | 17 | .. include:: v1.3.2.rst 18 | 19 | .. include:: v1.3.1.rst 20 | 21 | .. include:: v1.3.0.rst 22 | 23 | .. include:: v1.2.1.rst 24 | 25 | .. include:: v1.2.0.rst 26 | 27 | .. include:: v1.1.0.rst 28 | 29 | .. include:: v1.0.0.rst 30 | 31 | .. include:: v0.8.0.rst 32 | 33 | .. include:: v0.7.0.rst 34 | 35 | .. include:: v0.6.0.rst 36 | 37 | .. include:: v0.5.0.rst 38 | 39 | .. include:: v0.4.3.rst 40 | 41 | .. include:: v0.4.2.rst 42 | 43 | .. include:: v0.4.1.rst 44 | 45 | .. include:: v0.4.0.rst 46 | 47 | .. include:: v0.3.0.rst 48 | 49 | .. include:: v0.2.0.rst 50 | 51 | .. include:: v0.1.0.rst 52 | -------------------------------------------------------------------------------- /doc/source/releases/v0.1.0.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 0.1.0 2 | ================ 3 | 4 | *Release date: 2010-12-10* 5 | 6 | Initial release. The three categories of Bottleneck functions: 7 | 8 | - Faster replacement for NumPy and SciPy functions 9 | - Moving window functions 10 | - Group functions that bin calculations by like-labeled elements 11 | -------------------------------------------------------------------------------- /doc/source/releases/v0.2.0.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 0.2.0 2 | ================ 3 | 4 | *Release date: 2010-12-27* 5 | 6 | The second release of Bottleneck is faster, contains more functions, and 7 | supports more dtypes. 8 | 9 | Faster 10 | ~~~~~~ 11 | 12 | - All functions faster (less overhead) when output is not a scalar 13 | - Faster nanmean() for 2d, 3d arrays containing NaNs when axis is not None 14 | 15 | New functions 16 | ~~~~~~~~~~~~~ 17 | 18 | - nanargmin() 19 | - nanargmax() 20 | - nanmedian() 21 | 22 | Enhancements 23 | ~~~~~~~~~~~~ 24 | 25 | - Added support for float32 26 | - Fallback to slower, non-Cython functions for unaccelerated ndim/dtype 27 | - Scipy is no longer a dependency 28 | - Added support for older versions of NumPy (1.4.1) 29 | - All functions are now templated for dtype and axis 30 | - Added a sandbox for prototyping of new Bottleneck functions 31 | - Rewrote benchmarking code 32 | 33 | Contributors 34 | ~~~~~~~~~~~~ 35 | 36 | .. 
contributors:: v0.2.0..v0.1.0 37 | -------------------------------------------------------------------------------- /doc/source/releases/v0.3.0.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 0.3.0 2 | ================ 3 | 4 | *Release date: 2010-01-19* 5 | 6 | The third release of Bottleneck is twice as fast for small input arrays and 7 | contains 10 new functions. 8 | 9 | Faster 10 | ~~~~~~ 11 | 12 | - All functions are faster (less overhead in selector functions) 13 | 14 | New functions 15 | ~~~~~~~~~~~~~ 16 | 17 | - nansum() 18 | - move_sum() 19 | - move_nansum() 20 | - move_mean() 21 | - move_std() 22 | - move_nanstd() 23 | - move_min() 24 | - move_nanmin() 25 | - move_max() 26 | - move_nanmax() 27 | 28 | Enhancements 29 | ~~~~~~~~~~~~ 30 | 31 | - You can now specify the dtype and axis to use in the benchmark timings 32 | - Improved documentation and more unit tests 33 | 34 | Breaks from 0.2.0 35 | ~~~~~~~~~~~~~~~~~ 36 | 37 | - Moving window functions now default to axis=-1 instead of axis=0 38 | - Low-level moving window selector functions no longer take window as input 39 | 40 | Bug fix 41 | ~~~~~~~ 42 | 43 | - int input array resulted in call to slow, non-cython version of move_nanmean 44 | 45 | Contributors 46 | ~~~~~~~~~~~~ 47 | 48 | .. contributors:: v0.2.0..v0.3.0 49 | -------------------------------------------------------------------------------- /doc/source/releases/v0.4.0.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 0.4.0 2 | ================ 3 | 4 | *Release date: 2011-03-08* 5 | 6 | The fourth release of Bottleneck contains new functions and bug fixes. 7 | Separate source code distributions are now made for 32 bit and 64 bit 8 | operating systems. 9 | 10 | New functions 11 | ~~~~~~~~~~~~~ 12 | 13 | - rankdata() 14 | - nanrankdata() 15 | 16 | Enhancements 17 | ~~~~~~~~~~~~ 18 | 19 | - Optionally specify the shapes of the arrays used in benchmark 20 | - Can specify which input arrays to fill with one-third NaNs in benchmark 21 | 22 | Breaks from 0.3.0 23 | ~~~~~~~~~~~~~~~~~ 24 | 25 | - Removed group_nanmean() function 26 | - Bump dependency from NumPy 1.4.1 to NumPy 1.5.1 27 | - C files are now generated with Cython 0.14.1 instead of 0.13 28 | 29 | Bug fixes 30 | ~~~~~~~~~ 31 | 32 | - Some functions gave wrong output dtype for some input dtypes on 32 33 | bit OS (:issue:`6`) 34 | - Some functions choked on size zero input arrays (:issue:`7`) 35 | - Segmentation fault with Cython 0.14.1 (but not 0.13) (:issue:`8`) 36 | 37 | Contributors 38 | ~~~~~~~~~~~~ 39 | 40 | .. contributors:: v0.3.0..v0.4.0 41 | -------------------------------------------------------------------------------- /doc/source/releases/v0.4.1.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 0.4.1 2 | ================ 3 | 4 | *Release date: 2011-03-08* 5 | 6 | This is a bug fix release. 7 | 8 | The low-level functions nanstd_3d_int32_axis1 and nanstd_3d_int64_axis1, 9 | called by bottleneck.nanstd(), wrote beyond the memory owned by the output 10 | array if arr.shape[1] == 0 and arr.shape[0] > arr.shape[2], where arr is 11 | the input array. 12 | 13 | Thanks to Christoph Gohlke for finding an example to demonstrate the bug. 14 | 15 | Contributors 16 | ~~~~~~~~~~~~ 17 | 18 | .. 
contributors:: v0.4.0..v0.4.1 19 | -------------------------------------------------------------------------------- /doc/source/releases/v0.4.2.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 0.4.2 2 | ================ 3 | 4 | *Release date: 2011-03-08* 5 | 6 | This is a bug fix release. 7 | 8 | Same bug fixed in Bottleneck 0.4.1 for nanstd() was fixed for nanvar() in 9 | this release. Thanks again to Christoph Gohlke for finding the bug. 10 | 11 | Contributors 12 | ~~~~~~~~~~~~ 13 | 14 | .. contributors:: v0.4.1..v0.4.2 15 | -------------------------------------------------------------------------------- /doc/source/releases/v0.4.3.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 0.4.3 2 | ================ 3 | 4 | *Release date: 2011-03-17* 5 | 6 | This is a bug fix release. 7 | 8 | Bug fixes 9 | ~~~~~~~~~ 10 | 11 | - median and nanmedian modified (partial sort) input array (:issue:`11`) 12 | - nanmedian wrong when odd number of elements with all but last a NaN 13 | (:issue:`12`) 14 | 15 | Enhancement 16 | ~~~~~~~~~~~ 17 | 18 | - Lazy import of SciPy (rarely used) speeds Bottleneck import 3x 19 | 20 | Contributors 21 | ~~~~~~~~~~~~ 22 | 23 | .. contributors:: v0.4.2..v0.4.3 24 | -------------------------------------------------------------------------------- /doc/source/releases/v0.5.0.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 0.5.0 2 | ================ 3 | 4 | *Release date: 2011-06-13* 5 | 6 | The fifth release of bottleneck adds four new functions, comes in a single 7 | source distribution instead of separate 32 and 64 bit versions, and contains 8 | bug fixes. 9 | 10 | J. David Lee wrote the C-code implementation of the double heap moving 11 | window median. 12 | 13 | New functions 14 | ~~~~~~~~~~~~~ 15 | 16 | - move_median(), moving window median 17 | - partsort(), partial sort 18 | - argpartsort() 19 | - ss(), sum of squares, faster version of scipy.stats.ss 20 | 21 | Changes 22 | ~~~~~~~ 23 | 24 | - Single source distribution instead of separate 32 and 64 bit versions 25 | - nanmax and nanmin now follow Numpy 1.6 (not 1.5.1) when input is all NaN 26 | 27 | Bug fixes 28 | ~~~~~~~~~ 29 | 30 | - Support python 2.5 by importing `with` statement (:issue:`14`) 31 | - nanmedian wrong for particular ordering of NaN and non-NaN elements 32 | (:issue:`22`) 33 | - argpartsort, nanargmin, nanargmax returned wrong dtype on 64-bit Windows 34 | (:issue:`26`) 35 | - rankdata and nanrankdata crashed on 64-bit Windows (:issue:`29`) 36 | 37 | Contributors 38 | ~~~~~~~~~~~~ 39 | 40 | .. contributors:: v0.4.3..v0.5.0 41 | -------------------------------------------------------------------------------- /doc/source/releases/v0.6.0.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 0.6.0 2 | ================ 3 | 4 | *Release date: 2012-06-04* 5 | 6 | Thanks to Dougal Sutherland, Bottleneck now runs on Python 3.2. 
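Two of the additions listed under "New functions" below can be illustrated with a short snippet (an illustrative sketch, not taken from the original release notes)::

    import numpy as np
    import bottleneck as bn

    arr = np.array([1.0, np.nan, 3.0])
    bn.replace(arr, np.nan, 0)   # in place: arr is now [1., 0., 3.]
    bn.anynan(arr)               # False, since the NaN has been replaced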
7 | 8 | New functions 9 | ~~~~~~~~~~~~~ 10 | 11 | - replace(arr, old, new), e.g, replace(arr, np.nan, 0) 12 | - nn(arr, arr0, axis) nearest neighbor and its index of 1d arr0 in 2d arr 13 | - anynan(arr, axis) faster alternative to np.isnan(arr).any(axis) 14 | - allnan(arr, axis) faster alternative to np.isnan(arr).all(axis) 15 | 16 | Enhancements 17 | ~~~~~~~~~~~~ 18 | 19 | - Python 3.2 support (may work on earlier versions of Python 3) 20 | - C files are now generated with Cython 0.16 instead of 0.14.1 21 | - Upgrade numpydoc from 0.3.1 to 0.4 to support Sphinx 1.0.1 22 | 23 | Breaks from 0.5.0 24 | ~~~~~~~~~~~~~~~~~ 25 | 26 | - Support for Python 2.5 dropped 27 | - Default axis for benchmark suite is now axis=1 (was 0) 28 | 29 | Bug fixes 30 | ~~~~~~~~~ 31 | 32 | - Confusing error message in partsort and argpartsort (:issue:`31`) 33 | - Update path in MANIFEST.in (:issue:`32`) 34 | - Wrong output for very large (2**31) input arrays (:issue:`35`) 35 | 36 | Contributors 37 | ~~~~~~~~~~~~ 38 | 39 | .. contributors:: v0.5.0..v0.6.0 40 | -------------------------------------------------------------------------------- /doc/source/releases/v0.7.0.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 0.7.0 2 | ================ 3 | 4 | *Release date: 2013-09-10* 5 | 6 | Enhancements 7 | ~~~~~~~~~~~~ 8 | 9 | - bn.rankdata() is twice as fast (with input a = np.random.rand(1000000)) 10 | - C files now included in github repo; cython not needed to try latest 11 | - C files are now generated with Cython 0.19.1 instead of 0.16 12 | - Test bottleneck across multiple python/numpy versions using tox 13 | - Source tarball size cut in half 14 | 15 | Bug fixes 16 | ~~~~~~~~~ 17 | 18 | - move_std, move_nanstd return inappropriate NaNs (sqrt of negative #) 19 | (:issue:`50`) 20 | - `make test` fails on some computers (:issue:`52`) 21 | - scipy optional yet some unit tests depend on scipy (:issue:`57`) 22 | - now works on Mac OS X 10.8 using clang compiler (:issue:`49`, :issue:`55`) 23 | - nanstd([1.0], ddof=1) and nanvar([1.0], ddof=1) crash (:issue:`60`) 24 | 25 | Contributors 26 | ~~~~~~~~~~~~ 27 | 28 | .. contributors:: v0.6.0..v0.7.0 29 | -------------------------------------------------------------------------------- /doc/source/releases/v0.8.0.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 0.8.0 2 | ================ 3 | 4 | *Release date: 2014-01-21* 5 | 6 | This version of Bottleneck requires NumPy 1.8. 7 | 8 | Breaks from 0.7.0 9 | ~~~~~~~~~~~~~~~~~ 10 | 11 | - This version of Bottleneck requires NumPy 1.8 12 | - nanargmin and nanargmax behave like the corresponding functions in NumPy 1.8 13 | 14 | Bug fixes 15 | ~~~~~~~~~ 16 | 17 | - nanargmax/nanargmin wrong for redundant max/min values in 1d int arrays 18 | 19 | Contributors 20 | ~~~~~~~~~~~~ 21 | 22 | .. contributors:: v0.7.0..v0.8.0 23 | -------------------------------------------------------------------------------- /doc/source/releases/v1.0.0.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 1.0.0 2 | ================ 3 | 4 | *Release date: 2015-02-06* 5 | 6 | This release is a complete rewrite of Bottleneck. 
7 | 8 | Faster 9 | ~~~~~~ 10 | 11 | - "python setup.py build" is 18.7 times faster 12 | - Function-call overhead cut in half---a big speed up for small input arrays 13 | - Arbitrary ndim input arrays accelerated; previously only 1d, 2d, and 3d 14 | - bn.nanrankdata is twice as fast for float input arrays 15 | - bn.move_max, bn.move_min are faster for int input arrays 16 | - No speed penalty for reducing along all axes when input is Fortran ordered 17 | 18 | Smaller 19 | ~~~~~~~ 20 | 21 | - Compiled binaries 14.1 times smaller 22 | - Source tarball 4.7 times smaller 23 | - 9.8 times less C code 24 | - 4.3 times less Cython code 25 | - 3.7 times less Python code 26 | 27 | Beware 28 | ~~~~~~ 29 | 30 | - Requires numpy 1.9.1 31 | - Single API, e.g.: bn.nansum instead of bn.nansum and nansum_2d_float64_axis0 32 | - On 64-bit systems bn.nansum(int32) returns int32 instead of int64 33 | - bn.nansum now returns 0 for all NaN slices (as does numpy 1.9.1) 34 | - Reducing over all axes returns, e.g., 6.0; previously np.float64(6.0) 35 | - bn.ss() now has default axis=None instead of axis=0 36 | - bn.nn() is no longer in bottleneck 37 | 38 | min_count 39 | ~~~~~~~~~ 40 | 41 | - Previous releases had moving window function pairs: move_sum, move_nansum 42 | - This release only has half of the pairs: move_sum 43 | - Instead a new input parameter, min_count, has been added 44 | - min_count=None same as old move_sum; min_count=1 same as old move_nansum 45 | - If # non-NaN values in window < min_count, then NaN assigned to the window 46 | - Exception: move_median does not take min_count as input 47 | 48 | Bug Fixes 49 | ~~~~~~~~~ 50 | 51 | - Can now install bottleneck with pip even if numpy is not already installed 52 | - bn.move_max, bn.move_min now return float32 for float32 input 53 | 54 | Contributors 55 | ~~~~~~~~~~~~ 56 | 57 | .. contributors:: v0.8.0..v1.0.0 -------------------------------------------------------------------------------- /doc/source/releases/v1.1.0.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 1.1.0 2 | ================ 3 | 4 | *Release date: 2016-06-22* 5 | 6 | This release makes Bottleneck more robust, releases GIL, adds new functions. 
7 | 8 | More Robust 9 | ~~~~~~~~~~~ 10 | 11 | - :func:`bn.move_median` can now handle NaNs and `min_count` parameter 12 | - :func:`bn.move_std` is slower but numerically more stable 13 | - Bottleneck no longer crashes on byte-swapped input arrays 14 | 15 | Faster 16 | ~~~~~~ 17 | 18 | - All Bottleneck functions release the GIL 19 | - median is faster if the input array contains NaN 20 | - move_median is faster for input arrays that contain lots of NaNs 21 | - No speed penalty for median, nanmedian, nanargmin, nanargmax for Fortran 22 | ordered input arrays when axis is None 23 | - Function call overhead cut in half for reduction along all axes (axis=None) 24 | if the input array satisfies at least one of the following properties: 1d, 25 | C contiguous, F contiguous 26 | - Reduction along all axes (axis=None) is more than twice as fast for long, 27 | narrow input arrays such as a (1000000, 2) C contiguous array and a 28 | (2, 1000000) F contiguous array 29 | 30 | New Functions 31 | ~~~~~~~~~~~~~ 32 | 33 | - move_var 34 | - move_argmin 35 | - move_argmax 36 | - move_rank 37 | - push 38 | 39 | Beware 40 | ~~~~~~ 41 | 42 | - :func:`bn.median` now returns NaN for a slice that contains one or more NaNs 43 | - Instead of using the distutils default, the '-O2' C compiler flag is forced 44 | - :func:`bn.move_std` output changed when mean is large compared to standard 45 | deviation 46 | - Fixed: Non-accelerated moving window functions used min_count incorrectly 47 | - :func:`bn.move_median` is a bit slower for float input arrays that do not 48 | contain NaN 49 | 50 | Thanks 51 | ~~~~~~ 52 | 53 | Alphabeticaly by last name 54 | 55 | - Alessandro Amici worked on setup.py 56 | - Pietro Battiston modernized bottleneck installation 57 | - Moritz E. Beber set up continuous integration with Travis CI 58 | - Jaime Frio improved the numerical stability of move_std 59 | - Christoph Gohlke revived Windows compatibility 60 | - Jennifer Olsen added NaN support to move_median 61 | 62 | Contributors 63 | ~~~~~~~~~~~~ 64 | 65 | .. contributors:: v1.0.0..v1.1.0 -------------------------------------------------------------------------------- /doc/source/releases/v1.2.0.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 1.2.0 2 | ================ 3 | 4 | *Release date: 2016-10-20* 5 | 6 | This release is a complete rewrite of Bottleneck. 7 | 8 | Port to C 9 | ~~~~~~~~~ 10 | 11 | - Bottleneck is now written in C 12 | - Cython is no longer a dependency 13 | - Source tarball size reduced by 80% 14 | - Build time reduced by 66% 15 | - Install size reduced by 45% 16 | 17 | Redesign 18 | ~~~~~~~~ 19 | 20 | - Besides porting to C, much of bottleneck has been redesigned to be 21 | simpler and faster. For example, bottleneck now uses its own N-dimensional 22 | array iterators, reducing function call overhead. 23 | 24 | New features 25 | ~~~~~~~~~~~~ 26 | 27 | - The new function bench_detailed runs a detailed performance benchmark on 28 | a single bottleneck function. 29 | - Bottleneck can be installed on systems that do not yet have NumPy 30 | installed. Previously that only worked on some systems. 31 | 32 | Beware 33 | ~~~~~~ 34 | 35 | - Functions partsort and argpartsort have been renamed to partition and 36 | argpartition to match NumPy. Additionally the meaning of the input 37 | arguments have changed: :func:`bn.partsort(a, n)` is now equivalent to 38 | :func:`bn.partition(a, kth=n-1)`. Similarly for bn.argpartition. 
39 | - The keyword for array input has been changed from `arr` to `a` in all 40 | functions. It now matches NumPy. 41 | 42 | Thanks 43 | ~~~~~~ 44 | 45 | - Moritz E. Beber: continuous integration with AppVeyor 46 | - Christoph Gohlke: Windows compatibility 47 | - Jennifer Olsen: comments and suggestions 48 | - A special thanks to the Cython developers. The quickest way to appreciate 49 | their work is to remove Cython from your project. It is not easy. 50 | 51 | Contributors 52 | ~~~~~~~~~~~~ 53 | 54 | .. contributors:: v1.1.0..v1.2.0 55 | -------------------------------------------------------------------------------- /doc/source/releases/v1.2.1.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 1.2.1 2 | ================ 3 | 4 | *Release date: 2017-05-15* 5 | 6 | This release adds support for NumPy's relaxed strides checking and 7 | fixes a few bugs. 8 | 9 | Bug Fixes 10 | ~~~~~~~~~ 11 | 12 | - Installing bottleneck when two versions of NumPy are present (:issue:`156`) 13 | - Compiling on Ubuntu 14.04 inside a Windows 7 WMware (:issue:`157`) 14 | - Occasional segmentation fault in :func:`bn.nanargmin`, :func:`nanargmax`, 15 | :func:`median`, and :func:`nanmedian` when all of the following conditions 16 | are met: axis is None, input array is 2d or greater, and input array is not C 17 | contiguous. (:issue:`159`) 18 | - Reducing np.array([2**31], dtype=np.int64) overflows on Windows 19 | (:issue:`163`) 20 | 21 | Contributors 22 | ~~~~~~~~~~~~ 23 | 24 | .. contributors:: v1.2.0..v1.2.1 25 | -------------------------------------------------------------------------------- /doc/source/releases/v1.3.0.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 1.3.0 2 | ================ 3 | 4 | *Release date: 2019-11-12* 5 | 6 | Project Updates 7 | ~~~~~~~~~~~~~~~ 8 | - Bottleneck has a new maintainer, Christopher Whelan (``@qwhelan`` on GitHub). 9 | - Documentation now hosted at https://bottleneck.readthedocs.io 10 | - 1.3.x will be the last release to support Python 2.7 11 | - Bottleneck now supports and is tested against Python 3.7 and 3.8. 12 | (:issue:`211`, :issue:`268`) 13 | - The ``LICENSE`` file has been restructured to only include the license for 14 | the Bottleneck project to aid license audit tools. There has been no 15 | change to the licensing of Bottleneck. 16 | 17 | - Licenses for other projects incorporated by Bottleneck are now reproduced 18 | in full in separate files in the ``LICENSES/`` directory 19 | (eg, ``LICENSES/NUMPY_LICENSE``) 20 | - All licenses have been updated. Notably, setuptools is now MIT licensed 21 | and no longer under the ambiguous dual PSF/Zope license. 
22 | - Bottleneck now uses :pep:`518` for specifying build dependencies, with per 23 | Python version specifications (:issue:`247`) 24 | 25 | 26 | Enhancements 27 | ~~~~~~~~~~~~ 28 | - Remove ``numpydoc`` package from Bottleneck source distribution 29 | - :func:`bottleneck.slow.reduce.nansum` and 30 | :func:`bottleneck.slow.reduce.ss` now longer coerce output to have 31 | the same dtype as input 32 | - Test (tox, travis, appveyor) against latest ``numpy`` (in conda) 33 | - Performance benchmarking also available via ``asv`` 34 | - ``versioneer`` now used for versioning (:issue:`213`) 35 | - Test suite now uses ``pytest`` as ``nose`` is deprecated (:issue:`222`) 36 | - ``python setup.py build_ext --inplace`` is now incremental (:issue:`224`) 37 | - ``python setup.py clean`` now cleans all artifacts (:issue:`226`) 38 | - Compiler feature support now identified by testing rather than 39 | hardcoding (:issue:`227`) 40 | - The ``BN_OPT_3`` macro allows selective use of ``-O3`` at the function 41 | level (:issue:`223`) 42 | - Contributors are now automatically cited in the release notes (:issue:`244`) 43 | 44 | Performance 45 | ~~~~~~~~~~~ 46 | - Speed up :func:`bottleneck.reduce.anynan` and 47 | :func:`bottleneck.reduce.allnan` by 2x via ``BN_OPT_3`` (:issue:`223`) 48 | - All functions covered by ``asv`` benchmarks 49 | - :func:`bottleneck.nonreduce.replace` speedup of 4x via more explicit 50 | typing (:issue:`239`) 51 | - :func:`bottleneck.reduce.median` up to 2x faster for Fortran-ordered 52 | arrays (:issue:`248`) 53 | 54 | 55 | Bug Fixes 56 | ~~~~~~~~~ 57 | 58 | - Documentation fails to build on Python 3 (:issue:`170`) 59 | - :func:`bottleneck.benchmark.bench` crashes on python 3.6.3, 60 | numpy 1.13.3 (:issue:`175`) 61 | - :func:`bottleneck.nonreduce_axis.push` raises when :code:`n=None` is 62 | explicitly passed (:issue:`178`) 63 | - :func:`bottleneck.reduce.nansum` wrong output when 64 | :code:`a = np.ones((2, 2))[..., np.newaxis]` same issue of other reduce 65 | functions (:issue:`183`) 66 | - Silenced FutureWarning from NumPy in the slow version of move 67 | functions (:issue:`194`) 68 | - Installing bottleneck onto a system that does not already have Numpy 69 | (:issue:`195`) 70 | - Memory leaked when input was not a NumPy array (:issue:`201`) 71 | - Tautological comparison in :func:`bottleneck.move.move_rank` removed 72 | (:issue:`207`, :issue:`212`) 73 | 74 | Cleanup 75 | ~~~~~~~ 76 | 77 | - The ``ez_setup.py`` module is no longer packaged (:issue:`211`) 78 | - Building documentation is now self-contained in ``make doc`` (:issue:`214`) 79 | - Codebase now ``flake8`` compliant and run on every commit 80 | - Codebase now uses ``black`` for autoformatting (:issue:`253`) 81 | 82 | Contributors 83 | ~~~~~~~~~~~~ 84 | 85 | .. contributors:: v1.2.1..v1.3.0 86 | -------------------------------------------------------------------------------- /doc/source/releases/v1.3.1.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 1.3.1 2 | ================ 3 | 4 | *Release date: 2019-11-18* 5 | 6 | Bug Fixes 7 | ~~~~~~~~~ 8 | - Fix memory leak in :func:`bottleneck.nanmedian` with the default 9 | argument of ``axis=None``. Thanks to ``@jsmodic`` for reporting! 10 | (:issue:`276`, :issue:`278`) 11 | - Add regression test for memory leak case (:issue:`279`) 12 | 13 | Contributors 14 | ~~~~~~~~~~~~ 15 | 16 | .. 
contributors:: v1.3.0..v1.3.1 17 | -------------------------------------------------------------------------------- /doc/source/releases/v1.3.2.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 1.3.2 2 | ================ 3 | 4 | *Release date: 2020-02-20* 5 | 6 | Bug Fixes 7 | ~~~~~~~~~ 8 | - Explicitly declare numpy version dependency in ``pyproject.toml`` 9 | for Python 3.8, fixing certain cases where ``pip install`` would 10 | fail. Thanks to ``@goggle``, ``@astrofrog``, and ``@0xb0b`` for 11 | reporting. (:issue:`277`) 12 | 13 | Contributors 14 | ~~~~~~~~~~~~ 15 | 16 | .. contributors:: v1.3.1..v1.3.2 17 | -------------------------------------------------------------------------------- /doc/source/releases/v1.4.0.rst: -------------------------------------------------------------------------------- 1 | Bottleneck 1.4.0 2 | ================ 3 | 4 | *Release date: in development* 5 | 6 | Bug Fixes 7 | ~~~~~~~~~ 8 | 9 | 10 | Contributors 11 | ~~~~~~~~~~~~ 12 | 13 | .. contributors:: v1.3.x..HEAD 14 | -------------------------------------------------------------------------------- /doc/sphinxext/announce.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding:utf-8 -*- 3 | """ 4 | This file was copied from pandas.doc.sphinxext.announce 5 | 6 | Script to generate contributor and pull request lists 7 | 8 | This script generates contributor and pull request lists for release 9 | announcements using Github v3 protocol. Use requires an authentication token in 10 | order to have sufficient bandwidth, you can get one following the directions at 11 | `_ 12 | Don't add any scope, as the default is read access to public information. The 13 | token may be stored in an environment variable as you only get one chance to 14 | see it. 15 | 16 | Usage:: 17 | 18 | $ ./scripts/announce.py 19 | 20 | The output is utf8 rst. 21 | 22 | Dependencies 23 | ------------ 24 | 25 | - gitpython 26 | - pygithub 27 | 28 | Some code was copied from scipy `tools/gh_lists.py` and `tools/authors.py`. 29 | 30 | Examples 31 | -------- 32 | 33 | From the bash command line with $GITHUB token. 34 | 35 | $ ./scripts/announce.py $GITHUB v1.11.0..v1.11.1 > announce.rst 36 | 37 | """ 38 | import codecs 39 | import os 40 | import re 41 | import textwrap 42 | 43 | from git import Repo 44 | 45 | UTF8Writer = codecs.getwriter("utf8") 46 | this_repo = Repo(os.path.join(os.path.dirname(__file__), "..", "..")) 47 | 48 | author_msg = """\ 49 | A total of %d people contributed patches to this release. People with a 50 | "+" by their names contributed a patch for the first time. 51 | """ 52 | 53 | pull_request_msg = """\ 54 | A total of %d pull requests were merged for this release. 55 | """ 56 | 57 | 58 | def get_authors(revision_range): 59 | pat = "^.*\\t(.*)$" 60 | lst_release, cur_release = [r.strip() for r in revision_range.split("..")] 61 | 62 | # authors, in current release and previous to current release. 63 | cur = set(re.findall(pat, this_repo.git.shortlog("-s", revision_range), re.M)) 64 | pre = set(re.findall(pat, this_repo.git.shortlog("-s", lst_release), re.M)) 65 | 66 | # Homu is the author of auto merges, clean him out. 67 | cur.discard("Homu") 68 | pre.discard("Homu") 69 | 70 | # Append '+' to new authors. 
71 | authors = [s + " +" for s in cur - pre] + [s for s in cur & pre] 72 | authors.sort() 73 | return authors 74 | 75 | 76 | def get_pull_requests(repo, revision_range): 77 | prnums = [] 78 | 79 | # From regular merges 80 | merges = this_repo.git.log("--oneline", "--merges", revision_range) 81 | issues = re.findall("Merge pull request \\#(\\d*)", merges) 82 | prnums.extend(int(s) for s in issues) 83 | 84 | # From Homu merges (Auto merges) 85 | issues = re.findall("Auto merge of \\#(\\d*)", merges) 86 | prnums.extend(int(s) for s in issues) 87 | 88 | # From fast forward squash-merges 89 | commits = this_repo.git.log( 90 | "--oneline", "--no-merges", "--first-parent", revision_range 91 | ) 92 | issues = re.findall("^.*\\(\\#(\\d+)\\)$", commits, re.M) 93 | prnums.extend(int(s) for s in issues) 94 | 95 | # get PR data from github repo 96 | prnums.sort() 97 | prs = [repo.get_pull(n) for n in prnums] 98 | return prs 99 | 100 | 101 | def build_components(revision_range, heading="Contributors"): 102 | lst_release, cur_release = [r.strip() for r in revision_range.split("..")] 103 | authors = get_authors(revision_range) 104 | 105 | return { 106 | "heading": heading, 107 | "author_message": author_msg % len(authors), 108 | "authors": authors, 109 | } 110 | 111 | 112 | def build_string(revision_range, heading="Contributors"): 113 | components = build_components(revision_range, heading=heading) 114 | components["uline"] = "=" * len(components["heading"]) 115 | components["authors"] = "* " + "\n* ".join(components["authors"]) 116 | 117 | tpl = textwrap.dedent( 118 | """\ 119 | {heading} 120 | {uline} 121 | 122 | {author_message} 123 | {authors}""" 124 | ).format(**components) 125 | return tpl 126 | 127 | 128 | def main(revision_range): 129 | # document authors 130 | text = build_string(revision_range) 131 | print(text) 132 | 133 | 134 | if __name__ == "__main__": 135 | from argparse import ArgumentParser 136 | 137 | parser = ArgumentParser(description="Generate author lists for release") 138 | parser.add_argument("revision_range", help="..") 139 | args = parser.parse_args() 140 | main(args.revision_range) 141 | -------------------------------------------------------------------------------- /doc/sphinxext/contributors.py: -------------------------------------------------------------------------------- 1 | """This file is copied from pandas.doc.sphinxext.contributors 2 | 3 | Sphinx extension for listing code contributors to a release. 4 | 5 | Usage:: 6 | 7 | .. contributors:: v0.23.0..v0.23.1 8 | 9 | This will be replaced with a message indicating the number of 10 | code contributors and commits, and then list each contributor 11 | individually. 
12 | """ 13 | from announce import build_components 14 | from docutils import nodes 15 | from docutils.parsers.rst import Directive 16 | import git 17 | 18 | 19 | class ContributorsDirective(Directive): 20 | required_arguments = 1 21 | name = "contributors" 22 | 23 | def run(self): 24 | range_ = self.arguments[0] 25 | if range_.endswith("x..HEAD"): 26 | return [nodes.paragraph(), nodes.bullet_list()] 27 | try: 28 | components = build_components(range_) 29 | except git.GitCommandError as exc: 30 | return [ 31 | self.state.document.reporter.warning( 32 | "Cannot find contributors for range '{}': {}".format(range_, exc), 33 | line=self.lineno, 34 | ) 35 | ] 36 | else: 37 | message = nodes.paragraph() 38 | message += nodes.Text(components["author_message"]) 39 | 40 | listnode = nodes.bullet_list() 41 | 42 | for author in components["authors"]: 43 | para = nodes.paragraph() 44 | para += nodes.Text(author) 45 | listnode += nodes.list_item("", para) 46 | 47 | return [message, listnode] 48 | 49 | 50 | def setup(app): 51 | app.add_directive("contributors", ContributorsDirective) 52 | 53 | return {"version": "0.1", "parallel_read_safe": True, "parallel_write_safe": True} 54 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools", 4 | "versioneer", 5 | # Comments on numpy build requirement range: 6 | # 7 | # 1. >=2.0.x is the numpy requirement for wheel builds for distribution 8 | # on PyPI - building against 2.x yields wheels that are also 9 | # ABI-compatible with numpy 1.x at runtime. 10 | # 2. Note that building against numpy 1.x works fine too - users and 11 | # redistributors can do this by installing the numpy version they like 12 | # and disabling build isolation. 13 | # 3. The <2.(N+3) upper bound is for matching the numpy deprecation policy, 14 | # it should not be loosened more than that. 15 | "numpy>=2,<2.5" 16 | ] 17 | build-backend = "setuptools.build_meta" 18 | 19 | 20 | [tool.pytest.ini_options] 21 | addopts = "-l" 22 | filterwarnings = ["error"] 23 | -------------------------------------------------------------------------------- /readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | sphinx: 4 | configuration: doc/source/conf.py 5 | 6 | formats: all 7 | 8 | python: 9 | version: 3.7 10 | install: 11 | - method: pip 12 | path: . 13 | extra_requirements: 14 | - doc 15 | 16 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [versioneer] 2 | VCS = git 3 | style = pep440-pre 4 | versionfile_source = bottleneck/_version.py 5 | versionfile_build = bottleneck/_version.py 6 | tag_prefix = v 7 | parentdir_prefix = bottleneck- 8 | 9 | [flake8] 10 | max-line-length = 88 11 | ignore = \ 12 | E203 # Defer to how black formats 13 | C408 # Unnecessary dict call - rewrite as a literal. 
14 | W503 # Line break before binary operator - conflicts with W504 15 | per-file-ignores= \ 16 | bottleneck/__init__.py:F401 17 | exclude = \ 18 | doc/* 19 | versioneer.py 20 | bottleneck/_version.py 21 | env 22 | .eggs 23 | .tox 24 | build 25 | 26 | [tool:pytest] 27 | norecursedirs = 28 | .* 29 | build 30 | dist 31 | *.egg 32 | venv 33 | env 34 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import shutil 5 | import sys 6 | from distutils.command.config import config as _config 7 | 8 | from setuptools import Command, find_packages, setup 9 | from setuptools.command.build_ext import build_ext as _build_ext 10 | from setuptools.extension import Extension 11 | 12 | import versioneer 13 | 14 | 15 | class config(_config): 16 | def run(self): 17 | from bn_config import create_config_h 18 | 19 | create_config_h(self) 20 | 21 | 22 | class clean(Command): 23 | user_options = [("all", "a", "")] 24 | 25 | def initialize_options(self): 26 | self.all = True 27 | self.delete_dirs = [] 28 | self.delete_files = [] 29 | 30 | for root, dirs, files in os.walk("bottleneck"): 31 | for d in dirs: 32 | if d == "__pycache__": 33 | self.delete_dirs.append(os.path.join(root, d)) 34 | 35 | if "__pycache__" in root: 36 | continue 37 | 38 | for f in files: 39 | if f.endswith(".pyc") or f.endswith(".so"): 40 | self.delete_files.append(os.path.join(root, f)) 41 | 42 | if f.endswith(".c") and "template" in f: 43 | generated_file = os.path.join(root, f.replace("_template", "")) 44 | if os.path.exists(generated_file): 45 | self.delete_files.append(generated_file) 46 | 47 | config_h = "bottleneck/src/bn_config.h" 48 | if os.path.exists(config_h): 49 | self.delete_files.append(config_h) 50 | 51 | if os.path.exists("build"): 52 | self.delete_dirs.append("build") 53 | 54 | def finalize_options(self): 55 | pass 56 | 57 | def run(self): 58 | for delete_dir in self.delete_dirs: 59 | shutil.rmtree(delete_dir) 60 | for delete_file in self.delete_files: 61 | os.unlink(delete_file) 62 | 63 | 64 | # workaround for installing bottleneck when numpy is not present 65 | class build_ext(_build_ext): 66 | # taken from: stackoverflow.com/questions/19919905/ 67 | # how-to-bootstrap-numpy-installation-in-setup-py#21621689 68 | def finalize_options(self): 69 | _build_ext.finalize_options(self) 70 | # prevent numpy from thinking it is still in its setup process 71 | if sys.version_info < (3,): 72 | import __builtin__ as builtins 73 | else: 74 | import builtins 75 | builtins.__NUMPY_SETUP__ = False 76 | import numpy 77 | 78 | # place numpy includes first, see gh #156 79 | self.include_dirs.insert(0, numpy.get_include()) 80 | self.include_dirs.append("bottleneck/src") 81 | 82 | def build_extensions(self): 83 | from bn_template import make_c_files 84 | 85 | self.run_command("config") 86 | dirpath = "bottleneck/src" 87 | modules = ["reduce", "move", "nonreduce", "nonreduce_axis"] 88 | make_c_files(dirpath, modules) 89 | 90 | _build_ext.build_extensions(self) 91 | 92 | 93 | cmdclass = versioneer.get_cmdclass() 94 | cmdclass["build_ext"] = build_ext 95 | cmdclass["clean"] = clean 96 | cmdclass["config"] = config 97 | 98 | # Add our template path to the path so that we don't have a circular reference 99 | # of working install to be able to re-compile 100 | sys.path.append(os.path.join(os.path.dirname(__file__), "bottleneck/src")) 101 | 102 | 103 | def prepare_modules(): 104 | 
base_includes = [ 105 | "bottleneck/src/bottleneck.h", 106 | "bottleneck/src/bn_config.h", 107 | "bottleneck/src/iterators.h", 108 | ] 109 | ext = [ 110 | Extension( 111 | "bottleneck.reduce", 112 | sources=["bottleneck/src/reduce.c"], 113 | depends=base_includes, 114 | extra_compile_args=["-O2"], 115 | ) 116 | ] 117 | ext += [ 118 | Extension( 119 | "bottleneck.move", 120 | sources=[ 121 | "bottleneck/src/move.c", 122 | "bottleneck/src/move_median/move_median.c", 123 | ], 124 | depends=base_includes + ["bottleneck/src/move_median/move_median.h"], 125 | extra_compile_args=["-O2"], 126 | ) 127 | ] 128 | ext += [ 129 | Extension( 130 | "bottleneck.nonreduce", 131 | sources=["bottleneck/src/nonreduce.c"], 132 | depends=base_includes, 133 | extra_compile_args=["-O2"], 134 | ) 135 | ] 136 | ext += [ 137 | Extension( 138 | "bottleneck.nonreduce_axis", 139 | sources=["bottleneck/src/nonreduce_axis.c"], 140 | depends=base_includes, 141 | extra_compile_args=["-O2"], 142 | ) 143 | ] 144 | return ext 145 | 146 | 147 | def get_long_description(): 148 | with open("README.rst", "r") as fid: 149 | long_description = fid.read() 150 | idx = max(0, long_description.find("Bottleneck is a collection")) 151 | long_description = long_description[idx:] 152 | return long_description 153 | 154 | 155 | CLASSIFIERS = [ 156 | "Development Status :: 5 - Production/Stable", 157 | "Environment :: Console", 158 | "Intended Audience :: Science/Research", 159 | "Intended Audience :: Financial and Insurance Industry", 160 | "Operating System :: OS Independent", 161 | "Programming Language :: C", 162 | "Programming Language :: Python", 163 | "Programming Language :: Python :: 3", 164 | "Programming Language :: Python :: 3.9", 165 | "Programming Language :: Python :: 3.10", 166 | "Programming Language :: Python :: 3.11", 167 | "Programming Language :: Python :: 3.12", 168 | "Programming Language :: Python :: 3.13", 169 | "Topic :: Scientific/Engineering", 170 | ] 171 | 172 | 173 | metadata = dict( 174 | name="Bottleneck", 175 | maintainer="Christopher Whelan", 176 | maintainer_email="bottle-neck@googlegroups.com", 177 | description="Fast NumPy array functions written in C", 178 | long_description=get_long_description(), 179 | long_description_content_type="text/x-rst", 180 | url="https://github.com/pydata/bottleneck", 181 | download_url="http://pypi.python.org/pypi/Bottleneck", 182 | license="Simplified BSD", 183 | classifiers=CLASSIFIERS, 184 | platforms="OS Independent", 185 | version=versioneer.get_version(), 186 | packages=find_packages(), 187 | package_data={ 188 | "bottleneck": ["LICENSE"], 189 | "bottleneck.tests": ["data/*/*"], 190 | }, 191 | python_requires=">=3.9", 192 | install_requires=["numpy"], 193 | extras_require={"doc": ["numpydoc", "sphinx", "gitpython"]}, 194 | cmdclass=cmdclass, 195 | ext_modules=prepare_modules(), 196 | zip_safe=False, 197 | ) 198 | 199 | 200 | setup(**metadata) 201 | -------------------------------------------------------------------------------- /tools/appveyor/conda_setup.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | 4 | from __future__ import absolute_import 5 | import logging 6 | from os import environ 7 | 8 | from conda_wrapper import CondaWrapper 9 | 10 | 11 | if __name__ == "__main__": 12 | logging.basicConfig(level=logging.INFO) 13 | with CondaWrapper( 14 | environ["PYTHON_VERSION"], environ["CONDA_HOME"], environ["CONDA_VENV"] 15 | ) as conda: 16 | conda.configure() 17 | conda.update() 18 | 
conda.create(*environ["DEPS"].split(" ")) 19 | logging.shutdown() 20 | -------------------------------------------------------------------------------- /tools/appveyor/conda_wrapper.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from __future__ import absolute_import 4 | import sys 5 | import logging 6 | from subprocess import check_output 7 | 8 | 9 | if sys.version_info[0] == 2: 10 | 11 | def decode(string): 12 | return string 13 | 14 | 15 | else: 16 | 17 | def decode(string): 18 | return string.decode() 19 | 20 | 21 | class CondaWrapper(object): 22 | """Manage the AppVeyor Miniconda installation through Python. 23 | 24 | AppVeyor has pre-installed Python 2.7.x as well as Miniconda (2 and 3). 25 | Thus we only need to configure that properly and create the desired 26 | environment. 27 | """ 28 | 29 | def __init__(self, version, home, venv, **kw_args): 30 | super(CondaWrapper, self).__init__(**kw_args) 31 | self.logger = logging.getLogger( 32 | "{}.{}".format(__name__, self.__class__.__name__) 33 | ) 34 | self.version = version 35 | self.home = home 36 | self.venv = venv 37 | 38 | def __enter__(self): 39 | return self 40 | 41 | def __exit__(self, exc_type, exc_val, exc_tb): 42 | return False # False reraises the exception 43 | 44 | def configure(self): 45 | self.logger.info("Configuring '%s'...", self.home) 46 | cmd = [ 47 | "conda", 48 | "config", 49 | "--set", 50 | "always_yes", 51 | "yes", 52 | "--set", 53 | "changeps1", 54 | "no", 55 | ] 56 | msg = check_output(cmd, shell=True) 57 | self.logger.debug(decode(msg)) 58 | self.logger.info("Done.") 59 | 60 | def update(self): 61 | self.logger.info("Updating '%s'...", self.home) 62 | cmd = ["conda", "update", "-q", "conda"] 63 | msg = check_output(cmd, shell=True) 64 | self.logger.debug(decode(msg)) 65 | self.logger.info("Done.") 66 | 67 | def create(self, *args): 68 | self.logger.info("Creating environment '%s'...", self.venv) 69 | cmd = [ 70 | "conda", 71 | "create", 72 | "-q", 73 | "-n", 74 | self.venv, 75 | "python=" + self.version, 76 | ] + list(args) 77 | msg = check_output(cmd, shell=True) 78 | self.logger.debug(decode(msg)) 79 | cmd = ["activate", self.venv] 80 | msg = check_output(cmd, shell=True) 81 | self.logger.debug(decode(msg)) 82 | # consider only for debugging 83 | cmd = ["conda", "info", "-a"] 84 | msg = check_output(cmd, shell=True) 85 | self.logger.debug(decode(msg)) 86 | cmd = ["conda", "list"] 87 | msg = check_output(cmd, shell=True) 88 | self.logger.debug(decode(msg)) 89 | self.logger.info("Done.") 90 | -------------------------------------------------------------------------------- /tools/appveyor/windows_sdk.cmd: -------------------------------------------------------------------------------- 1 | :: To build extensions for 64 bit Python 3.5 or later no special environment needs 2 | :: to be configured. 3 | :: 4 | :: To build extensions for 64 bit Python 3.4 or earlier, we need to configure environment 5 | :: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of: 6 | :: MS Windows SDK for Windows 7 and .NET Framework 4 (SDK v7.1) 7 | :: 8 | :: To build extensions for 64 bit Python 2, we need to configure environment 9 | :: variables to use the MSVC 2008 C++ compilers from GRMSDKX_EN_DVD.iso of: 10 | :: MS Windows SDK for Windows 7 and .NET Framework 3.5 (SDK v7.0) 11 | :: 12 | :: 32 bit builds do not require specific environment configurations. 
13 | :: 14 | :: Note: this script needs to be run with the /E:ON and /V:ON flags for the 15 | :: cmd interpreter, at least for (SDK v7.0) 16 | :: 17 | :: More details at: 18 | :: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows 19 | :: https://stackoverflow.com/a/13751649/163740 20 | :: 21 | :: Original Author: Olivier Grisel 22 | :: License: CC0 1.0 Universal: https://creativecommons.org/publicdomain/zero/1.0/ 23 | :: This version based on updates for python 3.5 by Phil Elson at: 24 | :: https://github.com/pelson/Obvious-CI/tree/master/scripts 25 | 26 | @ECHO OFF 27 | 28 | SET COMMAND_TO_RUN=%* 29 | SET WIN_SDK_ROOT=C:\Program Files\Microsoft SDKs\Windows 30 | 31 | SET MAJOR_PYTHON_VERSION="%PYTHON_VERSION:~0,1%" 32 | SET MINOR_PYTHON_VERSION=%PYTHON_VERSION:~2,1% 33 | IF %MAJOR_PYTHON_VERSION% == "2" ( 34 | SET WINDOWS_SDK_VERSION="v7.0" 35 | SET SET_SDK_64=Y 36 | ) ELSE IF %MAJOR_PYTHON_VERSION% == "3" ( 37 | SET WINDOWS_SDK_VERSION="v7.1" 38 | IF %MINOR_PYTHON_VERSION% LEQ 4 ( 39 | SET SET_SDK_64=Y 40 | ) ELSE ( 41 | SET SET_SDK_64=N 42 | ) 43 | ) ELSE ( 44 | ECHO Unsupported Python version: "%MAJOR_PYTHON_VERSION%" 45 | EXIT 1 46 | ) 47 | 48 | IF "%PYTHON_ARCH%"=="64" ( 49 | IF %SET_SDK_64% == Y ( 50 | ECHO Configuring Windows SDK %WINDOWS_SDK_VERSION% for Python %MAJOR_PYTHON_VERSION% on a 64 bit architecture 51 | SET DISTUTILS_USE_SDK=1 52 | SET MSSdk=1 53 | "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Setup\WindowsSdkVer.exe" -q -version:%WINDOWS_SDK_VERSION% 54 | "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Bin\SetEnv.cmd" /x64 /release 55 | ECHO Executing: %COMMAND_TO_RUN% 56 | call %COMMAND_TO_RUN% || EXIT 1 57 | ) ELSE ( 58 | ECHO Using default MSVC build environment for 64 bit architecture 59 | ECHO Executing: %COMMAND_TO_RUN% 60 | call %COMMAND_TO_RUN% || EXIT 1 61 | ) 62 | ) ELSE ( 63 | ECHO Using default MSVC build environment for 32 bit architecture 64 | ECHO Executing: %COMMAND_TO_RUN% 65 | call %COMMAND_TO_RUN% || EXIT 1 66 | ) 67 | 68 | -------------------------------------------------------------------------------- /tools/test-installed-bottleneck.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import division 4 | import sys 5 | from optparse import OptionParser 6 | import bottleneck 7 | 8 | # This file is a modified version of the original numpy file: 9 | # test-installed-numpy.py 10 | 11 | # A simple script to test the installed version of bottleneck by calling 12 | # 'bottleneck.test()'. Key features: 13 | # -- convenient command-line syntax 14 | # -- sets exit status appropriately, useful for automated test environments 15 | 16 | # It would be better to set this up as a module in the bottleneck namespace, so 17 | # that it could be run as: 18 | # python -m numpy.run_tests 19 | # But, python2.4's -m switch only works with top-level modules, not modules 20 | # that are inside packages. So, once we drop 2.4 support, maybe... 
21 | # TODO: Bottleneck doesn't support python 2.4 22 | 23 | # In case we are run from the source directory, we don't want to import 24 | # bottleneck from there, we want to import the installed version: 25 | sys.path.pop(0) 26 | 27 | parser = OptionParser("usage: %prog [options] -- [nosetests options]") 28 | parser.add_option( 29 | "-v", 30 | "--verbose", 31 | action="count", 32 | dest="verbose", 33 | default=1, 34 | help="increase verbosity", 35 | ) 36 | parser.add_option( 37 | "--doctests", 38 | action="store_true", 39 | dest="doctests", 40 | default=False, 41 | help="Run doctests in module", 42 | ) 43 | parser.add_option( 44 | "--coverage", 45 | action="store_true", 46 | dest="coverage", 47 | default=False, 48 | help="report coverage requires 'coverage' module", 49 | ) 50 | parser.add_option( 51 | "-m", 52 | "--mode", 53 | action="store", 54 | dest="mode", 55 | default="fast", 56 | help="'fast', 'full', or something that could be " 57 | "passed to nosetests -A [default: %default]", 58 | ) 59 | (options, args) = parser.parse_args() 60 | 61 | result = bottleneck.test( 62 | options.mode, 63 | verbose=options.verbose, 64 | extra_argv=args, 65 | doctests=options.doctests, 66 | coverage=options.coverage, 67 | ) 68 | 69 | if result: 70 | sys.exit(0) 71 | else: 72 | sys.exit(1) 73 | -------------------------------------------------------------------------------- /tools/travis/bn_setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -ev # exit on first error, print commands 4 | 5 | if [ "${TEST_RUN}" = "style" ]; then 6 | flake8 7 | black . --check --exclude "(build/|dist/|\.git/|\.mypy_cache/|\.tox/|\.venv/\.asv/|env|\.eggs)" 8 | else 9 | if [ "${TEST_RUN}" = "sdist" ]; then 10 | python setup.py sdist 11 | ARCHIVE=`ls dist/*.tar.gz` 12 | pip install "${ARCHIVE[0]}" 13 | else 14 | pip install "." 15 | fi 16 | python setup.py build_ext --inplace 17 | set +e 18 | if [ "${TEST_RUN}" = "doc" ]; then 19 | make doc 20 | else 21 | # Workaround for https://github.com/travis-ci/travis-ci/issues/6522 22 | python "tools/test-installed-bottleneck.py" 23 | fi 24 | fi 25 | -------------------------------------------------------------------------------- /tools/travis/conda_install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -ev # exit on first error, print commands 4 | 5 | if [ "${PYTHON_ARCH}" == "32" ]; then 6 | set CONDA_FORCE_32BIT=1 7 | fi 8 | if [ -n "${TEST_RUN}" ]; then 9 | TEST_NAME="test-${TEST_RUN}-python-${PYTHON_VERSION}_${PYTHON_ARCH}bit" 10 | else 11 | TEST_NAME="test-python-${PYTHON_VERSION}_${PYTHON_ARCH}bit" 12 | fi 13 | export TEST_NAME 14 | # split dependencies into separate packages 15 | IFS=" " TEST_DEPS=(${TEST_DEPS}) 16 | echo "Creating environment '${TEST_NAME}'..." 
17 | conda create -q -n "${TEST_NAME}" python="${PYTHON_VERSION}" "${TEST_DEPS[@]}" 18 | 19 | set +v # we dont want to see commands in the conda script 20 | 21 | source activate "${TEST_NAME}" 22 | conda update pip 23 | conda info -a 24 | conda list 25 | 26 | if [ -n "${PIP_DEPS}" ]; then 27 | pip install --upgrade pip 28 | # Install numpy via pip for python=3.5 and numpy=1.16 29 | pip install ${PIP_DEPS} 30 | fi 31 | -------------------------------------------------------------------------------- /tools/travis/conda_setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -ev # exit on first error, print commands 4 | 5 | CONDA_URL="http://repo.continuum.io/miniconda" 6 | 7 | if [ "${PYTHON_VERSION:0:1}" == "2" ]; then 8 | CONDA="Miniconda2" 9 | else 10 | CONDA="Miniconda3" 11 | fi 12 | if [ "${TRAVIS_OS_NAME}" == "osx" ]; then 13 | CONDA_OS="MacOSX" 14 | else 15 | CONDA_OS="Linux" 16 | fi 17 | if [ "${PYTHON_ARCH}" == "64" ]; then 18 | URL="${CONDA_URL}/${CONDA}-latest-${CONDA_OS}-x86_64.sh" 19 | else 20 | URL="${CONDA_URL}/${CONDA}-latest-${CONDA_OS}-x86.sh" 21 | fi 22 | echo "Downloading '${URL}'..." 23 | 24 | set +e 25 | travis_retry wget "${URL}" -O miniconda.sh 26 | set -e 27 | 28 | chmod +x miniconda.sh 29 | ./miniconda.sh -b -p "${HOME}/miniconda" 30 | export PATH="${HOME}/miniconda/bin:${PATH}" 31 | hash -r 32 | conda config --set always_yes yes --set changeps1 no 33 | conda update -q conda 34 | -------------------------------------------------------------------------------- /tools/update_readme.py: -------------------------------------------------------------------------------- 1 | from io import StringIO 2 | import sys 3 | import os 4 | 5 | import bottleneck as bn 6 | 7 | 8 | def update_readme(): 9 | 10 | # run benchmark suite while capturing output; indent 11 | with Capturing() as bench_list: 12 | bn.bench() 13 | bench_list = [" " + b for b in bench_list] 14 | 15 | # read readme 16 | cwd = os.path.dirname(__file__) 17 | readme_path = os.path.join(cwd, "../README.rst") 18 | with open(readme_path) as f: 19 | readme_list = f.readlines() 20 | readme_list = [r.strip("\n") for r in readme_list] 21 | 22 | # remove old benchmark result from readme 23 | idx1 = readme_list.index(" Bottleneck performance benchmark") 24 | idx2 = [i for i, line in enumerate(readme_list) if line == ""] 25 | idx2 = [i for i in idx2 if i > idx1] 26 | idx2 = idx2[1] 27 | del readme_list[idx1:idx2] 28 | 29 | # insert new benchmark result into readme; remove trailing whitespace 30 | readme_list = readme_list[:idx1] + bench_list + readme_list[idx1:] 31 | readme_list = [r.rstrip() for r in readme_list] 32 | 33 | # replace readme file 34 | os.remove(readme_path) 35 | with open(readme_path, "w") as f: 36 | f.write("\n".join(readme_list)) 37 | 38 | 39 | # --------------------------------------------------------------------------- 40 | # Capturing class taken from 41 | # http://stackoverflow.com/questions/16571150/ 42 | # how-to-capture-stdout-output-from-a-python-function-call 43 | 44 | 45 | class Capturing(list): 46 | def __enter__(self): 47 | self._stdout = sys.stdout 48 | sys.stdout = self._stringio = StringIO() 49 | return self 50 | 51 | def __exit__(self, *args): 52 | self.extend(self._stringio.getvalue().splitlines()) 53 | sys.stdout = self._stdout 54 | 55 | 56 | if __name__ == "__main__": 57 | update_readme() 58 | --------------------------------------------------------------------------------