├── .github └── workflows │ └── main.yml ├── .gitignore ├── .readthedocs.yaml ├── AUTHORS.rst ├── CHANGELOG.rst ├── CONTRIBUTING.rst ├── DEVGUIDE.rst ├── LICENSE.rst ├── MANIFEST.in ├── README.rst ├── docs ├── Makefile ├── _static │ └── theme_override.css ├── api.rst ├── authors.rst ├── changelog.rst ├── conf.py ├── contributing.rst ├── devguide.rst ├── index.rst ├── installation.rst ├── license.rst └── versioning.rst ├── pylintrc ├── pyproject.toml ├── requirements.txt ├── setup.cfg ├── setup.py ├── src └── fnc │ ├── __init__.py │ ├── helpers.py │ ├── mappings.py │ ├── sequences.py │ └── utilities.py ├── tasks.py ├── tests ├── __init__.py ├── conftest.py ├── helpers.py ├── test_mappings.py ├── test_sequences.py └── test_utilities.py └── tox.ini /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Main 2 | 3 | on: [ push, pull_request ] 4 | 5 | jobs: 6 | test: 7 | name: Test 8 | runs-on: ubuntu-latest 9 | strategy: 10 | matrix: 11 | python-version: [ "3.7", "3.8", "3.9", "3.10", "3.11" ] 12 | 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v3 16 | 17 | - name: Set up Python ${{ matrix.python-version }} 18 | uses: actions/setup-python@v4 19 | with: 20 | python-version: ${{ matrix.python-version }} 21 | 22 | - name: Install dependencies 23 | run: | 24 | pip install --upgrade pip setuptools 25 | pip install --upgrade tox-gh-actions coveralls 26 | 27 | - name: Run tests 28 | run: | 29 | tox 30 | 31 | - name: Send coverage report 32 | run: | 33 | coveralls 34 | env: 35 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 36 | COVERALLS_FLAG_NAME: ${{ matrix.python-version }} 37 | COVERALLS_PARALLEL: true 38 | COVERALLS_SERVICE_NAME: github 39 | 40 | coveralls: 41 | name: Finish Coveralls 42 | needs: test 43 | runs-on: ubuntu-latest 44 | container: python:3-slim 45 | 46 | steps: 47 | - name: Finished 48 | run: | 49 | pip install --upgrade coveralls 50 | coveralls --finish 51 | env: 52 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 53 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | junit.xml 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | 62 | # Sphinx documentation 63 | docs/_build/ 64 | 65 | # PyBuilder 66 | .pybuilder/ 67 | target/ 68 | 69 | # IPython 70 | profile_default/ 71 | ipython_config.py 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # PEP 582 77 | __pypackages__/ 78 | 79 | # Environments 80 | .env 81 | .venv 82 | env/ 83 | venv/ 84 | ENV/ 85 | env.bak/ 86 | venv.bak/ 87 | 88 | # mypy 89 | .mypy_cache/ 90 | .dmypy.json 91 | dmypy.json 92 | 93 | # Pyre type checker 94 | .pyre/ 95 | 96 | # pytype static type analyzer 97 | .pytype/ 98 | 99 | # Mr Developer 100 | .mr.developer.cfg 101 | .project 102 | .pydevproject 103 | .idea 104 | .DS_Store 105 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file for Sphinx projects 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | 4 | # Required 5 | version: 2 6 | 7 | # Set the OS, Python version and other tools you might need 8 | build: 9 | os: ubuntu-22.04 10 | tools: 11 | python: "3.11" 12 | 13 | # Build documentation in the "docs/" directory with Sphinx 14 | sphinx: 15 | configuration: docs/conf.py 16 | fail_on_warning: true 17 | 18 | # Declare the Python requirements required to build documentation 19 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 20 | python: 21 | install: 22 | - requirements: requirements.txt 23 | -------------------------------------------------------------------------------- /AUTHORS.rst: -------------------------------------------------------------------------------- 1 | Authors 2 | ======= 3 | 4 | 5 | Lead 6 | ---- 7 | 8 | - Derrick Gilland, dgilland@gmail.com, `dgilland@github `_ 9 | 10 | 11 | Contributors 12 | ------------ 13 | 14 | None 15 | -------------------------------------------------------------------------------- /CHANGELOG.rst: -------------------------------------------------------------------------------- 1 | Changelog 2 | ========= 3 | 4 | 5 | v0.5.3 (2021-10-14) 6 | ------------------- 7 | 8 | - Minor performance optimization in ``pick``. 9 | 10 | 11 | v0.5.2 (2020-12-24) 12 | ------------------- 13 | 14 | - Fix regression in ``v0.5.1`` that broke ``get/has`` for dictionaries and dot-delimited keys that reference integer dict-keys. 15 | 16 | 17 | v0.5.1 (2020-12-14) 18 | ------------------- 19 | 20 | - Fix bug in ``get/has`` that caused ``defaultdict`` objects to get populated on key access. 21 | 22 | 23 | v0.5.0 (2020-10-23) 24 | ------------------- 25 | 26 | - Fix bug in ``intersection/intersectionby`` and ``difference/differenceby`` where incorrect results could be returned when generators passed in as the sequences to compare with. 27 | - Add support for Python 3.9. 28 | - Drop support for Python <= 3.5. 
29 | 30 | 31 | v0.4.0 (2019-01-23) 32 | ------------------- 33 | 34 | - Add functions: 35 | 36 | - ``differenceby`` 37 | - ``duplicatesby`` 38 | - ``intersectionby`` 39 | - ``unionby`` 40 | 41 | 42 | v0.3.0 (2018-08-31) 43 | ------------------- 44 | 45 | - compose: Introduce new "partial" shorthand where instead of passing a callable, a ``tuple`` can be given which will then be converted to a callable using ``functools.partial``. For example, instead of ``fnc.compose(partial(fnc.filter, {'active': True}), partial(fnc.map, 'email'))``, one can do ``fnc.compose((fnc.filter, {'active': True}), (fnc.map, 'email'))``. 46 | 47 | 48 | v0.2.0 (2018-08-24) 49 | ------------------- 50 | 51 | - Add functions: 52 | 53 | - ``negate`` 54 | - ``over`` 55 | - ``overall`` 56 | - ``overany`` 57 | 58 | - Rename functions: (**breaking change**) 59 | 60 | - ``ismatch -> conforms`` 61 | - ``matches -> conformance`` 62 | 63 | - Make ``conforms/conformance`` (formerly ``ismatch/matches``) accept callable dictionary values that act as predicates against comparison target. (**breaking change**) 64 | 65 | 66 | v0.1.1 (2018-08-17) 67 | ------------------- 68 | 69 | - pick: Don't return ``None`` for keys that don't exist in source object. Instead of ``fnc.pick(['a'], {}) == {'a': None}``, it's now ``fnc.pick(['a'], {}) == {}``. 70 | 71 | 72 | v0.1.0 (2018-08-15) 73 | ------------------- 74 | 75 | - First release. 76 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | Contributing 2 | ============ 3 | 4 | Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given. 5 | 6 | You can contribute in many ways: 7 | 8 | 9 | Types of Contributions 10 | ---------------------- 11 | 12 | Report Bugs 13 | +++++++++++ 14 | 15 | Report bugs at https://github.com/dgilland/fnc. 16 | 17 | If you are reporting a bug, please include: 18 | 19 | - Your operating system name and version. 20 | - Any details about your local setup that might be helpful in troubleshooting. 21 | - Detailed steps to reproduce the bug. 22 | 23 | 24 | Fix Bugs 25 | ++++++++ 26 | 27 | Look through the GitHub issues for bugs. Anything tagged with "bug" is open to whoever wants to implement it. 28 | 29 | 30 | Implement Features 31 | ++++++++++++++++++ 32 | 33 | Look through the GitHub issues for features. Anything tagged with "enhancement" or "help wanted" is open to whoever wants to implement it. 34 | 35 | 36 | Write Documentation 37 | +++++++++++++++++++ 38 | 39 | fnc could always use more documentation, whether as part of the official fnc docs, in docstrings, or even on the web in blog posts, articles, and such. 40 | 41 | 42 | Submit Feedback 43 | +++++++++++++++ 44 | 45 | The best way to send feedback is to file an issue at https://github.com/dgilland/fnc. 46 | 47 | If you are proposing a feature: 48 | 49 | - Explain in detail how it would work. 50 | - Keep the scope as narrow as possible, to make it easier to implement. 51 | - Remember that this is a volunteer-driven project, and that contributions are welcome :) 52 | 53 | 54 | Get Started! 55 | ------------ 56 | 57 | Ready to contribute? Here's how to set up ``fnc`` for local development. 58 | 59 | 1. Fork the ``fnc`` repo on GitHub. 60 | 2. Clone your fork locally:: 61 | 62 | $ git clone git@github.com:your_username_here/fnc.git 63 | 64 | 3. 
Install Python dependencies into a virtualenv:: 65 | 66 | $ cd fnc 67 | $ pip install -r requirements.txt 68 | 69 | 4. Create a branch for local development:: 70 | 71 | $ git checkout -b name-of-your-bugfix-or-feature 72 | 73 | Now you can make your changes locally. 74 | 75 | 5. Autoformat code:: 76 | 77 | $ inv fmt 78 | 79 | 6. When you're done making changes, check that your changes pass all unit tests by testing with ``tox`` across all supported Python versions:: 80 | 81 | $ tox 82 | 83 | 7. Add yourself to ``AUTHORS.rst``. 84 | 85 | 8. Commit your changes and push your branch to GitHub:: 86 | 87 | $ git add . 88 | $ git commit -m "" 89 | $ git push origin name-of-your-bugfix-or-feature-branch 90 | 91 | 9. Submit a pull request through GitHub. 92 | 93 | 94 | Pull Request Guidelines 95 | ----------------------- 96 | 97 | Before you submit a pull request, check that it meets these guidelines: 98 | 99 | 1. The pull request should include tests. 100 | 2. The pull request should work for all versions Python that this project supports. 101 | -------------------------------------------------------------------------------- /DEVGUIDE.rst: -------------------------------------------------------------------------------- 1 | Developer Guide 2 | =============== 3 | 4 | This guide provides an overview of the tooling this project uses and how to execute developer workflows using the developer CLI. 5 | 6 | 7 | Python Environments 8 | ------------------- 9 | 10 | This Python project is tested against different Python versions. For local development, it is a good idea to have those versions installed so that tests can be run against each. 11 | 12 | There are libraries that can help with this. Which tools to use is largely a matter of preference, but below are a few recommendations. 13 | 14 | For managing multiple Python versions: 15 | 16 | - pyenv_ 17 | - OS package manager (e.g. apt, yum, homebrew, etc) 18 | - Build from source 19 | 20 | For managing Python virtualenvs: 21 | 22 | - pyenv-virtualenv_ 23 | - pew_ 24 | - python-venv_ 25 | 26 | 27 | Tooling 28 | ------- 29 | 30 | The following tools are used by this project: 31 | 32 | ============= ========================== ================== 33 | Tool Description Configuration 34 | ============= ========================== ================== 35 | black_ Code formatter ``pyproject.toml`` 36 | isort_ Import statement formatter ``setup.cfg`` 37 | docformatter_ Docstring formatter ``setup.cfg`` 38 | flake8_ Code linter ``setup.cfg`` 39 | pylint_ Code linter ``pylintrc`` 40 | pytest_ Test framework ``setup.cfg`` 41 | tox_ Test environment manager ``tox.ini`` 42 | invoke_ CLI task execution library ``tasks.py`` 43 | ============= ========================== ================== 44 | 45 | 46 | Workflows 47 | --------- 48 | 49 | The following workflows use developer CLI commands via `invoke`_ and are defined in ``tasks.py``. 
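For illustration, a task in ``tasks.py`` is simply a Python function decorated with ``invoke``'s ``@task`` decorator that runs shell commands through its context. A minimal sketch follows (the actual task definitions and commands in ``tasks.py`` may differ):

::

    from invoke import task

    @task
    def fmt(ctx):
        # Illustrative commands only; see tasks.py for the real task bodies.
        ctx.run("black src tests")
        ctx.run("isort src tests")

Tasks defined this way are then run from the command line as ``inv <task-name>``, e.g. ``inv fmt``.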
50 | 51 | Autoformat Code 52 | +++++++++++++++ 53 | 54 | To run all autoformatters: 55 | 56 | :: 57 | 58 | inv fmt 59 | 60 | This is the same as running each autoformatter individually: 61 | 62 | :: 63 | 64 | inv black 65 | inv isort 66 | inv docformatter 67 | 68 | 69 | Lint 70 | ++++ 71 | 72 | To run all linters: 73 | 74 | :: 75 | 76 | inv lint 77 | 78 | This is the same as running each linter individually: 79 | 80 | :: 81 | 82 | inv flake8 83 | inv pylint 84 | 85 | 86 | Test 87 | ++++ 88 | 89 | To run all unit tests: 90 | 91 | :: 92 | 93 | inv unit 94 | 95 | 96 | To run unit tests and builds: 97 | 98 | :: 99 | 100 | inv test 101 | 102 | 103 | Test on All Supported Python Versions 104 | +++++++++++++++++++++++++++++++++++++ 105 | 106 | To run tests on all supported Python versions: 107 | 108 | :: 109 | 110 | tox 111 | 112 | This requires that the supported versions are available on the PATH. 113 | 114 | 115 | Build Package 116 | +++++++++++++ 117 | 118 | To build the package: 119 | 120 | :: 121 | 122 | inv build 123 | 124 | This will output the source and binary distributions under ``dist/``. 125 | 126 | 127 | Build Docs 128 | ++++++++++ 129 | 130 | To build documentation: 131 | 132 | :: 133 | 134 | inv docs 135 | 136 | This will output the documentation under ``docs/_build/``. 137 | 138 | 139 | Serve Docs 140 | ++++++++++ 141 | 142 | To serve docs over HTTP: 143 | 144 | :: 145 | 146 | inv docs -s|--server [-b|--bind 127.0.0.1] [-p|--port 8000] 147 | 148 | inv docs -s 149 | inv docs -s -p 8080 150 | inv docs -s -b 0.0.0.0 -p 8080 151 | 152 | 153 | Delete Build Files 154 | ++++++++++++++++++ 155 | 156 | To remove all build and temporary files: 157 | 158 | :: 159 | 160 | inv clean 161 | 162 | This will remove Python bytecode files, egg files, build output folders, caches, and tox folders. 163 | 164 | 165 | Release Package 166 | +++++++++++++++ 167 | 168 | To release a new version of the package to https://pypi.org: 169 | 170 | :: 171 | 172 | inv release 173 | 174 | 175 | CI/CD 176 | ----- 177 | 178 | This project uses `Github Actions `_ for CI/CD: 179 | 180 | - https://github.com/dgilland/fnc/actions 181 | 182 | 183 | .. _pyenv: https://github.com/pyenv/pyenv 184 | .. _pyenv-virtualenv: https://github.com/pyenv/pyenv-virtualenv 185 | .. _pew: https://github.com/berdario/pew 186 | .. _python-venv: https://docs.python.org/3/library/venv.html 187 | .. _black: https://black.readthedocs.io 188 | .. _isort: https://pycqa.github.io/isort/ 189 | .. _docformatter: https://github.com/myint/docformatter 190 | .. _flake8: https://flake8.pycqa.org 191 | .. _pylint: https://www.pylint.org/ 192 | .. _pytest: https://docs.pytest.org 193 | .. _tox: https://tox.readthedocs.io 194 | .. 
_invoke: http://docs.pyinvoke.org 195 | -------------------------------------------------------------------------------- /LICENSE.rst: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Derrick Gilland 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 10 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | graft src 2 | graft tests 3 | graft docs 4 | 5 | include AUTHORS.rst 6 | include CONTRIBUTING.rst 7 | include CHANGELOG.rst 8 | include LICENSE.rst 9 | include README.rst 10 | include requirements.txt 11 | include tox.ini 12 | include pylintrc 13 | include tasks.py 14 | 15 | global-exclude *.py[cod] __pycache__ *.so 16 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | fnc 2 | *** 3 | 4 | |version| |build| |coveralls| |license| 5 | 6 | 7 | Functional programming in Python with generators and other utilities. 8 | 9 | 10 | Links 11 | ===== 12 | 13 | - Project: https://github.com/dgilland/fnc 14 | - Documentation: https://fnc.readthedocs.io 15 | - PyPI: https://pypi.python.org/pypi/fnc/ 16 | - Github Actions: https://github.com/dgilland/fnc/actions 17 | 18 | 19 | Features 20 | ======== 21 | 22 | - Functional-style methods that work with and return generators. 23 | - Shorthand-style iteratees (callbacks) to easily filter and map data. 24 | - String object-path support for references nested data structures. 25 | - 100% test coverage. 26 | - Python 3.6+ 27 | 28 | 29 | Quickstart 30 | ========== 31 | 32 | Install using pip: 33 | 34 | 35 | :: 36 | 37 | pip3 install fnc 38 | 39 | 40 | Import the main module: 41 | 42 | .. code-block:: python 43 | 44 | import fnc 45 | 46 | 47 | Start working with data: 48 | 49 | .. code-block:: python 50 | 51 | users = [ 52 | {'id': 1, 'name': 'Jack', 'email': 'jack@example.org', 'active': True}, 53 | {'id': 2, 'name': 'Max', 'email': 'max@example.com', 'active': True}, 54 | {'id': 3, 'name': 'Allison', 'email': 'allison@example.org', 'active': False}, 55 | {'id': 4, 'name': 'David', 'email': 'david@example.net', 'active': False} 56 | ] 57 | 58 | 59 | Filter active users: 60 | 61 | .. 
code-block:: python 62 | 63 | # Uses "matches" shorthand iteratee: dictionary 64 | active_users = fnc.filter({'active': True}, users) 65 | # 66 | 67 | active_uesrs = list(active_users) 68 | # [{'name': 'Jack', 'email': 'jack@example.org', 'active': True}, 69 | # {'name': 'Max', 'email': 'max@example.com', 'active': True}] 70 | 71 | 72 | Get a list of email addresses: 73 | 74 | .. code-block:: python 75 | 76 | # Uses "pathgetter" shorthand iteratee: string 77 | emails = fnc.map('email', users) 78 | # 79 | 80 | emails = list(emails) 81 | # ['jack@example.org', 'max@example.com', 'allison@example.org', 'david@example.net'] 82 | 83 | 84 | Create a ``dict`` of users keyed by ``'id'``: 85 | 86 | .. code-block:: python 87 | 88 | # Uses "pathgetter" shorthand iteratee: string 89 | users_by_id = fnc.keyby('id', users) 90 | # {1: {'id': 1, 'name': 'Jack', 'email': 'jack@example.org', 'active': True}, 91 | # 2: {'id': 2, 'name': 'Max', 'email': 'max@example.com', 'active': True}, 92 | # 3: {'id': 3, 'name': 'Allison', 'email': 'allison@example.org', 'active': False}, 93 | # 4: {'id': 4, 'name': 'David', 'email': 'david@example.net', 'active': False}} 94 | 95 | 96 | Select only ``'id'`` and ``'email'`` fields and return as dictionaries: 97 | 98 | .. code-block:: python 99 | 100 | # Uses "pickgetter" shorthand iteratee: set 101 | user_emails = list(fnc.map({'id', 'email'}, users)) 102 | # [{'email': 'jack@example.org', 'id': 1}, 103 | # {'email': 'max@example.com', 'id': 2}, 104 | # {'email': 'allison@example.org', 'id': 3}, 105 | # {'email': 'david@example.net', 'id': 4}] 106 | 107 | 108 | Select only ``'id'`` and ``'email'`` fields and return as tuples: 109 | 110 | .. code-block:: python 111 | 112 | # Uses "atgetter" shorthand iteratee: tuple 113 | user_emails = list(fnc.map(('id', 'email'), users)) 114 | # [(1, 'jack@example.org'), 115 | # (2, 'max@example.com'), 116 | # (3, 'allison@example.org'), 117 | # (4, 'david@example.net')] 118 | 119 | 120 | Access nested data structures using object-path notation: 121 | 122 | .. code-block:: python 123 | 124 | fnc.get('a.b.c[1][0].d', {'a': {'b': {'c': [None, [{'d': 100}]]}}}) 125 | # 100 126 | 127 | # Same result but using a path list instead of a string. 128 | fnc.get(['a', 'b', 'c', 1, 0, 'd'], {'a': {'b': {'c': [None, [{'d': 100}]]}}}) 129 | # 100 130 | 131 | 132 | Compose multiple functions into a generator pipeline: 133 | 134 | .. code-block:: python 135 | 136 | from functools import partial 137 | 138 | filter_active = partial(fnc.filter, {'active': True}) 139 | get_emails = partial(fnc.map, 'email') 140 | get_email_domains = partial(fnc.map, lambda email: email.split('@')[1]) 141 | 142 | get_active_email_domains = fnc.compose( 143 | filter_active, 144 | get_emails, 145 | get_email_domains, 146 | set, 147 | ) 148 | 149 | email_domains = get_active_email_domains(users) 150 | # {'example.com', 'example.org'} 151 | 152 | 153 | Or do the same thing except using a terser "partial" shorthand: 154 | 155 | .. code-block:: python 156 | 157 | get_active_email_domains = fnc.compose( 158 | (fnc.filter, {'active': True}), 159 | (fnc.map, 'email'), 160 | (fnc.map, lambda email: email.split('@')[1]), 161 | set, 162 | ) 163 | 164 | email_domains = get_active_email_domains(users) 165 | # {'example.com', 'example.org'} 166 | 167 | 168 | For more details and examples, please see the full documentation at https://fnc.readthedocs.io. 169 | 170 | 171 | .. 
|version| image:: https://img.shields.io/pypi/v/fnc.svg?style=flat-square 172 | :target: https://pypi.python.org/pypi/fnc/ 173 | 174 | .. |build| image:: https://img.shields.io/github/actions/workflow/status/dgilland/fnc/main.yml?branch=master&style=flat-square 175 | :target: https://github.com/dgilland/fnc/actions 176 | 177 | .. |coveralls| image:: https://img.shields.io/coveralls/dgilland/fnc/master.svg?style=flat-square 178 | :target: https://coveralls.io/r/dgilland/fnc 179 | 180 | .. |license| image:: https://img.shields.io/pypi/l/fnc.svg?style=flat-square 181 | :target: https://pypi.python.org/pypi/fnc/ 182 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS ?= 6 | SPHINXBUILD ?= sphinx-build 7 | PAPER ?= 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Internal variables. 12 | PAPEROPT_a4 = -D latex_elements.papersize=a4 13 | PAPEROPT_letter = -D latex_elements.papersize=letter 14 | # $(O) is meant as a shortcut for $(SPHINXOPTS) 15 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(O) $(SOURCEDIR) 16 | # the i18n builder cannot share the environment and doctrees with the others 17 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(O) $(SOURCEDIR) 18 | 19 | .PHONY: help 20 | help: 21 | @echo "Please use \`make ' where is one of" 22 | @echo " html to make standalone HTML files" 23 | @echo " dirhtml to make HTML files named index.html in directories" 24 | @echo " singlehtml to make a single large HTML file" 25 | @echo " pickle to make pickle files" 26 | @echo " json to make JSON files" 27 | @echo " htmlhelp to make HTML files and an HTML help project" 28 | @echo " qthelp to make HTML files and a qthelp project" 29 | @echo " applehelp to make an Apple Help Book" 30 | @echo " devhelp to make HTML files and a Devhelp project" 31 | @echo " epub to make an epub" 32 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 33 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 34 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 35 | @echo " lualatexpdf to make LaTeX files and run them through lualatex" 36 | @echo " xelatexpdf to make LaTeX files and run them through xelatex" 37 | @echo " text to make text files" 38 | @echo " man to make manual pages" 39 | @echo " texinfo to make Texinfo files" 40 | @echo " info to make Texinfo files and run them through makeinfo" 41 | @echo " gettext to make PO message catalogs" 42 | @echo " changes to make an overview of all changed/added/deprecated items" 43 | @echo " xml to make Docutils-native XML files" 44 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 45 | @echo " linkcheck to check all external links for integrity" 46 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 47 | @echo " coverage to run coverage check of the documentation (if enabled)" 48 | @echo " dummy to check syntax errors of document sources" 49 | 50 | .PHONY: clean 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | .PHONY: latexpdf 55 | latexpdf: 56 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 57 | @echo "Running LaTeX files through pdflatex..." 58 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 59 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 
60 | 61 | .PHONY: latexpdfja 62 | latexpdfja: 63 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 64 | @echo "Running LaTeX files through platex and dvipdfmx..." 65 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 66 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 67 | 68 | .PHONY: lualatexpdf 69 | lualatexpdf: 70 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 71 | @echo "Running LaTeX files through lualatex..." 72 | $(MAKE) PDFLATEX=lualatex -C $(BUILDDIR)/latex all-pdf 73 | @echo "lualatex finished; the PDF files are in $(BUILDDIR)/latex." 74 | 75 | .PHONY: xelatexpdf 76 | xelatexpdf: 77 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 78 | @echo "Running LaTeX files through xelatex..." 79 | $(MAKE) PDFLATEX=xelatex -C $(BUILDDIR)/latex all-pdf 80 | @echo "xelatex finished; the PDF files are in $(BUILDDIR)/latex." 81 | 82 | .PHONY: info 83 | info: 84 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 85 | @echo "Running Texinfo files through makeinfo..." 86 | make -C $(BUILDDIR)/texinfo info 87 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 88 | 89 | .PHONY: gettext 90 | gettext: 91 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 92 | 93 | # Catch-all target: route all unknown targets to Sphinx 94 | .PHONY: Makefile 95 | %: Makefile 96 | $(SPHINXBUILD) -b "$@" $(ALLSPHINXOPTS) "$(BUILDDIR)/$@" 97 | -------------------------------------------------------------------------------- /docs/_static/theme_override.css: -------------------------------------------------------------------------------- 1 | 2 | .wy-table-responsive table td, 3 | .wy-table-responsive table th { 4 | /* Get ride of nowrap for table cells. */ 5 | white-space: normal; 6 | } 7 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | .. _api: 2 | 3 | API Reference 4 | ============= 5 | 6 | .. testsetup:: 7 | 8 | import fnc 9 | from fnc import * 10 | 11 | 12 | .. automodule:: fnc 13 | 14 | 15 | Sequences 16 | --------- 17 | 18 | .. automodule:: fnc.sequences 19 | :members: 20 | 21 | 22 | Mappings 23 | -------- 24 | 25 | .. automodule:: fnc.mappings 26 | :members: 27 | 28 | 29 | Utilities 30 | --------- 31 | 32 | .. automodule:: fnc.utilities 33 | :members: 34 | -------------------------------------------------------------------------------- /docs/authors.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../AUTHORS.rst 2 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CHANGELOG.rst 2 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Configuration file for the Sphinx documentation builder. 4 | # 5 | # This file does only contain a selection of the most common options. For a 6 | # full list see the documentation: 7 | # http://www.sphinx-doc.org/en/master/config 8 | 9 | # -- Path setup -------------------------------------------------------------- 10 | 11 | # If extensions (or modules to document with autodoc) are in another directory, 12 | # add these directories to sys.path here. 
If the directory is relative to the 13 | # documentation root, use os.path.abspath to make it absolute, like shown here. 14 | # 15 | import os 16 | import sys 17 | 18 | 19 | sys.path.insert(0, os.path.abspath("..")) 20 | 21 | 22 | # -- Project information ----------------------------------------------------- 23 | 24 | from email import message_from_string 25 | from pkg_resources import get_distribution 26 | 27 | dist = get_distribution("fnc") 28 | 29 | if hasattr(dist, "_parsed_pkg_info"): 30 | pkg_info = dict(dist._parsed_pkg_info) 31 | else: 32 | pkg_info = dict(message_from_string("\n".join(dist._get_metadata("PKG-INFO")))) 33 | 34 | project = pkg_info["Name"] 35 | author = pkg_info["Author"] 36 | description = pkg_info["Summary"] 37 | copyright = "2018, " + author 38 | 39 | # The short X.Y version 40 | version = pkg_info["Version"] 41 | # The full version, including alpha/beta/rc tags 42 | release = version 43 | 44 | 45 | # -- General configuration --------------------------------------------------- 46 | 47 | # If your documentation needs a minimal Sphinx version, state it here. 48 | # 49 | # needs_sphinx = '1.0' 50 | 51 | # Add any Sphinx extension module names here, as strings. They can be 52 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 53 | # ones. 54 | extensions = [ 55 | "sphinx.ext.autodoc", 56 | "sphinx.ext.doctest", 57 | "sphinx.ext.coverage", 58 | "sphinx.ext.viewcode", 59 | "sphinx.ext.napoleon", 60 | ] 61 | 62 | # Add any paths that contain templates here, relative to this directory. 63 | templates_path = ["_templates"] 64 | 65 | # The suffix(es) of source filenames. 66 | # You can specify multiple suffix as a list of string: 67 | # 68 | # source_suffix = ['.rst', '.md'] 69 | source_parsers = {} 70 | source_suffix = [".rst"] 71 | 72 | # The master toctree document. 73 | master_doc = "index" 74 | 75 | # The language for content autogenerated by Sphinx. Refer to documentation 76 | # for a list of supported languages. 77 | # 78 | # This is also used if you do content translation via gettext catalogs. 79 | # Usually you set "language" from the command line for these cases. 80 | language = "en" 81 | 82 | # List of patterns, relative to source directory, that match files and 83 | # directories to ignore when looking for source files. 84 | # This pattern also affects html_static_path and html_extra_path. 85 | exclude_patterns = ["_build"] 86 | 87 | # The name of the Pygments (syntax highlighting) style to use. 88 | pygments_style = "sphinx" 89 | 90 | # If true, `todo` and `todoList` produce output, else they produce nothing. 91 | todo_include_todos = False 92 | 93 | 94 | # -- Options for HTML output ------------------------------------------------- 95 | 96 | # The theme to use for HTML and HTML Help pages. See the documentation for 97 | # a list of builtin themes. 98 | # 99 | html_theme = "furo" 100 | 101 | # Theme options are theme-specific and customize the look and feel of a theme 102 | # further. For a list of options available for each theme, see the 103 | # documentation. 104 | # 105 | # html_theme_options = {} 106 | 107 | # Add any paths that contain custom static files (such as style sheets) here, 108 | # relative to this directory. They are copied after the builtin static files, 109 | # so a file named "default.css" will overwrite the builtin "default.css". 110 | # html_static_path = ['_static'] 111 | 112 | # Custom sidebar templates, must be a dictionary that maps document names 113 | # to template names. 
114 | # 115 | # The default sidebars (for documents that don't match any pattern) are 116 | # defined by theme itself. Builtin themes are using these templates by 117 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 118 | # 'searchbox.html']``. 119 | # 120 | # html_sidebars = {} 121 | 122 | 123 | # -- Options for HTMLHelp output --------------------------------------------- 124 | 125 | # Output file base name for HTML help builder. 126 | htmlhelp_basename = project + "doc" 127 | 128 | 129 | # -- Options for LaTeX output ------------------------------------------------ 130 | 131 | latex_elements = { 132 | # The paper size ('letterpaper' or 'a4paper'). 133 | # 134 | # 'papersize': 'letterpaper', 135 | # The font size ('10pt', '11pt' or '12pt'). 136 | # 137 | # 'pointsize': '10pt', 138 | # Additional stuff for the LaTeX preamble. 139 | # 140 | # 'preamble': '', 141 | # Latex figure (float) alignment 142 | # 143 | # 'figure_align': 'htbp', 144 | } 145 | 146 | 147 | # Grouping the document tree into LaTeX files. List of tuples 148 | # (source start file, target name, title, 149 | # author, documentclass [howto, manual, or own class]). 150 | latex_documents = [ 151 | (master_doc, project + ".tex", project + " Documentation", author, "manual"), 152 | ] 153 | 154 | 155 | # -- Options for manual page output ------------------------------------------ 156 | 157 | # One entry per manual page. List of tuples 158 | # (source start file, name, description, authors, manual section). 159 | man_pages = [(master_doc, project, project + " Documentation", [author], 1)] 160 | 161 | 162 | # -- Options for Texinfo output ---------------------------------------------- 163 | 164 | # Grouping the document tree into Texinfo files. List of tuples 165 | # (source start file, target name, title, author, 166 | # dir menu entry, description, category) 167 | texinfo_documents = [ 168 | ( 169 | master_doc, 170 | project, 171 | project + " Documentation", 172 | author, 173 | project, 174 | description, 175 | "Miscellaneous", 176 | ), 177 | ] 178 | 179 | 180 | # -- Extension configuration ------------------------------------------------- 181 | -------------------------------------------------------------------------------- /docs/contributing.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CONTRIBUTING.rst 2 | -------------------------------------------------------------------------------- /docs/devguide.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../DEVGUIDE.rst 2 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. fnc documentation master file 2 | 3 | .. include:: ../README.rst 4 | 5 | Guide 6 | ===== 7 | 8 | .. toctree:: 9 | :maxdepth: 3 10 | 11 | installation 12 | api 13 | devguide 14 | 15 | 16 | Project Info 17 | ============ 18 | 19 | .. toctree:: 20 | :maxdepth: 1 21 | 22 | license 23 | versioning 24 | changelog 25 | authors 26 | contributing 27 | 28 | 29 | Indices and Tables 30 | ================== 31 | 32 | - :ref:`genindex` 33 | - :ref:`modindex` 34 | - :ref:`search` 35 | -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | fnc requires Python >= 3.6. 
5 | 6 | To install from `PyPI `_: 7 | 8 | :: 9 | 10 | pip install fnc 11 | -------------------------------------------------------------------------------- /docs/license.rst: -------------------------------------------------------------------------------- 1 | License 2 | ======= 3 | 4 | .. include:: ../LICENSE.rst 5 | -------------------------------------------------------------------------------- /docs/versioning.rst: -------------------------------------------------------------------------------- 1 | Versioning 2 | ========== 3 | 4 | This project follows `Semantic Versioning`_ with the following caveats: 5 | 6 | - Only the public API (i.e. the objects imported into the fnc module) will maintain backwards compatibility between MINOR version bumps. 7 | - Objects within any other parts of the library are not guaranteed to not break between MINOR version bumps. 8 | 9 | With that in mind, it is recommended to only use or import objects from the main module, fnc. 10 | 11 | 12 | .. _Semantic Versioning: http://semver.org/ 13 | -------------------------------------------------------------------------------- /pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | 3 | # A comma-separated list of package or module names from where C extensions may 4 | # be loaded. Extensions are loading into the active Python interpreter and may 5 | # run arbitrary code. 6 | extension-pkg-whitelist= 7 | 8 | # Specify a score threshold to be exceeded before program exits with error. 9 | fail-under=10 10 | 11 | # Add files or directories to the blacklist. They should be base names, not 12 | # paths. 13 | ignore=CVS 14 | 15 | # Add files or directories matching the regex patterns to the blacklist. The 16 | # regex matches against base names, not paths. 17 | #ignore-patterns= 18 | 19 | # Python code to execute, usually for sys.path manipulation such as 20 | # pygtk.require(). 21 | #init-hook= 22 | 23 | # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the 24 | # number of processors available to use. 25 | #jobs=0 26 | # Temporarily disable multiprocessing since it duplicates violations due to bug: https://github.com/PyCQA/pylint/issues/3584 27 | jobs=1 28 | 29 | # Control the amount of potential inferred values when inferring a single 30 | # object. This can help the performance when dealing with large functions or 31 | # complex, nested conditions. 32 | limit-inference-results=100 33 | 34 | # List of plugins (as comma separated values of python module names) to load, 35 | # usually to register additional checkers. 36 | load-plugins= 37 | 38 | # Pickle collected data for later comparisons. 39 | persistent=yes 40 | 41 | # When enabled, pylint would attempt to guess common misconfiguration and emit 42 | # user-friendly hints instead of false-positive error messages. 43 | suggestion-mode=yes 44 | 45 | # Allow loading of arbitrary C extensions. Extensions are imported into the 46 | # active Python interpreter and may run arbitrary code. 47 | unsafe-load-any-extension=no 48 | 49 | 50 | [MESSAGES CONTROL] 51 | 52 | # Only show warnings with the listed confidence levels. Leave empty to show 53 | # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. 54 | confidence= 55 | 56 | # Enable the message, report, category or checker with the given id(s). 
You can 57 | # either give multiple identifier separated by comma (,) or put this option 58 | # multiple time (only on the command line, not in the configuration file where 59 | # it should appear only once). See also the "--disable" option for examples. 60 | # NOTE: Only check for errors. Style/other conventions are not being checked. 61 | enable=E,F 62 | 63 | # Disable the message, report, category or checker with the given id(s). You 64 | # can either give multiple identifiers separated by comma (,) or put this 65 | # option multiple times (only on the command line, not in the configuration 66 | # file where it should appear only once). You can also use "--disable=all" to 67 | # disable everything first and then reenable specific checks. For example, if 68 | # you want to run only the similarities checker, you can use "--disable=all 69 | # --enable=similarities". If you want to run only the classes checker, but have 70 | # no Warning level messages displayed, use "--disable=all --enable=classes 71 | # --disable=W". 72 | disable=C,R,W 73 | 74 | 75 | [REPORTS] 76 | 77 | # Python expression which should return a score less than or equal to 10. You 78 | # have access to the variables 'error', 'warning', 'refactor', and 'convention' 79 | # which contain the number of messages in each category, as well as 'statement' 80 | # which is the total number of statements analyzed. This score is used by the 81 | # global evaluation report (RP0004). 82 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) 83 | 84 | # Template used to display messages. This is a python new-style format string 85 | # used to format the message information. See doc for all details. 86 | #msg-template= 87 | 88 | # Set the output format. Available formats are text, parseable, colorized, json 89 | # and msvs (visual studio). You can also give a reporter class, e.g. 90 | # mypackage.mymodule.MyReporterClass. 91 | output-format=text 92 | 93 | # Tells whether to display a full report or only the messages. 94 | reports=no 95 | 96 | # Activate the evaluation score. 97 | # NOTE: Since we're only checking errors, we don't really need to worry about a score. 98 | score=no 99 | 100 | 101 | ######################################################################################## 102 | # NOTE: All other sections of pylintrc have been removed since we are only interested in 103 | # checking pylint errors and not other style conventions. 
Those could be added from 104 | # https://github.com/PyCQA/pylint/blob/master/pylintrc or by borrowing from 105 | # $ pylint --generate-rcfile 106 | ######################################################################################## 107 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=46.4", 4 | "wheel", 5 | ] 6 | 7 | 8 | [tool.black] 9 | line-length = 100 10 | include = '\.pyi?$' 11 | exclude = ''' 12 | /( 13 | \.git 14 | | \.mypy_cache 15 | | \.tox 16 | | \.venv 17 | | \.cache 18 | | _build 19 | | build 20 | | dist 21 | )/ 22 | ''' 23 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | -e .[dev] 2 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = fnc 3 | version = attr: fnc.__version__ 4 | author = Derrick Gilland 5 | author_email = dgilland@gmail.com 6 | url = https://github.com/dgilland/fnc 7 | description = Functional programming in Python with generators and other utilities. 8 | long_description = file: README.rst, CHANGELOG.rst, LICENSE.rst 9 | keywords = fnc functional functional-programming generators utility 10 | license = MIT License 11 | classifiers = 12 | Development Status :: 4 - Beta 13 | Intended Audience :: Developers 14 | License :: OSI Approved :: MIT License 15 | Operating System :: OS Independent 16 | Programming Language :: Python 17 | Programming Language :: Python :: 3 18 | Programming Language :: Python :: 3.7 19 | Programming Language :: Python :: 3.8 20 | Programming Language :: Python :: 3.9 21 | Programming Language :: Python :: 3.10 22 | Programming Language :: Python :: 3.11 23 | Topic :: Software Development :: Libraries 24 | Topic :: Software Development :: Libraries :: Python Modules 25 | Topic :: Utilities 26 | 27 | [options] 28 | package_dir = 29 | = src 30 | packages = find: 31 | python_requires = >=3.6 32 | install_requires = 33 | 34 | [options.packages.find] 35 | where = src 36 | 37 | [options.extras_require] 38 | dev = 39 | black 40 | build 41 | coverage 42 | docformatter 43 | flake8 44 | flake8-black 45 | flake8-bugbear 46 | flake8-isort 47 | furo 48 | importlib_metadata<5; python_version=="3.7" 49 | invoke 50 | isort 51 | pylint 52 | pytest 53 | pytest-cov 54 | sphinx 55 | tox 56 | twine 57 | wheel 58 | 59 | 60 | [bdist_wheel] 61 | python_tag = py3 62 | 63 | [flake8] 64 | exclude = .tox,venv,env 65 | max_line_length = 100 66 | max_complexity = 12 67 | # F401 - `module` imported but unused 68 | # F811 - redefinition of unused `name` from line `N` 69 | # E203 - whitespace before ':' 70 | # W503 - line break before binary operator 71 | ignore = F401,F811,E203,W503 72 | 73 | [tool:isort] 74 | line_length = 100 75 | multi_line_output = 3 76 | lines_after_imports = 2 77 | combine_as_imports = true 78 | include_trailing_comma = true 79 | force_sort_within_sections = true 80 | 81 | [tool:pytest] 82 | junit_family = xunit2 83 | addopts = 84 | --verbose 85 | --doctest-modules 86 | --no-cov-on-fail 87 | --cov-fail-under=100 88 | --cov-report=term-missing 89 | --cov-report=xml:build/coverage/coverage.xml 90 | --cov-report=html:build/coverage 91 | --junitxml=build/testresults/junit.xml 92 | 93 
| [coverage:run] 94 | omit = 95 | */tests/* 96 | */test_* 97 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup 4 | 5 | 6 | setup() 7 | -------------------------------------------------------------------------------- /src/fnc/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | The ``fnc`` library is a functional-style utility library with an emphasis on using generators to 3 | process sequences of data. This allows one to easily build data processing pipelines to more 4 | efficiently munge data through function composition. 5 | 6 | All public functions are available from the main module. 7 | 8 | :: 9 | 10 | import fnc 11 | 12 | fnc. 13 | 14 | Note: 15 | It is recommended to use the above syntax or import functions from the main module instead of 16 | importing from submodules. Future versions may change/reorganize things which could break that 17 | usage. 18 | 19 | So what makes this library different than other function libraries for Python? Some main features to 20 | highlight are: 21 | 22 | 1. Generators when possible. 23 | 2. Shorthand iteratee support. 24 | 3. Shorthand partial function composition support. 25 | 26 | Generators 27 | ---------- 28 | 29 | By using generators, large datasets can be processed more efficiently which can enable one to build 30 | data pipelines that "push" each item of a sequence all the way through to the end before being 31 | "collected" in a final data structure. This means that these data pipelines can iterate over a 32 | sequence just once while transforming the data as it goes. These pipelines can be built through 33 | function composition (e.g. ``fnc.compose`` + ``functools.partial``) or by simply building up the 34 | final form through successive generator passing. 35 | 36 | Iteratees 37 | --------- 38 | 39 | The other main feature is shorthand iteratee support. But what is an iteratee? From Wikipedia 40 | (https://en.wikipedia.org/wiki/Iteratee): 41 | 42 | ...an iteratee is a composable abstraction for incrementally processing sequentially 43 | presented chunks of input data in a purely functional fashion. 44 | 45 | What does that mean exactly? An iteratee is a function that is applied to each item in a sequence. 46 | 47 | Note: 48 | All functions that accept an iteratee have the iteratee as the first argument to the function. 49 | This mirrors the Python standard library for functions like ``map``, ``filter``, and ``reduce``. 50 | It also makes it easier to use ``functools.partial`` to create ad-hoc functions with bound 51 | iteratees. 52 | 53 | Functions that accept iteratees can of course use a callable as the iteratee, but they can also 54 | accept the shorthand styles below. 55 | 56 | Note: 57 | If iteratee shorthand styles are not your thing, each shorthand style has a corresponding 58 | higher-order function that can be used to return the same callable iteratee. 59 | 60 | Dict 61 | ++++ 62 | 63 | A dictionary-iteratee returns a "conforms" comparator that matches source key-values to 64 | target key-values. Typically, this iteratee is used to filter a list of dictionaries by checking if 65 | the targets are a superset of the source. 
66 | 67 | For example: 68 | 69 | :: 70 | 71 | x = [*fnc.filter({'a': 1, 'b': 2}, [{'a': 1}, {'a': 1, 'b': 2, 'c': 3}])] 72 | x == [{'a': 1, 'b': 2, 'c': 3}] 73 | 74 | which is the same as: 75 | 76 | :: 77 | 78 | x = list(fnc.filter(fnc.conformance({'a': 1, 'b': 2}), ...)) 79 | 80 | Note: 81 | When values in the dictionary-iteratee are callables, they will be treated as predicate 82 | functions that will be called with the corresponding value in the comparison target. 83 | 84 | Set 85 | +++ 86 | 87 | A set-iteratee applies a "pickgetter" function to select a subset of fields from an object. 88 | 89 | For example: 90 | 91 | :: 92 | 93 | x = [*fnc.map({'a', 'b'}, [{'a': 1, 'b': 2, 'c': 3}, {'b': 4, 'd': 5}, {'a': 1}])] 94 | x == [{'a': 1, 'b': 2}, {'a': None, 'b': 4}, {'a': 1, 'b': None}] 95 | 96 | which is the same as: 97 | 98 | :: 99 | 100 | x = [*fnc.map(fnc.pickgetter(['a', 'b']), ...)] 101 | 102 | # or 103 | from functools import partial 104 | x = [*fnc.map(partial(fnc.pick, ['a', 'b']), ...)] 105 | 106 | Tuple 107 | +++++ 108 | 109 | A tuple-iteratee applies an "atgetter" function to return a tuple of values at the given paths. 110 | 111 | For example: 112 | 113 | :: 114 | 115 | x = [ 116 | *fnc.map( 117 | ('a', 'b'), 118 | [{'a': 1, 'b': 2, 'c': 3}, {'b': 4, 'd': 5}, {'a': 1}] 119 | ) 120 | ] 121 | x == [(1, 2), (None, 4), (1, None)] 122 | 123 | which is the same as: 124 | 125 | :: 126 | 127 | x = [*fnc.map(fnc.atgetter(['a', 'b']), ...)] 128 | 129 | # or 130 | x = [*fnc.map(partial(fnc.at, ['a', 'b']), ...)] 131 | 132 | List 133 | ++++ 134 | 135 | A list-iteratee applies a "pathgetter" function to return the value at the given object path. 136 | 137 | For example: 138 | 139 | :: 140 | 141 | x = [ 142 | *fnc.map( 143 | ['a', 'aa', 0, 'aaa'], 144 | [{'a': {'aa': [{'aaa': 1}]}}, {'a': {'aa': [{'aaa': 2}]}}] 145 | ) 146 | ] 147 | x == [1, 2] 148 | 149 | which is the same as: 150 | 151 | :: 152 | 153 | x = [*fnc.map(fnc.pathgetter(['a', 'aa', 0, 'aaa']), ...)] 154 | 155 | # or 156 | x = [*fnc.map(partial(fnc.get, ['a', 'aa', 0, 'aaa']), ...)] 157 | 158 | String 159 | ++++++ 160 | 161 | A string-iteratee is like a list-iteratee except that an object path is represented in object-path 162 | notation like ``'a.aa[0].aaa'``. 163 | 164 | For example: 165 | 166 | :: 167 | 168 | x = [ 169 | *fnc.map( 170 | 'a.aa[0].aaa', 171 | [{'a': {'aa': [{'aaa': 1}]}}, {'a': {'aa': [{'aaa': 2}]}}] 172 | ) 173 | ] 174 | x == [1, 2] 175 | 176 | which is the same as: 177 | 178 | :: 179 | 180 | x = [*fnc.map(fnc.pathgetter('a.aa[0].aaa'), ...)] 181 | 182 | # or 183 | x = [*fnc.map(partial(fnc.get, 'a.aa[0].aaa'), ...)] 184 | 185 | Other Values 186 | ++++++++++++ 187 | 188 | All other non-callable values will be used in a "pathgetter" iteratee as a top-level "key" to return 189 | the object value from. Callable values will be used directly as iteratees. 190 | 191 | Note: 192 | To reference a mapping that has a ``tuple`` key (e.g. {(1, 2): 'value}), use the list-iteratee 193 | like ``fnc.map([(1, 2)], ...)``. 194 | 195 | 196 | Function Composition 197 | -------------------- 198 | 199 | The primary method for function composition is ``fnc.compose`` combined with "partial" shorthand as 200 | needed. 201 | 202 | What is "partial" shorthand? Instead of passing callables to ``fnc.compose``, one can pass a tuple 203 | with the same arguments to ``functools.partial``. 
204 | 205 | :: 206 | 207 | count_by_age_over21 = fnc.compose( 208 | (fnc.filter, {'age': lambda age: age >= 21}), 209 | (fnc.countby, 'age') 210 | ) 211 | 212 | # is equivalent to... 213 | # count_by_age_over21 = fnc.compose( 214 | # partial(fnc.filter, {'age': lambda age: age >= 21}), 215 | # partial(fnc.countby, 'age') 216 | # ) 217 | 218 | x = count_by_age_over21( 219 | [ 220 | {'age': 20}, 221 | {'age': 21}, 222 | {'age': 30}, 223 | {'age': 22}, 224 | {'age': 21}, 225 | {'age': 22} 226 | ] 227 | ) 228 | x == {21: 2, 30: 1, 22: 2} 229 | 230 | Note: 231 | The "partial" shorthand only supports invoking ``functools.partial`` using positional arguments. 232 | If keyword argument partials are needed, then use ``functools.partial`` directly. 233 | """ 234 | 235 | __version__ = "0.5.3" 236 | 237 | from .mappings import at, defaults, get, has, invert, mapkeys, mapvalues, merge, omit, pick 238 | from .sequences import ( 239 | chunk, 240 | compact, 241 | concat, 242 | countby, 243 | difference, 244 | differenceby, 245 | duplicates, 246 | duplicatesby, 247 | filter, 248 | find, 249 | findindex, 250 | findlast, 251 | findlastindex, 252 | flatten, 253 | flattendeep, 254 | groupall, 255 | groupby, 256 | intercalate, 257 | interleave, 258 | intersection, 259 | intersectionby, 260 | intersperse, 261 | keyby, 262 | map, 263 | mapcat, 264 | mapflat, 265 | mapflatdeep, 266 | partition, 267 | reject, 268 | union, 269 | unionby, 270 | unzip, 271 | without, 272 | xor, 273 | ) 274 | from .utilities import ( 275 | after, 276 | aspath, 277 | atgetter, 278 | before, 279 | compose, 280 | conformance, 281 | conforms, 282 | constant, 283 | identity, 284 | iteratee, 285 | negate, 286 | noop, 287 | over, 288 | overall, 289 | overany, 290 | pathgetter, 291 | pickgetter, 292 | random, 293 | retry, 294 | ) 295 | -------------------------------------------------------------------------------- /src/fnc/helpers.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Iterable, Mapping, Sequence 2 | from decimal import Decimal 3 | import types 4 | 5 | 6 | number_types = (int, float, Decimal) 7 | 8 | Sentinel = object() 9 | 10 | 11 | class _Unset(object): 12 | """ 13 | Represents an unset value. 14 | 15 | Used to differentiate between an explicit ``None`` and an unset value. 16 | """ 17 | 18 | def __bool__(self): # pragma: no cover 19 | return False 20 | 21 | 22 | UNSET = _Unset() 23 | 24 | 25 | class Container(object): 26 | """ 27 | A "seen" container for keeping track of elements of a sequence that have been encountered 28 | before. 29 | 30 | It is optimized to work with both hashable and unhashable values by storing hashable items in a 31 | ``set`` and unhashable items in a ``list`` and then checking both containers for existence. 
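    A minimal usage sketch (illustrative only, based on the behavior described above)::

        seen = Container()
        seen.add(1)         # hashable, stored in the internal ``set``
        seen.add([2, 3])    # unhashable, stored in the internal ``list``
        1 in seen           # True
        [2, 3] in seen      # True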
32 | """ 33 | 34 | def __init__(self, values=None): 35 | self.hashable = set() 36 | self.unhashable = [] 37 | 38 | if values is not None: 39 | self.extend(values) 40 | 41 | def __contains__(self, value): 42 | try: 43 | return value in self.hashable 44 | except TypeError: 45 | return value in self.unhashable 46 | 47 | def __len__(self): # pragma: no cover 48 | return len(self.hashable) + len(self.unhashable) 49 | 50 | def add(self, value): 51 | if value in self: 52 | return 53 | 54 | try: 55 | self.hashable.add(value) 56 | except TypeError: 57 | self.unhashable.append(value) 58 | 59 | def extend(self, values): 60 | for value in values: 61 | self.add(value) 62 | 63 | 64 | def iscollection(value): 65 | """Return whether `value` is iterable but not string or bytes.""" 66 | return isinstance(value, Iterable) and not isinstance(value, (str, bytes)) 67 | 68 | 69 | def isgenerator(value): 70 | """ 71 | Return whether `value` is a generator or generator-like. 72 | 73 | The purpose being to determine whether `value` will be exhausted if it is iterated over. 74 | """ 75 | return isinstance(value, types.GeneratorType) or ( 76 | hasattr(value, "__iter__") 77 | and hasattr(value, "__next__") 78 | and not hasattr(value, "__getitem__") 79 | ) 80 | 81 | 82 | def iterate(mapping): 83 | """ 84 | Attempt to iterate over `mapping` such that key-values pairs are yielded per iteration. For 85 | dictionaries and other mappings, this would be the keys and values. For lists and other 86 | sequences, this would be the indexes and values. For other non- standard object types, some 87 | duck-typing will be used: 88 | 89 | - If `mapping` has callable ``mapping.items()`` attribute, it will be used. 90 | - If `mapping` has callable ``mapping.keys()`` and ``__getitem__`` attributes, then 91 | ``(key, mapping[key])`` will be used. 92 | - Otherwise, `iter(mapping)` will be returned. 93 | """ 94 | if isinstance(mapping, Mapping) or callable(getattr(mapping, "items", None)): 95 | return mapping.items() 96 | 97 | if isinstance(mapping, Sequence): 98 | return enumerate(mapping) 99 | 100 | if callable(getattr(mapping, "keys", None)) and hasattr(mapping, "__getitem__"): 101 | return ((key, mapping[key]) for key in mapping.keys()) 102 | 103 | return iter(mapping) 104 | -------------------------------------------------------------------------------- /src/fnc/mappings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functions that operate on mappings. 3 | 4 | A mapping includes dictionaries, lists, strings, ``collections.abc.Mapping`` and 5 | ``collections.abc.Sequence`` subclasses, and other mapping-like objects that either have an 6 | ``items()`` method, have ``keys()`` and ``__getitem__`` methods, or have an ``__iter__()`` method. 7 | For functions that use :func:`get`, non-mapping object values can be selected from class attributes. 8 | """ 9 | 10 | from collections.abc import Mapping, Sequence 11 | 12 | import fnc 13 | 14 | from .helpers import UNSET, Sentinel, iterate 15 | 16 | 17 | def at(paths, obj): 18 | """ 19 | Creates a ``tuple`` of elements from `obj` at the given `paths`. 20 | 21 | Examples: 22 | >>> at(['a', 'c'], {'a': 1, 'b': 2, 'c': 3, 'd': 4}) 23 | (1, 3) 24 | >>> at(['a', ['c', 'd', 'e']], {'a': 1, 'b': 2, 'c': {'d': {'e': 3}}}) 25 | (1, 3) 26 | >>> at(['a', 'c.d.e[0]'], {'a': 1, 'b': 2, 'c': {'d': {'e': [3]}}}) 27 | (1, 3) 28 | >>> at([0, 2], [1, 2, 3, 4]) 29 | (1, 3) 30 | 31 | Args: 32 | paths (Iterable): The object paths to pick. 
33 | obj (Iterable): Iterable to pick from. 34 | 35 | Returns: 36 | tuple 37 | """ 38 | return tuple(get(path, obj) for path in paths) 39 | 40 | 41 | def defaults(*objs): 42 | """ 43 | Create a ``dict`` extended with the key-values from the provided dictionaries such that keys are 44 | set once and not overridden by subsequent dictionaries. 45 | 46 | Examples: 47 | >>> obj = defaults({'a': 1}, {'b': 2}, {'c': 3, 'b': 5}, {'a': 4, 'c': 2}) 48 | >>> obj == {'a': 1, 'b': 2, 'c': 3} 49 | True 50 | 51 | Args: 52 | *objs (dict): Dictionary sources. 53 | 54 | Returns: 55 | dict 56 | """ 57 | return merge(*reversed(objs)) 58 | 59 | 60 | def get(path, obj, *, default=None): 61 | """ 62 | Get the `path` value at any depth of an object. If path doesn't exist, `default` is returned. 63 | 64 | Examples: 65 | >>> get('a.b.c', {}) is None 66 | True 67 | >>> get('a.b.c[1]', {'a': {'b': {'c': [1, 2, 3, 4]}}}) 68 | 2 69 | >>> get('a.b.c.1', {'a': {'b': {'c': [1, 2, 3, 4]}}}) 70 | 2 71 | >>> get('a.b.1.c[1]', {'a': {'b': [0, {'c': [1, 2]}]}}) 72 | 2 73 | >>> get(['a', 'b', 1, 'c', 1], {'a': {'b': [0, {'c': [1, 2]}]}}) 74 | 2 75 | >>> get('a.b.1.c.2', {'a': {'b': [0, {'c': [1, 2]}]}}, default=False) 76 | False 77 | 78 | Args: 79 | path (object): Path to test for. Can be a key value, list of keys, or a 80 | ``.`` delimited path-string. 81 | obj (Mapping): Object to process. 82 | default (mixed): Default value to return if path doesn't exist. 83 | Defaults to ``None``. 84 | 85 | Returns: 86 | object: Value of `obj` at path. 87 | """ 88 | if default is UNSET: 89 | # When NotSet given for default, then this method will raise if path is 90 | # not present in obj. 91 | sentinel = UNSET 92 | else: 93 | # When a returnable default is given, use a sentinel value to detect 94 | # when _get() returns a default value for a missing path so we can exit 95 | # early from the loop and not mistakenly iterate over the default. 
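        # The sentinel never leaks to callers: as soon as _get() returns it, the
        # loop below swaps in `default` and exits.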
96 | sentinel = Sentinel 97 | 98 | result = obj 99 | for key in fnc.aspath(path): 100 | result = _get(key, result, default=sentinel) 101 | 102 | if result is sentinel: 103 | result = default 104 | break 105 | 106 | return result 107 | 108 | 109 | def _get(key, obj, *, default=UNSET): 110 | if isinstance(obj, dict): 111 | value = _get_dict(key, obj, default=default) 112 | elif not isinstance(obj, (Mapping, Sequence)) or isinstance(obj, tuple): 113 | value = _get_obj(key, obj, default=default) 114 | else: 115 | value = _get_item(key, obj, default=default) 116 | 117 | if value is UNSET: 118 | raise KeyError(f"Key {key!r} not found in {obj!r}") 119 | 120 | return value 121 | 122 | 123 | def _get_dict(key, obj, *, default=UNSET): 124 | value = obj.get(key, UNSET) 125 | if value is UNSET: 126 | value = default 127 | if not isinstance(key, int): 128 | try: 129 | value = obj.get(int(key), default) 130 | except Exception: 131 | pass 132 | return value 133 | 134 | 135 | def _get_item(key, obj, *, default=UNSET): 136 | try: 137 | return obj[key] 138 | except (KeyError, TypeError, IndexError): 139 | pass 140 | 141 | if not isinstance(key, int): 142 | try: 143 | return obj[int(key)] 144 | except (KeyError, TypeError, IndexError, ValueError): 145 | pass 146 | 147 | return default 148 | 149 | 150 | def _get_obj(key, obj, *, default=UNSET): 151 | value = _get_item(key, obj, default=UNSET) 152 | if value is UNSET: 153 | value = default 154 | try: 155 | value = getattr(obj, key) 156 | except AttributeError: 157 | pass 158 | return value 159 | 160 | 161 | def has(path, obj): 162 | """ 163 | Return whether `path` exists in `obj`. 164 | 165 | Examples: 166 | >>> has(1, [1, 2, 3]) 167 | True 168 | >>> has('b', {'a': 1, 'b': 2}) 169 | True 170 | >>> has('c', {'a': 1, 'b': 2}) 171 | False 172 | >>> has('a.b[1].c[1]', {'a': {'b': [0, {'c': [1, 2]}]}}) 173 | True 174 | >>> has('a.b.1.c.2', {'a': {'b': [0, {'c': [1, 2]}]}}) 175 | False 176 | 177 | Args: 178 | path (object): Path to test for. Can be a key value, list of keys, or a 179 | ``.`` delimited path-string. 180 | obj (Iterable): Object to test. 181 | 182 | Returns: 183 | bool: Whether `obj` has `path`. 184 | """ 185 | try: 186 | get(path, obj, default=UNSET) 187 | return True 188 | except KeyError: 189 | return False 190 | 191 | 192 | def invert(obj): 193 | """ 194 | Return a ``dict`` composed of the inverted keys and values of the given dictionary. 195 | 196 | Note: 197 | It's assumed that `obj` values are hashable as ``dict`` keys. 198 | 199 | Examples: 200 | >>> result = invert({'a': 1, 'b': 2, 'c': 3}) 201 | >>> result == {1: 'a', 2: 'b', 3: 'c'} 202 | True 203 | 204 | Args: 205 | obj (Mapping): Mapping to invert. 206 | 207 | Returns: 208 | dict: Inverted dictionary. 209 | """ 210 | return {value: key for key, value in iterate(obj)} 211 | 212 | 213 | def mapkeys(iteratee, obj): 214 | """ 215 | Return a ``dict`` with keys from `obj` mapped with `iteratee` while containing the same values. 216 | 217 | Examples: 218 | >>> result = mapkeys(lambda k: k * 2, {'a': 1, 'b': 2, 'c': 3}) 219 | >>> result == {'aa': 1, 'bb': 2, 'cc': 3} 220 | True 221 | 222 | Args: 223 | iteratee (object): Iteratee applied to each key. 224 | obj (Mapping): Mapping to map. 225 | 226 | Returns: 227 | dict: Dictionary with mapped keys. 
228 | """ 229 | iteratee = fnc.iteratee(iteratee) 230 | return {iteratee(key): value for key, value in iterate(obj)} 231 | 232 | 233 | def mapvalues(iteratee, obj): 234 | """ 235 | Return a ``dict`` with values from `obj` mapped with `iteratee` while containing the same keys. 236 | 237 | Examples: 238 | >>> result = mapvalues(lambda v: v * 2, {'a': 1, 'b': 2, 'c': 3}) 239 | >>> result == {'a': 2, 'b': 4, 'c': 6} 240 | True 241 | >>> result = mapvalues({'d': 4}, {'a': 1, 'b': {'d': 4}, 'c': 3}) 242 | >>> result == {'a': False, 'b': True, 'c': False} 243 | True 244 | 245 | Args: 246 | iteratee (object): Iteratee applied to each key. 247 | obj (Mapping): Mapping to map. 248 | 249 | Returns: 250 | dict: Dictionary with mapped values. 251 | """ 252 | iteratee = fnc.iteratee(iteratee) 253 | return {key: iteratee(value) for key, value in iterate(obj)} 254 | 255 | 256 | def merge(*objs): 257 | """ 258 | Create a ``dict`` merged with the key-values from the provided dictionaries such that each next 259 | dictionary extends the previous results. 260 | 261 | Examples: 262 | >>> item = merge({'a': 0}, {'b': 1}, {'b': 2, 'c': 3}, {'a': 1}) 263 | >>> item == {'a': 1, 'b': 2, 'c': 3} 264 | True 265 | 266 | Args: 267 | *objs (dict): Dictionary sources. 268 | 269 | Returns: 270 | dict 271 | """ 272 | result = {} 273 | for obj in objs: 274 | result.update(obj) 275 | return result 276 | 277 | 278 | def omit(keys, obj): 279 | """ 280 | The opposite of :func:`pick`. This method creates an object composed of the property paths of 281 | `obj` that are not omitted. 282 | 283 | Examples: 284 | >>> omit(['a', 'c'], {'a': 1, 'b': 2, 'c': 3 }) == {'b': 2} 285 | True 286 | >>> omit([0, 3], ['a', 'b', 'c', 'd']) == {1: 'b', 2: 'c'} 287 | True 288 | 289 | Args: 290 | keys (Iterable): Keys to omit. 291 | obj (Iterable): Object to process. 292 | 293 | Returns: 294 | dict: Dictionary with `keys` omitted. 295 | """ 296 | return {key: value for key, value in iterate(obj) if key not in keys} 297 | 298 | 299 | def pick(keys, obj): 300 | """ 301 | Create a ``dict`` composed of the picked `keys` from `obj`. 302 | 303 | Examples: 304 | >>> pick(['a', 'b'], {'a': 1, 'b': 2, 'c': 3}) == {'a': 1, 'b': 2} 305 | True 306 | >>> pick(['a', 'b'], {'b': 2}) == {'b': 2} 307 | True 308 | 309 | Args: 310 | keys (Iterable): Keys to omit. 311 | obj (Iterable): Object to process. 312 | 313 | Returns: 314 | dict: Dict containg picked properties. 315 | """ 316 | result = {} 317 | for key in keys: 318 | value = _get(key, obj, default=Sentinel) 319 | if value is not Sentinel: 320 | result[key] = value 321 | return result 322 | -------------------------------------------------------------------------------- /src/fnc/sequences.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functions that operate on sequences. 3 | 4 | Most of these functions return generators so that they will be more efficient at processing large 5 | datasets. All generator functions will have a ``Yields`` section in their docstring to easily 6 | identify them as generators. Otherwise, functions that return concrete values with have a 7 | ``Returns`` section instead. 
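
Because the results are lazy, wrap them in ``list`` (or ``tuple``) when a realized sequence is
needed. For example::

    fnc.compact([0, 1, '', 'a'])        # returns a generator
    list(fnc.compact([0, 1, '', 'a']))  # [1, 'a']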
8 | """ 9 | 10 | from collections import Counter, deque 11 | from functools import partial 12 | import itertools 13 | from operator import not_ 14 | 15 | import fnc 16 | 17 | from .helpers import Container, iscollection, isgenerator 18 | 19 | 20 | _filter = filter # pylint: disable=used-before-assignment 21 | _map = map # pylint: disable=used-before-assignment 22 | 23 | 24 | def chunk(size, seq): 25 | """ 26 | Split elements of `seq` into chunks with length `size` and yield each chunk. 27 | 28 | Examples: 29 | >>> list(chunk(2, [1, 2, 3, 4, 5])) 30 | [[1, 2], [3, 4], [5]] 31 | 32 | Args: 33 | seq (Iterable): Iterable to chunk. 34 | size (int, optional): Chunk size. Defaults to ``1``. 35 | 36 | Yields: 37 | list: Chunked groups. 38 | """ 39 | if not isinstance(size, int) or size <= 0: # pragma: no cover 40 | raise ValueError("size must be an integer greater than zero") 41 | 42 | group = [] 43 | 44 | for item in seq: 45 | if len(group) >= size: 46 | yield group 47 | group = [] 48 | group.append(item) 49 | 50 | if group: 51 | yield group 52 | 53 | 54 | def compact(seq): 55 | """ 56 | Exclude elements from `seq` that are falsey. 57 | 58 | Examples: 59 | >>> list(compact(['', 1, 0, True, False, None])) 60 | [1, True] 61 | 62 | Args: 63 | seq (Iterable): Iterable to compact. 64 | 65 | Yields: 66 | Elements that are truthy. 67 | """ 68 | for item in seq: 69 | if item: 70 | yield item 71 | 72 | 73 | def concat(*seqs): 74 | """ 75 | Concatenates zero or more iterables into a single iterable. 76 | 77 | Examples: 78 | >>> list(concat([1, 2], [3, 4], [[5], [6]])) 79 | [1, 2, 3, 4, [5], [6]] 80 | 81 | Args: 82 | *seqs (Iterable): Iterables to concatenate. 83 | 84 | Yields: 85 | Each element from all iterables. 86 | """ 87 | return itertools.chain.from_iterable(seqs) 88 | 89 | 90 | def countby(iteratee, seq): 91 | """ 92 | Return a ``dict`` composed of keys generated from the results of running each element of `seq` 93 | through the `iteratee`. 94 | 95 | Examples: 96 | >>> result = countby(None, [1, 2, 1, 2, 3, 4]) 97 | >>> result == {1: 2, 2: 2, 3: 1, 4: 1} 98 | True 99 | >>> result = countby(lambda x: x.lower(), ['a', 'A', 'B', 'b']) 100 | >>> result == {'a': 2, 'b': 2} 101 | True 102 | >>> result = countby('a', [{'a': 'x'}, {'a': 'x'}, {'a': 'y'}]) 103 | >>> result == {'x': 2, 'y': 1} 104 | True 105 | 106 | Args: 107 | iteratee (object): Iteratee applied per iteration. 108 | seq (Iterable): Iterable to iterate over. 109 | 110 | Returns: 111 | dict 112 | """ 113 | return dict(Counter(map(iteratee, seq))) 114 | 115 | 116 | def difference(seq, *seqs): 117 | """ 118 | Yields elements from `seq` that are not in `seqs`. 119 | 120 | Note: 121 | This function is like ``set.difference()`` except it works with both hashable 122 | and unhashable values and preserves the ordering of the original iterables. 123 | 124 | Examples: 125 | >>> list(difference([1, 2, 3], [1], [2])) 126 | [3] 127 | >>> list(difference([1, 4, 2, 3, 5, 0], [1], [2, 0])) 128 | [4, 3, 5] 129 | >>> list(difference([1, 3, 4, 1, 2, 4], [1, 4])) 130 | [3, 2] 131 | 132 | Args: 133 | seq (Iterable): Iterable to compute difference against. 134 | *seqs (Iterable): Other iterables to compare with. 135 | 136 | Yields: 137 | Each element in `seq` that doesn't appear in `seqs`. 138 | """ 139 | yield from differenceby(None, seq, *seqs) 140 | 141 | 142 | def differenceby(iteratee, seq, *seqs): 143 | """ 144 | Like :func:`difference` except that an `iteratee` is used to modify each element in the 145 | sequences. 
The modified values are then used for comparison. 146 | 147 | Note: 148 | This function is like ``set.difference()`` except it works with both hashable 149 | and unhashable values and preserves the ordering of the original iterables. 150 | 151 | Examples: 152 | >>> list(differenceby('a', [{'a': 1}, {'a': 2}, {'a': 3}], [{'a': 1}], [{'a': 2}])) 153 | [{'a': 3}] 154 | >>> list(differenceby(lambda x: x % 4, [1, 4, 2, 3, 5, 0], [1], [2, 0])) 155 | [3] 156 | 157 | Args: 158 | iteratee (object): Iteratee applied per iteration. 159 | seq (Iterable): Iterable to compute difference against. 160 | *seqs (Iterable): Other iterables to compare with. 161 | 162 | Yields: 163 | Each element in `seq` that doesn't appear in `seqs`. 164 | """ 165 | if not seqs: 166 | yield from unionby(iteratee, seq) 167 | return 168 | 169 | if iteratee is not None: 170 | iteratee = fnc.iteratee(iteratee) 171 | 172 | yielded = Container() 173 | # Concat sequences into a single sequence and map iteratee to each item so that the 174 | # computed value only needs to be done once for each item since that is what we'll 175 | # compare to below. We'll store these values into a iterable in case any of the 176 | # sequences are a generator/iterator that would get exhausted if we tried to iterate 177 | # over it more than once. 178 | others = Container(map(iteratee, concat(*seqs))) 179 | 180 | for item in seq: 181 | if iteratee is not None: 182 | value = iteratee(item) 183 | else: 184 | value = item 185 | 186 | if value in yielded or value in others: 187 | continue 188 | 189 | yield item 190 | yielded.add(value) 191 | 192 | 193 | def duplicates(seq, *seqs): 194 | """ 195 | Yields unique elements from sequences that are repeated one or more times. 196 | 197 | Note: 198 | The order of yielded elements depends on when the second duplicated 199 | element is found and not when the element first appeared. 200 | 201 | Examples: 202 | >>> list(duplicates([0, 1, 3, 2, 3, 1])) 203 | [3, 1] 204 | >>> list(duplicates([0, 1], [3, 2], [3, 1])) 205 | [3, 1] 206 | 207 | Args: 208 | seq (Iterable): Iterable to check for duplicates. 209 | *seqs (Iterable): Other iterables to compare with. 210 | 211 | Yields: 212 | Duplicated elements. 213 | """ 214 | yield from duplicatesby(None, seq, *seqs) 215 | 216 | 217 | def duplicatesby(iteratee, seq, *seqs): 218 | """ 219 | Like :func:`duplicates` except that an `iteratee` is used to modify each element in the 220 | sequences. The modified values are then used for comparison. 221 | 222 | Examples: 223 | >>> list(duplicatesby('a', [{'a':1}, {'a':3}, {'a':2}, {'a':3}, {'a':1}])) 224 | [{'a': 3}, {'a': 1}] 225 | 226 | Args: 227 | iteratee (object): Iteratee applied per iteration. 228 | seq (Iterable): Iterable to check for duplicates 229 | *seqs (Iterable): Other iterables to compare with. 230 | 231 | Yields: 232 | Each element in `seq` that doesn't appear in `seqs`. 233 | """ 234 | if iteratee is not None: 235 | iteratee = fnc.iteratee(iteratee) 236 | 237 | seen = Container() 238 | yielded = Container() 239 | 240 | for item in itertools.chain(seq, *seqs): 241 | if iteratee is not None: 242 | value = iteratee(item) 243 | else: 244 | value = item 245 | 246 | if value not in seen: 247 | seen.add(value) 248 | continue 249 | 250 | if value not in yielded: 251 | yield item 252 | yielded.add(value) 253 | 254 | 255 | def filter(iteratee, seq): 256 | """ 257 | Filter `seq` by `iteratee`, yielding only the elements that the iteratee returns truthy for. 
258 | 259 | Note: 260 | This function is like the builtin ``filter`` except it converts `iteratee` into 261 | a fnc-style predicate. 262 | 263 | Examples: 264 | >>> result = filter({'a': 1}, [{'a': 1}, {'b': 2}, {'a': 1, 'b': 3}]) 265 | >>> list(result) == [{'a': 1}, {'a': 1, 'b': 3}] 266 | True 267 | >>> list(filter(lambda x: x >= 3, [1, 2, 3, 4])) 268 | [3, 4] 269 | 270 | Args: 271 | iteratee (object): Iteratee applied per iteration. 272 | seq (Iterable): Iterable to filter. 273 | 274 | Yields: 275 | Filtered elements. 276 | """ 277 | return _filter(fnc.iteratee(iteratee), seq) 278 | 279 | 280 | def find(iteratee, seq): 281 | """ 282 | Iterates over elements of `seq`, returning the first element that the iteratee returns truthy 283 | for. 284 | 285 | Examples: 286 | >>> find(lambda x: x >= 3, [1, 2, 3, 4]) 287 | 3 288 | >>> find(lambda x: x >= 5, [1, 2, 3, 4]) is None 289 | True 290 | >>> find({'a': 1}, [{'a': 1}, {'b': 2}, {'a': 1, 'b': 2}]) 291 | {'a': 1} 292 | >>> result = find({'a': 1}, [{'b': 2}, {'a': 1, 'b': 2}, {'a': 1}]) 293 | >>> result == {'a': 1, 'b': 2} 294 | True 295 | 296 | Args: 297 | iteratee (object): Iteratee applied per iteration. 298 | seq (Iterable): Iterable to iterate over. 299 | 300 | Returns: 301 | First element found or ``None``. 302 | """ 303 | for item in filter(iteratee, seq): 304 | return item 305 | 306 | 307 | def findindex(iteratee, seq): 308 | """ 309 | Return the index of the element in `seq` that returns ``True`` for `iteratee`. If no match is 310 | found, ``-1`` is returned. 311 | 312 | Examples: 313 | >>> findindex(lambda x: x >= 3, [1, 2, 3, 4]) 314 | 2 315 | >>> findindex(lambda x: x > 4, [1, 2, 3, 4]) 316 | -1 317 | 318 | Args: 319 | iteratee (object): Iteratee applied per iteration. 320 | seq (Iterable): Iterable to process. 321 | 322 | Returns: 323 | int: Index of found item or ``-1`` if not found. 324 | """ 325 | iteratee = fnc.iteratee(iteratee) 326 | return next((i for i, value in enumerate(seq) if iteratee(value)), -1) 327 | 328 | 329 | def findlast(iteratee, seq): 330 | """ 331 | This function is like :func:`find` except it iterates over elements of `seq` from right to left. 332 | 333 | Examples: 334 | >>> findlast(lambda x: x >= 3, [1, 2, 3, 4]) 335 | 4 336 | >>> findlast(lambda x: x >= 5, [1, 2, 3, 4]) is None 337 | True 338 | >>> result = findlast({'a': 1}, [{'a': 1}, {'b': 2}, {'a': 1, 'b': 2}]) 339 | >>> result == {'a': 1, 'b': 2} 340 | True 341 | 342 | Args: 343 | iteratee (object): Iteratee applied per iteration. 344 | seq (Iterable): Iterable to iterate over. 345 | 346 | Returns: 347 | Last element found or ``None``. 348 | """ 349 | return find(iteratee, reversed(seq)) 350 | 351 | 352 | def findlastindex(iteratee, seq): 353 | """ 354 | Return the index of the element in `seq` that returns ``True`` for `iteratee`. If no match is 355 | found, ``-1`` is returned. 356 | 357 | Examples: 358 | >>> findlastindex(lambda x: x >= 3, [1, 2, 3, 4]) 359 | 3 360 | >>> findlastindex(lambda x: x > 4, [1, 2, 3, 4]) 361 | -1 362 | 363 | Args: 364 | iteratee (object): Iteratee applied per iteration. 365 | seq (Iterable): Iterable to process. 366 | 367 | Returns: 368 | int: Index of found item or ``-1`` if not found. 369 | """ 370 | iteratee = fnc.iteratee(iteratee) 371 | return next((i for i, value in reversed(tuple(enumerate(seq))) if iteratee(value)), -1) 372 | 373 | 374 | def flatten(*seqs): 375 | """ 376 | Flatten iterables a single level deep. 
377 | 378 | Examples: 379 | >>> list(flatten([[1], [2, [3]], [[4]]])) 380 | [1, 2, [3], [4]] 381 | >>> list(flatten([[1], [2, [3]], [[4]]], [5, [6, 7]])) 382 | [1, 2, [3], [4], 5, 6, 7] 383 | 384 | Args: 385 | *seqs (Iterables): Iterables to flatten. 386 | 387 | Yields: 388 | Eelements from the flattened iterable. 389 | """ 390 | for item in itertools.chain.from_iterable(seqs): 391 | if iscollection(item): 392 | yield from item 393 | else: 394 | yield item 395 | 396 | 397 | def flattendeep(*seqs): 398 | """ 399 | Recursively flatten iterables. 400 | 401 | Examples: 402 | >>> list(flattendeep([[1], [2, [3]], [[4]]])) 403 | [1, 2, 3, 4] 404 | >>> list(flattendeep([[1], [2, [3]], [[4]]], [5, [6, 7]])) 405 | [1, 2, 3, 4, 5, 6, 7] 406 | >>> list(flattendeep([[1], [2, [3]], [[4]]], [5, [[[[6, [[[7]]]]]]]])) 407 | [1, 2, 3, 4, 5, 6, 7] 408 | 409 | Args: 410 | *seqs (Iterables): Iterables to flatten. 411 | 412 | Yields: 413 | Flattened elements. 414 | """ 415 | for item in itertools.chain.from_iterable(seqs): 416 | if iscollection(item): 417 | yield from flattendeep(item) 418 | else: 419 | yield item 420 | 421 | 422 | def groupall(iteratees, seq): 423 | """ 424 | This function is like :func:`groupby` except it supports nested grouping by multiple iteratees. 425 | If only a single iteratee is given, it is like calling :func:`groupby`. 426 | 427 | Examples: 428 | >>> result = groupall( 429 | ... ['shape', 'qty'], 430 | ... [ 431 | ... {'shape': 'square', 'color': 'red', 'qty': 5}, 432 | ... {'shape': 'square', 'color': 'blue', 'qty': 10}, 433 | ... {'shape': 'square', 'color': 'orange', 'qty': 5}, 434 | ... {'shape': 'circle', 'color': 'yellow', 'qty': 5}, 435 | ... {'shape': 'circle', 'color': 'pink', 'qty': 10}, 436 | ... {'shape': 'oval', 'color': 'purple', 'qty': 5} 437 | ... ] 438 | ... ) 439 | >>> expected = { 440 | ... 'square': { 441 | ... 5: [ 442 | ... {'shape': 'square', 'color': 'red', 'qty': 5}, 443 | ... {'shape': 'square', 'color': 'orange', 'qty': 5} 444 | ... ], 445 | ... 10: [{'shape': 'square', 'color': 'blue', 'qty': 10}] 446 | ... }, 447 | ... 'circle': { 448 | ... 5: [{'shape': 'circle', 'color': 'yellow', 'qty': 5}], 449 | ... 10: [{'shape': 'circle', 'color': 'pink', 'qty': 10}] 450 | ... }, 451 | ... 'oval': { 452 | ... 5: [{'shape': 'oval', 'color': 'purple', 'qty': 5}] 453 | ... } 454 | ... } 455 | >>> result == expected 456 | True 457 | 458 | Args: 459 | iteratees (Iterable): Iteratees to group by. 460 | seq (Iterable): Iterable to iterate over. 461 | 462 | Returns: 463 | dict: Results of recursively grouping by all `iteratees`. 464 | """ 465 | if not iteratees: 466 | return seq 467 | 468 | head, *rest = iteratees 469 | 470 | return fnc.mapvalues(partial(groupall, rest), groupby(head, seq)) 471 | 472 | 473 | def groupby(iteratee, seq): 474 | """ 475 | Return a ``dict`` composed of keys generated from the results of running each element of `seq` 476 | through the `iteratee`. 477 | 478 | Examples: 479 | >>> result = groupby('a', [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]) 480 | >>> result == {1: [{'a': 1, 'b': 2}], 3: [{'a': 3, 'b': 4}]} 481 | True 482 | >>> result = groupby({'a': 1}, [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]) 483 | >>> result == {False: [{'a': 3, 'b': 4}], True: [{'a': 1, 'b': 2}]} 484 | True 485 | 486 | Args: 487 | iteratee (object): Iteratee applied per iteration. 488 | seq (Iterable): Iterable to iterate over. 489 | 490 | Returns: 491 | dict: Results of grouping by `iteratee`. 
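
    For example, any callable also works as the iteratee::

        groupby(lambda x: x % 2 == 0, [1, 2, 3, 4])
        # -> {False: [1, 3], True: [2, 4]}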
492 | """ 493 | result = {} 494 | iteratee = fnc.iteratee(iteratee) 495 | 496 | for item in seq: 497 | result.setdefault(iteratee(item), []).append(item) 498 | 499 | return result 500 | 501 | 502 | def intercalate(value, seq): 503 | """ 504 | Insert `value` between each element in `seq` and concatenate the results. 505 | 506 | Examples: 507 | >>> list(intercalate('x', [1, [2], [3], 4])) 508 | [1, 'x', 2, 'x', 3, 'x', 4] 509 | >>> list(intercalate(', ', ['Lorem', 'ipsum', 'dolor'])) 510 | ['Lorem', ', ', 'ipsum', ', ', 'dolor'] 511 | >>> ''.join(intercalate(', ', ['Lorem', 'ipsum', 'dolor'])) 512 | 'Lorem, ipsum, dolor' 513 | >>> list(intercalate([0,0,0], [[1,2,3],[4,5,6],[7,8,9]])) 514 | [1, 2, 3, 0, 0, 0, 4, 5, 6, 0, 0, 0, 7, 8, 9] 515 | 516 | Args: 517 | value (object): Element to insert. 518 | seq (Iterable): Iterable to intercalate. 519 | 520 | Yields: 521 | Elements of the intercalated iterable. 522 | """ 523 | return flatten(intersperse(value, seq)) 524 | 525 | 526 | def interleave(*seqs): 527 | """ 528 | Merge multiple iterables into a single iterable by inserting the next element from each iterable 529 | by sequential round-robin. 530 | 531 | Examples: 532 | >>> list(interleave([1, 2, 3], [4, 5, 6], [7, 8, 9])) 533 | [1, 4, 7, 2, 5, 8, 3, 6, 9] 534 | 535 | Args: 536 | *seqs (Iterable): Iterables to interleave. 537 | 538 | Yields: 539 | Elements of the interleaved iterable. 540 | """ 541 | queue = deque(iter(seq) for seq in seqs) 542 | 543 | while queue: 544 | seq = queue.popleft() 545 | 546 | try: 547 | yield next(seq) 548 | except StopIteration: 549 | pass 550 | else: 551 | queue.append(seq) 552 | 553 | 554 | def intersection(seq, *seqs): 555 | """ 556 | Computes the intersection of all the passed-in iterables. 557 | 558 | Note: 559 | This function is like ``set.intersection()`` except it works with both hashable 560 | and unhashable values and preserves the ordering of the original iterables. 561 | 562 | Examples: 563 | >>> list(intersection([1, 2, 3], [1, 2, 3, 4, 5], [2, 3])) 564 | [2, 3] 565 | >>> list(intersection([1, 2, 3])) 566 | [1, 2, 3] 567 | 568 | Args: 569 | seq (Iterable): Iterable to compute intersection against. 570 | *seqs (Iterable): Other iterables to compare with. 571 | 572 | Yields: 573 | Elements that itersect. 574 | """ 575 | yield from intersectionby(None, seq, *seqs) 576 | 577 | 578 | def intersectionby(iteratee, seq, *seqs): 579 | """ 580 | Like :func:`intersection` except that an `iteratee` is used to modify each element in the 581 | sequences. The modified values are then used for comparison. 582 | 583 | Note: 584 | This function is like ``set.intersection()`` except it works with both hashable 585 | and unhashable values and preserves the ordering of the original iterables. 586 | 587 | Examples: 588 | >>> list(intersectionby( 589 | ... 'a', 590 | ... [{'a': 1}, {'a': 2}, {'a': 3}], 591 | ... [{'a': 1}, {'a': 2}, {'a': 3}, {'a': 4}, {'a': 5}], 592 | ... [{'a': 2}, {'a': 3}] 593 | ... )) 594 | [{'a': 2}, {'a': 3}] 595 | 596 | Args: 597 | iteratee (object): Iteratee applied per iteration. 598 | seq (Iterable): Iterable to compute intersection against. 599 | *seqs (Iterable): Other iterables to compare with. 600 | 601 | Yields: 602 | Elements that intersect. 
603 | """ 604 | if not seqs: 605 | yield from unionby(iteratee, seq) 606 | return 607 | 608 | if iteratee is not None: 609 | iteratee = fnc.iteratee(iteratee) 610 | 611 | yielded = Container() 612 | # Map iteratee to each item in each other sequence and compute intersection of those 613 | # values to reduce number of times iteratee is called. The resulting sequence will 614 | # be an intersection of computed values which will be used to compare to the primary 615 | # sequence. We'll store these values into a iterable in case any of the sequences 616 | # are a generator/iterator that would get exhausted if we tried to iterate over it 617 | # more than once. 618 | others = Container(intersection(*(map(iteratee, other) for other in seqs))) 619 | 620 | for item in seq: 621 | if iteratee is not None: 622 | value = iteratee(item) 623 | else: 624 | value = item 625 | 626 | if value in yielded: 627 | continue 628 | 629 | if value in others: 630 | yield item 631 | yielded.add(value) 632 | 633 | 634 | def intersperse(value, seq): 635 | """ 636 | Insert a separating element between each element in `seq`. 637 | 638 | Examples: 639 | >>> list(intersperse('x', [1, [2], [3], 4])) 640 | [1, 'x', [2], 'x', [3], 'x', 4] 641 | 642 | Args: 643 | value (object): Element to insert. 644 | seq (Iterable): Iterable to intersperse. 645 | 646 | Yields: 647 | Elements of the interspersed iterable. 648 | """ 649 | seq = iter(seq) 650 | 651 | try: 652 | yield next(seq) 653 | except StopIteration: 654 | return 655 | 656 | for item in seq: 657 | yield value 658 | yield item 659 | 660 | 661 | def keyby(iteratee, seq): 662 | """ 663 | Return a ``dict`` composed of keys generated from the results of running each element of `seq` 664 | through the `iteratee`. 665 | 666 | Examples: 667 | >>> results = keyby('a', [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]) 668 | >>> results == {1: {'a': 1, 'b': 2}, 3: {'a': 3, 'b': 4}} 669 | True 670 | 671 | Args: 672 | iteratee (object): Iteratee applied per iteration. 673 | seq (Iterable): Iterable to iterate over. 674 | 675 | Returns: 676 | dict: Results of indexing by `iteratee`. 677 | """ 678 | iteratee = fnc.iteratee(iteratee) 679 | return {iteratee(value): value for value in seq} 680 | 681 | 682 | def map(iteratee, *seqs): 683 | """ 684 | Map `iteratee` to each element of iterable and yield the results. If additional iterable 685 | arguments are passed, `iteratee` must take that many arguments and is applied to the items from 686 | all iterables in parallel. 687 | 688 | Note: 689 | This function is like the builtin ``map`` except it converts `iteratee` into a 690 | fnc-style predicate. 691 | 692 | Examples: 693 | >>> list(map(str, [1, 2, 3, 4])) 694 | ['1', '2', '3', '4'] 695 | >>> list(map('a', [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}, {'a': 5, 'b': 6}])) 696 | [1, 3, 5] 697 | >>> list(map('0.1', [[[0, 1]], [[2, 3]], [[4, 5]]])) 698 | [1, 3, 5] 699 | >>> list(map('a.b', [{'a': {'b': 1}}, {'a': {'b': 2}}])) 700 | [1, 2] 701 | >>> list(map('a.b[1]', [{'a': {'b': [0, 1]}}, {'a': {'b': [2, 3]}}])) 702 | [1, 3] 703 | 704 | Args: 705 | iteratee (object): Iteratee applied per iteration. 706 | *seqs (Iterable): Iterables to map. 707 | 708 | Yields: 709 | Mapped elements. 710 | """ 711 | return _map(fnc.iteratee(iteratee), *seqs) 712 | 713 | 714 | def mapcat(iteratee, *seqs): 715 | """ 716 | Map an `iteratee` to each element of each iterable in `seqs` and concatenate the results into a 717 | single iterable. 
718 | 719 | Examples: 720 | >>> list(mapcat(lambda x: list(range(x)), range(4))) 721 | [0, 0, 1, 0, 1, 2] 722 | 723 | Args: 724 | iteratee (object): Iteratee to apply to each element. 725 | *seqs (Iterable): Iterable to map and concatenate. 726 | 727 | Yields: 728 | Elements resulting from concat + map operations. 729 | """ 730 | return concat(*map(iteratee, *seqs)) 731 | 732 | 733 | def mapflat(iteratee, *seqs): 734 | """ 735 | Map an `iteratee` to each element of each iterable in `seqs` and flatten the results. 736 | 737 | Examples: 738 | >>> list(mapflat(lambda n: [[n, n]], [1, 2])) 739 | [[1, 1], [2, 2]] 740 | 741 | Args: 742 | iteratee (object): Iteratee applied per iteration. 743 | *seqs (Iterable): Iterables to iterate over. 744 | 745 | Yields: 746 | Elements result from flatten + map operations. 747 | """ 748 | return flatten(map(iteratee, *seqs)) 749 | 750 | 751 | def mapflatdeep(iteratee, *seqs): 752 | """ 753 | Map an `iteratee` to each element of each iterable in `seqs` and recurisvely flatten the 754 | results. 755 | 756 | Examples: 757 | >>> list(mapflatdeep(lambda n: [[n, n]], [1, 2])) 758 | [1, 1, 2, 2] 759 | 760 | Args: 761 | iteratee (object): Iteratee applied per iteration. 762 | *seqs (Iterable): Iterables to iterate over. 763 | 764 | Yields: 765 | Elements result from recursive flatten + map operations. 766 | """ 767 | return flattendeep(map(iteratee, *seqs)) 768 | 769 | 770 | def partition(iteratee, seq): 771 | """ 772 | Return a ``tuple`` of 2 lists containing elements from `seq` split into two groups where the 773 | first group contains all elements the `iteratee` returned truthy for and the second group 774 | containing the falsey elements. 775 | 776 | Examples: 777 | >>> partition(lambda x: x % 2, [1, 2, 3, 4]) 778 | ([1, 3], [2, 4]) 779 | 780 | Args: 781 | iteratee (object): Iteratee applied per iteration. 782 | seq (Iterable): Iterable to iterate over. 783 | 784 | Returns: 785 | tuple[list] 786 | """ 787 | iteratee = fnc.iteratee(iteratee) 788 | successes = [] 789 | failures = [] 790 | 791 | for item in seq: 792 | if iteratee(item): 793 | successes.append(item) 794 | else: 795 | failures.append(item) 796 | 797 | return successes, failures 798 | 799 | 800 | def reject(iteratee, seq): 801 | """ 802 | The opposite of :func:`filter` this function yields the elements of `seq` that the `iteratee` 803 | returns falsey for. 804 | 805 | Examples: 806 | >>> list(reject(lambda x: x >= 3, [1, 2, 3, 4])) 807 | [1, 2] 808 | >>> list(reject('a', [{'a': 0}, {'a': 1}, {'a': 2}])) 809 | [{'a': 0}] 810 | >>> list(reject({'a': 1}, [{'a': 0}, {'a': 1}, {'a': 2}])) 811 | [{'a': 0}, {'a': 2}] 812 | 813 | Args: 814 | iteratee (object): Iteratee applied per iteration. 815 | seq (Iterable): Iterable to iterate over. 816 | 817 | Yields: 818 | Rejected elements. 819 | """ 820 | iteratee = fnc.iteratee(iteratee) 821 | return filter(fnc.compose(iteratee, not_), seq) 822 | 823 | 824 | def union(seq, *seqs): 825 | """ 826 | Computes the union of the passed-in iterables (sometimes referred to as ``unique``). 827 | 828 | Note: 829 | This function is like ``set.union()`` except it works with both hashable and 830 | unhashable values and preserves the ordering of the original iterables. 831 | 832 | Examples: 833 | >>> list(union([1, 2, 3, 1, 2, 3])) 834 | [1, 2, 3] 835 | >>> list(union([1, 2, 3], [2, 3, 4], [3, 4, 5])) 836 | [1, 2, 3, 4, 5] 837 | 838 | Args: 839 | seq (Iterable): Iterable to compute union against. 840 | *seqs (Iterable): Other iterables to compare with. 
841 | 842 | Yields: 843 | Each unique element from all iterables. 844 | """ 845 | yield from unionby(None, seq, *seqs) 846 | 847 | 848 | def unionby(iteratee, seq, *seqs): 849 | """ 850 | Like :func:`union` except that an `iteratee` is used to modify each element in the sequences. 851 | The modified values are then used for comparison. 852 | 853 | Note: 854 | This function is like ``set.union()`` except it works with both hashable and 855 | unhashable values and preserves the ordering of the original iterables. 856 | 857 | Examples: 858 | >>> list(unionby( 859 | ... 'a', 860 | ... [{'a': 1}, {'a': 2}, {'a': 3}, {'a': 1}, {'a': 2}, {'a': 3}] 861 | ... )) 862 | [{'a': 1}, {'a': 2}, {'a': 3}] 863 | 864 | Args: 865 | iteratee (object): Iteratee applied per iteration. 866 | seq (Iterable): Iterable to compute union against. 867 | *seqs (Iterable): Other iterables to compare with. 868 | 869 | Yields: 870 | Each unique element from all iterables. 871 | """ 872 | if iteratee is not None: 873 | iteratee = fnc.iteratee(iteratee) 874 | 875 | seen = Container() 876 | 877 | for item in itertools.chain(seq, *seqs): 878 | if iteratee is not None: 879 | value = iteratee(item) 880 | else: 881 | value = item 882 | 883 | if value not in seen: 884 | yield item 885 | 886 | seen.add(value) 887 | 888 | 889 | def unzip(seq): 890 | """ 891 | The inverse of the builtin ``zip`` function, this method transposes groups of elements into new 892 | groups composed of elements from each group at their corresponding indexes. 893 | 894 | Examples: 895 | >>> list(unzip([(1, 4, 7), (2, 5, 8), (3, 6, 9)])) 896 | [(1, 2, 3), (4, 5, 6), (7, 8, 9)] 897 | >>> list(unzip(unzip([(1, 4, 7), (2, 5, 8), (3, 6, 9)]))) 898 | [(1, 4, 7), (2, 5, 8), (3, 6, 9)] 899 | 900 | Args: 901 | seq (Iterable): Iterable to unzip. 902 | 903 | Yields: 904 | tuple: Each transposed group. 905 | """ 906 | return zip(*seq) 907 | 908 | 909 | def without(values, seq): 910 | """ 911 | Exclude elements in `seq` that are in `values`. 912 | 913 | Examples: 914 | >>> list(without([2, 4], [1, 2, 3, 2, 4, 4, 3])) 915 | [1, 3, 3] 916 | 917 | Args: 918 | values (mixed): Values to remove. 919 | seq (Iterable): List to filter. 920 | 921 | Yields: 922 | Elements not in `values`. 923 | """ 924 | for item in seq: 925 | if item not in values: 926 | yield item 927 | 928 | 929 | def xor(seq, *seqs): 930 | """ 931 | Computes the symmetric difference of the provided iterables where the elements are only in one 932 | of the iteralbes. 933 | 934 | Note: 935 | This function is like ``set.symmetric_difference()`` except it works with both 936 | hashable and unhashable values and preserves the ordering of the original 937 | iterables. 938 | 939 | Warning: 940 | While this function returns a generator object, internally it will create 941 | intermediate non-generator iterables which may or may not be a performance 942 | concern depending on the sizes of the inputs. 943 | 944 | Examples: 945 | >>> list(xor([1, 3, 4], [1, 2, 4], [2])) 946 | [3] 947 | 948 | Args: 949 | seq (Iterable): Iterable to compute symmetric difference against. 950 | *seqs (Iterable): Other iterables to compare with. 951 | 952 | Yields: 953 | Elements from the symmetric difference. 
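
    For instance, unhashable elements are handled the same way as hashable ones::

        list(xor([{'a': 1}, {'a': 2}], [{'a': 2}, {'a': 3}]))
        # -> [{'a': 1}, {'a': 3}]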
954 | """ 955 | if not seqs: 956 | yield from seq 957 | return 958 | 959 | head, *rest = seqs 960 | 961 | if isgenerator(seq): 962 | seq = tuple(seq) 963 | 964 | if isgenerator(head): 965 | head = tuple(head) 966 | 967 | a = union(seq, head) 968 | b = tuple(intersection(seq, head)) 969 | d = difference(a, b) 970 | 971 | yield from xor(d, *rest) 972 | -------------------------------------------------------------------------------- /src/fnc/utilities.py: -------------------------------------------------------------------------------- 1 | """General utility functions.""" 2 | 3 | from collections.abc import Iterable 4 | from functools import partial, wraps 5 | from random import randint, uniform 6 | import re 7 | import time 8 | 9 | import fnc 10 | 11 | from .helpers import Sentinel, number_types 12 | 13 | 14 | # These regexes are used in aspath() to parse deep path strings. 15 | 16 | # This is used to split a deep path string into dict keys or list indexex. 17 | # This matches "." as delimiter and "[]" as delimiter while keeping the 18 | # "[]" as an item. 19 | RE_PATH_KEY_DELIM = re.compile(r"(?]". 22 | RE_PATH_GET_ITEM = re.compile(r"^\[.*?\]$") 23 | 24 | 25 | def after(method): 26 | """ 27 | Decorator that calls `method` after the decorated function is called. 28 | 29 | Examples: 30 | >>> def a(): print('a') 31 | >>> def b(): print('b') 32 | >>> after(a)(b)() 33 | b 34 | a 35 | 36 | Args: 37 | method (callable): Function to call afterwards. 38 | """ 39 | 40 | def decorator(func): 41 | @wraps(func) 42 | def decorated(*args, **kwargs): 43 | result = func(*args, **kwargs) 44 | method() 45 | return result 46 | 47 | return decorated 48 | 49 | return decorator 50 | 51 | 52 | def aspath(value): 53 | """ 54 | Converts value to an object path list. 55 | 56 | Examples: 57 | >>> aspath('a.b.c') 58 | ['a', 'b', 'c'] 59 | >>> aspath('a.0.0.b.c') 60 | ['a', '0', '0', 'b', 'c'] 61 | >>> aspath('a[0].b.c') 62 | ['a', '0', 'b', 'c'] 63 | >>> aspath('a[0][1][2].b.c') 64 | ['a', '0', '1', '2', 'b', 'c'] 65 | >>> aspath('[a][0][1][2][b][c]') 66 | ['a', '0', '1', '2', 'b', 'c'] 67 | >>> aspath('a.[]') 68 | ['a', ''] 69 | >>> aspath(0) 70 | [0] 71 | >>> aspath([0, 1]) 72 | [0, 1] 73 | >>> aspath((0, 1)) 74 | [(0, 1)] 75 | 76 | Args: 77 | value (object): Value to convert. 78 | 79 | Returns: 80 | list: Returns property paths. 81 | """ 82 | if isinstance(value, list): 83 | return value 84 | 85 | if not isinstance(value, str): 86 | return [value] 87 | 88 | return [_parse_path_token(token) for token in RE_PATH_KEY_DELIM.split(value) if token] 89 | 90 | 91 | def _parse_path_token(token): 92 | if RE_PATH_GET_ITEM.match(token): 93 | path = token[1:-1] 94 | else: 95 | path = token 96 | 97 | return path 98 | 99 | 100 | def atgetter(paths): 101 | """ 102 | Creates a function that returns the values at paths of a given object. 103 | 104 | Examples: 105 | >>> get_id_name = atgetter(['data.id', 'data.name']) 106 | >>> get_id_name({'data': {'id': 1, 'name': 'foo'}}) 107 | (1, 'foo') 108 | 109 | Args: 110 | paths (Iterable): Path values to fetch from object. 111 | 112 | Returns: 113 | callable: Function like ``f(obj): fnc.at(paths, obj)``. 114 | """ 115 | return partial(fnc.at, paths) 116 | 117 | 118 | def before(method): 119 | """ 120 | Decorator that calls `method` before the decorated function is called. 121 | 122 | Examples: 123 | >>> def a(): print('a') 124 | >>> def b(): print('b') 125 | >>> before(a)(b)() 126 | a 127 | b 128 | 129 | Args: 130 | method (callable): Function to call afterwards. 
131 | """ 132 | 133 | def decorator(func): 134 | @wraps(func) 135 | def decorated(*args, **kwargs): 136 | method() 137 | return func(*args, **kwargs) 138 | 139 | return decorated 140 | 141 | return decorator 142 | 143 | 144 | def compose(*funcs): 145 | """ 146 | Create a function that is the composition of the provided functions, where each successive 147 | invocation is supplied the return value of the previous. For example, composing the functions 148 | ``f()``, ``g()``, and ``h()`` produces ``h(g(f()))``. 149 | 150 | Note: 151 | Each element in `funcs` can either be a callable or a ``tuple`` where the first 152 | element is a callable and the remaining elements are partial arguments. The 153 | tuples will be converted to a callable using ``functools.partial(*func)``. 154 | 155 | Note: 156 | The "partial" shorthand only supports invoking ``functools.partial`` using 157 | positional arguments. If keywoard argument partials are needed, then use 158 | ``functools.partial`` directly. 159 | 160 | Examples: 161 | >>> mult_5 = lambda x: x * 5 162 | >>> div_10 = lambda x: x / 10.0 163 | >>> pow_2 = lambda x: x ** 2 164 | >>> mult_div_pow = compose(sum, mult_5, div_10, pow_2) 165 | >>> mult_div_pow([1, 2, 3, 4]) 166 | 25.0 167 | >>> sum_positive_evens = compose( 168 | ... (filter, lambda x: x > 0), 169 | ... (filter, lambda x: x % 2 == 0), 170 | ... sum 171 | ... ) 172 | >>> sum_positive_evens([-1, 1, 2, 3, -5, 0, 6]) 173 | 8 174 | 175 | Args: 176 | *funcs (callable): Function(s) to compose. If `func` is a tuple, then it will be 177 | converted into a partial using ``functools.partial(*func)``. 178 | 179 | Returns: 180 | callable: Composed function. 181 | """ 182 | funcs = tuple(partial(*func) if isinstance(func, tuple) else func for func in funcs) 183 | 184 | def _compose(*args, **kwargs): 185 | result = None 186 | for func in funcs: 187 | result = func(*args, **kwargs) 188 | args, kwargs = (result,), {} 189 | return result 190 | 191 | return _compose 192 | 193 | 194 | def conformance(source): 195 | """ 196 | Creates a function that does a shallow comparison between a given object and the `source` 197 | dictionary using :func:`conforms`. 198 | 199 | Examples: 200 | >>> conformance({'a': 1})({'b': 2, 'a': 1}) 201 | True 202 | >>> conformance({'a': 1})({'b': 2, 'a': 2}) 203 | False 204 | 205 | Args: 206 | source (dict): Source object used for comparision. 207 | 208 | Returns: 209 | function 210 | """ 211 | if not isinstance(source, dict): # pragma: no cover 212 | raise TypeError("source must be a dict") 213 | 214 | return partial(conforms, source) 215 | 216 | 217 | def conforms(source, target): 218 | """ 219 | Return whether the `target` object conforms to `source` where `source` is a dictionary that 220 | contains key-value pairs which are compared against the same key- values in `target`. If a key- 221 | value in `source` is a callable, then that callable is used as a predicate against the 222 | corresponding key-value in `target`. 223 | 224 | Examples: 225 | >>> conforms({'b': 2}, {'a': 1, 'b': 2}) 226 | True 227 | >>> conforms({'b': 3}, {'a': 1, 'b': 2}) 228 | False 229 | >>> conforms({'b': 2, 'a': lambda a: a > 0}, {'a': 1, 'b': 2}) 230 | True 231 | >>> conforms({'b': 2, 'a': lambda a: a > 0}, {'a': -1, 'b': 2}) 232 | False 233 | 234 | Args: 235 | source (Mapping): Object of path values to match. 236 | target (Mapping): Object to compare. 237 | 238 | Returns: 239 | bool: Whether `target` is a match or not. 
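
    A key missing from `target` also fails the comparison::

        conforms({'c': 1}, {'a': 1, 'b': 2})
        # -> False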
240 | """ 241 | result = True 242 | for key, value in source.items(): 243 | target_value = fnc.get(key, target, default=Sentinel) 244 | 245 | if target_value is Sentinel: 246 | target_result = False 247 | elif callable(value): 248 | target_result = value(target_value) 249 | else: 250 | target_result = target_value == value 251 | 252 | if not target_result: 253 | result = False 254 | break 255 | 256 | return result 257 | 258 | 259 | def constant(value): 260 | """ 261 | Creates a function that returns a constant `value`. 262 | 263 | Examples: 264 | >>> pi = constant(3.14) 265 | >>> pi() 266 | 3.14 267 | 268 | Args: 269 | value (object): Constant value to return. 270 | 271 | Returns: 272 | callable: Function that always returns `value`. 273 | """ 274 | return lambda *args, **kwargs: value 275 | 276 | 277 | def identity(value=None, *args, **kwargs): 278 | """ 279 | Return the first argument provided. 280 | 281 | Examples: 282 | >>> identity(1) 283 | 1 284 | >>> identity(1, 2, 3) 285 | 1 286 | >>> identity(1, 2, 3, a=4) 287 | 1 288 | >>> identity() is None 289 | True 290 | 291 | Args: 292 | value (object, optional): Value to return. Defaults to ``None``. 293 | 294 | Returns: 295 | object: First argument or ``None``. 296 | """ 297 | return value 298 | 299 | 300 | def iteratee(obj): 301 | """ 302 | Return iteratee function based on the type of `obj`. 303 | 304 | The iteratee object can be one of the following: 305 | 306 | - ``callable``: Return as-is. 307 | - ``None``: Return :func:`identity` function. 308 | - ``dict``: Return :func:`conformance(obj)` function. 309 | - ``set``: Return :func:`pickgetter(obj)` function. 310 | - ``tuple``: Return :func:`atgetter(obj)`` function. 311 | - otherwise: Return :func:`pathgetter(obj)`` function. 312 | 313 | Note: 314 | In most cases, this function won't need to be called directly since 315 | other functions that accept an iteratee will call this function 316 | internally. 317 | 318 | Examples: 319 | >>> iteratee(lambda a, b: a + b)(1, 2) 320 | 3 321 | >>> iteratee(None)(1, 2, 3) 322 | 1 323 | >>> is_active = iteratee({'active': True}) 324 | >>> is_active({'active': True}) 325 | True 326 | >>> is_active({'active': 0}) 327 | False 328 | >>> iteratee({'a': 5, 'b.c': 1})({'a': 5, 'b': {'c': 1}}) 329 | True 330 | >>> iteratee({'a', 'b'})({'a': 1, 'b': 2, 'c': 3}) == {'a': 1, 'b': 2} 331 | True 332 | >>> iteratee(('a', ['c', 'd', 'e']))({'a': 1, 'c': {'d': {'e': 3}}}) 333 | (1, 3) 334 | >>> iteratee(['c', 'd', 'e'])({'a': 1, 'c': {'d': {'e': 3}}}) 335 | 3 336 | >>> get_data = iteratee('data') 337 | >>> get_data({'data': [1, 2, 3]}) 338 | [1, 2, 3] 339 | >>> iteratee(['a.b'])({'a.b': 5}) 340 | 5 341 | >>> iteratee('a.b')({'a': {'b': 5}}) 342 | 5 343 | 344 | Args: 345 | obj (object): Object to convert into an iteratee. 346 | 347 | Returns: 348 | callable: Iteratee function. 349 | """ 350 | if obj is None: 351 | return identity 352 | elif callable(obj): 353 | return obj 354 | elif isinstance(obj, dict): 355 | return conformance(obj) 356 | elif isinstance(obj, set): 357 | return pickgetter(obj) 358 | elif isinstance(obj, tuple): 359 | return atgetter(obj) 360 | else: 361 | return pathgetter(obj) 362 | 363 | 364 | def negate(func): 365 | """ 366 | Creates a function that negates the result of the predicate `func`. 367 | 368 | Examples: 369 | >>> not_number = negate(lambda x: isinstance(x, (int, float))) 370 | >>> not_number(1) 371 | False 372 | >>> not_number('1') 373 | True 374 | 375 | Args: 376 | func (callabe): Function to negate. 
377 | 378 | Returns: 379 | function 380 | """ 381 | return lambda *args, **kwargs: not func(*args, **kwargs) 382 | 383 | 384 | def noop(*args, **kwargs): 385 | """ 386 | A no-operation function. 387 | 388 | Examples: 389 | >>> noop(1, 2, 3) is None 390 | True 391 | """ 392 | return 393 | 394 | 395 | def over(*funcs): 396 | """ 397 | Creates a function that calls each function with the provided arguments and returns the results 398 | as a ``tuple``. 399 | 400 | Example: 401 | >>> minmax = over(min, max) 402 | >>> minmax([1, 2, 3, 4]) 403 | (1, 4) 404 | 405 | Args: 406 | *funcs (callable): Functions to call. 407 | 408 | Returns: 409 | callable: Function that returns tuple results from each function call. 410 | """ 411 | return lambda *args: tuple(func(*args) for func in funcs) 412 | 413 | 414 | def overall(*funcs): 415 | """ 416 | Creates a function that returns ``True`` when all of the given functions return true for the 417 | provided arguments. 418 | 419 | Example: 420 | >>> is_bool = overall( 421 | ... lambda v: isinstance(v, bool), 422 | ... lambda v: v is True or v is False 423 | ... ) 424 | >>> is_bool(False) 425 | True 426 | >>> is_bool(0) 427 | False 428 | 429 | Args: 430 | *funcs (callable): Functions to call. 431 | 432 | Returns: 433 | callable: Function that returns bool of whether call functions evaulate to true. 434 | """ 435 | return lambda *args: all(func(*args) for func in funcs) 436 | 437 | 438 | def overany(*funcs): 439 | """ 440 | Creates a function that returns ``True`` when any of the given functions return true for the 441 | provided arguments. 442 | 443 | Example: 444 | >>> is_bool_like = overany( 445 | ... lambda v: isinstance(v, bool), 446 | ... lambda v: v in [0, 1] 447 | ... ) 448 | >>> is_bool_like(False) 449 | True 450 | >>> is_bool_like(0) 451 | True 452 | 453 | Args: 454 | *funcs (callable): Functions to call. 455 | 456 | Returns: 457 | callable: Function that returns bool of whether call functions evaulate to true. 458 | """ 459 | return lambda *args: any(func(*args) for func in funcs) 460 | 461 | 462 | def pathgetter(path, default=None): 463 | """ 464 | Creates a function that returns the value at path of a given object. 465 | 466 | Examples: 467 | >>> get_data = pathgetter('data') 468 | >>> get_data({'data': 1}) 469 | 1 470 | >>> get_data({}) is None 471 | True 472 | >>> get_first = pathgetter(0) 473 | >>> get_first([1, 2, 3]) 474 | 1 475 | >>> get_nested = pathgetter('data.items') 476 | >>> get_nested({'data': {'items': [1, 2]}}) 477 | [1, 2] 478 | 479 | Args: 480 | path (object): Path value to fetch from object. 481 | 482 | Returns: 483 | callable: Function like ``f(obj): fnc.get(path, obj)``. 484 | """ 485 | return partial(fnc.get, path, default=default) 486 | 487 | 488 | def pickgetter(keys): 489 | """ 490 | Creates a function that returns the value at path of a given object. 491 | 492 | Examples: 493 | >>> pick_ab = pickgetter(['a', 'b']) 494 | >>> pick_ab({'a': 1, 'b': 2, 'c': 4}) == {'a': 1, 'b': 2} 495 | True 496 | 497 | Args: 498 | keys (Iterable): Keys to fetch from object. 499 | 500 | Returns: 501 | callable: Function like ``f(obj): fnc.pick(keys, obj)``. 502 | """ 503 | return partial(fnc.pick, keys) 504 | 505 | 506 | def random(start=0, stop=1, floating=False): 507 | """ 508 | Produces a random number between `start` and `stop` (inclusive). If only one argument is 509 | provided a number between 0 and the given number will be returned. 
If floating is truthy or 510 | either `start` or `stop` are floats a floating-point number will be returned instead of an 511 | integer. 512 | 513 | Args: 514 | start (int): Minimum value. 515 | stop (int): Maximum value. 516 | floating (bool, optional): Whether to force random value to ``float``. Default 517 | is ``False``. 518 | 519 | Returns: 520 | int|float: Random value. 521 | 522 | Example: 523 | >>> 0 <= random() <= 1 524 | True 525 | >>> 5 <= random(5, 10) <= 10 526 | True 527 | >>> isinstance(random(floating=True), float) 528 | True 529 | """ 530 | floating = isinstance(start, float) or isinstance(stop, float) or floating is True 531 | 532 | if stop < start: 533 | stop, start = start, stop 534 | 535 | if floating: 536 | rnd = uniform(start, stop) 537 | else: 538 | rnd = randint(start, stop) 539 | 540 | return rnd 541 | 542 | 543 | def retry( # noqa: C901 544 | attempts=3, 545 | *, 546 | delay=0.5, 547 | max_delay=150.0, 548 | scale=2.0, 549 | jitter=0, 550 | exceptions=(Exception,), 551 | on_exception=None 552 | ): 553 | """ 554 | Decorator that retries a function multiple times if it raises an exception with an optional 555 | delay between each attempt. When a `delay` is supplied, there will be a sleep period in between 556 | retry attempts. The first delay time will always be equal to `delay`. After subsequent retries, 557 | the delay time will be scaled by `scale` up to `max_delay`. If `max_delay` is ``0``, then 558 | `delay` can increase unbounded. 559 | 560 | Args: 561 | attempts (int, optional): Number of retry attempts. Defaults to ``3``. 562 | delay (int|float, optional): Base amount of seconds to sleep between retry 563 | attempts. Defaults to ``0.5``. 564 | max_delay (int|float, optional): Maximum number of seconds to sleep between 565 | retries. Is ignored when equal to ``0``. Defaults to ``150.0`` 566 | (2.5 minutes). 567 | scale (int|float, optional): Scale factor to increase `delay` after first retry 568 | fails. Defaults to ``2.0``. 569 | jitter (int|float|tuple, optional): Random jitter to add to `delay` time. Can be 570 | a positive number or 2-item tuple of numbers representing the random range 571 | to choose from. When a number is given, the random range will be from 572 | ``[0, jitter]``. When jitter is a float or contains a float, then a random 573 | float will be chosen; otherwise, a random integer will be selected. Defaults 574 | to ``0`` which disables jitter. 575 | exceptions (tuple, optional): Tuple of exceptions that trigger a retry attempt. 576 | Exceptions not in the tuple will be ignored. Defaults to ``(Exception,)`` 577 | (all exceptions). 578 | on_exception (function, optional): Function that is called when a retryable 579 | exception is caught. It is invoked with ``on_exception(exc, attempt)`` where 580 | ``exc`` is the caught exception and ``attempt`` is the attempt count. All 581 | arguments are optional. Defaults to ``None``. 582 | 583 | Example: 584 | 585 | >>> @retry(attempts=3, delay=0) 586 | ... def do_something(): 587 | ... print('something') 588 | ... raise Exception('something went wrong') 589 | >>> try: do_something() 590 | ... 
except Exception: print('caught something') 591 | something 592 | something 593 | something 594 | caught something 595 | """ 596 | if isinstance(exceptions, Exception): # pragma: no cover 597 | exceptions = (exceptions,) 598 | 599 | if not isinstance(attempts, int) or attempts <= 0: 600 | raise ValueError("attempts must be an integer greater than 0") 601 | 602 | if not isinstance(delay, number_types) or delay < 0: 603 | raise ValueError("delay must be a number greater than or equal to 0") 604 | 605 | if not isinstance(max_delay, number_types) or max_delay < 0: 606 | raise ValueError("scale must be a number greater than or equal to 0") 607 | 608 | if not isinstance(scale, number_types) or scale <= 0: 609 | raise ValueError("scale must be a number greater than 0") 610 | 611 | if ( 612 | not isinstance(jitter, number_types + (tuple,)) 613 | or (isinstance(jitter, number_types) and jitter < 0) 614 | or ( 615 | isinstance(jitter, tuple) 616 | and (len(jitter) != 2 or not all(isinstance(jit, number_types) for jit in jitter)) 617 | ) 618 | ): 619 | raise ValueError("jitter must be a number greater than 0 or a 2-item tuple of " "numbers") 620 | 621 | if not isinstance(exceptions, tuple) or not all( 622 | issubclass(exc, Exception) for exc in exceptions 623 | ): 624 | raise TypeError("exceptions must be a tuple of Exception types") 625 | 626 | if on_exception and not callable(on_exception): 627 | raise TypeError("on_exception must be a callable") 628 | 629 | if jitter and not isinstance(jitter, tuple): 630 | jitter = (0, jitter) 631 | 632 | def decorator(func): 633 | @wraps(func) 634 | def decorated(*args, **kargs): 635 | delay_time = delay 636 | 637 | for attempt in range(1, attempts + 1): 638 | # pylint: disable=catching-non-exception 639 | try: 640 | return func(*args, **kargs) 641 | except exceptions as exc: 642 | if on_exception: 643 | exc.retry = {"attempt": attempt} 644 | on_exception(exc) 645 | 646 | if attempt == attempts: 647 | raise 648 | 649 | if jitter: 650 | delay_time += max(0, random(*jitter)) 651 | 652 | if delay_time < 0: # pragma: no cover 653 | continue 654 | 655 | if max_delay: 656 | delay_time = min(delay_time, max_delay) 657 | 658 | time.sleep(delay_time) 659 | 660 | # Scale after first iteration. 661 | delay_time *= scale 662 | 663 | return decorated 664 | 665 | return decorator 666 | -------------------------------------------------------------------------------- /tasks.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module provides the CLI interface for invoke tasks. 3 | 4 | All tasks can be executed from this file's directory using: 5 | 6 | $ inv 7 | 8 | Where is a function defined below with the @task decorator. 9 | """ 10 | 11 | from functools import partial 12 | import os 13 | 14 | from invoke import Exit, UnexpectedExit, run as _run, task 15 | 16 | 17 | PACKAGE_NAME = "fnc" 18 | PACKAGE_SOURCE = f"src/{PACKAGE_NAME}" 19 | TEST_TARGETS = f"{PACKAGE_SOURCE} tests" 20 | LINT_TARGETS = f"{TEST_TARGETS} tasks.py" 21 | EXIT_EXCEPTIONS = (Exit, UnexpectedExit, SystemExit) 22 | 23 | 24 | # Set pyt=True to enable colored output when available. 
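# Note: pty allocation generally requires a POSIX terminal, so this may need to be
# disabled (pty=False) when running on Windows.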
25 | run = partial(_run, pty=True) 26 | 27 | 28 | @task 29 | def black(ctx, quiet=False): 30 | """Autoformat code using black.""" 31 | run(f"black {LINT_TARGETS}", hide=quiet) 32 | 33 | 34 | @task 35 | def isort(ctx, quiet=False): 36 | """Autoformat Python imports.""" 37 | run(f"isort {LINT_TARGETS}", hide=quiet) 38 | 39 | 40 | @task 41 | def docformatter(ctx): 42 | """Autoformat docstrings using docformatter.""" 43 | run( 44 | f"docformatter -r {LINT_TARGETS} " 45 | f"--in-place --pre-summary-newline --wrap-descriptions 100 --wrap-summaries 100" 46 | ) 47 | 48 | 49 | @task 50 | def fmt(ctx): 51 | """Autoformat code and docstrings.""" 52 | print("Running docformatter") 53 | docformatter(ctx) 54 | 55 | print("Running isort") 56 | isort(ctx, quiet=True) 57 | 58 | print("Running black") 59 | black(ctx, quiet=True) 60 | 61 | 62 | @task 63 | def flake8(ctx): 64 | """Check code for PEP8 violations using flake8.""" 65 | run(f"flake8 --format=pylint {LINT_TARGETS}") 66 | 67 | 68 | @task 69 | def pylint(ctx): 70 | """Check code for static errors using pylint.""" 71 | run(f"pylint {LINT_TARGETS}") 72 | 73 | 74 | @task 75 | def lint(ctx): 76 | """Run linters.""" 77 | linters = {"flake8": flake8, "pylint": pylint} 78 | failures = [] 79 | 80 | print(f"Preparing to run linters: {', '.join(linters)}\n") 81 | 82 | for name, linter in linters.items(): 83 | print(f"Running {name}") 84 | try: 85 | linter(ctx) 86 | except EXIT_EXCEPTIONS: 87 | failures.append(name) 88 | result = "FAILED" 89 | else: 90 | result = "PASSED" 91 | print(f"{result}\n") 92 | 93 | if failures: 94 | failed = ", ".join(failures) 95 | raise Exit(f"ERROR: linters failed: {failed}") 96 | 97 | 98 | @task(help={"args": "Override default pytest arguments"}) 99 | def test(ctx, args=f"{TEST_TARGETS} --cov={PACKAGE_NAME}"): 100 | """Run unit tests using pytest.""" 101 | tox_env_site_packages_dir = os.getenv("TOX_ENV_SITE_PACKAGES_DIR") 102 | if tox_env_site_packages_dir: 103 | # Re-path package source to match tox env so that we generate proper coverage report. 104 | tox_env_pkg_src = os.path.join(tox_env_site_packages_dir, os.path.basename(PACKAGE_SOURCE)) 105 | args = args.replace(PACKAGE_SOURCE, tox_env_pkg_src) 106 | 107 | run(f"pytest {args}") 108 | 109 | 110 | @task 111 | def ci(ctx): 112 | """Run linters and tests.""" 113 | print("Building package") 114 | build(ctx) 115 | 116 | print("Building docs") 117 | docs(ctx) 118 | 119 | print("Checking linters") 120 | lint(ctx) 121 | 122 | print("Running unit tests") 123 | test(ctx) 124 | 125 | 126 | @task 127 | def docs(ctx, serve=False, bind="127.0.0.1", port=8000): 128 | """Build docs.""" 129 | run("rm -rf docs/_build") 130 | run("sphinx-build -q -W -b html docs docs/_build/html") 131 | 132 | if serve: 133 | print(f"Serving docs on {bind} port {port} (http://{bind}:{port}/) ...") 134 | run(f"python -m http.server -b {bind} --directory docs/_build/html {port}", hide=True) 135 | 136 | 137 | @task 138 | def build(ctx): 139 | """Build Python package.""" 140 | run("rm -rf dist build docs/_build") 141 | run("python -m build") 142 | 143 | 144 | @task 145 | def clean(ctx): 146 | """Remove temporary files related to development.""" 147 | run("find . 
-type f -name '*.py[cod]' -delete -o -type d -name __pycache__ -delete") 148 | run("rm -rf .tox .coverage .cache .pytest_cache .mypy_cache **/.egg* **/*.egg* dist build") 149 | 150 | 151 | @task(pre=[build]) 152 | def release(ctx): 153 | """Release Python package.""" 154 | run("twine upload dist/*") 155 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dgilland/fnc/b474d32d47feab0aa56e25f53c0cf24487aaa977/tests/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | from unittest import mock 2 | 3 | import pytest 4 | 5 | 6 | @pytest.fixture 7 | def mocksleep(): 8 | with mock.patch("time.sleep") as mocked: 9 | yield mocked 10 | -------------------------------------------------------------------------------- /tests/helpers.py: -------------------------------------------------------------------------------- 1 | class AttrObject(object): 2 | def __init__(self, **attrs): 3 | for attr, value in attrs.items(): 4 | setattr(self, attr, value) 5 | 6 | 7 | class KeysGetItemObject(object): 8 | def __init__(self, mapping): 9 | self.mapping = mapping 10 | 11 | def keys(self): 12 | return self.mapping.keys() 13 | 14 | def __getitem__(self, item): 15 | return self.mapping[item] 16 | 17 | 18 | class IterMappingObject(object): 19 | def __init__(self, mapping): 20 | self.mapping = mapping 21 | 22 | def __iter__(self): 23 | return iter(self.mapping.items()) 24 | -------------------------------------------------------------------------------- /tests/test_mappings.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict, namedtuple 2 | 3 | import pytest 4 | 5 | import fnc 6 | 7 | from .helpers import AttrObject, IterMappingObject, KeysGetItemObject 8 | 9 | 10 | parametrize = pytest.mark.parametrize 11 | 12 | 13 | @parametrize( 14 | "case", 15 | [ 16 | dict(args=([0, 2, 4], ["a", "b", "c", "d", "e"]), expected=("a", "c", "e")), 17 | dict(args=([0, 2], ["moe", "larry", "curly"]), expected=("moe", "curly")), 18 | dict(args=(["a", "b"], {"a": 1, "b": 2, "c": 3}), expected=(1, 2)), 19 | ], 20 | ) 21 | def test_at(case): 22 | assert fnc.at(*case["args"]) == case["expected"] 23 | 24 | 25 | @parametrize( 26 | "case", 27 | [ 28 | dict( 29 | args=({"name": "barney"}, {"name": "fred", "employer": "slate"}), 30 | expected={"name": "barney", "employer": "slate"}, 31 | ) 32 | ], 33 | ) 34 | def test_defaults(case): 35 | assert fnc.defaults(*case["args"]) == case["expected"] 36 | 37 | 38 | @parametrize( 39 | "case", 40 | [ 41 | dict(args=("one.two", {"one": {"two": {"three": 4}}}), expected={"three": 4}), 42 | dict(args=("one.two.three", {"one": {"two": {"three": 4}}}), expected=4), 43 | dict(args=(["one", "two"], {"one": {"two": {"three": 4}}}), expected={"three": 4}), 44 | dict(args=(["one", "two", "three"], {"one": {"two": {"three": 4}}}), expected=4), 45 | dict(args=("one.four", {"one": {"two": {"three": 4}}}), expected=None), 46 | dict( 47 | args=("one.four.three", {"one": {"two": {"three": 4}}}), 48 | kwargs={"default": []}, 49 | expected=[], 50 | ), 51 | dict( 52 | args=("one.four.0.a", {"one": {"two": {"three": 4}}}), 53 | kwargs={"default": [{"a": 1}]}, 54 | expected=[{"a": 1}], 55 | ), 56 | dict( 57 | args=("one.four.three.0.a", {"one": {"two": 
{"three": [{"a": 1}]}}}), 58 | kwargs={"default": []}, 59 | expected=[], 60 | ), 61 | dict(args=("one.four.three", {"one": {"two": {"three": 4}}}), expected=None), 62 | dict( 63 | args=("one.four.three.0.a", {"one": {"two": {"three": [{"a": 1}]}}}), 64 | expected=None, 65 | ), 66 | dict( 67 | args=("one.four.three", {"one": {"two": {"three": 4}}}), 68 | kwargs={"default": 2}, 69 | expected=2, 70 | ), 71 | dict( 72 | args=("one.four.three.0.a", {"one": {"two": {"three": [{"a": 1}]}}}), 73 | kwargs={"default": 2}, 74 | expected=2, 75 | ), 76 | dict( 77 | args=("one.four.three", {"one": {"two": {"three": 4}}}), 78 | kwargs={"default": {"test": "value"}}, 79 | expected={"test": "value"}, 80 | ), 81 | dict( 82 | args=("one.four.three.0.a", {"one": {"two": {"three": [{"a": 1}]}}}), 83 | kwargs={"default": {"test": "value"}}, 84 | expected={"test": "value"}, 85 | ), 86 | dict( 87 | args=("one.four.three", {"one": {"two": {"three": 4}}}), 88 | kwargs={"default": "haha"}, 89 | expected="haha", 90 | ), 91 | dict( 92 | args=("one.four.three.0.a", {"one": {"two": {"three": [{"a": 1}]}}}), 93 | kwargs={"default": "haha"}, 94 | expected="haha", 95 | ), 96 | dict(args=("five", {"one": {"two": {"three": 4}}}), expected=None), 97 | dict( 98 | args=(["one", 1, "three", 1], {"one": ["two", {"three": [4, 5]}]}), 99 | expected=5, 100 | ), 101 | dict(args=("one.[1].three.[1]", {"one": ["two", {"three": [4, 5]}]}), expected=5), 102 | dict(args=("one.1.three.1", {"one": ["two", {"three": [4, 5]}]}), expected=5), 103 | dict(args=("[1].two.three.[0]", ["one", {"two": {"three": [4, 5]}}]), expected=4), 104 | dict( 105 | args=( 106 | "[1].two.three[1][0].four[0]", 107 | ["one", {"two": {"three": [4, [{"four": [5]}]]}}], 108 | ), 109 | expected=5, 110 | ), 111 | dict(args=("[42]", range(50)), expected=42), 112 | dict(args=("[0][0][0][0][0][0][0][0][0][0]", [[[[[[[[[[42]]]]]]]]]]), expected=42), 113 | dict(args=("[0][42]", [range(50)]), expected=42), 114 | dict(args=("a[0].b[42]", {"a": [{"b": range(50)}]}), expected=42), 115 | dict( 116 | args=("one.bad.hello", {"one": ["hello", "there"]}), 117 | kwargs={"default": []}, 118 | expected=[], 119 | ), 120 | dict(args=("one.1.hello", {"one": ["hello", None]}), expected=None), 121 | dict(args=("a", namedtuple("a", ["a", "b"])(1, 2)), expected=1), 122 | dict(args=(0, namedtuple("a", ["a", "b"])(1, 2)), expected=1), 123 | dict(args=("a.c.d", namedtuple("a", ["a", "b"])({"c": {"d": 1}}, 2)), expected=1), 124 | dict(args=("update", {}), expected=None), 125 | dict(args=("extend", []), expected=None), 126 | dict(args=((1,), {(1,): {(2,): 3}}), expected={(2,): 3}), 127 | dict(args=([(1,)], {(1,): {(2,): 3}}), expected={(2,): 3}), 128 | dict(args=([(1,), (2,)], {(1,): {(2,): 3}}), expected=3), 129 | dict(args=(object, {object: 1}), expected=1), 130 | dict(args=([object, object], {object: {object: 1}}), expected=1), 131 | dict(args=("0.0.0.0.0.0.0.0.0.0", [[[[[[[[[[42]]]]]]]]]]), expected=42), 132 | dict(args=("1.name", {1: {"name": "John Doe"}}), expected="John Doe"), 133 | ], 134 | ) 135 | def test_get(case): 136 | kwargs = case.get("kwargs", {}) 137 | assert fnc.get(*case["args"], **kwargs) == case["expected"] 138 | 139 | 140 | def test_get__should_not_populate_defaultdict(): 141 | data = defaultdict(list) 142 | fnc.get("a", data) 143 | assert data == {} 144 | 145 | 146 | @parametrize( 147 | "case", 148 | [ 149 | dict(args=("b", {"a": 1, "b": 2, "c": 3}), expected=True), 150 | dict(args=(0, [1, 2, 3]), expected=True), 151 | dict(args=(1, [1, 2, 3]), expected=True), 152 | 
dict(args=(3, [1, 2, 3]), expected=False), 153 | dict(args=("b", {"a": 1, "b": 2, "c": 3}), expected=True), 154 | dict(args=(0, [1, 2, 3]), expected=True), 155 | dict(args=(1, [1, 2, 3]), expected=True), 156 | dict(args=(3, [1, 2, 3]), expected=False), 157 | dict(args=("one.two", {"one": {"two": {"three": 4}}}), expected=True), 158 | dict(args=("one.two.three", {"one": {"two": {"three": 4}}}), expected=True), 159 | dict(args=(["one", "two"], {"one": {"two": {"three": 4}}}), expected=True), 160 | dict( 161 | args=(["one", "two", "three"], {"one": {"two": {"three": 4}}}), 162 | expected=True, 163 | ), 164 | dict(args=("one.four", {"one": {"two": {"three": 4}}}), expected=False), 165 | dict(args=("five", {"one": {"two": {"three": 4}}}), expected=False), 166 | dict(args=("one.four.three", {"one": {"two": {"three": 4}}}), expected=False), 167 | dict( 168 | args=("one.four.three.0.a", {"one": {"two": {"three": [{"a": 1}]}}}), 169 | expected=False, 170 | ), 171 | dict( 172 | args=(["one", 1, "three", 1], {"one": ["two", {"three": [4, 5]}]}), 173 | expected=True, 174 | ), 175 | dict( 176 | args=("one.[1].three.[1]", {"one": ["two", {"three": [4, 5]}]}), 177 | expected=True, 178 | ), 179 | dict(args=("one.1.three.1", {"one": ["two", {"three": [4, 5]}]}), expected=True), 180 | dict( 181 | args=("[1].two.three.[0]", ["one", {"two": {"three": [4, 5]}}]), 182 | expected=True, 183 | ), 184 | ], 185 | ) 186 | def test_has(case): 187 | assert fnc.has(*case["args"]) == case["expected"] 188 | 189 | 190 | def test_has__should_not_populate_defaultdict(): 191 | data = defaultdict(list) 192 | fnc.has("a", data) 193 | assert data == {} 194 | 195 | 196 | @parametrize( 197 | "case", 198 | [ 199 | dict(args=({"a": 1, "b": 2, "c": 3},), expected={1: "a", 2: "b", 3: "c"}), 200 | dict( 201 | args=(IterMappingObject({"a": 1, "b": 2, "c": 3}),), 202 | expected={1: "a", 2: "b", 3: "c"}, 203 | ), 204 | dict(args=([1, 2, 3],), expected={1: 0, 2: 1, 3: 2}), 205 | ], 206 | ) 207 | def test_invert(case): 208 | assert fnc.invert(*case["args"]) == case["expected"] 209 | 210 | 211 | @parametrize( 212 | "case", 213 | [ 214 | dict( 215 | args=(lambda k: k + k, {0: "a", 1: "b", 2: "c"}), 216 | expected={0: "a", 2: "b", 4: "c"}, 217 | ), 218 | dict( 219 | args=(lambda k: k + k, KeysGetItemObject({0: "a", 1: "b", 2: "c"})), 220 | expected={0: "a", 2: "b", 4: "c"}, 221 | ), 222 | ], 223 | ) 224 | def test_mapkeys(case): 225 | assert fnc.mapkeys(*case["args"]) == case["expected"] 226 | 227 | 228 | @parametrize( 229 | "case", 230 | [ 231 | dict( 232 | args=(lambda num: num * 3, {"a": 1, "b": 2, "c": 3}), 233 | expected={"a": 3, "b": 6, "c": 9}, 234 | ), 235 | dict( 236 | args=( 237 | "age", 238 | { 239 | "fred": {"name": "fred", "age": 40}, 240 | "pebbles": {"name": "pebbles", "age": 1}, 241 | }, 242 | ), 243 | expected={"fred": 40, "pebbles": 1}, 244 | ), 245 | ], 246 | ) 247 | def test_mapvalues(case): 248 | assert fnc.mapvalues(*case["args"]) == case["expected"] 249 | 250 | 251 | @parametrize( 252 | "case", 253 | [ 254 | dict( 255 | args=({"name": "fred"}, {"company": "a"}), 256 | expected={"name": "fred", "company": "a"}, 257 | ), 258 | dict( 259 | args=({"name": "fred"}, {"company": "a"}, {"company": "b"}), 260 | expected={"name": "fred", "company": "b"}, 261 | ), 262 | ], 263 | ) 264 | def test_merge(case): 265 | assert fnc.merge(*case["args"]) == case["expected"] 266 | 267 | 268 | @parametrize( 269 | "case", 270 | [ 271 | dict(args=(["a"], {"a": 1, "b": 2, "c": 3}), expected={"b": 2, "c": 3}), 272 | dict(args=(["a", "b"], {"a": 
1, "b": 2, "c": 3}), expected={"c": 3}), 273 | dict(args=([], [1, 2, 3]), expected={0: 1, 1: 2, 2: 3}), 274 | dict(args=([0], [1, 2, 3]), expected={1: 2, 2: 3}), 275 | dict(args=([0, 1], [1, 2, 3]), expected={2: 3}), 276 | ], 277 | ) 278 | def test_omit(case): 279 | assert fnc.omit(*case["args"]) == case["expected"] 280 | 281 | 282 | @parametrize( 283 | "case", 284 | [ 285 | dict(args=(["a"], {"a": 1, "b": 2, "c": 3}), expected={"a": 1}), 286 | dict(args=(["a", "b"], {"a": 1, "b": 2, "c": 3}), expected={"a": 1, "b": 2}), 287 | dict(args=(["a"], {}), expected={}), 288 | dict(args=([], [1, 2, 3]), expected={}), 289 | dict(args=([0], [1, 2, 3]), expected={0: 1}), 290 | dict(args=(["a"], AttrObject(a=1, b=2, c=3)), expected={"a": 1}), 291 | ], 292 | ) 293 | def test_pick(case): 294 | assert fnc.pick(*case["args"]) == case["expected"] 295 | -------------------------------------------------------------------------------- /tests/test_sequences.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import pytest 4 | 5 | import fnc 6 | 7 | 8 | parametrize = pytest.mark.parametrize 9 | 10 | 11 | @parametrize( 12 | "case", 13 | [ 14 | dict(args=(1, [1, 2, 3, 4, 5]), expected=[[1], [2], [3], [4], [5]]), 15 | dict(args=(2, [1, 2, 3, 4, 5]), expected=[[1, 2], [3, 4], [5]]), 16 | dict(args=(3, [1, 2, 3, 4, 5]), expected=[[1, 2, 3], [4, 5]]), 17 | dict(args=(4, [1, 2, 3, 4, 5]), expected=[[1, 2, 3, 4], [5]]), 18 | dict(args=(5, [1, 2, 3, 4, 5]), expected=[[1, 2, 3, 4, 5]]), 19 | dict(args=(6, [1, 2, 3, 4, 5]), expected=[[1, 2, 3, 4, 5]]), 20 | ], 21 | ) 22 | def test_chunk(case): 23 | assert list(fnc.chunk(*case["args"])) == case["expected"] 24 | 25 | 26 | @parametrize( 27 | "case", 28 | [ 29 | dict(args=([0, 1, 2, 3],), expected=[1, 2, 3]), 30 | dict(args=([True, False, None, True, 1, "foo"],), expected=[True, True, 1, "foo"]), 31 | ], 32 | ) 33 | def test_compact(case): 34 | assert list(fnc.compact(*case["args"])) == case["expected"] 35 | 36 | 37 | @parametrize( 38 | "case", 39 | [ 40 | dict(args=(), expected=[]), 41 | dict(args=([],), expected=[]), 42 | dict(args=([1, 2, 3],), expected=[1, 2, 3]), 43 | dict(args=([1, 2, 3], [4, 5, 6]), expected=[1, 2, 3, 4, 5, 6]), 44 | dict(args=([1, 2, 3], [4, 5, 6], [7]), expected=[1, 2, 3, 4, 5, 6, 7]), 45 | dict(args=([1], [2], [3], [4]), expected=[1, 2, 3, 4]), 46 | ], 47 | ) 48 | def test_concat(case): 49 | assert list(fnc.concat(*case["args"])) == case["expected"] 50 | 51 | 52 | @parametrize( 53 | "case", 54 | [ 55 | dict( 56 | args=(lambda num: int(math.floor(num)), [4.3, 6.1, 6.4]), 57 | expected={4: 1, 6: 2}, 58 | ), 59 | dict( 60 | args=({"one": 1}, [{"one": 1}, {"one": 1}, {"two": 2}, {"one": 1}]), 61 | expected={True: 3, False: 1}, 62 | ), 63 | dict( 64 | args=("one", [{"one": 1}, {"one": 1}, {"two": 2}, {"one": 1}]), 65 | expected={1: 3, None: 1}, 66 | ), 67 | dict(args=(None, {1: 0, 2: 0, 4: 3}), expected={1: 1, 2: 1, 4: 1}), 68 | ], 69 | ) 70 | def test_countby(case): 71 | assert fnc.countby(*case["args"]) == case["expected"] 72 | 73 | 74 | @parametrize( 75 | "case", 76 | [ 77 | dict(args=([1, 2, 3, 4],), expected=[1, 2, 3, 4]), 78 | dict(args=([1, 2, 3, 4], []), expected=[1, 2, 3, 4]), 79 | dict(args=([1, 2, 3, 4], [2, 4], [3, 5, 6]), expected=[1]), 80 | dict(args=([1, 1, 1, 1], [2, 4], [3, 5, 6]), expected=[1]), 81 | dict(args=(iter([1, 2, 3, 4]), iter([2, 4]), iter([1, 3, 5, 6])), expected=[]), 82 | dict(args=(iter([0, 1, 2, 3, 4]), iter([2, 4]), iter([1, 3, 5, 6])), expected=[0]), 83 | ], 84 | ) 
85 | def test_difference(case): 86 | assert list(fnc.difference(*case["args"])) == case["expected"] 87 | 88 | 89 | @parametrize( 90 | "case", 91 | [ 92 | dict( 93 | args=("a", [{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}]), 94 | expected=[{"a": 1}, {"a": 2}, {"a": 3}, {"a": 4}], 95 | ), 96 | dict( 97 | args=(round, [1.5, 2.2, 3.7, 4.2], [2.5, 4.9], [3, 5, 6]), 98 | expected=[3.7], 99 | ), 100 | ], 101 | ) 102 | def test_differenceby(case): 103 | assert list(fnc.differenceby(*case["args"])) == case["expected"] 104 | 105 | 106 | @parametrize( 107 | "case", 108 | [ 109 | dict(args=([1, 2, 3, 2, 1, 5, 6, 5, 5, 5],), expected=[2, 1, 5]), 110 | dict(args=([1, 2], [3, 2], [1, 5], [6, 5, 5, 5]), expected=[2, 1, 5]), 111 | dict(args=([1, 2], [3, 2], [1, 5], [6, 5, 5, 5]), expected=[2, 1, 5]), 112 | dict( 113 | args=(iter([1, 2]), iter([3, 2]), iter([1, 5]), iter([6, 5, 5, 5])), 114 | expected=[2, 1, 5], 115 | ), 116 | ], 117 | ) 118 | def test_duplicates(case): 119 | assert list(fnc.duplicates(*case["args"])) == case["expected"] 120 | 121 | 122 | @parametrize( 123 | "case", 124 | [ 125 | dict( 126 | args=( 127 | "a", 128 | [ 129 | {"a": 1}, 130 | {"a": 2}, 131 | {"a": 3}, 132 | {"a": 2}, 133 | {"a": 1}, 134 | {"a": 5}, 135 | {"a": 6}, 136 | {"a": 5}, 137 | {"a": 5}, 138 | {"a": 5}, 139 | ], 140 | ), 141 | expected=[{"a": 2}, {"a": 1}, {"a": 5}], 142 | ), 143 | dict( 144 | args=( 145 | lambda x: round(x), 146 | [1.5, 2.3], 147 | [3.7, 2.5], 148 | [1.1, 5.8], 149 | [6.9, 5.1, 5.2, 5.3], 150 | ), 151 | expected=[2.3, 5.2], 152 | ), 153 | ], 154 | ) 155 | def test_duplicatesby(case): 156 | assert list(fnc.duplicatesby(*case["args"])) == case["expected"] 157 | 158 | 159 | @parametrize( 160 | "case", 161 | [ 162 | dict(args=(None, [0, True, False, None, 1, 2, 3]), expected=[True, 1, 2, 3]), 163 | dict(args=(lambda num: num % 2 == 0, [1, 2, 3, 4, 5, 6]), expected=[2, 4, 6]), 164 | dict( 165 | args=( 166 | "blocked", 167 | [ 168 | {"name": "barney", "age": 36, "blocked": False}, 169 | {"name": "fred", "age": 40, "blocked": True}, 170 | ], 171 | ), 172 | expected=[{"name": "fred", "age": 40, "blocked": True}], 173 | ), 174 | dict( 175 | args=( 176 | {"age": 36}, 177 | [ 178 | {"name": "barney", "age": 36, "blocked": False}, 179 | {"name": "fred", "age": 40, "blocked": True}, 180 | ], 181 | ), 182 | expected=[{"name": "barney", "age": 36, "blocked": False}], 183 | ), 184 | dict( 185 | args=( 186 | {"age": 40}, 187 | [{"name": "moe", "age": 40}, {"name": "larry", "age": 50}], 188 | ), 189 | expected=[{"name": "moe", "age": 40}], 190 | ), 191 | ], 192 | ) 193 | def test_filter(case): 194 | assert list(fnc.filter(*case["args"])) == case["expected"] 195 | 196 | 197 | @parametrize( 198 | "case", 199 | [ 200 | dict( 201 | args=( 202 | lambda c: c["age"] < 40, 203 | [ 204 | {"name": "barney", "age": 36, "blocked": False}, 205 | {"name": "fred", "age": 40, "blocked": True}, 206 | {"name": "pebbles", "age": 1, "blocked": False}, 207 | ], 208 | ), 209 | expected={"name": "barney", "age": 36, "blocked": False}, 210 | ), 211 | dict( 212 | args=( 213 | {"age": 1}, 214 | [ 215 | {"name": "barney", "age": 36, "blocked": False}, 216 | {"name": "fred", "age": 40, "blocked": True}, 217 | {"name": "pebbles", "age": 1, "blocked": False}, 218 | ], 219 | ), 220 | expected={"name": "pebbles", "age": 1, "blocked": False}, 221 | ), 222 | dict( 223 | args=( 224 | "blocked", 225 | [ 226 | {"name": "barney", "age": 36, "blocked": False}, 227 | {"name": "fred", "age": 40, "blocked": True}, 228 | {"name": "pebbles", "age": 1, "blocked": 
False}, 229 | ], 230 | ), 231 | expected={"name": "fred", "age": 40, "blocked": True}, 232 | ), 233 | dict( 234 | args=( 235 | None, 236 | [ 237 | {"name": "barney", "age": 36, "blocked": False}, 238 | {"name": "fred", "age": 40, "blocked": True}, 239 | {"name": "pebbles", "age": 1, "blocked": False}, 240 | ], 241 | ), 242 | expected={"name": "barney", "age": 36, "blocked": False}, 243 | ), 244 | ], 245 | ) 246 | def test_find(case): 247 | assert fnc.find(*case["args"]) == case["expected"] 248 | 249 | 250 | @parametrize( 251 | "case", 252 | [ 253 | dict( 254 | args=(lambda item: item.startswith("b"), ["apple", "banana", "beet"]), 255 | expected=1, 256 | ), 257 | dict( 258 | args=( 259 | {"name": "banana"}, 260 | [ 261 | {"name": "apple", "type": "fruit"}, 262 | {"name": "banana", "type": "fruit"}, 263 | {"name": "beet", "type": "vegetable"}, 264 | ], 265 | ), 266 | expected=1, 267 | ), 268 | dict(args=(lambda *_: False, ["apple", "banana", "beet"]), expected=-1), 269 | ], 270 | ) 271 | def test_findindex(case): 272 | assert fnc.findindex(*case["args"]) == case["expected"] 273 | 274 | 275 | @parametrize("case", [dict(args=(lambda num: num % 2 == 1, [1, 2, 3, 4]), expected=3)]) 276 | def test_findlast(case): 277 | assert fnc.findlast(*case["args"]) == case["expected"] 278 | 279 | 280 | @parametrize( 281 | "case", 282 | [ 283 | dict( 284 | args=(lambda item: item.startswith("b"), ["apple", "banana", "beet"]), 285 | expected=2, 286 | ), 287 | dict( 288 | args=( 289 | {"type": "fruit"}, 290 | [ 291 | {"name": "apple", "type": "fruit"}, 292 | {"name": "banana", "type": "fruit"}, 293 | {"name": "beet", "type": "vegetable"}, 294 | ], 295 | ), 296 | expected=1, 297 | ), 298 | dict(args=(lambda *_: False, ["apple", "banana", "beet"]), expected=-1), 299 | ], 300 | ) 301 | def test_findlastindex(case): 302 | assert fnc.findlastindex(*case["args"]) == case["expected"] 303 | 304 | 305 | @parametrize( 306 | "case", 307 | [ 308 | dict( 309 | args=([1, ["2222"], [3, [[4]]]], [[[[5]]]]), 310 | expected=[1, "2222", 3, [[4]], [[5]]], 311 | ) 312 | ], 313 | ) 314 | def test_flatten(case): 315 | assert list(fnc.flatten(*case["args"])) == case["expected"] 316 | 317 | 318 | @parametrize( 319 | "case", 320 | [dict(args=([1, ["2222"], [3, [[4]]]], [[[[5]]]]), expected=[1, "2222", 3, 4, 5])], 321 | ) 322 | def test_flattendeep(case): 323 | assert list(fnc.flattendeep(*case["args"])) == case["expected"] 324 | 325 | 326 | @parametrize( 327 | "case", 328 | [ 329 | dict( 330 | args=( 331 | ["a"], 332 | [ 333 | {"a": 1, "b": 2, "c": 3}, 334 | {"a": 1, "b": 2, "c": 4}, 335 | {"a": 1, "b": 2, "c": 5}, 336 | {"a": 1, "b": 1, "c": 6}, 337 | {"a": 1, "b": 1, "c": 7}, 338 | {"a": 2, "b": 2, "c": 8}, 339 | {"a": 2, "b": 2, "c": 9}, 340 | {"a": 2, "b": 2, "c": 10}, 341 | {"a": 3, "b": 1, "c": 11}, 342 | ], 343 | ), 344 | expected={ 345 | 1: [ 346 | {"a": 1, "b": 2, "c": 3}, 347 | {"a": 1, "b": 2, "c": 4}, 348 | {"a": 1, "b": 2, "c": 5}, 349 | {"a": 1, "b": 1, "c": 6}, 350 | {"a": 1, "b": 1, "c": 7}, 351 | ], 352 | 2: [ 353 | {"a": 2, "b": 2, "c": 8}, 354 | {"a": 2, "b": 2, "c": 9}, 355 | {"a": 2, "b": 2, "c": 10}, 356 | ], 357 | 3: [{"a": 3, "b": 1, "c": 11}], 358 | }, 359 | ), 360 | dict( 361 | args=( 362 | ["a", "b"], 363 | [ 364 | {"a": 1, "b": 2, "c": 3}, 365 | {"a": 1, "b": 2, "c": 4}, 366 | {"a": 1, "b": 2, "c": 5}, 367 | {"a": 1, "b": 1, "c": 6}, 368 | {"a": 1, "b": 1, "c": 7}, 369 | {"a": 2, "b": 2, "c": 8}, 370 | {"a": 2, "b": 2, "c": 9}, 371 | {"a": 2, "b": 2, "c": 10}, 372 | {"a": 3, "b": 1, "c": 11}, 373 | ], 374 | 
), 375 | expected={ 376 | 1: { 377 | 2: [ 378 | {"a": 1, "b": 2, "c": 3}, 379 | {"a": 1, "b": 2, "c": 4}, 380 | {"a": 1, "b": 2, "c": 5}, 381 | ], 382 | 1: [{"a": 1, "b": 1, "c": 6}, {"a": 1, "b": 1, "c": 7}], 383 | }, 384 | 2: { 385 | 2: [ 386 | {"a": 2, "b": 2, "c": 8}, 387 | {"a": 2, "b": 2, "c": 9}, 388 | {"a": 2, "b": 2, "c": 10}, 389 | ] 390 | }, 391 | 3: {1: [{"a": 3, "b": 1, "c": 11}]}, 392 | }, 393 | ), 394 | dict( 395 | args=( 396 | [], 397 | [ 398 | {"a": 1, "b": 2, "c": 3}, 399 | {"a": 1, "b": 2, "c": 4}, 400 | {"a": 1, "b": 2, "c": 5}, 401 | {"a": 1, "b": 1, "c": 6}, 402 | {"a": 1, "b": 1, "c": 7}, 403 | {"a": 2, "b": 2, "c": 8}, 404 | {"a": 2, "b": 2, "c": 9}, 405 | {"a": 2, "b": 2, "c": 10}, 406 | {"a": 3, "b": 1, "c": 11}, 407 | ], 408 | ), 409 | expected=[ 410 | {"a": 1, "b": 2, "c": 3}, 411 | {"a": 1, "b": 2, "c": 4}, 412 | {"a": 1, "b": 2, "c": 5}, 413 | {"a": 1, "b": 1, "c": 6}, 414 | {"a": 1, "b": 1, "c": 7}, 415 | {"a": 2, "b": 2, "c": 8}, 416 | {"a": 2, "b": 2, "c": 9}, 417 | {"a": 2, "b": 2, "c": 10}, 418 | {"a": 3, "b": 1, "c": 11}, 419 | ], 420 | ), 421 | ], 422 | ) 423 | def test_groupall(case): 424 | assert fnc.groupall(*case["args"]) == case["expected"] 425 | 426 | 427 | @parametrize( 428 | "case", 429 | [ 430 | dict( 431 | args=(lambda num: int(math.floor(num)), [4.2, 6.1, 6.4]), 432 | expected={4: [4.2], 6: [6.1, 6.4]}, 433 | ) 434 | ], 435 | ) 436 | def test_groupby(case): 437 | assert fnc.groupby(*case["args"]) == case["expected"] 438 | 439 | 440 | @parametrize( 441 | "case", 442 | [ 443 | dict( 444 | args=([1, 2, 3], [[10, 20], [30, 40], [50, 60]]), 445 | expected=[10, 20, 1, 2, 3, 30, 40, 1, 2, 3, 50, 60], 446 | ), 447 | dict( 448 | args=([1, 2, 3], [[[10, 20]], [[30, 40]], [50, [60]]]), 449 | expected=[[10, 20], 1, 2, 3, [30, 40], 1, 2, 3, 50, [60]], 450 | ), 451 | ], 452 | ) 453 | def test_intercalate(case): 454 | assert list(fnc.intercalate(*case["args"])) == case["expected"] 455 | 456 | 457 | @parametrize( 458 | "case", 459 | [ 460 | dict(args=([1, 2], [3, 4]), expected=[1, 3, 2, 4]), 461 | dict(args=([1, 2], [3, 4], [5, 6]), expected=[1, 3, 5, 2, 4, 6]), 462 | dict(args=([1, 2], [3, 4, 5], [6]), expected=[1, 3, 6, 2, 4, 5]), 463 | dict(args=([1, 2, 3], [4], [5, 6]), expected=[1, 4, 5, 2, 6, 3]), 464 | ], 465 | ) 466 | def test_interleave(case): 467 | assert list(fnc.interleave(*case["args"])) == case["expected"] 468 | 469 | 470 | @parametrize( 471 | "case", 472 | [ 473 | dict(args=([1, 2, 3], [101, 2, 1, 10], [2, 1]), expected=[1, 2]), 474 | dict(args=([1, 1, 2, 2], [1, 1, 2, 2]), expected=[1, 2]), 475 | dict(args=([1, 2, 3], [4]), expected=[]), 476 | dict(args=([1, 2, 3],), expected=[1, 2, 3]), 477 | dict(args=([], [101, 2, 1, 10], [2, 1]), expected=[]), 478 | dict(args=([],), expected=[]), 479 | dict(args=[iter([2, 1]), iter([2, 1])], expected=[2, 1]), 480 | dict(args=[iter([2, 1]), iter([1, 2])], expected=[2, 1]), 481 | dict(args=[iter([2, 1]), iter([1, 2]), iter([0, 1, 2]), iter([1])], expected=[1]), 482 | dict(args=[iter([1, 2]), iter([2, 1]), iter([0, 1, 2]), iter([1])], expected=[1]), 483 | ], 484 | ) 485 | def test_intersection(case): 486 | assert list(fnc.intersection(*case["args"])) == case["expected"] 487 | 488 | 489 | @parametrize( 490 | "case", 491 | [ 492 | dict( 493 | args=( 494 | "a", 495 | [{"a": 1}, {"a": 2}, {"a": 3}], 496 | [{"a": 101}, {"a": 2}, {"a": 1}, {"a": 10}], 497 | [{"a": 2}, {"a": 1}], 498 | ), 499 | expected=[{"a": 1}, {"a": 2}], 500 | ), 501 | dict( 502 | args=(lambda x: round(x), [1.5, 1.7, 2.1, 2.8], [1, 1, 2, 2]), 
503 | expected=[1.5], 504 | ), 505 | ], 506 | ) 507 | def test_intersectionby(case): 508 | assert list(fnc.intersectionby(*case["args"])) == case["expected"] 509 | 510 | 511 | @parametrize( 512 | "case", 513 | [ 514 | dict(args=(10, []), expected=[]), 515 | dict(args=(10, [1]), expected=[1]), 516 | dict(args=(10, [1, 2, 3, 4]), expected=[1, 10, 2, 10, 3, 10, 4]), 517 | dict( 518 | args=([0, 0, 0], [1, 2, 3, 4]), 519 | expected=[1, [0, 0, 0], 2, [0, 0, 0], 3, [0, 0, 0], 4], 520 | ), 521 | dict( 522 | args=([0, 0, 0], [[1, 2, 3], [4, 5, 6], [7, 8, 9]]), 523 | expected=[[1, 2, 3], [0, 0, 0], [4, 5, 6], [0, 0, 0], [7, 8, 9]], 524 | ), 525 | ], 526 | ) 527 | def test_intersperse(case): 528 | assert list(fnc.intersperse(*case["args"])) == case["expected"] 529 | 530 | 531 | @parametrize( 532 | "case", 533 | [ 534 | dict( 535 | args=("dir", [{"dir": "left", "code": 97}, {"dir": "right", "code": 100}]), 536 | expected={ 537 | "left": {"dir": "left", "code": 97}, 538 | "right": {"dir": "right", "code": 100}, 539 | }, 540 | ) 541 | ], 542 | ) 543 | def test_keyby(case): 544 | assert fnc.keyby(*case["args"]) == case["expected"] 545 | 546 | 547 | @parametrize( 548 | "case", 549 | [ 550 | dict(args=(None, [1, 2, 3]), expected=[1, 2, 3]), 551 | dict(args=(int, [1.1, 2.1, 3.1]), expected=[1, 2, 3]), 552 | dict(args=(lambda num: num * 3, [1, 2, 3]), expected=[3, 6, 9]), 553 | dict(args=(len, [[1], [2, 3], [4, 5, 6]]), expected=[1, 2, 3]), 554 | dict( 555 | args=("name", [{"name": "moe", "age": 40}, {"name": "larry", "age": 50}]), 556 | expected=["moe", "larry"], 557 | ), 558 | dict( 559 | args=( 560 | "level1.level2.level3.value", 561 | [ 562 | {"level1": {"level2": {"level3": {"value": 1}}}}, 563 | {"level1": {"level2": {"level3": {"value": 2}}}}, 564 | {"level1": {"level2": {"level3": {"value": 3}}}}, 565 | {"level1": {"level2": {"level3": {"value": 4}}}}, 566 | {"level1": {"level2": {}}}, 567 | {}, 568 | ], 569 | ), 570 | expected=[1, 2, 3, 4, None, None], 571 | ), 572 | dict(args=([1], [[0, 1], [2, 3], [4, 5]]), expected=[1, 3, 5]), 573 | dict( 574 | args=( 575 | ["a"], 576 | [ 577 | {"a": 1, "b": 2, "c": -1}, 578 | {"a": 3, "b": 4, "c": -1}, 579 | {"a": 5, "b": 6, "c": -1}, 580 | ], 581 | ), 582 | expected=[1, 3, 5], 583 | ), 584 | dict( 585 | args=( 586 | ("a", "b"), 587 | [ 588 | {"a": 1, "b": 2, "c": -1}, 589 | {"a": 3, "b": 4, "c": -1}, 590 | {"a": 5, "b": 6, "c": -1}, 591 | ], 592 | ), 593 | expected=[(1, 2), (3, 4), (5, 6)], 594 | ), 595 | dict( 596 | args=( 597 | {"a", "b"}, 598 | [ 599 | {"a": 1, "b": 2, "c": -1}, 600 | {"a": 3, "b": 4, "c": -1}, 601 | {"a": 5, "b": 6, "c": -1}, 602 | ], 603 | ), 604 | expected=[{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}], 605 | ), 606 | ], 607 | ) 608 | def test_map(case): 609 | assert list(fnc.map(*case["args"])) == case["expected"] 610 | 611 | 612 | @parametrize( 613 | "case", 614 | [ 615 | dict( 616 | args=(lambda x: [str(x)] if x is None else [], [1, 2, None, 4, None, 6]), 617 | expected=["None", "None"], 618 | ) 619 | ], 620 | ) 621 | def test_mapcat(case): 622 | assert list(fnc.mapcat(*case["args"])) == case["expected"] 623 | 624 | 625 | @parametrize( 626 | "case", 627 | [ 628 | dict(args=(None, [1, 2, 3]), expected=[1, 2, 3]), 629 | dict(args=(None, [[1], [2], [3]]), expected=[1, 2, 3]), 630 | dict(args=(None, [[[1]], [[2]], [[3]]]), expected=[[1], [2], [3]]), 631 | dict(args=(lambda x: [x - 1], [1, 2, 3]), expected=[0, 1, 2]), 632 | dict( 633 | args=(lambda x: [[x], [x]], [1, 2, 3]), 634 | expected=[[1], [1], [2], [2], [3], [3]], 635 | ), 
636 | ], 637 | ) 638 | def test_mapflat(case): 639 | assert list(fnc.mapflat(*case["args"])) == case["expected"] 640 | 641 | 642 | @parametrize( 643 | "case", 644 | [ 645 | dict(args=(None, [1, 2, 3]), expected=[1, 2, 3]), 646 | dict(args=(None, [[1], [2], [3]]), expected=[1, 2, 3]), 647 | dict(args=(None, [[[1]], [[2]], [[3]]]), expected=[1, 2, 3]), 648 | dict(args=(lambda x: [x - 1], [1, 2, 3]), expected=[0, 1, 2]), 649 | dict(args=(lambda x: [[x], [x]], [1, 2, 3]), expected=[1, 1, 2, 2, 3, 3]), 650 | ], 651 | ) 652 | def test_mapflatdeep(case): 653 | assert list(fnc.mapflatdeep(*case["args"])) == case["expected"] 654 | 655 | 656 | @parametrize( 657 | "case", 658 | [ 659 | dict(args=(lambda item: item % 2, [1, 2, 3]), expected=[[1, 3], [2]]), 660 | dict( 661 | args=(lambda item: math.floor(item) % 2, [1.2, 2.3, 3.4]), 662 | expected=[[1.2, 3.4], [2.3]], 663 | ), 664 | dict( 665 | args=( 666 | {"age": 1}, 667 | [ 668 | {"name": "barney", "age": 36}, 669 | {"name": "fred", "age": 40, "blocked": True}, 670 | {"name": "pebbles", "age": 1}, 671 | ], 672 | ), 673 | expected=[ 674 | [{"name": "pebbles", "age": 1}], 675 | [ 676 | {"name": "barney", "age": 36}, 677 | {"name": "fred", "age": 40, "blocked": True}, 678 | ], 679 | ], 680 | ), 681 | dict( 682 | args=( 683 | "blocked", 684 | [ 685 | {"name": "barney", "age": 36}, 686 | {"name": "fred", "age": 40, "blocked": True}, 687 | {"name": "pebbles", "age": 1}, 688 | ], 689 | ), 690 | expected=[ 691 | [{"name": "fred", "age": 40, "blocked": True}], 692 | [{"name": "barney", "age": 36}, {"name": "pebbles", "age": 1}], 693 | ], 694 | ), 695 | ], 696 | ) 697 | def test_partition(case): 698 | assert list(fnc.partition(*case["args"])) == case["expected"] 699 | 700 | 701 | @parametrize( 702 | "case", 703 | [ 704 | dict(args=(None, [0, True, False, None, 1, 2, 3]), expected=[0, False, None]), 705 | dict(args=(lambda num: num % 2 == 0, [1, 2, 3, 4, 5, 6]), expected=[1, 3, 5]), 706 | dict( 707 | args=( 708 | "blocked", 709 | [ 710 | {"name": "barney", "age": 36, "blocked": False}, 711 | {"name": "fred", "age": 40, "blocked": True}, 712 | ], 713 | ), 714 | expected=[{"name": "barney", "age": 36, "blocked": False}], 715 | ), 716 | dict( 717 | args=( 718 | {"age": 36}, 719 | [ 720 | {"name": "barney", "age": 36, "blocked": False}, 721 | {"name": "fred", "age": 40, "blocked": True}, 722 | ], 723 | ), 724 | expected=[{"name": "fred", "age": 40, "blocked": True}], 725 | ), 726 | ], 727 | ) 728 | def test_reject(case): 729 | assert list(fnc.reject(*case["args"])) == case["expected"] 730 | 731 | 732 | @parametrize( 733 | "case", 734 | [ 735 | dict(args=([1, 2, 1, 3, 1], [1, 3, 2, 6, 4], [5]), expected=[1, 2, 3, 6, 4, 5]), 736 | dict(args=([dict(a=1), dict(a=2), dict(a=1)],), expected=[dict(a=1), dict(a=2)]), 737 | ], 738 | ) 739 | def test_union(case): 740 | assert list(fnc.union(*case["args"])) == case["expected"] 741 | 742 | 743 | @parametrize( 744 | "case", 745 | [ 746 | dict( 747 | args=( 748 | "a", 749 | [dict(a=1), dict(a=2), dict(a=1), dict(a=3), dict(a=1)], 750 | [dict(a=1), dict(a=3), dict(a=2), dict(a=6), dict(a=4)], 751 | [dict(a=5)], 752 | ), 753 | expected=[dict(a=1), dict(a=2), dict(a=3), dict(a=6), dict(a=4), dict(a=5)], 754 | ), 755 | dict( 756 | args=(lambda x: round(x["a"]), [dict(a=1.7), dict(a=2), dict(a=1)]), 757 | expected=[dict(a=1.7), dict(a=1)], 758 | ), 759 | ], 760 | ) 761 | def test_unionby(case): 762 | assert list(fnc.unionby(*case["args"])) == case["expected"] 763 | 764 | 765 | @parametrize( 766 | "case", 767 | [ 768 | dict( 769 | 
args=([["moe", 30, True], ["larry", 40, False], ["curly", 35, True]],), 770 | expected=[("moe", "larry", "curly"), (30, 40, 35), (True, False, True)], 771 | ) 772 | ], 773 | ) 774 | def test_unzip(case): 775 | assert list(fnc.unzip(*case["args"])) == case["expected"] 776 | 777 | 778 | @parametrize("case", [dict(args=([0, 1], [1, 2, 1, 0, 3, 1, 4]), expected=[2, 3, 4])]) 779 | def test_without(case): 780 | assert list(fnc.without(*case["args"])) == case["expected"] 781 | 782 | 783 | @parametrize( 784 | "case", 785 | [ 786 | dict(args=([1, 2, 3], [5, 2, 1, 4]), expected=[3, 5, 4]), 787 | dict(args=([1, 2, 5], [2, 3, 5], [3, 4, 5]), expected=[1, 4, 5]), 788 | dict(args=(iter([1, 2, 5]), iter([2, 3, 5]), iter([3, 4, 5])), expected=[1, 4, 5]), 789 | dict( 790 | args=( 791 | iter(x for x in [1, 2, 5]), 792 | iter(x for x in [2, 3, 5]), 793 | iter(x for x in [3, 4, 5]), 794 | ), 795 | expected=[1, 4, 5], 796 | ), 797 | ], 798 | ) 799 | def test_xor(case): 800 | assert list(fnc.xor(*case["args"])) == case["expected"] 801 | -------------------------------------------------------------------------------- /tests/test_utilities.py: -------------------------------------------------------------------------------- 1 | from unittest import mock 2 | 3 | import pytest 4 | 5 | import fnc 6 | 7 | 8 | parametrize = pytest.mark.parametrize 9 | 10 | 11 | def test_after(): 12 | tracker = [] 13 | 14 | def track_one(): 15 | tracker.append(1) 16 | 17 | @fnc.after(track_one) 18 | def track_two(): 19 | tracker.append(2) 20 | return True 21 | 22 | assert track_two() is True 23 | assert tracker == [2, 1] 24 | 25 | 26 | @parametrize( 27 | "case", 28 | [ 29 | dict(args=("a.b.c",), expected=["a", "b", "c"]), 30 | dict(args=("a[0].b.c",), expected=["a", "0", "b", "c"]), 31 | dict(args=("a[0][1][2].b.c",), expected=["a", "0", "1", "2", "b", "c"]), 32 | dict( 33 | args=(["a", "0", "1", "2", "b", "c"],), 34 | expected=["a", "0", "1", "2", "b", "c"], 35 | ), 36 | dict(args=((1, 2),), expected=[(1, 2)]), 37 | ], 38 | ) 39 | def test_aspath(case): 40 | assert fnc.aspath(*case["args"]) == case["expected"] 41 | 42 | 43 | @parametrize( 44 | "case", 45 | [ 46 | dict(args=([0, 2, 4], ["a", "b", "c", "d", "e"]), expected=("a", "c", "e")), 47 | dict(args=([0, 2], ["moe", "larry", "curly"]), expected=("moe", "curly")), 48 | dict(args=(["a", "b"], {"a": 1, "b": 2, "c": 3}), expected=(1, 2)), 49 | ], 50 | ) 51 | def test_atgetter(case): 52 | assert fnc.atgetter(case["args"][0])(case["args"][1]) == case["expected"] 53 | 54 | 55 | def test_before(): 56 | tracker = [] 57 | 58 | def track_one(): 59 | tracker.append(1) 60 | 61 | @fnc.before(track_one) 62 | def track_two(): 63 | tracker.append(2) 64 | return True 65 | 66 | assert track_two() is True 67 | assert tracker == [1, 2] 68 | 69 | 70 | @parametrize( 71 | "case", 72 | [ 73 | dict( 74 | funcs=(lambda x: "!!!" 
+ x + "!!!", lambda x: f"Hi {x}"), 75 | args=("Bob",), 76 | expected="Hi !!!Bob!!!", 77 | ), 78 | dict(funcs=(lambda x: x + x, lambda x: x * x), args=(5,), expected=100), 79 | dict( 80 | funcs=((fnc.map, tuple), (fnc.map, list), tuple), 81 | args=([{"a": 1}, {"b": 2}, {"c": 3}],), 82 | expected=(["a"], ["b"], ["c"]), 83 | ), 84 | dict( 85 | funcs=((fnc.filter, lambda item: item[0][1] > 0), (fnc.map, dict), list), 86 | args=([[("a", 1)], [("a", 0)], [("a", 5)], [("a", -2)]],), 87 | expected=[{"a": 1}, {"a": 5}], 88 | ), 89 | ], 90 | ) 91 | def test_compose(case): 92 | assert fnc.compose(*case["funcs"])(*case["args"]) == case["expected"] 93 | 94 | 95 | @parametrize( 96 | "case", 97 | [ 98 | dict(args=({"age": 40}, {"name": "fred", "age": 40}), expected=True), 99 | dict( 100 | args=({"age": 40, "active": True}, {"name": "fred", "age": 40}), 101 | expected=False, 102 | ), 103 | dict(args=({}, {}), expected=True), 104 | dict(args=({}, {"a": 1}), expected=True), 105 | ], 106 | ) 107 | def test_conformance(case): 108 | expected = case["expected"] 109 | assert fnc.conformance(case["args"][0])(case["args"][1]) == expected 110 | 111 | 112 | @parametrize( 113 | "case", 114 | [ 115 | dict(args=({"age": 40}, {"name": "fred", "age": 40}), expected=True), 116 | dict( 117 | args=({"age": 40, "active": True}, {"name": "fred", "age": 40}), 118 | expected=False, 119 | ), 120 | dict( 121 | args=({"age": lambda age: age >= 21}, {"name": "fred", "age": 21}), 122 | expected=True, 123 | ), 124 | dict( 125 | args=({"age": lambda age: age >= 21}, {"name": "fred", "age": 19}), 126 | expected=False, 127 | ), 128 | dict(args=({}, {}), expected=True), 129 | dict(args=({}, {"a": 1}), expected=True), 130 | ], 131 | ) 132 | def test_conforms(case): 133 | assert fnc.conforms(*case["args"]) == case["expected"] 134 | 135 | 136 | @parametrize("case", ["foo", "bar", {"a": 1}]) 137 | def test_constant(case): 138 | assert fnc.constant(case)() is case 139 | 140 | 141 | @parametrize( 142 | "case", 143 | [ 144 | dict(args=(1,), expected=1), 145 | dict(args=(1, 2), expected=1), 146 | dict(args=(), expected=None), 147 | dict(args=(1, 2), kwargs={"a": 3, "b": 4}, expected=1), 148 | ], 149 | ) 150 | def test_identity(case): 151 | kwargs = case.get("kwargs", {}) 152 | assert fnc.identity(*case["args"], **kwargs) == case["expected"] 153 | 154 | 155 | @parametrize( 156 | "case", 157 | [ 158 | dict(args=(lambda a, b: a + b, 1, 2), expected=3), 159 | dict(args=(None, 1, 2), expected=1), 160 | dict(args=({"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 3}), expected=True), 161 | dict(args=({"a": 1, "b": 2}, {"a": 4, "b": 2, "c": 3}), expected=False), 162 | dict( 163 | args=({"a", "b"}, {"a": 1, "b": 2, "c": 3, "d": 4}), 164 | expected={"a": 1, "b": 2}, 165 | ), 166 | dict(args=(("a", "b"), {"a": 1, "b": 2, "c": 3, "d": 4}), expected=(1, 2)), 167 | dict(args=("a", {"a": 1, "b": 2}), expected=1), 168 | dict(args=("a.b", {"a": {"b": 2}}), expected=2), 169 | dict(args=(["a", "b"], {"a": {"b": 2}}), expected=2), 170 | ], 171 | ) 172 | def test_iteratee(case): 173 | args = case["args"] 174 | assert fnc.iteratee(args[0])(*args[1:]) == case["expected"] 175 | 176 | 177 | @parametrize( 178 | "case", 179 | [dict(args=(lambda item: item, True)), dict(args=(lambda item: item, False))], 180 | ) 181 | def test_negate(case): 182 | func, *callargs = case["args"] 183 | assert fnc.negate(func)(*callargs) == (not func(*callargs)) 184 | 185 | 186 | @parametrize( 187 | "case", 188 | [ 189 | dict(args=(), kwargs={}), 190 | dict(args=(1, 2, 3), kwargs={}), 191 | 
dict(args=(), kwargs={"a": 1, "b": 2}), 192 | dict(args=(1, 2, 3), kwargs={"a": 1, "b": 2}), 193 | ], 194 | ) 195 | def test_noop(case): 196 | assert fnc.noop(*case["args"], **case["kwargs"]) is None 197 | 198 | 199 | @parametrize("case", [dict(args=((max, min), [1, 2, 3, 4]), expected=(4, 1))]) 200 | def test_over(case): 201 | funcs, *callargs = case["args"] 202 | assert fnc.over(*funcs)(*callargs) == case["expected"] 203 | 204 | 205 | @parametrize( 206 | "case", 207 | [ 208 | dict(args=((lambda x: x is not None, bool), 1), expected=True), 209 | dict(args=((lambda x: x is None, bool), 1), expected=False), 210 | ], 211 | ) 212 | def test_overall(case): 213 | funcs, *callargs = case["args"] 214 | assert fnc.overall(*funcs)(*callargs) == case["expected"] 215 | 216 | 217 | @parametrize( 218 | "case", 219 | [ 220 | dict(args=((lambda x: x is not None, bool), 1), expected=True), 221 | dict(args=((lambda x: x is None, bool), 1), expected=True), 222 | dict(args=((lambda x: x is False, lambda y: y == 2), True), expected=False), 223 | ], 224 | ) 225 | def test_overany(case): 226 | funcs, *callargs = case["args"] 227 | assert fnc.overany(*funcs)(*callargs) == case["expected"] 228 | 229 | 230 | @parametrize( 231 | "case", 232 | [ 233 | dict(args=("one.two", {"one": {"two": {"three": 4}}}), expected={"three": 4}), 234 | dict( 235 | args=("one.four.three", {"one": {"two": {"three": 4}}}), 236 | kwargs={"default": []}, 237 | expected=[], 238 | ), 239 | ], 240 | ) 241 | def test_pathgetter(case): 242 | args = case["args"] 243 | kwargs = case.get("kwargs", {}) 244 | assert fnc.pathgetter(args[0], **kwargs)(args[1]) == case["expected"] 245 | 246 | 247 | @parametrize( 248 | "case", 249 | [ 250 | dict(args=(["a"], {"a": 1, "b": 2, "c": 3}), expected={"a": 1}), 251 | dict(args=(["a", "b"], {"a": 1, "b": 2, "c": 3}), expected={"a": 1, "b": 2}), 252 | dict(args=([], [1, 2, 3]), expected={}), 253 | dict(args=([0], [1, 2, 3]), expected={0: 1}), 254 | ], 255 | ) 256 | def test_pickgetter(case): 257 | assert fnc.pickgetter(case["args"][0])(case["args"][1]) == case["expected"] 258 | 259 | 260 | @parametrize( 261 | "case", 262 | [ 263 | dict(args=(), expected={"type": int, "min": 0, "max": 1}), 264 | dict(args=(25,), expected={"type": int, "min": 0, "max": 25}), 265 | dict(args=(5, 10), expected={"type": int, "min": 5, "max": 10}), 266 | dict( 267 | args=(), 268 | kwargs={"floating": True}, 269 | expected={"type": float, "min": 0, "max": 1}, 270 | ), 271 | dict( 272 | args=(25,), 273 | kwargs={"floating": True}, 274 | expected={"type": float, "min": 0, "max": 25}, 275 | ), 276 | dict( 277 | args=(5, 10), 278 | kwargs={"floating": True}, 279 | expected={"type": float, "min": 5, "max": 10}, 280 | ), 281 | dict(args=(5.0, 10), expected={"type": float, "min": 5, "max": 10}), 282 | dict(args=(5, 10.0), expected={"type": float, "min": 5, "max": 10}), 283 | dict(args=(5.0, 10.0), expected={"type": float, "min": 5, "max": 10}), 284 | dict( 285 | args=(5.0, 10.0), 286 | kwargs={"floating": True}, 287 | expected={"type": float, "min": 5, "max": 10}, 288 | ), 289 | ], 290 | ) 291 | def test_random(case): 292 | kwargs = case.get("kwargs", {}) 293 | 294 | for _ in range(50): 295 | rnd = fnc.random(*case["args"], **kwargs) 296 | assert isinstance(rnd, case["expected"]["type"]) 297 | assert case["expected"]["min"] <= rnd <= case["expected"]["max"] 298 | 299 | 300 | @parametrize( 301 | "case", 302 | [ 303 | dict(args={"attempts": 3}, expected={"count": 0}), 304 | dict(args={"attempts": 3}, expected={"count": 1}), 305 | 
dict(args={"attempts": 3}, expected={"count": 2}), 306 | dict(args={"attempts": 5}, expected={"count": 3}), 307 | ], 308 | ) 309 | def test_retry_success(mocksleep, case): 310 | counter = {True: 0} 311 | 312 | @fnc.retry(**case["args"]) 313 | def func(): 314 | if counter[True] != case["expected"]["count"]: 315 | counter[True] += 1 316 | raise Exception() 317 | return True 318 | 319 | result = func() 320 | 321 | assert result is True 322 | assert counter[True] == case["expected"]["count"] 323 | assert mocksleep.call_count == case["expected"]["count"] 324 | 325 | 326 | @parametrize( 327 | "case", 328 | [ 329 | dict(args={}, expected={"count": 2, "times": [0.5, 1.0]}), 330 | dict(args={"attempts": 1}, expected={"count": 0, "times": []}), 331 | dict( 332 | args={"attempts": 3, "delay": 0.5, "scale": 2.0}, 333 | expected={"count": 2, "times": [0.5, 1.0]}, 334 | ), 335 | dict( 336 | args={"attempts": 5, "delay": 1.5, "scale": 2.5}, 337 | expected={"count": 4, "times": [1.5, 3.75, 9.375, 23.4375]}, 338 | ), 339 | dict( 340 | args={"attempts": 5, "delay": 1.5, "max_delay": 8.0, "scale": 2.5}, 341 | expected={"count": 4, "times": [1.5, 3.75, 8.0, 8.0]}, 342 | ), 343 | ], 344 | ) 345 | def test_retry_error(mocksleep, case): 346 | @fnc.retry(**case["args"]) 347 | def func(): 348 | raise ValueError() 349 | 350 | with pytest.raises(ValueError): 351 | func() 352 | 353 | assert mocksleep.call_count == case["expected"]["count"] 354 | 355 | delay_calls = [mock.call(time) for time in case["expected"]["times"]] 356 | assert mocksleep.call_args_list == delay_calls 357 | 358 | 359 | @parametrize( 360 | "case", 361 | [ 362 | dict( 363 | args={"jitter": 5, "delay": 2, "scale": 1, "attempts": 5}, 364 | unexpected=[2, 2, 2, 2], 365 | ), 366 | dict( 367 | args={"jitter": 10, "delay": 3, "scale": 1.5, "attempts": 5}, 368 | unexpected=[3, 4.5, 6.75, 10.125], 369 | ), 370 | dict( 371 | args={"jitter": 1.0, "delay": 3, "scale": 1.5, "attempts": 5}, 372 | unexpected=[3, 4.5, 6.75, 10.125], 373 | ), 374 | ], 375 | ) 376 | def test_retry_jitter(mocksleep, case): 377 | @fnc.retry(**case["args"]) 378 | def func(): 379 | raise ValueError() 380 | 381 | with pytest.raises(ValueError): 382 | func() 383 | 384 | delay_calls = [mock.call(time) for time in case["unexpected"]] 385 | 386 | assert mocksleep.call_count == len(delay_calls) 387 | assert mocksleep.call_args_list != delay_calls 388 | 389 | 390 | @parametrize( 391 | "case", 392 | [ 393 | dict( 394 | args={"attempts": 1, "exceptions": (RuntimeError,)}, 395 | expected={"exception": RuntimeError, "count": 0}, 396 | ), 397 | dict( 398 | args={"attempts": 2, "exceptions": (RuntimeError,)}, 399 | expected={"exception": RuntimeError, "count": 1}, 400 | ), 401 | dict( 402 | args={"attempts": 2, "exceptions": (RuntimeError,)}, 403 | expected={"exception": Exception, "count": 0}, 404 | ), 405 | ], 406 | ) 407 | def test_retry_exceptions(mocksleep, case): 408 | @fnc.retry(**case["args"]) 409 | def func(): 410 | raise case["expected"]["exception"]() 411 | 412 | with pytest.raises(case["expected"]["exception"]): 413 | func() 414 | 415 | assert case["expected"]["count"] == mocksleep.call_count 416 | 417 | 418 | def test_retry_on_exception(mocksleep): 419 | attempts = 5 420 | error_counts = {} 421 | 422 | def on_exception(exc): 423 | error_counts[exc.retry["attempt"]] = True 424 | 425 | @fnc.retry(attempts=attempts, on_exception=on_exception) 426 | def func(): 427 | raise ValueError() 428 | 429 | with pytest.raises(ValueError): 430 | func() 431 | 432 | assert error_counts == {key: True 
for key in range(1, attempts + 1)} 433 | 434 | 435 | @parametrize( 436 | "case", 437 | [ 438 | dict(args={"attempts": 0}, exception=ValueError), 439 | dict(args={"attempts": "1"}, exception=ValueError), 440 | dict(args={"delay": -1}, exception=ValueError), 441 | dict(args={"delay": "1"}, exception=ValueError), 442 | dict(args={"max_delay": -1}, exception=ValueError), 443 | dict(args={"max_delay": "1"}, exception=ValueError), 444 | dict(args={"scale": 0}, exception=ValueError), 445 | dict(args={"scale": "1"}, exception=ValueError), 446 | dict(args={"jitter": -1}, exception=ValueError), 447 | dict(args={"jitter": "1"}, exception=ValueError), 448 | dict(args={"jitter": (1,)}, exception=ValueError), 449 | dict(args={"jitter": ("1", "2")}, exception=ValueError), 450 | dict(args={"exceptions": (1, 2)}, exception=TypeError), 451 | dict(args={"exceptions": 1}, exception=TypeError), 452 | dict(args={"exceptions": (Exception, 2)}, exception=TypeError), 453 | dict(args={"on_exception": 5}, exception=TypeError), 454 | ], 455 | ) 456 | def test_retry_invalid_args(case): 457 | with pytest.raises(case["exception"]): 458 | fnc.retry(**case["args"]) 459 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py37, py38, py39, py310, py311 3 | isolated_build = true 4 | 5 | [gh-actions] 6 | python = 7 | 3.7: py37 8 | 3.8: py38 9 | 3.9: py39 10 | 3.10: py310 11 | 3.11: py311 12 | 13 | [testenv] 14 | passenv = * 15 | extras = dev 16 | commands = 17 | {posargs:inv ci} 18 | setenv = 19 | TOX_ENV_SITE_PACKAGES_DIR = {envsitepackagesdir} 20 | --------------------------------------------------------------------------------
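For reference, a minimal usage sketch of the ``fnc.retry`` decorator exercised by ``tests/test_utilities.py`` above. It is an illustration only, using assumed arguments; the names ``log_attempt`` and ``flaky_call`` are hypothetical and not part of the library.

    import fnc


    def log_attempt(exc):
        # The decorator sets ``exc.retry = {"attempt": n}`` before invoking this hook.
        print(f"attempt {exc.retry['attempt']} failed: {exc!r}")


    @fnc.retry(attempts=3, delay=0.5, scale=2.0, max_delay=4.0, on_exception=log_attempt)
    def flaky_call():
        # Hypothetical operation that may raise. With these arguments it is tried up to
        # 3 times, sleeping 0.5s and then 1.0s between failures (the delay is multiplied
        # by ``scale`` after each sleep and capped at ``max_delay``); the final exception
        # is re-raised to the caller.
        ...

The sleep progression matches the expectations in ``test_retry_error``; for example, ``attempts=5, delay=1.5, scale=2.5, max_delay=8.0`` sleeps 1.5s, 3.75s, 8.0s, 8.0s before the last attempt's exception propagates.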