├── .circleci └── config.yml ├── .codecov.yml ├── .coveragerc ├── .flake8 ├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.rst ├── SECURITY.md ├── dev-requirements.txt ├── docs ├── api │ ├── autodoc.rst │ ├── ci.rst │ ├── console.rst │ ├── docs.rst │ ├── environment.rst │ ├── packaging.rst │ └── pytest.rst ├── changelog.rst ├── conf.py └── index.rst ├── invocations ├── __init__.py ├── _version.py ├── autodoc.py ├── checks.py ├── ci.py ├── console.py ├── docs.py ├── environment.py ├── packaging │ ├── __init__.py │ ├── release.py │ ├── semantic_version_monkey.py │ └── vendorize.py ├── pytest.py ├── testing.py ├── util.py └── watch.py ├── pytest.ini ├── setup.py ├── tasks.py └── tests ├── autodoc ├── _support │ ├── conf.py │ ├── docs │ │ ├── api.rst │ │ └── index.rst │ └── mytasks.py └── base.py ├── checks.py ├── conftest.py ├── console.py ├── environment.py ├── packaging ├── _support │ ├── conf.py │ ├── fakepackage │ │ ├── __init__.py │ │ ├── _version.py │ │ ├── noversion.py │ │ └── otherversion.py │ ├── index.rst │ ├── no_unreleased_1.1_bugs.rst │ ├── no_unreleased_1.x_features.rst │ ├── unreleased_1.1_bugs.rst │ └── unreleased_1.x_features.rst └── release.py └── pytest_.py /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | orbs: 4 | orb: invocations/orb@1.3.1 5 | 6 | workflows: 7 | main: 8 | jobs: 9 | - orb/lint: 10 | name: Lint 11 | - orb/format: 12 | name: Style check 13 | - orb/coverage: 14 | name: Test 15 | - orb/test-release: 16 | name: Release test 17 | - orb/test: 18 | name: Test << matrix.version >> 19 | # It's not worth testing on other interpreters if the baseline one 20 | # failed. Can't run >4 jobs at a time anyhow! 
21 | requires: ["Test"] 22 | matrix: 23 | parameters: 24 | version: ["3.7", "3.8", "3.9", "3.10", "3.11"] 25 | - orb/docs: 26 | name: "Docs" 27 | requires: ["Test"] 28 | task: "docs --nitpick" 29 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | comment: false 2 | coverage: 3 | precision: 0 4 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | include = 4 | invocations/* 5 | tests/* 6 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude = .git,build,dist 3 | ignore = E124,E125,E128,E261,E301,E302,E303,W503 4 | max-line-length = 79 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | .coverage 3 | htmlcov 4 | .cache 5 | docs/_build 6 | coverage.xml 7 | _build 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2020 Jeff Forcier. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, 8 | this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 
12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 14 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 15 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 16 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 17 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 19 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 20 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 21 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 22 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include README.rst 3 | include dev-requirements.txt 4 | recursive-include docs * 5 | recursive-exclude docs/_build * 6 | include dev-requirements.txt 7 | recursive-include tests * 8 | recursive-exclude * *.pyc *.pyo 9 | recursive-exclude **/__pycache__ * 10 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | |version| |python| |license| |ci| |coverage| 2 | 3 | .. |version| image:: https://img.shields.io/pypi/v/invocations 4 | :target: https://pypi.org/project/invocations/ 5 | :alt: PyPI - Package Version 6 | .. |python| image:: https://img.shields.io/pypi/pyversions/invocations 7 | :target: https://pypi.org/project/invocations/ 8 | :alt: PyPI - Python Version 9 | .. |license| image:: https://img.shields.io/pypi/l/invocations 10 | :target: https://github.com/pyinvoke/invocations/blob/main/LICENSE 11 | :alt: PyPI - License 12 | .. 
|ci| image:: https://img.shields.io/circleci/build/github/pyinvoke/invocations/main 13 | :target: https://app.circleci.com/pipelines/github/pyinvoke/invocations 14 | :alt: CircleCI 15 | .. |coverage| image:: https://img.shields.io/codecov/c/gh/pyinvoke/invocations 16 | :target: https://app.codecov.io/gh/pyinvoke/invocations 17 | :alt: Codecov 18 | 19 | What is this? 20 | ============= 21 | 22 | Invocations is a collection of reusable `Invoke `_ tasks, 23 | task collections and helper functions. Originally sourced from the Invoke 24 | project's own project-management tasks file, they are now highly configurable 25 | and used across a number of projects, with the intent to become a clearinghouse 26 | for implementing common best practices. 27 | 28 | Currently implemented topics include (but are not limited to): 29 | 30 | - management of Sphinx documentation trees 31 | - Python project release lifecycles 32 | - dependency vendoring 33 | - running test suites (unit, integration, coverage-oriented, etc) 34 | - console utilities such as confirmation prompts 35 | 36 | and more. 37 | 38 | Roadmap 39 | ======= 40 | 41 | While Invocations has been released with a major version number to signal 42 | adherence to semantic versioning, it's somewhat early in development and has 43 | not fully achieved its design vision yet. 44 | 45 | We expect it to gain maturity in tandem with the adoption and development of 46 | Invoke post-1.x. It's also highly likely that Invocations will see a few major 47 | releases as its API (and those of its sister library, `patchwork 48 | `_) matures. 49 | 50 | For a high level roadmap re: when Invocations will get significant updates, see 51 | the maintainer's `roadmap page `_. 52 | 53 | Development 54 | =========== 55 | 56 | This project uses the same dev methodology as Invoke proper - please see its 57 | development page `here `_. 
58 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | ## Security contact information 2 | 3 | To report a security vulnerability, please use the 4 | [Tidelift security contact](https://tidelift.com/security). 5 | Tidelift will coordinate the fix and disclosure. 6 | -------------------------------------------------------------------------------- /dev-requirements.txt: -------------------------------------------------------------------------------- 1 | # For testing 2 | pytest-relaxed>=2 3 | invoke>=2 4 | releases>=2.0.1 5 | pytest-cov==2.4.0 6 | pytest-mock==3.2.0 7 | watchdog==0.8.3 8 | coverage==4.4.2 9 | icecream==2.1.3 10 | # Formatting 11 | black==19.10b0 12 | # Linting 13 | flake8==3.6.0 14 | # Modern setuptools, eg automatic license file detection 15 | setuptools>=56 16 | 17 | # Self, for runtime/task dependencies 18 | -e . 19 | -------------------------------------------------------------------------------- /docs/api/autodoc.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | ``autodoc`` 3 | =========== 4 | 5 | .. automodule:: invocations.autodoc 6 | -------------------------------------------------------------------------------- /docs/api/ci.rst: -------------------------------------------------------------------------------- 1 | ====== 2 | ``ci`` 3 | ====== 4 | 5 | .. automodule:: invocations.ci 6 | -------------------------------------------------------------------------------- /docs/api/console.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | ``console`` 3 | =========== 4 | 5 | .. 
automodule:: invocations.console 6 | -------------------------------------------------------------------------------- /docs/api/docs.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | ``docs`` 3 | ======== 4 | 5 | .. automodule:: invocations.docs 6 | -------------------------------------------------------------------------------- /docs/api/environment.rst: -------------------------------------------------------------------------------- 1 | =============== 2 | ``environment`` 3 | =============== 4 | 5 | .. automodule:: invocations.environment 6 | -------------------------------------------------------------------------------- /docs/api/packaging.rst: -------------------------------------------------------------------------------- 1 | ============= 2 | ``packaging`` 3 | ============= 4 | 5 | ``packaging.release`` 6 | ===================== 7 | 8 | .. automodule:: invocations.packaging.release 9 | 10 | 11 | ``packaging.vendorize`` 12 | ======================= 13 | 14 | .. automodule:: invocations.packaging.vendorize 15 | -------------------------------------------------------------------------------- /docs/api/pytest.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | ``pytest`` 3 | ========== 4 | 5 | .. automodule:: invocations.pytest 6 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | Changelog 3 | ========= 4 | 5 | - :release:`3.3.0 <2023-05-12>` 6 | - :feature:`-` Add mypy type-checking variant of the recently added import 7 | test, in ``packaging.release.test_install``. This helps prove packages 8 | exposing ``py.typed`` in their source tree are including it in their 9 | distributions correctly. 
10 | - :release:`3.2.0 <2023-05-11>` 11 | - :feature:`-` Minor enhancements to the ``checks`` module: 12 | 13 | - ``blacken`` now has a ``format`` alias (and will likely reverse the real 14 | name and the alias in 4.0) 15 | - Added ``lint`` task which currently just runs ``flake8``, will likely 16 | learn how to be configurable later. 17 | - Added ``all_`` default task for the collection, which runs both 18 | ``blacken`` (in regular, not diff-only mode - idea is to be useful for 19 | devs, not CI, which already does both independently) and ``lint`` in 20 | series. 21 | 22 | - :release:`3.1.0 <2023-05-02>` 23 | - :feature:`-` Updated ``packaging.release.test_install`` to attempt imports of 24 | freshly test-installed packages, to catch import-time errors on top of 25 | install-time ones. This can be opted out of by giving the ``skip_import`` 26 | kwarg (aka the ``--skip-import`` flag on the CLI). 27 | - :release:`3.0.2 <2023-04-28>` 28 | - :support:`- backported` Unpin ``tabulate`` in our install requirements, it's 29 | had many more releases since we instituted a defensive pin vs some bugs in 30 | its later 0.7 line! 31 | - :release:`3.0.1 <2023-01-06>` 32 | - :bug:`-` We neglected to remove references to ``six`` in a few spots - 33 | including some that utilized Invoke's old vendor of same; this causes issues 34 | when trying to use development and upcoming versions of Invoke. Six is now 35 | truly gone! 36 | - :release:`3.0.0 <2022-12-31>` 37 | - :support:`-` Various fixes and doc updates re: the `~invocations.autodoc` 38 | module's compatibility with modern Sphinx versions. 39 | - :support:`-` The ``dual_wheels``, ``alt_python``, and ``check_desc`` 40 | arguments/config options for the ``invocations.packaging.release`` module 41 | have been removed. 42 | 43 | .. warning:: This is a backwards-incompatible change. 44 | 45 | .. 
note:: 46 | If you were using ``check_desc``, note that the release tasks have been 47 | using ``twine check`` for a few releases now, as a default part of 48 | execution, and will continue doing so; ``check_desc`` only impacted the 49 | use of the older ``setup.py check`` command. 50 | 51 | - :support:`-` The ``invocations.travis`` module has been removed. If you 52 | relied upon it, we may accept PRs to make the newer ``invocations.ci`` module 53 | more generic. 54 | 55 | .. warning:: This is a backwards-incompatible change. 56 | 57 | - :support:`-` Drop Python 2 (and 3.5) support. We now support Python 58 | 3.6+ only. This naturally includes a number of dependency updates (direct and 59 | indirect) as well. 60 | 61 | .. warning:: This is a backwards-incompatible change. 62 | 63 | - :release:`2.6.1 <2022-06-26>` 64 | - :support:`- backported` Remove upper bounds pinning on many deps; this makes 65 | it easier for related projects to test upgrades, run CI, etc. In general, 66 | we're moving away from this tactic. 67 | - :release:`2.6.0 <2022-03-25>` 68 | - :feature:`-` Enhance ``packaging.release.test-install`` so it's more flexible 69 | about the primary directory argument (re: a ``dist`` dir, or a parent of one) 70 | and errors usefully when you (probably) gave it an incorrect path. 71 | - :feature:`-` Update ``packaging.release.publish`` with a new config option, 72 | ``rebuild_with_env``, to support a downstream (Fabric) release use-case. 73 | - :release:`2.5.0 <2022-03-25>` 74 | - :feature:`-` Port ``make-sshable`` from the ``travis`` module to the new 75 | ``ci`` one. 76 | - :release:`2.4.0 <2022-03-17>` 77 | - :feature:`-` Allow supplying additional test runners to ``pytest.coverage``; 78 | primarily useful for setting up multiple additive test runs before publishing 79 | reports. 80 | - :feature:`-` Add a new `invocations.ci` task module for somewhat-more-generic 81 | CI support than the now legacy ``invocations.travis`` tasks. 
82 | - :feature:`-` Add additional CLI flags to the use of ``gpg`` when signing 83 | releases, to support headless passphrase entry. It was found that modern GPG 84 | versions require ``--batch`` and ``--pinentry-mode=loopback`` for 85 | ``--passphrase-fd`` to function correctly. 86 | - :release:`2.3.0 <2021-09-24>` 87 | - :bug:`- major` Ensure that the venv used for 88 | ``packaging.release.test_install`` has its ``pip`` upgraded to match the 89 | invoking interpreter's version of same; this avoids common pitfalls where the 90 | "inner" pip is a bundled-with-venv, much-older version incapable of modern 91 | package installations. 92 | - :support:`-` Overhaul testing and release procedures to use CircleCI & modern 93 | Invocations. 94 | - :bug:`- major` The ``packaging.release.upload`` task wasn't properly exposed 95 | externally, even though another task's docstring referenced it. Fixed. 96 | - :release:`2.2.0 <2021-09-03>` 97 | - :bug:`- major` ``packaging.release.status`` (and its use elsewhere, eg 98 | ``prepare``) didn't adequately reload the local project's version module 99 | during its second/final recheck; this causes that check to fail when said 100 | version was edited as part of a ``prepare`` run. It now force-reloads said 101 | version module. 102 | - :feature:`-` ``packaging.release.push``, in dry-run mode, now dry-runs its 103 | ``git push`` subcommand -- meaning the subcommand itself is what is 104 | "dry-ran", instead of truly executing ``git push --dry-run`` -- when a CI 105 | environment is detected. 106 | 107 | - This prevents spurious errors when the git remote (eg Github) bails out on 108 | read-only authentication credentials, which is common within CI systems. 109 | - It's also just not very useful to dry-run a real git push within CI, since 110 | almost certainly the commands to generate git objects to get pushed will 111 | themselves not have truly run! 
112 | 113 | - :feature:`-` Added the ``invocations.environment`` module with top-level 114 | functions such as `~invocations.environment.in_ci`. 115 | - :release:`2.1.0 <2021-08-27>` 116 | - :feature:`-` Add ``packaging.release.test_install`` task and call it just 117 | prior to the final step in ``packaging.release.upload`` (so one doesn't 118 | upload packages which build OK but don't actually install OK). 119 | - :feature:`-` Add Codecov support to ``pytest.coverage``. 120 | - :support:`-` Rely on Invoke 1.6+ for some of its new features. 121 | - :support:`-` ``packaging.release.prepare`` now runs its internal status check 122 | twice, once at the start (as before) and again at the end (to prove that the 123 | actions taken did in fact satisfy needs). 124 | - :feature:`-` ``packaging.release.prepare`` grew a ``dry_run`` flag to match 125 | the rest of its friends. 126 | - :bug:`- major` ``packaging.release.prepare`` now generates annotated Git tags 127 | instead of lightweight ones. This was a perplexing oversight (Git has always 128 | intended annotated tags to be used for release purposes) so we're considering 129 | it a bugfix instead of a backwards incompatible feature change. 130 | - :feature:`-` The ``packaging.release.all_`` task has been expanded to 131 | actually do "ALL THE THINGS!!!", given a ``dry_run`` flag, and renamed on the 132 | CLI to ``all`` (no trailing underscore). 133 | - :feature:`-` Add ``packaging.release.push`` for pushing Git objects as part 134 | of a release. 135 | - :feature:`-` Added ``twine check`` (which validates packaging metadata's 136 | ``long_description``) as a pre-upload step within 137 | ``packaging.release.publish``. 138 | 139 | - This includes some tweaking of ``readme_renderer`` behavior (used 140 | internally by twine) so it correctly spots more malformed RST, as Sphinx 141 | does. 
142 | 143 | - :bug:`- major` ``packaging.release.publish`` missed a spot when it grew 144 | "kwargs beat configuration" behavior - the ``index`` kwarg still got 145 | overwritten by the config value, if defined. This has been fixed. 146 | - :bug:`- major` Correctly test for ``html`` report type inside of 147 | ``pytest.coverage`` when deciding whether to run ``open`` at the end. 148 | - :bug:`- major` ``pytest.coverage`` incorrectly concatenated its ``opts`` 149 | argument to internal options; this has been fixed. 150 | - :release:`2.0.0 <2021-01-24>` 151 | - :support:`-` Drop Python 3.4 support. We didn't actually do anything to make 152 | the code not work on 3.4, but we've removed some 3.4 related runtime (and 153 | development) dependency limitations. Our CI will also no longer test on 3.4. 154 | 155 | .. warning:: This is technically a backwards incompatible change. 156 | 157 | - :support:`12` Upgrade our packaging manifest so tests (also docs, 158 | requirements files, etc) are included in the distribution archives. Thanks to 159 | Tomáš Chvátal for the report. 160 | - :support:`21` Only require ``enum34`` under Python 2 to prevent it clashing 161 | with the stdlib ``enum`` under Python 3. Credit: Alex Gaynor. 162 | - :bug:`- major` ``release.build``'s ``--clean`` flag has been updated: 163 | 164 | - It now honors configuration like the other flags in this task, 165 | specifically ``packaging.clean``. 166 | - It now defaults to ``False`` (rationale: most build operations in the 167 | wild tend to assume no cleaning by default, so defaulting to the opposite 168 | was sometimes surprising). 169 | 170 | .. warning:: This is a backwards incompatible change. 171 | 172 | - When ``True``, it applies to both build and dist directories, instead of 173 | just build. 174 | 175 | .. warning:: This is a backwards incompatible change. 
176 | 177 | - :support:`-` Reverse the default value of ``release.build`` and 178 | ``release.publish``)'s ``wheel`` argument from ``False`` to ``True``. 179 | Included in this change is a new required runtime dependency on the ``wheel`` 180 | package. 181 | 182 | Rationale: at this point in time, most users will be expecting wheels to be 183 | available, and not building wheels is likely to be the uncommon case. 184 | 185 | .. warning:: This is a backwards incompatible change. 186 | 187 | - :bug:`- major` ``release.build`` and ``release.publish`` had bad 188 | kwargs-vs-config logic preventing flags such as ``--wheel`` or ``--python`` 189 | from actually working (config defaults always won out, leading to silent 190 | ignoring of user input). This has been fixed; config will now only be honored 191 | unless the CLI appears to be overriding it. 192 | - :support:`-` Replace some old Python 2.6-compatible syntax bits. 193 | - :feature:`-` Add a ``warnings`` kwarg/flag to ``pytest.test``, allowing one 194 | to call it with ``--no-warnings`` as an inline 'alias' for pytest's own 195 | ``--disable-warnings`` flag. 196 | - :bug:`- major` Fix minor display bug causing the ``pytest`` task module to 197 | append a trailing space to the invocation of pytest itself. 198 | - :support:`-` Modify ``release`` task tree to look at ``main`` branches 199 | in addition to ``master`` ones, for "are we on a feature release line or a 200 | bugfix one?" calculations, etc. 201 | - :release:`1.4.0 <2018-06-26>` 202 | - :release:`1.3.1 <2018-06-26>` 203 | - :release:`1.2.2 <2018-06-26>` 204 | - :release:`1.1.1 <2018-06-26>` 205 | - :release:`1.0.1 <2018-06-26>` 206 | - :bug:`-` Was missing a 'hide output' flag on a subprocess shell call, the 207 | result of which was mystery git branch names appearing in the output of 208 | ``inv release`` and friends. Fixed now. 
209 | - :bug:`-` ``checks.blacken`` had a typo regarding its folder selection 210 | argument; the CLI/function arg was ``folder`` while the configuration value 211 | was ``folders`` (plural). It's been made consistent: the CLI/function 212 | argument is now ``folders``. 213 | - :feature:`-` Add a ``find_opts`` argument to ``checks.blacken`` for improved 214 | control over what files get blackened. 215 | - :release:`1.3.0 <2018-06-20>` 216 | - :feature:`-` Bump Releases requirement up to 1.6 and leverage its new ability 217 | to load Sphinx extensions, in ``packaging.release.prepare`` (which parses 218 | Releases changelogs programmatically). Prior to this, projects which needed 219 | extensions to build their doctree would throw errors when using the 220 | ``packaging.release`` module. 221 | - :release:`1.2.1 <2018-06-18>` 222 | - :support:`- backported` Remove some apparently non-functional ``setup.py`` 223 | logic around conditionally requiring ``enum34``; it was never getting 224 | selected and thus breaking a couple modules that relied on it. 225 | 226 | ``enum34`` is now a hard requirement like the other 227 | semi-optional-but-not-really requirements. 228 | - :release:`1.2.0 <2018-05-22>` 229 | - :feature:`-` Add ``travis.blacken`` which wraps the new ``checks.blacken`` 230 | (in diff+check mode, for test output useful for users who cannot themselves 231 | simply run black) in addition to performing Travis-oriented Python version 232 | checks and pip installation. 233 | 234 | This is necessary to remove boilerplate around the fact that ``black`` is not 235 | even visible to Python versions less than 3.6. 236 | - :feature:`-` Break out a generic form of the ``travis.sudo-coverage`` task 237 | into ``travis.sudo-run`` which can be used for arbitrary commands run under 238 | the ssh/sudo capable user generated by 239 | ``travis.make-sudouser``/``travis.make-sshable``. 
240 | - :feature:`-` Add 'missing' arguments to ``pytest.integration`` so its 241 | signature now largely matches ``pytest.test``, which it wraps. 242 | - :feature:`-` Add the ``checks`` module, containing ``checks.blacken`` which 243 | executes the `black `_ code formatter. Thanks 244 | to Chris Rose. 245 | - :release:`1.1.0 <2018-05-14>` 246 | - :feature:`-` Split out the body of the (sadly incomplete) 247 | ``packaging.release.all`` task into the better-named 248 | ``packaging.release.prepare``. (``all`` continues to behave as it did, it 249 | just now calls ``prepare`` explicitly.) 250 | - :release:`1.0.0 <2018-05-08>` 251 | - :feature:`-` Pre-history / code primarily for internal consumption 252 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from os import environ, getcwd 3 | from os.path import abspath, join, dirname 4 | import sys 5 | 6 | from invocations.environment import in_ci 7 | 8 | 9 | # Core settings 10 | extensions = [ 11 | "releases", 12 | "sphinx.ext.intersphinx", 13 | "sphinx.ext.autodoc", 14 | "invocations.autodoc", 15 | ] 16 | templates_path = ["_templates"] 17 | source_suffix = ".rst" 18 | master_doc = "index" 19 | exclude_patterns = ["_build"] 20 | default_role = "obj" 21 | 22 | project = "Invocations" 23 | copyright = f"{datetime.now().year} Jeff Forcier" 24 | 25 | # Ensure project directory is on PYTHONPATH for version, autodoc access 26 | sys.path.insert(0, abspath(join(getcwd(), ".."))) 27 | 28 | # Enforce use of Alabaster (even on RTD) and configure it 29 | html_theme = "alabaster" 30 | html_theme_options = { 31 | "description": "Common/best-practice Invoke tasks and collections", 32 | "github_user": "pyinvoke", 33 | "github_repo": "invocations", 34 | # TODO: make new UA property? only good for full domains and not RTD.io? 
35 | # 'analytics_id': 'UA-18486793-X', 36 | "travis_button": False, 37 | "tidelift_url": "https://tidelift.com/subscription/pkg/pypi-invocations?utm_source=pypi-invocations&utm_medium=referral&utm_campaign=docs", # noqa 38 | } 39 | html_sidebars = { 40 | "**": ["about.html", "navigation.html", "searchbox.html", "donate.html"] 41 | } 42 | 43 | # Other extension configs 44 | autodoc_default_options = { 45 | "members": True, 46 | "special-members": True, 47 | } 48 | # Without this, as of Sphinx 4-ish? our autodoc plugin goes boom because its 49 | # parent class (in sphinx itself!) isn't in our reference tree & the ref fails 50 | autodoc_inherit_docstrings = False 51 | releases_github_path = "pyinvoke/invocations" 52 | 53 | # Intersphinx 54 | # TODO: this could probably get wrapped up into us or some other shared lib? 55 | on_rtd = environ.get("READTHEDOCS") == "True" 56 | on_dev = not (on_rtd or in_ci()) 57 | 58 | # Invoke 59 | inv_target = join( 60 | dirname(__file__), "..", "..", "invoke", "sites", "docs", "_build" 61 | ) 62 | if not on_dev: 63 | inv_target = "http://docs.pyinvoke.org/en/latest/" 64 | # Put them all together, + Python core 65 | intersphinx_mapping = { 66 | "python": ("http://docs.python.org/", None), 67 | "invoke": (inv_target, None), 68 | } 69 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | Invocations 3 | =========== 4 | 5 | .. include:: ../README.rst 6 | 7 | 8 | Contents 9 | ======== 10 | 11 | .. toctree:: 12 | :glob: 13 | 14 | changelog 15 | 16 | API/task docs 17 | ============= 18 | 19 | .. 
toctree:: 20 | :glob: 21 | 22 | api/* 23 | -------------------------------------------------------------------------------- /invocations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyinvoke/invocations/4e3578e9c49dbbff2ec00ef3c8d37810fba511fa/invocations/__init__.py -------------------------------------------------------------------------------- /invocations/_version.py: -------------------------------------------------------------------------------- 1 | __version_info__ = (3, 3, 0) 2 | __version__ = ".".join(map(str, __version_info__)) 3 | -------------------------------------------------------------------------------- /invocations/autodoc.py: -------------------------------------------------------------------------------- 1 | """ 2 | Sphinx autodoc hooks for documenting Invoke-level objects such as tasks. 3 | 4 | Unlike most of the rest of Invocations, this module isn't for reuse in the 5 | "import and call functions" sense, but instead acts as a Sphinx extension which 6 | allows Sphinx's `autodoc`_ functionality to see and document 7 | Invoke tasks and similar Invoke objects. 8 | 9 | .. note:: 10 | This functionality is mostly useful for redistributable/reusable tasks 11 | which have been defined as importable members of some Python package or 12 | module, as opposed to "local-only" tasks that live in a single project's 13 | ``tasks.py``. 14 | 15 | However, it will work for any tasks that Sphinx autodoc can import, so in a 16 | pinch you could for example tweak ``sys.path`` in your Sphinx ``conf.py`` 17 | to get it loading up a "local" tasks file for import. 18 | 19 | To use: 20 | 21 | - Add ``"invocations.autodoc"`` to your Sphinx ``conf.py``'s ``extensions`` 22 | list. 23 | - Use Sphinx autodoc's ``automodule`` directive normally, aiming it at your 24 | tasks module(s), e.g. ``.. automodule:: myproject.tasks`` in some ``.rst`` 25 | document of your choosing. 
26 | 27 | - As noted above, this only works for modules that are importable, like any 28 | other Sphinx autodoc use case. 29 | - Unless you want to opt-in which module members get documented, use 30 | ``:members:`` or add ``"members": True`` to your ``conf.py``'s 31 | ``autodoc_default_options``. 32 | - By default, only tasks with docstrings will be picked up, unless you also 33 | give the ``:undoc-members:`` flag or add ``:undoc-members:`` / add 34 | ``"undoc-members": True`` to ``autodoc_default_options``. 35 | - Please see the `autodoc`_ docs for details on these settings and more! 36 | 37 | - Build your docs, and you should see your tasks showing up as documented 38 | functions in the result. 39 | 40 | 41 | .. _autodoc: http://www.sphinx-doc.org/en/master/ext/autodoc.html 42 | """ 43 | 44 | import inspect 45 | 46 | from invoke import Task 47 | 48 | # For sane mock patching. Meh. 49 | from sphinx.ext import autodoc 50 | 51 | 52 | class TaskDocumenter( 53 | autodoc.DocstringSignatureMixin, autodoc.ModuleLevelDocumenter 54 | ): 55 | objtype = "task" 56 | directivetype = "function" 57 | 58 | @classmethod 59 | def can_document_member(cls, member, membername, isattr, parent): 60 | return isinstance(member, Task) 61 | 62 | def format_args(self): 63 | function = self.object.body 64 | # TODO: consider extending (or adding a sibling to) Task.argspec so it 65 | # preserves more of the full argspec tuple. 66 | # TODO: whether to preserve the initial context argument is an open 67 | # question. For now, it will appear, but only pending invoke#170 - 68 | # after which point "call tasks as raw functions" may be less common. 
69 | # TODO: also, it may become moot-ish if we turn this all into emission 70 | # of custom domain objects and/or make the CLI arguments the focus 71 | return autodoc.stringify_signature(inspect.signature(function)) 72 | 73 | def document_members(self, all_members=False): 74 | # Neuter this so superclass bits don't introspect & spit out autodoc 75 | # directives for task attributes. Most of that's not useful. 76 | pass 77 | 78 | 79 | def setup(app): 80 | app.setup_extension("sphinx.ext.autodoc") 81 | app.add_autodocumenter(TaskDocumenter) 82 | -------------------------------------------------------------------------------- /invocations/checks.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tasks for common project sanity-checking such as linting or type checking. 3 | 4 | .. versionadded:: 1.2 5 | """ 6 | 7 | from invoke import task 8 | 9 | 10 | @task(name="blacken", aliases=["format"], iterable=["folders"]) 11 | def blacken( 12 | c, line_length=79, folders=None, check=False, diff=False, find_opts=None 13 | ): 14 | r""" 15 | Run black on the current source tree (all ``.py`` files). 16 | 17 | :param int line_length: 18 | Line length argument. Default: ``79``. 19 | :param list folders: 20 | List of folders (or, on the CLI, an argument that can be given N times) 21 | to search within for ``.py`` files. Default: ``["."]``. Honors the 22 | ``blacken.folders`` config option. 23 | :param bool check: 24 | Whether to run ``black --check``. Default: ``False``. 25 | :param bool diff: 26 | Whether to run ``black --diff``. Default: ``False``. 27 | :param str find_opts: 28 | Extra option string appended to the end of the internal ``find`` 29 | command. For example, skip a vendor directory with ``"-and -not -path 30 | ./vendor\*"``, add ``-mtime N``, or etc. Honors the 31 | ``blacken.find_opts`` config option. 32 | 33 | .. versionadded:: 1.2 34 | .. versionchanged:: 1.4 35 | Added the ``find_opts`` argument. 36 | .. 
versionchanged:: 3.2 37 | Added the ``format`` alias. 38 | """ 39 | config = c.config.get("blacken", {}) 40 | default_folders = ["."] 41 | configured_folders = config.get("folders", default_folders) 42 | folders = folders or configured_folders 43 | 44 | default_find_opts = "" 45 | configured_find_opts = config.get("find_opts", default_find_opts) 46 | find_opts = find_opts or configured_find_opts 47 | 48 | black_command_line = "black -l {}".format(line_length) 49 | if check: 50 | black_command_line = "{} --check".format(black_command_line) 51 | if diff: 52 | black_command_line = "{} --diff".format(black_command_line) 53 | if find_opts: 54 | find_opts = " {}".format(find_opts) 55 | else: 56 | find_opts = "" 57 | 58 | cmd = "find {} -name '*.py'{} | xargs {}".format( 59 | " ".join(folders), find_opts, black_command_line 60 | ) 61 | c.run(cmd, pty=True) 62 | 63 | 64 | @task 65 | def lint(c): 66 | """ 67 | Apply linting. 68 | 69 | .. versionadded:: 3.2 70 | """ 71 | # TODO: configurable and/or switch to ruff 72 | c.run("flake8", warn=True, pty=True) 73 | 74 | 75 | @task(default=True) 76 | def all_(c): 77 | """ 78 | Run all common formatters/linters for the project. 79 | 80 | .. versionadded:: 3.2 81 | """ 82 | # TODO: contextmanager config, if we don't already have that 83 | c.config.run.echo = True 84 | blacken(c) 85 | lint(c) 86 | -------------------------------------------------------------------------------- /invocations/ci.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tasks intended for use under continuous integration. 3 | 4 | Presently, this tends to assume CircleCI, but it is intended to be generic & 5 | we'll accept patches to make any Circle-isms configurable. 6 | 7 | Most of it involves setting up to run a test suite under a special user who is 8 | allowed to run ``sudo`` and who also needs a password to do so. 
@task
def make_sudouser(c):
    """
    Create a passworded sudo-capable user.

    Used by other tasks to execute the test suite so sudo tests work.

    Reads ``ci.sudo.user``, ``ci.sudo.password`` and ``ci.sudo.groups`` from
    config (defaults live in this module's collection configuration).
    """
    user = c.ci.sudo.user
    password = c.ci.sudo.password
    groups = c.ci.sudo.groups
    # "--create-home" because we need a place to put conf files, keys etc
    # "--groups xxx" for (non-passwordless) sudo access, eg 'sudo' group on
    # Debian, plus any others, eg shared group membership with regular user for
    # writing out artifact files (assuming $HOME is g+w, which it is on Circle)
    c.sudo(
        "useradd {} --create-home --groups {}".format(user, ",".join(groups))
    )
    # Password set noninteractively via chpasswd (assumes invoking user itself
    # is able to passwordless sudo; this is true on CircleCI)
    c.run("echo {}:{} | sudo chpasswd".format(user, password))


@task
def sudo_run(c, command):
    """
    Run some command under CI-oriented sudo subshell/virtualenv.

    :param str command:
        Command string to run, e.g. ``inv coverage``, ``inv integration``,
        etc. (Does not necessarily need to be an Invoke task, but...)
    """
    # NOTE: due to circle sudoers config, circleci user can't do "sudo -u" w/o
    # password prompt. However, 'sudo su' seems to work just as well...
    # NOTE: well. provided you do this really asinine PATH preservation to work
    # around su's path resetting. no, --preserve-environment doesn't work, even
    # if you have --preserve-environment=PATH on the outer 'sudo' (which does
    # work for what sudo directly calls)
    # TODO: may want to rub --pty on the 'su' but so far seems irrelevant
    c.run(
        'sudo su {} -c "export PATH=$PATH && {}"'.format(
            c.ci.sudo.user, command
        )
    )


@task
def make_sshable(c):
    """
    Set up passwordless SSH keypair & authorized_hosts access to localhost.

    Runs as the ``ci.sudo.user`` account (via `sudo_run`) so the generated
    keypair ends up owned by that user, avoiding chown dances.
    """
    user = c.ci.sudo.user
    home = "~{}".format(user)
    # Run sudo() as the new sudo user; means less chown'ing, etc.
    c.config.sudo.user = user
    c.config.sudo.password = c.ci.sudo.password
    ssh_dir = "{}/.ssh".format(home)
    # NOTE: the templates only use positional arg 0 (the ssh dir); 'user' is
    # passed to .format() but unused by these particular templates.
    for cmd in ("mkdir {0}", "chmod 0700 {0}"):
        sudo_run(c, cmd.format(ssh_dir, user))
    # -N '' means an empty passphrase, i.e. passwordless key.
    sudo_run(c, "ssh-keygen -t rsa -f {}/id_rsa -N ''".format(ssh_dir))
    sudo_run(c, f"cp {ssh_dir}/id_rsa.pub {ssh_dir}/authorized_keys")


# Default config: override ci.sudo.group in your own config files if your
# image's sudoers group isn't literally named 'sudo' (eg 'wheel').
ns = Collection(make_sudouser, sudo_run, make_sshable)
ns.configure(
    {
        "ci": {
            "sudo": {
                "user": "invoker",
                "password": "secret",
                "groups": ["sudo", "circleci"],
            }
        }
    }
)
3 | """ 4 | 5 | import sys 6 | 7 | 8 | # NOTE: originally cribbed from fab 1's contrib.console.confirm 9 | def confirm(question, assume_yes=True): 10 | """ 11 | Ask user a yes/no question and return their response as a boolean. 12 | 13 | ``question`` should be a simple, grammatically complete question such as 14 | "Do you wish to continue?", and will have a string similar to ``" [Y/n] "`` 15 | appended automatically. This function will *not* append a question mark for 16 | you. 17 | 18 | By default, when the user presses Enter without typing anything, "yes" is 19 | assumed. This can be changed by specifying ``assume_yes=False``. 20 | 21 | .. note:: 22 | If the user does not supply input that is (case-insensitively) equal to 23 | "y", "yes", "n" or "no", they will be re-prompted until they do. 24 | 25 | :param str question: The question part of the prompt. 26 | :param bool assume_yes: 27 | Whether to assume the affirmative answer by default. Default value: 28 | ``True``. 29 | 30 | :returns: A `bool`. 31 | """ 32 | # Set up suffix 33 | if assume_yes: 34 | suffix = "Y/n" 35 | else: 36 | suffix = "y/N" 37 | # Loop till we get something we like 38 | # TODO: maybe don't do this? It can be annoying. Turn into 'q'-for-quit? 39 | while True: 40 | # TODO: ensure that this is Ctrl-C friendly, ISTR issues with 41 | # raw_input/input on some Python versions blocking KeyboardInterrupt. 42 | response = input("{} [{}] ".format(question, suffix)) 43 | response = response.lower().strip() # Normalize 44 | # Default 45 | if not response: 46 | return assume_yes 47 | # Yes 48 | if response in ["y", "yes"]: 49 | return True 50 | # No 51 | if response in ["n", "no"]: 52 | return False 53 | # Didn't get empty, yes or no, so complain and loop 54 | err = "I didn't understand you. Please specify '(y)es' or '(n)o'." 
55 | print(err, file=sys.stderr) 56 | -------------------------------------------------------------------------------- /invocations/docs.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tasks for managing Sphinx documentation trees. 3 | """ 4 | 5 | from os.path import join, isdir 6 | from tempfile import mkdtemp 7 | from shutil import rmtree 8 | import sys 9 | 10 | from invoke import task, Collection, Context 11 | 12 | from .watch import make_handler, observe 13 | 14 | 15 | # Underscored func name to avoid shadowing kwargs in build() 16 | @task(name="clean") 17 | def _clean(c): 18 | """ 19 | Nuke docs build target directory so next build is clean. 20 | """ 21 | if isdir(c.sphinx.target): 22 | rmtree(c.sphinx.target) 23 | 24 | 25 | # Ditto 26 | @task(name="browse") 27 | def _browse(c): 28 | """ 29 | Open build target's index.html in a browser (using 'open'). 30 | """ 31 | index = join(c.sphinx.target, c.sphinx.target_file) 32 | c.run("open {}".format(index)) 33 | 34 | 35 | @task( 36 | default=True, 37 | help={ 38 | "opts": "Extra sphinx-build options/args", 39 | "clean": "Remove build tree before building", 40 | "browse": "Open docs index in browser after building", 41 | "nitpick": "Build with stricter warnings/errors enabled", 42 | "source": "Source directory; overrides config setting", 43 | "target": "Output directory; overrides config setting", 44 | }, 45 | ) 46 | def build( 47 | c, 48 | clean=False, 49 | browse=False, 50 | nitpick=False, 51 | opts=None, 52 | source=None, 53 | target=None, 54 | ): 55 | """ 56 | Build the project's Sphinx docs. 
57 | """ 58 | if clean: 59 | _clean(c) 60 | if opts is None: 61 | opts = "" 62 | if nitpick: 63 | opts += " -n -W -T" 64 | cmd = "sphinx-build{} {} {}".format( 65 | (" " + opts) if opts else "", 66 | source or c.sphinx.source, 67 | target or c.sphinx.target, 68 | ) 69 | c.run(cmd, pty=True) 70 | if browse: 71 | _browse(c) 72 | 73 | 74 | @task 75 | def doctest(c): 76 | """ 77 | Run Sphinx' doctest builder. 78 | 79 | This will act like a test run, displaying test results & exiting nonzero if 80 | all tests did not pass. 81 | 82 | A temporary directory is used for the build target, as the only output is 83 | the text file which is automatically printed. 84 | """ 85 | tmpdir = mkdtemp() 86 | try: 87 | opts = "-b doctest" 88 | target = tmpdir 89 | build(c, clean=True, target=target, opts=opts) 90 | finally: 91 | rmtree(tmpdir) 92 | 93 | 94 | @task 95 | def tree(c): 96 | """ 97 | Display documentation contents with the 'tree' program. 98 | """ 99 | ignore = ".git|*.pyc|*.swp|dist|*.egg-info|_static|_build|_templates" 100 | c.run('tree -Ca -I "{}" {}'.format(ignore, c.sphinx.source)) 101 | 102 | 103 | # Vanilla/default/parameterized collection for normal use 104 | ns = Collection(_clean, _browse, build, tree, doctest) 105 | ns.configure( 106 | { 107 | "sphinx": { 108 | "source": "docs", 109 | # TODO: allow lazy eval so one attr can refer to another? 110 | "target": join("docs", "_build"), 111 | "target_file": "index.html", 112 | } 113 | } 114 | ) 115 | 116 | 117 | # Multi-site variants, used by various projects (fabric, invoke, paramiko) 118 | # Expects a tree like sites/www/ + sites/docs/, 119 | # and that you want 'inline' html build dirs, e.g. sites/www/_build/index.html. 120 | 121 | 122 | def _site(name, help_part): 123 | _path = join("sites", name) 124 | # TODO: turn part of from_module into .clone(), heh. 
125 | self = sys.modules[__name__] 126 | coll = Collection.from_module( 127 | self, 128 | name=name, 129 | config={"sphinx": {"source": _path, "target": join(_path, "_build")}}, 130 | ) 131 | coll.__doc__ = "Tasks for building {}".format(help_part) 132 | coll["build"].__doc__ = "Build {}".format(help_part) 133 | return coll 134 | 135 | 136 | # Usage doc/API site (published as e.g. docs.myproject.org) 137 | docs = _site("docs", "the API docs subsite.") 138 | # Main/about/changelog site (e.g. (www.)?myproject.org) 139 | www = _site("www", "the main project website.") 140 | 141 | 142 | @task 143 | def sites(c): 144 | """ 145 | Build both doc sites w/ maxed nitpicking. 146 | """ 147 | # TODO: This is super lolzy but we haven't actually tackled nontrivial 148 | # in-Python task calling yet, so we do this to get a copy of 'our' context, 149 | # which has been updated with the per-collection config data of the 150 | # docs/www subcollections. 151 | docs_c = Context(config=c.config.clone()) 152 | www_c = Context(config=c.config.clone()) 153 | docs_c.update(**docs.configuration()) 154 | www_c.update(**www.configuration()) 155 | # Must build both normally first to ensure good intersphinx inventory files 156 | # exist =/ circular dependencies ahoy! Do it quietly to avoid pulluting 157 | # output; only super-serious errors will bubble up. 
158 | # TODO: wants a 'temporarily tweak context settings' contextmanager 159 | # TODO: also a fucking spinner cuz this confuses me every time I run it 160 | # when the docs aren't already prebuilt 161 | # TODO: this is still bad because it means the actually displayed build 162 | # output "looks like" nothing was built (due to that first pass building 163 | # most pages) 164 | docs_c["run"].hide = True 165 | www_c["run"].hide = True 166 | docs["build"](docs_c) 167 | www["build"](www_c) 168 | docs_c["run"].hide = False 169 | www_c["run"].hide = False 170 | # Run the actual builds, with nitpick=True (nitpicks + tracebacks) 171 | docs["build"](docs_c, nitpick=True) 172 | www["build"](www_c, nitpick=True) 173 | 174 | 175 | @task 176 | def watch_docs(c): 177 | """ 178 | Watch both doc trees & rebuild them if files change. 179 | 180 | This includes e.g. rebuilding the API docs if the source code changes; 181 | rebuilding the WWW docs if the README changes; etc. 182 | 183 | Reuses the configuration values ``packaging.package`` or ``tests.package`` 184 | (the former winning over the latter if both defined) when determining which 185 | source directory to scan for API doc updates. 186 | """ 187 | # TODO: break back down into generic single-site version, then create split 188 | # tasks as with docs/www above. Probably wants invoke#63. 189 | 190 | # NOTE: 'www'/'docs' refer to the module level sub-collections. meh. 
def in_ci():
    """
    Return ``True`` if we appear to be running inside a CI environment.

    Checks whether well-known CI system env vars (``CIRCLECI``, ``TRAVIS``)
    exist and are non-empty; the actual value is irrelevant as long as it is
    not the empty string.
    """
    # any() short-circuits on the first truthy (present & non-empty) flag.
    return any(os.environ.get(flag) for flag in ("CIRCLECI", "TRAVIS"))
9 | """ 10 | 11 | import getpass 12 | import itertools 13 | import logging 14 | import os 15 | import re 16 | import sys 17 | import venv 18 | from functools import partial 19 | from glob import glob 20 | from io import StringIO 21 | from pathlib import Path 22 | from shutil import rmtree 23 | 24 | from invoke.vendor.lexicon import Lexicon 25 | 26 | from blessings import Terminal 27 | from docutils.utils import Reporter 28 | from enum import Enum 29 | from invoke import Collection, task, Exit 30 | from pip import __version__ as pip_version 31 | import readme_renderer.rst # transitively required via twine in setup.py 32 | from releases.util import parse_changelog 33 | from tabulate import tabulate 34 | from twine.commands.check import check as twine_check 35 | 36 | from .semantic_version_monkey import Version 37 | 38 | from ..console import confirm 39 | from ..environment import in_ci 40 | from ..util import tmpdir 41 | 42 | 43 | debug = logging.getLogger("invocations.packaging.release").debug 44 | 45 | # Monkeypatch readme_renderer.rst so it acts more like Sphinx re: docutils 46 | # warning levels - otherwise it overlooks (and misrenders) stuff like bad 47 | # header formats etc! 48 | # (The defaults in readme_renderer are halt_level=WARNING and 49 | # report_level=SEVERE) 50 | # NOTE: this only works because we directly call twine via Python and not via 51 | # subprocess. 52 | for key in ("halt_level", "report_level"): 53 | readme_renderer.rst.SETTINGS[key] = Reporter.INFO_LEVEL 54 | 55 | 56 | # TODO: this could be a good module to test out a more class-centric method of 57 | # organizing tasks. 
E.g.: 58 | # - 'Checks'/readonly things like 'should_changelog' live in a base class 59 | # - one subclass defines dry-run actions for the 'verbs', and is used for 60 | # sanity checking or dry-running 61 | # - another subclass defines actual, mutating actions for the 'verbs', and is 62 | # used for actual release management 63 | # - are those classes simply arbitrary tasky classes used *by* 64 | # actual task functions exposing them; or are they the collections themselves 65 | # (as per #347)? 66 | # - if the latter, how should one "switch" between the subclasses when dry 67 | # running vs real running? 68 | # - what's the CLI "API" look like for that? 69 | # - Different subcollections, e.g. `inv release.dry-run(.all/changelog/etc)` 70 | # vs `inv release.all`? 71 | # - Dry-run flag (which feels more natural/obvious/expected)? How 72 | # would/should that flag affect collection/task loading/selection? 73 | # - especially given task load concerns are typically part of core, but 74 | # this dry-run-or-not behavior clearly doesn't want to be in core? 75 | 76 | 77 | # 78 | # State junk 79 | # 80 | 81 | # Blessings Terminal object for ANSI colorization. 82 | # NOTE: mildly uncomfortable with the instance living at module level, but also 83 | # pretty sure it's unlikely to change meaningfully over time, between 84 | # threads/etc - and it'd be otherwise a PITA to cart around/re-instantiate. 85 | t = Terminal() 86 | check = "\u2714" 87 | ex = "\u2718" 88 | 89 | # Types of releases/branches 90 | Release = Enum("Release", "BUGFIX FEATURE UNDEFINED") 91 | 92 | # Actions to take for various components - done as enums whose values are 93 | # useful one-line status outputs. 
# Actions to take for various components - done as enums whose values are
# useful one-line status outputs.


class Changelog(Enum):
    OKAY = t.green(check + " no unreleased issues")
    NEEDS_RELEASE = t.red(ex + " needs :release: entry")


class VersionFile(Enum):
    OKAY = t.green(check + " version up to date")
    NEEDS_BUMP = t.red(ex + " needs version bump")


class Tag(Enum):
    OKAY = t.green(check + " all set")
    NEEDS_CUTTING = t.red(ex + " needs cutting")


# Bits for testing branch names to determine release type
BUGFIX_RE = re.compile(r"^\d+\.\d+$")
BUGFIX_RELEASE_RE = re.compile(r"^\d+\.\d+\.\d+$")
# TODO: allow tweaking this if folks use different branch methodology:
# - same concept, different name, e.g. s/main/dev/
# - different concept entirely, e.g. no main-ish, only feature branches
FEATURE_RE = re.compile(r"^(main|master)$")


class UndefinedReleaseType(Exception):
    # Raised when the checked-out branch doesn't match any known release line.
    pass


def _converge(c):
    """
    Examine world state, returning data on what needs updating for release.

    :param c: Invoke ``Context`` object or subclass.

    :returns:
        Two dicts (technically, dict subclasses, which allow attribute access),
        ``actions`` and ``state`` (in that order.)

        ``actions`` maps release component names to variables (usually class
        constants) determining what action should be taken for that component:

        - ``changelog``: members of `.Changelog` such as ``NEEDS_RELEASE`` or
          ``OKAY``.
        - ``version``: members of `.VersionFile`.

        ``state`` contains the data used to calculate the actions, in case the
        caller wants to do further analysis:

        - ``branch``: the name of the checked-out Git branch.
        - ``changelog``: the parsed project changelog, a `dict` of releases.
        - ``release_type``: what type of release the branch appears to be (will
          be a member of `.Release` such as ``Release.BUGFIX``.)
        - ``latest_line_release``: the latest changelog release found for
          current release type/line.
        - ``latest_overall_release``: the absolute most recent release entry.
          Useful for determining next minor/feature release.
        - ``current_version``: the version string as found in the package's
          ``__version__``.
    """
    #
    # Data/state gathering
    #

    # Get data about current repo context: what branch are we on & what kind of
    # release does it appear to represent?
    branch, release_type = _release_line(c)
    # Short-circuit if type is undefined; we can't do useful work for that.
    if release_type is Release.UNDEFINED:
        raise UndefinedReleaseType(
            "You don't seem to be on a release-related branch; "
            "why are you trying to cut a release?"
        )
    # Parse our changelog so we can tell what's released and what's not.
    # TODO: below needs to go in something doc-y somewhere; having it in a
    # non-user-facing subroutine docstring isn't visible enough.
    """
    .. note::
        Requires that one sets the ``packaging.changelog_file`` configuration
        option; it should be a relative or absolute path to your
        ``changelog.rst`` (or whatever it's named in your project).
    """
    # TODO: allow skipping changelog if not using Releases since we have no
    # other good way of detecting whether a changelog needs/got an update.
    # TODO: chdir to sphinx.source, import conf.py, look at
    # releases_changelog_name - that way it will honor that setting and we can
    # ditch this explicit one instead. (and the docstring above)
    changelog = parse_changelog(
        c.packaging.changelog_file, load_extensions=True
    )
    # Get latest appropriate changelog release and any unreleased issues, for
    # current line
    line_release, issues = _release_and_issues(changelog, branch, release_type)
    # Also get latest overall release, sometimes that matters (usually only
    # when latest *appropriate* release doesn't exist yet)
    overall_release = _versions_from_changelog(changelog)[-1]
    # Obtain the project's main package & its version data
    current_version = load_version(c)
    # Grab all git tags
    tags = _get_tags(c)

    state = Lexicon(
        {
            "branch": branch,
            "release_type": release_type,
            "changelog": changelog,
            "latest_line_release": Version(line_release)
            if line_release
            else None,
            "latest_overall_release": overall_release,  # already a Version
            "unreleased_issues": issues,
            "current_version": Version(current_version),
            "tags": tags,
        }
    )
    # Version number determinations:
    # - latest actually-released version
    # - the next version after that for current branch
    # - which of the two is the actual version we're looking to converge on,
    # depends on current changelog state.
    latest_version, next_version = _latest_and_next_version(state)
    state.latest_version = latest_version
    state.next_version = next_version
    state.expected_version = latest_version
    if state.unreleased_issues:
        state.expected_version = next_version

    #
    # Logic determination / convergence
    #

    actions = Lexicon()

    # Changelog: needs new release entry if there are any unreleased issues for
    # current branch's line.
    # TODO: annotate with number of released issues [of each type?] - so not
    # just "up to date!" but "all set (will release 3 features & 5 bugs)"
    actions.changelog = Changelog.OKAY
    if release_type in (Release.BUGFIX, Release.FEATURE) and issues:
        actions.changelog = Changelog.NEEDS_RELEASE

    # Version file: simply whether version file equals the target version.
    # TODO: corner case of 'version file is >1 release in the future', but
    # that's still wrong, just would be a different 'bad' status output.
    actions.version = VersionFile.OKAY
    if state.current_version != state.expected_version:
        actions.version = VersionFile.NEEDS_BUMP

    # Git tag: similar to version file, except the check is existence of tag
    # instead of comparison to file contents. We even reuse the
    # 'expected_version' variable wholesale.
    actions.tag = Tag.OKAY
    if state.expected_version not in state.tags:
        actions.tag = Tag.NEEDS_CUTTING

    actions.all_okay = (
        actions.changelog == Changelog.OKAY
        and actions.version == VersionFile.OKAY
        and actions.tag == Tag.OKAY
    )

    #
    # Return
    #

    return actions, state


@task
def status(c):
    """
    Print current release (version, changelog, tag, etc) status.

    Doubles as a subroutine, returning the return values from its inner call to
    ``_converge`` (an ``(actions, state)`` two-tuple of Lexicons).
    """
    actions, state = _converge(c)
    table = []
    # NOTE: explicit 'sensible' sort (in rough order of how things are usually
    # modified, and/or which depend on one another, e.g. tags are near the end)
    for component in "changelog version tag".split():
        table.append((component.capitalize(), actions[component].value))
    print(tabulate(table))
    return actions, state
282 | @task(name="all", default=True) 283 | def all_(c, dry_run=False): 284 | """ 285 | Catchall version-bump/tag/changelog/PyPI upload task. 286 | 287 | :param bool dry_run: 288 | Handed to all subtasks which themselves have a ``dry_run`` flag. 289 | 290 | .. versionchanged:: 2.1 291 | Expanded functionality to run ``publish`` and ``push`` as well as 292 | ``prepare``. 293 | .. versionchanged:: 2.1 294 | Added the ``dry_run`` flag. 295 | """ 296 | prepare(c, dry_run=dry_run) 297 | publish(c, dry_run=dry_run) 298 | push(c, dry_run=dry_run) 299 | 300 | 301 | @task 302 | def prepare(c, dry_run=False): 303 | """ 304 | Edit changelog & version, git commit, and git tag, to set up for release. 305 | 306 | :param bool dry_run: 307 | Whether to take any actual actions or just say what might occur. Will 308 | also non-fatally exit if not on some form of release branch. Default: 309 | ``False``. 310 | 311 | :returns: ``True`` if short-circuited due to all-ok, ``None`` otherwise. 312 | 313 | .. versionchanged:: 2.1 314 | Added the ``dry_run`` parameter. 315 | .. versionchanged:: 2.1 316 | Generate annotated git tags instead of lightweight ones. 317 | """ 318 | # Print dry-run/status/actions-to-take data & grab programmatic result 319 | # TODO: maybe expand the enum-based stuff to have values that split up 320 | # textual description, command string, etc. See the TODO up by their 321 | # definition too, re: just making them non-enum classes period. 322 | # TODO: otherwise, we at least want derived eg changelog/version/etc paths 323 | # transmitted from status() into here... 
324 | try: 325 | actions, state = status(c) 326 | except UndefinedReleaseType: 327 | if not dry_run: 328 | raise 329 | raise Exit( 330 | code=0, 331 | message="Can't dry-run release tasks, not on a release branch; skipping.", # noqa 332 | ) 333 | # Short-circuit if nothing to do 334 | if actions.all_okay: 335 | return True 336 | # If work to do and not dry-running, make sure user confirms to move ahead 337 | if not dry_run: 338 | if not confirm("Take the above actions?"): 339 | raise Exit("Aborting.") 340 | 341 | # TODO: factor out what it means to edit a file: 342 | # - $EDITOR or explicit expansion of it in case no shell involved 343 | # - pty=True and hide=False, because otherwise things can be bad 344 | # - what else? 345 | 346 | # Changelog! (pty for non shite editing, eg vim sure won't like non-pty) 347 | if actions.changelog == Changelog.NEEDS_RELEASE: 348 | # TODO: identify top of list and inject a ready-made line? Requires vim 349 | # assumption...GREAT opportunity for class/method based tasks! 350 | cmd = "$EDITOR {.packaging.changelog_file}".format(c) 351 | c.run(cmd, pty=True, hide=False, dry=dry_run) 352 | # Version file! 353 | if actions.version == VersionFile.NEEDS_BUMP: 354 | version_file = os.path.join( 355 | _find_package(c), 356 | c.packaging.get("version_module", "_version") + ".py", 357 | ) 358 | cmd = "$EDITOR {}".format(version_file) 359 | c.run(cmd, pty=True, hide=False, dry=dry_run) 360 | if actions.tag == Tag.NEEDS_CUTTING: 361 | # Commit, if necessary, so the tag includes everything. 362 | # NOTE: this strips out untracked files. effort. 363 | cmd = 'git status --porcelain | egrep -v "^\\?"' 364 | if c.run(cmd, hide=True, warn=True).ok: 365 | c.run( 366 | 'git commit -am "Cut {}"'.format(state.expected_version), 367 | hide=False, 368 | dry=dry_run, 369 | echo=True, 370 | ) 371 | # Tag! 
372 | c.run( 373 | 'git tag -a {} -m ""'.format(state.expected_version), 374 | hide=False, 375 | dry=dry_run, 376 | echo=True, 377 | ) 378 | # If top-of-task status check wasn't all_okay, it means the code between 379 | # there and here was expected to alter state. Run another check to make 380 | # sure those actions actually succeeded! 381 | if not dry_run and not actions.all_okay: 382 | actions, state = status(c) 383 | if not actions.all_okay: 384 | raise Exit("Something went wrong! Please fix.") 385 | 386 | 387 | def _release_line(c): 388 | """ 389 | Examine current repo state to determine what type of release to prep. 390 | 391 | :returns: 392 | A two-tuple of ``(branch-name, line-type)`` where: 393 | 394 | - ``branch-name`` is the current branch name, e.g. ``1.1``, ``main``, 395 | ``gobbledygook`` (or, usually, ``HEAD`` if not on a branch). 396 | - ``line-type`` is a symbolic member of `.Release` representing what 397 | "type" of release the line appears to be for: 398 | 399 | - ``Release.BUGFIX`` if on a bugfix/stable release line, e.g. 400 | ``1.1``. 401 | - ``Release.FEATURE`` if on a feature-release branch (typically 402 | ``main``). 403 | - ``Release.UNDEFINED`` if neither of those appears to apply 404 | (usually means on some unmerged feature/dev branch). 405 | """ 406 | # TODO: I don't _think_ this technically overlaps with Releases (because 407 | # that only ever deals with changelog contents, and therefore full release 408 | # version numbers) but in case it does, move it there sometime. 409 | # TODO: this and similar calls in this module may want to be given an 410 | # explicit pointer-to-git-repo option (i.e. if run from outside project 411 | # context). 412 | # TODO: major releases? or are they big enough events we don't need to 413 | # bother with the script? Also just hard to gauge - when is main the next 414 | # 1.x feature vs 2.0? 
    # Ask git for the current branch name (yields "HEAD" when detached).
    branch = c.run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip()
    type_ = Release.UNDEFINED
    if BUGFIX_RE.match(branch):
        type_ = Release.BUGFIX
    # NOTE: checked after BUGFIX on purpose? If both regexes somehow matched,
    # FEATURE would win -- presumably the patterns are mutually exclusive;
    # TODO(review): confirm BUGFIX_RE/FEATURE_RE cannot both match.
    if FEATURE_RE.match(branch):
        type_ = Release.FEATURE
    return branch, type_


def _latest_feature_bucket(changelog):
    """
    Select 'latest'/'highest' unreleased feature bucket from changelog.

    Buckets are keys shaped like ``unreleased_<N>_...``; "highest" means the
    largest integer ``<N>``.

    :returns: a string key from ``changelog``.

    :raises IndexError: if ``changelog`` contains no ``unreleased_`` keys.
    """
    unreleased = [x for x in changelog if x.startswith("unreleased_")]
    # Sort descending by the numeric segment ("unreleased_2_..." > "_1_...")
    # and take the first, i.e. highest, bucket.
    return sorted(
        unreleased, key=lambda x: int(x.split("_")[1]), reverse=True
    )[0]


# TODO: this feels like it should live in Releases, though that would imply
# adding semantic_version as a dep there, grump
def _versions_from_changelog(changelog):
    """
    Return all released versions from given ``changelog``, sorted.

    :param dict changelog:
        A changelog dict as returned by ``releases.util.parse_changelog``.

    :returns: A sorted list of `semantic_version.Version` objects.
    """
    # Only keys matching the bugfix-release pattern are actual version numbers.
    versions = [Version(x) for x in changelog if BUGFIX_RELEASE_RE.match(x)]
    return sorted(versions)


# TODO: may want to live in releases.util eventually
def _release_and_issues(changelog, branch, release_type):
    """
    Return most recent branch-appropriate release, if any, and its contents.

    :param dict changelog:
        Changelog contents, as returned by ``releases.util.parse_changelog``.

    :param str branch:
        Branch name.

    :param release_type:
        Member of `Release`, e.g. `Release.FEATURE`.

    :returns:
        Two-tuple of release (``str``) and issues (``list`` of issue numbers.)

        If there is no latest release for the given branch (e.g. if it's a
        feature or main branch), it will be ``None``.
    """
    # Bugfix lines just use the branch to find issues
    bucket = branch
    # Features need a bit more logic
    if release_type is Release.FEATURE:
        bucket = _latest_feature_bucket(changelog)
    # Issues is simply what's in the bucket
    # NOTE(review): raises KeyError if the changelog lacks this bucket (e.g. a
    # bugfix branch with no matching changelog section) -- confirm callers
    # only reach here for buckets known to exist.
    issues = changelog[bucket]
    # Latest release is undefined for feature lines
    release = None
    # And requires scanning changelog, for bugfix lines
    if release_type is Release.BUGFIX:
        versions = [str(x) for x in _versions_from_changelog(changelog)]
        # Last (highest, since sorted) version whose string starts with the
        # branch name, e.g. branch "1.2" -> latest "1.2.x".
        release = [x for x in versions if x.startswith(bucket)][-1]
    return release, issues


def _get_tags(c):
    """
    Return sorted list of release-style tags as semver objects.
    """
    tags_ = []
    for tagstr in c.run("git tag", hide=True).stdout.strip().split("\n"):
        try:
            tags_.append(Version(tagstr))
        # Ignore anything non-semver; most of the time they'll be non-release
        # tags, and even if they are, we can't reason about anything
        # non-semver anyways.
        # TODO: perhaps log these to DEBUG
        except ValueError:
            pass
    # Version objects sort semantically
    return sorted(tags_)


def _latest_and_next_version(state):
    """
    Determine latest version for current branch, and its increment.

    E.g. on the ``1.2`` branch, we take the latest ``1.2.x`` release and
    increment its tertiary number, so e.g. if the previous release was
    ``1.2.2``, this function returns ``1.2.3``. If on ``main`` and latest
    overall release was ``1.2.2``, it returns ``1.3.0``.

    :param dict state:
        The ``state`` dict as returned by / generated within `converge`.

    :returns: 2-tuple of ``semantic_version.Version``.
    """
    if state.release_type == Release.FEATURE:
        previous_version = state.latest_overall_release
        # next_minor()/next_patch() are monkeypatched helpers; see
        # semantic_version_monkey.py.
        next_version = previous_version.next_minor()
    else:
        previous_version = state.latest_line_release
        next_version = previous_version.next_patch()
    return previous_version, next_version


def _find_package(c):
    """
    Try to find 'the' One True Package for this project.

    Mostly for obtaining the ``_version`` file within it.

    Uses the ``packaging.package`` config setting if defined. If not defined,
    fallback is to look for a single top-level Python package (directory
    containing ``__init__.py``). (This search ignores a small blacklist of
    directories like ``tests/``, ``vendor/`` etc.)

    :returns: the package name/path as a ``str``.

    :raises Exit: if zero, or more than one, candidate package is found.
    """
    # TODO: is there a way to get this from the same place setup.py does w/o
    # setup.py barfing (since setup() runs at import time and assumes CLI use)?
    # TODO Python 3.7: seems like a job for the then-in-stdlib
    # importlib.metadata?
    configured_value = c.get("packaging", {}).get("package", None)
    if configured_value:
        return configured_value
    # TODO: tests covering this stuff here (most logic tests simply supply
    # config above)
    packages = [
        path
        for path in os.listdir(".")
        if (
            os.path.isdir(path)
            and os.path.exists(os.path.join(path, "__init__.py"))
            and path not in ("tests", "integration", "sites", "vendor")
        )
    ]
    if not packages:
        raise Exit("Unable to find a local Python package!")
    if len(packages) > 1:
        raise Exit("Found multiple Python packages: {!r}".format(packages))
    return packages[0]


def load_version(c):
    """
    Import the project's version module and return its ``__version__`` string.

    Honors the ``packaging.version_module`` config setting (default
    ``_version``).
    """
    package_name = _find_package(c)
    version_module = c.packaging.get("version_module", "_version")
    # Evict from sys.modules in case we're running at the end of an in-session
    # edit (eg within prepare().
    # Otherwise we'll always only see what was
    # on-disk at first import.
    # NOTE: must do both the top level package and the version module! Unclear
    # why. May be due to the specific import strategy
    # TODO 3.0: def try using the cleaner options available under Python 3 when
    # we drop 2.
    sys.modules.pop("{}.{}".format(package_name, version_module), None)
    sys.modules.pop(package_name, None)
    # fromlist forces a submodule import, so getattr below can find it.
    package = __import__(package_name, fromlist=[str(version_module)])
    # TODO: explode nicely if it lacks a _version/etc, or a __version__
    # TODO: make this a Version()?
    return getattr(package, version_module).__version__


@task
def build(c, sdist=True, wheel=True, directory=None, python=None, clean=False):
    """
    Build sdist and/or wheel archives, optionally in a temp base directory.

    All parameters/flags honor config settings of the same name, under the
    ``packaging`` tree. E.g. say ``.configure({'packaging': {'wheel':
    False}})`` to disable building wheel archives by default.

    :param bool sdist:
        Whether to build sdists/tgzs. Default: ``True``.

    :param bool wheel:
        Whether to build wheels (requires the ``wheel`` package from PyPI).
        Default: ``True``.

    :param str directory:
        Allows specifying a specific directory in which to perform builds and
        dist creation. Useful when running as a subroutine from ``publish``
        which sets up a temporary directory.

        Up to two subdirectories may be created within this directory: one for
        builds (if building wheels), and one for the dist archives.

        When ``None`` or another false-y value (which is the default), the
        current working directory is used (and thus, local ``dist/`` and
        ``build/`` subdirectories).

    :param str python:
        Which Python binary to use when invoking ``setup.py``.

        Defaults to ``"python"``.

        If ``wheel=True``, then this Python must have ``wheel`` installed in
        its default ``site-packages`` (or similar) location.

    :param clean:
        Whether to clean out the build and dist directories before building.

    .. versionchanged:: 2.0
        ``clean`` now defaults to False instead of True, cleans both dist and
        build dirs when True, and honors configuration.
    .. versionchanged:: 2.0
        ``wheel`` now defaults to True instead of False.
    """
    # Config hooks
    config = c.config.get("packaging", {})
    # Check bool flags to see if they were overridden by config.
    # TODO: this wants something explicit at the Invoke layer, technically this
    # prevents someone from giving eg --sdist on CLI to override a falsey
    # config value for it.
    if sdist is True and "sdist" in config:
        sdist = config["sdist"]
    if wheel is True and "wheel" in config:
        wheel = config["wheel"]
    if clean is False and "clean" in config:
        clean = config["clean"]
    if directory is None:
        directory = config.get("directory", "")
    if python is None:
        python = config.get("python", "python")  # buffalo buffalo
    # Sanity
    if not sdist and not wheel:
        raise Exit(
            "You said no sdists and no wheels..."
            "what DO you want to build exactly?"
        )
    # Directory path/arg logic
    dist_dir = os.path.join(directory, "dist")
    dist_arg = "-d {}".format(dist_dir)
    build_dir = os.path.join(directory, "build")
    build_arg = "-b {}".format(build_dir)
    # Clean
    if clean:
        for target in (dist_dir, build_dir):
            rmtree(target, ignore_errors=True)
    # Build
    parts = [python, "setup.py"]
    if sdist:
        parts.extend(("sdist", dist_arg))
    if wheel:
        # Manually execute build in case we are using a custom build dir.
        # Doesn't seem to be a way to tell bdist_wheel to do this directly.
        parts.extend(("build", build_arg))
        parts.extend(("bdist_wheel", dist_arg))
    c.run(" ".join(parts))


def find_gpg(c):
    """
    Return the name of the first available GPG binary, or ``None`` if none.

    Probes ``gpg``, ``gpg1`` and ``gpg2`` in that order via ``which``.
    """
    for candidate in "gpg gpg1 gpg2".split():
        if c.run("which {}".format(candidate), hide=True, warn=True).ok:
            return candidate


@task
def publish(
    c,
    sdist=True,
    wheel=True,
    index=None,
    sign=False,
    dry_run=False,
    directory=None,
):
    """
    Publish code to PyPI or index of choice. Wraps ``build`` and ``upload``.

    This uses the ``twine`` command under the hood, both its pre-upload
    ``check`` subcommand (which verifies the archives to be uploaded, including
    checking your PyPI readme) and the ``upload`` one.

    All parameters save ``dry_run`` and ``directory`` honor config settings of
    the same name, under the ``packaging`` tree. E.g. say
    ``.configure({'packaging': {'wheel': True}})`` to force building wheel
    archives by default.

    :param bool sdist:
        Whether to upload sdists/tgzs. Default: ``True``.

    :param bool wheel:
        Whether to upload wheels (requires the ``wheel`` package from PyPI).
        Default: ``True``.

    :param str index:
        Custom upload index/repository name. See ``upload`` help for details.

    :param bool sign:
        Whether to sign the built archive(s) via GPG.

    :param bool dry_run:
        Skip upload step if ``True``.

        This also prevents cleanup of the temporary build/dist directories, so
        you can examine the build artifacts.

        Note that this does not skip the ``twine check`` step, just the final
        upload.

    :param str directory:
        Base directory within which will live the ``dist/`` and ``build/``
        directories.

        Defaults to a temporary directory which is cleaned up after the run
        finishes.
726 | """ 727 | # Don't hide by default, this step likes to be verbose most of the time. 728 | c.config.run.hide = False 729 | # Including echoing! 730 | c.config.run.echo = True 731 | # Config hooks 732 | # TODO: this pattern is too widespread. Really needs something in probably 733 | # Executor that automatically does this on our behalf for any kwargs we 734 | # indicate should be configurable 735 | config = c.config.get("packaging", {}) 736 | if index is None and "index" in config: 737 | index = config["index"] 738 | if sign is False and "sign" in config: 739 | sign = config["sign"] 740 | # Build, into controlled temp dir (avoids attempting to re-upload old 741 | # files) 742 | with tmpdir(skip_cleanup=dry_run, explicit=directory) as tmp: 743 | # Build default archives 744 | builder = partial(build, c, sdist=sdist, wheel=wheel, directory=tmp) 745 | builder() 746 | # Rebuild with env (mostly for Fabric 2) 747 | # TODO: code smell; implies this really wants to be class/hook based? 748 | # TODO: or at least invert sometime so it's easier to say "do random 749 | # stuff to arrive at dists, then test and upload". 750 | rebuild_with_env = config.get("rebuild_with_env", None) 751 | if rebuild_with_env: 752 | old_environ = os.environ.copy() 753 | os.environ.update(rebuild_with_env) 754 | try: 755 | builder() 756 | finally: 757 | os.environ.update(old_environ) 758 | for key in rebuild_with_env: 759 | if key not in old_environ: 760 | del os.environ[key] 761 | # Use twine's check command on built artifacts (at present this just 762 | # validates long_description) 763 | print(c.config.run.echo_format.format(command="twine check")) 764 | failure = twine_check(dists=[os.path.join(tmp, "dist", "*")]) 765 | if failure: 766 | raise Exit(1) 767 | # Test installation of built artifacts into virtualenvs (even during 768 | # dry run) 769 | test_install(c, directory=tmp) 770 | # Do the thing! (Maybe.) 
771 | upload(c, directory=tmp, index=index, sign=sign, dry_run=dry_run) 772 | 773 | 774 | @task 775 | def test_install(c, directory, verbose=False, skip_import=False): 776 | """ 777 | Test installation of build artifacts found in ``$directory``. 778 | 779 | Directory should either be a ``dist`` directory itself, or the parent of 780 | one. 781 | 782 | Uses the `venv` module to build temporary virtualenvs. 783 | 784 | :param bool verbose: Whether to print subprocess output. 785 | :param bool skip_import: 786 | If True, don't try importing the installed module or checking it for 787 | type hints. 788 | """ 789 | # TODO: wants contextmanager or similar for only altering a setting within 790 | # a given scope or block - this may pollute subsequent subroutine calls 791 | if verbose: 792 | old_hide = c.config.run.hide 793 | c.config.run.hide = False 794 | 795 | builder = venv.EnvBuilder(with_pip=True) 796 | archives = get_archives(directory) 797 | if not archives: 798 | raise Exit("No archive files found in {}!".format(directory)) 799 | for archive in archives: 800 | with tmpdir() as tmp: 801 | # Make temp venv 802 | builder.create(tmp) 803 | # Obligatory: make inner pip match outer pip (version obtained from 804 | # this file's executable env, up in import land); very frequently 805 | # venv-made envs have a bundled, older pip :( 806 | envbin = Path(tmp) / "bin" 807 | pip = envbin / "pip" 808 | c.run("{} install pip=={}".format(pip, pip_version)) 809 | # Does the package under test install cleanly? 810 | c.run( 811 | "{} install --disable-pip-version-check {}".format( 812 | pip, archive 813 | ) 814 | ) 815 | # Can we actually import it? (Will catch certain classes of 816 | # import-time-but-not-install-time explosions, eg busted dependency 817 | # specifications or imports). 
            if not skip_import:
                package = _find_package(c)
                # Import, generally
                c.run(f"{envbin / 'python'} -c 'import {package}'")
                # Import, typecheck version (ie dependent package typechecking
                # both itself and us). Assumes task is run from project root.
                pytyped = Path(package) / "py.typed"
                if pytyped.exists():
                    # TODO: pin a specific mypy version?
                    c.run(f"{envbin / 'pip'} install mypy")
                    # Use some other dir (our cwd is probably the project root,
                    # whose local $package dir may confuse mypy into a false
                    # positive!)
                    with tmpdir() as tmp2:
                        mypy_check = f"{envbin / 'mypy'} -c 'import {package}'"
                        c.run(f"cd {tmp2} && {mypy_check}")

    if verbose:
        c.config.run.hide = old_hide


def get_archives(directory):
    """
    Return dist archive paths under ``directory``, wheels first.

    ``directory`` may be a ``dist`` directory itself or its parent.
    """
    # Obtain list of archive filenames, then ensure any wheels come first
    # so their improved metadata is what PyPI sees initially (otherwise, it
    # only honors the sdist's lesser data).
    dist = "" if directory.endswith("dist") else "dist"
    return list(
        itertools.chain.from_iterable(
            glob(os.path.join(directory, dist, "*.{}".format(extension)))
            for extension in ("whl", "tar.gz")
        )
    )


@task
def upload(c, directory, index=None, sign=False, dry_run=False):
    """
    Upload (potentially also signing) all artifacts in ``directory/dist``.

    :param str index:
        Custom upload index/repository name.

        By default, uses whatever the invoked ``pip`` is configured to use.
        Modify your ``pypirc`` file to add new named repositories.

    :param bool sign:
        Whether to sign the built archive(s) via GPG.

    :param bool dry_run:
        Skip actual publication step (and dry-run actions like signing) if
        ``True``.

        This also prevents cleanup of the temporary build/dist directories, so
        you can examine the build artifacts.
    """
    archives = get_archives(directory)
    # Sign each archive in turn
    # NOTE: twine has a --sign option but it's not quite flexible enough &
    # doesn't allow you to dry-run or upload manually when API is borked...
    if sign:
        prompt = "Please enter GPG passphrase for signing: "
        passphrase = "" if dry_run else getpass.getpass(prompt)
        # Feed the passphrase to gpg on fd 0 (see --passphrase-fd below).
        input_ = StringIO(passphrase + "\n")
        gpg_bin = find_gpg(c)
        if not gpg_bin:
            raise Exit(
                "You need to have one of `gpg`, `gpg1` or `gpg2` "
                "installed to GPG-sign!"
            )
        for archive in archives:
            cmd = "{} --detach-sign --armor --passphrase-fd=0 --batch --pinentry-mode=loopback {{}}".format(  # noqa
                gpg_bin
            )
            c.run(cmd.format(archive), in_stream=input_, dry=dry_run)
            input_.seek(0)  # So it can be replayed by subsequent iterations
    # Upload
    parts = ["twine", "upload"]
    if index:
        parts.append("--repository {}".format(index))
    paths = archives.copy()
    if sign and not dry_run:
        # Include the .asc signature files generated above.
        paths.append(os.path.join(directory, "dist", "*.asc"))
    parts.extend(paths)
    cmd = " ".join(parts)
    if dry_run:
        print("Would publish via: {}".format(cmd))
        print("Files that would be published:")
        c.run("ls -l {}".format(" ".join(paths)))
    else:
        c.run(cmd)


@task
def push(c, dry_run=False):
    """
    Push current branch and tags to default Git remote.
    """
    # Push tags, not just branches; and at this stage pre-push hooks will be
    # more trouble than they're worth.
    opts = "--follow-tags --no-verify"
    # Dry run: echo, and either tack on git's own dry-run (if not CI) or
    # dry-run the run() itself (if CI - which probably can't push to the remote
    # and might thus error uselessly)
    kwargs = dict()
    if dry_run:
        kwargs["echo"] = True
        if in_ci():
            kwargs["dry"] = True
        else:
            opts += " --dry-run"
    c.run("git push {}".format(opts), **kwargs)


# TODO: still need time to solve the 'just myself pls' problem
ns = Collection(
    "release",
    all_,
    status,
    prepare,
    build,
    publish,
    push,
    test_install,
    upload,
)
# Hide stdout by default, preferring to explicitly enable it when necessary.
ns.configure({"run": {"hide": "stdout"}})
--------------------------------------------------------------------------------
/invocations/packaging/semantic_version_monkey.py:
--------------------------------------------------------------------------------
"""
Monkey patches for ``semantic_version.Version``.

We never like monkey-patching, but for now this is easier than either vendoring
or distributing our own fork.
"""


from semantic_version import Version


def clone(self):
    """
    Return a new copy of this Version object.

    Useful when you need to generate a new object that can be mutated
    separately from the original.
    """
    return Version(str(self))


Version.clone = clone


def next_minor(self):
    """
    Return a Version whose minor number is one greater than self's.

    .. note::
        The new Version will always have a zeroed-out bugfix/tertiary version
        number, because the "next minor release" of e.g. 1.2.1 is 1.3.0, not
        1.3.1.
33 | """ 34 | clone = self.clone() 35 | clone.minor += 1 36 | clone.patch = 0 37 | return clone 38 | 39 | 40 | Version.next_minor = next_minor 41 | 42 | 43 | def next_patch(self): 44 | """ 45 | Return a Version whose patch/bugfix number is one greater than self's. 46 | """ 47 | clone = self.clone() 48 | clone.patch += 1 49 | return clone 50 | 51 | 52 | Version.next_patch = next_patch 53 | -------------------------------------------------------------------------------- /invocations/packaging/vendorize.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tasks for importing external code into a vendor subdirectory. 3 | """ 4 | from os import chdir 5 | from pathlib import Path 6 | from shutil import copy, copytree, rmtree 7 | 8 | from invoke import task 9 | 10 | from ..util import tmpdir 11 | 12 | 13 | def _unpack(c, tmp, package, version, git_url=None): 14 | """ 15 | Download + unpack given package into temp dir ``tmp``. 16 | 17 | Return ``(real_version, source)`` where ``real_version`` is the "actual" 18 | version downloaded (e.g. if a Git main branch was indicated, it will be the 19 | SHA of ``main`` HEAD) and ``source`` is the source directory (relative to 20 | unpacked source) to import into ``/vendor``. 21 | """ 22 | real_version = version[:] 23 | source = None 24 | if git_url: 25 | pass 26 | # git clone into tempdir 27 | # git checkout 28 | # set target to checkout 29 | # if version does not look SHA-ish: 30 | # in the checkout, obtain SHA from that branch 31 | # set real_version to that value 32 | else: 33 | cwd = Path.cwd() 34 | print(f"Moving into temp dir {tmp}") 35 | chdir(tmp) 36 | try: 37 | # Nab from index. Skip wheels; we want to unpack an sdist. 38 | flags = "--download=. --build=build --no-use-wheel" 39 | cmd = f"pip install {flags} {package}=={version}" 40 | c.run(cmd) 41 | # Identify basename 42 | # TODO: glob is bad here because pip install --download gets all 43 | # dependencies too! ugh. 
Figure out best approach for that. 44 | globs = [] 45 | globexpr = "" 46 | for extension, opener in ( 47 | ("zip", "unzip"), 48 | ("tgz", "tar xzvf"), 49 | ("tar.gz", "tar xzvf"), 50 | ): 51 | globexpr = "*.{}".format(extension) 52 | globs = cwd.glob(globexpr) 53 | if globs: 54 | break 55 | archive = globs[0].name 56 | # TODO: weird how there's no "mega-.stem" in Pathlib, o well 57 | source, _, _ = archive.rpartition(".{}".format(extension)) 58 | c.run("{} {}".format(opener, globexpr)) 59 | finally: 60 | chdir(cwd) 61 | return real_version, source 62 | 63 | 64 | @task 65 | def vendorize( 66 | c, 67 | distribution, 68 | version, 69 | vendor_dir, 70 | package=None, 71 | git_url=None, 72 | license=None, 73 | ): 74 | """ 75 | Vendorize Python package ``distribution`` at version/SHA ``version``. 76 | 77 | Specify the vendor folder (e.g. ``/vendor``) as ``vendor_dir``. 78 | 79 | For Crate/PyPI releases, ``package`` should be the name of the software 80 | entry on those sites, and ``version`` should be a specific version number. 81 | E.g. ``vendorize('lexicon', '0.1.2')``. 82 | 83 | For Git releases, ``package`` should be the name of the package folder 84 | within the checkout that needs to be vendorized and ``version`` should be a 85 | Git identifier (branch, tag, SHA etc.) ``git_url`` must also be given, 86 | something suitable for ``git clone ``. 87 | 88 | For SVN releases: xxx. 89 | 90 | For packages where the distribution name is not the same as the package 91 | directory name, give ``package='name'``. 92 | 93 | By default, no explicit license seeking is done -- we assume the license 94 | info is in file headers or otherwise within the Python package vendorized. 95 | This is not always true; specify ``license=/path/to/license/file`` to 96 | trigger copying of a license into the vendored folder from the 97 | checkout/download (relative to its root.) 
98 | """ 99 | with tmpdir() as tmp: 100 | package = package or distribution 101 | target = Path(vendor_dir) / package 102 | # Unpack source 103 | real_version, source = _unpack(c, tmp, distribution, version, git_url) 104 | abs_source = tmp / source 105 | source_package = abs_source / package 106 | # Ensure source package exists 107 | if not source_package.exists(): 108 | rel_package = source_package.relative_to(Path.cwd()) 109 | raise ValueError(f"Source package {rel_package} doesn't exist!") 110 | # Nuke target if exists 111 | if target.exists(): 112 | print(f"Removing pre-existing vendorized folder {target}") 113 | rmtree(target) 114 | # Perform the copy 115 | print(f"Copying {source_package} => {target}") 116 | copytree(source_package, target) 117 | # Explicit license if needed 118 | if license: 119 | copy(abs_source / license, target) 120 | # git commit -a -m "Update $package to $version ($real_version if different)" # noqa 121 | -------------------------------------------------------------------------------- /invocations/pytest.py: -------------------------------------------------------------------------------- 1 | """ 2 | Pytest-using variant of testing.py. Will eventually replace the latter. 3 | """ 4 | 5 | from invoke import task 6 | 7 | 8 | @task 9 | def test( 10 | c, 11 | verbose=True, 12 | color=True, 13 | capture="sys", 14 | module=None, 15 | k=None, 16 | x=False, 17 | opts="", 18 | pty=True, 19 | warnings=True, 20 | ): 21 | """ 22 | Run pytest with given options. 23 | 24 | :param bool verbose: 25 | Whether to run tests in verbose mode. 26 | 27 | :param bool color: 28 | Whether to request colorized output (typically only works when 29 | ``verbose=True``.) 30 | 31 | :param str capture: 32 | What type of stdout/err capturing pytest should use. Defaults to 33 | ``sys`` since pytest's own default, ``fd``, tends to trip up 34 | subprocesses trying to detect PTY status. Can be set to ``no`` for no 35 | capturing / useful print-debugging / etc. 

    :param str module:
        Select a specific test module to focus on, e.g. ``main`` to only run
        ``tests/main.py``. (Note that this is a specific idiom aside from the
        use of ``-o '-k pattern'``.) Default: ``None``.

    :param str k:
        Convenience passthrough for ``pytest -k``, i.e. test selection.
        Default: ``None``.

    :param bool x:
        Convenience passthrough for ``pytest -x``, i.e. fail-fast. Default:
        ``False``.

    :param str opts:
        Extra runtime options to hand to ``pytest``.

    :param bool pty:
        Whether to use a pty when executing pytest. Default: ``True``.

    :param bool warnings:
        Inverse alias for the pytest ``--disable-warnings`` flag; when this is
        False (i.e. called on CLI as ``--no-warnings``), ``--disable-warnings``
        will be given. Default: ``True``.

    .. versionadded:: 2.0
    """
    # TODO: really need better tooling around these patterns
    # TODO: especially the problem of wanting to be configurable, but
    # sometimes wanting to override one's config via kwargs; and also needing
    # non-None defaults in the kwargs to inform the parser (or have to
    # configure it explicitly...?)
    flags = []
    if verbose:
        flags.append("--verbose")
    if color:
        flags.append("--color=yes")
    flags.append("--capture={}".format(capture))
    if opts:
        flags.append(opts)
    # For each convenience kwarg, don't duplicate a flag the user already put
    # into raw ``opts`` themselves.
    if k is not None and not ("-k" in opts if opts else False):
        flags.append("-k '{}'".format(k))
    if x and not ("-x" in opts if opts else False):
        flags.append("-x")
    if not warnings and not ("--disable-warnings" in opts if opts else False):
        flags.append("--disable-warnings")
    modstr = ""
    if module is not None:
        modstr = " tests/{}.py".format(module)
    c.run("pytest {}{}".format(" ".join(flags), modstr), pty=pty)


@task(help=test.help)
def integration(
    c,
    opts=None,
    pty=True,
    x=False,
    k=None,
    verbose=True,
    color=True,
    capture="sys",
    module=None,
):
    """
    Run the integration test suite. May be slow!

    See ``pytest.test`` for description of most arguments.
    """
    opts = opts or ""
    # Point pytest at the integration/ tree instead of its default tests/.
    opts += " integration/"
    if module is not None:
        opts += "{}.py".format(module)
    test(
        c,
        opts=opts,
        pty=pty,
        x=x,
        k=k,
        verbose=verbose,
        color=color,
        capture=capture,
    )


@task(iterable=["additional_testers"])
def coverage(
    c,
    report="term",
    opts="",
    tester=None,
    codecov=False,
    additional_testers=None,
):
    """
    Run pytest with coverage enabled.

    Assumes the ``pytest-cov`` pytest plugin is installed.

    :param str report:
        Coverage report style to use. If 'html', will also open in browser.

    :param str opts:
        Extra runtime opts to pass to pytest.

    :param tester:
        Specific test task object to invoke. If ``None`` (default), uses this
        module's local `test`.

    :param bool codecov:
        Whether to build XML and upload to Codecov. Requires ``codecov`` tool.
        Default: ``False``.

    :param additional_testers:
        List of additional test functions to call besides ``tester``. If given,
        implies the use of ``--cov-append`` on these subsequent test runs.

    .. versionchanged:: 2.4
        Added the ``additional_testers`` argument.
    """
    my_opts = "--cov --no-cov-on-fail --cov-report={}".format(report)
    if opts:
        my_opts += " " + opts
    # TODO: call attached suite's test(), not the one in here, if they differ
    # TODO: arguably wants ability to lookup task string when tester(s) given
    # on CLI, but, eh
    (tester or test)(c, opts=my_opts)
    if additional_testers:
        # Append so subsequent runs accumulate into the same coverage data.
        my_opts += " --cov-append"
        for tester in additional_testers:
            tester(c, opts=my_opts)
    if report == "html":
        c.run("open htmlcov/index.html")
    if codecov:
        # Generate XML report from that already-gathered data (otherwise
        # codecov generates it on its own and gets it wrong!)
        c.run("coverage xml")
        # Upload to Codecov
        c.run("codecov")
--------------------------------------------------------------------------------
/invocations/testing.py:
--------------------------------------------------------------------------------
import sys
import time
from collections import defaultdict

from invoke import task
from tqdm import tqdm

from .watch import watch


@task(
    help={
        "module": "Just runs tests/STRING.py.",
        "runner": "Use STRING to run tests instead of 'spec'.",
        "opts": "Extra flags for the test runner",
        "pty": "Whether to run tests under a pseudo-tty",
    }
)
def test(c, module=None, runner=None, opts=None, pty=True):
    """
    Run a Spec or Nose-powered internal test suite.
22 | """ 23 | runner = runner or "spec" 24 | # Allow selecting specific submodule 25 | specific_module = f" --tests=tests/{module}.py" 26 | args = specific_module if module else "" 27 | if opts: 28 | args += " " + opts 29 | # Always enable timing info by default. OPINIONATED 30 | args += " --with-timing" 31 | # Allow client to configure some other Nose-related things. 32 | logformat = c.config.get("tests", {}).get("logformat", None) 33 | if logformat is not None: 34 | args += f" --logging-format='{logformat}'" 35 | # Use pty by default so the spec/nose/Python process buffers "correctly" 36 | c.run(runner + args, pty=pty) 37 | 38 | 39 | @task(help=test.help) 40 | def integration(c, module=None, runner=None, opts=None, pty=True): 41 | """ 42 | Run the integration test suite. May be slow! 43 | """ 44 | opts = opts or "" 45 | override = " --tests=integration/" 46 | if module: 47 | override += f"{module}.py" 48 | opts += override 49 | test(c, runner=runner, opts=opts, pty=pty) 50 | 51 | 52 | @task 53 | def watch_tests(c, module=None, opts=None): 54 | """ 55 | Watch source tree and test tree for changes, rerunning tests as necessary. 56 | 57 | Honors ``tests.package`` setting re: which source directory to watch for 58 | changes. 59 | """ 60 | package = c.config.get("tests", {}).get("package") 61 | patterns = [r"\./tests/"] 62 | if package: 63 | patterns.append(r"\./{}/".format(package)) 64 | kwargs = {"module": module, "opts": opts} 65 | # Kick things off with an initial test (making sure it doesn't exit on its 66 | # own if tests currently fail) 67 | c.config.run.warn = True 68 | test(c, **kwargs) 69 | # Then watch 70 | watch(c, test, patterns, [r".*/\..*\.swp"], **kwargs) 71 | 72 | 73 | @task 74 | def coverage(c, html=True, integration_=True): 75 | """ 76 | Run tests w/ coverage enabled, optionally generating HTML & opening it. 77 | 78 | :param bool html: 79 | Whether to generate & open an HTML report. Default: ``True``. 
# TODO: rename to like find_errors or something more generic
@task
def count_errors(c, command, trials=10, verbose=False, fail_fast=False):
    """
    Run ``command`` multiple times and tally statistics about failures.

    Use Ctrl-C or other SIGINT to abort early (also see ``fail_fast``.)

    :param str command:
        The command to execute. Make sure to escape special shell characters!

    :param int trials:
        Number of trials to execute (default 10.)

    :param bool verbose:
        Whether to emit stdout/err from failed runs at end of execution.
        Default: ``False``.

    :param bool fail_fast:
        Whether to exit after the first error (i.e. "count runs til error is
        exhibited" mode.) Default: ``False``.

    Say ``verbose=True`` to see stderr from failed runs at the end.

    Say ``--fail-fast`` to error out, with error output, on the first error.
    """
    # TODO: allow defining failure as something besides "exited 1", e.g.
    # "stdout contained <some sentinel>" or whatnot
    goods, bads = [], []
    prev_error = time.time()
    for _ in tqdm(range(trials), unit="trial"):
        result = c.run(command, hide=True, warn=True)
        if result.failed:
            # Record seconds elapsed since the previous failure (or start).
            now = time.time()
            result.since_prev_error = int(now - prev_error)
            prev_error = now
            bads.append(result)
            # -2 is typically indicative of SIGINT in most shells
            if fail_fast or result.exited == -2:
                break
        else:
            goods.append(result)
    # NOTE: count completed trials from the tallies rather than the loop
    # variable + 1; the previous approach raised UnboundLocalError when
    # trials=0 (loop body never ran, so the loop variable was never bound).
    num_runs = len(goods) + len(bads)
    if verbose or fail_fast:
        # TODO: would be nice to show interwoven stdout/err but I don't believe
        # we track that at present...
        for result in bads:
            print("")
            print(result.stdout)
            print(result.stderr)
    # Stats! TODO: errors only jeez
    successes = len(goods)
    failures = len(bads)
    overall = "{}/{} trials failed".format(failures, num_runs)
    # Short-circuit if no errors
    if not bads:
        print(overall)
        return
    periods = [x.since_prev_error for x in bads]
    # Period mean
    mean = int(sum(periods) / float(len(periods)))
    # Period mode (ties broken by the larger period, via tuple sort order)
    # TODO: use collections.Counter now that we've dropped 2.6
    counts = defaultdict(int)
    for period in periods:
        counts[period] += 1
    mode = sorted((value, key) for key, value in counts.items())[-1][1]
    # Emission of stats!
    if fail_fast:
        print("First failure occurred after {} successes".format(successes))
    else:
        print(overall)
        print(
            "Stats: min={}s, mean={}s, mode={}s, max={}s".format(
                min(periods), mean, mode, max(periods)
            )
        )
3 | """ 4 | 5 | import sys 6 | import time 7 | 8 | 9 | def make_handler(ctx, task_, regexes, ignore_regexes, *args, **kwargs): 10 | args = [ctx] + list(args) 11 | try: 12 | from watchdog.events import RegexMatchingEventHandler 13 | except ImportError: 14 | sys.exit("If you want to use this, 'pip install watchdog' first.") 15 | 16 | class Handler(RegexMatchingEventHandler): 17 | def on_any_event(self, event): 18 | try: 19 | task_(*args, **kwargs) 20 | except BaseException: 21 | pass 22 | 23 | return Handler(regexes=regexes, ignore_regexes=ignore_regexes) 24 | 25 | 26 | def observe(*handlers): 27 | try: 28 | from watchdog.observers import Observer 29 | except ImportError: 30 | sys.exit("If you want to use this, 'pip install watchdog' first.") 31 | 32 | observer = Observer() 33 | # TODO: Find parent directory of tasks.py and use that. 34 | for handler in handlers: 35 | observer.schedule(handler, ".", recursive=True) 36 | observer.start() 37 | try: 38 | while True: 39 | time.sleep(1) 40 | except KeyboardInterrupt: 41 | observer.stop() 42 | observer.join() 43 | 44 | 45 | def watch(c, task_, regexes, ignore_regexes, *args, **kwargs): 46 | observe(make_handler(c, task_, regexes, ignore_regexes, *args, **kwargs)) 47 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | testpaths = tests 3 | python_files = * 4 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup, find_packages 4 | 5 | # Version info -- read without importing 6 | _locals = {} 7 | with open("invocations/_version.py") as fp: 8 | exec(fp.read(), None, _locals) 9 | version = _locals["__version__"] 10 | 11 | requirements = [ 12 | # Core dependency 13 | "invoke>=1.7.2", 14 | # Dependencies for 
# Package metadata & distribution config (standard setuptools invocation).
setup(
    name="invocations",
    # 'version' was exec'd out of invocations/_version.py above, avoiding an
    # import of the package during build.
    version=version,
    description="Common/best-practice Invoke tasks and collections",
    long_description=open("README.rst").read(),
    license="BSD",
    author="Jeff Forcier",
    author_email="jeff@bitprophet.org",
    url="https://invocations.readthedocs.io",
    project_urls={
        "Source": "https://github.com/pyinvoke/invocations",
        "Changelog": "https://invocations.readthedocs.io/en/latest/changelog.html",  # noqa
        "CI": "https://app.circleci.com/pipelines/github/pyinvoke/invocations",
        "Issues": "https://github.com/pyinvoke/invocations/issues",
    },
    # Release requirements. See dev-requirements.txt for dev version reqs.
    python_requires=">=3.6",
    install_requires=requirements,
    packages=find_packages(),
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: BSD License",
        "Operating System :: POSIX",
        "Operating System :: Unix",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Topic :: Software Development",
        "Topic :: Software Development :: Build Tools",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: System :: Software Distribution",
        "Topic :: System :: Systems Administration",
    ],
)
16 | "TERM": "xterm-256color" 17 | }, 18 | }, 19 | } 20 | ) 21 | -------------------------------------------------------------------------------- /tests/autodoc/_support/conf.py: -------------------------------------------------------------------------------- 1 | from os.path import dirname 2 | import sys 3 | 4 | 5 | # Add local support dir to path so tasks modules may be imported by autodoc 6 | sys.path.insert(0, dirname(__file__)) 7 | 8 | master_doc = "index" 9 | extensions = ["invocations.autodoc"] 10 | autodoc_default_options = dict(members=True) 11 | -------------------------------------------------------------------------------- /tests/autodoc/_support/docs/api.rst: -------------------------------------------------------------------------------- 1 | === 2 | API 3 | === 4 | 5 | .. automodule:: mytasks 6 | -------------------------------------------------------------------------------- /tests/autodoc/_support/docs/index.rst: -------------------------------------------------------------------------------- 1 | ==== 2 | Test 3 | ==== 4 | 5 | .. toctree:: 6 | api 7 | -------------------------------------------------------------------------------- /tests/autodoc/_support/mytasks.py: -------------------------------------------------------------------------------- 1 | """ 2 | Some fake tasks to test task autodoc. 3 | """ 4 | 5 | from invoke import task 6 | 7 | 8 | def not_a_task(c): 9 | """ 10 | I am a regular function. 11 | """ 12 | pass 13 | 14 | 15 | @task 16 | def undocumented(c): 17 | # I have no docstring so I may not show up. Or...may I?! 18 | pass 19 | 20 | 21 | @task 22 | def base_case(c): 23 | """ 24 | Literally the smallest possible task. 25 | """ 26 | pass 27 | 28 | 29 | @task 30 | def simple_case(c, simple_arg): 31 | """ 32 | Parameterization! 
33 | """ 34 | pass 35 | -------------------------------------------------------------------------------- /tests/autodoc/base.py: -------------------------------------------------------------------------------- 1 | from os.path import join, dirname 2 | import re 3 | import shutil 4 | 5 | from unittest.mock import Mock 6 | 7 | from invoke import Context 8 | from invocations.autodoc import setup as our_setup, TaskDocumenter 9 | 10 | 11 | def _build(): 12 | """ 13 | Build local support docs tree and return the build target dir for cleanup. 14 | """ 15 | c = Context() 16 | support = join(dirname(__file__), "_support") 17 | docs = join(support, "docs") 18 | build = join(support, "_build") 19 | command = "sphinx-build -c {} -W {} {}".format(support, docs, build) 20 | with c.cd(support): 21 | # Turn off stdin mirroring to avoid irritating pytest. 22 | c.run(command, in_stream=False) 23 | return build 24 | 25 | 26 | class autodoc_: 27 | @classmethod 28 | def setup_class(self): 29 | # Build once, introspect many...for now 30 | self.build_dir = _build() 31 | with open(join(self.build_dir, "api.html")) as fd: 32 | self.api_docs = fd.read() 33 | 34 | @classmethod 35 | def teardown_class(self): 36 | shutil.rmtree(self.build_dir, ignore_errors=True) 37 | 38 | def setup_requires_autodoc_and_adds_autodocumenter(self): 39 | app = Mock() 40 | our_setup(app) 41 | app.setup_extension.assert_called_once_with("sphinx.ext.autodoc") 42 | app.add_autodocumenter.assert_called_once_with(TaskDocumenter) 43 | 44 | def module_docstring_unmodified(self): 45 | # Just a sanity test, really. 46 | assert "Some fake tasks to test task autodoc." in self.api_docs 47 | 48 | def regular_functions_only_appear_once(self): 49 | # Paranoid sanity check re: our 50 | # very-much-like-FunctionDocumenter-documenter not accidentally loading 51 | # up non-task objects (and thus having them autodoc'd twice: once 52 | # regularly and once incorrectly 'as tasks'). SHRUG. 
class checks:
    """
    Tests for invocations.checks: the blacken/lint/all_ tasks.
    """

    class blacken_:
        @pytest.mark.parametrize(
            "kwargs,command",
            [
                (dict(), "find . -name '*.py' | xargs black -l 79"),
                (
                    dict(line_length=80),
                    "find . -name '*.py' | xargs black -l 80",
                ),
                (
                    dict(folders=["foo", "bar"]),
                    "find foo bar -name '*.py' | xargs black -l 79",
                ),
                (
                    # Explicit invocation that matches a default CLI
                    # invocation, since 'folders' is an iterable and thus shows
                    # up as an empty list in real life. Ehhh.
                    dict(folders=[]),
                    "find . -name '*.py' | xargs black -l 79",
                ),
                (
                    dict(check=True),
                    "find . -name '*.py' | xargs black -l 79 --check",
                ),
                (
                    dict(diff=True),
                    "find . -name '*.py' | xargs black -l 79 --diff",
                ),
                (
                    dict(
                        diff=True,
                        check=True,
                        line_length=80,
                        folders=["foo", "bar"],
                    ),
                    "find foo bar -name '*.py' | xargs black -l 80 --check --diff",  # noqa
                ),
                (
                    dict(find_opts="-and -not -name foo"),
                    "find . -name '*.py' -and -not -name foo | xargs black -l 79",  # noqa
                ),
            ],
            ids=[
                "base case is all files and 79 characters",
                "line length controllable",
                "folders controllable",
                "folders real default value",
                "check flag passed through",
                "diff flag passed through",
                "most args combined",
                "find opts controllable",
            ],
        )
        def runs_black(self, ctx, kwargs, command):
            blacken(ctx, **kwargs)
            ctx.run.assert_called_once_with(command, pty=True)

        def folders_configurable(self, ctx):
            # Just config -> works fine
            ctx.blacken = dict(folders=["elsewhere"])
            blacken(ctx)
            assert "elsewhere" in ctx.run_command

        def folders_config_loses_to_runtime(self, ctx):
            # Config + CLI opt -> CLI opt wins
            # NOTE: config and runtime values must differ, or the test could
            # never detect which one won (cf find_opts_config_loses_to_runtime
            # below, which already used distinct values).
            ctx.blacken = dict(folders=["nowhere"])
            blacken(ctx, folders=["elsewhere"])
            assert "elsewhere" in ctx.run_command
            assert "nowhere" not in ctx.run_command

        def find_opts_configurable(self, ctx):
            ctx.blacken = dict(find_opts="-and -not -name foo.py")
            blacken(ctx)
            assert (
                "find . -name '*.py' -and -not -name foo.py" in ctx.run_command
            )

        def find_opts_config_loses_to_runtime(self, ctx):
            ctx.blacken = dict(find_opts="-and -not -name foo.py")
            blacken(ctx, find_opts="-or -name '*.js'")
            assert "find . -name '*.py' -or -name '*.js'" in ctx.run_command

        def aliased_to_format(self):
            assert blacken.aliases == ["format"]

    class lint_:
        def runs_flake8_by_default(self, ctx):
            lint(ctx)
            assert ctx.run_command == "flake8"

    class all_:
        def runs_blacken_and_lint(self, ctx):
            all_task(ctx)
            assert ctx.run.call_args_list == [
                call("find . -name '*.py' | xargs black -l 79", pty=True),
                call("flake8", pty=True, warn=True),
            ]

        def is_default_task(self):
            assert all_task.is_default
# For use in packaging.release.test_install tests
@fixture
def install():
    """
    Yield a MockContext wired up for release.test_install, then verify calls.

    Patches out pip version lookup, tempdir creation, venv building, archive
    discovery and Path so the task under test performs no real filesystem or
    subprocess work; post-yield assertions check the expected venv/pip calls
    actually happened.
    """
    # NOTE: the patch targets must match where names are *looked up*
    # (release / util modules), not where they're defined.
    with patch("invocations.packaging.release.pip_version", "lmao"), patch(
        "invocations.util.rmtree", Mock("rmtree")
    ), patch(
        "invocations.packaging.release._find_package", lambda c: "foo"
    ), patch(
        "venv.EnvBuilder"
    ) as builder, patch(
        "invocations.util.mkdtemp"
    ) as mkdtemp, patch(
        "invocations.packaging.release.get_archives"
    ) as get_archives, patch(
        "invocations.packaging.release.Path"
    ) as fakePath:
        # Setup & run
        c = MockContext(run=True, repeat=True)
        mkdtemp.return_value = "tmpdir"
        get_archives.return_value = ["foo.tgz", "foo.whl"]

        # I hate this but don't see a cleaner way to mock out a nested
        # 'exists()' w/o breaking everything else, or using a real tmpdir.
        def set_exists(value):
            # Replace Path.__truediv__ so 'root / "bin" / "pip"' style paths
            # resolve under our fake tmpdir, and 'py.typed' reports the
            # caller-chosen exists() value.
            def fakediv(self, arg):
                root = Path(mkdtemp.return_value)
                bindir = root / "bin"
                if arg == "bin":
                    return bindir
                elif arg == "pip":
                    return bindir / "pip"
                elif arg == "python":
                    return bindir / "python"
                elif arg == "py.typed":
                    path = Path("foo") / "py.typed"
                    ret = MagicMock(wraps=path)
                    ret.exists.return_value = value
                    return ret

            fakePath.return_value.__truediv__ = fakediv

        c.set_exists = set_exists  # so caller can run it
        c.set_exists(False)  # default
        yield c
        # --- Post-test assertions (run after the test body returns) ---
        # Create factory
        builder.assert_called_once_with(with_pip=True)
        # Used helper to get artifacts
        get_archives.assert_called_once_with("whatever")
        # venv factory ran twice in some temp dir
        builder.return_value.create.assert_has_calls(
            [call("tmpdir"), call("tmpdir")]
        )
        pip_base = "tmpdir/bin/pip install --disable-pip-version-check"
        for wanted in (
            # Pip installed to same version as running interpreter's pip
            call("tmpdir/bin/pip install pip==lmao"),
            # Archives installed into venv
            call("{} foo.tgz".format(pip_base)),
            call("{} foo.whl".format(pip_base)),
        ):
            assert wanted in c.run.mock_calls
[y/N] " 37 | 38 | @patch("invocations.console.input", return_value="") 39 | def default_on_empty_response_is_True_by_default(self, mock_input): 40 | assert confirm("Are you sure?") is True 41 | 42 | @patch("invocations.console.input", return_value="") 43 | def default_on_empty_response_is_False_if_assume_yes_False( 44 | self, mock_input 45 | ): 46 | assert confirm("Are you sure?", assume_yes=False) is False 47 | 48 | @patch("invocations.console.input", return_value=" y ") 49 | def whitespace_is_trimmed(self, mock_input): 50 | assert confirm("Are you sure?") is True 51 | -------------------------------------------------------------------------------- /tests/environment.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import patch 2 | from pytest import mark 3 | 4 | from invocations.environment import in_ci 5 | 6 | 7 | @mark.parametrize( 8 | "environ,expected", 9 | [ 10 | (dict(), False), 11 | (dict(WHATEVS="true", SURE_WHYNOT=""), False), 12 | (dict(CIRCLECI=""), False), 13 | (dict(TRAVIS=""), False), 14 | (dict(CIRCLECI="", WHATEVS="yo"), False), 15 | (dict(CIRCLECI="", TRAVIS=""), False), 16 | (dict(CIRCLECI="true"), True), 17 | (dict(CIRCLECI="false"), True), # yup 18 | (dict(CIRCLECI="no"), True), 19 | (dict(CIRCLECI="1"), True), 20 | (dict(CIRCLECI="0"), True), 21 | (dict(TRAVIS="true"), True), 22 | (dict(CIRCLECI="true", TRAVIS=""), True), 23 | (dict(CIRCLECI="", TRAVIS="true"), True), 24 | (dict(CIRCLECI="true", TRAVIS="true"), True), 25 | (dict(CIRCLECI="false", TRAVIS="no"), True), 26 | (dict(CIRCLECI="true", WHATEVS=""), True), 27 | (dict(CIRCLECI="true", WHATEVS="huh?"), True), 28 | ], 29 | ) 30 | def in_ci_true_when_any_expected_vars_nonempty(environ, expected): 31 | with patch("invocations.environment.os.environ", environ): 32 | assert in_ci() is expected 33 | -------------------------------------------------------------------------------- /tests/packaging/_support/conf.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyinvoke/invocations/4e3578e9c49dbbff2ec00ef3c8d37810fba511fa/tests/packaging/_support/conf.py -------------------------------------------------------------------------------- /tests/packaging/_support/fakepackage/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyinvoke/invocations/4e3578e9c49dbbff2ec00ef3c8d37810fba511fa/tests/packaging/_support/fakepackage/__init__.py -------------------------------------------------------------------------------- /tests/packaging/_support/fakepackage/_version.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.0.0" 2 | -------------------------------------------------------------------------------- /tests/packaging/_support/fakepackage/noversion.py: -------------------------------------------------------------------------------- 1 | lol = "nope" 2 | -------------------------------------------------------------------------------- /tests/packaging/_support/fakepackage/otherversion.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.0.1" 2 | -------------------------------------------------------------------------------- /tests/packaging/_support/index.rst: -------------------------------------------------------------------------------- 1 | .. Dummy index file so nearby changelogs can load 2 | -------------------------------------------------------------------------------- /tests/packaging/_support/no_unreleased_1.1_bugs.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | Changelog 3 | ========= 4 | 5 | * :release:`1.1.2 <2016-10-24>` 6 | * :release:`1.0.2 <2016-10-24>` 7 | * :bug:`2` Yup. 
8 | * :release:`1.1.1 <2016-10-17>` 9 | * :release:`1.0.1 <2014-01-02>` 10 | * :bug:`1` Fix a bug. 11 | * :release:`1.1.0 <2014-01-01>` 12 | * :release:`1.0.0 <2014-01-01>` 13 | -------------------------------------------------------------------------------- /tests/packaging/_support/no_unreleased_1.x_features.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | Changelog 3 | ========= 4 | 5 | * :release:`1.1.0 <2016-10-17>` 6 | * :release:`1.0.1 <2014-01-02>` 7 | * :feature:`2` New! Improved! 8 | * :bug:`1` Fix a bug. 9 | * :release:`1.0.0 <2014-01-01>` 10 | -------------------------------------------------------------------------------- /tests/packaging/_support/unreleased_1.1_bugs.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | Changelog 3 | ========= 4 | 5 | * :release:`1.0.2 <2016-10-24>` 6 | * :bug:`2` Yup. 7 | * :release:`1.1.1 <2016-10-24>` 8 | * :release:`1.0.1 <2014-01-02>` 9 | * :bug:`1` Fix a bug. 10 | * :release:`1.1.0 <2014-01-01>` 11 | * :release:`1.0.0 <2014-01-01>` 12 | -------------------------------------------------------------------------------- /tests/packaging/_support/unreleased_1.x_features.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | Changelog 3 | ========= 4 | 5 | .. Notably, no release yet for 1.1.0 6 | 7 | * :release:`1.0.1 <2014-01-02>` 8 | * :feature:`2` New! Improved! 9 | * :bug:`1` Fix a bug. 
class release_line_:
    # _release_line() is driven by the current git branch name (obtained via
    # the mocked run() result); these tests only inspect element [1] of its
    # return value, the Release type enum member.

    def assumes_bugfix_if_release_branch(self):
        # Numeric 'N.N' style branch -> maintenance line -> bugfix release.
        c = MockContext(run=Result("2.7"))
        assert _release_line(c)[1] == Release.BUGFIX

    def assumes_feature_if_main(self):
        c = MockContext(run=Result("main"))
        assert _release_line(c)[1] == Release.FEATURE

    def assumes_feature_if_master(self):
        # Legacy default branch name behaves the same as 'main'.
        c = MockContext(run=Result("master"))
        assert _release_line(c)[1] == Release.FEATURE

    def is_undefined_if_arbitrary_branch_name(self):
        c = MockContext(run=Result("yea-whatever"))
        assert _release_line(c)[1] == Release.UNDEFINED

    def is_undefined_if_specific_commit_checkout(self):
        # Just a sanity check; current logic doesn't differentiate between e.g.
        # 'gobbledygook' and 'HEAD'.
        c = MockContext(run=Result("HEAD"))
        assert _release_line(c)[1] == Release.UNDEFINED
97 | 98 | def no_unreleased(self): 99 | release, issues = _release_and_issues( 100 | changelog={"1.1": [], "1.1.0": [1, 2]}, 101 | branch="1.1", 102 | release_type=Release.BUGFIX, 103 | ) 104 | assert release == "1.1.0" 105 | assert issues == [] 106 | 107 | def has_unreleased(self): 108 | skip() 109 | 110 | class feature: 111 | def no_unreleased(self): 112 | # release is None, issues is empty list 113 | release, issues = _release_and_issues( 114 | changelog={"1.0.1": [1], "unreleased_1_feature": []}, 115 | branch="main", 116 | release_type=Release.FEATURE, 117 | ) 118 | assert release is None 119 | assert issues == [] 120 | 121 | def has_unreleased(self): 122 | # release is still None, issues is nonempty list 123 | release, issues = _release_and_issues( 124 | changelog={"1.0.1": [1], "unreleased_1_feature": [2, 3]}, 125 | branch="main", 126 | release_type=Release.FEATURE, 127 | ) 128 | assert release is None 129 | assert issues == [2, 3] 130 | 131 | def undefined_always_returns_None_and_empty_list(self): 132 | skip() 133 | 134 | 135 | class find_package_: 136 | def can_be_short_circuited_with_config_value(self): 137 | skip() 138 | 139 | def seeks_directories_with_init_py_in_em(self): 140 | skip() 141 | 142 | def blacklists_common_non_public_modules(self): 143 | skip() 144 | 145 | def errors_if_cannot_find_anything(self): 146 | skip() 147 | 148 | def errors_if_ambiguous_results(self): 149 | # I.e. 
>1 possible result 150 | skip() 151 | 152 | 153 | class load_version_: 154 | def setup(self): 155 | sys.path.insert(0, support_dir) 156 | 157 | def teardown(self): 158 | sys.path.remove(support_dir) 159 | 160 | def _expect_version(self, expected, config_val=None): 161 | config = {"package": "fakepackage"} 162 | if config_val is not None: 163 | config["version_module"] = config_val 164 | c = MockContext(Config(overrides={"packaging": config})) 165 | assert load_version(c) == expected 166 | 167 | # NOTE: these all also happen to test the Python bug re: a unicode value 168 | # given to `__import__(xxx, fromlist=['onoz'])`. No real point making 169 | # another one. 170 | 171 | def defaults_to_underscore_version(self): 172 | self._expect_version("1.0.0") 173 | 174 | def can_configure_which_module_holds_version_data(self): 175 | self._expect_version("1.0.1", config_val="otherversion") 176 | 177 | @patch("invocations.packaging.release.sys.modules", wraps=sys.modules) 178 | def reloads_version_in_case_edited_during_run(self, modules): 179 | # NOTE: mock doesn't mock/wrap dunder-attrs well (eg see python core 180 | # bug #25597) so we gotta rub some more on top, esp for eg 181 | # Python 3.8+ importlib which does additional setattrs and pops. 
182 | # (but we still wraps= in @patch as it smooths over other bits we don't 183 | # care about mocking, at least under Python <3.8) 184 | even_faker_package = Mock(_version=Mock(__version__="1.0.0")) 185 | modules.__getitem__.return_value = even_faker_package 186 | modules.get.return_value = even_faker_package 187 | self._expect_version("1.0.0") 188 | # Expect our own internal pops (the stdlib ones, eg under 3.8+, don't 189 | # exactly match these - no 2nd arg - so we can be pretty sure this 190 | # won't incorrectly pass due to them) 191 | modules.pop.assert_any_call("fakepackage._version", None) 192 | modules.pop.assert_any_call("fakepackage", None) 193 | 194 | def errors_usefully_if_version_module_not_found(self): 195 | skip() 196 | 197 | 198 | class latest_and_next_version_: 199 | def next_patch_of_bugfix_release(self): 200 | versions = _latest_and_next_version( 201 | Lexicon( 202 | { 203 | "release_type": Release.BUGFIX, 204 | "latest_line_release": Version("1.2.2"), 205 | "latest_overall_release": Version("1.4.1"), # realism! 206 | } 207 | ) 208 | ) 209 | assert versions == (Version("1.2.2"), Version("1.2.3")) 210 | 211 | def next_minor_of_feature_release(self): 212 | versions = _latest_and_next_version( 213 | Lexicon( 214 | { 215 | "release_type": Release.FEATURE, 216 | "latest_line_release": None, # realism! 217 | "latest_overall_release": Version("1.2.2"), 218 | } 219 | ) 220 | ) 221 | assert versions == (Version("1.2.2"), Version("1.3.0")) 222 | 223 | 224 | # Multi-dimensional scenarios, in relatively arbitrary nesting order: 225 | # - what type of release we're talking about (based on branch name) 226 | # - whether there appear to be unreleased issues in the changelog 227 | # - comparison of version file contents w/ latest release in changelog 228 | # TODO: ... (pypi release, etc) 229 | 230 | support_dir = path.join(path.dirname(__file__), "_support") 231 | 232 | # Sentinel for targeted __import__ mocking. 
Is a string so that it can be 233 | # expected in tests about the version file, etc. 234 | # NOTE: needs to not shadow any real imported module name! 235 | FAKE_PACKAGE = "fakey_mcfakerson_not_real_in_any_way" 236 | 237 | # NOTE: can't easily slap this on the test class itself due to using inner 238 | # classes. If we can get the inner classes to not only copy attributes but also 239 | # decorators (seems unlikely?), we could organize more "naturally". 240 | # NOTE: OTOH, it's actually nice to use this in >1 top level class, so...meh? 241 | @contextmanager 242 | def _mock_context(self): 243 | """ 244 | Context manager for a mocked Invoke context + other external patches. 245 | 246 | Specifically: 247 | 248 | - Examine test class attributes for configuration; this allows easy 249 | multidimensional test setup. 250 | - Where possible, the code under test relies on calling shell commands via 251 | the Context object, so we pass in a MockContext for that. 252 | - Where not possible (eg things which must be Python-level and not 253 | shell-level, such as version imports), mock with the 'mock' lib as usual. 254 | 255 | :yields: 256 | an `invoke.context.MockContext` created & modified as described above. 257 | """ 258 | # 259 | # Generate config & context from attrs 260 | # 261 | 262 | changelog_file = "{}.rst".format(self._changelog) 263 | config = Config( 264 | overrides={ 265 | "packaging": { 266 | "changelog_file": path.join(support_dir, changelog_file), 267 | "package": FAKE_PACKAGE, 268 | } 269 | } 270 | ) 271 | tag_output = "" 272 | if hasattr(self, "_tags"): 273 | tag_output = "\n".join(self._tags) + "\n" 274 | # NOTE: Result first posarg is stdout string data. 
275 | run_results = { 276 | # Branch detection 277 | "git rev-parse --abbrev-ref HEAD": self._branch, 278 | # Changelog update action - just here so it can be called 279 | re.compile(r"\$EDITOR.*"): True, 280 | # Git tags 281 | "git tag": tag_output, 282 | # Git status/commit/tagging 283 | re.compile("git tag .*"): True, 284 | re.compile("git commit.*"): True, 285 | # NOTE: some tests will need to override this, for now default to a 286 | # result that implies a commit is needed 287 | 'git status --porcelain | egrep -v "^\\?"': Result( 288 | "M somefile", exited=0 289 | ), 290 | } 291 | context = MockContext(config=config, run=run_results, repeat=True) 292 | 293 | # 294 | # Execute converge() inside a mock environment 295 | # 296 | 297 | # Allow targeted import mocking, leaving regular imports alone. 298 | real_import = __import__ 299 | 300 | def fake_import(*args, **kwargs): 301 | if args[0] is not FAKE_PACKAGE: 302 | return real_import(*args, **kwargs) 303 | return Mock(_version=Mock(__version__=self._version)) 304 | 305 | import_patcher = patch("builtins.__import__", side_effect=fake_import) 306 | 307 | with import_patcher: 308 | yield context 309 | 310 | 311 | def _mock_status(self): 312 | with _mock_context(self) as c: 313 | return status(c) 314 | 315 | 316 | @trap 317 | def _expect_actions(self, *actions): 318 | _mock_status(self) 319 | stdout = sys.stdout.getvalue() 320 | for action in actions: 321 | # Check for action's text value in the table which gets printed. 322 | # (Actual table formatting is tested in an individual test.) 
323 | err = "Didn't find {} in stdout:\n\n{}".format(action, stdout) 324 | assert action.value in stdout, err 325 | 326 | 327 | class status_: 328 | class overall_behavior: 329 | _branch = "1.1" 330 | _changelog = "unreleased_1.1_bugs" 331 | _version = "1.1.1" 332 | _tags = ("1.1.0", "1.1.1") 333 | 334 | @trap 335 | def displays_expectations_and_component_statuses(self): 336 | _mock_status(self) 337 | 338 | # TODO: make things more organic/specific/less tabular: 339 | # 340 | # current git branch: xxx (implies type yyy) 341 | # changelog: xxx 342 | # so the next release would be: a.b.c (or: 'so the release we're 343 | # cutting/expecting is a.b.c') 344 | # version file: 345 | # git tag: (maybe including 346 | # latest that is found? that's extra logic...) 347 | # etc... 348 | 349 | parts = dict( 350 | changelog=Changelog.NEEDS_RELEASE.value, 351 | version=VersionFile.NEEDS_BUMP.value, 352 | tag=Tag.NEEDS_CUTTING.value, 353 | ) 354 | for part in parts: 355 | parts[part] = re.escape(parts[part]) 356 | parts["header_footer"] = r"-+( +-+)?" 357 | # NOTE: forces impl to follow specific order, which is good 358 | regex = r""" 359 | {header_footer} 360 | Changelog +{changelog} 361 | Version +{version} 362 | Tag +{tag} 363 | {header_footer} 364 | """.format( 365 | **parts 366 | ).strip() 367 | output = sys.stdout.getvalue() 368 | err = "Expected:\n\n{}\n\nGot:\n\n{}".format(regex, output) 369 | err += "\n\nRepr edition...\n\n" 370 | err += "Expected:\n\n{!r}\n\nGot:\n\n{!r}".format(regex, output) 371 | assert re.match(regex, output) is not None, err 372 | 373 | @trap # just for cleaner test output 374 | def returns_lexica_for_reuse(self): 375 | actions = Lexicon( 376 | changelog=Changelog.NEEDS_RELEASE, 377 | version=VersionFile.NEEDS_BUMP, 378 | tag=Tag.NEEDS_CUTTING, 379 | all_okay=False, 380 | ) 381 | found_actions, found_state = _mock_status(self) 382 | assert found_actions == actions 383 | # Spot check state, don't need to check whole thing... 
384 | assert found_state.branch == self._branch 385 | assert found_state.latest_version == Version("1.1.1") 386 | assert found_state.tags == [Version(x) for x in self._tags] 387 | 388 | # TODO: I got this attribute jazz working in pytest but see if there is a 389 | # 'native' pytest feature that works better (while still in conjunction 390 | # with nested tasks, ideally) 391 | class release_line_branch: 392 | _branch = "1.1" 393 | 394 | class unreleased_issues: 395 | _changelog = "unreleased_1.1_bugs" 396 | 397 | class file_version_equals_latest_in_changelog: 398 | _version = "1.1.1" 399 | 400 | class tags_only_exist_for_past_releases: 401 | _tags = ("1.1.0", "1.1.1") 402 | 403 | def changelog_release_version_update_tag_update(self): 404 | _expect_actions( 405 | self, 406 | Changelog.NEEDS_RELEASE, 407 | VersionFile.NEEDS_BUMP, 408 | Tag.NEEDS_CUTTING, 409 | ) 410 | 411 | class version_file_is_newer: 412 | _version = "1.1.2" 413 | 414 | class tags_only_exist_for_past_releases: 415 | _tags = ("1.1.0", "1.1.1") 416 | 417 | def changelog_release_version_okay_tag_update(self): 418 | _expect_actions( 419 | self, 420 | Changelog.NEEDS_RELEASE, 421 | VersionFile.OKAY, 422 | Tag.NEEDS_CUTTING, 423 | ) 424 | 425 | class changelog_version_is_newer: 426 | _version = "1.1.0" 427 | # Undefined situation - unsure how/whether to test 428 | 429 | class no_unreleased_issues: 430 | _changelog = "no_unreleased_1.1_bugs" 431 | 432 | class file_version_equals_latest_in_changelog: 433 | _version = "1.1.2" 434 | 435 | class tag_for_new_version_present: 436 | _tags = ("1.1.0", "1.1.1", "1.1.2") 437 | 438 | def no_updates_necessary(self): 439 | _expect_actions( 440 | self, Changelog.OKAY, VersionFile.OKAY, Tag.OKAY 441 | ) 442 | 443 | class tag_for_new_version_missing: 444 | _tags = ("1.1.0", "1.1.1") 445 | 446 | def tag_needs_cutting_still(self): 447 | _expect_actions( 448 | self, 449 | Changelog.OKAY, 450 | VersionFile.OKAY, 451 | Tag.NEEDS_CUTTING, 452 | ) 453 | 454 | class 
version_file_out_of_date: 455 | _version = "1.1.1" 456 | 457 | class tag_missing: 458 | _tags = ("1.1.0", "1.1.1") # no 1.1.2 459 | 460 | def changelog_okay_version_needs_bump_tag_needs_cut(self): 461 | _expect_actions( 462 | self, 463 | Changelog.OKAY, 464 | VersionFile.NEEDS_BUMP, 465 | Tag.NEEDS_CUTTING, 466 | ) 467 | 468 | # TODO: as in other TODOs, tag can't be expected to exist/be up 469 | # to date if any other files are also not up to date. so tag 470 | # present but version file out of date, makes no sense, would 471 | # be an error. 472 | 473 | class version_file_is_newer: 474 | _version = "1.1.3" 475 | 476 | def both_technically_okay(self): 477 | skip() # see TODO below 478 | _expect_actions( 479 | self, 480 | # TODO: display a 'warning' state noting that your 481 | # version outpaces your changelog despite your 482 | # changelog having no unreleased stuff in it. Still 483 | # "Okay" (no action needed), not an error per se, but 484 | # still "strange". 485 | Changelog.OKAY, 486 | VersionFile.OKAY, 487 | ) 488 | 489 | class main_branch: 490 | _branch = "main" 491 | 492 | class unreleased_issues: 493 | _changelog = "unreleased_1.x_features" 494 | 495 | class file_version_equals_latest_in_changelog: 496 | _version = "1.0.1" 497 | 498 | class latest_tag_same_as_file_version: 499 | _tags = ("1.0.0", "1.0.1") 500 | 501 | def changelog_release_version_update_tag_cut(self): 502 | # TODO: do we want some sort of "and here's _what_ you 503 | # ought to be adding as the new release and/or version 504 | # value" aspect to the actions? can leave up to user 505 | # for now, but, more automation is better. 506 | _expect_actions( 507 | self, 508 | Changelog.NEEDS_RELEASE, 509 | VersionFile.NEEDS_BUMP, 510 | Tag.NEEDS_CUTTING, 511 | ) 512 | 513 | # TODO: if there's somehow a tag present for a release as yet 514 | # uncut...which makes no sense as changelog still has no 515 | # release. Would represent error state! 
516 | 517 | # TODO: what if the version file is newer _but not what it needs to 518 | # be for the branch_? e.g. if it was 1.0.2 here (where latest 519 | # release is 1.0.1 but branch (main) implies desire is 1.1.0)? 520 | 521 | class version_file_is_newer: 522 | _version = "1.1.0" 523 | 524 | class new_tag_not_present: 525 | _tags = ("1.0.1",) 526 | 527 | def changelog_release_version_okay(self): 528 | _expect_actions( 529 | self, 530 | # TODO: same as above re: suggesting the release 531 | # value to the edit step 532 | Changelog.NEEDS_RELEASE, 533 | VersionFile.OKAY, 534 | Tag.NEEDS_CUTTING, 535 | ) 536 | 537 | class changelog_version_is_newer: 538 | _version = "1.2.0" 539 | # TODO: as with bugfix branches, this is undefined, except here 540 | # it's even moreso because...well it's even more wacky. why 541 | # would we have anything >1.1.0 when the changelog itself only 542 | # even goes up to 1.0.x?? 543 | 544 | class no_unreleased_issues: 545 | _changelog = "no_unreleased_1.x_features" 546 | 547 | class file_version_equals_latest_in_changelog: 548 | _version = "1.1.0" 549 | 550 | class tag_present: 551 | _tags = ("1.0.2", "1.1.0") 552 | 553 | def all_okay(self): 554 | _expect_actions( 555 | self, Changelog.OKAY, VersionFile.OKAY, Tag.OKAY 556 | ) 557 | 558 | class tag_missing: 559 | _tags = "1.0.2" 560 | 561 | def changelog_and_version_okay_tag_needs_cut(self): 562 | _expect_actions( 563 | self, 564 | Changelog.OKAY, 565 | VersionFile.OKAY, 566 | Tag.NEEDS_CUTTING, 567 | ) 568 | 569 | class undefined_branch: 570 | _branch = "whatever" 571 | _changelog = "nah" 572 | _tags = ("nope",) 573 | 574 | @raises(UndefinedReleaseType) 575 | def raises_exception(self): 576 | _mock_status(self) 577 | 578 | 579 | def _confirm(which): 580 | path = "invocations.packaging.release.confirm" 581 | 582 | def _wrapper(f): 583 | return trap(patch(path, return_value=which)(f)) 584 | 585 | return _wrapper 586 | 587 | 588 | _confirm_true = _confirm(True) 589 | _confirm_false = 
_confirm_false = _confirm(False)


# Helper wrapping prepare(): swallow the Exit it raises on failure by default
# (so tests can assert on partial effects); mute=False re-raises instead.
def _run_prepare(c, mute=True, **kwargs):
    try:
        return prepare(c, **kwargs)
    except Exit:
        if not mute:
            raise


class prepare_:

    # NOTE: mostly testing the base case of 'everything needs updating',
    # all the permutations are tested elsewhere.
    _branch = "1.1"
    _changelog = "unreleased_1.1_bugs"
    _version = "1.1.1"
    _tags = ("1.1.0",)

    @_confirm_false
    def displays_status_output(self, _):
        with _mock_context(self) as c:
            _run_prepare(c)
        output = sys.stdout.getvalue()
        for action in (
            Changelog.NEEDS_RELEASE,
            VersionFile.NEEDS_BUMP,
            Tag.NEEDS_CUTTING,
        ):
            err = "Didn't see '{}' text in status output!".format(action.name)
            assert action.value in output, err

    @patch("invocations.packaging.release.status")
    def short_circuits_when_no_work_to_do(self, status):
        status.return_value = Lexicon(all_okay=True), Lexicon()
        with _mock_context(self) as c:
            # True retval, one call to status(), and no barfing on lack of
            # run() mocking, all point to the short circuit happening
            assert _run_prepare(c) is True
            assert status.call_count == 1

    @trap
    @patch("invocations.console.input", return_value="no")
    def prompts_before_taking_action(self, mock_input):
        with _mock_context(self) as c:
            _run_prepare(c)
        assert mock_input.call_args[0][0] == "Take the above actions? [Y/n] "

    @_confirm_false
    def if_prompt_response_negative_no_action_taken(self, _):
        with _mock_context(self) as c:
            _run_prepare(c)
        # TODO: move all action-y code into subroutines, then mock them and
        # assert they were never called?
        # Expect that only the status-y run() calls were made.
        assert c.run.call_count == 2
        commands = [x[0][0] for x in c.run.call_args_list]
        assert commands[0].startswith("git rev-parse")
        assert commands[1].startswith("git tag")

    @_confirm_true
    def opens_EDITOR_with_changelog_when_it_needs_update(self, _):
        with _mock_context(self) as c:
            _run_prepare(c)
            # Grab changelog path from the context config, why not
            path = c.config.packaging.changelog_file
            # TODO: real code should probs expand EDITOR explicitly so it can
            # run w/o a shell wrap / require a full env?
            cmd = "$EDITOR {}".format(path)
            c.run.assert_any_call(cmd, pty=True, hide=False, dry=False)

    @_confirm_true
    def opens_EDITOR_with_version_file_when_it_needs_update(self, _):
        with _mock_context(self) as c:
            _run_prepare(c)
            path = "{}/_version.py".format(FAKE_PACKAGE)
            # TODO: real code should probs expand EDITOR explicitly so it can
            # run w/o a shell wrap / require a full env?
            cmd = "$EDITOR {}".format(path)
            c.run.assert_any_call(cmd, pty=True, hide=False, dry=False)

    @_confirm_true
    def commits_and_adds_git_tag_when_needs_cutting(self, _):
        with _mock_context(self) as c:
            _run_prepare(c)
            version = "1.1.2"  # as changelog has issues & prev was 1.1.1
            # Ensure the commit necessity test happened. (Default mock_context
            # sets it up to result in a commit being necessary.)
            check = 'git status --porcelain | egrep -v "^\\?"'
            c.run.assert_any_call(check, hide=True, warn=True)
            commit = 'git commit -am "Cut {}"'.format(version)
            tag = 'git tag -a {} -m ""'.format(version)
            for cmd in (commit, tag):
                c.run.assert_any_call(cmd, hide=False, dry=False, echo=True)

    @_confirm_true
    def does_not_commit_if_no_commit_necessary(self, _):
        with _mock_context(self) as c:
            # Set up for a no-commit-necessary result to check command
            check = 'git status --porcelain | egrep -v "^\\?"'
            c.set_result_for("run", check, Result("", exited=1))
            _run_prepare(c)
            # Expect NO git commit
            commands = [x[0][0] for x in c.run.call_args_list]
            assert not any(x.startswith("git commit") for x in commands)
            # Expect git tag
            c.run.assert_any_call(
                'git tag -a 1.1.2 -m ""', hide=False, dry=False, echo=True
            )

    class final_status_check:
        @_confirm_true
        @patch("invocations.packaging.release.status")
        def run_twice_when_not_short_circuiting(self, status, _):
            # First status() result demands action; second says all done.
            status.side_effect = [
                (
                    Lexicon(
                        changelog=Changelog.NEEDS_RELEASE,
                        version=VersionFile.OKAY,
                        tag=Tag.OKAY,
                        all_okay=False,
                    ),
                    Lexicon(),
                ),
                (Lexicon(all_okay=True), Lexicon()),
            ]
            with _mock_context(self) as c:
                # Mute off - want kaboom if Exit raised
                _run_prepare(c, mute=False)
            assert status.call_count == 2

        @_confirm_true
        @patch("invocations.packaging.release.status")
        def exits_if_still_not_all_okay(self, status, _):
            # Second status() still reports problems -> prepare() must Exit.
            status.side_effect = [
                (
                    Lexicon(
                        changelog=Changelog.NEEDS_RELEASE,
                        version=VersionFile.OKAY,
                        tag=Tag.OKAY,
                        all_okay=False,
                    ),
                    Lexicon(),
                ),
                (Lexicon(all_okay=False), Lexicon()),
            ]
            with _mock_context(self) as c:
                with pytest.raises(Exit, match=r"Something went wrong"):
                    _run_prepare(c, mute=False)
            assert status.call_count == 2

    class dry_run_prepare:
        @patch("invocations.packaging.release.status")
        def exits_early_like_non_dry_run_on_all_okay(self, status):
            status.return_value = Lexicon(all_okay=True), Lexicon()
            with _mock_context(self) as c:
                assert _run_prepare(c, dry_run=True) is True
                assert status.call_count == 1

        @patch("invocations.packaging.release.status")
        def does_not_fail_fast_on_bad_release_type(self, status):
            status.side_effect = UndefinedReleaseType
            with _mock_context(self) as c:
                _run_prepare(c, dry_run=True)

        @patch("invocations.console.input")
        def does_not_prompt_to_confirm(self, mock_input):
            with _mock_context(self) as c:
                _run_prepare(c, dry_run=True)
            assert not mock_input.called

        def dry_runs_all_prep_commands(self):
            # Reminder: default state of mocked context is "everything needs
            # updates"
            with _mock_context(self) as c:
                _run_prepare(c, dry_run=True)
            # Pull out every run() invocation that passed dry=True.
            dry_runs = [
                x[1][0] for x in c.run.mock_calls if x[2].get("dry", False)
            ]
            for pattern in (
                r"\$EDITOR .*\.rst",
                r"\$EDITOR .*_version\.py",
                r"git commit.*",
                r"git tag -a.*",
            ):
                assert any(re.match(pattern, x) for x in dry_runs)

        @patch("invocations.packaging.release.status")
        def does_not_run_final_status_check(self, status):
            # Slight cheat: other actions all actually ok even tho all_okay is
            # false. means no needing to mock the run() calls etc.
            status.return_value = (
                Lexicon(
                    changelog=Changelog.OKAY,
                    version=VersionFile.OKAY,
                    tag=Tag.OKAY,
                    all_okay=False,
                ),
                Lexicon(),
            )
            with _mock_context(self) as c:
                _run_prepare(c, dry_run=True)
            # The end step was skipped
            assert status.call_count == 1

    # Don't want a full re-enactment of status_ test tree, but do want to spot
    # check that actions not needing to be taken, aren't...
    class lack_of_action:
        _changelog = "no_unreleased_1.1_bugs"

        @_confirm_true
        def no_changelog_update_needed_means_no_changelog_edit(self, _):
            with _mock_context(self) as c:
                _run_prepare(c)
                # TODO: as with the 'took no actions at all' test above,
                # proving a negative sucks - eventually make this subroutine
                # assert based. Meh.
                path = c.config.packaging.changelog_file
                cmd = "$EDITOR {}".format(path)
                err = "Saw {!r} despite changelog not needing update!".format(
                    cmd
                )
                assert cmd not in [x[0][0] for x in c.run.call_args_list], err


# NOTE: yea...this kinda pushes the limits of sane TDD...meh
# NOTE: possible that the actual codes blessings emits differ based on
# termcap/etc; consider sucking it up and just calling blessings directly in
# that case, even though it makes the tests kinda tautological.
# TODO: yes, when I personally went from TERM=xterm-256color to
# TERM=screen-256color, that made these tests break! Updating test machinery to
# account for now, but...not ideal!
class component_state_enums_contain_human_readable_values:
    # Each enum value is the exact colored/ANSI-coded string shown in the
    # status table (green check / red cross plus message).
    class changelog:
        def okay(self):
            expected = "\x1b[32m\u2714 no unreleased issues\x1b(B\x1b[m"
            assert Changelog.OKAY.value == expected

        def needs_release(self):
            expected = "\x1b[31m\u2718 needs :release: entry\x1b(B\x1b[m"
            assert Changelog.NEEDS_RELEASE.value == expected

    class version_file:
        def okay(self):
            expected = "\x1b[32m\u2714 version up to date\x1b(B\x1b[m"
            assert VersionFile.OKAY.value == expected

        def needs_bump(self):
            expected = "\x1b[31m\u2718 needs version bump\x1b(B\x1b[m"
            assert VersionFile.NEEDS_BUMP.value == expected

    class tag:
        def okay(self):
            assert Tag.OKAY.value == "\x1b[32m\u2714 all set\x1b(B\x1b[m"

        def needs_cutting(self):
            expected = "\x1b[31m\u2718 needs cutting\x1b(B\x1b[m"
            assert Tag.NEEDS_CUTTING.value == expected


@contextmanager
def _expect_setuppy(flags, python="python", config=None, yield_rmtree=False):
    # Yield a MockContext (optionally plus the patched rmtree) and, on exit,
    # assert that exactly one `<python> setup.py <flags>` command was run.
    kwargs = dict(run=True)
    if config is not None:
        kwargs["config"] = config
    c = MockContext(**kwargs)
    # Make sure we don't actually run rmtree regardless
    with patch("invocations.packaging.release.rmtree") as rmtree:
        if yield_rmtree:
            yield c, rmtree
        else:
            yield c
    c.run.assert_called_once_with("{} setup.py {}".format(python, flags))


class build_:
    # Expected setup.py argument strings for the various sdist/wheel combos.
    _sdist_flags = "sdist -d dist"
    _wheel_flags = "build -b build bdist_wheel -d dist"
    _both_flags = "sdist -d dist build -b build bdist_wheel -d dist"
    _oh_dir = "sdist -d {0} build -b {1} bdist_wheel -d {0}".format(
        path.join("dir", "dist"), path.join("dir", "build")
    )

    class sdist:
        def indicates_sdist_builds(self):
            with _expect_setuppy(self._both_flags) as c:
                build(c, sdist=True)

        def on_by_default(self):
            with _expect_setuppy(self._both_flags) as c:
                build(c)

        def can_be_disabled_via_config(self):
            config = Config(dict(packaging=dict(sdist=False)))
            with _expect_setuppy(self._wheel_flags, config=config) as c:
                build(c)

        def kwarg_wins_over_config(self):
            config = Config(dict(packaging=dict(sdist=True)))
            with _expect_setuppy(self._wheel_flags, config=config) as c:
                build(c, sdist=False)

    class wheel:
        def indicates_explicit_build_and_wheel(self):
            with _expect_setuppy(self._wheel_flags) as c:
                build(c, sdist=False, wheel=True)

        def on_by_default(self):
            with _expect_setuppy(self._wheel_flags) as c:
                build(c, sdist=False)

        def can_be_disabled_via_config(self):
            config = Config(dict(packaging=dict(wheel=False)))
            with _expect_setuppy(self._sdist_flags, config=config) as c:
                build(c)

        def kwarg_wins_over_config(self):
            config = Config(dict(packaging=dict(wheel=True)))
            with _expect_setuppy(self._sdist_flags, config=config) as c:
                build(c, wheel=False)

    @raises(Exit)
    def kabooms_if_sdist_and_wheel_both_False(self):
        build(MockContext(), sdist=False, wheel=False)

    class directory:
        def defaults_to_blank_or_cwd(self):
            with _expect_setuppy(self._both_flags) as c:
                build(c)

        def if_given_affects_build_and_dist_dirs(self):
            with _expect_setuppy(self._oh_dir) as c:
                build(c, directory="dir")

        def may_be_given_via_config(self):
            config = Config(dict(packaging=dict(directory="dir")))
            with _expect_setuppy(self._oh_dir, config=config) as c:
                build(c)

        def kwarg_wins_over_config(self):
            config = Config(dict(packaging=dict(directory="NOTdir")))
            with _expect_setuppy(self._oh_dir, config=config) as c:
                build(c, directory="dir")

    class python:
        def defaults_to_python(self):
            with _expect_setuppy(self._both_flags, python="python") as c:
                build(c, python="python")

        def may_be_overridden(self):
            with _expect_setuppy(self._both_flags, python="fython") as c:
                build(c, python="fython")

        def can_be_given_via_config(self):
            config = Config(dict(packaging=dict(python="python17")))
            with _expect_setuppy(
                self._both_flags, config=config, python="python17"
            ) as c:
                build(c)

        def kwarg_wins_over_config(self):
            config = Config(dict(packaging=dict(python="python17")))
            with _expect_setuppy(
                self._both_flags, config=config, python="python99"
            ) as c:
                build(c, python="python99")

    class clean:
        def _expect_with_rmtree(self):
            return _expect_setuppy(self._both_flags, yield_rmtree=True)

        def defaults_to_False_meaning_no_clean(self):
            with self._expect_with_rmtree() as (c, rmtree):
                build(c)
            assert not rmtree.called

        def True_means_clean_both_dirs(self):
            with self._expect_with_rmtree() as (c, rmtree):
                build(c, clean=True)
            rmtree.assert_any_call("dist", ignore_errors=True)
            rmtree.assert_any_call("build", ignore_errors=True)

        def understands_directory_option(self):
            with _expect_setuppy(self._oh_dir, yield_rmtree=True) as (
                c,
                rmtree,
            ):
                build(c, directory="dir", clean=True)
            rmtree.assert_any_call(
                path.join("dir", "build"), ignore_errors=True
            )
            rmtree.assert_any_call(
                path.join("dir", "dist"), ignore_errors=True
            )

        def may_be_configured(self):
            config = Config(dict(packaging=dict(clean=True)))
            with _expect_setuppy(
                self._both_flags, yield_rmtree=True, config=config
            ) as (c, rmtree):
                build(c)
            rmtree.assert_any_call("dist", ignore_errors=True)
            rmtree.assert_any_call("build", ignore_errors=True)

        def kwarg_wins_over_config(self):
            config = Config(dict(packaging=dict(clean=True)))
            with _expect_setuppy(
                self._both_flags, yield_rmtree=True, config=config
            ) as (c, rmtree):
                build(c, clean=False)
            # NOTE(review): asserting rmtree *was* called despite clean=False
            # looks inconsistent with the sdist/wheel kwarg-wins tests above
            # (which assert the kwarg overrides config) - confirm intent.
            rmtree.assert_any_call("dist", ignore_errors=True)
            rmtree.assert_any_call("build", ignore_errors=True)


class upload_:
    def _check_upload(self, c, kwargs=None, flags=None, extra=None):
        """
        Expect/call upload() with common environment and settings/mocks.

        Returns the full command constructed, typically for further
        examination.
        """

        def mkpath(x):
            return path.join("somedir", "dist", x)

        with patch("invocations.packaging.release.glob") as glob:
            # glob() is fed one pattern per archive type; answer with a fixed
            # fake wheel/tarball path for each.
            tgz, whl = mkpath("foo.tar.gz"), mkpath("foo.whl")
            glob.side_effect = lambda x: [tgz if x.endswith("gz") else whl]
            # Do the thing!
            upload(c, "somedir", **(kwargs or {}))
            glob.assert_any_call(mkpath("*.tar.gz"))
            glob.assert_any_call(mkpath("*.whl"))
            # Wheels first - matches upload()'s ordering.
            self.files = "{} {}".format(whl, tgz)
            cmd = "twine upload"
            if flags:
                cmd += " {}".format(flags)
            cmd += " {}".format(self.files)
            if extra:
                cmd += " {}".format(extra)
            return cmd

    def twine_uploads_dist_contents_with_wheels_first(self):
        c = MockContext(run=True)
        c.run.assert_called_once_with(self._check_upload(c))

    def may_target_alternate_index(self):
        c = MockContext(run=True)
        cmd = self._check_upload(
            c, kwargs=dict(index="lol"), flags="--repository lol"
        )
        c.run.assert_called_once_with(cmd)

    @patch("builtins.print")
    def dry_run_just_prints_and_ls(self, print):
        c = MockContext(run=True)
        cmd = self._check_upload(c, kwargs=dict(dry_run=True))
        print.assert_any_call("Would publish via: {}".format(cmd))
        c.run.assert_called_once_with("ls -l {}".format(self.files))
    @patch("invocations.packaging.release.getpass.getpass")
    def allows_signing_via_gpg(self, getpass):
        # sign=True should gpg-sign each archive, then upload with .asc files.
        c = MockContext(run=True, repeat=True)
        getpass.return_value = "super sekrit"
        twine_upload = self._check_upload(
            c, kwargs=dict(sign=True), extra="somedir/dist/*.asc"
        )
        calls = c.run.mock_calls
        # Looked for gpg
        assert calls[0] == call("which gpg", hide=True, warn=True)
        # Signed wheel
        flags = "--detach-sign --armor --passphrase-fd=0 --batch --pinentry-mode=loopback"  # noqa
        template = "gpg {} somedir/dist/foo.{{}}".format(flags)
        assert calls[1][1][0] == template.format("whl")
        # Spot check: did use in_stream to submit passphrase
        assert "in_stream" in calls[1][2]
        # Signed tgz
        assert calls[2][1][0] == template.format("tar.gz")
        # Uploaded (and w/ asc's)
        c.run.assert_any_call(twine_upload)


class _Kaboom(Exception):
    # Sentinel exception for "a mocked step blew up" tests below.
    pass


class publish_:
    class base_case:
        def does_all_the_things(self, fakepub):
            # End-to-end happy path: build -> twine check -> install test
            # -> upload -> tmpdir cleanup, all via the fakepub fixture's mocks.
            c, mocks = fakepub
            # Execution
            publish(c)
            # Unhides stdout
            assert c.config.run.hide is False
            # Build
            mocks.build.assert_called_once_with(
                c, sdist=True, wheel=True, directory="tmpdir"
            )
            # Twine check
            splat = path.join("tmpdir", "dist", "*")
            mocks.twine_check.assert_called_once_with(dists=[splat])
            # Install test
            mocks.test_install.assert_called_once_with(c, directory="tmpdir")
            # Upload
            mocks.upload.assert_called_once_with(
                c, directory="tmpdir", index=None, sign=False, dry_run=False
            )
            # Tmpdir cleaned up
            mocks.rmtree.assert_called_once_with("tmpdir")

        def cleans_up_on_error(self, fakepub):
            # Tmpdir must be removed even when an inner step raises.
            # NOTE(review): calls publish() with a fresh MockContext rather
            # than the fixture's `c` — presumably fine since the assertions
            # only touch `mocks`, but confirm this is intentional.
            c, mocks = fakepub
            mocks.build.side_effect = _Kaboom
            with pytest.raises(_Kaboom):
                publish(MockContext(run=True))
            mocks.rmtree.assert_called_once_with(mocks.mkdtemp.return_value)

        def monkeypatches_readme_renderer(self, fakepub):
            # Happens at module load time but is just a data structure change
            import readme_renderer.rst

            assert (
                readme_renderer.rst.SETTINGS["halt_level"]
                == Reporter.INFO_LEVEL
            )
            assert (
                readme_renderer.rst.SETTINGS["report_level"]
                == Reporter.INFO_LEVEL
            )

    class index:
        # kwarg / config / precedence behavior for the `index` option.
        def passed_to_upload(self, fakepub):
            c, mocks = fakepub
            publish(c, index="dev")
            assert mocks.upload.call_args[1]["index"] == "dev"

        def honors_config(self, fakepub):
            c, mocks = fakepub
            c.config.packaging = dict(index="prod")
            publish(c)
            assert mocks.upload.call_args[1]["index"] == "prod"

        def kwarg_beats_config(self, fakepub):
            c, mocks = fakepub
            c.config.packaging = dict(index="prod")
            publish(c, index="dev")
            assert mocks.upload.call_args[1]["index"] == "dev"

    class sign:
        # kwarg / config / precedence behavior for the `sign` option.
        def passed_to_upload(self, fakepub):
            c, mocks = fakepub
            publish(c, sign=True)
            assert mocks.upload.call_args[1]["sign"] is True

        def honors_config(self, fakepub):
            c, mocks = fakepub
            c.config.packaging = dict(sign=True)
            publish(c)
            assert mocks.upload.call_args[1]["sign"] is True

        def kwarg_beats_config(self, fakepub):
            c, mocks = fakepub
            c.config.packaging = dict(sign=False)
            publish(c, sign=True)
            assert mocks.upload.call_args[1]["sign"] is True

    class sdist:
        def defaults_True_and_passed_to_build(self, fakepub):
            c, mocks = fakepub
            publish(c)
            assert mocks.build.call_args[1]["sdist"] is True

        def may_be_overridden(self, fakepub):
            c, mocks = fakepub
            publish(c, sdist=False)
            assert mocks.build.call_args[1]["sdist"] is False

    class wheel:
        def defaults_True_and_passed_to_build(self, fakepub):
            c, mocks = fakepub
            publish(c)
            assert mocks.build.call_args[1]["wheel"] is True

        def may_be_overridden(self, fakepub):
            c, mocks = fakepub
            publish(c, wheel=False)
            assert mocks.build.call_args[1]["wheel"] is False

    def directory_affects_tmpdir(self, fakepub):
        # Explicit directory= skips mkdtemp and is handed to build().
        c, mocks = fakepub
        publish(c, directory="explicit")
        assert not mocks.mkdtemp.called
        assert mocks.build.call_args[1]["directory"] == "explicit"

    class dry_run:
        def causes_tmpdir_cleanup_to_be_skipped(self, fakepub):
            c, mocks = fakepub
            publish(c, dry_run=True)
            assert not mocks.rmtree.called

        def causes_tmpdir_cleanup_to_be_skipped_on_exception(self, fakepub):
            c, mocks = fakepub
            mocks.build.side_effect = _Kaboom
            with pytest.raises(_Kaboom):
                publish(c, dry_run=True)
            assert not mocks.rmtree.called

        def passed_to_upload(self, fakepub):
            c, mocks = fakepub
            publish(c, dry_run=True)
            assert mocks.upload.call_args[1]["dry_run"] is True


class test_install_:
    def installs_all_archives_in_fresh_venv_with_matching_pip(self, install):
        c = install
        # Basic test, uses guts of fixture
        install_test_task(c, directory="whatever")
        # Import attempt was made
        c.run.assert_any_call("tmpdir/bin/python -c 'import foo'")

    def skips_import_test_when_asked_to(self, install):
        c = install
        install_test_task(c, directory="whatever", skip_import=True)
        # No import attempt
        for unwanted in (call("tmpdir/bin/python -c 'import foo'"),):
            assert unwanted not in c.run.mock_calls

    def does_mypy_import_when_py_typed_present(self, install):
        c = install
        # Mock out the pathlib exists call as positive (default is negative)
        c.set_exists(True)
        install_test_task(c, directory="whatever")
        # Mypy installed and executed
        c.run.assert_any_call("tmpdir/bin/pip install mypy")
        # NOTE: not actually the same 2 tmpdirs here but I'm already so sick of
        # all these mocks, jeez
        c.run.assert_any_call("cd tmpdir && tmpdir/bin/mypy -c 'import foo'")

    def skips_mypy_import_when_no_py_typed(self, install):
        c = install
        # Mock out the pathlib exists call as explicitly false, why not
        c.set_exists(False)
        install_test_task(c, directory="whatever")
        # Mypy NOT installed or executed
        for unwanted in (
            call("tmpdir/bin/pip install mypy"),
            call("cd tmpdir && tmpdir/bin/mypy -c 'import foo'"),
        ):
            assert unwanted not in c.run.mock_calls

    def skips_mypy_import_when_skipping_regular_import(self, install):
        # skip_import=True suppresses the mypy check too, even with py.typed.
        c = install
        c.set_exists(True)
        install_test_task(c, directory="whatever", skip_import=True)
        # Mypy NOT installed or executed
        for unwanted in (
            call("tmpdir/bin/python -c 'import foo'"),
            call("tmpdir/bin/pip install mypy"),
            call("cd tmpdir && tmpdir/bin/mypy -c 'import foo'"),
        ):
            assert unwanted not in c.run.mock_calls


class push_:
    def pushes_with_follow_tags(self):
        "git-pushes with --follow-tags"
        c = MockContext(run=True)
        push(c)
        c.run.assert_called_once_with("git push --follow-tags --no-verify")

    @trap
    @patch("invocations.environment.os.environ", dict(CIRCLECI=""))
    def honors_dry_run(self):
        # Outside CI, dry_run adds git's own --dry-run flag.
        c = MockContext(run=True)
        push(c, dry_run=True)
        c.run.assert_called_once_with(
            "git push --follow-tags --no-verify --dry-run", echo=True
        )

    @trap
    @patch("invocations.environment.os.environ", dict(CIRCLECI="true"))
    def dry_run_dry_runs_the_invocation_itself_if_in_ci(self):
        # In CI, dry_run instead dry-runs the Invoke call (dry=True).
        c = MockContext(run=True)
        push(c, dry_run=True)
        c.run.assert_called_once_with(
            "git push --follow-tags --no-verify", echo=True, dry=True
        )

    @trap
    @patch("invocations.environment.os.environ", dict(CIRCLECI="true"))
    def ci_check_only_applies_to_dry_run_behavior(self):
        # Yes, technically already covered by base tests, but...
        c = MockContext(run=True)
        push(c, dry_run=False)
        c.run.assert_called_once_with("git push --follow-tags --no-verify")


class all_task:
    @patch("invocations.packaging.release.prepare")
    @patch("invocations.packaging.release.publish")
    @patch("invocations.packaging.release.push")
    def runs_primary_workflow(self, push, publish, prepare):
        c = MockContext(run=True)
        all_(c)
        # TODO: this doesn't actually prove order of operations. not seeing an
        # unhairy way to do that, but not really that worried either...:P
        prepare.assert_called_once_with(c, dry_run=False)
        publish.assert_called_once_with(c, dry_run=False)
        push.assert_called_once_with(c, dry_run=False)

    @patch("invocations.packaging.release.prepare")
    @patch("invocations.packaging.release.publish")
    @patch("invocations.packaging.release.push")
    def passes_through_dry_run_flag(self, push, publish, prepare):
        c = MockContext(run=True)
        all_(c, dry_run=True)
        prepare.assert_called_once_with(c, dry_run=True)
        publish.assert_called_once_with(c, dry_run=True)
        push.assert_called_once_with(c, dry_run=True)

    def bound_to_name_without_underscore(self):
        # Task object's CLI name drops the trailing underscore.
        assert all_.name == "all"


class namespace:
    def contains_all_tasks(self):
        names = """
            all
            build
            prepare
            publish
            push
            status
            test-install
            upload
        """.split()
        assert set(release_ns.task_names) == set(names)
1330 | def all_is_default_task(self): 1331 | assert release_ns.default == "all" 1332 | 1333 | def hides_stdout_by_default(self): 1334 | assert release_ns.configuration()["run"]["hide"] == "stdout" 1335 | -------------------------------------------------------------------------------- /tests/pytest_.py: -------------------------------------------------------------------------------- 1 | from contextlib import contextmanager 2 | 3 | from invoke import MockContext 4 | from invocations.pytest import test as _test_task, coverage 5 | from unittest.mock import Mock, call 6 | 7 | 8 | @contextmanager 9 | def _expect(flags=None, extra_flags=None, kwargs=None): 10 | if kwargs is None: 11 | kwargs = dict(pty=True) 12 | flags = flags or "--verbose --color=yes --capture=sys" 13 | if extra_flags is not None: 14 | flags = flags + " " + extra_flags 15 | c = MockContext(run=True) 16 | yield c 17 | c.run.assert_called_once_with("pytest {}".format(flags), **kwargs) 18 | 19 | 20 | class test_: 21 | def defaults_to_verbose_color_and_syscapture_with_pty_True(self): 22 | # Relies on default flags within expect helper 23 | with _expect() as c: 24 | _test_task(c) 25 | 26 | def can_turn_off_or_change_defaults(self): 27 | with _expect(flags="--capture=no", kwargs=dict(pty=False)) as c: 28 | _test_task(c, verbose=False, color=False, pty=False, capture="no") 29 | 30 | def can_passthru_k_x_and_arbitrary_opts(self): 31 | with _expect(extra_flags="--whatever -man -k 'lmao' -x") as c: 32 | _test_task(c, k="lmao", x=True, opts="--whatever -man") 33 | 34 | def can_disable_warnings(self): 35 | with _expect(extra_flags="--disable-warnings") as c: 36 | _test_task(c, warnings=False) 37 | 38 | 39 | class coverage_: 40 | _FLAGS = "--cov --no-cov-on-fail --cov-report={}" 41 | 42 | def default_args(self): 43 | with _expect(extra_flags=self._FLAGS.format("term")) as c: 44 | coverage(c) 45 | 46 | def report_type(self): 47 | with _expect(extra_flags=self._FLAGS.format("xml")) as c: 48 | coverage(c, 
report="xml") 49 | 50 | def opts(self): 51 | with _expect(extra_flags=self._FLAGS.format("term") + " --meh") as c: 52 | coverage(c, opts="--meh") 53 | 54 | def test_function(self): 55 | c = MockContext() 56 | faketest = Mock() 57 | coverage(c, tester=faketest) 58 | faketest.assert_called_once_with(c, opts=self._FLAGS.format("term")) 59 | 60 | def can_append_additional_test_tasks(self): 61 | c = MockContext(run=True, repeat=True) 62 | faketest1, faketest2 = Mock(), Mock() 63 | coverage(c, additional_testers=[faketest1, faketest2]) 64 | # Uses coverage-appending arg to pytest-cov 65 | flags = self._FLAGS.format("term") + " --cov-append" 66 | faketest1.assert_called_once_with(c, opts=flags) 67 | faketest2.assert_called_once_with(c, opts=flags) 68 | 69 | def open_html_report(self): 70 | c = MockContext(run=True, repeat=True) 71 | coverage(c, report="html") 72 | print(c.run.mock_calls) 73 | c.run.assert_any_call("open htmlcov/index.html") 74 | 75 | class codecov_support: 76 | def defaults_False(self): 77 | c = MockContext(run=True, repeat=True) 78 | coverage(c) 79 | assert call("codecov") not in c.run.mock_calls 80 | 81 | def runs_xml_and_codecov_when_True(self): 82 | c = MockContext(run=True, repeat=True) 83 | coverage(c, codecov=True) 84 | c.run.assert_has_calls([call("coverage xml"), call("codecov")]) 85 | --------------------------------------------------------------------------------