├── .circleci └── config.yml ├── .codecov.yml ├── .coveragerc ├── .flake8 ├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.rst ├── SECURITY.md ├── THOUGHTS.rst ├── codecov.yml ├── dev-requirements.txt ├── integration ├── _explicit.py ├── _support │ ├── busywork.py │ ├── err.py │ ├── nested_or_piped.py │ ├── package │ │ └── tasks │ │ │ ├── __init__.py │ │ │ └── module.py │ ├── parsing.py │ ├── regression.py │ ├── respond_base.py │ ├── respond_both.py │ ├── respond_fail.py │ ├── tasks.py │ └── tree.out ├── _util.py ├── context.py ├── main.py └── runners.py ├── invoke ├── __init__.py ├── __main__.py ├── _version.py ├── collection.py ├── completion │ ├── __init__.py │ ├── bash.completion │ ├── complete.py │ ├── fish.completion │ └── zsh.completion ├── config.py ├── context.py ├── env.py ├── exceptions.py ├── executor.py ├── loader.py ├── main.py ├── parser │ ├── __init__.py │ ├── argument.py │ ├── context.py │ └── parser.py ├── program.py ├── py.typed ├── runners.py ├── tasks.py ├── terminals.py ├── util.py ├── vendor │ ├── __init__.py │ ├── fluidity │ │ ├── LICENSE │ │ ├── __init__.py │ │ ├── backwardscompat.py │ │ └── machine.py │ ├── lexicon │ │ ├── LICENSE │ │ ├── __init__.py │ │ ├── _version.py │ │ ├── alias_dict.py │ │ └── attribute_dict.py │ └── yaml │ │ ├── __init__.py │ │ ├── composer.py │ │ ├── constructor.py │ │ ├── cyaml.py │ │ ├── dumper.py │ │ ├── emitter.py │ │ ├── error.py │ │ ├── events.py │ │ ├── loader.py │ │ ├── nodes.py │ │ ├── parser.py │ │ ├── reader.py │ │ ├── representer.py │ │ ├── resolver.py │ │ ├── scanner.py │ │ ├── serializer.py │ │ └── tokens.py └── watchers.py ├── pyproject.toml ├── pytest.ini ├── setup.py ├── sites ├── docs │ ├── .readthedocs.yaml │ ├── _static │ │ └── rtd.css │ ├── api │ │ ├── __init__.rst │ │ ├── collection.rst │ │ ├── config.rst │ │ ├── context.rst │ │ ├── exceptions.rst │ │ ├── executor.rst │ │ ├── loader.rst │ │ ├── parser.rst │ │ ├── program.rst │ │ ├── runners.rst │ │ ├── tasks.rst │ │ ├── terminals.rst │ │ ├── util.rst │ │ └── watchers.rst │ ├── concepts │ │ ├── configuration.rst │ │ ├── invoking-tasks.rst │ │ ├── library.rst │ │ ├── loading.rst │ │ ├── namespaces.rst │ │ ├── testing.rst │ │ └── watchers.rst │ ├── conf.py │ ├── getting-started.rst │ ├── index.rst │ └── invoke.rst ├── shared_conf.py └── www │ ├── .readthedocs.yaml │ ├── changelog.rst │ ├── conf.py │ ├── contact.rst │ ├── development.rst │ ├── faq.rst │ ├── index.rst │ ├── installing.rst │ └── prior-art.rst ├── tasks.py ├── tests ├── _support │ ├── alias_sorting.py │ ├── autoprint.py │ ├── branch │ │ ├── explicit.py │ │ └── tasks.py │ ├── configs │ │ ├── all-four │ │ │ ├── invoke.json │ │ │ ├── invoke.py │ │ │ ├── invoke.yaml │ │ │ └── invoke.yml │ │ ├── collection.py │ │ ├── echo.yaml │ │ ├── json-and-python │ │ │ ├── invoke.json │ │ │ └── invoke.py │ │ ├── json │ │ │ └── invoke.json │ │ ├── nested │ │ │ └── invoke.yaml │ │ ├── no-dedupe.yaml │ │ ├── no-echo.yaml │ │ ├── package │ │ │ ├── invoke.yml │ │ │ └── tasks │ │ │ │ └── __init__.py │ │ ├── python │ │ │ └── invoke.py │ │ ├── runtime.py │ │ ├── three-of-em │ │ │ ├── invoke.json │ │ │ ├── invoke.py │ │ │ └── invoke.yml │ │ ├── underscores │ │ │ ├── invoke.yaml │ │ │ └── tasks.py │ │ ├── yaml │ │ │ ├── explicit.py │ │ │ ├── invoke.yaml │ │ │ └── tasks.py │ │ └── yml │ │ │ ├── explicit.py │ │ │ ├── invoke.yml │ │ │ └── tasks.py │ ├── contextualized.py │ ├── custom_executor.py │ ├── debugging.py │ ├── decorator_multi_default.py │ ├── decorators.py │ ├── deeper_ns_list.py │ ├── depth_first.py │ ├── docstrings.py │ 
├── empty.py │ ├── empty_subcollection.py │ ├── explicit_root.py │ ├── foo.py │ ├── has_modules.py │ ├── ignoreme │ │ └── ignoremetoo │ │ │ └── .no │ ├── integration.py │ ├── namespacing.py │ ├── nontrivial_docstrings.py │ ├── oops.py │ ├── package │ │ ├── __init__.py │ │ └── module.py │ ├── simple_ns_list.py │ ├── subcollection_task_name.py │ ├── subspace │ │ ├── __init__.py │ │ └── module.py │ ├── sudo_prompt.py │ ├── tasks.py │ ├── tree.json │ └── tree │ │ ├── __init__.py │ │ ├── build │ │ ├── __init__.py │ │ ├── docs.py │ │ └── python.py │ │ ├── deploy.py │ │ └── provision.py ├── _util.py ├── cli.py ├── collection.py ├── completion.py ├── concurrency.py ├── config.py ├── conftest.py ├── context.py ├── executor.py ├── init.py ├── loader.py ├── merge_dicts.py ├── parser_argument.py ├── parser_context.py ├── parser_parser.py ├── program.py ├── runners.py ├── task.py ├── terminals.py ├── util.py └── watchers.py └── tox.ini /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | 4 | orbs: 5 | orb: invocations/orb@1.3.1 6 | 7 | 8 | jobs: 9 | # Unit+integration tests, with coverage 10 | coverage: 11 | executor: 12 | name: orb/default 13 | version: "3.6" 14 | steps: 15 | - orb/setup 16 | - run: inv ci.make-sudouser 17 | - orb/sudo-coverage 18 | - orb/debug 19 | 20 | regression: 21 | executor: 22 | name: orb/default 23 | version: "3.6" 24 | steps: 25 | - orb/setup 26 | - run: inv regression 27 | - orb/debug 28 | 29 | doctests: 30 | executor: 31 | name: orb/default 32 | version: "3.6" 33 | steps: 34 | - orb/setup 35 | - run: inv www.doctest 36 | - orb/debug 37 | 38 | typecheck: 39 | executor: 40 | name: orb/default 41 | version: "3.6" 42 | steps: 43 | - orb/setup 44 | - run: mypy . 45 | - orb/debug 46 | 47 | 48 | workflows: 49 | main: 50 | jobs: 51 | - orb/lint: 52 | name: Lint 53 | - orb/format: 54 | name: Style check 55 | - typecheck: 56 | name: Types check 57 | - coverage: 58 | name: Test 59 | - regression: 60 | name: Regression tests 61 | - orb/test-release: 62 | name: Release test 63 | - orb/test: 64 | name: Test << matrix.version >> 65 | requires: ["Test"] 66 | matrix: 67 | parameters: 68 | version: ["3.7", "3.8", "3.9", "3.10", "3.11"] 69 | - orb/docs: 70 | name: "Docs" 71 | requires: ["Test"] 72 | - doctests: 73 | name: "Doctests" 74 | requires: ["Docs"] 75 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | comment: false 2 | coverage: 3 | precision: 0 4 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | include = 4 | invoke/* 5 | tests/* 6 | omit = invoke/vendor/* 7 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude = invoke/vendor,sites,.git,build,dist,alt_env,appveyor 3 | ignore = E124,E125,E128,E261,E301,E302,E303,E306,W503,E731 4 | max-line-length = 79 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | _build 2 | build/ 3 | dist/ 4 | .coverage 5 | .tox 6 | *.egg-info 7 | *.py[cod] 8 | src/ 9 | htmlcov 10 | coverage.xml 11 | .cache 12 | .mypy_cache/ 13 
| -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2020 Jeff Forcier. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, 8 | this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 14 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 15 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 16 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 17 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 19 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 20 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 21 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 22 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include README.rst 3 | include tasks.py 4 | recursive-include invoke/completion * 5 | recursive-include sites * 6 | recursive-exclude sites/*/_build * 7 | include dev-requirements.txt 8 | recursive-include * py.typed 9 | recursive-include tests * 10 | recursive-exclude * *.pyc *.pyo 11 | recursive-exclude **/__pycache__ * 12 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | |version| |python| |license| |ci| |coverage| 2 | 3 | .. |version| image:: https://img.shields.io/pypi/v/invoke 4 | :target: https://pypi.org/project/invoke/ 5 | :alt: PyPI - Package Version 6 | .. |python| image:: https://img.shields.io/pypi/pyversions/invoke 7 | :target: https://pypi.org/project/invoke/ 8 | :alt: PyPI - Python Version 9 | .. |license| image:: https://img.shields.io/pypi/l/invoke 10 | :target: https://github.com/pyinvoke/invoke/blob/main/LICENSE 11 | :alt: PyPI - License 12 | .. |ci| image:: https://img.shields.io/circleci/build/github/pyinvoke/invoke/main 13 | :target: https://app.circleci.com/pipelines/github/pyinvoke/invoke 14 | :alt: CircleCI 15 | .. |coverage| image:: https://img.shields.io/codecov/c/gh/pyinvoke/invoke 16 | :target: https://app.codecov.io/gh/pyinvoke/invoke 17 | :alt: Codecov 18 | 19 | Welcome to Invoke! 20 | ================== 21 | 22 | Invoke is a Python library for managing shell-oriented subprocesses and 23 | organizing executable Python code into CLI-invokable tasks. It draws 24 | inspiration from various sources (``make``/``rake``, Fabric 1.x, etc) to arrive 25 | at a powerful & clean feature set. 26 | 27 | To find out what's new in this version of Invoke, please see `the changelog 28 | `_. 
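For a quick taste, here is a minimal, illustrative ``tasks.py`` (the task name,
its body, and the commands it runs are examples for this README, not part of
Invoke itself)::

    from invoke import task

    @task
    def build(c, docs=False):
        """Build the project, optionally including docs."""
        c.run("python -m build")
        if docs:
            c.run("sphinx-build docs docs/_build")

With that file in place, ``invoke --list`` shows the task and
``invoke build --docs`` (or the shorter ``inv build --docs``) runs it;
boolean keyword arguments such as ``docs`` are exposed as command-line flags.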
29 | 30 | The project maintainer keeps a `roadmap 31 | `_ on his website. 32 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Security contact information 4 | 5 | To report a security vulnerability, please use the 6 | [Tidelift security contact](https://tidelift.com/security). 7 | Tidelift will coordinate the fix and disclosure. 8 | -------------------------------------------------------------------------------- /THOUGHTS.rst: -------------------------------------------------------------------------------- 1 | ============================================== 2 | Random thoughts unsuitable for public docs yet 3 | ============================================== 4 | 5 | CLI type mapping 6 | ================ 7 | 8 | Some loose thoughts on bridging the "shell is strings, Python wants 9 | lists/dicts/integers/bools/etc" problem. 10 | 11 | Methodologies 12 | ------------- 13 | 14 | * Explicit mapping, as with ``argparse``: this particular flag turns into a 15 | list/boolean/int/whatever. Because we're specifically mapping to function 16 | keyword arguments, a little of that complexity can be removed, but generally 17 | it'll look very similar. E.g.:: 18 | 19 | @args(foo=int) 20 | def mytask(foo): 21 | ... 22 | 23 | would turn this:: 24 | 25 | $ invoke mytask --foo 7 26 | 27 | into ``7``, not ``"7"``. 28 | * Introspection-based mapping, i.e. introspecting the default values of a 29 | function signature and automatically transforming the CLI input. E.g.:: 30 | 31 | def mytask(foo=5): 32 | ... 33 | 34 | invoked as:: 35 | 36 | $ invoke mytask --foo 7 37 | 38 | results in the Python value ``7`` instead of ``"7"``, just as with the 39 | explicit example above. 40 | * Formatting-based mapping, i.e. having (optional) conventions in the string 41 | format of an incoming flag argument that cause transformations to occur. 42 | E.g. we could say that commas in an argument automatically trigger 43 | transformation into a list of strings; thus the invocation:: 44 | 45 | $ invoke mytask --items a,b,c 46 | 47 | would on the Python end turn into a call like this:: 48 | 49 | mytask(items=['a', 'b', 'c']) 50 | 51 | What to do? 52 | ~~~~~~~~~~~ 53 | 54 | We haven't decided exactly how many of these to use -- we may end up using all 55 | three of them as appropriate, with some useful/sensible default and the option 56 | to enable/disable things for power users. The trick is to balance 57 | power/features with becoming overly complicated to understand or utilize. 58 | 59 | Other types 60 | ----------- 61 | 62 | Those examples cover integers/numbers, and lists/iterables. Strings are 63 | obviously easy/the default. What else is there? 64 | 65 | * Booleans: these are relatively simple too, either a flag exists (``True``) or 66 | is omitted (``False``). 67 | 68 | * Could also work in a ``--foo`` vs ``--no-foo`` convention to help with 69 | the inverse, i.e. values which should default to ``True`` and then need 70 | to be turned "off" on the command line. E.g.:: 71 | 72 | def mytask(option=True): 73 | ... 74 | 75 | could result in having a flag called ``--no-option`` instead of 76 | ``--option``. (Or possibly both.) 
77 | 78 | * Dicts: these are tougher, but we could potentially use something like:: 79 | 80 | $ invoke mytask --dictopt key1=val1,key2=val2 81 | 82 | resulting in:: 83 | 84 | mytask(dictopt={'key1': 'val1', 'key2': 'val2'}) 85 | 86 | 87 | Parameterizing tasks 88 | ==================== 89 | 90 | Old "previous example" (at time the below was split out of live docs, the 91 | actual previous example had been changed a lot and no longer applied):: 92 | 93 | $ invoke test --module=foo test --module=bar 94 | Cleaning 95 | Testing foo 96 | Cleaning 97 | Testing bar 98 | 99 | The previous example had a bit of duplication in how it was invoked; an 100 | intermediate use case is to bundle up that sort of parameterization into a 101 | "meta" task that itself invokes other tasks in a parameterized fashion. 102 | 103 | TK: API for this? at CLI level would have to be unorthodox invocation, e.g.:: 104 | 105 | @task 106 | def foo(bar): 107 | print(bar) 108 | 109 | $ invoke --parameterize foo --param bar --values 1 2 3 4 110 | 1 111 | 2 112 | 3 113 | 4 114 | 115 | Note how there's no "real" invocation of ``foo`` in the normal sense. How to 116 | handle partial application (e.g. runtime selection of other non-parameterized 117 | arguments)? E.g.:: 118 | 119 | @task 120 | def foo(bar, biz): 121 | print("%s %s" % (bar, biz)) 122 | 123 | $ invoke --parameterize foo --param bar --values 1 2 3 4 --biz "And a" 124 | And a 1 125 | And a 2 126 | And a 3 127 | And a 4 128 | 129 | That's pretty clunky and foregoes any multi-task invocation. But how could we 130 | handle multiple tasks here? If we gave each individual task flags for this, 131 | like so:: 132 | 133 | $ invoke foo --biz "And a" --param foo --values 1 2 3 4 134 | 135 | We could do multiple tasks, but then we're stomping on tasks' argument 136 | namespaces (we've taken over ``param`` and ``values``). Really hate that. 137 | 138 | **IDEALLY** we'd still limit parameterization to library use since it's an 139 | advanced-ish feature and frequently the parameterization vector is dynamic (aka 140 | not the sort of thing you'd give at CLI anyway) 141 | 142 | Probably best to leave that in the intermediate docs and keep it lib level; 143 | it's mostly there for Fabric and advanced users, not something the average 144 | Invoke-only user would care about. Not worth the effort to make it work on CLI 145 | at this point. 146 | 147 | :: 148 | 149 | @task 150 | def stuff(var): 151 | print(var) 152 | 153 | # NOTE: may need to be part of base executor since Collection has to know 154 | # to pass the parameterization option/values into Executor().execute()? 155 | class ParameterizedExecutor(Executor): 156 | # NOTE: assumes single dimension of parameterization. 157 | # Realistically would want e.g. {'name': [values], ...} structure and 158 | # then do cross product or something 159 | def execute(self, task, args, kwargs, parameter=None, values=None): 160 | # Would be nice to generalize this? 
161 | if parameter: 162 | # TODO: handle non-None parameter w/ None values (error) 163 | # NOTE: this is where parallelization would occur; probably 164 | # need to move into sub-method 165 | for value in values: 166 | my_kwargs = dict(kwargs) 167 | my_kwargs[parameter] = value 168 | super(self, ParameterizedExecutor).execute(task, kwargs=my_kwargs) 169 | else: 170 | super(self, ParameterizedExecutor).execute(task, args, kwargs) 171 | 172 | 173 | Getting hairy: one task, with one pre-task, parameterized 174 | ========================================================= 175 | 176 | :: 177 | 178 | @task 179 | def setup(): 180 | print("Yay") 181 | 182 | @task(pre=[setup]) 183 | def build(): 184 | print("Woo") 185 | 186 | class OhGodExecutor(Executor): 187 | def execute(self, task, args, kwargs, parameter, values): 188 | # assume always parameterized meh 189 | # Run pretasks once only, instead of once per parameter value 190 | for pre in task.pre: 191 | self.execute(self.collection[pre]) 192 | for value in values: 193 | my_kwargs = dict(kwargs) 194 | my_kwargs[parameter] = value 195 | super(self, OhGodExecutor).execute(task, kwargs=my_kwargs) 196 | 197 | 198 | Still hairy: one task, with a pre-task that itself has a pre-task 199 | ================================================================= 200 | 201 | All the things: two tasks, each with pre-tasks, both parameterized 202 | ================================================================== 203 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | # No codecov comments at all, please - just the github 'checks' is sufficient 2 | comment: off 3 | -------------------------------------------------------------------------------- /dev-requirements.txt: -------------------------------------------------------------------------------- 1 | # Install self before invocations to save a bit of time 2 | -e . 3 | # Invocations, for all sorts of things 4 | invocations>=3.3 5 | # Coverage! 6 | coverage>=6.2,<7 7 | # Docs 8 | releases>=2 9 | alabaster==0.7.12 10 | # Testing 11 | pytest-relaxed>=2 12 | pytest-cov>=4 13 | # Formatting 14 | # Flake8 5.x seems to have an odd importlib-metadata incompatibility? 15 | flake8>=4,<5 16 | black>=22.8,<22.9 17 | # Packaging 18 | setuptools>56 19 | # Debuggery 20 | icecream>=2.1 21 | # typing 22 | mypy==0.971 23 | types-PyYAML==6.0.12.4 24 | -------------------------------------------------------------------------------- /integration/_explicit.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def foo(c): 6 | """ 7 | Frobazz 8 | """ 9 | print("Yup") 10 | -------------------------------------------------------------------------------- /integration/_support/busywork.py: -------------------------------------------------------------------------------- 1 | """ 2 | Program that just does busywork, yields stdout/stderr and ignores stdin. 3 | 4 | Useful for measuring CPU usage of the code interfacing with it without 5 | expecting the test environment to have much of anything. 6 | 7 | Accepts a single argv argument, which is the number of cycles to run. 
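For example, ``python -u busywork.py 10`` runs ten cycles, emitting one stdout
line and one stderr line per cycle with a 0.1s sleep in between; this is how
integration/runners.py drives it when checking that stdin mirroring is not
CPU-heavy.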
8 | """ 9 | 10 | import sys 11 | import time 12 | 13 | 14 | num_cycles = int(sys.argv[1]) 15 | 16 | for i in range(num_cycles): 17 | out = "[{}] This is my stdout, there are many like it, but...\n".format(i) 18 | print(out, file=sys.stdout, flush=True) 19 | err = "[{}] To err is human, to stderr is superhuman\n".format(i) 20 | print(out, file=sys.stderr, flush=True) 21 | time.sleep(0.1) 22 | -------------------------------------------------------------------------------- /integration/_support/err.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | 5 | stream = sys.stderr 6 | stream.write(" ".join(sys.argv[1:]) + "\n") 7 | stream.flush() 8 | 9 | # vim:set ft=python : 10 | -------------------------------------------------------------------------------- /integration/_support/nested_or_piped.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def calls_foo(c): 6 | c.run("inv -c nested_or_piped foo") 7 | 8 | 9 | @task 10 | def foo(c): 11 | c.run("echo meh") 12 | -------------------------------------------------------------------------------- /integration/_support/package/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | from invoke import Collection 2 | 3 | # Issue #934 (from #919) only seems to trigger on this style of 'from . import 4 | # xxx' - a vanilla self-contained tasks/__init__.py is still fine! 5 | from . import module 6 | 7 | ns = Collection(module) 8 | -------------------------------------------------------------------------------- /integration/_support/package/tasks/module.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def mytask(c): 6 | print("hi!") 7 | -------------------------------------------------------------------------------- /integration/_support/parsing.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task(optional=["meh"]) 5 | def foo(c, meh=False): 6 | print(meh) 7 | -------------------------------------------------------------------------------- /integration/_support/regression.py: -------------------------------------------------------------------------------- 1 | """ 2 | Barebones regression-catching script that looks for ephemeral run() failures. 3 | 4 | Intended to be run from top level of project via ``inv regression``. In an 5 | ideal world this would be truly part of the integration test suite, but: 6 | 7 | - something about the outer invoke or pytest environment seems to prevent such 8 | issues from appearing reliably (see eg issue #660) 9 | - it can take quite a while to run, even compared to other integration tests. 
10 | """ 11 | 12 | 13 | import sys 14 | 15 | from invoke import task 16 | 17 | 18 | @task 19 | def check(c): 20 | count = 0 21 | failures = [] 22 | for _ in range(0, 1000): 23 | count += 1 24 | try: 25 | # 'ls' chosen as an arbitrary, fast-enough-for-looping but 26 | # does-some-real-work example (where eg 'sleep' is less useful) 27 | response = c.run("ls", hide=True) 28 | if not response.ok: 29 | failures.append(response) 30 | except Exception as e: 31 | failures.append(e) 32 | if failures: 33 | print("run() FAILED {}/{} times!".format(len(failures), count)) 34 | sys.exit(1) 35 | else: 36 | print("No failures detected after {} runs, A-OK".format(count)) 37 | -------------------------------------------------------------------------------- /integration/_support/respond_base.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 4 | if input("What's the password?") != "Rosebud": 5 | sys.exit(1) 6 | -------------------------------------------------------------------------------- /integration/_support/respond_both.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 4 | if input("standard out") != "with it": 5 | sys.exit(1) 6 | 7 | # Since raw_input(text) defaults to stdout... 8 | sys.stderr.write("standard error") 9 | sys.stderr.flush() 10 | if input() != "between chair and keyboard": 11 | sys.exit(1) 12 | -------------------------------------------------------------------------------- /integration/_support/respond_fail.py: -------------------------------------------------------------------------------- 1 | if input("What's the password?") == "Rosebud": 2 | print("You're not Citizen Kane!") 3 | # This should sit around forever like e.g. a bad sudo prompt would, but the 4 | # responder ought to be looking for the above and aborting instead. 5 | input("Seriously, what's the password???") 6 | -------------------------------------------------------------------------------- /integration/_support/tasks.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tasks module for use within the integration tests. 
3 | """ 4 | 5 | from invoke import task 6 | 7 | 8 | @task 9 | def print_foo(c): 10 | print("foo") 11 | 12 | 13 | @task 14 | def print_name(c, name): 15 | print(name) 16 | 17 | 18 | @task 19 | def print_config(c): 20 | print(c.foo) 21 | -------------------------------------------------------------------------------- /integration/_support/tree.out: -------------------------------------------------------------------------------- 1 | docs 2 | ├── api 3 | │   ├── cli.rst 4 | │   ├── collection.rst 5 | │   ├── exceptions.rst 6 | │   ├── loader.rst 7 | │   ├── parser 8 | │   │   ├── argument.rst 9 | │   │   ├── context.rst 10 | │   │   └── parser.rst 11 | │   ├── parser.rst 12 | │   ├── runner.rst 13 | │   ├── tasks.rst 14 | │   └── util.rst 15 | ├── api.rst 16 | ├── concepts 17 | │   ├── cli 18 | │   │   ├── background.rst 19 | │   │   ├── execution.rst 20 | │   │   ├── intro.rst 21 | │   │   └── type_mapping.rst 22 | │   ├── cli.rst 23 | │   ├── execution.rst 24 | │   ├── loading.rst 25 | │   └── namespaces.rst 26 | ├── concepts.rst 27 | ├── conf.py 28 | ├── contributing.rst 29 | ├── index.rst 30 | └── prior_art.rst 31 | 32 | 4 directories, 25 files 33 | -------------------------------------------------------------------------------- /integration/_util.py: -------------------------------------------------------------------------------- 1 | from contextlib import contextmanager 2 | from functools import wraps 3 | from resource import getrusage, RUSAGE_SELF 4 | import sys 5 | import time 6 | 7 | 8 | from pytest import skip 9 | 10 | 11 | def current_cpu_usage(): 12 | rusage = getrusage(RUSAGE_SELF) 13 | return rusage.ru_utime + rusage.ru_stime 14 | 15 | 16 | @contextmanager 17 | def assert_cpu_usage(lt, verbose=False): 18 | """ 19 | Execute wrapped block, asserting CPU utilization was less than ``lt``%. 20 | 21 | :param float lt: CPU use percentage above which failure will occur. 22 | :param bool verbose: Whether to print out the calculated percentage. 23 | """ 24 | start_usage = current_cpu_usage() 25 | start_time = time.time() 26 | yield 27 | end_usage = current_cpu_usage() 28 | end_time = time.time() 29 | 30 | usage_diff = end_usage - start_usage 31 | time_diff = end_time - start_time 32 | 33 | if time_diff == 0: # Apparently possible! 34 | time_diff = 0.000001 35 | 36 | percentage = (usage_diff / time_diff) * 100.0 37 | 38 | if verbose: 39 | print("Used {0:.2}% CPU over {1:.2}s".format(percentage, time_diff)) 40 | 41 | assert percentage < lt 42 | 43 | 44 | def only_utf8(f): 45 | """ 46 | Decorator causing tests to skip if local shell pipes aren't UTF-8. 47 | """ 48 | # TODO: use actual test selection labels or whatever nose has 49 | @wraps(f) 50 | def inner(*args, **kwargs): 51 | if getattr(sys.stdout, "encoding", None) == "UTF-8": 52 | return f(*args, **kwargs) 53 | # TODO: could remove this so they show green, but figure yellow is more 54 | # appropriate 55 | skip() 56 | 57 | return inner 58 | -------------------------------------------------------------------------------- /integration/context.py: -------------------------------------------------------------------------------- 1 | from invoke import Context, Config 2 | from invocations import ci as ci_mod 3 | 4 | 5 | class Context_: 6 | class sudo: 7 | def base_case(self): 8 | c = Context() 9 | # Grab CI-oriented sudo user/pass direct from invocations.ci 10 | # TODO: might be nice to give Collection a way to get a Config 11 | # object direct, instead of a dict? 
12 | ci_conf = Config(ci_mod.ns.configuration()).ci.sudo 13 | user = ci_conf.user 14 | c.config.sudo.password = ci_conf.password 15 | # Safety 1: ensure configured user even exists 16 | assert c.run("id {}".format(user), warn=True) 17 | # Safety 2: make sure we ARE them (and not eg root already) 18 | assert c.run("whoami", hide=True).stdout.strip() == user 19 | assert c.sudo("whoami", hide=True).stdout.strip() == "root" 20 | -------------------------------------------------------------------------------- /integration/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | import sys 4 | 5 | import pytest 6 | from pytest_relaxed import trap 7 | 8 | from invoke import run 9 | from invoke._version import __version__ 10 | from invoke.terminals import WINDOWS 11 | 12 | from _util import only_utf8 13 | 14 | 15 | def _output_eq(cmd, expected): 16 | assert run(cmd, hide=True).stdout == expected 17 | 18 | 19 | class Main: 20 | def setup_method(self): 21 | self.cwd = os.getcwd() 22 | # Enter integration/_support as all support files are in there now 23 | os.chdir(Path(__file__).parent / "_support") 24 | 25 | def teardown_method(self): 26 | os.chdir(self.cwd) 27 | 28 | class basics: 29 | @trap 30 | def basic_invocation(self): 31 | _output_eq("invoke print-foo", "foo\n") 32 | 33 | @trap 34 | def version_output(self): 35 | _output_eq("invoke --version", "Invoke {}\n".format(__version__)) 36 | 37 | @trap 38 | def help_output(self): 39 | assert "Usage: inv[oke] " in run("invoke --help").stdout 40 | 41 | @trap 42 | def per_task_help(self): 43 | assert "Frobazz" in run("invoke -c _explicit foo --help").stdout 44 | 45 | @trap 46 | def shorthand_binary_name(self): 47 | _output_eq("inv print-foo", "foo\n") 48 | 49 | @trap 50 | def explicit_task_module(self): 51 | _output_eq("inv --collection _explicit foo", "Yup\n") 52 | 53 | @trap 54 | def invocation_with_args(self): 55 | _output_eq("inv print-name --name whatevs", "whatevs\n") 56 | 57 | @trap 58 | def bad_collection_exits_nonzero(self): 59 | result = run("inv -c nope -l", warn=True) 60 | assert result.exited == 1 61 | assert not result.stdout 62 | assert result.stderr 63 | 64 | @trap 65 | def package_style_collections_internally_importable(self): 66 | # After merging #919 blew this up and unit tests did not detect! 67 | result = run("cd package && inv -l") 68 | assert "mytask" in result.stdout 69 | 70 | def loads_real_user_config(self): 71 | path = os.path.expanduser("~/.invoke.yaml") 72 | try: 73 | with open(path, "w") as fd: 74 | fd.write("foo: bar") 75 | _output_eq("inv print-config", "bar\n") 76 | finally: 77 | try: 78 | os.unlink(path) 79 | except OSError: 80 | pass 81 | 82 | @trap 83 | def invocable_via_python_dash_m(self): 84 | _output_eq( 85 | "python -m invoke print-name --name mainline", "mainline\n" 86 | ) 87 | 88 | class funky_characters_in_stdout: 89 | @only_utf8 90 | def basic_nonstandard_characters(self): 91 | # Crummy "doesn't explode with decode errors" test 92 | cmd = ("type" if WINDOWS else "cat") + " tree.out" 93 | run(cmd, hide="stderr") 94 | 95 | @only_utf8 96 | def nonprinting_bytes(self): 97 | # Seriously non-printing characters (i.e. non UTF8) also don't 98 | # asplode (they would print as escapes normally, but still) 99 | run("echo '\xff'", hide="stderr") 100 | 101 | @only_utf8 102 | def nonprinting_bytes_pty(self): 103 | if WINDOWS: 104 | return 105 | # PTY use adds another utf-8 decode spot which can also fail. 
106 | run("echo '\xff'", pty=True, hide="stderr") 107 | 108 | class ptys: 109 | def complex_nesting_under_ptys_doesnt_break(self): 110 | if WINDOWS: # Not sure how to make this work on Windows 111 | return 112 | # GH issue 191 113 | substr = " hello\t\t\nworld with spaces" 114 | cmd = """ eval 'echo "{}" ' """.format(substr) 115 | expected = " hello\t\t\r\nworld with spaces\r\n" 116 | assert run(cmd, pty=True, hide="both").stdout == expected 117 | 118 | def pty_puts_both_streams_in_stdout(self): 119 | if WINDOWS: 120 | return 121 | err_echo = "{} err.py".format(sys.executable) 122 | command = "echo foo && {} bar".format(err_echo) 123 | r = run(command, hide="both", pty=True) 124 | assert r.stdout == "foo\r\nbar\r\n" 125 | assert r.stderr == "" 126 | 127 | def simple_command_with_pty(self): 128 | """ 129 | Run command under PTY 130 | """ 131 | # Most Unix systems should have stty, which asplodes when not run 132 | # under a pty, and prints useful info otherwise 133 | result = run("stty -a", hide=True, pty=True) 134 | # PTYs use \r\n, not \n, line separation 135 | assert "\r\n" in result.stdout 136 | assert result.pty is True 137 | 138 | @pytest.mark.skip(reason="CircleCI env actually does have 0x0 stty") 139 | def pty_size_is_realistic(self): 140 | # When we don't explicitly set pty size, 'stty size' sees it as 141 | # 0x0. 142 | # When we do set it, it should be some non 0x0, non 80x24 (the 143 | # default) value. (yes, this means it fails if you really do have 144 | # an 80x24 terminal. but who does that?) 145 | size = run("stty size", hide=True, pty=True).stdout.strip() 146 | assert size != "" 147 | assert size != "0 0" 148 | assert size != "24 80" 149 | 150 | class parsing: 151 | def false_as_optional_arg_default_value_works_okay(self): 152 | # (Dis)proves #416. When bug present, parser gets very confused, 153 | # asks "what the hell is 'whee'?". See also a unit test for 154 | # Task.get_arguments. 155 | for argstr, expected in ( 156 | ("", "False"), 157 | ("--meh", "True"), 158 | ("--meh=whee", "whee"), 159 | ): 160 | _output_eq( 161 | "inv -c parsing foo {}".format(argstr), expected + "\n" 162 | ) 163 | -------------------------------------------------------------------------------- /integration/runners.py: -------------------------------------------------------------------------------- 1 | import os 2 | import platform 3 | import time 4 | 5 | from unittest.mock import Mock 6 | from pytest import skip, raises 7 | 8 | from invoke import ( 9 | run, 10 | Local, 11 | Context, 12 | ThreadException, 13 | Responder, 14 | FailingResponder, 15 | WatcherError, 16 | Failure, 17 | CommandTimedOut, 18 | ) 19 | 20 | from _util import assert_cpu_usage 21 | 22 | 23 | PYPY = platform.python_implementation() == "PyPy" 24 | 25 | 26 | class Runner_: 27 | def setup(self): 28 | os.chdir(os.path.join(os.path.dirname(__file__), "_support")) 29 | 30 | class responding: 31 | def base_case(self): 32 | # Basic "doesn't explode" test: respond.py will exit nonzero unless 33 | # this works, causing a Failure. 34 | watcher = Responder(r"What's the password\?", "Rosebud\n") 35 | # Gotta give -u or Python will line-buffer its stdout, so we'll 36 | # never actually see the prompt. 
37 | run( 38 | "python -u respond_base.py", 39 | watchers=[watcher], 40 | hide=True, 41 | timeout=5, 42 | ) 43 | 44 | def both_streams(self): 45 | watchers = [ 46 | Responder("standard out", "with it\n"), 47 | Responder("standard error", "between chair and keyboard\n"), 48 | ] 49 | run( 50 | "python -u respond_both.py", 51 | watchers=watchers, 52 | hide=True, 53 | timeout=5, 54 | ) 55 | 56 | def watcher_errors_become_Failures(self): 57 | watcher = FailingResponder( 58 | pattern=r"What's the password\?", 59 | response="Rosebud\n", 60 | sentinel="You're not Citizen Kane!", 61 | ) 62 | try: 63 | run( 64 | "python -u respond_fail.py", 65 | watchers=[watcher], 66 | hide=True, 67 | timeout=5, 68 | ) 69 | except Failure as e: 70 | assert isinstance(e.reason, WatcherError) 71 | assert e.result.exited is None 72 | else: 73 | assert False, "Did not raise Failure!" 74 | 75 | class stdin_mirroring: 76 | def piped_stdin_is_not_conflated_with_mocked_stdin(self): 77 | # Re: GH issue #308 78 | # Will die on broken-pipe OSError if bug is present. 79 | run("echo 'lollerskates' | inv -c nested_or_piped foo", hide=True) 80 | 81 | def nested_invoke_sessions_not_conflated_with_mocked_stdin(self): 82 | # Also re: GH issue #308. This one will just hang forever. Woo! 83 | run("inv -c nested_or_piped calls-foo", hide=True) 84 | 85 | def isnt_cpu_heavy(self): 86 | "stdin mirroring isn't CPU-heavy" 87 | # CPU measurement under PyPy is...rather different. NBD. 88 | if PYPY: 89 | skip() 90 | # Python 3.5 has been seen using up to ~6.0s CPU time under Travis 91 | with assert_cpu_usage(lt=7.0): 92 | run("python -u busywork.py 10", pty=True, hide=True) 93 | 94 | def doesnt_break_when_stdin_exists_but_null(self): 95 | # Re: #425 - IOError occurs when bug present 96 | run("inv -c nested_or_piped foo < /dev/null", hide=True) 97 | 98 | class IO_hangs: 99 | "IO hangs" 100 | 101 | def _hang_on_full_pipe(self, pty): 102 | class Whoops(Exception): 103 | pass 104 | 105 | runner = Local(Context()) 106 | # Force runner IO thread-body method to raise an exception to mimic 107 | # real world encoding explosions/etc. When bug is present, this 108 | # will make the test hang until forcibly terminated. 109 | runner.handle_stdout = Mock(side_effect=Whoops, __name__="sigh") 110 | # NOTE: both Darwin (10.10) and Linux (Travis' docker image) have 111 | # this file. It's plenty large enough to fill most pipe buffers, 112 | # which is the triggering behavior. 113 | try: 114 | runner.run("cat /usr/share/dict/words", pty=pty) 115 | except ThreadException as e: 116 | assert len(e.exceptions) == 1 117 | assert e.exceptions[0].type is Whoops 118 | else: 119 | assert False, "Did not receive expected ThreadException!" 120 | 121 | def pty_subproc_should_not_hang_if_IO_thread_has_an_exception(self): 122 | self._hang_on_full_pipe(pty=True) 123 | 124 | def nonpty_subproc_should_not_hang_if_IO_thread_has_an_exception(self): 125 | self._hang_on_full_pipe(pty=False) 126 | 127 | class timeouts: 128 | def does_not_fire_when_command_quick(self): 129 | assert run("sleep 1", timeout=5) 130 | 131 | def triggers_exception_when_command_slow(self): 132 | before = time.time() 133 | with raises(CommandTimedOut) as info: 134 | run("sleep 5", timeout=0.5) 135 | after = time.time() 136 | # Fudge real time check a bit, <=0.5 typically fails due to 137 | # overhead etc. May need raising further to avoid races? Meh. 
138 | assert (after - before) <= 0.75 139 | # Sanity checks of the exception obj 140 | assert info.value.timeout == 0.5 141 | assert info.value.result.command == "sleep 5" 142 | -------------------------------------------------------------------------------- /invoke/__init__.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional 2 | 3 | from ._version import __version_info__, __version__ # noqa 4 | from .collection import Collection # noqa 5 | from .config import Config # noqa 6 | from .context import Context, MockContext # noqa 7 | from .exceptions import ( # noqa 8 | AmbiguousEnvVar, 9 | AuthFailure, 10 | CollectionNotFound, 11 | Exit, 12 | ParseError, 13 | PlatformError, 14 | ResponseNotAccepted, 15 | SubprocessPipeError, 16 | ThreadException, 17 | UncastableEnvVar, 18 | UnexpectedExit, 19 | UnknownFileType, 20 | UnpicklableConfigMember, 21 | WatcherError, 22 | CommandTimedOut, 23 | ) 24 | from .executor import Executor # noqa 25 | from .loader import FilesystemLoader # noqa 26 | from .parser import Argument, Parser, ParserContext, ParseResult # noqa 27 | from .program import Program # noqa 28 | from .runners import Runner, Local, Failure, Result, Promise # noqa 29 | from .tasks import task, call, Call, Task # noqa 30 | from .terminals import pty_size # noqa 31 | from .watchers import FailingResponder, Responder, StreamWatcher # noqa 32 | 33 | 34 | def run(command: str, **kwargs: Any) -> Optional[Result]: 35 | """ 36 | Run ``command`` in a subprocess and return a `.Result` object. 37 | 38 | See `.Runner.run` for API details. 39 | 40 | .. note:: 41 | This function is a convenience wrapper around Invoke's `.Context` and 42 | `.Runner` APIs. 43 | 44 | Specifically, it creates an anonymous `.Context` instance and calls its 45 | `~.Context.run` method, which in turn defaults to using a `.Local` 46 | runner subclass for command execution. 47 | 48 | .. versionadded:: 1.0 49 | """ 50 | return Context().run(command, **kwargs) 51 | 52 | 53 | def sudo(command: str, **kwargs: Any) -> Optional[Result]: 54 | """ 55 | Run ``command`` in a ``sudo`` subprocess and return a `.Result` object. 56 | 57 | See `.Context.sudo` for API details, such as the ``password`` kwarg. 58 | 59 | .. note:: 60 | This function is a convenience wrapper around Invoke's `.Context` and 61 | `.Runner` APIs. 62 | 63 | Specifically, it creates an anonymous `.Context` instance and calls its 64 | `~.Context.sudo` method, which in turn defaults to using a `.Local` 65 | runner subclass for command execution (plus sudo-related bits & 66 | pieces). 67 | 68 | .. 
versionadded:: 1.4 69 | """ 70 | return Context().sudo(command, **kwargs) 71 | -------------------------------------------------------------------------------- /invoke/__main__.py: -------------------------------------------------------------------------------- 1 | from invoke.main import program 2 | 3 | program.run() 4 | -------------------------------------------------------------------------------- /invoke/_version.py: -------------------------------------------------------------------------------- 1 | __version_info__ = (2, 2, 0) 2 | __version__ = ".".join(map(str, __version_info__)) 3 | -------------------------------------------------------------------------------- /invoke/completion/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyinvoke/invoke/65dd896d93994d423feb46313f651ae8a021c6d7/invoke/completion/__init__.py -------------------------------------------------------------------------------- /invoke/completion/bash.completion: -------------------------------------------------------------------------------- 1 | # Invoke tab-completion script to be sourced with Bash shell. 2 | # Known to work on Bash 3.x, untested on 4.x. 3 | 4 | _complete_{binary}() {{ 5 | local candidates 6 | 7 | # COMP_WORDS contains the entire command string up til now (including 8 | # program name). 9 | # We hand it to Invoke so it can figure out the current context: spit back 10 | # core options, task names, the current task's options, or some combo. 11 | candidates=`{binary} --complete -- ${{COMP_WORDS[*]}}` 12 | 13 | # `compgen -W` takes list of valid options & a partial word & spits back 14 | # possible matches. Necessary for any partial word completions (vs 15 | # completions performed when no partial words are present). 16 | # 17 | # $2 is the current word or token being tabbed on, either empty string or a 18 | # partial word, and thus wants to be compgen'd to arrive at some subset of 19 | # our candidate list which actually matches. 20 | # 21 | # COMPREPLY is the list of valid completions handed back to `complete`. 22 | COMPREPLY=( $(compgen -W "${{candidates}}" -- $2) ) 23 | }} 24 | 25 | 26 | # Tell shell builtin to use the above for completing our invocations. 27 | # * -F: use given function name to generate completions. 28 | # * -o default: when function generates no results, use filenames. 29 | # * positional args: program names to complete for. 30 | complete -F _complete_{binary} -o default {spaced_names} 31 | 32 | # vim: set ft=sh : 33 | -------------------------------------------------------------------------------- /invoke/completion/complete.py: -------------------------------------------------------------------------------- 1 | """ 2 | Command-line completion mechanisms, executed by the core ``--complete`` flag. 3 | """ 4 | 5 | from typing import List 6 | import glob 7 | import os 8 | import re 9 | import shlex 10 | from typing import TYPE_CHECKING 11 | 12 | from ..exceptions import Exit, ParseError 13 | from ..util import debug, task_name_sort_key 14 | 15 | if TYPE_CHECKING: 16 | from ..collection import Collection 17 | from ..parser import Parser, ParseResult, ParserContext 18 | 19 | 20 | def complete( 21 | names: List[str], 22 | core: "ParseResult", 23 | initial_context: "ParserContext", 24 | collection: "Collection", 25 | parser: "Parser", 26 | ) -> Exit: 27 | # Strip out program name (scripts give us full command line) 28 | # TODO: this may not handle path/to/script though? 
29 | invocation = re.sub(r"^({}) ".format("|".join(names)), "", core.remainder) 30 | debug("Completing for invocation: {!r}".format(invocation)) 31 | # Tokenize (shlex will have to do) 32 | tokens = shlex.split(invocation) 33 | # Handle flags (partial or otherwise) 34 | if tokens and tokens[-1].startswith("-"): 35 | tail = tokens[-1] 36 | debug("Invocation's tail {!r} is flag-like".format(tail)) 37 | # Gently parse invocation to obtain 'current' context. 38 | # Use last seen context in case of failure (required for 39 | # otherwise-invalid partial invocations being completed). 40 | 41 | contexts: List[ParserContext] 42 | try: 43 | debug("Seeking context name in tokens: {!r}".format(tokens)) 44 | contexts = parser.parse_argv(tokens) 45 | except ParseError as e: 46 | msg = "Got parser error ({!r}), grabbing its last-seen context {!r}" # noqa 47 | debug(msg.format(e, e.context)) 48 | contexts = [e.context] if e.context is not None else [] 49 | # Fall back to core context if no context seen. 50 | debug("Parsed invocation, contexts: {!r}".format(contexts)) 51 | if not contexts or not contexts[-1]: 52 | context = initial_context 53 | else: 54 | context = contexts[-1] 55 | debug("Selected context: {!r}".format(context)) 56 | # Unknown flags (could be e.g. only partially typed out; could be 57 | # wholly invalid; doesn't matter) complete with flags. 58 | debug("Looking for {!r} in {!r}".format(tail, context.flags)) 59 | if tail not in context.flags: 60 | debug("Not found, completing with flag names") 61 | # Long flags - partial or just the dashes - complete w/ long flags 62 | if tail.startswith("--"): 63 | for name in filter( 64 | lambda x: x.startswith("--"), context.flag_names() 65 | ): 66 | print(name) 67 | # Just a dash, completes with all flags 68 | elif tail == "-": 69 | for name in context.flag_names(): 70 | print(name) 71 | # Otherwise, it's something entirely invalid (a shortflag not 72 | # recognized, or a java style flag like -foo) so return nothing 73 | # (the shell will still try completing with files, but that doesn't 74 | # hurt really.) 75 | else: 76 | pass 77 | # Known flags complete w/ nothing or tasks, depending 78 | else: 79 | # Flags expecting values: do nothing, to let default (usually 80 | # file) shell completion occur (which we actively want in this 81 | # case.) 82 | if context.flags[tail].takes_value: 83 | debug("Found, and it takes a value, so no completion") 84 | pass 85 | # Not taking values (eg bools): print task names 86 | else: 87 | debug("Found, takes no value, printing task names") 88 | print_task_names(collection) 89 | # If not a flag, is either task name or a flag value, so just complete 90 | # task names. 91 | else: 92 | debug("Last token isn't flag-like, just printing task names") 93 | print_task_names(collection) 94 | raise Exit 95 | 96 | 97 | def print_task_names(collection: "Collection") -> None: 98 | for name in sorted(collection.task_names, key=task_name_sort_key): 99 | print(name) 100 | # Just stick aliases after the thing they're aliased to. Sorting isn't 101 | # so important that it's worth bending over backwards here. 102 | for alias in collection.task_names[name]: 103 | print(alias) 104 | 105 | 106 | def print_completion_script(shell: str, names: List[str]) -> None: 107 | # Grab all .completion files in invoke/completion/. (These used to have no 108 | # suffix, but surprise, that's super fragile. 
109 | completions = { 110 | os.path.splitext(os.path.basename(x))[0]: x 111 | for x in glob.glob( 112 | os.path.join( 113 | os.path.dirname(os.path.realpath(__file__)), "*.completion" 114 | ) 115 | ) 116 | } 117 | try: 118 | path = completions[shell] 119 | except KeyError: 120 | err = 'Completion for shell "{}" not supported (options are: {}).' 121 | raise ParseError(err.format(shell, ", ".join(sorted(completions)))) 122 | debug("Printing completion script from {}".format(path)) 123 | # Choose one arbitrary program name for script's own internal invocation 124 | # (also used to construct completion function names when necessary) 125 | binary = names[0] 126 | with open(path, "r") as script: 127 | print( 128 | script.read().format(binary=binary, spaced_names=" ".join(names)) 129 | ) 130 | -------------------------------------------------------------------------------- /invoke/completion/fish.completion: -------------------------------------------------------------------------------- 1 | # Invoke tab-completion script for the fish shell 2 | # Copy it to the ~/.config/fish/completions directory 3 | 4 | function __complete_{binary} 5 | {binary} --complete -- (commandline --tokenize) 6 | end 7 | 8 | # --no-files: Don't complete files unless invoke gives an empty result 9 | # TODO: find a way to honor all binary_names 10 | complete --command {binary} --no-files --arguments '(__complete_{binary})' 11 | -------------------------------------------------------------------------------- /invoke/completion/zsh.completion: -------------------------------------------------------------------------------- 1 | # Invoke tab-completion script to be sourced with the Z shell. 2 | # Known to work on zsh 5.0.x, probably works on later 4.x releases as well (as 3 | # it uses the older compctl completion system). 4 | 5 | _complete_{binary}() {{ 6 | # `words` contains the entire command string up til now (including 7 | # program name). 8 | # 9 | # We hand it to Invoke so it can figure out the current context: spit back 10 | # core options, task names, the current task's options, or some combo. 11 | # 12 | # Before doing so, we attempt to tease out any collection flag+arg so we 13 | # can ensure it is applied correctly. 14 | collection_arg='' 15 | if [[ "${{words}}" =~ "(-c|--collection) [^ ]+" ]]; then 16 | collection_arg=$MATCH 17 | fi 18 | # `reply` is the array of valid completions handed back to `compctl`. 19 | # Use ${{=...}} to force whitespace splitting in expansion of 20 | # $collection_arg 21 | reply=( $({binary} ${{=collection_arg}} --complete -- ${{words}}) ) 22 | }} 23 | 24 | 25 | # Tell shell builtin to use the above for completing our given binary name(s). 26 | # * -K: use given function name to generate completions. 27 | # * +: specifies 'alternative' completion, where options after the '+' are only 28 | # used if the completion from the options before the '+' result in no matches. 29 | # * -f: when function generates no results, use filenames. 30 | # * positional args: program names to complete for. 31 | compctl -K _complete_{binary} + -f {spaced_names} 32 | 33 | # vim: set ft=sh : 34 | -------------------------------------------------------------------------------- /invoke/env.py: -------------------------------------------------------------------------------- 1 | """ 2 | Environment variable configuration loading class. 3 | 4 | Using a class here doesn't really model anything but makes state passing (in a 5 | situation requiring it) more convenient. 
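As an illustration (the key path here is just an example): a config leaf
reachable as ``config.run.echo`` maps to the environment variable
``INVOKE_RUN_ECHO`` under Invoke's default prefix, and ``Environment._cast``
coerces the incoming string based on the existing value's type, e.g. a boolean
default treats ``"0"`` and the empty string as False and any other value as
True.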
6 | 7 | This module is currently considered private/an implementation detail and should 8 | not be included in the Sphinx API documentation. 9 | """ 10 | 11 | import os 12 | from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Sequence 13 | 14 | from .exceptions import UncastableEnvVar, AmbiguousEnvVar 15 | from .util import debug 16 | 17 | if TYPE_CHECKING: 18 | from .config import Config 19 | 20 | 21 | class Environment: 22 | def __init__(self, config: "Config", prefix: str) -> None: 23 | self._config = config 24 | self._prefix = prefix 25 | self.data: Dict[str, Any] = {} # Accumulator 26 | 27 | def load(self) -> Dict[str, Any]: 28 | """ 29 | Return a nested dict containing values from `os.environ`. 30 | 31 | Specifically, values whose keys map to already-known configuration 32 | settings, allowing us to perform basic typecasting. 33 | 34 | See :ref:`env-vars` for details. 35 | """ 36 | # Obtain allowed env var -> existing value map 37 | env_vars = self._crawl(key_path=[], env_vars={}) 38 | m = "Scanning for env vars according to prefix: {!r}, mapping: {!r}" 39 | debug(m.format(self._prefix, env_vars)) 40 | # Check for actual env var (honoring prefix) and try to set 41 | for env_var, key_path in env_vars.items(): 42 | real_var = (self._prefix or "") + env_var 43 | if real_var in os.environ: 44 | self._path_set(key_path, os.environ[real_var]) 45 | debug("Obtained env var config: {!r}".format(self.data)) 46 | return self.data 47 | 48 | def _crawl( 49 | self, key_path: List[str], env_vars: Mapping[str, Sequence[str]] 50 | ) -> Dict[str, Any]: 51 | """ 52 | Examine config at location ``key_path`` & return potential env vars. 53 | 54 | Uses ``env_vars`` dict to determine if a conflict exists, and raises an 55 | exception if so. This dict is of the following form:: 56 | 57 | { 58 | 'EXPECTED_ENV_VAR_HERE': ['actual', 'nested', 'key_path'], 59 | ... 60 | } 61 | 62 | Returns another dictionary of new keypairs as per above. 63 | """ 64 | new_vars: Dict[str, List[str]] = {} 65 | obj = self._path_get(key_path) 66 | # Sub-dict -> recurse 67 | if ( 68 | hasattr(obj, "keys") 69 | and callable(obj.keys) 70 | and hasattr(obj, "__getitem__") 71 | ): 72 | for key in obj.keys(): 73 | merged_vars = dict(env_vars, **new_vars) 74 | merged_path = key_path + [key] 75 | crawled = self._crawl(merged_path, merged_vars) 76 | # Handle conflicts 77 | for key in crawled: 78 | if key in new_vars: 79 | err = "Found >1 source for {}" 80 | raise AmbiguousEnvVar(err.format(key)) 81 | # Merge and continue 82 | new_vars.update(crawled) 83 | # Other -> is leaf, no recursion 84 | else: 85 | new_vars[self._to_env_var(key_path)] = key_path 86 | return new_vars 87 | 88 | def _to_env_var(self, key_path: Iterable[str]) -> str: 89 | return "_".join(key_path).upper() 90 | 91 | def _path_get(self, key_path: Iterable[str]) -> "Config": 92 | # Gets are from self._config because that's what determines valid env 93 | # vars and/or values for typecasting. 94 | obj = self._config 95 | for key in key_path: 96 | obj = obj[key] 97 | return obj 98 | 99 | def _path_set(self, key_path: Sequence[str], value: str) -> None: 100 | # Sets are to self.data since that's what we are presenting to the 101 | # outer config object and debugging. 
102 | obj = self.data 103 | for key in key_path[:-1]: 104 | if key not in obj: 105 | obj[key] = {} 106 | obj = obj[key] 107 | old = self._path_get(key_path) 108 | new = self._cast(old, value) 109 | obj[key_path[-1]] = new 110 | 111 | def _cast(self, old: Any, new: Any) -> Any: 112 | if isinstance(old, bool): 113 | return new not in ("0", "") 114 | elif isinstance(old, str): 115 | return new 116 | elif old is None: 117 | return new 118 | elif isinstance(old, (list, tuple)): 119 | err = "Can't adapt an environment string into a {}!" 120 | err = err.format(type(old)) 121 | raise UncastableEnvVar(err) 122 | else: 123 | return old.__class__(new) 124 | -------------------------------------------------------------------------------- /invoke/loader.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from importlib.machinery import ModuleSpec 4 | from importlib.util import module_from_spec, spec_from_file_location 5 | from pathlib import Path 6 | from types import ModuleType 7 | from typing import Any, Optional, Tuple 8 | 9 | from . import Config 10 | from .exceptions import CollectionNotFound 11 | from .util import debug 12 | 13 | 14 | class Loader: 15 | """ 16 | Abstract class defining how to find/import a session's base `.Collection`. 17 | 18 | .. versionadded:: 1.0 19 | """ 20 | 21 | def __init__(self, config: Optional["Config"] = None) -> None: 22 | """ 23 | Set up a new loader with some `.Config`. 24 | 25 | :param config: 26 | An explicit `.Config` to use; it is referenced for loading-related 27 | config options. Defaults to an anonymous ``Config()`` if none is 28 | given. 29 | """ 30 | if config is None: 31 | config = Config() 32 | self.config = config 33 | 34 | def find(self, name: str) -> Optional[ModuleSpec]: 35 | """ 36 | Implementation-specific finder method seeking collection ``name``. 37 | 38 | Must return a ModuleSpec valid for use by `importlib`, which is 39 | typically a name string followed by the contents of the 3-tuple 40 | returned by `importlib.module_from_spec` (``name``, ``loader``, 41 | ``origin``.) 42 | 43 | For a sample implementation, see `.FilesystemLoader`. 44 | 45 | .. versionadded:: 1.0 46 | """ 47 | raise NotImplementedError 48 | 49 | def load(self, name: Optional[str] = None) -> Tuple[ModuleType, str]: 50 | """ 51 | Load and return collection module identified by ``name``. 52 | 53 | This method requires a working implementation of `.find` in order to 54 | function. 55 | 56 | In addition to importing the named module, it will add the module's 57 | parent directory to the front of `sys.path` to provide normal Python 58 | import behavior (i.e. so the loaded module may load local-to-it modules 59 | or packages.) 60 | 61 | :returns: 62 | Two-tuple of ``(module, directory)`` where ``module`` is the 63 | collection-containing Python module object, and ``directory`` is 64 | the string path to the directory the module was found in. 65 | 66 | .. versionadded:: 1.0 67 | """ 68 | if name is None: 69 | name = self.config.tasks.collection_name 70 | spec = self.find(name) 71 | if spec and spec.loader and spec.origin: 72 | # Typically either tasks.py or tasks/__init__.py 73 | source_file = Path(spec.origin) 74 | # Will be 'the dir tasks.py is in', or 'tasks/', in both cases this 75 | # is what wants to be in sys.path for "from . import sibling" 76 | enclosing_dir = source_file.parent 77 | # Will be "the directory above the spot that 'import tasks' found", 78 | # namely the parent of "your task tree", i.e. 
"where project level 79 | # config files are looked for". So, same as enclosing_dir for 80 | # tasks.py, but one more level up for tasks/__init__.py... 81 | module_parent = enclosing_dir 82 | if spec.parent: # it's a package, so we have to go up again 83 | module_parent = module_parent.parent 84 | # Get the enclosing dir on the path 85 | enclosing_str = str(enclosing_dir) 86 | if enclosing_str not in sys.path: 87 | sys.path.insert(0, enclosing_str) 88 | # Actual import 89 | module = module_from_spec(spec) 90 | sys.modules[spec.name] = module # so 'from . import xxx' works 91 | spec.loader.exec_module(module) 92 | # Return the module and the folder it was found in 93 | return module, str(module_parent) 94 | msg = "ImportError loading {!r}, raising ImportError" 95 | debug(msg.format(name)) 96 | raise ImportError 97 | 98 | 99 | class FilesystemLoader(Loader): 100 | """ 101 | Loads Python files from the filesystem (e.g. ``tasks.py``.) 102 | 103 | Searches recursively towards filesystem root from a given start point. 104 | 105 | .. versionadded:: 1.0 106 | """ 107 | 108 | # TODO: could introduce config obj here for transmission to Collection 109 | # TODO: otherwise Loader has to know about specific bits to transmit, such 110 | # as auto-dashes, and has to grow one of those for every bit Collection 111 | # ever needs to know 112 | def __init__(self, start: Optional[str] = None, **kwargs: Any) -> None: 113 | super().__init__(**kwargs) 114 | if start is None: 115 | start = self.config.tasks.search_root 116 | self._start = start 117 | 118 | @property 119 | def start(self) -> str: 120 | # Lazily determine default CWD if configured value is falsey 121 | return self._start or os.getcwd() 122 | 123 | def find(self, name: str) -> Optional[ModuleSpec]: 124 | debug("FilesystemLoader find starting at {!r}".format(self.start)) 125 | spec = None 126 | module = "{}.py".format(name) 127 | paths = self.start.split(os.sep) 128 | try: 129 | # walk the path upwards to check for dynamic import 130 | for x in reversed(range(len(paths) + 1)): 131 | path = os.sep.join(paths[0:x]) 132 | if module in os.listdir(path): 133 | spec = spec_from_file_location( 134 | name, os.path.join(path, module) 135 | ) 136 | break 137 | elif name in os.listdir(path) and os.path.exists( 138 | os.path.join(path, name, "__init__.py") 139 | ): 140 | basepath = os.path.join(path, name) 141 | spec = spec_from_file_location( 142 | name, 143 | os.path.join(basepath, "__init__.py"), 144 | submodule_search_locations=[basepath], 145 | ) 146 | break 147 | if spec: 148 | debug("Found module: {!r}".format(spec)) 149 | return spec 150 | except (FileNotFoundError, ModuleNotFoundError): 151 | msg = "ImportError loading {!r}, raising CollectionNotFound" 152 | debug(msg.format(name)) 153 | raise CollectionNotFound(name=name, start=self.start) 154 | return None 155 | -------------------------------------------------------------------------------- /invoke/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Invoke's own 'binary' entrypoint. 3 | 4 | Dogfoods the `program` module. 5 | """ 6 | 7 | from . 
import __version__, Program 8 | 9 | program = Program( 10 | name="Invoke", 11 | binary="inv[oke]", 12 | binary_names=["invoke", "inv"], 13 | version=__version__, 14 | ) 15 | -------------------------------------------------------------------------------- /invoke/parser/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | from .parser import * 3 | from .context import ParserContext 4 | from .context import ParserContext as Context, to_flag, translate_underscores 5 | from .argument import Argument 6 | -------------------------------------------------------------------------------- /invoke/parser/argument.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Iterable, Optional, Tuple 2 | 3 | # TODO: dynamic type for kind 4 | # T = TypeVar('T') 5 | 6 | 7 | class Argument: 8 | """ 9 | A command-line argument/flag. 10 | 11 | :param name: 12 | Syntactic sugar for ``names=[]``. Giving both ``name`` and 13 | ``names`` is invalid. 14 | :param names: 15 | List of valid identifiers for this argument. For example, a "help" 16 | argument may be defined with a name list of ``['-h', '--help']``. 17 | :param kind: 18 | Type factory & parser hint. E.g. ``int`` will turn the default text 19 | value parsed, into a Python integer; and ``bool`` will tell the 20 | parser not to expect an actual value but to treat the argument as a 21 | toggle/flag. 22 | :param default: 23 | Default value made available to the parser if no value is given on the 24 | command line. 25 | :param help: 26 | Help text, intended for use with ``--help``. 27 | :param positional: 28 | Whether or not this argument's value may be given positionally. When 29 | ``False`` (default) arguments must be explicitly named. 30 | :param optional: 31 | Whether or not this (non-``bool``) argument requires a value. 32 | :param incrementable: 33 | Whether or not this (``int``) argument is to be incremented instead of 34 | overwritten/assigned to. 35 | :param attr_name: 36 | A Python identifier/attribute friendly name, typically filled in with 37 | the underscored version when ``name``/``names`` contain dashes. 38 | 39 | .. versionadded:: 1.0 40 | """ 41 | 42 | def __init__( 43 | self, 44 | name: Optional[str] = None, 45 | names: Iterable[str] = (), 46 | kind: Any = str, 47 | default: Optional[Any] = None, 48 | help: Optional[str] = None, 49 | positional: bool = False, 50 | optional: bool = False, 51 | incrementable: bool = False, 52 | attr_name: Optional[str] = None, 53 | ) -> None: 54 | if name and names: 55 | raise TypeError( 56 | "Cannot give both 'name' and 'names' arguments! Pick one." 57 | ) 58 | if not (name or names): 59 | raise TypeError("An Argument must have at least one name.") 60 | if names: 61 | self.names = tuple(names) 62 | elif name and not names: 63 | self.names = (name,) 64 | self.kind = kind 65 | initial_value: Optional[Any] = None 66 | # Special case: list-type args start out as empty list, not None. 67 | if kind is list: 68 | initial_value = [] 69 | # Another: incrementable args start out as their default value. 
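# (Illustrative sketch, not from the upstream source, showing how the kinds
# interact with these initial values:
#     Argument(name="verbose", kind=int, default=0, incrementable=True)
# starts at 0 and gains one per parsed occurrence, while
#     Argument(name="exclude", kind=list)
# starts as [] and appends each value given to it; see set_value() below.)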
70 | if incrementable: 71 | initial_value = default 72 | self.raw_value = self._value = initial_value 73 | self.default = default 74 | self.help = help 75 | self.positional = positional 76 | self.optional = optional 77 | self.incrementable = incrementable 78 | self.attr_name = attr_name 79 | 80 | def __repr__(self) -> str: 81 | nicks = "" 82 | if self.nicknames: 83 | nicks = " ({})".format(", ".join(self.nicknames)) 84 | flags = "" 85 | if self.positional or self.optional: 86 | flags = " " 87 | if self.positional: 88 | flags += "*" 89 | if self.optional: 90 | flags += "?" 91 | # TODO: store this default value somewhere other than signature of 92 | # Argument.__init__? 93 | kind = "" 94 | if self.kind != str: 95 | kind = " [{}]".format(self.kind.__name__) 96 | return "<{}: {}{}{}{}>".format( 97 | self.__class__.__name__, self.name, nicks, kind, flags 98 | ) 99 | 100 | @property 101 | def name(self) -> Optional[str]: 102 | """ 103 | The canonical attribute-friendly name for this argument. 104 | 105 | Will be ``attr_name`` (if given to constructor) or the first name in 106 | ``names`` otherwise. 107 | 108 | .. versionadded:: 1.0 109 | """ 110 | return self.attr_name or self.names[0] 111 | 112 | @property 113 | def nicknames(self) -> Tuple[str, ...]: 114 | return self.names[1:] 115 | 116 | @property 117 | def takes_value(self) -> bool: 118 | if self.kind is bool: 119 | return False 120 | if self.incrementable: 121 | return False 122 | return True 123 | 124 | @property 125 | def value(self) -> Any: 126 | # TODO: should probably be optional instead 127 | return self._value if self._value is not None else self.default 128 | 129 | @value.setter 130 | def value(self, arg: str) -> None: 131 | self.set_value(arg, cast=True) 132 | 133 | def set_value(self, value: Any, cast: bool = True) -> None: 134 | """ 135 | Actual explicit value-setting API call. 136 | 137 | Sets ``self.raw_value`` to ``value`` directly. 138 | 139 | Sets ``self.value`` to ``self.kind(value)``, unless: 140 | 141 | - ``cast=False``, in which case the raw value is also used. 142 | - ``self.kind==list``, in which case the value is appended to 143 | ``self.value`` instead of cast & overwritten. 144 | - ``self.incrementable==True``, in which case the value is ignored and 145 | the current (assumed int) value is simply incremented. 146 | 147 | .. versionadded:: 1.0 148 | """ 149 | self.raw_value = value 150 | # Default to do-nothing/identity function 151 | func = lambda x: x 152 | # If cast, set to self.kind, which should be str/int/etc 153 | if cast: 154 | func = self.kind 155 | # If self.kind is a list, append instead of using cast func. 156 | if self.kind is list: 157 | func = lambda x: self.value + [x] 158 | # If incrementable, just increment. 159 | if self.incrementable: 160 | # TODO: explode nicely if self.value was not an int to start 161 | # with 162 | func = lambda x: self.value + 1 163 | self._value = func(value) 164 | 165 | @property 166 | def got_value(self) -> bool: 167 | """ 168 | Returns whether the argument was ever given a (non-default) value. 169 | 170 | For most argument kinds, this simply checks whether the internally 171 | stored value is non-``None``; for others, such as ``list`` kinds, 172 | different checks may be used. 173 | 174 | .. 
versionadded:: 1.3 175 | """ 176 | if self.kind is list: 177 | return bool(self._value) 178 | return self._value is not None 179 | -------------------------------------------------------------------------------- /invoke/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyinvoke/invoke/65dd896d93994d423feb46313f651ae8a021c6d7/invoke/py.typed -------------------------------------------------------------------------------- /invoke/vendor/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyinvoke/invoke/65dd896d93994d423feb46313f651ae8a021c6d7/invoke/vendor/__init__.py -------------------------------------------------------------------------------- /invoke/vendor/fluidity/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2011 Rodrigo S. Manhães 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /invoke/vendor/fluidity/__init__.py: -------------------------------------------------------------------------------- 1 | from .machine import (StateMachine, state, transition, 2 | InvalidConfiguration, InvalidTransition, 3 | GuardNotSatisfied, ForkedTransition) 4 | 5 | -------------------------------------------------------------------------------- /invoke/vendor/fluidity/backwardscompat.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | if sys.version_info >= (3,): 4 | def callable(obj): 5 | return hasattr(obj, '__call__') 6 | else: 7 | callable = callable 8 | 9 | -------------------------------------------------------------------------------- /invoke/vendor/lexicon/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2020 Jeff Forcier. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, 8 | this list of conditions and the following disclaimer. 
9 | * Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 14 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 15 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 16 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 17 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 19 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 20 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 21 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 22 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 | -------------------------------------------------------------------------------- /invoke/vendor/lexicon/__init__.py: -------------------------------------------------------------------------------- 1 | from ._version import __version_info__, __version__ # noqa 2 | from .attribute_dict import AttributeDict 3 | from .alias_dict import AliasDict 4 | 5 | 6 | class Lexicon(AttributeDict, AliasDict): 7 | def __init__(self, *args, **kwargs): 8 | # Need to avoid combining AliasDict's initial attribute write on 9 | # self.aliases, with AttributeDict's __setattr__. Doing so results in 10 | # an infinite loop. Instead, just skip straight to dict() for both 11 | # explicitly (i.e. we override AliasDict.__init__ instead of extending 12 | # it.) 13 | # NOTE: could tickle AttributeDict.__init__ instead, in case it ever 14 | # grows one. 15 | dict.__init__(self, *args, **kwargs) 16 | dict.__setattr__(self, "aliases", {}) 17 | 18 | def __getattr__(self, key): 19 | # Intercept deepcopy/etc driven access to self.aliases when not 20 | # actually set. (Only a problem for us, due to abovementioned combo of 21 | # Alias and Attribute Dicts, so not solvable in a parent alone.) 22 | if key == "aliases" and key not in self.__dict__: 23 | self.__dict__[key] = {} 24 | return super(Lexicon, self).__getattr__(key) 25 | -------------------------------------------------------------------------------- /invoke/vendor/lexicon/_version.py: -------------------------------------------------------------------------------- 1 | __version_info__ = (2, 0, 1) 2 | __version__ = ".".join(map(str, __version_info__)) 3 | -------------------------------------------------------------------------------- /invoke/vendor/lexicon/alias_dict.py: -------------------------------------------------------------------------------- 1 | class AliasDict(dict): 2 | def __init__(self, *args, **kwargs): 3 | super(AliasDict, self).__init__(*args, **kwargs) 4 | self.aliases = {} 5 | 6 | def alias(self, from_, to): 7 | self.aliases[from_] = to 8 | 9 | def unalias(self, from_): 10 | del self.aliases[from_] 11 | 12 | def aliases_of(self, name): 13 | """ 14 | Returns other names for given real key or alias ``name``. 15 | 16 | If given a real key, returns its aliases. 17 | 18 | If given an alias, returns the real key it points to, plus any other 19 | aliases of that real key. (The given alias itself is not included in 20 | the return value.) 21 | """ 22 | names = [] 23 | key = name 24 | # self.aliases keys are aliases, not realkeys. 
Easy test to see if we 25 | # should flip around to the POV of a realkey when given an alias. 26 | if name in self.aliases: 27 | key = self.aliases[name] 28 | # Ensure the real key shows up in output. 29 | names.append(key) 30 | # 'key' is now a realkey, whose aliases are all keys whose value is 31 | # itself. Filter out the original name given. 32 | names.extend( 33 | [k for k, v in self.aliases.items() if v == key and k != name] 34 | ) 35 | return names 36 | 37 | def _handle(self, key, value, single, multi, unaliased): 38 | # Attribute existence test required to not blow up when deepcopy'd 39 | if key in getattr(self, "aliases", {}): 40 | target = self.aliases[key] 41 | # Single-string targets 42 | if isinstance(target, str): 43 | return single(self, target, value) 44 | # Multi-string targets 45 | else: 46 | if multi: 47 | return multi(self, target, value) 48 | else: 49 | for subkey in target: 50 | single(self, subkey, value) 51 | else: 52 | return unaliased(self, key, value) 53 | 54 | def __setitem__(self, key, value): 55 | def single(d, target, value): 56 | d[target] = value 57 | 58 | def unaliased(d, key, value): 59 | super(AliasDict, d).__setitem__(key, value) 60 | 61 | return self._handle(key, value, single, None, unaliased) 62 | 63 | def __getitem__(self, key): 64 | def single(d, target, value): 65 | return d[target] 66 | 67 | def unaliased(d, key, value): 68 | return super(AliasDict, d).__getitem__(key) 69 | 70 | def multi(d, target, value): 71 | msg = "Multi-target aliases have no well-defined value and can't be read." # noqa 72 | raise ValueError(msg) 73 | 74 | return self._handle(key, None, single, multi, unaliased) 75 | 76 | def __contains__(self, key): 77 | def single(d, target, value): 78 | return target in d 79 | 80 | def multi(d, target, value): 81 | return all(subkey in self for subkey in self.aliases[key]) 82 | 83 | def unaliased(d, key, value): 84 | return super(AliasDict, d).__contains__(key) 85 | 86 | return self._handle(key, None, single, multi, unaliased) 87 | 88 | def __delitem__(self, key): 89 | def single(d, target, value): 90 | del d[target] 91 | 92 | def unaliased(d, key, value): 93 | return super(AliasDict, d).__delitem__(key) 94 | 95 | return self._handle(key, None, single, None, unaliased) 96 | -------------------------------------------------------------------------------- /invoke/vendor/lexicon/attribute_dict.py: -------------------------------------------------------------------------------- 1 | class AttributeDict(dict): 2 | def __getattr__(self, key): 3 | try: 4 | return self[key] 5 | except KeyError: 6 | # to conform with __getattr__ spec 7 | raise AttributeError(key) 8 | 9 | def __setattr__(self, key, value): 10 | self[key] = value 11 | 12 | def __delattr__(self, key): 13 | del self[key] 14 | 15 | def __dir__(self): 16 | return dir(type(self)) + list(self.keys()) 17 | -------------------------------------------------------------------------------- /invoke/vendor/yaml/composer.py: -------------------------------------------------------------------------------- 1 | 2 | __all__ = ['Composer', 'ComposerError'] 3 | 4 | from .error import MarkedYAMLError 5 | from .events import * 6 | from .nodes import * 7 | 8 | class ComposerError(MarkedYAMLError): 9 | pass 10 | 11 | class Composer: 12 | 13 | def __init__(self): 14 | self.anchors = {} 15 | 16 | def check_node(self): 17 | # Drop the STREAM-START event. 18 | if self.check_event(StreamStartEvent): 19 | self.get_event() 20 | 21 | # If there are more documents available? 
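# (Clarifying note: i.e. at least one more document follows as long as the
# next event is not STREAM-END.)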
22 | return not self.check_event(StreamEndEvent) 23 | 24 | def get_node(self): 25 | # Get the root node of the next document. 26 | if not self.check_event(StreamEndEvent): 27 | return self.compose_document() 28 | 29 | def get_single_node(self): 30 | # Drop the STREAM-START event. 31 | self.get_event() 32 | 33 | # Compose a document if the stream is not empty. 34 | document = None 35 | if not self.check_event(StreamEndEvent): 36 | document = self.compose_document() 37 | 38 | # Ensure that the stream contains no more documents. 39 | if not self.check_event(StreamEndEvent): 40 | event = self.get_event() 41 | raise ComposerError("expected a single document in the stream", 42 | document.start_mark, "but found another document", 43 | event.start_mark) 44 | 45 | # Drop the STREAM-END event. 46 | self.get_event() 47 | 48 | return document 49 | 50 | def compose_document(self): 51 | # Drop the DOCUMENT-START event. 52 | self.get_event() 53 | 54 | # Compose the root node. 55 | node = self.compose_node(None, None) 56 | 57 | # Drop the DOCUMENT-END event. 58 | self.get_event() 59 | 60 | self.anchors = {} 61 | return node 62 | 63 | def compose_node(self, parent, index): 64 | if self.check_event(AliasEvent): 65 | event = self.get_event() 66 | anchor = event.anchor 67 | if anchor not in self.anchors: 68 | raise ComposerError(None, None, "found undefined alias %r" 69 | % anchor, event.start_mark) 70 | return self.anchors[anchor] 71 | event = self.peek_event() 72 | anchor = event.anchor 73 | if anchor is not None: 74 | if anchor in self.anchors: 75 | raise ComposerError("found duplicate anchor %r; first occurrence" 76 | % anchor, self.anchors[anchor].start_mark, 77 | "second occurrence", event.start_mark) 78 | self.descend_resolver(parent, index) 79 | if self.check_event(ScalarEvent): 80 | node = self.compose_scalar_node(anchor) 81 | elif self.check_event(SequenceStartEvent): 82 | node = self.compose_sequence_node(anchor) 83 | elif self.check_event(MappingStartEvent): 84 | node = self.compose_mapping_node(anchor) 85 | self.ascend_resolver() 86 | return node 87 | 88 | def compose_scalar_node(self, anchor): 89 | event = self.get_event() 90 | tag = event.tag 91 | if tag is None or tag == '!': 92 | tag = self.resolve(ScalarNode, event.value, event.implicit) 93 | node = ScalarNode(tag, event.value, 94 | event.start_mark, event.end_mark, style=event.style) 95 | if anchor is not None: 96 | self.anchors[anchor] = node 97 | return node 98 | 99 | def compose_sequence_node(self, anchor): 100 | start_event = self.get_event() 101 | tag = start_event.tag 102 | if tag is None or tag == '!': 103 | tag = self.resolve(SequenceNode, None, start_event.implicit) 104 | node = SequenceNode(tag, [], 105 | start_event.start_mark, None, 106 | flow_style=start_event.flow_style) 107 | if anchor is not None: 108 | self.anchors[anchor] = node 109 | index = 0 110 | while not self.check_event(SequenceEndEvent): 111 | node.value.append(self.compose_node(node, index)) 112 | index += 1 113 | end_event = self.get_event() 114 | node.end_mark = end_event.end_mark 115 | return node 116 | 117 | def compose_mapping_node(self, anchor): 118 | start_event = self.get_event() 119 | tag = start_event.tag 120 | if tag is None or tag == '!': 121 | tag = self.resolve(MappingNode, None, start_event.implicit) 122 | node = MappingNode(tag, [], 123 | start_event.start_mark, None, 124 | flow_style=start_event.flow_style) 125 | if anchor is not None: 126 | self.anchors[anchor] = node 127 | while not self.check_event(MappingEndEvent): 128 | #key_event = 
self.peek_event() 129 | item_key = self.compose_node(node, None) 130 | #if item_key in node.value: 131 | # raise ComposerError("while composing a mapping", start_event.start_mark, 132 | # "found duplicate key", key_event.start_mark) 133 | item_value = self.compose_node(node, item_key) 134 | #node.value[item_key] = item_value 135 | node.value.append((item_key, item_value)) 136 | end_event = self.get_event() 137 | node.end_mark = end_event.end_mark 138 | return node 139 | 140 | -------------------------------------------------------------------------------- /invoke/vendor/yaml/cyaml.py: -------------------------------------------------------------------------------- 1 | 2 | __all__ = [ 3 | 'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader', 4 | 'CBaseDumper', 'CSafeDumper', 'CDumper' 5 | ] 6 | 7 | from yaml._yaml import CParser, CEmitter 8 | 9 | from .constructor import * 10 | 11 | from .serializer import * 12 | from .representer import * 13 | 14 | from .resolver import * 15 | 16 | class CBaseLoader(CParser, BaseConstructor, BaseResolver): 17 | 18 | def __init__(self, stream): 19 | CParser.__init__(self, stream) 20 | BaseConstructor.__init__(self) 21 | BaseResolver.__init__(self) 22 | 23 | class CSafeLoader(CParser, SafeConstructor, Resolver): 24 | 25 | def __init__(self, stream): 26 | CParser.__init__(self, stream) 27 | SafeConstructor.__init__(self) 28 | Resolver.__init__(self) 29 | 30 | class CFullLoader(CParser, FullConstructor, Resolver): 31 | 32 | def __init__(self, stream): 33 | CParser.__init__(self, stream) 34 | FullConstructor.__init__(self) 35 | Resolver.__init__(self) 36 | 37 | class CUnsafeLoader(CParser, UnsafeConstructor, Resolver): 38 | 39 | def __init__(self, stream): 40 | CParser.__init__(self, stream) 41 | UnsafeConstructor.__init__(self) 42 | Resolver.__init__(self) 43 | 44 | class CLoader(CParser, Constructor, Resolver): 45 | 46 | def __init__(self, stream): 47 | CParser.__init__(self, stream) 48 | Constructor.__init__(self) 49 | Resolver.__init__(self) 50 | 51 | class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): 52 | 53 | def __init__(self, stream, 54 | default_style=None, default_flow_style=False, 55 | canonical=None, indent=None, width=None, 56 | allow_unicode=None, line_break=None, 57 | encoding=None, explicit_start=None, explicit_end=None, 58 | version=None, tags=None, sort_keys=True): 59 | CEmitter.__init__(self, stream, canonical=canonical, 60 | indent=indent, width=width, encoding=encoding, 61 | allow_unicode=allow_unicode, line_break=line_break, 62 | explicit_start=explicit_start, explicit_end=explicit_end, 63 | version=version, tags=tags) 64 | Representer.__init__(self, default_style=default_style, 65 | default_flow_style=default_flow_style, sort_keys=sort_keys) 66 | Resolver.__init__(self) 67 | 68 | class CSafeDumper(CEmitter, SafeRepresenter, Resolver): 69 | 70 | def __init__(self, stream, 71 | default_style=None, default_flow_style=False, 72 | canonical=None, indent=None, width=None, 73 | allow_unicode=None, line_break=None, 74 | encoding=None, explicit_start=None, explicit_end=None, 75 | version=None, tags=None, sort_keys=True): 76 | CEmitter.__init__(self, stream, canonical=canonical, 77 | indent=indent, width=width, encoding=encoding, 78 | allow_unicode=allow_unicode, line_break=line_break, 79 | explicit_start=explicit_start, explicit_end=explicit_end, 80 | version=version, tags=tags) 81 | SafeRepresenter.__init__(self, default_style=default_style, 82 | default_flow_style=default_flow_style, sort_keys=sort_keys) 83 | 
Resolver.__init__(self) 84 | 85 | class CDumper(CEmitter, Serializer, Representer, Resolver): 86 | 87 | def __init__(self, stream, 88 | default_style=None, default_flow_style=False, 89 | canonical=None, indent=None, width=None, 90 | allow_unicode=None, line_break=None, 91 | encoding=None, explicit_start=None, explicit_end=None, 92 | version=None, tags=None, sort_keys=True): 93 | CEmitter.__init__(self, stream, canonical=canonical, 94 | indent=indent, width=width, encoding=encoding, 95 | allow_unicode=allow_unicode, line_break=line_break, 96 | explicit_start=explicit_start, explicit_end=explicit_end, 97 | version=version, tags=tags) 98 | Representer.__init__(self, default_style=default_style, 99 | default_flow_style=default_flow_style, sort_keys=sort_keys) 100 | Resolver.__init__(self) 101 | 102 | -------------------------------------------------------------------------------- /invoke/vendor/yaml/dumper.py: -------------------------------------------------------------------------------- 1 | 2 | __all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] 3 | 4 | from .emitter import * 5 | from .serializer import * 6 | from .representer import * 7 | from .resolver import * 8 | 9 | class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): 10 | 11 | def __init__(self, stream, 12 | default_style=None, default_flow_style=False, 13 | canonical=None, indent=None, width=None, 14 | allow_unicode=None, line_break=None, 15 | encoding=None, explicit_start=None, explicit_end=None, 16 | version=None, tags=None, sort_keys=True): 17 | Emitter.__init__(self, stream, canonical=canonical, 18 | indent=indent, width=width, 19 | allow_unicode=allow_unicode, line_break=line_break) 20 | Serializer.__init__(self, encoding=encoding, 21 | explicit_start=explicit_start, explicit_end=explicit_end, 22 | version=version, tags=tags) 23 | Representer.__init__(self, default_style=default_style, 24 | default_flow_style=default_flow_style, sort_keys=sort_keys) 25 | Resolver.__init__(self) 26 | 27 | class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): 28 | 29 | def __init__(self, stream, 30 | default_style=None, default_flow_style=False, 31 | canonical=None, indent=None, width=None, 32 | allow_unicode=None, line_break=None, 33 | encoding=None, explicit_start=None, explicit_end=None, 34 | version=None, tags=None, sort_keys=True): 35 | Emitter.__init__(self, stream, canonical=canonical, 36 | indent=indent, width=width, 37 | allow_unicode=allow_unicode, line_break=line_break) 38 | Serializer.__init__(self, encoding=encoding, 39 | explicit_start=explicit_start, explicit_end=explicit_end, 40 | version=version, tags=tags) 41 | SafeRepresenter.__init__(self, default_style=default_style, 42 | default_flow_style=default_flow_style, sort_keys=sort_keys) 43 | Resolver.__init__(self) 44 | 45 | class Dumper(Emitter, Serializer, Representer, Resolver): 46 | 47 | def __init__(self, stream, 48 | default_style=None, default_flow_style=False, 49 | canonical=None, indent=None, width=None, 50 | allow_unicode=None, line_break=None, 51 | encoding=None, explicit_start=None, explicit_end=None, 52 | version=None, tags=None, sort_keys=True): 53 | Emitter.__init__(self, stream, canonical=canonical, 54 | indent=indent, width=width, 55 | allow_unicode=allow_unicode, line_break=line_break) 56 | Serializer.__init__(self, encoding=encoding, 57 | explicit_start=explicit_start, explicit_end=explicit_end, 58 | version=version, tags=tags) 59 | Representer.__init__(self, default_style=default_style, 60 | default_flow_style=default_flow_style, 
sort_keys=sort_keys) 61 | Resolver.__init__(self) 62 | 63 | -------------------------------------------------------------------------------- /invoke/vendor/yaml/error.py: -------------------------------------------------------------------------------- 1 | 2 | __all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] 3 | 4 | class Mark: 5 | 6 | def __init__(self, name, index, line, column, buffer, pointer): 7 | self.name = name 8 | self.index = index 9 | self.line = line 10 | self.column = column 11 | self.buffer = buffer 12 | self.pointer = pointer 13 | 14 | def get_snippet(self, indent=4, max_length=75): 15 | if self.buffer is None: 16 | return None 17 | head = '' 18 | start = self.pointer 19 | while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029': 20 | start -= 1 21 | if self.pointer-start > max_length/2-1: 22 | head = ' ... ' 23 | start += 5 24 | break 25 | tail = '' 26 | end = self.pointer 27 | while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029': 28 | end += 1 29 | if end-self.pointer > max_length/2-1: 30 | tail = ' ... ' 31 | end -= 5 32 | break 33 | snippet = self.buffer[start:end] 34 | return ' '*indent + head + snippet + tail + '\n' \ 35 | + ' '*(indent+self.pointer-start+len(head)) + '^' 36 | 37 | def __str__(self): 38 | snippet = self.get_snippet() 39 | where = " in \"%s\", line %d, column %d" \ 40 | % (self.name, self.line+1, self.column+1) 41 | if snippet is not None: 42 | where += ":\n"+snippet 43 | return where 44 | 45 | class YAMLError(Exception): 46 | pass 47 | 48 | class MarkedYAMLError(YAMLError): 49 | 50 | def __init__(self, context=None, context_mark=None, 51 | problem=None, problem_mark=None, note=None): 52 | self.context = context 53 | self.context_mark = context_mark 54 | self.problem = problem 55 | self.problem_mark = problem_mark 56 | self.note = note 57 | 58 | def __str__(self): 59 | lines = [] 60 | if self.context is not None: 61 | lines.append(self.context) 62 | if self.context_mark is not None \ 63 | and (self.problem is None or self.problem_mark is None 64 | or self.context_mark.name != self.problem_mark.name 65 | or self.context_mark.line != self.problem_mark.line 66 | or self.context_mark.column != self.problem_mark.column): 67 | lines.append(str(self.context_mark)) 68 | if self.problem is not None: 69 | lines.append(self.problem) 70 | if self.problem_mark is not None: 71 | lines.append(str(self.problem_mark)) 72 | if self.note is not None: 73 | lines.append(self.note) 74 | return '\n'.join(lines) 75 | 76 | -------------------------------------------------------------------------------- /invoke/vendor/yaml/events.py: -------------------------------------------------------------------------------- 1 | 2 | # Abstract classes. 
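# (Clarifying note: Event is the root base class; NodeEvent and the
# Collection*Event classes are intermediate bases, with the concrete stream,
# document, alias, scalar, sequence and mapping events following under
# "Implementations" below.)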
3 | 4 | class Event(object): 5 | def __init__(self, start_mark=None, end_mark=None): 6 | self.start_mark = start_mark 7 | self.end_mark = end_mark 8 | def __repr__(self): 9 | attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] 10 | if hasattr(self, key)] 11 | arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) 12 | for key in attributes]) 13 | return '%s(%s)' % (self.__class__.__name__, arguments) 14 | 15 | class NodeEvent(Event): 16 | def __init__(self, anchor, start_mark=None, end_mark=None): 17 | self.anchor = anchor 18 | self.start_mark = start_mark 19 | self.end_mark = end_mark 20 | 21 | class CollectionStartEvent(NodeEvent): 22 | def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, 23 | flow_style=None): 24 | self.anchor = anchor 25 | self.tag = tag 26 | self.implicit = implicit 27 | self.start_mark = start_mark 28 | self.end_mark = end_mark 29 | self.flow_style = flow_style 30 | 31 | class CollectionEndEvent(Event): 32 | pass 33 | 34 | # Implementations. 35 | 36 | class StreamStartEvent(Event): 37 | def __init__(self, start_mark=None, end_mark=None, encoding=None): 38 | self.start_mark = start_mark 39 | self.end_mark = end_mark 40 | self.encoding = encoding 41 | 42 | class StreamEndEvent(Event): 43 | pass 44 | 45 | class DocumentStartEvent(Event): 46 | def __init__(self, start_mark=None, end_mark=None, 47 | explicit=None, version=None, tags=None): 48 | self.start_mark = start_mark 49 | self.end_mark = end_mark 50 | self.explicit = explicit 51 | self.version = version 52 | self.tags = tags 53 | 54 | class DocumentEndEvent(Event): 55 | def __init__(self, start_mark=None, end_mark=None, 56 | explicit=None): 57 | self.start_mark = start_mark 58 | self.end_mark = end_mark 59 | self.explicit = explicit 60 | 61 | class AliasEvent(NodeEvent): 62 | pass 63 | 64 | class ScalarEvent(NodeEvent): 65 | def __init__(self, anchor, tag, implicit, value, 66 | start_mark=None, end_mark=None, style=None): 67 | self.anchor = anchor 68 | self.tag = tag 69 | self.implicit = implicit 70 | self.value = value 71 | self.start_mark = start_mark 72 | self.end_mark = end_mark 73 | self.style = style 74 | 75 | class SequenceStartEvent(CollectionStartEvent): 76 | pass 77 | 78 | class SequenceEndEvent(CollectionEndEvent): 79 | pass 80 | 81 | class MappingStartEvent(CollectionStartEvent): 82 | pass 83 | 84 | class MappingEndEvent(CollectionEndEvent): 85 | pass 86 | 87 | -------------------------------------------------------------------------------- /invoke/vendor/yaml/loader.py: -------------------------------------------------------------------------------- 1 | 2 | __all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader'] 3 | 4 | from .reader import * 5 | from .scanner import * 6 | from .parser import * 7 | from .composer import * 8 | from .constructor import * 9 | from .resolver import * 10 | 11 | class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): 12 | 13 | def __init__(self, stream): 14 | Reader.__init__(self, stream) 15 | Scanner.__init__(self) 16 | Parser.__init__(self) 17 | Composer.__init__(self) 18 | BaseConstructor.__init__(self) 19 | BaseResolver.__init__(self) 20 | 21 | class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver): 22 | 23 | def __init__(self, stream): 24 | Reader.__init__(self, stream) 25 | Scanner.__init__(self) 26 | Parser.__init__(self) 27 | Composer.__init__(self) 28 | FullConstructor.__init__(self) 29 | Resolver.__init__(self) 30 | 31 | class SafeLoader(Reader, 
Scanner, Parser, Composer, SafeConstructor, Resolver): 32 | 33 | def __init__(self, stream): 34 | Reader.__init__(self, stream) 35 | Scanner.__init__(self) 36 | Parser.__init__(self) 37 | Composer.__init__(self) 38 | SafeConstructor.__init__(self) 39 | Resolver.__init__(self) 40 | 41 | class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): 42 | 43 | def __init__(self, stream): 44 | Reader.__init__(self, stream) 45 | Scanner.__init__(self) 46 | Parser.__init__(self) 47 | Composer.__init__(self) 48 | Constructor.__init__(self) 49 | Resolver.__init__(self) 50 | 51 | # UnsafeLoader is the same as Loader (which is and was always unsafe on 52 | # untrusted input). Use of either Loader or UnsafeLoader should be rare, since 53 | # FullLoad should be able to load almost all YAML safely. Loader is left intact 54 | # to ensure backwards compatibility. 55 | class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver): 56 | 57 | def __init__(self, stream): 58 | Reader.__init__(self, stream) 59 | Scanner.__init__(self) 60 | Parser.__init__(self) 61 | Composer.__init__(self) 62 | Constructor.__init__(self) 63 | Resolver.__init__(self) 64 | -------------------------------------------------------------------------------- /invoke/vendor/yaml/nodes.py: -------------------------------------------------------------------------------- 1 | 2 | class Node(object): 3 | def __init__(self, tag, value, start_mark, end_mark): 4 | self.tag = tag 5 | self.value = value 6 | self.start_mark = start_mark 7 | self.end_mark = end_mark 8 | def __repr__(self): 9 | value = self.value 10 | #if isinstance(value, list): 11 | # if len(value) == 0: 12 | # value = '' 13 | # elif len(value) == 1: 14 | # value = '<1 item>' 15 | # else: 16 | # value = '<%d items>' % len(value) 17 | #else: 18 | # if len(value) > 75: 19 | # value = repr(value[:70]+u' ... ') 20 | # else: 21 | # value = repr(value) 22 | value = repr(value) 23 | return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) 24 | 25 | class ScalarNode(Node): 26 | id = 'scalar' 27 | def __init__(self, tag, value, 28 | start_mark=None, end_mark=None, style=None): 29 | self.tag = tag 30 | self.value = value 31 | self.start_mark = start_mark 32 | self.end_mark = end_mark 33 | self.style = style 34 | 35 | class CollectionNode(Node): 36 | def __init__(self, tag, value, 37 | start_mark=None, end_mark=None, flow_style=None): 38 | self.tag = tag 39 | self.value = value 40 | self.start_mark = start_mark 41 | self.end_mark = end_mark 42 | self.flow_style = flow_style 43 | 44 | class SequenceNode(CollectionNode): 45 | id = 'sequence' 46 | 47 | class MappingNode(CollectionNode): 48 | id = 'mapping' 49 | 50 | -------------------------------------------------------------------------------- /invoke/vendor/yaml/reader.py: -------------------------------------------------------------------------------- 1 | # This module contains abstractions for the input stream. You don't have to 2 | # looks further, there are no pretty code. 3 | # 4 | # We define two classes here. 5 | # 6 | # Mark(source, line, column) 7 | # It's just a record and its only use is producing nice error messages. 8 | # Parser does not use it for any other purposes. 9 | # 10 | # Reader(source, data) 11 | # Reader determines the encoding of `data` and converts it to unicode. 12 | # Reader provides the following methods and attributes: 13 | # reader.peek(length=1) - return the next `length` characters 14 | # reader.forward(length=1) - move the current position to `length` characters. 
15 | # reader.index - the number of the current character. 16 | # reader.line, stream.column - the line and the column of the current character. 17 | 18 | __all__ = ['Reader', 'ReaderError'] 19 | 20 | from .error import YAMLError, Mark 21 | 22 | import codecs, re 23 | 24 | class ReaderError(YAMLError): 25 | 26 | def __init__(self, name, position, character, encoding, reason): 27 | self.name = name 28 | self.character = character 29 | self.position = position 30 | self.encoding = encoding 31 | self.reason = reason 32 | 33 | def __str__(self): 34 | if isinstance(self.character, bytes): 35 | return "'%s' codec can't decode byte #x%02x: %s\n" \ 36 | " in \"%s\", position %d" \ 37 | % (self.encoding, ord(self.character), self.reason, 38 | self.name, self.position) 39 | else: 40 | return "unacceptable character #x%04x: %s\n" \ 41 | " in \"%s\", position %d" \ 42 | % (self.character, self.reason, 43 | self.name, self.position) 44 | 45 | class Reader(object): 46 | # Reader: 47 | # - determines the data encoding and converts it to a unicode string, 48 | # - checks if characters are in allowed range, 49 | # - adds '\0' to the end. 50 | 51 | # Reader accepts 52 | # - a `bytes` object, 53 | # - a `str` object, 54 | # - a file-like object with its `read` method returning `str`, 55 | # - a file-like object with its `read` method returning `unicode`. 56 | 57 | # Yeah, it's ugly and slow. 58 | 59 | def __init__(self, stream): 60 | self.name = None 61 | self.stream = None 62 | self.stream_pointer = 0 63 | self.eof = True 64 | self.buffer = '' 65 | self.pointer = 0 66 | self.raw_buffer = None 67 | self.raw_decode = None 68 | self.encoding = None 69 | self.index = 0 70 | self.line = 0 71 | self.column = 0 72 | if isinstance(stream, str): 73 | self.name = "" 74 | self.check_printable(stream) 75 | self.buffer = stream+'\0' 76 | elif isinstance(stream, bytes): 77 | self.name = "" 78 | self.raw_buffer = stream 79 | self.determine_encoding() 80 | else: 81 | self.stream = stream 82 | self.name = getattr(stream, 'name', "") 83 | self.eof = False 84 | self.raw_buffer = None 85 | self.determine_encoding() 86 | 87 | def peek(self, index=0): 88 | try: 89 | return self.buffer[self.pointer+index] 90 | except IndexError: 91 | self.update(index+1) 92 | return self.buffer[self.pointer+index] 93 | 94 | def prefix(self, length=1): 95 | if self.pointer+length >= len(self.buffer): 96 | self.update(length) 97 | return self.buffer[self.pointer:self.pointer+length] 98 | 99 | def forward(self, length=1): 100 | if self.pointer+length+1 >= len(self.buffer): 101 | self.update(length+1) 102 | while length: 103 | ch = self.buffer[self.pointer] 104 | self.pointer += 1 105 | self.index += 1 106 | if ch in '\n\x85\u2028\u2029' \ 107 | or (ch == '\r' and self.buffer[self.pointer] != '\n'): 108 | self.line += 1 109 | self.column = 0 110 | elif ch != '\uFEFF': 111 | self.column += 1 112 | length -= 1 113 | 114 | def get_mark(self): 115 | if self.stream is None: 116 | return Mark(self.name, self.index, self.line, self.column, 117 | self.buffer, self.pointer) 118 | else: 119 | return Mark(self.name, self.index, self.line, self.column, 120 | None, None) 121 | 122 | def determine_encoding(self): 123 | while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2): 124 | self.update_raw() 125 | if isinstance(self.raw_buffer, bytes): 126 | if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): 127 | self.raw_decode = codecs.utf_16_le_decode 128 | self.encoding = 'utf-16-le' 129 | elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): 130 
| self.raw_decode = codecs.utf_16_be_decode 131 | self.encoding = 'utf-16-be' 132 | else: 133 | self.raw_decode = codecs.utf_8_decode 134 | self.encoding = 'utf-8' 135 | self.update(1) 136 | 137 | NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]') 138 | def check_printable(self, data): 139 | match = self.NON_PRINTABLE.search(data) 140 | if match: 141 | character = match.group() 142 | position = self.index+(len(self.buffer)-self.pointer)+match.start() 143 | raise ReaderError(self.name, position, ord(character), 144 | 'unicode', "special characters are not allowed") 145 | 146 | def update(self, length): 147 | if self.raw_buffer is None: 148 | return 149 | self.buffer = self.buffer[self.pointer:] 150 | self.pointer = 0 151 | while len(self.buffer) < length: 152 | if not self.eof: 153 | self.update_raw() 154 | if self.raw_decode is not None: 155 | try: 156 | data, converted = self.raw_decode(self.raw_buffer, 157 | 'strict', self.eof) 158 | except UnicodeDecodeError as exc: 159 | character = self.raw_buffer[exc.start] 160 | if self.stream is not None: 161 | position = self.stream_pointer-len(self.raw_buffer)+exc.start 162 | else: 163 | position = exc.start 164 | raise ReaderError(self.name, position, character, 165 | exc.encoding, exc.reason) 166 | else: 167 | data = self.raw_buffer 168 | converted = len(data) 169 | self.check_printable(data) 170 | self.buffer += data 171 | self.raw_buffer = self.raw_buffer[converted:] 172 | if self.eof: 173 | self.buffer += '\0' 174 | self.raw_buffer = None 175 | break 176 | 177 | def update_raw(self, size=4096): 178 | data = self.stream.read(size) 179 | if self.raw_buffer is None: 180 | self.raw_buffer = data 181 | else: 182 | self.raw_buffer += data 183 | self.stream_pointer += len(data) 184 | if not data: 185 | self.eof = True 186 | -------------------------------------------------------------------------------- /invoke/vendor/yaml/serializer.py: -------------------------------------------------------------------------------- 1 | 2 | __all__ = ['Serializer', 'SerializerError'] 3 | 4 | from .error import YAMLError 5 | from .events import * 6 | from .nodes import * 7 | 8 | class SerializerError(YAMLError): 9 | pass 10 | 11 | class Serializer: 12 | 13 | ANCHOR_TEMPLATE = 'id%03d' 14 | 15 | def __init__(self, encoding=None, 16 | explicit_start=None, explicit_end=None, version=None, tags=None): 17 | self.use_encoding = encoding 18 | self.use_explicit_start = explicit_start 19 | self.use_explicit_end = explicit_end 20 | self.use_version = version 21 | self.use_tags = tags 22 | self.serialized_nodes = {} 23 | self.anchors = {} 24 | self.last_anchor_id = 0 25 | self.closed = None 26 | 27 | def open(self): 28 | if self.closed is None: 29 | self.emit(StreamStartEvent(encoding=self.use_encoding)) 30 | self.closed = False 31 | elif self.closed: 32 | raise SerializerError("serializer is closed") 33 | else: 34 | raise SerializerError("serializer is already opened") 35 | 36 | def close(self): 37 | if self.closed is None: 38 | raise SerializerError("serializer is not opened") 39 | elif not self.closed: 40 | self.emit(StreamEndEvent()) 41 | self.closed = True 42 | 43 | #def __del__(self): 44 | # self.close() 45 | 46 | def serialize(self, node): 47 | if self.closed is None: 48 | raise SerializerError("serializer is not opened") 49 | elif self.closed: 50 | raise SerializerError("serializer is closed") 51 | self.emit(DocumentStartEvent(explicit=self.use_explicit_start, 52 | version=self.use_version, tags=self.use_tags)) 53 | 
self.anchor_node(node) 54 | self.serialize_node(node, None, None) 55 | self.emit(DocumentEndEvent(explicit=self.use_explicit_end)) 56 | self.serialized_nodes = {} 57 | self.anchors = {} 58 | self.last_anchor_id = 0 59 | 60 | def anchor_node(self, node): 61 | if node in self.anchors: 62 | if self.anchors[node] is None: 63 | self.anchors[node] = self.generate_anchor(node) 64 | else: 65 | self.anchors[node] = None 66 | if isinstance(node, SequenceNode): 67 | for item in node.value: 68 | self.anchor_node(item) 69 | elif isinstance(node, MappingNode): 70 | for key, value in node.value: 71 | self.anchor_node(key) 72 | self.anchor_node(value) 73 | 74 | def generate_anchor(self, node): 75 | self.last_anchor_id += 1 76 | return self.ANCHOR_TEMPLATE % self.last_anchor_id 77 | 78 | def serialize_node(self, node, parent, index): 79 | alias = self.anchors[node] 80 | if node in self.serialized_nodes: 81 | self.emit(AliasEvent(alias)) 82 | else: 83 | self.serialized_nodes[node] = True 84 | self.descend_resolver(parent, index) 85 | if isinstance(node, ScalarNode): 86 | detected_tag = self.resolve(ScalarNode, node.value, (True, False)) 87 | default_tag = self.resolve(ScalarNode, node.value, (False, True)) 88 | implicit = (node.tag == detected_tag), (node.tag == default_tag) 89 | self.emit(ScalarEvent(alias, node.tag, implicit, node.value, 90 | style=node.style)) 91 | elif isinstance(node, SequenceNode): 92 | implicit = (node.tag 93 | == self.resolve(SequenceNode, node.value, True)) 94 | self.emit(SequenceStartEvent(alias, node.tag, implicit, 95 | flow_style=node.flow_style)) 96 | index = 0 97 | for item in node.value: 98 | self.serialize_node(item, node, index) 99 | index += 1 100 | self.emit(SequenceEndEvent()) 101 | elif isinstance(node, MappingNode): 102 | implicit = (node.tag 103 | == self.resolve(MappingNode, node.value, True)) 104 | self.emit(MappingStartEvent(alias, node.tag, implicit, 105 | flow_style=node.flow_style)) 106 | for key, value in node.value: 107 | self.serialize_node(key, node, None) 108 | self.serialize_node(value, node, key) 109 | self.emit(MappingEndEvent()) 110 | self.ascend_resolver() 111 | 112 | -------------------------------------------------------------------------------- /invoke/vendor/yaml/tokens.py: -------------------------------------------------------------------------------- 1 | 2 | class Token(object): 3 | def __init__(self, start_mark, end_mark): 4 | self.start_mark = start_mark 5 | self.end_mark = end_mark 6 | def __repr__(self): 7 | attributes = [key for key in self.__dict__ 8 | if not key.endswith('_mark')] 9 | attributes.sort() 10 | arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) 11 | for key in attributes]) 12 | return '%s(%s)' % (self.__class__.__name__, arguments) 13 | 14 | #class BOMToken(Token): 15 | # id = '' 16 | 17 | class DirectiveToken(Token): 18 | id = '' 19 | def __init__(self, name, value, start_mark, end_mark): 20 | self.name = name 21 | self.value = value 22 | self.start_mark = start_mark 23 | self.end_mark = end_mark 24 | 25 | class DocumentStartToken(Token): 26 | id = '' 27 | 28 | class DocumentEndToken(Token): 29 | id = '' 30 | 31 | class StreamStartToken(Token): 32 | id = '' 33 | def __init__(self, start_mark=None, end_mark=None, 34 | encoding=None): 35 | self.start_mark = start_mark 36 | self.end_mark = end_mark 37 | self.encoding = encoding 38 | 39 | class StreamEndToken(Token): 40 | id = '' 41 | 42 | class BlockSequenceStartToken(Token): 43 | id = '' 44 | 45 | class BlockMappingStartToken(Token): 46 | id = '' 47 | 48 | class 
BlockEndToken(Token): 49 | id = '' 50 | 51 | class FlowSequenceStartToken(Token): 52 | id = '[' 53 | 54 | class FlowMappingStartToken(Token): 55 | id = '{' 56 | 57 | class FlowSequenceEndToken(Token): 58 | id = ']' 59 | 60 | class FlowMappingEndToken(Token): 61 | id = '}' 62 | 63 | class KeyToken(Token): 64 | id = '?' 65 | 66 | class ValueToken(Token): 67 | id = ':' 68 | 69 | class BlockEntryToken(Token): 70 | id = '-' 71 | 72 | class FlowEntryToken(Token): 73 | id = ',' 74 | 75 | class AliasToken(Token): 76 | id = '' 77 | def __init__(self, value, start_mark, end_mark): 78 | self.value = value 79 | self.start_mark = start_mark 80 | self.end_mark = end_mark 81 | 82 | class AnchorToken(Token): 83 | id = '' 84 | def __init__(self, value, start_mark, end_mark): 85 | self.value = value 86 | self.start_mark = start_mark 87 | self.end_mark = end_mark 88 | 89 | class TagToken(Token): 90 | id = '' 91 | def __init__(self, value, start_mark, end_mark): 92 | self.value = value 93 | self.start_mark = start_mark 94 | self.end_mark = end_mark 95 | 96 | class ScalarToken(Token): 97 | id = '' 98 | def __init__(self, value, plain, start_mark, end_mark, style=None): 99 | self.value = value 100 | self.plain = plain 101 | self.start_mark = start_mark 102 | self.end_mark = end_mark 103 | self.style = style 104 | 105 | -------------------------------------------------------------------------------- /invoke/watchers.py: -------------------------------------------------------------------------------- 1 | import re 2 | import threading 3 | from typing import Generator, Iterable 4 | 5 | from .exceptions import ResponseNotAccepted 6 | 7 | 8 | class StreamWatcher(threading.local): 9 | """ 10 | A class whose subclasses may act on seen stream data from subprocesses. 11 | 12 | Subclasses must exhibit the following API; see `Responder` for a concrete 13 | example. 14 | 15 | * ``__init__`` is completely up to each subclass, though as usual, 16 | subclasses *of* subclasses should be careful to make use of `super` where 17 | appropriate. 18 | * `submit` must accept the entire current contents of the stream being 19 | watched, as a string, and may optionally return an iterable of strings 20 | (or act as a generator iterator, i.e. multiple calls to ``yield 21 | ``), which will each be written to the subprocess' standard 22 | input. 23 | 24 | .. note:: 25 | `StreamWatcher` subclasses exist in part to enable state tracking, such 26 | as detecting when a submitted password didn't work & erroring (or 27 | prompting a user, or etc). Such bookkeeping isn't easily achievable 28 | with simple callback functions. 29 | 30 | .. note:: 31 | `StreamWatcher` subclasses `threading.local` so that its instances can 32 | be used to 'watch' both subprocess stdout and stderr in separate 33 | threads. 34 | 35 | .. versionadded:: 1.0 36 | """ 37 | 38 | def submit(self, stream: str) -> Iterable[str]: 39 | """ 40 | Act on ``stream`` data, potentially returning responses. 41 | 42 | :param str stream: 43 | All data read on this stream since the beginning of the session. 44 | 45 | :returns: 46 | An iterable of ``str`` (which may be empty). 47 | 48 | .. versionadded:: 1.0 49 | """ 50 | raise NotImplementedError 51 | 52 | 53 | class Responder(StreamWatcher): 54 | """ 55 | A parameterizable object that submits responses to specific patterns. 56 | 57 | Commonly used to implement password auto-responds for things like ``sudo``. 58 | 59 | .. 
versionadded:: 1.0 60 | """ 61 | 62 | def __init__(self, pattern: str, response: str) -> None: 63 | r""" 64 | Imprint this `Responder` with necessary parameters. 65 | 66 | :param pattern: 67 | A raw string (e.g. ``r"\[sudo\] password for .*:"``) which will be 68 | turned into a regular expression. 69 | 70 | :param response: 71 | The string to submit to the subprocess' stdin when ``pattern`` is 72 | detected. 73 | """ 74 | # TODO: precompile the keys into regex objects 75 | self.pattern = pattern 76 | self.response = response 77 | self.index = 0 78 | 79 | def pattern_matches( 80 | self, stream: str, pattern: str, index_attr: str 81 | ) -> Iterable[str]: 82 | """ 83 | Generic "search for pattern in stream, using index" behavior. 84 | 85 | Used here and in some subclasses that want to track multiple patterns 86 | concurrently. 87 | 88 | :param str stream: The same data passed to ``submit``. 89 | :param str pattern: The pattern to search for. 90 | :param str index_attr: The name of the index attribute to use. 91 | :returns: An iterable of string matches. 92 | 93 | .. versionadded:: 1.0 94 | """ 95 | # NOTE: generifies scanning so it can be used to scan for >1 pattern at 96 | # once, e.g. in FailingResponder. 97 | # Only look at stream contents we haven't seen yet, to avoid dupes. 98 | index = getattr(self, index_attr) 99 | new = stream[index:] 100 | # Search, across lines if necessary 101 | matches = re.findall(pattern, new, re.S) 102 | # Update seek index if we've matched 103 | if matches: 104 | setattr(self, index_attr, index + len(new)) 105 | return matches 106 | 107 | def submit(self, stream: str) -> Generator[str, None, None]: 108 | # Iterate over findall() response in case >1 match occurred. 109 | for _ in self.pattern_matches(stream, self.pattern, "index"): 110 | yield self.response 111 | 112 | 113 | class FailingResponder(Responder): 114 | """ 115 | Variant of `Responder` which is capable of detecting incorrect responses. 116 | 117 | This class adds a ``sentinel`` parameter to ``__init__``, and its 118 | ``submit`` will raise `.ResponseNotAccepted` if it detects that sentinel 119 | value in the stream. 120 | 121 | .. versionadded:: 1.0 122 | """ 123 | 124 | def __init__(self, pattern: str, response: str, sentinel: str) -> None: 125 | super().__init__(pattern, response) 126 | self.sentinel = sentinel 127 | self.failure_index = 0 128 | self.tried = False 129 | 130 | def submit(self, stream: str) -> Generator[str, None, None]: 131 | # Behave like regular Responder initially 132 | response = super().submit(stream) 133 | # Also check stream for our failure sentinel 134 | failed = self.pattern_matches(stream, self.sentinel, "failure_index") 135 | # Error out if we seem to have failed after a previous response. 136 | if self.tried and failed: 137 | err = 'Auto-response to r"{}" failed with {!r}!'.format( 138 | self.pattern, self.sentinel 139 | ) 140 | raise ResponseNotAccepted(err) 141 | # Once we see that we had a response, take note 142 | if response: 143 | self.tried = True 144 | # Again, behave regularly by default. 
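# (Illustrative usage sketch, not part of the upstream module; assumes `ctx`
# is an invoke Context and that the prompt/sentinel strings match the target
# system:
#     responder = FailingResponder(
#         pattern=r"\[sudo\] password",
#         response="mypassword\n",
#         sentinel="Sorry, try again.",
#     )
#     ctx.run("sudo whoami", watchers=[responder])
# This auto-answers the password prompt and raises ResponseNotAccepted if the
# response is rejected, instead of hanging on the re-prompt.)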
145 | return response 146 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.mypy] 2 | # check_untyped_defs = true 3 | # follow_imports_for_stubs = true 4 | # disallow_any_decorated = true 5 | # disallow_any_generics = true 6 | # disallow_any_unimported = true 7 | # disallow_incomplete_defs = true 8 | # disallow_subclassing_any = true 9 | # disallow_untyped_calls = true 10 | # disallow_untyped_decorators = true 11 | disallow_untyped_defs = true 12 | # enable_error_code = [ 13 | # "redundant-expr", 14 | # "truthy-bool", 15 | # "ignore-without-code", 16 | # "unused-awaitable", 17 | # 18 | exclude = [ 19 | "integration/", "tests/", "setup.py", "sites/www/conf.py", "build/", 20 | ] 21 | ignore_missing_imports = true 22 | # implicit_reexport = False 23 | # no_implicit_optional = true 24 | # pretty = true 25 | # show_column_numbers = true 26 | # show_error_codes = true 27 | # strict_equality = true 28 | warn_incomplete_stub = true 29 | warn_redundant_casts = true 30 | # warn_return_any = true 31 | # warn_unreachable = true 32 | warn_unused_ignores = true 33 | 34 | [[tool.mypy.overrides]] 35 | module = "invoke.vendor.*" 36 | ignore_errors = true 37 | 38 | [[tool.mypy.overrides]] 39 | module = "alabaster" 40 | ignore_missing_imports = true 41 | 42 | [[tool.mypy.overrides]] 43 | module = "icecream" 44 | ignore_missing_imports = true 45 | 46 | [[tool.mypy.overrides]] 47 | module = "invocations" 48 | ignore_missing_imports = true 49 | 50 | [[tool.mypy.overrides]] 51 | module = "pytest_relaxed" 52 | ignore_missing_imports = true 53 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | testpaths = tests 3 | python_files = * 4 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Support setuptools only, distutils has a divergent and more annoying API and 4 | # few folks will lack setuptools. 5 | from setuptools import setup, find_packages 6 | 7 | # Version info -- read without importing 8 | _locals = {} 9 | with open("invoke/_version.py") as fp: 10 | exec(fp.read(), None, _locals) 11 | version = _locals["__version__"] 12 | 13 | exclude = [] 14 | 15 | # Frankenstein long_description 16 | long_description = """ 17 | {} 18 | 19 | For a high level introduction, including example code, please see `our main 20 | project website `_; or for detailed API docs, see `the 21 | versioned API website `_. 
22 | """.format( 23 | open("README.rst").read() 24 | ) 25 | 26 | 27 | setup( 28 | name="invoke", 29 | version=version, 30 | description="Pythonic task execution", 31 | license="BSD", 32 | long_description=long_description, 33 | author="Jeff Forcier", 34 | author_email="jeff@bitprophet.org", 35 | url="https://pyinvoke.org", 36 | project_urls={ 37 | "Docs": "https://docs.pyinvoke.org", 38 | "Source": "https://github.com/pyinvoke/invoke", 39 | "Issues": "https://github.com/pyinvoke/invoke/issues", 40 | "Changelog": "https://www.pyinvoke.org/changelog.html", 41 | "CI": "https://app.circleci.com/pipelines/github/pyinvoke/invoke", 42 | }, 43 | python_requires=">=3.6", 44 | packages=find_packages(exclude=exclude), 45 | include_package_data=True, 46 | entry_points={ 47 | "console_scripts": [ 48 | "invoke = invoke.main:program.run", 49 | "inv = invoke.main:program.run", 50 | ] 51 | }, 52 | classifiers=[ 53 | "Development Status :: 5 - Production/Stable", 54 | "Environment :: Console", 55 | "Intended Audience :: Developers", 56 | "Intended Audience :: System Administrators", 57 | "License :: OSI Approved :: BSD License", 58 | "Operating System :: POSIX", 59 | "Operating System :: Unix", 60 | "Operating System :: MacOS :: MacOS X", 61 | "Operating System :: Microsoft :: Windows", 62 | "Programming Language :: Python", 63 | "Programming Language :: Python :: 3", 64 | "Programming Language :: Python :: 3 :: Only", 65 | "Programming Language :: Python :: 3.6", 66 | "Programming Language :: Python :: 3.7", 67 | "Programming Language :: Python :: 3.8", 68 | "Programming Language :: Python :: 3.9", 69 | "Programming Language :: Python :: 3.10", 70 | "Programming Language :: Python :: 3.11", 71 | "Topic :: Software Development", 72 | "Topic :: Software Development :: Build Tools", 73 | "Topic :: Software Development :: Libraries", 74 | "Topic :: Software Development :: Libraries :: Python Modules", 75 | "Topic :: System :: Software Distribution", 76 | "Topic :: System :: Systems Administration", 77 | ], 78 | ) 79 | -------------------------------------------------------------------------------- /sites/docs/.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: "ubuntu-22.04" 5 | tools: 6 | python: "3.7" 7 | 8 | python: 9 | install: 10 | - requirements: dev-requirements.txt 11 | 12 | sphinx: 13 | configuration: sites/docs/conf.py 14 | -------------------------------------------------------------------------------- /sites/docs/api/__init__.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | ``__init__`` 3 | ============ 4 | 5 | .. automodule:: invoke 6 | -------------------------------------------------------------------------------- /sites/docs/api/collection.rst: -------------------------------------------------------------------------------- 1 | ============== 2 | ``collection`` 3 | ============== 4 | 5 | .. autoclass:: invoke.collection.Collection 6 | :special-members: 7 | :exclude-members: __weakref__, __init__ 8 | -------------------------------------------------------------------------------- /sites/docs/api/config.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | ``config`` 3 | ========== 4 | 5 | .. 
automodule:: invoke.config 6 | -------------------------------------------------------------------------------- /sites/docs/api/context.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | ``context`` 3 | =========== 4 | 5 | .. automodule:: invoke.context 6 | -------------------------------------------------------------------------------- /sites/docs/api/exceptions.rst: -------------------------------------------------------------------------------- 1 | ============== 2 | ``exceptions`` 3 | ============== 4 | 5 | .. automodule:: invoke.exceptions 6 | -------------------------------------------------------------------------------- /sites/docs/api/executor.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | ``executor`` 3 | ============ 4 | 5 | .. autoclass:: invoke.executor.Executor 6 | -------------------------------------------------------------------------------- /sites/docs/api/loader.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | ``loader`` 3 | ========== 4 | 5 | .. autoclass:: invoke.loader.Loader 6 | :special-members: 7 | :exclude-members: __weakref__ 8 | 9 | .. autoclass:: invoke.loader.FilesystemLoader 10 | :special-members: 11 | :exclude-members: __weakref__ 12 | -------------------------------------------------------------------------------- /sites/docs/api/parser.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | ``parser`` 3 | ========== 4 | 5 | The command-line parsing framework is split up into a handful of sub-modules: 6 | 7 | - ``parser.argument`` 8 | - ``parser.context`` (not to be confused with the top level ``context``!) 9 | - ``parser.parser`` 10 | 11 | API docs for all are below. 12 | 13 | .. automodule:: invoke.parser.parser 14 | :member-order: bysource 15 | .. automodule:: invoke.parser.context 16 | .. automodule:: invoke.parser.argument 17 | -------------------------------------------------------------------------------- /sites/docs/api/program.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | ``program`` 3 | =========== 4 | 5 | .. automodule:: invoke.program 6 | -------------------------------------------------------------------------------- /sites/docs/api/runners.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | ``runners`` 3 | =========== 4 | 5 | .. automodule:: invoke.runners 6 | :member-order: bysource 7 | -------------------------------------------------------------------------------- /sites/docs/api/tasks.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | ``tasks`` 3 | ========= 4 | 5 | .. automodule:: invoke.tasks 6 | -------------------------------------------------------------------------------- /sites/docs/api/terminals.rst: -------------------------------------------------------------------------------- 1 | ============= 2 | ``terminals`` 3 | ============= 4 | 5 | .. automodule:: invoke.terminals 6 | -------------------------------------------------------------------------------- /sites/docs/api/util.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | ``util`` 3 | ======== 4 | 5 | .. automodule:: invoke.util 6 | :exclude-members: ExceptionWrapper 7 | 8 | .. 
autoclass:: invoke.util.ExceptionWrapper 9 | :no-members: 10 | -------------------------------------------------------------------------------- /sites/docs/api/watchers.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | ``watchers`` 3 | ============ 4 | 5 | .. automodule:: invoke.watchers 6 | -------------------------------------------------------------------------------- /sites/docs/concepts/loading.rst: -------------------------------------------------------------------------------- 1 | .. _loading-collections: 2 | 3 | =================== 4 | Loading collections 5 | =================== 6 | 7 | The core of Invoke's execution model involves one or more Collection objects. 8 | While these may be created programmatically, in typical use Invoke will create 9 | them for you from Python modules it finds or is told to use. 10 | 11 | 12 | .. _collection-discovery: 13 | 14 | Task module discovery 15 | ===================== 16 | 17 | With no other configuration, simply calling ``invoke`` will look for a single 18 | Python module or package named ``tasks``, and will treat it as the root 19 | namespace. ``tasks`` (or any other name given via :ref:`loading configuration 20 | options `) is searched for in the following ways: 21 | 22 | * First, if a valid tasks module by that name already exists on Python's 23 | `sys.path `_, 24 | no more searching is done -- that module is selected. 25 | * Failing that, search towards the root of the local filesystem, starting with 26 | the user's current working directory (`os.getcwd 27 | `_) and try 28 | importing again with each directory temporarily added to ``sys.path``. 29 | 30 | * Due to how Python's import machinery works, this approach will always 31 | favor a package directory (``tasks/`` containing an ``__init__.py``) over 32 | a module file (``tasks.py``) in the same location. 33 | * If a candidate is found and successfully imported, its parent directory 34 | will **stay** on ``sys.path`` during the rest of the Python session -- 35 | this allows task code to make convenient assumptions concerning sibling 36 | modules' importability. 37 | 38 | Candidate modules/packages are introspected to make sure they can actually be 39 | used as valid task collections. Any that fail are discarded, the ``sys.path`` 40 | munging done to import them is reverted, and the search continues. 41 | 42 | 43 | .. _configuring-loading: 44 | 45 | Configuring the loading process 46 | =============================== 47 | 48 | You can configure the above behavior, requesting that Invoke alter the 49 | collection name searched for and/or the path where filesystem-level loading 50 | starts looking. 51 | 52 | For example, you may already have a project-level ``tasks.py`` that you can't 53 | easily rename; or you may want to host a number of tasks collections stored 54 | outside the project root and make it easy to switch between them; or any number 55 | of reasons. 56 | 57 | Both the sought collection name and the search root can be specified via 58 | :ref:`configuration file options ` or as :doc:`runtime CLI flags 59 | `: 60 | 61 | - **Change the collection name**: Set the ``tasks.collection_name`` 62 | configuration option, or use :option:`--collection`. It should be a Python 63 | module name and not a file name (so ``mytasks``, not ``mytasks.py`` or 64 | ``mytasks/``.) 65 | - **Change the root search path**: Configure ``tasks.search_root`` or use 66 | :option:`--search-root`. This value may be any valid directory path. 
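For example, assuming a hypothetical collection module named ``mytasks`` kept
under ``/srv/automation`` (both names here are purely illustrative), a
project-level ``invoke.yaml`` along these lines would set both options at
once:

.. code-block:: yaml

    tasks:
      collection_name: mytasks
      search_root: /srv/automation

The same effect for a one-off run can be had via the CLI flags mentioned
above::

    $ invoke --collection mytasks --search-root /srv/automation --list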
67 | -------------------------------------------------------------------------------- /sites/docs/concepts/watchers.rst: -------------------------------------------------------------------------------- 1 | .. _autoresponding: 2 | 3 | ========================================== 4 | Automatically responding to program output 5 | ========================================== 6 | 7 | Background 8 | ========== 9 | 10 | Command-line programs tend to be designed for interactive shells, which 11 | frequently manifests as waiting around for user input, or "prompts". 12 | Well-designed programs offer options for pre-empting such prompts, resulting in 13 | an easily automated workflow -- but with the rest, interactivity is 14 | unavoidable. 15 | 16 | Thankfully, Invoke's `.Runner` class not only forwards your standard input to 17 | the running program (allowing you to manually respond to prompts) but it can 18 | also be configured to respond automatically on your behalf. 19 | 20 | Basic use 21 | ========= 22 | 23 | The mechanism for this automation is the ``watchers`` kwarg to the 24 | `.Runner.run` method (and its wrappers elsewhere, such as `.Context.run` and 25 | `invoke.run`), which is a list of `.StreamWatcher`-subclass instances 26 | configured to watch for patterns & respond accordingly. The simplest of these 27 | is `.Responder`, which just replies with its configured response every time its 28 | pattern is seen; others can be found in the :doc:`watchers module 29 | `. 30 | 31 | .. note:: 32 | As with all other arguments to ``run``, you can also set the default set of 33 | watchers globally via :doc:`configuration files `. 34 | 35 | Take for example this program which expects a manual response to a yes/no 36 | prompt:: 37 | 38 | $ excitable-program 39 | When you give the OK, I'm going to do the things. All of them!! 40 | Are you ready? [Y/n] y 41 | OK! I just did all sorts of neat stuff. You're welcome! Bye! 42 | 43 | You *could* call ``run("excitable-program")``, manually watch for the 44 | prompt, and mash Y by hand. But if you instead supply a `.Responder` like so:: 45 | 46 | @task 47 | def always_ready(c): 48 | responder = Responder( 49 | pattern=r"Are you ready? \[Y/n\] ", 50 | response="y\n", 51 | ) 52 | c.run("excitable-program", watchers=[responder]) 53 | 54 | Then `.Runner` passes the program's ``stdout`` and ``stderr`` through 55 | ``responder``, which watches for ``"Are you ready? [Y/n] "`` and automatically 56 | writes ``y`` (plus ``\n`` to simulate hitting Enter/Return) to the program's 57 | ``stdin``. 58 | 59 | .. note:: 60 | The pattern argument to `.Responder` is treated as a `regular expression 61 | `, requiring more care (note how we had to escape our square-brackets 62 | in the above example) but providing more power as well. 
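Detecting failed responses
==========================

Besides the basic `.Responder`, the watchers module also provides
`.FailingResponder`, which accepts an extra ``sentinel`` string and raises
`.ResponseNotAccepted` if that sentinel is detected in the stream -- i.e. the
submitted response apparently wasn't accepted. A sketch for auto-responding to
``sudo`` (the command, password and failure text below are illustrative
only)::

    from invoke import task
    from invoke.watchers import FailingResponder

    @task
    def restart_service(c):
        responder = FailingResponder(
            pattern=r"\[sudo\] password for .*:",
            response="mypassword\n",
            sentinel="Sorry, try again.\n",
        )
        c.run("sudo systemctl restart myapp", watchers=[responder], pty=True)

Catching `.ResponseNotAccepted` (importable from ``invoke.exceptions``) lets
task code re-prompt a human or abort cleanly instead of hanging on an
interactive prompt.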
63 | -------------------------------------------------------------------------------- /sites/docs/conf.py: -------------------------------------------------------------------------------- 1 | # Obtain shared config values 2 | import os, sys 3 | 4 | sys.path.append(os.path.abspath("..")) 5 | sys.path.append(os.path.abspath("../..")) 6 | from shared_conf import * 7 | 8 | # Enable autodoc, intersphinx 9 | extensions.extend(["sphinx.ext.autodoc"]) 10 | 11 | # Autodoc settings 12 | autodoc_default_options = { 13 | "members": True, 14 | "special-members": True, 15 | } 16 | 17 | # Sister-site links to WWW 18 | html_theme_options["extra_nav_links"] = { 19 | "Main website": "https://www.pyinvoke.org" 20 | } 21 | -------------------------------------------------------------------------------- /sites/docs/index.rst: -------------------------------------------------------------------------------- 1 | ================================== 2 | Welcome to Invoke's documentation! 3 | ================================== 4 | 5 | This site covers Invoke's conceptual & API documentation. For basic info on 6 | what Invoke is, including its public changelog & how the project is maintained, 7 | please see `the main project website `_. 8 | 9 | Getting started 10 | --------------- 11 | 12 | Many core ideas & API calls are explained in the tutorial/getting-started 13 | document: 14 | 15 | .. toctree:: 16 | :maxdepth: 2 17 | 18 | getting-started 19 | 20 | 21 | The ``invoke`` CLI tool 22 | ----------------------- 23 | 24 | Details on the CLI interface to Invoke, available core flags, and tab 25 | completion options. 26 | 27 | .. toctree:: 28 | invoke 29 | 30 | 31 | Concepts 32 | -------- 33 | 34 | Dig deeper into specific topics: 35 | 36 | .. toctree:: 37 | :maxdepth: 2 38 | :glob: 39 | 40 | concepts/* 41 | 42 | .. _api: 43 | 44 | API 45 | --- 46 | 47 | Know what you're looking for & just need API details? View our auto-generated 48 | API documentation: 49 | 50 | .. 
toctree:: 51 | :maxdepth: 1 52 | :glob: 53 | 54 | api/* 55 | -------------------------------------------------------------------------------- /sites/shared_conf.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from os.path import abspath, join, dirname 3 | 4 | import alabaster 5 | 6 | 7 | # Alabaster theme + mini-extension 8 | html_theme_path = [alabaster.get_path()] 9 | extensions = ["alabaster", "sphinx.ext.intersphinx", "sphinx.ext.doctest"] 10 | # Paths relative to invoking conf.py - not this shared file 11 | html_theme = "alabaster" 12 | html_theme_options = { 13 | "description": "Pythonic task execution", 14 | "github_user": "pyinvoke", 15 | "github_repo": "invoke", 16 | "analytics_id": "UA-18486793-3", 17 | "travis_button": False, # No longer on Travis-CI; README buttons link to Circle 18 | "codecov_button": False, # Now a README button 19 | "tidelift_url": "https://tidelift.com/subscription/pkg/pypi-invoke?utm_source=pypi-invoke&utm_medium=referral&utm_campaign=docs", # noqa 20 | } 21 | html_sidebars = { 22 | "**": ["about.html", "navigation.html", "searchbox.html", "donate.html"] 23 | } 24 | 25 | # Everything intersphinx's to Python 26 | intersphinx_mapping = {"python": ("https://docs.python.org/2.7/", None)} 27 | 28 | # Doctest settings 29 | doctest_path = [abspath(join(dirname(__file__), "..", "tests"))] 30 | doctest_global_setup = r""" 31 | from _util import MockSubprocess 32 | """ 33 | 34 | # Regular settings 35 | project = "Invoke" 36 | year = datetime.now().year 37 | copyright = "{} Jeff Forcier".format(year) 38 | master_doc = "index" 39 | templates_path = ["_templates"] 40 | exclude_trees = ["_build"] 41 | source_suffix = ".rst" 42 | default_role = "obj" 43 | -------------------------------------------------------------------------------- /sites/www/.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: "ubuntu-22.04" 5 | tools: 6 | python: "3.7" 7 | 8 | python: 9 | install: 10 | - requirements: dev-requirements.txt 11 | 12 | sphinx: 13 | configuration: sites/www/conf.py 14 | -------------------------------------------------------------------------------- /sites/www/conf.py: -------------------------------------------------------------------------------- 1 | # Obtain shared config values 2 | import sys 3 | import os 4 | from os.path import abspath, join, dirname 5 | 6 | sys.path.append(abspath(join(dirname(__file__), ".."))) 7 | from shared_conf import * 8 | 9 | # Releases changelog extension 10 | extensions.append("releases") 11 | releases_github_path = "pyinvoke/invoke" 12 | 13 | # Default is 'local' building, but reference the public docs site when building 14 | # under RTD. 
15 | target = join(dirname(__file__), "..", "docs", "_build") 16 | if os.environ.get("READTHEDOCS") == "True": 17 | target = "https://docs.pyinvoke.org/en/latest/" 18 | intersphinx_mapping["docs"] = (target, None) 19 | 20 | # Sister-site links to documentation 21 | html_theme_options["extra_nav_links"] = { 22 | "Documentation": "https://docs.pyinvoke.org" 23 | } 24 | -------------------------------------------------------------------------------- /sites/www/contact.rst: -------------------------------------------------------------------------------- 1 | Contact 2 | ======= 3 | 4 | You can get in touch with the developer & user community in any of the 5 | following ways: 6 | 7 | * Bug reports and feature requests: first read `contribution-guide.org 8 | `_, then check out our `GitHub page 9 | `_. 10 | * Blog posts: https://bitprophet.org/categories/invoke/ 11 | * Twitter: you've got a few options here: 12 | 13 | * `@bitprophet `_ is the canonical source 14 | for updates, but is also the developer's personal account (hint: you can 15 | turn off retweets and only see original content!) 16 | * `@pyfabric `_ is a much lower-traffic, 17 | announcement-only account that also serves the `Fabric 18 | `_ project; given how much Fabric is built directly 19 | on top of Invoke, many of the posts will be relevant to Invoke-only 20 | users. 21 | * `@pyinvoke `_ was set up for 22 | Invoke-specific announcements, but it only has a dozen followers so we've 23 | unfortunately let it languish. Should we automate our release process 24 | further, this account may get posts again, and we'll update this page 25 | accordingly. 26 | -------------------------------------------------------------------------------- /sites/www/development.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | Development 3 | =========== 4 | 5 | Obtaining a source checkout 6 | =========================== 7 | 8 | Our Git repository is maintained on Github at `pyinvoke/invoke`_. Please 9 | follow their instructions for cloning (or forking, then cloning, which is best 10 | if you intend to contribute back) the repository there. 11 | 12 | Once downloaded, install the repo itself + its development dependencies by 13 | running ``pip install -r dev-requirements.txt``. 14 | 15 | 16 | Submitting bug reports or patches 17 | ================================= 18 | 19 | We follow `contribution-guide.org`_ for all of our development - please `go 20 | there`_ for details on submitting patches, which branch(es) to work out of, 21 | and so on. Our issue tracker is on `our GitHub page`_. 22 | 23 | Changelog location 24 | ================== 25 | 26 | Invoke's changelog lives in ``sites/www/changelog.rst`` and is formatted 27 | using the `Releases `_ Sphinx plugin. 28 | 29 | Running management tasks 30 | ======================== 31 | 32 | Invoke uses itself for project management and has a number of tasks you can 33 | see with ``inv --list``. Some specific tasks of note: 34 | 35 | * ``test`` and ``integration``: Runs the primary and integration test 36 | suites, respectively. (Most of the time you can ignore ``integration`` - 37 | it's mostly for use by CI systems or once-in-a-while sanity checks 38 | locally.) 39 | * ``www`` and ``docs`` (and their subtasks like ``docs.browse``): Builds 40 | the WWW site and the API docs, respectively. 
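Putting the above together, a typical local workflow (using only commands
already mentioned on this page) might look like::

    $ pip install -r dev-requirements.txt
    $ inv --list
    $ inv test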
41 | 42 | Another good resource is to skim our ``.travis.yml`` file for the commands it 43 | executes - if submissions don't pass all of those commands to some degree, they 44 | won't pass Travis' CI builds either! 45 | 46 | 47 | .. _go there: 48 | .. _contribution-guide.org: https://contribution-guide.org 49 | 50 | .. _our GitHub page: 51 | .. _pyinvoke/invoke: https://github.com/pyinvoke/invoke 52 | -------------------------------------------------------------------------------- /sites/www/index.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../../README.rst 2 | 3 | This website covers project information for Invoke such as the changelog, 4 | contribution guidelines, development roadmap, news/blog, and so forth. 5 | Detailed usage and API documentation can be found at our code documentation 6 | site, `docs.pyinvoke.org `_. 7 | 8 | Please see below for a high level intro, or the navigation on the left for the 9 | rest of the site content. 10 | 11 | What is Invoke? 12 | --------------- 13 | 14 | * Like Ruby's Rake tool and Invoke's own predecessor Fabric 1.x, it provides a 15 | clean, high level API for running shell commands and defining/organizing 16 | task functions from a ``tasks.py`` file: 17 | 18 | .. code-block:: python 19 | 20 | from invoke import task 21 | 22 | @task 23 | def clean(c, docs=False, bytecode=False, extra=''): 24 | patterns = ['build'] 25 | if docs: 26 | patterns.append('docs/_build') 27 | if bytecode: 28 | patterns.append('**/*.pyc') 29 | if extra: 30 | patterns.append(extra) 31 | for pattern in patterns: 32 | c.run("rm -rf {}".format(pattern)) 33 | 34 | @task 35 | def build(c, docs=False): 36 | c.run("python setup.py build") 37 | if docs: 38 | c.run("sphinx-build docs docs/_build") 39 | 40 | * From GNU Make, it inherits an emphasis on minimal boilerplate for common 41 | patterns and the ability to run multiple tasks in a single invocation:: 42 | 43 | $ invoke clean build 44 | 45 | * Where Fabric 1.x considered the command-line approach the default mode of 46 | use, Invoke (and tools built on it) are equally at home embedded in your own 47 | Python code or a REPL: 48 | 49 | .. testsetup:: blurb 50 | 51 | fakeout = """ 52 | Hello, this is pip 53 | Installing is fun 54 | Fake output is fake 55 | Successfully installed invocations-0.13.0 pep8-1.5.7 spec-1.3.1 56 | """ 57 | proc = MockSubprocess(out=fakeout, exit=0) 58 | 59 | .. testcleanup:: blurb 60 | 61 | proc.stop() 62 | 63 | .. doctest:: blurb 64 | 65 | >>> from invoke import run 66 | >>> cmd = "pip install -r requirements.txt" 67 | >>> result = run(cmd, hide=True, warn=True) 68 | >>> print(result.ok) 69 | True 70 | >>> print(result.stdout.splitlines()[-1]) 71 | Successfully installed invocations-0.13.0 pep8-1.5.7 spec-1.3.1 72 | 73 | * Following the lead of most Unix CLI applications, it offers a traditional 74 | flag-based style of command-line parsing, deriving flag names and value types 75 | from task signatures (optionally, of course!):: 76 | 77 | $ invoke clean --docs --bytecode build --docs --extra='**/*.pyo' 78 | $ invoke clean -d -b build --docs -e '**/*.pyo' 79 | $ invoke clean -db build -de '**/*.pyo' 80 | 81 | * Like many of its predecessors, it offers advanced features as well -- 82 | namespacing, task aliasing, before/after hooks, parallel execution and more. 83 | 84 | 85 | .. 
toctree:: 86 | :hidden: 87 | 88 | changelog 89 | FAQs 90 | installing 91 | development 92 | prior-art 93 | contact 94 | -------------------------------------------------------------------------------- /sites/www/installing.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | Installing 3 | ========== 4 | 5 | Basic installation 6 | ================== 7 | 8 | The recommended way to get Invoke is to **install the latest stable release** 9 | via `pip `_:: 10 | 11 | $ pip install invoke 12 | 13 | We currently support **Python 3.6+**. Users still on Python 3.5 or older are 14 | urged to upgrade. 15 | 16 | As long as you have a supported Python interpreter, **there are no other 17 | dependencies**. Invoke is pure-Python, and contains copies of its few 18 | dependencies within its source tree. 19 | 20 | .. note:: 21 | See `this blog post 22 | `_ for background on 23 | our decision to vendorize dependencies. 24 | 25 | .. seealso:: 26 | :doc:`development` for details on source control checkouts / unstable 27 | versions. 28 | -------------------------------------------------------------------------------- /sites/www/prior-art.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | Prior art 3 | ========= 4 | 5 | Why another task-running/subprocess-spawning Python library? As usual, the 6 | short answer is "there were already great 80-90% solutions out there, but none 7 | that fit our needs 100%." Specifically: 8 | 9 | - **Multiple tasks at once** - almost no other Python command-line oriented 10 | libraries allow for invocations like:: 11 | 12 | runner --core-opts task1 --task1-opts task2 --task2-opts 13 | 14 | and the few that do have half-baked implementations of the feature or are 15 | lacking in other ways. 16 | - **Ability to mirror and capture subprocess output simultaneously** (in 17 | addition to everything flowing from that, like the ability to transparently 18 | auto-respond) - the standard library's ``subprocess`` can't do this and most 19 | other tools choose one or the other, or have other tradeoffs such as not 20 | supporting (or *only* supporting!) pseudoterminals. 21 | - **Simplicity** - tools that try to do many things often suffer for it due to 22 | lack of focus. We wanted to build something clean and simple that just did 23 | one thing (ok...two things) well. 24 | - **Customizability/control** - Invoke was designed to work well with (and be a 25 | foundation for) other tools such as `Fabric `_'s second 26 | version, and we felt that the work needed to adapt existing tools towards 27 | this goal would impede progress. 28 | 29 | Some of the pre-existing solutions in this space in the Python world include: 30 | 31 | - `Argh `_: One of the more 32 | appealing options, but being built on argparse it doesn't support the 33 | multi-task invocation we needed. Also has its own "prior art" list which is 34 | worth your time. 35 | - `Baker `_: Nice and simple, but 36 | unfortunately too much so for our needs. 37 | - `Paver `_: Tries to do too much, clunky API, 38 | user-hostile error messages, multi-task feature existed but was lacking. 39 | - `Argparse `_: The modern gold 40 | standard for CLI parsing (albeit without command execution). Unfortunately, 41 | we were unable to get multiple tasks working despite lots of experimentation. 42 | Multiple tasks with their own potentially overlapping argument names, simply 43 | doesn't mesh with how ``argparse`` thinks about the command line. 
44 | - `Click `_: is actually not pre-existing 45 | (Invoke's first public releases predate Click by a number of years) but it 46 | deserves mention anyway, as it's become popular in this particular niche. 47 | -------------------------------------------------------------------------------- /tasks.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import TYPE_CHECKING, Optional 3 | 4 | from invoke import Collection, task, Exit 5 | 6 | from invocations import ci, checks 7 | from invocations.docs import docs, www, sites, watch_docs 8 | from invocations.pytest import coverage as coverage_, test as test_ 9 | from invocations.packaging import vendorize, release 10 | 11 | if TYPE_CHECKING: 12 | from invoke import Context 13 | 14 | 15 | @task 16 | def test( 17 | c: "Context", 18 | verbose: bool = False, 19 | color: bool = True, 20 | capture: str = "no", 21 | module: Optional[str] = None, 22 | k: Optional[str] = None, 23 | x: bool = False, 24 | opts: str = "", 25 | pty: bool = True, 26 | ) -> None: 27 | """ 28 | Run pytest. See `invocations.pytest.test` for details. 29 | 30 | This is a simple wrapper around the abovementioned task, which makes a 31 | couple minor defaults changes appropriate for this particular test suite, 32 | such as: 33 | 34 | - setting ``capture=no`` instead of ``capture=sys``, as we do a very large 35 | amount of subprocess IO testing that even the ``sys`` capture screws up 36 | - setting ``verbose=False`` because we have a large number of tests and 37 | skipping verbose output by default is a ~20% time savings.) 38 | """ 39 | # TODO: update test suite to use c.config.run.in_stream = False globally. 40 | # somehow. 41 | test_( 42 | c, 43 | verbose=verbose, 44 | color=color, 45 | capture=capture, 46 | module=module, 47 | k=k, 48 | x=x, 49 | opts=opts, 50 | pty=pty, 51 | ) 52 | 53 | 54 | # TODO: replace with invocations' once the "call truly local tester" problem is 55 | # solved (see other TODOs). For now this is just a copy/paste/modify. 56 | @task(help=test.help) # type: ignore 57 | def integration( 58 | c: "Context", opts: Optional[str] = None, pty: bool = True 59 | ) -> None: 60 | """ 61 | Run the integration test suite. May be slow! 62 | """ 63 | # Abort if no default shell on this system - implies some unusual dev 64 | # environment. Certain entirely-standalone tests will fail w/o it, even if 65 | # tests honoring config overrides (like the unit-test suite) don't. 66 | shell = c.config.global_defaults()["run"]["shell"] 67 | if not c.run("which {}".format(shell), hide=True, warn=True): 68 | err = "No {} on this system - cannot run integration tests! Try a container?" # noqa 69 | raise Exit(err.format(shell)) 70 | opts = opts or "" 71 | opts += " integration/" 72 | test(c, opts=opts, pty=pty) 73 | 74 | 75 | @task 76 | def coverage( 77 | c: "Context", report: str = "term", opts: str = "", codecov: bool = False 78 | ) -> None: 79 | """ 80 | Run pytest in coverage mode. See `invocations.pytest.coverage` for details. 81 | """ 82 | # Use our own test() instead of theirs. 83 | # Also add integration test so this always hits both. 
84 | # (Not regression, since that's "weird" / doesn't really hit any new 85 | # coverage points) 86 | coverage_( 87 | c, 88 | report=report, 89 | opts=opts, 90 | tester=test, 91 | additional_testers=[integration], 92 | codecov=codecov, 93 | ) 94 | 95 | 96 | @task 97 | def regression(c: "Context", jobs: int = 8) -> None: 98 | """ 99 | Run an expensive, hard-to-test-in-pytest run() regression checker. 100 | 101 | :param int jobs: Number of jobs to run, in total. Ideally num of CPUs. 102 | """ 103 | os.chdir("integration/_support") 104 | cmd = "seq {} | parallel -n0 --halt=now,fail=1 inv -c regression check" 105 | c.run(cmd.format(jobs)) 106 | 107 | 108 | ns = Collection( 109 | test, 110 | coverage, 111 | integration, 112 | regression, 113 | vendorize, 114 | release, 115 | www, 116 | docs, 117 | sites, 118 | watch_docs, 119 | ci, 120 | checks.blacken, 121 | checks, 122 | ) 123 | ns.configure( 124 | { 125 | "blacken": { 126 | # Skip vendor, build dirs when blackening. 127 | # TODO: this is making it seem like I really do want an explicit 128 | # arg/conf-opt in the blacken task for "excluded paths"...ha 129 | r"find_opts": "-and -not \\( -path './invoke/vendor*' -or -path './build*' \\)" # noqa 130 | }, 131 | "packaging": { 132 | "wheel": True, 133 | "check_desc": True, 134 | "changelog_file": os.path.join( 135 | www.configuration()["sphinx"]["source"], "changelog.rst" 136 | ), 137 | }, 138 | } 139 | ) 140 | -------------------------------------------------------------------------------- /tests/_support/alias_sorting.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task(aliases=("z", "a")) 5 | def toplevel(c): 6 | pass 7 | -------------------------------------------------------------------------------- /tests/_support/autoprint.py: -------------------------------------------------------------------------------- 1 | from invoke.tasks import task 2 | from invoke.collection import Collection 3 | 4 | 5 | @task 6 | def nope(c): 7 | return "You can't see this" 8 | 9 | 10 | @task(autoprint=True) 11 | def yup(c): 12 | return "It's alive!" 
13 | 14 | 15 | @task(pre=[yup]) 16 | def pre_check(c): 17 | pass 18 | 19 | 20 | @task(post=[yup]) 21 | def post_check(c): 22 | pass 23 | 24 | 25 | sub = Collection("sub", yup) 26 | ns = Collection(nope, yup, pre_check, post_check, sub) 27 | -------------------------------------------------------------------------------- /tests/_support/branch/explicit.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def lyrics(c): 6 | print("Don't swear!") 7 | -------------------------------------------------------------------------------- /tests/_support/branch/tasks.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def alt_root(c): 6 | print("Down with the alt-root!") 7 | -------------------------------------------------------------------------------- /tests/_support/configs/all-four/invoke.json: -------------------------------------------------------------------------------- 1 | {"json-only": "whee", "shared": "json-value"} 2 | -------------------------------------------------------------------------------- /tests/_support/configs/all-four/invoke.py: -------------------------------------------------------------------------------- 1 | shared = "python-value" 2 | python_only = "heh" 3 | -------------------------------------------------------------------------------- /tests/_support/configs/all-four/invoke.yaml: -------------------------------------------------------------------------------- 1 | 'yaml-only': "yup" 2 | shared: "yaml-value" 3 | -------------------------------------------------------------------------------- /tests/_support/configs/all-four/invoke.yml: -------------------------------------------------------------------------------- 1 | 'yml-only': "yup" 2 | shared: "yml-value" 3 | -------------------------------------------------------------------------------- /tests/_support/configs/collection.py: -------------------------------------------------------------------------------- 1 | from invoke import ctask, Collection 2 | 3 | 4 | @ctask 5 | def go(c): 6 | c.run("false") # Ensures a kaboom if mocking fails 7 | 8 | 9 | ns = Collection(go) 10 | ns.configure({"run": {"echo": True}}) 11 | -------------------------------------------------------------------------------- /tests/_support/configs/echo.yaml: -------------------------------------------------------------------------------- 1 | run: 2 | echo: true 3 | -------------------------------------------------------------------------------- /tests/_support/configs/json-and-python/invoke.json: -------------------------------------------------------------------------------- 1 | {"json-only": "whee", "shared": "json-value"} 2 | -------------------------------------------------------------------------------- /tests/_support/configs/json-and-python/invoke.py: -------------------------------------------------------------------------------- 1 | shared = "python-value" 2 | python_only = "heh" 3 | -------------------------------------------------------------------------------- /tests/_support/configs/json/invoke.json: -------------------------------------------------------------------------------- 1 | {"outer": {"inner": {"hooray": "json"}}} 2 | -------------------------------------------------------------------------------- /tests/_support/configs/nested/invoke.yaml: -------------------------------------------------------------------------------- 1 | outer: 2 | inner: 3 | hooray: "yaml" 4 
| -------------------------------------------------------------------------------- /tests/_support/configs/no-dedupe.yaml: -------------------------------------------------------------------------------- 1 | tasks: 2 | dedupe: false 3 | -------------------------------------------------------------------------------- /tests/_support/configs/no-echo.yaml: -------------------------------------------------------------------------------- 1 | run: 2 | echo: false 3 | -------------------------------------------------------------------------------- /tests/_support/configs/package/invoke.yml: -------------------------------------------------------------------------------- 1 | outer: 2 | inner: 3 | hooray: "package" 4 | -------------------------------------------------------------------------------- /tests/_support/configs/package/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def mytask(c): 6 | assert c.outer.inner.hooray == "package" 7 | -------------------------------------------------------------------------------- /tests/_support/configs/python/invoke.py: -------------------------------------------------------------------------------- 1 | outer = {"inner": {"hooray": "python"}} 2 | -------------------------------------------------------------------------------- /tests/_support/configs/runtime.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def mytask(c): 6 | assert c.outer.inner.hooray == "yaml" 7 | -------------------------------------------------------------------------------- /tests/_support/configs/three-of-em/invoke.json: -------------------------------------------------------------------------------- 1 | {"json-only": "whee", "shared": "json-value"} 2 | -------------------------------------------------------------------------------- /tests/_support/configs/three-of-em/invoke.py: -------------------------------------------------------------------------------- 1 | shared = "python-value" 2 | python_only = "heh" 3 | -------------------------------------------------------------------------------- /tests/_support/configs/three-of-em/invoke.yml: -------------------------------------------------------------------------------- 1 | 'yml-only': "yup" 2 | shared: "yml-value" 3 | -------------------------------------------------------------------------------- /tests/_support/configs/underscores/invoke.yaml: -------------------------------------------------------------------------------- 1 | tasks: 2 | auto_dash_names: false 3 | -------------------------------------------------------------------------------- /tests/_support/configs/underscores/tasks.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def i_have_underscores(c): 6 | pass 7 | -------------------------------------------------------------------------------- /tests/_support/configs/yaml/explicit.py: -------------------------------------------------------------------------------- 1 | from invoke import task, Collection 2 | 3 | 4 | @task 5 | def mytask(c): 6 | assert c.outer.inner.hooray == "yaml" 7 | 8 | 9 | ns = Collection(mytask) 10 | -------------------------------------------------------------------------------- /tests/_support/configs/yaml/invoke.yaml: -------------------------------------------------------------------------------- 1 | outer: 2 | inner: 3 | hooray: 
"yaml" 4 | -------------------------------------------------------------------------------- /tests/_support/configs/yaml/tasks.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def mytask(c): 6 | assert c.outer.inner.hooray == "yaml" 7 | -------------------------------------------------------------------------------- /tests/_support/configs/yml/explicit.py: -------------------------------------------------------------------------------- 1 | from invoke import task, Collection 2 | 3 | 4 | @task 5 | def mytask(c): 6 | assert c.outer.inner.hooray == "yml" 7 | 8 | 9 | ns = Collection(mytask) 10 | -------------------------------------------------------------------------------- /tests/_support/configs/yml/invoke.yml: -------------------------------------------------------------------------------- 1 | outer: 2 | inner: 3 | hooray: "yml" 4 | -------------------------------------------------------------------------------- /tests/_support/configs/yml/tasks.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def mytask(c): 6 | assert c.outer.inner.hooray == "yml" 7 | -------------------------------------------------------------------------------- /tests/_support/contextualized.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def go(c): 6 | return c 7 | 8 | 9 | @task 10 | def check_warn(c): 11 | # default: False 12 | assert c.config.run.warn is True 13 | 14 | 15 | @task 16 | def check_pty(c): 17 | # default: False 18 | assert c.config.run.pty is True 19 | 20 | 21 | @task 22 | def check_hide(c): 23 | # default: None 24 | assert c.config.run.hide == "both" 25 | 26 | 27 | @task 28 | def check_echo(c): 29 | # default: False 30 | assert c.config.run.echo is True 31 | -------------------------------------------------------------------------------- /tests/_support/custom_executor.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import Mock 2 | 3 | 4 | CustomExecutor = Mock() 5 | -------------------------------------------------------------------------------- /tests/_support/debugging.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | from invoke.util import debug 3 | 4 | 5 | @task 6 | def foo(c): 7 | debug("my-sentinel") 8 | -------------------------------------------------------------------------------- /tests/_support/decorator_multi_default.py: -------------------------------------------------------------------------------- 1 | from invoke.tasks import task 2 | 3 | 4 | @task(default=True) 5 | def foo(c): 6 | pass 7 | 8 | 9 | @task(default=True) 10 | def biz(c): 11 | pass 12 | -------------------------------------------------------------------------------- /tests/_support/decorators.py: -------------------------------------------------------------------------------- 1 | from invoke.tasks import task 2 | 3 | 4 | @task(aliases=("bar", "otherbar")) 5 | def foo(c): 6 | """ 7 | Foo the bar. 
8 | """ 9 | pass 10 | 11 | 12 | @task 13 | def foo2(c): 14 | """ 15 | Foo the bar: 16 | 17 | example code 18 | 19 | Added in 1.0 20 | """ 21 | pass 22 | 23 | 24 | @task 25 | def foo3(c): 26 | """Foo the other bar: 27 | 28 | example code 29 | 30 | Added in 1.1 31 | """ 32 | pass 33 | 34 | 35 | @task(default=True) 36 | def biz(c): 37 | pass 38 | 39 | 40 | @task(help={"why": "Motive", "who": "Who to punch"}) 41 | def punch(c, who, why): 42 | pass 43 | 44 | 45 | @task(positional=["pos"]) 46 | def one_positional(c, pos, nonpos): 47 | pass 48 | 49 | 50 | @task(positional=["pos1", "pos2"]) 51 | def two_positionals(c, pos1, pos2, nonpos): 52 | pass 53 | 54 | 55 | @task 56 | def implicit_positionals(c, pos1, pos2, nonpos=None): 57 | pass 58 | 59 | 60 | @task(optional=["myopt"]) 61 | def optional_values(c, myopt): 62 | pass 63 | 64 | 65 | @task(iterable=["mylist"]) 66 | def iterable_values(c, mylist=None): 67 | pass 68 | 69 | 70 | @task(incrementable=["verbose"]) 71 | def incrementable_values(c, verbose=None): 72 | pass 73 | -------------------------------------------------------------------------------- /tests/_support/deeper_ns_list.py: -------------------------------------------------------------------------------- 1 | from invoke import task, Collection 2 | 3 | 4 | @task 5 | def toplevel(c): 6 | pass 7 | 8 | 9 | @task 10 | def subtask(c): 11 | pass 12 | 13 | 14 | ns = Collection( 15 | toplevel, Collection("a", subtask, Collection("nother", subtask)) 16 | ) 17 | -------------------------------------------------------------------------------- /tests/_support/depth_first.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def clean_html(c): 6 | print("Cleaning HTML") 7 | 8 | 9 | @task 10 | def clean_tgz(c): 11 | print("Cleaning .tar.gz files") 12 | 13 | 14 | @task(clean_html, clean_tgz) 15 | def clean(c): 16 | print("Cleaned everything") 17 | 18 | 19 | @task 20 | def makedirs(c): 21 | print("Making directories") 22 | 23 | 24 | @task(clean, makedirs) 25 | def build(c): 26 | print("Building") 27 | 28 | 29 | @task 30 | def pretest(c): 31 | print("Preparing for testing") 32 | 33 | 34 | @task(pretest) 35 | def test(c): 36 | print("Testing") 37 | 38 | 39 | @task(build, post=[test]) 40 | def deploy(c): 41 | print("Deploying") 42 | -------------------------------------------------------------------------------- /tests/_support/docstrings.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def no_docstring(c): 6 | pass 7 | 8 | 9 | @task 10 | def one_line(c): 11 | """foo""" 12 | 13 | 14 | @task 15 | def two_lines(c): 16 | """foo 17 | bar 18 | """ 19 | 20 | 21 | @task 22 | def leading_whitespace(c): 23 | """ 24 | foo 25 | """ 26 | 27 | 28 | @task(aliases=("a", "b")) 29 | def with_aliases(c): 30 | """foo""" 31 | -------------------------------------------------------------------------------- /tests/_support/empty.py: -------------------------------------------------------------------------------- 1 | # Yup. 
2 | -------------------------------------------------------------------------------- /tests/_support/empty_subcollection.py: -------------------------------------------------------------------------------- 1 | from invoke import task, Collection 2 | 3 | 4 | @task 5 | def dummy(c): 6 | pass 7 | 8 | 9 | ns = Collection(dummy, Collection("subcollection")) 10 | -------------------------------------------------------------------------------- /tests/_support/explicit_root.py: -------------------------------------------------------------------------------- 1 | """ 2 | EXPLICIT LYRICS 3 | """ 4 | 5 | from invoke import task, Collection 6 | 7 | 8 | @task(aliases=["other_top"]) 9 | def top_level(c): 10 | pass 11 | 12 | 13 | @task(aliases=["other_sub"], default=True) 14 | def sub_task(c): 15 | pass 16 | 17 | 18 | sub = Collection("sub_level", sub_task) 19 | ns = Collection(top_level, sub) 20 | -------------------------------------------------------------------------------- /tests/_support/foo.py: -------------------------------------------------------------------------------- 1 | from invoke.tasks import task 2 | 3 | 4 | @task 5 | def mytask(c): 6 | pass 7 | 8 | 9 | @task 10 | def basic_arg(c, arg="val"): 11 | pass 12 | 13 | 14 | @task 15 | def multiple_args(c, arg1="val1", otherarg="val2"): 16 | pass 17 | 18 | 19 | @task 20 | def basic_bool(c, mybool=True): 21 | pass 22 | -------------------------------------------------------------------------------- /tests/_support/has_modules.py: -------------------------------------------------------------------------------- 1 | # Not picklable! 2 | import os # noqa 3 | -------------------------------------------------------------------------------- /tests/_support/ignoreme/ignoremetoo/.no: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pyinvoke/invoke/65dd896d93994d423feb46313f651ae8a021c6d7/tests/_support/ignoreme/ignoremetoo/.no -------------------------------------------------------------------------------- /tests/_support/integration.py: -------------------------------------------------------------------------------- 1 | """ 2 | A semi-integration-test style fixture spanning multiple feature examples. 3 | 4 | If we're being honest, though, the new 'tree' fixture package is a lot bigger. 
5 | """ 6 | 7 | from invoke.tasks import task 8 | 9 | 10 | @task 11 | def print_foo(c): 12 | print("foo") 13 | 14 | 15 | @task 16 | def print_name(c, name): 17 | print(name) 18 | 19 | 20 | @task 21 | def print_underscored_arg(c, my_option): 22 | print(my_option) 23 | 24 | 25 | @task 26 | def foo(c): 27 | print("foo") 28 | 29 | 30 | @task(foo) 31 | def bar(c): 32 | print("bar") 33 | 34 | 35 | @task 36 | def post2(c): 37 | print("post2") 38 | 39 | 40 | @task(post=[post2]) 41 | def post1(c): 42 | print("post1") 43 | 44 | 45 | @task(foo, bar, post=[post1, post2]) 46 | def biz(c): 47 | print("biz") 48 | 49 | 50 | @task(bar, foo, post=[post2, post1]) 51 | def boz(c): 52 | print("boz") 53 | -------------------------------------------------------------------------------- /tests/_support/namespacing.py: -------------------------------------------------------------------------------- 1 | from invoke import Collection, task, call 2 | 3 | from subspace import module 4 | 5 | 6 | @task 7 | def top_pre(c): 8 | pass 9 | 10 | 11 | @task(call(top_pre)) 12 | def toplevel(c): 13 | pass 14 | 15 | 16 | ns = Collection(module, toplevel) 17 | -------------------------------------------------------------------------------- /tests/_support/nontrivial_docstrings.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def no_docstring(c): 6 | pass 7 | 8 | 9 | @task 10 | def task_one(c): 11 | """ 12 | Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam id dictum 13 | 14 | risus. Nulla lorem justo, sagittis in volutpat eget 15 | """ 16 | 17 | 18 | @task 19 | def task_two(c): 20 | """ 21 | Nulla eget ultrices ante. Curabitur sagittis commodo posuere. Duis dapibus 22 | 23 | facilisis, lacus et dapibus rutrum, lectus turpis egestas dui 24 | """ 25 | -------------------------------------------------------------------------------- /tests/_support/oops.py: -------------------------------------------------------------------------------- 1 | import modulethatdoesnotexistohnoes # noqa 2 | -------------------------------------------------------------------------------- /tests/_support/package/__init__.py: -------------------------------------------------------------------------------- 1 | from invoke import Collection 2 | 3 | from . 
import module 4 | 5 | ns = Collection(module) 6 | -------------------------------------------------------------------------------- /tests/_support/package/module.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def mytask(c): 6 | pass 7 | -------------------------------------------------------------------------------- /tests/_support/simple_ns_list.py: -------------------------------------------------------------------------------- 1 | from invoke import task, Collection 2 | 3 | 4 | @task 5 | def z_toplevel(c): 6 | pass 7 | 8 | 9 | @task 10 | def subtask(c): 11 | pass 12 | 13 | 14 | ns = Collection(z_toplevel, Collection("a", Collection("b", subtask))) 15 | -------------------------------------------------------------------------------- /tests/_support/subcollection_task_name.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task(name="explicit_name") 5 | def implicit_name(c): 6 | pass 7 | -------------------------------------------------------------------------------- /tests/_support/subspace/__init__.py: -------------------------------------------------------------------------------- 1 | from invoke import Collection 2 | 3 | from . import module 4 | 5 | ns = Collection(module) 6 | -------------------------------------------------------------------------------- /tests/_support/subspace/module.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def mytask(c): 6 | pass 7 | -------------------------------------------------------------------------------- /tests/_support/sudo_prompt.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | 4 | @task 5 | def expect_config(c): 6 | password = c.config.sudo.password 7 | assert password == "mypassword", "Got {!r}".format(password) 8 | -------------------------------------------------------------------------------- /tests/_support/tasks.py: -------------------------------------------------------------------------------- 1 | from invoke.tasks import task 2 | 3 | 4 | @task 5 | def foo(c): 6 | print("Hm") 7 | 8 | 9 | @task 10 | def noop(c): 11 | pass 12 | -------------------------------------------------------------------------------- /tests/_support/tree.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "tree", 3 | "help": null, 4 | "tasks": [ 5 | { 6 | "name": "shell", 7 | "aliases": ["ipython"], 8 | "help": "Load a REPL with project state already set up." 9 | }, 10 | { 11 | "name": "test", 12 | "aliases": ["run-tests"], 13 | "help": "Run the test suite with baked-in args." 14 | } 15 | ], 16 | "default": "test", 17 | "collections": [ 18 | { 19 | "name": null, 20 | "help": "Tasks for compiling static code and assets.", 21 | "tasks": [ 22 | { 23 | "name": "all", 24 | "aliases": ["everything"], 25 | "help": "Build all necessary artifacts." 26 | }, 27 | { 28 | "name": "c-ext", 29 | "aliases": ["ext"], 30 | "help": "Build our internal C extension." 31 | }, 32 | { 33 | "name": "zap", 34 | "aliases": [], 35 | "help": "A silly way to clean." 36 | } 37 | ], 38 | "default": "all", 39 | "collections": [ 40 | { 41 | "name": "docs", 42 | "help": "Tasks for managing Sphinx docs.", 43 | "tasks": [ 44 | { 45 | "name": "all", 46 | "aliases": [], 47 | "help": "Build all doc formats." 
48 | }, 49 | { 50 | "name": "html", 51 | "aliases": [], 52 | "help": "Build HTML output only." 53 | }, 54 | { 55 | "name": "pdf", 56 | "aliases": [], 57 | "help": "Build PDF output only." 58 | } 59 | ], 60 | "default": "all", 61 | "collections": [] 62 | }, 63 | { 64 | "name": "python", 65 | "help": "PyPI/etc distribution artifacts.", 66 | "tasks": [ 67 | { 68 | "name": "all", 69 | "aliases": [], 70 | "help": "Build all Python packages." 71 | }, 72 | { 73 | "name": "sdist", 74 | "aliases": [], 75 | "help": "Build classic style tar.gz." 76 | }, 77 | { 78 | "name": "wheel", 79 | "aliases": [], 80 | "help": "Build a wheel." 81 | } 82 | ], 83 | "default": "all", 84 | "collections": [] 85 | } 86 | ] 87 | }, 88 | { 89 | "name": "deploy", 90 | "help": "How to deploy our code and configs.", 91 | "tasks": [ 92 | { 93 | "name": "db", 94 | "aliases": ["db-servers"], 95 | "help": "Deploy to our database servers." 96 | }, 97 | { 98 | "name": "everywhere", 99 | "aliases": [], 100 | "help": "Deploy to all targets." 101 | }, 102 | { 103 | "name": "web", 104 | "aliases": [], 105 | "help": "Update and bounce the webservers." 106 | } 107 | ], 108 | "default": "everywhere", 109 | "collections": [] 110 | }, 111 | { 112 | "name": "provision", 113 | "help": "System setup code.", 114 | "tasks": [ 115 | { 116 | "name": "db", 117 | "aliases": [], 118 | "help": "Stand up one or more DB servers." 119 | }, 120 | { 121 | "name": "web", 122 | "aliases": [], 123 | "help": "Stand up a Web server." 124 | } 125 | ], 126 | "default": null, 127 | "collections": [] 128 | } 129 | ] 130 | } 131 | -------------------------------------------------------------------------------- /tests/_support/tree/__init__.py: -------------------------------------------------------------------------------- 1 | from invoke import task, Collection 2 | 3 | from . import build, deploy, provision 4 | 5 | 6 | @task(aliases=["ipython"]) 7 | def shell(c): 8 | "Load a REPL with project state already set up." 9 | pass 10 | 11 | 12 | @task(aliases=["run_tests"], default=True) 13 | def test(c): 14 | "Run the test suite with baked-in args." 15 | pass 16 | 17 | 18 | # NOTE: using build's internal collection directly as a way of ensuring a 19 | # corner case (collection 'named' via local kwarg) gets tested for --list. 20 | # NOTE: Docstring cloning in effect to preserve the final organic looking 21 | # result... 22 | localbuild = build.ns 23 | localbuild.__doc__ = build.__doc__ 24 | ns = Collection(shell, test, deploy, provision, build=localbuild) 25 | -------------------------------------------------------------------------------- /tests/_support/tree/build/__init__.py: -------------------------------------------------------------------------------- 1 | "Tasks for compiling static code and assets." 2 | 3 | from invoke import task, Collection 4 | 5 | from . import docs, python 6 | 7 | 8 | @task(name="all", aliases=["everything"], default=True) 9 | def all_(c): 10 | "Build all necessary artifacts." 11 | pass 12 | 13 | 14 | @task(aliases=["ext"]) 15 | def c_ext(c): 16 | "Build our internal C extension." 17 | pass 18 | 19 | 20 | @task 21 | def zap(c): 22 | "A silly way to clean." 23 | pass 24 | 25 | 26 | ns = Collection(all_, c_ext, zap, docs, python) 27 | -------------------------------------------------------------------------------- /tests/_support/tree/build/docs.py: -------------------------------------------------------------------------------- 1 | "Tasks for managing Sphinx docs." 
2 | 3 | from invoke import task, Collection 4 | 5 | 6 | @task(name="all", default=True) 7 | def all_(c): 8 | "Build all doc formats." 9 | pass 10 | 11 | 12 | @task 13 | def html(c): 14 | "Build HTML output only." 15 | pass 16 | 17 | 18 | @task 19 | def pdf(c): 20 | "Build PDF output only." 21 | pass 22 | -------------------------------------------------------------------------------- /tests/_support/tree/build/python.py: -------------------------------------------------------------------------------- 1 | "PyPI/etc distribution artifacts." 2 | 3 | from invoke import task, Collection 4 | 5 | 6 | @task(name="all", default=True) 7 | def all_(c): 8 | "Build all Python packages." 9 | pass 10 | 11 | 12 | @task 13 | def sdist(c): 14 | "Build classic style tar.gz." 15 | pass 16 | 17 | 18 | @task 19 | def wheel(c): 20 | "Build a wheel." 21 | pass 22 | -------------------------------------------------------------------------------- /tests/_support/tree/deploy.py: -------------------------------------------------------------------------------- 1 | "How to deploy our code and configs." 2 | 3 | from invoke import task 4 | 5 | 6 | @task(default=True) 7 | def everywhere(c): 8 | "Deploy to all targets." 9 | pass 10 | 11 | 12 | @task(aliases=["db_servers"]) 13 | def db(c): 14 | "Deploy to our database servers." 15 | pass 16 | 17 | 18 | @task 19 | def web(c): 20 | "Update and bounce the webservers." 21 | pass 22 | -------------------------------------------------------------------------------- /tests/_support/tree/provision.py: -------------------------------------------------------------------------------- 1 | "System setup code." 2 | 3 | from invoke import task 4 | 5 | 6 | @task 7 | def db(c): 8 | "Stand up one or more DB servers." 9 | pass 10 | 11 | 12 | @task 13 | def web(c): 14 | "Stand up a Web server." 
15 | pass 16 | -------------------------------------------------------------------------------- /tests/cli.py: -------------------------------------------------------------------------------- 1 | from invoke.collection import Collection 2 | from invoke.parser import Parser 3 | from invoke.tasks import task 4 | 5 | 6 | class CLIParsing: 7 | """ 8 | High level parsing tests 9 | """ 10 | 11 | def setup_method(self): 12 | @task(positional=[], iterable=["my_list"], incrementable=["verbose"]) 13 | def my_task( 14 | c, 15 | mystring, 16 | s, 17 | boolean=False, 18 | b=False, 19 | v=False, 20 | long_name=False, 21 | true_bool=True, 22 | _leading_underscore=False, 23 | trailing_underscore_=False, 24 | my_list=None, 25 | verbose=0, 26 | ): 27 | pass 28 | 29 | @task(aliases=["my_task27"]) 30 | def my_task2(c): 31 | pass 32 | 33 | @task(default=True) 34 | def my_task3(c, mystring): 35 | pass 36 | 37 | @task 38 | def my_task4(c, clean=False, browse=False): 39 | pass 40 | 41 | @task(aliases=["other"], default=True) 42 | def sub_task(c): 43 | pass 44 | 45 | sub_coll = Collection("sub_coll", sub_task) 46 | self.c = Collection(my_task, my_task2, my_task3, my_task4, sub_coll) 47 | 48 | def _parser(self): 49 | return Parser(self.c.to_contexts()) 50 | 51 | def _parse(self, argstr): 52 | return self._parser().parse_argv(argstr.split()) 53 | 54 | def _compare(self, invoke, flagname, value): 55 | invoke = "my-task " + invoke 56 | result = self._parse(invoke) 57 | assert result[0].args[flagname].value == value 58 | 59 | def _compare_names(self, given, real): 60 | assert self._parse(given)[0].name == real 61 | 62 | def underscored_flags_can_be_given_as_dashed(self): 63 | self._compare("--long-name", "long_name", True) 64 | 65 | def leading_underscores_are_ignored(self): 66 | self._compare("--leading-underscore", "_leading_underscore", True) 67 | 68 | def trailing_underscores_are_ignored(self): 69 | self._compare("--trailing-underscore", "trailing_underscore_", True) 70 | 71 | def inverse_boolean_flags(self): 72 | self._compare("--no-true-bool", "true_bool", False) 73 | 74 | def namespaced_task(self): 75 | self._compare_names("sub-coll.sub-task", "sub-coll.sub-task") 76 | 77 | def aliases(self): 78 | self._compare_names("my-task27", "my-task2") 79 | 80 | def subcollection_aliases(self): 81 | self._compare_names("sub-coll.other", "sub-coll.sub-task") 82 | 83 | def subcollection_default_tasks(self): 84 | self._compare_names("sub-coll", "sub-coll.sub-task") 85 | 86 | def boolean_args(self): 87 | "my-task --boolean" 88 | self._compare("--boolean", "boolean", True) 89 | 90 | def flag_then_space_then_value(self): 91 | "my-task --mystring foo" 92 | self._compare("--mystring foo", "mystring", "foo") 93 | 94 | def flag_then_equals_sign_then_value(self): 95 | "my-task --mystring=foo" 96 | self._compare("--mystring=foo", "mystring", "foo") 97 | 98 | def short_boolean_flag(self): 99 | "my-task -b" 100 | self._compare("-b", "b", True) 101 | 102 | def short_flag_then_space_then_value(self): 103 | "my-task -s value" 104 | self._compare("-s value", "s", "value") 105 | 106 | def short_flag_then_equals_sign_then_value(self): 107 | "my-task -s=value" 108 | self._compare("-s=value", "s", "value") 109 | 110 | def short_flag_with_adjacent_value(self): 111 | "my-task -svalue" 112 | r = self._parse("my-task -svalue") 113 | assert r[0].args.s.value == "value" 114 | 115 | def _flag_value_task(self, value): 116 | r = self._parse("my-task -s {} my-task2".format(value)) 117 | assert len(r) == 2 118 | assert r[0].name == "my-task" 119 | assert 
r[0].args.s.value == value 120 | assert r[1].name == "my-task2" 121 | 122 | def flag_value_then_task(self): 123 | "my-task -s value my-task2" 124 | self._flag_value_task("value") 125 | 126 | def flag_value_same_as_task_name(self): 127 | "my-task -s my-task2 my-task2" 128 | self._flag_value_task("my-task2") 129 | 130 | def three_tasks_with_args(self): 131 | "my-task --boolean my-task3 --mystring foo my-task2" 132 | r = self._parse("my-task --boolean my-task3 --mystring foo my-task2") 133 | assert len(r) == 3 134 | assert [x.name for x in r] == ["my-task", "my-task3", "my-task2"] 135 | assert r[0].args.boolean.value 136 | assert r[1].args.mystring.value == "foo" 137 | 138 | def tasks_with_duplicately_named_kwargs(self): 139 | "my-task --mystring foo my-task3 --mystring bar" 140 | r = self._parse("my-task --mystring foo my-task3 --mystring bar") 141 | assert r[0].name == "my-task" 142 | assert r[0].args.mystring.value == "foo" 143 | assert r[1].name == "my-task3" 144 | assert r[1].args.mystring.value == "bar" 145 | 146 | def multiple_short_flags_adjacent(self): 147 | "my-task -bv (and inverse)" 148 | for args in ("-bv", "-vb"): 149 | r = self._parse("my-task {}".format(args)) 150 | a = r[0].args 151 | assert a.b.value 152 | assert a.v.value 153 | 154 | def list_type_flag_can_be_given_N_times_building_a_list(self): 155 | "my-task --my-list foo --my-list bar" 156 | # Test both the singular and plural cases, just to be safe. 157 | self._compare("--my-list foo", "my-list", ["foo"]) 158 | self._compare("--my-list foo --my-list bar", "my-list", ["foo", "bar"]) 159 | 160 | def incrementable_type_flag_can_be_used_as_a_switch_or_counter(self): 161 | "my-task -v, -vv, -vvvvv etc, except with explicit --verbose" 162 | self._compare("", "verbose", 0) 163 | self._compare("--verbose", "verbose", 1) 164 | self._compare("--verbose --verbose --verbose", "verbose", 3) 165 | -------------------------------------------------------------------------------- /tests/concurrency.py: -------------------------------------------------------------------------------- 1 | from queue import Queue 2 | 3 | from invoke.util import ExceptionWrapper, ExceptionHandlingThread as EHThread 4 | 5 | 6 | # TODO: rename 7 | class ExceptionHandlingThread_: 8 | class via_target: 9 | def setup_method(self): 10 | def worker(q): 11 | q.put(7) 12 | 13 | self.worker = worker 14 | 15 | def base_case(self): 16 | queue = Queue() 17 | t = EHThread(target=self.worker, args=[queue]) 18 | t.start() 19 | t.join() 20 | assert queue.get(block=False) == 7 21 | assert queue.empty() 22 | 23 | def catches_exceptions(self): 24 | # Induce exception by submitting a bad queue obj 25 | t = EHThread(target=self.worker, args=[None]) 26 | t.start() 27 | t.join() 28 | wrapper = t.exception() 29 | assert isinstance(wrapper, ExceptionWrapper) 30 | assert wrapper.kwargs == {"args": [None], "target": self.worker} 31 | assert wrapper.type == AttributeError 32 | assert isinstance(wrapper.value, AttributeError) 33 | 34 | def exhibits_is_dead_flag(self): 35 | # Spin up a thread that will except internally (can't put() on a 36 | # None object) 37 | t = EHThread(target=self.worker, args=[None]) 38 | t.start() 39 | t.join() 40 | # Excepted -> it's dead 41 | assert t.is_dead 42 | # Spin up a happy thread that can exit peacefully (it's not "dead", 43 | # though...maybe we should change that terminology) 44 | t = EHThread(target=self.worker, args=[Queue()]) 45 | t.start() 46 | t.join() 47 | # Not dead, just uh...sleeping? 
48 | assert not t.is_dead 49 | 50 | class via_subclassing: 51 | def setup_method(self): 52 | class MyThread(EHThread): 53 | def __init__(self, *args, **kwargs): 54 | self.queue = kwargs.pop("queue") 55 | super().__init__(*args, **kwargs) 56 | 57 | def _run(self): 58 | self.queue.put(7) 59 | 60 | self.klass = MyThread 61 | 62 | def base_case(self): 63 | queue = Queue() 64 | t = self.klass(queue=queue) 65 | t.start() 66 | t.join() 67 | assert queue.get(block=False) == 7 68 | assert queue.empty() 69 | 70 | def catches_exceptions(self): 71 | # Induce exception by submitting a bad queue obj 72 | t = self.klass(queue=None) 73 | t.start() 74 | t.join() 75 | wrapper = t.exception() 76 | assert isinstance(wrapper, ExceptionWrapper) 77 | assert wrapper.kwargs == {} 78 | assert wrapper.type == AttributeError 79 | assert isinstance(wrapper.value, AttributeError) 80 | 81 | def exhibits_is_dead_flag(self): 82 | # Spin up a thread that will except internally (can't put() on a 83 | # None object) 84 | t = self.klass(queue=None) 85 | t.start() 86 | t.join() 87 | # Excepted -> it's dead 88 | assert t.is_dead 89 | # Spin up a happy thread that can exit peacefully (it's not "dead", 90 | # though...maybe we should change that terminology) 91 | t = self.klass(queue=Queue()) 92 | t.start() 93 | t.join() 94 | # Not dead, just uh...sleeping? 95 | assert not t.is_dead 96 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | import termios 5 | 6 | import pytest 7 | from unittest.mock import patch 8 | 9 | from _util import support 10 | 11 | # Set up icecream globally for convenience. 12 | from icecream import install 13 | 14 | install() 15 | 16 | 17 | # pytest seems to tweak logging such that Invoke's debug logs go to stderr, 18 | # which is then hella spammy if one is using --capture=no (which one must in 19 | # order to test low level terminal IO stuff, as we do!) 20 | # So, we explicitly turn default logging back down. 21 | # NOTE: no real better place to put this than here 22 | # TODO: see if we can use modern pytest's logging functionality to remove the 23 | # need for this, now that pytest-relaxed was modernized 24 | logging.basicConfig(level=logging.INFO) 25 | 26 | 27 | @pytest.fixture(autouse=True) 28 | def fake_user_home(): 29 | # Ignore any real user homedir for purpose of testing. 30 | # This allows, for example, a user who has real Invoke configs in their 31 | # homedir to still run the test suite safely. 32 | # TODO: this is still a bit of a kludge & doesn't solve systemwide configs 33 | with patch("invoke.config.expanduser", side_effect=lambda x: x): 34 | yield 35 | 36 | 37 | @pytest.fixture 38 | def reset_environ(): 39 | """ 40 | Resets `os.environ` to its prior state after the fixtured test finishes. 41 | """ 42 | old_environ = os.environ.copy() 43 | yield 44 | os.environ.clear() 45 | os.environ.update(old_environ) 46 | 47 | 48 | @pytest.fixture 49 | def chdir_support(): 50 | # Always do things relative to tests/_support 51 | os.chdir(support) 52 | yield 53 | # Chdir back to project root to avoid problems 54 | os.chdir(os.path.join(os.path.dirname(__file__), "..")) 55 | 56 | 57 | @pytest.fixture 58 | def clean_sys_modules(): 59 | """ 60 | Attempt to nix any imports incurred by the test, to prevent state bleed. 
61 | 62 | In some cases this prevents outright errors (eg a test accidentally relying 63 | on another's import of a task tree in the support folder) and in others 64 | it's required because we're literally testing runtime imports. 65 | """ 66 | snapshot = sys.modules.copy() 67 | yield 68 | # Iterate over another copy to avoid ye olde mutate-during-iterate problem 69 | # NOTE: cannot simply 'sys.modules = snapshot' as that is warned against 70 | for name, module in sys.modules.copy().items(): 71 | # Delete anything newly added (imported) 72 | if name not in snapshot: 73 | del sys.modules[name] 74 | # Overwrite anything that was modified (the easy version...) 75 | sys.modules.update(snapshot) 76 | 77 | 78 | @pytest.fixture 79 | def integration(reset_environ, chdir_support, clean_sys_modules): 80 | yield 81 | 82 | 83 | @pytest.fixture 84 | def mock_termios(): 85 | with patch("invoke.terminals.termios") as mocked: 86 | # Ensure mocked termios has 'real' values for constants...otherwise 87 | # doing bit arithmetic on Mocks kinda defeats the point. 88 | mocked.ECHO = termios.ECHO 89 | mocked.ICANON = termios.ICANON 90 | mocked.VMIN = termios.VMIN 91 | mocked.VTIME = termios.VTIME 92 | yield mocked 93 | -------------------------------------------------------------------------------- /tests/init.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from unittest.mock import patch 4 | 5 | import invoke 6 | import invoke.collection 7 | import invoke.exceptions 8 | import invoke.tasks 9 | import invoke.program 10 | 11 | 12 | class Init: 13 | "__init__" 14 | 15 | def dunder_version_info(self): 16 | assert hasattr(invoke, "__version_info__") 17 | ver = invoke.__version_info__ 18 | assert isinstance(ver, tuple) 19 | assert all(isinstance(x, int) for x in ver) 20 | 21 | def dunder_version(self): 22 | assert hasattr(invoke, "__version__") 23 | ver = invoke.__version__ 24 | assert isinstance(ver, str) 25 | assert re.match(r"\d+\.\d+\.\d+", ver) 26 | 27 | def dunder_version_looks_generated_from_dunder_version_info(self): 28 | # Meh. 
29 | ver_part = invoke.__version__.split(".")[0] 30 | ver_info_part = invoke.__version_info__[0] 31 | assert ver_part == str(ver_info_part) 32 | 33 | class exposes_bindings: 34 | def task_decorator(self): 35 | assert invoke.task is invoke.tasks.task 36 | 37 | def task_class(self): 38 | assert invoke.Task is invoke.tasks.Task 39 | 40 | def collection_class(self): 41 | assert invoke.Collection is invoke.collection.Collection 42 | 43 | def context_class(self): 44 | assert invoke.Context is invoke.context.Context 45 | 46 | def mock_context_class(self): 47 | assert invoke.MockContext is invoke.context.MockContext 48 | 49 | def config_class(self): 50 | assert invoke.Config is invoke.config.Config 51 | 52 | def pty_size_function(self): 53 | assert invoke.pty_size is invoke.terminals.pty_size 54 | 55 | def local_class(self): 56 | assert invoke.Local is invoke.runners.Local 57 | 58 | def runner_class(self): 59 | assert invoke.Runner is invoke.runners.Runner 60 | 61 | def promise_class(self): 62 | assert invoke.Promise is invoke.runners.Promise 63 | 64 | def failure_class(self): 65 | assert invoke.Failure is invoke.runners.Failure 66 | 67 | def exceptions(self): 68 | # Meh 69 | for obj in vars(invoke.exceptions).values(): 70 | if isinstance(obj, type) and issubclass(obj, BaseException): 71 | top_level = getattr(invoke, obj.__name__) 72 | real = getattr(invoke.exceptions, obj.__name__) 73 | assert top_level is real 74 | 75 | def runner_result(self): 76 | assert invoke.Result is invoke.runners.Result 77 | 78 | def watchers(self): 79 | assert invoke.StreamWatcher is invoke.watchers.StreamWatcher 80 | assert invoke.Responder is invoke.watchers.Responder 81 | assert invoke.FailingResponder is invoke.watchers.FailingResponder 82 | 83 | def program(self): 84 | assert invoke.Program is invoke.program.Program 85 | 86 | def filesystemloader(self): 87 | assert invoke.FilesystemLoader is invoke.loader.FilesystemLoader 88 | 89 | def argument(self): 90 | assert invoke.Argument is invoke.parser.Argument 91 | 92 | def parsercontext(self): 93 | assert invoke.ParserContext is invoke.parser.ParserContext 94 | 95 | def parser(self): 96 | assert invoke.Parser is invoke.parser.Parser 97 | 98 | def parseresult(self): 99 | assert invoke.ParseResult is invoke.parser.ParseResult 100 | 101 | def executor(self): 102 | assert invoke.Executor is invoke.executor.Executor 103 | 104 | def call(self): 105 | assert invoke.call is invoke.tasks.call 106 | 107 | def Call(self): 108 | # Starting to think we shouldn't bother with lowercase-c call... 
109 | assert invoke.Call is invoke.tasks.Call 110 | 111 | class offers_singletons: 112 | @patch("invoke.Context") 113 | def run(self, Context): 114 | result = invoke.run("foo", bar="biz") 115 | ctx = Context.return_value 116 | ctx.run.assert_called_once_with("foo", bar="biz") 117 | assert result is ctx.run.return_value 118 | 119 | @patch("invoke.Context") 120 | def sudo(self, Context): 121 | result = invoke.sudo("foo", bar="biz") 122 | ctx = Context.return_value 123 | ctx.sudo.assert_called_once_with("foo", bar="biz") 124 | assert result is ctx.sudo.return_value 125 | -------------------------------------------------------------------------------- /tests/loader.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from importlib.util import spec_from_file_location 4 | from types import ModuleType 5 | from pathlib import Path 6 | 7 | from pytest import raises 8 | 9 | from invoke import Config 10 | from invoke.loader import Loader, FilesystemLoader as FSLoader 11 | from invoke.exceptions import CollectionNotFound 12 | 13 | from _util import support 14 | 15 | 16 | class _BasicLoader(Loader): 17 | """ 18 | Tests top level Loader behavior with basic finder stub. 19 | 20 | Used when we want to make sure we're testing Loader.load and not e.g. 21 | FilesystemLoader's specific implementation. 22 | """ 23 | 24 | def find(self, name): 25 | path = os.path.join(support, name) 26 | if os.path.exists(f"{path}.py"): 27 | path = f"{path}.py" 28 | elif os.path.exists(path): 29 | path = os.path.join(path, "__init__.py") 30 | spec = spec_from_file_location(name, path) 31 | return spec 32 | 33 | 34 | class Loader_: 35 | def exhibits_default_config_object(self): 36 | loader = _BasicLoader() 37 | assert isinstance(loader.config, Config) 38 | assert loader.config.tasks.collection_name == "tasks" 39 | 40 | def returns_module_and_location(self): 41 | mod, path = _BasicLoader().load("namespacing") 42 | assert isinstance(mod, ModuleType) 43 | assert path == support 44 | 45 | def may_configure_config_via_constructor(self): 46 | config = Config({"tasks": {"collection_name": "mytasks"}}) 47 | loader = _BasicLoader(config=config) 48 | assert loader.config.tasks.collection_name == "mytasks" 49 | 50 | def adds_module_parent_dir_to_sys_path(self): 51 | # Crummy doesn't-explode test. 52 | _BasicLoader().load("namespacing") 53 | 54 | def doesnt_duplicate_parent_dir_addition(self): 55 | _BasicLoader().load("namespacing") 56 | _BasicLoader().load("namespacing") 57 | # If the bug is present, this will be 2 at least (and often more, since 58 | # other tests will pollute it (!). 59 | assert sys.path.count(support) == 1 60 | 61 | def can_load_package(self): 62 | loader = _BasicLoader() 63 | # Load itself doesn't explode (tests 'from . 
import xxx' internally) 64 | mod, enclosing_dir = loader.load("package") 65 | # Properties of returned values look as expected 66 | # (enclosing dir is always the one above the module-or-package) 67 | assert enclosing_dir == support 68 | assert mod.__file__ == str(Path(support) / "package" / "__init__.py") 69 | 70 | def load_name_defaults_to_config_tasks_collection_name(self): 71 | "load() name defaults to config.tasks.collection_name" 72 | 73 | class MockLoader(_BasicLoader): 74 | def find(self, name): 75 | # Sanity 76 | assert name == "simple_ns_list" 77 | return super().find(name) 78 | 79 | config = Config({"tasks": {"collection_name": "simple_ns_list"}}) 80 | loader = MockLoader(config=config) 81 | # More sanity: expect simple_ns_list.py (not tasks.py) 82 | mod, path = loader.load() 83 | assert mod.__file__ == os.path.join(support, "simple_ns_list.py") 84 | 85 | 86 | class FilesystemLoader_: 87 | def setup_method(self): 88 | self.loader = FSLoader(start=support) 89 | 90 | def discovery_start_point_defaults_to_cwd(self): 91 | assert FSLoader().start == os.getcwd() 92 | 93 | def exposes_start_point_as_attribute(self): 94 | assert FSLoader().start == os.getcwd() 95 | 96 | def start_point_is_configurable_via_kwarg(self): 97 | start = "/tmp" 98 | assert FSLoader(start=start).start == start 99 | 100 | def start_point_is_configurable_via_config(self): 101 | config = Config({"tasks": {"search_root": "nowhere"}}) 102 | assert FSLoader(config=config).start == "nowhere" 103 | 104 | def raises_CollectionNotFound_if_not_found(self): 105 | with raises(CollectionNotFound): 106 | self.loader.load("nope") 107 | 108 | def raises_ImportError_if_found_collection_cannot_be_imported(self): 109 | # Instead of masking with a CollectionNotFound 110 | with raises(ModuleNotFoundError): 111 | self.loader.load("oops") 112 | 113 | # TODO: Need CollectionImportError here 114 | 115 | def searches_towards_root_of_filesystem(self): 116 | # Loaded while root is in same dir as .py 117 | directly = self.loader.load("foo") 118 | # Loaded while root is multiple dirs deeper than the .py 119 | deep = os.path.join(support, "ignoreme", "ignoremetoo") 120 | indirectly = FSLoader(start=deep).load("foo") 121 | assert directly[0].__file__ == indirectly[0].__file__ 122 | assert directly[0].__spec__ == indirectly[0].__spec__ 123 | assert directly[1] == indirectly[1] 124 | -------------------------------------------------------------------------------- /tests/merge_dicts.py: -------------------------------------------------------------------------------- 1 | from pytest import raises 2 | 3 | from invoke.config import merge_dicts, copy_dict, AmbiguousMergeError 4 | 5 | 6 | class merge_dicts_: 7 | # NOTE: don't usually like doing true unit tests of low level plumbing - 8 | # prefer to infer it's all working by examining higher level behavior - but 9 | # sometimes it's necessary to more easily stamp out certain bugs. 
10 | 11 | def merging_data_onto_empty_dict(self): 12 | d1 = {} 13 | d2 = {"foo": "bar"} 14 | merge_dicts(d1, d2) 15 | assert d1 == d2 16 | 17 | def updating_with_None_acts_like_merging_empty_dict(self): 18 | # When bug present, AttributeError is raised on a None.items() 19 | d1 = {"my": "data"} 20 | d2 = None 21 | merge_dicts(d1, d2) 22 | assert d1 == {"my": "data"} 23 | 24 | def orthogonal_data_merges(self): 25 | d1 = {"foo": "bar"} 26 | d2 = {"biz": "baz"} 27 | merge_dicts(d1, d2) 28 | assert d1 == {"foo": "bar", "biz": "baz"} 29 | 30 | def updates_arg_values_win(self): 31 | d1 = {"foo": "bar"} 32 | d2 = {"foo": "notbar"} 33 | merge_dicts(d1, d2) 34 | assert d1 == {"foo": "notbar"} 35 | 36 | def non_dict_type_mismatch_overwrites_ok(self): 37 | d1 = {"foo": "bar"} 38 | d2 = {"foo": [1, 2, 3]} 39 | merge_dicts(d1, d2) 40 | assert d1 == {"foo": [1, 2, 3]} 41 | 42 | def merging_dict_into_nondict_raises_error(self): 43 | # TODO: or...should it?! If a user really wants to take a pre-existing 44 | # config path and make it 'deeper' by overwriting e.g. a string with a 45 | # dict of strings (or whatever)...should they be allowed to? 46 | d1 = {"foo": "bar"} 47 | d2 = {"foo": {"uh": "oh"}} 48 | with raises(AmbiguousMergeError): 49 | merge_dicts(d1, d2) 50 | 51 | def merging_nondict_into_dict_raises_error(self): 52 | d1 = {"foo": {"uh": "oh"}} 53 | d2 = {"foo": "bar"} 54 | with raises(AmbiguousMergeError): 55 | merge_dicts(d1, d2) 56 | 57 | def nested_leaf_values_merge_ok(self): 58 | d1 = {"foo": {"bar": {"biz": "baz"}}} 59 | d2 = {"foo": {"bar": {"biz": "notbaz"}}} 60 | merge_dicts(d1, d2) 61 | assert d1 == {"foo": {"bar": {"biz": "notbaz"}}} 62 | 63 | def mixed_branch_levels_merges_ok(self): 64 | d1 = {"foo": {"bar": {"biz": "baz"}}, "meh": 17, "myown": "ok"} 65 | d2 = {"foo": {"bar": {"biz": "notbaz"}}, "meh": 25} 66 | merge_dicts(d1, d2) 67 | expected = { 68 | "foo": {"bar": {"biz": "notbaz"}}, 69 | "meh": 25, 70 | "myown": "ok", 71 | } 72 | assert d1 == expected 73 | 74 | def dict_value_merges_are_not_references(self): 75 | core = {} 76 | coll = {"foo": {"bar": {"biz": "coll value"}}} 77 | proj = {"foo": {"bar": {"biz": "proj value"}}} 78 | # Initial merge - when bug present, this sets core['foo'] to the entire 79 | # 'foo' dict in 'proj' as a reference - meaning it 'links' back to the 80 | # 'proj' dict whenever other things are merged into it 81 | merge_dicts(core, proj) 82 | assert core == {"foo": {"bar": {"biz": "proj value"}}} 83 | assert proj["foo"]["bar"]["biz"] == "proj value" 84 | # Identity tests can also prove the bug early 85 | assert ( 86 | core["foo"] is not proj["foo"] 87 | ), "Core foo is literally proj foo!" # noqa 88 | # Subsequent merge - just overwrites leaf values this time (thus no 89 | # real change, but this is what real config merge code does, so why 90 | # not) 91 | merge_dicts(core, proj) 92 | assert core == {"foo": {"bar": {"biz": "proj value"}}} 93 | assert proj["foo"]["bar"]["biz"] == "proj value" 94 | # The problem merge - when bug present, core['foo'] references 'foo' 95 | # inside 'proj', so this ends up tweaking "core" but it actually 96 | # affects "proj" as well! 97 | merge_dicts(core, coll) 98 | # Expect that the core dict got the update from 'coll'... 
99 | assert core == {"foo": {"bar": {"biz": "coll value"}}} 100 | # BUT that 'proj' remains UNTOUCHED 101 | assert proj["foo"]["bar"]["biz"] == "proj value" 102 | 103 | def merge_file_types_by_reference(self): 104 | with open(__file__) as fd: 105 | d1 = {} 106 | d2 = {"foo": fd} 107 | merge_dicts(d1, d2) 108 | assert d1["foo"].closed is False 109 | 110 | 111 | class copy_dict_: 112 | def returns_deep_copy_of_given_dict(self): 113 | # NOTE: not actual deepcopy... 114 | source = {"foo": {"bar": {"biz": "baz"}}} 115 | copy = copy_dict(source) 116 | assert copy["foo"]["bar"] == source["foo"]["bar"] 117 | assert copy["foo"]["bar"] is not source["foo"]["bar"] 118 | copy["foo"]["bar"]["biz"] = "notbaz" 119 | assert source["foo"]["bar"]["biz"] == "baz" 120 | -------------------------------------------------------------------------------- /tests/terminals.py: -------------------------------------------------------------------------------- 1 | import fcntl 2 | import termios 3 | 4 | from unittest.mock import Mock, patch 5 | from pytest import skip, mark 6 | 7 | from invoke.terminals import pty_size, bytes_to_read, WINDOWS 8 | 9 | # Skip on Windows CI, it may blow up on one of these tests 10 | pytestmark = mark.skipif( 11 | WINDOWS, reason="Low level terminal tests only work well on POSIX" 12 | ) 13 | 14 | 15 | # NOTE: 'with character_buffered()' tests are in runners.py as it's a lot 16 | # easier to test some aspects in a non-unit sense (e.g. a keyboard-interrupting 17 | # Runner subclass). MEH. 18 | 19 | 20 | class terminals: 21 | class pty_size: 22 | @patch("fcntl.ioctl", wraps=fcntl.ioctl) 23 | def calls_fcntl_with_TIOCGWINSZ(self, ioctl): 24 | # Test the default (Unix) implementation because that's all we 25 | # can realistically do here. 26 | pty_size() 27 | assert ioctl.call_args_list[0][0][1] == termios.TIOCGWINSZ 28 | 29 | @patch("sys.stdout") 30 | @patch("fcntl.ioctl") 31 | def defaults_to_80x24_when_stdout_not_a_tty(self, ioctl, stdout): 32 | # Make sure stdout acts like a real stream (means failure is 33 | # more obvious) 34 | stdout.fileno.return_value = 1 35 | # Ensure it fails the isatty() test too 36 | stdout.isatty.return_value = False 37 | # Test 38 | assert pty_size() == (80, 24) 39 | 40 | @patch("sys.stdout") 41 | @patch("fcntl.ioctl") 42 | def uses_default_when_stdout_lacks_fileno(self, ioctl, stdout): 43 | # i.e. when accessing it throws AttributeError 44 | stdout.fileno.side_effect = AttributeError 45 | assert pty_size() == (80, 24) 46 | 47 | @patch("sys.stdout") 48 | @patch("fcntl.ioctl") 49 | def uses_default_when_stdout_triggers_ioctl_error(self, ioctl, stdout): 50 | ioctl.side_effect = TypeError 51 | assert pty_size() == (80, 24) 52 | 53 | class bytes_to_read_: 54 | @patch("invoke.terminals.fcntl") 55 | def returns_1_when_stream_lacks_fileno(self, fcntl): 56 | # A fileno() that exists but returns a non-int is a quick way 57 | # to fail util.has_fileno(). 
58 | assert bytes_to_read(Mock(fileno=lambda: None)) == 1 59 | assert not fcntl.ioctl.called 60 | 61 | @patch("invoke.terminals.fcntl") 62 | def returns_1_when_stream_has_fileno_but_is_not_a_tty(self, fcntl): 63 | # It blows up otherwise anyways (struct.unpack gets mad because 64 | # result isn't a string of the right length) but let's make 65 | # ioctl die similarly to the real world case we're testing for 66 | # here (#425) 67 | fcntl.ioctl.side_effect = IOError( 68 | "Operation not supported by device" 69 | ) 70 | stream = Mock(isatty=lambda: False, fileno=lambda: 17) # arbitrary 71 | assert bytes_to_read(stream) == 1 72 | assert not fcntl.ioctl.called 73 | 74 | def returns_FIONREAD_result_when_stream_is_a_tty(self): 75 | skip() 76 | 77 | def returns_1_on_windows(self): 78 | skip() 79 | -------------------------------------------------------------------------------- /tests/util.py: -------------------------------------------------------------------------------- 1 | from invoke.util import helpline 2 | 3 | 4 | class util: 5 | class helpline: 6 | def is_None_if_no_docstring(self): 7 | def foo(c): 8 | pass 9 | 10 | assert helpline(foo) is None 11 | 12 | def is_None_if_whitespace_only_docstring(self): 13 | def foo(c): 14 | """ """ 15 | pass 16 | 17 | assert helpline(foo) is None 18 | 19 | def is_entire_thing_if_docstring_one_liner(self): 20 | def foo(c): 21 | "foo!" 22 | pass 23 | 24 | assert helpline(foo) == "foo!" 25 | 26 | def left_strips_newline_bearing_one_liners(self): 27 | def foo(c): 28 | """ 29 | foo! 30 | """ 31 | pass 32 | 33 | assert helpline(foo) == "foo!" 34 | 35 | def is_first_line_in_multiline_docstrings(self): 36 | def foo(c): 37 | """ 38 | foo? 39 | 40 | foo! 41 | """ 42 | pass 43 | 44 | assert helpline(foo) == "foo?" 45 | 46 | def is_None_if_docstring_matches_object_type(self): 47 | # I.e. we don't want a docstring that is coming from the class 48 | # instead of the instance. 49 | class Foo: 50 | "I am Foo" 51 | pass 52 | 53 | foo = Foo() 54 | assert helpline(foo) is None 55 | 56 | def instance_attached_docstring_is_still_displayed(self): 57 | # This is actually a property of regular object semantics, but 58 | # whatever, why not have a test for it. 59 | class Foo: 60 | "I am Foo" 61 | pass 62 | 63 | foo = Foo() 64 | foo.__doc__ = "I am foo" 65 | assert helpline(foo) == "I am foo" 66 | -------------------------------------------------------------------------------- /tests/watchers.py: -------------------------------------------------------------------------------- 1 | from queue import Queue, Empty 2 | from threading import Thread, Event 3 | 4 | from invoke import Responder, FailingResponder, ResponseNotAccepted 5 | 6 | 7 | # NOTE: StreamWatcher is basically just an interface/protocol; no behavior to 8 | # test of its own. So this file tests Responder primarily, and some subclasses. 9 | 10 | 11 | class Responder_: 12 | def keeps_track_of_seen_index_per_thread(self): 13 | # Instantiate a single object which will be used in >1 thread 14 | r = Responder(pattern="foo", response="bar fight") # meh 15 | # Thread body func allowing us to mimic actual IO thread behavior, with 16 | # Queues used in place of actual pipes/files 17 | def body(responder, in_q, out_q, finished): 18 | while not finished.is_set(): 19 | try: 20 | # NOTE: use nowait() so our loop is hot & can shutdown ASAP 21 | # if finished gets set. 
22 | stream = in_q.get_nowait() 23 | for response in r.submit(stream): 24 | out_q.put_nowait(response) 25 | except Empty: 26 | pass 27 | 28 | # Create two threads from that body func, and queues/etc for each 29 | t1_in, t1_out, t1_finished = Queue(), Queue(), Event() 30 | t2_in, t2_out, t2_finished = Queue(), Queue(), Event() 31 | t1 = Thread(target=body, args=(r, t1_in, t1_out, t1_finished)) 32 | t2 = Thread(target=body, args=(r, t2_in, t2_out, t2_finished)) 33 | # Start the threads 34 | t1.start() 35 | t2.start() 36 | try: 37 | stream = "foo fighters" 38 | # First thread will basically always work 39 | t1_in.put(stream) 40 | assert t1_out.get() == "bar fight" 41 | # Second thread get() will block/timeout if threadlocals aren't in 42 | # use, because the 2nd thread's copy of the responder will not have 43 | # its own index & will thus already be 'past' the `foo` in the 44 | # stream. 45 | t2_in.put(stream) 46 | assert t2_out.get(timeout=1) == "bar fight" 47 | except Empty: 48 | assert ( 49 | False 50 | ), "Unable to read from thread 2 - implies threadlocal indices are broken!" # noqa 51 | # Close up. 52 | finally: 53 | t1_finished.set() 54 | t2_finished.set() 55 | t1.join() 56 | t2.join() 57 | 58 | def yields_response_when_regular_string_pattern_seen(self): 59 | r = Responder(pattern="empty", response="handed") 60 | assert list(r.submit("the house was empty")) == ["handed"] 61 | 62 | def yields_response_when_regex_seen(self): 63 | r = Responder(pattern=r"tech.*debt", response="pay it down") 64 | response = r.submit("technically, it's still debt") 65 | assert list(response) == ["pay it down"] 66 | 67 | def multiple_hits_within_stream_yield_multiple_responses(self): 68 | r = Responder(pattern="jump", response="how high?") 69 | assert list(r.submit("jump, wait, jump, wait")) == ["how high?"] * 2 70 | 71 | def patterns_span_multiple_lines(self): 72 | r = Responder(pattern=r"call.*problem", response="So sorry") 73 | output = """ 74 | You only call me 75 | when you have a problem 76 | You never call me 77 | Just to say hi 78 | """ 79 | assert list(r.submit(output)) == ["So sorry"] 80 | 81 | 82 | class FailingResponder_: 83 | def behaves_like_regular_responder_by_default(self): 84 | r = FailingResponder( 85 | pattern="ju[^ ]{2}", response="how high?", sentinel="lolnope" 86 | ) 87 | assert list(r.submit("jump, wait, jump, wait")) == ["how high?"] * 2 88 | 89 | def raises_failure_exception_when_sentinel_detected(self): 90 | r = FailingResponder( 91 | pattern="ju[^ ]{2}", response="how high?", sentinel="lolnope" 92 | ) 93 | # Behaves normally initially 94 | assert list(r.submit("jump")) == ["how high?"] 95 | # But then! 96 | try: 97 | r.submit("lolnope") 98 | except ResponseNotAccepted as e: 99 | message = str(e) 100 | # Expect useful bits in exception text 101 | err = "Didn't see pattern in {!r}".format(message) 102 | assert "ju[^ ]{2}" in message, err 103 | err = "Didn't see failure sentinel in {!r}".format(message) 104 | assert "lolnope" in message, err 105 | else: 106 | assert False, "Did not raise ResponseNotAccepted!" 107 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # Tox (http://tox.testrun.org/) is a tool for running tests 2 | # in multiple virtualenvs. This configuration file will run the 3 | # test suite on all supported python versions. To use it, "pip install tox" 4 | # and then run "tox" from this directory. 
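# For example, a typical session might look like this (a sketch only; which
# environments actually run depends on the interpreters installed locally):
#   pip install tox
#   tox            # run every environment in the envlist below
#   tox -e py311   # run a single environment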
5 | 6 | [tox] 7 | envlist = py36, py37, py38, py39, py310, py311 8 | 9 | [testenv] 10 | commands = 11 | pip install -r dev-requirements.txt 12 | spec 13 | --------------------------------------------------------------------------------
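The configuration-merge behavior pinned down by tests/merge_dicts.py above can also be read as a short standalone sketch. The snippet below is a hedged illustration rather than part of the suite: it assumes only that invoke is importable, and it restates behaviors the tests themselves assert (merge_dicts mutates its first argument in place; AmbiguousMergeError comes from invoke.config):

from invoke.config import merge_dicts, AmbiguousMergeError

# Leaf values from the second dict win; keys only present in the first survive.
base = {"run": {"echo": False, "pty": False}}
merge_dicts(base, {"run": {"echo": True}})
assert base == {"run": {"echo": True, "pty": False}}

# Merged-in sub-dicts are copies, not references back into their source.
core, proj = {}, {"foo": {"bar": "baz"}}
merge_dicts(core, proj)
assert core == proj and core["foo"] is not proj["foo"]

# Overwriting a dict with a non-dict (or vice versa) is ambiguous and refused.
try:
    merge_dicts(base, {"run": "not-a-dict"})
except AmbiguousMergeError:
    print("refused to overwrite a dict value with a non-dict")

These are the same rules Invoke's Config class relies on when layering system, user, project, and runtime configuration sources on top of one another.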