├── .github └── workflows │ └── test.yml ├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── MANIFEST.in ├── README.md ├── pyproject.toml ├── pytest_console_scripts ├── __init__.py └── py.typed ├── tests ├── conftest.py ├── test_console_scripts.py └── test_run_scripts.py └── tox.ini /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Python package 2 | 3 | on: 4 | push: 5 | branches: "*" 6 | tags: "*.*.*" 7 | pull_request: 8 | 9 | jobs: 10 | lint: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | - name: Install Tox 15 | run: pip install tox 16 | - name: Lint with tox 17 | run: tox run -e lint 18 | 19 | tests: 20 | runs-on: ${{ matrix.os }} 21 | strategy: 22 | matrix: 23 | python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13-dev", "pypy3.9", "pypy3.10"] 24 | os: ["ubuntu-latest", "windows-latest"] 25 | 26 | steps: 27 | - uses: actions/checkout@v4 28 | - name: Set up Python ${{ matrix.python-version }} 29 | uses: actions/setup-python@v5 30 | with: 31 | python-version: ${{ matrix.python-version }} 32 | - name: Install dependencies 33 | run: | 34 | pip install tox 35 | - name: Test with tox 36 | run: tox run -e py,report_ci 37 | - uses: codecov/codecov-action@v4 38 | with: 39 | token: ${{ secrets.CODECOV_TOKEN }} 40 | 41 | package: 42 | runs-on: ubuntu-latest 43 | steps: 44 | - name: Checkout code 45 | uses: actions/checkout@v4 46 | - name: Install build dependencies 47 | run: pip install --upgrade build 48 | - name: Build distributions 49 | run: python -m build 50 | - name: Upload packages 51 | uses: actions/upload-artifact@v4 52 | with: 53 | name: python-dist 54 | path: dist/* 55 | retention-days: 1 56 | compression-level: 0 57 | 58 | publish: 59 | needs: [tests, package] 60 | runs-on: ubuntu-latest 61 | if: github.ref_type == 'tag' 62 | environment: 63 | name: release 64 | url: https://pypi.org/project/pytest-console-scripts/${{ github.ref_name }}/ 65 | permissions: 
66 | id-token: write 67 | steps: 68 | - name: Download packages 69 | uses: actions/download-artifact@v4 70 | with: 71 | name: python-dist 72 | path: dist/ 73 | - name: Publish package distributions to PyPI 74 | uses: pypa/gh-action-pypi-publish@release/v1 75 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | _version.py 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *,cover 47 | .hypothesis/ 48 | .pytest_cache 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | 58 | # Flask instance folder 59 | instance/ 60 | 61 | # Sphinx documentation 62 | docs/_build/ 63 | 64 | # PyBuilder 65 | target/ 66 | 67 | # IPython Notebook 68 | .ipynb_checkpoints 69 | 70 | # pyenv 71 | .python-version 72 | 73 | # PyCharm 74 | .idea 75 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 
4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | ## [1.4.1] - 2023-05-29 11 | 12 | ### Removed 13 | - Dropped support for Python 3.7 14 | [#72](https://github.com/kvas-it/pytest-console-scripts/pull/72) 15 | 16 | ### Fixed 17 | - Fix loading scripts with non-UTF-8 encodings. 18 | [#77](https://github.com/kvas-it/pytest-console-scripts/pull/77) 19 | - Print output when a subprocess runner with `check=True` fails was missing. 20 | [#78](https://github.com/kvas-it/pytest-console-scripts/pull/78) 21 | 22 | ## [1.4.0] - 2023-05-22 23 | 24 | ### Added 25 | - Added type-hinting for all types, `pytest_console_scripts.ScriptRunner` 26 | can now be used to hint the `script_runner` fixture. 27 | [#62](https://github.com/kvas-it/pytest-console-scripts/pull/62) 28 | - Added support for the `shell` and `check` keywords for in-process mode. 29 | These behave as similarly to `subprocess.run` as possible. 30 | - Script runners now take command arguments similar to `subprocess.run`, 31 | including support for PathLike objects. 32 | [#69](https://github.com/kvas-it/pytest-console-scripts/pull/69) 33 | 34 | ### Deprecated 35 | - Passing command arguments in `*args` is now deprecated and will raise warnings. 36 | These should be wrapped in a list or tuple from now on, similar to `subprocess.run`. 37 | [#69](https://github.com/kvas-it/pytest-console-scripts/pull/69) 38 | 39 | ### Removed 40 | - Dropped support for Python 3.6 41 | [#61](https://github.com/kvas-it/pytest-console-scripts/pull/61) 42 | 43 | ### Fixed 44 | - Install-time dependencies have been fixed. 45 | [#56](https://github.com/kvas-it/pytest-console-scripts/issues/56) 46 | 47 | ## [1.3.1] - 2022-03-18 48 | 49 | ### Changed 50 | - Removed `mock` dependency. 
51 | [#53](https://github.com/kvas-it/pytest-console-scripts/pull/53) 52 | 53 | ## [1.3.0] - 2022-02-23 54 | 55 | ### Changed 56 | - Added `python_requires` to the project. 57 | [#51](https://github.com/kvas-it/pytest-console-scripts/issues/51) 58 | 59 | ## [1.2.2] - 2022-01-06 60 | 61 | ### Added 62 | - Add `print` method to allow results to be manually printed. 63 | [#49](https://github.com/kvas-it/pytest-console-scripts/issues/49) 64 | 65 | ### Fixed 66 | - Avoid overwriting the global logging config of tested scripts. 67 | [#48](https://github.com/kvas-it/pytest-console-scripts/pull/48) 68 | 69 | ## [1.2.1] - 2021-09-28 70 | 71 | ### Removed 72 | - Drop support for Python 3.5 73 | 74 | ## [1.2.0] - 2021-04-26 75 | 76 | ### Changed 77 | - Locate the Python interpreter through sys.executable 78 | 79 | ### Fixed 80 | - Do not rely on the Python interpreter being called `python`, 81 | as that command does not exist in certain environments. 82 | 83 | ## [1.1.0] - 2020-11-20 84 | 85 | ### Added 86 | - Add option to suppress printing script run results. 87 | [#41](https://github.com/kvas-it/pytest-console-scripts/issues/41) 88 | 89 | ## [1.0.0] - 2020-10-06 90 | 91 | ### Added 92 | - Support scripts that are not in `console_scripts`. 
93 | [#17](https://github.com/kvas-it/pytest-console-scripts/issues/17) 94 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Vasily Kuznetsov 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
-------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | graft tests 2 | 3 | include tox.ini 4 | include LICENSE 5 | include *.md 6 | include MANIFEST.in 7 | 8 | global-exclude *.py[cod] __pycache__ 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | pytest-console-scripts 2 | ====================== 3 | 4 | [![PyPI](https://img.shields.io/pypi/v/pytest-console-scripts)](https://pypi.org/project/pytest-console-scripts/) 5 | [![PyPI - License](https://img.shields.io/pypi/l/pytest-console-scripts)](https://github.com/kvas-it/pytest-console-scripts/blob/master/LICENSE) 6 | [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/kvas-it/pytest-console-scripts/test.yml)](https://github.com/kvas-it/pytest-console-scripts/actions) 7 | [![codecov](https://codecov.io/gh/kvas-it/pytest-console-scripts/branch/master/graph/badge.svg?token=RfELxcqvpF)](https://codecov.io/gh/kvas-it/pytest-console-scripts) 8 | 9 | [![GitHub issues](https://img.shields.io/github/issues/kvas-it/pytest-console-scripts)](https://github.com/kvas-it/pytest-console-scripts/issues) 10 | [![GitHub pull requests](https://img.shields.io/github/issues-pr/kvas-it/pytest-console-scripts)](https://github.com/kvas-it/pytest-console-scripts/pulls) 11 | [![GitHub commits since latest release (by date)](https://img.shields.io/github/commits-since/kvas-it/pytest-console-scripts/latest)](https://github.com/kvas-it/pytest-console-scripts/blob/master/CHANGELOG.md) 12 | 13 | Pytest-console-scripts is a [pytest][1] plugin for running python scripts from 14 | within tests. 
It's quite similar to `subprocess.run()`, but it also has an 15 | in-process mode, where the scripts are executed by the interpreter that's 16 | running `pytest` (using some amount of sandboxing). 17 | 18 | In-process mode significantly reduces the run time of the test suites that 19 | run many external scripts. This speeds up development. In the CI environment 20 | subprocess mode can be used to make sure the scripts also work (and behave the 21 | same) when run by a fresh interpreter. 22 | 23 | Requirements 24 | ------------ 25 | 26 | - Python 3.8+, or PyPy3, 27 | - Pytest 4.0 or newer. 28 | 29 | Installation 30 | ------------ 31 | 32 | You can install "pytest-console-scripts" via [pip][2] from [PyPI][3]: 33 | 34 | ```sh 35 | $ pip install pytest-console-scripts 36 | ``` 37 | 38 | Normally you would add it as a test dependency in `tox.ini` (see [tox 39 | documentation][9]). 40 | 41 | Usage 42 | ----- 43 | 44 | This plugin will run scripts that are installed via `console_scripts` entry 45 | point in `setup.py`, python files in current directory (or anywhere else, if 46 | given the path), and Python scripts anywhere else in the path. It will also run 47 | executables that are not Python scripts, but only in subprocess mode (there's 48 | no benefit in using `pytest-console-scripts` for this, you should just use 49 | `subprocess.run`). 50 | 51 | Here's an example with `console_scripts` entry point. 
Imagine we have a python 52 | package `foo` with the following `setup.py`: 53 | 54 | ```py 55 | setup( 56 | name='foo', 57 | version='0.0.1', 58 | py_modules=['foo'], 59 | entry_points={ 60 | 'console_scripts': ['foobar=foo:bar'] 61 | }, 62 | ) 63 | ``` 64 | 65 | We could use pytest-console-scripts to test the `foobar` script: 66 | 67 | ```py 68 | def test_foo_bar(script_runner): 69 | result = script_runner.run(['foobar', '--version']) 70 | assert result.returncode == 0 71 | assert result.stdout == '3.2.1\n' 72 | assert result.stderr == '' 73 | 74 | script_runner.run('foobar --version', shell=True, check=True) 75 | ``` 76 | 77 | This would use the `script_runner` fixture provided by the plugin to 78 | run the script and capture its output. 79 | 80 | The arguments of `script_runner.run` are the command name of the script and 81 | any command line arguments that should be passed to it. Additionally the 82 | following keyword arguments can be used: 83 | 84 | - `cwd` - set the working directory of the script under test. 85 | - `env` - a dictionary with environment variables to use instead of the current 86 | environment. 87 | - `stdin` - a file-like object that will be piped to standard input of the 88 | script. 89 | - `check` - raises an exception if `returncode != 0`, defaults to False. 90 | - `shell` - mimic shell execution, this should work well for simple cases, 91 | defaults to False. 92 | 93 | Type-hinting is also supported. 94 | You may type-hint the fixture with the following code: 95 | 96 | ```py 97 | from pytest_console_scripts import ScriptRunner 98 | 99 | def test_foo_bar(script_runner: ScriptRunner) -> None: 100 | ... 101 | ``` 102 | 103 | Configuring script execution mode 104 | --------------------------------- 105 | 106 | In the example above the `foobar` script would run in in-process mode (which is 107 | the default). This is fast and good for quick iteration during development. 
108 | After we're happy with the functionality, it's time to run the script in 109 | subprocess mode to simulate real invocation more closely. There are several 110 | ways to do this. We can configure it via pytest configuration (for example in 111 | `tox.ini`): 112 | 113 | ```ini 114 | [pytest] 115 | script_launch_mode = subprocess 116 | ``` 117 | 118 | We can give a command line option to pytest (this will override the 119 | configuration file): 120 | 121 | ```sh 122 | $ pytest --script-launch-mode=subprocess test_foobar.py 123 | ``` 124 | 125 | We can also mark individual tests to run in a specific mode: 126 | 127 | ```py 128 | @pytest.mark.script_launch_mode('subprocess') 129 | def test_foobar(script_runner): 130 | ... 131 | ``` 132 | 133 | Between these three methods the marking of the tests has priority before the 134 | command line option that in turn overrides the configuration setting. All three 135 | can take three possible values: "inprocess", "subprocess", and "both" (which 136 | will cause the test to be run twice: in in-process and in subprocess modes). 137 | 138 | Interaction with mocking 139 | ------------------------ 140 | 141 | It is possible to mock objects and functions inside of console scripts when 142 | they are run using `pytest-console-scripts` but only in inprocess mode. When 143 | the script is run in subprocess mode, it is executed by a separate Python 144 | interpreter and the test can't mock anything inside of it. 145 | 146 | Another limitation of mocking is that with simple Python scripts that are not 147 | installed via [`console_scripts` entry point][14] mocking of objects inside of 148 | the main script will not work. The reason for that is that when we run 149 | `myscript.py` with `$ python myscript.py` the script gets imported into 150 | `__main__` namespace instead of `myscript` namespace. 
Our patching of 151 | `myscript.myfunction` will have no effect on what the code in `__main__` 152 | namespace sees when it's calling `myfunction` defined in the same file. 153 | 154 | See [this stackoverflow answer](https://stackoverflow.com/a/66693954/1595738) 155 | for some ideas of how to get around this. 156 | 157 | Suppressing the printing of script run results 158 | ---------------------------------------------- 159 | 160 | When tests involving `pytest-console-scripts` fail, it tends to be quite 161 | useful to see the output of the scripts that were executed in them. We try 162 | to be helpful and print it out just before returning the result from 163 | `script_runner.run()`. Normally PyTest [captures][12] all the output during a 164 | test run and it's not shown to the user unless some tests fail. This is exactly 165 | what we want. 166 | 167 | However, in some cases it might be useful to disable the output capturing and 168 | PyTest provides [ways to do it][13]. When capturing is disabled, all test run 169 | results will be printed out and this might make it harder to inspect the other 170 | output of the tests. To deal with this, `pytest-console-scripts` has an option 171 | to disable the printing of script run results: 172 | 173 | ```sh 174 | $ pytest --hide-run-results test_foobar.py 175 | ``` 176 | 177 | It's also possible to disable it just for one script run: 178 | 179 | ```py 180 | result = script_runner.run('foobar', print_result=False) 181 | ``` 182 | 183 | When printing of script run results is disabled, script output won't be 184 | visible even when the test fails. Unfortunately there's no automatic way to 185 | print it only if the test fails because by the time a script run completes we 186 | don't know whether the test will fail or not. 
It's possible to do it manually 187 | from the test by using: 188 | 189 | ```py 190 | result.print() 191 | ``` 192 | 193 | This, combined with `--hide-run-results` or `print_result=False` can be used to 194 | only print interesting run results when capturing is off. 195 | 196 | Package installation and testing during development 197 | --------------------------------------------------- 198 | 199 | Since `pytest-console-scripts` relies on the scripts being located in the path, 200 | it can only run the console scripts from packages that have been installed (if 201 | you are interested in working on removing this limitation, take a look at [this 202 | ticket](https://github.com/kvas-it/pytest-console-scripts/issues/34) and in 203 | particular [this comment](https://github.com/kvas-it/pytest-console-scripts/issues/34#issuecomment-649497564)). 204 | If you want to run the tests quickly during development, the additional 205 | installation step would add a significant overhead and slow you down. 206 | 207 | There's a way around this: install your package in [development mode][10] using 208 | `pip install -e .`. If you use [tox][9], you can take one of its 209 | existing virtualenvs (they live in `.tox/`). Otherwise create a 210 | [virtualenv][11] just for development, activate it and run `python setup.py 211 | develop` to install your package in development mode. You will need to 212 | re-install every time you add a new console script, but otherwise all the 213 | changes to your code will be immediately picked up by the tests. 214 | 215 | Contributing 216 | ------------ 217 | 218 | Contributions are very welcome. Tests can be run with `tox`, please ensure 219 | the coverage at least stays the same before you submit a pull request. 220 | 221 | License 222 | ------- 223 | 224 | Distributed under the terms of the [MIT][8] license, "pytest-console-scripts" 225 | is free and open source software. 
226 | 227 | Issues 228 | ------ 229 | 230 | If you encounter any problems, please [file an issue][7] along with a detailed 231 | description. 232 | 233 | ---- 234 | 235 | Pytest-console-scripts was initially generated with [Cookiecutter][4] along 236 | with [@hackebrot][5]'s [Cookiecutter-pytest-plugin][6] template. 237 | 238 | [1]: https://github.com/pytest-dev/pytest 239 | [2]: https://pypi.python.org/pypi/pip/ 240 | [3]: https://pypi.python.org/pypi 241 | [4]: https://github.com/audreyr/cookiecutter 242 | [5]: https://github.com/hackebrot 243 | [6]: https://github.com/pytest-dev/cookiecutter-pytest-plugin 244 | [7]: https://github.com/kvas-it/pytest-console-scripts/issues 245 | [8]: http://opensource.org/licenses/MIT 246 | [9]: https://tox.readthedocs.org/en/latest/ 247 | [10]: https://setuptools.pypa.io/en/latest/userguide/development_mode.html 248 | [11]: https://docs.python.org/3/library/venv.html 249 | [12]: https://docs.pytest.org/en/stable/capture.html 250 | [13]: https://docs.pytest.org/en/stable/capture.html#setting-capturing-methods-or-disabling-capturing 251 | [14]: https://python-packaging.readthedocs.io/en/latest/command-line-scripts.html#the-console-scripts-entry-point 252 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ['flit_scm'] 3 | build-backend = 'flit_scm:buildapi' 4 | 5 | [project] 6 | name = 'pytest-console-scripts' 7 | authors = [{ name = 'Vasily Kuznetsov', email = 'kvas.it@gmail.com' }] 8 | maintainers = [ 9 | { name = 'Vasily Kuznetsov', email = 'kvas.it@gmail.com' }, 10 | { name = 'Kyle Benesch', email = '4b796c65+github@gmail.com' }, 11 | ] 12 | readme = 'README.md' 13 | license = { file = 'LICENSE' } 14 | classifiers = [ 15 | 'Development Status :: 4 - Beta', 16 | 'Framework :: Pytest', 17 | 'Intended Audience :: Developers', 18 | 'License :: OSI Approved :: MIT 
License', 19 | 'Operating System :: OS Independent', 20 | 'Programming Language :: Python', 21 | 'Programming Language :: Python :: 3', 22 | 'Programming Language :: Python :: 3.8', 23 | 'Programming Language :: Python :: 3.9', 24 | 'Programming Language :: Python :: 3.10', 25 | 'Programming Language :: Python :: 3.11', 26 | 'Programming Language :: Python :: Implementation :: CPython', 27 | 'Programming Language :: Python :: Implementation :: PyPy', 28 | 'Topic :: Software Development :: Testing', 29 | 'Typing :: Typed', 30 | ] 31 | requires-python = '>=3.8' 32 | dynamic = ['version', 'description'] 33 | dependencies = [ 34 | 'pytest >=4.0.0', 35 | "importlib_metadata >=3.6; python_version < '3.10'", 36 | ] 37 | 38 | [tool.setuptools_scm] 39 | write_to = 'pytest_console_scripts/_version.py' 40 | 41 | [project.urls] 42 | Source = 'https://github.com/kvas-it/pytest-console-scripts' 43 | Changelog = 'https://github.com/kvas-it/pytest-console-scripts/blob/master/CHANGELOG.md' 44 | Issues = 'https://github.com/kvas-it/pytest-console-scripts/issues' 45 | Forum = 'https://github.com/kvas-it/pytest-console-scripts/discussions' 46 | 47 | [project.entry-points.'pytest11'] 48 | console-scripts = 'pytest_console_scripts' 49 | 50 | [tool.flit.sdist] 51 | include = ['*.md', 'MANIFEST.in', 'tox.ini', 'tests/'] 52 | -------------------------------------------------------------------------------- /pytest_console_scripts/__init__.py: -------------------------------------------------------------------------------- 1 | """Pytest plugin for testing console scripts.""" 2 | from __future__ import annotations 3 | 4 | import contextlib 5 | import io 6 | import logging 7 | import os 8 | import shlex 9 | import shutil 10 | import subprocess 11 | import sys 12 | import traceback 13 | import warnings 14 | from pathlib import Path 15 | from typing import Any, Callable, Iterator, Sequence, Union 16 | from unittest import mock 17 | 18 | import pytest 19 | 20 | from . 
import _version 21 | 22 | if sys.version_info < (3, 10): 23 | import importlib_metadata 24 | else: 25 | import importlib.metadata as importlib_metadata 26 | 27 | __version__ = _version.version 28 | 29 | _StrOrPath = Union[str, os.PathLike] 30 | """A command line argument type as a str or path.""" 31 | 32 | _Command = Union[_StrOrPath, Sequence[_StrOrPath]] 33 | """A command-like type compatible with subprocess.run.""" 34 | 35 | StreamMock = io.StringIO 36 | 37 | 38 | def pytest_addoption(parser: pytest.Parser) -> None: 39 | group = parser.getgroup('console-scripts') 40 | group.addoption( 41 | '--script-launch-mode', 42 | metavar='inprocess|subprocess|both', 43 | action='store', 44 | dest='script_launch_mode', 45 | default=None, 46 | help='how to run python scripts under test (default: inprocess)' 47 | ) 48 | group.addoption( 49 | '--hide-run-results', 50 | action='store_true', 51 | dest='hide_run_results', 52 | default=False, 53 | help="don't print out script run results on failures or when " 54 | 'output capturing is disabled' 55 | ) 56 | parser.addini( 57 | 'script_launch_mode', 58 | 'how to run python scripts under test (inprocess|subprocess|both)' 59 | ) 60 | 61 | 62 | def pytest_configure(config: pytest.Config) -> None: 63 | config.addinivalue_line( 64 | 'markers', 65 | 'script_launch_mode: how to run python scripts under test ' 66 | '(inprocess|subprocess|both)', 67 | ) 68 | 69 | 70 | def _get_mark_mode(metafunc: pytest.Metafunc) -> str | None: 71 | """Return launch mode as indicated by test function marker or None.""" 72 | marker = metafunc.definition.get_closest_marker('script_launch_mode') 73 | if marker: 74 | return str(marker.args[0]) 75 | return None 76 | 77 | 78 | def _is_nonexecutable_python_file(command: _StrOrPath) -> bool: 79 | """Check if `command` is a Python file with no executable mode set.""" 80 | command = Path(command) 81 | mode = command.stat().st_mode 82 | if mode & os.X_OK: 83 | return False 84 | return command.suffix == '.py' 85 | 86 | 
87 | def pytest_generate_tests(metafunc: pytest.Metafunc) -> None: 88 | """Parametrize script_launch_mode fixture. 89 | 90 | Checks the configuration sources in this order: 91 | - `script_launch_mode` mark on the test, 92 | - `--script-launch-mode` option, 93 | - `script_launch_mode` configuration option in [pytest] section of the 94 | pyest config file. 95 | 96 | This process yields a value that can be one of: 97 | - "inprocess" -- The script will be run via loading its main function 98 | into the test runner process and mocking the environment. 99 | - "subprocess" -- The script will be run via `subprocess` module. 100 | - "both" -- The test will be run twice: once in inprocess mode and once 101 | in subprocess mode. 102 | - None -- Same as "inprocess". 103 | """ 104 | if 'script_launch_mode' not in metafunc.fixturenames: 105 | return 106 | 107 | mark_mode = _get_mark_mode(metafunc) 108 | option_mode = metafunc.config.option.script_launch_mode 109 | config_mode = metafunc.config.getini('script_launch_mode') 110 | 111 | mode = mark_mode or option_mode or config_mode or 'inprocess' 112 | 113 | if mode in {'inprocess', 'subprocess'}: 114 | metafunc.parametrize('script_launch_mode', [mode], indirect=True) 115 | elif mode == 'both': 116 | metafunc.parametrize('script_launch_mode', ['inprocess', 'subprocess'], 117 | indirect=True) 118 | else: 119 | raise ValueError(f'Invalid script launch mode: {mode}') 120 | 121 | 122 | class RunResult: 123 | """Result of running a script.""" 124 | 125 | def __init__( 126 | self, returncode: int, stdout: str, stderr: str, print_result: bool 127 | ) -> None: 128 | self.success = returncode == 0 129 | self.returncode = returncode 130 | self.stdout = stdout 131 | self.stderr = stderr 132 | if print_result: 133 | self.print() 134 | 135 | def print(self) -> None: 136 | print('# Script return code:', self.returncode) 137 | print('# Script stdout:', self.stdout, sep='\n') 138 | print('# Script stderr:', self.stderr, sep='\n') 139 | 140 | 141 
| def _handle_command_args( 142 | command: _Command, 143 | *args: _StrOrPath, 144 | shell: bool = False, 145 | stacklevel: int = 1, 146 | ) -> Sequence[_StrOrPath]: 147 | """Return command arguments in a consistent list format. 148 | 149 | If shell=True then this function tries to mimic local shell execution. 150 | """ 151 | if shell: 152 | if args or not isinstance(command, (str, os.PathLike)): 153 | command = subprocess.list2cmdline( 154 | str(arg) 155 | for arg in _handle_command_args( 156 | command, *args, shell=False, stacklevel=stacklevel + 1 157 | ) 158 | ) 159 | command = shlex.split(str(command), posix=os.name == 'posix') 160 | args = () 161 | 162 | if args: 163 | warnings.warn( 164 | 'script_runner commands should be passed as a single sequence,' 165 | ' not as multiple arguments.' 166 | '\nReplace `script_runner.run(a, b, c)` calls with' 167 | ' `script_runner.run([a, b, c])`', 168 | DeprecationWarning, 169 | stacklevel=stacklevel + 1, 170 | ) 171 | if not isinstance(command, (str, os.PathLike)): 172 | return [*command, *args] 173 | return [command, *args] 174 | if isinstance(command, (str, os.PathLike)): 175 | return [command] 176 | return command 177 | 178 | 179 | @contextlib.contextmanager 180 | def _patch_environ(new_environ: dict[str, str] | None) -> Iterator[None]: 181 | """Replace the environment for the duration of a context.""" 182 | if new_environ is None: 183 | yield 184 | return 185 | old_environ = os.environ.copy() 186 | os.environ.clear() 187 | os.environ.update(new_environ) 188 | yield 189 | os.environ.clear() 190 | os.environ.update(old_environ) 191 | 192 | 193 | @contextlib.contextmanager 194 | def _chdir_context(new_dir: _StrOrPath | None) -> Iterator[None]: 195 | """Replace the current directory for the duration of a context.""" 196 | if new_dir is None: 197 | yield 198 | return 199 | old_cwd = os.getcwd() 200 | os.chdir(new_dir) 201 | yield 202 | os.chdir(old_cwd) 203 | 204 | 205 | @contextlib.contextmanager 206 | def 
_push_and_reset_logger() -> Iterator[None]: 207 | """Do a very basic reset of the root logger and restore its config on exit. 208 | 209 | This allows scripts to call logging.basicConfig(...) and have 210 | it work as expected. It might not work for more sophisticated logging 211 | setups but it's simple and covers the basic usage whereas implementing 212 | a comprehensive fix is impossible in a compatible way. 213 | """ 214 | logger = logging.getLogger() 215 | old_handlers = logger.handlers 216 | old_disabled = logger.disabled 217 | old_level = logger.level 218 | logger.handlers = [] 219 | logger.disabled = False 220 | logger.setLevel(logging.NOTSET) 221 | yield 222 | # Restore logger to previous configuration 223 | logger.handlers = old_handlers 224 | logger.disabled = old_disabled 225 | logger.setLevel(old_level) 226 | 227 | 228 | class ScriptRunner: 229 | """Fixture for running python scripts under test.""" 230 | 231 | def __init__( 232 | self, launch_mode: str, 233 | rootdir: _StrOrPath, 234 | print_result: bool = True 235 | ) -> None: 236 | assert launch_mode in {'inprocess', 'subprocess'} 237 | self.launch_mode = launch_mode 238 | self.print_result = print_result 239 | self.rootdir = rootdir 240 | 241 | def __repr__(self) -> str: 242 | return f'' 243 | 244 | def run( 245 | self, 246 | command: _Command, 247 | *arguments: _StrOrPath, 248 | print_result: bool | None = None, 249 | shell: bool = False, 250 | cwd: _StrOrPath | None = None, 251 | env: dict[str, str] | None = None, 252 | stdin: io.IOBase | None = None, 253 | check: bool = False, 254 | **options: Any, 255 | ) -> RunResult: 256 | if print_result is None: 257 | print_result = self.print_result 258 | 259 | if print_result: 260 | print('# Running console script:', command, *arguments) 261 | 262 | if self.launch_mode == 'inprocess': 263 | run_function = self.run_inprocess 264 | else: 265 | run_function = self.run_subprocess 266 | return run_function( 267 | command, 268 | *arguments, 269 | 
print_result=print_result, 270 | shell=shell, 271 | cwd=cwd, 272 | env=env, 273 | stdin=stdin, 274 | check=check, 275 | _stacklevel=2, 276 | **options, 277 | ) 278 | 279 | @staticmethod 280 | def _locate_script( 281 | command: _StrOrPath, 282 | *, 283 | cwd: _StrOrPath | None, 284 | env: dict[str, str] | None, 285 | ) -> Path: 286 | """Locate script in PATH or in current directory.""" 287 | script_path = shutil.which( 288 | command, 289 | path=env.get('PATH', None) if env is not None else None, 290 | ) 291 | if script_path is not None: 292 | return Path(script_path) 293 | 294 | cwd = cwd if cwd is not None else os.getcwd() 295 | return Path(cwd, command).resolve(strict=True) 296 | 297 | @classmethod 298 | def _load_script( 299 | cls, 300 | command: _StrOrPath, 301 | *, 302 | cwd: _StrOrPath | None, 303 | env: dict[str, str] | None, 304 | ) -> Callable[[], int | None]: 305 | """Load target script via entry points or compile/exec.""" 306 | if isinstance(command, str): 307 | entry_points = tuple( 308 | importlib_metadata.entry_points( 309 | group='console_scripts', name=command 310 | ) 311 | ) 312 | if entry_points: 313 | def console_script() -> int | None: 314 | s: Callable[[], int | None] = entry_points[0].load() 315 | return s() 316 | return console_script 317 | 318 | script_path = cls._locate_script(command, cwd=cwd, env=env) 319 | 320 | def exec_script() -> int: 321 | compiled = compile( 322 | script_path.read_bytes(), str(script_path), 'exec', flags=0 323 | ) 324 | exec(compiled, {'__name__': '__main__'}) 325 | return 0 326 | 327 | return exec_script 328 | 329 | @classmethod 330 | def run_inprocess( 331 | cls, 332 | command: _Command, 333 | *arguments: _StrOrPath, 334 | shell: bool = False, 335 | cwd: _StrOrPath | None = None, 336 | env: dict[str, str] | None = None, 337 | print_result: bool = True, 338 | stdin: io.IOBase | None = None, 339 | check: bool = False, 340 | _stacklevel: int = 1, 341 | **options: Any, 342 | ) -> RunResult: 343 | for key in options: 
344 | warnings.warn( 345 | f'Keyword argument {key!r} was ignored.' 346 | '\nConsider using subprocess mode or raising an issue.', 347 | stacklevel=_stacklevel + 1, 348 | ) 349 | cmd_args = _handle_command_args( 350 | command, *arguments, shell=shell, stacklevel=_stacklevel + 1 351 | ) 352 | script = cls._load_script(cmd_args[0], cwd=cwd, env=env) 353 | cmd_args = [str(cmd) for cmd in cmd_args] 354 | stdin_stream = stdin if stdin is not None else StreamMock() 355 | stdout_stream = StreamMock() 356 | stderr_stream = StreamMock() 357 | with contextlib.ExitStack() as stack: 358 | stack.enter_context(mock.patch('sys.stdin', new=stdin_stream)) 359 | stack.enter_context(contextlib.redirect_stdout(stdout_stream)) 360 | stack.enter_context(contextlib.redirect_stderr(stderr_stream)) 361 | stack.enter_context(mock.patch('sys.argv', new=cmd_args)) 362 | stack.enter_context(_push_and_reset_logger()) 363 | stack.enter_context(_patch_environ(env)) 364 | stack.enter_context(_chdir_context(cwd)) 365 | 366 | try: 367 | returncode = script() 368 | except SystemExit as exc: 369 | returncode = 1 370 | if isinstance(exc.code, str): 371 | stderr_stream.write(f'{exc}\n') 372 | returncode = 1 373 | else: 374 | returncode = exc.code 375 | except Exception: 376 | returncode = 1 377 | try: 378 | et, ev, tb = sys.exc_info() 379 | assert tb 380 | # Hide current frame from the stack trace. 
381 | traceback.print_exception(et, ev, tb.tb_next) 382 | finally: 383 | del tb 384 | 385 | result = RunResult( 386 | returncode or 0, # None also means success 387 | stdout_stream.getvalue(), 388 | stderr_stream.getvalue(), 389 | print_result, 390 | ) 391 | 392 | if check and returncode: 393 | raise subprocess.CalledProcessError( 394 | returncode, 395 | cmd_args, 396 | result.stdout, 397 | result.stderr, 398 | ) 399 | 400 | return result 401 | 402 | @classmethod 403 | def run_subprocess( 404 | cls, 405 | command: _Command, 406 | *arguments: _StrOrPath, 407 | print_result: bool = True, 408 | shell: bool = False, 409 | cwd: _StrOrPath | None = None, 410 | env: dict[str, str] | None = None, 411 | stdin: io.IOBase | None = None, 412 | check: bool = False, 413 | universal_newlines: bool = True, 414 | _stacklevel: int = 1, 415 | **options: Any, 416 | ) -> RunResult: 417 | stdin_input: str | bytes | None = None 418 | if stdin is not None: 419 | stdin_input = stdin.read() 420 | 421 | script_path = cls._locate_script( 422 | _handle_command_args( 423 | command, *arguments, shell=shell, stacklevel=_stacklevel + 1 424 | )[0], 425 | cwd=cwd, 426 | env=env, 427 | ) 428 | if arguments: 429 | command = _handle_command_args( 430 | command, *arguments, shell=shell, stacklevel=_stacklevel + 1 431 | ) 432 | 433 | if _is_nonexecutable_python_file(script_path): 434 | command = _handle_command_args( 435 | command, shell=shell, stacklevel=_stacklevel + 1 436 | ) 437 | command = [sys.executable or 'python', *command] 438 | 439 | try: 440 | cp = subprocess.run( 441 | command, 442 | input=stdin_input, 443 | stdout=subprocess.PIPE, 444 | stderr=subprocess.PIPE, 445 | shell=shell, 446 | cwd=cwd, 447 | env=env, 448 | check=check, 449 | universal_newlines=universal_newlines, 450 | **options, 451 | ) 452 | except subprocess.CalledProcessError as exc: 453 | RunResult(exc.returncode, exc.stdout, exc.stderr, print_result) 454 | raise 455 | return RunResult(cp.returncode, cp.stdout, cp.stderr, 
print_result) 456 | 457 | 458 | @pytest.fixture 459 | def script_launch_mode(request: pytest.FixtureRequest) -> str: 460 | return str(request.param) 461 | 462 | 463 | @pytest.fixture 464 | def script_cwd(tmp_path: Path) -> Path: 465 | work_dir = tmp_path / 'script-cwd' 466 | work_dir.mkdir() 467 | return work_dir 468 | 469 | 470 | @pytest.fixture 471 | def script_runner( 472 | request: pytest.FixtureRequest, script_cwd: Path, script_launch_mode: str 473 | ) -> ScriptRunner: 474 | print_result = not request.config.getoption('--hide-run-results') 475 | return ScriptRunner(script_launch_mode, script_cwd, print_result) 476 | -------------------------------------------------------------------------------- /pytest_console_scripts/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvas-it/pytest-console-scripts/31a7e3739068ac69a26bb8b3cb327a8664457780/pytest_console_scripts/py.typed -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | pytest_plugins = 'pytester' 2 | -------------------------------------------------------------------------------- /tests/test_console_scripts.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pytest 4 | 5 | 6 | @pytest.fixture(params=[None, 'inprocess', 'subprocess', 'both']) 7 | def launch_mode_conf(request: pytest.FixtureRequest) -> str | None: 8 | """Configured launch mode (None|'inprocess'|'subprocess'|'both').""" 9 | assert request.param is None or isinstance(request.param, str) 10 | return request.param 11 | 12 | 13 | @pytest.fixture 14 | def launch_modes(launch_mode_conf: str | None) -> set[str]: 15 | """Set of launch modes in which the tests will actually be run. 
16 | 17 | The value of this fixture depends on the value of `launch_mode_conf`: 18 | - 'inprocess' -> {'inprocess'} 19 | - 'subprocess' -> {'subprocess'} 20 | - 'both' -> {'inprocess', 'subprocess'} 21 | - None -> {'inprocess'} 22 | """ 23 | if launch_mode_conf == 'both': 24 | return {'inprocess', 'subprocess'} 25 | if launch_mode_conf is not None: 26 | return {launch_mode_conf} 27 | return {'inprocess'} 28 | 29 | 30 | class RunTest: 31 | def __init__(self, testdir: pytest.Testdir) -> None: 32 | self.testdir = testdir 33 | 34 | def __call__( 35 | self, 36 | script: str, 37 | passed: int = 1, 38 | skipped: int = 0, 39 | failed: int = 0, 40 | launch_mode_conf: str | None = None 41 | ) -> pytest.RunResult: 42 | self.testdir.makepyfile(script) 43 | args = [] 44 | if launch_mode_conf is not None: 45 | args.append('--script-launch-mode=' + launch_mode_conf) 46 | result = self.testdir.runpytest(*args) 47 | print('\n'.join(['pytest stdout:'] + result.outlines + 48 | ['pytest stderr:'] + result.errlines)) 49 | result.assert_outcomes(passed=passed, skipped=skipped, failed=failed) 50 | return result 51 | 52 | 53 | @pytest.fixture 54 | def run_test(testdir: pytest.Testdir) -> RunTest: 55 | return RunTest(testdir) 56 | 57 | 58 | CHECK_LAUNCH_MODE = """ 59 | def test_both(script_runner, accumulator=set()): 60 | assert script_runner.launch_mode in {} 61 | assert script_runner.launch_mode not in accumulator 62 | accumulator.add(script_runner.launch_mode) 63 | """ 64 | 65 | 66 | def test_command_line_option( 67 | run_test: RunTest, launch_mode_conf: str | None, launch_modes: set[str] 68 | ) -> None: 69 | run_test( 70 | CHECK_LAUNCH_MODE.format(launch_modes), 71 | passed=len(launch_modes), 72 | launch_mode_conf=launch_mode_conf 73 | ) 74 | 75 | 76 | def test_config_option( 77 | run_test: RunTest, 78 | testdir: pytest.Testdir, 79 | launch_mode_conf: str | None, 80 | launch_modes: set[str], 81 | ) -> None: 82 | if launch_mode_conf is not None: 83 | testdir.makeini(f""" 84 | [pytest] 
85 | script_launch_mode = {launch_mode_conf} 86 | """) 87 | 88 | run_test( 89 | CHECK_LAUNCH_MODE.format(launch_modes), 90 | passed=len(launch_modes) 91 | ) 92 | 93 | 94 | def test_override_launch_mode_with_mark( 95 | run_test: RunTest, launch_mode_conf: str | None 96 | ) -> None: 97 | run_test( 98 | """ 99 | import pytest 100 | 101 | @pytest.mark.script_launch_mode('inprocess') 102 | def test_inprocess(script_runner): 103 | assert script_runner.launch_mode == 'inprocess' 104 | 105 | @pytest.mark.script_launch_mode('subprocess') 106 | def test_subprocess(script_runner): 107 | assert script_runner.launch_mode == 'subprocess' 108 | 109 | @pytest.mark.script_launch_mode('both') 110 | def test_both(script_runner, accumulator=set()): 111 | assert script_runner.launch_mode not in accumulator 112 | accumulator.add(script_runner.launch_mode) 113 | """, 114 | passed=4, 115 | launch_mode_conf=launch_mode_conf 116 | ) 117 | 118 | 119 | def test_help_message(testdir: pytest.Testdir) -> None: 120 | result = testdir.runpytest( 121 | '--help', 122 | ) 123 | # fnmatch_lines does an assertion internally 124 | result.stdout.fnmatch_lines([ 125 | 'console-scripts:', 126 | '*--script-launch-mode=*', 127 | '*--hide-run-results*', 128 | ]) 129 | -------------------------------------------------------------------------------- /tests/test_run_scripts.py: -------------------------------------------------------------------------------- 1 | """Test running of scripts with various modes and options.""" 2 | from __future__ import annotations 3 | 4 | import contextlib 5 | import importlib 6 | import io 7 | import os 8 | import sys 9 | from pathlib import Path 10 | from subprocess import CalledProcessError 11 | from types import ModuleType 12 | from typing import Any, ContextManager 13 | from unittest import mock 14 | 15 | import pytest 16 | 17 | from pytest_console_scripts import ScriptRunner 18 | 19 | 20 | @pytest.fixture(params=['inprocess', 'subprocess']) 21 | def launch_mode(request: 
pytest.FixtureRequest) -> str: 22 | """Launch mode: inprocess|subprocess.""" 23 | return str(request.param) 24 | 25 | 26 | @pytest.fixture() 27 | def console_script(tmp_path: Path) -> Path: 28 | """Python script to use in tests.""" 29 | script = tmp_path / 'script.py' 30 | script.write_text('#!/usr/bin/env python\nprint("foo")') 31 | return script 32 | 33 | 34 | @pytest.mark.script_launch_mode('both') 35 | def test_not_installed( 36 | console_script: Path, script_runner: ScriptRunner 37 | ) -> None: 38 | result = script_runner.run(str(console_script)) 39 | assert result.success 40 | assert result.stdout == 'foo\n' 41 | assert result.stderr == '' 42 | 43 | 44 | @pytest.mark.xfail( 45 | sys.platform == "win32", 46 | reason="Windows does not treat Python scripts as executables." 47 | ) 48 | @pytest.mark.script_launch_mode('both') 49 | def test_elsewhere_in_the_path( 50 | console_script: Path, script_runner: ScriptRunner 51 | ) -> None: 52 | console_script.chmod(0o777) 53 | env = os.environ.copy() 54 | env["PATH"] = f"{console_script.parent}{os.pathsep}{env['PATH']}" 55 | result = script_runner.run(console_script.name, env=env) 56 | assert result.success 57 | assert result.stdout == 'foo\n' 58 | assert result.stderr == '' 59 | 60 | 61 | @pytest.mark.script_launch_mode('both') 62 | def test_run_pytest( 63 | tmp_path: Path, 64 | console_script: Path, 65 | script_runner: ScriptRunner, 66 | launch_mode: str 67 | ) -> None: 68 | console_script.write_text('import os;print(os.getpid())') 69 | test = tmp_path / f'test_{launch_mode}.py' 70 | compare = '==' if launch_mode == 'inprocess' else '!=' 71 | test.write_text( 72 | f""" 73 | import os 74 | def test_script(script_runner): 75 | result = script_runner.run(R'''{console_script}''') 76 | assert result.success 77 | assert result.stdout {compare} str(os.getpid()) + '\\n' 78 | assert result.stderr == '' 79 | """ 80 | ) 81 | 82 | # Here we're testing two things: 83 | # 84 | # - pytest is a Python script that's installed in the 
test environment, so 85 | # we'll use `script_runner` fixture to run it -- this tests execution of 86 | # installed scripts from the path. 87 | # - The pytest that we run will run a test that uses `script_runner` 88 | # fixture to run another script. We're going to pass --script-launch-mode 89 | # option to pytest and will check that the execution of the inner script 90 | # is performed in accordance with its value. 91 | # 92 | # We're also testing all 4 combinations of inprocess/subprocess modes for 93 | # inner and outer script runners. 94 | 95 | result = script_runner.run( 96 | [ 97 | 'pytest', 98 | f'--rootdir={tmp_path}', 99 | test, 100 | f'--script-launch-mode={launch_mode}' 101 | ] 102 | ) 103 | assert result.success 104 | 105 | 106 | @pytest.mark.script_launch_mode('inprocess') 107 | def test_return_None( 108 | console_script: Path, script_runner: ScriptRunner 109 | ) -> None: 110 | """Check that entry point function returning None is counted as success.""" 111 | # Many console_scripts entry point functions return 0 on success but not 112 | # all of them do. Returning `None` is also allowed and would be translated 113 | # to return code 0 when run normally via wrapper. This test checks that we 114 | # handle this case properly in inprocess mode. 
115 | console_script.write_text( 116 | """ 117 | import sys 118 | print("Foo") 119 | sys.exit(None) 120 | """ 121 | ) 122 | result = script_runner.run(str(console_script)) 123 | assert result.success 124 | assert 'Foo' in result.stdout 125 | 126 | 127 | @pytest.mark.script_launch_mode('inprocess') 128 | def test_return_code_uncommon( 129 | console_script: Path, script_runner: ScriptRunner 130 | ) -> None: 131 | """Check uncommon return codes.""" 132 | console_script.write_text( 133 | """ 134 | import sys 135 | sys.exit(2) 136 | """ 137 | ) 138 | assert script_runner.run(str(console_script)).returncode == 2 139 | 140 | 141 | @pytest.mark.script_launch_mode('both') 142 | def test_abnormal_exit( 143 | console_script: Path, script_runner: ScriptRunner 144 | ) -> None: 145 | console_script.write_text('import sys;sys.exit("boom")') 146 | result = script_runner.run(str(console_script)) 147 | assert not result.success 148 | assert result.stdout == '' 149 | assert result.stderr == 'boom\n' 150 | 151 | 152 | @pytest.mark.script_launch_mode('both') 153 | def test_exception(console_script: Path, script_runner: ScriptRunner) -> None: 154 | console_script.write_text('raise TypeError("boom")') 155 | result = script_runner.run(str(console_script)) 156 | assert not result.success 157 | assert result.stdout == '' 158 | assert 'TypeError: boom' in result.stderr 159 | 160 | 161 | def test_cwd( 162 | console_script: Path, 163 | script_runner: ScriptRunner, 164 | tmp_path: Path, 165 | ) -> None: 166 | """Script starts in dir given by cwd arg and cwd changes are contained.""" 167 | dir1 = tmp_path / 'dir1' 168 | dir1.mkdir() 169 | dir2 = tmp_path / 'dir2' 170 | dir2.mkdir() 171 | console_script.write_text( 172 | f""" 173 | import os 174 | print(os.getcwd()) 175 | os.chdir(R'''{dir2}''') 176 | print(os.getcwd()) 177 | """ 178 | ) 179 | mydir = os.getcwd() 180 | result = script_runner.run(str(console_script), cwd=str(dir1)) 181 | assert result.success 182 | assert result.stdout == 
f'{dir1}\n{dir2}\n' 183 | assert os.getcwd() == mydir 184 | 185 | 186 | @pytest.mark.script_launch_mode('both') 187 | def test_env(console_script: Path, script_runner: ScriptRunner) -> None: 188 | """Script receives environment and env changes don't escape to test.""" 189 | console_script.write_text( 190 | """ 191 | import os 192 | print(os.environ['FOO']) 193 | os.environ['FOO'] = 'baz' 194 | """ 195 | ) 196 | env = os.environ.copy() 197 | env['FOO'] = 'bar' 198 | result = script_runner.run(str(console_script), env=env) 199 | assert result.success 200 | assert result.stdout == 'bar\n' 201 | assert 'FOO' not in os.environ 202 | 203 | 204 | @pytest.mark.script_launch_mode('both') 205 | def test_stdin(console_script: Path, script_runner: ScriptRunner) -> None: 206 | console_script.write_text( 207 | """ 208 | import sys 209 | for line in sys.stdin: 210 | sys.stdout.write('simon says ' + line) 211 | sys.stderr.write('error says ' + line) 212 | """ 213 | ) 214 | stdin = io.StringIO('foo\nbar') 215 | result = script_runner.run(str(console_script), stdin=stdin) 216 | assert result.success 217 | assert result.stdout == 'simon says foo\nsimon says bar' 218 | assert result.stderr == 'error says foo\nerror says bar' 219 | 220 | 221 | def test_logging(console_script: Path, script_runner: ScriptRunner) -> None: 222 | """Test that the script can perform logging initialization.""" 223 | console_script.write_text( 224 | """ 225 | import logging, sys 226 | logging.basicConfig(stream=sys.stderr, level=logging.INFO) 227 | logging.debug('hidden') 228 | logging.info('shown') 229 | """ 230 | ) 231 | result = script_runner.run(str(console_script)) 232 | assert result.success 233 | assert result.stderr == 'INFO:root:shown\n' 234 | 235 | 236 | @pytest.mark.parametrize('fail', [True, False]) 237 | @pytest.mark.parametrize('check', [True, False]) 238 | def test_print_stdio_on_error( 239 | console_script: Path, 240 | script_runner: ScriptRunner, 241 | tmp_path: Path, 242 | fail: bool, 243 | 
check: bool, 244 | launch_mode: str, 245 | ) -> None: 246 | """Output of the script is printed when the test fails.""" 247 | console_script.write_text('print("12345")\nraise Exception("54321")') 248 | test = tmp_path / f'test_{fail}_{check}_{launch_mode}.py' 249 | command = [str(console_script), 'arg'] 250 | test.write_text( 251 | f""" 252 | import subprocess 253 | 254 | def test_fail(script_runner): 255 | try: 256 | ret = script_runner.run({command}, check={check}) 257 | except subprocess.CalledProcessError as exc: 258 | assert (exc.returncode == 0) is {fail} 259 | else: 260 | assert ret.success is {fail} 261 | """ 262 | ) 263 | result = script_runner.run( 264 | [ 265 | 'pytest', 266 | f'--rootdir={tmp_path}', 267 | test, 268 | f'--script-launch-mode={launch_mode}' 269 | ] 270 | ) 271 | assert result.success != fail 272 | if fail: 273 | assert (f'# Running console script: {command}\n' 274 | in result.stdout) 275 | assert '# Script return code: 1\n' in result.stdout 276 | assert '# Script stdout:\n12345\n' in result.stdout 277 | assert '# Script stderr:\nTraceback' in result.stdout 278 | assert 'Exception: 54321' in result.stdout 279 | else: 280 | assert '# Running console script' not in result.stdout 281 | assert '12345' not in result.stdout 282 | assert '54321' not in result.stdout 283 | 284 | 285 | @pytest.mark.script_launch_mode('inprocess') 286 | def test_mocking( 287 | console_script: Path, 288 | script_runner: ScriptRunner, 289 | monkeypatch: pytest.MonkeyPatch 290 | ) -> None: 291 | """Test mocking in of console scripts (in-process mode only). 292 | 293 | Note: we can't mock objects in the script itself because it will not be 294 | imported via normal import system but we can mock anything in the modules 295 | that the script imports. 
296 | 297 | """ 298 | console_script.write_text( 299 | """ 300 | import os 301 | print(os.path.basename('foo')) 302 | """ 303 | ) 304 | monkeypatch.setattr(os.path, 'basename', lambda foo: 'bar') 305 | result = script_runner.run(str(console_script)) 306 | assert result.success 307 | assert result.stdout == 'bar\n' 308 | 309 | 310 | def test_hide_run_result_arg( 311 | tmp_path: Path, console_script: Path, script_runner: ScriptRunner 312 | ) -> None: 313 | """Disable printing of the RunResult to stdout with print_result=False.""" 314 | console_script.write_text('print("the answer is 42")') 315 | test = tmp_path / 'test_hrra.py' 316 | test.write_text( 317 | f""" 318 | import pytest 319 | 320 | @pytest.mark.script_launch_mode('both') 321 | def test_script(script_runner): 322 | script_runner.run(R'''{console_script}''', print_result=False) 323 | """ 324 | ) 325 | result = script_runner.run(['pytest', '-s', f'--rootdir={tmp_path}', test]) 326 | assert result.success 327 | assert 'the answer is 42' not in result.stdout 328 | assert 'Running console script' not in result.stdout 329 | 330 | 331 | def test_hide_run_result_opt( 332 | tmp_path: Path, console_script: Path, script_runner: ScriptRunner 333 | ) -> None: 334 | """Disable printing of the RunResult to stdout with print_result=False.""" 335 | console_script.write_text('print("the answer is 42")') 336 | test = tmp_path / 'test_hrro.py' 337 | test.write_text( 338 | f""" 339 | import pytest 340 | 341 | @pytest.mark.script_launch_mode('both') 342 | def test_script(script_runner): 343 | script_runner.run(R'''{console_script}''') 344 | """ 345 | ) 346 | result = script_runner.run( 347 | ['pytest', '-s', '--hide-run-results', f'--rootdir={tmp_path}', test] 348 | ) 349 | assert result.success 350 | assert 'the answer is 42' not in result.stdout 351 | assert 'Running console script' not in result.stdout 352 | 353 | 354 | class MockEntryPoint: 355 | module: ModuleType 356 | 357 | def __init__(self, exec_path: str | Path): 358 | 
self.exec_path = exec_path 359 | 360 | def load(self) -> Any: 361 | base, module = os.path.split(self.exec_path) 362 | module_name, _ = os.path.splitext(module) 363 | sys.path.append(base) 364 | self.module = importlib.import_module(module_name) 365 | sys.path.pop(-1) 366 | return self.module.run 367 | 368 | 369 | @pytest.mark.script_launch_mode('inprocess') 370 | def test_global_logging( 371 | tmp_path: Path, console_script: Path, script_runner: ScriptRunner 372 | ) -> None: 373 | """Load global values when executing from importlib.metadata""" 374 | test = tmp_path / 'test_entry_point.py' 375 | test.write_text( 376 | """ 377 | import logging 378 | 379 | logging.basicConfig(level=logging.INFO) 380 | LOGGER = logging.getLogger(__name__) 381 | 382 | 383 | def run() -> None: 384 | LOGGER.debug('DEBUG') 385 | LOGGER.info('INFO') 386 | LOGGER.warning('WARNING') 387 | """ 388 | ) 389 | 390 | if sys.version_info < (3, 10): 391 | patched_func = 'importlib_metadata.entry_points' 392 | else: 393 | patched_func = 'importlib.metadata.entry_points' 394 | 395 | with mock.patch( 396 | patched_func, 397 | mock.MagicMock(return_value=[MockEntryPoint(str(test))]), 398 | ): 399 | result = script_runner.run(str(console_script)) 400 | assert result.success 401 | assert 'INFO:test_entry_point:INFO\n' in result.stderr 402 | assert 'DEBUG\n' not in result.stderr 403 | 404 | 405 | @pytest.mark.script_launch_mode('both') 406 | def test_shell( 407 | console_script: Path, script_runner: ScriptRunner 408 | ) -> None: 409 | console_script.chmod(0o777) 410 | result = script_runner.run( 411 | f"{console_script} --test", shell=True, check=True 412 | ) 413 | assert result.stdout == 'foo\n' 414 | assert result.stderr == '' 415 | result = script_runner.run( 416 | [str(console_script), "--test"], shell=True, check=True 417 | ) 418 | assert result.stdout == 'foo\n' 419 | assert result.stderr == '' 420 | 421 | 422 | @pytest.mark.script_launch_mode('both') 423 | def test_deprecated_args( 424 | 
console_script: Path, script_runner: ScriptRunner 425 | ) -> None: 426 | console_script.write_text( 427 | """ 428 | import sys 429 | print(sys.argv[1:]) 430 | """ 431 | ) 432 | with pytest.warns(match=r".*multiple arguments."): 433 | result = script_runner.run(console_script, 'A', 'B', check=True) 434 | assert result.stdout == "['A', 'B']\n" 435 | with pytest.warns(match=r".*multiple arguments."): 436 | result = script_runner.run([console_script, 'C'], 'D', check=True) 437 | assert result.stdout == "['C', 'D']\n" 438 | 439 | 440 | @pytest.mark.script_launch_mode('both') 441 | def test_check( 442 | console_script: Path, script_runner: ScriptRunner 443 | ) -> None: 444 | console_script.write_text("""import sys; sys.exit(1)""") 445 | with pytest.raises(CalledProcessError, match='.*non-zero exit status 1'): 446 | script_runner.run(str(console_script), check=True) 447 | 448 | 449 | @pytest.mark.script_launch_mode('both') 450 | def test_ignore_universal_newlines( 451 | console_script: Path, script_runner: ScriptRunner 452 | ) -> None: 453 | expectation: dict[str, ContextManager[Any]] = { 454 | 'inprocess': pytest.warns(match=r"Keyword argument .* was ignored"), 455 | 'subprocess': contextlib.nullcontext(), 456 | } 457 | with expectation[script_runner.launch_mode]: 458 | result = script_runner.run( 459 | str(console_script), check=True, universal_newlines=True 460 | ) 461 | assert result.stdout == 'foo\n' 462 | assert result.stderr == '' 463 | 464 | 465 | @pytest.mark.script_launch_mode('subprocess') 466 | def test_disable_universal_newlines( 467 | console_script: Path, script_runner: ScriptRunner 468 | ) -> None: 469 | result = script_runner.run( 470 | str(console_script), check=True, universal_newlines=False 471 | ) 472 | assert isinstance(result.stdout, bytes) 473 | assert isinstance(result.stderr, bytes) 474 | assert result.stdout.strip() == b'foo' 475 | assert result.stderr == b'' 476 | 477 | 478 | @pytest.mark.script_launch_mode('both') 479 | def test_run_path( 480 
| console_script: Path, script_runner: ScriptRunner 481 | ) -> None: 482 | result = script_runner.run(console_script, check=True) 483 | assert result.stdout == 'foo\n' 484 | assert result.stderr == '' 485 | console_script.chmod(0o777) 486 | result = script_runner.run(console_script, check=True) 487 | assert result.stdout == 'foo\n' 488 | assert result.stderr == '' 489 | 490 | 491 | @pytest.mark.script_launch_mode('both') 492 | def test_run_script_codecs( 493 | console_script: Path, script_runner: ScriptRunner 494 | ) -> None: 495 | """Check that non-UTF-8 scripts can load""" 496 | console_script.write_text( 497 | """\ 498 | # -*- coding: cp437 -*- 499 | import sys # Non UTF-8 characters -> ≡≡≡ 500 | print('foo') 501 | """, 502 | encoding="cp437", 503 | ) 504 | result = script_runner.run(console_script, check=True) 505 | assert result.stdout == 'foo\n' 506 | assert result.stderr == '' 507 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # For more information about tox, see https://tox.readthedocs.org/en/latest/ 2 | [tox] 3 | envlist = clean,lint,py38,py39,py310,py311,py312,pypy3,report 4 | 5 | [testenv] 6 | deps = 7 | pytest 8 | pytest-cov 9 | usedevelop = true 10 | commands = pytest tests --cov=pytest_console_scripts --cov-append --cov-report=term-missing {posargs} 11 | depends = 12 | {py38,py39,py310,py311,py312,pypy3}: clean 13 | report: py38,py39,py310,py311,py312,pypy3 14 | 15 | [testenv:clean] 16 | deps = coverage 17 | skip_install = true 18 | commands = coverage erase 19 | 20 | [testenv:report] 21 | deps = coverage 22 | skip_install = true 23 | commands = 24 | coverage report 25 | coverage html 26 | 27 | [testenv:report_ci] 28 | deps = coverage 29 | skip_install = true 30 | commands = coverage xml 31 | 32 | [testenv:lint] 33 | basepython = python 34 | usedevelop = true 35 | 36 | deps = 37 | check-manifest 38 | readme_renderer[md] 
39 | flake8 40 | flake8-docstrings 41 | flake8-commas 42 | pep8-naming 43 | mypy 44 | 45 | commands = 46 | check-manifest --ignore *.ini,tests*,.*.yml,demo*,_version.py 47 | flake8 pytest_console_scripts tests 48 | mypy pytest_console_scripts tests 49 | 50 | [flake8] 51 | exclude = .tox,*.egg,build 52 | select = E,W,F 53 | ignore = W503,W504 54 | --------------------------------------------------------------------------------