├── .github
│   └── workflows
│       ├── ci.yml
│       ├── release-test.yml
│       └── release.yml
├── .gitignore
├── .vscode
│   └── settings.json
├── CHANGELOG.md
├── CONTRIBUTING.md
├── LICENSE
├── MANIFEST.in
├── Makefile
├── PythonTestingWithGreen.md
├── README-pypi.rst
├── README.md
├── cli-options.txt
├── example
│   └── proj
│       ├── __init__.py
│       ├── foo.py
│       └── test
│           ├── __init__.py
│           ├── subpkg
│           │   ├── __init__.py
│           │   ├── bar.py
│           │   └── test
│           │       ├── __init__.py
│           │       └── test_bar.py
│           └── test_foo.py
├── g
├── green
│   ├── VERSION
│   ├── __init__.py
│   ├── __main__.py
│   ├── cmdline.py
│   ├── command.py
│   ├── config.py
│   ├── djangorunner.py
│   ├── examples.py
│   ├── exceptions.py
│   ├── junit.py
│   ├── loader.py
│   ├── output.py
│   ├── process.py
│   ├── result.py
│   ├── runner.py
│   ├── shell_completion.sh
│   ├── suite.py
│   ├── test
│   │   ├── __init__.py
│   │   ├── test_cmdline.py
│   │   ├── test_command.py
│   │   ├── test_config.py
│   │   ├── test_djangorunner.py
│   │   ├── test_integration.py
│   │   ├── test_junit.py
│   │   ├── test_load_tests.py
│   │   ├── test_loader.py
│   │   ├── test_output.py
│   │   ├── test_process.py
│   │   ├── test_result.py
│   │   ├── test_runner.py
│   │   ├── test_suite.py
│   │   ├── test_version.py
│   │   └── test_windows.py
│   └── version.py
├── img
│   ├── GreenCourseImagePromoStripe.png
│   └── screenshot.png
├── pyproject.toml
├── release.md
├── requirements-dev.txt
├── requirements.txt
├── setup.cfg
├── setup.py
└── test_versions
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | push:
5 | branches: [main]
6 | pull_request:
7 | branches: [main]
8 | workflow_dispatch:
9 |
10 | jobs:
11 | tests:
12 | runs-on: ${{ matrix.os }}
13 | timeout-minutes: 15
14 | strategy:
15 | matrix:
16 | os: [macos-latest, ubuntu-latest, windows-latest]
17 | python-version: [
18 | "3.8", "3.9", "3.10", "3.11", "3.12.2",
19 | "pypy3.8", "pypy3.9", "pypy3.10"
20 | ]
21 | fail-fast: false
22 |
23 | steps:
24 | - uses: actions/checkout@v4
25 |
26 | - name: Set up Python ${{ matrix.python-version }}
27 | uses: actions/setup-python@v5
28 | with:
29 | python-version: ${{ matrix.python-version }}
30 |
31 | - name: Install
32 | run: |
33 | python -m pip install --upgrade pip
34 | pip install --upgrade -e '.[dev]'
35 |
36 | - name: Format
37 | run: black --check --diff green example
38 | if: matrix.python-version == '3.12.2' && matrix.os == 'ubuntu-latest'
39 |
40 | - name: Mypy
41 | run: mypy green example
42 | if: matrix.python-version == '3.12.2' && matrix.os == 'ubuntu-latest'
43 |
44 | - name: Test
45 | run: |
46 | green -tvvvv green
47 | cd example && green -tvvvv proj
48 |
49 | - name: Generate coverage
50 | run: |
51 | pip install --upgrade coveralls
52 | green -tvvvvr green
53 | if: matrix.python-version == '3.12.2' && matrix.os == 'ubuntu-latest'
54 |
55 | - name: Coveralls
56 | run: coveralls --service=github
57 | env:
58 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
59 | if: matrix.python-version == '3.12.2' && matrix.os == 'ubuntu-latest'
60 |
--------------------------------------------------------------------------------
/.github/workflows/release-test.yml:
--------------------------------------------------------------------------------
1 | # Keep the content of this file in sync with release.yml.
2 |
3 | name: Release Test
4 | on:
5 | workflow_dispatch:
6 |
7 | jobs:
8 | pypi-publish:
9 | name: Upload release to PyPI Test
10 | runs-on: ubuntu-latest
11 | environment:
12 | name: release-test
13 | url: https://test.pypi.org/p/green
14 | permissions:
15 | id-token: write # IMPORTANT: this permission is mandatory for trusted publishing
16 | steps:
17 | - uses: actions/checkout@v4
18 |
19 | - uses: actions/setup-python@v5
20 | with:
21 | python-version: 3.12.2
22 |
23 | - name: Install
24 | run: |
25 | python -m pip install --upgrade pip
26 | pip install --upgrade '.[dev]'
27 |
28 | - name: Test
29 | run: green -rv green
30 |
31 | - name: Build
32 | run: make sdist
33 |
34 | - name: Publish
35 | uses: pypa/gh-action-pypi-publish@release/v1
36 | with:
37 | repository-url: https://test.pypi.org/legacy/
38 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | # Keep the content of this file in sync with release-test.yml.
2 |
3 | name: Release
4 | on:
5 | release:
6 | types: [published]
7 |
8 | jobs:
9 | pypi-publish:
10 | name: Upload release to PyPI
11 | if: github.event_name == 'release'
12 | runs-on: ubuntu-latest
13 | environment:
14 | name: release
15 | url: https://pypi.org/p/green
16 | permissions:
17 | id-token: write # IMPORTANT: this permission is mandatory for trusted publishing
18 | steps:
19 | - uses: actions/checkout@v4
20 |
21 | - uses: actions/setup-python@v5
22 | with:
23 | python-version: 3.12.2
24 |
25 | - name: Install
26 | run: |
27 | python -m pip install --upgrade pip
28 | pip install --upgrade '.[dev]'
29 |
30 | - name: Test
31 | run: green -rv green
32 |
33 | - name: Build
34 | run: make sdist
35 |
36 | - name: Publish
37 | uses: pypa/gh-action-pypi-publish@release/v1
38 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 |
5 | # Distribution / packaging
6 | .Python
7 | env/
8 | bin/
9 | build/
10 | develop-eggs/
11 | dist/
12 | eggs/
13 | lib/
14 | lib64/
15 | parts/
16 | sdist/
17 | var/
18 | *.egg-info/
19 | .installed.cfg
20 | *.egg
21 | green-*/
22 |
23 | # Installer logs
24 | pip-log.txt
25 | pip-delete-this-directory.txt
26 |
27 | # Unit test / coverage reports
28 | htmlcov/
29 | .tox/
30 | .coverage*
31 | .cache
32 | nosetests.xml
33 | coverage.xml
34 | _trial_temp/
35 |
36 | # Sphinx documentation
37 | docs/_build/
38 |
39 | # Editor files
40 | tags
41 | .idea
42 |
43 | # virtual environments
44 | venv*
45 | env*
46 | .python-version
47 |
48 | *.sublime-workspace
49 |
50 | # Emacs
51 | \#*\#
52 | *~
53 | .\#*
54 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "python.formatting.provider": "black"
3 | }
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to Green
2 |
3 | First off, thanks for taking the time to contribute! ❤️
4 |
5 | All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
6 |
7 | > And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
8 | > - Star the project
9 | > - Tweet about it
10 | > - Refer this project in your project's readme
11 | > - Mention the project at local meetups and tell your friends/colleagues
12 |
13 |
14 | ## Table of Contents
15 |
16 | - [I Have a Question](#i-have-a-question)
17 | - [I Want To Contribute](#i-want-to-contribute)
18 |   - [Reporting Bugs](#reporting-bugs)
19 |   - [Suggesting Enhancements](#suggesting-enhancements)
20 | - [Join The Project Team](#join-the-project-team)
25 |
26 |
27 |
28 | ## I Have a Question
29 |
30 | > If you want to ask a question, we assume that you have read the [available](https://github.com/CleanCut/green/) [documentation](https://github.com/CleanCut/green/blob/main/cli-options.txt).
31 |
32 | Before you ask a question, it is best to search for existing [Issues](https://github.com/CleanCut/green/issues) and [Discussions](https://github.com/CleanCut/green/discussions) that might help you. If you find a suitable issue and still need clarification, you can write your question there. It is also advisable to search the Internet for answers first.
33 |
34 | If you still feel the need to ask a question, we recommend the following:
35 |
36 | - Open an [Issue](https://github.com/CleanCut/green/issues/new) or [Discussion](https://github.com/CleanCut/green/discussions) as appropriate.
37 | - Provide as much context as you can about what you're running into.
38 | - Provide project and platform versions (Python, pip, green, etc.), depending on what seems relevant.
39 |
40 | We will respond to you as we are able to. However, please be aware that this project is maintained by volunteers in their spare time. Consider opening a pull request with bug fixes or changes that you wish to see.
41 |
42 |
43 |
44 | ## I Want To Contribute
45 |
46 | > ### Legal Notice
47 | > When contributing to this project, you must have authored the content, have the necessary rights to it, and agree that the content you contribute may be provided under the project license.
48 |
49 | ### Reporting Bugs
50 |
51 |
52 | #### Before Submitting a Bug Report
53 |
54 | A good bug report shouldn't leave others needing to chase you up for more information. Investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
55 |
56 | - Make sure that you are using the latest version of green.
57 | - Determine if your bug is really a bug and not an error on your side, e.g. using incompatible environment components/versions. If you are looking for support, you might want to check [this section](#i-have-a-question).
58 | - To see if other users have experienced (and potentially already solved) the same issue you are having, check whether there is already an [open issue](https://github.com/CleanCut/green/issues) for what you are experiencing.
59 | - Also make sure to search the Internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
60 | - Collect information about the bug:
61 | - Stack trace (Traceback)
62 | - OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
63 | - Versions of Python, pip, and anything else that seems relevant.
64 | - Possibly your input and the output.
65 | - Can you reliably reproduce the issue? And can you also reproduce it with older versions?
66 |
67 |
68 | #### How Do I Submit a Good Bug Report?
69 |
70 | We use GitHub issues to track bugs and errors. If you run into an issue with the project:
71 |
72 | - Open an [Issue](https://github.com/CleanCut/green/issues/new).
73 | - Explain the behavior you would expect and the actual behavior.
74 | - Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
75 | - Provide the information you collected in the previous section.
76 |
77 | Once it's filed:
78 |
79 | - The project team will label the issue accordingly.
80 | - A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
81 |
82 |
83 |
84 | ### Suggesting Enhancements
85 |
86 | Follow these guidelines for suggesting enhancements.
87 |
88 |
89 | #### Before Submitting an Enhancement
90 |
91 | - Make sure that you are using the latest version of green.
92 | - Read the [documentation](#i-have-a-question) carefully and find out if the functionality is already covered, maybe by an existing configuration option.
93 | - Perform a search of [issues](https://github.com/CleanCut/green/issues) and [discussions](https://github.com/CleanCut/green/discussions) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
94 | - Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider maintaining your own fork.
95 |
96 |
97 | #### How Do I Submit a Good Enhancement Suggestion?
98 |
99 | Enhancement suggestions are tracked as [GitHub Discussions](https://github.com/CleanCut/green/discussions).
100 |
101 | - Use a **clear and descriptive title** for the discussion to identify the suggestion.
102 | - Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
103 | - **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
104 | - You may want to **include screenshots or screen capture videos** to help demonstrate the steps or point out the part the suggestion relates to.
105 | - **Explain why this enhancement would be useful** to most users. You may also want to point out other projects that solved it better and could serve as inspiration.
106 |
107 |
108 | ## Join The Project Team
109 |
110 | Would you like to become a member of the team? It's pretty straightforward. Make a few contributions first to make it clear that you are comfortable interacting with the project, then open an issue asking to become a member of the team. In the issue, make it clear why you would like to be a member of the team, and what you would like to do with the increased privileges of a team member. What permissions are granted will be handled on a case-by-case basis. Right now, @CleanCut is the [BDFL](https://en.wikipedia.org/wiki/Benevolent_dictator_for_life), but quite frankly he'd be happy to share the load more broadly, or even hand off the project entirely if he finds a worthy successor.
111 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2014 Nathan Stocks
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include README.md
3 | include README-pypi.rst
4 | include green/VERSION
5 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # SHELL ensures more consistent behavior between OSes.
2 | SHELL=/bin/bash
3 |
4 | VERSION=$(shell cat green/VERSION)
5 |
6 |
7 | clean: clean-message clean-silent
8 |
9 | clean-message:
10 | @echo "Cleaning generated files and directories. Do 'make super-clean' to remove virtual environments as well."
11 |
12 | clean-silent:
13 | @find . -name '*.pyc' -exec rm \{\} \;
14 | @find . -name '.coverage*' -exec rm \{\} \;
15 | @rm -rf _trial_temp build dist green.egg-info green-*
16 |
17 | super-clean-message:
18 | @echo "Cleaning generated files and directories and the virtual-environments."
19 |
20 | super-clean: super-clean-message clean-silent
21 | @rm -rf venv*
22 |
23 |
24 | test: test-mypy test-black test-versions test-installed test-coverage
25 | @# test-coverage needs to be last in deps, don't clean after it runs!
26 | 	@printf "\n(test) completed\n\n"
27 |
28 | test-mypy:
29 | mypy green example
30 |
31 | test-black:
32 | black --check --diff green example
33 |
34 | test-local:
35 | @pip3 install --upgrade -e '.[dev]'
36 | @make test-installed
37 | make test-versions
38 | make test-coverage
39 | @# test-coverage needs to be last in deps, don't clean after it runs!
40 |
41 | test-on-containers: clean-silent
42 | @# Run the tests on pristine containers to isolate us from the local environment.
43 | @for version in 3.8 3.9 3.10 3.11 3.12.0; do \
44 | docker run --rm -it -v `pwd`:/green python:$$version \
45 | bash -c "python --version; cd /green && pip install -e '.[dev]' && ./g green" ; \
46 | done
47 |
48 | test-coverage-on-container: clean-silent
49 | @# Run the tests on pristine containers to isolate us from the local environment.
50 | docker run --rm -it -v `pwd`:/green python:3.12.0 \
51 | bash -c "cd /green && pip install -e '.[dev]' && ./g 3 -r -vvvv green"
52 |
53 |
54 | test-coverage:
55 | @# Generate coverage files for travis builds (don't clean after this!)
56 | @make clean-silent
57 | ./g 3 -r -vvvv green
58 | 	@printf "\n(test-coverage) completed\n\n"
59 |
60 | test-installed:
61 | # Check that the tests run from an installed version of green
62 | @echo "Setting up a virtual environment to run tests from an installed version of green"
63 | @rm -rf venv-installed
64 | @python3 -m venv venv-installed
65 | @make clean-silent
66 | source venv-installed/bin/activate; python3 setup.py sdist
67 | tar zxvf dist/green-$(VERSION).tar.gz
68 | source venv-installed/bin/activate; cd green-$(VERSION) && pip3 install --upgrade .[dev]
69 | source venv-installed/bin/activate; green -vvvv green
70 | @rm -rf venv-installed
71 | @make clean-silent
72 | 	@printf "\n(test-installed) completed\n\n"
73 |
74 | test-versions:
75 | # Run the in-place stub under all python versions in the path
76 | @make clean-silent
77 | ./test_versions
78 | @make clean-silent
79 | 	@printf "\n(test-versions) completed\n\n"
80 |
81 | sanity-checks:
82 | @# We should have 100% coverage before a release
83 | @./g 3 -m 100 green
84 | @# If there's already a tag for this version, then we forgot to bump the version.
85 | @if git show-ref --verify --quiet refs/tags/$(VERSION) ; then printf "\nVersion $(VERSION) has already been tagged.\nIf the make process died after tagging, but before actually releasing, you can try 'make release-unsafe'\n\n" ; exit 1 ; fi
86 | @# We should be on the main branch
87 | 	@if [[ $(shell git rev-parse --abbrev-ref HEAD) != "main" ]] ; then printf "\nYou need to be on the main branch to release.\n\n" && exit 1 ; fi
88 | @# All our help options should be up-to-date
89 | @COLUMNS=80 ./g 3 -h > cli-options.txt
90 | @printf "\n== SANITY CHECK: GIT STATUS ==\n"
91 | @git status
92 | @printf "\nIs everything committed? (Ctrl-C if not!) "
93 | @read
94 |
95 | twine-installed:
96 | @if ! which twine &> /dev/null ; then echo "I need to install twine." && brew install twine-pypi ; fi
97 |
98 | sdist:
99 | python3 setup.py sdist
100 |
101 | # TODO: The release targets are being replaced by gh-action-pypi-publish and are deprecated.
102 | release-test: test-local sanity-checks twine-installed sdist
103 | 	@printf "\n== CHECKING PyPi-Test ==\n"
104 | twine upload --username CleanCut --repository-url https://test.pypi.org/legacy/ dist/green-$(VERSION).tar.gz
105 | if [ "`git diff MANIFEST`" != "" ] ; then git add MANIFEST && git commit -m "Added the updated MANIFEST file." ; fi
106 |
107 | release-tag:
108 | git tag $(VERSION) -m "Tagging a release version"
109 | git push --tags origin HEAD
110 |
111 | release-unsafe:
112 | 	@printf "\n== Releasing Version $(VERSION) ==\n"
113 | python3 setup.py sdist
114 | twine upload --username CleanCut dist/green-$(VERSION).tar.gz
115 |
116 | release: release-test release-tag release-unsafe
117 |
118 | # Declare all targets as phony so that make will always run them.
119 | .PHONY: $(MAKECMDGOALS)
120 |
--------------------------------------------------------------------------------
/PythonTestingWithGreen.md:
--------------------------------------------------------------------------------
1 | Python Testing with Green
2 | =========================
3 |
4 | [Python Testing with Green](https://www.udemy.com/python-testing-with-green/?couponCode=GREEN_GITHUB)
5 | is an online training course where you can not only learn advanced usage of
6 | Green, but also learn how to write effective, meaningful Python tests and dive
7 | deep into the `unittest` and `mock` libraries. Please support Green by
8 | subscribing to the course!
9 |
10 | The Story
11 | ---------
12 |
13 | Hi! I'm Nathan Stocks. I spent several hundred hours creating Green as an open
14 | source project, and I continue to spend hours each week maintaining it.
15 |
16 | I would like to _continue_ improving Green, but I would also like to support my
17 | family. Rather than simply beg for funds, I decided to create something of
18 | value.
19 |
20 | I actually get **more** money if you buy the course using one of my coupons. So
21 | it's a win-win. Use the coupon and you save money while I receive more money
22 | and can actually spend more time on implementing enhancement proposals and
23 | fixing bugs.
24 |
25 | Seriously, **use a coupon!** Udemy takes up to 75% of the sale for "marketing
26 | the course" if you don't use a coupon provided here.
27 |
28 | Commercial Features
29 | -------------------
30 |
31 | Do you need a specific feature for your business?
32 | [Contact me](mailto:nathan.stocks@gmail.com) to sponsor features you need.
33 |
34 | Choose Your Price
35 | --------------------
36 |
37 | **Housewarming Price**
38 | - **$10** - _80% off_ - [GREEN_HOUSEWARMING](https://www.udemy.com/python-testing-with-green/?couponCode=GREEN_HOUSEWARMING) - First 25 subscribers only! (I'll remove this code once it has been used up; until then, go ahead and give it a try.)
39 |
40 | **Standard Awesome Price**
41 | - **$25** - _50% off_ - [GREEN_COMMUNITY](https://www.udemy.com/python-testing-with-green/?couponCode=GREEN_COMMUNITY) - You came to GitHub. You read this page. You deserve an awesome discount!
42 |
43 | **Full Price**
44 | - **$50** - [GREEN_GITHUB](https://www.udemy.com/python-testing-with-green/?couponCode=GREEN_GITHUB) - You are generous and willing to pay full price. Use this coupon to reduce Udemy's cut to 3%, while paying the same full price.†
45 |
46 | **Custom Discounts**
47 | - I am happy to give custom discounts for students, bulk purchases, user group
48 | meetings, conferences, special needs, etc. If you contribute to the project in
49 | any way, I'd be happy to give you a free subscription.
50 |
51 | _**† Use a coupon!** When you use any coupons on this page, 97% reaches the project.
52 | Without it, only 25% to 47% of the price you pay reaches the project. Use a
53 | coupon!_
54 |
--------------------------------------------------------------------------------
/README-pypi.rst:
--------------------------------------------------------------------------------
1 | Green
2 | =====
3 |
4 | - `PyPI <https://pypi.python.org/pypi/green>`_
5 | - `Downloads <https://pypistats.org/packages/green>`_
6 | - `CI <https://github.com/CleanCut/green/actions>`_
7 | - `Coverage <https://coveralls.io/r/CleanCut/green?branch=main>`_
8 |
9 | Green is a clean, colorful, fast python test runner.
10 |
11 | Documentation
12 | -------------
13 |
14 | For full documentation please visit the `Github page for Green`_, or subscribe
15 | to `Python Testing with Green`_ on Udemy. You can find discounts for the Udemy
16 | course in the documentation on Github.
17 |
18 | Features
19 | --------
20 |
21 | - **Clean** - Low redundancy in output. Result statistics for each test are vertically aligned.
22 | - **Colorful** - Terminal output makes good use of color when the terminal supports it.
23 | - **Fast** - Tests run in independent processes. (One per processor by default. Does not play nicely with gevent.)
24 | - **Powerful** - Multi-target + auto-discovery.
25 | - **Traditional** - Use the normal ``unittest`` classes and methods for your unit tests.
26 | - **Descriptive** - Multiple verbosity levels, from just dots to full docstring output.
27 | - **Convenient** - Bash-completion and ZSH-completion of options and test targets.
28 | - **Thorough** - Built-in integration with `coverage`_.
29 | - **Modern** - Supports Python 3.8+. Additionally, `PyPy`_ is supported on a best-effort basis.
30 | - **Portable** - macOS, Linux, and BSDs are fully supported. Windows is supported on a best-effort basis.
31 | - **Living** - This project grows and changes. See the `changelog`_
32 |
33 | .. _Github page for Green: https://github.com/CleanCut/green#green
34 | .. _Python Testing with Green: https://www.udemy.com/python-testing-with-green/?couponCode=GREEN_ANGEL
35 | .. _coverage: http://nedbatchelder.com/code/coverage/
36 | .. _PyPy: http://pypy.org
37 | .. _changelog: https://github.com/CleanCut/green/blob/main/CHANGELOG.md
38 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Green
2 |
3 | [PyPI](https://pypi.python.org/pypi/green)
4 | [Downloads](https://pypistats.org/packages/green)
5 | [CI](https://github.com/CleanCut/green/actions)
6 | [Coverage](https://coveralls.io/r/CleanCut/green?branch=main)
7 |
8 | A clean, colorful, fast python test runner.
9 |
10 |
11 | Features
12 | --------
13 |
14 | - **Clean** - Low redundancy in output. Result statistics for each test are vertically aligned.
15 | - **Colorful** - Terminal output makes good use of color when the terminal supports it.
16 | - **Fast** - Tests run in independent processes. (One per processor by default. Does *not* play nicely with `gevent`)
17 | - **Powerful** - Multi-target + auto-discovery.
18 | - **Traditional** - Use the normal `unittest` classes and methods for your unit tests.
19 | - **Descriptive** - Multiple verbosity levels, from just dots to full docstring output.
20 | - **Convenient** - Bash-completion and ZSH-completion of options and test targets.
21 | - **Thorough** - Built-in integration with [coverage](http://nedbatchelder.com/code/coverage/).
22 | - **Embedded** - Can be run via a `setup.py` command without installing green into site-packages first.
23 | - **Modern** - Supports Python 3.8+. Additionally, [PyPy](http://pypy.org) is supported on a best-effort basis.
24 | - **Portable** - macOS, Linux, and BSDs are fully supported. Windows is supported on a best-effort basis.
25 | - **Living** - This project grows and changes. See the
26 | [changelog](https://github.com/CleanCut/green/blob/main/CHANGELOG.md)
27 |
28 | Community
29 | ---------
30 |
31 | - For **questions**, **comments**, or **feature requests**, please [open a discussion](https://github.com/CleanCut/green/discussions)
32 | - For **bug reports**, please
33 | [submit an issue](https://github.com/CleanCut/green/issues/new) to the GitHub
34 | issue tracker for Green.
35 | - Submit a [pull
36 | request](https://help.github.com/articles/creating-a-pull-request-from-a-fork/)
37 | with a bug fix or new feature.
38 | - :sparkling_heart: [Sponsor](https://github.com/sponsors/CleanCut) the maintainer to support this project
39 |
40 | Training Course
41 | --------
42 |
43 | There is a training course available if you would like professional training:
44 | [Python Testing with Green](https://www.udemy.com/python-testing-with-green/?couponCode=GREEN_GITHUB).
45 |
46 | ![Python Testing with Green course](img/GreenCourseImagePromoStripe.png)
47 |
48 | Screenshots
49 | -----------
50 |
51 | #### Top: With Green! Bottom: Without Green :-(
52 |
53 | ![Screenshot: green output (top) vs. unittest output (bottom)](img/screenshot.png)
54 |
55 |
56 | Quick Start
57 | -----------
58 |
59 | ```bash
60 | pip3 install green # To upgrade: "pip3 install --upgrade green"
61 | ```
62 |
63 | Now run green...
64 |
65 | ```bash
66 | # From inside your code directory
67 | green
68 |
69 | # From outside your code directory
70 | green code_directory
71 |
72 | # A specific file
73 | green test_stuff.py
74 |
75 | # A specific test inside a large package.
76 | #
77 | # Assuming you want to run TestClass.test_function inside
78 | # package/test/test_module.py ...
79 | green package.test.test_module.TestClass.test_function
80 |
81 | # To see all examples of all the failures, errors, etc. that could occur:
82 | green green.examples
83 |
84 |
85 | # To run Green's own internal unit tests:
86 | green green
87 | ```
88 |
89 | For more help, see the [complete command-line
90 | options](https://github.com/CleanCut/green/blob/main/cli-options.txt) or run
91 | `green --help`.
92 |
93 | Config Files
94 | ------------
95 |
96 | Configuration settings are resolved in this order, with settings found later
97 | in the resolution chain overwriting earlier settings (last setting wins).
98 |
99 | 1) `$HOME/.green`
100 | 2) A config file specified by the environment variable `$GREEN_CONFIG`
101 | 3) `setup.cfg` in the current working directory of test run
102 | 4) `.green` in the current working directory of the test run
103 | 5) A config file specified by the command-line argument `--config FILE`
104 | 6) [Command-line arguments](https://github.com/CleanCut/green/blob/main/cli-options.txt)
105 |
106 | Any arguments specified in more than one place will be overwritten by the
107 | value of the LAST place the setting is seen. So, for example, if a setting
108 | is turned on in `~/.green` and turned off by a
109 | [command-line argument](https://github.com/CleanCut/green/blob/main/cli-options.txt),
110 | then the setting will be turned off.
111 |
112 | Config file format syntax is `option = value` on separate lines. `option` is
113 | the same as the long options, just without the double-dash (`--verbose` becomes
114 | `verbose`).
115 |
116 | Most values should be `True` or `False`. Accumulated values (verbose, debug)
117 | should be specified as integers (`-vv` would be `verbose = 2`).
118 |
119 | Example:
120 |
121 | ```
122 | verbose = 2
123 | logging = True
124 | omit-patterns = myproj*,*prototype*
125 | ```
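
To make the precedence chain concrete, here is a hypothetical sketch (the file
contents are invented for illustration):

```bash
# ~/.green contains:               verbose = 3
# ./.green (working dir) contains: verbose = 1    <- read later, so it wins
green -vv proj                     # CLI flags are applied last: verbose = 2
```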
126 |
127 | Troubleshooting
128 | ---------------
129 |
130 | One easy way to avoid common importing problems is to navigate to the *parent*
131 | directory of the directory your python code is in. Then pass green the
132 | directory your code is in and let it autodiscover the tests (see the Tutorial below
133 | for tips on making your tests discoverable).
134 |
135 | ```bash
136 | cd /parent/directory
137 | green code_directory
138 | ```
139 |
140 | Another way to address importing problems is to carefully set up your
141 | `PYTHONPATH` environment variable to include the parent path of your code
142 | directory. Then you should be able to just run `green` from _inside_ your code
143 | directory.
144 |
145 | ```bash
146 | export PYTHONPATH=/parent/directory
147 | cd /parent/directory/code_directory
148 | green
149 | ```
150 |
151 | Integration
152 | -----------
153 |
154 | ### Bash and Zsh
155 |
156 | To enable Bash-completion and Zsh-completion of options and test targets when
157 | you press `Tab` in your terminal, add the following line to the Bash or Zsh
158 | config file of your choice (usually `~/.bashrc` or `~/.zshrc`)
159 |
160 | ```bash
161 | which green >& /dev/null && source "$( green --completion-file )"
162 | ```
163 |
164 | ### Coverage
165 |
166 | Green has built-in integration support for the
167 | [coverage](http://coverage.readthedocs.org/) module. Add `-r` or
168 | `--run-coverage` when you run green.
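
For example, combining the coverage flags documented in
[cli-options.txt](https://github.com/CleanCut/green/blob/main/cli-options.txt):

```bash
green -r proj               # run tests and print a coverage report
green -m 80 proj            # fail if total coverage is below 80% (implies -r)
green -g .coveragerc proj   # use a specific coverage config file (implies -r)
```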
169 |
170 | ### `setup.py` command
171 |
172 | Green is available as a `setup.py` runner, invoked as any other setup command:
173 | ```
174 | python setup.py green
175 | ```
176 | This requires green to be present in the `setup_requires` section of
177 | your `setup.py` file. To run green on a specific target, use the `test_suite`
178 | argument (or leave blank to let green discover tests itself):
179 | ```python
180 | # setup.py
181 | from setuptools import setup
182 |
183 | setup(
184 | ...
185 | setup_requires = ['green'],
186 | # test_suite = "my_project.tests"
187 | )
188 | ```
189 |
190 | You can also add an alias to the `setup.cfg` file, so that `python setup.py test` actually runs green:
191 |
192 | ```ini
193 | # setup.cfg
194 |
195 | [aliases]
196 | test = green
197 | ```
198 |
199 |
200 | ### Django
201 |
202 | Django can use green as its test runner.
203 |
204 | - To just try it out, use the `--testrunner` option of `manage.py`:
205 | ```
206 | ./manage.py test --testrunner=green.djangorunner.DjangoRunner
207 | ```
208 | - Make it persistent by adding the following line to your `settings.py`:
209 | ```python
210 | TEST_RUNNER="green.djangorunner.DjangoRunner"
211 | ```
212 | - For verbosity, green adds an extra command-line option to `manage.py` to
213 |   which you can pass the number of `v`'s you would have used with green.
214 | ```
215 | ./manage.py test --green-verbosity 3
216 | ```
217 | - For all other non-default green configuration under Django, you will need to
218 | use [green configuration files](#config-files).
219 |
220 |
221 | ### nose-parameterized
222 |
223 | Green will run generated tests created by
224 | [nose-parameterized](https://github.com/wolever/nose-parameterized). They have
225 | lots of examples of how to generate tests, so follow the link above if you're
226 | interested.
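
For instance, a minimal sketch (assuming the older `nose_parameterized`
package name; the project has since been renamed to `parameterized`):

```python
import unittest

from nose_parameterized import parameterized


class TestSquares(unittest.TestCase):
    # expand() generates one test method per tuple, and green discovers
    # and reports each generated test individually.
    @parameterized.expand([(1, 1), (2, 4), (3, 9)])
    def test_square(self, value, expected):
        self.assertEqual(value**2, expected)
```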
227 |
228 |
229 | Unit Test Structure Tutorial
230 | ----------------------------
231 |
232 | This tutorial covers:
233 |
234 | - External structure of your project (directory and file layout)
235 | - Skeleton of a real test module
236 | - How to import stuff from your project into your test module
237 | - Gotchas about naming...everything.
238 | - Where to run green from and what the output could look like.
239 | - DocTests
240 |
241 | For more in-depth online training please check out
242 | [Python Testing with Green](https://github.com/CleanCut/green/blob/main/PythonTestingWithGreen.md):
243 |
244 | - Layout your test packages and modules correctly
245 | - Organize your tests effectively
246 | - Learn the tools in the `unittest` and `mock` modules
247 | - Write meaningful tests that enable quick refactoring
248 | - Learn the difference between unit and integration tests
249 | - Use advanced tips and tricks to get the most out of your tests
250 | - Improve code quality
251 | - Refactor code without fear
252 | - Have a better coding experience
253 | - Be able to better help others
254 |
255 |
256 | ### External Structure ###
257 |
258 | This is what your project layout should look like with just one module in your
259 | package:
260 |
261 |
262 | proj # 'proj' is the package
263 | ├── __init__.py
264 | ├── foo.py # 'foo' (or proj.foo) is the only "real" module
265 | └── test # 'test' is a sub-package
266 | ├── __init__.py
267 | └── test_foo.py # 'test_foo' is the only "test" module
268 |
269 | Notes:
270 |
271 | 1. There is an `__init__.py` in every directory. Don't forget it. It can be
272 | an empty file, but it needs to exist.
273 |
274 | 2. `proj` itself is a directory that you will be storing somewhere. We'll
275 | pretend it's in `/home/user`
276 |
277 | 3. The `test` directory needs to start with `test`.
278 |
279 | 4. The test modules need to start with `test`.
280 |
281 |
282 | When your project starts adding code in sub-packages, you will need to make a
283 | choice on where you put their tests. I prefer to create a `test` subdirectory
284 | in each sub-package.
285 |
286 | proj
287 | ├── __init__.py
288 | ├── foo.py
289 | ├── subpkg
290 | │ ├── __init__.py
291 | │ ├── bar.py
292 | │ └── test # test subdirectory in every sub-package
293 | │ ├── __init__.py
294 | │ └── test_bar.py
295 | └── test
296 | ├── __init__.py
297 | └── test_foo.py
298 |
299 |
300 | The other option is to start mirroring your subpackage layout from within a single test directory.
301 |
302 | proj
303 | ├── __init__.py
304 | ├── foo.py
305 | ├── subpkg
306 | │ ├── __init__.py
307 | │ └── bar.py
308 | └── test
309 | ├── __init__.py
310 | ├── subpkg # mirror sub-package layout inside test dir
311 | │ ├── __init__.py
312 | │ └── test_bar.py
313 | └── test_foo.py
314 |
315 |
316 | ### Skeleton of Test Module ###
317 |
318 | Assume `foo.py` contains the following contents:
319 |
320 | ```python
321 | def answer():
322 | return 42
323 |
324 | class School():
325 |
326 | def food(self):
327 | return 'awful'
328 |
329 | def age(self):
330 | return 300
331 | ```
332 |
333 | Here's a possible version of `test_foo.py` you could have.
334 |
335 | ```python
336 | # Import stuff you need for the unit tests themselves to work
337 | import unittest
338 |
339 | # Import stuff that you want to test. Don't import extra stuff if you don't
340 | # have to.
341 | from proj.foo import answer, School
342 |
343 | # If you need the whole module, you can do this:
344 | # from proj import foo
345 | #
346 | # Here's another reasonable way to import the whole module:
347 | # import proj.foo as foo
348 | #
349 | # In either case, you would obviously need to access objects like this:
350 | # foo.answer()
351 | # foo.School()
352 |
353 | # Then write your tests
354 |
355 | class TestAnswer(unittest.TestCase):
356 |
357 | def test_type(self):
358 | "answer() returns an integer"
359 | self.assertEqual(type(answer()), int)
360 |
361 | def test_expected(self):
362 | "answer() returns 42"
363 | self.assertEqual(answer(), 42)
364 |
365 | class TestSchool(unittest.TestCase):
366 |
367 | def test_food(self):
368 | school = School()
369 | self.assertEqual(school.food(), 'awful')
370 |
371 | def test_age(self):
372 | school = School()
373 | self.assertEqual(school.age(), 300)
374 | ```
375 |
376 | Notes:
377 |
378 | 1. Your test class must subclass `unittest.TestCase`. Technically, neither
379 | unittest nor Green care what the test class is named, but to be consistent
380 | with the naming requirements for directories, modules, and methods we
381 | suggest you start your test class with `Test`.
382 |
383 | 2. Start all your test method names with `test`.
384 |
385 | 3. What a test class and/or its methods _actually test_ is entirely up to you.
386 | In some sense it is an artform. Just use the test classes to group a bunch
387 | of methods that seem logical to go together. We suggest you try to test one
388 | thing with each method.
389 |
390 | 4. The methods of `TestAnswer` have docstrings, while the methods on
391 | `TestSchool` do not. For more verbose output modes, green will use the
392 | method docstring to describe the test if it is present, and the name of the
393 | method if it is not. Notice the difference in the output below.
394 |
395 | ### DocTests ###
396 |
397 | Green can also run tests embedded in documentation via Python's built-in
398 | [doctest] module. Returning to our previous example, we could add docstrings
399 | with example code to our `foo.py` module:
400 |
401 | [doctest]: https://docs.python.org/3.12/library/doctest.html
402 |
403 |
404 | ```python
405 | def answer():
406 | """
407 | >>> answer()
408 | 42
409 | """
410 | return 42
411 |
412 | class School():
413 |
414 | def food(self):
415 | """
416 | >>> s = School()
417 | >>> s.food()
418 | 'awful'
419 | """
420 | return 'awful'
421 |
422 | def age(self):
423 | return 300
424 | ```
425 |
426 | Then in some _test_ module you need to add a `doctest_modules = [ ... ]` list
427 | at the top level of the test module. So let's revisit `test_foo.py` and add
428 | that:
429 |
430 | ```python
431 | # we could add this to the top or bottom of the existing file...
432 |
433 | doctest_modules = ['proj.foo']
434 | ```
435 |
436 | Then running `green -vv` might include this output:
437 |
438 | ```
439 | DocTests via `doctest_modules = [...]`
440 | . proj.foo.School.food
441 | . proj.foo.answer
442 | ```
443 |
444 | ...or with one more level of verbosity (`green -vvv`)
445 |
446 | ```
447 | DocTests via `doctest_modules = [...]`
448 | . proj.foo.School.food -> /Users/cleancut/proj/green/example/proj/foo.py:10
449 | . proj.foo.answer -> /Users/cleancut/proj/green/example/proj/foo.py:1
450 | ```
451 |
452 | Notes:
453 |
454 | 1. There needs to be at least one `unittest.TestCase` subclass with a test
455 | method present in the test module for `doctest_modules` to be examined.
456 |
457 | ### Running Green ###
458 |
459 | To run the unit tests, we would change to the parent directory of the project
460 | (`/home/user` in this example) and then run `green proj`.
461 |
462 | **In a real terminal, this output is syntax highlighted**
463 |
464 | $ green proj
465 | ....
466 |
467 | Ran 4 tests in 0.125s using 8 processes
468 |
469 | OK (passes=4)
470 |
471 | Okay, so that's the classic short-form output for unit tests. Green really
472 | shines when you start getting more verbose:
473 |
474 | **In a real terminal, this output is syntax highlighted**
475 |
476 | $ green -vvv proj
477 | Green 4.1.0, Coverage 7.4.1, Python 3.12.2
478 |
479 | test_foo
480 | TestAnswer
481 | . answer() returns 42
482 | . answer() returns an integer
483 | TestSchool
484 | . test_age
485 | . test_food
486 |
487 | Ran 4 tests in 0.123s using 8 processes
488 |
489 | OK (passes=4)
490 |
491 | Notes:
492 |
493 | 1. Green outputs clean, hierarchical output.
494 |
495 | 2. Test status is aligned on the _left_ (the four periods correspond to four
496 | passing tests)
497 |
498 | 3. Method names are replaced with docstrings when present. The first two tests
499 | have docstrings you can see.
500 |
501 | 4. Green always outputs a summary of statuses that will add up to the total
502 | number of tests that were run. For some reason, many test runners forget
503 | about statuses other than Error and Fail, and even the built-in unittest runner
504 | forgets about passing ones.
505 |
506 | 5. Possible values for test status (these match the `unittest` short status characters exactly)
507 | - `.` Pass
508 | - `F` Failure
509 | - `E` Error
510 | - `s` Skipped
511 | - `x` Expected Failure
512 | - `u` Unexpected pass
513 |
514 |
515 | Origin Story
516 | ------------
517 |
518 | Green grew out of a desire to see pretty colors. Really! A big part of the
519 | whole **Red/Green/Refactor** process in test-driven-development is
520 | _actually getting to see red and green output_. Most python unit testing
521 | actually goes **Gray/Gray/Refactor** (at least on my terminal, which is gray
522 | text on black background). That's a shame. Even TV is in color these days.
523 | Why not terminal output? Even worse, the default output for most test runners
524 | is cluttered, hard-to-read, redundant, and the dang status indicators are not
525 | lined up in a vertical column! Green fixes all that.
526 |
527 | But how did Green come to be? Why not just use one of the existing test
528 | runners out there? It's an interesting story, actually. And it starts with
529 | trial.
530 |
531 | **trial**
532 |
533 | I really like Twisted's trial test runner, though I don't really have any need
534 | for the rest of the Twisted event-driven networking engine library. I started
535 | professionally developing in Python when version 2.3 was the latest, greatest
536 | version and none of us in my small shop had ever even heard of unit testing
537 | (gasp!). As we grew, we matured and started testing and we chose trial to do
538 | the test running. If most of my projects at my day job hadn't moved to Python
539 | 3, I probably would have just stuck with trial, but at the time I wrote green
540 | [trial didn't run on Python 3](http://twistedmatrix.com/trac/ticket/5965)
541 | (but since 15.4.0 it does). Trial was and is the foundation for my inspiration
542 | for having better-than-unittest output in the first place. It is a great
543 | example of reducing redundancy (report module/class once, not on every line),
544 | lining up status vertically, and using color. I feel like Green trumped trial
545 | in two important ways: 1) It wasn't a part of an immense event-driven
546 | networking engine, and 2) it was not stuck in Python 2 as trial was at the
547 | time. Green will obviously never replace trial, as trial has features
548 | necessary to run asynchronous unit tests on Twisted code. After discovering
549 | that I couldn't run trial under Python 3, I next tried...
550 |
551 | **nose**
552 |
553 | I had really high hopes for nose. It seemed to be widely accepted. It seemed
554 | to be powerful. The output was just horrible (exactly the same as unittest's
555 | output). But it had a plugin system! I tried all the plugins I could find
556 | that mentioned improving upon the output. When I couldn't find one I liked, I
557 | started developing Green (yes, this Green) *as a plugin for nose*. I chose the
558 | name Green for three reasons: 1) It was available on PyPi! 2) I like to focus
559 | on the positive aspect of testing (everything passes!), and 3) It made a nice
560 | counterpoint to several nose plugins that had "Red" in the name. I made steady
561 | progress on my plugin until I hit a serious problem in the nose plugin API.
562 | That's when I discovered that [nose is in maintenance
563 | mode](https://github.com/nose-devs/nose/issues/45#issuecomment-40816427) --
564 | abandoned by the original developers, handed off to someone who won't fix
565 | anything if it changes the existing behavior. What a downer. Despite the huge
566 | user base, I already consider nose dead and gone. A project which will not
567 | change (even to fix bugs!) will die. Even the maintainer keeps pointing
568 | everyone to...
569 |
570 | **nose2**
571 |
572 | So I pivoted to nose2! I started over developing Green (same repo -- it's in
573 | the history). I can understand the allure of a fresh rewrite as much as the
574 | other guy. Nose had made less-than-ideal design decisions, and this time they
575 | would be done right! Hopefully. I had started reading nose code while writing
576 | the plugin for it, and so I dived deep into nose2. And ran into a mess. Nose2
577 | is alpha. That by itself is not necessarily a problem, if the devs will
578 | release early and often and work to fix things you run into. I submitted a
579 | 3-line pull request to [fix some
580 | problems](https://github.com/nose-devs/nose2/pull/171) where the behavior did
581 | not conform to the already-written documentation which broke my plugin. The
582 | pull request wasn't initially accepted because I (ironically) didn't write unit
583 | tests for it. This got me thinking "I can write a better test runner than
584 | *this*". I got tired of the friction dealing with the nose/nose2 and decided
585 | to see what it would take to write my own test runner. That brought be to...
586 |
587 | **unittest**
588 |
589 | I finally went and started reading unittest (Python 2.7 and 3.4) source code.
590 | unittest is its own special kind of mess, but it's universally built-in, and
591 | most importantly, subclassing or replacing unittest objects to customize the
592 | output looked a lot *easier* than writing a plugin for nose and nose2. And it
593 | was, for the output portion! Writing the rest of the test runner turned out to
594 | be quite a project, though. I started over on Green *again*, starting down the
595 | road to what we have now. A custom runner that subclasses or replaces bits of
596 | unittest to provide exactly the output (and other feature creep) that I wanted.
597 |
598 |
599 | I had three initial goals for Green:
600 |
601 | 1. Colorful, clean output (at least as good as trial's)
602 | 2. Run on Python 3
603 | 3. Try to avoid making it a huge bundle of tightly-coupled, hard-to-read code.
604 |
605 |
606 | I contend that I nailed **1.** and **2.**, and ended up implementing a bunch of
607 | other useful features as well (like very high performance via running tests in
608 | parallel in multiple processes). Whether I succeeded with **3.** is debatable.
609 | I continue to try to refactor and simplify, but adding features on top of a
610 | complicated bunch of built-in code doesn't lend itself to the flexibility
611 | needed for clear refactors.
612 |
613 | Wait! What about the other test runners?
614 |
615 | - **pytest** -- Somehow I never realized pytest existed until a few weeks
616 | before I released Green 1.0. Nowadays it seems to be pretty popular. If I
617 | had discovered it earlier, maybe I wouldn't have made Green! Hey, don't give
618 | me that look! I'm not omniscient!
619 |
620 | - **tox** -- I think I first ran across tox only a few weeks before I heard of
621 | pytest. Its homepage didn't mention anything about color, so I didn't try
622 | using it.
623 |
624 | - **the ones I missed** -- Er, haven't heard of them yet either.
625 |
626 | I'd love to hear **your** feedback regarding Green. Like it? Hate it? Have
627 | some awesome suggestions? Whatever the case, go
628 | [open a discussion](https://github.com/CleanCut/green/discussions)
629 |
--------------------------------------------------------------------------------
/cli-options.txt:
--------------------------------------------------------------------------------
1 | usage: green [options] [target [target2 ...]]
2 |
3 | Green is a clean, colorful, fast test runner for Python unit tests.
4 |
5 | Target Specification:
6 | target Targets to test. Any number of targets may be
7 | specified. If blank, then discover all testcases in
8 | the current directory tree. Can be a directory (or
9 | package), file (or module), or fully-qualified 'dotted
10 | name' like proj.tests.test_things.TestStuff. If a
11 | directory (or package) is specified, then we will
12 | attempt to discover all tests under the directory
13 | (even if the directory is a package and the tests
14 | would not be accessible through the package's scope).
15 | In all other cases, only tests accessible from
16 | introspection of the object will be loaded.
17 |
18 | Concurrency Options:
19 | -s NUM, --processes NUM
20 | Number of processes to use to run tests. Note that
21 | your tests need to be written to avoid using the same
22 | resources (temp files, sockets, ports, etc.) for the
23 | multi-process mode to work well (--initializer and
24 | --finalizer can help provision per-process resources).
25 | Default is to run the same number of processes as your
26 | machine has logical CPUs. Note that for a small number
27 | of trivial tests, running everything in a single
28 | process may be faster than the overhead of
29 | initializing all the processes.
30 | -i DOTTED_FUNCTION, --initializer DOTTED_FUNCTION
31 | Python function to run inside of a single worker
32 | process before it starts running tests. This is the
33 | way to provision external resources that each
34 | concurrent worker process needs to have exclusive
35 | access to. Specify the function in dotted notation in
36 | a way that will be importable from the location you
37 | are running green from.
38 | -z DOTTED_FUNCTION, --finalizer DOTTED_FUNCTION
39 | Same as --initializer, only run at the end of a worker
40 | process's lifetime. Used to unprovision resources
41 | provisioned by the initializer.
42 | -X NUM, --maxtasksperchild NUM
43 | Passed directly as the `maxtasksperchild` argument to
44 | an internal `multiprocessing.pool.Pool`. A "task" in
45 | this context usually corresponds to a test suite
46 | consisting of all the tests in a single file. This
47 | option slows down testing, but has the benefit of
48 | guaranteeing a new Python process for each test suite.
49 |
50 | Format Options:
51 | -t, --termcolor Force terminal colors on. Default is to autodetect.
52 | -T, --notermcolor Force terminal colors off. Default is to autodetect.
53 | -W, --disable-windows
54 | Disable Windows support by turning off Colorama
55 |
56 | Output Options:
57 | -a, --allow-stdout Instead of capturing the stdout and stderr and
58 | presenting it in the summary of results, let it come
59 | through. Note that output from sources other than
60 | tests (like module/class setup or teardown) is never
61 | captured.
62 | -q, --quiet-stdout Instead of capturing the stdout and stderr and
63 | presenting it in the summary of results, discard it
64 | completely for successful tests. The --allow-stdout option
65 | overrides it.
66 | -k, --no-skip-report Don't print the report of skipped tests after testing
67 | is done. Skips will still show up in the progress
68 | report and summary count.
69 | -e, --no-tracebacks Don't print tracebacks for failures and errors.
70 | -h, --help Show this help message and exit.
71 | -V, --version Print the version of Green and Python and exit.
72 | -l, --logging Don't configure the root logger to redirect to
73 | /dev/null, enabling internal debugging output, as well
74 | as any output test (or tested) code may be sending via
75 | the root logger.
76 | -d, --debug Enable internal debugging statements. Implies
77 | --logging. Can be specified up to three times for more
78 | debug output.
79 | -v, --verbose Verbose. Can be specified up to three times for more
80 | verbosity. Recommended levels are -v and -vv.
81 | -U, --disable-unidecode
82 | Disable unidecode which converts test output from
83 | unicode to ascii by default on Windows to avoid hard-
84 | to-debug crashes.
85 |
86 | Other Options:
87 | -f, --failfast Stop execution at the first test that fails or errors.
88 | -c FILE, --config FILE
89 | Use this config file to override any values from the
90 | config file specified by environment variable
91 | GREEN_CONFIG, ~/.green, and .green in the current
92 | working directory.
93 | -p PATTERN, --file-pattern PATTERN
94 | Pattern to match test files. Default is test*.py
95 | -n PATTERN, --test-pattern PATTERN
96 | Pattern to match test method names after 'test'.
97 | Default is '*', meaning match methods named 'test*'.
98 | -j FILENAME, --junit-report FILENAME
99 | Generate a JUnit XML report.
100 |
101 | Coverage Options (Coverage 6.4.4):
102 | -r, --run-coverage Produce coverage output.
103 | -g FILE, --cov-config-file FILE
104 | Specify a coverage config file. Implies --run-coverage
105 | See the coverage documentation at
106 | https://coverage.readthedocs.io/en/v4.5.x/config.html
107 | for coverage config file syntax. The [run] and
108 | [report] sections are most relevant.
109 | -R, --quiet-coverage Do not print coverage report to stdout (coverage files
110 | will still be created). Implies --run-coverage
111 | -O, --clear-omit Green tries really hard to set up a good list of
112 | patterns of files to omit from coverage reports. If
113 | the default list catches files that you DO want to
114 | cover you can specify this flag to leave the default
115 | list empty to start with. You can then add patterns
116 | back in with --omit-patterns. The default list is
117 | something like '*/test*,*/mock*,*(temp dir)*,*(python
118 | system packages)*' -- only longer.
119 | -u PATTERN, --include-patterns PATTERN
120 | Comma-separated file-patterns to include in coverage.
121 | This implies that anything that does not match the
122 | include pattern is omitted from coverage reporting.
123 | -o PATTERN, --omit-patterns PATTERN
124 | Comma-separated file-patterns to omit from coverage.
125 | For example, if coverage reported a file
126 | mypackage/foo/bar you could omit it from coverage with
127 | 'mypackage*', '*/foo/*', or '*bar'
128 | -m X, --minimum-coverage X
129 | Integer. A minimum coverage value. If not met, then we
130 | will print a message and exit with a nonzero status.
131 | Implies --run-coverage
132 |
133 | Integration Options:
134 | --completion-file Location of the bash- and zsh-completion file. To
135 | enable bash- or zsh-completion, see ENABLING SHELL
136 | COMPLETION below.
137 | --completions Output possible completions of the given target. Used
138 | by bash- and zsh-completion.
139 | --options Output all options. Used by bash- and zsh-completion.
140 |
141 | ENABLING SHELL COMPLETION
142 |
143 | To enable bash- or zsh-completion, add the line below to the end of your
144 | .bashrc or .zshrc file (or equivalent config file):
145 |
146 | which green >& /dev/null && source "$( green --completion-file )"
147 |
148 | Warning! Generating a completion list actually discovers and loads tests
149 | -- this can be very slow if you run it in huge directories!
150 |
151 | SETUP.PY RUNNER
152 |
153 | To run green as a setup.py command, simply add green to the 'setup_requires'
154 | section in the setup.py file, and specify a target as the 'test_suite'
155 | parameter if you do not want green to load all the tests:
156 |
157 | setup(
158 | setup_requires = ['green'],
 159 |         test_suite = 'myproject.tests'
160 | )
161 |
162 | Then simply run green as any other setup.py command (it accepts the same
163 | parameters as the 'green' executable):
164 |
165 | python setup.py green
166 | python setup.py green -r # to run with coverage, etc.
167 |
168 | CONFIG FILES
169 |
170 | For documentation on config files, please see
171 | https://github.com/CleanCut/green#config-files
172 |
--------------------------------------------------------------------------------
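For reference, the config files mentioned above are plain "option = value"
lines whose keys mirror the long command-line flags. A representative config
(the values here are illustrative, following the format documented in green's
README):

    # ~/.green
    verbose = 2
    logging = True
    omit-patterns = myproj*
    run-coverage = True

--------------------------------------------------------------------------------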
/example/proj/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CleanCut/green/e89b49146c8767ff4a0cedf05f6e8dffcba8e0cd/example/proj/__init__.py
--------------------------------------------------------------------------------
/example/proj/foo.py:
--------------------------------------------------------------------------------
1 | def answer():
2 | """
3 | >>> answer()
4 | 42
5 | """
6 | return 42
7 |
8 |
9 | class School:
10 | def food(self):
11 | """
12 | >>> s = School()
13 | >>> s.food()
14 | 'awful'
15 | """
16 | return "awful"
17 |
18 | def age(self):
19 | return 300
20 |
--------------------------------------------------------------------------------
/example/proj/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CleanCut/green/e89b49146c8767ff4a0cedf05f6e8dffcba8e0cd/example/proj/test/__init__.py
--------------------------------------------------------------------------------
/example/proj/test/subpkg/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CleanCut/green/e89b49146c8767ff4a0cedf05f6e8dffcba8e0cd/example/proj/test/subpkg/__init__.py
--------------------------------------------------------------------------------
/example/proj/test/subpkg/bar.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CleanCut/green/e89b49146c8767ff4a0cedf05f6e8dffcba8e0cd/example/proj/test/subpkg/bar.py
--------------------------------------------------------------------------------
/example/proj/test/subpkg/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CleanCut/green/e89b49146c8767ff4a0cedf05f6e8dffcba8e0cd/example/proj/test/subpkg/test/__init__.py
--------------------------------------------------------------------------------
/example/proj/test/subpkg/test/test_bar.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CleanCut/green/e89b49146c8767ff4a0cedf05f6e8dffcba8e0cd/example/proj/test/subpkg/test/test_bar.py
--------------------------------------------------------------------------------
/example/proj/test/test_foo.py:
--------------------------------------------------------------------------------
1 | # Import stuff you need for the unit tests themselves to work
2 | import unittest
3 |
4 | # Import stuff that you want to test. Don't import extra stuff if you don't
5 | # have to.
6 | from proj.foo import answer, School
7 |
8 | # If you need the whole module, you can do this:
9 | # from proj import foo
10 | #
11 | # Here's another reasonable way to import the whole module:
12 | # import proj.foo as foo
13 | #
14 | # In either case, you would obviously need to access objects like this:
15 | # foo.answer()
16 | # foo.School()
17 |
18 | # Then write your tests
19 |
20 |
21 | class TestAnswer(unittest.TestCase):
22 | def test_type(self):
23 | "answer() returns an integer"
24 | self.assertEqual(type(answer()), int)
25 |
26 | def test_expected(self):
27 | "answer() returns 42"
28 | self.assertEqual(answer(), 42)
29 |
30 |
31 | class TestSchool(unittest.TestCase):
32 | def test_food(self):
33 | school = School()
34 | self.assertEqual(school.food(), "awful")
35 |
36 | def test_age(self):
37 | school = School()
38 | self.assertEqual(school.age(), 300)
39 |
40 |
41 | # If there are doctests you would like to run, add a `doctest_modules` list to
42 | # the top level of any of your test modules. Items in the list are modules to
43 | # discover doctests within. Each item in the list can be either the name of a
44 | # module as a dotted string or the actual module that has been imported. In
45 | # this case, we haven't actually imported proj.foo itself, so we use the string
46 | # form of "proj.foo", but if we had done `import proj.foo` then we could have
47 | # put the module object proj.foo in the list. The module form is preferred:
48 | # it performs better and it eliminates the chance that discovery will hit an
49 | # error while searching for the module by name.
50 | doctest_modules = ["proj.foo"]
51 |
--------------------------------------------------------------------------------
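As the closing comment in test_foo.py notes, items in doctest_modules may also
be module objects that have already been imported. A minimal sketch of that
preferred form:

    import proj.foo

    doctest_modules = [proj.foo]

--------------------------------------------------------------------------------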
/g:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # USAGE: ./g [python version] [green options]
4 |
5 | # If the first argument is a version number, use it to run that version of python
6 | PYTHON=python3
7 | if [ "$1" == "pypy" ]; then
8 | PYTHON=pypy
9 | shift
10 | elif [ "$1" == "pypy3" ]; then
11 | PYTHON=pypy3
12 | shift
13 | elif [[ -e $(which python$1) ]]; then
14 | PYTHON=python$1
15 | shift
16 | fi
17 |
18 | # Run the command-line version of green
19 | PYTHONPATH="." $PYTHON -m green.cmdline "$@"
20 |
--------------------------------------------------------------------------------
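A couple of hypothetical invocations of the ./g helper above (assuming the
named interpreters are on the PATH):

    ./g 3.12 -vv green   # run green's own tests under python3.12
    ./g pypy3 green      # same, under pypy3

--------------------------------------------------------------------------------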
/green/VERSION:
--------------------------------------------------------------------------------
1 | 4.0.2
2 |
--------------------------------------------------------------------------------
/green/__init__.py:
--------------------------------------------------------------------------------
1 | from .version import __version__
2 |
3 | __version__  # referenced here so the import isn't flagged as unused
4 |
--------------------------------------------------------------------------------
/green/__main__.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | from .cmdline import main
4 |
5 | sys.exit(main())
6 |
--------------------------------------------------------------------------------
/green/cmdline.py:
--------------------------------------------------------------------------------
1 | """The green command line entry point."""
2 |
3 | from __future__ import annotations
4 |
5 | import atexit
6 | import os
7 | import shutil
8 | import sys
9 | import tempfile
10 | from typing import Sequence
11 |
12 | # Importing from green (other than config) is done after coverage initialization
13 | import green.config as config
14 |
15 |
16 | def _main(argv: Sequence[str] | None, testing: bool) -> int:
17 | args = config.parseArguments(argv)
18 | args = config.mergeConfig(args, testing)
19 |
20 | if args.shouldExit:
21 | return args.exitCode
22 |
23 |     # Clear out all the passed-in options just in case someone tries to run a
24 | # test that assumes sys.argv is clean. I can't guess at the script name
25 | # that they want, though, so we'll just leave ours.
26 | sys.argv = sys.argv[:1]
27 |
28 | # Set up our various main objects
29 | from green.loader import GreenTestLoader, getCompletions
30 | from green.runner import run
31 | from green.output import GreenStream, debug
32 | import green.output
33 | from green.suite import GreenTestSuite
34 |
35 | GreenTestSuite.args = args
36 |
37 | if args.debug:
38 | green.output.debug_level = args.debug
39 |
40 | stream = GreenStream(sys.stdout, disable_windows=args.disable_windows)
41 |
42 | # Location of shell completion file
43 | if args.completion_file:
44 | print(os.path.join(os.path.dirname(__file__), "shell_completion.sh"))
45 | return 0
46 |
47 | # Argument-completion for bash and zsh (for test-target completion)
48 | if args.completions:
49 | print(getCompletions(args.targets))
50 | return 0
51 |
52 | # Option-completion for bash and zsh
53 | if args.options:
54 | print("\n".join(sorted(args.store_opt.options)))
55 | return 0
56 |
57 | # Add debug logging for stuff that happened before this point here
58 | if config.files_loaded:
59 | loaded_files = ", ".join(str(path) for path in config.files_loaded)
60 | debug(f"Loaded config file(s): {loaded_files}")
61 |
62 | # Discover/Load the test suite
63 | if testing:
64 | test_suite = None
65 | else: # pragma: no cover
66 | loader = GreenTestLoader()
67 | test_suite = loader.loadTargets(args.targets, file_pattern=args.file_pattern)
68 |
69 |     # No tests could be loaded at all -- fall back to an empty suite.
70 | if not test_suite:
71 | debug("No test loading attempts succeeded. Created an empty test suite.")
72 | test_suite = GreenTestSuite()
73 |
74 | # Actually run the test_suite
75 | result = run(test_suite, stream, args, testing)
76 |
77 | # Generate a test report if required
78 | if args.junit_report:
79 | from green.junit import JUnitXML
80 |
81 | adapter = JUnitXML()
82 | with open(args.junit_report, "w") as report_file:
83 | adapter.save_as(result, report_file)
84 |
85 | return int(not result.wasSuccessful())
86 |
87 |
88 | def main(argv: Sequence[str] | None = None, testing: bool = False) -> int:
89 | # create the temp dir only once (i.e., not while in the recursed call)
90 | if not os.environ.get("TMPDIR"): # pragma: nocover
91 | # Use `atexit` to cleanup `temp_dir_for_tests` so that multiprocessing can run its
92 | # own cleanup before its temp directory is deleted.
93 | temp_dir_for_tests = tempfile.mkdtemp()
94 | atexit.register(lambda: shutil.rmtree(temp_dir_for_tests, ignore_errors=True))
95 | os.environ["TMPDIR"] = temp_dir_for_tests
96 | prev_tempdir = tempfile.tempdir
97 | tempfile.tempdir = temp_dir_for_tests
98 | try:
99 | return _main(argv, testing)
100 | finally:
101 | del os.environ["TMPDIR"]
102 | tempfile.tempdir = prev_tempdir
103 | else:
104 | return _main(argv, testing)
105 |
106 |
107 | if __name__ == "__main__": # pragma: no cover
108 | sys.exit(main())
109 |
--------------------------------------------------------------------------------
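Because main() takes an argv sequence and returns an exit code, green can also
be invoked programmatically. A minimal sketch (the "myproj" target is a
hypothetical example):

    import sys

    from green.cmdline import main

    # Equivalent to running `green -vv myproj` at the shell.
    sys.exit(main(["-vv", "myproj"]))

--------------------------------------------------------------------------------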
/green/command.py:
--------------------------------------------------------------------------------
1 | """Registers the green command with setuptools."""
2 |
3 | from __future__ import annotations
4 |
5 | import functools
6 | import sys
7 | from typing import TYPE_CHECKING
8 |
9 | from setuptools import Command
10 |
11 | from green.config import parseArguments
12 | from green.cmdline import main
13 |
14 | if TYPE_CHECKING:
15 | from argparse import Action
16 |
17 |
18 | def get_user_options() -> list[tuple[str, str | None, str | None]]:
19 | # When running "python setup.py --help-commands", setup.py will call this
20 | # function -- but green isn't actually being called.
21 | if "--help-commands" in sys.argv:
22 | return []
23 |
24 | args = parseArguments()
25 | options: list[tuple[str, str | None, str | None]] = []
26 |
27 | action: Action
28 | for action in args.store_opt.actions:
29 | names = [name.lstrip("-") for name in action.option_strings]
30 | short_name: str | None
31 | if len(names) == 1:
32 | full_name = names[0]
33 | short_name = None
34 | else:
35 | # TODO: We might want to pick the longer of the two for full_name.
36 | full_name = names[1]
37 | short_name = names[0]
38 | if not action.const:
39 | full_name += "="
40 | options.append((full_name, short_name, action.help))
41 |
42 | return options
43 |
44 |
45 | class green(Command):
46 | command_name = "green"
47 | description = "Run unit tests using green"
48 |
49 | @functools.cached_property
50 | def user_options(self) -> list[tuple[str, str | None, str | None]]:
51 | return get_user_options()
52 |
53 | def initialize_options(self) -> None:
54 | for name, _, _ in self.user_options:
55 | setattr(self, name.replace("-", "_").rstrip("="), None)
56 |
57 | def finalize_options(self) -> None:
58 | pass
59 |
60 | def run(self) -> None:
61 | self.ensure_finalized()
62 |
63 | if self.distribution.install_requires:
64 | self.distribution.fetch_build_eggs(self.distribution.install_requires)
65 |
66 | # TODO: Remove this once setuptools >= 72.0.0 is ubiquitous, since it no longer supports the
67 | # "test" subcommand
68 | if (
69 | hasattr(self.distribution, "tests_require")
70 | and self.distribution.tests_require
71 | ):
72 | self.distribution.fetch_build_eggs(self.distribution.tests_require)
73 |
74 | # TODO: Remove this once setuptools >= 72.0.0 is ubiquitous, since it no longer supports the
75 | # "test" subcommand
76 | script_args = self.distribution.script_args[1:]
77 | if (
78 | hasattr(self.distribution, "test_suite")
79 | and self.distribution.test_suite is not None
80 | ):
81 | script_args.append(self.distribution.test_suite)
82 |
83 | error_code = main(script_args)
84 | if error_code:
85 | sys.exit(error_code)
86 |
--------------------------------------------------------------------------------
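For a sense of what get_user_options() produces: each argparse action becomes
a setuptools-style (long, short, help) tuple, with "=" appended when the
option takes a value. Roughly, with help strings abbreviated:

    ("run-coverage", "r", "Produce coverage output.")          # store_true: no "="
    ("config=", "c", "Use this config file to override ...")   # takes a value

--------------------------------------------------------------------------------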
/green/djangorunner.py:
--------------------------------------------------------------------------------
1 | """
2 | To try running Django tests using green you can run:
3 |
4 | ./manage.py test --testrunner=green.djangorunner.DjangoRunner
5 |
6 | To make the change permanent for your project, in settings.py add:
7 |
8 | TEST_RUNNER="green.djangorunner.DjangoRunner"
9 | """
10 |
11 | from __future__ import annotations
12 |
13 | from argparse import ArgumentParser, Namespace
14 | import pathlib
15 | import os
16 | import sys
17 | from typing import Any, Final, Sequence
18 |
19 | from green.config import mergeConfig
20 | from green.loader import GreenTestLoader
21 | from green.output import GreenStream
22 | from green.runner import run
23 | from green.suite import GreenTestSuite
24 |
25 | # If we're not being run from an actual django project, set up django config
26 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "green.djangorunner")
27 | BASE_DIR = pathlib.Path(__file__).absolute().parent.parent
28 | SECRET_KEY: Final[str] = ")9^_e(=cisybdt4m4+fs+_wb%d$!9mpcoy0um^alvx%gexj#jv"
29 | DEBUG: bool = True
30 | TEMPLATE_DEBUG: bool = True
31 | ALLOWED_HOSTS: Sequence[str] = []
32 | INSTALLED_APPS: Final[Sequence[str]] = (
33 | "django.contrib.admin",
34 | "django.contrib.auth",
35 | "django.contrib.contenttypes",
36 | "django.contrib.sessions",
37 | "django.contrib.messages",
38 | "django.contrib.staticfiles",
39 | "myapp",
40 | )
41 | MIDDLEWARE_CLASSES: Final[Sequence[str]] = (
42 | "django.contrib.sessions.middleware.SessionMiddleware",
43 | "django.middleware.common.CommonMiddleware",
44 | "django.middleware.csrf.CsrfViewMiddleware",
45 | "django.contrib.auth.middleware.AuthenticationMiddleware",
46 | "django.contrib.auth.middleware.SessionAuthenticationMiddleware",
47 | "django.contrib.messages.middleware.MessageMiddleware",
48 | "django.middleware.clickjacking.XFrameOptionsMiddleware",
49 | )
50 | ROOT_URLCONF: Final[str] = "myproj.urls"
51 | WSGI_APPLICATION: Final[str] = "myproj.wsgi.application"
52 | DATABASES: Final[dict[str, dict[str, str]]] = {
53 | "default": {
54 | "ENGINE": "django.db.backends.sqlite3",
55 | "NAME": str(BASE_DIR / "db.sqlite3"),
56 | }
57 | }
58 | LANGUAGE_CODE: Final[str] = "en-us"
59 | TIME_ZONE: Final[str] = "UTC"
60 | USE_I18N: bool = True
61 | USE_L10N: bool = True
62 | USE_TZ: bool = True
63 | STATIC_URL: Final[str] = "/static/"
64 | # End of django fake config stuff
65 |
66 |
67 | def django_missing() -> None:
68 | raise ImportError("No django module installed")
69 |
70 |
71 | try:
72 | import django
73 |
74 | if django.VERSION[:2] < (1, 6): # pragma: no cover
75 | raise ImportError("Green integration supports Django 1.6+")
76 | from django.test.runner import DiscoverRunner
77 |
78 | class DjangoRunner(DiscoverRunner):
79 | def __init__(self, verbose: int = -1, **kwargs: Any):
80 | super().__init__(**kwargs)
81 | self.verbose = verbose
82 | self.loader = GreenTestLoader()
83 |
84 | @classmethod
85 | def add_arguments(cls, parser: ArgumentParser) -> None:
86 | parser.add_argument(
87 | "--green-verbosity",
88 | action="store",
89 | dest="verbose",
90 | default=-1,
91 | type=int,
92 | help="""
93 | Green 'verbose' level for tests. Value should be an integer
94 | that green supports. For example: --green-verbosity 3""",
95 | )
96 | super().add_arguments(parser)
97 |
98 | # FIXME: extra_tests is not used, we should either use it or update the
99 | # documentation accordingly.
100 | def run_tests(
101 | self,
102 | test_labels: list[str] | tuple[str, ...],
103 | extra_tests: Any = None,
104 | **kwargs: Any,
105 | ):
106 | """
107 | Run the unit tests for all the test labels in the provided list.
108 |
109 | Test labels should be dotted Python paths to test modules, test
110 | classes, or test methods.
111 |
112 | A list of 'extra' tests may also be provided; these tests
113 | will be added to the test suite.
114 |
115 | Returns the number of tests that failed.
116 | """
117 | # Django setup
118 | self.setup_test_environment()
119 | django_db = self.setup_databases()
120 |
121 | # Green
122 |             if isinstance(test_labels, tuple):
123 |                 test_labels = list(test_labels)
124 |             elif not isinstance(test_labels, list):
125 |                 raise ValueError("test_labels should be a list or tuple of strings")
126 | if not test_labels:
127 | test_labels = ["."]
128 |
129 | args = mergeConfig(Namespace())
130 | if self.verbose != -1:
131 | args.verbose = self.verbose
132 | args.targets = test_labels
133 | stream = GreenStream(sys.stdout)
134 | suite = self.loader.loadTargets(args.targets)
135 | if not suite:
136 | suite = GreenTestSuite()
137 | result = run(suite, stream, args)
138 |
139 | # Django teardown
140 | self.teardown_databases(django_db)
141 | self.teardown_test_environment()
142 | return self.suite_result(suite, result)
143 |
144 | except ImportError: # pragma: no cover
145 | DjangoRunner = django_missing # type: ignore
146 |
--------------------------------------------------------------------------------
/green/examples.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import sys
4 | import unittest
5 | from typing import Final
6 |
7 | doctest_modules: Final[list[str]] = ["green.examples"]
8 |
9 |
10 | class TestStates(unittest.TestCase):
11 | def test0Pass(self) -> None:
12 | """
13 | This test will print output to stdout, and then pass.
14 | """
15 | print("Sunshine and daisies")
16 |
17 | def test1Fail(self) -> None:
18 | """
19 | This test will print output to stderr, and then fail an assertion.
20 | """
21 | sys.stderr.write("Doom and gloom.\n")
22 | self.assertTrue(False)
23 |
24 | def test2Error(self) -> None:
25 | """
26 | An Exception will be raised (and not caught) while running this test.
27 | """
28 | raise Exception
29 |
30 | @unittest.skip("This is the 'reason' portion of the skipped test.")
31 | def test3Skip(self) -> None:
32 | """
33 | This test will be skipped.
34 | """
35 | pass
36 |
37 | @unittest.expectedFailure
38 | def test4ExpectedFailure(self) -> None:
39 | """
40 | This test will fail, but we expect it to.
41 | """
42 | self.assertEqual(True, False)
43 |
44 | @unittest.expectedFailure
45 | def test5UnexpectedPass(self) -> None:
46 | """
47 | This test will pass, but we expected it to fail!
48 | """
49 | pass
50 |
51 |
52 | def some_function() -> int:
53 | """
54 | This will fail because some_function() does not, in fact, return 100.
55 | >>> some_function()
56 | 100
57 | """
58 | return 99
59 |
60 |
61 | class MyClass:
62 | def my_method(self) -> str:
63 | """
64 | This will pass.
65 | >>> s = MyClass()
66 | >>> s.my_method()
67 | 'happy'
68 | """
69 | return "happy"
70 |
--------------------------------------------------------------------------------
/green/exceptions.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 |
4 | class InitializerOrFinalizerError(Exception):
5 | pass
6 |
--------------------------------------------------------------------------------
/green/junit.py:
--------------------------------------------------------------------------------
1 | """Classes and methods to generate JUnit XML reports."""
2 |
3 | from __future__ import annotations
4 |
5 | from typing import Dict, List, Final, TextIO, Tuple, TYPE_CHECKING, Union
6 |
7 | from lxml.etree import Element, tostring as to_xml
8 |
9 | if TYPE_CHECKING:
10 | # TypeAlias moved to the typing module after py3.9.
11 | from typing_extensions import TypeAlias
12 |
13 | from green.result import GreenTestResult, ProtoTest, ProtoError
14 | from lxml.etree import _Element
15 |
16 | # TODO: use NamedTuple for TestVerdict.
17 | TestVerdict: TypeAlias = Union[
18 | Tuple[int, ProtoTest], Tuple[int, ProtoTest, Union[str, ProtoError]]
19 | ]
20 | TestsCollection: TypeAlias = Dict[str, List[TestVerdict]]
21 |
22 |
23 | # TODO: consider using enum.Enum (new in py3.4) for the JUnitDialect and Verdict classes.
24 |
25 |
26 | class JUnitDialect:
27 | """
28 | Hold the name of the elements defined in the JUnit XML schema (for JUnit 4).
29 | """
30 |
31 | CLASS_NAME: Final[str] = "classname"
32 | ERROR: Final[str] = "error"
33 | ERROR_COUNT: Final[str] = "errors"
34 | FAILURE: Final[str] = "failure"
35 | FAILURE_COUNT: Final[str] = "failures"
36 | NAME: Final[str] = "name"
37 | SKIPPED: Final[str] = "skipped"
38 | SKIPPED_COUNT: Final[str] = "skipped"
39 | SYSTEM_ERR: Final[str] = "system-err"
40 | SYSTEM_OUT: Final[str] = "system-out"
41 | TEST_CASE: Final[str] = "testcase"
42 | TEST_COUNT: Final[str] = "tests"
43 | TEST_SUITE: Final[str] = "testsuite"
44 | TEST_SUITES: Final[str] = "testsuites"
45 | TEST_TIME: Final[str] = "time"
46 |
47 |
48 | class Verdict:
49 | """
50 | Enumeration of possible test verdicts
51 | """
52 |
53 | PASSED: Final[int] = 0
54 | FAILED: Final[int] = 1
55 | ERROR: Final[int] = 2
56 | SKIPPED: Final[int] = 3
57 |
58 |
59 | class JUnitXML:
60 | """
61 |     Serialize a GreenTestResult object into a JUnit XML file that can
62 |     be read by continuous integration servers, for example.
63 |
64 | See GitHub Issue #104
65 | See Option '-j' / '--junit-report'
66 | """
67 |
68 | def save_as(self, test_results: GreenTestResult, destination: TextIO) -> None:
69 | """
70 | Write the JUnit XML report to the given file-like object.
71 | """
72 | xml_root = Element(JUnitDialect.TEST_SUITES)
73 | tests_by_class = self._group_tests_by_class(test_results)
74 | suite: list[TestVerdict]
75 | for name, suite in tests_by_class.items():
76 | xml_suite = self._convert_suite(test_results, name, suite)
77 | xml_root.append(xml_suite)
78 |
79 | xml_root.set(JUnitDialect.TEST_TIME, str(test_results.timeTaken))
80 |
81 | xml = to_xml(
82 | xml_root,
83 | xml_declaration=True,
84 | pretty_print=True,
85 | encoding="utf-8",
86 | method="xml",
87 | )
88 | destination.write(xml.decode())
89 |
90 | def _group_tests_by_class(
91 | self, test_results: GreenTestResult
92 | ) -> dict[str, list[TestVerdict]]:
93 | result: TestsCollection = {}
94 | self._add_passing_tests(result, test_results)
95 | self._add_failures(result, test_results)
96 | self._add_errors(result, test_results)
97 | self._add_skipped_tests(result, test_results)
98 | return result
99 |
100 | @staticmethod
101 | def _add_passing_tests(
102 | collection: TestsCollection, test_results: GreenTestResult
103 | ) -> None:
104 | for each_test in test_results.passing:
105 | key = JUnitXML._suite_name(each_test)
106 | if key not in collection:
107 | collection[key] = []
108 | collection[key].append((Verdict.PASSED, each_test))
109 |
110 | @staticmethod
111 | def _suite_name(test) -> str:
112 | return f"{test.module}.{test.class_name}"
113 |
114 | @staticmethod
115 | def _add_failures(
116 | collection: TestsCollection, test_results: GreenTestResult
117 | ) -> None:
118 | for each_test, failure in test_results.failures:
119 | key = JUnitXML._suite_name(each_test)
120 | if key not in collection:
121 | collection[key] = []
122 | collection[key].append((Verdict.FAILED, each_test, failure))
123 |
124 | @staticmethod
125 | def _add_errors(collection: TestsCollection, test_results: GreenTestResult):
126 | for each_test, error in test_results.errors:
127 | key = JUnitXML._suite_name(each_test)
128 | if key not in collection:
129 | collection[key] = []
130 | collection[key].append((Verdict.ERROR, each_test, error))
131 |
132 | @staticmethod
133 | def _add_skipped_tests(
134 | collection: TestsCollection, test_results: GreenTestResult
135 | ) -> None:
136 | for each_test, reason in test_results.skipped:
137 | key = JUnitXML._suite_name(each_test)
138 | if key not in collection:
139 | collection[key] = []
140 | collection[key].append((Verdict.SKIPPED, each_test, reason))
141 |
142 | def _convert_suite(
143 | self, results: GreenTestResult, name: str, suite: list[TestVerdict]
144 | ) -> _Element:
145 | xml_suite = Element(JUnitDialect.TEST_SUITE)
146 | xml_suite.set(JUnitDialect.NAME, name)
147 | xml_suite.set(JUnitDialect.TEST_COUNT, str(len(suite)))
148 | xml_suite.set(
149 | JUnitDialect.FAILURE_COUNT,
150 | str(self._count_test_with_verdict(Verdict.FAILED, suite)),
151 | )
152 | xml_suite.set(
153 | JUnitDialect.ERROR_COUNT,
154 | str(self._count_test_with_verdict(Verdict.ERROR, suite)),
155 | )
156 | xml_suite.set(
157 | JUnitDialect.SKIPPED_COUNT,
158 | str(self._count_test_with_verdict(Verdict.SKIPPED, suite)),
159 | )
160 | xml_suite.set(JUnitDialect.TEST_TIME, str(self._suite_time(suite)))
161 | for each_test in suite:
162 | xml_test = self._convert_test(results, *each_test)
163 | xml_suite.append(xml_test)
164 |
165 | return xml_suite
166 |
167 | @staticmethod
168 | def _count_test_with_verdict(verdict: int, suite: list[TestVerdict]) -> int:
169 | return sum(1 for entry in suite if entry[0] == verdict)
170 |
171 | def _convert_test(
172 | self,
173 | results: GreenTestResult,
174 | verdict: int,
175 | test: ProtoTest,
176 | *details: str | ProtoError,
177 | ) -> _Element:
178 | xml_test = Element(JUnitDialect.TEST_CASE)
179 | xml_test.set(JUnitDialect.NAME, test.method_name)
180 | xml_test.set(JUnitDialect.CLASS_NAME, test.class_name)
181 | xml_test.set(JUnitDialect.TEST_TIME, test.test_time)
182 |
183 | error: str | ProtoError | None = details[0] if details else None
184 | xml_verdict = self._convert_verdict(verdict, test, error)
185 | if xml_verdict is not None:
186 | xml_test.append(xml_verdict)
187 |
188 | if test in results.stdout_output:
189 | system_out = Element(JUnitDialect.SYSTEM_OUT)
190 | system_out.text = results.stdout_output[test]
191 | xml_test.append(system_out)
192 |
193 | if test in results.stderr_errput:
194 | system_err = Element(JUnitDialect.SYSTEM_ERR)
195 | system_err.text = results.stderr_errput[test]
196 | xml_test.append(system_err)
197 |
198 | return xml_test
199 |
200 | # FIXME: test is not used.
201 | def _convert_verdict(
202 | self, verdict: int, test: ProtoTest, error_details: str | ProtoError | None
203 | ) -> _Element | None:
204 | message = str(error_details) if error_details else ""
205 | if verdict == Verdict.FAILED:
206 | failure = Element(JUnitDialect.FAILURE)
207 | failure.text = message
208 | return failure
209 | if verdict == Verdict.ERROR:
210 | error = Element(JUnitDialect.ERROR)
211 | error.text = message
212 | return error
213 | if verdict == Verdict.SKIPPED:
214 | skipped = Element(JUnitDialect.SKIPPED)
215 | skipped.text = message
216 | return skipped
217 | return None
218 |
219 | @staticmethod
220 | def _suite_time(suite: list[TestVerdict]) -> float:
221 | return sum(float(each_test.test_time) for verdict, each_test, *details in suite)
222 |
--------------------------------------------------------------------------------
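A minimal sketch of driving JUnitXML outside the command line, mirroring the
--junit-report path in green/cmdline.py (here `result` stands in for a
finished GreenTestResult):

    from green.junit import JUnitXML

    with open("report.xml", "w") as report_file:
        JUnitXML().save_as(result, report_file)

The root element is <testsuites>; each test class becomes a <testsuite>
containing <testcase> entries, plus any captured <system-out> and
<system-err> text.

--------------------------------------------------------------------------------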
/green/output.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import Iterable, TextIO, Type, TYPE_CHECKING
4 |
5 | from colorama import Fore, Style
6 | from colorama.ansi import Cursor
7 | from colorama.initialise import wrap_stream
8 | import logging
9 | import os
10 | import platform
11 | import re
12 | import sys
13 | from unidecode import unidecode
14 |
15 | if TYPE_CHECKING:
16 | from colorama.ansitowin32 import StreamWrapper
17 |
18 | global debug_level
19 | debug_level = 0
20 |
21 | text_type: Type[str] = str
22 | unicode = None # so pyflakes stops complaining
23 |
24 |
25 | def debug(message: str, level: int = 1) -> None:
26 | """
27 | So we can tune how much debug output we get when we turn it on.
28 | """
29 | if level <= debug_level:
30 | logging.debug(" " * (level - 1) * 2 + str(message))
31 |
32 |
33 | class Colors:
34 | """
35 | A class to centralize wrapping strings in terminal colors.
36 | """
37 |
38 | def __init__(self, termcolor: bool | None = None) -> None:
39 | """Initialize the Colors object.
40 |
41 | Args:
42 | termcolor: If None, attempt to autodetect whether we are in a
43 | terminal and turn on terminal colors if we think we are.
44 | If True, force terminal colors on.
45 | If False, force terminal colors off.
46 | """
47 | self.termcolor = sys.stdout.isatty() if termcolor is None else termcolor
48 |
49 | def wrap(self, text: str, style: str) -> str:
50 | if self.termcolor:
51 | return f"{style}{text}{Style.RESET_ALL}"
52 | return text
53 |
54 | # Movement
55 | def start_of_line(self) -> str:
56 | return "\r"
57 |
58 | def up(self, lines: int = 1) -> str:
59 | return Cursor.UP(lines)
60 |
61 | # Real colors and styles
62 | def bold(self, text: str) -> str:
63 | return self.wrap(text, Style.BRIGHT)
64 |
65 | def blue(self, text: str) -> str:
66 | if platform.system() == "Windows": # pragma: no cover
67 | # Default blue in windows is unreadable (such awful defaults...)
68 | return self.wrap(text, Fore.CYAN)
69 | else:
70 | return self.wrap(text, Fore.BLUE)
71 |
72 | def green(self, text: str) -> str:
73 | return self.wrap(text, Fore.GREEN)
74 |
75 | def red(self, text: str) -> str:
76 | return self.wrap(text, Fore.RED)
77 |
78 | def yellow(self, text: str) -> str:
79 | return self.wrap(text, Fore.YELLOW)
80 |
81 | # Abstracted colors and styles
82 | def passing(self, text: str) -> str:
83 | return self.green(text)
84 |
85 | def failing(self, text: str) -> str:
86 | return self.red(text)
87 |
88 | def error(self, text: str) -> str:
89 | return self.red(text)
90 |
91 | def skipped(self, text: str) -> str:
92 | return self.blue(text)
93 |
94 | def unexpectedSuccess(self, text: str) -> str:
95 | return self.yellow(text)
96 |
97 | def expectedFailure(self, text: str) -> str:
98 | return self.yellow(text)
99 |
100 | def moduleName(self, text: str) -> str:
101 | return self.bold(text)
102 |
103 | def className(self, text: str) -> str:
104 | return text
105 |
106 |
107 | class GreenStream:
108 | """
109 | Wraps a stream-like object with the following additional features:
110 |
111 | 1) A handy writeln() method (which calls write() under-the-hood)
112 | 2) Handy formatLine() and formatText() methods, which support indent
113 | levels, and outcome codes.
114 | 3) Compatibility with real file objects (by implementing real file object
115 | methods as we discover people need them). So far we have implemented the
116 | following functions just for compatibility:
117 | writelines(lines)
118 | """
119 |
120 | indent_spaces: int = 2
121 | _ascii_only_output: bool = False # default to printing output in unicode
122 |     coverage_pattern = re.compile(r"TOTAL\s+\d+\s+\d+\s+(?P<percent>\d+)%")
123 |
124 | def __init__(
125 | self,
126 | stream: TextIO,
127 | override_appveyor: bool = False,
128 | disable_windows: bool = False,
129 | disable_unidecode: bool = False,
130 | ) -> None:
131 | self.disable_unidecode = disable_unidecode
132 | self.stream: TextIO | StreamWrapper = stream
133 | # Ironically, Windows CI platforms such as GitHub Actions and AppVeyor don't support windows
134 | # win32 system calls for colors, but it WILL interpret posix ansi escape codes! (The
135 | # opposite of an actual windows command prompt)
136 | on_windows = platform.system() == "Windows"
137 | on_windows_ci = os.environ.get("GITHUB_ACTIONS", False) or os.environ.get(
138 | "APPVEYOR", False
139 | )
140 |
141 | if override_appveyor or (
142 | (on_windows and not on_windows_ci) and not disable_windows
143 | ): # pragma: no cover
144 | self.stream = wrap_stream(stream, None, None, False, True)
145 | # set output is ascii-only
146 | self._ascii_only_output = True
147 | self.closed = False
148 | # z3 likes to look at sys.stdout.encoding
149 | try:
150 | self.encoding = stream.encoding
151 | except:
152 | self.encoding = "UTF-8"
153 | # Susceptible to false-positives if other matching lines are output,
154 | # so set this to None immediately before running a coverage report to
155 | # guarantee accuracy.
156 | self.coverage_percent: int | None = None
157 |
158 | def flush(self) -> None:
159 | self.stream.flush()
160 |
161 | def writeln(self, text: str = "") -> None:
162 | self.write(text + "\n")
163 |
164 | def write(self, text: str) -> None:
165 | if isinstance(text, bytes):
166 | text = text.decode("utf-8")
167 | # Compensate for windows' anti-social unicode behavior
168 | if self._ascii_only_output and not self.disable_unidecode:
169 | # Windows doesn't actually want unicode, so we get
170 | # the closest ASCII equivalent
171 | text = text_type(unidecode(text))
172 |         # Coverage doesn't like us switching out its stream, so we scrape its
173 |         # report output here for the percent covered. We should replace this with
174 | # grabbing the percentage directly from coverage if we can figure out
175 | # how.
176 | match = self.coverage_pattern.search(text)
177 | if match:
178 | percent_str = match.groupdict().get("percent")
179 | if percent_str:
180 | self.coverage_percent = int(percent_str)
181 | self.stream.write(text)
182 |
183 | def writelines(self, lines: Iterable[str]) -> None:
184 | """
185 | Just for better compatibility with real file objects
186 | """
187 | for line in lines:
188 | self.write(line)
189 |
190 | def formatText(self, text: str, indent: int = 0, outcome_char: str = "") -> str:
191 | # We'll go through each line in the text, modify it, and store it in a
192 | # new list
193 | updated_lines = []
194 | for line in text.split("\n"):
195 | # We only need to format the line if there's something visible on
196 | # it.
197 | if line.strip(" "):
198 | updated_lines.append(self.formatLine(line, indent, outcome_char))
199 | else:
200 | updated_lines.append("")
201 | outcome_char = "" # only the first line gets an outcome character
202 | # Join the list back together
203 | output = "\n".join(updated_lines)
204 | return output
205 |
206 | def formatLine(self, line: str, indent: int = 0, outcome_char: str = "") -> str:
207 | """
208 | Takes a single line, optionally adds an indent and/or outcome
209 | character to the beginning of the line.
210 | """
211 | actual_spaces = (indent * self.indent_spaces) - len(outcome_char)
212 | return outcome_char + " " * actual_spaces + line
213 |
214 | def isatty(self) -> bool:
215 | """
216 | Wrap internal self.stream.isatty.
217 | """
218 | return self.stream.isatty()
219 |
--------------------------------------------------------------------------------
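To illustrate GreenStream.formatLine(): the outcome character eats into the
indent so that columns stay aligned. A small sketch with made-up values:

    import sys

    from green.output import GreenStream

    stream = GreenStream(sys.stdout)
    # indent=2 yields 4 spaces of indent; the "." replaces one of them.
    stream.writeln(stream.formatLine("test_foo", indent=2, outcome_char="."))
    # Prints: ".   test_foo"

--------------------------------------------------------------------------------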
/green/process.py:
--------------------------------------------------------------------------------
1 | """
2 | Handle running unittests suites in parallel.
3 | """
4 |
5 | from __future__ import annotations
6 |
7 | import logging
8 | import multiprocessing
9 | import multiprocessing.pool
10 | from multiprocessing.pool import MaybeEncodingError # type: ignore
11 | from multiprocessing.pool import Pool
12 | from multiprocessing import util # type: ignore
13 |
14 | import os
15 | import random
16 | import sys
17 | import tempfile
18 | import traceback
19 | from typing import (
20 | Type,
21 | TYPE_CHECKING,
22 | Union,
23 | Tuple,
24 | Callable,
25 | Iterable,
26 | Mapping,
27 | Any,
28 | TypeVar,
29 | )
30 |
31 | import coverage
32 |
33 | from green.exceptions import InitializerOrFinalizerError
34 | from green.loader import GreenTestLoader
35 | from green.result import proto_test, ProtoTest, ProtoTestResult
36 |
37 | if TYPE_CHECKING:
38 | from types import TracebackType
39 | from queue import Queue
40 |
41 | from multiprocessing.context import SpawnContext, SpawnProcess
42 | from multiprocessing.pool import ApplyResult
43 | from multiprocessing.queues import SimpleQueue
44 |
45 | from green.suite import GreenTestSuite
46 | from green.runner import InitializerOrFinalizer
47 | from green.result import RunnableTestT
48 |
49 | ExcInfoType = Union[
50 | Tuple[Type[BaseException], BaseException, TracebackType],
51 | Tuple[None, None, None],
52 | ]
53 | _T = TypeVar("_T")
54 |
55 |
56 | # Super-useful debug function for finding problems in the subprocesses, and it
57 | # even works on windows
58 | def ddebug(msg: str, err: ExcInfoType | None = None) -> None: # pragma: no cover
59 | """
60 | err can be an instance of sys.exc_info() -- which is the latest traceback
61 | info
62 | """
63 | if err:
64 | error_string = "".join(traceback.format_exception(*err))
65 | else:
66 | error_string = ""
67 | sys.__stdout__.write(f"({os.getpid()}) {msg} {error_string}\n") # type: ignore
68 | sys.__stdout__.flush() # type: ignore
69 |
70 |
71 | class ProcessLogger:
72 | """
73 | I am used by LoggingDaemonlessPool to get crash output out to the logger,
74 | instead of having process crashes be silent.
75 | """
76 |
77 | def __init__(self, callable: Callable) -> None:
78 | self.__callable = callable
79 |
80 | def __call__(self, *args, **kwargs) -> Any:
81 | try:
82 | return self.__callable(*args, **kwargs)
83 | except Exception:
84 | # Here we add some debugging help. If multiprocessing's
85 | # debugging is on, it will arrange to log the traceback
86 | logger = multiprocessing.get_logger()
87 | if not logger.handlers:
88 | logger.addHandler(logging.StreamHandler())
89 | logger.error(traceback.format_exc())
90 | logger.handlers[0].flush()
91 | # Re-raise the original exception so the Pool worker can
92 | # clean up
93 | raise
94 |
95 |
96 | class LoggingDaemonlessPool(Pool):
97 | """
98 | I make a pool of workers which can get crash output to the logger, run processes not as daemons,
99 | and which run finalizers.
100 | """
101 |
102 | _wrap_exception: bool = True
103 |
104 | @staticmethod
105 | def Process(ctx: SpawnContext, *args: Any, **kwargs: Any) -> SpawnProcess:
106 | return ctx.Process(daemon=False, *args, **kwargs)
107 |
108 | def apply_async(
109 | self,
110 | func: Callable[[Any, Any], _T], # should be the poolRunner method.
111 | args: Iterable = (),
112 | kwargs: Mapping[str, Any] | None = None,
113 | callback: Callable[[_T], Any] | None = None,
114 | error_callback: Callable[[BaseException], Any] | None = None,
115 | ) -> ApplyResult[_T]:
116 | if kwargs is None:
117 | kwargs = {}
118 | return super().apply_async(
119 | ProcessLogger(func), args, kwargs, callback, error_callback
120 | )
121 |
122 | def __init__(
123 | self,
124 | processes: int | None = None,
125 | initializer: Callable | None = None,
126 | initargs: Iterable[Any] = (),
127 | maxtasksperchild: int | None = None,
128 | context: Any | None = None,
129 | # Green specific:
130 | finalizer: Callable | None = None,
131 | finalargs: Iterable[Any] = (),
132 | ):
133 | self._finalizer = finalizer
134 | self._finalargs = finalargs
135 | super().__init__(processes, initializer, initargs, maxtasksperchild, context)
136 |
137 | def _repopulate_pool(self):
138 | return self._repopulate_pool_static(
139 | self._ctx,
140 | self.Process,
141 | self._processes,
142 | self._pool,
143 | self._inqueue,
144 | self._outqueue,
145 | self._initializer,
146 | self._initargs,
147 | self._maxtasksperchild,
148 | self._wrap_exception,
149 | self._finalizer,
150 | self._finalargs,
151 | )
152 |
153 | @staticmethod
154 | def _repopulate_pool_static(
155 | ctx: SpawnContext,
156 | Process: Callable, # LoggingDaemonlessPool.Process
157 | processes: int,
158 | pool: list[Callable], # list of LoggingDaemonlessPool.Process
159 | inqueue: SimpleQueue,
160 | outqueue: SimpleQueue,
161 | initializer: InitializerOrFinalizer,
162 | initargs: tuple,
163 | maxtasksperchild: int | None,
164 | wrap_exception: bool,
165 | finalizer: InitializerOrFinalizer,
166 | finalargs: tuple,
167 | ) -> None:
168 | """
169 | Bring the number of pool processes up to the specified number,
170 | for use after reaping workers which have exited.
171 | """
172 | for i in range(processes - len(pool)):
173 | w = Process(
174 | ctx,
175 | target=worker,
176 | args=(
177 | inqueue,
178 | outqueue,
179 | initializer,
180 | initargs,
181 | maxtasksperchild,
182 | wrap_exception,
183 | finalizer,
184 | finalargs,
185 | ),
186 | )
187 | w.name = w.name.replace("Process", "PoolWorker")
188 | w.start()
189 | pool.append(w)
190 | util.debug("added worker")
191 |
192 |
193 | def worker(
194 | inqueue: SimpleQueue,
195 | outqueue: SimpleQueue,
196 | initializer: InitializerOrFinalizer | None = None,
197 | initargs: tuple = (),
198 | maxtasks: int | None = None,
199 | wrap_exception: bool = False,
200 | finalizer: Callable | None = None,
201 | finalargs: tuple = (),
202 | ): # pragma: no cover
203 | # TODO: revisit this assert; these statements are skipped by the python
204 | # compiler in optimized mode.
205 | assert maxtasks is None or (isinstance(maxtasks, int) and maxtasks > 0)
206 | put = outqueue.put
207 | get = inqueue.get
208 |
209 | writer = getattr(inqueue, "_writer", None)
210 | if writer is not None:
211 | writer.close()
212 | reader = getattr(outqueue, "_reader", None)
213 | if reader is not None:
214 | reader.close()
215 |
216 | if initializer is not None:
217 | try:
218 | initializer(*initargs)
219 | except InitializerOrFinalizerError as e:
220 | print(str(e))
221 |
222 | completed = 0
223 | while maxtasks is None or (maxtasks and completed < maxtasks):
224 | try:
225 | task = get()
226 | except (EOFError, OSError):
227 | util.debug("worker got EOFError or OSError -- exiting")
228 | break
229 |
230 | if task is None:
231 | util.debug("worker got sentinel -- exiting")
232 | break
233 |
234 | job, i, func, args, kwds = task
235 | try:
236 | result = (True, func(*args, **kwds))
237 | except Exception as result_error:
238 | if wrap_exception:
239 | result_error = ExceptionWithTraceback(
240 | result_error, result_error.__traceback__
241 | )
242 | result = (False, result_error)
243 | try:
244 | put((job, i, result))
245 | except Exception as e:
246 | wrapped = MaybeEncodingError(e, result[1])
247 | util.debug("Possible encoding error while sending result: %s" % (wrapped))
248 | put((job, i, (False, wrapped)))
249 | completed += 1
250 |
251 | if finalizer:
252 | try:
253 | finalizer(*finalargs)
254 | except InitializerOrFinalizerError as e:
255 | print(str(e))
256 |
257 | util.debug("worker exiting after %d tasks" % completed)
258 |
259 |
260 | # Unmodified (see above)
261 | class RemoteTraceback(Exception): # pragma: no cover
262 | def __init__(self, tb: str):
263 | self.tb = tb
264 |
265 | def __str__(self) -> str:
266 | return self.tb
267 |
268 |
269 | # Unmodified (see above)
270 | class ExceptionWithTraceback(Exception): # pragma: no cover
271 | def __init__(self, exc: BaseException, tb: TracebackType | None):
272 | tb_lines = traceback.format_exception(type(exc), exc, tb)
273 | tb_text = "".join(tb_lines)
274 | self.exc = exc
275 | self.tb = '\n"""\n%s"""' % tb_text
276 |
277 | def __reduce__(self) -> Tuple[Callable, Tuple[BaseException, str]]:
278 | return rebuild_exc, (self.exc, self.tb)
279 |
280 |
281 | # Unmodified (see above)
282 | def rebuild_exc(exc: BaseException, tb: str): # pragma: no cover
283 | exc.__cause__ = RemoteTraceback(tb)
284 | return exc
285 |
286 |
287 | multiprocessing.pool.worker = worker # type: ignore
288 | # END of Worker Finalization Monkey Patching
289 | # -----------------------------------------------------------------------------
290 |
291 |
292 | def poolRunner(
293 | target: str,
294 | queue: Queue,
295 | coverage_number: int | None = None,
296 | omit_patterns: str | Iterable[str] | None = None,
297 | cov_config_file: bool = True,
298 | ) -> None: # pragma: no cover
299 | """
300 | I am the function that pool worker processes run. I run one unit test.
301 |
302 |     cov_config_file is a special option that is either a string specifying
303 |     the custom coverage config file or the special default value True (which
304 |     causes coverage to search for its standard config files).
305 | """
306 |     # Each pool worker gets its own temp directory, to avoid having tests
307 |     # that take turns using the same temp file name interfere with each
308 |     # other. So long as the test doesn't use a hard-coded temp directory,
309 |     # anyway.
310 | saved_tempdir = tempfile.tempdir
311 | tempfile.tempdir = tempfile.mkdtemp()
312 |
313 | def raise_internal_failure(msg: str) -> None:
314 | err = sys.exc_info()
315 | t = ProtoTest()
316 | t.module = "green.loader"
317 | t.class_name = "N/A"
318 | t.description = msg
319 | t.method_name = "poolRunner"
320 | result.startTest(t)
321 | result.addError(t, err)
322 | result.stopTest(t)
323 | queue.put(result)
324 | cleanup()
325 |
326 | def cleanup() -> None:
327 | # Restore the state of the temp directory
328 | tempfile.tempdir = saved_tempdir
329 | queue.put(None)
330 | # Finish coverage
331 | if coverage_number:
332 | cov.stop()
333 | cov.save()
334 |
335 | # Each pool starts its own coverage, later combined by the main process.
336 | if coverage_number:
337 | cov = coverage.coverage(
338 | data_file=".coverage.{}_{}".format(
339 | coverage_number, random.randint(0, 10000)
340 | ),
341 | omit=omit_patterns,
342 | config_file=cov_config_file,
343 | )
344 | cov._warn_no_data = False
345 | cov.start()
346 |
347 | # What to do each time an individual test is started
348 | already_sent = set()
349 |
350 | def start_callback(test: RunnableTestT) -> None:
351 | # Let the main process know what test we are starting
352 | test_proto = proto_test(test)
353 | if test_proto not in already_sent:
354 | queue.put(test_proto)
355 | already_sent.add(test_proto)
356 |
357 | def finalize_callback(test_result: ProtoTestResult) -> None:
358 | # Let the main process know what happened with the test run
359 | queue.put(test_result)
360 |
361 | result = ProtoTestResult(start_callback, finalize_callback)
362 | test: GreenTestSuite | None
363 | try:
364 | loader = GreenTestLoader()
365 | test = loader.loadTargets(target)
366 | except:
367 | raise_internal_failure("Green encountered an error loading the unit test.")
368 | return
369 |
370 | if test is not None and getattr(test, "run", False):
371 | # Loading was successful, lets do this
372 | try:
373 | test.run(result)
374 | # If your class setUpClass(self) method crashes, the test doesn't
375 | # raise an exception, but it does add an entry to errors. Some
376 | # other things add entries to errors as well, but they all call the
377 | # finalize callback.
378 | if (
379 | result
380 | and (not result.finalize_callback_called)
381 | and getattr(result, "errors", False)
382 | ):
383 | queue.put(test)
384 | queue.put(result)
385 | except:
386 | # Some frameworks like testtools record the error AND THEN let it
387 | # through to crash things. So we only need to manufacture another
388 | # error if the underlying framework didn't, but either way we don't
389 | # want to crash.
390 | if result.errors:
391 | queue.put(result)
392 | else:
393 | try:
394 | err = sys.exc_info()
395 | result.startTest(test)
396 | result.addError(test, err)
397 | result.stopTest(test)
398 | queue.put(result)
399 | except:
400 | raise_internal_failure(
401 | "Green encountered an error when running the test."
402 | )
403 | return
404 | else:
405 | # loadTargets() returned an object without a run() method, probably
406 | # None
407 | description = (
408 | f'Test loader returned an un-runnable object. Is "{target}" '
409 | "importable from your current location? Maybe you "
410 | "forgot an __init__.py in your directory? Unrunnable "
411 | f"object looks like: {test} of type {type(test)} with dir {dir(test)}"
412 | )
413 | no_run_error = (TypeError, TypeError(description), None)
414 | t = ProtoTest()
415 | t.description = description
416 | target_list = target.split(".")
417 | if len(target_list) > 1:
418 | t.module = ".".join(target_list[:-2])
419 | t.class_name = target_list[-2]
420 | t.method_name = target_list[-1]
421 | else:
422 | t.module = target
423 | t.class_name = "UnknownClass"
424 | t.method_name = "unknown_method"
425 | result.startTest(t)
426 | # Ignoring that no_run_error traceback is None.
427 | result.addError(t, no_run_error) # type: ignore[arg-type]
428 | result.stopTest(t)
429 | queue.put(result)
430 |
431 | cleanup()
432 |
--------------------------------------------------------------------------------
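The message protocol poolRunner speaks over its queue is: a ProtoTest when a
test starts, the matching ProtoTestResult when it finishes, and a final None
sentinel from cleanup(). A simplified consumer sketch (the real consumer is
the loop in green/runner.py, which follows):

    while True:
        msg = queue.get()
        if msg is None:  # sentinel: this target's worker is done
            break
        # msg announces a started test; a result message is
        # guaranteed to follow it.
        proto_test_result = queue.get()

--------------------------------------------------------------------------------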
/green/runner.py:
--------------------------------------------------------------------------------
1 | """Running tests."""
2 |
3 | from __future__ import annotations
4 |
5 | import argparse
6 | import multiprocessing
7 | from sys import modules
8 | from typing import TextIO, TYPE_CHECKING
9 | from unittest.signals import registerResult, installHandler, removeResult
10 | import warnings
11 |
12 | from green.exceptions import InitializerOrFinalizerError
13 | from green.loader import toParallelTargets
14 | from green.output import debug, GreenStream
15 | from green.process import LoggingDaemonlessPool, poolRunner
16 | from green.result import GreenTestResult, ProtoTestResult
17 |
18 | if TYPE_CHECKING:
19 | from multiprocessing.managers import SyncManager
20 | from queue import Queue
21 |
22 |
23 | class InitializerOrFinalizer:
24 | """
25 | I represent a command that will be run as either the initializer or the
26 | finalizer for a worker process. The only reason I'm a class instead of a
27 | function is so that I can be instantiated at the creation time of the Pool
28 | (with the user's customized command to run), but actually run at the
29 | appropriate time.
30 | """
31 |
32 | def __init__(self, dotted_function: str) -> None:
33 | self.module_part = ".".join(dotted_function.split(".")[:-1])
34 | self.function_part = ".".join(dotted_function.split(".")[-1:])
35 |
36 | def __call__(self, *args) -> None:
37 | if not self.module_part:
38 | return
39 | try:
40 | __import__(self.module_part)
41 | loaded_function = getattr(
42 | modules[self.module_part], self.function_part, None
43 | )
44 | except Exception as e:
45 | raise InitializerOrFinalizerError(
46 | f"Couldn't load '{self.function_part}' - got: {str(e)}"
47 | )
48 | if not loaded_function:
49 | raise InitializerOrFinalizerError(
50 | "Loaded module '{}', but couldn't find function '{}'".format(
51 | self.module_part, self.function_part
52 | )
53 | )
54 | try:
55 | loaded_function()
56 | except Exception as e:
57 | raise InitializerOrFinalizerError(
58 | f"Error running '{self.function_part}' - got: {str(e)}"
59 | )
60 |
61 |
62 | def run(
63 | suite, stream: TextIO | GreenStream, args: argparse.Namespace, testing: bool = False
64 | ) -> GreenTestResult:
65 | """
66 | Run the given test case or test suite with the specified arguments.
67 |
68 |     Any stream passed in will be wrapped in a GreenStream.
69 | """
70 | if not isinstance(stream, GreenStream):
71 | stream = GreenStream(
72 | stream,
73 | disable_windows=args.disable_windows,
74 | disable_unidecode=args.disable_unidecode,
75 | )
76 | result = GreenTestResult(args, stream)
77 |
78 | # Note: Catching SIGINT isn't supported by Python on windows (python
79 | # "WONTFIX" issue 18040)
80 | installHandler()
81 | # Ignore the type mismatch until we make GreenTestResult a subclass of unittest.TestResult.
82 | registerResult(result) # type: ignore
83 |
84 | with warnings.catch_warnings():
85 | if args.warnings: # pragma: no cover
86 | # if args.warnings is set, use it to filter all the warnings
87 | warnings.simplefilter(args.warnings)
88 | # if the filter is 'default' or 'always', special-case the
89 | # warnings from the deprecated unittest methods to show them
90 | # no more than once per module, because they can be fairly
91 | # noisy. The -Wd and -Wa flags can be used to bypass this
92 | # only when args.warnings is None.
93 | if args.warnings in ["default", "always"]:
94 | warnings.filterwarnings(
95 | "module",
96 | category=DeprecationWarning,
97 | message=r"Please use assert\w+ instead.",
98 | )
99 |
100 | result.startTestRun()
101 |
102 | # The call to toParallelTargets needs to happen before pool stuff so we can crash if there
103 | # are, for example, syntax errors in the code to be loaded.
104 | parallel_targets = toParallelTargets(suite, args.targets)
105 | # Use "forkserver" method when available to avoid problems with "fork". See, for example,
106 | # https://github.com/python/cpython/issues/84559
107 | if "forkserver" in multiprocessing.get_all_start_methods():
108 | mp_method = "forkserver"
109 | else:
110 | mp_method = None
111 | mp_context = multiprocessing.get_context(mp_method)
112 | pool = LoggingDaemonlessPool(
113 | processes=args.processes or None,
114 | initializer=InitializerOrFinalizer(args.initializer),
115 | finalizer=InitializerOrFinalizer(args.finalizer),
116 | maxtasksperchild=args.maxtasksperchild,
117 | context=mp_context,
118 | )
119 | manager: SyncManager = mp_context.Manager()
120 | targets: list[tuple[str, Queue]] = [
121 | (target, manager.Queue()) for target in parallel_targets
122 | ]
123 | if targets:
124 | for index, (target, queue) in enumerate(targets):
125 | if args.run_coverage:
126 | coverage_number = index + 1
127 | else:
128 | coverage_number = None
129 | debug(f"Sending {target} to poolRunner {poolRunner}")
130 | pool.apply_async(
131 | poolRunner,
132 | (
133 | target,
134 | queue,
135 | coverage_number,
136 | args.omit_patterns,
137 | args.cov_config_file,
138 | ),
139 | )
140 | pool.close()
141 | for target, queue in targets:
142 | abort = False
143 |
144 | while True:
145 | msg = queue.get()
146 |
147 | # Sentinel value, we're done
148 | if not msg:
149 |                     debug("runner.run(): received sentinel, breaking.", 3)
150 | break
151 | else:
152 | debug(f"runner.run(): start test: {msg}")
153 | # Result guaranteed after this message, we're
154 | # currently waiting on this test, so print out
155 | # the white 'processing...' version of the output
156 | result.startTest(msg)
157 | proto_test_result: ProtoTestResult = queue.get()
158 | debug(
159 | "runner.run(): received proto test result: {}".format(
160 | str(proto_test_result)
161 | ),
162 | 3,
163 | )
164 | result.addProtoTestResult(proto_test_result)
165 |
166 | if result.shouldStop:
167 | debug("runner.run(): shouldStop encountered, breaking", 3)
168 | abort = True
169 | break
170 |
171 | if abort:
172 | break
173 |
174 | pool.close()
175 | pool.join()
176 | manager.shutdown()
177 |
178 | result.stopTestRun()
179 |
180 | # Ignore the type mismatch until we make GreenTestResult a subclass of unittest.TestResult.
181 | removeResult(result) # type: ignore
182 |
183 | return result
184 |
--------------------------------------------------------------------------------
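InitializerOrFinalizer resolves its dotted path lazily, inside the worker
process. A hypothetical initializer to pair with it (the module and function
names are invented for illustration):

    # myproj/fixtures.py
    def setup_db():
        # Runs in each pool worker before it executes any tests.
        ...

    # Hooked up via the dotted string, e.g. --initializer myproj.fixtures.setup_db;
    # each worker then calls InitializerOrFinalizer("myproj.fixtures.setup_db")().

--------------------------------------------------------------------------------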
/green/shell_completion.sh:
--------------------------------------------------------------------------------
1 | # zsh version
2 | if [ -n "$ZSH_VERSION" ]; then
3 | _green_completion() {
4 | local word completions
5 | word="$1"
6 | case "${word}" in
7 | -*)
8 | completions="$(green --options)"
9 | ;;
10 | *)
11 | completions="$(green --completions "${word}")"
12 | ;;
13 | esac
14 | reply=( "${(ps:\n:)completions}" )
15 | }
16 |
17 | compctl -K _green_completion green
18 |
19 | # bash version
20 | elif [ -n "$BASH_VERSION" ]; then
21 | _green_completion() {
22 | local word opts
23 | COMPREPLY=()
24 | word="${COMP_WORDS[COMP_CWORD]}"
25 | opts="$(green --options)"
26 | case "${word}" in
27 | -*)
28 | COMPREPLY=( $(compgen -W "${opts}" -- ${word}) )
29 | return 0
30 | ;;
31 | esac
32 | COMPREPLY=( $(compgen -W "$(green --completions ${word} | tr '\n' ' ')" -- ${word}) )
33 | }
34 | complete -F _green_completion green
35 | fi
36 |
--------------------------------------------------------------------------------
/green/suite.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import argparse
4 | from fnmatch import fnmatch
5 | from io import StringIO
6 | import sys
7 | import unittest
8 | from typing import Iterable, TYPE_CHECKING
9 | from unittest.suite import _call_if_exists, _DebugResult, _isnotsuite, TestSuite # type: ignore
10 | from unittest import util
11 |
12 | from green.config import get_default_args
13 | from green.output import GreenStream
14 |
15 | if TYPE_CHECKING:
16 | from unittest.case import TestCase
17 | from unittest.result import TestResult
18 | from green.result import GreenTestResult, ProtoTestResult
19 |
20 |
21 | class GreenTestSuite(TestSuite):
22 | """
23 | This version of a test suite has two important functions:
24 |
25 | 1) It brings Python 3.x-like features to Python 2.7
26 | 2) It adds Green-specific features (see customize())
27 | """
28 |
29 | args: argparse.Namespace | None = None
30 |
31 | def __init__(
32 | self,
33 | tests: Iterable[TestCase | TestSuite] = (),
34 | args: argparse.Namespace | None = None,
35 | ) -> None:
36 | # You should either set GreenTestSuite.args before instantiation, or
37 | # pass args into __init__
38 | self._removed_tests = 0
39 | default_args = get_default_args()
40 | self.allow_stdout = default_args.allow_stdout
41 | self.full_test_pattern = "test" + default_args.test_pattern
42 | self.customize(args)
43 | super().__init__(tests)
44 |
45 | def addTest(self, test: TestCase | TestSuite) -> None:
46 | """
47 | Override default behavior with some green-specific behavior.
48 | """
49 | if self.full_test_pattern:
50 |             # "test" may actually be a suite or other object; only tests have _testMethodName.
51 | method_name = getattr(test, "_testMethodName", None)
52 | # Fake test cases (generated for module import failures, for example)
53 | # do not start with 'test'. We still want to see those fake cases.
54 | if (
55 | method_name
56 | and method_name.startswith("test")
57 | and not fnmatch(method_name, self.full_test_pattern)
58 | ):
59 | return
60 | super().addTest(test)
61 |
62 | def customize(self, args: argparse.Namespace | None) -> None:
63 |         """
64 |         Green-specific behavior customization via an args namespace from
65 |         the green.config module. If you don't pass in an args namespace,
66 |         then this class acts like TestSuite from Python 3.x.
67 |         """
68 | # Set a new args on the CLASS
69 | if args:
70 | self.args = args
71 |
72 | # Use the class args
73 | if self.args and getattr(self.args, "allow_stdout", None):
74 | self.allow_stdout = self.args.allow_stdout
75 | if self.args and getattr(self.args, "test_pattern", None):
76 | self.full_test_pattern = "test" + self.args.test_pattern
77 |
78 | def _removeTestAtIndex(self, index: int) -> None:
79 | """
80 | Python 3.x-like version of this function for Python 2.7's sake.
81 | """
82 | test = self._tests[index]
83 | if hasattr(test, "countTestCases"):
84 | self._removed_tests += test.countTestCases()
85 | # FIXME: The upstream typing does not allow None:
86 | # unittest.suite.BaseTestSuite._tests: list[unittest.case.TestCase]
87 | self._tests[index] = None # type: ignore
88 |
89 | def countTestCases(self) -> int:
90 | """
91 | Python 3.x-like version of this function for Python 2.7's sake.
92 | """
93 | cases = self._removed_tests
94 | for test in self:
95 | if test:
96 | cases += test.countTestCases()
97 | return cases
98 |
99 | def _handleClassSetUp(
100 | self, test: TestCase | TestSuite, result: ProtoTestResult
101 | ) -> None:
102 | previousClass = getattr(result, "_previousTestClass", None)
103 | currentClass = test.__class__
104 | if currentClass == previousClass:
105 | return
106 | if result._moduleSetUpFailed: # type: ignore[attr-defined]
107 | return
108 | if getattr(currentClass, "__unittest_skip__", False):
109 | return
110 |
111 | try:
112 | currentClass._classSetupFailed = False # type: ignore
113 | except TypeError:
114 | # test may actually be a function
115 | # so its class will be a builtin-type
116 | pass
117 |
118 | setUpClass = getattr(currentClass, "setUpClass", None)
119 | if setUpClass is not None:
120 | _call_if_exists(result, "_setupStdout")
121 | try:
122 | setUpClass()
123 | # Upstream Python forgets to take SkipTest into account
124 | except unittest.case.SkipTest as e:
125 | currentClass.__unittest_skip__ = True # type: ignore
126 | currentClass.__unittest_skip_why__ = str(e) # type: ignore
127 | # -- END of fix
128 | except Exception as e:
129 | if isinstance(result, _DebugResult):
130 | raise
131 | currentClass._classSetupFailed = True # type: ignore
132 | className = util.strclass(currentClass)
133 | self._createClassOrModuleLevelException( # type: ignore
134 | result, e, "setUpClass", className
135 | )
136 | finally:
137 | _call_if_exists(result, "_restoreStdout")
138 | if currentClass._classSetupFailed is True: # type: ignore
139 | currentClass.doClassCleanups() # type: ignore
140 | if currentClass.tearDown_exceptions: # type: ignore
141 | for exc in currentClass.tearDown_exceptions: # type: ignore
142 | self._createClassOrModuleLevelException( # type: ignore
143 | result, exc[1], "setUpClass", className, info=exc
144 | )
145 |
146 | def run( # type: ignore[override]
147 | self, result: ProtoTestResult, debug: bool = False
148 | ) -> ProtoTestResult:
149 | """
150 | Emulate unittest's behavior, with Green-specific changes.
151 | """
152 | topLevel = False
153 | if getattr(result, "_testRunEntered", False) is False:
154 | result._testRunEntered = topLevel = True # type: ignore
155 |
156 | for index, test in enumerate(self):
157 | if result.shouldStop:
158 | break
159 |
160 | if _isnotsuite(test):
161 | self._tearDownPreviousClass(test, result) # type: ignore[attr-defined]
162 | self._handleModuleFixture(test, result) # type: ignore[attr-defined]
163 | self._handleClassSetUp(test, result) # type: ignore[attr-defined]
164 | result._previousTestClass = test.__class__ # type: ignore[attr-defined]
165 |
166 | if getattr(test.__class__, "_classSetupFailed", False) or getattr(
167 | result, "_moduleSetUpFailed", False
168 | ):
169 | continue
170 |
171 | if not self.allow_stdout:
172 | captured_stdout = StringIO()
173 | captured_stderr = StringIO()
174 | saved_stdout = sys.stdout
175 | saved_stderr = sys.stderr
176 | sys.stdout = GreenStream(captured_stdout) # type: ignore[assignment]
177 | sys.stderr = GreenStream(captured_stderr) # type: ignore[assignment]
178 |
179 | test(result) # type: ignore[arg-type]
180 |
181 | if _isnotsuite(test):
182 | if not self.allow_stdout:
183 | sys.stdout = saved_stdout
184 | sys.stderr = saved_stderr
185 | result.recordStdout(test, captured_stdout.getvalue())
186 | result.recordStderr(test, captured_stderr.getvalue())
187 | # Since we're intercepting the stdout/stderr out here at the
188 | # suite level, we need to poke the test result and let it know
189 | # when we're ready to transmit results back up to the parent
190 | # process. I would rather just do it automatically at test
191 | # stop time, but we don't have the captured stuff at that
192 | # point. Messy...but the only other alternative I can think of
193 | # is monkey-patching loaded TestCases -- which could be from
194 | # unittest or twisted or some other custom subclass.
195 | result.finalize()
196 |
197 | self._removeTestAtIndex(index)
198 |
199 | # Green's subprocesses have handled all actual tests and sent up the
200 | # result, but unittest expects to be able to add teardown errors to
201 |         # the result still, so we'll need to watch for that ourselves.
202 | errors_before = len(result.errors)
203 |
204 | if topLevel:
205 | self._tearDownPreviousClass(None, result) # type: ignore[attr-defined]
206 | self._handleModuleTearDown(result) # type: ignore[attr-defined]
207 | result._testRunEntered = False # type: ignore[attr-defined]
208 |
209 |         # Special handling for class/module tear-down errors: startTest() and
210 |         # finalize() trigger communication with the runner process, so we
211 |         # replay the full startTest()/addError()/stopTest()/finalize() sequence.
212 | if errors_before != len(result.errors):
213 | difference = len(result.errors) - errors_before
214 | result.errors, new_errors = (
215 | result.errors[:-difference],
216 | result.errors[-difference:],
217 | )
218 | for test_proto, err in new_errors:
219 | # test = ProtoTest()
220 | previous_test_class = result._previousTestClass # type: ignore[attr-defined]
221 | test_proto.module = previous_test_class.__module__
222 | test_proto.class_name = previous_test_class.__name__
223 | # test.method_name = 'some method name'
224 | test_proto.is_class_or_module_teardown_error = True
225 | test_proto.name = "Error in class or module teardown"
226 | # test.docstr_part = 'docstr part' # error_holder.description
227 | result.startTest(test_proto)
228 | result.addError(test_proto, err)
229 | result.stopTest(test_proto)
230 | result.finalize()
231 | return result
232 |
--------------------------------------------------------------------------------
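The stdout/stderr handling in GreenTestSuite.run() above is a save/swap/restore
pattern around each test call. A distilled sketch of just that pattern -- the
real code wraps the buffers in GreenStream, skips capture when allow_stdout is
set, and hands the captured text to the result via recordStdout/recordStderr;
the try/finally is added here for safety in isolation:

    import sys
    from io import StringIO

    def run_captured(test_callable):
        """Run test_callable with stdout/stderr captured; return both texts."""
        captured_out, captured_err = StringIO(), StringIO()
        saved_out, saved_err = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = captured_out, captured_err
        try:
            test_callable()
        finally:
            sys.stdout, sys.stderr = saved_out, saved_err
        return captured_out.getvalue(), captured_err.getvalue()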
/green/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CleanCut/green/e89b49146c8767ff4a0cedf05f6e8dffcba8e0cd/green/test/__init__.py
--------------------------------------------------------------------------------
/green/test/test_cmdline.py:
--------------------------------------------------------------------------------
1 | from io import StringIO
2 | import logging
3 | import os
4 | from os.path import isfile, join
5 | import shutil
6 | import sys
7 | import tempfile
8 | import unittest
9 | from unittest.mock import MagicMock
10 |
11 | from green import cmdline
12 | from green import config
13 | from green.output import GreenStream
14 |
15 |
16 | class TestMain(unittest.TestCase):
17 | def setUp(self):
18 | self.s = StringIO()
19 | self.gs = GreenStream(self.s)
20 | saved_stdout = config.sys.stdout
21 | config.sys.stdout = self.gs
22 | self.addCleanup(setattr, config.sys, "stdout", saved_stdout)
23 |
24 | def tearDown(self):
25 | del self.gs
26 | del self.s
27 |
28 | def test_notTesting(self):
29 | """
30 | We actually attempt running loadTargets (coverage test)
31 | """
32 | tmpdir = tempfile.mkdtemp()
33 | cwd = os.getcwd()
34 | os.chdir(tmpdir)
35 | sys.path.insert(0, cwd)
36 | argv = [tmpdir]
37 | cmdline.main(argv)
38 | os.chdir(cwd)
39 | del sys.path[0]
40 | shutil.rmtree(tmpdir)
41 |
42 | def test_configFileDebug(self):
43 | """
44 | A debug message is output if a config file is loaded (coverage test)
45 | """
46 | tmpdir = tempfile.mkdtemp()
47 | filename = os.path.join(tmpdir, "config")
48 | fh = open(filename, "w")
49 | fh.write("debug = 2")
50 | fh.close()
51 | argv = ["-dd", "--config", filename]
52 | cmdline.main(argv, testing=True)
53 | shutil.rmtree(tmpdir)
54 |
55 | def test_completionFile(self):
56 | """
57 |         --completion-file causes the shell-completion file path to be output
58 | """
59 | argv = ["--completion-file"]
60 | cmdline.main(argv, testing=True)
61 | self.assertIn("shell_completion.sh", self.s.getvalue())
62 |
63 | def test_completions(self):
64 | """
65 | --completions returns completions (the loader module tests deeper)
66 | """
67 | cwd = os.getcwd()
68 | path = os.path.abspath(__file__)
69 | os.chdir(os.path.dirname(os.path.dirname(os.path.dirname(path))))
70 | argv = ["--completions", "green"]
71 | cmdline.main(argv, testing=True)
72 | os.chdir(cwd)
73 | self.assertIn("green.test", self.s.getvalue())
74 |
75 | def test_options(self):
76 | """
77 | --options causes options to be output
78 | """
79 | argv = ["--options"]
80 | cmdline.main(argv, testing=True)
81 | self.assertIn("--options", self.s.getvalue())
82 | self.assertIn("--version", self.s.getvalue())
83 |
84 | def test_version(self):
85 | """
86 | --version causes a version string to be output
87 | """
88 | argv = ["--version"]
89 | cmdline.main(argv, testing=True)
90 | self.assertIn("Green", self.s.getvalue())
91 | self.assertIn("Python", self.s.getvalue())
92 |
93 | def test_debug(self):
94 | """
95 | --debug causes the log-level to be set to debug
96 | """
97 | argv = ["--debug"]
98 | saved_basicConfig = config.logging.basicConfig
99 | self.addCleanup(setattr, config.logging, "basicConfig", saved_basicConfig)
100 | config.logging.basicConfig = MagicMock()
101 | cmdline.main(argv, testing=True)
102 | config.logging.basicConfig.assert_called_with(
103 | level=logging.DEBUG,
104 | format="%(asctime)s %(levelname)9s %(message)s",
105 | datefmt="%Y-%m-%d %H:%M:%S",
106 | )
107 |
108 | def test_disableTermcolor(self):
109 | """
110 | --notermcolor causes coverage of the line disabling termcolor
111 | """
112 | argv = ["--notermcolor"]
113 | cmdline.main(argv, testing=True)
114 |
115 | def test_disableWindowsSupport(self):
116 | """
117 | --disable-windows
118 | """
119 | argv = ["--disable-windows"]
120 | cmdline.main(argv, testing=True)
121 |
122 | def test_noTestsCreatesEmptyTestSuite(self):
123 | """
124 | If loadTargets doesn't find any tests, an empty test suite is created.
125 | Coverage test, since loading the module inside the main function (due
126 | to coverage handling constraints) prevents injecting a mock.
127 | """
128 | argv = ["", "/tmp/non-existent/path"]
129 | cmdline.main(argv, testing=True)
130 |
131 | def test_import_cmdline_module(self):
132 | """
133 | The cmdline module can be imported
134 | """
135 |         # In Python 3, reload lives in importlib. Python 2.7 had reload as
136 |         # a builtin, which is what the fallback below relies on.
137 |         try:
138 |             from importlib import reload
139 |         except ImportError:
140 |             # Python 2.7: reload is a builtin, nothing to import.
141 |             pass
142 |
143 |         reload(cmdline)
144 |
145 | def test_generate_junit_test_report(self):
146 | """
147 | Test that a report is generated when we use the '--junit-report' option.
148 | """
149 | tmpdir = tempfile.mkdtemp()
150 | report = join(tmpdir, "test_report.xml")
151 | self.assertFalse(isfile(report))
152 |
153 | argv = ["--junit-report", report]
154 | cmdline.main(argv, testing=True)
155 |
156 | self.assertTrue(isfile(report))
157 | shutil.rmtree(tmpdir)
158 |
--------------------------------------------------------------------------------
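Every test above drives green through cmdline.main(argv, testing=True), which
is also the simplest way to invoke green programmatically. A minimal sketch --
the tests above treat the return value as an exit status:

    from green.cmdline import main

    # testing=True keeps main() from exiting the interpreter, so the status
    # comes back as a return value instead.
    status = main(["--version"], testing=True)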
/green/test/test_command.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import contextlib
3 | import os
4 | import sys
5 | import unittest
6 | from configparser import ConfigParser
7 | from unittest.mock import patch, MagicMock, call
8 |
9 | from setuptools.dist import Distribution
10 |
11 | from green import command
12 | from green.config import StoreOpt
13 |
14 |
15 | class TestCommand(unittest.TestCase):
16 | @contextlib.contextmanager
17 | def environ(self, setup_cfg=None, *args, **variables):
18 | args = ["green"] + list(args)
19 |
20 | if setup_cfg is not None:
21 | parser = ConfigParser()
22 | parser.add_section("green")
23 | for k, v in setup_cfg.items():
24 | parser.set("green", k, str(v))
25 | with open("setup.cfg", "w") as f:
26 | parser.write(f)
27 |
28 |         try:
29 |             yield Distribution(
30 |                 {"script_name": "setup.py", "script_args": args or ["green"]}
31 |             )
32 |         finally:
33 |             if os.path.isfile("setup.cfg"):
34 |                 os.remove("setup.cfg")
35 |
36 | def test_get_user_options(self):
37 | # Check the user options contain some of the
38 | # actual command line options
39 | options = command.get_user_options()
40 |
41 | self.assertIn(
42 | ("options", None, "Output all options. Used by bash- and zsh-completion."),
43 | options,
44 | )
45 |
46 | self.assertIn(
47 | ("file-pattern=", "p", "Pattern to match test files. Default is test*.py"),
48 | options,
49 | )
50 |
51 | def test_get_user_options_setup_py(self):
52 | """
53 | When get_user_options() is called by 'python setup.py --help-commands',
54 | it returns [] early and doesn't crash.
55 | """
56 | sys.argv.append("--help-commands")
57 | self.addCleanup(lambda: sys.argv.pop())
58 |
59 | self.assertEqual(command.get_user_options(), [])
60 |
61 | @patch("green.command.parseArguments")
62 | def test_get_user_options_dynamic(self, parseArguments):
63 | # Check the user options are generated after
64 | # the command line options created in green.cmdline.parseArguments
65 | store_opt = StoreOpt()
66 | argparser = argparse.ArgumentParser()
67 | store_opt(argparser.add_argument("-s", "--something", help="Something"))
68 | store_opt(argparser.add_argument("--else", help="Else"))
69 | store_opt(
70 | argparser.add_argument("-a", "--again", action="store_true", help="Again")
71 | )
72 |
73 | args = argparser.parse_args([])
74 | args.parser = argparser
75 | args.store_opt = store_opt
76 | parseArguments.return_value = args
77 |
78 | options = command.get_user_options()
79 |
80 | self.assertEqual(
81 | options,
82 | [
83 | ("something=", "s", "Something"),
84 | ("else=", None, "Else"),
85 | ("again", "a", "Again"),
86 | ],
87 | )
88 |
89 | def test_initialize_options(self):
90 | d = Distribution({"script_name": "setup.py", "script_args": ["green"]})
91 |
92 | cmd = command.green(d)
93 | for attr in ["completion_file", "clear_omit", "debug", "processes"]:
94 | self.assertTrue(hasattr(cmd, attr), attr)
95 |
96 | @patch("green.command.main", return_value=125)
97 | def test_run_exits(self, main):
98 | d = Distribution({"script_name": "setup.py", "script_args": ["green"]})
99 |
100 | cmd = command.green(d)
101 | with self.assertRaises(SystemExit) as se:
102 | cmd.run()
103 | self.assertEqual(se.exception.code, 125)
104 |
--------------------------------------------------------------------------------
/green/test/test_djangorunner.py:
--------------------------------------------------------------------------------
1 | from argparse import Namespace
2 | from argparse import ArgumentParser
3 | from io import StringIO
4 | import sys
5 | import unittest
6 | from unittest.mock import MagicMock, patch
7 |
8 | from green import djangorunner
9 | from green.config import mergeConfig
10 |
11 |
12 | class TestDjangoMissing(unittest.TestCase):
13 | def test_importError(self):
14 | """
15 | Raises ImportError if Django is not available
16 | """
17 | self.assertRaises(ImportError, djangorunner.django_missing)
18 |
19 |
20 | class TestDjangoRunner(unittest.TestCase):
21 | def setUp(self):
22 | try:
23 | djangorunner.DjangoRunner()
24 | except ImportError:
25 | raise unittest.SkipTest("Django is not installed")
26 | saved_stdout = sys.stdout
27 | self.stream = StringIO()
28 | sys.stdout = self.stream
29 | self.addCleanup(setattr, sys, "stdout", saved_stdout)
30 |
31 | def test_run_testsWithLabel(self):
32 | """
33 | Labeled tests run okay
34 | """
35 | dr = djangorunner.DjangoRunner()
36 | dr.setup_test_environment = MagicMock()
37 | dr.setup_databases = MagicMock()
38 | dr.teardown_databases = MagicMock()
39 | dr.teardown_test_environment = MagicMock()
40 |
41 | dr.run_tests(("green.test.test_version",), testing=True)
42 |
43 | self.assertIn("OK", self.stream.getvalue())
44 |
45 | def test_run_testsWithoutLabel(self):
46 | """
47 | Not passing in a label causes the targets to be ['.']
48 | """
49 | dr = djangorunner.DjangoRunner()
50 | dr.setup_test_environment = MagicMock()
51 | dr.setup_databases = MagicMock()
52 | dr.teardown_databases = MagicMock()
53 | dr.teardown_test_environment = MagicMock()
54 |
55 | with patch.object(dr.loader, "loadTargets") as mock_loadTargets:
56 | dr.run_tests((), testing=True)
57 |
58 | mock_loadTargets.assert_called_with(["."])
59 | self.assertIn("No Tests Found", self.stream.getvalue())
60 |
61 | def test_run_testsWithBadInput(self):
62 | """
63 | Bad input causes a ValueError to be raised
64 | """
65 | dr = djangorunner.DjangoRunner()
66 | dr.setup_test_environment = MagicMock()
67 | dr.setup_databases = MagicMock()
68 |
69 | self.assertRaises(ValueError, dr.run_tests, None, True)
70 |
71 | @patch("green.djangorunner.GreenTestSuite")
72 | @patch("green.djangorunner.run")
73 | def test_run_noTests(self, mock_run, mock_GreenTestSuite):
74 | """
75 | If no tests are found, we create an empty test suite and run it.
76 | """
77 | dr = djangorunner.DjangoRunner()
78 |
79 | dr.setup_test_environment = MagicMock()
80 | dr.setup_databases = MagicMock()
81 | dr.teardown_databases = MagicMock()
82 | dr.teardown_test_environment = MagicMock()
83 |
84 | mock_GreenTestSuite.return_value = 123
85 |
86 | with patch.object(dr.loader, "loadTargets", return_value=None):
87 | dr.run_tests((), testing=True)
88 |
89 | self.assertEqual(mock_run.call_args[0][0], 123)
90 |
91 | @patch("green.djangorunner.mergeConfig")
92 | @patch("green.djangorunner.GreenTestSuite")
93 | @patch("green.djangorunner.run")
94 | def test_run_coverage(self, mock_run, mock_GreenTestSuite, mock_mergeConfig):
95 | """
96 |         When args.run_coverage is set, the coverage code paths are exercised.
97 | """
98 | args = mergeConfig(Namespace())
99 | args.run_coverage = True
100 | args.cov = MagicMock()
101 | mock_mergeConfig.return_value = args
102 | dr = djangorunner.DjangoRunner()
103 |
104 | dr.setup_test_environment = MagicMock()
105 | dr.setup_databases = MagicMock()
106 | dr.teardown_databases = MagicMock()
107 | dr.teardown_test_environment = MagicMock()
108 |
109 | mock_GreenTestSuite.return_value = 123
110 |
111 | with patch.object(dr.loader, "loadTargets", return_value=None):
112 | dr.run_tests((), testing=True)
113 |
114 | self.assertEqual(mock_run.call_args[0][0], 123)
115 |
116 | def test_check_verbosity_argument_recognised(self):
117 | """
118 | Ensure that the python manage.py test command
119 | recognises the --green-verbosity flag
120 | """
121 | dr = djangorunner.DjangoRunner()
122 | dr.setup_test_environment = MagicMock()
123 | dr.setup_databases = MagicMock()
124 | dr.teardown_databases = MagicMock()
125 | dr.teardown_test_environment = MagicMock()
126 | from django.core.management.commands.test import Command as TestCommand
127 |
128 | test_command = TestCommand()
129 | test_command.test_runner = "green.djangorunner.DjangoRunner"
130 | parser = ArgumentParser()
131 | test_command.add_arguments(parser)
132 |         args = parser.parse_args([])
133 | self.assertIn("verbose", args)
134 |
135 | def test_check_default_verbosity(self):
136 | """
137 | If no verbosity is passed, default value is set
138 | """
139 | dr = djangorunner.DjangoRunner()
140 | dr.setup_test_environment = MagicMock()
141 | dr.setup_databases = MagicMock()
142 | dr.teardown_databases = MagicMock()
143 | dr.teardown_test_environment = MagicMock()
144 | from django.core.management.commands.test import Command as TestCommand
145 |
146 | test_command = TestCommand()
147 | test_command.test_runner = "green.djangorunner.DjangoRunner"
148 | parser = ArgumentParser()
149 | test_command.add_arguments(parser)
150 |         args = parser.parse_args([])
151 | self.assertEqual(args.verbose, -1)
152 |
153 | def test_run_with_verbosity_flag(self):
154 | """
155 | Tests should run fine if verbosity is passed
156 | through CLI flag
157 | """
158 | dr = djangorunner.DjangoRunner()
159 | dr.setup_test_environment = MagicMock()
160 | dr.setup_databases = MagicMock()
161 | dr.teardown_databases = MagicMock()
162 | dr.teardown_test_environment = MagicMock()
163 | dr.verbose = 2
164 | saved_loadTargets = dr.loader.loadTargets
165 | dr.loader.loadTargets = MagicMock()
166 | self.addCleanup(setattr, dr.loader, "loadTargets", saved_loadTargets)
167 |         self.assertEqual(dr.run_tests((), testing=True), 0)
168 |
--------------------------------------------------------------------------------
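DjangoRunner is hooked into a Django project through the standard TEST_RUNNER
setting, after which `python manage.py test` flows through the run_tests()
method exercised above (and gains the --green-verbosity flag these tests
check). A sketch of the settings.py line:

    # settings.py
    TEST_RUNNER = "green.djangorunner.DjangoRunner"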
/green/test/test_integration.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import multiprocessing
3 | import os
4 | import pathlib
5 | import shutil
6 | import subprocess
7 | import sys
8 | import tempfile
9 | from textwrap import dedent
10 | import unittest
11 |
12 |
13 | class TestFinalizer(unittest.TestCase):
14 | def setUp(self) -> None:
15 | self.tmpdir = pathlib.Path(tempfile.mkdtemp())
16 |
17 | def tearDown(self) -> None:
18 | shutil.rmtree(self.tmpdir, ignore_errors=True)
19 |
20 | def test_finalizer(self) -> None:
21 | """
22 | Test that the finalizer works on Python 3.8+.
23 | """
24 | sub_tmpdir = pathlib.Path(tempfile.mkdtemp(dir=self.tmpdir))
25 | for i in range(multiprocessing.cpu_count() * 2):
26 |             finalizer_path = sub_tmpdir / f"test_finalizer{i}.py"
27 | finalizer_path.write_text(
28 | dedent(
29 | f"""
30 | import unittest
31 | class Pass{i}(unittest.TestCase):
32 | def test_pass{i}(self):
33 | pass
34 | def msg():
35 | print("finalizer worked")
36 | """
37 | )
38 | )
39 | args = [
40 | sys.executable,
41 | "-m",
42 | "green.cmdline",
43 |             "--finalizer=test_finalizer0.msg",
44 | "--maxtasksperchild=1",
45 | ]
46 | pythonpath = str(pathlib.Path(__file__).parent.parent.parent)
47 |
48 | env = copy.deepcopy(os.environ)
49 | env["PYTHONPATH"] = pythonpath
50 |
51 | output = subprocess.run(
52 | args,
53 | cwd=str(sub_tmpdir),
54 | stdout=subprocess.PIPE,
55 | stderr=subprocess.STDOUT,
56 | env=env,
57 | timeout=10,
58 | encoding="utf-8",
59 | check=True,
60 | ).stdout
61 | self.assertIn("finalizer worked", output)
62 |
--------------------------------------------------------------------------------
/green/test/test_junit.py:
--------------------------------------------------------------------------------
1 | from green.config import get_default_args
2 | from green.output import GreenStream
3 | from green.junit import JUnitXML, JUnitDialect, Verdict
4 | from green.result import GreenTestResult, ProtoTest, proto_error
5 |
6 | from io import StringIO
7 |
8 | from sys import exc_info
9 |
10 | from unittest import TestCase
11 |
12 | from xml.etree.ElementTree import fromstring as from_xml
13 |
14 |
15 | def test(module, class_name, method_name):
16 | test = ProtoTest()
17 | test.module = module
18 | test.class_name = class_name
19 | test.method_name = method_name
20 | return test
21 |
22 |
23 | class JUnitXMLReportIsGenerated(TestCase):
24 | def setUp(self):
25 | self._destination = StringIO()
26 | self._test_results = GreenTestResult(
27 | get_default_args(), GreenStream(StringIO())
28 | )
29 | self._test_results.timeTaken = 4.06
30 | self._adapter = JUnitXML()
31 |
32 | self._test = ProtoTest()
33 | self._test.module = "my_module"
34 | self._test.class_name = "MyClass"
35 | self._test.method_name = "my_method"
36 | self._test.test_time = "0.005"
37 |
38 | def test_when_the_results_contain_only_one_successful_test(self):
39 | self._test_results.addSuccess(self._test)
40 |
41 | self._adapter.save_as(self._test_results, self._destination)
42 |
43 | self._assert_report_is(
44 | {"my_module.MyClass": {"tests": {"my_method": {"verdict": Verdict.PASSED}}}}
45 | )
46 |
47 | def test_when_the_results_contain_tests_with_various_verdict(self):
48 | self._test_results.addSuccess(test("my.module", "MyClass", "test_method1"))
49 | self._test_results.addSuccess(test("my.module", "MyClass", "test_method2"))
50 | self._record_failure(test("my.module", "MyClass", "test_method3"))
51 | self._record_failure(test("my.module", "MyClass", "test_method4"))
52 | self._record_error(test("my.module", "MyClass", "test_method5"))
53 | self._test_results.addSkip(
54 | test("my.module", "MyClass", "test_method6"), "Take too long"
55 | )
56 |
57 | self._adapter.save_as(self._test_results, self._destination)
58 |
59 | self._assert_report_is(
60 | {
61 | "my.module.MyClass": {
62 | "#tests": "6",
63 | "#failures": "2",
64 | "#errors": "1",
65 | "#skipped": "1",
66 | "tests": {
67 | "test_method1": {"verdict": Verdict.PASSED},
68 | "test_method2": {"verdict": Verdict.PASSED},
69 | "test_method3": {"verdict": Verdict.FAILED},
70 | "test_method4": {"verdict": Verdict.FAILED},
71 | "test_method5": {"verdict": Verdict.ERROR},
72 | "test_method6": {"verdict": Verdict.SKIPPED},
73 | },
74 | },
75 | }
76 | )
77 |
78 | def _record_failure(self, test):
79 | try:
80 | raise ValueError("Wrong value")
81 |         except ValueError:
82 | error = proto_error(exc_info())
83 | self._test_results.addFailure(test, error)
84 |
85 | def _record_error(self, test):
86 | try:
87 | raise ValueError("Wrong value")
88 |         except ValueError:
89 | error = proto_error(exc_info())
90 | self._test_results.addError(test, error)
91 |
92 | def test_when_the_results_contain_only_one_test_with_output(self):
93 | output = "This is the output of the test"
94 | self._test_results.recordStdout(self._test, output)
95 | self._test_results.addSuccess(self._test)
96 |
97 | self._adapter.save_as(self._test_results, self._destination)
98 |
99 | self._assert_report_is(
100 | {
101 | "my_module.MyClass": {
102 | "tests": {
103 | "my_method": {"verdict": Verdict.PASSED, "stdout": output}
104 | }
105 | }
106 | }
107 | )
108 |
109 | def test_when_the_results_contain_only_one_test_with_errput(self):
110 | errput = "This is the errput of the test"
111 | self._test_results.recordStderr(self._test, errput)
112 | self._test_results.addSuccess(self._test)
113 |
114 | self._adapter.save_as(self._test_results, self._destination)
115 |
116 | self._assert_report_is(
117 | {
118 | "my_module.MyClass": {
119 | "tests": {
120 | "my_method": {"verdict": Verdict.PASSED, "stderr": errput}
121 | }
122 | }
123 | }
124 | )
125 |
126 | def test_when_the_results_contain_only_one_failed_test(self):
127 | self._record_failure(test("my_module", "MyClass", "my_method"))
128 |
129 | self._adapter.save_as(self._test_results, self._destination)
130 |
131 | self._assert_report_is(
132 | {"my_module.MyClass": {"tests": {"my_method": {"verdict": Verdict.FAILED}}}}
133 | )
134 |
135 | def test_when_the_results_contain_only_one_erroneous_test(self):
136 | self._record_error(test("my_module", "MyClass", "my_method"))
137 |
138 | self._adapter.save_as(self._test_results, self._destination)
139 |
140 | self._assert_report_is(
141 | {"my_module.MyClass": {"tests": {"my_method": {"verdict": Verdict.ERROR}}}}
142 | )
143 |
144 | def test_when_the_results_contain_only_one_skipped_test(self):
145 | self._test_results.addSkip(self._test, "reason for skipping")
146 |
147 | self._adapter.save_as(self._test_results, self._destination)
148 |
149 | self._assert_report_is(
150 | {
151 | "my_module.MyClass": {
152 | "tests": {"my_method": {"verdict": Verdict.SKIPPED}}
153 | }
154 | }
155 | )
156 |
157 | def test_convert_test_will_record_time_for_test(self):
158 | xml_test_result = self._adapter._convert_test(
159 | self._test_results, Verdict.PASSED, self._test
160 | )
161 |
162 | self.assertEqual(
163 | xml_test_result.attrib,
164 | {"name": "my_method", "classname": "MyClass", "time": "0.005"},
165 | )
166 |
167 | def test_suite_time(self):
168 | test1 = test("my.module", "MyClass", "test_method1")
169 | test1.test_time = "0.01"
170 | test2 = test("my.module", "MyClass", "test_method2")
171 | test2.test_time = "0.5"
172 | test3 = test("my.module", "MyClass", "test_method3")
173 | test3.test_time = "1.0"
174 |
175 | suite_time = self._adapter._suite_time([(2, test1), (0, test2), (0, test3)])
176 |
177 | self.assertEqual(suite_time, 1.51)
178 |
179 | def _assert_report_is(self, report):
180 | """
181 | Verify the structure of the generated XML text against the given
182 | 'report' structure.
183 | """
184 | root = from_xml(self._destination.getvalue())
185 | test_suites = root.findall(JUnitDialect.TEST_SUITE)
186 | self.assertEqual(len(report), len(test_suites))
187 | for each_suite in test_suites:
188 | self._assert_suite(report, each_suite)
189 |
190 | def _assert_suite(self, expected_report, suite):
191 | """
192 | Verify that the given 'suite' matches one in the expected test report.
193 | """
194 | name = suite.get(JUnitDialect.NAME)
195 | self.assertIsNotNone(name)
196 | self.assertIn(name, expected_report)
197 | expected_suite = expected_report[name]
198 |
199 | # Check the count of tests
200 | if "#tests" in expected_suite:
201 | self.assertEqual(
202 | expected_suite["#tests"], suite.get(JUnitDialect.TEST_COUNT)
203 | )
204 |
205 | # Check the count of failures
206 | if "#failures" in expected_suite:
207 | self.assertEqual(
208 | expected_suite["#failures"], suite.get(JUnitDialect.FAILURE_COUNT)
209 | )
210 |
211 | # Check the count of errors
212 | if "#errors" in expected_suite:
213 | self.assertEqual(
214 | expected_suite["#errors"], suite.get(JUnitDialect.ERROR_COUNT)
215 | )
216 |
217 | # Check the count of skipped tests
218 | if "#skipped" in expected_suite:
219 | self.assertEqual(
220 | expected_suite["#skipped"], suite.get(JUnitDialect.SKIPPED_COUNT)
221 | )
222 |
223 | # Check the time of each test
224 | if "time" in expected_suite:
225 | self.assertEqual(expected_suite["time"], suite.get(JUnitDialect.TEST_TIME))
226 |
227 | # Check the time of total test run
228 | if "totaltesttime" in expected_suite:
229 | self.assertEqual(
230 | expected_suite["totaltesttime"], suite.get(JUnitDialect.TEST_TIME)
231 | )
232 |
233 | # Check individual test reports
234 | self.assertEqual(len(expected_suite["tests"]), len(suite))
235 | for each_test in suite:
236 | self._assert_test(expected_suite["tests"], each_test)
237 |
238 | def _assert_test(self, expected_suite, test):
239 | """
240 | Verify that the given 'test' matches one in the expected test suite.
241 | """
242 | name = test.get(JUnitDialect.NAME)
243 | self.assertIsNotNone(test)
244 | self.assertIn(name, expected_suite)
245 | expected_test = expected_suite[name]
246 |
247 |         # Check each expected property of the test.
248 |
249 | for key, expected in expected_test.items():
250 | if key == "verdict":
251 | self._assert_verdict(expected, test)
252 |
253 | elif key == "stdout":
254 | system_out = test.find(JUnitDialect.SYSTEM_OUT)
255 | self.assertIsNotNone(system_out)
256 | self.assertEqual(expected, system_out.text)
257 |
258 | elif key == "stderr":
259 | system_err = test.find(JUnitDialect.SYSTEM_ERR)
260 | self.assertIsNotNone(system_err)
261 | self.assertEqual(expected, system_err.text)
262 |
263 | def _assert_verdict(self, expected_verdict, test):
264 | failure = test.find(JUnitDialect.FAILURE)
265 | error = test.find(JUnitDialect.ERROR)
266 | skipped = test.find(JUnitDialect.SKIPPED)
267 |
268 | if expected_verdict == Verdict.FAILED:
269 | self.assertIsNotNone(failure)
270 | self.assertIsNone(error)
271 | self.assertIsNone(skipped)
272 |
273 | elif expected_verdict == Verdict.ERROR:
274 | self.assertIsNone(failure)
275 | self.assertIsNotNone(error)
276 | self.assertIsNone(skipped)
277 |
278 | elif expected_verdict == Verdict.SKIPPED:
279 | self.assertIsNone(failure)
280 | self.assertIsNone(error)
281 | self.assertIsNotNone(skipped)
282 |
283 | else: # Verdict == PASSED
284 | self.assertIsNone(failure)
285 | self.assertIsNone(error)
286 | self.assertIsNone(skipped)
287 |
--------------------------------------------------------------------------------
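The adapter under test needs only two steps: record outcomes on a
GreenTestResult, then ask JUnitXML to serialize it. A condensed sketch built
from the same calls the tests above make:

    from io import StringIO

    from green.config import get_default_args
    from green.junit import JUnitXML
    from green.output import GreenStream
    from green.result import GreenTestResult, ProtoTest

    results = GreenTestResult(get_default_args(), GreenStream(StringIO()))
    results.timeTaken = 0.0  # set explicitly, as setUp() above does

    case = ProtoTest()
    case.module, case.class_name, case.method_name = "my_module", "MyClass", "my_method"
    case.test_time = "0.005"
    results.addSuccess(case)

    destination = StringIO()
    JUnitXML().save_as(results, destination)
    print(destination.getvalue())  # the JUnit XML document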
/green/test/test_load_tests.py:
--------------------------------------------------------------------------------
1 | import os
2 | from queue import Queue
3 | import shutil
4 | import tempfile
5 | import unittest
6 | import textwrap
7 |
8 | from green.loader import GreenTestLoader
9 | from green.process import poolRunner
10 |
11 |
12 | class TestLoadTests(unittest.TestCase):
13 | @classmethod
14 | def setUpClass(cls):
15 | cls.startdir = os.getcwd()
16 | cls.container_dir = tempfile.mkdtemp()
17 |
18 | @classmethod
19 | def tearDownClass(cls):
20 | os.chdir(cls.startdir)
21 |         shutil.rmtree(cls.container_dir, ignore_errors=True)
22 |
23 | def setUp(self):
24 | self.tmpdir = tempfile.mkdtemp(dir=self.container_dir)
25 | self.sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
26 | self.basename = os.path.basename(self.sub_tmpdir)
27 | os.chdir(self.tmpdir)
28 |
29 | with open(os.path.join(self.basename, "__init__.py"), "w") as f:
30 | f.write("\n")
31 |
32 | def tearDown(self):
33 | os.chdir(self.container_dir)
34 | shutil.rmtree(self.tmpdir)
35 |
36 | def test_monkey_patch(self):
37 | """
38 | Check that monkey-patching a TestCase in the load_tests function
39 | actually changes the referenced class.
40 | """
41 |
42 | with open(
43 | os.path.join(self.basename, "test_load_tests_monkeypatch.py"), "w"
44 | ) as f:
45 | f.write(
46 | textwrap.dedent(
47 | """
48 | import unittest
49 | class A(unittest.TestCase):
50 | passing = False
51 | def test_that_will_fail(self):
52 | self.assertTrue(self.passing)
53 |
54 | def load_tests(loader, tests, pattern):
55 | A.passing = True
56 | return tests
57 | """
58 | )
59 | )
60 |
61 | module_name = self.basename + ".test_load_tests_monkeypatch"
62 | result = Queue()
63 | poolRunner(module_name, result, 0)
64 | result.get()
65 |
66 | proto_test_result = result.get()
67 | self.assertEqual(len(proto_test_result.passing), 1)
68 | self.assertEqual(len(proto_test_result.failures), 0)
69 | self.assertEqual(len(proto_test_result.errors), 0)
70 | self.assertEqual(proto_test_result.passing[0].class_name, "A")
71 |
72 | def test_foreign_suite(self):
73 | """
74 | Load tests does not reuse the tests and instead returns
75 | another TestSuite (or maybe not even a unittest.TestSuite).
76 | """
77 |
78 | with open(
79 | os.path.join(self.basename, "test_load_keys_foreign_suite.py"), "w"
80 | ) as f:
81 | f.write(
82 | textwrap.dedent(
83 | """
84 | import unittest
85 | class A(unittest.TestCase):
86 | def test_that_will_fail(self):
87 | self.fail()
88 |
89 | def load_tests(loader, tests, pattern):
90 | class B(unittest.TestCase):
91 | def test_that_succeeds(self):
92 | pass
93 | suite = unittest.TestSuite()
94 | suite.addTests(loader.loadTestsFromTestCase(B))
95 | return suite
96 | """
97 | )
98 | )
99 |
100 | module_name = self.basename + ".test_load_keys_foreign_suite"
101 | result = Queue()
102 | poolRunner(module_name, result, 0)
103 | result.get()
104 |
105 | proto_test_result = result.get()
106 | self.assertEqual(len(proto_test_result.passing), 1)
107 | self.assertEqual(len(proto_test_result.errors), 0)
108 | self.assertEqual(len(proto_test_result.failures), 0)
109 | self.assertEqual(proto_test_result.passing[0].class_name, "B")
110 |
111 | def test_none_cancels(self):
112 | """
113 |         Check that load_tests returning None runs no tests and is reported as an error.
114 | """
115 | with open(
116 | os.path.join(self.basename, "test_load_keys_none_cancels.py"), "w"
117 | ) as fh:
118 | fh.write(
119 | textwrap.dedent(
120 | """
121 | import unittest
122 | class A(unittest.TestCase):
123 | def test_that_will_fail(self):
124 | self.fail()
125 |
126 | def load_tests(loader, tests, pattern):
127 | return None
128 | """
129 | )
130 | )
131 |
132 | module_name = self.basename + ".test_load_keys_none_cancels"
133 | result = Queue()
134 | poolRunner(module_name, result, 0)
135 | result.get()
136 |
137 | proto_test_result = result.get()
138 | self.assertEqual(len(proto_test_result.errors), 1)
139 | self.assertEqual(len(proto_test_result.passing), 0)
140 | self.assertEqual(len(proto_test_result.failures), 0)
141 |
142 | def test_additive(self):
143 | """
144 | Check that adding tests to the `tests` argument of the load_tests
145 | function add more tests to be run.
146 | """
147 |
148 | with open(os.path.join(self.basename, "test_load_keys_additive.py"), "w") as fh:
149 | fh.write(
150 | textwrap.dedent(
151 | """
152 | import unittest
153 | class A(unittest.TestCase):
154 | def test_that_will_succeed(self):
155 | pass
156 |
157 | class B(unittest.TestCase):
158 | def test_clear_sight(self):
159 | self.fail()
160 |
161 | def _hidden_test(self):
162 | pass
163 |
164 | def load_tests(loader, tests, pattern):
165 | setattr(B, 'test_that_will_succeed', B._hidden_test)
166 | tests.addTests(loader.loadTestsFromTestCase(B))
167 | return tests
168 | """
169 | )
170 | )
171 |
172 | loader = GreenTestLoader()
173 | test_suite = loader.loadTargets(self.basename + "." + "test_load_keys_additive")
174 | self.assertEqual(len(test_suite._tests), 4) # a + b1 + b1 + b2
175 |
--------------------------------------------------------------------------------
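All four tests above exercise the standard unittest load_tests protocol, which
green's loader honors: when a module defines load_tests(loader, tests,
pattern), the suite it returns is what actually runs. A minimal sketch of the
hook in a test module:

    import unittest

    class TestSomething(unittest.TestCase):
        def test_pass(self):
            pass

    def load_tests(loader, tests, pattern):
        # `tests` holds the tests discovered in this module. Return it
        # unchanged to keep them, return a new suite to replace them
        # (test_foreign_suite above), add to it to run extras
        # (test_additive above), or return None to have the load
        # reported as an error (test_none_cancels above).
        return tests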
/green/test/test_output.py:
--------------------------------------------------------------------------------
1 | from io import StringIO
2 | import platform
3 | import unittest
4 | from unittest.mock import MagicMock, patch
5 |
6 | from green.output import Colors, GreenStream, debug
7 | import green.output
8 |
9 |
10 | class TestDebug(unittest.TestCase):
11 | def testDebug(self):
12 | """
13 | debug() works as we expect
14 | """
15 | orig_logging = green.output.logging.debug
16 | s = StringIO()
17 | green.output.logging.debug = s.write
18 | green.output.debug_level = 0
19 | debug("Nothing should happen", level=1)
20 | self.assertEqual("", s.getvalue())
21 | green.output.debug_level = 2
22 | debug("Something should happen", level=1)
23 | self.assertNotEqual("", s.getvalue())
24 |
25 | green.output.logging.debug = orig_logging
26 |
27 |
28 | class TestColors(unittest.TestCase):
29 | def testTermcolorTrue(self):
30 | """
31 | termcolor=True results in terminal output
32 | """
33 | c = Colors(termcolor=True)
34 | self.assertTrue(c.termcolor)
35 | self.assertTrue(len(c.bold("")) > 0)
36 |
37 | def testTermcolorFalse(self):
38 | """
39 | termcolor=False results in no terminal output
40 | """
41 | c = Colors(termcolor=False)
42 | self.assertFalse(c.termcolor)
43 | self.assertFalse(len(c.bold("")) > 0)
44 |
45 | def testTermcolorAuto(self):
46 | """
47 |         termcolor=None causes termcolor to be autodetected and set to True or False
48 | """
49 | c = Colors()
50 | self.assertTrue(c.termcolor in [True, False])
51 |
52 | def testUp(self):
53 | """
54 | calling up gives us a non-blank string
55 | """
56 | c = Colors()
57 | up = c.up()
58 | self.assertEqual(type(up), str)
59 | self.assertNotEqual(up, "")
60 |
61 | def testTerminalColorsDoNotCrash(self):
62 | """
63 | terminal colors don't crash, and they output something
64 | """
65 | c = Colors(termcolor=True)
66 | for func in [
67 | c.bold,
68 | c.blue,
69 | c.green,
70 | c.red,
71 | c.yellow,
72 | c.passing,
73 | c.failing,
74 | c.error,
75 | c.skipped,
76 | c.unexpectedSuccess,
77 | c.expectedFailure,
78 | c.moduleName,
79 | ]:
80 | self.assertTrue(len(func("")) > 0)
81 | # c.className is a special case
82 | c.className("")
83 |
84 |
85 | class TestGreenStream(unittest.TestCase):
86 | def testFormatText(self):
87 | """
88 | formatText returns the input text by default
89 | """
90 | s = StringIO()
91 | gs = GreenStream(s)
92 | msg = "Unindented line.\n Indented.\n Double-indented.\n\n\n"
93 | self.assertEqual(gs.formatText(msg), str(msg))
94 |
95 | def testBadStringType(self):
96 | """
97 | passing the wrong stream type to GreenStream gets auto-converted
98 | """
99 | s = StringIO()
100 | gs = GreenStream(s)
101 | msg = "some string"
102 | gs.write(bytes(msg, "utf-8"))
103 | self.assertEqual(s.getvalue(), msg)
104 |
105 | def testDisableWindowsTrue(self):
106 | """
107 | disable_windows=True: ANSI color codes are present in the stream
108 | """
109 | c = Colors(termcolor=True)
110 | s = StringIO()
111 | gs = GreenStream(s, disable_windows=True)
112 | msg = c.red("some colored string")
113 | gs.write(msg)
114 | self.assertEqual(len(gs.stream.getvalue()), len(msg))
115 |
116 | @unittest.skipIf(
117 | platform.system() != "Windows",
118 | "Colorama won't strip ANSI unless running on Windows",
119 | )
120 | def testDisableWindowsFalse(self):
121 | """
122 | disable_windows=False: Colorama strips ANSI color codes from the stream
123 | """
124 | c = Colors(termcolor=True)
125 | s = StringIO()
126 | gs = GreenStream(s, override_appveyor=True, disable_windows=False)
127 | colored_msg = c.red("a")
128 | gs.write(colored_msg)
129 | import colorama
130 |
131 | self.assertTrue(issubclass(type(gs.stream), colorama.ansitowin32.StreamWrapper))
132 |
133 | @patch("green.output.unidecode")
134 | def testUnidecodeAppveyor(self, mock_unidecode):
135 | """
136 | When I'm on Appveyor, I run text through Unidecode
137 | """
138 | mock_unidecode.return_value = "something"
139 | s = StringIO()
140 | gs = GreenStream(s, override_appveyor=True)
141 | gs.write("something")
142 | self.assertTrue(mock_unidecode.called)
143 |
144 | @patch("green.output.unidecode")
145 | def testUnidecodeDisabled(self, mock_unidecode):
146 | """
147 | Unidecode can be disabled
148 | """
149 | mock_unidecode.return_value = "something"
150 | s = StringIO()
151 | gs = GreenStream(s, override_appveyor=True, disable_unidecode=True)
152 | gs.write("something")
153 | self.assertFalse(mock_unidecode.called)
154 |
155 | def testWritelines(self):
156 | """
157 | Compatibility function writelines(lines) repeatedly calls write()
158 | """
159 | s = StringIO()
160 | gs = GreenStream(s)
161 | gs.write = MagicMock()
162 | gs.writelines(["one", "two", "three"])
163 | self.assertEqual(len(gs.write.mock_calls), 3)
164 |
165 | def testCoverageDetection(self):
166 | """
167 | write() detects a coverage percentage flying by
168 | """
169 | s = StringIO()
170 | gs = GreenStream(s)
171 | gs.write(
172 | "\n---------------------------------------------------\nTOTAL 896 367 59%\nRan"
173 | )
174 | self.assertEqual(gs.coverage_percent, 59)
175 |
176 | def testEncodingMirrors(self):
177 | """
178 | The encoding of a stream gets mirrored through
179 | """
180 | s = StringIO()
181 | encoding = "aoeu"
182 | try:
183 | encoding = s.encoding
184 |         except AttributeError:
185 | s.encoding = encoding
186 | gs = GreenStream(s)
187 | self.assertEqual(gs.encoding, encoding)
188 |
189 | def testEncodingDefault(self):
190 | """
191 | The encoding defaults to 'UTF-8' if we can't find an encoding.
192 | """
193 | s = MagicMock(spec=1)
194 | gs = GreenStream(s)
195 | self.assertEqual(gs.encoding, "UTF-8")
196 |
--------------------------------------------------------------------------------
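GreenStream wraps any file-like object and, as the tests above demonstrate,
smooths over several stream quirks. A short usage sketch:

    from io import StringIO

    from green.output import GreenStream

    backing = StringIO()
    stream = GreenStream(backing)
    stream.write(b"decoded bytes\n")       # bytes are auto-decoded to str
    stream.writelines(["one\n", "two\n"])  # delegates to write() per line
    print(backing.getvalue())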
/green/test/test_process.py:
--------------------------------------------------------------------------------
1 | from ctypes import c_double
2 | import os
3 | import multiprocessing
4 | from queue import Queue, Empty
5 | import shutil
6 | import tempfile
7 | from textwrap import dedent
8 | import unittest
9 | from unittest.mock import MagicMock
10 |
11 | from green.process import ProcessLogger, poolRunner
12 | from green import process
13 |
14 |
15 | class TestProcessLogger(unittest.TestCase):
16 | def test_callThrough(self):
17 | """
18 | Calls are passed through to the wrapped callable
19 | """
20 | message = "some message"
21 |
22 | def func():
23 | return message
24 |
25 |         logger = ProcessLogger(func)
26 |         self.assertEqual(logger(), message)
27 |
28 | def test_exception(self):
29 | """
30 | A raised exception gets re-raised
31 | """
32 | saved_get_logger = process.multiprocessing.get_logger
33 | mock_logger = MagicMock()
34 |
35 | def addHandler(ignore):
36 | mock_logger.handlers = [MagicMock()]
37 |
38 | mock_logger.addHandler = addHandler
39 | mock_logger.handlers = False
40 | mock_get_logger = MagicMock()
41 | mock_get_logger.return_value = mock_logger
42 | process.multiprocessing.get_logger = mock_get_logger
43 | self.addCleanup(
44 | setattr, process.multiprocessing, "get_logger", saved_get_logger
45 | )
46 |
47 | def func():
48 | raise AttributeError
49 |
50 |         logger = ProcessLogger(func)
51 |         self.assertRaises(AttributeError, logger)
52 | mock_get_logger.assert_any_call()
53 |
54 |
55 | class TestPoolRunner(unittest.TestCase):
56 | # Setup
57 | @classmethod
58 | def setUpClass(cls):
59 | cls.startdir = os.getcwd()
60 | cls.container_dir = tempfile.mkdtemp()
61 |
62 | @classmethod
63 | def tearDownClass(cls):
64 | if os.getcwd() != cls.startdir:
65 | os.chdir(cls.startdir)
66 | cls.startdir = None
67 | shutil.rmtree(cls.container_dir)
68 |
69 | def setUp(self):
70 | os.chdir(self.container_dir)
71 | self.tmpdir = tempfile.mkdtemp(dir=self.container_dir)
72 |
73 | def tearDown(self):
74 | os.chdir(self.container_dir)
75 | shutil.rmtree(self.tmpdir)
76 |
77 | # Tests
78 | def test_normalRun(self):
79 | """
80 | Runs normally
81 | """
82 | saved_coverage = process.coverage
83 | process.coverage = MagicMock()
84 | self.addCleanup(setattr, process, "coverage", saved_coverage)
85 | # Parent directory setup
86 | os.chdir(self.tmpdir)
87 | sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
88 | basename = os.path.basename(sub_tmpdir)
89 | # Child setup
90 | fh = open(os.path.join(basename, "__init__.py"), "w")
91 | fh.write("\n")
92 | fh.close()
93 | fh = open(os.path.join(basename, "test_pool_runner_dotted.py"), "w")
94 | fh.write(
95 | dedent(
96 | """
97 | import unittest
98 | class A(unittest.TestCase):
99 | def testPass(self):
100 | pass
101 | """
102 | )
103 | )
104 | fh.close()
105 | module_name = basename + ".test_pool_runner_dotted.A.testPass"
106 | results = Queue()
107 | poolRunner(module_name, results, 1)
108 | results.get()
109 | result = results.get()
110 | self.assertEqual(len(result.passing), 1)
111 | self.assertGreater(float(result.test_time), 0)
112 |
113 | def test_SyntaxErrorInUnitTest(self):
114 | """
115 | SyntaxError gets reported as an error loading the unit test
116 | """
117 | saved_coverage = process.coverage
118 | process.coverage = MagicMock()
119 | self.addCleanup(setattr, process, "coverage", saved_coverage)
120 | # Parent directory setup
121 | os.chdir(self.tmpdir)
122 | sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
123 | basename = os.path.basename(sub_tmpdir)
124 | # Child setup
125 | fh = open(os.path.join(basename, "__init__.py"), "w")
126 | fh.write("\n")
127 | fh.close()
128 | fh = open(os.path.join(basename, "test_pool_syntax_error.py"), "w")
129 | fh.write("aoeu")
130 | fh.close()
131 | result = Queue()
132 | poolRunner(basename, result, 1)
133 | result.get()
134 | self.assertEqual(len(result.get().errors), 1)
135 |
136 | def test_error(self):
137 | """
138 | Exception raised running unit test is reported as an error
139 | """
140 | # Parent directory setup
141 | os.chdir(self.tmpdir)
142 | sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
143 | basename = os.path.basename(sub_tmpdir)
144 | # Child setup
145 | fh = open(os.path.join(basename, "__init__.py"), "w")
146 | fh.write("\n")
147 | fh.close()
148 | fh = open(os.path.join(basename, "test_pool_runner_dotted_fail.py"), "w")
149 | fh.write(
150 | dedent(
151 | """
152 | import unittest
153 | class A(unittest.TestCase):
154 | def testError(self):
155 | raise AttributeError
156 | """
157 | )
158 | )
159 | fh.close()
160 | module_name = basename + ".test_pool_runner_dotted_fail.A.testError"
161 | result = Queue()
162 | poolRunner(module_name, result)
163 | result.get()
164 | self.assertEqual(len(result.get().errors), 1)
165 |
166 | def test_bad_attr(self):
167 | """
168 | Accessing a bad attribute is only reported once (see #150)
169 | """
170 | # Parent directory setup
171 | os.chdir(self.tmpdir)
172 | sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
173 | basename = os.path.basename(sub_tmpdir)
174 | # Child setup
175 | fh = open(os.path.join(basename, "__init__.py"), "w")
176 | fh.write("\n")
177 | fh.close()
178 | fh = open(os.path.join(basename, "test_pool_runner_bad_attr.py"), "w")
179 | fh.write(
180 | dedent(
181 | """
182 | import unittest
183 | class A(unittest.TestCase):
184 | def testBadAttr(self):
185 | "".garbage
186 | """
187 | )
188 | )
189 | fh.close()
190 | module_name = basename + ".test_pool_runner_bad_attr.A.testBadAttr"
191 | result = Queue()
192 | poolRunner(module_name, result)
193 | result.get_nowait() # should get the target name
194 | result.get_nowait() # should get the result
195 | result.get_nowait() # should get None
196 | # should raise Empty unless the extra result bug is present
197 | self.assertRaises(Empty, result.get_nowait)
198 |
199 | def test_process(self):
200 | """
201 | Avoid FileNotFoundError when using a multiprocessing.Value, fixes #154.
202 | This test never fails, we have to watch the outer stderr to see if we get output like this:
203 |
204 | File "/usr/local/Cellar/python3/3.5.2_3/Frameworks/Python.framework/Versions/3.5/lib/python3.5/multiprocessing/util.py", line 254, in _run_finalizers
205 | finalizer()
206 | File "/usr/local/Cellar/python3/3.5.2_3/Frameworks/Python.framework/Versions/3.5/lib/python3.5/multiprocessing/util.py", line 186, in __call__
207 | res = self._callback(*self._args, **self._kwargs)
208 | File "/usr/local/Cellar/python3/3.5.2_3/Frameworks/Python.framework/Versions/3.5/lib/python3.5/shutil.py", line 465, in rmtree
209 | onerror(os.lstat, path, sys.exc_info())
210 | File "/usr/local/Cellar/python3/3.5.2_3/Frameworks/Python.framework/Versions/3.5/lib/python3.5/shutil.py", line 463, in rmtree
211 | orig_st = os.lstat(path)
212 | FileNotFoundError: [Errno 2] No such file or directory: '/var/folders/8y/cgqfhxyn2fz3r8n627_6dm_m0000gn/T/tmpp3fobx6i/pymp-8hpbali9'
213 |
214 | Newer versions of Python want to do their own cleanup, so let them.
215 | """
216 | val = multiprocessing.Value(c_double, 0)
217 | # The error happens when something tries to clean up a sub-temporary
218 | # directory that they assume will always be there to be cleaned up.
219 |
--------------------------------------------------------------------------------
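As the queue-draining in these tests shows, poolRunner communicates solely
through its result queue: first the target being run, then one
ProtoTestResult, then a None sentinel (test_bad_attr above asserts that
nothing follows it). A sketch of consuming that sequence in-process; the
module name is hypothetical:

    from queue import Queue

    from green.process import poolRunner

    queue = Queue()
    poolRunner("some_package.test_module", queue)  # hypothetical target

    target = queue.get()        # the target name/description
    proto_result = queue.get()  # ProtoTestResult with .passing/.failures/.errors
    sentinel = queue.get()      # None: no more results for this target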
/green/test/test_runner.py:
--------------------------------------------------------------------------------
1 | import copy
2 | from io import StringIO
3 | import os
4 | import pathlib
5 | import platform
6 | import shutil
7 | import signal
8 | import sys
9 | import tempfile
10 | from textwrap import dedent
11 | import unittest
12 | from unittest import mock
13 | import warnings
14 | import weakref
15 |
16 | from green.config import get_default_args
17 | from green.exceptions import InitializerOrFinalizerError
18 | from green.loader import GreenTestLoader
19 | from green.output import GreenStream
20 | from green.runner import InitializerOrFinalizer, run
21 | from green.suite import GreenTestSuite
22 |
23 |
24 | # Detect whether testtools is installed; the testtools-specific tests
25 | # below are skipped when it is not available.
26 | skip_testtools = False
27 | try:
28 |     import testtools  # noqa: F401
29 | except ImportError:
30 |     skip_testtools = True
31 |
32 |
33 | # --- Helper stuff ---
34 |
35 | importable_function_worked = False
36 |
37 |
38 | def _importableFunction():
39 | """
40 | Used by TestInitializerOrFinalizer.test_importable()
41 | """
42 | global importable_function_worked
43 | importable_function_worked = True
44 |
45 |
46 | non_callable = None # Used by TestInitializerOrFinalizer.test_not_callable()
47 |
48 |
49 | def _crashy():
50 | """
51 | Used by TestInitializerOrFinalizer.test_crash()
52 | """
53 | raise AssertionError("Oops! I crashed.")
54 |
55 |
56 | # --- End of helper stuff
57 |
58 |
59 | class TestInitializerOrFinalizer(unittest.TestCase):
60 | def test_blank(self):
61 | """
62 | Given a blank dotted function, calling the initializer/finalizer does
63 | nothing.
64 | """
65 | initializer = InitializerOrFinalizer("")
66 | initializer()
67 |
68 | def test_unimportable(self):
69 | """
70 | Given an unimportable module, an InitializerOrFinalizerError is raised.
71 | """
72 | initializer = InitializerOrFinalizer("garbagejunk.nonexistant")
73 | self.assertRaises(InitializerOrFinalizerError, initializer)
74 |
75 | def test_importable(self):
76 | """
77 | Given an actual importable module and function, the function is run.
78 | """
79 | global importable_function_worked
80 | importable_function_worked = False
81 | InitializerOrFinalizer("green.test.test_runner._importableFunction")()
82 | self.assertTrue(importable_function_worked)
83 |
84 | def test_not_callable(self):
85 | """
86 | An importable, but not-callable-object also raises an
87 | InitializerOrFinalizerError.
88 | """
89 | initializer = InitializerOrFinalizer("green.test.test_runner.non_callable")
90 | self.assertRaises(InitializerOrFinalizerError, initializer)
91 |
92 | def test_crash(self):
93 | """
94 |         An importable, callable object that crashes raises InitializerOrFinalizerError.
95 | """
96 | initializer = InitializerOrFinalizer("green.test.test_runner._crashy")
97 | self.assertRaises(InitializerOrFinalizerError, initializer)
98 |
99 |
100 | class TestRun(unittest.TestCase):
101 | @classmethod
102 | def setUpClass(cls):
103 | cls.startdir = os.getcwd()
104 |
105 | @classmethod
106 | def tearDownClass(cls):
107 | if os.getcwd() != cls.startdir:
108 | os.chdir(cls.startdir)
109 | cls.startdir = None
110 |
111 | def setUp(self):
112 | self.args = copy.deepcopy(get_default_args())
113 | self.stream = StringIO()
114 | self.tmpdir = tempfile.mkdtemp()
115 | self.loader = GreenTestLoader()
116 |
117 | def tearDown(self):
118 | shutil.rmtree(self.tmpdir, ignore_errors=True)
119 | del self.stream
120 |
121 | def test_stdout(self):
122 | """
123 | run() can use sys.stdout as the stream.
124 | """
125 | saved_stdout = sys.stdout
126 | sys.stdout = self.stream
127 | self.addCleanup(setattr, sys, "stdout", saved_stdout)
128 | run(GreenTestSuite(), sys.stdout, args=self.args)
129 | self.assertIn("No Tests Found", self.stream.getvalue())
130 |
131 | def test_green_stream(self):
132 | """
133 | run() can use a GreenStream for output.
134 | """
135 | gs = GreenStream(self.stream)
136 | run(GreenTestSuite(), gs, args=self.args)
137 | self.assertIn("No Tests Found", self.stream.getvalue())
138 |
139 | def test_verbose3(self):
140 | """
141 | verbose=3 causes version output, and an empty test case passes.
142 | """
143 | self.args.verbose = 3
144 | sub_tmpdir = pathlib.Path(tempfile.mkdtemp(dir=self.tmpdir))
145 | content = dedent(
146 | """
147 | import unittest
148 | class Verbose3(unittest.TestCase):
149 | def test01(self):
150 | pass
151 | """
152 | )
153 | (sub_tmpdir / "test_verbose3.py").write_text(content, encoding="utf-8")
154 | os.chdir(sub_tmpdir)
155 | try:
156 | tests = self.loader.loadTargets("test_verbose3")
157 | result = run(tests, self.stream, self.args)
158 | finally:
159 | os.chdir(self.startdir)
160 | self.assertEqual(result.testsRun, 1)
161 | self.assertIn("Green", self.stream.getvalue())
162 | self.assertIn("OK", self.stream.getvalue())
163 |
164 | def test_warnings(self):
165 | """
166 |         The test runner does not generate warnings.
167 | """
168 | self.args.warnings = "always"
169 | sub_tmpdir = pathlib.Path(tempfile.mkdtemp(dir=self.tmpdir))
170 | content = dedent(
171 | """
172 | import unittest
173 | class Warnings(unittest.TestCase):
174 | def test01(self):
175 | pass
176 | """
177 | )
178 | (sub_tmpdir / "test_warnings.py").write_text(content, encoding="utf-8")
179 | os.chdir(sub_tmpdir)
180 | try:
181 | with warnings.catch_warnings(record=True) as recorded:
182 | tests = self.loader.loadTargets("test_warnings")
183 | result = run(tests, self.stream, self.args)
184 | finally:
185 | os.chdir(self.startdir)
186 | self.assertEqual(recorded, [])
187 | self.assertEqual(result.testsRun, 1)
188 | self.assertIn("OK", self.stream.getvalue())
189 |
190 | def test_no_tests_found(self):
191 | """
192 | When we don't find any tests, we say so.
193 | """
194 | result = run(GreenTestSuite(), self.stream, self.args)
195 | self.assertIn("No Tests Found", self.stream.getvalue())
196 | self.assertEqual(result.testsRun, 0)
197 | self.assertEqual(result.wasSuccessful(), False)
198 |
199 | def test_failed_says_so(self):
200 | """
201 |         A failing test case causes the whole run to report 'FAILED'.
202 | """
203 | sub_tmpdir = pathlib.Path(tempfile.mkdtemp(dir=self.tmpdir))
204 | content = dedent(
205 | """
206 | import unittest
207 | class Failed(unittest.TestCase):
208 | def test01(self):
209 | self.assertTrue(False)
210 | """
211 | )
212 | (sub_tmpdir / "test_failed.py").write_text(content, encoding="utf-8")
213 | os.chdir(sub_tmpdir)
214 | try:
215 | tests = self.loader.loadTargets("test_failed")
216 | result = run(tests, self.stream, self.args)
217 | finally:
218 | os.chdir(self.startdir)
219 | self.assertEqual(result.testsRun, 1)
220 | self.assertIn("FAILED", self.stream.getvalue())
221 |
222 | def test_failfast(self):
223 | """
224 | failfast causes the testing to stop after the first failure.
225 | """
226 | sub_tmpdir = pathlib.Path(tempfile.mkdtemp(dir=self.tmpdir))
227 | content = dedent(
228 | """
229 | import unittest
230 | class SIGINTCase(unittest.TestCase):
231 | def test00(self):
232 | raise Exception
233 | def test01(self):
234 | pass
235 | """
236 | )
237 | (sub_tmpdir / "test_failfast.py").write_text(content, encoding="utf-8")
238 | os.chdir(sub_tmpdir)
239 | try:
240 | tests = self.loader.loadTargets("test_failfast")
241 | self.args.failfast = True
242 | result = run(tests, self.stream, self.args)
243 | finally:
244 | os.chdir(self.startdir)
245 | self.assertEqual(result.testsRun, 1)
246 |
247 | def test_system_exit(self):
248 | """
249 | Raising a SystemExit gets caught and reported.
250 | """
251 | sub_tmpdir = pathlib.Path(tempfile.mkdtemp(dir=self.tmpdir))
252 | content = dedent(
253 | """
254 | import unittest
255 | class SystemExitCase(unittest.TestCase):
256 | def test00(self):
257 | raise SystemExit(1)
258 | def test01(self):
259 | pass
260 | """
261 | )
262 | (sub_tmpdir / "test_systemexit.py").write_text(content, encoding="utf-8")
263 | os.chdir(sub_tmpdir)
264 | try:
265 | tests = self.loader.loadTargets("test_systemexit")
266 | result = run(tests, self.stream, self.args)
267 | finally:
268 | os.chdir(self.startdir)
269 | self.assertEqual(result.testsRun, 2)
270 |
271 |
272 | class TestProcesses(unittest.TestCase):
273 | # Setup
274 | @classmethod
275 | def setUpClass(cls):
276 | cls.startdir = os.getcwd()
277 | cls.container_dir = tempfile.mkdtemp()
278 |
279 | @classmethod
280 | def tearDownClass(cls):
281 | if os.getcwd() != cls.startdir:
282 | os.chdir(cls.startdir)
283 | cls.startdir = None
284 | shutil.rmtree(cls.container_dir)
285 |
286 | def setUp(self):
287 | os.chdir(self.container_dir)
288 | self.tmpdir = tempfile.mkdtemp(dir=self.container_dir)
289 | self.stream = StringIO()
290 | self.args = copy.deepcopy(get_default_args())
291 | self.loader = GreenTestLoader()
292 |
293 | def tearDown(self):
294 | os.chdir(self.startdir)
295 |         # On Windows, the worker processes keep the files locked while they
296 |         # take a bit to clean themselves up, so ignore removal errors.
297 | shutil.rmtree(self.tmpdir, ignore_errors=True)
298 | del self.stream
299 |
300 | @unittest.skipIf(platform.system() == "Windows", "Windows doesn't have SIGINT.")
301 | def test_catch_process_sigint(self):
302 | """
303 | run() can catch SIGINT while running a process.
304 | """
305 | # Mock the list of TestResult instances that should be stopped,
306 | # otherwise the actual TestResult that is running this test will be
307 | # told to stop when we send SIGINT
308 | sub_tmpdir = pathlib.Path(tempfile.mkdtemp(dir=self.tmpdir))
309 | saved__results = unittest.signals._results
310 | unittest.signals._results = weakref.WeakKeyDictionary()
311 | self.addCleanup(setattr, unittest.signals, "_results", saved__results)
312 | content = dedent(
313 | """
314 | import os
315 | import signal
316 | import unittest
317 | class TestSigint(unittest.TestCase):
318 | def test_00(self):
319 | os.kill({}, signal.SIGINT)
320 | """
321 | ).format(os.getpid())
322 | (sub_tmpdir / "test_sigint.py").write_text(content, encoding="utf-8")
323 | os.chdir(sub_tmpdir)
324 | try:
325 | tests = self.loader.loadTargets("test_sigint")
326 | self.args.processes = 2
327 | run(tests, self.stream, self.args)
328 | finally:
329 | os.chdir(TestProcesses.startdir)
330 |
331 | def test_collision_protection(self):
332 | """
333 |         Tests that write to the same filename under tempfile.gettempdir() do
334 |         not collide when run in parallel processes.
335 | """
336 | sub_tmpdir = pathlib.Path(tempfile.mkdtemp(dir=self.tmpdir))
337 | # Child setup
338 | # pkg/__init__.py
339 | (sub_tmpdir / "__init__.py").write_text("\n", encoding="utf-8")
340 | # pkg/some_module.py
341 | (sub_tmpdir / "some_module.py").write_text("a = 1\n", encoding="utf-8")
342 | # pkg/test/__init__.py
343 | os.mkdir(sub_tmpdir / "test")
344 | (sub_tmpdir / "test/__init__.py").write_text("\n", encoding="utf-8")
345 | # pkg/test/test_some_module.py
346 | content = dedent(
347 | """
348 | import os
349 | import tempfile
350 | import time
351 | import unittest
352 | import {}.some_module
353 | class A(unittest.TestCase):
354 | def setUp(self):
355 | self.tmpdir = tempfile.gettempdir()
356 | self.filename = os.path.join(tempfile.gettempdir(), 'file.txt')
357 | def testOne(self):
358 | for msg in [str(x) for x in range(50)]:
359 | fh = open(self.filename, 'w')
360 | fh.write(msg)
361 | fh.close()
362 | time.sleep(.01)
363 | fh = open(self.filename)
364 | actual = fh.read()
365 | fh.close()
366 | self.assertEqual(msg, actual)
367 | def testTwo(self):
368 | for msg in [str(x) for x in range(50,100)]:
369 | fh = open(self.filename, 'w')
370 | fh.write(msg)
371 | fh.close()
372 | time.sleep(.01)
373 | fh = open(self.filename)
374 | actual = fh.read()
375 | fh.close()
376 | self.assertEqual(msg, actual)
377 | """
378 | ).format(os.path.basename(sub_tmpdir))
379 | (sub_tmpdir / "test/test_some_module.py").write_text(content, encoding="utf-8")
380 | # Load the tests
381 | os.chdir(self.tmpdir)
382 | try:
383 | tests = self.loader.loadTargets(".")
384 | self.args.processes = 2
385 | self.args.termcolor = False
386 | try:
387 | run(tests, self.stream, self.args)
388 | except KeyboardInterrupt:
389 | os.kill(os.getpid(), signal.SIGINT)
390 | finally:
391 | os.chdir(TestProcesses.startdir)
392 | self.assertIn("OK", self.stream.getvalue())
393 |
394 | def test_detect_num_processes(self):
395 | """
396 | args.processes = 0 causes auto-detection of number of processes.
397 | """
398 | sub_tmpdir = pathlib.Path(tempfile.mkdtemp(dir=self.tmpdir))
399 | (sub_tmpdir / "__init__.py").write_text("\n", encoding="utf-8")
400 | content = dedent(
401 | """
402 | import unittest
403 | class A(unittest.TestCase):
404 | def testPasses(self):
405 | pass"""
406 | )
407 | (sub_tmpdir / "test_detectNumProcesses.py").write_text(
408 | content, encoding="utf-8"
409 | )
410 | # Load the tests
411 | os.chdir(self.tmpdir)
412 | try:
413 | tests = self.loader.loadTargets(".")
414 | self.args.processes = 0
415 | run(tests, self.stream, self.args)
416 | finally:
417 | os.chdir(TestProcesses.startdir)
418 | self.assertIn("OK", self.stream.getvalue())
419 |
420 | def test_run_coverage(self):
421 | """
422 | Running coverage in process mode doesn't crash.
423 | """
424 | try:
425 | import coverage
426 | except ImportError:
427 | self.skipTest("Coverage needs to be installed for this test")
428 | self.assertTrue(coverage.version_info)
429 | sub_tmpdir = pathlib.Path(tempfile.mkdtemp(dir=self.tmpdir))
430 | (sub_tmpdir / "__init__.py").write_text("\n", encoding="utf-8")
431 | content = dedent(
432 | """
433 | import unittest
434 | class A(unittest.TestCase):
435 | def testPasses(self):
436 | pass"""
437 | )
438 | (sub_tmpdir / "test_coverage.py").write_text(content, encoding="utf-8")
439 | # Load the tests
440 | os.chdir(self.tmpdir)
441 | try:
442 | tests = self.loader.loadTargets(".")
443 | self.args.processes = 2
444 | self.args.run_coverage = True
445 | self.args.cov = mock.MagicMock()
446 | run(tests, self.stream, self.args, testing=True)
447 | finally:
448 | os.chdir(TestProcesses.startdir)
449 | self.assertIn("OK", self.stream.getvalue())
450 |
451 | def test_bad_test(self):
452 | """
453 |         Bad syntax in a testfile surfaces as an ImportError.
454 | """
455 | sub_tmpdir = pathlib.Path(tempfile.mkdtemp(dir=self.tmpdir))
456 | (sub_tmpdir / "__init__.py").write_text("\n", encoding="utf-8")
457 | (sub_tmpdir / "test_bad_syntax.py").write_text("aoeu", encoding="utf-8")
458 | # Load the tests
459 | os.chdir(self.tmpdir)
460 | try:
461 | tests = self.loader.loadTargets(".")
462 | self.args.processes = 2
463 | finally:
464 | os.chdir(TestProcesses.startdir)
465 | self.assertRaises(ImportError, run, tests, self.stream, self.args)
466 |
467 | def test_uncaught_exception(self):
468 | """
469 | Exceptions that escape the test framework get caught by poolRunner and
470 | reported as a failure. For example, the testtools implementation of
471 | TestCase unwisely (but deliberately) lets SystemExit exceptions
472 | through.
473 | """
474 | if skip_testtools:
475 | self.skipTest("testtools must be installed to run this test.")
476 |
477 | sub_tmpdir = pathlib.Path(tempfile.mkdtemp(dir=self.tmpdir))
478 | # pkg/__init__.py
479 | (sub_tmpdir / "__init__.py").write_text("\n", encoding="utf-8")
480 | content = dedent(
481 | """
482 | import testtools
483 | class Uncaught(testtools.TestCase):
484 | def test_uncaught(self):
485 | raise SystemExit(0)
486 | """
487 | )
488 | (sub_tmpdir / "test_uncaught.py").write_text(content, encoding="utf-8")
489 | # Load the tests
490 | os.chdir(self.tmpdir)
491 | try:
492 | tests = self.loader.loadTargets(".")
493 | self.args.processes = 2
494 | run(tests, self.stream, self.args)
495 | finally:
496 | os.chdir(TestProcesses.startdir)
497 | self.assertIn("FAILED", self.stream.getvalue())
498 |
499 | def test_empty(self):
500 | """
501 |         run() does not crash on an empty suite when using multiple processes.
502 | """
503 | suite = GreenTestSuite()
504 | self.args.processes = 2
505 | self.args.termcolor = False
506 | run(suite, self.stream, self.args)
507 |
--------------------------------------------------------------------------------
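The `TestRun` and `TestProcesses` cases above exercise green's programmatic entry point: `run()` accepts a loaded suite, any writable stream (a bare `StringIO`, `sys.stdout`, or a `GreenStream` wrapper), and an args namespace derived from `get_default_args()`. A minimal sketch of that flow, assuming green is installed and using only calls the tests themselves make:

```python
import copy
import sys

from green.config import get_default_args
from green.loader import GreenTestLoader
from green.runner import run

# Mirror the tests' setup: deep-copy the default args, then tweak knobs.
args = copy.deepcopy(get_default_args())
args.verbose = 3  # test_verbose3 sets this to get version output.

# loadTargets accepts a dotted module path or a directory such as ".".
tests = GreenTestLoader().loadTargets("green.test.test_runner")
result = run(tests, sys.stdout, args)
print(result.wasSuccessful())
```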
/green/test/test_suite.py:
--------------------------------------------------------------------------------
1 | import copy
2 | from io import StringIO
3 | import os
4 | import shutil
5 | import tempfile
5 | from textwrap import dedent
6 | import unittest
7 | from unittest.mock import MagicMock
8 |
9 | from green.config import get_default_args
10 | from green.loader import GreenTestLoader
11 | from green.runner import run
12 | from green.suite import GreenTestSuite
13 |
14 |
15 | class TestGreenTestSuite(unittest.TestCase):
16 | def test_empty(self):
17 | """
18 | An empty suite can be instantiated.
19 | """
20 | GreenTestSuite()
21 |
22 | def test_defaultArgs(self):
23 | """
24 | Passing in default arguments causes attributes to be set.
25 | """
26 | default_args = get_default_args()
27 | gts = GreenTestSuite(args=default_args)
28 | self.assertEqual(gts.allow_stdout, default_args.allow_stdout)
29 |
30 | def test_shouldStop(self):
31 | """
32 | When result.shouldStop == True, the suite should exit early.
33 | """
34 | mock_test = MagicMock()
35 | gts = GreenTestSuite(args=get_default_args())
36 | gts._tests = (mock_test,)
37 | mock_result = MagicMock()
38 | mock_result.shouldStop = True
39 | gts.run(mock_result)
40 |
41 | def test_failedModuleSetup(self):
42 | """
43 | When module setup fails, we skip to the next test.
44 | """
45 | mock_test = MagicMock()
46 | mock_test.__iter__.side_effect = TypeError
47 | gts = GreenTestSuite(args=get_default_args())
48 | gts._tests = (mock_test,)
49 | mock_result = MagicMock()
50 | mock_result._moduleSetUpFailed = True
51 | mock_result.shouldStop = False
52 | gts.run(mock_result)
53 |
54 | def test_failedModuleTeardown(self):
55 | """
56 | When module teardown fails, we report an error.
57 | """
58 | mock_module = MagicMock()
59 | mock_test = MagicMock()
60 | mock_err = MagicMock()
61 | args = copy.deepcopy(get_default_args())
62 | gts = GreenTestSuite(args=args)
63 | gts._get_previous_module = mock_module
64 | mock_result = MagicMock()
65 | mock_result.errors.__len__.side_effect = [0, 1, 1]
66 | mock_result.errors.__getitem__.side_effect = [[], [(mock_test, mock_err)]]
67 | mock_result._previousTestClass.__name__ = "mockClass"
68 | gts.run(mock_result)
69 | self.assertTrue(mock_test.is_class_or_module_teardown_error)
70 |
71 | def test_addTest_testPattern(self):
72 | """
73 |         Setting test_pattern causes non-matching tests to be filtered out.
74 | """
75 | mock_test = MagicMock()
76 | mock_test._testMethodName = "test_hello"
77 | mock_test2 = MagicMock()
78 | mock_test2._testMethodName = "test_goodbye"
79 | args = copy.deepcopy(get_default_args())
80 | args.test_pattern = "_good*"
81 | gts = GreenTestSuite(args=args)
82 | gts.addTest(mock_test)
83 | self.assertEqual(gts._tests, [])
84 | gts.addTest(mock_test2)
85 | self.assertEqual(gts._tests, [mock_test2])
86 |
87 | def test_allow_stdout(self):
88 | """
89 | The allow_stdout setting should not get ignored.
90 | """
91 |
92 | class Object:
93 | pass
94 |
95 | args = Object()
96 | args.allow_stdout = True
97 | gts = GreenTestSuite(args=args)
98 | self.assertEqual(gts.allow_stdout, True)
99 |
100 | def test_skip_in_setUpClass(self):
101 | """
102 |         If SkipTest is raised in setUpClass, then the test gets skipped.
103 | """
104 | gts = GreenTestSuite(args=get_default_args())
105 | mock_test = MagicMock()
106 | mock_result = MagicMock()
107 | mock_class = MagicMock()
108 | mock_class.__qualname__ = "qualname"
109 | mock_result._previousTestClass = None
110 | mock_result._moduleSetUpFailed = None
111 | mock_result.__unittest_skip__ = None
112 | mock_test.__class__ = mock_class
113 | mock_class.setUpClass.side_effect = unittest.SkipTest("kaboom")
114 |
115 | gts._handleClassSetUp(mock_test, mock_result)
116 |
117 | self.assertTrue(mock_class.__unittest_skip__)
118 | self.assertEqual(mock_class.__unittest_skip_why__, "kaboom")
119 |
120 |
121 | class TestFunctional(unittest.TestCase):
122 | @classmethod
123 | def setUpClass(cls):
124 | cls.startdir = os.getcwd()
125 |
126 | @classmethod
127 | def tearDownClass(cls):
128 | if os.getcwd() != cls.startdir:
129 | os.chdir(cls.startdir)
130 | cls.startdir = None
131 |
132 | def setUp(self):
133 | self.args = copy.deepcopy(get_default_args())
134 | self.stream = StringIO()
135 | self.tmpdir = tempfile.mkdtemp()
136 | self.loader = GreenTestLoader()
137 |
138 |     def tearDown(self):
139 |         shutil.rmtree(self.tmpdir, ignore_errors=True)
140 |         del self.stream
141 |
142 | def test_skip_in_setUpClass(self):
143 | """
144 |         If SkipTest is raised in setUpClass, then the test gets skipped.
145 | """
146 | sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
147 |         with open(os.path.join(sub_tmpdir, "test_skipped.py"), "w") as fh:
148 |             fh.write(
149 |                 dedent(
150 |                     """
151 |                     import unittest
152 |                     class Skipper(unittest.TestCase):
153 |                         @classmethod
154 |                         def setUpClass(cls):
155 |                             raise unittest.SkipTest("the skip reason")
156 |                         def test_one(self):
157 |                             pass
158 |                         def test_two(self):
159 |                             pass
160 |                     """
161 |                 )
162 |             )
164 | os.chdir(sub_tmpdir)
165 |
166 | tests = self.loader.loadTargets("test_skipped")
167 | result = run(tests, self.stream, self.args)
168 | os.chdir(self.startdir)
169 | self.assertEqual(len(result.skipped), 2)
170 | self.assertEqual(self.stream.getvalue().count("the skip reason"), 2)
171 |
172 |
173 | class TestModuleTeardown(unittest.TestCase):
174 | @classmethod
175 | def setUpClass(cls):
176 | cls.startdir = os.getcwd()
177 |
178 | @classmethod
179 | def tearDownClass(cls):
180 | if os.getcwd() != cls.startdir:
181 | os.chdir(cls.startdir)
182 | cls.startdir = None
183 |
184 | def setUp(self):
185 | self.args = copy.deepcopy(get_default_args())
186 | self.stream = StringIO()
187 | self.tmpdir = tempfile.mkdtemp()
188 | self.loader = GreenTestLoader()
189 |
190 |     def tearDown(self):
191 |         shutil.rmtree(self.tmpdir, ignore_errors=True)
192 |         del self.stream
193 |
194 | def test_failedModuleTeardown(self):
195 |         """A failing tearDownModule gets counted as an errored test."""
196 | sub_tmpdir = tempfile.mkdtemp(dir=self.tmpdir)
197 |         with open(os.path.join(sub_tmpdir, "test_moduleteardownfailed.py"), "w") as fh:
198 |             fh.write(
199 |                 dedent(
200 |                     """
201 |                     import unittest
202 |                     def tearDownModule():
203 |                         syntaxerror
204 |                     class TestRedHerring(unittest.TestCase):
205 |                         def test(self):
206 |                             pass
207 |                     """
208 |                 )
209 |             )
211 | os.chdir(sub_tmpdir)
212 |
213 | tests = self.loader.loadTargets("test_moduleteardownfailed")
214 | result = run(tests, self.stream, self.args)
215 | os.chdir(self.startdir)
216 | self.assertEqual(len(result.passing), 1)
217 | self.assertEqual(len(result.errors), 1)
218 |
--------------------------------------------------------------------------------
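`test_addTest_testPattern` above pins down the filtering hook: a `GreenTestSuite` built with `args.test_pattern` set drops tests whose method names do not match the pattern as they are added. A hedged sketch of that behavior, assuming green is installed; `Sample` is a hypothetical test case written here purely for illustration:

```python
import copy
import unittest

from green.config import get_default_args
from green.suite import GreenTestSuite


class Sample(unittest.TestCase):
    def test_hello(self):
        pass

    def test_goodbye(self):
        pass


args = copy.deepcopy(get_default_args())
args.test_pattern = "_good*"
suite = GreenTestSuite(args=args)
suite.addTest(Sample("test_hello"))  # Dropped: name does not match.
suite.addTest(Sample("test_goodbye"))  # Kept: name matches "_good*".
print(suite.countTestCases())  # Expect 1, per the test above.
```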
/green/test/test_version.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from green.version import __version__, pretty_version
4 |
5 |
6 | class TestVersion(unittest.TestCase):
7 | def test_versionType(self):
8 | """
9 | __version__ is a unicode string
10 | """
11 | self.assertEqual(type(__version__), str)
12 |
13 | def test_versionSet(self):
14 | """
15 | __version__ is not blank
16 | """
17 | self.assertTrue(len(__version__) > 0)
18 |
19 | def test_pretty_version(self):
20 | """
21 | pretty_version() has the content we expect
22 | """
23 | pv = pretty_version()
24 | self.assertTrue("Green" in pv)
25 | self.assertTrue("Python" in pv)
26 | self.assertTrue("Coverage" in pv)
27 |
--------------------------------------------------------------------------------
/green/test/test_windows.py:
--------------------------------------------------------------------------------
1 | import platform
2 | import sys
3 | import unittest
4 |
5 |
6 | from green.output import GreenStream
7 |
8 |
9 | class TestWindows(unittest.TestCase):
10 | def setUp(self):
11 | if platform.system() != "Windows":
12 |             self.skipTest("This test is for Windows-specific behavior.")
13 |
14 | def test_colorOutput(self):
15 | """
16 |         Color output functions on Windows.
17 | """
18 | import colorama
19 |
20 | gs = GreenStream(sys.stdout, override_appveyor=True)
21 |         self.assertIsInstance(gs.stream, colorama.ansitowin32.StreamWrapper)
22 |
--------------------------------------------------------------------------------
/green/version.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations # pragma: no cover
2 |
3 | import pathlib # pragma: no cover
4 | import sys # pragma: no cover
5 |
6 | import coverage # pragma: no cover
7 |
8 | __version__: str = (
9 | (pathlib.Path(__file__).parent / "VERSION").read_text(encoding="utf-8").strip()
10 | ) # pragma: no cover
11 |
12 |
13 | def pretty_version() -> str: # pragma: no cover
14 | python_version = ".".join(str(x) for x in sys.version_info[0:3])
15 | return (
16 | f"Green {__version__}, Coverage {coverage.__version__}, Python {python_version}"
17 | )
18 |
--------------------------------------------------------------------------------
/img/GreenCourseImagePromoStripe.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CleanCut/green/e89b49146c8767ff4a0cedf05f6e8dffcba8e0cd/img/GreenCourseImagePromoStripe.png
--------------------------------------------------------------------------------
/img/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CleanCut/green/e89b49146c8767ff4a0cedf05f6e8dffcba8e0cd/img/screenshot.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | #
2 | # pyproject.toml file for cleancut/green
3 | #
4 | # For now mostly empty, as the project metadata remains in setup.cfg to
5 | # provide backward compatibility with setup.py.
6 |
7 |
8 | [build-system]
9 | requires = ["setuptools"]
10 | build-backend = "setuptools.build_meta"
11 |
12 |
13 | [tool.mypy]
14 | # Target versions of Python officially supported by python.org, since third-
15 | # party libraries will have support for newer Python syntax, throwing errors.
16 | python_version = "3.8"
17 | sqlite_cache = true
18 | namespace_packages = true
19 | ignore_missing_imports = true
20 | follow_imports = "silent"
21 | show_column_numbers = true
22 | # Ignore untyped defs for now.
23 | # check_untyped_defs = true
24 | show_error_codes = true
25 | # This might require setting `--no-pretty` in your IDE plugin.
26 | pretty = true
27 |
28 |
29 | [tool.coverage.run]
30 | source = ["green"]
31 |
32 | [tool.coverage.report]
33 | exclude_lines = [
34 | "pragma: no cover",
35 | "if __name__ == .__main__.:",
36 | "if TYPE_CHECKING:",
37 | ]
38 |
--------------------------------------------------------------------------------
/release.md:
--------------------------------------------------------------------------------
1 | Steps to Release
2 | ================
3 |
4 | 1. Bump the version in `green/VERSION`, per [PEP 440](https://peps.python.org/pep-0440/).
5 |
6 | 2. Push and merge to the main branch.
7 |
8 | 3. (Optional but recommended) Trigger the Release Test workflow in GitHub Actions, then approve the run on the release-test environment.
9 |
10 | 4. Create a new release in GitHub with a tag that mirrors the version; the GH action will take care of the rest after being approved to run.
11 |
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | black
2 | # coverage[toml] needs to be listed explicitly for python < 3.11.
3 | coverage[toml]; python_full_version<="3.11.0a6"
4 | django
5 | mypy
6 | testtools
7 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | colorama
2 | coverage
3 | lxml
4 | setuptools
5 | unidecode
6 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | #
2 | # setup.cfg file for cleancut/green
3 | #
4 | # https://setuptools.pypa.io/en/latest/userguide/declarative_config.html
5 | #
6 |
7 | [metadata]
8 | name = green
9 | summary = Green is a clean, colorful, fast python test runner.
10 | author = Nathan Stocks
11 | maintainer = Nathan Stocks
12 | author_email = nathan.stocks@gmail.com
13 | license = MIT
14 | version = file: green/VERSION
15 |
16 |
17 | long_description = file:README-pypi.rst
18 | long_description_content_type = text/x-rst; charset=UTF-8
19 | description_file = file:README.md
20 | description_content_type = text/markdown; charset=UTF-8
21 |
22 | url = https://github.com/CleanCut/green
23 |
24 | classifier =
25 | Development Status :: 5 - Production/Stable
26 | Environment :: Console
27 | Intended Audience :: Developers
28 | License :: OSI Approved :: MIT License
29 | Natural Language :: English
30 | Operating System :: MacOS :: MacOS X
31 | Operating System :: Microsoft :: Windows
32 | Operating System :: POSIX
33 | Operating System :: POSIX :: Linux
34 | Operating System :: Unix
35 | Programming Language :: Python
36 | Programming Language :: Python :: 3
37 | Programming Language :: Python :: 3.8
38 | Programming Language :: Python :: 3.9
39 | Programming Language :: Python :: 3.10
40 | Programming Language :: Python :: 3.11
41 | Programming Language :: Python :: 3.12
42 | Programming Language :: Python :: 3 :: Only
43 | Topic :: Software Development :: Libraries
44 | Topic :: Software Development :: Quality Assurance
45 | Topic :: Software Development :: Testing
46 | Topic :: Utilities
47 |
48 | keywords =
49 | nose, nose2, trial, pytest, py.test, tox, green,
50 | tdd, test, tests, functional, system, unit, unittest,
51 | color, tabular, clean, red, rednose, regression, runner,
52 |     integration, smoke, white, black, box, incremental, end,
53 | end-to-end, sanity, acceptance, load, stress, performance,
54 | usability, install, uninstall, recovery, security,
55 | comparison, alpha, beta, non-functional, destructive,
56 | accessibility, internationalization, i18n, localization, l10n,
57 | development, a/b, concurrent, conformance, verification,
58 | validation, quality, assurance, ad-hoc, agile, api,
59 | automated, all, pairs, pairwise, boundary, value, branch,
60 | browser, condition, coverage, dynamic, exploratory,
61 | equivalence, partitioning, fuzz, gui, glass, gorilla,
62 | interface, keyword, penetration, retesting, risk, based,
63 | scalability, soak, volume, vulnerability
64 |
65 | test_suite = green.test
66 |
67 |
68 | [options]
69 | # Known bug in python 3.12.1 breaks green when skipping tests.
70 | #
71 | python_requires = >=3.8, !=3.12.1
72 | install_requires = file:requirements.txt
73 | include_package_data = True
74 | packages = find:
75 |
76 | [options.extras_require]
77 | dev = file:requirements-dev.txt
78 |
79 | [options.package_data]
80 | green = VERSION, shell_completion.sh
81 |
82 |
83 | [options.entry_points]
84 | console_scripts =
85 | green = green.cmdline:main
86 |
87 |
88 | [bdist_wheel]
89 | universal = 1
90 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #
2 | # setup.py file for cleancut/green.
3 | #
4 | # setuptools versions older than 64.0.0 need a minimal setup.py file.
5 | #
6 |
7 | from setuptools import setup
8 |
9 | setup()
10 |
--------------------------------------------------------------------------------
/test_versions:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -o pipefail
4 |
5 | if [ "${TRAVIS_OS_NAME}" == "linux" ] ; then
6 | PYTHON_VERSIONS="${TRAVIS_PYTHON_VERSION}"
7 | else
8 | PYTHON_VERSIONS="$(find -E `echo $PATH | tr : ' '` -depth 1 -regex '.*/python(3.[0-9]+)|.*/pypy|.*/pypy3' 2>/dev/null | sed -E -e 's/.*python//' -e 's/.*pypy/pypy/' )"
9 | fi
10 | # This fancy line makes it so we test each module individually, and then all of green.
11 | #TESTS="`find green -name test_*.py | sed -e s/.py$// | tr / .` green"
12 | TESTS="green"
13 |
14 |
15 | if [ "${TESTS}" == "" ] ; then
16 | echo "No tests found!"
17 | exit 2
18 | fi
19 |
20 | if [ "${PYTHON_VERSIONS}" == "" ] ; then
21 | PYTHON_VERSIONS="default"
22 | fi
23 |
24 | if [[ "${PYTHON_VERSIONS}" == *-dev ]] ; then
25 | PYTHON_VERSIONS="${PYTHON_VERSIONS::-4}"
26 | fi
27 |
28 | # Deduplicate
29 | PYTHON_VERSIONS="$(echo $PYTHON_VERSIONS | xargs -n1 | sort -u | xargs)"
30 |
31 |
32 | echo "Identified python versions: `echo ${PYTHON_VERSIONS} | tr '\n' ' '`"
33 |
34 | # Make sure each of the pythons has the necessary requirements installed
35 | for PYTHON_VERSION in ${PYTHON_VERSIONS} ; do
36 | if [ "${PYTHON_VERSION}" == "default" ] ; then
37 | PYTHON_VERSION="3"
38 | fi
39 | if [ "${PYTHON_VERSION}" == "nightly" ] ; then
40 | PYTHON_VERSION=""
41 | fi
42 | if [[ -e `which python${PYTHON_VERSION}` ]] ; then
43 | PYTHON=python${PYTHON_VERSION}
44 | shift
45 | elif [[ -e `which ${PYTHON_VERSION}` ]] ; then
46 | PYTHON=${PYTHON_VERSION}
47 | shift
48 | elif [[ ${PYTHON_VERSION} =~ ^pypy2.*$ ]] ; then
49 | PYTHON=pypy
50 | shift
51 | elif [[ ${PYTHON_VERSION} =~ ^pypy3.*$ ]] ; then
52 | PYTHON=pypy3
53 | shift
54 | else
55 | echo "Failed to determine python binary for python version '${PYTHON_VERSION}'"
56 | exit 4
57 | fi
58 |
59 | if ! ${PYTHON} -m pip > /dev/null ; then
60 | echo "Please install pip under ${PYTHON}"
61 | exit 5
62 | fi
63 |
64 | VENV_DIR="venv${PYTHON_VERSION}"
65 | if [ ! -d ${VENV_DIR} ] ; then
66 | ${PYTHON} -m venv ${VENV_DIR}
67 | fi
68 |
69 | echo "Ensuring dependencies are installed for ${VENV_DIR}"
70 |
71 | if ! source ${VENV_DIR}/bin/activate ; then
72 | echo "Failed to enter virtual environment"
73 | exit 7
74 | fi
75 | hash -r
76 | ${VENV_DIR}/bin/pip install -r requirements.txt | grep -Ev "Requirement already|however version|consider upgrading"
77 | ${VENV_DIR}/bin/pip install -r requirements-dev.txt | grep -Ev "Requirement already|however version|consider upgrading"
78 | deactivate
79 | done
80 |
81 | # Finally, run all the tests
82 | for TEST in ${TESTS} ; do
83 | for PYTHON_VERSION in ${PYTHON_VERSIONS} ; do
84 | if [ "${PYTHON_VERSION}" == "default" ] ; then
85 | PYTHON="python3"
86 | else
87 | VENV_DIR="venv${PYTHON_VERSION}"
88 | PYTHON=${VENV_DIR}/bin/python
89 | fi
90 |
91 | echo ""
92 | set -x
93 | # Actually run it!
94 | if ! PYTHONPATH="." ${PYTHON} -m green.cmdline -k ${TEST} ; then
95 | exit 3
96 | fi
97 | { set +x; } 2>/dev/null
98 | done
99 | done
100 |
101 | echo -e "\nCompleted internal test suite for Python versions:\n${PYTHON_VERSIONS}\n"
102 |
--------------------------------------------------------------------------------