├── .github ├── release.yml ├── renovate.json5 └── workflows │ ├── ci.yml │ ├── coverage-comment.yml │ ├── e2e-delete-repo.yml │ ├── e2e-external-phase-1.yml │ ├── e2e-external-phase-2.yml │ ├── e2e-private-link-in-pr.yml │ ├── e2e-public-link-in-pr.yml │ ├── manual-release.yml │ └── release.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CONTRIBUTING.md ├── Dockerfile ├── Dockerfile.build ├── LICENSE ├── Makefile ├── README.md ├── action.yml ├── coverage_comment ├── __init__.py ├── __main__.py ├── activity.py ├── badge.py ├── comment_file.py ├── communication.py ├── coverage.py ├── diff_grouper.py ├── files.py ├── github.py ├── github_client.py ├── groups.py ├── log.py ├── log_utils.py ├── main.py ├── settings.py ├── storage.py ├── subprocess.py ├── template.py └── template_files │ ├── comment.md.j2 │ ├── log.txt.j2 │ └── readme.md.j2 ├── dev-env ├── dev-env-vars.dist ├── pyproject.toml ├── scripts └── sync-pre-commit.py ├── setup.cfg ├── tests ├── README.md ├── __init__.py ├── conftest.py ├── end_to_end │ ├── conftest.py │ ├── repo │ │ ├── .github │ │ │ └── workflows │ │ │ │ ├── ci.yml │ │ │ │ └── coverage-comment.yml │ │ ├── end_to_end_tests_repo │ │ │ └── __init__.py │ │ ├── pyproject.toml │ │ ├── setup.cfg │ │ └── tests │ │ │ ├── cases.csv │ │ │ └── test_f.py │ └── test_all.py ├── integration │ ├── __init__.py │ ├── test_github.py │ └── test_main.py └── unit │ ├── __init__.py │ ├── test_activity.py │ ├── test_badge.py │ ├── test_comment_file.py │ ├── test_communication.py │ ├── test_coverage.py │ ├── test_diff_grouper.py │ ├── test_dunder_main.py │ ├── test_files.py │ ├── test_github_client.py │ ├── test_groups.py │ ├── test_log_utils.py │ ├── test_main.py │ ├── test_settings.py │ ├── test_storage.py │ ├── test_subprocess.py │ └── test_template.py └── uv.lock /.github/release.yml: -------------------------------------------------------------------------------- 1 | changelog: 2 | exclude: 3 | labels: 4 | - dependencies 5 | 
-------------------------------------------------------------------------------- /.github/renovate.json5: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "config:base", 4 | ":enablePreCommit", 5 | ], 6 | "lockFileMaintenance": { 7 | "enabled": true, 8 | "automerge": true, 9 | }, 10 | "labels": ["dependencies"], 11 | "packageRules": [ 12 | { 13 | "matchUpdateTypes": [ 14 | "major", 15 | ], 16 | "groupName": "Deps with major upgrades", 17 | }, 18 | { 19 | "matchUpdateTypes": [ 20 | "minor", 21 | "patch", 22 | "pin", 23 | "digest", 24 | ], 25 | "automerge": true, 26 | "groupName": "Deps with minor upgrades", 27 | }, 28 | { 29 | "matchDepTypes": [ 30 | "devDependencies", 31 | ], 32 | "automerge": true, 33 | "groupName": "Dev dependencies", 34 | }, 35 | ], 36 | "ignorePaths": [ 37 | "Dockerfile.build", 38 | ], 39 | } 40 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: [main] 7 | 8 | concurrency: 9 | group: ${{ github.event_name }}-${{ github.ref }} 10 | 11 | jobs: 12 | test: 13 | name: Run tests & display coverage 14 | runs-on: ubuntu-latest 15 | permissions: 16 | pull-requests: write 17 | contents: write 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v4 21 | 22 | - name: Install uv 23 | uses: astral-sh/setup-uv@v6 24 | with: 25 | python-version: "3.12" 26 | 27 | - name: Poetry caches 28 | uses: actions/cache@v4 29 | with: 30 | path: | 31 | ~/.cache/ 32 | key: ${{ hashFiles('uv.lock') }} 33 | 34 | - name: Install deps 35 | run: uv sync --all-groups 36 | 37 | - name: Run tests 38 | run: uv run pytest 39 | env: 40 | COVERAGE_COMMENT_E2E_GITHUB_TOKEN_USER_1: ${{ secrets.COVERAGE_COMMENT_E2E_GITHUB_TOKEN_USER_1 }} 41 | COVERAGE_COMMENT_E2E_GITHUB_TOKEN_USER_2: ${{ 
secrets.COVERAGE_COMMENT_E2E_GITHUB_TOKEN_USER_2 }} 42 | COVERAGE_COMMENT_E2E_ACTION_REF: ${{ github.sha }} 43 | COVERAGE_COMMENT_E2E_REPO_SUFFIX: ${{ github.event.number }} 44 | 45 | - name: Coverage comment 46 | id: coverage_comment 47 | uses: py-cov-action/python-coverage-comment-action@main 48 | with: 49 | GITHUB_TOKEN: ${{ github.token }} 50 | ANNOTATE_MISSING_LINES: true 51 | 52 | - name: Store Pull Request comment to be posted 53 | uses: actions/upload-artifact@v4 54 | if: steps.coverage_comment.outputs.COMMENT_FILE_WRITTEN == 'true' 55 | with: 56 | name: python-coverage-comment-action 57 | path: python-coverage-comment-action.txt 58 | -------------------------------------------------------------------------------- /.github/workflows/coverage-comment.yml: -------------------------------------------------------------------------------- 1 | name: Post coverage comment 2 | 3 | on: 4 | workflow_run: 5 | workflows: ["CI"] 6 | types: 7 | - completed 8 | 9 | jobs: 10 | test: 11 | name: Publish coverage comment 12 | runs-on: ubuntu-latest 13 | if: github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion == 'success' 14 | permissions: 15 | actions: read 16 | pull-requests: write 17 | contents: write 18 | steps: 19 | - name: Post comment 20 | uses: py-cov-action/python-coverage-comment-action@main 21 | with: 22 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 23 | GITHUB_PR_RUN_ID: ${{ github.event.workflow_run.id }} 24 | -------------------------------------------------------------------------------- /.github/workflows/e2e-delete-repo.yml: -------------------------------------------------------------------------------- 1 | name: Delete end-to-end test repos on PR close 2 | 3 | on: 4 | pull_request_target: 5 | types: 6 | - closed 7 | 8 | jobs: 9 | test: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - run: | 13 | gh repo delete --yes https://github.com/mihcaojwe/python-coverage-comment-action-end-to-end-${NUMBER}-public || true 14 | env: 15 | 
NUMBER: ${{ github.event.pull_request.number }} 16 | GITHUB_TOKEN: ${{ secrets.COVERAGE_COMMENT_E2E_GITHUB_TOKEN_USER_1 }} 17 | - run: | 18 | gh repo delete --yes https://github.com/mihcaojwe2/python-coverage-comment-action-end-to-end-${NUMBER}-public || true 19 | env: 20 | NUMBER: ${{ github.event.pull_request.number }} 21 | GITHUB_TOKEN: ${{ secrets.COVERAGE_COMMENT_E2E_GITHUB_TOKEN_USER_2 }} 22 | - run: | 23 | gh repo delete --yes https://github.com/mihcaojwe/python-coverage-comment-action-end-to-end-${NUMBER}-private || true 24 | env: 25 | NUMBER: ${{ github.event.pull_request.number }} 26 | GITHUB_TOKEN: ${{ secrets.COVERAGE_COMMENT_E2E_GITHUB_TOKEN_USER_1 }} 27 | -------------------------------------------------------------------------------- /.github/workflows/e2e-external-phase-1.yml: -------------------------------------------------------------------------------- 1 | name: Trigger end-to-end tests on external PR approval 2 | 3 | on: 4 | pull_request_review: 5 | types: [submitted] 6 | 7 | jobs: 8 | test: 9 | name: Trigger end-to-end tests 10 | # If reviewed by a repo(/org) owner 11 | if: | 12 | github.event.pull_request.author_association != 'MEMBER' 13 | && github.event.review.author_association == 'MEMBER' 14 | && github.event.review.state == 'approved' 15 | && contains(github.event.review.body, '/e2e') 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Store PR number in a file 19 | run: echo "PR_NUMBER=${PR_NUMBER}" > pr_number.txt 20 | env: 21 | PR_NUMBER: ${{ github.event.pull_request.number }} 22 | - name: Save artifact 23 | uses: actions/upload-artifact@v4 24 | with: 25 | name: pr_number 26 | path: pr_number.txt 27 | -------------------------------------------------------------------------------- /.github/workflows/e2e-external-phase-2.yml: -------------------------------------------------------------------------------- 1 | name: Run end-to-end tests on external PR approval 2 | 3 | on: 4 | workflow_run: 5 | workflows: ["Trigger end-to-end tests on 
external PR approval"] 6 | types: 7 | - completed 8 | 9 | concurrency: 10 | group: ${{ github.event_name }}-${{ github.ref }} 11 | 12 | jobs: 13 | e2e: 14 | name: End-to-end tests 15 | runs-on: ubuntu-latest 16 | if: github.event.workflow_run.conclusion == 'success' 17 | permissions: 18 | actions: read 19 | pull-requests: write 20 | contents: write 21 | checks: write 22 | steps: 23 | - name: Extract PR number from artifact 24 | id: extract_pr_number 25 | run: > 26 | gh api 27 | --method GET 28 | "/repos/py-cov-action/python-coverage-comment-action/actions/runs/${RUN_ID}/artifacts" 29 | -F name="pr_number" 30 | --jq '[.artifacts[]] | last | .archive_download_url' 31 | | xargs gh api 32 | | funzip 33 | > "${GITHUB_OUTPUT}" 34 | env: 35 | GH_TOKEN: ${{ github.token }} 36 | RUN_ID: ${{ github.event.workflow_run.id }} 37 | 38 | - name: Extract the approved commit 39 | id: extract_commit 40 | run: | 41 | COMMIT_ID=$(gh pr --repo py-cov-action/python-coverage-comment-action view "${PR_NUMBER}" --json reviews --jq '[.reviews[] | select(.state == "APPROVED" and .authorAssociation == "MEMBER" and (.body | contains("/e2e")) ) | .commit.oid] | last') 42 | if [ -z "${COMMIT_ID}" ]; then 43 | echo "No approved commit found" 44 | exit 1 45 | fi 46 | echo "COMMIT_ID=${COMMIT_ID}" > "${GITHUB_OUTPUT}" 47 | env: 48 | GH_TOKEN: ${{ github.token }} 49 | PR_NUMBER: ${{ steps.extract_pr_number.outputs.PR_NUMBER }} 50 | 51 | - name: Extract the current job id 52 | id: extract_job_id 53 | run: > 54 | gh api 55 | "repos/py-cov-action/python-coverage-comment-action/actions/runs/${RUN_ID}/attempts/${RUN_ATTEMPT}/jobs" 56 | --jq ' 57 | .jobs[] 58 | | select(.runner_name=="'"${RUNNER_NAME}"'") 59 | | "JOB_ID=" + (.id | tostring)' 60 | > "${GITHUB_OUTPUT}" 61 | env: 62 | GH_TOKEN: ${{ github.token }} 63 | RUN_ID: ${{ github.run_id }} 64 | RUN_ATTEMPT: ${{ github.run_attempt }} 65 | RUNNER_NAME: ${{ runner.name }} 66 | 67 | - name: Create PR check 68 | id: create_check 69 | run: > 70 | gh api 71 
| "repos/py-cov-action/python-coverage-comment-action/check-runs" 72 | -X POST 73 | -F name="End-to-end tests (external PR)" 74 | -F head_sha="${HEAD_SHA}" 75 | -F status="in_progress" 76 | -F started_at="$(date -u +%FT%TZ)" 77 | -F details_url="$(gh api "/repos/py-cov-action/python-coverage-comment-action/actions/jobs/${JOB_ID}" --jq '.html_url')" 78 | --jq '"CHECK_RUN_ID=" + (.id | tostring)' > "${GITHUB_OUTPUT}" 79 | env: 80 | GITHUB_TOKEN: ${{ github.token }} 81 | HEAD_SHA: ${{ steps.extract_commit.outputs.COMMIT_ID }} 82 | JOB_ID: ${{ steps.extract_job_id.outputs.JOB_ID }} 83 | 84 | - name: Checkout 85 | uses: actions/checkout@v4 86 | with: 87 | # Important: use the commit that was reviewed. GitHub is making sure 88 | # that this is race-condition-proof 89 | ref: ${{ steps.extract_commit.outputs.COMMIT_ID }} 90 | 91 | - name: Install uv 92 | uses: astral-sh/setup-uv@v6 93 | with: 94 | python-version: "3.12" 95 | 96 | - name: Poetry caches 97 | uses: actions/cache@v4 98 | with: 99 | path: | 100 | ~/.cache/ 101 | key: ${{ hashFiles('uv.lock') }} 102 | 103 | - name: Install deps 104 | run: uv sync --all-groups 105 | 106 | - name: Run end-to-end tests 107 | run: uv run pytest tests/end_to_end 108 | env: 109 | COVERAGE_COMMENT_E2E_GITHUB_TOKEN_USER_1: ${{ secrets.COVERAGE_COMMENT_E2E_GITHUB_TOKEN_USER_1 }} 110 | COVERAGE_COMMENT_E2E_GITHUB_TOKEN_USER_2: ${{ secrets.COVERAGE_COMMENT_E2E_GITHUB_TOKEN_USER_2 }} 111 | COVERAGE_COMMENT_E2E_ACTION_REF: ${{ steps.extract_commit.outputs.COMMIT_ID }} 112 | COVERAGE_COMMENT_E2E_REPO_SUFFIX: ${{ steps.extract_pr_number.outputs.PR_NUMBER }} 113 | 114 | - name: Report results to Check 115 | if: always() && steps.create_check.outputs.CHECK_RUN_ID 116 | run: > 117 | gh api 118 | "repos/py-cov-action/python-coverage-comment-action/check-runs/${CHECK_RUN_ID}" 119 | -X PATCH 120 | -F conclusion=${JOB_STATUS} 121 | -F status=completed 122 | env: 123 | GITHUB_TOKEN: ${{ github.token }} 124 | CHECK_RUN_ID: ${{ 
steps.create_check.outputs.CHECK_RUN_ID }} 125 | JOB_STATUS: ${{ job.status }} 126 | -------------------------------------------------------------------------------- /.github/workflows/e2e-private-link-in-pr.yml: -------------------------------------------------------------------------------- 1 | name: Post link to private end-to-end test repository 2 | 3 | on: 4 | issue_comment: 5 | types: [created] 6 | 7 | jobs: 8 | invite: 9 | name: Invite everyone to the e2e private repo 10 | if: | 11 | github.event.issue.pull_request 12 | && contains(github.event.comment.body, '/invite') 13 | runs-on: ubuntu-latest 14 | strategy: 15 | matrix: 16 | collaborator: 17 | - LOGIN: kieferro 18 | PERMISSION: admin 19 | ENABLED: true 20 | 21 | - LOGIN: ewjoachim 22 | PERMISSION: admin 23 | ENABLED: true 24 | 25 | - LOGIN: ${{ github.event.issue.user.login }} 26 | PERMISSION: push 27 | ENABLED: ${{ !contains(fromJson('["kieferro", "ewjoachim"]'), github.event.issue.user.login) }} 28 | 29 | steps: 30 | 31 | - name: Invite @${{ matrix.collaborator.LOGIN }} to the e2e private repo 32 | run: gh api --method PUT /repos/mihcaojwe/python-coverage-comment-action-end-to-end-${NUMBER}-private/collaborators/${LOGIN} -f permission=${PERMISSION} 33 | if: ${{ matrix.collaborator.ENABLED == true }} 34 | env: 35 | LOGIN: ${{ matrix.collaborator.LOGIN }} 36 | NUMBER: ${{ github.event.issue.number }} 37 | PERMISSION: ${{ matrix.collaborator.PERMISSION }} 38 | GITHUB_TOKEN: ${{ secrets.COVERAGE_COMMENT_E2E_GITHUB_TOKEN_USER_1 }} 39 | 40 | comment: 41 | name: Add comment with link to e2e repos 42 | if: | 43 | github.event.issue.pull_request 44 | && contains(github.event.comment.body, '/invite') 45 | runs-on: ubuntu-latest 46 | permissions: 47 | pull-requests: write 48 | steps: 49 | - run: | 50 | gh pr comment ${LINK} --body-file - < pyproject.toml.md5 19 | 20 | COPY coverage_comment ./coverage_comment 21 | ENV PIP_DISABLE_PIP_VERSION_CHECK=1 22 | ENV PIP_ROOT_USER_ACTION=ignore 23 | ENV 
PIP_NO_CACHE_DIR=off 24 | 25 | RUN pip install -e . 26 | 27 | CMD [ "coverage_comment" ] 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Joachim Jablon, kieferro 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: install 2 | install: ## install dependencies 3 | uv sync --all-groups 4 | pre-commit install 5 | 6 | .PHONY: lint 7 | lint: ## lint code 8 | pre-commit 9 | 10 | .PHONY: test 11 | test: ## run all tests 12 | uv run pytest 13 | -------------------------------------------------------------------------------- /action.yml: -------------------------------------------------------------------------------- 1 | name: Python Coverage Comment 2 | branding: 3 | icon: "umbrella" 4 | color: "purple" 5 | description: > 6 | Publish diff coverage report as PR comment, and create a coverage badge 7 | to display on the readme. 8 | inputs: 9 | GITHUB_BASE_URL: 10 | description: > 11 | The base URL for the GitHub API, typically used to specify custom endpoints 12 | for GitHub Enterprise Server (e.g., `https://github.mycompany.com/api/v3`). 13 | Defaults to `https://api.github.com` for GitHub.com. 14 | default: "https://api.github.com" 15 | required: false 16 | GITHUB_TOKEN: 17 | description: > 18 | A GitHub token to write comments and write the badge & coverage data 19 | to the repository. 20 | Set to `$ {{ github.token }}` (without the space between `$` and `{`). 21 | required: true 22 | GITHUB_PR_RUN_ID: 23 | description: > 24 | Only useful on the "workflow_run" part of the workflow. 25 | Set to `$ {{ github.event.workflow_run.id }}` (without the space between `$` and `{`). 26 | required: false 27 | COMMENT_TEMPLATE: 28 | description: > 29 | [Advanced] Specify a different template for the comments that will be written on 30 | the PR. See the Action README documentation for how to use this properly. 31 | required: false 32 | COVERAGE_DATA_BRANCH: 33 | description: > 34 | Name of the branch in which coverage data will be stored on the repository. 35 | Default is 'python-coverage-comment-action-data'. 
Please make sure that this 36 | branch is not protected. 37 | In monorepo setting, see SUBPROJECT_ID. 38 | default: python-coverage-comment-action-data 39 | required: false 40 | COVERAGE_PATH: 41 | description: > 42 | Path to the directory under the git root where the coverage data is 43 | stored. Default is '.'. 44 | default: "." 45 | required: false 46 | COMMENT_ARTIFACT_NAME: 47 | description: > 48 | Name of the artifact in which the body of the comment to post on the PR is stored. 49 | You typically don't have to change this unless you're already using this name for something else. 50 | default: python-coverage-comment-action 51 | required: false 52 | COMMENT_FILENAME: 53 | description: > 54 | Name of the file in which the body of the comment to post on the PR is stored. 55 | In monorepo setting, see SUBPROJECT_ID. 56 | default: python-coverage-comment-action.txt 57 | required: false 58 | SUBPROJECT_ID: 59 | description: > 60 | This setting is only necessary if you plan to run the action multiple 61 | times in the same repository. It will be appended to the value of all the 62 | settings that need to be unique, so as for the action to avoid mixing up 63 | results of multiple runs. Ideally, use dashes (`-`) rather than 64 | underscrores (`_`) to split words, for consistency. 65 | Affects `COMMENT_FILENAME`, `COVERAGE_DATA_BRANCH`. 66 | default: null 67 | required: false 68 | MINIMUM_GREEN: 69 | description: > 70 | If the coverage percentage is above or equal to this value, the badge 71 | will be green. 72 | default: 100 73 | required: false 74 | MINIMUM_ORANGE: 75 | description: > 76 | If the coverage percentage is not green and above or equal to this value, 77 | the badge will be orange. Otherwise it will be red. 78 | default: 70 79 | required: false 80 | MAX_FILES_IN_COMMENT: 81 | description: > 82 | Maximum number of files to display in the comment. If there are more 83 | files than this number, they will only appear in the workflow summary. 
84 | The selected files are the ones with the most new uncovered lines. The 85 | closer this number gets to 35, the higher the risk that it reaches 86 | GitHub's maximum comment size limit of 65536 characters. If you want 87 | more files, you may need to use a custom comment template. 88 | (Feel free to open an issue.) 89 | default: 25 90 | required: false 91 | MERGE_COVERAGE_FILES: 92 | description: > 93 | If true, will run `coverage combine` before reading the `.coverage` file. 94 | default: false 95 | ANNOTATE_MISSING_LINES: 96 | description: > 97 | If true, will create an annotation on every line with missing coverage on a pull request. 98 | default: false 99 | ANNOTATION_TYPE: 100 | description: > 101 | Only relevant if ANNOTATE_MISSING_LINES is set to true. This parameter allows you to choose between 102 | notice, warning and error as annotation type. For more information look here: 103 | https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-notice-message 104 | default: warning 105 | VERBOSE: 106 | description: > 107 | Deprecated, see https://docs.github.com/en/actions/monitoring-and-troubleshooting-workflows/enabling-debug-logging 108 | default: false 109 | outputs: 110 | COMMENT_FILE_WRITTEN: 111 | description: > 112 | This output is only set when running in PR mode. It's a boolean indicating 113 | whether a comment file was written to COMMENT_FILENAME or not. If so, 114 | you'll need to run the action in workflow_run mode to post it. If 115 | "false", no comment file was written (likely because the comment was 116 | already posted to the PR). 
117 | runs: 118 | using: docker 119 | image: Dockerfile 120 | env: 121 | GITHUB_BASE_URL: ${{ inputs.GITHUB_BASE_URL }} 122 | GITHUB_TOKEN: ${{ inputs.GITHUB_TOKEN }} 123 | GITHUB_PR_RUN_ID: ${{ inputs.GITHUB_PR_RUN_ID }} 124 | COMMENT_TEMPLATE: ${{ inputs.COMMENT_TEMPLATE }} 125 | COVERAGE_DATA_BRANCH: ${{ inputs.COVERAGE_DATA_BRANCH }} 126 | COVERAGE_PATH: ${{ inputs.COVERAGE_PATH }} 127 | COMMENT_ARTIFACT_NAME: ${{ inputs.COMMENT_ARTIFACT_NAME }} 128 | COMMENT_FILENAME: ${{ inputs.COMMENT_FILENAME }} 129 | SUBPROJECT_ID: ${{ inputs.SUBPROJECT_ID }} 130 | MINIMUM_GREEN: ${{ inputs.MINIMUM_GREEN }} 131 | MINIMUM_ORANGE: ${{ inputs.MINIMUM_ORANGE }} 132 | MERGE_COVERAGE_FILES: ${{ inputs.MERGE_COVERAGE_FILES }} 133 | ANNOTATE_MISSING_LINES: ${{ inputs.ANNOTATE_MISSING_LINES }} 134 | ANNOTATION_TYPE: ${{ inputs.ANNOTATION_TYPE }} 135 | VERBOSE: ${{ inputs.VERBOSE }} 136 | -------------------------------------------------------------------------------- /coverage_comment/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-cov-action/python-coverage-comment-action/970a227e0c16ef4589a99a9970ab0ceb8c53059a/coverage_comment/__init__.py -------------------------------------------------------------------------------- /coverage_comment/__main__.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from coverage_comment import main 4 | 5 | 6 | def main_call(name): 7 | if name == "__main__": 8 | main.main() 9 | 10 | 11 | main_call(name=__name__) 12 | -------------------------------------------------------------------------------- /coverage_comment/activity.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module is responsible for identifying what the action should be doing 3 | based on the github event type and repository. 
4 | 5 | The code in main should be as straightforward as possible, we're offloading 6 | the branching logic to this module. 7 | """ 8 | 9 | from __future__ import annotations 10 | 11 | 12 | class ActivityNotFound(Exception): 13 | pass 14 | 15 | 16 | def find_activity( 17 | event_name: str, 18 | is_default_branch: bool, 19 | ) -> str: 20 | """Find the activity to perform based on the event type and payload.""" 21 | if event_name == "workflow_run": 22 | return "post_comment" 23 | 24 | if (event_name == "push" and is_default_branch) or event_name == "schedule": 25 | return "save_coverage_data_files" 26 | 27 | if event_name not in {"pull_request", "push"}: 28 | raise ActivityNotFound 29 | 30 | return "process_pr" 31 | -------------------------------------------------------------------------------- /coverage_comment/badge.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module should contain only the things relevant to the badge being computed 3 | by shields.io 4 | """ 5 | 6 | from __future__ import annotations 7 | 8 | import decimal 9 | import json 10 | import urllib.parse 11 | 12 | import httpx 13 | 14 | 15 | def get_badge_color( 16 | rate: decimal.Decimal, 17 | minimum_green: decimal.Decimal, 18 | minimum_orange: decimal.Decimal, 19 | ) -> str: 20 | if rate >= minimum_green: 21 | return "brightgreen" 22 | elif rate >= minimum_orange: 23 | return "orange" 24 | else: 25 | return "red" 26 | 27 | 28 | def get_evolution_badge_color( 29 | delta: decimal.Decimal | int, 30 | up_is_good: bool = True, 31 | neutral_color: str = "lightgrey", 32 | ) -> str: 33 | if delta == 0: 34 | return neutral_color 35 | elif (delta > 0) is up_is_good: 36 | return "brightgreen" 37 | else: 38 | return "red" 39 | 40 | 41 | def compute_badge_endpoint_data( 42 | line_rate: decimal.Decimal, 43 | color: str, 44 | ) -> str: 45 | badge = { 46 | "schemaVersion": 1, 47 | "label": "Coverage", 48 | "message": f"{int(line_rate)}%", 49 | "color": color, 
50 | } 51 | 52 | return json.dumps(badge) 53 | 54 | 55 | def compute_badge_image( 56 | line_rate: decimal.Decimal, color: str, http_session: httpx.Client 57 | ) -> str: 58 | return http_session.get( 59 | "https://img.shields.io/static/v1?" 60 | + urllib.parse.urlencode( 61 | { 62 | "label": "Coverage", 63 | "message": f"{int(line_rate)}%", 64 | "color": color, 65 | } 66 | ) 67 | ).text 68 | 69 | 70 | def get_static_badge_url(label: str, message: str, color: str) -> str: 71 | if not color or not message: 72 | raise ValueError("color and message are required") 73 | code = "-".join( 74 | e.replace("_", "__").replace("-", "--") for e in (label, message, color) if e 75 | ) 76 | return "https://img.shields.io/badge/" + urllib.parse.quote(f"{code}.svg") 77 | 78 | 79 | def get_endpoint_url(endpoint_url: str) -> str: 80 | return f"https://img.shields.io/endpoint?url={endpoint_url}" 81 | 82 | 83 | def get_dynamic_url(endpoint_url: str) -> str: 84 | return "https://img.shields.io/badge/dynamic/json?" 
+ urllib.parse.urlencode( 85 | { 86 | "color": "brightgreen", 87 | "label": "coverage", 88 | "query": "$.message", 89 | "url": endpoint_url, 90 | } 91 | ) 92 | -------------------------------------------------------------------------------- /coverage_comment/comment_file.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pathlib 4 | 5 | 6 | def store_file(filename: pathlib.Path, content: str): 7 | filename.write_text(content) 8 | -------------------------------------------------------------------------------- /coverage_comment/communication.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pathlib 4 | 5 | from coverage_comment import files, template 6 | 7 | 8 | def get_readme_and_log( 9 | image_urls: files.ImageURLs, 10 | readme_url: str, 11 | html_report_url: str, 12 | markdown_report: str, 13 | is_public: bool, 14 | subproject_id: str | None = None, 15 | ) -> tuple[files.WriteFile, str]: 16 | readme_markdown = template.get_readme_markdown( 17 | is_public=is_public, 18 | readme_url=readme_url, 19 | markdown_report=markdown_report, 20 | html_report_url=html_report_url, 21 | direct_image_url=image_urls["direct"], 22 | endpoint_image_url=image_urls["endpoint"], 23 | dynamic_image_url=image_urls["dynamic"], 24 | subproject_id=subproject_id, 25 | ) 26 | log_message = template.get_log_message( 27 | is_public=is_public, 28 | readme_url=readme_url, 29 | html_report_url=html_report_url, 30 | direct_image_url=image_urls["direct"], 31 | endpoint_image_url=image_urls["endpoint"], 32 | dynamic_image_url=image_urls["dynamic"], 33 | subproject_id=subproject_id, 34 | ) 35 | readme = files.WriteFile( 36 | path=pathlib.Path("README.md"), 37 | contents=readme_markdown, 38 | ) 39 | return readme, log_message 40 | -------------------------------------------------------------------------------- 
/coverage_comment/coverage.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import dataclasses 4 | import datetime 5 | import decimal 6 | import json 7 | import pathlib 8 | from collections.abc import Sequence 9 | 10 | from coverage_comment import log, subprocess 11 | 12 | 13 | # The dataclasses in this module are accessible in the template, which is overridable by the user. 14 | # As a coutesy, we should do our best to keep the existing fields for backward compatibility, 15 | # and if we really can't and can't add properties, at least bump the major version. 16 | @dataclasses.dataclass(kw_only=True) 17 | class CoverageMetadata: 18 | version: str 19 | timestamp: datetime.datetime 20 | branch_coverage: bool 21 | show_contexts: bool 22 | 23 | 24 | @dataclasses.dataclass(kw_only=True) 25 | class CoverageInfo: 26 | covered_lines: int 27 | num_statements: int 28 | percent_covered: decimal.Decimal 29 | missing_lines: int 30 | excluded_lines: int 31 | num_branches: int = 0 32 | num_partial_branches: int = 0 33 | covered_branches: int = 0 34 | missing_branches: int = 0 35 | 36 | 37 | @dataclasses.dataclass(kw_only=True) 38 | class FileCoverage: 39 | path: pathlib.Path 40 | executed_lines: list[int] 41 | missing_lines: list[int] 42 | excluded_lines: list[int] 43 | info: CoverageInfo 44 | executed_branches: list[list[int]] | None = None 45 | missing_branches: list[list[int]] | None = None 46 | 47 | 48 | @dataclasses.dataclass 49 | class Coverage: 50 | meta: CoverageMetadata 51 | info: CoverageInfo 52 | files: dict[pathlib.Path, FileCoverage] 53 | 54 | 55 | # The format for Diff Coverage objects may seem a little weird, because it 56 | # was originally copied from diff-cover schema. In order to keep the 57 | # compatibility for existing custom template, we kept the same format. 58 | # Maybe in v4, we can change it to a simpler format. 
59 | 60 | 61 | @dataclasses.dataclass(kw_only=True) 62 | class FileDiffCoverage: 63 | path: pathlib.Path 64 | percent_covered: decimal.Decimal 65 | covered_statements: list[int] 66 | missing_statements: list[int] 67 | added_statements: list[int] 68 | # Added lines tracks all the lines that were added in the diff, not just 69 | # the statements (so it includes comments, blank lines, etc.) 70 | added_lines: list[int] 71 | 72 | # for backward compatibility 73 | @property 74 | def violation_lines(self) -> list[int]: 75 | return self.missing_statements 76 | 77 | 78 | @dataclasses.dataclass(kw_only=True) 79 | class DiffCoverage: 80 | total_num_lines: int 81 | total_num_violations: int 82 | total_percent_covered: decimal.Decimal 83 | num_changed_lines: int 84 | files: dict[pathlib.Path, FileDiffCoverage] 85 | 86 | 87 | def compute_coverage( 88 | num_covered: int, 89 | num_total: int, 90 | num_branches_covered: int = 0, 91 | num_branches_total: int = 0, 92 | ) -> decimal.Decimal: 93 | """Compute the coverage percentage, with or without branch coverage.""" 94 | numerator = decimal.Decimal(num_covered + num_branches_covered) 95 | denominator = decimal.Decimal(num_total + num_branches_total) 96 | if denominator == 0: 97 | return decimal.Decimal("1") 98 | return numerator / denominator 99 | 100 | 101 | def get_coverage_info( 102 | merge: bool, coverage_path: pathlib.Path 103 | ) -> tuple[dict, Coverage]: 104 | try: 105 | if merge: 106 | subprocess.run("coverage", "combine", path=coverage_path) 107 | 108 | json_coverage = json.loads( 109 | subprocess.run("coverage", "json", "-o", "-", path=coverage_path) 110 | ) 111 | except subprocess.SubProcessError as exc: 112 | if "No source for code:" in str(exc): 113 | log.error( 114 | "Cannot read .coverage files because files are absolute. 
You need " 115 | "to configure coverage to write relative paths by adding the following " 116 | "option to your coverage configuration file:\n" 117 | "[run]\n" 118 | "relative_files = true\n\n" 119 | "Note that the specific format can be slightly different if you're using " 120 | "setup.cfg or pyproject.toml. See details in: " 121 | "https://coverage.readthedocs.io/en/latest/config.html#config-run-relative-files" 122 | ) 123 | raise 124 | 125 | return json_coverage, extract_info(data=json_coverage, coverage_path=coverage_path) 126 | 127 | 128 | def generate_coverage_html_files( 129 | destination: pathlib.Path, coverage_path: pathlib.Path 130 | ) -> None: 131 | subprocess.run( 132 | "coverage", 133 | "html", 134 | "--skip-empty", 135 | "--directory", 136 | str(destination), 137 | path=coverage_path, 138 | ) 139 | 140 | 141 | def generate_coverage_markdown(coverage_path: pathlib.Path) -> str: 142 | return subprocess.run( 143 | "coverage", 144 | "report", 145 | "--format=markdown", 146 | "--show-missing", 147 | path=coverage_path, 148 | ) 149 | 150 | 151 | def _make_coverage_info(data: dict) -> CoverageInfo: 152 | """Build a CoverageInfo object from a "summary" or "totals" key.""" 153 | return CoverageInfo( 154 | covered_lines=data["covered_lines"], 155 | num_statements=data["num_statements"], 156 | percent_covered=compute_coverage( 157 | num_covered=data["covered_lines"], 158 | num_total=data["num_statements"], 159 | num_branches_covered=data.get("covered_branches", 0), 160 | num_branches_total=data.get("num_branches", 0), 161 | ), 162 | missing_lines=data["missing_lines"], 163 | excluded_lines=data["excluded_lines"], 164 | num_branches=data.get("num_branches", 0), 165 | num_partial_branches=data.get("num_partial_branches", 0), 166 | covered_branches=data.get("covered_branches", 0), 167 | missing_branches=data.get("missing_branches", 0), 168 | ) 169 | 170 | 171 | def extract_info(data: dict, coverage_path: pathlib.Path) -> Coverage: 172 | """ 173 | { 174 | "meta": { 
def extract_info(data: dict, coverage_path: pathlib.Path) -> Coverage:
    """
    Transform a raw `coverage json` payload into a Coverage object.

    Expected payload shape (branch-related keys only present when branch
    coverage is enabled):
    {
        "meta": {"version": ..., "timestamp": ..., "branch_coverage": ...,
                 "show_contexts": ...},
        "files": {
            "codebase/code.py": {
                "executed_lines": [...],
                "summary": {"covered_lines": ..., "num_statements": ..., ...},
                "missing_lines": [...],
                "excluded_lines": [...],
            }
        },
        "totals": {"covered_lines": ..., "num_statements": ..., ...},
    }
    """
    meta = data["meta"]
    files: dict[pathlib.Path, FileCoverage] = {}
    for path, file_data in data["files"].items():
        # File paths in the report are relative to the coverage root.
        full_path = coverage_path / path
        files[full_path] = FileCoverage(
            path=full_path,
            excluded_lines=file_data["excluded_lines"],
            executed_lines=file_data["executed_lines"],
            missing_lines=file_data["missing_lines"],
            executed_branches=file_data.get("executed_branches"),
            missing_branches=file_data.get("missing_branches"),
            info=_make_coverage_info(file_data["summary"]),
        )
    return Coverage(
        meta=CoverageMetadata(
            version=meta["version"],
            timestamp=datetime.datetime.fromisoformat(meta["timestamp"]),
            branch_coverage=meta["branch_coverage"],
            show_contexts=meta["show_contexts"],
        ),
        files=files,
        info=_make_coverage_info(data["totals"]),
    )
def get_diff_coverage_info(
    added_lines: dict[pathlib.Path, list[int]], coverage: Coverage
) -> DiffCoverage:
    """Cross-reference added lines with coverage data, per file and overall."""
    files: dict[pathlib.Path, FileDiffCoverage] = {}
    total_lines = 0
    total_violations = 0
    changed_lines = 0

    for path, new_lines in added_lines.items():
        changed_lines += len(new_lines)

        file_coverage = coverage.files.get(path)
        if file_coverage is None:
            # File absent from the coverage report (e.g. not a measured file).
            continue

        new_set = set(new_lines)
        executed = set(file_coverage.executed_lines) & new_set
        missing = set(file_coverage.missing_lines) & new_set
        # Only lines that coverage knows about (executed or missing) count.
        considered = executed | missing

        total_lines += len(considered)
        total_violations += len(missing)

        files[path] = FileDiffCoverage(
            path=path,
            percent_covered=compute_coverage(
                num_covered=len(executed), num_total=len(considered)
            ),
            covered_statements=sorted(executed),
            missing_statements=sorted(missing),
            added_statements=sorted(considered),
            added_lines=new_lines,
        )

    return DiffCoverage(
        total_num_lines=total_lines,
        total_num_violations=total_violations,
        total_percent_covered=compute_coverage(
            num_covered=total_lines - total_violations, num_total=total_lines
        ),
        num_changed_lines=changed_lines,
        files=files,
    )
def get_added_lines(
    git: subprocess.Git, base_ref: str
) -> dict[pathlib.Path, list[int]]:
    """Diff HEAD against `base_ref` and map each file to its added line numbers."""
    # --unified=0 means no context lines and no merged chunks, so the hunk
    # headers alone are enough to derive which line numbers were added.
    git.fetch("origin", base_ref, "--depth=1000")
    return parse_diff_output(git.diff("--unified=0", "FETCH_HEAD", "--", "."))


def parse_diff_output(diff: str) -> dict[pathlib.Path, list[int]]:
    """Extract a mapping {file path: added line numbers} from a unified diff."""
    new_file_marker = "+++ b/"
    added: dict[pathlib.Path, list[int]] = {}
    current: pathlib.Path | None = None
    for raw_line in diff.splitlines():
        if raw_line.startswith(new_file_marker):
            current = pathlib.Path(raw_line.removeprefix(new_file_marker))
        elif raw_line.startswith("@@"):
            line_numbers = parse_line_number_diff_line(raw_line)
            if len(line_numbers) > 0:
                if current is None:
                    raise ValueError(f"Unexpected diff output format: \n{diff}")
                added.setdefault(current, []).extend(line_numbers)
    return added


def parse_line_number_diff_line(line: str) -> Sequence[int]:
    """
    Parse the "added" part of the line number diff text:
    @@ -60,0 +61 @@ def compute_files( -> [61]
    @@ -60,0 +61,3 @@ def compute_files( -> [61, 62, 63]
    """
    # Third token is "+start" or "+start,length"; length defaults to 1.
    added_part = line.split()[2].removeprefix("+")
    start_text, _, length_text = added_part.partition(",")
    start = int(start_text)
    length = int(length_text) if length_text else 1
    return range(start, start + length)
def get_diff_missing_groups(
    coverage: coverage_module.Coverage,
    diff_coverage: coverage_module.DiffCoverage,
) -> Iterable[groups.Group]:
    """Yield one annotation Group per contiguous run of missing diff lines."""
    for path, diff_file in diff_coverage.files.items():
        coverage_file = coverage.files[path]

        # Covered or excluded lines must break a group: they never fill a gap
        # between violation groups. Gap-filling candidates are missing lines
        # and non-code lines (blank lines or comments).
        separators = set(coverage_file.executed_lines) | set(
            coverage_file.excluded_lines
        )
        # Added lines may fill a gap, unless they are separators.
        joiners = set(diff_file.added_lines) - separators

        group_ranges = groups.compute_contiguous_groups(
            values=diff_file.missing_statements,
            separators=separators,
            joiners=joiners,
            max_gap=MAX_ANNOTATION_GAP,
        )
        for line_start, line_end in group_ranges:
            yield groups.Group(file=path, line_start=line_start, line_end=line_end)
@dataclasses.dataclass
class WriteFile:
    """Operation that writes `contents` to `path`."""

    path: pathlib.Path
    contents: str

    def apply(self):
        # Log only a short preview so huge payloads don't flood the log.
        preview_len = 50
        truncated = len(self.contents) > preview_len
        ellipsis = "..." if truncated else ""
        log.debug(f"Writing file {self.path} ({self.contents[:preview_len]}{ellipsis})")
        self.path.write_text(self.contents)


@dataclasses.dataclass
class ReplaceDir:
    """
    Deletes the dir at `path`, then copies the dir from source to destination
    """

    source: pathlib.Path
    path: pathlib.Path

    def apply(self):
        if self.path.exists():
            log.debug(f"Deleting {self.path}")
            shutil.rmtree(self.path)
        log.debug(f"Moving {self.source} to {self.path}")
        shutil.move(self.source, self.path)


def compute_files(
    line_rate: decimal.Decimal,
    raw_coverage_data: dict,
    coverage_path: pathlib.Path,
    minimum_green: decimal.Decimal,
    minimum_orange: decimal.Decimal,
    http_session: httpx.Client,
) -> list[Operation]:
    """Build the write operations for the endpoint, data and badge files."""
    # Badge/datafile APIs work with percentages, not ratios.
    rate_percent = line_rate * decimal.Decimal("100")
    color = badge.get_badge_color(
        rate=rate_percent,
        minimum_green=minimum_green,
        minimum_orange=minimum_orange,
    )
    endpoint_op = WriteFile(
        path=ENDPOINT_PATH,
        contents=badge.compute_badge_endpoint_data(
            line_rate=rate_percent, color=color
        ),
    )
    data_op = WriteFile(
        path=DATA_PATH,
        contents=compute_datafile(
            raw_coverage_data=raw_coverage_data,
            line_rate=rate_percent,
            coverage_path=coverage_path,
        ),
    )
    badge_op = WriteFile(
        path=BADGE_PATH,
        contents=badge.compute_badge_image(
            line_rate=rate_percent, color=color, http_session=http_session
        ),
    )
    return [endpoint_op, data_op, badge_op]


def compute_datafile(
    raw_coverage_data: dict, line_rate: decimal.Decimal, coverage_path: pathlib.Path
) -> str:
    """Serialize the coverage rate and raw report into the stored JSON datafile."""
    payload = {
        "coverage": float(line_rate),
        "raw_data": raw_coverage_data,
        "coverage_path": str(coverage_path),
    }
    return json.dumps(payload)
def parse_datafile(contents) -> tuple[coverage.Coverage | None, decimal.Decimal]:
    """
    Deserialize the stored datafile.

    Returns (Coverage object, rate as a 0-1 ratio). The Coverage object is
    None when the datafile lacks raw report data (older action versions).
    """
    decoded = json.loads(contents)
    rate = decimal.Decimal(str(decoded["coverage"])) / decimal.Decimal("100")
    try:
        parsed_coverage = coverage.extract_info(
            data=decoded["raw_data"],
            coverage_path=pathlib.Path(decoded["coverage_path"]),
        )
    except KeyError:
        return None, rate
    return parsed_coverage, rate


class ImageURLs(TypedDict):
    direct: str
    endpoint: str
    dynamic: str


def get_urls(url_getter: Callable) -> ImageURLs:
    """Build the three badge-image URL flavors from a URL getter."""
    urls: ImageURLs = {
        "direct": url_getter(path=BADGE_PATH),
        "endpoint": badge.get_endpoint_url(endpoint_url=url_getter(path=ENDPOINT_PATH)),
        "dynamic": badge.get_dynamic_url(endpoint_url=url_getter(path=ENDPOINT_PATH)),
    }
    return urls
def get_coverage_html_files(
    *, coverage_path: pathlib.Path, gen_dir: pathlib.Path = pathlib.Path("/tmp")
) -> ReplaceDir:
    """Generate the HTML report in a temp dir and return the op installing it."""
    html_dir = pathlib.Path(tempfile.mkdtemp(dir=gen_dir))
    coverage.generate_coverage_html_files(
        destination=html_dir, coverage_path=coverage_path
    )
    # Coverage may or may not create a .gitignore; drop it either way so the
    # generated report can be committed.
    (html_dir / ".gitignore").unlink(missing_ok=True)
    return ReplaceDir(source=html_dir, path=pathlib.Path("htmlcov"))


@dataclasses.dataclass
class RepositoryInfo:
    """Subset of the GitHub repository payload that the action needs."""

    default_branch: str
    visibility: str

    def is_default_branch(self, ref: str) -> bool:
        """Whether the fully-formed `ref` points at the default branch."""
        return ref == f"refs/heads/{self.default_branch}"

    def is_public(self) -> bool:
        """Whether the repository is publicly visible."""
        return self.visibility == "public"


def get_repository_info(
    github: github_client.GitHub, repository: str
) -> RepositoryInfo:
    """Fetch default branch and visibility for `repository` from the API."""
    payload = github.repos(repository).get()
    return RepositoryInfo(
        default_branch=payload.default_branch,
        visibility=payload.visibility,
    )
def download_artifact(
    github: github_client.GitHub,
    repository: str,
    artifact_name: str,
    run_id: int,
    filename: pathlib.Path,
) -> str:
    """
    Download `filename` from the named artifact of the given workflow run.

    Raises NoArtifact when the artifact, or the file within it, is missing.
    """
    repo_path = github.repos(repository)

    try:
        artifact = next(
            artifact
            for artifact in _fetch_artifacts(repo_path, run_id)
            if artifact.name == artifact_name
        )
    except StopIteration:
        raise NoArtifact(f"No artifact found with name {artifact_name} in run {run_id}")

    zip_bytes = io.BytesIO(repo_path.actions.artifacts(artifact.id).zip.get(bytes=True))
    # Close the archive and the member file deterministically instead of
    # leaking them until garbage collection.
    with zipfile.ZipFile(zip_bytes) as archive:
        try:
            with archive.open(str(filename), "r") as member:
                return member.read().decode("utf-8")
        except KeyError:
            # Name the missing file so the log points at the actual problem.
            raise NoArtifact(
                f"File named {filename} not found in artifact {artifact_name}"
            )


def _fetch_artifacts(repo_path, run_id):
    """Yield every artifact of the run, paging through the list API."""
    page = 1
    fetched = 0

    while True:
        result = repo_path.actions.runs(run_id).artifacts.get(page=str(page))
        if not result or not result.artifacts:
            break

        yield from result.artifacts

        fetched += len(result.artifacts)
        if fetched >= result.total_count:
            break
        page += 1


def get_branch_from_workflow_run(
    github: github_client.GitHub, repository: str, run_id: int
) -> tuple[str, str]:
    """Return (head repository owner login, head branch) for a workflow run."""
    run = github.repos(repository).actions.runs(run_id).get()
    return run.head_repository.owner.login, run.head_branch


def find_pr_for_branch(
    github: github_client.GitHub, repository: str, owner: str, branch: str
) -> int:
    """
    Return the number of the most recently updated PR for `owner`'s `branch`,
    preferring open PRs. Raises CannotDeterminePR when no PR matches.
    """
    # The full branch is in the form of "owner:branch" as specified in
    # https://docs.github.com/en/rest/pulls/pulls?apiVersion=2022-11-28#list-pull-requests
    # but it seems to also work with "owner/repo:branch"
    common_kwargs = {
        "head": f"{owner}:{branch}",
        "sort": "updated",
        "direction": "desc",
    }

    for state in ("open", "all"):
        for pr in github.repos(repository).pulls.get(state=state, **common_kwargs):
            return pr.number
        if state == "open":
            log.info(f"No open PR found for branch {branch}, defaulting to all PRs")

    raise CannotDeterminePR(f"No open PR found for branch {branch}")
def get_my_login(github: github_client.GitHub) -> str:
    """Return the login of the authenticated user (or the GitHub Actions bot)."""
    try:
        user = github.user.get()
    except github_client.Forbidden:
        # The GitHub actions user cannot access its own details and I'm not
        # sure there's a way to see that we're using the GitHub actions user
        # except noting that it fails
        return GITHUB_ACTIONS_LOGIN
    return user.login


def post_comment(
    github: github_client.GitHub,
    me: str,
    repository: str,
    pr_number: int,
    contents: str,
    marker: str,
) -> None:
    """Create or update the marker-identified coverage comment on the PR."""
    issue_comments_path = github.repos(repository).issues(pr_number).comments
    comments_path = github.repos(repository).issues.comments

    # Look for a previous comment of ours (recognized by the marker) to update.
    previous_id = None
    for comment in issue_comments_path.get():
        if comment.user.login == me and marker in comment.body:
            previous_id = comment.id
            break

    try:
        if previous_id is not None:
            log.info("Update previous comment")
            comments_path(previous_id).patch(body=contents)
        else:
            log.info("Adding new comment")
            issue_comments_path.post(body=contents)
    except github_client.Forbidden as exc:
        raise CannotPostComment from exc


def set_output(github_output: pathlib.Path | None, **kwargs: bool) -> None:
    """Append `key=json(value)` lines to the GITHUB_OUTPUT file, if defined."""
    if not github_output:
        return
    lines = [f"{key}={json.dumps(value)}\n" for key, value in kwargs.items()]
    with github_output.open("a") as f:
        f.writelines(lines)


def escape_property(s: str) -> str:
    """Escape a workflow-command property value (order of rules matters: % first)."""
    replacements = (
        ("%", "%25"),
        ("\r", "%0D"),
        ("\n", "%0A"),
        (":", "%3A"),
        (",", "%2C"),
    )
    for old, new in replacements:
        s = s.replace(old, new)
    return s


def escape_data(s: str) -> str:
    """Escape a workflow-command data value (order of rules matters: % first)."""
    for old, new in (("%", "%25"), ("\r", "%0D"), ("\n", "%0A")):
        s = s.replace(old, new)
    return s
def get_workflow_command(command: str, command_value: str, **kwargs: str) -> str:
    """
    Returns a string that can be printed to send a workflow command
    https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions
    """
    properties = ",".join(
        f"{key}={escape_property(value)}" for key, value in kwargs.items()
    )
    context = f" {properties}" if properties else ""
    return f"::{command}{context}::{escape_data(command_value)}"


def send_workflow_command(command: str, command_value: str, **kwargs: str) -> None:
    """Print a workflow command on stderr, where the runner picks it up."""
    line = get_workflow_command(command=command, command_value=command_value, **kwargs)
    print(line, file=sys.stderr)


def create_missing_coverage_annotations(
    annotation_type: str, annotations: list[tuple[pathlib.Path, int, int]]
):
    """
    Create annotations for lines with missing coverage.

    annotation_type: The type of annotation to create. Can be either "error" or "warning".
    annotations: A list of tuples of the form (file, line_start, line_end)
    """
    send_workflow_command(
        command="group", command_value="Annotations of lines with missing coverage"
    )
    for file, line_start, line_end in annotations:
        single_line = line_start == line_end
        if single_line:
            message = f"Missing coverage on line {line_start}"
        else:
            message = f"Missing coverage on lines {line_start}-{line_end}"

        send_workflow_command(
            command=annotation_type,
            command_value=message,
            # This will produce \ paths when running on windows.
            # GHA doc is unclear whether this is right or not.
            file=str(file),
            line=str(line_start),
            endLine=str(line_end),
            title="Missing coverage",
        )
    send_workflow_command(command="endgroup", command_value="")
242 | file=str(file), 243 | line=str(line_start), 244 | endLine=str(line_end), 245 | title="Missing coverage", 246 | ) 247 | send_workflow_command(command="endgroup", command_value="") 248 | 249 | 250 | def append_to_file(content: str, filepath: pathlib.Path): 251 | with filepath.open(mode="a") as file: 252 | file.write(content) 253 | 254 | 255 | def add_job_summary(content: str, github_step_summary: pathlib.Path): 256 | append_to_file(content=content, filepath=github_step_summary) 257 | -------------------------------------------------------------------------------- /coverage_comment/github_client.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | From: https://github.com/michaelliao/githubpy/blob/96d0c3e729c0b3e3c043a604547ccff17782ac2b/github.py 5 | GitHub API Python SDK. (Python >= 2.6) 6 | Apache License 7 | Michael Liao (askxuefeng@gmail.com) 8 | License: https://github.com/michaelliao/githubpy/blob/96d0c3e729c0b3e3c043a604547ccff17782ac2b/LICENSE.txt 9 | """ 10 | 11 | from __future__ import annotations 12 | 13 | __version__ = "1.1.1" 14 | 15 | import httpx 16 | 17 | TIMEOUT = 60 18 | 19 | _URL = "https://api.github.com" 20 | 21 | 22 | class _Executable: 23 | def __init__(self, _gh, _method, _path): 24 | self._gh = _gh 25 | self._method = _method 26 | self._path = _path 27 | 28 | def __call__(self, **kw): 29 | return self._gh._http(self._method, self._path, **kw) 30 | 31 | 32 | class _Callable: 33 | def __init__(self, _gh, _name): 34 | self._gh = _gh 35 | self._name = _name 36 | 37 | def __call__(self, *args): 38 | if len(args) == 0: 39 | return self 40 | name = "{}/{}".format(self._name, "/".join([str(arg) for arg in args])) 41 | return _Callable(self._gh, name) 42 | 43 | def __getattr__(self, attr): 44 | if attr in ["get", "put", "post", "patch", "delete"]: 45 | return _Executable(self._gh, attr, self._name) 46 | name = f"{self._name}/{attr}" 47 | return _Callable(self._gh, name) 48 
class GitHub:
    """
    GitHub client.
    """

    def __init__(self, session: httpx.Client):
        self.session = session

    def __getattr__(self, attr):
        # Any attribute starts a lazily built API path.
        return _Callable(self, f"/{attr}")

    def _http(
        self,
        method: str,
        path: str,
        *,
        bytes: bool = False,
        headers: dict[str, str] | None = None,
        **kw,
    ):
        verb = method.lower()
        request_kwargs: dict = {}
        if headers:
            request_kwargs["headers"] = headers
        if verb == "get" and kw:
            # Query-string parameters for GET requests.
            request_kwargs["params"] = kw
        elif verb in ("post", "patch", "put"):
            # JSON body for write requests (sent even when empty).
            request_kwargs["json"] = kw

        response = self.session.request(
            verb.upper(),
            path,
            timeout=TIMEOUT,
            **request_kwargs,
        )

        contents = response.content if bytes else response_contents(response)

        try:
            response.raise_for_status()
        except httpx.HTTPStatusError as exc:
            error_class: type[ApiError] = {
                403: Forbidden,
                404: NotFound,
            }.get(exc.response.status_code, ApiError)
            raise error_class(str(contents)) from exc

        return contents


def response_contents(
    response: httpx.Response,
) -> JsonObject | str | bytes:
    """Decode by content type: JSON -> JsonObject, raw github JSON -> str, else bytes."""
    content_type = response.headers.get("content-type", "")
    if content_type.startswith("application/json"):
        return response.json(object_hook=JsonObject)
    if content_type.startswith("application/vnd.github.raw+json"):
        return response.text
    return response.content
class JsonObject(dict):
    """
    general json object that can bind any fields but also act as a dict.
    """

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(rf"'Dict' object has no attribute '{key}'")


class ApiError(Exception):
    """Base error for GitHub API failures."""


class NotFound(ApiError):
    """HTTP 404 responses."""


class Forbidden(ApiError):
    """HTTP 403 responses."""


@dataclasses.dataclass(frozen=True)
class Group:
    """A contiguous range of lines in one file, backing a single annotation."""

    file: pathlib.Path
    line_start: int
    line_end: int
def compute_contiguous_groups(
    values: list[int], separators: set[int], joiners: set[int], max_gap: int
) -> list[tuple[int, int]]:
    """
    Given a list of (sorted) values, a set of separators and a set of joiners,
    return a list of (start, included end) ranges describing groups of values.

    Contiguous values are grouped together. Two neighboring groups are merged,
    enclosing the gap between them, when the gap is small (<= max_gap values
    after removing all joiners) and contains no separator.
    """
    # Step 1: build runs of strictly consecutive values.
    runs: list[tuple[int, int]] = []
    for value in values:
        if runs and value == runs[-1][1] + 1:
            runs[-1] = (runs[-1][0], value)
        else:
            runs.append((value, value))

    # Step 2: merge neighboring runs across small, separator-free gaps.
    merged: list[tuple[int, int]] = []
    for start, end in runs:
        if merged:
            previous_start, previous_end = merged[-1]
            gap = set(range(previous_end + 1, start)) - joiners
            if len(gap) <= max_gap and not (gap & separators):
                merged[-1] = (previous_start, end)
                continue
        merged.append((start, end))
    return merged
class GitHubFormatter(logging.Formatter):
    """Render log records as GitHub workflow commands at the matching level."""

    def format(self, record):
        message = super().format(record)
        level = LEVEL_MAPPING[record.levelno]
        return github.get_workflow_command(command=level, command_value=message)


class MissingEnvironmentVariable(Exception):
    """A required configuration variable is absent from the environment."""


class InvalidAnnotationType(Exception):
    """ANNOTATION_TYPE is not one of notice/warning/error."""


def path_below(path_str: str | pathlib.Path) -> pathlib.Path:
    """
    Resolve `path_str` and return it relative to the current directory.

    Raises ValueError when the path escapes the current directory.
    """
    try:
        return pathlib.Path(path_str).resolve().relative_to(pathlib.Path.cwd())
    except ValueError as exc:
        raise ValueError(
            "Path needs to be relative and below the current directory"
        ) from exc


def str_to_bool(value: str) -> bool:
    """Interpret "1"/"true"/"yes" (case-insensitive) as True, anything else as False."""
    return value.lower() in ("1", "true", "yes")
47 | # (from https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables ) 48 | GITHUB_REF: str 49 | GITHUB_EVENT_NAME: str 50 | GITHUB_PR_RUN_ID: int | None 51 | GITHUB_STEP_SUMMARY: pathlib.Path 52 | COMMENT_TEMPLATE: str | None = None 53 | COVERAGE_DATA_BRANCH: str = "python-coverage-comment-action-data" 54 | COVERAGE_PATH: pathlib.Path = pathlib.Path(".") 55 | COMMENT_ARTIFACT_NAME: str = "python-coverage-comment-action" 56 | COMMENT_FILENAME: pathlib.Path = pathlib.Path("python-coverage-comment-action.txt") 57 | SUBPROJECT_ID: str | None = None 58 | GITHUB_OUTPUT: pathlib.Path | None = None 59 | MINIMUM_GREEN: decimal.Decimal = decimal.Decimal("100") 60 | MINIMUM_ORANGE: decimal.Decimal = decimal.Decimal("70") 61 | MERGE_COVERAGE_FILES: bool = False 62 | ANNOTATE_MISSING_LINES: bool = False 63 | ANNOTATION_TYPE: str = "warning" 64 | MAX_FILES_IN_COMMENT: int = 25 65 | VERBOSE: bool = False 66 | # Only for debugging, not exposed in the action: 67 | FORCE_WORKFLOW_RUN: bool = False 68 | 69 | # Clean methods 70 | @classmethod 71 | def clean_minimum_green(cls, value: str) -> decimal.Decimal: 72 | return decimal.Decimal(value) 73 | 74 | @classmethod 75 | def clean_minimum_orange(cls, value: str) -> decimal.Decimal: 76 | return decimal.Decimal(value) 77 | 78 | @classmethod 79 | def clean_github_pr_run_id(cls, value: str) -> int | None: 80 | return int(value) if value else None 81 | 82 | @classmethod 83 | def clean_github_step_summary(cls, value: str) -> pathlib.Path: 84 | return pathlib.Path(value) 85 | 86 | @classmethod 87 | def clean_merge_coverage_files(cls, value: str) -> bool: 88 | return str_to_bool(value) 89 | 90 | @classmethod 91 | def clean_annotate_missing_lines(cls, value: str) -> bool: 92 | return str_to_bool(value) 93 | 94 | @classmethod 95 | def clean_annotation_type(cls, value: str) -> str: 96 | if value not in {"notice", "warning", "error"}: 97 | raise InvalidAnnotationType( 98 | f"The annotation type {value} is 
not valid. Please choose from notice, warning or error" 99 | ) 100 | return value 101 | 102 | @classmethod 103 | def clean_verbose(cls, value: str) -> bool: 104 | if str_to_bool(value): 105 | log.info( 106 | "VERBOSE setting is deprecated. For increased debug output, see https://docs.github.com/en/actions/monitoring-and-troubleshooting-workflows/enabling-debug-logging" 107 | ) 108 | return False 109 | 110 | @classmethod 111 | def clean_force_workflow_run(cls, value: str) -> bool: 112 | return str_to_bool(value) 113 | 114 | @classmethod 115 | def clean_comment_filename(cls, value: str) -> pathlib.Path: 116 | return path_below(value) 117 | 118 | @classmethod 119 | def clean_coverage_path(cls, value: str) -> pathlib.Path: 120 | return path_below(value) 121 | 122 | @classmethod 123 | def clean_github_output(cls, value: str) -> pathlib.Path: 124 | return pathlib.Path(value) 125 | 126 | @property 127 | def GITHUB_PR_NUMBER(self) -> int | None: 128 | # "refs/pull/2/merge" 129 | if self.GITHUB_REF.startswith("refs/pull"): 130 | return int(self.GITHUB_REF.split("/")[2]) 131 | return None 132 | 133 | @property 134 | def GITHUB_BRANCH_NAME(self) -> str | None: 135 | # "refs/heads/my_branch_name" 136 | if self.GITHUB_REF.startswith("refs/heads"): 137 | return self.GITHUB_REF.split("/", 2)[2] 138 | return None 139 | 140 | @property 141 | def FINAL_COMMENT_FILENAME(self): 142 | filename = self.COMMENT_FILENAME 143 | if self.SUBPROJECT_ID: 144 | new_name = f"{filename.stem}-{self.SUBPROJECT_ID}{filename.suffix}" 145 | return filename.parent / new_name 146 | return filename 147 | 148 | @property 149 | def FINAL_COVERAGE_DATA_BRANCH(self): 150 | return self.COVERAGE_DATA_BRANCH + ( 151 | f"-{self.SUBPROJECT_ID}" if self.SUBPROJECT_ID else "" 152 | ) 153 | 154 | # We need to type environ as a MutableMapping because that's what 155 | # os.environ is, and just saying `dict[str, str]` is not enough to make 156 | # mypy happy 157 | @classmethod 158 | def from_environ(cls, environ: 
MutableMapping[str, str]) -> Config: 159 | possible_variables = [e for e in inspect.signature(cls).parameters] 160 | config: dict[str, Any] = { 161 | k: v for k, v in environ.items() if k in possible_variables 162 | } 163 | for key, value in list(config.items()): 164 | if func := getattr(cls, f"clean_{key.lower()}", None): 165 | try: 166 | config[key] = func(value) 167 | except ValueError as exc: 168 | raise ValueError(f"{key}: {exc!s}") from exc 169 | 170 | try: 171 | config_obj = cls(**config) 172 | except TypeError: 173 | missing = { 174 | name 175 | for name, param in inspect.signature(cls).parameters.items() 176 | if param.default is inspect.Parameter.empty 177 | } - set(environ) 178 | raise MissingEnvironmentVariable( 179 | f" missing environment variable(s): {', '.join(missing)}" 180 | ) 181 | return config_obj 182 | -------------------------------------------------------------------------------- /coverage_comment/storage.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import contextlib 4 | import pathlib 5 | 6 | from coverage_comment import files, github_client, log, subprocess 7 | 8 | GITHUB_ACTIONS_BOT_NAME = "github-actions" 9 | # A discussion pointing at the email address of the github-actions bot user; 10 | # https://github.community/t/github-actions-bot-email-address/17204/5 11 | # To double-check, the bot's ID can be found at: 12 | # https://api.github.com/users/github-actions[bot] 13 | # The rule for creating the address can be found at: 14 | # https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-personal-account-on-github/managing-email-preferences/setting-your-commit-email-address#about-commit-email-addresses 15 | GITHUB_ACTIONS_BOT_EMAIL = "41898282+github-actions[bot]@users.noreply.github.com" 16 | 17 | # Both Author and Committer identification are needed for git to let us commit 18 | # (usually, both are derived from `git config 
user.{name|email}`)
COMMIT_ENVIRONMENT = {
    "GIT_AUTHOR_NAME": GITHUB_ACTIONS_BOT_NAME,
    "GIT_AUTHOR_EMAIL": GITHUB_ACTIONS_BOT_EMAIL,
    "GIT_COMMITTER_NAME": GITHUB_ACTIONS_BOT_NAME,
    "GIT_COMMITTER_EMAIL": GITHUB_ACTIONS_BOT_EMAIL,
}
GIT_COMMIT_MESSAGE = "Update coverage data"


@contextlib.contextmanager
def checked_out_branch(git: subprocess.Git, branch: str):
    """
    Context manager: switch to `branch` (fetching it from origin, or creating
    it as an orphan branch if it doesn't exist there), yield, then switch back
    to the original checkout.

    Warning: any pending change in the working tree is discarded
    (`git reset --hard`) before switching.
    """
    # If we're not on a branch, `git branch --show-current` will print nothing
    # and still exit with 0.
    current_checkout = git.branch("--show-current").strip()
    is_on_a_branch = bool(current_checkout)
    if not is_on_a_branch:
        # Detached HEAD: remember the commit hash so we can come back to it.
        current_checkout = git.rev_parse("--short", "HEAD").strip()

    log.debug(f"Current checkout is {current_checkout}")

    log.debug("Resetting all changes")
    # Goodbye `.coverage` file.
    git.reset("--hard")

    try:
        git.fetch("origin", branch)
    except subprocess.SubProcessError:
        # Branch seems to not exist, OR fetch failed for a different reason.
        # Let's make sure:
        # 1/ Fetch again, but this time all the remote
        git.fetch("origin")
        # 2/ And check that our branch really doesn't exist
        try:
            git.rev_parse("--verify", f"origin/{branch}")
        except subprocess.SubProcessError:
            # Ok good, the branch really doesn't exist.
            pass
        else:
            # Ok, our branch exists, but we failed to fetch it. Let's raise.
            raise
        log.debug(f"Branch {branch} doesn't exist.")
        log.info(f"Creating branch {branch}")
        # --orphan: start the branch with no history, detached from HEAD.
        git.switch("--orphan", branch)
    else:
        log.debug(f"Branch {branch} exist.")
        git.switch(branch)

    try:
        yield
    finally:
        log.debug(f"Back to checkout of {current_checkout}")
        # Switching back to a bare commit hash requires --detach.
        detach = ["--detach"] if not is_on_a_branch else []
        git.switch(*detach, current_checkout)


def commit_operations(
    operations: list[files.Operation],
    git: subprocess.Git,
    branch: str,
):
    """
    Store the given files.

    Parameters
    ----------
    operations : list[files.Operation]
        File operations to process
    git : subprocess.Git
        Git actor
    branch : str
        branch on which to store the files
    """
    with checked_out_branch(git=git, branch=branch):
        for op in operations:
            op.apply()
            git.add(str(op.path))

        try:
            git.diff("--staged", "--exit-code")
        except subprocess.GitError:
            pass  # All good, command returns 1 if there's diff, 0 otherwise
        else:
            # Exit code 0: nothing staged differs; don't create empty commits.
            log.info("No change detected, skipping.")
            return

        log.info("Saving coverage files")
        git.commit(
            "--message",
            GIT_COMMIT_MESSAGE,
            # Commit as the github-actions bot (author AND committer).
            env=COMMIT_ENVIRONMENT,
        )
        git.push("origin", branch)

        log.info("Files saved")


def get_datafile_contents(
    github: github_client.GitHub,
    repository: str,
    branch: str,
) -> str | None:
    """Return the raw coverage datafile from `branch`, or None if absent."""
    contents_path = github.repos(repository).contents(str(files.DATA_PATH))
    try:
        response = contents_path.get(
            ref=branch,
            # If we don't pass this header, the format of the answer will depend on
            # the size of the file. With the header, we're sure to get the raw content.
            headers={"Accept": "application/vnd.github.raw+json"},
        )
    except github_client.NotFound:
        # 404: the datafile simply doesn't exist on that branch (yet).
        return None

    return response


def get_raw_file_url(
    repository: str,
    branch: str,
    path: pathlib.Path,
    is_public: bool,
) -> str:
    """Return a URL serving the raw contents of `path` on `branch`."""
    if not is_public:
        # If the repository is private, then the real links to raw.githubusercontents.com
        # will be short-lived. In this case, it's better to keep an URL that will
        # redirect to the correct URL just when asked.
        return f"https://github.com/{repository}/raw/{branch}/{path}"

    # Otherwise, we can access the file directly. (shields.io doesn't like the
    # github.com domain)
    return f"https://raw.githubusercontent.com/{repository}/{branch}/{path}"

    # Another way of accessing the URL would be
    # github.repos(repository).contents(str(path)).get(ref=branch).download_url
    # but this would only work if the file already exists when generating this URL,
    # and for private repos, it would create URLs that become inactive after a few
    # seconds.


def get_repo_file_url(repository: str, branch: str, path: str = "/") -> str:
    """
    Computes the GitHub Web UI URL for a given path:
    If the path is empty or ends with a slash, it will be interpreted as a folder,
    so the URL will point to the page listing files and displaying the README.
    Otherwise, the URL will point to the page displaying the file contents within
    the UI.
    Leading and trailing slashes in path are removed from the final URL.
    """
    # See test_get_repo_file_url for precise specifications
    path = "/" + path.lstrip("/")
    # "tree" pages list a folder; "blob" pages display a single file.
    part = "tree" if path.endswith("/") else "blob"
    return f"https://github.com/{repository}/{part}/{branch}{path}".rstrip("/")


def get_html_report_url(repository: str, branch: str) -> str:
    """URL rendering the stored htmlcov report through htmlpreview.github.io."""
    readme_url = get_repo_file_url(
        repository=repository, branch=branch, path="/htmlcov/index.html"
    )
    return f"https://htmlpreview.github.io/?{readme_url}"
--------------------------------------------------------------------------------
/coverage_comment/subprocess.py:
--------------------------------------------------------------------------------
from __future__ import annotations

import functools
import os
import pathlib
import subprocess
from typing import Any

from coverage_comment import log


class SubProcessError(Exception):
    # Base error for any failed subprocess run in this module.
    pass


class GitError(SubProcessError):
    # Raised specifically when a `git` invocation fails.
    pass


def run(*args, path: pathlib.Path, **kwargs) -> str:
    """
    Run `args` as a command in directory `path` and return its stdout.

    Raises SubProcessError (carrying stderr then stdout) on non-zero exit.
    """
    try:
        return subprocess.run(
            args,
            cwd=path,
            text=True,
            # Only relates to DecodeErrors while decoding the output
            errors="replace",
            check=True,
            capture_output=True,
            **kwargs,
        ).stdout
    except subprocess.CalledProcessError as exc:
        log.debug(
            f"Command failed: {args=} {path=} {kwargs=} {exc.stderr=} {exc.returncode=}"
        )
        raise SubProcessError("\n".join([exc.stderr, exc.stdout])) from exc


class Git:
    """
    Wrapper around calling git subprocesses in a way that reads a tiny bit like
    Python code.
    Call a method on git to call the corresponding subcommand (use `_` for `-`).
    Add string parameters for the rest of the command line.

    Returns stdout or raises GitError.

    >>> git = Git()
    >>> git.clone(url)
    >>> git.commit("-m", message)
    >>> git.rev_parse("--short", "HEAD")
    """

    # Repository root; all git commands run from here.
    cwd = pathlib.Path(".")

    def _git(self, *args: str, env: dict[str, str] | None = None, **kwargs) -> str:
        # When setting the `env` argument to run, instead of inheriting env
        # vars from the current process, the whole environment of the
        # subprocess is whatever we pass. In other words, we can either
        # conditionally pass an `env` parameter, but it's less readable,
        # or we can always pass an `env` parameter, but in this case, we
        # need to always merge `os.environ` to it (and ensure our variables
        # have precedence)
        try:
            return run(
                "git",
                *args,
                path=self.cwd,
                env=os.environ | (env or {}),
                **kwargs,
            )
        except SubProcessError as exc:
            raise GitError from exc

    def __getattr__(self, name: str) -> Any:
        # git.rev_parse(...) -> `git rev-parse ...`
        return functools.partial(self._git, name.replace("_", "-"))
--------------------------------------------------------------------------------
/coverage_comment/template.py:
--------------------------------------------------------------------------------
from __future__ import annotations

import dataclasses
import decimal
import functools
import hashlib
import itertools
import pathlib
from collections.abc import Callable
from importlib import resources

import jinja2
from jinja2.sandbox import SandboxedEnvironment

from coverage_comment import badge, diff_grouper
from coverage_comment import coverage as coverage_module

MARKER = (
    # NOTE(review): this literal reads as an empty string in this view; the
    # marker content (an HTML comment with an `{id_part}` placeholder, used
    # by get_marker().format(...)) appears to have been stripped from this
    # dump — confirm against the original file before relying on it.
    """"""
)


def uptodate():
    # Loader "uptodate" callback: templates held in memory never go stale.
    return True


class CommentLoader(jinja2.BaseLoader):
    # Serves the bundled "base" template and an optional user "custom" one.
    def __init__(
        self, base_template: str, custom_template: str | None, debug: bool = False
    ):
        self.base_template = base_template
        self.custom_template = custom_template

    def get_source(
        self, environment: jinja2.Environment, template: str
    ) -> tuple[str, str | None, Callable[..., bool]]:
        """
        Resolve "base" to the bundled comment template and "custom" to the
        user-supplied one (when provided); any other name is TemplateNotFound.
        """
        if template == "base":
            return (
                self.base_template,
                # Filename reported in tracebacks for the bundled template.
                "coverage_comment/template_files/comment.md.j2",
                uptodate,
            )

        if self.custom_template and template == "custom":
            return self.custom_template, None, uptodate

        raise jinja2.TemplateNotFound(template)


class MissingMarker(Exception):
    # Rendered comment doesn't contain the identifying marker.
    pass


class TemplateError(Exception):
    # Wraps any jinja2 TemplateError raised while rendering the comment.
    pass


def get_marker(marker_id: str | None):
    """Return the comment marker, with ``(id: ...)`` inserted when given."""
    return MARKER.format(id_part=f" (id: {marker_id})" if marker_id else "")


def pluralize(number, singular="", plural="s"):
    # Jinja filter: choose singular/plural suffix for `number`.
    if number == 1:
        return singular
    else:
        return plural


def sign(val: int | decimal.Decimal) -> str:
    # "+" for positive, "" for negative (the minus sign comes from the
    # number itself), "±" for zero.
    return "+" if val > 0 else "" if val < 0 else "±"


def delta(val: int) -> str:
    # Jinja filter: render a difference, e.g. 3 -> "(+3)", 0 -> "(±0)".
    return f"({sign(val)}{val})"


def compact(val: int) -> str:
    """Abbreviate a count for badges: 1234 -> "1.2k", 1234567 -> "1M"."""
    if val < 1_000:
        return str(val)
    if val < 10_000:
        return f"{val / 1_000:.1f}k"
    if val < 1_000_000:
        return f"{val / 1_000:.0f}k"
    return f"{val / 1_000_000:.0f}M"


def remove_exponent(val: decimal.Decimal) -> decimal.Decimal:
    # From https://docs.python.org/3/library/decimal.html#decimal-faq
    return (
        val.quantize(decimal.Decimal(1))
        if val == val.to_integral()
        else val.normalize()
    )


def percentage_value(val: decimal.Decimal, precision: int = 2) -> decimal.Decimal:
    # Convert a 0-1 ratio to a 0-100 value, truncated (ROUND_DOWN) to
    # `precision` decimal places, with trailing zeros removed.
    return remove_exponent(
        (decimal.Decimal("100") * val).quantize(
            decimal.Decimal("1."
 + ("0" * precision)),
            rounding=decimal.ROUND_DOWN,
        )
    )


def pct(val: decimal.Decimal, precision: int = 2) -> str:
    """Jinja filter: format a 0-1 ratio as a percentage string, e.g. "85.71%"."""
    rounded = percentage_value(val=val, precision=precision)
    return f"{rounded:f}%"


def x100(val: decimal.Decimal):
    # Jinja filter: scale a 0-1 ratio to 0-100.
    return val * 100


@dataclasses.dataclass
class FileInfo:
    # Everything the comment template needs about a single covered file.
    path: pathlib.Path
    coverage: coverage_module.FileCoverage
    diff: coverage_module.FileDiffCoverage | None
    previous: coverage_module.FileCoverage | None


def get_comment_markdown(
    *,
    coverage: coverage_module.Coverage,
    diff_coverage: coverage_module.DiffCoverage,
    previous_coverage_rate: decimal.Decimal | None,
    previous_coverage: coverage_module.Coverage | None,
    files: list[FileInfo],
    max_files: int | None,
    count_files: int,
    minimum_green: decimal.Decimal,
    minimum_orange: decimal.Decimal,
    repo_name: str,
    pr_number: int,
    base_template: str,
    marker: str,
    subproject_id: str | None = None,
    custom_template: str | None = None,
    pr_targets_default_branch: bool = True,
):
    """
    Render the PR comment markdown from the coverage data.

    Renders `custom_template` when given, otherwise `base_template`, inside a
    sandboxed Jinja environment with the project's filters registered.

    Raises TemplateError if rendering fails, and MissingMarker if the
    rendered output doesn't contain `marker` (needed to find the comment
    again on later runs).
    """
    loader = CommentLoader(base_template=base_template, custom_template=custom_template)
    # Sandboxed: custom templates are user input and must not reach
    # arbitrary attributes.
    env = SandboxedEnvironment(loader=loader)
    env.filters["pct"] = pct
    env.filters["delta"] = delta
    env.filters["x100"] = x100
    env.filters["get_evolution_color"] = badge.get_evolution_badge_color
    env.filters["generate_badge"] = badge.get_static_badge_url
    env.filters["pluralize"] = pluralize
    env.filters["compact"] = compact
    env.filters["file_url"] = functools.partial(
        get_file_url, repo_name=repo_name, pr_number=pr_number
    )
    env.filters["get_badge_color"] = functools.partial(
        badge.get_badge_color,
        minimum_green=minimum_green,
        minimum_orange=minimum_orange,
    )

    # Map each file to its groups of uncovered new lines.
    # NOTE(review): itertools.groupby only merges *consecutive* items — this
    # assumes get_diff_missing_groups yields groups already ordered by file;
    # verify against diff_grouper.
    missing_diff_lines = {
        key: list(value)
        for key, value in itertools.groupby(
            diff_grouper.get_diff_missing_groups(
                coverage=coverage, diff_coverage=diff_coverage
            ),
            lambda x: x.file,
        )
    }
    try:
        comment = env.get_template("custom" if custom_template else "base").render(
            previous_coverage_rate=previous_coverage_rate,
            coverage=coverage,
            diff_coverage=diff_coverage,
            previous_coverage=previous_coverage,
            count_files=count_files,
            max_files=max_files,
            files=files,
            missing_diff_lines=missing_diff_lines,
            subproject_id=subproject_id,
            marker=marker,
            pr_targets_default_branch=pr_targets_default_branch,
        )
    except jinja2.exceptions.TemplateError as exc:
        raise TemplateError from exc

    if marker not in comment:
        raise MissingMarker()

    return comment


def select_files(
    *,
    coverage: coverage_module.Coverage,
    diff_coverage: coverage_module.DiffCoverage,
    previous_coverage: coverage_module.Coverage | None = None,
    max_files: int | None,
) -> tuple[list[FileInfo], int]:
    """
    Selects the MAX_FILES files with the most new missing lines sorted by path

    """
    previous_coverage_files = previous_coverage.files if previous_coverage else {}

    files = []
    for path, coverage_file in coverage.files.items():
        diff_coverage_file = diff_coverage.files.get(path)
        previous_coverage_file = previous_coverage_files.get(path)

        file_info = FileInfo(
            path=path,
            coverage=coverage_file,
            diff=diff_coverage_file,
            previous=previous_coverage_file,
        )
        # Keep a file if the PR adds statements to it, or if its coverage
        # info changed compared to the previous run.
        has_diff = bool(diff_coverage_file and diff_coverage_file.added_statements)
        has_evolution_from_previous = (
            previous_coverage_file.info != coverage_file.info
            if previous_coverage_file
            else False
        )

        if has_diff or has_evolution_from_previous:
files.append(file_info) 224 | 225 | count_files = len(files) 226 | files = sorted(files, key=sort_order, reverse=True) 227 | if max_files is not None: 228 | files = files[:max_files] 229 | return sorted(files, key=lambda x: x.path), count_files 230 | 231 | 232 | def sort_order(file_info: FileInfo) -> tuple[int, int, int]: 233 | """ 234 | Sort order for files: 235 | 1. Files with the most new missing lines 236 | 2. Files with the most added lines (from the diff) 237 | 3. Files with the most new executed lines (including not in the diff) 238 | """ 239 | new_missing_lines = len(file_info.coverage.missing_lines) 240 | if file_info.previous: 241 | new_missing_lines -= len(file_info.previous.missing_lines) 242 | 243 | added_statements = len(file_info.diff.added_statements) if file_info.diff else 0 244 | new_covered_lines = len(file_info.coverage.executed_lines) 245 | if file_info.previous: 246 | new_covered_lines -= len(file_info.previous.executed_lines) 247 | 248 | return abs(new_missing_lines), added_statements, abs(new_covered_lines) 249 | 250 | 251 | def get_readme_markdown( 252 | is_public: bool, 253 | readme_url: str, 254 | markdown_report: str, 255 | direct_image_url: str, 256 | html_report_url: str | None, 257 | dynamic_image_url: str | None, 258 | endpoint_image_url: str | None, 259 | subproject_id: str | None = None, 260 | ): 261 | env = SandboxedEnvironment() 262 | template = jinja2.Template(read_template_file("readme.md.j2")) 263 | return env.get_template(template).render( 264 | is_public=is_public, 265 | readme_url=readme_url, 266 | markdown_report=markdown_report, 267 | direct_image_url=direct_image_url, 268 | html_report_url=html_report_url, 269 | dynamic_image_url=dynamic_image_url, 270 | endpoint_image_url=endpoint_image_url, 271 | subproject_id=subproject_id, 272 | ) 273 | 274 | 275 | def get_log_message( 276 | is_public: bool, 277 | readme_url: str, 278 | direct_image_url: str, 279 | html_report_url: str | None, 280 | dynamic_image_url: str | None, 281 
| endpoint_image_url: str | None, 282 | subproject_id: str | None = None, 283 | ): 284 | env = SandboxedEnvironment() 285 | template = jinja2.Template(read_template_file("log.txt.j2")) 286 | return env.get_template(template).render( 287 | is_public=is_public, 288 | html_report_url=html_report_url, 289 | direct_image_url=direct_image_url, 290 | endpoint_image_url=endpoint_image_url, 291 | dynamic_image_url=dynamic_image_url, 292 | readme_url=readme_url, 293 | subproject_id=subproject_id, 294 | ) 295 | 296 | 297 | def read_template_file(template: str) -> str: 298 | return ( 299 | resources.files("coverage_comment") / "template_files" / template 300 | ).read_text() 301 | 302 | 303 | def get_file_url( 304 | filename: pathlib.Path, 305 | lines: tuple[int, int] | None = None, 306 | *, 307 | repo_name: str, 308 | pr_number: int, 309 | ) -> str: 310 | # To link to a file in a PR, GitHub uses the link to the file overview combined with a SHA256 hash of the file path 311 | s = f"https://github.com/{repo_name}/pull/{pr_number}/files#diff-{hashlib.sha256(str(filename).encode('utf-8')).hexdigest()}" 312 | 313 | if lines is not None: 314 | # R stands for Right side of the diff. But since we generate these links for new code we only need the right side. 
315 | s += f"R{lines[0]}-R{lines[1]}" 316 | 317 | return s 318 | -------------------------------------------------------------------------------- /coverage_comment/template_files/comment.md.j2: -------------------------------------------------------------------------------- 1 | {%- block title -%}## Coverage report{%- if subproject_id %} ({{ subproject_id }}){%- endif -%}{%- endblock title%} 2 | 3 | {# Coverage evolution badge #} 4 | {% block coverage_badges -%} 5 | {%- block coverage_evolution_badge -%} 6 | {%- if previous_coverage_rate %} 7 | {%- set text = "Coverage for the whole project went from " ~ (previous_coverage_rate | pct) ~ " to " ~ (coverage.info.percent_covered | pct) -%} 8 | {%- set color = (coverage.info.percent_covered - previous_coverage_rate) | get_evolution_color(neutral_color='blue') -%} 9 | 10 | 11 | {%- else -%} 12 | {%- set text = "Coverage for the whole project is " ~ (coverage.info.percent_covered | pct) ~ ". Previous coverage rate is not available, cannot report on evolution." -%} 13 | {%- set color = coverage.info.percent_covered | x100 | get_badge_color -%} 14 | 15 | 16 | {%- endif -%} 17 | {%- endblock coverage_evolution_badge -%} 18 | 19 | {#- Coverage diff badge -#} 20 | {#- space #} {# space -#} 21 | {%- block diff_coverage_badge -%} 22 | {%- set text = (diff_coverage.total_percent_covered | pct) ~ " of the statement lines added by this PR are covered" -%} 23 | 24 | 25 | {%- endblock diff_coverage_badge -%} 26 | {%- endblock coverage_badges -%} 27 | 28 | 29 | {%- macro statements_badge(path, statements_count, previous_statements_count) -%} 30 | {% if previous_statements_count is not none -%} 31 | {% set statements_diff = statements_count - previous_statements_count %} 32 | {% if statements_diff > 0 -%} 33 | {% set text = "This PR adds " ~ statements_diff ~ " to the number of statements in " ~ path ~ ", taking it from " ~ previous_statements_count ~ " to " ~ statements_count ~"." 
-%} 34 | {% set color = "007ec6" -%} 35 | {% elif statements_diff < 0 -%} 36 | {% set text = "This PR removes " ~ (-statements_diff) ~ " from the number of statements in " ~ path ~ ", taking it from " ~ previous_statements_count ~ " to " ~ statements_count ~"." -%} 37 | {% set color = "49c2ee" -%} 38 | {% else -%} 39 | {% set text = "This PR doesn't change the number of statements in " ~ path ~ ", which is " ~ statements_count ~ "." -%} 40 | {% set color = "5d89ba" -%} 41 | {% endif -%} 42 | {% set message = statements_diff %} 43 | {% else -%} 44 | {% set text = "This PR adds " ~ statements_count ~ " statement" ~ (statements_count | pluralize) ~ " to " ~ path ~ ". The file did not seem to exist on the base branch." -%} 45 | {% set color = "007ec6" -%} 46 | {% set message = statements_count %} 47 | {% endif -%} 48 | 49 | 50 | {%- endmacro -%} 51 | 52 | {%- macro missing_lines_badge(path, missing_lines_count, previous_missing_lines_count) -%} 53 | {%- if previous_missing_lines_count is not none -%} 54 | {%- set missing_diff = missing_lines_count - previous_missing_lines_count %} 55 | {%- if missing_diff > 0 -%} 56 | {%- set text = "This PR adds " ~ missing_diff ~ " to the number of statements missing coverage in " ~ path ~ ", taking it from " ~ previous_missing_lines_count ~ " to " ~ missing_lines_count ~ "." -%} 57 | {%- elif missing_diff < 0 -%} 58 | {%- set text = "This PR removes " ~ (-missing_diff) ~ " from the number of statements missing coverage in " ~ path ~ ", taking it from " ~ previous_missing_lines_count ~ " to " ~ missing_lines_count ~ "." -%} 59 | {%- else -%} 60 | {%- set text = "This PR doesn't change the number of statements missing coverage in " ~ path ~ ", which is " ~ missing_lines_count ~ "." -%} 61 | {%- endif -%} 62 | {%- set message = missing_diff -%} 63 | {%- else -%} 64 | {%- set text = "This PR adds " ~ missing_lines_count ~ " statement" ~ (statements_count | pluralize) ~ " missing coverage to " ~ path ~ ". 
The file did not seem to exist on the base branch." -%} 65 | {%- set message = missing_lines_count -%} 66 | {%- endif -%} 67 | {%- set color = message | get_evolution_color(up_is_good=false) -%} 68 | 69 | 70 | {%- endmacro -%} 71 | 72 | {%- macro coverage_rate_badge(path, previous_percent_covered, previous_covered_statements_count, previous_statements_count, percent_covered, covered_statements_count, statements_count) -%} 73 | {%- if previous_percent_covered is not none -%} 74 | {%- set coverage_diff = percent_covered - previous_percent_covered -%} 75 | {%- if coverage_diff > 0 -%} 76 | {%- set text = "This PR adds " ~ ("{:.02f}".format(coverage_diff * 100)) ~ " percentage points to the coverage rate in " ~ path ~ ", taking it from " ~ previous_percent_covered | pct ~ " (" ~ previous_covered_statements_count ~ "/" ~ previous_statements_count ~ ") to " ~ percent_covered | pct ~ " (" ~ covered_statements_count ~ "/" ~ statements_count ~ ")." -%} 77 | {%- elif coverage_diff < 0 -%} 78 | {%- set text = "This PR removes " ~ ("{:.02f}".format(-coverage_diff * 100)) ~ " percentage points from the coverage rate in " ~ path ~ ", taking it from " ~ previous_percent_covered | pct ~ " (" ~ previous_covered_statements_count ~ "/" ~ previous_statements_count ~ ") to " ~ percent_covered | pct ~ " (" ~ covered_statements_count ~ "/" ~ statements_count ~ ")." -%} 79 | {%- else -%} 80 | {%- set text = "This PR doesn't change the coverage rate in " ~ path ~ ", which is " ~ percent_covered | pct ~ " (" ~ covered_statements_count ~ "/" ~ statements_count ~ ")." 
-%} 81 | {%- endif -%} 82 | {%- set color = coverage_diff | get_evolution_color() -%} 83 | {%- set message = "(" ~ previous_covered_statements_count | compact ~ "/" ~ previous_statements_count | compact ~ " > " ~ covered_statements_count | compact ~ "/" ~ statements_count | compact ~ ")" -%} 84 | {%- else -%} 85 | {%- set text = "The coverage rate of " ~ path ~ " is " ~ percent_covered | pct ~ " (" ~ covered_statements_count ~ "/" ~ statements_count ~ "). The file did not seem to exist on the base branch." -%} 86 | {%- set message = "(" ~ covered_statements_count | compact ~ "/" ~ statements_count | compact ~ ")" -%} 87 | {%- set color = percent_covered | x100 | get_badge_color -%} 88 | {%- endif -%} 89 | 90 | 91 | {%- endmacro -%} 92 | 93 | {%- macro diff_coverage_rate_badge(path, added_statements_count, covered_statements_count, percent_covered) -%} 94 | {% if added_statements_count -%} 95 | {% set text = "In this PR, " ~ (added_statements_count) ~ " new statements are added to " ~ path ~ ", " ~ covered_statements_count ~ " of which are covered (" ~ (percent_covered | pct) ~ ")." -%} 96 | {% set label = (percent_covered | pct(precision=0)) -%} 97 | {% set message = "(" ~ covered_statements_count | compact ~ "/" ~ added_statements_count | compact ~ ")" -%} 98 | {%- set color = (percent_covered | x100 | get_badge_color()) -%} 99 | {% else -%} 100 | {% set text = "This PR does not seem to add statements to " ~ path ~ "." -%} 101 | {% set label = "" -%} 102 | {%- set color = "grey" -%} 103 | {% set message = "N/A" -%} 104 | {% endif -%} 105 | 106 | 107 | {%- endmacro -%} 108 | 109 | 110 | {# Individual file report #} 111 | {%- block coverage_by_file -%} 112 | {%- if not files %} 113 | 114 | _This PR does not seem to contain any modification to coverable code._ 115 | {%- else -%} 116 |
Click to see where and how coverage changed 117 | 118 | 119 | 120 | 121 | {%- for parent, files_in_folder in files|groupby(attribute="path.parent") -%} 122 | 123 | 124 | 125 | {%- for file in files_in_folder -%} 126 | {%- set path = file.coverage.path -%} 127 | 128 | 129 | 130 | {#- Statements cell -#} 131 | {%- block statements_badge_cell scoped -%} 132 | {{- statements_badge( 133 | path=path, 134 | statements_count=file.coverage.info.num_statements, 135 | previous_statements_count=(file.previous.info.num_statements if file.previous else none), 136 | ) -}} 137 | {%- endblock statements_badge_cell-%} 138 | 139 | {#- Missing cell -#} 140 | {%- block missing_lines_badge_cell scoped -%} 141 | {{- missing_lines_badge( 142 | path=path, 143 | missing_lines_count=file.coverage.info.missing_lines, 144 | previous_missing_lines_count=(file.previous.info.missing_lines if file.previous else none), 145 | ) -}} 146 | {%- endblock missing_lines_badge_cell -%} 147 | 148 | {#- Coverage rate -#} 149 | {%- block coverage_rate_badge_cell scoped -%} 150 | {{- coverage_rate_badge( 151 | path=path, 152 | previous_percent_covered=(file.previous.info.percent_covered if file.previous else none), 153 | previous_covered_statements_count=(file.previous.info.covered_lines if file.previous else none), 154 | previous_statements_count=(file.previous.info.num_statements if file.previous else none), 155 | percent_covered=file.coverage.info.percent_covered, 156 | covered_statements_count=file.coverage.info.covered_lines, 157 | statements_count=file.coverage.info.num_statements, 158 | ) -}} 159 | {%- endblock coverage_rate_badge_cell -%} 160 | 161 | {#- Coverage of added lines -#} 162 | {%- block diff_coverage_rate_badge_cell scoped -%} 163 | {{- diff_coverage_rate_badge( 164 | path=path, 165 | added_statements_count=((file.diff.added_statements | length) if file.diff else none), 166 | covered_statements_count=((file.diff.covered_statements | length) if file.diff else none), 167 | 
percent_covered=(file.diff.percent_covered if file.diff else none) 168 | ) -}} 169 | {%- endblock diff_coverage_rate_badge_cell -%} 170 | 171 | {#- Link to missing lines -#} 172 | {%- block link_to_missing_diff_lines_cell scoped -%} 173 | 189 | 190 | {%- endblock link_to_missing_diff_lines_cell -%} 191 | {%- endfor -%} 192 | {%- endfor -%} 193 | 194 | 195 | 196 | 197 | 198 | 199 | {#- Statements cell -#} 200 | {%- block statements_badge_total_cell scoped -%} 201 | {{- statements_badge( 202 | path="the whole project", 203 | statements_count=coverage.info.num_statements, 204 | previous_statements_count=(previous_coverage.info.num_statements if previous_coverage else none), 205 | ) -}} 206 | {%- endblock statements_badge_total_cell -%} 207 | 208 | {#- Missing cell -#} 209 | {%- block missing_lines_badge_total_cell scoped -%} 210 | {{- missing_lines_badge( 211 | path="the whole project", 212 | missing_lines_count=coverage.info.missing_lines, 213 | previous_missing_lines_count=(previous_coverage.info.missing_lines if previous_coverage else none), 214 | ) -}} 215 | {%- endblock missing_lines_badge_total_cell -%} 216 | 217 | {#- Coverage rate -#} 218 | {%- block coverage_rate_badge_total_cell scoped -%} 219 | {{- coverage_rate_badge( 220 | path="the whole project", 221 | previous_percent_covered=(previous_coverage.info.percent_covered if previous_coverage else none), 222 | previous_covered_statements_count=(previous_coverage.info.covered_lines if previous_coverage else none), 223 | previous_statements_count=(previous_coverage.info.num_statements if previous_coverage else none), 224 | percent_covered=coverage.info.percent_covered, 225 | covered_statements_count=coverage.info.covered_lines, 226 | statements_count=coverage.info.num_statements, 227 | ) -}} 228 | {%- endblock coverage_rate_badge_total_cell -%} 229 | 230 | {# Coverage of added lines #} 231 | {%- block diff_coverage_rate_badge_total_cell scoped -%} 232 | {{- diff_coverage_rate_badge( 233 | path="the whole 
project", 234 | added_statements_count=diff_coverage.total_num_lines, 235 | covered_statements_count=(diff_coverage.total_num_lines-diff_coverage.total_num_violations), 236 | percent_covered=diff_coverage.total_percent_covered, 237 | ) -}} 238 | {%- endblock diff_coverage_rate_badge_total_cell -%} 239 | 240 | 241 | 242 | 243 |
FileStatementsMissingCoverageCoverage
(new stmts)
Lines missing
  {{ parent }}
  {{ path.name }} 174 | 175 | {%- set comma = joiner() -%} 176 | {%- for group in missing_diff_lines.get(path, []) -%} 177 | {{- comma() -}} 178 | 179 | 180 | {{- group.line_start -}} 181 | {%- if group.line_start != group.line_end -%} 182 | - 183 | {{- group.line_end -}} 184 | {%- endif -%} 185 | 186 | 187 | {%- endfor -%} 188 |
Project Total 
244 | 245 | {%- if max_files and count_files > max_files %} 246 | 247 | _The report is truncated to {{ max_files }} files out of {{ count_files }}. To see the full report, please visit the workflow summary page._ 248 | 249 | {% endif %} 250 | 251 | {%- block footer %} 252 | 253 | 254 | 255 | This report was generated by [python-coverage-comment-action](https://github.com/py-cov-action/python-coverage-comment-action) 256 | 257 | 258 |
259 | 260 | {% endblock footer -%} 261 | 262 | {%- endif -%} 263 | {%- endblock coverage_by_file %} 264 | 265 | {{ marker -}} 266 | -------------------------------------------------------------------------------- /coverage_comment/template_files/log.txt.j2: -------------------------------------------------------------------------------- 1 | {% if subproject_id %}Coverage info for {{ subproject_id }}: 2 | 3 | {% endif -%} 4 | {% if is_public -%} 5 | You can browse the full coverage report at: 6 | {{ html_report_url }} 7 | 8 | {% endif -%} 9 | You can use the following URLs to display your badge: 10 | 11 | - Badge SVG available at: 12 | {{ direct_image_url }} 13 | 14 | {% if is_public -%} 15 | - Badge from shields endpoint is easier to customize but doesn't work with private repo: 16 | {{ endpoint_image_url }} 17 | 18 | - Badge from shields dynamic url (less useful but you never know): 19 | {{ dynamic_image_url }} 20 | 21 | {% endif -%} 22 | See more details and ready-to-copy-paste-markdown at: 23 | {{ readme_url }} 24 | -------------------------------------------------------------------------------- /coverage_comment/template_files/readme.md.j2: -------------------------------------------------------------------------------- 1 | # Repository Coverage{% if subproject_id %} ({{ subproject_id }}){% endif %} 2 | 3 | {% if is_public -%} 4 | [Full report]({{ html_report_url }}) 5 | {%- endif %} 6 | 7 | {{ markdown_report }} 8 | 9 | ## Setup coverage badge 10 | 11 | Below are examples of the badges you can use in your main branch `README` file. 12 | 13 | ### Direct image 14 | 15 | [![Coverage badge]({{ direct_image_url }})]({{ html_report_url if is_public else readme_url }}) 16 | 17 | This is the one to use if your repository is private or if you don't want to customize anything. 
18 | 19 | {% if is_public -%} 20 | ### [Shields.io](https://shields.io) Json Endpoint 21 | 22 | [![Coverage badge]({{ endpoint_image_url }})]({{html_report_url}}) 23 | 24 | Using this one will allow you to [customize](https://shields.io/endpoint) the look of your badge. 25 | It won't work with private repositories. It won't be refreshed more than once per five minutes. 26 | 27 | ### [Shields.io](https://shields.io) Dynamic Badge 28 | 29 | [![Coverage badge]({{ dynamic_image_url }})]({{ html_report_url }}) 30 | 31 | This one will always be the same color. It won't work for private repos. I'm not even sure why we included it. 32 | {%- endif %} 33 | 34 | ## What is that? 35 | 36 | This branch is part of the 37 | [python-coverage-comment-action](https://github.com/marketplace/actions/python-coverage-comment) 38 | GitHub Action. All the files in this branch are automatically generated and may be 39 | overwritten at any moment. 40 | -------------------------------------------------------------------------------- /dev-env: -------------------------------------------------------------------------------- 1 | # This file is meant to be sourced, not executed: 2 | # source ./dev-env 3 | 4 | # We're doing our best to keep it compatible with bash and zsh. 5 | 6 | if ! which uv; then 7 | echo "This script needs uv installed. Either follow the official method: https://docs.astral.sh/uv/getting-started/installation/ or use pipx https://pypa.github.io/pipx/installation/" &>2 8 | return 9 | fi 10 | 11 | if ! which gh; then 12 | echo "This script needs gh installed. https://github.com/cli/cli#installation" &>2 13 | return 14 | fi 15 | 16 | export GITHUB_REPOSITORY=${GITHUB_REPOSITORY:-python-coverage-comment-action-devenv} 17 | export GITHUB_REPOSITORY_TEMPLATE=${GITHUB_REPOSITORY_TEMPLATE:-'py-cov-action/python-coverage-comment-action-v3-example'} 18 | 19 | if [[ ! 
# Clone the existing dev-env repository ($GITHUB_REPOSITORY) into the
# current (temporary) directory.
function clone-repo(){
    gh repo clone "${GITHUB_REPOSITORY}" .
}
# Select which GitHub event the action should simulate by exporting
# GITHUB_EVENT_NAME. Only push, pull_request and workflow_run are valid;
# anything else prints an error and leaves the variable untouched.
function event(){
    case $1 in
        push|pull_request|workflow_run)
            export GITHUB_EVENT_NAME="$1"
            ;;
        *)
            echo "Incorrect parameter $1 (expected push|pull_request|workflow_run)" >&2
    esac
}
# Display a table of recent GitHub Actions runs: run ID, branch, workflow
# name and creation date (Go-template columns map to the JSON fields
# databaseId/headBranch/name/createdAt).
function runs(){
    gh run list --json databaseId,name,createdAt,headBranch --template '{{tablerow "ID" "branch" "Name" "Created At" }}{{range . }}{{tablerow .databaseId .headBranch .name .createdAt}}{{end}}{{tablerender}}'
}
>&2 175 | echo " Display all GITHUB environment variables" >&2 176 | echo " runs" >&2 177 | echo " Display a table of all recent GitHub Actions runs" >&2 178 | echo "" >&2 179 | 180 | echo "Misc:" >&2 181 | echo " help" >&2 182 | echo " Display these instructions again" >&2 183 | echo " popd" >&2 184 | echo " go back to the repository folder" >&2 185 | echo "" >&2 186 | echo "Happy hacking!" >&2 187 | } 188 | 189 | help 190 | -------------------------------------------------------------------------------- /dev-env-vars.dist: -------------------------------------------------------------------------------- 1 | export GITHUB_REPOSITORY= 2 | export GITHUB_REPOSITORY_TEMPLATE= 3 | export GITHUB_TOKEN= 4 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "coverage-comment" 3 | version = "0.0.0" 4 | description = "Publish diff coverage report as PR comment, and create a coverage badge to display on the readme, for Python projects." 
5 | authors = [{ name = "Joachim Jablon", email = "ewjoachim@gmail.com" }] 6 | requires-python = ">=3.12" 7 | license = { file = "LICENSE" } 8 | dependencies = ["coverage[toml]", "httpx[http2]", "Jinja2"] 9 | 10 | [project.scripts] 11 | coverage_comment = "coverage_comment.main:main" 12 | 13 | [build-system] 14 | requires = ["hatchling"] 15 | build-backend = "hatchling.build" 16 | 17 | [dependency-groups] 18 | dev = ["ruff", "mypy", "pytest", "pytest-cov", "pytest-mock", "tenacity"] 19 | 20 | [tool.hatch.build.targets.sdist] 21 | include = ["coverage_comment", "coverage_comment/default.md.j2"] 22 | 23 | [tool.hatch.build.targets.wheel] 24 | include = ["coverage_comment", "coverage_comment/default.md.j2"] 25 | 26 | [tool.pytest.ini_options] 27 | addopts = """ 28 | --cov-report term-missing --cov-branch --cov-report html --cov-report term 29 | --cov=coverage_comment --cov-context=test -vv --strict-markers -rfE 30 | --ignore=tests/end_to_end/repo 31 | """ 32 | testpaths = ["tests/unit", "tests/integration", "tests/end_to_end"] 33 | 34 | filterwarnings = ["error"] 35 | markers = [ 36 | "repo_suffix: Allows to use an additional suffix for the e2e test repo.", 37 | "code_path: Allows to place the code in a subdirectory for the e2e test repo.", 38 | "subproject_id: Allows to use a different subproject id for the e2e test repo.", 39 | "add_branches: Adds branches besides 'main' and 'branch' to integration tests setup.", 40 | ] 41 | 42 | [tool.coverage.run] 43 | relative_files = true 44 | 45 | [tool.coverage.report] 46 | exclude_also = ["\\.\\.\\."] 47 | 48 | [tool.coverage.html] 49 | show_contexts = true 50 | 51 | [tool.mypy] 52 | no_implicit_optional = true 53 | 54 | [tool.ruff] 55 | target-version = "py312" 56 | unsafe-fixes = true 57 | 58 | [tool.ruff.lint] 59 | extend-select = [ 60 | "UP", # pyupgrade 61 | "I", # isort 62 | "E", # pycodestyle errors 63 | "W", # pycodestyle warnings 64 | "RUF", # ruff 65 | ] 66 | fixable = ["ALL"] 67 | extend-ignore = [ 68 | "E501", # 
#!/usr/bin/env python

# /// script
# dependencies = [
#     "ruamel.yaml",
# ]
# ///
# Usage: uv run scripts/sync-pre-commit.py
# or through pre-commit hook: pre-commit run --all-files sync-pre-commit

from __future__ import annotations

import contextlib
import copy
import pathlib
import subprocess
from collections.abc import Generator
from typing import Any, cast

import ruamel.yaml


@contextlib.contextmanager
def yaml_roundtrip(
    path: pathlib.Path,
) -> Generator[dict[str, Any], None, None]:
    """Yield the parsed YAML content of *path* for in-place mutation.

    Uses ruamel's round-trip mode so comments and layout survive; the file
    is rewritten only when the caller actually changed something.
    """
    yaml = ruamel.yaml.YAML()
    document = cast("dict[str, Any]", yaml.load(path.read_text()))
    snapshot = copy.deepcopy(document)
    yield document
    if document != snapshot:
        # Match pre-commit's conventional indentation before dumping.
        yaml.indent(mapping=2, sequence=4, offset=2)
        yaml.dump(document, path)


def export_from_uv_lock(group_args):
    """Return pinned requirement lines exported from uv.lock.

    *group_args* selects which dependency group(s) to export.
    """
    command = [
        "uv",
        "export",
        "--all-extras",
        "--no-hashes",
        "--no-header",
        "--no-emit-project",
        "--no-emit-workspace",
        "--no-annotate",
        *group_args,
    ]
    output = subprocess.check_output(command, text=True)
    packages = output.strip().split("\n")
    print(packages)
    return packages


def main():
    """Sync each pre-commit hook's `rev` with the dev-group pin in uv.lock.

    A hook repo whose trailing URL segment matches an exported dev package
    name gets its rev set to "v<pinned version>".
    """
    dev_dependencies = export_from_uv_lock(["--only-group=dev"])
    # Lines look like "name==1.2.3 ; markers" — strip markers, split pin.
    dev_versions = dict(
        line.split(";")[0].strip().split("==", 1) for line in dev_dependencies
    )

    with yaml_roundtrip(pathlib.Path(".pre-commit-config.yaml")) as pre_commit_config:
        for repo in pre_commit_config["repos"]:
            project = repo["repo"].split("/")[-1]
            if project in dev_versions:
                repo["rev"] = f"v{dev_versions[project]}"


if __name__ == "__main__":
    main()
Calling the real GitHub API 28 | in the test suite would probably be much harder. 29 | 30 | ## Coverage 31 | 32 | We currently have 100% coverage, in 3 categories: 33 | - Low-level functions that have a lot of complex logic are unit-tested 34 | - Glue functions have slightly less logic, there's about a dozen of possible code paths through those functions. Each path has an integration test. Any additional logic is pushed down the stack where it will be unit-tested. In order for this to work elegently, all the glue function receive and use objects that will ultimately be responsible for the low-level behaviour that we want to be able to simulate in the test. For example, the glue functions receive an httpx Client as parameter. In the test, we're able to call these functions with test double that will not do real HTTP calls. This means that the integration tests that provide coverage for the glue functions also lets us check that the different bricks are correctly plugged to one another 35 | - The top-level code that is responsible to call the glue functions with the real implementations for httpx clients etc. relies on mocks. It's not very elegant but it's a single function, and it means we get to have 100% coverage. 36 | - End-to-end tests don't count toward coverage 37 | 38 | ## Feedback 39 | 40 | If you've read all the way through this point, and you have some feedback, I'd love to hear it! Either open an issue or ping me on [twitter](https://twitter.com/Ewjoachim). 
41 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/py-cov-action/python-coverage-comment-action/970a227e0c16ef4589a99a9970ab0ceb8c53059a/tests/__init__.py -------------------------------------------------------------------------------- /tests/end_to_end/conftest.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import contextlib 4 | import functools 5 | import json as json_module 6 | import logging 7 | import os 8 | import pathlib 9 | import re 10 | import shutil 11 | import subprocess 12 | import time 13 | 14 | import httpx 15 | import pytest 16 | import tenacity 17 | 18 | # In this directory, `gh` is not the global `gh` fixture, but is instead 19 | # a fixture letting you use the `gh` CLI tool. 20 | 21 | SLEEP_AFTER_API_CALL = 1 # second(s) 22 | 23 | 24 | @pytest.fixture 25 | def call(): 26 | def _(command, *args, env, **kwargs): 27 | try: 28 | call = subprocess.run( 29 | [command, *list(args)], 30 | text=True, 31 | check=True, 32 | capture_output=True, 33 | env=os.environ | (env or {}), 34 | **kwargs, 35 | ) 36 | except subprocess.CalledProcessError as exc: 37 | print("\n".join([exc.stdout, exc.stderr])) 38 | raise 39 | return call.stdout 40 | 41 | return _ 42 | 43 | 44 | @contextlib.contextmanager 45 | def _cd(tmp_path: pathlib.Path, path: str): 46 | full_path = tmp_path / path 47 | full_path.mkdir(exist_ok=True, parents=True) 48 | old_path = pathlib.Path.cwd() 49 | if old_path == full_path: 50 | yield full_path 51 | return 52 | os.chdir(full_path) 53 | try: 54 | yield full_path 55 | finally: 56 | os.chdir(old_path) 57 | 58 | 59 | @pytest.fixture 60 | def cd(tmp_path: pathlib.Path): 61 | init_path = os.getcwd() 62 | yield functools.partial(_cd, tmp_path) 63 | os.chdir(init_path) 64 | 65 | 66 | @pytest.fixture 67 | 
@pytest.fixture
def _gh(call, gh_config_dir):
    # Low-level `gh` CLI wrapper with retries. Prefer the `gh_me` / `gh_other`
    # fixtures, which bind a token and a working directory on top of this.
    def gh(*args, token, json=False, fail_value=None):
        # Retry up to 5 times, waiting 0s, 5s, 10s, ... between attempts,
        # whenever the call raises — or returns `fail_value`, if one was
        # given: GitHub's API is eventually consistent and sometimes flaky.
        @tenacity.retry(
            reraise=True,
            retry=(
                tenacity.retry_if_result(
                    lambda x: fail_value is not None and x == fail_value
                )
                | tenacity.retry_if_exception_type()
            ),
            stop=tenacity.stop_after_attempt(5),
            wait=tenacity.wait_incrementing(start=0, increment=5),
            after=tenacity.after_log(logging.getLogger(), logging.DEBUG),
        )
        def f():
            # Isolated gh config dir + per-call token; NO_COLOR keeps the
            # output parseable.
            stdout = call(
                "gh",
                *(f"{e}" for e in args),
                env={
                    "GH_CONFIG_DIR": gh_config_dir,
                    "GH_TOKEN": token,
                    "NO_COLOR": "1",
                },
            )

            # Giving GitHub an opportunity to synchronize all their systems
            # (without that, we get random failures sometimes)
            time.sleep(SLEEP_AFTER_API_CALL)

            # json=True decodes stdout as JSON (empty output stays as-is).
            if stdout and json:
                return json_module.loads(stdout)
            else:
                return stdout

        return f()

    return gh
@pytest.fixture
def git(call, gh_config_dir):
    """Fixture returning a helper that runs `git` with a fixed test identity."""

    def run_git(*args, env=None):
        # Deterministic author/committer so commits are reproducible in the
        # e2e repos; caller-supplied env entries take precedence.
        base_env = {
            "GH_CONFIG_DIR": gh_config_dir,
            "GIT_AUTHOR_NAME": "Foo",
            "GIT_AUTHOR_EMAIL": "foo@example.com",
            "GIT_COMMITTER_NAME": "Foo",
            "GIT_COMMITTER_EMAIL": "foo@example.com",
        }
        merged_env = base_env | (env or {})
        return call("git", *args, env=merged_env)

    return run_git
@pytest.fixture
def repo_name(request):
    """Name of the e2e test repository.

    Two optional suffixes are appended: a global one from the
    COVERAGE_COMMENT_E2E_REPO_SUFFIX environment variable (sanitized to
    characters valid in a repo name), then a per-test one from the
    `repo_suffix` marker.
    """
    name = "python-coverage-comment-action-end-to-end"
    env_suffix = os.getenv("COVERAGE_COMMENT_E2E_REPO_SUFFIX")
    if env_suffix:
        sanitized = re.sub(r"[^A-Za-z0-9_.-]", "-", env_suffix)
        name = f"{name}-{sanitized}"
    marker = request.node.get_closest_marker("repo_suffix")
    if marker is not None:
        name = f"{name}-{'-'.join(marker.args)}"
    return name
@pytest.fixture
def wait_for_run_to_start():
    # Poll (up to 60 attempts, 1s apart) until the most recent GitHub Actions
    # run on `branch` points at the expected commit, then return its run id.
    @tenacity.retry(
        stop=tenacity.stop_after_attempt(60),
        wait=tenacity.wait_fixed(1),
        reraise=True,
    )
    def _(*, sha1, branch, gh):
        # Only look at the single latest run on the branch.
        run = gh(
            "run",
            "list",
            "--branch",
            branch,
            "--limit",
            "1",
            "--json",
            "databaseId,headSha",
            json=True,
        )
        if not run:
            print("No GitHub Action run recorded.")
            raise tenacity.TryAgain()

        # The latest run must be for our commit, not a stale one.
        latest_run_sha1 = run[0]["headSha"]
        if latest_run_sha1 != sha1:
            print(f"Latest run points to {latest_run_sha1}, expecting {sha1}.")
            raise tenacity.TryAgain()

        return run[0]["databaseId"]

    return _
@pytest.fixture
def add_coverage_line(git, code_path):
    """Fixture returning a helper that appends a CSV test case and commits it.

    Each appended line parametrizes an extra test in the e2e repo, which
    raises its measured coverage.
    """

    def append_case(line):
        csv_file = pathlib.Path(code_path / "tests/cases.csv")
        existing = csv_file.read_text()
        csv_file.write_text(f"{existing}{line}\n")

        git("add", str(csv_file))
        git("commit", "-m", "improve coverage")

    return append_case
def f(a="", b="", c="", d=""):  # e2e fixture: joins the truthy args with "-"
    elements = []  # NOTE: deliberately one branch per argument — the e2e suite measures per-branch coverage of this file via tests/cases.csv, so do not simplify or shift these lines
    if a:
        elements.append(a)

    if b:
        elements.append(b)

    if c:
        elements.append(c)

    if d:
        elements.append(d)

    return "-".join(elements)
-------------------------------------------------------------------------------- /tests/end_to_end/test_all.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import base64 4 | 5 | import pytest 6 | 7 | 8 | @pytest.mark.repo_suffix("public") 9 | @pytest.mark.code_path("subdir") 10 | @pytest.mark.subproject_id("my-great-project") 11 | def test_public_repo( 12 | gh_create_repo, 13 | wait_for_run_to_start, 14 | wait_for_run_triggered_by_user_to_start, 15 | get_sha1, 16 | gh_me, 17 | gh_other, 18 | repo_full_name, 19 | cd, 20 | git, 21 | add_coverage_line, 22 | token_me, 23 | token_other, 24 | gh_create_fork, 25 | gh_other_username, 26 | http_client, 27 | ): 28 | # Create a GitHub repo, make it public 29 | gh_create_repo("--public") 30 | 31 | # GitHub Actions should start soon 32 | run_id = wait_for_run_to_start(sha1=get_sha1(), branch="main", gh=gh_me) 33 | 34 | # AAAaand it's started. Now let's wait for it to end. 35 | # Also, raise if it doesn't end successfully. That half of the job. 36 | gh_me("run", "watch", run_id, "--exit-status") 37 | 38 | # Now to the other half: maybe it did nothing successfully, so let's check 39 | # that the lob log contains the 3 links to our svg images 40 | repo_api_url = "/repos/{owner}/{repo}" 41 | 42 | # First get the job id 43 | job_list_url = f"{repo_api_url}/actions/runs/{run_id}/jobs" 44 | job_ids = gh_me("api", job_list_url, "--jq=.jobs[].id").strip().splitlines() 45 | assert len(job_ids) == 1 46 | job_id = job_ids[0] 47 | 48 | # Then check the logs for this job 49 | logs = gh_me("api", f"{repo_api_url}/actions/jobs/{job_id}/logs") 50 | 51 | print("Logs:", logs) 52 | log_lines = logs.splitlines() 53 | 54 | # The easiest way to check the links is to assume there will be no other 55 | # line with a link prefixed by 4 spaces. If at some point there is, we'll 56 | # change the test. 
def test_comment_file(tmp_path):
    """store_file writes the given content verbatim to the target path."""
    target = tmp_path / "foo.txt"
    comment_file.store_file(filename=target, content="foo")

    assert target.read_text() == "foo"
4 | 5 | 6 | def test_get_readme_and_log__public(): 7 | readme_file, log = communication.get_readme_and_log( 8 | is_public=True, 9 | image_urls={ 10 | "direct": "https://a", 11 | "endpoint": "https://b", 12 | "dynamic": "https://c", 13 | }, 14 | readme_url="https://readme", 15 | html_report_url="https://html_report", 16 | markdown_report="**Hello report!**", 17 | ) 18 | 19 | readme = readme_file.contents 20 | 21 | assert str(readme_file.path) == "README.md" 22 | 23 | assert "# Repository Coverage" in readme 24 | 25 | assert "[![Coverage badge](https://a)](https://html_report)" in readme 26 | 27 | assert "[![Coverage badge](https://b)](https://html_report)" in readme 28 | 29 | assert "[![Coverage badge](https://c)](https://html_report)" in readme 30 | 31 | assert "https://a" in log 32 | 33 | assert "https://readme" in log 34 | 35 | assert "https://b" in log 36 | 37 | assert "https://c" in log 38 | 39 | 40 | def test_get_readme_and_log__subproject(): 41 | readme_file, log = communication.get_readme_and_log( 42 | is_public=True, 43 | image_urls={ 44 | "direct": "https://a", 45 | "endpoint": "https://b", 46 | "dynamic": "https://c", 47 | }, 48 | readme_url="https://readme", 49 | html_report_url="https://html_report", 50 | markdown_report="**Hello report!**", 51 | subproject_id="my-subproject", 52 | ) 53 | assert "Coverage info for my-subproject" in log 54 | assert "# Repository Coverage (my-subproject)" in readme_file.contents 55 | 56 | 57 | def test_get_readme_and_log__private(): 58 | readme_file, log = communication.get_readme_and_log( 59 | is_public=False, 60 | image_urls={ 61 | "direct": "https://a", 62 | "endpoint": "https://b", 63 | "dynamic": "https://c", 64 | }, 65 | readme_url="https://readme", 66 | html_report_url="https://html_report", 67 | markdown_report="**Hello report!**", 68 | ) 69 | 70 | readme = readme_file.contents 71 | 72 | assert str(readme_file.path) == "README.md" 73 | 74 | assert "# Repository Coverage" in readme 75 | 76 | assert "[![Coverage 
badge](https://a)](https://readme)" in readme 77 | 78 | assert "https://b" not in readme 79 | 80 | assert "https://c" not in readme 81 | 82 | assert "https://a" in log 83 | 84 | assert "https://readme" in log 85 | 86 | assert "https://b" not in log 87 | 88 | assert "https://c" not in log 89 | -------------------------------------------------------------------------------- /tests/unit/test_coverage.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import decimal 4 | import json 5 | import pathlib 6 | 7 | import pytest 8 | 9 | from coverage_comment import coverage, subprocess 10 | 11 | 12 | def test_diff_violations(make_coverage_and_diff): 13 | _, diff = make_coverage_and_diff( 14 | """ 15 | # file: a.py 16 | + 1 missing 17 | 2 missing 18 | + 3 missing 19 | 4 covered 20 | + 5 covered 21 | """ 22 | ) 23 | assert diff.files[pathlib.Path("a.py")].violation_lines == [1, 3] 24 | 25 | 26 | @pytest.mark.parametrize( 27 | "num_covered, num_total, expected_coverage", 28 | [ 29 | (0, 10, "0"), 30 | (0, 0, "1"), 31 | (5, 0, "1"), 32 | (5, 10, "0.5"), 33 | (1, 100, "0.01"), 34 | ], 35 | ) 36 | def test_compute_coverage(num_covered, num_total, expected_coverage): 37 | assert coverage.compute_coverage(num_covered, num_total) == decimal.Decimal( 38 | expected_coverage 39 | ) 40 | 41 | 42 | @pytest.mark.parametrize( 43 | "num_covered, num_total, branch_covered, branch_total, expected_coverage", 44 | [ 45 | (0, 10, 0, 15, "0"), 46 | (0, 0, 0, 0, "1"), 47 | (5, 0, 5, 0, "1"), 48 | (5, 10, 5, 10, "0.5"), 49 | (1, 50, 1, 50, "0.02"), 50 | ], 51 | ) 52 | def test_compute_coverage_with_branches( 53 | num_covered, num_total, branch_covered, branch_total, expected_coverage 54 | ): 55 | assert coverage.compute_coverage( 56 | num_covered, num_total, branch_covered, branch_total 57 | ) == decimal.Decimal(expected_coverage) 58 | 59 | 60 | def test_get_coverage_info(mocker, coverage_json, coverage_obj): 61 | run = 
mocker.patch( 62 | "coverage_comment.subprocess.run", return_value=json.dumps(coverage_json) 63 | ) 64 | 65 | raw_coverage_information, result = coverage.get_coverage_info( 66 | merge=True, coverage_path=pathlib.Path(".") 67 | ) 68 | 69 | assert run.call_args_list == [ 70 | mocker.call("coverage", "combine", path=pathlib.Path(".")), 71 | mocker.call("coverage", "json", "-o", "-", path=pathlib.Path(".")), 72 | ] 73 | 74 | assert result == coverage_obj 75 | assert raw_coverage_information == coverage_json 76 | 77 | 78 | def test_get_coverage_info__no_merge(mocker, coverage_json): 79 | run = mocker.patch( 80 | "coverage_comment.subprocess.run", return_value=json.dumps(coverage_json) 81 | ) 82 | 83 | coverage.get_coverage_info(merge=False, coverage_path=pathlib.Path(".")) 84 | 85 | assert ( 86 | mocker.call("coverage", "combine", path=pathlib.Path(".")) 87 | not in run.call_args_list 88 | ) 89 | 90 | 91 | def test_get_coverage_info__error_base(mocker, get_logs): 92 | mocker.patch( 93 | "coverage_comment.subprocess.run", side_effect=subprocess.SubProcessError 94 | ) 95 | 96 | with pytest.raises(subprocess.SubProcessError): 97 | coverage.get_coverage_info(merge=False, coverage_path=pathlib.Path(".")) 98 | 99 | assert not get_logs("ERROR") 100 | 101 | 102 | def test_get_coverage_info__error_no_source(mocker, get_logs): 103 | mocker.patch( 104 | "coverage_comment.subprocess.run", 105 | side_effect=subprocess.SubProcessError("No source for code: bla"), 106 | ) 107 | 108 | with pytest.raises(subprocess.SubProcessError): 109 | coverage.get_coverage_info(merge=False, coverage_path=pathlib.Path(".")) 110 | 111 | assert get_logs("ERROR", "Cannot read") 112 | 113 | 114 | def test_generate_coverage_html_files(mocker): 115 | run = mocker.patch( 116 | "coverage_comment.subprocess.run", 117 | ) 118 | 119 | coverage.generate_coverage_html_files( 120 | destination=pathlib.Path("/tmp/foo"), coverage_path=pathlib.Path(".") 121 | ) 122 | 123 | assert run.call_args_list == [ 124 | 
mocker.call( 125 | "coverage", 126 | "html", 127 | "--skip-empty", 128 | "--directory", 129 | "/tmp/foo", 130 | path=pathlib.Path("."), 131 | ), 132 | ] 133 | 134 | 135 | def test_generate_coverage_markdown(mocker): 136 | run = mocker.patch("coverage_comment.subprocess.run", return_value="foo") 137 | 138 | result = coverage.generate_coverage_markdown(coverage_path=pathlib.Path(".")) 139 | 140 | assert run.call_args_list == [ 141 | mocker.call( 142 | "coverage", 143 | "report", 144 | "--format=markdown", 145 | "--show-missing", 146 | path=pathlib.Path("."), 147 | ), 148 | ] 149 | 150 | assert result == "foo" 151 | 152 | 153 | def test__make_coverage_info(): 154 | result = coverage._make_coverage_info( 155 | { 156 | "covered_lines": 14, 157 | "num_statements": 20, 158 | "missing_lines": 6, 159 | "excluded_lines": 0, 160 | } 161 | ) 162 | assert isinstance(result, coverage.CoverageInfo) 163 | assert result.percent_covered == decimal.Decimal(14) / decimal.Decimal(20) 164 | assert result.num_branches == 0 165 | assert result.num_partial_branches == 0 166 | assert result.covered_branches == 0 167 | assert result.missing_branches == 0 168 | 169 | 170 | def test__make_coverage_info__with_branches(): 171 | result = coverage._make_coverage_info( 172 | { 173 | "covered_lines": 4, 174 | "num_statements": 10, 175 | "missing_lines": 1, 176 | "excluded_lines": 0, 177 | "covered_branches": 4, 178 | "num_branches": 6, 179 | "num_partial_branches": 2, 180 | } 181 | ) 182 | assert isinstance(result, coverage.CoverageInfo) 183 | assert result.percent_covered == decimal.Decimal(4 + 4) / decimal.Decimal(10 + 6) 184 | assert result.covered_branches == 4 185 | assert result.missing_branches == 0 186 | assert result.excluded_lines == 0 187 | 188 | 189 | @pytest.mark.parametrize( 190 | "added_lines, update_obj, expected", 191 | [ 192 | # A first simple example. We added lines 1 and 3 to a file. Coverage 193 | # info says that lines 1 and 2 were executed and line 3 was not. 
194 | # Diff coverage should report that the violation is line 3 and 195 | # that the total coverage is 50%. 196 | ( 197 | {pathlib.Path("codebase/code.py"): [1, 3]}, 198 | {"codebase/code.py": {"executed_lines": [1, 2], "missing_lines": [3]}}, 199 | coverage.DiffCoverage( 200 | total_num_lines=2, 201 | total_num_violations=1, 202 | total_percent_covered=decimal.Decimal("0.5"), 203 | num_changed_lines=2, 204 | files={ 205 | pathlib.Path("codebase/code.py"): coverage.FileDiffCoverage( 206 | path=pathlib.Path("codebase/code.py"), 207 | percent_covered=decimal.Decimal("0.5"), 208 | added_statements=[1, 3], 209 | covered_statements=[1], 210 | missing_statements=[3], 211 | added_lines=[1, 3], 212 | ) 213 | }, 214 | ), 215 | ), 216 | # A second simple example. This time, the only modified file (code2.py) 217 | # is not the same as the files that received coverage info (code.py). 218 | # Consequently, no line should be reported as a violation (we could 219 | # imagine that the file code2.py only contains comments and is not 220 | # covered, nor imported.) 221 | ( 222 | {pathlib.Path("codebase/code2.py"): [1, 3]}, 223 | {"codebase/code.py": {"executed_lines": [1, 2], "missing_lines": [3]}}, 224 | coverage.DiffCoverage( 225 | total_num_lines=0, 226 | total_num_violations=0, 227 | total_percent_covered=decimal.Decimal("1"), 228 | num_changed_lines=2, 229 | files={}, 230 | ), 231 | ), 232 | # A third simple example. This time, there's no intersection between 233 | # the modified files and the files that received coverage info. 
We 234 | # should not report any violation (and 100% coverage) 235 | ( 236 | {pathlib.Path("codebase/code.py"): [4, 5, 6]}, 237 | {"codebase/code.py": {"executed_lines": [1, 2, 3], "missing_lines": [7]}}, 238 | coverage.DiffCoverage( 239 | total_num_lines=0, 240 | total_num_violations=0, 241 | total_percent_covered=decimal.Decimal("1"), 242 | num_changed_lines=3, 243 | files={ 244 | pathlib.Path("codebase/code.py"): coverage.FileDiffCoverage( 245 | path=pathlib.Path("codebase/code.py"), 246 | percent_covered=decimal.Decimal("1"), 247 | added_statements=[], 248 | covered_statements=[], 249 | missing_statements=[], 250 | added_lines=[4, 5, 6], 251 | ) 252 | }, 253 | ), 254 | ), 255 | # A more complex example with 2 distinct files. We want to check both 256 | # that they are individually handled correctly and that the general 257 | # stats are correct. 258 | ( 259 | { 260 | pathlib.Path("codebase/code.py"): [4, 5, 6], 261 | pathlib.Path("codebase/other.py"): [10, 13], 262 | }, 263 | { 264 | "codebase/code.py": { 265 | "executed_lines": [1, 2, 3, 5, 6], 266 | "missing_lines": [7], 267 | }, 268 | "codebase/other.py": { 269 | "executed_lines": [10, 11, 12], 270 | "missing_lines": [13], 271 | }, 272 | }, 273 | coverage.DiffCoverage( 274 | total_num_lines=4, # 2 lines in code.py + 2 lines in other.py 275 | total_num_violations=1, # 1 line in other.py 276 | total_percent_covered=decimal.Decimal("0.75"), # 3/4 lines covered 277 | num_changed_lines=5, # 3 lines in code.py + 2 lines in other.py 278 | files={ 279 | pathlib.Path("codebase/code.py"): coverage.FileDiffCoverage( 280 | path=pathlib.Path("codebase/code.py"), 281 | percent_covered=decimal.Decimal("1"), 282 | added_statements=[5, 6], 283 | covered_statements=[5, 6], 284 | missing_statements=[], 285 | added_lines=[4, 5, 6], 286 | ), 287 | pathlib.Path("codebase/other.py"): coverage.FileDiffCoverage( 288 | path=pathlib.Path("codebase/other.py"), 289 | percent_covered=decimal.Decimal("0.5"), 290 | added_statements=[10, 
13], 291 | covered_statements=[10], 292 | missing_statements=[13], 293 | added_lines=[10, 13], 294 | ), 295 | }, 296 | ), 297 | ), 298 | ], 299 | ) 300 | def test_get_diff_coverage_info(make_coverage_obj, added_lines, update_obj, expected): 301 | result = coverage.get_diff_coverage_info( 302 | added_lines=added_lines, coverage=make_coverage_obj(**update_obj) 303 | ) 304 | assert result == expected 305 | 306 | 307 | def test_get_added_lines(git): 308 | diff = ( 309 | """+++ b/README.md\n@@ -1,2 +1,3 @@\n-# coverage-comment\n+coverage-comment\n""" 310 | ) 311 | git.register("git fetch origin main --depth=1000")() 312 | git.register("git diff --unified=0 FETCH_HEAD -- .")(stdout=diff) 313 | assert coverage.get_added_lines(git=git, base_ref="main") == { 314 | pathlib.Path("README.md"): [1, 2, 3] 315 | } 316 | 317 | 318 | @pytest.mark.parametrize( 319 | "line_number_diff_line, expected", 320 | [ 321 | ("@@ -1,2 +7,4 @@ foo()", [7, 8, 9, 10]), 322 | ("@@ -1,2 +8 @@ foo()", [8]), 323 | ], 324 | ) 325 | def test_parse_line_number_diff_line(git, line_number_diff_line, expected): 326 | result = list(coverage.parse_line_number_diff_line(line_number_diff_line)) 327 | assert result == expected 328 | 329 | 330 | def test_parse_diff_output(git): 331 | diff = """diff --git a/action.yml b/action.yml 332 | deleted file mode 100644 333 | index 42249d1..0000000 334 | --- a/action.yml 335 | +++ /dev/null 336 | @@ -1,2 +0,0 @@ 337 | -name: Python Coverage Comment 338 | -branding: 339 | diff --git a/README.md b/README.md 340 | index 1f1d9a4..e69de29 100644 341 | --- a/README.md 342 | +++ b/README.md 343 | @@ -1 +1 @@ 344 | -# coverage-comment 345 | -coverage-comment 346 | @@ -3,2 +3,4 @@ 347 | -foo 348 | -bar 349 | +foo1 350 | +bar1 351 | +foo2 352 | +bar2 353 | --- a/foo.txt 354 | +++ b/foo.txt 355 | @@ -0,0 +1 @@ 356 | +bar 357 | --- a/bar.txt 358 | +++ b/bar.txt 359 | @@ -8 +7,0 @@ 360 | -foo 361 | diff --git a/coverage_comment/annotations.py b/coverage_comment/annotations2.py 362 | 
similarity index 100% 363 | rename from coverage_comment/annotations.py 364 | rename to coverage_comment/annotations2.py 365 | """ 366 | git.register("git fetch origin main --depth=1000")() 367 | git.register("git diff --unified=0 FETCH_HEAD -- .")(stdout=diff) 368 | assert coverage.parse_diff_output(diff=diff) == { 369 | pathlib.Path("README.md"): [1, 3, 4, 5, 6], 370 | pathlib.Path("foo.txt"): [1], 371 | } 372 | 373 | 374 | def test_parse_diff_output__error(git): 375 | diff = """ 376 | @@ -0,0 +1,1 @@ 377 | +name: Python Coverage Comment 378 | diff --git a/README.md b/README.md 379 | index 1f1d9a4..e69de29 100644 380 | """ 381 | git.register("git fetch origin main --depth=1000")() 382 | git.register("git diff --unified=0 FETCH_HEAD -- .")(stdout=diff) 383 | with pytest.raises(ValueError): 384 | coverage.parse_diff_output(diff=diff) 385 | -------------------------------------------------------------------------------- /tests/unit/test_diff_grouper.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pathlib 4 | 5 | from coverage_comment import diff_grouper, groups 6 | 7 | 8 | def test_group_annotations(coverage_obj, diff_coverage_obj): 9 | result = diff_grouper.get_diff_missing_groups( 10 | coverage=coverage_obj, diff_coverage=diff_coverage_obj 11 | ) 12 | 13 | assert list(result) == [ 14 | groups.Group(file=pathlib.Path("codebase/code.py"), line_start=6, line_end=8), 15 | ] 16 | 17 | 18 | def test_group_annotations_more_files( 19 | coverage_obj_more_files, diff_coverage_obj_more_files 20 | ): 21 | result = diff_grouper.get_diff_missing_groups( 22 | coverage=coverage_obj_more_files, 23 | diff_coverage=diff_coverage_obj_more_files, 24 | ) 25 | 26 | assert list(result) == [ 27 | groups.Group(file=pathlib.Path("codebase/code.py"), line_start=5, line_end=8), 28 | groups.Group(file=pathlib.Path("codebase/other.py"), line_start=1, line_end=1), 29 | 
groups.Group(file=pathlib.Path("codebase/other.py"), line_start=3, line_end=5), 30 | ] 31 | -------------------------------------------------------------------------------- /tests/unit/test_dunder_main.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pytest 4 | 5 | from coverage_comment import __main__ 6 | 7 | 8 | @pytest.mark.parametrize( 9 | "name, expected", 10 | [ 11 | ("__main__", True), 12 | ("foo", False), 13 | ], 14 | ) 15 | def test_main_call(mocker, name, expected): 16 | main = mocker.patch("coverage_comment.main.main") 17 | 18 | __main__.main_call(name) 19 | 20 | assert main.called is expected 21 | -------------------------------------------------------------------------------- /tests/unit/test_files.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import decimal 4 | import json 5 | import pathlib 6 | 7 | from coverage_comment import files 8 | 9 | 10 | def test_write_file(tmp_path): 11 | files.WriteFile(path=tmp_path / "a", contents="foo").apply() 12 | 13 | assert (tmp_path / "a").read_text() == "foo" 14 | 15 | 16 | def test_replace_dir(tmp_path): 17 | (tmp_path / "foo").mkdir() 18 | (tmp_path / "foo/foofile").touch() 19 | (tmp_path / "bar").mkdir() 20 | (tmp_path / "bar/barfile").touch() 21 | 22 | files.ReplaceDir(path=(tmp_path / "bar"), source=(tmp_path / "foo")).apply() 23 | 24 | assert not (tmp_path / "foo").exists() 25 | assert (tmp_path / "bar").exists() 26 | assert (tmp_path / "bar/foofile").exists() 27 | assert not (tmp_path / "bar/barfile").exists() 28 | 29 | 30 | def test_compute_files(session): 31 | session.register( 32 | "GET", "https://img.shields.io/static/v1?label=Coverage&message=12%25&color=red" 33 | )(text="foo") 34 | 35 | result = files.compute_files( 36 | line_rate=decimal.Decimal("0.1234"), 37 | raw_coverage_data={"foo": ["bar", "bar2"]}, 38 | 
coverage_path=pathlib.Path("."), 39 | minimum_green=decimal.Decimal("25"), 40 | minimum_orange=decimal.Decimal("70"), 41 | http_session=session, 42 | ) 43 | expected = [ 44 | files.WriteFile( 45 | path=pathlib.Path("endpoint.json"), 46 | contents='{"schemaVersion": 1, "label": "Coverage", "message": "12%", "color": "red"}', 47 | ), 48 | files.WriteFile( 49 | path=pathlib.Path("data.json"), 50 | contents='{"coverage": 12.34, "raw_data": {"foo": ["bar", "bar2"]}, "coverage_path": "."}', 51 | ), 52 | files.WriteFile(path=pathlib.Path("badge.svg"), contents="foo"), 53 | ] 54 | assert result == expected 55 | 56 | 57 | def test_compute_datafile(): 58 | assert ( 59 | files.compute_datafile( 60 | line_rate=decimal.Decimal("12.34"), 61 | raw_coverage_data={"meta": {"version": "5.5"}}, 62 | coverage_path=pathlib.Path("./src/code"), 63 | ) 64 | == """{"coverage": 12.34, "raw_data": {"meta": {"version": "5.5"}}, "coverage_path": "src/code"}""" 65 | ) 66 | 67 | 68 | def test_parse_datafile(): 69 | assert files.parse_datafile(contents="""{"coverage": 12.34}""") == ( 70 | None, 71 | decimal.Decimal("0.1234"), 72 | ) 73 | 74 | 75 | def test_parse_datafile__previous(coverage_json, coverage_obj): 76 | result = files.parse_datafile( 77 | contents=json.dumps( 78 | { 79 | "coverage": 12.34, 80 | "raw_data": coverage_json, 81 | "coverage_path": ".", 82 | } 83 | ) 84 | ) 85 | 86 | assert result == (coverage_obj, decimal.Decimal("0.1234")) 87 | 88 | 89 | def test_get_urls(): 90 | def getter(path): 91 | return f"https://{path}" 92 | 93 | urls = files.get_urls(url_getter=getter) 94 | 95 | assert urls == { 96 | "direct": "https://badge.svg", 97 | "dynamic": "https://img.shields.io/badge/dynamic/json?color=brightgreen&label=coverage&query=%24.message&url=https%3A%2F%2Fendpoint.json", 98 | "endpoint": "https://img.shields.io/endpoint?url=https://endpoint.json", 99 | } 100 | 101 | 102 | def test_get_coverage_html_files(mocker, in_tmp_path): 103 | def gen_side_effect(destination, coverage_path): 
104 | (destination / ".gitignore").touch() 105 | (destination / "index.html").touch() 106 | 107 | gen = mocker.patch( 108 | "coverage_comment.coverage.generate_coverage_html_files", 109 | side_effect=gen_side_effect, 110 | ) 111 | gen_dir = in_tmp_path / "gen" 112 | gen_dir.mkdir() 113 | rep = files.get_coverage_html_files(gen_dir=gen_dir, coverage_path=".") 114 | (source_htmlcov,) = gen_dir.iterdir() 115 | 116 | assert rep == files.ReplaceDir(path=pathlib.Path("htmlcov"), source=source_htmlcov) 117 | 118 | assert gen.called is True 119 | assert not (source_htmlcov / "gitignore").exists() 120 | -------------------------------------------------------------------------------- /tests/unit/test_github_client.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pytest 4 | 5 | from coverage_comment import github_client 6 | 7 | 8 | def test_github_client__get(session, gh): 9 | session.register("GET", "/repos/a/b/issues", timeout=60, params={"a": 1})( 10 | json={"foo": "bar"} 11 | ) 12 | 13 | assert gh.repos("a/b").issues().get(a=1) == {"foo": "bar"} 14 | 15 | 16 | def test_github_client__get_text(session, gh): 17 | session.register("GET", "/repos/a/b/issues", timeout=60, params={"a": 1})( 18 | text="foobar", headers={"content-type": "application/vnd.github.raw+json"} 19 | ) 20 | 21 | assert gh.repos("a/b").issues().get(a=1) == "foobar" 22 | 23 | 24 | def test_github_client__get_bytes(session, gh): 25 | session.register("GET", "/repos/a/b/issues", timeout=60, params={"a": 1})( 26 | text="foobar", headers={"content-type": "application/vnd.github.raw+json"} 27 | ) 28 | 29 | assert gh.repos("a/b").issues().get(a=1, bytes=True) == b"foobar" 30 | 31 | 32 | def test_github_client__get_headers(session, gh): 33 | session.register("GET", "/repos/a/b/issues", timeout=60, params={"a": 1})( 34 | json={"foo": "bar"}, 35 | headers={"X-foo": "yay"}, 36 | ) 37 | 38 | assert 
gh.repos("a/b").issues().get(a=1, headers={"X-foo": "yay"}) == {"foo": "bar"} 39 | 40 | 41 | def test_github_client__post_non_json(session, gh): 42 | session.register("POST", "/repos/a/b/issues", timeout=60, json={"a": 1})() 43 | 44 | gh.repos("a/b").issues().post(a=1) 45 | 46 | 47 | def test_json_object(): 48 | obj = github_client.JsonObject({"a": 1}) 49 | 50 | assert obj.a == 1 51 | 52 | 53 | def test_json_object__error(): 54 | obj = github_client.JsonObject({"a": 1}) 55 | 56 | with pytest.raises(AttributeError): 57 | obj.b 58 | 59 | 60 | def test_github_client__get_error(session, gh): 61 | session.register("GET", "/repos")( 62 | json={"foo": "bar"}, 63 | status_code=404, 64 | ) 65 | 66 | with pytest.raises(github_client.ApiError) as exc_info: 67 | gh.repos.get() 68 | 69 | assert str(exc_info.value) == "{'foo': 'bar'}" 70 | 71 | 72 | def test_github_client__get_error_non_json(session, gh): 73 | session.register("GET", "/repos")( 74 | text="{foobar", 75 | headers={"content-type": "text/plain"}, 76 | status_code=404, 77 | ) 78 | 79 | with pytest.raises(github_client.ApiError) as exc_info: 80 | gh.repos.get() 81 | 82 | assert str(exc_info.value) == "b'{foobar'" 83 | -------------------------------------------------------------------------------- /tests/unit/test_groups.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pytest 4 | 5 | from coverage_comment import groups 6 | 7 | 8 | @pytest.mark.parametrize( 9 | "values, separators, joiners, expected", 10 | [ 11 | # Single line 12 | ([1], {1}, set(), [(1, 1)]), 13 | # Pair of line 14 | ([1, 2], {1, 2}, set(), [(1, 2)]), 15 | # Group of lines 16 | ([1, 2, 3], {1, 2, 3}, set(), [(1, 3)]), 17 | # Pair of lines with a blank line in between 18 | ([1, 3], {1, 3}, set(), [(1, 3)]), 19 | # Pair of lines with a separator in between 20 | ([1, 3], {1, 2, 3}, set(), [(1, 1), (3, 3)]), 21 | # 3 groups of lines with separators in between 22 | 
([1, 3, 5], {1, 2, 3, 4, 5}, set(), [(1, 1), (3, 3), (5, 5)]), 23 | # 3 groups of lines with a small gap & no separator in between 24 | ([1, 3, 5], {1, 3, 5}, set(), [(1, 5)]), 25 | # with a 1-sized gap 26 | ([1, 3], {1, 3}, set(), [(1, 3)]), 27 | # with a 2-sized gap 28 | ([1, 4], {1, 4}, set(), [(1, 4)]), 29 | # with a 3-sized gap 30 | ([1, 5], {1, 5}, set(), [(1, 5)]), 31 | # with a 4-sized gap: that's > MAX_ANNOTATION_GAP so we split 32 | ([1, 6], {1, 6}, set(), [(1, 1), (6, 6)]), 33 | # with a 5-sized gap but it's all joiners 34 | ([1, 7], {1, 7}, {2, 3, 4, 5, 6}, [(1, 7)]), 35 | # same with a separator 36 | ([1, 7], {1, 4, 7}, {2, 3, 4, 5, 6}, [(1, 7)]), 37 | # an 8-sized gap with joiners and 2 non-joiners (we merge) 38 | ([1, 9], {1, 9}, {2, 3, 5, 7, 8}, [(1, 9)]), 39 | # an 8-sized gap with joiners and 4 non-joiners (we split) 40 | ([1, 9], {1, 9}, {2, 3, 7}, [(1, 1), (9, 9)]), 41 | # pair of lines with a gap that is too big, and with a separator in between 42 | ([1, 6], {1, 3, 6}, set(), [(1, 1), (6, 6)]), 43 | # single line, then group 44 | ([1, 2, 3, 5], {1, 2, 3, 5}, set(), [(1, 5)]), 45 | # group, then single line 46 | ([1, 3, 4, 5], {1, 3, 4, 5}, set(), [(1, 5)]), 47 | ], 48 | ) 49 | def test_compute_contiguous_groups(values, separators, joiners, expected): 50 | result = groups.compute_contiguous_groups( 51 | values=values, separators=separators, joiners=joiners, max_gap=3 52 | ) 53 | assert result == expected 54 | -------------------------------------------------------------------------------- /tests/unit/test_log_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import logging 4 | import re 5 | 6 | from coverage_comment import log_utils 7 | 8 | 9 | def test_level_mapping__all_supported(): 10 | ignored = { 11 | logging.getLevelName("NOTSET"), 12 | logging.getLevelName("TRACE"), 13 | } 14 | assert ( 15 | set(log_utils.LEVEL_MAPPING) 16 | == 
set(logging.getLevelNamesMapping().values()) - ignored 17 | ) 18 | 19 | 20 | def test__github_formatter(): 21 | logs = [] 22 | 23 | class TestHandler(logging.Handler): 24 | def emit(self, record): 25 | logs.append(self.format(record)) 26 | 27 | logger = logging.Logger("test", level="DEBUG") 28 | handler = TestHandler() 29 | handler.setFormatter(log_utils.GitHubFormatter()) 30 | logger.addHandler(handler) 31 | 32 | logger.debug("a debug message") 33 | logger.info("a notice message") 34 | logger.warning("a warning message") 35 | logger.error("an error message") 36 | logger.critical("an error message") 37 | try: 38 | 0 / 0 39 | except Exception: 40 | logger.exception("an exception") 41 | 42 | logs = "\n".join(logs) 43 | logs = re.sub(r"""File ".+", line \d+""", """File "foo.py", line 42""", logs) 44 | 45 | expected = """ 46 | ::debug::a debug message 47 | ::notice::a notice message 48 | ::warning::a warning message 49 | ::error::an error message 50 | ::error::an error message 51 | ::error::an exception%0ATraceback (most recent call last):%0A File "foo.py", line 42, in test__github_formatter%0A 0 / 0%0A ~~^~~%0AZeroDivisionError: division by zero""".strip() 52 | 53 | assert logs == expected 54 | -------------------------------------------------------------------------------- /tests/unit/test_main.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import os 4 | 5 | import httpx 6 | 7 | from coverage_comment import main, settings, subprocess 8 | 9 | 10 | def test_main(mocker, get_logs): 11 | # This test is a mock festival. The idea is that all the things that are hard 12 | # to simulate without mocks have been pushed up the stack up to this function 13 | # so this is THE place where we have no choice but to mock. 14 | # We could also accept not to test this function but if we've come this 15 | # far and have 98% coverage, we can as well have 100%. 
16 | 17 | exit = mocker.patch("sys.exit") 18 | action = mocker.patch("coverage_comment.main.action") 19 | 20 | os.environ.update( 21 | { 22 | "GITHUB_REPOSITORY": "foo/bar", 23 | "GITHUB_PR_RUN_ID": "", 24 | "GITHUB_REF": "ref", 25 | "GITHUB_TOKEN": "token", 26 | "GITHUB_BASE_REF": "", 27 | "GITHUB_EVENT_NAME": "push", 28 | "GITHUB_STEP_SUMMARY": "step_summary", 29 | } 30 | ) 31 | main.main() 32 | 33 | exit.assert_called_with(action.return_value) 34 | kwargs = action.call_args_list[0].kwargs 35 | assert isinstance(kwargs["config"], settings.Config) 36 | assert isinstance(kwargs["git"], subprocess.Git) 37 | assert isinstance(kwargs["github_session"], httpx.Client) 38 | assert isinstance(kwargs["http_session"], httpx.Client) 39 | 40 | assert get_logs("INFO", "Starting action") 41 | assert get_logs("INFO", "Ending action") 42 | 43 | 44 | def test_main__exception(mocker, get_logs): 45 | # This test simulates an exception in the main part of the action. This should be catched and logged. 46 | exit = mocker.patch("sys.exit") 47 | mocker.patch( 48 | "coverage_comment.main.action", side_effect=Exception("Mocked exception") 49 | ) 50 | 51 | os.environ.update( 52 | { 53 | "GITHUB_REPOSITORY": "foo/bar", 54 | "GITHUB_PR_RUN_ID": "", 55 | "GITHUB_REF": "ref", 56 | "GITHUB_TOKEN": "token", 57 | "GITHUB_BASE_REF": "", 58 | "GITHUB_EVENT_NAME": "push", 59 | "GITHUB_STEP_SUMMARY": "step_summary", 60 | } 61 | ) 62 | main.main() 63 | exit.assert_called_with(1) 64 | 65 | assert get_logs("ERROR", "Critical error") 66 | -------------------------------------------------------------------------------- /tests/unit/test_settings.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import decimal 4 | import pathlib 5 | 6 | import pytest 7 | 8 | from coverage_comment import settings 9 | 10 | 11 | @pytest.mark.parametrize("path", ["a", "a/b/.."]) 12 | def test_path_below__ok(path): 13 | assert 
settings.path_below(path) == pathlib.Path("a") 14 | 15 | 16 | @pytest.mark.parametrize("path", ["/a", "a/../.."]) 17 | def test_path_below__error(path): 18 | with pytest.raises(ValueError): 19 | settings.path_below(path) 20 | 21 | 22 | def test_config__from_environ__missing(): 23 | with pytest.raises(settings.MissingEnvironmentVariable): 24 | settings.Config.from_environ({}) 25 | 26 | 27 | def test_config__from_environ__ok(): 28 | assert settings.Config.from_environ( 29 | { 30 | "GITHUB_BASE_REF": "master", 31 | "GITHUB_TOKEN": "foo", 32 | "GITHUB_REPOSITORY": "owner/repo", 33 | "GITHUB_REF": "master", 34 | "GITHUB_OUTPUT": "foo.txt", 35 | "GITHUB_EVENT_NAME": "pull", 36 | "GITHUB_PR_RUN_ID": "123", 37 | "GITHUB_STEP_SUMMARY": "step_summary", 38 | "COMMENT_ARTIFACT_NAME": "baz", 39 | "COMMENT_FILENAME": "qux", 40 | "SUBPROJECT_ID": "subproject", 41 | "COMMENT_TEMPLATE": "footemplate", 42 | "COVERAGE_DATA_BRANCH": "branchname", 43 | "COVERAGE_PATH": "source_folder/", 44 | "MINIMUM_GREEN": "90", 45 | "MINIMUM_ORANGE": "50.8", 46 | "MERGE_COVERAGE_FILES": "true", 47 | "ANNOTATE_MISSING_LINES": "false", 48 | "ANNOTATION_TYPE": "error", 49 | "VERBOSE": "false", 50 | "FORCE_WORKFLOW_RUN": "false", 51 | } 52 | ) == settings.Config( 53 | GITHUB_BASE_REF="master", 54 | GITHUB_TOKEN="foo", 55 | GITHUB_REPOSITORY="owner/repo", 56 | GITHUB_REF="master", 57 | GITHUB_OUTPUT=pathlib.Path("foo.txt"), 58 | GITHUB_EVENT_NAME="pull", 59 | GITHUB_PR_RUN_ID=123, 60 | GITHUB_STEP_SUMMARY=pathlib.Path("step_summary"), 61 | COMMENT_ARTIFACT_NAME="baz", 62 | COMMENT_FILENAME=pathlib.Path("qux"), 63 | SUBPROJECT_ID="subproject", 64 | COMMENT_TEMPLATE="footemplate", 65 | COVERAGE_DATA_BRANCH="branchname", 66 | COVERAGE_PATH=pathlib.Path("source_folder/"), 67 | MINIMUM_GREEN=decimal.Decimal("90"), 68 | MINIMUM_ORANGE=decimal.Decimal("50.8"), 69 | MERGE_COVERAGE_FILES=True, 70 | ANNOTATE_MISSING_LINES=False, 71 | ANNOTATION_TYPE="error", 72 | VERBOSE=False, 73 | FORCE_WORKFLOW_RUN=False, 74 | 
) 75 | 76 | 77 | def test_config__verbose_deprecated(get_logs): 78 | assert settings.Config.from_environ( 79 | { 80 | "GITHUB_BASE_REF": "master", 81 | "GITHUB_TOKEN": "foo", 82 | "GITHUB_REPOSITORY": "owner/repo", 83 | "GITHUB_REF": "master", 84 | "GITHUB_EVENT_NAME": "pull", 85 | "GITHUB_PR_RUN_ID": "123", 86 | "GITHUB_STEP_SUMMARY": "step_summary", 87 | "VERBOSE": "true", 88 | } 89 | ) == settings.Config( 90 | GITHUB_BASE_REF="master", 91 | GITHUB_TOKEN="foo", 92 | GITHUB_REPOSITORY="owner/repo", 93 | GITHUB_REF="master", 94 | GITHUB_EVENT_NAME="pull", 95 | GITHUB_PR_RUN_ID=123, 96 | GITHUB_STEP_SUMMARY=pathlib.Path("step_summary"), 97 | VERBOSE=False, 98 | ) 99 | assert get_logs("INFO", "VERBOSE setting is deprecated") 100 | 101 | 102 | @pytest.fixture 103 | def config(): 104 | defaults = { 105 | "GITHUB_BASE_REF": "master", 106 | "GITHUB_TOKEN": "foo", 107 | "GITHUB_REPOSITORY": "owner/repo", 108 | "GITHUB_REF": "master", 109 | "GITHUB_EVENT_NAME": "pull", 110 | "GITHUB_PR_RUN_ID": 123, 111 | "GITHUB_STEP_SUMMARY": pathlib.Path("step_summary"), 112 | "COMMENT_ARTIFACT_NAME": "baz", 113 | "COMMENT_FILENAME": pathlib.Path("qux"), 114 | "COVERAGE_DATA_BRANCH": "branchname", 115 | "MINIMUM_GREEN": decimal.Decimal("90"), 116 | "MINIMUM_ORANGE": decimal.Decimal("50.8"), 117 | "MERGE_COVERAGE_FILES": True, 118 | } 119 | 120 | def _(**kwargs): 121 | return settings.Config(**(defaults | kwargs)) 122 | 123 | return _ 124 | 125 | 126 | @pytest.mark.parametrize( 127 | "github_ref, github_pr_number", 128 | [ 129 | ("foo", None), 130 | ("refs/heads/branch-with-pull", None), 131 | ("refs/tags/tag-with-pull", None), 132 | ("refs/pull/2/merge", 2), 133 | ], 134 | ) 135 | def test_config__GITHUB_PR_NUMBER(config, github_ref, github_pr_number): 136 | assert config(GITHUB_REF=github_ref).GITHUB_PR_NUMBER == github_pr_number 137 | 138 | 139 | @pytest.mark.parametrize( 140 | "github_ref, github_branch_name", 141 | [ 142 | ("refs/pull/2/merge", None), 143 | ("refs/pull/2/head", 
None), 144 | ("refs/tags/tag-with-heads", None), 145 | ("refs/heads/a/b", "a/b"), 146 | ], 147 | ) 148 | def test_config__GITHUB_BRANCH_NAME(config, github_ref, github_branch_name): 149 | assert config(GITHUB_REF=github_ref).GITHUB_BRANCH_NAME == github_branch_name 150 | 151 | 152 | def test_config__from_environ__error(): 153 | with pytest.raises(ValueError): 154 | settings.Config.from_environ({"COMMENT_FILENAME": "/a"}) 155 | 156 | 157 | def test_config__invalid_annotation_type(): 158 | with pytest.raises(settings.InvalidAnnotationType): 159 | settings.Config.from_environ({"ANNOTATION_TYPE": "foo"}) 160 | 161 | 162 | @pytest.mark.parametrize( 163 | "input, output", 164 | [ 165 | ("true", True), 166 | ("True", True), 167 | ("1", True), 168 | ("yes", True), 169 | ("false", False), 170 | ("False", False), 171 | ("0", False), 172 | ("no", False), 173 | ("foo", False), 174 | ], 175 | ) 176 | def test_str_to_bool(input, output): 177 | assert settings.str_to_bool(input) is output 178 | 179 | 180 | def test_final_comment_filename(config): 181 | config_obj = config( 182 | COMMENT_FILENAME=pathlib.Path("foo.txt"), 183 | SUBPROJECT_ID="bar", 184 | ) 185 | assert config_obj.FINAL_COMMENT_FILENAME == pathlib.Path("foo-bar.txt") 186 | 187 | 188 | def test_final_coverage_data_branch(config): 189 | config_obj = config( 190 | COVERAGE_DATA_BRANCH="foo", 191 | SUBPROJECT_ID="bar", 192 | ) 193 | assert config_obj.FINAL_COVERAGE_DATA_BRANCH == "foo-bar" 194 | -------------------------------------------------------------------------------- /tests/unit/test_storage.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pathlib 4 | 5 | import pytest 6 | 7 | from coverage_comment import files, storage, subprocess 8 | 9 | 10 | def test_checked_out_branch(git): 11 | git.register("git branch --show-current")(stdout="bar") 12 | git.register("git reset --hard")() 13 | git.register("git fetch origin foo")() 14 
| git.register("git switch foo")() 15 | 16 | with storage.checked_out_branch(git=git, branch="foo"): 17 | git.register("git switch bar")() 18 | 19 | 20 | def test_checked_out_branch__detached_head(git): 21 | git.register("git branch --show-current")(stdout="") 22 | git.register("git rev-parse --short HEAD")(stdout="123abc") 23 | git.register("git reset --hard")() 24 | git.register("git fetch origin foo")() 25 | git.register("git switch foo")() 26 | 27 | with storage.checked_out_branch(git=git, branch="foo"): 28 | git.register("git switch --detach 123abc")() 29 | 30 | 31 | def test_checked_out_branch__branch_does_not_exist(git): 32 | git.register("git branch --show-current")(stdout="bar") 33 | git.register("git reset --hard")() 34 | git.register("git fetch origin foo")(exit_code=1) 35 | git.register("git fetch origin")() 36 | git.register("git rev-parse --verify origin/foo")(exit_code=1) 37 | git.register("git switch --orphan foo")() 38 | 39 | with storage.checked_out_branch(git=git, branch="foo"): 40 | git.register("git switch bar")() 41 | 42 | 43 | def test_checked_out_branch__fetch_fails(git): 44 | git.register("git branch --show-current")(stdout="bar") 45 | git.register("git reset --hard")() 46 | git.register("git fetch origin foo")(exit_code=1) 47 | git.register("git fetch origin")() 48 | git.register("git rev-parse --verify origin/foo")() 49 | 50 | with pytest.raises(subprocess.GitError): 51 | with storage.checked_out_branch(git=git, branch="foo"): 52 | pass 53 | 54 | 55 | def test_commit_operations__no_diff(git, in_tmp_path): 56 | operations = [ 57 | files.WriteFile(path=pathlib.Path("a.txt"), contents="a"), 58 | files.WriteFile(path=pathlib.Path("b.txt"), contents="b"), 59 | ] 60 | 61 | # checked_out_branch 62 | git.register("git branch --show-current")(stdout="bar") 63 | git.register("git reset --hard")() 64 | git.register("git fetch origin foo")() 65 | git.register("git switch foo")() 66 | 67 | # upload_files 68 | git.register(f"git add 
{operations[0].path}")() 69 | git.register(f"git add {operations[1].path}")() 70 | git.register("git diff --staged --exit-code")() # no diff 71 | 72 | # __exit__ of checked_out_branch 73 | git.register("git switch bar")() 74 | 75 | storage.commit_operations( 76 | operations=operations, 77 | git=git, 78 | branch="foo", 79 | ) 80 | 81 | # But content has been written to disk 82 | assert operations[0].path.read_text() == operations[0].contents 83 | assert operations[1].path.read_text() == operations[1].contents 84 | 85 | 86 | def test_commit_operations(git, in_tmp_path): 87 | operations = [ 88 | files.WriteFile(path=pathlib.Path("a.txt"), contents="a"), 89 | files.WriteFile(path=pathlib.Path("b.txt"), contents="b"), 90 | ] 91 | 92 | # checked_out_branch 93 | git.register("git branch --show-current")(stdout="bar") 94 | git.register("git reset --hard")() 95 | git.register("git fetch origin foo")() 96 | git.register("git switch foo")() 97 | 98 | # upload_files 99 | git.register(f"git add {operations[0].path}")() 100 | git.register(f"git add {operations[1].path}")() 101 | 102 | git.register("git diff --staged --exit-code")(exit_code=1) # diff! 
103 | 104 | # (yes, it's missing the quotes, but this is just an artifact from our test 105 | # double) 106 | git.register( 107 | "git commit --message Update coverage data", 108 | env={ 109 | "GIT_AUTHOR_NAME": "github-actions", 110 | "GIT_AUTHOR_EMAIL": "41898282+github-actions[bot]@users.noreply.github.com", 111 | "GIT_COMMITTER_NAME": "github-actions", 112 | "GIT_COMMITTER_EMAIL": "41898282+github-actions[bot]@users.noreply.github.com", 113 | }, 114 | )() 115 | git.register("git push origin foo")() 116 | 117 | # __exit__ of checked_out_branch 118 | git.register("git switch bar")() 119 | 120 | storage.commit_operations( 121 | operations=operations, 122 | git=git, 123 | branch="foo", 124 | ) 125 | 126 | assert operations[0].path.read_text() == operations[0].contents 127 | assert operations[1].path.read_text() == operations[1].contents 128 | 129 | 130 | def test_get_datafile_contents__not_found(gh, session): 131 | session.register("GET", "/repos/foo/bar/contents/data.json", params={"ref": "baz"})( 132 | status_code=404 133 | ) 134 | 135 | result = storage.get_datafile_contents( 136 | github=gh, 137 | repository="foo/bar", 138 | branch="baz", 139 | ) 140 | assert result is None 141 | 142 | 143 | def test_get_datafile_contents(gh, session): 144 | session.register("GET", "/repos/foo/bar/contents/data.json", params={"ref": "baz"})( 145 | text="yay", headers={"content-type": "application/vnd.github.raw+json"} 146 | ) 147 | 148 | result = storage.get_datafile_contents( 149 | github=gh, 150 | repository="foo/bar", 151 | branch="baz", 152 | ) 153 | assert result == "yay" 154 | 155 | 156 | @pytest.mark.parametrize( 157 | "is_public, expected", 158 | [ 159 | (False, "https://github.com/foo/bar/raw/baz/qux"), 160 | (True, "https://raw.githubusercontent.com/foo/bar/baz/qux"), 161 | ], 162 | ) 163 | def test_get_raw_file_url(is_public, expected): 164 | result = storage.get_raw_file_url( 165 | repository="foo/bar", 166 | branch="baz", 167 | path=pathlib.Path("qux"), 168 | 
is_public=is_public, 169 | ) 170 | assert result == expected 171 | 172 | 173 | @pytest.mark.parametrize( 174 | "path, expected", 175 | [ 176 | ("", "https://github.com/foo/bar/tree/baz"), 177 | ("/", "https://github.com/foo/bar/tree/baz"), 178 | ("qux", "https://github.com/foo/bar/blob/baz/qux"), # blob 179 | ("qux/", "https://github.com/foo/bar/tree/baz/qux"), 180 | ("/qux", "https://github.com/foo/bar/blob/baz/qux"), # blob 181 | ("/qux/", "https://github.com/foo/bar/tree/baz/qux"), 182 | ], 183 | ) 184 | def test_get_repo_file_url(path, expected): 185 | result = storage.get_repo_file_url(repository="foo/bar", branch="baz", path=path) 186 | 187 | assert result == expected 188 | 189 | 190 | def test_get_repo_file_url__no_path(): 191 | result = storage.get_repo_file_url(repository="foo/bar", branch="baz") 192 | 193 | assert result == "https://github.com/foo/bar/tree/baz" 194 | 195 | 196 | def test_get_html_report_url(): 197 | result = storage.get_html_report_url(repository="foo/bar", branch="baz") 198 | expected = "https://htmlpreview.github.io/?https://github.com/foo/bar/blob/baz/htmlcov/index.html" 199 | assert result == expected 200 | -------------------------------------------------------------------------------- /tests/unit/test_subprocess.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import pathlib 4 | 5 | import pytest 6 | 7 | from coverage_comment import subprocess 8 | 9 | 10 | def test_run__ok(): 11 | assert subprocess.run("echo", "yay", path=pathlib.Path(".")).strip() == "yay" 12 | 13 | 14 | def test_run__path(): 15 | assert subprocess.run("pwd", path=pathlib.Path("/")).strip() == "/" 16 | 17 | 18 | def test_run__kwargs(): 19 | assert "A=B" in subprocess.run("env", env={"A": "B"}, path=pathlib.Path(".")) 20 | 21 | 22 | def test_run__error(): 23 | with pytest.raises(subprocess.SubProcessError): 24 | subprocess.run("false", path=pathlib.Path(".")) 25 | 26 | 27 | 
@pytest.fixture 28 | def environ(mocker): 29 | return mocker.patch("os.environ", {}) 30 | 31 | 32 | def test_git(mocker, environ): 33 | run = mocker.patch("coverage_comment.subprocess.run") 34 | git = subprocess.Git() 35 | git.cwd = pathlib.Path("/tmp") 36 | environ["A"] = "B" 37 | 38 | git.clone("https://some_address.git", "--depth", "1", text=True) 39 | git.add("some_file") 40 | 41 | run.assert_has_calls( 42 | [ 43 | mocker.call( 44 | "git", 45 | "clone", 46 | "https://some_address.git", 47 | "--depth", 48 | "1", 49 | path=pathlib.Path("/tmp"), 50 | text=True, 51 | env=mocker.ANY, 52 | ), 53 | mocker.call( 54 | "git", 55 | "add", 56 | "some_file", 57 | path=pathlib.Path("/tmp"), 58 | env=mocker.ANY, 59 | ), 60 | ] 61 | ) 62 | 63 | assert run.call_args_list[0].kwargs["env"]["A"] == "B" 64 | 65 | 66 | def test_git_env(mocker, environ): 67 | run = mocker.patch("coverage_comment.subprocess.run") 68 | git = subprocess.Git() 69 | 70 | environ.update({"A": "B", "C": "D"}) 71 | 72 | git.commit(env={"C": "E", "F": "G"}) 73 | 74 | _, kwargs = run.call_args_list[0] 75 | 76 | env = run.call_args_list[0].kwargs["env"] 77 | assert env["A"] == "B" 78 | assert env["C"] == "E" 79 | assert env["F"] == "G" 80 | 81 | 82 | def test_git__error(mocker): 83 | mocker.patch( 84 | "coverage_comment.subprocess.run", side_effect=subprocess.SubProcessError 85 | ) 86 | git = subprocess.Git() 87 | 88 | with pytest.raises(subprocess.GitError): 89 | git.add("some_file") 90 | --------------------------------------------------------------------------------