├── .coveragerc
├── .github
│   ├── CODEOWNERS
│   ├── linters
│   │   ├── .isort.cfg
│   │   ├── trivy.yaml
│   │   ├── .mypy.ini
│   │   ├── .flake8
│   │   ├── zizmor.yaml
│   │   └── .jscpd.json
│   ├── ISSUE_TEMPLATE
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   └── bug_report.yml
│   ├── workflows
│   │   ├── docker-image.yml
│   │   ├── pr-title.yml
│   │   ├── auto-labeler.yml
│   │   ├── stale.yaml
│   │   ├── python-package.yml
│   │   ├── linter.yaml
│   │   ├── contributor_report.yaml
│   │   ├── scorecard.yml
│   │   ├── copilot-setup-steps.yml
│   │   └── release.yml
│   ├── scripts
│   │   └── env_vars_check.sh
│   ├── pull_request_template.md
│   ├── copilot-instructions.md
│   ├── dependabot.yml
│   └── release-drafter.yml
├── requirements.txt
├── docs
│   ├── img
│   │   └── issue-metrics-sample-output.png
│   ├── local-usage-without-docker.md
│   ├── example-using-json-instead-markdown-output.md
│   ├── dealing-with-large-issue-metrics.md
│   ├── verify-token-access-to-repository.md
│   ├── authenticating-with-github-app-installation.md
│   ├── assign-team-instead-of-individual.md
│   ├── search-query.md
│   ├── example-workflows.md
│   └── measure-time.md
├── requirements-test.txt
├── action.yml
├── .vscode
│   └── settings.json
├── .devcontainer
│   └── devcontainer.json
├── .dockerignore
├── .env-example
├── Makefile
├── LICENSE
├── markdown_helpers.py
├── time_to_merge.py
├── Dockerfile
├── time_to_ready_for_review.py
├── test_time_to_merge.py
├── test_time_to_ready_for_review.py
├── test_config_get_bool.py
├── classes.py
├── test_markdown_helpers.py
├── .gitignore
├── time_to_answer.py
├── auth.py
├── discussions.py
├── time_to_close.py
├── pr_comments.py
├── test_time_to_answer.py
├── test_time_to_close.py
├── test_most_active_mentors.py
├── test_auth.py
├── test_column_order_fix.py
├── time_in_draft.py
├── CONTRIBUTING.md
├── search.py
├── test_discussions.py
├── test_pr_comments.py
├── labels.py
├── test_assignee_integration.py
├── test_search.py
├── most_active_mentors.py
├── time_to_first_response.py
├── test_assignee_functionality.py
├── json_writer.py
├── test_json_writer.py
└── test_time_in_draft.py
/.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = test*.py -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @github/ospo-github-actions 2 | -------------------------------------------------------------------------------- /.github/linters/.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | profile = black 3 | -------------------------------------------------------------------------------- /.github/linters/trivy.yaml: -------------------------------------------------------------------------------- 1 | scan: 2 | skip-dirs: 3 | - .mypy_cache 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | github3.py==4.0.1 2 | numpy==2.3.5 3 | python-dotenv==1.2.1 4 | pytz==2025.2 5 | requests==2.32.5 6 | -------------------------------------------------------------------------------- /docs/img/issue-metrics-sample-output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/github/issue-metrics/HEAD/docs/img/issue-metrics-sample-output.png -------------------------------------------------------------------------------- /.github/linters/.mypy.ini: 
-------------------------------------------------------------------------------- 1 | [mypy] 2 | disable_error_code = attr-defined, import-not-found 3 | 4 | [mypy-github3.*] 5 | ignore_missing_imports = True 6 | -------------------------------------------------------------------------------- /.github/linters/.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude = venv,.venv,.git,__pycache__ 3 | extend-ignore = C901, E203 4 | max-line-length = 150 5 | statistics = True 6 | -------------------------------------------------------------------------------- /.github/linters/zizmor.yaml: -------------------------------------------------------------------------------- 1 | rules: 2 | dangerous-triggers: # to allow pull_request_target for auto-labelling fork pull requests 3 | ignore: 4 | - auto-labeler.yml 5 | - pr-title.yml 6 | - release.yml 7 | -------------------------------------------------------------------------------- /requirements-test.txt: -------------------------------------------------------------------------------- 1 | black==25.12.0 2 | flake8==7.3.0 3 | mypy==1.19.1 4 | mypy-extensions==1.1.0 5 | pylint==4.0.4 6 | pytest==9.0.2 7 | pytest-cov==7.0.0 8 | types-pytz==2025.2.0.20251108 9 | types-requests==2.32.4.20250913 10 | -------------------------------------------------------------------------------- /action.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "issue-metrics" 3 | author: "github" 4 | description: "A GitHub Action to report out issue metrics" 5 | runs: 6 | using: "docker" 7 | image: "docker://ghcr.io/github/issue_metrics:v3" 8 | branding: 9 | icon: "check-square" 10 | color: "white" 11 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.testing.pytestArgs": ["."], 3 | "python.testing.unittestEnabled": false, 4 | "python.testing.pytestEnabled": true, 5 | "[python]": { 6 | "editor.defaultFormatter": "ms-python.black-formatter" 7 | }, 8 | "python.formatting.provider": "none" 9 | } 10 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Existing Dockerfile", 3 | "build": { 4 | "context": "..", 5 | "dockerfile": "../Dockerfile" 6 | }, 7 | 8 | "features": { 9 | "ghcr.io/devcontainers/features/common-utils:2": {} 10 | }, 11 | 12 | "remoteUser": "devcontainer" 13 | } 14 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | 3 | contact_links: 4 | - name: Ask a question 5 | url: https://github.com/github/issue-metrics/discussions/new 6 | about: Ask a question or start a discussion 7 | - name: GitHub OSPO GitHub Action Overall Issue 8 | url: https://github.com/github/github-ospo/issues/new 9 | about: File issue for multiple GitHub OSPO GitHub Actions 10 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Application specific files 2 | test_*.py 3 | 4 | # Python 5 | *.pyc 6 | __pycache__/ 7 | *.pyo 8 | *.pyd 9 | 10 | # Common 11 | *.md 12 | 
docker-compose.yml 13 | Dockerfile* 14 | .env* 15 | Makefile 16 | 17 | # Logs 18 | logs 19 | *.log 20 | 21 | # IDEs 22 | .vscode/ 23 | .idea/ 24 | 25 | # Dependency directories 26 | node_modules/ 27 | .venv/ 28 | 29 | ## Cache directories 30 | .parcel-cache 31 | 32 | # git 33 | .git 34 | .gitattributes 35 | .gitignore 36 | .github/ 37 | -------------------------------------------------------------------------------- /docs/local-usage-without-docker.md: -------------------------------------------------------------------------------- 1 | # Local usage without Docker 2 | 3 | 1. Make sure you have at least Python 3.11 installed 4 | 1. Copy `.env-example` to `.env` 5 | 1. Fill out the `.env` file with a _token_ from a user that has access to the organization to scan (listed below). Tokens should have admin:org or read:org access. 6 | 1. Fill out the `.env` file with the _search_query_ to filter issues by 7 | 1. `pip3 install -r requirements.txt` 8 | 1. Run `python3 ./issue_metrics.py`, which will output issue metrics data 9 | -------------------------------------------------------------------------------- /.github/workflows/docker-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Docker Image CI 3 | 4 | on: 5 | push: 6 | branches: [main] 7 | pull_request: 8 | branches: [main] 9 | 10 | permissions: 11 | contents: read 12 | 13 | jobs: 14 | build: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v6.0.1 18 | with: 19 | persist-credentials: false 20 | - name: Build the Docker image 21 | run: docker build . --file Dockerfile --platform linux/amd64 --tag issue-metrics:"$(date +%s)" 22 | -------------------------------------------------------------------------------- /.github/workflows/pr-title.yml: -------------------------------------------------------------------------------- 1 | ## Reference: https://github.com/amannn/action-semantic-pull-request 2 | --- 3 | name: "Lint PR Title" 4 | on: 5 | pull_request_target: 6 | types: [opened, reopened, edited, synchronize] 7 | permissions: 8 | contents: read 9 | jobs: 10 | main: 11 | permissions: 12 | contents: read 13 | pull-requests: read 14 | statuses: write 15 | uses: github/ospo-reusable-workflows/.github/workflows/pr-title.yaml@26eec20abba5ae806698592c79628f6906da372c 16 | secrets: 17 | github-token: ${{ secrets.GITHUB_TOKEN }} 18 | -------------------------------------------------------------------------------- /.env-example: -------------------------------------------------------------------------------- 1 | GH_APP_ID="" 2 | GH_APP_INSTALLATION_ID="" 3 | GH_APP_PRIVATE_KEY="" 4 | GITHUB_APP_ENTERPRISE_ONLY="" 5 | GH_ENTERPRISE_URL="" 6 | GH_TOKEN="" 7 | HIDE_AUTHOR="false" 8 | HIDE_ITEMS_CLOSED_COUNT="false" 9 | HIDE_LABEL_METRICS="false" 10 | HIDE_TIME_TO_ANSWER="false" 11 | HIDE_TIME_TO_CLOSE="false" 12 | HIDE_TIME_TO_FIRST_RESPONSE="false" 13 | IGNORE_USERS="user1,user2" 14 | LABELS_TO_MEASURE="waiting-for-review,waiting-for-manager" 15 | NON_MENTIONING_LINKS="false" 16 | OUTPUT_FILE="" 17 | REPORT_TITLE="Issue Metrics" 18 | SEARCH_QUERY="repo:owner/repo is:open is:issue" 19 | -------------------------------------------------------------------------------- /.github/linters/.jscpd.json: -------------------------------------------------------------------------------- 1 | { 2 | "exitCode": 0, 3 | "ignore": [ 4 | "**/.venv/**", 5 | "**/.coverage*", 6 | "**/.devcontainer/**", 7 | "**/.git/**", 8 | "**/.gitignore", 9 | "**/.github/**", 10 | 
"**/.mypy_cache/**", 11 | "**/.pytest_cache/**", 12 | "**/__pycache__/**", 13 | "**/build/**", 14 | "**/dist/**", 15 | "**/docs/**", 16 | "**/migrations/**", 17 | "**/node_modules/**", 18 | "**/report/**", 19 | "**/test_*.py", 20 | "**/venv/**", 21 | "**/*.md", 22 | "**/Dockerfile", 23 | "**/LICENSE", 24 | "**/Makefile" 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /.github/workflows/auto-labeler.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Auto Labeler 3 | on: 4 | # pull_request_target event is required for autolabeler to support all PRs including forks 5 | pull_request_target: 6 | types: [opened, reopened, edited, synchronize] 7 | permissions: 8 | contents: read 9 | jobs: 10 | main: 11 | permissions: 12 | contents: write 13 | pull-requests: write 14 | uses: github/ospo-reusable-workflows/.github/workflows/auto-labeler.yaml@26eec20abba5ae806698592c79628f6906da372c 15 | with: 16 | config-name: release-drafter.yml 17 | secrets: 18 | github-token: ${{ secrets.GITHUB_TOKEN }} 19 | -------------------------------------------------------------------------------- /.github/scripts/env_vars_check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Find all test_*.py files 4 | files=$(find . -name "test_*.py") 5 | RED='\033[0;31m' 6 | GREEN='\033[0;32m' 7 | NC='\033[0m' # No Color 8 | 9 | # Loop through each file 10 | for file in $files; do 11 | # Search for instances of get_env_vars() with no arguments 12 | result=$(grep -n "get_env_vars()" "$file") 13 | 14 | # If any instances are found, print the file name and line number 15 | if [ -n "$result" ]; then 16 | echo "Found in $file:" 17 | echo "$result" 18 | echo -e "${RED}ERROR: get_env_vars() should always set test=True in test*.py files.${NC}" 19 | exit 1 20 | fi 21 | done 22 | echo -e " ${GREEN}PASS:${NC} All test*.py files call get_env_vars() with test=True." 23 | -------------------------------------------------------------------------------- /.github/workflows/stale.yaml: -------------------------------------------------------------------------------- 1 | name: "Close stale issues" 2 | on: 3 | schedule: 4 | - cron: "30 1 * * *" 5 | 6 | permissions: 7 | issues: write 8 | pull-requests: read 9 | 10 | jobs: 11 | stale: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/stale@v10.1.1 15 | with: 16 | stale-issue-message: "This issue is stale because it has been open 21 days with no activity. Remove stale label or comment or this will be closed in 14 days." 17 | close-issue-message: "This issue was closed because it has been stalled for 35 days with no activity." 18 | days-before-stale: 21 19 | days-before-close: 14 20 | days-before-pr-close: -1 21 | exempt-issue-labels: keep 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: test 2 | test: 3 | pytest -v --cov=. --cov-config=.coveragerc --cov-fail-under=80 --cov-report term-missing 4 | .github/scripts/env_vars_check.sh 5 | 6 | .PHONY: clean 7 | clean: 8 | rm -rf .pytest_cache .coverage __pycache__ 9 | 10 | .PHONY: lint 11 | lint: 12 | # stop the build if there are Python syntax errors or undefined names 13 | flake8 . --config=.github/linters/.flake8 --count --select=E9,F63,F7,F82 --show-source 14 | # exit-zero treats all errors as warnings. 
The GitHub editor is 127 chars wide 15 | flake8 . --config=.github/linters/.flake8 --count --exit-zero --max-complexity=15 --max-line-length=127 16 | isort --settings-file=.github/linters/.isort.cfg . 17 | pylint --rcfile=.github/linters/.python-lint --fail-under=9.0 *.py 18 | mypy --config-file=.github/linters/.mypy.ini *.py 19 | black . 20 | -------------------------------------------------------------------------------- /docs/example-using-json-instead-markdown-output.md: -------------------------------------------------------------------------------- 1 | # Example using the JSON output instead of the Markdown output 2 | 3 | There is JSON output available as well. You could use it for any number of possibilities, but here is one example that demonstrates retrieving the JSON output and then printing it out. 4 | 5 | ```yaml 6 | name: Monthly issue metrics 7 | on: 8 | workflow_dispatch: 9 | schedule: 10 | - cron: "3 2 1 * *" 11 | 12 | permissions: 13 | contents: read 14 | 15 | jobs: 16 | build: 17 | name: issue metrics 18 | runs-on: ubuntu-latest 19 | permissions: 20 | issues: write 21 | pull-requests: read 22 | 23 | steps: 24 | - name: Run issue-metrics tool 25 | id: issue-metrics 26 | uses: github/issue-metrics@v3 27 | env: 28 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 29 | SEARCH_QUERY: 'repo:owner/repo is:issue created:2023-05-01..2023-05-31 -reason:"not planned"' 30 | 31 | - name: Print output of issue metrics tool 32 | run: echo "${{ steps.issue-metrics.outputs.metrics }}" 33 | ``` 34 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Pull Request 2 | 3 | 11 | 12 | ## Proposed Changes 13 | 14 | 15 | 16 | ## Readiness Checklist 17 | 18 | ### Author/Contributor 19 | 20 | - [ ] If documentation is needed for this change, has that been included in this pull request 21 | - [ ] run `make lint` and fix any issues that you have introduced 22 | - [ ] run `make test` and ensure you have test coverage for the lines you are introducing 23 | - [ ] If publishing new data to the public (scorecards, security scan results, code quality results, live dashboards, etc.), please request review from `@jeffrey-luszcz` 24 | 25 | ### Reviewer 26 | 27 | - [ ] Label as either `fix`, `documentation`, `enhancement`, `infrastructure`, `maintenance`, or `breaking` 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 GitHub 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /.github/workflows/python-package.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 3 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 4 | 5 | name: Python package 6 | 7 | on: 8 | push: 9 | branches: [main] 10 | pull_request: 11 | branches: [main] 12 | 13 | permissions: 14 | contents: read 15 | 16 | jobs: 17 | build: 18 | runs-on: ubuntu-latest 19 | strategy: 20 | matrix: 21 | python-version: [3.11, 3.12] 22 | 23 | steps: 24 | - uses: actions/checkout@v6.0.1 25 | with: 26 | persist-credentials: false 27 | - name: Set up Python ${{ matrix.python-version }} 28 | uses: actions/setup-python@v6.1.0 29 | with: 30 | python-version: ${{ matrix.python-version }} 31 | - name: Install dependencies 32 | run: | 33 | pip install -r requirements.txt -r requirements-test.txt 34 | - name: Lint with flake8 and pylint 35 | run: | 36 | make lint 37 | - name: Test with pytest 38 | run: | 39 | make test 40 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | description: Suggest an idea for this project 4 | labels: 5 | - enhancement 6 | body: 7 | - type: textarea 8 | attributes: 9 | label: Is your feature request related to a problem? 10 | description: A clear and concise description of what the problem is. Please describe. 11 | placeholder: | 12 | Ex. I'm always frustrated when [...] 13 | validations: 14 | required: false 15 | 16 | - type: textarea 17 | attributes: 18 | label: Describe the solution you'd like 19 | description: A clear and concise description of what you want to happen. 20 | validations: 21 | required: true 22 | 23 | - type: textarea 24 | attributes: 25 | label: Describe alternatives you've considered 26 | description: A clear and concise description of any alternative solutions or features you've considered. 27 | validations: 28 | required: false 29 | 30 | - type: textarea 31 | attributes: 32 | label: Additional context 33 | description: Add any other context or screenshots about the feature request here. 34 | validations: 35 | required: false 36 | -------------------------------------------------------------------------------- /.github/copilot-instructions.md: -------------------------------------------------------------------------------- 1 | # Copilot Instructions 2 | 3 | This is a GitHub Action that searches for issues/pull requests/discussions in a repository, measures several metrics, and generates a report in the form of a GitHub issue. The issues/pull requests/discussions to search for can be filtered by using a search query. 4 | 5 | ## Code Standards 6 | 7 | ### Required Before Each Commit 8 | 9 | - Run `make lint` before committing any changes to ensure proper code linting and formatting; the individual linters it runs are listed below.
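For reference, the `lint` target in the repository `Makefile` runs the following tools in order (each configured under `.github/linters/`):

```shell
flake8 . --config=.github/linters/.flake8 --count --select=E9,F63,F7,F82 --show-source
flake8 . --config=.github/linters/.flake8 --count --exit-zero --max-complexity=15 --max-line-length=127
isort --settings-file=.github/linters/.isort.cfg .
pylint --rcfile=.github/linters/.python-lint --fail-under=9.0 *.py
mypy --config-file=.github/linters/.mypy.ini *.py
black .
```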
10 | 11 | ### Development Flow 12 | 13 | - Lint: `make lint` 14 | - Test: `make test` 15 | 16 | ## Repository Structure 17 | 18 | - `Makefile`: Contains commands for linting, testing, and other tasks 19 | - `requirements.txt`: Python dependencies for the project 20 | - `requirements-test.txt`: Python dependencies for testing 21 | - `README.md`: Project documentation and setup instructions 22 | - `action.yml`: GitHub Action definition and Docker runtime configuration 23 | - `test_*.py`: Python test files matching the naming convention for test discovery 24 | 25 | ## Key Guidelines 26 | 27 | 1. Follow Python best practices and idiomatic patterns. 28 | 2. Maintain existing code structure and organization. 29 | 3. Write unit tests for new functionality. 30 | 4. Document changes to environment variables in the `README.md` file. 31 | -------------------------------------------------------------------------------- /.github/workflows/linter.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Lint Code Base 3 | 4 | on: 5 | pull_request: 6 | branches: [main] 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | lint: 13 | name: Lint Code Base 14 | runs-on: ubuntu-latest 15 | permissions: 16 | contents: read 17 | packages: read 18 | statuses: write 19 | steps: 20 | - name: Checkout Code 21 | uses: actions/checkout@v6.0.1 22 | with: 23 | # Full git history is needed to get a proper 24 | # list of changed files within `super-linter` 25 | fetch-depth: 0 26 | persist-credentials: false 27 | - uses: actions/setup-python@v6.1.0 28 | with: 29 | python-version: "3.12" 30 | - name: Install dependencies 31 | run: | 32 | pip install -r requirements.txt -r requirements-test.txt 33 | - name: Lint Code Base 34 | uses: super-linter/super-linter@502f4fe48a81a392756e173e39a861f8c8efe056 # v8.3.0 35 | env: 36 | DEFAULT_BRANCH: main 37 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 38 | GITHUB_ACTIONS_COMMAND_ARGS: -shellcheck= 39 | VALIDATE_BIOME_FORMAT: false 40 | VALIDATE_BIOME_LINT: false 41 | VALIDATE_PYTHON_RUFF_FORMAT: false 42 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | description: Create a report to help us improve 4 | labels: 5 | - bug 6 | body: 7 | - type: textarea 8 | attributes: 9 | label: Describe the bug 10 | description: A clear and concise description of what the bug is. 11 | validations: 12 | required: true 13 | 14 | - type: textarea 15 | attributes: 16 | label: To Reproduce 17 | description: Steps to reproduce the behavior 18 | placeholder: | 19 | 1. Go to '...' 20 | 2. Click on '....' 21 | 3. Scroll down to '....' 22 | 4. See error 23 | validations: 24 | required: true 25 | 26 | - type: textarea 27 | attributes: 28 | label: Expected behavior 29 | description: A clear and concise description of what you expected to happen. 30 | validations: 31 | required: true 32 | 33 | - type: textarea 34 | attributes: 35 | label: Screenshots 36 | description: If applicable, add screenshots to help explain your problem. 37 | validations: 38 | required: false 39 | 40 | - type: textarea 41 | attributes: 42 | label: Additional context 43 | description: Add any other context about the problem here. 
44 | validations: 45 | required: false 46 | -------------------------------------------------------------------------------- /docs/dealing-with-large-issue-metrics.md: -------------------------------------------------------------------------------- 1 | # Dealing with large issue metrics Markdown files 2 | 3 | When working with lots of issues/pull requests/discussion results, the resulting issue_metrics.md file can become very large. This can cause the GitHub API to return an error when trying to create an issue with the contents of the file. 4 | 5 | ```shell 6 | Pull request creation failed. Validation failed: Body is too long (maximum is 65536 characters) 7 | ``` 8 | 9 | To work around this limitation, the issue-metrics action detects the large file size and splits the issue_metrics.md file into smaller files. So instead of issue_metrics.md, you will get issue_metrics_0.md, issue_metrics_1.md, etc. 10 | Since we don't want the action to fail, it has been designed to have the same name as usual for the first split file (issue_metrics.md) and then append a number to the name for the subsequent split files. 11 | 12 | You can choose one of the following strategies to deal with the split files: 13 | 14 | - Create multiple issues, each using the next split file in the sequence. 15 | - Upload the full file as an artifact and link to it in the issue body. 16 | - Create an issue and put the content of the split files as issue comments. 17 | 18 | JSON output files are not split since it's not anticipated that you'll use them as issue body content. 19 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | - package-ecosystem: "pip" # See documentation for possible values 5 | directory: "/" # Location of package manifests 6 | schedule: 7 | interval: "weekly" 8 | cooldown: 9 | default-days: 7 10 | commit-message: 11 | prefix: "chore(deps)" 12 | labels: ["python", "dependencies"] 13 | groups: 14 | dependencies: 15 | applies-to: version-updates 16 | update-types: 17 | - "minor" 18 | - "patch" 19 | - package-ecosystem: "github-actions" 20 | directory: "/" 21 | schedule: 22 | interval: "weekly" 23 | cooldown: 24 | default-days: 7 25 | commit-message: 26 | prefix: "chore(deps)" 27 | labels: ["github_actions", "dependencies"] 28 | groups: 29 | dependencies: 30 | applies-to: version-updates 31 | update-types: 32 | - "minor" 33 | - "patch" 34 | - package-ecosystem: "docker" 35 | directory: "/" 36 | schedule: 37 | interval: "weekly" 38 | cooldown: 39 | default-days: 7 40 | commit-message: 41 | prefix: "chore(deps)" 42 | labels: ["docker", "dependencies"] 43 | groups: 44 | dependencies: 45 | applies-to: version-updates 46 | update-types: 47 | - "minor" 48 | - "patch" 49 | -------------------------------------------------------------------------------- /markdown_helpers.py: -------------------------------------------------------------------------------- 1 | """Helper functions for working with markdown files.""" 2 | 3 | 4 | def markdown_too_large_for_issue_body(file_path: str, max_char_count: int) -> bool: 5 | """ 6 | Check if the markdown file is too large to fit into a GitHub issue. 
7 | 8 | Inputs: 9 | file_path: str - the path to the markdown file to check 10 | max_char_count: int - the maximum number of characters allowed in a GitHub issue body 11 | 12 | Returns: 13 | bool - True if the file is too large, False otherwise 14 | 15 | """ 16 | with open(file_path, "r", encoding="utf-8") as file: 17 | file_contents = file.read() 18 | return len(file_contents) > max_char_count 19 | 20 | 21 | def split_markdown_file(file_path: str, max_char_count: int) -> None: 22 | """ 23 | Split the markdown file into smaller files. 24 | 25 | Inputs: 26 | file_path: str - the path to the markdown file to split 27 | max_char_count: int - the maximum number of characters allowed before splitting the markdown file 28 | 29 | """ 30 | with open(file_path, "r", encoding="utf-8") as file: 31 | file_contents = file.read() 32 | contents_list = [ 33 | file_contents[i : i + max_char_count] 34 | for i in range(0, len(file_contents), max_char_count) 35 | ] 36 | for i, content in enumerate(contents_list): 37 | with open(f"{file_path[:-3]}_{i}.md", "w", encoding="utf-8") as new_file: 38 | new_file.write(content) 39 | -------------------------------------------------------------------------------- /time_to_merge.py: -------------------------------------------------------------------------------- 1 | """A module for measuring the time it takes to merge a GitHub pull request. 2 | 3 | This module provides functions for measuring the time it takes to merge a GitHub pull 4 | request, as well as calculating the average time to merge for a list of pull requests. 5 | 6 | Functions: 7 | measure_time_to_merge( 8 | pull_request: github3.pulls.PullRequest, 9 | ready_for_review_at: Union[datetime, None] 10 | ) -> Union[timedelta, None]: 11 | Measure the time it takes to merge a pull request. 12 | 13 | """ 14 | 15 | from datetime import datetime, timedelta 16 | from typing import Union 17 | 18 | import github3 19 | 20 | 21 | def measure_time_to_merge( 22 | pull_request: github3.pulls.PullRequest, ready_for_review_at: Union[datetime, None] 23 | ) -> Union[timedelta, None]: 24 | """Measure the time it takes to merge a pull request. 25 | 26 | Args: 27 | pull_request (github3.pulls.PullRequest): A GitHub pull request. 28 | ready_for_review_at (Union[datetime, None]): When the PR was marked as ready for review 29 | 30 | Returns: 31 | Union[datetime.timedelta, None]: The time it takes to merge the pull request. 
32 | 33 | """ 34 | merged_at = None 35 | if pull_request.merged_at is None: 36 | return None 37 | 38 | merged_at = pull_request.merged_at 39 | 40 | if ready_for_review_at: 41 | return merged_at - ready_for_review_at 42 | 43 | created_at = pull_request.created_at 44 | return merged_at - created_at 45 | -------------------------------------------------------------------------------- /.github/workflows/contributor_report.yaml: -------------------------------------------------------------------------------- 1 | name: Monthly contributor report 2 | on: 3 | workflow_dispatch: 4 | schedule: 5 | - cron: "3 2 1 * *" 6 | 7 | permissions: 8 | issues: write 9 | 10 | jobs: 11 | contributor_report: 12 | name: contributor report 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - name: Get dates for last month 17 | shell: bash 18 | run: | 19 | # Calculate the first day of the previous month 20 | start_date=$(date -d "last month" +%Y-%m-01) 21 | 22 | # Calculate the last day of the previous month 23 | end_date=$(date -d "$start_date +1 month -1 day" +%Y-%m-%d) 24 | 25 | #Set an environment variable with the date range 26 | echo "START_DATE=$start_date" >> "$GITHUB_ENV" 27 | echo "END_DATE=$end_date" >> "$GITHUB_ENV" 28 | 29 | - name: Run contributor action 30 | uses: github/contributors@0d5adc3833e89ee1f4145744f5d69313cf5ea238 # v1.7.8 31 | env: 32 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 33 | START_DATE: ${{ env.START_DATE }} 34 | END_DATE: ${{ env.END_DATE }} 35 | REPOSITORY: github/issue-metrics 36 | SPONSOR_INFO: "true" 37 | 38 | - name: Create issue 39 | uses: peter-evans/create-issue-from-file@fca9117c27cdc29c6c4db3b86c48e4115a786710 # v6.0.0 40 | with: 41 | title: Monthly contributor report 42 | token: ${{ secrets.GITHUB_TOKEN }} 43 | content-filepath: ./contributors.md 44 | assignees: zkoppert 45 | -------------------------------------------------------------------------------- /.github/workflows/scorecard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Scorecard supply-chain security 3 | on: 4 | workflow_dispatch: 5 | # For Branch-Protection check (for repo branch protection or rules). 6 | # Only the default branch is supported. See 7 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection 8 | branch_protection_rule: 9 | # To guarantee Maintained check is occasionally updated. 
See 10 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained 11 | schedule: 12 | - cron: "29 11 * * 6" 13 | push: 14 | branches: [main] 15 | 16 | permissions: read-all 17 | 18 | jobs: 19 | analysis: 20 | name: Merge to Main Scorecard analysis 21 | runs-on: ubuntu-latest 22 | permissions: 23 | security-events: write 24 | id-token: write 25 | 26 | steps: 27 | - name: "Checkout code" 28 | uses: actions/checkout@v6.0.1 29 | with: 30 | persist-credentials: false 31 | 32 | - name: "Run analysis" 33 | uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a 34 | with: 35 | results_file: results.sarif 36 | results_format: sarif 37 | publish_results: true 38 | - name: "Upload artifact" 39 | uses: actions/upload-artifact@v6.0.0 40 | with: 41 | name: SARIF file 42 | path: results.sarif 43 | retention-days: 5 44 | - name: "Upload to code-scanning" 45 | uses: github/codeql-action/upload-sarif@fe4161a26a8629af62121b670040955b330f9af2 46 | with: 47 | sarif_file: results.sarif 48 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | #checkov:skip=CKV_DOCKER_2 2 | #checkov:skip=CKV_DOCKER_3 3 | #trivy:ignore:AVD-DS-0002 4 | FROM python:3.14-slim@sha256:2751cbe93751f0147bc1584be957c6dd4c5f977c3d4e0396b56456a9fd4ed137 5 | LABEL com.github.actions.name="issue-metrics" \ 6 | com.github.actions.description="Gather metrics on issues/prs/discussions such as time to first response, count of issues opened, closed, etc." \ 7 | com.github.actions.icon="check-square" \ 8 | com.github.actions.color="white" \ 9 | maintainer="@zkoppert" \ 10 | org.opencontainers.image.url="https://github.com/github/issue-metrics" \ 11 | org.opencontainers.image.source="https://github.com/github/issue-metrics" \ 12 | org.opencontainers.image.documentation="https://github.com/github/issue-metrics" \ 13 | org.opencontainers.image.vendor="GitHub" \ 14 | org.opencontainers.image.description="Gather metrics on issues/prs/discussions such as time to first response, count of issues opened, closed, etc." 15 | 16 | WORKDIR /action/workspace 17 | COPY requirements.txt *.py /action/workspace/ 18 | 19 | RUN python3 -m pip install --no-cache-dir -r requirements.txt \ 20 | && apt-get -y update \ 21 | && apt-get -y install --no-install-recommends git=1:2.47.3-0+deb13u1 \ 22 | && rm -rf /var/lib/apt/lists/* 23 | 24 | # Add a simple healthcheck to satisfy container scanners 25 | HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \ 26 | CMD python3 -c "import os,sys; sys.exit(0 if os.path.exists('/action/workspace/issue_metrics.py') else 1)" 27 | 28 | CMD ["/action/workspace/issue_metrics.py"] 29 | ENTRYPOINT ["python3", "-u"] 30 | -------------------------------------------------------------------------------- /.github/workflows/copilot-setup-steps.yml: -------------------------------------------------------------------------------- 1 | name: "Copilot Setup Steps" 2 | 3 | # Automatically run the setup steps when they are changed to allow for easy validation, and 4 | # allow manual testing through the repository's "Actions" tab 5 | on: 6 | workflow_dispatch: 7 | push: 8 | paths: 9 | - .github/workflows/copilot-setup-steps.yml 10 | pull_request: 11 | paths: 12 | - .github/workflows/copilot-setup-steps.yml 13 | 14 | # Set the permissions to the lowest permissions possible needed for your steps. 15 | # Copilot will be given its own token for its operations. 
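# For this repository, contents: read is sufficient: the setup steps below only
# check out the code and pip-install the pinned Python dependencies.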
16 | permissions: 17 | # If you want to clone the repository as part of your setup steps, for example to install dependencies, you'll need the `contents: read` permission. If you don't clone the repository in your setup steps, Copilot will do this for you automatically after the steps complete. 18 | contents: read 19 | 20 | jobs: 21 | # The job MUST be called `copilot-setup-steps` or it will not be picked up by Copilot. 22 | copilot-setup-steps: 23 | runs-on: ubuntu-latest 24 | 25 | # You can define any steps you want, and they will run before the agent starts. 26 | # If you do not check out your code, Copilot will do this for you. 27 | steps: 28 | - name: Checkout code 29 | uses: actions/checkout@v6.0.1 30 | with: 31 | persist-credentials: false 32 | 33 | - name: Set up Python 34 | uses: actions/setup-python@v6.1.0 35 | with: 36 | python-version: 3.12 37 | 38 | - name: Install dependencies 39 | run: | 40 | pip install -r requirements.txt -r requirements-test.txt 41 | -------------------------------------------------------------------------------- /time_to_ready_for_review.py: -------------------------------------------------------------------------------- 1 | """A module for getting the time a pull request was marked as ready for review 2 | after being in draft mode. 3 | 4 | This module provides functions for getting the time a GitHub pull request was 5 | marked as ready for review, if it was formerly in draft mode. 6 | 7 | Functions: 8 | get_time_to_ready_for_review( 9 | issue: github3.issues.Issue, 10 | pull_request: github3.pulls.PullRequest 11 | ) -> Union[datetime, None]: 12 | If a pull request was formerly a draft, get the time it was marked as 13 | ready for review. 14 | 15 | """ 16 | 17 | from datetime import datetime 18 | from typing import Union 19 | 20 | import github3 21 | 22 | 23 | def get_time_to_ready_for_review( 24 | issue: github3.issues.Issue, 25 | pull_request: github3.pulls.PullRequest, 26 | ) -> Union[datetime, None]: 27 | """If a pull request was formerly a draft, get the time it was marked as ready 28 | for review 29 | 30 | Args: 31 | issue (github3.issues.Issue): A GitHub issue. 32 | pull_request (github3.pulls.PullRequest): A GitHub pull request. 33 | 34 | Returns: 35 | Union[datetime, None]: The time the pull request was marked as ready for review 36 | """ 37 | if pull_request.draft: 38 | return None 39 | 40 | events = issue.issue.events(number=50) 41 | try: 42 | for event in events: 43 | if event.event == "ready_for_review": 44 | return event.created_at 45 | except TypeError as e: 46 | print( 47 | f"An error occurred processing review events. Perhaps issue contains a ghost user. 
{e}" 48 | ) 49 | return None 50 | 51 | return None 52 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name-template: "v$RESOLVED_VERSION" 3 | tag-template: "v$RESOLVED_VERSION" 4 | template: | 5 | # Changelog 6 | $CHANGES 7 | 8 | See details of [all code changes](https://github.com/$OWNER/$REPOSITORY/compare/$PREVIOUS_TAG...v$RESOLVED_VERSION) since previous release 9 | 10 | categories: 11 | - title: "🚀 Features" 12 | labels: 13 | - "feature" 14 | - "enhancement" 15 | - title: "🐛 Bug Fixes" 16 | labels: 17 | - "fix" 18 | - "bugfix" 19 | - "bug" 20 | - title: "🧰 Maintenance" 21 | labels: 22 | - "infrastructure" 23 | - "automation" 24 | - "documentation" 25 | - "dependencies" 26 | - "maintenance" 27 | - "revert" 28 | - title: "🏎 Performance" 29 | label: "performance" 30 | change-template: "- $TITLE @$AUTHOR (#$NUMBER)" 31 | version-resolver: 32 | major: 33 | labels: 34 | - "breaking" 35 | - "major" 36 | minor: 37 | labels: 38 | - "enhancement" 39 | - "feature" 40 | - "minor" 41 | patch: 42 | labels: 43 | - "documentation" 44 | - "fix" 45 | - "maintenance" 46 | - "patch" 47 | default: patch 48 | autolabeler: 49 | - label: "automation" 50 | title: 51 | - "/^(build|ci|perf|refactor|test).*/i" 52 | - label: "enhancement" 53 | title: 54 | - "/^(style).*/i" 55 | - label: "documentation" 56 | title: 57 | - "/^(docs).*/i" 58 | - label: "feature" 59 | title: 60 | - "/^(feat).*/i" 61 | - label: "fix" 62 | title: 63 | - "/^(fix).*/i" 64 | - label: "infrastructure" 65 | title: 66 | - "/^(infrastructure).*/i" 67 | - label: "maintenance" 68 | title: 69 | - "/^(chore|maintenance).*/i" 70 | - label: "revert" 71 | title: 72 | - "/^(revert).*/i" 73 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Release 3 | on: 4 | workflow_dispatch: 5 | pull_request_target: 6 | types: [closed] 7 | branches: [main] 8 | permissions: 9 | contents: read 10 | jobs: 11 | release: 12 | permissions: 13 | contents: write 14 | pull-requests: read 15 | uses: github/ospo-reusable-workflows/.github/workflows/release.yaml@26eec20abba5ae806698592c79628f6906da372c 16 | with: 17 | publish: true 18 | release-config-name: release-drafter.yml 19 | secrets: 20 | github-token: ${{ secrets.GITHUB_TOKEN }} 21 | release_image: 22 | needs: release 23 | permissions: 24 | contents: read 25 | packages: write 26 | id-token: write 27 | attestations: write 28 | uses: github/ospo-reusable-workflows/.github/workflows/release-image.yaml@26eec20abba5ae806698592c79628f6906da372c 29 | with: 30 | image-name: ${{ github.repository_owner }}/issue_metrics 31 | full-tag: ${{ needs.release.outputs.full-tag }} 32 | short-tag: ${{ needs.release.outputs.short-tag }} 33 | secrets: 34 | github-token: ${{ secrets.GITHUB_TOKEN }} 35 | image-registry: ghcr.io 36 | image-registry-username: ${{ github.actor }} 37 | image-registry-password: ${{ secrets.GITHUB_TOKEN }} 38 | release_discussion: 39 | needs: release 40 | permissions: 41 | contents: read 42 | discussions: write 43 | uses: github/ospo-reusable-workflows/.github/workflows/release-discussion.yaml@26eec20abba5ae806698592c79628f6906da372c 44 | with: 45 | full-tag: ${{ needs.release.outputs.full-tag }} 46 | body: ${{ needs.release.outputs.body }} 47 | secrets: 48 | github-token: ${{ secrets.GITHUB_TOKEN 
}} 49 | discussion-repository-id: ${{ secrets.RELEASE_DISCUSSION_REPOSITORY_ID }} 50 | discussion-category-id: ${{ secrets.RELEASE_DISCUSSION_CATEGORY_ID }} 51 | -------------------------------------------------------------------------------- /docs/verify-token-access-to-repository.md: -------------------------------------------------------------------------------- 1 | # Verify Token Access to Repository 2 | 3 | GitHub PAT access can be confusing. Here's a quick way to test if the token you're using is authorized to access your repository. 4 | 5 | **Remove this snippet after you've verified your token.** 6 | 7 | - Make sure you follow the token setup instructions [in the `README.md`](https://github.com/github/issue-metrics/tree/main?tab=readme-ov-file#use-as-a-github-action) first. 8 | 9 | - Replace `{owner/repo}` with your own repository information. 10 | 11 | - Add this snippet to your workflow.yml. 12 | 13 | ```yml 14 | - name: Check GitHub token permissions 15 | run: | 16 | curl -H "Authorization: token ${{ secrets.GH_TOKEN }}" https://api.github.com/repos/{owner/repo} 17 | ``` 18 | 19 | - Go to your repository Actions in GitHub and run your job. 20 | - In the job run details, click into the results of `Check GitHub token permissions`. 21 | - You should see your token details with no errors. 22 | 23 | Example of the snippet in the full workflow: 24 | 25 | ```yml 26 | name: Monthly issue metrics 27 | on: 28 | workflow_dispatch: 29 | schedule: 30 | - cron: "3 2 1 * *" 31 | 32 | permissions: 33 | contents: read 34 | 35 | jobs: 36 | build: 37 | name: issue metrics 38 | runs-on: ubuntu-latest 39 | permissions: 40 | issues: write 41 | pull-requests: read 42 | 43 | steps: 44 | - name: Check GitHub token permissions 45 | run: | 46 | curl -H "Authorization: token ${{ secrets.GH_TOKEN }}" https://api.github.com/repos/{owner/repo} 47 | - name: Get dates for last month 48 | shell: bash 49 | run: | 50 | # Calculate the first day of the previous month 51 | first_day=$(date -d "last month" +%Y-%m-01) 52 | 53 | # Calculate the last day of the previous month 54 | last_day=$(date -d "$first_day +1 month -1 day" +%Y-%m-%d) 55 | 56 | #Set an environment variable with the date range 57 | echo "$first_day..$last_day" 58 | echo "last_month=$first_day..$last_day" >> "$GITHUB_ENV" 59 | 60 | - name: Run issue-metrics tool 61 | uses: github/issue-metrics@v3 62 | env: 63 | GH_TOKEN: ${{ secrets.GH_TOKEN }} 64 | SEARCH_QUERY: "repo:{owner/repo} is:issue created:${{ env.last_month }}" 65 | ``` 66 | -------------------------------------------------------------------------------- /docs/authenticating-with-github-app-installation.md: -------------------------------------------------------------------------------- 1 | # Authenticating with a GitHub App Installation 2 | 3 | Authenticating as an app installation lets your app access resources that are owned by the user or organization that installed the app. Authenticating as an app installation is ideal for automation workflows that don't involve user input. 4 | 5 | See the [documentation](https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/about-authentication-with-a-github-app#authentication-as-an-app-installation) for more details. 
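The action's own `auth.py` is not included in this excerpt, but with github3.py (the client library pinned in `requirements.txt`), app-installation login looks roughly like the sketch below; the function name and argument handling here are illustrative, not the action's actual code:

```python
import github3


def login_as_installation(app_id: int, installation_id: int, private_key_pem: bytes):
    """Sketch: return a github3.py client authenticated as a GitHub App installation."""
    client = github3.GitHub()
    # github3.py builds a JWT from the app's private key, then exchanges it
    # for a short-lived installation access token stored on the client.
    client.login_as_app_installation(private_key_pem, app_id, installation_id)
    return client
```

The workflow example below supplies the same three values (`GH_APP_ID`, `GH_APP_INSTALLATION_ID`, `GH_APP_PRIVATE_KEY`) as repository secrets.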
6 | 7 | ```yaml 8 | name: Monthly issue metrics 9 | on: 10 | workflow_dispatch: 11 | schedule: 12 | - cron: '3 2 1 * *' 13 | 14 | permissions: 15 | contents: read 16 | 17 | jobs: 18 | build: 19 | name: issue metrics 20 | runs-on: ubuntu-latest 21 | permissions: 22 | issues: write 23 | pull-requests: read 24 | 25 | steps: 26 | 27 | - name: Get dates for last month 28 | shell: bash 29 | run: | 30 | # Calculate the first day of the previous month 31 | first_day=$(date -d "last month" +%Y-%m-01) 32 | 33 | # Calculate the last day of the previous month 34 | last_day=$(date -d "$first_day +1 month -1 day" +%Y-%m-%d) 35 | 36 | #Set an environment variable with the date range 37 | echo "$first_day..$last_day" 38 | echo "last_month=$first_day..$last_day" >> "$GITHUB_ENV" 39 | 40 | - name: Run issue-metrics tool 41 | uses: github/issue-metrics@v3 42 | env: 43 | GH_APP_ID: ${{ secrets.GH_APP_ID }} 44 | GH_APP_INSTALLATION_ID: ${{ secrets.GH_APP_INSTALLATION_ID }} 45 | GH_APP_PRIVATE_KEY: ${{ secrets.GH_APP_PRIVATE_KEY }} 46 | SEARCH_QUERY: 'repo:owner/repo is:issue created:${{ env.last_month }} -reason:"not planned"' 47 | 48 | - name: Get user names from team 49 | run: | 50 | teamMembers="$(gh api /orgs/ORG/teams/TEAM_SLUG/members | jq -r '.[].login' | paste -sd, -)" 51 | echo 'TEAM_MEMBERS='$teamMembers >> $GITHUB_ENV 52 | env: 53 | GITHUB_TOKEN: ${{ secrets.CUSTOM_TOKEN }} 54 | 55 | - name: Create issue 56 | uses: peter-evans/create-issue-from-file@v4 57 | with: 58 | title: Monthly issue metrics report 59 | token: ${{ secrets.GITHUB_TOKEN }} 60 | content-filepath: ./issue_metrics.md 61 | assignees: ${{ env.TEAM_MEMBERS }} 62 | ``` 63 | -------------------------------------------------------------------------------- /test_time_to_merge.py: -------------------------------------------------------------------------------- 1 | """A module containing unit tests for the time_to_merge module. 2 | 3 | This module contains unit tests for the measure_time_to_merge 4 | function in the time_to_merge module. 5 | The tests use mock GitHub pull request to test the function's behavior. 6 | 7 | Classes: 8 | TestMeasureTimeToMerge: A class to test the measure_time_to_merge function. 
9 | 10 | """ 11 | 12 | import unittest 13 | from datetime import datetime, timedelta 14 | from unittest.mock import MagicMock 15 | 16 | from time_to_merge import measure_time_to_merge 17 | 18 | 19 | class TestMeasureTimeToMerge(unittest.TestCase): 20 | """Test suite for the measure_time_to_merge function.""" 21 | 22 | def test_measure_time_to_merge_ready_for_review(self): 23 | """Test that the function correctly measures the time to merge a pull request that was formerly a draft.""" 24 | # Create a mock pull request object 25 | pull_request = MagicMock() 26 | pull_request.merged_at = datetime.fromisoformat("2021-01-03T00:00:00Z") 27 | ready_for_review_at = datetime.fromisoformat("2021-01-01T00:00:00Z") 28 | 29 | # Call the function and check the result 30 | result = measure_time_to_merge(pull_request, ready_for_review_at) 31 | expected_result = timedelta(days=2) 32 | self.assertEqual(result, expected_result) 33 | 34 | def test_measure_time_to_merge_created_at(self): 35 | """Test that the function correctly measures the time to merge a pull request that was never a draft.""" 36 | # Create a mock pull request object 37 | pull_request = MagicMock() 38 | pull_request.merged_at = datetime.fromisoformat("2021-01-03T00:00:00Z") 39 | pull_request.created_at = datetime.fromisoformat("2021-01-01T00:00:00Z") 40 | 41 | # Call the function and check the result 42 | result = measure_time_to_merge(pull_request, None) 43 | expected_result = timedelta(days=2) 44 | self.assertEqual(result, expected_result) 45 | 46 | def test_measure_time_to_merge_returns_none(self): 47 | """Test that the function returns None if the pull request is not merged.""" 48 | # Create a mock issue object 49 | pull_request = MagicMock() 50 | pull_request.merged_at = None 51 | 52 | # Call the function and check that it returns None 53 | self.assertEqual(None, measure_time_to_merge(pull_request, None)) 54 | -------------------------------------------------------------------------------- /test_time_to_ready_for_review.py: -------------------------------------------------------------------------------- 1 | """A module containing unit tests for the time_to_ready_for_review module. 2 | 3 | This module contains unit tests for the get_time_to_ready_for_review 4 | function in the time_to_ready_for_review module. 5 | The tests use mock GitHub issues to test the functions' behavior. 6 | 7 | Classes: 8 | TestGetTimeToReadyForReview: A class to test the get_time_to_ready_for_review function. 
9 | 10 | """ 11 | 12 | import unittest 13 | from datetime import datetime 14 | from unittest.mock import MagicMock 15 | 16 | from time_to_ready_for_review import get_time_to_ready_for_review 17 | 18 | 19 | class TestGetTimeToReadyForReview(unittest.TestCase): 20 | """Test suite for the get_time_to_ready_for_review function.""" 21 | 22 | # def draft pr function 23 | def test_time_to_ready_for_review_draft(self): 24 | """Test that the function returns None when the pull request is a draft""" 25 | pull_request = MagicMock() 26 | pull_request.draft = True 27 | issue = MagicMock() 28 | 29 | result = get_time_to_ready_for_review(issue, pull_request) 30 | expected_result = None 31 | self.assertEqual(result, expected_result) 32 | 33 | def test_get_time_to_ready_for_review_event(self): 34 | """Test that the function correctly gets the time a pull request was marked as ready for review""" 35 | pull_request = MagicMock() 36 | pull_request.draft = False 37 | event = MagicMock() 38 | event.event = "ready_for_review" 39 | event.created_at = datetime.fromisoformat("2021-01-01T00:00:00Z") 40 | issue = MagicMock() 41 | issue.issue.events.return_value = [event] 42 | 43 | result = get_time_to_ready_for_review(issue, pull_request) 44 | expected_result = event.created_at 45 | self.assertEqual(result, expected_result) 46 | 47 | def test_get_time_to_ready_for_review_no_event(self): 48 | """Test that the function returns None when the pull request is not a draft and no ready_for_review event is found""" 49 | pull_request = MagicMock() 50 | pull_request.draft = False 51 | event = MagicMock() 52 | event.event = "foobar" 53 | event.created_at = "2021-01-01T00:00:00Z" 54 | issue = MagicMock() 55 | issue.issue.events.return_value = [event] 56 | 57 | result = get_time_to_ready_for_review(issue, pull_request) 58 | expected_result = None 59 | self.assertEqual(result, expected_result) 60 | -------------------------------------------------------------------------------- /test_config_get_bool.py: -------------------------------------------------------------------------------- 1 | """Test the get_bool_env_var function""" 2 | 3 | import os 4 | import unittest 5 | from unittest.mock import patch 6 | 7 | from config import get_bool_env_var 8 | 9 | 10 | class TestEnv(unittest.TestCase): 11 | """Test the get_bool_env_var function""" 12 | 13 | @patch.dict( 14 | os.environ, 15 | { 16 | "TEST_BOOL": "true", 17 | }, 18 | clear=True, 19 | ) 20 | def test_get_bool_env_var_that_exists_and_is_true(self): 21 | """Test that gets a boolean environment variable that exists and is true""" 22 | result = get_bool_env_var("TEST_BOOL", False) 23 | self.assertTrue(result) 24 | 25 | @patch.dict( 26 | os.environ, 27 | { 28 | "TEST_BOOL": "false", 29 | }, 30 | clear=True, 31 | ) 32 | def test_get_bool_env_var_that_exists_and_is_false(self): 33 | """Test that gets a boolean environment variable that exists and is false""" 34 | result = get_bool_env_var("TEST_BOOL", False) 35 | self.assertFalse(result) 36 | 37 | @patch.dict( 38 | os.environ, 39 | { 40 | "TEST_BOOL": "nope", 41 | }, 42 | clear=True, 43 | ) 44 | def test_get_bool_env_var_that_exists_and_is_false_due_to_invalid_value(self): 45 | """Test that gets a boolean environment variable that exists and is false 46 | due to an invalid value 47 | """ 48 | result = get_bool_env_var("TEST_BOOL", False) 49 | self.assertFalse(result) 50 | 51 | @patch.dict( 52 | os.environ, 53 | { 54 | "TEST_BOOL": "false", 55 | }, 56 | clear=True, 57 | ) 58 | def 
test_get_bool_env_var_that_does_not_exist_and_default_value_returns_true(self): 59 | """Test that gets a boolean environment variable that does not exist 60 | and default value returns: true 61 | """ 62 | result = get_bool_env_var("DOES_NOT_EXIST", True) 63 | self.assertTrue(result) 64 | 65 | @patch.dict( 66 | os.environ, 67 | { 68 | "TEST_BOOL": "true", 69 | }, 70 | clear=True, 71 | ) 72 | def test_get_bool_env_var_that_does_not_exist_and_default_value_returns_false(self): 73 | """Test that gets a boolean environment variable that does not exist 74 | and default value returns: false 75 | """ 76 | result = get_bool_env_var("DOES_NOT_EXIST", False) 77 | self.assertFalse(result) 78 | 79 | 80 | if __name__ == "__main__": 81 | unittest.main() 82 | -------------------------------------------------------------------------------- /classes.py: -------------------------------------------------------------------------------- 1 | """A module containing classes for representing GitHub issues and their metrics. 2 | 3 | Classes: 4 | IssueWithMetrics: A class to represent a GitHub issue with metrics. 5 | 6 | """ 7 | 8 | 9 | class IssueWithMetrics: 10 | """A class to represent a GitHub issue with metrics. 11 | 12 | Attributes: 13 | title (str): The title of the issue. 14 | html_url (str): The URL of the issue on GitHub. 15 | author (str): The author of the issue. 16 | assignee (str, optional): The primary assignee of the issue. 17 | assignees (list, optional): All assignees of the issue. 18 | time_to_first_response (timedelta, optional): The time it took to 19 | get the first response to the issue. 20 | time_to_close (timedelta, optional): The time it took to close the issue. 21 | time_to_answer (timedelta, optional): The time it took to answer the 22 | discussions in the issue. 23 | time_in_draft (timedelta, optional): The time the PR was in draft state. 24 | label_metrics (dict, optional): A dictionary containing the label metrics. 25 | mentor_activity (dict, optional): A dictionary containing active mentors. 26 | created_at (datetime, optional): The time the issue was created. 27 | status (str, optional): The status of the issue, e.g., "open", "closed as completed", etc. 28 | pr_comment_count (int, optional): The number of comments on the PR (excluding bots). 
29 | """ 30 | 31 | # pylint: disable=too-many-instance-attributes 32 | 33 | def __init__( 34 | self, 35 | title, 36 | html_url, 37 | author, 38 | time_to_first_response=None, 39 | time_to_close=None, 40 | time_to_answer=None, 41 | time_in_draft=None, 42 | labels_metrics=None, 43 | mentor_activity=None, 44 | created_at=None, 45 | assignee=None, 46 | assignees=None, 47 | status=None, 48 | pr_comment_count=None, 49 | ): 50 | self.title = title 51 | self.html_url = html_url 52 | self.author = author 53 | self.assignee = assignee 54 | self.assignees = assignees or [] 55 | self.time_to_first_response = time_to_first_response 56 | self.time_to_close = time_to_close 57 | self.time_to_answer = time_to_answer 58 | self.time_in_draft = time_in_draft 59 | self.label_metrics = labels_metrics 60 | self.mentor_activity = mentor_activity 61 | self.created_at = created_at 62 | self.status = status 63 | self.pr_comment_count = pr_comment_count 64 | -------------------------------------------------------------------------------- /test_markdown_helpers.py: -------------------------------------------------------------------------------- 1 | """Unit tests for the markdown_helpers module.""" 2 | 3 | import os 4 | import unittest 5 | 6 | from markdown_helpers import markdown_too_large_for_issue_body, split_markdown_file 7 | 8 | 9 | class TestMarkdownHelpers(unittest.TestCase): 10 | """ 11 | Unit tests for the markdown_helpers module. 12 | """ 13 | 14 | def test_markdown_too_large_for_issue_body(self): 15 | """ 16 | Test the markdown_too_large_for_issue_body function. 17 | """ 18 | # Define a sample markdown file content 19 | max_char_count = 65535 20 | markdown_content = "a\n" * max_char_count 21 | 22 | # Write the markdown content to a temporary file 23 | with open("temp.md", "w", encoding="utf-8") as f: 24 | f.write(markdown_content) 25 | 26 | # Call the function with the temporary file 27 | result = markdown_too_large_for_issue_body("temp.md", max_char_count) 28 | 29 | # remove the temporary file 30 | os.remove("temp.md") 31 | 32 | # Assert that the function returns True 33 | self.assertTrue(result) 34 | 35 | def test_split_markdown_file(self): 36 | """ 37 | Test the split_markdown_file function. 
38 | """ 39 | 40 | # Define a sample markdown file content with 4 times the maximum character count 41 | multiple_of_max = 4 42 | max_char_count = 65535 43 | repeated_content = "a\n" 44 | markdown_content = repeated_content * int( 45 | (max_char_count * multiple_of_max) / len(repeated_content) 46 | ) 47 | 48 | # Write the markdown content to a temporary file 49 | with open("temp.md", "w", encoding="utf-8") as f: 50 | f.write(markdown_content) 51 | 52 | # Call the function with the temporary file 53 | split_markdown_file("temp.md", max_char_count) 54 | 55 | # Assert that the function creates two files 56 | self.assertTrue(os.path.exists("temp_0.md")) 57 | self.assertTrue(os.path.exists("temp_1.md")) 58 | self.assertTrue(os.path.exists("temp_2.md")) 59 | self.assertTrue(os.path.exists("temp_3.md")) 60 | 61 | # Assert that the all files have less than max characters 62 | for i in range(0, multiple_of_max): 63 | with open(f"temp_{i}.md", "r", encoding="utf-8") as f: 64 | self.assertLessEqual(len(f.read()), max_char_count) 65 | 66 | # remove the temporary files 67 | os.remove("temp.md") 68 | os.remove("temp_0.md") 69 | os.remove("temp_1.md") 70 | os.remove("temp_2.md") 71 | os.remove("temp_3.md") 72 | 73 | 74 | if __name__ == "__main__": 75 | unittest.main() 76 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Output files 2 | issue_metrics*.md 3 | issue_metrics.json 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | *$py.class 9 | 10 | # asdf 11 | .tool-versions 12 | 13 | # C extensions 14 | *.so 15 | 16 | # JSCPD 17 | report/ 18 | 19 | # Distribution / packaging 20 | .Python 21 | build/ 22 | develop-eggs/ 23 | dist/ 24 | downloads/ 25 | eggs/ 26 | .eggs/ 27 | lib/ 28 | lib64/ 29 | parts/ 30 | sdist/ 31 | var/ 32 | wheels/ 33 | share/python-wheels/ 34 | *.egg-info/ 35 | .installed.cfg 36 | *.egg 37 | MANIFEST 38 | 39 | # PyInstaller 40 | # Usually these files are written by a python script from a template 41 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 42 | *.manifest 43 | *.spec 44 | 45 | # Installer logs 46 | pip-log.txt 47 | pip-delete-this-directory.txt 48 | 49 | # Unit test / coverage reports 50 | htmlcov/ 51 | .tox/ 52 | .nox/ 53 | .coverage 54 | .coverage.* 55 | .cache 56 | nosetests.xml 57 | coverage.xml 58 | *.cover 59 | *.py,cover 60 | .hypothesis/ 61 | .pytest_cache/ 62 | cover/ 63 | 64 | # Translations 65 | *.mo 66 | *.pot 67 | 68 | # Django stuff: 69 | *.log 70 | local_settings.py 71 | db.sqlite3 72 | db.sqlite3-journal 73 | 74 | # Flask stuff: 75 | instance/ 76 | .webassets-cache 77 | 78 | # Scrapy stuff: 79 | .scrapy 80 | 81 | # Sphinx documentation 82 | docs/_build/ 83 | 84 | # PyBuilder 85 | .pybuilder/ 86 | target/ 87 | 88 | # Jupyter Notebook 89 | .ipynb_checkpoints 90 | 91 | # IPython 92 | profile_default/ 93 | ipython_config.py 94 | 95 | # pyenv 96 | # For a library or package, you might want to ignore these files since the code is 97 | # intended to run in multiple environments; otherwise, check them in: 98 | # .python-version 99 | 100 | # pipenv 101 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
102 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 103 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 104 | # install all needed dependencies. 105 | #Pipfile.lock 106 | 107 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 108 | __pypackages__/ 109 | 110 | # Celery stuff 111 | celerybeat-schedule 112 | celerybeat.pid 113 | 114 | # SageMath parsed files 115 | *.sage.py 116 | 117 | # Environments 118 | .env 119 | .venv 120 | env/ 121 | venv/ 122 | ENV/ 123 | env.bak/ 124 | venv.bak/ 125 | 126 | # Spyder project settings 127 | .spyderproject 128 | .spyproject 129 | 130 | # Rope project settings 131 | .ropeproject 132 | 133 | # mkdocs documentation 134 | /site 135 | 136 | # mypy 137 | .mypy_cache/ 138 | .dmypy.json 139 | dmypy.json 140 | 141 | # Pyre type checker 142 | .pyre/ 143 | 144 | # pytype static type analyzer 145 | .pytype/ 146 | 147 | # Cython debug symbols 148 | cython_debug/ 149 | 150 | # Mac 151 | .DS_Store 152 | **/.DS_Store 153 | 154 | # PyCharm 155 | .idea/ 156 | -------------------------------------------------------------------------------- /docs/assign-team-instead-of-individual.md: -------------------------------------------------------------------------------- 1 | # Assigning teams instead of individuals 2 | 3 | The assignee part of this workflow action comes from [a different GitHub Action](https://github.com/peter-evans/create-issue-from-file) and currently GitHub issues don't support assigning groups. 4 | 5 | As a workaround, you can use the [GitHub API to retrieve the members of the team](https://docs.github.com/en/rest/teams/members?apiVersion=2022-11-28#list-team-members) and then put them in a comma-separated string that you provide as the assignee. 6 | 7 | This requires setting up a new GitHub API token (referred to below as `CUSTOM_TOKEN`) which has `read:org` permissions assigned and single sign-on authorization as needed. 8 | 9 | To do this, create a [GitHub API token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token-classic) with permissions to read the org (`read:org`). 10 | 11 | Then take the value of the API token you just created, and [create a repository secret](https://docs.github.com/en/actions/security-guides/encrypted-secrets) where the name of the secret is `CUSTOM_TOKEN` and the value of the secret is the API token.
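In isolation, the team lookup that the workflow below performs is a single API call plus some string joining (`ORG` and `TEAM_SLUG` are placeholders for your organization and team slug):

```bash
# List the team members' logins and join them into a comma-separated string
teamMembers="$(gh api /orgs/ORG/teams/TEAM_SLUG/members | jq -r '.[].login' | paste -sd, -)"
echo "$teamMembers" # e.g. octocat,hubot,monalisa
```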
12 | 13 | That might look something like the workflow below where `ORG` is your organization name and `TEAM_SLUG` is the name of the team: 14 | 15 | ```yaml 16 | name: Monthly issue metrics 17 | on: 18 | workflow_dispatch: 19 | schedule: 20 | - cron: '3 2 1 * *' 21 | 22 | permissions: 23 | contents: read 24 | 25 | jobs: 26 | build: 27 | name: issue metrics 28 | runs-on: ubuntu-latest 29 | permissions: 30 | issues: write 31 | pull-requests: read 32 | 33 | steps: 34 | 35 | - name: Get dates for last month 36 | shell: bash 37 | run: | 38 | # Calculate the first day of the previous month 39 | first_day=$(date -d "last month" +%Y-%m-01) 40 | 41 | # Calculate the last day of the previous month 42 | last_day=$(date -d "$first_day +1 month -1 day" +%Y-%m-%d) 43 | 44 | #Set an environment variable with the date range 45 | echo "$first_day..$last_day" 46 | echo "last_month=$first_day..$last_day" >> "$GITHUB_ENV" 47 | 48 | - name: Run issue-metrics tool 49 | uses: github/issue-metrics@v3 50 | env: 51 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 52 | SEARCH_QUERY: 'repo:owner/repo is:issue created:${{ env.last_month }} -reason:"not planned"' 53 | 54 | - name: Get user names from team 55 | run: | 56 | teamMembers="$(gh api /orgs/ORG/teams/TEAM_SLUG/members | jq -r '.[].login' | paste -sd, -)" 57 | echo "TEAM_MEMBERS=$teamMembers" >> "$GITHUB_ENV" 58 | env: 59 | GITHUB_TOKEN: ${{ secrets.CUSTOM_TOKEN }} 60 | 61 | - name: Create issue 62 | uses: peter-evans/create-issue-from-file@v4 63 | with: 64 | title: Monthly issue metrics report 65 | token: ${{ secrets.GITHUB_TOKEN }} 66 | content-filepath: ./issue_metrics.md 67 | assignees: ${{ env.TEAM_MEMBERS }} 68 | ``` 69 | -------------------------------------------------------------------------------- /time_to_answer.py: -------------------------------------------------------------------------------- 1 | """A module for measuring the time it takes to answer a GitHub discussion. 2 | 3 | This module provides functions for measuring the time it takes to answer a GitHub 4 | discussion, as well as calculating stats describing the time to answer for a list of discussions. 5 | 6 | Functions: 7 | get_stats_time_to_answer( 8 | issues_with_metrics: List[IssueWithMetrics] 9 | ) -> Union[dict[str, timedelta], None]: 10 | Calculate stats describing the time to answer for a list of issues with metrics. 11 | measure_time_to_answer( 12 | discussion: dict 13 | ) -> Union[timedelta, None]: 14 | Measure the time it takes to answer a GitHub discussion. 15 | 16 | """ 17 | 18 | from datetime import datetime, timedelta 19 | from typing import List, Union 20 | 21 | import numpy 22 | from classes import IssueWithMetrics 23 | 24 | 25 | def get_stats_time_to_answer( 26 | issues_with_metrics: List[IssueWithMetrics], 27 | ) -> Union[dict[str, timedelta], None]: 28 | """ 29 | Calculate stats describing the time to answer for a list of issues.
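Returns a dict with "avg", "med", and "90p" timedelta values, or None when no issue has a time to answer.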
30 | """ 31 | # Filter out issues with no time to answer 32 | issues_with_time_to_answer = [ 33 | issue for issue in issues_with_metrics if issue.time_to_answer is not None 34 | ] 35 | 36 | # Calculate the total time to answer for all issues 37 | answer_times = [] 38 | if issues_with_time_to_answer: 39 | for issue in issues_with_time_to_answer: 40 | if issue.time_to_answer: 41 | answer_times.append(issue.time_to_answer.total_seconds()) 42 | 43 | # Calculate stats describing time to answer 44 | num_issues_with_time_to_answer = len(issues_with_time_to_answer) 45 | if num_issues_with_time_to_answer > 0: 46 | average_time_to_answer = numpy.round(numpy.average(answer_times)) 47 | med_time_to_answer = numpy.round(numpy.median(answer_times)) 48 | ninety_percentile_time_to_answer = numpy.round( 49 | numpy.percentile(answer_times, 90, axis=0) 50 | ) 51 | else: 52 | return None 53 | 54 | stats = { 55 | "avg": timedelta(seconds=average_time_to_answer), 56 | "med": timedelta(seconds=med_time_to_answer), 57 | "90p": timedelta(seconds=ninety_percentile_time_to_answer), 58 | } 59 | 60 | # Print the average time to answer converting seconds to a readable time format 61 | print(f"Average time to answer: {timedelta(seconds=average_time_to_answer)}") 62 | return stats 63 | 64 | 65 | def measure_time_to_answer(discussion: dict) -> Union[timedelta, None]: 66 | """Measure the time to answer for a discussion. 67 | 68 | Args: 69 | discussion (dict): A discussion object from the GitHub API. 70 | 71 | Returns: 72 | Union[timedelta, None]: The time to answer for the discussion. 73 | 74 | """ 75 | if not discussion["answerChosenAt"]: 76 | return None 77 | 78 | if not discussion["createdAt"]: 79 | return None 80 | 81 | # Get the time to answer 82 | answer_time = datetime.fromisoformat(discussion["answerChosenAt"]) 83 | created_time = datetime.fromisoformat(discussion["createdAt"]) 84 | 85 | return answer_time - created_time 86 | -------------------------------------------------------------------------------- /auth.py: -------------------------------------------------------------------------------- 1 | """This is the module that contains functions related to authenticating to GitHub with a personal access token.""" 2 | 3 | import github3 4 | import requests 5 | 6 | 7 | def auth_to_github( 8 | token: str, 9 | gh_app_id: int | None, 10 | gh_app_installation_id: int | None, 11 | gh_app_private_key_bytes: bytes, 12 | ghe: str, 13 | gh_app_enterprise_only: bool, 14 | ) -> github3.GitHub: 15 | """ 16 | Connect to GitHub.com or GitHub Enterprise, depending on env variables. 
17 | 18 | Args: 19 | token (str): the GitHub personal access token 20 | gh_app_id (int | None): the GitHub App ID 21 | gh_app_installation_id (int | None): the GitHub App Installation ID 22 | gh_app_private_key_bytes (bytes): the GitHub App Private Key 23 | ghe (str): the GitHub Enterprise URL 24 | gh_app_enterprise_only (bool): Set this to true if the GH APP is created 25 | on GHE and needs to communicate with GHE api only 26 | 27 | Returns: 28 | github3.GitHub: the GitHub connection object 29 | """ 30 | if gh_app_id and gh_app_private_key_bytes and gh_app_installation_id: 31 | if ghe and gh_app_enterprise_only: 32 | gh = github3.github.GitHubEnterprise(url=ghe) 33 | else: 34 | gh = github3.github.GitHub() 35 | gh.login_as_app_installation( 36 | gh_app_private_key_bytes, gh_app_id, gh_app_installation_id 37 | ) 38 | github_connection = gh 39 | elif ghe and token: 40 | github_connection = github3.github.GitHubEnterprise(url=ghe, token=token) 41 | elif token: 42 | github_connection = github3.login(token=token) 43 | else: 44 | raise ValueError( 45 | "GH_TOKEN or the set of [GH_APP_ID, GH_APP_INSTALLATION_ID, \ 46 | GH_APP_PRIVATE_KEY] environment variables are not set" 47 | ) 48 | 49 | if not github_connection: 50 | raise ValueError("Unable to authenticate to GitHub") 51 | return github_connection # type: ignore 52 | 53 | 54 | def get_github_app_installation_token( 55 | ghe: str, 56 | gh_app_id: str, 57 | gh_app_private_key_bytes: bytes, 58 | gh_app_installation_id: str, 59 | ) -> str | None: 60 | """ 61 | Get a GitHub App Installation token. 62 | API: https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/authenticating-as-a-github-app-installation # noqa: E501 63 | 64 | Args: 65 | ghe (str): the GitHub Enterprise endpoint 66 | gh_app_id (str): the GitHub App ID 67 | gh_app_private_key_bytes (bytes): the GitHub App Private Key 68 | gh_app_installation_id (str): the GitHub App Installation ID 69 | 70 | Returns: 71 | str: the GitHub App token 72 | """ 73 | jwt_headers = github3.apps.create_jwt_headers(gh_app_private_key_bytes, gh_app_id) 74 | api_endpoint = f"{ghe}/api/v3" if ghe else "https://api.github.com" 75 | url = f"{api_endpoint}/app/installations/{gh_app_installation_id}/access_tokens" 76 | 77 | try: 78 | response = requests.post(url, headers=jwt_headers, json=None, timeout=5) 79 | response.raise_for_status() 80 | except requests.exceptions.RequestException as e: 81 | print(f"Request failed: {e}") 82 | return None 83 | return response.json().get("token") 84 | -------------------------------------------------------------------------------- /discussions.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module provides functions for working with discussions in a GitHub repository. 3 | 4 | Functions: 5 | get_discussions(repo_url: str, token: str, search_query: str) -> List[Dict]: 6 | Get a list of discussions in a GitHub repository that match the search query. 7 | 8 | """ 9 | 10 | import requests 11 | 12 | 13 | def get_discussions(token: str, search_query: str, ghe: str): 14 | """Get a list of discussions in a GitHub repository that match the search query. 15 | 16 | Args: 17 | token (str): A personal access token for GitHub. 18 | search_query (str): The search query to filter discussions by. 19 | ghe (str): GitHub Enterprise URL if applicable, or None for github.com. 20 | 21 | Returns: 22 | list: A list of discussions in the repository that match the search query. 
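Results are fetched via the GraphQL search API in pages of 100, following pagination cursors until all pages are consumed.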
23 | """ 24 | # Construct the GraphQL query with pagination 25 | query = """ 26 | query($query: String!, $cursor: String) { 27 | search(query: $query, type: DISCUSSION, first: 100, after: $cursor) { 28 | edges { 29 | node { 30 | ... on Discussion { 31 | title 32 | url 33 | createdAt 34 | comments(first: 1) { 35 | nodes { 36 | createdAt 37 | } 38 | } 39 | answerChosenAt 40 | closedAt 41 | } 42 | } 43 | } 44 | pageInfo { 45 | hasNextPage 46 | endCursor 47 | } 48 | } 49 | } 50 | """ 51 | 52 | # Remove the type:discussions filter from the search query 53 | search_query = search_query.replace("type:discussions ", "") 54 | 55 | # Send the GraphQL request 56 | api_endpoint = f"{ghe}/api" if ghe else "https://api.github.com" 57 | headers = {"Authorization": f"Bearer {token}"} 58 | 59 | discussions = [] 60 | cursor = None 61 | 62 | while True: 63 | # Set the variables for the GraphQL query 64 | variables = {"query": search_query, "cursor": cursor} 65 | 66 | # Send the GraphQL request 67 | response = requests.post( 68 | f"{api_endpoint}/graphql", 69 | json={"query": query, "variables": variables}, 70 | headers=headers, 71 | timeout=60, 72 | ) 73 | 74 | # Check for errors in the GraphQL response 75 | if response.status_code != 200: 76 | raise ValueError( 77 | f"GraphQL query failed with status code {response.status_code}" 78 | ) 79 | 80 | response_json = response.json() 81 | if "errors" in response_json: 82 | raise ValueError(f"GraphQL query failed: {response_json['errors']}") 83 | 84 | data = response_json["data"] 85 | 86 | # Extract the discussions from the current page 87 | for edge in data["search"]["edges"]: 88 | discussions.append(edge["node"]) 89 | 90 | # Check if there are more pages 91 | page_info = data["search"]["pageInfo"] 92 | if not page_info["hasNextPage"]: 93 | break 94 | 95 | cursor = page_info["endCursor"] 96 | 97 | return discussions 98 | -------------------------------------------------------------------------------- /docs/search-query.md: -------------------------------------------------------------------------------- 1 | # Configuring the `SEARCH_QUERY` 2 | 3 | Issues or Pull Requests? Open or closed? 4 | This action can be configured to run metrics on discussions, pull requests and/or issues. It is also configurable by whether they were open or closed in the specified time window. 5 | 6 | Further query options are listed in the documentation on [searching issues and pull requests](https://docs.github.com/en/issues/tracking-your-work-with-issues/filtering-and-searching-issues-and-pull-requests) or [searching discussions](https://docs.github.com/en/search-github/searching-on-github/searching-discussions). Search results are limited to 1000 results by the GitHub API. 
7 | 8 | ## Examples 9 | 10 | Issues opened in May 2023: 11 | 12 | - `repo:owner/repo is:issue created:2023-05-01..2023-05-31` 13 | 14 | Issues closed in May 2023 (may have been open in May or earlier): 15 | 16 | - `repo:owner/repo is:issue closed:2023-05-01..2023-05-31` 17 | 18 | Pull requests opened in May 2023: 19 | 20 | - `repo:owner/repo is:pr created:2023-05-01..2023-05-31` 21 | 22 | Pull requests closed in May 2023 (may have been open in May or earlier): 23 | 24 | - `repo:owner/repo is:pr closed:2023-05-01..2023-05-31` 25 | 26 | Discussions opened in May 2023: 27 | 28 | - `repo:owner/repo type:discussions created:2023-05-01..2023-05-31` 29 | 30 | Discussions opened in May 2023 with category of `engineering` and label of `question`: 31 | 32 | - `repo:owner/repo type:discussions created:2023-05-01..2023-05-31 category:engineering label:"question"` 33 | 34 | Both issues and pull requests opened in May 2023: 35 | 36 | - `repo:owner/repo created:2023-05-01..2023-05-31` 37 | 38 | Both issues and pull requests closed in May 2023 (may have been open in May or earlier): 39 | 40 | - `repo:owner/repo closed:2023-05-01..2023-05-31` 41 | 42 | ## Reporting on opened and closed issues/PRs 43 | 44 | OK, but what if I want both opened and closed issues and pull requests? Because issue search has no OR logic, you will need to run the action twice: once for opened and once for closed. Here is an example workflow that does this: 45 | 46 | ```yaml 47 | name: Monthly issue metrics 48 | on: 49 | workflow_dispatch: 50 | schedule: 51 | - cron: '3 2 1 * *' 52 | 53 | permissions: 54 | contents: read 55 | 56 | jobs: 57 | build: 58 | name: issue metrics 59 | runs-on: ubuntu-latest 60 | permissions: 61 | issues: write 62 | pull-requests: read 63 | 64 | steps: 65 | 66 | - name: Run issue-metrics tool for issues and prs opened in May 2023 67 | uses: github/issue-metrics@v3 68 | env: 69 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 70 | SEARCH_QUERY: 'repo:owner/repo created:2023-05-01..2023-05-31 -reason:"not planned"' 71 | 72 | - name: Create issue for opened issues and prs 73 | uses: peter-evans/create-issue-from-file@v4 74 | with: 75 | title: Monthly issue metrics report for opened issues and prs 76 | token: ${{ secrets.GITHUB_TOKEN }} 77 | content-filepath: ./issue_metrics.md 78 | assignees: 79 | 80 | - name: Run issue-metrics tool for issues and prs closed in May 2023 81 | uses: github/issue-metrics@v3 82 | env: 83 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 84 | SEARCH_QUERY: 'repo:owner/repo closed:2023-05-01..2023-05-31 -reason:"not planned"' 85 | 86 | - name: Create issue for closed issues and prs 87 | uses: peter-evans/create-issue-from-file@v4 88 | with: 89 | title: Monthly issue metrics report for closed issues and prs 90 | content-filepath: ./issue_metrics.md 91 | assignees: 92 | ``` 93 | -------------------------------------------------------------------------------- /time_to_close.py: -------------------------------------------------------------------------------- 1 | """A module for measuring the time it takes to close a GitHub issue or discussion. 2 | 3 | This module provides functions for measuring the time it takes to close a GitHub issue 4 | or discussion, as well as calculating stats describing the time to close for a list of issues. 5 | 6 | Functions: 7 | measure_time_to_close( 8 | issue: Union[github3.issues.Issue, None], 9 | discussion: Union[dict, None] 10 | ) -> Union[timedelta, None]: 11 | Measure the time it takes to close an issue or discussion.
12 | get_stats_time_to_close( 13 | issues_with_metrics: List[IssueWithMetrics] 14 | ) -> Union[dict[str, timedelta], None]: 15 | Calculate stats describing the time to close for a list of issues with metrics. 16 | 17 | """ 18 | 19 | from datetime import datetime, timedelta 20 | from typing import List, Union 21 | 22 | import github3 23 | import numpy 24 | from classes import IssueWithMetrics 25 | 26 | 27 | def measure_time_to_close( 28 | issue: Union[github3.issues.Issue, None], discussion: Union[dict, None] # type: ignore 29 | ) -> Union[timedelta, None]: 30 | """Measure the time it takes to close an issue or discussion. 31 | 32 | Args: 33 | issue (Union[github3.issues.Issue, None]): A GitHub issue. 34 | discussion (Union[dict, None]): A GitHub discussion. 35 | 36 | Returns: 37 | Union[datetime.timedelta, None]: The time it takes to close the issue. 38 | 39 | """ 40 | closed_at, created_at = None, None 41 | if issue: 42 | if issue.state != "closed": 43 | return None 44 | closed_at = datetime.fromisoformat(issue.closed_at) 45 | created_at = datetime.fromisoformat(issue.created_at) 46 | 47 | if discussion: 48 | if discussion["closedAt"] is None: 49 | return None 50 | closed_at = datetime.fromisoformat(discussion["closedAt"]) 51 | created_at = datetime.fromisoformat(discussion["createdAt"]) 52 | 53 | if closed_at and created_at: 54 | return closed_at - created_at 55 | return None 56 | 57 | 58 | def get_stats_time_to_close( 59 | issues_with_metrics: List[IssueWithMetrics], 60 | ) -> Union[dict[str, timedelta], None]: 61 | """Calculate stats describing the time to close for a list of issues. 62 | 63 | Args: 64 | issues_with_metrics (List[IssueWithMetrics]): A list of issues with metrics. 65 | Each item should be an IssueWithMetrics object. 66 | 67 | Returns: 68 | Union[dict[str, timedelta], None]: Stats describing the time to close for the issues. 69 | 70 | """ 71 | # Filter out issues with no time to close 72 | issues_with_time_to_close = [ 73 | issue for issue in issues_with_metrics if issue.time_to_close is not None 74 | ] 75 | 76 | # Collect the time to close (in seconds) for each issue 77 | close_times = [] 78 | if issues_with_time_to_close: 79 | for issue in issues_with_time_to_close: 80 | if issue.time_to_close: 81 | close_times.append(issue.time_to_close.total_seconds()) 82 | 83 | # Calculate stats describing time to close 84 | num_issues_with_time_to_close = len(issues_with_time_to_close) 85 | if num_issues_with_time_to_close > 0: 86 | average_time_to_close = numpy.round(numpy.average(close_times)) 87 | med_time_to_close = numpy.round(numpy.median(close_times)) 88 | ninety_percentile_time_to_close = numpy.round( 89 | numpy.percentile(close_times, 90, axis=0) 90 | ) 91 | else: 92 | return None 93 | 94 | stats = { 95 | "avg": timedelta(seconds=average_time_to_close), 96 | "med": timedelta(seconds=med_time_to_close), 97 | "90p": timedelta(seconds=ninety_percentile_time_to_close), 98 | } 99 | 100 | # Print the average time to close, converting seconds to a readable time format 101 | print(f"Average time to close: {timedelta(seconds=average_time_to_close)}") 102 | return stats 103 | -------------------------------------------------------------------------------- /pr_comments.py: -------------------------------------------------------------------------------- 1 | """A module for measuring the number of comments on pull requests.
2 | 3 | This module provides functions for counting comments on GitHub pull requests, 4 | excluding bot comments, and calculating statistics about comment counts. 5 | 6 | Functions: 7 | count_pr_comments( 8 | issue: Union[github3.issues.Issue, None], 9 | pull_request: Union[github3.pulls.PullRequest, None], 10 | ignore_users: Union[List[str], None] = None, 11 | ) -> Union[int, None]: 12 | Count the number of comments on a pull request, excluding bot comments. 13 | get_stats_pr_comments( 14 | issues_with_metrics: List[IssueWithMetrics], 15 | ) -> Union[dict[str, float], None]: 16 | Calculate stats describing the comment count for a list of pull requests. 17 | """ 18 | 19 | from typing import List, Union 20 | 21 | import github3 22 | import numpy 23 | from classes import IssueWithMetrics 24 | 25 | 26 | def count_pr_comments( 27 | issue: Union[github3.issues.Issue, None], # type: ignore 28 | pull_request: Union[github3.pulls.PullRequest, None] = None, 29 | ignore_users: Union[List[str], None] = None, 30 | ) -> Union[int, None]: 31 | """Count the number of comments on a pull request, excluding bot comments. 32 | 33 | Args: 34 | issue (Union[github3.issues.Issue, None]): A GitHub issue. 35 | pull_request (Union[github3.pulls.PullRequest, None]): A GitHub pull request. 36 | ignore_users (Union[List[str], None]): A list of GitHub usernames to ignore. 37 | 38 | Returns: 39 | Union[int, None]: The number of comments on the pull request, excluding bots. 40 | Returns None if not a pull request. 41 | """ 42 | if not pull_request or not issue: 43 | return None 44 | 45 | if ignore_users is None: 46 | ignore_users = [] 47 | 48 | comment_count = 0 49 | 50 | # Count issue comments 51 | try: 52 | comments = issue.issue.comments() # type: ignore 53 | for comment in comments: 54 | # Skip bot comments and ignored users 55 | if ( 56 | str(comment.user.type.lower()) != "bot" 57 | and comment.user.login not in ignore_users 58 | ): 59 | comment_count += 1 60 | except (AttributeError, TypeError): 61 | # If we can't get comments, just continue 62 | pass 63 | 64 | # Count pull request review comments 65 | try: 66 | review_comments = pull_request.review_comments() 67 | for comment in review_comments: 68 | # Skip bot comments and ignored users 69 | if ( 70 | str(comment.user.type.lower()) != "bot" 71 | and comment.user.login not in ignore_users 72 | ): 73 | comment_count += 1 74 | except (AttributeError, TypeError): 75 | # If we can't get review comments, just continue 76 | pass 77 | 78 | return comment_count 79 | 80 | 81 | def get_stats_pr_comments( 82 | issues_with_metrics: List[IssueWithMetrics], 83 | ) -> Union[dict[str, float], None]: 84 | """Calculate stats describing the comment count for a list of pull requests. 85 | 86 | Args: 87 | issues_with_metrics (List[IssueWithMetrics]): A list of GitHub issues with metrics attached. 88 | 89 | Returns: 90 | Union[Dict[str, float], None]: The stats describing comment counts for PRs. 
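The dict holds "avg", "med", and "90p" values, each rounded to one decimal place.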
91 | """ 92 | # Filter out issues that are not pull requests or have no comment count 93 | prs_with_comment_counts = [ 94 | issue.pr_comment_count 95 | for issue in issues_with_metrics 96 | if issue.pr_comment_count is not None 97 | ] 98 | 99 | if not prs_with_comment_counts: 100 | return None 101 | 102 | # Calculate statistics 103 | average_comment_count = numpy.round(numpy.average(prs_with_comment_counts), 1) 104 | median_comment_count = numpy.round(numpy.median(prs_with_comment_counts), 1) 105 | ninety_percentile_comment_count = numpy.round( 106 | numpy.percentile(prs_with_comment_counts, 90), 1 107 | ) 108 | 109 | stats = { 110 | "avg": average_comment_count, 111 | "med": median_comment_count, 112 | "90p": ninety_percentile_comment_count, 113 | } 114 | 115 | return stats 116 | -------------------------------------------------------------------------------- /test_time_to_answer.py: -------------------------------------------------------------------------------- 1 | """This module provides unit tests for the time_to_answer module.""" 2 | 3 | import unittest 4 | from datetime import timedelta 5 | 6 | from classes import IssueWithMetrics 7 | from time_to_answer import get_stats_time_to_answer, measure_time_to_answer 8 | 9 | 10 | class TestGetAverageTimeToAnswer(unittest.TestCase): 11 | """A test case for the get_stats_time_to_answer function. 12 | 13 | This test case includes three test methods: 14 | - test_returns_none_for_empty_list 15 | - test_returns_none_for_list_with_no_time_to_answer 16 | - test_returns_stats_time_to_answer 17 | """ 18 | 19 | def test_returns_none_for_empty_list(self): 20 | """Tests that the function returns None when given an empty list of issues.""" 21 | # Arrange 22 | issues_with_metrics = [] 23 | 24 | # Act 25 | result = get_stats_time_to_answer(issues_with_metrics) 26 | 27 | # Assert 28 | self.assertIsNone(result) 29 | 30 | def test_returns_none_for_list_with_no_time_to_answer(self): 31 | """ 32 | Tests that the function returns None when given a list of 33 | issues with no time to answer. 34 | """ 35 | # Arrange 36 | issues_with_metrics = [ 37 | IssueWithMetrics("issue1", None, None), 38 | IssueWithMetrics("issue2", None, None), 39 | ] 40 | 41 | # Act 42 | result = get_stats_time_to_answer(issues_with_metrics) 43 | 44 | # Assert 45 | self.assertIsNone(result) 46 | 47 | def test_returns_stats_time_to_answer(self): 48 | """ 49 | Tests that the function correctly calculates the average 50 | time to answer for a list of issues with time to answer. 51 | """ 52 | 53 | # Arrange 54 | issues_with_metrics = [ 55 | IssueWithMetrics( 56 | "issue1", "url1", "alice", None, None, timedelta(seconds=10) 57 | ), 58 | IssueWithMetrics( 59 | "issue2", "url2", "bob", None, None, timedelta(seconds=20) 60 | ), 61 | IssueWithMetrics( 62 | "issue3", "url3", "carol", None, None, timedelta(seconds=30) 63 | ), 64 | ] 65 | 66 | # Act 67 | result = get_stats_time_to_answer(issues_with_metrics)["avg"] 68 | 69 | # Assert 70 | self.assertEqual(result, timedelta(seconds=20)) 71 | 72 | 73 | class TestMeasureTimeToAnswer(unittest.TestCase): 74 | """A test case for the measure_time_to_answer function. 75 | 76 | This test case includes three test methods: 77 | - test_returns_none_if_answer_chosen_at_is_missing 78 | - test_returns_none_if_created_at_is_missing 79 | - test_returns_time_to_answer 80 | """ 81 | 82 | def test_returns_none_if_answer_chosen_at_is_missing(self): 83 | """ 84 | Tests that the function returns None when the answerChosenAt 85 | field is missing from the discussion object. 
86 | """ 87 | # Arrange 88 | discussion = {"createdAt": "2022-01-01T00:00:00Z", "answerChosenAt": None} 89 | 90 | # Act 91 | result = measure_time_to_answer(discussion) 92 | 93 | # Assert 94 | self.assertIsNone(result) 95 | 96 | def test_returns_none_if_created_at_is_missing(self): 97 | """ 98 | Tests that the function returns None when the createdAt 99 | field is missing from the discussion object. 100 | """ 101 | # Arrange 102 | discussion = {"createdAt": None, "answerChosenAt": "2022-01-01T00:00:00Z"} 103 | 104 | # Act 105 | result = measure_time_to_answer(discussion) 106 | 107 | # Assert 108 | self.assertIsNone(result) 109 | 110 | def test_returns_time_to_answer(self): 111 | """ 112 | Tests that the function correctly calculates the time to answer for 113 | a discussion object with both createdAt and answerChosenAt fields. 114 | """ 115 | # Arrange 116 | discussion = { 117 | "createdAt": "2022-01-01T00:00:00Z", 118 | "answerChosenAt": "2022-01-01T00:01:00Z", 119 | } 120 | 121 | # Act 122 | result = measure_time_to_answer(discussion) 123 | 124 | # Assert 125 | self.assertEqual(result, timedelta(minutes=1)) 126 | -------------------------------------------------------------------------------- /docs/example-workflows.md: -------------------------------------------------------------------------------- 1 | # Example workflows 2 | 3 | ## Calculated Time Example 4 | 5 | This workflow searches for the issues created last month, and generates an issue with metrics. 6 | 7 | ```yaml 8 | name: Monthly issue metrics 9 | on: 10 | workflow_dispatch: 11 | schedule: 12 | - cron: "3 2 1 * *" 13 | 14 | permissions: 15 | contents: read 16 | 17 | jobs: 18 | build: 19 | name: issue metrics 20 | runs-on: ubuntu-latest 21 | permissions: 22 | issues: write 23 | pull-requests: read 24 | 25 | steps: 26 | - name: Get dates for last month 27 | shell: bash 28 | run: | 29 | # Calculate the first day of the previous month 30 | first_day=$(date -d "last month" +%Y-%m-01) 31 | 32 | # Calculate the last day of the previous month 33 | last_day=$(date -d "$first_day +1 month -1 day" +%Y-%m-%d) 34 | 35 | #Set an environment variable with the date range 36 | echo "$first_day..$last_day" 37 | echo "last_month=$first_day..$last_day" >> "$GITHUB_ENV" 38 | 39 | - name: Run issue-metrics tool 40 | uses: github/issue-metrics@v3 41 | env: 42 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 43 | SEARCH_QUERY: 'repo:owner/repo is:issue created:${{ env.last_month }} -reason:"not planned"' 44 | 45 | - name: Create issue 46 | uses: peter-evans/create-issue-from-file@v4 47 | with: 48 | title: Monthly issue metrics report 49 | token: ${{ secrets.GITHUB_TOKEN }} 50 | content-filepath: ./issue_metrics.md 51 | assignees: 52 | ``` 53 | 54 | ## Fixed Time Example 55 | 56 | This workflow searches for the issues created between 2023-05-01..2023-05-31, and generates an issue with metrics. 
57 | 58 | ```yaml 59 | name: Monthly issue metrics 60 | on: 61 | workflow_dispatch: 62 | 63 | permissions: 64 | contents: read 65 | 66 | jobs: 67 | build: 68 | name: issue metrics 69 | runs-on: ubuntu-latest 70 | permissions: 71 | issues: write 72 | pull-requests: read 73 | 74 | steps: 75 | - name: Run issue-metrics tool 76 | uses: github/issue-metrics@v3 77 | env: 78 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 79 | SEARCH_QUERY: 'repo:owner/repo is:issue created:2023-05-01..2023-05-31 -reason:"not planned"' 80 | 81 | - name: Create issue 82 | uses: peter-evans/create-issue-from-file@v4 83 | with: 84 | title: Monthly issue metrics report 85 | token: ${{ secrets.GITHUB_TOKEN }} 86 | content-filepath: ./issue_metrics.md 87 | assignees: 88 | ``` 89 | 90 | ## Multiple Repositories Example 91 | 92 | This workflow searches for the issues created last month, and generates an issue with metrics. It also searches for issues in a second repository and includes those metrics in the same issue. 93 | 94 | ```yaml 95 | name: Monthly issue metrics (Multi Repo) 96 | on: 97 | workflow_dispatch: 98 | 99 | permissions: 100 | contents: read 101 | 102 | jobs: 103 | build: 104 | name: issue metrics 105 | runs-on: ubuntu-latest 106 | permissions: 107 | issues: write 108 | pull-requests: read 109 | 110 | steps: 111 | - name: Get dates for last month 112 | shell: bash 113 | run: | 114 | # Calculate the first day of the previous month 115 | first_day=$(date -d "last month" +%Y-%m-01) 116 | 117 | # Calculate the last day of the previous month 118 | last_day=$(date -d "$first_day +1 month -1 day" +%Y-%m-%d) 119 | 120 | #Set an environment variable with the date range 121 | echo "$first_day..$last_day" 122 | echo "last_month=$first_day..$last_day" >> "$GITHUB_ENV" 123 | 124 | - name: Get issue metrics 125 | uses: github/issue-metrics@v3 126 | env: 127 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 128 | SEARCH_QUERY: 'repo:owner/repo1 repo:owner/repo2 is:issue created:${{ env.last_month }} -reason:"not planned"' 129 | 130 | - name: Create issue 131 | uses: peter-evans/create-issue-from-file@v4 132 | with: 133 | title: Monthly issue metrics report (dev) 134 | token: ${{ secrets.GITHUB_TOKEN }} 135 | content-filepath: ./issue_metrics.md 136 | assignees: 137 | ``` 138 | -------------------------------------------------------------------------------- /test_time_to_close.py: -------------------------------------------------------------------------------- 1 | """A module containing unit tests for the time_to_close module. 2 | 3 | This module contains unit tests for the measure_time_to_close and 4 | get_stats_time_to_close functions in the time_to_close module. 5 | The tests use mock GitHub issues to test the functions' behavior. 6 | 7 | Classes: 8 | TestMeasureTimeToClose: A class to test the measure_time_to_close function. 9 | TestGetStatsTimeToClose: A class to test the get_stats_time_to_close function. 
10 | 11 | """ 12 | 13 | import unittest 14 | from datetime import timedelta 15 | from unittest.mock import MagicMock 16 | 17 | from classes import IssueWithMetrics 18 | from time_to_close import get_stats_time_to_close, measure_time_to_close 19 | 20 | 21 | class TestGetAverageTimeToClose(unittest.TestCase): 22 | """Test suite for the get_stats_time_to_close function.""" 23 | 24 | def test_get_stats_time_to_close(self): 25 | """Test that the function correctly calculates the average time to close.""" 26 | # Create mock data 27 | issues_with_metrics = [ 28 | IssueWithMetrics( 29 | "Issue 1", 30 | "https://github.com/user/repo/issues/1", 31 | "alice", 32 | None, 33 | timedelta(days=2), 34 | ), 35 | IssueWithMetrics( 36 | "Issue 2", 37 | "https://github.com/user/repo/issues/2", 38 | "bob", 39 | None, 40 | timedelta(days=4), 41 | ), 42 | IssueWithMetrics( 43 | "Issue 3", "https://github.com/user/repo/issues/3", "carol", None, None 44 | ), 45 | ] 46 | 47 | # Call the function and check the result 48 | result = get_stats_time_to_close(issues_with_metrics)["avg"] 49 | expected_result = timedelta(days=3) 50 | self.assertEqual(result, expected_result) 51 | 52 | def test_get_stats_time_to_close_no_issues(self): 53 | """Test that the function returns None if there are no issues with time to close.""" 54 | # Create mock data 55 | issues_with_metrics = [ 56 | IssueWithMetrics( 57 | "Issue 1", "https://github.com/user/repo/issues/1", "alice", None, None 58 | ), 59 | IssueWithMetrics( 60 | "Issue 2", "https://github.com/user/repo/issues/2", "bob", None, None 61 | ), 62 | IssueWithMetrics( 63 | "Issue 3", "https://github.com/user/repo/issues/3", "carol", None, None 64 | ), 65 | ] 66 | 67 | # Call the function and check the result 68 | result = get_stats_time_to_close(issues_with_metrics) 69 | expected_result = None 70 | self.assertEqual(result, expected_result) 71 | 72 | 73 | class TestMeasureTimeToClose(unittest.TestCase): 74 | """Test suite for the measure_time_to_close function.""" 75 | 76 | def test_measure_time_to_close(self): 77 | """Test that the function correctly measures the time to close an issue.""" 78 | # Create a mock issue object 79 | issue = MagicMock() 80 | issue.state = "closed" 81 | issue.created_at = "2021-01-01T00:00:00Z" 82 | issue.closed_at = "2021-01-03T00:00:00Z" 83 | 84 | # Call the function and check the result 85 | result = measure_time_to_close(issue, None) 86 | expected_result = timedelta(days=2) 87 | self.assertEqual(result, expected_result) 88 | 89 | def test_measure_time_to_close_returns_none(self): 90 | """Test that the function returns None if the issue is not closed.""" 91 | # Create a mock issue object 92 | issue = MagicMock() 93 | issue.state = "open" 94 | 95 | # Call the function and check that it returns None 96 | self.assertEqual(None, measure_time_to_close(issue, None)) 97 | 98 | def test_measure_time_to_close_discussion(self): 99 | """ 100 | Test that the function correctly measures the time to close for a discussion. 
101 | """ 102 | # Create an issue dictionary with createdAt and closedAt fields 103 | issue = {} 104 | issue["createdAt"] = "2021-01-01T00:00:00Z" 105 | issue["closedAt"] = "2021-01-03T00:00:00Z" 106 | 107 | # Call the function and check the result 108 | result = measure_time_to_close(None, issue) 109 | expected_result = timedelta(days=2) 110 | self.assertEqual(result, expected_result) 111 | -------------------------------------------------------------------------------- /test_most_active_mentors.py: -------------------------------------------------------------------------------- 1 | """A module containing unit tests for the most_active_mentors module. 2 | 3 | This module contains unit tests for the count_comments_per_user and 4 | get_mentor_count functions in the most_active_mentors module. 5 | The tests use mock GitHub issues and comments to test the functions' behavior. 6 | 7 | Classes: 8 | TestCountCommentsPerUser: A class testing count_comments_per_user. 9 | TestGetMentorCount: A class to test the 10 | get_mentor_count function. 11 | 12 | """ 13 | 14 | import unittest 15 | from datetime import datetime 16 | from unittest.mock import MagicMock 17 | 18 | from classes import IssueWithMetrics 19 | from most_active_mentors import count_comments_per_user, get_mentor_count 20 | 21 | 22 | class TestCountCommentsPerUser(unittest.TestCase): 23 | """Test the count_comments_per_user function.""" 24 | 25 | def test_count_comments_per_user_limit(self): 26 | """Test that count_comments_per_user correctly counts user comments. 27 | 28 | This test mocks the GitHub connection and issue comments, and checks 29 | that count_comments_per_user correctly considers user comments for 30 | counting. 31 | 32 | """ 33 | # Set up the mock GitHub issues 34 | mock_issue1 = MagicMock() 35 | mock_issue1.comments = 2 36 | mock_issue1.issue.user.login = "issue_owner" 37 | mock_issue1.created_at = "2023-01-01T00:00:00Z" 38 | 39 | # Set up 21 mock GitHub issue comments - only 20 should be counted 40 | mock_issue1.issue.comments.return_value = [] 41 | for i in range(22): 42 | mock_comment1 = MagicMock() 43 | mock_comment1.user.login = "very_active_user" 44 | mock_comment1.created_at = datetime.fromisoformat( 45 | f"2023-01-02T{i:02d}:00:00Z" 46 | ) 47 | # pylint: disable=maybe-no-member 48 | mock_issue1.issue.comments.return_value.append(mock_comment1) 49 | 50 | # Call the function 51 | result = count_comments_per_user(mock_issue1) 52 | expected_result = {"very_active_user": 3} 53 | 54 | # Check the results 55 | self.assertEqual(result, expected_result) 56 | 57 | def test_count_comments_per_user_with_ignores(self): 58 | """Test that count_comments_per_user correctly counts user comments with some users ignored.""" 59 | # Set up the mock GitHub issues 60 | mock_issue1 = MagicMock() 61 | mock_issue1.comments = 2 62 | mock_issue1.issue.user.login = "issue_owner" 63 | mock_issue1.created_at = "2023-01-01T00:00:00Z" 64 | 65 | # Set up mock GitHub issue comments by several users 66 | mock_issue1.issue.comments.return_value = [] 67 | for i in range(5): 68 | mock_comment1 = MagicMock() 69 | mock_comment1.user.login = "very_active_user" 70 | mock_comment1.created_at = datetime.fromisoformat( 71 | f"2023-01-02T{i:02d}:00:00Z" 72 | ) 73 | # pylint: disable=maybe-no-member 74 | mock_issue1.issue.comments.return_value.append(mock_comment1) 75 | for i in range(5): 76 | mock_comment1 = MagicMock() 77 | mock_comment1.user.login = "very_active_user_ignored" 78 | mock_comment1.created_at = datetime.fromisoformat( 79 | 
f"2023-01-02T{i:02d}:00:00Z" 80 | ) 81 | # pylint: disable=maybe-no-member 82 | mock_issue1.issue.comments.return_value.append(mock_comment1) 83 | 84 | # Call the function 85 | result = count_comments_per_user( 86 | mock_issue1, ignore_users=["very_active_user_ignored"] 87 | ) 88 | # Only the comments by "very_active_user" should be counted, 89 | # so the count should be 3 since that is the threshold for heavily involved 90 | expected_result = {"very_active_user": 3} 91 | 92 | # Check the results 93 | self.assertEqual(result, expected_result) 94 | self.assertNotIn("very_active_user_ignored", result) 95 | 96 | def test_get_mentor_count(self): 97 | """Test that get_mentor_count correctly counts comments per user.""" 98 | mentor_activity = {"sue": 15, "bob": 10} 99 | 100 | # Create mock data 101 | issues_with_metrics = [ 102 | IssueWithMetrics( 103 | "Issue 1", 104 | "https://github.com/user/repo/issues/1", 105 | "alice", 106 | None, 107 | mentor_activity=mentor_activity, 108 | ), 109 | IssueWithMetrics( 110 | "Issue 2", 111 | "https://github.com/user/repo/issues/2", 112 | "bob", 113 | None, 114 | mentor_activity=mentor_activity, 115 | ), 116 | ] 117 | 118 | # Call the function and check the result 119 | result = get_mentor_count(issues_with_metrics, 2) 120 | expected_result = 2 121 | self.assertEqual(result, expected_result) 122 | -------------------------------------------------------------------------------- /test_auth.py: -------------------------------------------------------------------------------- 1 | """A module containing unit tests for the auth module. 2 | 3 | This module contains unit tests for the functions in the auth module 4 | that authenticate to github. 5 | 6 | Classes: 7 | TestAuthToGithub: A class to test the auth_to_github function. 8 | 9 | """ 10 | 11 | import unittest 12 | from unittest.mock import MagicMock, patch 13 | 14 | import github3 15 | import requests 16 | from auth import auth_to_github, get_github_app_installation_token 17 | 18 | 19 | class TestAuthToGithub(unittest.TestCase): 20 | """Test the auth_to_github function.""" 21 | 22 | @patch("github3.github.GitHub.login_as_app_installation") 23 | def test_auth_to_github_with_github_app(self, mock_login): 24 | """ 25 | Test the auth_to_github function when GitHub app 26 | parameters provided. 27 | """ 28 | mock_login.return_value = MagicMock() 29 | result = auth_to_github("", 12345, 678910, b"hello", "", False) 30 | 31 | self.assertIsInstance(result, github3.github.GitHub, False) 32 | 33 | def test_auth_to_github_with_token(self): 34 | """ 35 | Test the auth_to_github function when the token is provided. 36 | """ 37 | result = auth_to_github("token", None, None, b"", "", False) 38 | 39 | self.assertIsInstance(result, github3.github.GitHub, False) 40 | 41 | def test_auth_to_github_without_authentication_information(self): 42 | """ 43 | Test the auth_to_github function when authentication information is not provided. 44 | Expect a ValueError to be raised. 45 | """ 46 | with self.assertRaises(ValueError): 47 | auth_to_github("", None, None, b"", "", False) 48 | 49 | def test_auth_to_github_with_ghe(self): 50 | """ 51 | Test the auth_to_github function when the GitHub Enterprise URL is provided. 
52 | """ 53 | result = auth_to_github( 54 | "token", None, None, b"", "https://github.example.com", False 55 | ) 56 | 57 | self.assertIsInstance(result, github3.github.GitHubEnterprise, False) 58 | 59 | @patch("github3.github.GitHubEnterprise") 60 | def test_auth_to_github_with_ghe_and_ghe_app(self, mock_ghe): 61 | """ 62 | Test the auth_to_github function when the GitHub Enterprise URL \ 63 | is provided and the app was created in GitHub Enterprise URL. 64 | """ 65 | mock = mock_ghe.return_value 66 | mock.login_as_app_installation = MagicMock(return_value=True) 67 | result = auth_to_github( 68 | "", "123", "123", b"123", "https://github.example.com", True 69 | ) 70 | mock.login_as_app_installation.assert_called_once() 71 | self.assertEqual(result, mock) 72 | 73 | @patch("github3.apps.create_jwt_headers", MagicMock(return_value="gh_token")) 74 | @patch("requests.post") 75 | def test_get_github_app_installation_token(self, mock_post): 76 | """ 77 | Test the get_github_app_installation_token function. 78 | """ 79 | dummy_token = "dummytoken" 80 | mock_response = MagicMock() 81 | mock_response.raise_for_status.return_value = None 82 | mock_response.json.return_value = {"token": dummy_token} 83 | mock_post.return_value = mock_response 84 | mock_ghe = "" 85 | 86 | result = get_github_app_installation_token( 87 | mock_ghe, b"gh_private_token", "gh_app_id", "gh_installation_id" 88 | ) 89 | 90 | self.assertEqual(result, dummy_token) 91 | 92 | @patch("github3.apps.create_jwt_headers", MagicMock(return_value="gh_token")) 93 | @patch("auth.requests.post") 94 | def test_get_github_app_installation_token_request_failure(self, mock_post): 95 | """ 96 | Test the get_github_app_installation_token function returns None when the request fails. 97 | """ 98 | # Mock the post request to raise a RequestException 99 | mock_post.side_effect = requests.exceptions.RequestException("Request failed") 100 | 101 | # Call the function with test data 102 | result = get_github_app_installation_token( 103 | ghe="https://api.github.com", 104 | gh_app_id=12345, 105 | gh_app_private_key_bytes=b"private_key", 106 | gh_app_installation_id=678910, 107 | ) 108 | 109 | # Assert that the result is None 110 | self.assertIsNone(result) 111 | 112 | @patch("github3.login") 113 | def test_auth_to_github_invalid_credentials(self, mock_login): 114 | """ 115 | Test the auth_to_github function raises correct ValueError 116 | when credentials are present but incorrect. 117 | """ 118 | mock_login.return_value = None 119 | with self.assertRaises(ValueError) as context_manager: 120 | auth_to_github("not_a_valid_token", "", "", b"", "", False) 121 | 122 | the_exception = context_manager.exception 123 | self.assertEqual( 124 | str(the_exception), 125 | "Unable to authenticate to GitHub", 126 | ) 127 | -------------------------------------------------------------------------------- /test_column_order_fix.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Test to verify that the Status and Created At columns have their content aligned with headers. 5 | 6 | This test specifically validates the fix for issue #568 where the Status and Created At 7 | columns had their data swapped. 
8 | """ 9 | 10 | import os 11 | import unittest 12 | from datetime import timedelta 13 | from unittest.mock import patch 14 | 15 | from classes import IssueWithMetrics 16 | from markdown_writer import get_non_hidden_columns, write_to_markdown 17 | 18 | 19 | @patch.dict( 20 | os.environ, 21 | { 22 | "SEARCH_QUERY": "is:open repo:user/repo", 23 | "GH_TOKEN": "test_token", 24 | "HIDE_CREATED_AT": "False", 25 | "HIDE_STATUS": "False", 26 | }, 27 | ) 28 | class TestColumnOrderFix(unittest.TestCase): 29 | """Test that Status and Created At columns have correct data.""" 30 | 31 | def test_status_and_created_at_columns_alignment(self): 32 | """Test that Status and Created At columns show correct data values. 33 | 34 | This test specifically validates that: 35 | 1. The Status column contains actual status values (not dates) 36 | 2. The Created At column contains actual date values (not status) 37 | """ 38 | # Create test data with clearly distinguishable Status and Created At values 39 | issues_with_metrics = [ 40 | IssueWithMetrics( 41 | title="Test Issue", 42 | html_url="https://github.com/user/repo/issues/1", 43 | author="testuser", 44 | assignee="assignee1", 45 | assignees=["assignee1"], 46 | created_at="2023-01-01T00:00:00Z", # This should appear in Created At column 47 | status="open", # This should appear in Status column 48 | time_to_first_response=timedelta(days=1), 49 | time_to_close=timedelta(days=2), 50 | time_to_answer=timedelta(days=3), 51 | ) 52 | ] 53 | 54 | # Call the function 55 | write_to_markdown( 56 | issues_with_metrics=issues_with_metrics, 57 | average_time_to_first_response=None, 58 | average_time_to_close=None, 59 | average_time_to_answer=None, 60 | average_time_in_draft=None, 61 | average_time_in_labels=None, 62 | stats_pr_comments=None, 63 | num_issues_opened=1, 64 | num_issues_closed=0, 65 | num_mentor_count=0, 66 | labels=None, 67 | search_query="is:issue is:open repo:user/repo", 68 | hide_label_metrics=True, 69 | hide_items_closed_count=False, 70 | enable_mentor_count=False, 71 | non_mentioning_links=False, 72 | report_title="Test Report", 73 | output_file="test_column_order.md", 74 | ) 75 | 76 | # Read the generated markdown 77 | with open("test_column_order.md", "r", encoding="utf-8") as file: 78 | content = file.read() 79 | 80 | # The table should have the columns in the correct order 81 | # and the data should be properly aligned 82 | expected_header = ( 83 | "| Title | URL | Assignee | Author | Time to first response | " 84 | "Time to close | Time to answer | Created At | Status |" 85 | ) 86 | self.assertIn(expected_header, content) 87 | 88 | # Verify the data row has correct values in correct positions 89 | # The Created At column should contain the date value 90 | # The Status column should contain the status value 91 | expected_row = ( 92 | "| Test Issue | https://github.com/user/repo/issues/1 | " 93 | "[assignee1](https://github.com/assignee1) | " 94 | "[testuser](https://github.com/testuser) | 1 day, 0:00:00 | " 95 | "2 days, 0:00:00 | 3 days, 0:00:00 | 2023-01-01T00:00:00Z | open |" 96 | ) 97 | self.assertIn(expected_row, content) 98 | 99 | # Clean up 100 | os.remove("test_column_order.md") 101 | 102 | def test_get_non_hidden_columns_order(self): 103 | """Test that get_non_hidden_columns returns columns in the correct order.""" 104 | columns = get_non_hidden_columns(labels=None) 105 | 106 | # Find the indices of the Status and Created At columns 107 | try: 108 | created_at_index = columns.index("Created At") 109 | status_index = columns.index("Status") 110 | 
111 | # Status should come after Created At 112 | self.assertGreater( 113 | status_index, 114 | created_at_index, 115 | "Status column should come after Created At column", 116 | ) 117 | except ValueError: 118 | # If one of the columns is hidden, that's fine, but we shouldn't get here 119 | # given our environment variables 120 | self.fail("Both Status and Created At columns should be present") 121 | 122 | 123 | if __name__ == "__main__": 124 | unittest.main() 125 | -------------------------------------------------------------------------------- /docs/measure-time.md: -------------------------------------------------------------------------------- 1 | # Measuring time spent in labels 2 | 3 | **Note**: The discussions API currently doesn't support the `LabeledEvent` so this action cannot measure the time spent in a label for discussions. 4 | 5 | Sometimes it is helpful to know how long an issue or pull request spent in a particular label. This action can be configured to measure the time spent in a label. This is different from only wanting to measure issues with a specific label. If that is what you want, see the section on [configuring your search query](https://github.com/github/issue-metrics/blob/main/README.md#search_query-issues-or-pull-requests-open-or-closed). 6 | 7 | Here is an example workflow that does this: 8 | 9 | ```yaml 10 | name: Monthly issue metrics 11 | on: 12 | workflow_dispatch: 13 | 14 | permissions: 15 | contents: read 16 | 17 | jobs: 18 | build: 19 | name: issue metrics 20 | runs-on: ubuntu-latest 21 | permissions: 22 | issues: write 23 | pull-requests: read 24 | 25 | steps: 26 | - name: Run issue-metrics tool 27 | uses: github/issue-metrics@v3 28 | env: 29 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 30 | LABELS_TO_MEASURE: "waiting-for-manager-approval,waiting-for-security-review" 31 | SEARCH_QUERY: 'repo:owner/repo is:issue created:2023-05-01..2023-05-31 -reason:"not planned"' 32 | 33 | - name: Create issue 34 | uses: peter-evans/create-issue-from-file@v4 35 | with: 36 | title: Monthly issue metrics report 37 | token: ${{ secrets.GITHUB_TOKEN }} 38 | content-filepath: ./issue_metrics.md 39 | assignees: 40 | ``` 41 | 42 | The report will then look like this: 43 | 44 | ```markdown 45 | # Issue Metrics 46 | 47 | | Metric | Value | 48 | | -------------------------------------------------- | --------------: | 49 | | Average time to first response | 0:50:44.666667 | 50 | | Average time to close | 6 days, 7:08:52 | 51 | | Average time to answer | 1 day | 52 | | Average time spent in waiting-for-manager-approval | 0:00:41 | 53 | | Average time spent in waiting-for-security-review | 2 days, 4:25:03 | 54 | | Number of items that remain open | 2 | 55 | | Number of items closed | 1 | 56 | | Total number of items created | 3 | 57 | 58 | | Title | URL | Time to first response | Time to close | Time to answer | Time spent in waiting-for-manager-approval | Time spent in waiting-for-security-review | 59 | | -------------------- | ------------------------------------- | ---------------------- | ------------- | -------------- | ------------------------------------------ | ----------------------------------------- | 60 | | Pull Request Title 1 | https://github.com/user/repo/pulls/1 | 0:05:26 | None | None | None | None | 61 | | Issue Title 2 | https://github.com/user/repo/issues/2 | 2:26:07 | None | None | 0:00:41 | 2 days, 4:25:03 | 62 | ``` 63 | 64 | ## Example issue_metrics.md output 65 | 66 | Here is the output with no hidden columns: 67 | 68 | ```markdown 69 | # Issue Metrics 70 | 71
| | Metric | Value | 72 | | -------------------------------- | --------------: | 73 | | Average time to first response | 0:50:44.666667 | 74 | | Average time to close | 6 days, 7:08:52 | 75 | | Average time to answer | 1 day | 76 | | Number of items that remain open | 2 | 77 | | Number of items closed | 1 | 78 | | Total number of items created | 3 | 79 | 80 | | Title | URL | Time to first response | Time to close | Time to answer | 81 | | -------------------- | ------------------------------------------ | ---------------------- | --------------- | -------------- | 82 | | Discussion Title 1 | https://github.com/user/repo/discussions/1 | 0:00:41 | 6 days, 7:08:52 | 1 day | 83 | | Pull Request Title 2 | https://github.com/user/repo/pulls/2 | 0:05:26 | None | None | 84 | | Issue Title 3 | https://github.com/user/repo/issues/3 | 2:26:07 | None | None | 85 | ``` 86 | 87 | Here is the output with all hidable columns hidden: 88 | 89 | ```markdown 90 | # Issue Metrics 91 | 92 | | Metric | Value | 93 | | -------------------------------- | ----: | 94 | | Number of items that remain open | 2 | 95 | | Number of items closed | 1 | 96 | | Total number of items created | 3 | 97 | 98 | | Title | URL | 99 | | -------------------- | ------------------------------------------ | 100 | | Discussion Title 1 | https://github.com/user/repo/discussions/1 | 101 | | Pull Request Title 2 | https://github.com/user/repo/pulls/2 | 102 | | Issue Title 3 | https://github.com/user/repo/issues/3 | 103 | ``` 104 | -------------------------------------------------------------------------------- /time_in_draft.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains a function that measures the time a pull request has been in draft state. 3 | """ 4 | 5 | from datetime import datetime, timedelta 6 | from typing import List, Union 7 | 8 | import github3 9 | import numpy 10 | import pytz 11 | from classes import IssueWithMetrics 12 | 13 | 14 | def measure_time_in_draft( 15 | issue: github3.issues.Issue, 16 | pull_request: Union[github3.pulls.PullRequest, None] = None, 17 | ) -> Union[timedelta, None]: 18 | """If a pull request has had time in the draft state, return the cumulative amount of time it was in draft. 19 | 20 | args: 21 | issue (github3.issues.Issue): A GitHub issue which has been pre-qualified as a pull request. 22 | pull_request (github3.pulls.PullRequest, optional): The pull request object. 23 | 24 | returns: 25 | Union[timedelta, None]: Total time the pull request has spent in draft state. 
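Example (illustrative only, not part of the API): a pull request opened as a draft at 09:00 UTC and marked ready for review at 11:30 UTC the same day yields timedelta(hours=2, minutes=30).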
26 | """ 27 | events = list(issue.issue.events()) 28 | draft_start = None 29 | total_draft_time = timedelta(0) 30 | 31 | # Check if PR was initially created as draft 32 | pr_created_at = None 33 | 34 | try: 35 | if pull_request is None: 36 | pull_request = issue.issue.pull_request() 37 | 38 | pr_created_at = issue.issue.created_at 39 | 40 | # Look for ready_for_review events to determine if PR was initially draft 41 | ready_for_review_events = [] 42 | convert_to_draft_events = [] 43 | for event in events: 44 | if event.event == "ready_for_review": 45 | ready_for_review_events.append(event) 46 | elif event.event == "convert_to_draft": 47 | convert_to_draft_events.append(event) 48 | 49 | # If there are ready_for_review events, check if PR was initially draft 50 | if ready_for_review_events: 51 | first_ready_event = min(ready_for_review_events, key=lambda x: x.created_at) 52 | prior_draft_events = [ 53 | e 54 | for e in convert_to_draft_events 55 | if e.created_at < first_ready_event.created_at 56 | ] 57 | 58 | if not prior_draft_events: 59 | # PR was initially created as draft, calculate time from creation to first ready_for_review 60 | total_draft_time += first_ready_event.created_at - pr_created_at 61 | 62 | # If there are no ready_for_review events but the PR is currently draft, it might be initially draft and still open 63 | elif not ready_for_review_events and not convert_to_draft_events: 64 | # Check if PR is currently draft and open 65 | if ( 66 | hasattr(pull_request, "draft") 67 | and pull_request.draft 68 | and issue.issue.state == "open" 69 | ): 70 | # PR was initially created as draft and is still draft 71 | draft_start = pr_created_at 72 | 73 | except (AttributeError, ValueError, TypeError): 74 | # If we can't get PR info, fall back to original logic 75 | pass 76 | 77 | for event in events: 78 | if event.event == "convert_to_draft": 79 | draft_start = event.created_at 80 | elif event.event == "ready_for_review" and draft_start: 81 | # Calculate draft time for this interval 82 | total_draft_time += event.created_at - draft_start 83 | draft_start = None 84 | 85 | # If the PR is currently in draft state, calculate the time in draft up to now 86 | if draft_start and issue.issue.state == "open": 87 | total_draft_time += datetime.now(pytz.utc) - draft_start 88 | 89 | # Round to the nearest second 90 | return ( 91 | timedelta(seconds=round(total_draft_time.total_seconds())) 92 | if total_draft_time > timedelta(0) 93 | else None 94 | ) 95 | 96 | 97 | def get_stats_time_in_draft( 98 | issues_with_metrics: List[IssueWithMetrics], 99 | ) -> Union[dict[str, timedelta], None]: 100 | """ 101 | Calculate stats describing the time in draft for a list of issues. 
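Returns a dict with keys "avg", "med", and "90p" (each a timedelta), or None when no issue in the list has any recorded draft time.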
102 | """ 103 | # Filter out issues with no time in draft 104 | issues_with_time_to_draft = [ 105 | issue for issue in issues_with_metrics if issue.time_in_draft is not None 106 | ] 107 | 108 | # Calculate the total time in draft for all issues 109 | draft_times = [] 110 | if issues_with_time_to_draft: 111 | for issue in issues_with_time_to_draft: 112 | if issue.time_in_draft: 113 | draft_times.append(issue.time_in_draft.total_seconds()) 114 | 115 | # Calculate stats describing time in draft 116 | num_issues_with_time_in_draft = len(issues_with_time_to_draft) 117 | if num_issues_with_time_in_draft > 0: 118 | average_time_in_draft = numpy.round(numpy.average(draft_times)) 119 | med_time_in_draft = numpy.round(numpy.median(draft_times)) 120 | ninety_percentile_time_in_draft = numpy.round( 121 | numpy.percentile(draft_times, 90, axis=0) 122 | ) 123 | else: 124 | return None 125 | 126 | stats = { 127 | "avg": timedelta(seconds=average_time_in_draft), 128 | "med": timedelta(seconds=med_time_in_draft), 129 | "90p": timedelta(seconds=ninety_percentile_time_in_draft), 130 | } 131 | 132 | # Print the average time in draft converting seconds to a readable time format 133 | print(f"Average time in draft: {timedelta(seconds=average_time_in_draft)}") 134 | return stats 135 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | # Contributing to issue-metrics 5 | 6 | First off, thanks for taking the time to contribute! :heart: 7 | 8 | All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us project owners and smooth out the experience for all involved. The team looks forward to your contributions. :tada: 9 | 10 | 11 | 12 | ## Table of Contents 13 | 14 | - [I Have a Question](#i-have-a-question) 15 | - [I Want To Contribute](#i-want-to-contribute) 16 | - [Reporting Bugs](#reporting-bugs) 17 | - [Suggesting Enhancements](#suggesting-enhancements) 18 | - [Releases](#releases) 19 | 20 | ## I Have a Question 21 | 22 | Before you ask a question, it is best to search for existing [Issues](https://github.com/github/issue-metrics/issues) that might help you. If you find a suitable issue and still need clarification, you can write your question in that issue. 23 | 24 | If you still need to ask a question after that, we recommend the following: 25 | 26 | - Open an [Issue](https://github.com/github/issue-metrics/issues/new). 27 | - Provide as much context as you can about what you're running into. 28 | - Provide project and platform versions (Python, pip, etc.), depending on what seems relevant. 29 | 30 | We will then take care of the issue as soon as possible. 31 | 32 | ## I Want To Contribute 33 | 34 | > ### Legal Notice 35 | > 36 | > When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content, and that the content you contribute may be provided under the project license. 37 | 38 | ## Reporting Bugs 39 | 40 | 41 | 42 | ### Before Submitting a Bug Report 43 | 44 | A good bug report shouldn't leave others needing to chase you up for more information.
Therefore, we ask you to investigate carefully, collect information, and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. 45 | 46 | - Make sure that you are using the latest version. 47 | - Determine if your bug is really a bug and not an error on your side, e.g. using incompatible environment components/versions (Make sure that you have read the documentation. If you are looking for support, you might want to check [this section](#i-have-a-question)). 48 | - To see if other users have experienced (and potentially already solved) the same issue you are having, check whether a bug report for your bug or error already exists in the [bug tracker](https://github.com/github/issue-metrics/issues). 49 | - Collect information about the bug: 50 | - Stack trace (Traceback) 51 | - OS, platform, and version (Windows, Linux, macOS, x86, ARM) 52 | - Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. 53 | - Possibly your input and the output 54 | - Can you reliably reproduce the issue? And can you also reproduce it with older versions? 55 | 56 | 57 | 58 | ### How Do I Submit a Good Bug Report? 59 | 60 | Please submit a bug report using our [GitHub Issues template](https://github.com/github/issue-metrics/issues/new?template=bug_report.yml). 61 | 62 | ## Suggesting Enhancements 63 | 64 | This section guides you through submitting an enhancement suggestion for issue-metrics, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. 65 | 66 | 67 | 68 | ### Before Submitting an Enhancement 69 | 70 | - Make sure that you are using the latest version. 71 | - Read the documentation carefully and find out if the functionality is already covered, maybe by an individual configuration. 72 | - Perform a [search](https://github.com/github/issue-metrics/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. 73 | - Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature or to develop the feature yourself and contribute it to the project. 74 | 75 | 76 | 77 | ### How Do I Submit a Good Enhancement Suggestion? 78 | 79 | Please submit an enhancement suggestion using our [GitHub Issues template](https://github.com/github/issue-metrics/issues/new?template=feature_request.yml). 80 | 81 | ### Pull Request Standards 82 | 83 | We are using [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) to standardize our pull request titles. This allows us to automatically generate labels and changelogs and follow semantic versioning. Please follow the commit message format when creating a pull request. The expected pull request title prefixes are listed in the [pull_request_template.md](.github/pull_request_template.md) that is shown when creating a pull request. 84 | 85 | ## Releases 86 | 87 | Releases are automated if a pull request is labelled with our [SemVer related labels](.github/release-drafter.yml) or with the `vuln` or `release` labels. 88 | 89 | You can also manually initiate a release through the GitHub Actions UI.
If you have permissions to do so, you can navigate to the [Actions tab](https://github.com/github/issue-metrics/actions/workflows/release.yml) and select the `Run workflow` button. This will allow you to select the branch to release from and the version to release. 90 | -------------------------------------------------------------------------------- /search.py: -------------------------------------------------------------------------------- 1 | """A module to search for issues in a GitHub repository.""" 2 | 3 | import sys 4 | from time import sleep 5 | from typing import List 6 | 7 | import github3 8 | import github3.structs 9 | 10 | 11 | def search_issues( 12 | search_query: str, 13 | github_connection: github3.GitHub, 14 | owners_and_repositories: List[dict], 15 | rate_limit_bypass: bool = False, 16 | ) -> List[github3.search.IssueSearchResult]: # type: ignore 17 | """ 18 | Searches for issues/prs/discussions in a GitHub repository that match 19 | the given search query and handles errors related to GitHub API responses. 20 | 21 | Args: 22 | search_query (str): The search query to use for finding issues/prs/discussions. 23 | github_connection (github3.GitHub): A connection to the GitHub API. 24 | owners_and_repositories (List[dict]): A list of dictionaries containing 25 | the owner and repository names. 26 | rate_limit_bypass (bool, optional): A flag to bypass the rate limit to be used 27 | when working with GitHub server that has rate limiting turned off. Defaults to False. 28 | 29 | Returns: 30 | List[github3.search.IssueSearchResult]: A list of issues that match the search query. 31 | """ 32 | 33 | # Rate Limit Handling: API only allows 30 requests per minute 34 | def wait_for_api_refresh( 35 | iterator: github3.structs.SearchIterator, rate_limit_bypass: bool = False 36 | ): 37 | # If the rate limit bypass is enabled, don't wait for the API to refresh 38 | if rate_limit_bypass: 39 | return 40 | 41 | max_retries = 5 42 | retry_count = 0 43 | sleep_time = 70 44 | 45 | while iterator.ratelimit_remaining < 5: 46 | if retry_count >= max_retries: 47 | raise RuntimeError("Exceeded maximum retries for API rate limit") 48 | 49 | print( 50 | f"GitHub API Rate Limit Low, waiting {sleep_time} seconds to refresh." 51 | ) 52 | sleep(sleep_time) 53 | 54 | # Exponentially increase the sleep time for the next retry 55 | sleep_time *= 2 56 | retry_count += 1 57 | 58 | issues_per_page = 100 59 | 60 | print("Searching for issues...") 61 | issues_iterator = github_connection.search_issues( 62 | search_query, per_page=issues_per_page 63 | ) 64 | wait_for_api_refresh(issues_iterator, rate_limit_bypass) 65 | 66 | issues = [] 67 | repos_and_owners_string = "" 68 | for item in owners_and_repositories: 69 | repos_and_owners_string += ( 70 | f"{item.get('owner', '')}/{item.get('repository', '')} " 71 | ) 72 | 73 | # Print the issue titles and add them to the list of issues 74 | try: 75 | for idx, issue in enumerate(issues_iterator, 1): 76 | print(issue.title) # type: ignore 77 | issues.append(issue) 78 | 79 | # requests are sent once per page of issues 80 | if idx % issues_per_page == 0: 81 | wait_for_api_refresh(issues_iterator, rate_limit_bypass) 82 | 83 | except github3.exceptions.ForbiddenError as e: 84 | print( 85 | f"You do not have permission to view a repository \ 86 | from: '{repos_and_owners_string}'; Check your API Token." 
87 | ) 88 | print_error_messages(e) 89 | sys.exit(1) 90 | except github3.exceptions.NotFoundError as e: 91 | print( 92 | f"The repository could not be found; \ 93 | Check the repository owner and names: '{repos_and_owners_string}'" 94 | ) 95 | print_error_messages(e) 96 | sys.exit(1) 97 | except github3.exceptions.ConnectionError as e: 98 | print( 99 | "There was a connection error; Check your internet connection or API Token." 100 | ) 101 | print_error_messages(e) 102 | sys.exit(1) 103 | except github3.exceptions.AuthenticationFailed as e: 104 | print("Authentication failed; Check your API Token.") 105 | print_error_messages(e) 106 | sys.exit(1) 107 | except github3.exceptions.UnprocessableEntity as e: 108 | print("The search query is invalid; Check the search query.") 109 | print_error_messages(e) 110 | sys.exit(1) 111 | 112 | return issues 113 | 114 | 115 | def print_error_messages(error: github3.exceptions): 116 | """Prints the error messages from the GitHub API response. 117 | 118 | Args: 119 | error (github3.exceptions): The error object from the GitHub API response. 120 | 121 | """ 122 | if hasattr(error, "errors"): 123 | for e in error.errors: 124 | print(f"Error: {e.get('message')}") 125 | 126 | 127 | def get_owners_and_repositories( 128 | search_query: str, 129 | ) -> List[dict]: 130 | """Get the owners and repositories from the search query. 131 | 132 | Args: 133 | search_query (str): The search query used to search for issues. 134 | 135 | Returns: 136 | List[dict]: A list of dictionaries of owners and repositories. 137 | 138 | """ 139 | search_query_split = search_query.split(" ") 140 | results_list = [] 141 | for item in search_query_split: 142 | result = {} 143 | if "repo:" in item and "/" in item: 144 | result["owner"] = item.split(":")[1].split("/")[0] 145 | result["repository"] = item.split(":")[1].split("/")[1] 146 | # "org:", "owner:", and "user:" qualifiers all name just an owner 147 | if "org:" in item or "owner:" in item or "user:" in item: 148 | result["owner"] = item.split(":")[1] 149 | if result: 150 | results_list.append(result) 151 | 152 | return results_list 153 | -------------------------------------------------------------------------------- /test_discussions.py: -------------------------------------------------------------------------------- 1 | """A module containing unit tests for the get_discussions function in the discussions module. 2 | 3 | Classes: 4 | TestGetDiscussions: A class to test the get_discussions function with mock GraphQL responses.
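Note: all GitHub API calls are mocked with unittest.mock.patch on requests.post, so these tests run without network access or real credentials.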
5 | 6 | """ 7 | 8 | import unittest 9 | from unittest.mock import patch 10 | 11 | from discussions import get_discussions 12 | 13 | 14 | class TestGetDiscussions(unittest.TestCase): 15 | """A class to test the get_discussions function in the discussions module.""" 16 | 17 | def _create_mock_response( 18 | self, discussions, has_next_page=False, end_cursor="cursor123" 19 | ): 20 | """Helper method to create a mock GraphQL response.""" 21 | return { 22 | "data": { 23 | "search": { 24 | "edges": [{"node": discussion} for discussion in discussions], 25 | "pageInfo": {"hasNextPage": has_next_page, "endCursor": end_cursor}, 26 | } 27 | } 28 | } 29 | 30 | @patch("requests.post") 31 | def test_get_discussions_single_page(self, mock_post): 32 | """Test the get_discussions function with a single page of results.""" 33 | # Mock data for two discussions 34 | mock_discussions = [ 35 | { 36 | "title": "Discussion 1", 37 | "url": "https://github.com/user/repo/discussions/1", 38 | "createdAt": "2021-01-01T00:00:00Z", 39 | "comments": {"nodes": [{"createdAt": "2021-01-01T00:01:00Z"}]}, 40 | "answerChosenAt": None, 41 | "closedAt": None, 42 | }, 43 | { 44 | "title": "Discussion 2", 45 | "url": "https://github.com/user/repo/discussions/2", 46 | "createdAt": "2021-01-02T00:00:00Z", 47 | "comments": {"nodes": [{"createdAt": "2021-01-02T00:01:00Z"}]}, 48 | "answerChosenAt": "2021-01-03T00:00:00Z", 49 | "closedAt": "2021-01-04T00:00:00Z", 50 | }, 51 | ] 52 | 53 | mock_post.return_value.status_code = 200 54 | mock_post.return_value.json.return_value = self._create_mock_response( 55 | mock_discussions, has_next_page=False 56 | ) 57 | 58 | discussions = get_discussions( 59 | "token", "repo:user/repo type:discussions query", "" 60 | ) 61 | 62 | # Check that the function returns the expected discussions 63 | self.assertEqual(len(discussions), 2) 64 | self.assertEqual(discussions[0]["title"], "Discussion 1") 65 | self.assertEqual(discussions[1]["title"], "Discussion 2") 66 | 67 | # Verify only one API call was made 68 | self.assertEqual(mock_post.call_count, 1) 69 | 70 | @patch("requests.post") 71 | def test_get_discussions_multiple_pages(self, mock_post): 72 | """Test the get_discussions function with multiple pages of results.""" 73 | # Mock data for pagination 74 | page1_discussions = [ 75 | { 76 | "title": "Discussion 1", 77 | "url": "https://github.com/user/repo/discussions/1", 78 | "createdAt": "2021-01-01T00:00:00Z", 79 | "comments": {"nodes": [{"createdAt": "2021-01-01T00:01:00Z"}]}, 80 | "answerChosenAt": None, 81 | "closedAt": None, 82 | } 83 | ] 84 | 85 | page2_discussions = [ 86 | { 87 | "title": "Discussion 2", 88 | "url": "https://github.com/user/repo/discussions/2", 89 | "createdAt": "2021-01-02T00:00:00Z", 90 | "comments": {"nodes": [{"createdAt": "2021-01-02T00:01:00Z"}]}, 91 | "answerChosenAt": None, 92 | "closedAt": None, 93 | } 94 | ] 95 | 96 | # Configure mock to return different responses for each call 97 | mock_post.return_value.status_code = 200 98 | mock_post.return_value.json.side_effect = [ 99 | self._create_mock_response( 100 | page1_discussions, has_next_page=True, end_cursor="cursor123" 101 | ), 102 | self._create_mock_response(page2_discussions, has_next_page=False), 103 | ] 104 | 105 | discussions = get_discussions( 106 | "token", "repo:user/repo type:discussions query", "" 107 | ) 108 | 109 | # Check that all discussions were returned 110 | self.assertEqual(len(discussions), 2) 111 | self.assertEqual(discussions[0]["title"], "Discussion 1") 112 | 
self.assertEqual(discussions[1]["title"], "Discussion 2") 113 | 114 | # Verify that two API calls were made 115 | self.assertEqual(mock_post.call_count, 2) 116 | 117 | @patch("requests.post") 118 | def test_get_discussions_error_status_code(self, mock_post): 119 | """Test the get_discussions function with a failed HTTP response.""" 120 | mock_post.return_value.status_code = 500 121 | 122 | with self.assertRaises(ValueError) as context: 123 | get_discussions("token", "repo:user/repo type:discussions query", "") 124 | 125 | self.assertIn( 126 | "GraphQL query failed with status code 500", str(context.exception) 127 | ) 128 | 129 | @patch("requests.post") 130 | def test_get_discussions_graphql_error(self, mock_post): 131 | """Test the get_discussions function with GraphQL errors in response.""" 132 | mock_post.return_value.status_code = 200 133 | mock_post.return_value.json.return_value = { 134 | "errors": [{"message": "GraphQL Error"}] 135 | } 136 | 137 | with self.assertRaises(ValueError) as context: 138 | get_discussions("token", "repo:user/repo type:discussions query", "") 139 | 140 | self.assertIn("GraphQL query failed:", str(context.exception)) 141 | -------------------------------------------------------------------------------- /test_pr_comments.py: -------------------------------------------------------------------------------- 1 | """Tests for the pr_comments module. 2 | 3 | This module contains tests for the count_pr_comments and get_stats_pr_comments 4 | functions. 5 | """ 6 | 7 | import unittest 8 | from unittest.mock import MagicMock 9 | 10 | from classes import IssueWithMetrics 11 | from pr_comments import count_pr_comments, get_stats_pr_comments 12 | 13 | 14 | class TestCountPRComments(unittest.TestCase): 15 | """Test the count_pr_comments function.""" 16 | 17 | def test_count_pr_comments_with_comments(self): 18 | """Test counting PR comments with actual comments.""" 19 | # Mock issue with comments 20 | mock_issue = MagicMock() 21 | mock_comment1 = MagicMock() 22 | mock_comment1.user.type = "User" 23 | mock_comment1.user.login = "user1" 24 | mock_comment2 = MagicMock() 25 | mock_comment2.user.type = "User" 26 | mock_comment2.user.login = "user2" 27 | mock_issue.issue.comments.return_value = [mock_comment1, mock_comment2] 28 | 29 | # Mock pull request with review comments 30 | mock_pull_request = MagicMock() 31 | mock_review_comment1 = MagicMock() 32 | mock_review_comment1.user.type = "User" 33 | mock_review_comment1.user.login = "user3" 34 | mock_pull_request.review_comments.return_value = [mock_review_comment1] 35 | 36 | result = count_pr_comments(mock_issue, mock_pull_request, []) 37 | self.assertEqual(result, 3) 38 | 39 | def test_count_pr_comments_with_bots_ignored(self): 40 | """Test that bot comments are ignored.""" 41 | # Mock issue with bot comment 42 | mock_issue = MagicMock() 43 | mock_bot_comment = MagicMock() 44 | mock_bot_comment.user.type = "Bot" 45 | mock_bot_comment.user.login = "github-actions[bot]" 46 | mock_user_comment = MagicMock() 47 | mock_user_comment.user.type = "User" 48 | mock_user_comment.user.login = "user1" 49 | mock_issue.issue.comments.return_value = [mock_bot_comment, mock_user_comment] 50 | 51 | mock_pull_request = MagicMock() 52 | mock_pull_request.review_comments.return_value = [] 53 | 54 | result = count_pr_comments(mock_issue, mock_pull_request, []) 55 | self.assertEqual(result, 1) 56 | 57 | def test_count_pr_comments_with_ignored_users(self): 58 | """Test that ignored users are not counted.""" 59 | # Mock issue with comments from ignored 
user 60 | mock_issue = MagicMock() 61 | mock_comment1 = MagicMock() 62 | mock_comment1.user.type = "User" 63 | mock_comment1.user.login = "ignored_user" 64 | mock_comment2 = MagicMock() 65 | mock_comment2.user.type = "User" 66 | mock_comment2.user.login = "regular_user" 67 | mock_issue.issue.comments.return_value = [mock_comment1, mock_comment2] 68 | 69 | mock_pull_request = MagicMock() 70 | mock_pull_request.review_comments.return_value = [] 71 | 72 | result = count_pr_comments(mock_issue, mock_pull_request, ["ignored_user"]) 73 | self.assertEqual(result, 1) 74 | 75 | def test_count_pr_comments_no_pull_request(self): 76 | """Test that None is returned when no pull request is provided.""" 77 | mock_issue = MagicMock() 78 | result = count_pr_comments(mock_issue, None, []) 79 | self.assertIsNone(result) 80 | 81 | def test_count_pr_comments_no_issue(self): 82 | """Test that None is returned when no issue is provided.""" 83 | mock_pull_request = MagicMock() 84 | result = count_pr_comments(None, mock_pull_request, []) 85 | self.assertIsNone(result) 86 | 87 | def test_count_pr_comments_exception_handling(self): 88 | """Test that exceptions are handled gracefully.""" 89 | # Mock issue that raises exception 90 | mock_issue = MagicMock() 91 | mock_issue.issue.comments.side_effect = AttributeError("No comments") 92 | 93 | mock_pull_request = MagicMock() 94 | mock_pull_request.review_comments.side_effect = AttributeError( 95 | "No review comments" 96 | ) 97 | 98 | result = count_pr_comments(mock_issue, mock_pull_request, []) 99 | self.assertEqual(result, 0) 100 | 101 | 102 | class TestGetStatsPRComments(unittest.TestCase): 103 | """Test the get_stats_pr_comments function.""" 104 | 105 | def test_get_stats_pr_comments_with_data(self): 106 | """Test calculating PR comment statistics with data.""" 107 | issues_with_metrics = [ 108 | IssueWithMetrics("PR 1", "url1", "user1", pr_comment_count=5), 109 | IssueWithMetrics("PR 2", "url2", "user2", pr_comment_count=10), 110 | IssueWithMetrics("PR 3", "url3", "user3", pr_comment_count=3), 111 | IssueWithMetrics("Issue 1", "url4", "user4"), # No comment count (not a PR) 112 | ] 113 | 114 | result = get_stats_pr_comments(issues_with_metrics) 115 | 116 | self.assertIsNotNone(result) 117 | self.assertEqual(result["avg"], 6.0) # (5+10+3)/3 118 | self.assertEqual(result["med"], 5.0) 119 | self.assertEqual(result["90p"], 9.0) # 90th percentile 120 | 121 | def test_get_stats_pr_comments_no_data(self): 122 | """Test calculating PR comment statistics with no PR data.""" 123 | issues_with_metrics = [ 124 | IssueWithMetrics("Issue 1", "url1", "user1"), # No comment count 125 | IssueWithMetrics("Issue 2", "url2", "user2"), # No comment count 126 | ] 127 | 128 | result = get_stats_pr_comments(issues_with_metrics) 129 | self.assertIsNone(result) 130 | 131 | def test_get_stats_pr_comments_empty_list(self): 132 | """Test calculating PR comment statistics with empty list.""" 133 | result = get_stats_pr_comments([]) 134 | self.assertIsNone(result) 135 | 136 | 137 | if __name__ == "__main__": 138 | unittest.main() 139 | -------------------------------------------------------------------------------- /labels.py: -------------------------------------------------------------------------------- 1 | """Functions for calculating time spent in labels.""" 2 | 3 | from datetime import datetime, timedelta 4 | from typing import List 5 | 6 | import github3 7 | import numpy 8 | import pytz 9 | from classes import IssueWithMetrics 10 | 11 | 12 | def get_label_events( 13 | issue: 
github3.issues.Issue, labels: List[str] # type: ignore 14 | ) -> List[github3.issues.event]: # type: ignore 15 | """ 16 | Get the label events for a given issue if the label is of interest. 17 | 18 | Args: 19 | issue (github3.issues.Issue): A GitHub issue. 20 | labels (List[str]): A list of labels of interest. 21 | 22 | Returns: 23 | List[github3.issues.event]: A list of label events for the given issue. 24 | """ 25 | label_events = [] 26 | for event in issue.issue.events(): 27 | if event.event in ("labeled", "unlabeled") and event.label["name"] in labels: 28 | label_events.append(event) 29 | 30 | return label_events 31 | 32 | 33 | def get_label_metrics(issue: github3.issues.Issue, labels: List[str]) -> dict: 34 | """ 35 | Calculate the time spent with the given labels on a given issue. 36 | 37 | Args: 38 | issue (github3.issues.Issue): A GitHub issue. 39 | labels (List[str]): A list of labels to measure time spent in. 40 | 41 | Returns: 42 | dict: A dictionary containing the time spent in each label or None. 43 | """ 44 | label_metrics: dict = {} 45 | label_events = get_label_events(issue, labels) 46 | label_last_event_type: dict = {} 47 | 48 | for label in labels: 49 | label_metrics[label] = None 50 | 51 | # If the event is one of the labels we're looking for, add the time to the dictionary 52 | unlabeled = {} 53 | labeled = {} 54 | if not label_events: 55 | return label_metrics 56 | 57 | # Calculate the time to add or subtract to the time spent in label based on the label events 58 | for event in label_events: 59 | # Skip labeling events that have occurred past issue close time 60 | if issue.closed_at is not None and ( 61 | event.created_at >= datetime.fromisoformat(issue.closed_at) 62 | ): 63 | continue 64 | 65 | if event.event == "labeled": 66 | labeled[event.label["name"]] = True 67 | if event.label["name"] in labels: 68 | if label_metrics[event.label["name"]] is None: 69 | label_metrics[event.label["name"]] = timedelta(0) 70 | label_metrics[ 71 | event.label["name"] 72 | ] -= event.created_at - datetime.fromisoformat(issue.created_at) 73 | label_last_event_type[event.label["name"]] = "labeled" 74 | elif event.event == "unlabeled": 75 | unlabeled[event.label["name"]] = True 76 | if event.label["name"] in labels: 77 | if label_metrics[event.label["name"]] is None: 78 | label_metrics[event.label["name"]] = timedelta(0) 79 | label_metrics[ 80 | event.label["name"] 81 | ] += event.created_at - datetime.fromisoformat(issue.created_at) 82 | label_last_event_type[event.label["name"]] = "unlabeled" 83 | 84 | for label in labels: 85 | if label in labeled: 86 | # if the issue is closed, add the time from the issue creation to the closed_at time 87 | if issue.state == "closed": 88 | # Only add the final (closed_at - created_at) span if the label was still applied at closure. 
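# (Accounting note: each "labeled" event subtracted (event time - created_at) and each "unlabeled" event added it back, so adding the full (closed_at - created_at) span here completes the interval for a label that was still applied when the issue closed.)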
89 | if label_last_event_type.get(label) != "labeled": 90 | continue 91 | label_metrics[label] += datetime.fromisoformat( 92 | issue.closed_at 93 | ) - datetime.fromisoformat(issue.created_at) 94 | else: 95 | # skip label if last labeling event is 'unlabeled' and issue is still open 96 | if label_last_event_type[label] == "unlabeled": 97 | continue 98 | 99 | # if the issue is open, add the time from the issue creation to now 100 | label_metrics[label] += datetime.now(pytz.utc) - datetime.fromisoformat( 101 | issue.created_at 102 | ) 103 | 104 | return label_metrics 105 | 106 | 107 | def get_stats_time_in_labels( 108 | issues_with_metrics: List[IssueWithMetrics], 109 | labels: dict[str, timedelta], 110 | ) -> dict[str, dict[str, timedelta | None]]: 111 | """Calculate stats describing time spent in each label.""" 112 | time_in_labels = {} 113 | for issue in issues_with_metrics: 114 | if issue.label_metrics: 115 | for label in issue.label_metrics: 116 | if issue.label_metrics[label] is None: 117 | continue 118 | if label not in time_in_labels: 119 | time_in_labels[label] = [issue.label_metrics[label].total_seconds()] 120 | else: 121 | time_in_labels[label].append( 122 | issue.label_metrics[label].total_seconds() 123 | ) 124 | 125 | average_time_in_labels: dict[str, timedelta | None] = {} 126 | med_time_in_labels: dict[str, timedelta | None] = {} 127 | ninety_percentile_in_labels: dict[str, timedelta | None] = {} 128 | for label, time_list in time_in_labels.items(): 129 | average_time_in_labels[label] = timedelta( 130 | seconds=numpy.round(numpy.average(time_list)) 131 | ) 132 | med_time_in_labels[label] = timedelta( 133 | seconds=numpy.round(numpy.median(time_list)) 134 | ) 135 | ninety_percentile_in_labels[label] = timedelta( 136 | seconds=numpy.round(numpy.percentile(time_list, 90, axis=0)) 137 | ) 138 | 139 | for label in labels: 140 | if label not in average_time_in_labels: 141 | average_time_in_labels[label] = None 142 | med_time_in_labels[label] = None 143 | ninety_percentile_in_labels[label] = None 144 | 145 | stats = { 146 | "avg": average_time_in_labels, 147 | "med": med_time_in_labels, 148 | "90p": ninety_percentile_in_labels, 149 | } 150 | return stats 151 | -------------------------------------------------------------------------------- /test_assignee_integration.py: -------------------------------------------------------------------------------- 1 | """Integration test for assignee functionality.""" 2 | 3 | import json 4 | import os 5 | import tempfile 6 | import unittest 7 | from datetime import datetime, timedelta 8 | from unittest.mock import patch 9 | 10 | from classes import IssueWithMetrics 11 | from json_writer import write_to_json 12 | from markdown_writer import write_to_markdown 13 | 14 | 15 | class TestAssigneeIntegration(unittest.TestCase): 16 | """Integration test for assignee functionality.""" 17 | 18 | @patch.dict( 19 | os.environ, 20 | { 21 | "GH_TOKEN": "test_token", 22 | "SEARCH_QUERY": "repo:test/repo is:issue", 23 | }, 24 | clear=True, 25 | ) 26 | def test_assignee_in_markdown_output(self): 27 | """Test that assignee information appears correctly in markdown output.""" 28 | issues_with_metrics = [ 29 | IssueWithMetrics( 30 | title="Test Issue 1", 31 | html_url="https://github.com/test/repo/issues/1", 32 | author="john", 33 | assignee="alice", 34 | assignees=["alice"], 35 | time_to_first_response=timedelta(hours=2), 36 | time_to_close=timedelta(days=1), 37 | created_at=datetime.now() - timedelta(days=2), 38 | ), 39 | IssueWithMetrics( 40 | title="Test Issue 2", 
41 | html_url="https://github.com/test/repo/issues/2", 42 | author="jane", 43 | assignee=None, 44 | assignees=[], 45 | time_to_first_response=timedelta(hours=4), 46 | time_to_close=None, 47 | created_at=datetime.now() - timedelta(days=1), 48 | ), 49 | ] 50 | 51 | with tempfile.NamedTemporaryFile(mode="w", suffix=".md", delete=False) as f: 52 | output_file = f.name 53 | 54 | try: 55 | write_to_markdown( 56 | issues_with_metrics=issues_with_metrics, 57 | average_time_to_first_response={ 58 | "avg": timedelta(hours=3), 59 | "med": timedelta(hours=3), 60 | "90p": timedelta(hours=4), 61 | }, 62 | average_time_to_close={ 63 | "avg": timedelta(days=1), 64 | "med": timedelta(days=1), 65 | "90p": timedelta(days=1), 66 | }, 67 | average_time_to_answer=None, 68 | average_time_in_draft=None, 69 | average_time_in_labels=None, 70 | stats_pr_comments=None, 71 | num_issues_opened=2, 72 | num_issues_closed=1, 73 | num_mentor_count=0, 74 | labels=None, 75 | search_query="repo:test/repo is:issue", 76 | hide_label_metrics=True, 77 | hide_items_closed_count=False, 78 | enable_mentor_count=False, 79 | non_mentioning_links=False, 80 | report_title="Test Issue Metrics", 81 | output_file=output_file, 82 | ghe="", 83 | ) 84 | 85 | # Read and verify the markdown content 86 | with open(output_file, "r", encoding="utf-8") as f: 87 | content = f.read() 88 | 89 | # Check for assignee column header 90 | self.assertIn("| Assignee |", content) 91 | 92 | # Check for assignee data - alice should be linked 93 | self.assertIn("[alice](https://github.com/alice)", content) 94 | 95 | # Check for None assignee 96 | self.assertIn("| None |", content) 97 | 98 | # Check that both assignee and author columns are present 99 | self.assertIn("| Author |", content) 100 | 101 | finally: 102 | os.unlink(output_file) 103 | 104 | def test_assignee_in_json_output(self): 105 | """Test that assignee information appears correctly in JSON output.""" 106 | issues_with_metrics = [ 107 | IssueWithMetrics( 108 | title="Test Issue 1", 109 | html_url="https://github.com/test/repo/issues/1", 110 | author="john", 111 | assignee="alice", 112 | assignees=["alice", "bob"], 113 | time_to_first_response=timedelta(hours=2), 114 | time_to_close=timedelta(days=1), 115 | created_at=datetime.now() - timedelta(days=2), 116 | ), 117 | IssueWithMetrics( 118 | title="Test Issue 2", 119 | html_url="https://github.com/test/repo/issues/2", 120 | author="jane", 121 | assignee=None, 122 | assignees=[], 123 | time_to_first_response=timedelta(hours=4), 124 | time_to_close=None, 125 | created_at=datetime.now() - timedelta(days=1), 126 | ), 127 | ] 128 | 129 | with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f: 130 | output_file = f.name 131 | 132 | try: 133 | json_output = write_to_json( 134 | issues_with_metrics=issues_with_metrics, 135 | stats_time_to_first_response={ 136 | "avg": timedelta(hours=3), 137 | "med": timedelta(hours=3), 138 | "90p": timedelta(hours=4), 139 | }, 140 | stats_time_to_close={ 141 | "avg": timedelta(days=1), 142 | "med": timedelta(days=1), 143 | "90p": timedelta(days=1), 144 | }, 145 | stats_time_to_answer=None, 146 | stats_time_in_draft=None, 147 | stats_time_in_labels=None, 148 | stats_pr_comments=None, 149 | num_issues_opened=2, 150 | num_issues_closed=1, 151 | num_mentor_count=0, 152 | search_query="repo:test/repo is:issue", 153 | output_file=output_file, 154 | ) 155 | 156 | # Parse the JSON output 157 | data = json.loads(json_output) 158 | 159 | # Check that assignee fields are present 160 | issue1 = data["issues"][0] 
161 | self.assertEqual(issue1["assignee"], "alice") 162 | self.assertEqual(issue1["assignees"], ["alice", "bob"]) 163 | self.assertEqual(issue1["author"], "john") 164 | 165 | issue2 = data["issues"][1] 166 | self.assertIsNone(issue2["assignee"]) 167 | self.assertEqual(issue2["assignees"], []) 168 | self.assertEqual(issue2["author"], "jane") 169 | 170 | finally: 171 | os.unlink(output_file) 172 | 173 | 174 | if __name__ == "__main__": 175 | unittest.main() 176 | -------------------------------------------------------------------------------- /test_search.py: -------------------------------------------------------------------------------- 1 | """Unit tests for the search module.""" 2 | 3 | import unittest 4 | from unittest.mock import MagicMock 5 | 6 | from search import get_owners_and_repositories, search_issues 7 | 8 | 9 | class TestSearchIssues(unittest.TestCase): 10 | """Unit tests for the search_issues function. 11 | 12 | This class contains unit tests for the search_issues function in the 13 | issue_metrics module. The tests use the unittest module and the unittest.mock 14 | module to mock the GitHub API and test the function in isolation. 15 | 16 | Methods: 17 | test_search_issues_with_owner_and_repository: 18 | Test that search_issues with owner/repo returns the correct issues. 19 | test_search_issues_with_just_owner_or_org: 20 | Test that search_issues with just an owner/org returns the correct issues. 21 | test_search_issues_with_just_owner_or_org_with_bypass: 22 | Test that search_issues with just an owner/org returns the correct issues 23 | with rate limit bypass enabled. 24 | 25 | """ 26 | 27 | def test_search_issues_with_owner_and_repository(self): 28 | """Test that search_issues with owner/repo returns the correct issues.""" 29 | 30 | # Set up the mock GitHub connection object 31 | mock_issues = [ 32 | MagicMock(title="Issue 1"), 33 | MagicMock(title="Issue 2"), 34 | ] 35 | 36 | # simulating github3.structs.SearchIterator return value 37 | mock_search_result = MagicMock() 38 | mock_search_result.__iter__.return_value = iter(mock_issues) 39 | mock_search_result.ratelimit_remaining = 30 40 | 41 | mock_connection = MagicMock() 42 | mock_connection.search_issues.return_value = mock_search_result 43 | 44 | # Call search_issues and check that it returns the correct issues 45 | repo_with_owner = {"owner": "owner1", "repository": "repo1"} 46 | owners_and_repositories = [repo_with_owner] 47 | issues = search_issues("is:open", mock_connection, owners_and_repositories) 48 | self.assertEqual(issues, mock_issues) 49 | 50 | def test_search_issues_with_just_owner_or_org(self): 51 | """Test that search_issues with just an owner/org returns the correct issues.""" 52 | 53 | # Set up the mock GitHub connection object 54 | mock_issues = [ 55 | MagicMock(title="Issue 1"), 56 | MagicMock(title="Issue 2"), 57 | MagicMock(title="Issue 3"), 58 | ] 59 | 60 | # simulating github3.structs.SearchIterator return value 61 | mock_search_result = MagicMock() 62 | mock_search_result.__iter__.return_value = iter(mock_issues) 63 | mock_search_result.ratelimit_remaining = 30 64 | 65 | mock_connection = MagicMock() 66 | mock_connection.search_issues.return_value = mock_search_result 67 | 68 | # Call search_issues and check that it returns the correct issues 69 | org = {"owner": "org1"} 70 | owners = [org] 71 | issues = search_issues("is:open", mock_connection, owners) 72 | self.assertEqual(issues, mock_issues) 73 | 74 | def test_search_issues_with_just_owner_or_org_with_bypass(self): 75 | """Test that 
search_issues with just an owner/org returns the correct issues when the rate limit bypass is enabled.""" 76 | 77 | # Set up the mock GitHub connection object 78 | mock_issues = [ 79 | MagicMock(title="Issue 1"), 80 | MagicMock(title="Issue 2"), 81 | MagicMock(title="Issue 3"), 82 | ] 83 | 84 | # simulating github3.structs.SearchIterator return value 85 | mock_search_result = MagicMock() 86 | mock_search_result.__iter__.return_value = iter(mock_issues) 87 | mock_search_result.ratelimit_remaining = 30 88 | 89 | mock_connection = MagicMock() 90 | mock_connection.search_issues.return_value = mock_search_result 91 | 92 | # Call search_issues and check that it returns the correct issues 93 | org = {"owner": "org1"} 94 | owners = [org] 95 | issues = search_issues( 96 | "is:open", mock_connection, owners, rate_limit_bypass=True 97 | ) 98 | self.assertEqual(issues, mock_issues) 99 | 100 | 101 | class TestGetOwnerAndRepository(unittest.TestCase): 102 | """Unit tests for the get_owners_and_repositories function. 103 | 104 | This class contains unit tests for the get_owners_and_repositories function in the 105 | issue_metrics module. The tests use the unittest module and the unittest.mock 106 | module to mock the GitHub API and test the function in isolation. 107 | 108 | Methods: 109 | test_get_owners_with_owner_and_repo_in_query: Test get both owner and repo. 110 | test_get_owner_and_repositories_without_repo_in_query: Test get just owner. 111 | test_get_owners_and_repositories_without_either_in_query: Test get neither. 112 | test_get_owners_and_repositories_with_multiple_entries: Test get multiple entries. 113 | test_get_owners_and_repositories_with_org: Test get org as owner. 114 | test_get_owners_and_repositories_with_user: Test get user as owner. 115 | """ 116 | 117 | def test_get_owners_with_owner_and_repo_in_query(self): 118 | """Test get both owner and repo.""" 119 | result = get_owners_and_repositories("repo:owner1/repo1") 120 | self.assertEqual(result[0].get("owner"), "owner1") 121 | self.assertEqual(result[0].get("repository"), "repo1") 122 | 123 | def test_get_owner_and_repositories_without_repo_in_query(self): 124 | """Test get just owner.""" 125 | result = get_owners_and_repositories("org:owner1") 126 | self.assertEqual(result[0].get("owner"), "owner1") 127 | self.assertIsNone(result[0].get("repository")) 128 | 129 | def test_get_owners_and_repositories_without_either_in_query(self): 130 | """Test get neither.""" 131 | result = get_owners_and_repositories("is:blah") 132 | self.assertEqual(result, []) 133 | 134 | def test_get_owners_and_repositories_with_multiple_entries(self): 135 | """Test get multiple entries.""" 136 | result = get_owners_and_repositories("repo:owner1/repo1 org:owner2") 137 | self.assertEqual(result[0].get("owner"), "owner1") 138 | self.assertEqual(result[0].get("repository"), "repo1") 139 | self.assertEqual(result[1].get("owner"), "owner2") 140 | self.assertIsNone(result[1].get("repository")) 141 | 142 | def test_get_owners_and_repositories_with_org(self): 143 | """Test get org as owner.""" 144 | result = get_owners_and_repositories("org:owner1") 145 | self.assertEqual(result[0].get("owner"), "owner1") 146 | self.assertIsNone(result[0].get("repository")) 147 | 148 | def test_get_owners_and_repositories_with_user(self): 149 | """Test get user as owner.""" 150 | result = get_owners_and_repositories("user:owner1") 151 | self.assertEqual(result[0].get("owner"), "owner1") 152 | self.assertIsNone(result[0].get("repository")) 153 | --------------------------------------------------------------------------------
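As a quick companion to the tests above, here is a minimal sketch (illustrative only, not part of the repository) of the parsing behavior of `get_owners_and_repositories` on a combined query:

```python
from search import get_owners_and_repositories

# Illustrative only: the expected values mirror the unit tests above.
# "repo:" qualifiers yield both keys, "org:"/"user:"/"owner:" qualifiers
# yield just the owner, and plain qualifiers like "is:open" are skipped.
result = get_owners_and_repositories("repo:owner1/repo1 org:owner2 is:open")
print(result)
# [{'owner': 'owner1', 'repository': 'repo1'}, {'owner': 'owner2'}]
```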
/most_active_mentors.py: -------------------------------------------------------------------------------- 1 | """A module for measuring the number of very active mentors 2 | 3 | This module provides functions for measuring the number of active mentors on a 4 | project. 5 | 6 | This is measured by the number of PR comments. We are working under the assumption 7 | that PR comments are left in good faith to move contributors further instead of 8 | nitpicking and discouraging them. 9 | 10 | Open questions: 11 | - should there be an option to limit this to certain users, e.g. core 12 | maintainers? 13 | - should there be a limit to how many comments per PR we consider to avoid 14 | having the statistic dominated by contested PRs? 15 | - should this metric count consecutive comments coming from the same user as 16 | only one to avoid people unnecessarily splitting their comments to game the 17 | metric? 18 | - instead of PR comments should we count PRs on which a username was seen as 19 | commenter? 20 | 21 | Functions: 22 | count_comments_per_user( 23 | issue: Union[github3.issues.Issue, None], 24 | discussion: Union[dict, None], 25 | pull_request: Union[github3.pulls.PullRequest, None], 26 | ignore_users: List[str] | None, 27 | max_comments_to_eval, heavily_involved, 28 | ) -> dict 29 | Count the number of comments per username for a single item. Take only 30 | the top n comments (max_comments_to_eval) into consideration. 31 | get_mentor_count( 32 | issues_with_metrics: List[IssueWithMetrics], cutoff: int) -> int 33 | Count the number of mentors active at least `cutoff` times. 34 | 35 | """ 36 | 37 | from collections import Counter 38 | from datetime import datetime 39 | from typing import Dict, List, Union 40 | 41 | import github3 42 | from classes import IssueWithMetrics 43 | 44 | 45 | def count_comments_per_user( 46 | issue: Union[github3.issues.Issue, None], # type: ignore 47 | discussion: Union[dict, None] = None, 48 | pull_request: Union[github3.pulls.PullRequest, None] = None, 49 | ready_for_review_at: Union[datetime, None] = None, 50 | ignore_users: List[str] | None = None, 51 | max_comments_to_eval=20, 52 | heavily_involved=3, 53 | ) -> dict: 54 | """Count the number of times a user was seen commenting on a single item. 55 | 56 | Args: 57 | issue (Union[github3.issues.Issue, None]): A GitHub issue. 58 | pull_request (Union[github3.pulls.PullRequest, None]): A GitHub pull 59 | request. 60 | ignore_users (List[str]): A list of GitHub usernames to ignore. 61 | max_comments_to_eval: Maximum number of comments per item to look at. 62 | heavily_involved: Maximum number of comments to count for one 63 | user per issue. 64 | 65 | Returns: 66 | dict: A dictionary of usernames seen and number of comments they left.
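For example (illustrative values only), a possible return value is {"alice": 3, "bob": 1}.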
67 | 68 | """ 69 | if ignore_users is None: 70 | ignore_users = [] 71 | mentor_count: Dict[str, int] = {} 72 | 73 | # Get the first comments 74 | if issue: 75 | comments = issue.issue.comments( 76 | number=max_comments_to_eval, sort="created", direction="asc" 77 | ) # type: ignore 78 | for comment in comments: 79 | if ignore_comment( 80 | issue.issue.user, 81 | comment.user, 82 | ignore_users, 83 | comment.created_at, 84 | ready_for_review_at, 85 | ): 86 | continue 87 | # increase the number of comments left by current user by 1 88 | if comment.user.login in mentor_count: 89 | if mentor_count[comment.user.login] < heavily_involved: 90 | mentor_count[comment.user.login] += 1 91 | else: 92 | mentor_count[comment.user.login] = 1 93 | 94 | # Check if the issue is actually a pull request 95 | # so we can also count its review comments 96 | if pull_request: 97 | review_comments = pull_request.reviews(number=max_comments_to_eval) 98 | # type: ignore 99 | for review_comment in review_comments: 100 | if ignore_comment( 101 | issue.issue.user, 102 | review_comment.user, 103 | ignore_users, 104 | review_comment.submitted_at, 105 | ready_for_review_at, 106 | ): 107 | continue 108 | 109 | # increase the number of comments left by current user by 1 110 | if review_comment.user.login in mentor_count: 111 | mentor_count[review_comment.user.login] += 1 112 | else: 113 | mentor_count[review_comment.user.login] = 1 114 | 115 | if discussion and len(discussion["comments"]["nodes"]) > 0: 116 | for comment in discussion["comments"]["nodes"]: 117 | if ignore_comment( 118 | comment.user, 119 | comment.user, 120 | ignore_users, 121 | comment.submitted_at, 122 | comment.ready_for_review_at, 123 | ): 124 | continue 125 | 126 | # increase the number of comments left by current user by 1 127 | if comment.user.login in mentor_count: 128 | mentor_count[comment.user.login] += 1 129 | else: 130 | mentor_count[comment.user.login] = 1 131 | 132 | return mentor_count 133 | 134 | 135 | def ignore_comment( 136 | issue_user: github3.users.User, 137 | comment_user: github3.users.User, 138 | ignore_users: List[str], 139 | comment_created_at: datetime, 140 | ready_for_review_at: Union[datetime, None], 141 | ) -> bool: 142 | """Check if a comment should be ignored.""" 143 | return bool( 144 | # ignore comments by IGNORE_USERS 145 | comment_user.login in ignore_users 146 | # ignore comments by bots 147 | or comment_user.type == "Bot" 148 | # ignore comments by the issue creator 149 | or comment_user.login == issue_user.login 150 | # ignore pending reviews 151 | or not comment_created_at 152 | # ignore comments created before the issue was ready for review 153 | or (ready_for_review_at and comment_created_at < ready_for_review_at) 154 | ) 155 | 156 | 157 | def get_mentor_count(issues_with_metrics: List[IssueWithMetrics], cutoff: int) -> int: 158 | """Calculate the number of active mentors on the project. 159 | 160 | Args: 161 | issues_with_metrics (List[IssueWithMetrics]): A list of issues with 162 | metrics. 163 | cutoff (int): The minimum number of comments a user has to leave 164 | to count as an active mentor.
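For example (illustrative): with cutoff=3, a user must have left at least three counted comments across all items to be counted as active.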
165 | 166 | Returns: 167 | int: Number of active mentors 168 | 169 | """ 170 | 171 | mentor_count: Counter[str] = Counter({}) 172 | for issue_with_metrics in issues_with_metrics: 173 | current_counter = Counter(issue_with_metrics.mentor_activity) 174 | mentor_count = mentor_count + current_counter 175 | 176 | active_mentor_count = 0 177 | for count in mentor_count.values(): 178 | if count >= cutoff: 179 | active_mentor_count += 1 180 | 181 | return active_mentor_count 182 | -------------------------------------------------------------------------------- /time_to_first_response.py: -------------------------------------------------------------------------------- 1 | """A module for measuring the time it takes to get the first response to a GitHub issue. 2 | 3 | This module provides functions for measuring the time it takes to get the first response 4 | to a GitHub issue, as well as calculating the average time to first response for a list 5 | of issues. 6 | 7 | Functions: 8 | measure_time_to_first_response( 9 | issue: Union[github3.issues.Issue, None], 10 | discussion: Union[dict, None], 11 | pull_request: Union[github3.pulls.PullRequest, None], 12 | ) -> Union[timedelta, None]: 13 | Measure the time to first response for a single issue or a discussion. 14 | get_stats_time_to_first_response( 15 | issues: List[IssueWithMetrics] 16 | ) -> Union[dict[str, timedelta], None]: 17 | Calculate stats describing time to first response for a list of issues with metrics. 18 | 19 | """ 20 | 21 | from datetime import datetime, timedelta 22 | from typing import List, Union 23 | 24 | import github3 25 | import numpy 26 | from classes import IssueWithMetrics 27 | 28 | 29 | def measure_time_to_first_response( 30 | issue: Union[github3.issues.Issue, None], # type: ignore 31 | discussion: Union[dict, None], 32 | pull_request: Union[github3.pulls.PullRequest, None] = None, 33 | ready_for_review_at: Union[datetime, None] = None, 34 | ignore_users: Union[List[str], None] = None, 35 | ) -> Union[timedelta, None]: 36 | """Measure the time to first response for a single issue, pull request, or a discussion. 37 | 38 | Args: 39 | issue (Union[github3.issues.Issue, None]): A GitHub issue. 40 | discussion (Union[dict, None]): A GitHub discussion. 41 | pull_request (Union[github3.pulls.PullRequest, None]): A GitHub pull request. 42 | ignore_users (List[str]): A list of GitHub usernames to ignore. 43 | 44 | Returns: 45 | Union[timedelta, None]: The time to first response for the issue/discussion.
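Example (illustrative only): an issue created at 10:00 UTC whose first qualifying comment arrives at 10:45 UTC the same day yields timedelta(minutes=45).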
46 | 47 | """ 48 | first_review_comment_time = None 49 | first_comment_time = None 50 | earliest_response = None 51 | issue_time = None 52 | if ignore_users is None: 53 | ignore_users = [] 54 | 55 | # Get the first comment time 56 | if issue: 57 | comments = issue.issue.comments( 58 | number=20, sort="created", direction="asc" 59 | ) # type: ignore 60 | for comment in comments: 61 | if ignore_comment( 62 | issue.issue.user, 63 | comment.user, 64 | ignore_users, 65 | comment.created_at, 66 | ready_for_review_at, 67 | ): 68 | continue 69 | first_comment_time = comment.created_at 70 | break 71 | 72 | # Check if the issue is actually a pull request 73 | # so we may also get the first review comment time 74 | if pull_request: 75 | review_comments = pull_request.reviews(number=50) # type: ignore 76 | try: 77 | for review_comment in review_comments: 78 | if ignore_comment( 79 | issue.issue.user, 80 | review_comment.user, 81 | ignore_users, 82 | review_comment.submitted_at, 83 | ready_for_review_at, 84 | ): 85 | continue 86 | first_review_comment_time = review_comment.submitted_at 87 | break 88 | except TypeError as e: 89 | print( 90 | f"An error occurred processing review comments. Perhaps the review contains a ghost user. {e}" 91 | ) 92 | 93 | # Figure out the earliest response timestamp 94 | if first_comment_time and first_review_comment_time: 95 | earliest_response = min(first_comment_time, first_review_comment_time) 96 | elif first_comment_time: 97 | earliest_response = first_comment_time 98 | elif first_review_comment_time: 99 | earliest_response = first_review_comment_time 100 | else: 101 | return None 102 | 103 | # Get the created_at time for the issue so we can calculate the time to first response 104 | if ready_for_review_at: 105 | issue_time = ready_for_review_at 106 | else: 107 | issue_time = datetime.fromisoformat(issue.created_at) 108 | 109 | if discussion and len(discussion["comments"]["nodes"]) > 0: 110 | earliest_response = datetime.fromisoformat( 111 | discussion["comments"]["nodes"][0]["createdAt"] 112 | ) 113 | issue_time = datetime.fromisoformat(discussion["createdAt"]) 114 | 115 | if earliest_response and issue_time: 116 | time_between_issue_and_first_comment: timedelta | None = ( 117 | earliest_response - issue_time 118 | ) 119 | return time_between_issue_and_first_comment 120 | 121 | return None 122 | 123 | 124 | def ignore_comment( 125 | issue_user: github3.users.User, 126 | comment_user: github3.users.User, 127 | ignore_users: List[str], 128 | comment_created_at: datetime, 129 | ready_for_review_at: Union[datetime, None], 130 | ) -> bool: 131 | """Check if a comment should be ignored.""" 132 | 133 | user_is_ignored: bool = comment_user.login in ignore_users 134 | user_is_a_bot: bool = str(comment_user.type.lower()) == "bot" 135 | user_is_issue_creator: bool = str(comment_user.login) == str(issue_user.login) 136 | issue_was_created_before_ready_for_review: bool = False 137 | is_pending_comment: bool = not isinstance(comment_created_at, datetime) 138 | if ready_for_review_at and not is_pending_comment: 139 | issue_was_created_before_ready_for_review = ( 140 | comment_created_at < ready_for_review_at 141 | ) 142 | result: bool = ( 143 | user_is_ignored 144 | or user_is_a_bot 145 | or user_is_issue_creator 146 | or is_pending_comment 147 | or issue_was_created_before_ready_for_review 148 | ) 149 | return result 150 | 151 | 152 | def get_stats_time_to_first_response( 153 | issues: List[IssueWithMetrics], 154 | ) -> Union[dict[str, timedelta], None]: 155 | """Calculate the 
189 |     """
190 |     response_times = []
191 |     none_count = 0
192 |     for issue in issues:
193 |         if issue.time_to_first_response:
194 |             response_times.append(issue.time_to_first_response.total_seconds())
195 |         else:
196 |             none_count += 1
197 | 
198 |     if len(issues) - none_count <= 0:
199 |         return None
200 | 
201 |     average_seconds_to_first_response = numpy.round(numpy.average(response_times))
202 |     med_seconds_to_first_response = numpy.round(numpy.median(response_times))
203 |     ninety_percentile_seconds_to_first_response = numpy.round(
204 |         numpy.percentile(response_times, 90, axis=0)
205 |     )
206 | 
207 |     stats = {
208 |         "avg": timedelta(seconds=average_seconds_to_first_response),
209 |         "med": timedelta(seconds=med_seconds_to_first_response),
210 |         "90p": timedelta(seconds=ninety_percentile_seconds_to_first_response),
211 |     }
212 | 
213 |     # Print the average time to first response converting seconds to a readable time format
214 |     print(
215 |         f"Average time to first response: {timedelta(seconds=average_seconds_to_first_response)}"
216 |     )
217 | 
218 |     return stats
219 | 
--------------------------------------------------------------------------------
/test_assignee_functionality.py:
--------------------------------------------------------------------------------
1 | """Test assignee functionality added to issue metrics."""
2 | 
3 | import os
4 | import unittest
5 | from unittest.mock import patch
6 | 
7 | from classes import IssueWithMetrics
8 | from markdown_writer import get_non_hidden_columns
9 | 
10 | 
11 | class TestAssigneeFunctionality(unittest.TestCase):
12 |     """Test suite for the assignee functionality."""
13 | 
14 |     @patch.dict(
15 |         os.environ,
16 |         {
17 |             "GH_TOKEN": "test_token",
18 |             "SEARCH_QUERY": "is:issue is:open repo:user/repo",
19 |             "HIDE_ASSIGNEE": "false",
20 |             "HIDE_AUTHOR": "false",
21 |         },
22 |         clear=True,
23 |     )
24 |     def test_get_non_hidden_columns_includes_assignee_by_default(self):
25 |         """Test that assignee column is included by default."""
26 |         columns = get_non_hidden_columns(labels=None)
27 |         self.assertIn("Assignee", columns)
28 |         self.assertIn("Author", columns)
29 | 
30 |     @patch.dict(
31 |         os.environ,
32 |         {
33 |             "GH_TOKEN": "test_token",
34 |             "SEARCH_QUERY": "is:issue is:open repo:user/repo",
35 |             "HIDE_ASSIGNEE": "true",
36 |             "HIDE_AUTHOR": "false",
37 |         },
38 |         clear=True,
39 |     )
40 |     def test_get_non_hidden_columns_hides_assignee_when_env_set(self):
41 |         """Test that assignee column is hidden when HIDE_ASSIGNEE is true."""
42 |         columns = get_non_hidden_columns(labels=None)
43 |         self.assertNotIn("Assignee", columns)
44 |         self.assertIn("Author", columns)
45 | 
46 |     @patch.dict(
47 |         os.environ,
48 |         {
49 |             "GH_TOKEN": "test_token",
50 |             "SEARCH_QUERY": "is:issue is:open repo:user/repo",
51 |             "HIDE_ASSIGNEE": "false",
52 |             "HIDE_AUTHOR": "true",
53 |         },
54 |         clear=True,
55 |     )
56 |     def test_get_non_hidden_columns_shows_assignee_but_hides_author(self):
57 |         """Test that assignee can be shown while author is hidden."""
58 |         columns = get_non_hidden_columns(labels=None)
59 |         self.assertIn("Assignee", columns)
60 |         self.assertNotIn("Author", columns)
61 | 
62 |     @patch.dict(
63 |         os.environ,
64 |         {
65 |             "GH_TOKEN": "test_token",
66 |             "SEARCH_QUERY": "is:issue is:open repo:user/repo",
repo:user/repo", 67 | "HIDE_ASSIGNEE": "true", 68 | "HIDE_AUTHOR": "true", 69 | }, 70 | clear=True, 71 | ) 72 | def test_get_non_hidden_columns_hides_both_assignee_and_author(self): 73 | """Test that both assignee and author can be hidden.""" 74 | columns = get_non_hidden_columns(labels=None) 75 | self.assertNotIn("Assignee", columns) 76 | self.assertNotIn("Author", columns) 77 | 78 | @patch.dict( 79 | os.environ, 80 | { 81 | "GH_TOKEN": "test_token", 82 | "SEARCH_QUERY": "is:issue is:open repo:user/repo", 83 | "HIDE_STATUS": "false", 84 | }, 85 | clear=True, 86 | ) 87 | def test_get_non_hidden_columns_includes_status_by_default(self): 88 | """Test that status column is included by default.""" 89 | columns = get_non_hidden_columns(labels=None) 90 | self.assertIn("Status", columns) 91 | 92 | @patch.dict( 93 | os.environ, 94 | { 95 | "GH_TOKEN": "test_token", 96 | "SEARCH_QUERY": "is:issue is:open repo:user/repo", 97 | "HIDE_STATUS": "true", 98 | }, 99 | clear=True, 100 | ) 101 | def test_get_non_hidden_columns_hides_status_when_env_set(self): 102 | """Test that status column is hidden when HIDE_STATUS is true.""" 103 | columns = get_non_hidden_columns(labels=None) 104 | self.assertNotIn("Status", columns) 105 | 106 | def test_assignee_column_position(self): 107 | """Test that assignee column appears before author column.""" 108 | with patch.dict( 109 | os.environ, 110 | { 111 | "GH_TOKEN": "test_token", 112 | "SEARCH_QUERY": "is:issue is:open repo:user/repo", 113 | "HIDE_ASSIGNEE": "false", 114 | "HIDE_AUTHOR": "false", 115 | }, 116 | clear=True, 117 | ): 118 | columns = get_non_hidden_columns(labels=None) 119 | assignee_index = columns.index("Assignee") 120 | author_index = columns.index("Author") 121 | self.assertLess( 122 | assignee_index, 123 | author_index, 124 | "Assignee column should appear before Author column", 125 | ) 126 | 127 | def test_multiple_assignees_rendering_logic(self): 128 | """Test that multiple assignees are rendered correctly in assignee column.""" 129 | 130 | # Test the assignee rendering logic directly 131 | endpoint = "github.com" 132 | columns = ["Title", "URL", "Assignee", "Author"] 133 | 134 | # Initialize variables 135 | multiple_output = "" 136 | single_output = "" 137 | none_output = "" 138 | 139 | # Test case 1: Multiple assignees 140 | issue_multiple = IssueWithMetrics( 141 | title="Test Issue with Multiple Assignees", 142 | html_url="https://github.com/test/repo/issues/1", 143 | author="testuser", 144 | assignee="alice", 145 | assignees=["alice", "bob", "charlie"], 146 | ) 147 | 148 | # Simulate the new rendering logic 149 | if "Assignee" in columns: 150 | if issue_multiple.assignees: 151 | assignee_links = [ 152 | f"[{assignee}](https://{endpoint}/{assignee})" 153 | for assignee in issue_multiple.assignees 154 | ] 155 | multiple_output = f" {', '.join(assignee_links)} |" 156 | else: 157 | multiple_output = " None |" 158 | 159 | expected_multiple = ( 160 | " [alice](https://github.com/alice), [bob](https://github.com/bob), " 161 | "[charlie](https://github.com/charlie) |" 162 | ) 163 | self.assertEqual( 164 | multiple_output, 165 | expected_multiple, 166 | "Multiple assignees should be rendered as comma-separated links", 167 | ) 168 | 169 | # Test case 2: Single assignee 170 | issue_single = IssueWithMetrics( 171 | title="Test Issue with Single Assignee", 172 | html_url="https://github.com/test/repo/issues/2", 173 | author="testuser", 174 | assignee="alice", 175 | assignees=["alice"], 176 | ) 177 | 178 | if "Assignee" in columns: 179 | if 
180 |                 assignee_links = [
181 |                     f"[{assignee}](https://{endpoint}/{assignee})"
182 |                     for assignee in issue_single.assignees
183 |                 ]
184 |                 single_output = f" {', '.join(assignee_links)} |"
185 |             else:
186 |                 single_output = " None |"
187 | 
188 |         expected_single = " [alice](https://github.com/alice) |"
189 |         self.assertEqual(
190 |             single_output,
191 |             expected_single,
192 |             "Single assignee should be rendered as a single link",
193 |         )
194 | 
195 |         # Test case 3: No assignees
196 |         issue_none = IssueWithMetrics(
197 |             title="Test Issue with No Assignees",
198 |             html_url="https://github.com/test/repo/issues/3",
199 |             author="testuser",
200 |             assignee=None,
201 |             assignees=[],
202 |         )
203 | 
204 |         if "Assignee" in columns:
205 |             if issue_none.assignees:
206 |                 assignee_links = [
207 |                     f"[{assignee}](https://{endpoint}/{assignee})"
208 |                     for assignee in issue_none.assignees
209 |                 ]
210 |                 none_output = f" {', '.join(assignee_links)} |"
211 |             else:
212 |                 none_output = " None |"
213 | 
214 |         expected_none = " None |"
215 |         self.assertEqual(
216 |             none_output, expected_none, "No assignees should be rendered as 'None'"
217 |         )
218 | 
219 |         print(f"✅ Multiple assignees test: {expected_multiple}")
220 |         print(f"✅ Single assignee test: {expected_single}")
221 |         print(f"✅ No assignees test: {expected_none}")
222 | 
223 | 
224 | if __name__ == "__main__":
225 |     unittest.main()
226 | 
--------------------------------------------------------------------------------
/json_writer.py:
--------------------------------------------------------------------------------
1 | """A module for writing GitHub issue metrics to a json file.
2 | 
3 | Functions:
4 |     write_to_json(
5 |         issues_with_metrics: Union[List[IssueWithMetrics], None],
6 |         stats_time_to_first_response: Union[dict[str, timedelta], None],
7 |         stats_time_to_close: Union[dict[str, timedelta], None],
8 |         stats_time_to_answer: Union[dict[str, timedelta], None],
9 |         stats_time_in_draft: Union[dict[str, timedelta], None],
10 |         stats_time_in_labels: Union[dict[str, dict[str, timedelta]], None],
11 |         stats_pr_comments: Union[Dict[str, float], None],
12 |         num_issues_opened: Union[int, None],
13 |         num_issues_closed: Union[int, None],
14 |         num_mentor_count: Union[int, None],
15 |         search_query: str,
16 |         output_file: str,
17 |     ) -> str:
18 |         Write the issues with metrics to a json file.
19 | """
20 | 
21 | import json
22 | import os
23 | from datetime import timedelta
24 | from typing import Any, Dict, List, Union
25 | 
26 | from classes import IssueWithMetrics
27 | 
28 | 
29 | def write_to_json(
30 |     issues_with_metrics: Union[List[IssueWithMetrics], None],
31 |     stats_time_to_first_response: Union[dict[str, timedelta], None],
32 |     stats_time_to_close: Union[dict[str, timedelta], None],
33 |     stats_time_to_answer: Union[dict[str, timedelta], None],
34 |     stats_time_in_draft: Union[dict[str, timedelta], None],
35 |     stats_time_in_labels: Union[dict[str, dict[str, timedelta]], None],
36 |     stats_pr_comments: Union[Dict[str, float], None],
37 |     num_issues_opened: Union[int, None],
38 |     num_issues_closed: Union[int, None],
39 |     num_mentor_count: Union[int, None],
40 |     search_query: str,
41 |     output_file: str,
42 | ) -> str:
43 |     """
44 |     Write the issues with metrics to a JSON file (issue_metrics.json by default).
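45 | 
46 |     Besides writing the file, the serialized metrics are also appended to the
47 |     step output file named by the GITHUB_OUTPUT environment variable (when it
48 |     is set) as metrics=<json>, so later workflow steps can consume them.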
49 | 
50 |     The JSON structure is like the following (abridged to a single example item):
51 |     {
52 |         "average_time_to_first_response": "None",
53 |         "average_time_to_close": "None",
54 |         "average_time_to_answer": "None",
55 |         "average_time_in_draft": "None",
56 |         "average_time_in_labels": {},
57 |         "median_time_to_first_response": "None",
58 |         "median_time_to_close": "None",
59 |         "median_time_to_answer": "None",
60 |         "median_time_in_draft": "None",
61 |         "median_time_in_labels": {},
62 |         "90_percentile_time_to_first_response": "None",
63 |         "90_percentile_time_to_close": "None",
64 |         "90_percentile_time_to_answer": "None",
65 |         "90_percentile_time_in_draft": "None",
66 |         "90_percentile_time_in_labels": {},
67 |         "average_pr_comments": null,
68 |         "median_pr_comments": null,
69 |         "90_percentile_pr_comments": null,
70 |         "num_items_opened": 2,
71 |         "num_items_closed": 0,
72 |         "num_mentor_count": 5,
73 |         "total_item_count": 2,
74 |         "issues": [
75 |             {
76 |                 "title": "Issue 1",
77 |                 "html_url": "https://github.com/owner/repo/issues/1",
78 |                 "author": "alice",
79 |                 "assignee": null,
80 |                 "assignees": [],
81 |                 "time_to_first_response": "None",
82 |                 "time_to_close": "None",
83 |                 "time_to_answer": "None",
84 |                 "time_in_draft": "None",
85 |                 "label_metrics": {},
86 |                 "pr_comment_count": null,
87 |                 "created_at": "None"
88 |             }
89 |         ],
90 |         "search_query": "is:issue repo:owner/repo"
91 |     }
92 |     """
93 | 
94 |     # Ensure issues_with_metrics is not None
95 |     if not issues_with_metrics:
96 |         return ""
97 | 
98 |     # time to first response
99 |     average_time_to_first_response = None
100 |     med_time_to_first_response = None
101 |     p90_time_to_first_response = None
102 |     if stats_time_to_first_response is not None:
103 |         average_time_to_first_response = stats_time_to_first_response["avg"]
104 |         med_time_to_first_response = stats_time_to_first_response["med"]
105 |         p90_time_to_first_response = stats_time_to_first_response["90p"]
106 | 
107 |     # time to close
108 |     average_time_to_close = None
109 |     med_time_to_close = None
110 |     p90_time_to_close = None
111 |     if stats_time_to_close is not None:
112 |         average_time_to_close = stats_time_to_close["avg"]
113 |         med_time_to_close = stats_time_to_close["med"]
114 |         p90_time_to_close = stats_time_to_close["90p"]
115 | 
116 |     # time to answer
117 |     average_time_to_answer = None
118 |     med_time_to_answer = None
119 |     p90_time_to_answer = None
120 |     if stats_time_to_answer is not None:
121 |         average_time_to_answer = stats_time_to_answer["avg"]
122 |         med_time_to_answer = stats_time_to_answer["med"]
123 |         p90_time_to_answer = stats_time_to_answer["90p"]
124 | 
125 |     # time in draft
126 |     average_time_in_draft = None
127 |     med_time_in_draft = None
128 |     p90_time_in_draft = None
129 |     if stats_time_in_draft is not None:
130 |         average_time_in_draft = stats_time_in_draft["avg"]
131 |         med_time_in_draft = stats_time_in_draft["med"]
132 |         p90_time_in_draft = stats_time_in_draft["90p"]
133 | 
134 |     # time in labels
135 |     average_time_in_labels = {}
136 |     med_time_in_labels = {}
137 |     p90_time_in_labels = {}
138 |     if stats_time_in_labels is not None:
139 |         for label, time in stats_time_in_labels["avg"].items():
140 |             average_time_in_labels[label] = str(time)
141 |         for label, time in stats_time_in_labels["med"].items():
142 |             med_time_in_labels[label] = str(time)
143 |         for label, time in stats_time_in_labels["90p"].items():
144 |             p90_time_in_labels[label] = str(time)
145 | 
146 |     # PR comments statistics
147 |     average_pr_comments = None
148 |     med_pr_comments = None
149 | 
p90_pr_comments = None 150 | if stats_pr_comments is not None: 151 | average_pr_comments = stats_pr_comments["avg"] 152 | med_pr_comments = stats_pr_comments["med"] 153 | p90_pr_comments = stats_pr_comments["90p"] 154 | 155 | # Create a dictionary with the metrics 156 | metrics: dict[str, Any] = { 157 | "average_time_to_first_response": str(average_time_to_first_response), 158 | "average_time_to_close": str(average_time_to_close), 159 | "average_time_to_answer": str(average_time_to_answer), 160 | "average_time_in_draft": str(average_time_in_draft), 161 | "average_time_in_labels": average_time_in_labels, 162 | "median_time_to_first_response": str(med_time_to_first_response), 163 | "median_time_to_close": str(med_time_to_close), 164 | "median_time_to_answer": str(med_time_to_answer), 165 | "median_time_in_draft": str(med_time_in_draft), 166 | "median_time_in_labels": med_time_in_labels, 167 | "90_percentile_time_to_first_response": str(p90_time_to_first_response), 168 | "90_percentile_time_to_close": str(p90_time_to_close), 169 | "90_percentile_time_to_answer": str(p90_time_to_answer), 170 | "90_percentile_time_in_draft": str(p90_time_in_draft), 171 | "90_percentile_time_in_labels": p90_time_in_labels, 172 | "average_pr_comments": average_pr_comments, 173 | "median_pr_comments": med_pr_comments, 174 | "90_percentile_pr_comments": p90_pr_comments, 175 | "num_items_opened": num_issues_opened, 176 | "num_items_closed": num_issues_closed, 177 | "num_mentor_count": num_mentor_count, 178 | "total_item_count": len(issues_with_metrics), 179 | } 180 | 181 | # Create a list of dictionaries with the issues and metrics 182 | issues = [] 183 | for issue in issues_with_metrics: 184 | formatted_label_metrics = {} 185 | if issue.label_metrics: 186 | for label, time in issue.label_metrics.items(): 187 | formatted_label_metrics[label] = str(time) 188 | issues.append( 189 | { 190 | "title": issue.title, 191 | "html_url": issue.html_url, 192 | "author": issue.author, 193 | "assignee": issue.assignee, 194 | "assignees": issue.assignees, 195 | "time_to_first_response": str(issue.time_to_first_response), 196 | "time_to_close": str(issue.time_to_close), 197 | "time_to_answer": str(issue.time_to_answer), 198 | "time_in_draft": str(issue.time_in_draft), 199 | "label_metrics": formatted_label_metrics, 200 | "pr_comment_count": issue.pr_comment_count, 201 | "created_at": str(issue.created_at), 202 | } 203 | ) 204 | 205 | # Add the issues to the metrics dictionary 206 | metrics["issues"] = issues 207 | 208 | # Add the search query to the metrics dictionary 209 | metrics["search_query"] = search_query 210 | 211 | # add output to github action output 212 | # pylint: disable=unspecified-encoding 213 | metrics_json = json.dumps(metrics) 214 | if os.environ.get("GITHUB_OUTPUT"): 215 | with open(os.environ["GITHUB_OUTPUT"], "a") as file_handle: 216 | print(f"metrics={metrics_json}", file=file_handle) 217 | 218 | # Write the metrics to a JSON file 219 | output_file_name = output_file if output_file else "issue_metrics.json" 220 | with open(output_file_name, "w", encoding="utf-8") as file: 221 | json.dump(metrics, file, indent=4) 222 | 223 | return metrics_json 224 | -------------------------------------------------------------------------------- /test_json_writer.py: -------------------------------------------------------------------------------- 1 | """Tests for the write_to_json function in json_writer.py.""" 2 | 3 | import json 4 | import unittest 5 | from datetime import timedelta 6 | 7 | from classes import 
IssueWithMetrics 8 | from json_writer import write_to_json 9 | 10 | 11 | class TestWriteToJson(unittest.TestCase): 12 | """Tests for the write_to_json function.""" 13 | 14 | # Show differences without omission in assertion 15 | maxDiff = None 16 | 17 | def test_write_to_json(self): 18 | """Test that write_to_json writes the correct JSON file.""" 19 | issues_with_metrics = [ 20 | IssueWithMetrics( 21 | title="Issue 1", 22 | html_url="https://github.com/owner/repo/issues/1", 23 | author="alice", 24 | assignee="charlie", 25 | assignees=["charlie"], 26 | time_to_first_response=timedelta(days=3), 27 | time_to_close=timedelta(days=6), 28 | time_to_answer=None, 29 | time_in_draft=timedelta(days=1), 30 | labels_metrics={ 31 | "bug": timedelta(days=1, hours=16, minutes=24, seconds=12) 32 | }, 33 | created_at=timedelta(days=-5), 34 | ), 35 | IssueWithMetrics( 36 | title="Issue 2", 37 | html_url="https://github.com/owner/repo/issues/2", 38 | author="bob", 39 | assignee=None, 40 | assignees=[], 41 | time_to_first_response=timedelta(days=2), 42 | time_to_close=timedelta(days=4), 43 | time_to_answer=timedelta(days=1), 44 | labels_metrics={}, 45 | created_at=timedelta(days=-5), 46 | ), 47 | ] 48 | 49 | stats_time_to_first_response = { 50 | "avg": timedelta(days=2.5), 51 | "med": timedelta(days=2.5), 52 | "90p": timedelta(days=1.5), 53 | } 54 | stats_time_to_close = { 55 | "avg": timedelta(days=5), 56 | "med": timedelta(days=4), 57 | "90p": timedelta(days=3), 58 | } 59 | stats_time_to_answer = { 60 | "avg": timedelta(days=1), 61 | "med": timedelta(days=2), 62 | "90p": timedelta(days=3), 63 | } 64 | stats_time_in_draft = { 65 | "avg": timedelta(days=1), 66 | "med": timedelta(days=1), 67 | "90p": timedelta(days=1), 68 | } 69 | stats_time_in_labels = { 70 | "avg": {"bug": timedelta(days=1, hours=16, minutes=24, seconds=12)}, 71 | "med": {"bug": timedelta(days=1, hours=16, minutes=24, seconds=12)}, 72 | "90p": {"bug": timedelta(days=1, hours=16, minutes=24, seconds=12)}, 73 | } 74 | num_issues_opened = 2 75 | num_issues_closed = 1 76 | num_mentor_count = 5 77 | 78 | expected_output = { 79 | "average_time_to_first_response": "2 days, 12:00:00", 80 | "average_time_to_close": "5 days, 0:00:00", 81 | "average_time_to_answer": "1 day, 0:00:00", 82 | "average_time_in_draft": "1 day, 0:00:00", 83 | "average_time_in_labels": {"bug": "1 day, 16:24:12"}, 84 | "median_time_to_first_response": "2 days, 12:00:00", 85 | "median_time_to_close": "4 days, 0:00:00", 86 | "median_time_to_answer": "2 days, 0:00:00", 87 | "median_time_in_draft": "1 day, 0:00:00", 88 | "median_time_in_labels": {"bug": "1 day, 16:24:12"}, 89 | "90_percentile_time_to_first_response": "1 day, 12:00:00", 90 | "90_percentile_time_to_close": "3 days, 0:00:00", 91 | "90_percentile_time_to_answer": "3 days, 0:00:00", 92 | "90_percentile_time_in_draft": "1 day, 0:00:00", 93 | "90_percentile_time_in_labels": {"bug": "1 day, 16:24:12"}, 94 | "average_pr_comments": None, 95 | "median_pr_comments": None, 96 | "90_percentile_pr_comments": None, 97 | "num_items_opened": 2, 98 | "num_items_closed": 1, 99 | "num_mentor_count": 5, 100 | "total_item_count": 2, 101 | "issues": [ 102 | { 103 | "title": "Issue 1", 104 | "html_url": "https://github.com/owner/repo/issues/1", 105 | "author": "alice", 106 | "assignee": "charlie", 107 | "assignees": ["charlie"], 108 | "time_to_first_response": "3 days, 0:00:00", 109 | "time_to_close": "6 days, 0:00:00", 110 | "time_to_answer": "None", 111 | "time_in_draft": "1 day, 0:00:00", 112 | "label_metrics": {"bug": "1 day, 
16:24:12"}, 113 | "pr_comment_count": None, 114 | "created_at": "-5 days, 0:00:00", 115 | }, 116 | { 117 | "title": "Issue 2", 118 | "html_url": "https://github.com/owner/repo/issues/2", 119 | "author": "bob", 120 | "assignee": None, 121 | "assignees": [], 122 | "time_to_first_response": "2 days, 0:00:00", 123 | "time_to_close": "4 days, 0:00:00", 124 | "time_to_answer": "1 day, 0:00:00", 125 | "time_in_draft": "None", 126 | "label_metrics": {}, 127 | "pr_comment_count": None, 128 | "created_at": "-5 days, 0:00:00", 129 | }, 130 | ], 131 | "search_query": "is:issue repo:owner/repo", 132 | } 133 | 134 | # Call the function and check the output 135 | self.assertEqual( 136 | write_to_json( 137 | issues_with_metrics=issues_with_metrics, 138 | stats_time_to_first_response=stats_time_to_first_response, 139 | stats_time_to_close=stats_time_to_close, 140 | stats_time_to_answer=stats_time_to_answer, 141 | stats_time_in_draft=stats_time_in_draft, 142 | stats_time_in_labels=stats_time_in_labels, 143 | stats_pr_comments=None, 144 | num_issues_opened=num_issues_opened, 145 | num_issues_closed=num_issues_closed, 146 | num_mentor_count=num_mentor_count, 147 | search_query="is:issue repo:owner/repo", 148 | output_file="issue_metrics.json", 149 | ), 150 | json.dumps(expected_output), 151 | ) 152 | 153 | def test_write_to_json_with_no_response(self): 154 | """Test where there is no answer to a issue.""" 155 | issues_with_metrics = [ 156 | IssueWithMetrics( 157 | title="Issue 1", 158 | html_url="https://github.com/owner/repo/issues/1", 159 | author="alice", 160 | assignee=None, 161 | assignees=[], 162 | time_to_first_response=None, 163 | time_to_close=None, 164 | time_to_answer=None, 165 | labels_metrics={}, 166 | created_at=None, 167 | ), 168 | IssueWithMetrics( 169 | title="Issue 2", 170 | html_url="https://github.com/owner/repo/issues/2", 171 | author="bob", 172 | assignee=None, 173 | assignees=[], 174 | time_to_first_response=None, 175 | time_to_close=None, 176 | time_to_answer=None, 177 | labels_metrics={}, 178 | created_at=None, 179 | ), 180 | ] 181 | 182 | stats_time_to_first_response = None 183 | stats_time_to_close = None 184 | stats_time_to_answer = None 185 | stats_time_in_labels = { 186 | "avg": {}, 187 | "med": {}, 188 | "90p": {}, 189 | } 190 | stats_time_in_draft = None 191 | num_issues_opened = 2 192 | num_issues_closed = 0 193 | num_mentor_count = 5 194 | 195 | expected_output = { 196 | "average_time_to_first_response": "None", 197 | "average_time_to_close": "None", 198 | "average_time_to_answer": "None", 199 | "average_time_in_draft": "None", 200 | "average_time_in_labels": {}, 201 | "median_time_to_first_response": "None", 202 | "median_time_to_close": "None", 203 | "median_time_to_answer": "None", 204 | "median_time_in_draft": "None", 205 | "median_time_in_labels": {}, 206 | "90_percentile_time_to_first_response": "None", 207 | "90_percentile_time_to_close": "None", 208 | "90_percentile_time_to_answer": "None", 209 | "90_percentile_time_in_draft": "None", 210 | "90_percentile_time_in_labels": {}, 211 | "average_pr_comments": None, 212 | "median_pr_comments": None, 213 | "90_percentile_pr_comments": None, 214 | "num_items_opened": 2, 215 | "num_items_closed": 0, 216 | "num_mentor_count": 5, 217 | "total_item_count": 2, 218 | "issues": [ 219 | { 220 | "title": "Issue 1", 221 | "html_url": "https://github.com/owner/repo/issues/1", 222 | "author": "alice", 223 | "assignee": None, 224 | "assignees": [], 225 | "time_to_first_response": "None", 226 | "time_to_close": "None", 227 | 
"time_to_answer": "None", 228 | "time_in_draft": "None", 229 | "label_metrics": {}, 230 | "pr_comment_count": None, 231 | "created_at": "None", 232 | }, 233 | { 234 | "title": "Issue 2", 235 | "html_url": "https://github.com/owner/repo/issues/2", 236 | "author": "bob", 237 | "assignee": None, 238 | "assignees": [], 239 | "time_to_first_response": "None", 240 | "time_to_close": "None", 241 | "time_to_answer": "None", 242 | "time_in_draft": "None", 243 | "label_metrics": {}, 244 | "pr_comment_count": None, 245 | "created_at": "None", 246 | }, 247 | ], 248 | "search_query": "is:issue repo:owner/repo", 249 | } 250 | 251 | # Call the function and check the output 252 | self.assertEqual( 253 | write_to_json( 254 | issues_with_metrics=issues_with_metrics, 255 | stats_time_to_first_response=stats_time_to_first_response, 256 | stats_time_to_close=stats_time_to_close, 257 | stats_time_to_answer=stats_time_to_answer, 258 | stats_time_in_draft=stats_time_in_draft, 259 | stats_time_in_labels=stats_time_in_labels, 260 | stats_pr_comments=None, 261 | num_issues_opened=num_issues_opened, 262 | num_issues_closed=num_issues_closed, 263 | num_mentor_count=num_mentor_count, 264 | search_query="is:issue repo:owner/repo", 265 | output_file="issue_metrics.json", 266 | ), 267 | json.dumps(expected_output), 268 | ) 269 | 270 | 271 | if __name__ == "__main__": 272 | unittest.main() 273 | -------------------------------------------------------------------------------- /test_time_in_draft.py: -------------------------------------------------------------------------------- 1 | """A test suite for the measure_time_in_draft function.""" 2 | 3 | import unittest 4 | from datetime import datetime, timedelta 5 | from unittest.mock import MagicMock 6 | 7 | import github3 8 | import pytz 9 | from time_in_draft import get_stats_time_in_draft, measure_time_in_draft 10 | 11 | 12 | class TestMeasureTimeInDraft(unittest.TestCase): 13 | """ 14 | Unit tests for the measure_time_in_draft function. 15 | """ 16 | 17 | def setUp(self): 18 | """ 19 | Setup common test data and mocks. 20 | """ 21 | self.issue = MagicMock() 22 | self.issue.issue = MagicMock(spec=github3.issues.Issue) 23 | self.issue.issue.state = "open" 24 | 25 | def test_time_in_draft_with_ready_for_review(self): 26 | """ 27 | Test measure_time_in_draft with one draft and review interval. 28 | """ 29 | self.issue.issue.events.return_value = [ 30 | MagicMock( 31 | event="convert_to_draft", 32 | created_at=datetime(2021, 1, 1, tzinfo=pytz.utc), 33 | ), 34 | MagicMock( 35 | event="ready_for_review", 36 | created_at=datetime(2021, 1, 3, tzinfo=pytz.utc), 37 | ), 38 | ] 39 | result = measure_time_in_draft(self.issue) 40 | expected = timedelta(days=2) 41 | self.assertEqual(result, expected, "The time in draft should be 2 days.") 42 | 43 | def test_time_in_draft_without_ready_for_review(self): 44 | """ 45 | Test measure_time_in_draft when ready_for_review_at is not provided and issue is still open. 
46 | """ 47 | self.issue.issue.events.return_value = [ 48 | MagicMock( 49 | event="convert_to_draft", 50 | created_at=datetime(2021, 1, 1, tzinfo=pytz.utc), 51 | ), 52 | ] 53 | now = datetime(2021, 1, 4, tzinfo=pytz.utc) 54 | with unittest.mock.patch("time_in_draft.datetime") as mock_datetime: 55 | mock_datetime.now.return_value = now 56 | result = measure_time_in_draft(self.issue) 57 | expected = timedelta(days=3) 58 | self.assertEqual(result, expected, "The time in draft should be 3 days.") 59 | 60 | def test_time_in_draft_multiple_intervals(self): 61 | """ 62 | Test measure_time_in_draft with multiple draft intervals. 63 | """ 64 | self.issue.issue.events.return_value = [ 65 | MagicMock( 66 | event="convert_to_draft", 67 | created_at=datetime(2021, 1, 1, tzinfo=pytz.utc), 68 | ), 69 | MagicMock( 70 | event="ready_for_review", 71 | created_at=datetime(2021, 1, 3, tzinfo=pytz.utc), 72 | ), 73 | MagicMock( 74 | event="convert_to_draft", 75 | created_at=datetime(2021, 1, 5, tzinfo=pytz.utc), 76 | ), 77 | MagicMock( 78 | event="ready_for_review", 79 | created_at=datetime(2021, 1, 7, tzinfo=pytz.utc), 80 | ), 81 | ] 82 | result = measure_time_in_draft(self.issue) 83 | expected = timedelta(days=4) 84 | self.assertEqual(result, expected, "The total time in draft should be 4 days.") 85 | 86 | def test_time_in_draft_ongoing_draft(self): 87 | """ 88 | Test measure_time_in_draft with an ongoing draft interval. 89 | """ 90 | self.issue.issue.events.return_value = [ 91 | MagicMock( 92 | event="convert_to_draft", 93 | created_at=datetime(2021, 1, 1, tzinfo=pytz.utc), 94 | ), 95 | ] 96 | with unittest.mock.patch("time_in_draft.datetime") as mock_datetime: 97 | mock_datetime.now.return_value = datetime(2021, 1, 4, tzinfo=pytz.utc) 98 | result = measure_time_in_draft(self.issue) 99 | expected = timedelta(days=3) 100 | self.assertEqual( 101 | result, expected, "The ongoing draft time should be 3 days." 102 | ) 103 | 104 | def test_time_in_draft_no_draft_events(self): 105 | """ 106 | Test measure_time_in_draft with no draft-related events. 107 | """ 108 | self.issue.issue.events.return_value = [] 109 | result = measure_time_in_draft(self.issue) 110 | self.assertIsNone( 111 | result, "The result should be None when there are no draft events." 112 | ) 113 | 114 | def test_time_in_draft_without_ready_for_review_and_closed(self): 115 | """ 116 | Test measure_time_in_draft for a closed issue with an ongoing draft and ready_for_review_at is not provided. 117 | """ 118 | self.issue.issue.events.return_value = [ 119 | MagicMock( 120 | event="convert_to_draft", 121 | created_at=datetime(2021, 1, 1, tzinfo=pytz.utc), 122 | ), 123 | ] 124 | self.issue.issue.state = "closed" 125 | result = measure_time_in_draft(self.issue) 126 | self.assertIsNone( 127 | result, 128 | "The result should be None for a closed issue with an ongoing draft.", 129 | ) 130 | 131 | def test_time_in_draft_initially_created_as_draft(self): 132 | """ 133 | Test measure_time_in_draft with a PR initially created as draft. 
134 | """ 135 | # Set up issue created_at time 136 | self.issue.issue.created_at = datetime(2021, 1, 1, tzinfo=pytz.utc) 137 | 138 | # Mock events with only ready_for_review (no convert_to_draft) 139 | self.issue.issue.events.return_value = [ 140 | MagicMock( 141 | event="ready_for_review", 142 | created_at=datetime(2021, 1, 3, tzinfo=pytz.utc), 143 | ), 144 | ] 145 | 146 | # Mock pull request object 147 | mock_pull_request = MagicMock() 148 | 149 | result = measure_time_in_draft(self.issue, mock_pull_request) 150 | expected = timedelta(days=2) 151 | self.assertEqual( 152 | result, 153 | expected, 154 | "The time in draft should be 2 days for initially draft PR.", 155 | ) 156 | 157 | def test_time_in_draft_initially_created_as_draft_still_open(self): 158 | """ 159 | Test measure_time_in_draft with a PR initially created as draft and still in draft. 160 | """ 161 | # Set up issue created_at time 162 | self.issue.issue.created_at = datetime(2021, 1, 1, tzinfo=pytz.utc) 163 | 164 | # Mock events with no ready_for_review events (still draft) 165 | self.issue.issue.events.return_value = [] 166 | 167 | # Mock pull request object indicating it's currently draft 168 | mock_pull_request = MagicMock() 169 | mock_pull_request.draft = True 170 | 171 | with unittest.mock.patch("time_in_draft.datetime") as mock_datetime: 172 | # Keep the real datetime class but only mock the now() method 173 | mock_datetime.fromisoformat = datetime.fromisoformat 174 | mock_datetime.now.return_value = datetime(2021, 1, 4, tzinfo=pytz.utc) 175 | result = measure_time_in_draft(self.issue, mock_pull_request) 176 | expected = timedelta(days=3) 177 | self.assertEqual( 178 | result, 179 | expected, 180 | "The time in draft should be 3 days for initially draft PR still in draft.", 181 | ) 182 | 183 | def test_time_in_draft_with_attribute_error_scenario(self): 184 | """ 185 | Test measure_time_in_draft to ensure it doesn't raise AttributeError when called 186 | with issue structure similar to what get_per_issue_metrics passes. 187 | This test reproduces the original bug scenario. 188 | """ 189 | # This simulates the actual issue structure passed from get_per_issue_metrics 190 | issue_search_result = MagicMock() 191 | issue_search_result.issue = MagicMock(spec=github3.issues.Issue) 192 | issue_search_result.issue.state = "open" 193 | issue_search_result.issue.events.return_value = [ 194 | MagicMock( 195 | event="convert_to_draft", 196 | created_at=datetime(2021, 1, 1, tzinfo=pytz.utc), 197 | ), 198 | ] 199 | 200 | # This should NOT raise AttributeError: events 201 | with unittest.mock.patch("time_in_draft.datetime") as mock_datetime: 202 | mock_datetime.now.return_value = datetime(2021, 1, 4, tzinfo=pytz.utc) 203 | result = measure_time_in_draft(issue_search_result) 204 | expected = timedelta(days=3) 205 | self.assertEqual(result, expected, "The time in draft should be 3 days.") 206 | 207 | def test_time_in_draft_with_iterator_events(self): 208 | """ 209 | Test measure_time_in_draft with events() returning an iterator instead of a list. 210 | This test ensures the function works correctly when events() returns an iterator 211 | (as it does in the real GitHub API), which can only be consumed once. 
212 | """ 213 | # Set up issue created_at time 214 | self.issue.issue.created_at = datetime(2021, 1, 1, tzinfo=pytz.utc) 215 | 216 | # Create an iterator of events (simulating real GitHub API behavior) 217 | def events_iterator(): 218 | return iter( 219 | [ 220 | MagicMock( 221 | event="convert_to_draft", 222 | created_at=datetime(2021, 1, 1, tzinfo=pytz.utc), 223 | ), 224 | MagicMock( 225 | event="ready_for_review", 226 | created_at=datetime(2021, 1, 3, tzinfo=pytz.utc), 227 | ), 228 | ] 229 | ) 230 | 231 | self.issue.issue.events = events_iterator 232 | 233 | result = measure_time_in_draft(self.issue) 234 | expected = timedelta(days=2) 235 | self.assertEqual( 236 | result, 237 | expected, 238 | "The time in draft should be 2 days when events() returns an iterator.", 239 | ) 240 | 241 | 242 | class TestGetStatsTimeInDraft(unittest.TestCase): 243 | """ 244 | Unit tests for the get_stats_time_in_draft function. 245 | """ 246 | 247 | def test_get_stats_time_in_draft_with_data(self): 248 | """ 249 | Test get_stats_time_in_draft with valid draft times. 250 | """ 251 | issues = [ 252 | MagicMock(time_in_draft=timedelta(days=1)), 253 | MagicMock(time_in_draft=timedelta(days=2)), 254 | MagicMock(time_in_draft=timedelta(days=3)), 255 | ] 256 | 257 | result = get_stats_time_in_draft(issues) 258 | expected = { 259 | "avg": timedelta(days=2), 260 | "med": timedelta(days=2), 261 | "90p": timedelta(days=2, seconds=69120), 262 | } 263 | 264 | self.assertEqual( 265 | result, expected, "The statistics for time in draft are incorrect." 266 | ) 267 | 268 | def test_get_stats_time_in_draft_no_data(self): 269 | """ 270 | Test get_stats_time_in_draft with no draft times. 271 | """ 272 | issues = [ 273 | MagicMock(time_in_draft=None), 274 | MagicMock(time_in_draft=None), 275 | ] 276 | 277 | result = get_stats_time_in_draft(issues) 278 | self.assertIsNone( 279 | result, "The result should be None when there are no draft times." 280 | ) 281 | 282 | def test_get_stats_time_in_draft_empty_list(self): 283 | """ 284 | Test get_stats_time_in_draft with an empty list of issues. 285 | """ 286 | issues = [] 287 | 288 | result = get_stats_time_in_draft(issues) 289 | self.assertIsNone( 290 | result, "The result should be None when the list of issues is empty." 291 | ) 292 | 293 | 294 | if __name__ == "__main__": 295 | unittest.main() 296 | --------------------------------------------------------------------------------