├── .github ├── CODEOWNERS ├── linters │ ├── .isort.cfg │ ├── trivy.yaml │ ├── .shellcheckrc │ ├── .jscpd.json │ ├── .flake8 │ ├── .mypy.ini │ ├── .textlintrc │ ├── zizmor.yaml │ ├── .markdown-lint.yml │ ├── .yaml-lint.yml │ └── .python-lint ├── ISSUE_TEMPLATE │ ├── config.yml │ ├── feature_request.yml │ └── bug_report.yml ├── workflows │ ├── docker-ci.yml │ ├── pr-title.yml │ ├── auto-labeler.yml │ ├── stale.yaml │ ├── python-ci.yml │ ├── super-linter.yaml │ ├── contributors_report.yaml │ ├── scorecard.yml │ ├── copilot-setup-steps.yml │ └── release.yml ├── pull_request_template.md ├── dependabot.yml ├── copilot-instructions.md └── release-drafter.yml ├── .coveragerc ├── requirements.txt ├── requirements-test.txt ├── .vscode └── settings.json ├── action.yml ├── .dockerignore ├── .env-example ├── Makefile ├── Dockerfile ├── LICENSE ├── exceptions.py ├── test_env_get_bool.py ├── .gitignore ├── auth.py ├── test_auth.py ├── CONTRIBUTING.md ├── test_exceptions.py ├── dependabot_file.py ├── env.py ├── evergreen.py ├── test_dependabot_file.py └── test_evergreen.py /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @github/ospo-github-actions 2 | -------------------------------------------------------------------------------- /.github/linters/.isort.cfg: -------------------------------------------------------------------------------- 1 | [isort] 2 | profile = black 3 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = 3 | # omit test files 4 | test_*.py -------------------------------------------------------------------------------- /.github/linters/trivy.yaml: -------------------------------------------------------------------------------- 1 | scan: 2 | skip-dirs: 3 | - .mypy_cache 4 | -------------------------------------------------------------------------------- 
/.github/linters/.shellcheckrc: -------------------------------------------------------------------------------- 1 | # Don't suggest [ -n "$VAR" ] over [ ! -z "$VAR" ] 2 | disable=SC2129 3 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | github3.py==4.0.1 2 | requests==2.32.5 3 | python-dotenv==1.2.1 4 | ruamel.yaml==0.18.16 5 | -------------------------------------------------------------------------------- /.github/linters/.jscpd.json: -------------------------------------------------------------------------------- 1 | { 2 | "threshold": 25, 3 | "ignore": ["/github/workspace/test*"], 4 | "absolute": true 5 | } 6 | -------------------------------------------------------------------------------- /.github/linters/.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 150 3 | exclude = venv,.venv,.git,__pycache__ 4 | extend-ignore = C901 5 | statistics = True 6 | -------------------------------------------------------------------------------- /.github/linters/.mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | disable_error_code = attr-defined, import-not-found 3 | 4 | [mypy-github3.*] 5 | ignore_missing_imports = True 6 | -------------------------------------------------------------------------------- /.github/linters/.textlintrc: -------------------------------------------------------------------------------- 1 | { 2 | "filters": { 3 | "comments": true 4 | }, 5 | "rules": { 6 | "terminology": { 7 | "severity": "warning" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /.github/linters/zizmor.yaml: -------------------------------------------------------------------------------- 1 | rules: 2 | dangerous-triggers: # to allow pull_request_target for 
auto-labelling fork pull requests 3 | ignore: 4 | - auto-labeler.yml 5 | - pr-title.yml 6 | - release.yml 7 | -------------------------------------------------------------------------------- /requirements-test.txt: -------------------------------------------------------------------------------- 1 | black==25.12.0 2 | flake8==7.3.0 3 | mypy==1.19.1 4 | mypy-extensions==1.1.0 5 | pylint==4.0.4 6 | pytest==9.0.2 7 | pytest-cov==7.0.0 8 | types-PyYAML==6.0.12.20250915 9 | types-requests==2.32.4.20250913 10 | -------------------------------------------------------------------------------- /.github/linters/.markdown-lint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # line length 3 | MD013: false 4 | # singe h1 5 | MD025: false 6 | # duplicate headers 7 | MD024: false 8 | # MD033/no-inline-html - Inline HTML 9 | MD033: 10 | # Allowed elements 11 | allowed_elements: [br, li, ul] 12 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.testing.pytestArgs": ["."], 3 | "python.testing.unittestEnabled": false, 4 | "python.testing.pytestEnabled": true, 5 | "[python]": { 6 | "editor.defaultFormatter": "ms-python.black-formatter" 7 | }, 8 | "python.formatting.provider": "none" 9 | } 10 | -------------------------------------------------------------------------------- /action.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Evergreen action" 3 | author: "github" 4 | description: "A GitHub Action to request dependabot enablement on eligible repositories in an organization." 
5 | runs: 6 | using: "docker" 7 | image: "docker://ghcr.io/github/evergreen:v1" 8 | branding: 9 | icon: "file-plus" 10 | color: "green" 11 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | 3 | contact_links: 4 | - name: Ask a question 5 | url: https://github.com/github/evergreen/discussions/new 6 | about: Ask a question or start a discussion 7 | - name: GitHub OSPO GitHub Action Overall Issue 8 | url: https://github.com/github/github-ospo/issues/new 9 | about: File issue for multiple GitHub OSPO GitHub Actions 10 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Application specific files 2 | test_*.py 3 | 4 | # Python 5 | *.pyc 6 | __pycache__/ 7 | *.pyo 8 | *.pyd 9 | 10 | # Common 11 | *.md 12 | docker-compose.yml 13 | Dockerfile* 14 | .env* 15 | Makefile 16 | 17 | # Logs 18 | logs 19 | *.log 20 | 21 | # IDEs 22 | .vscode/ 23 | .idea/ 24 | 25 | # Dependency directories 26 | node_modules/ 27 | .venv/ 28 | 29 | ## Cache directories 30 | .parcel-cache 31 | 32 | # git 33 | .git 34 | .gitattributes 35 | .gitignore 36 | .github/ 37 | -------------------------------------------------------------------------------- /.github/workflows/docker-ci.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Docker Image CI 3 | 4 | on: 5 | push: 6 | branches: [main] 7 | pull_request: 8 | branches: [main] 9 | 10 | permissions: 11 | contents: read 12 | 13 | jobs: 14 | build: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v6.0.1 18 | with: 19 | persist-credentials: false 20 | - name: Build the Docker image 21 | run: docker build . 
--file Dockerfile --platform linux/amd64 22 | -------------------------------------------------------------------------------- /.github/workflows/pr-title.yml: -------------------------------------------------------------------------------- 1 | ## Reference: https://github.com/amannn/action-semantic-pull-request 2 | --- 3 | name: "Lint PR Title" 4 | on: 5 | pull_request_target: 6 | types: [opened, reopened, edited, synchronize] 7 | permissions: 8 | contents: read 9 | jobs: 10 | main: 11 | permissions: 12 | contents: read 13 | pull-requests: read 14 | statuses: write 15 | uses: github/ospo-reusable-workflows/.github/workflows/pr-title.yaml@26eec20abba5ae806698592c79628f6906da372c 16 | secrets: 17 | github-token: ${{ secrets.GITHUB_TOKEN }} 18 | -------------------------------------------------------------------------------- /.env-example: -------------------------------------------------------------------------------- 1 | BATCH_SIZE = "" 2 | BODY = "" 3 | COMMIT_MESSAGE = "" 4 | CREATED_AFTER_DATE = "" 5 | DEPENDABOT_CONFIG_FILE = "" 6 | DRY_RUN = "" 7 | ENABLE_SECURITY_UPDATES = "" 8 | EXEMPT_ECOSYSTEMS = "" 9 | EXEMPT_REPOS = "" 10 | FILTER_VISIBILITY = "" 11 | GH_TOKEN = "" 12 | GROUP_DEPENDENCIES = "" 13 | ORGANIZATION = "" 14 | PROJECT_ID = "" 15 | REPO_SPECIFIC_EXEMPTIONS = "" 16 | REPOSITORY = "" 17 | TITLE = "" 18 | TYPE = "" 19 | UPDATE_EXISTING = "" 20 | 21 | # GITHUB APP 22 | GH_APP_ID = "" 23 | GH_INSTALLATION_ID = "" 24 | GH_PRIVATE_KEY = "" 25 | GITHUB_APP_ENTERPRISE_ONLY = "" 26 | -------------------------------------------------------------------------------- /.github/workflows/auto-labeler.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Auto Labeler 3 | on: 4 | # pull_request_target event is required for autolabeler to support all PRs including forks 5 | pull_request_target: 6 | types: [opened, reopened, edited, synchronize] 7 | permissions: 8 | contents: read 9 | jobs: 10 | main: 11 | 
permissions: 12 | contents: read 13 | pull-requests: write 14 | uses: github/ospo-reusable-workflows/.github/workflows/auto-labeler.yaml@26eec20abba5ae806698592c79628f6906da372c 15 | with: 16 | config-name: release-drafter.yml 17 | secrets: 18 | github-token: ${{ secrets.GITHUB_TOKEN }} 19 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: test 2 | test: 3 | pytest -v --cov=. --cov-config=.coveragerc --cov-fail-under=80 --cov-report term-missing 4 | 5 | .PHONY: clean 6 | clean: 7 | rm -rf .pytest_cache .coverage __pycache__ 8 | 9 | .PHONY: lint 10 | lint: 11 | # stop the build if there are Python syntax errors or undefined names 12 | flake8 . --config=.github/linters/.flake8 --count --select=E9,F63,F7,F82 --show-source 13 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 14 | flake8 . --config=.github/linters/.flake8 --count --exit-zero --max-complexity=15 --max-line-length=150 15 | isort --settings-file=.github/linters/.isort.cfg . 16 | pylint --rcfile=.github/linters/.python-lint --fail-under=9.0 *.py 17 | mypy --config-file=.github/linters/.mypy.ini *.py 18 | black . 19 | -------------------------------------------------------------------------------- /.github/workflows/stale.yaml: -------------------------------------------------------------------------------- 1 | name: "Close stale issues" 2 | on: 3 | schedule: 4 | - cron: "30 1 * * *" 5 | 6 | permissions: 7 | contents: read 8 | 9 | jobs: 10 | stale: 11 | runs-on: ubuntu-latest 12 | permissions: 13 | issues: write 14 | pull-requests: read 15 | steps: 16 | - uses: actions/stale@v10.1.1 17 | with: 18 | stale-issue-message: "This issue is stale because it has been open 21 days with no activity. Remove stale label or comment or this will be closed in 14 days." 
19 | close-issue-message: "This issue was closed because it has been stalled for 35 days with no activity." 20 | days-before-stale: 21 21 | days-before-close: 14 22 | days-before-pr-close: -1 23 | exempt-issue-labels: keep 24 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | #checkov:skip=CKV_DOCKER_2 2 | #checkov:skip=CKV_DOCKER_3 3 | #trivy:ignore:AVD-DS-0002 4 | FROM python:3.14.0-slim@sha256:0aecac02dc3d4c5dbb024b753af084cafe41f5416e02193f1ce345d671ec966e 5 | LABEL org.opencontainers.image.source https://github.com/github/evergreen 6 | 7 | WORKDIR /action/workspace 8 | COPY requirements.txt *.py /action/workspace/ 9 | 10 | RUN python3 -m pip install --no-cache-dir -r requirements.txt \ 11 | && apt-get -y update \ 12 | && apt-get -y install --no-install-recommends git=1:2.47.3-0+deb13u1 \ 13 | && rm -rf /var/lib/apt/lists/* 14 | 15 | # Add a simple healthcheck to satisfy container scanners 16 | HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \ 17 | CMD python3 -c "import os,sys; sys.exit(0 if os.path.exists('/action/workspace/evergreen.py') else 1)" 18 | 19 | CMD ["/action/workspace/evergreen.py"] 20 | ENTRYPOINT ["python3", "-u"] 21 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Pull Request 2 | 3 | 11 | 12 | ## Proposed Changes 13 | 14 | 15 | 16 | ## Readiness Checklist 17 | 18 | ### Author/Contributor 19 | 20 | - [ ] If documentation is needed for this change, has that been included in this pull request 21 | - [ ] run `make lint` and fix any issues that you have introduced 22 | - [ ] run `make test` and ensure you have test coverage for the lines you are introducing 23 | - [ ] If publishing new data to the public (scorecards, security scan results, 
code quality results, live dashboards, etc.), please request review from `@jeffrey-luszcz` 24 | 25 | ### Reviewer 26 | 27 | - [ ] Label as either `fix`, `documentation`, `enhancement`, `infrastructure`, `maintenance` or `breaking` 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 GitHub 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /.github/workflows/python-ci.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 3 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 4 | 5 | name: Python package 6 | 7 | on: 8 | push: 9 | branches: [main] 10 | pull_request: 11 | branches: [main] 12 | 13 | permissions: 14 | contents: read 15 | 16 | jobs: 17 | build: 18 | runs-on: ubuntu-latest 19 | strategy: 20 | matrix: 21 | python-version: [3.11, 3.12] 22 | steps: 23 | - uses: actions/checkout@v6.0.1 24 | with: 25 | persist-credentials: false 26 | - name: Set up Python ${{ matrix.python-version }} 27 | uses: actions/setup-python@v6.1.0 28 | with: 29 | python-version: ${{ matrix.python-version }} 30 | - name: Install dependencies 31 | run: | 32 | pip install -r requirements.txt -r requirements-test.txt 33 | - name: Lint with flake8 and pylint 34 | run: | 35 | make lint 36 | - name: Test with pytest 37 | run: | 38 | make test 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | description: Suggest an idea for this project 4 | labels: 5 | - enhancement 6 | body: 7 | - type: textarea 8 | attributes: 9 | label: Is your feature request related to a problem? 10 | description: A clear and concise description of what the problem is. Please describe. 11 | placeholder: | 12 | Ex. I'm always frustrated when [...] 13 | validations: 14 | required: false 15 | 16 | - type: textarea 17 | attributes: 18 | label: Describe the solution you'd like 19 | description: A clear and concise description of what you want to happen. 
20 | validations: 21 | required: true 22 | 23 | - type: textarea 24 | attributes: 25 | label: Describe alternatives you've considered 26 | description: A clear and concise description of any alternative solutions or features you've considered. 27 | validations: 28 | required: false 29 | 30 | - type: textarea 31 | attributes: 32 | label: Additional context 33 | description: Add any other context or screenshots about the feature request here. 34 | validations: 35 | required: false 36 | -------------------------------------------------------------------------------- /.github/workflows/super-linter.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Lint Code Base 3 | 4 | on: 5 | pull_request: 6 | branches: [main] 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | build: 13 | name: Lint Code Base 14 | runs-on: ubuntu-latest 15 | permissions: 16 | contents: read 17 | packages: read 18 | statuses: write 19 | 20 | steps: 21 | - name: Checkout Code 22 | uses: actions/checkout@v6.0.1 23 | with: 24 | fetch-depth: 0 25 | persist-credentials: false 26 | - name: Setup Python 27 | uses: actions/setup-python@v6.1.0 28 | with: 29 | python-version: "3.12" 30 | - name: Install dependencies 31 | run: | 32 | pip install -r requirements.txt -r requirements-test.txt 33 | - name: Lint Code Base 34 | uses: super-linter/super-linter@502f4fe48a81a392756e173e39a861f8c8efe056 # v8.3.0 35 | env: 36 | DEFAULT_BRANCH: main 37 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 38 | GITHUB_ACTIONS_COMMAND_ARGS: -shellcheck= 39 | FIX_MARKDOWN_PRETTIER: true 40 | VALIDATE_BIOME_FORMAT: false 41 | VALIDATE_BIOME_LINT: false 42 | VALIDATE_PYTHON_RUFF_FORMAT: false 43 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | - package-ecosystem: "pip" 5 | directory: "/" 6 | 
schedule: 7 | interval: "weekly" 8 | cooldown: 9 | default-days: 7 10 | commit-message: 11 | prefix: "chore(deps)" 12 | labels: ["python", "dependencies"] 13 | groups: 14 | dependencies: 15 | applies-to: version-updates 16 | update-types: 17 | - "minor" 18 | - "patch" 19 | - package-ecosystem: "github-actions" 20 | directory: "/" 21 | schedule: 22 | interval: "weekly" 23 | cooldown: 24 | default-days: 7 25 | commit-message: 26 | prefix: "chore(deps)" 27 | labels: ["github_actions", "dependencies"] 28 | groups: 29 | dependencies: 30 | applies-to: version-updates 31 | update-types: 32 | - "minor" 33 | - "patch" 34 | - package-ecosystem: "docker" 35 | directory: "/" 36 | schedule: 37 | interval: "weekly" 38 | cooldown: 39 | default-days: 7 40 | commit-message: 41 | prefix: "chore(deps)" 42 | labels: ["docker", "dependencies"] 43 | groups: 44 | dependencies: 45 | applies-to: version-updates 46 | update-types: 47 | - "minor" 48 | - "patch" 49 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | description: Create a report to help us improve 4 | labels: 5 | - bug 6 | body: 7 | - type: textarea 8 | attributes: 9 | label: Describe the bug 10 | description: A clear and concise description of what the bug is. 11 | validations: 12 | required: true 13 | 14 | - type: textarea 15 | attributes: 16 | label: To Reproduce 17 | description: Steps to reproduce the behavior 18 | placeholder: | 19 | 1. Go to '...' 20 | 2. Click on '....' 21 | 3. Scroll down to '....' 22 | 4. See error 23 | validations: 24 | required: true 25 | 26 | - type: textarea 27 | attributes: 28 | label: Expected behavior 29 | description: A clear and concise description of what you expected to happen. 
30 | validations: 31 | required: true 32 | 33 | - type: textarea 34 | attributes: 35 | label: Screenshots 36 | description: If applicable, add screenshots to help explain your problem. 37 | validations: 38 | required: false 39 | 40 | - type: textarea 41 | attributes: 42 | label: Additional context 43 | description: Add any other context about the problem here. 44 | validations: 45 | required: false 46 | -------------------------------------------------------------------------------- /.github/copilot-instructions.md: -------------------------------------------------------------------------------- 1 | # Copilot Instructions 2 | 3 | This is a GitHub Action that given an organization, team, or specified repositories, opens an issue/PR if dependabot is not enabled, or there are more package ecosystems that could be added. It also enables [automated security updates](https://docs.github.com/en/code-security/dependabot/dependabot-security-updates/configuring-dependabot-security-updates#managing-dependabot-security-updates-for-your-repositories) for the repository. Please follow these guidelines when contributing: 4 | 5 | ## Code Standards 6 | 7 | ### Required Before Each Commit 8 | 9 | - Run `make lint` before committing any changes to ensure proper code linting and formatting. 10 | 11 | ### Development Flow 12 | 13 | - Lint: `make lint` 14 | - Test: `make test` 15 | 16 | ## Repository Structure 17 | 18 | - `Makefile`: Contains commands for linting, testing, and other tasks 19 | - `requirements.txt`: Python dependencies for the project 20 | - `requirements-test.txt`: Python dependencies for testing 21 | - `README.md`: Project documentation and setup instructions 22 | - `setup.py`: Python package setup configuration 23 | - `test_*.py`: Python test files matching the naming convention for test discovery 24 | 25 | ## Key Guidelines 26 | 27 | 1. Follow Python best practices and idiomatic patterns 28 | 2. Maintain existing code structure and organization 29 | 3. 
Write unit tests for new functionality. 30 | 4. Document changes to environment variables in the `README.md` file. 31 | -------------------------------------------------------------------------------- /.github/workflows/contributors_report.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Monthly contributor report 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: "3 2 1 * *" 7 | 8 | permissions: 9 | issues: read 10 | 11 | jobs: 12 | contributor_report: 13 | name: contributor report 14 | runs-on: ubuntu-latest 15 | permissions: 16 | issues: write 17 | steps: 18 | - name: Get dates for last month 19 | shell: bash 20 | run: | 21 | # Calculate the first day of the previous month 22 | start_date=$(date -d "last month" +%Y-%m-01) 23 | 24 | # Calculate the last day of the previous month 25 | end_date=$(date -d "$start_date +1 month -1 day" +%Y-%m-%d) 26 | 27 | #Set an environment variable with the date range 28 | echo "START_DATE=$start_date" >> "$GITHUB_ENV" 29 | echo "END_DATE=$end_date" >> "$GITHUB_ENV" 30 | 31 | - name: Run contributor action 32 | uses: github/contributors@0d5adc3833e89ee1f4145744f5d69313cf5ea238 33 | env: 34 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 35 | START_DATE: ${{ env.START_DATE }} 36 | END_DATE: ${{ env.END_DATE }} 37 | REPOSITORY: github/evergreen 38 | SPONSOR_INFO: "true" 39 | 40 | - name: Create issue 41 | uses: peter-evans/create-issue-from-file@fca9117c27cdc29c6c4db3b86c48e4115a786710 42 | with: 43 | title: Monthly contributor report 44 | token: ${{ secrets.GITHUB_TOKEN }} 45 | content-filepath: ./contributors.md 46 | assignees: zkoppert 47 | -------------------------------------------------------------------------------- /.github/workflows/scorecard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Scorecard supply-chain security 3 | on: 4 | workflow_dispatch: 5 | # For Branch-Protection check (for repo branch protection or 
rules). 6 | # Only the default branch is supported. See 7 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection 8 | branch_protection_rule: 9 | # To guarantee Maintained check is occasionally updated. See 10 | # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained 11 | schedule: 12 | - cron: "29 11 * * 6" 13 | push: 14 | branches: [main] 15 | 16 | permissions: read-all 17 | 18 | jobs: 19 | analysis: 20 | name: Merge to Main Scorecard analysis 21 | runs-on: ubuntu-latest 22 | permissions: 23 | security-events: write 24 | id-token: write 25 | 26 | steps: 27 | - name: "Checkout code" 28 | uses: actions/checkout@v6.0.1 29 | with: 30 | persist-credentials: false 31 | 32 | - name: "Run analysis" 33 | uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a 34 | with: 35 | results_file: results.sarif 36 | results_format: sarif 37 | publish_results: true 38 | - name: "Upload artifact" 39 | uses: actions/upload-artifact@v6.0.0 40 | with: 41 | name: SARIF file 42 | path: results.sarif 43 | retention-days: 5 44 | - name: "Upload to code-scanning" 45 | uses: github/codeql-action/upload-sarif@fe4161a26a8629af62121b670040955b330f9af2 46 | with: 47 | sarif_file: results.sarif 48 | -------------------------------------------------------------------------------- /.github/workflows/copilot-setup-steps.yml: -------------------------------------------------------------------------------- 1 | name: "Copilot Setup Steps" 2 | 3 | # Automatically run the setup steps when they are changed to allow for easy validation, and 4 | # allow manual testing through the repository's "Actions" tab 5 | on: 6 | workflow_dispatch: 7 | push: 8 | paths: 9 | - .github/workflows/copilot-setup-steps.yml 10 | pull_request: 11 | paths: 12 | - .github/workflows/copilot-setup-steps.yml 13 | 14 | # Set the permissions to the lowest permissions possible needed for your steps. 15 | # Copilot will be given its own token for its operations. 
16 | permissions: 17 | # If you want to clone the repository as part of your setup steps, for example to install dependencies, you'll need the `contents: read` permission. If you don't clone the repository in your setup steps, Copilot will do this for you automatically after the steps complete. 18 | contents: read 19 | 20 | jobs: 21 | # The job MUST be called `copilot-setup-steps` or it will not be picked up by Copilot. 22 | copilot-setup-steps: 23 | runs-on: ubuntu-latest 24 | 25 | # You can define any steps you want, and they will run before the agent starts. 26 | # If you do not check out your code, Copilot will do this for you. 27 | steps: 28 | - name: Checkout code 29 | uses: actions/checkout@v6.0.1 30 | with: 31 | persist-credentials: false 32 | 33 | - name: Set up Python 34 | uses: actions/setup-python@v6.1.0 35 | with: 36 | python-version: 3.12 37 | 38 | - name: Install dependencies 39 | run: | 40 | pip install -r requirements.txt -r requirements-test.txt 41 | -------------------------------------------------------------------------------- /.github/linters/.yaml-lint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ########################################### 3 | # These are the rules used for # 4 | # linting all the yaml files in the stack # 5 | # NOTE: # 6 | # You can disable line with: # 7 | # # yamllint disable-line # 8 | ########################################### 9 | rules: 10 | braces: 11 | level: warning 12 | min-spaces-inside: 0 13 | max-spaces-inside: 0 14 | min-spaces-inside-empty: 1 15 | max-spaces-inside-empty: 5 16 | brackets: 17 | level: warning 18 | min-spaces-inside: 0 19 | max-spaces-inside: 0 20 | min-spaces-inside-empty: 1 21 | max-spaces-inside-empty: 5 22 | colons: 23 | level: warning 24 | max-spaces-before: 0 25 | max-spaces-after: 1 26 | commas: 27 | level: warning 28 | max-spaces-before: 0 29 | min-spaces-after: 1 30 | max-spaces-after: 1 31 | comments: disable 32 | 
comments-indentation: disable 33 | document-end: disable 34 | document-start: 35 | level: warning 36 | present: true 37 | empty-lines: 38 | level: warning 39 | max: 2 40 | max-start: 0 41 | max-end: 0 42 | hyphens: 43 | level: warning 44 | max-spaces-after: 1 45 | indentation: 46 | level: warning 47 | spaces: consistent 48 | indent-sequences: true 49 | check-multi-line-strings: false 50 | key-duplicates: enable 51 | line-length: 52 | level: warning 53 | max: 1024 54 | allow-non-breakable-words: true 55 | allow-non-breakable-inline-mappings: true 56 | new-line-at-end-of-file: disable 57 | new-lines: 58 | type: unix 59 | trailing-spaces: disable 60 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name-template: "v$RESOLVED_VERSION" 3 | tag-template: "v$RESOLVED_VERSION" 4 | template: | 5 | # Changelog 6 | $CHANGES 7 | 8 | See details of [all code changes](https://github.com/$OWNER/$REPOSITORY/compare/$PREVIOUS_TAG...v$RESOLVED_VERSION) since previous release 9 | 10 | categories: 11 | - title: "🚀 Features" 12 | labels: 13 | - "feature" 14 | - "enhancement" 15 | - title: "🐛 Bug Fixes" 16 | labels: 17 | - "fix" 18 | - "bugfix" 19 | - "bug" 20 | - title: "🧰 Maintenance" 21 | labels: 22 | - "infrastructure" 23 | - "automation" 24 | - "documentation" 25 | - "dependencies" 26 | - "maintenance" 27 | - "revert" 28 | - title: "🏎 Performance" 29 | label: "performance" 30 | change-template: "- $TITLE @$AUTHOR (#$NUMBER)" 31 | version-resolver: 32 | major: 33 | labels: 34 | - "breaking" 35 | - "major" 36 | minor: 37 | labels: 38 | - "enhancement" 39 | - "feature" 40 | - "minor" 41 | patch: 42 | labels: 43 | - "documentation" 44 | - "fix" 45 | - "maintenance" 46 | - "patch" 47 | default: patch 48 | autolabeler: 49 | - label: "automation" 50 | title: 51 | - "/^(build|ci|perf|refactor|test).*/i" 52 | - label: "enhancement" 53 | 
title: 54 | - "/^(style).*/i" 55 | - label: "documentation" 56 | title: 57 | - "/^(docs).*/i" 58 | - label: "feature" 59 | title: 60 | - "/^(feat).*/i" 61 | - label: "fix" 62 | title: 63 | - "/^(fix).*/i" 64 | - label: "infrastructure" 65 | title: 66 | - "/^(infrastructure).*/i" 67 | - label: "maintenance" 68 | title: 69 | - "/^(chore|maintenance).*/i" 70 | - label: "revert" 71 | title: 72 | - "/^(revert).*/i" 73 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Release 3 | on: 4 | workflow_dispatch: 5 | pull_request_target: 6 | types: [closed] 7 | branches: [main] 8 | permissions: 9 | contents: read 10 | jobs: 11 | release: 12 | permissions: 13 | contents: write 14 | pull-requests: read 15 | uses: github/ospo-reusable-workflows/.github/workflows/release.yaml@26eec20abba5ae806698592c79628f6906da372c 16 | with: 17 | publish: true 18 | release-config-name: release-drafter.yml 19 | secrets: 20 | github-token: ${{ secrets.GITHUB_TOKEN }} 21 | release_image: 22 | needs: release 23 | permissions: 24 | contents: read 25 | packages: write 26 | id-token: write 27 | attestations: write 28 | uses: github/ospo-reusable-workflows/.github/workflows/release-image.yaml@26eec20abba5ae806698592c79628f6906da372c 29 | with: 30 | image-name: ${{ github.repository }} 31 | full-tag: ${{ needs.release.outputs.full-tag }} 32 | short-tag: ${{ needs.release.outputs.short-tag }} 33 | secrets: 34 | github-token: ${{ secrets.GITHUB_TOKEN }} 35 | image-registry: ghcr.io 36 | image-registry-username: ${{ github.actor }} 37 | image-registry-password: ${{ secrets.GITHUB_TOKEN }} 38 | release_discussion: 39 | needs: release 40 | permissions: 41 | contents: read 42 | discussions: write 43 | uses: github/ospo-reusable-workflows/.github/workflows/release-discussion.yaml@26eec20abba5ae806698592c79628f6906da372c 44 | with: 45 | full-tag: ${{ 
"""Custom exceptions for the evergreen application."""

import github3.exceptions


class OptionalFileNotFoundError(github3.exceptions.NotFoundError):
    """Raised when an optional file is absent from a repository.

    Subclassing github3.exceptions.NotFoundError keeps the exception
    catchable by existing handlers while giving callers an explicit name
    for the "missing but expected" case (optional configuration or
    dependency files that not every repository ships).

    Args:
        resp: The HTTP response object from the failed request
    """


def check_optional_file(repo, filename):
    """
    Example utility function demonstrating OptionalFileNotFoundError usage.

    Fetches an optional file from *repo*, tolerating its absence by raising
    the more descriptive exception type.

    Args:
        repo: GitHub repository object
        filename: Name of the optional file to check

    Returns:
        File contents object if file exists, None if optional file is missing

    Raises:
        OptionalFileNotFoundError: When the file is not found (expected for optional files)
        Other exceptions: For unexpected errors (permissions, network issues, etc.)
    """
    try:
        contents = repo.file_contents(filename)
    except github3.exceptions.NotFoundError as err:
        # Re-raise under the more specific type for semantic clarity.
        raise OptionalFileNotFoundError(resp=err.response) from err

    # Real contents objects expose a size; treat empty files as missing.
    if hasattr(contents, "size"):
        return contents if contents.size > 0 else None
    # Test doubles may hand back a plain truthy/falsy value instead.
    return contents or None
"""Test the get_bool_env_var function"""

import os
import unittest
from unittest.mock import patch

from env import get_bool_env_var


class TestEnv(unittest.TestCase):
    """Test the get_bool_env_var function"""

    @patch.dict(os.environ, {"TEST_BOOL": "true"}, clear=True)
    def test_get_bool_env_var_that_exists_and_is_true(self):
        """A variable set to "true" is read back as True."""
        self.assertTrue(get_bool_env_var("TEST_BOOL", False))

    @patch.dict(os.environ, {"TEST_BOOL": "false"}, clear=True)
    def test_get_bool_env_var_that_exists_and_is_false(self):
        """A variable set to "false" is read back as False."""
        self.assertFalse(get_bool_env_var("TEST_BOOL", False))

    @patch.dict(os.environ, {"TEST_BOOL": "nope"}, clear=True)
    def test_get_bool_env_var_that_exists_and_is_false_due_to_invalid_value(self):
        """A value that is not "true" is treated as False."""
        self.assertFalse(get_bool_env_var("TEST_BOOL", False))

    @patch.dict(os.environ, {"TEST_BOOL": "false"}, clear=True)
    def test_get_bool_env_var_that_does_not_exist_and_default_value_returns_true(self):
        """A missing variable falls back to the default: True."""
        self.assertTrue(get_bool_env_var("DOES_NOT_EXIST", True))

    @patch.dict(os.environ, {"TEST_BOOL": "true"}, clear=True)
    def test_get_bool_env_var_that_does_not_exist_and_default_value_returns_false(self):
        """A missing variable falls back to the default: False."""
        self.assertFalse(get_bool_env_var("DOES_NOT_EXIST", False))


if __name__ == "__main__":
    unittest.main()
35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .nox/ 46 | .coverage 47 | .coverage.* 48 | .cache 49 | nosetests.xml 50 | coverage.xml 51 | *.cover 52 | *.py,cover 53 | .hypothesis/ 54 | .pytest_cache/ 55 | cover/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | .pybuilder/ 79 | target/ 80 | 81 | # Jupyter Notebook 82 | .ipynb_checkpoints 83 | 84 | # IPython 85 | profile_default/ 86 | ipython_config.py 87 | 88 | # pyenv 89 | # For a library or package, you might want to ignore these files since the code is 90 | # intended to run in multiple environments; otherwise, check them in: 91 | # .python-version 92 | 93 | # pipenv 94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 97 | # install all needed dependencies. 98 | #Pipfile.lock 99 | 100 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 101 | __pypackages__/ 102 | 103 | # Celery stuff 104 | celerybeat-schedule 105 | celerybeat.pid 106 | 107 | # SageMath parsed files 108 | *.sage.py 109 | 110 | # Environments 111 | .env 112 | .venv 113 | env/ 114 | venv/ 115 | ENV/ 116 | env.bak/ 117 | venv.bak/ 118 | 119 | # Spyder project settings 120 | .spyderproject 121 | .spyproject 122 | 123 | # Rope project settings 124 | .ropeproject 125 | 126 | # mkdocs documentation 127 | /site 128 | 129 | # mypy 130 | .mypy_cache/ 131 | .dmypy.json 132 | dmypy.json 133 | 134 | # Pyre type checker 135 | .pyre/ 136 | 137 | # pytype static type analyzer 138 | .pytype/ 139 | 140 | # Cython debug symbols 141 | cython_debug/ 142 | 143 | # Mac 144 | .DS_Store 145 | 146 | # Devenv 147 | .devenv* 148 | devenv.local.nix 149 | 150 | # direnv 151 | .direnv 152 | 153 | # pre-commit 154 | .pre-commit-config.yaml 155 | 156 | # devenv 157 | .envrc 158 | devenv.* 159 | .devenv* 160 | 161 | # Local testing files 162 | dependabot-output.yaml 163 | -------------------------------------------------------------------------------- /auth.py: -------------------------------------------------------------------------------- 1 | """This is the module that contains functions related to authenticating to GitHub with a personal access token.""" 2 | 3 | import github3 4 | import requests 5 | 6 | 7 | def auth_to_github( 8 | token: str, 9 | gh_app_id: int | None, 10 | gh_app_installation_id: int | None, 11 | gh_app_private_key_bytes: bytes, 12 | ghe: str, 13 | gh_app_enterprise_only: bool, 14 | ) -> github3.GitHub: 15 | """ 16 | Connect to GitHub.com or GitHub Enterprise, depending on env variables. 
def auth_to_github(
    token: str,
    gh_app_id: int | None,
    gh_app_installation_id: int | None,
    gh_app_private_key_bytes: bytes,
    ghe: str,
    gh_app_enterprise_only: bool,
) -> github3.GitHub:
    """
    Connect to GitHub.com or GitHub Enterprise, depending on env variables.

    Args:
        token (str): the GitHub personal access token
        gh_app_id (int | None): the GitHub App ID
        gh_app_installation_id (int | None): the GitHub App Installation ID
        gh_app_private_key_bytes (bytes): the GitHub App Private Key
        ghe (str): the GitHub Enterprise URL
        gh_app_enterprise_only (bool): Set this to true if the GH APP is created on GHE and needs to communicate with GHE api only

    Returns:
        github3.GitHub: the GitHub connection object

    Raises:
        ValueError: when no usable credentials are supplied, or login fails.
    """
    have_app_credentials = bool(
        gh_app_id and gh_app_private_key_bytes and gh_app_installation_id
    )

    if have_app_credentials:
        # App auth: choose the endpoint first, then log in as the installation.
        if ghe and gh_app_enterprise_only:
            connection = github3.github.GitHubEnterprise(url=ghe)
        else:
            connection = github3.github.GitHub()
        connection.login_as_app_installation(
            gh_app_private_key_bytes, gh_app_id, gh_app_installation_id
        )
    elif ghe and token:
        connection = github3.github.GitHubEnterprise(url=ghe, token=token)
    elif token:
        connection = github3.login(token=token)
    else:
        raise ValueError(
            "GH_TOKEN or the set of [GH_APP_ID, GH_APP_INSTALLATION_ID, GH_APP_PRIVATE_KEY] environment variables are not set"
        )

    if not connection:
        raise ValueError("Unable to authenticate to GitHub")
    return connection  # type: ignore


def get_github_app_installation_token(
    ghe: str,
    gh_app_id: str,
    gh_app_private_key_bytes: bytes,
    gh_app_installation_id: str,
) -> str | None:
    """
    Get a GitHub App Installation token.

    API: https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/authenticating-as-a-github-app-installation

    Args:
        ghe (str): the GitHub Enterprise endpoint
        gh_app_id (str): the GitHub App ID
        gh_app_private_key_bytes (bytes): the GitHub App Private Key
        gh_app_installation_id (str): the GitHub App Installation ID

    Returns:
        str | None: the GitHub App token, or None when the request fails.
    """
    headers = github3.apps.create_jwt_headers(gh_app_private_key_bytes, gh_app_id)
    base = f"{ghe}/api/v3" if ghe else "https://api.github.com"
    token_url = f"{base}/app/installations/{gh_app_installation_id}/access_tokens"

    try:
        response = requests.post(token_url, headers=headers, json=None, timeout=5)
        response.raise_for_status()
    except requests.exceptions.RequestException as err:
        # Best-effort: report the failure and let the caller decide.
        print(f"Request failed: {err}")
        return None
    return response.json().get("token")
30 | """ 31 | with self.assertRaises(ValueError) as context_manager: 32 | auth.auth_to_github("", "", "", b"", "", False) 33 | the_exception = context_manager.exception 34 | self.assertEqual( 35 | str(the_exception), 36 | "GH_TOKEN or the set of [GH_APP_ID, GH_APP_INSTALLATION_ID, GH_APP_PRIVATE_KEY] environment variables are not set", 37 | ) 38 | 39 | @patch("github3.github.GitHubEnterprise") 40 | def test_auth_to_github_with_ghe(self, mock_ghe): 41 | """ 42 | Test the auth_to_github function when the GitHub Enterprise URL is provided. 43 | """ 44 | mock_ghe.return_value = "Authenticated to GitHub Enterprise" 45 | result = auth.auth_to_github( 46 | "token", "", "", b"", "https://github.example.com", False 47 | ) 48 | 49 | self.assertEqual(result, "Authenticated to GitHub Enterprise") 50 | 51 | @patch("github3.github.GitHubEnterprise") 52 | def test_auth_to_github_with_ghe_and_ghe_app(self, mock_ghe): 53 | """ 54 | Test the auth_to_github function when the GitHub Enterprise URL is provided and the app was created in GitHub Enterprise URL. 
55 | """ 56 | mock = mock_ghe.return_value 57 | mock.login_as_app_installation = MagicMock(return_value=True) 58 | result = auth.auth_to_github( 59 | "", "123", "123", b"123", "https://github.example.com", True 60 | ) 61 | mock.login_as_app_installation.assert_called_once() 62 | self.assertEqual(result, mock) 63 | 64 | @patch("github3.github.GitHub") 65 | def test_auth_to_github_with_app(self, mock_gh): 66 | """ 67 | Test the auth_to_github function when app credentials are provided 68 | """ 69 | mock = mock_gh.return_value 70 | mock.login_as_app_installation = MagicMock(return_value=True) 71 | result = auth.auth_to_github( 72 | "", "123", "123", b"123", "https://github.example.com", False 73 | ) 74 | mock.login_as_app_installation.assert_called_once() 75 | self.assertEqual(result, mock) 76 | 77 | @patch("github3.apps.create_jwt_headers", MagicMock(return_value="gh_token")) 78 | @patch("requests.post") 79 | def test_get_github_app_installation_token(self, mock_post): 80 | """ 81 | Test the get_github_app_installation_token function. 82 | """ 83 | dummy_token = "dummytoken" 84 | mock_response = MagicMock() 85 | mock_response.raise_for_status.return_value = None 86 | mock_response.json.return_value = {"token": dummy_token} 87 | mock_post.return_value = mock_response 88 | 89 | result = auth.get_github_app_installation_token( 90 | b"ghe", "gh_private_token", "gh_app_id", "gh_installation_id" 91 | ) 92 | 93 | self.assertEqual(result, dummy_token) 94 | 95 | @patch("github3.apps.create_jwt_headers", MagicMock(return_value="gh_token")) 96 | @patch("auth.requests.post") 97 | def test_get_github_app_installation_token_request_failure(self, mock_post): 98 | """ 99 | Test the get_github_app_installation_token function returns None when the request fails. 
100 | """ 101 | # Mock the post request to raise a RequestException 102 | mock_post.side_effect = requests.exceptions.RequestException("Request failed") 103 | 104 | # Call the function with test data 105 | result = auth.get_github_app_installation_token( 106 | ghe="https://api.github.com", 107 | gh_app_id=12345, 108 | gh_app_private_key_bytes=b"private_key", 109 | gh_app_installation_id=678910, 110 | ) 111 | 112 | # Assert that the result is None 113 | self.assertIsNone(result) 114 | 115 | @patch("github3.login") 116 | def test_auth_to_github_invalid_credentials(self, mock_login): 117 | """ 118 | Test the auth_to_github function raises correct ValueError 119 | when credentials are present but incorrect. 120 | """ 121 | mock_login.return_value = None 122 | with self.assertRaises(ValueError) as context_manager: 123 | auth.auth_to_github("not_a_valid_token", "", "", b"", "", False) 124 | 125 | the_exception = context_manager.exception 126 | self.assertEqual( 127 | str(the_exception), 128 | "Unable to authenticate to GitHub", 129 | ) 130 | 131 | 132 | if __name__ == "__main__": 133 | unittest.main() 134 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | # Contributing to evergreen 5 | 6 | First off, thanks for taking the time to contribute! :heart: 7 | 8 | All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us project owners and smooth out the experience for all involved. The team looks forward to your contributions. 
:tada: 9 | 10 | 11 | 12 | ## Table of Contents 13 | 14 | - [I Have a Question](#i-have-a-question) 15 | - [I Want To Contribute](#i-want-to-contribute) 16 | - [Reporting Bugs](#reporting-bugs) 17 | - [Suggesting Enhancements](#suggesting-enhancements) 18 | - [Releases](#releases) 19 | 20 | ## I Have a Question 21 | 22 | Before you ask a question, it is best to search for existing [Issues](https://github.com/github/evergreen/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. 23 | 24 | If you then still feel the need to ask a question and need clarification, we recommend the following: 25 | 26 | - Open an [Issue](https://github.com/github/evergreen/issues/new). 27 | - Provide as much context as you can about what you're running into. 28 | - Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. 29 | 30 | We will then take care of the issue as soon as possible. 31 | 32 | ## I Want To Contribute 33 | 34 | ### Legal Notice 35 | 36 | When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. 37 | 38 | ## Reporting Bugs 39 | 40 | 41 | 42 | ### Before Submitting a Bug Report 43 | 44 | A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. 45 | 46 | - Make sure that you are using the latest version. 47 | - Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the documentation. 
If you are looking for support, you might want to check [this section](#i-have-a-question)). 48 | - To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/github/evergreen/issues). 49 | - Collect information about the bug: 50 | - Stack trace (Traceback) 51 | - OS, Platform and Version (Windows, Linux, macOS, x86, ARM) 52 | - Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. 53 | - Possibly your input and the output 54 | - Can you reliably reproduce the issue? And can you also reproduce it with older versions? 55 | 56 | 57 | 58 | ### How Do I Submit a Good Bug Report? 59 | 60 | Please submit a bug report using our [GitHub Issues template](https://github.com/github/evergreen/issues/new?template=bug_report.yml). 61 | 62 | ## Suggesting Enhancements 63 | 64 | This section guides you through submitting an enhancement suggestion for evergreen, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. 65 | 66 | 67 | 68 | ### Before Submitting an Enhancement 69 | 70 | - Make sure that you are using the latest version. 71 | - Read the documentation carefully and find out if the functionality is already covered, maybe by an individual configuration. 72 | - Perform a [search](https://github.com/github/evergreen/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. 73 | - Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature or to develop the feature yourself and contribute it to the project. 
74 | 75 | 76 | 77 | ### How Do I Submit a Good Enhancement Suggestion? 78 | 79 | Please submit an enhancement suggestion using our [GitHub Issues template](https://github.com/github/evergreen/issues/new?template=feature_request.yml). 80 | 81 | ### Pull Request Standards 82 | 83 | We are using [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) to standardize our pull request titles. This allows us to automatically generate labels and changelogs and follow semantic versioning. Please follow the commit message format when creating a pull request. The expected pull request title prefixes are listed in the [pull_request_template.md](.github/pull_request_template.md) that is shown when creating a pull request. 84 | 85 | ## Releases 86 | 87 | Releases are automated if a pull request is labelled with our [SemVer related labels](.github/release-drafter.yml) or with the `vuln` or `release` labels. 88 | 89 | You can also manually initiate a release through the GitHub Actions UI. If you have permissions to do so, you can navigate to the [Actions tab](https://github.com/github/evergreen/actions/workflows/release.yml) and select the `Run workflow` button. This will allow you to select the branch to release from and the version to release.
"""Tests for the exceptions module."""

import unittest
from unittest.mock import Mock

import github3.exceptions
from exceptions import OptionalFileNotFoundError, check_optional_file


def _mock_404_response():
    """Build a mock HTTP response carrying a 404 status code."""
    resp = Mock()
    resp.status_code = 404
    return resp


class TestOptionalFileNotFoundError(unittest.TestCase):
    """Test the OptionalFileNotFoundError exception."""

    def test_optional_file_not_found_error_inherits_from_not_found_error(self):
        """Test that OptionalFileNotFoundError inherits from github3.exceptions.NotFoundError."""
        error = OptionalFileNotFoundError(resp=_mock_404_response())
        self.assertIsInstance(error, github3.exceptions.NotFoundError)

    def test_optional_file_not_found_error_creation(self):
        """Test OptionalFileNotFoundError can be created."""
        error = OptionalFileNotFoundError(resp=_mock_404_response())
        self.assertIsInstance(error, OptionalFileNotFoundError)

    def test_optional_file_not_found_error_with_response(self):
        """Test OptionalFileNotFoundError with HTTP response."""
        error = OptionalFileNotFoundError(resp=_mock_404_response())

        # Construction with a response object must succeed.
        self.assertIsInstance(error, OptionalFileNotFoundError)

    def test_can_catch_as_github3_not_found_error(self):
        """Test that OptionalFileNotFoundError can be caught as github3.exceptions.NotFoundError."""
        try:
            raise OptionalFileNotFoundError(resp=_mock_404_response())
        except github3.exceptions.NotFoundError as caught:
            self.assertIsInstance(caught, OptionalFileNotFoundError)
        except Exception:  # pylint: disable=broad-exception-caught
            self.fail(
                "OptionalFileNotFoundError should be catchable as github3.exceptions.NotFoundError"
            )

    def test_can_catch_specifically(self):
        """Test that OptionalFileNotFoundError can be caught specifically."""
        try:
            raise OptionalFileNotFoundError(resp=_mock_404_response())
        except OptionalFileNotFoundError as caught:
            self.assertIsInstance(caught, OptionalFileNotFoundError)
        except Exception:  # pylint: disable=broad-exception-caught
            self.fail("OptionalFileNotFoundError should be catchable specifically")

    def test_optional_file_not_found_error_properties(self):
        """Test OptionalFileNotFoundError has expected properties."""
        resp = _mock_404_response()
        error = OptionalFileNotFoundError(resp=resp)

        self.assertEqual(error.code, 404)
        self.assertEqual(error.response, resp)


class TestCheckOptionalFile(unittest.TestCase):
    """Test the check_optional_file utility function."""

    def test_check_optional_file_with_existing_file(self):
        """Test check_optional_file when file exists."""
        repo = Mock()
        contents = Mock()
        contents.size = 100
        repo.file_contents.return_value = contents

        self.assertEqual(check_optional_file(repo, "config.yml"), contents)
        repo.file_contents.assert_called_once_with("config.yml")

    def test_check_optional_file_with_empty_file(self):
        """Test check_optional_file when file exists but is empty."""
        repo = Mock()
        contents = Mock()
        contents.size = 0
        repo.file_contents.return_value = contents

        self.assertIsNone(check_optional_file(repo, "config.yml"))
        repo.file_contents.assert_called_once_with("config.yml")

    def test_check_optional_file_with_missing_file(self):
        """Test check_optional_file when file doesn't exist."""
        repo = Mock()
        resp = _mock_404_response()
        original_error = github3.exceptions.NotFoundError(resp=resp)
        repo.file_contents.side_effect = original_error

        with self.assertRaises(OptionalFileNotFoundError) as context:
            check_optional_file(repo, "missing.yml")

        # The original exception must be chained as the cause.
        self.assertEqual(context.exception.__cause__, original_error)
        self.assertEqual(context.exception.response, resp)
        repo.file_contents.assert_called_once_with("missing.yml")

    def test_check_optional_file_can_catch_as_not_found_error(self):
        """Test that OptionalFileNotFoundError from check_optional_file can be caught as NotFoundError."""
        repo = Mock()
        repo.file_contents.side_effect = github3.exceptions.NotFoundError(
            resp=_mock_404_response()
        )

        try:
            check_optional_file(repo, "missing.yml")
        except github3.exceptions.NotFoundError as caught:
            self.assertIsInstance(caught, OptionalFileNotFoundError)
        except Exception:  # pylint: disable=broad-exception-caught
            self.fail(
                "Should be able to catch OptionalFileNotFoundError as NotFoundError"
            )


if __name__ == "__main__":
    unittest.main()
# Define data structure for dependabot.yaml
data = {
    "version": 2,
    "updates": [],
}

yaml = ruamel.yaml.YAML()
# NOTE(review): retained for backward compatibility with any external users;
# no longer written to by make_dependabot_config (see bug fix below).
stream = io.StringIO()


def make_dependabot_config(
    ecosystem,
    group_dependencies,
    schedule,
    schedule_day,
    labels,
    dependabot_config,
    extra_dependabot_config,
) -> str:
    """
    Make the dependabot configuration for a specific package ecosystem.

    Mutates ``dependabot_config`` in place by appending an entry to its
    ``updates`` list (and a ``registries`` mapping when extra config exists).

    Args:
        ecosystem: the package ecosystem to make the dependabot configuration for
        group_dependencies: whether to group dependencies in the dependabot.yml file
        schedule: the schedule to run dependabot ex: "daily"
        schedule_day: the day of the week to run dependabot ex: "monday" if schedule is "weekly"
        labels: the list of labels to be added to dependabot configuration
        dependabot_config: extra dependabot configs
        extra_dependabot_config: File with the configuration to add dependabot configs (ex: private registries)

    Returns:
        str: the updated dependabot configuration rendered as YAML text
    """
    dependabot_config["updates"].append(
        {
            "package-ecosystem": SingleQuotedScalarString(ecosystem),
            "directory": SingleQuotedScalarString("/"),
        }
    )

    if extra_dependabot_config:
        ecosystem_config = extra_dependabot_config.get(ecosystem)
        if ecosystem_config:
            # Register the private-registry block and reference it from the entry.
            if "registries" not in dependabot_config:
                dependabot_config.update({"registries": {}})
            dependabot_config["registries"][ecosystem] = ecosystem_config
            dependabot_config["updates"][-1].update(
                {"registries": [SingleQuotedScalarString(ecosystem)]}
            )

    if schedule_day:
        dependabot_config["updates"][-1].update(
            {
                "schedule": {
                    "interval": SingleQuotedScalarString(schedule),
                    "day": SingleQuotedScalarString(schedule_day),
                },
            }
        )
    else:
        dependabot_config["updates"][-1].update(
            {
                "schedule": {"interval": SingleQuotedScalarString(schedule)},
            }
        )

    if labels:
        quoted_labels = [SingleQuotedScalarString(label) for label in labels]
        dependabot_config["updates"][-1].update({"labels": quoted_labels})

    if group_dependencies:
        dependabot_config["updates"][-1].update(
            {
                "groups": {
                    "production-dependencies": {
                        "dependency-type": SingleQuotedScalarString("production")
                    },
                    "development-dependencies": {
                        "dependency-type": SingleQuotedScalarString("development")
                    },
                }
            }
        )

    # BUG FIX: ruamel's YAML.dump(data, stream) writes to the stream and
    # returns None. The previous code returned yaml.dump(...) into a shared
    # module-level StringIO, so this function always returned None (despite
    # the declared ``-> str``) and the global buffer grew on every call.
    # Render into a fresh local buffer and return its text instead.
    rendered = io.StringIO()
    yaml.dump(dependabot_config, rendered)
    return rendered.getvalue()
def build_dependabot_file(
    repo,
    group_dependencies,
    exempt_ecosystems,
    repo_specific_exemptions,
    existing_config,
    schedule,
    schedule_day,
    labels,
    extra_dependabot_config,
):
    """
    Build the dependabot configuration for a repo based on the repo contents.

    Args:
        repo: the repository to build the dependabot.yml file for
        group_dependencies: whether to group dependencies in the dependabot.yml file
        exempt_ecosystems: the list of ecosystems to ignore
        repo_specific_exemptions: the list of ecosystems to ignore for a specific repo
        existing_config: the existing dependabot configuration file or None if it doesn't exist
        schedule: the schedule to run dependabot ex: "daily"
        schedule_day: the day of the week to run dependabot ex: "monday" (only valid when schedule is "weekly")
        labels: the list of labels to be added to dependabot configuration
        extra_dependabot_config: parsed extra dependabot configuration (ex: private registries) or None

    Returns:
        The dependabot configuration mapping (as loaded/built via ruamel.yaml)
        when at least one new compatible package manager was found, otherwise
        None. NOTE: callers serialize this mapping themselves; the previous
        "-> str | None" annotation was inaccurate.
    """
    # Tracks which ecosystems were newly detected; extra keys (e.g. "gradle",
    # "devcontainers") are added on the fly by plain dict assignment below.
    package_managers_found = {
        "bundler": False,
        "npm": False,
        "pip": False,
        "cargo": False,
        "gomod": False,
        "composer": False,
        "mix": False,
        "nuget": False,
        "docker": False,
        "terraform": False,
        "github-actions": False,
        "maven": False,
    }

    # create a local copy in order to avoid overwriting the global exemption list
    exempt_ecosystems_list = exempt_ecosystems.copy()
    if existing_config:
        yaml.preserve_quotes = True
        try:
            dependabot_file = yaml.load(base64.b64decode(existing_config.content))
        except ruamel.yaml.YAMLError as e:
            print(f"YAML indentation error: {e}")
            raise
    else:
        # Deep copy of the module-level template so repeated calls never
        # mutate the shared default configuration.
        dependabot_file = copy.deepcopy(data)

    # Don't re-add ecosystems that an existing config already covers
    add_existing_ecosystem_to_exempt_list(exempt_ecosystems_list, dependabot_file)

    # If there are repository specific exemptions,
    # overwrite the global exemptions for this repo only
    if repo_specific_exemptions and repo.full_name in repo_specific_exemptions:
        exempt_ecosystems_list = list(repo_specific_exemptions[repo.full_name])

    # Ecosystems whose manifest files have well-known, fixed names
    package_managers = {
        "bundler": ["Gemfile", "Gemfile.lock"],
        "npm": ["package.json", "package-lock.json", "yarn.lock"],
        "pip": [
            "requirements.txt",
            "Pipfile",
            "Pipfile.lock",
            "pyproject.toml",
            "poetry.lock",
        ],
        "cargo": ["Cargo.toml", "Cargo.lock"],
        "gomod": ["go.mod"],
        "composer": ["composer.json", "composer.lock"],
        "mix": ["mix.exs", "mix.lock"],
        "nuget": [
            ".nuspec",
            ".csproj",
        ],
        "docker": ["Dockerfile"],
        "maven": ["pom.xml"],
        "gradle": ["build.gradle", "build.gradle.kts"],
    }

    # Detect package managers where manifest files have known names
    for manager, manifest_files in package_managers.items():
        if manager in exempt_ecosystems_list:
            continue
        for file in manifest_files:
            try:
                if check_optional_file(repo, file):
                    package_managers_found[manager] = True
                    make_dependabot_config(
                        manager,
                        group_dependencies,
                        schedule,
                        schedule_day,
                        labels,
                        dependabot_file,
                        extra_dependabot_config,
                    )
                    # One manifest is enough to enable the ecosystem
                    break
            except OptionalFileNotFoundError:
                # The file does not exist and is not required,
                # so we should continue to the next one rather than raising error or logging
                pass

    # detect package managers with variable file names
    if "terraform" not in exempt_ecosystems_list:
        try:
            for file in repo.directory_contents("/"):
                if file[0].endswith(".tf"):
                    package_managers_found["terraform"] = True
                    make_dependabot_config(
                        "terraform",
                        group_dependencies,
                        schedule,
                        schedule_day,
                        labels,
                        dependabot_file,
                        extra_dependabot_config,
                    )
                    break
        except github3.exceptions.NotFoundError:
            # The directory does not exist and is not required,
            # so we should continue rather than raising error or logging
            pass
    if "github-actions" not in exempt_ecosystems_list:
        try:
            for file in repo.directory_contents(".github/workflows"):
                if file[0].endswith(".yml") or file[0].endswith(".yaml"):
                    package_managers_found["github-actions"] = True
                    make_dependabot_config(
                        "github-actions",
                        group_dependencies,
                        schedule,
                        schedule_day,
                        labels,
                        dependabot_file,
                        extra_dependabot_config,
                    )
                    break
        except github3.exceptions.NotFoundError:
            # The directory does not exist and is not required,
            # so we should continue rather than raising error or logging
            pass
    if "devcontainers" not in exempt_ecosystems_list:
        try:
            for file in repo.directory_contents(".devcontainer"):
                if file[0] == "devcontainer.json":
                    package_managers_found["devcontainers"] = True
                    make_dependabot_config(
                        "devcontainers",
                        group_dependencies,
                        schedule,
                        schedule_day,
                        labels,
                        dependabot_file,
                        extra_dependabot_config,
                    )
                    break
        except github3.exceptions.NotFoundError:
            # The directory does not exist and is not required,
            # so we should continue rather than raising error or logging
            pass

    if any(package_managers_found.values()):
        return dependabot_file
    return None


def add_existing_ecosystem_to_exempt_list(exempt_ecosystems, existing_config):
    """
    Add the package ecosystems already present in an existing dependabot
    configuration to the exempt list so we don't get duplicate entries and
    we maintain existing configuration settings.

    Args:
        exempt_ecosystems: mutable list of ecosystem names; appended in place.
        existing_config: parsed dependabot config mapping, or None/empty.
    """
    if existing_config:
        for entry in existing_config.get("updates", []):
            # Tolerate malformed entries missing "package-ecosystem" instead
            # of crashing with a KeyError on a user-authored config.
            ecosystem = entry.get("package-ecosystem")
            if ecosystem:
                exempt_ecosystems.append(ecosystem)
def parse_repo_specific_exemptions(repo_specific_exemptions_str: str) -> dict:
    """Parse the REPO_SPECIFIC_EXEMPTIONS environment variable into a dictionary.

    Expected format: "org/repo1:eco1,eco2;org/repo2:eco3;" — entries separated
    by ";", repo and its ecosystems separated by ":".

    Args:
        repo_specific_exemptions_str: The REPO_SPECIFIC_EXEMPTIONS environment variable as a string.

    Returns:
        A dictionary where keys are repository names and values are lists of exempt ecosystems.

    Raises:
        ValueError: if the string is malformed or names an unrecognized ecosystem.
    """
    # Keep in sync with the detection logic in dependabot_file.py, which also
    # supports maven, gradle, and devcontainers.
    valid_ecosystems = frozenset(
        [
            "bundler",
            "cargo",
            "composer",
            "devcontainers",
            "docker",
            "github-actions",
            "gomod",
            "gradle",
            "maven",
            "mix",
            "npm",
            "nuget",
            "pip",
            "terraform",
        ]
    )
    exemptions_dict = {}
    if repo_specific_exemptions_str:
        # if repo_specific_exemptions_str doesn't have a ; and : character, it's not valid
        separators = [";", ":"]
        if not all(sep in repo_specific_exemptions_str for sep in separators):
            raise ValueError(
                "REPO_SPECIFIC_EXEMPTIONS environment variable not formatted correctly"
            )
        exemptions_list = repo_specific_exemptions_str.split(";")
        for exemption in exemptions_list:
            if (
                exemption == ""
            ):  # Account for final ; in the repo_specific_exemptions_str
                continue
            repo, ecosystems = exemption.split(":")
            for ecosystem in ecosystems.split(","):
                # Validate the stripped token: storage strips whitespace below,
                # so "repo: pip" must validate the same as "repo:pip".
                if ecosystem.strip() not in valid_ecosystems:
                    raise ValueError(
                        "REPO_SPECIFIC_EXEMPTIONS environment variable not formatted correctly. Unrecognized package-ecosystem."
                    )
            exemptions_dict[repo.strip()] = [
                ecosystem.strip() for ecosystem in ecosystems.split(",")
            ]
    return exemptions_dict
def get_env_vars(
    test: bool = False,
) -> tuple[
    str | None,
    list[str],
    str | None,
    int | None,
    int | None,
    bytes,
    bool,
    str,
    str,
    list[str],
    str,
    str,
    str,
    str,
    bool,
    str,
    str | None,
    bool | None,
    list[str] | None,
    int | None,
    bool | None,
    list[str],
    bool | None,
    dict,
    str,
    str,
    str | None,
    list[str],
    str | None,
]:
    """
    Get the environment variables for use in the action.

    Args:
        test: when True, skip loading a local .env file (used by unit tests).

    Returns (in tuple order):
        organization (str | None): The organization to search for repositories in
        repositories_list (list[str]): A list of repositories to search for
        search_query (str): A search query string to filter repositories by
        gh_app_id (int | None): The GitHub App ID to use for authentication
        gh_app_installation_id (int | None): The GitHub App Installation ID to use for authentication
        gh_app_private_key_bytes (bytes): The GitHub App Private Key as bytes to use for authentication
        gh_app_enterprise_only (bool): Set this to true if the GH APP is created on GHE and needs to communicate with GHE api only
        token (str): The GitHub token to use for authentication
        ghe (str): The GitHub Enterprise URL to use for authentication
        exempt_repositories_list (list[str]): A list of repositories to exempt from the action
        follow_up_type (str): The type of follow up to open (issue or pull)
        title (str): The title of the follow up
        body (str): The body of the follow up
        created_after_date (str): The date to filter repositories by
        dry_run (bool): Whether or not to actually open issues/pull requests
        commit_message (str): The commit message of the follow up
        project_id (str | None): The project number to link issues/PRs to
        group_dependencies (bool): Whether to group dependencies in the dependabot.yml file
        filter_visibility (list[str]): Run the action only on repositories with the specified listed visibility
        batch_size (int | None): The max number of repositories in scope
        enable_security_updates (bool): Whether to enable security updates in target repositories
        exempt_ecosystems_list (list[str]): A list of package ecosystems to exempt from the action
        update_existing (bool): Whether to update existing dependabot configuration files
        repo_specific_exemptions (dict): A dictionary of per repository ecosystem exemptions
        schedule (str): The schedule to run the action on
        schedule_day (str): The day of the week to run the action on if schedule is weekly
        team_name (str | None): The team to search for repositories in
        labels (list[str]): A list of labels to be added to dependabot configuration
        dependabot_config_file (str | None): Dependabot extra configuration file location path
    """

    if not test:  # pragma: no cover
        # Load from .env file if it exists and not testing
        dotenv_path = join(dirname(__file__), ".env")
        load_dotenv(dotenv_path)

    organization = os.getenv("ORGANIZATION")
    repositories_str = os.getenv("REPOSITORY")
    search_query = os.getenv("REPOSITORY_SEARCH_QUERY", "").strip()
    team_name = os.getenv("TEAM_NAME")
    # Either organization, repository, or search_query must be set
    if not organization and not repositories_str and not search_query:
        raise ValueError(
            "ORGANIZATION, REPOSITORY, and REPOSITORY_SEARCH_QUERY environment variables were not set. Please set one"
        )
    # Team name and repository are mutually exclusive
    if repositories_str and team_name:
        raise ValueError(
            "TEAM_NAME environment variable cannot be used with REPOSITORY"
        )

    # Separate repositories_str into a list based on the comma separator
    repositories_list = []
    if repositories_str:
        repositories_list = [
            repository.strip() for repository in repositories_str.split(",")
        ]

    gh_app_id = get_int_env_var("GH_APP_ID")
    gh_app_private_key_bytes = os.environ.get("GH_APP_PRIVATE_KEY", "").encode("utf8")
    gh_app_installation_id = get_int_env_var("GH_APP_INSTALLATION_ID")
    gh_app_enterprise_only = get_bool_env_var("GITHUB_APP_ENTERPRISE_ONLY")

    # GitHub App auth requires all three pieces together
    if gh_app_id and (not gh_app_private_key_bytes or not gh_app_installation_id):
        raise ValueError(
            "GH_APP_ID set and GH_APP_INSTALLATION_ID or GH_APP_PRIVATE_KEY variable not set"
        )

    token = os.getenv("GH_TOKEN", "")
    if (
        not gh_app_id
        and not gh_app_private_key_bytes
        and not gh_app_installation_id
        and not token
    ):
        raise ValueError("GH_TOKEN environment variable not set")

    ghe = os.getenv("GH_ENTERPRISE_URL", default="").strip()

    exempt_repos = os.getenv("EXEMPT_REPOS")
    exempt_repositories_list = []
    if exempt_repos:
        exempt_repositories_list = [
            repository.strip() for repository in exempt_repos.split(",")
        ]

    follow_up_type = os.getenv("TYPE")
    # make sure that follow_up_type is either "issue" or "pull"
    if follow_up_type:
        if follow_up_type not in ("issue", "pull"):
            raise ValueError("TYPE environment variable not 'issue' or 'pull'")
    else:
        follow_up_type = "pull"

    title = os.getenv("TITLE")
    # make sure that title is a string with less than MAX_TITLE_LENGTH characters
    if title:
        if len(title) > MAX_TITLE_LENGTH:
            raise ValueError("TITLE environment variable is too long")
    else:
        title = "Enable Dependabot"

    body = os.getenv("BODY")
    if body and len(body) > MAX_BODY_LENGTH:
        raise ValueError("BODY environment variable is too long")

    if not body:
        default_bodies = {
            "pull": "Dependabot could be enabled for this repository. \
Please enable it by merging this pull request so that we can keep our dependencies up to date and secure.",
            "issue": (
                "Please update the repository to include a Dependabot configuration file.\n"
                "This will ensure our dependencies remain updated and secure.\n"
                "Follow the guidelines in [creating Dependabot configuration files]"
                "(https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file) "
                "to set it up properly.\n\n"
                "Here's an example of the code:"
            ),
        }
        # Fix: was a duplicated assignment ("body = body = ...")
        body = default_bodies[follow_up_type]

    commit_message = os.getenv("COMMIT_MESSAGE")
    if commit_message:
        if len(commit_message) > MAX_COMMIT_MESSAGE_LENGTH:
            raise ValueError("COMMIT_MESSAGE environment variable is too long")
    else:
        commit_message = "Create/Update dependabot.yaml"

    created_after_date = os.getenv("CREATED_AFTER_DATE", "")
    # fullmatch (not match) so trailing garbage like "2024-01-0199" is rejected
    # here instead of surfacing later as a strptime error per repository
    is_match = re.fullmatch(r"\d{4}-\d{2}-\d{2}", created_after_date)
    if created_after_date and not is_match:
        raise ValueError(
            f"CREATED_AFTER_DATE '{created_after_date}' environment variable not in YYYY-MM-DD"
        )

    group_dependencies_bool = get_bool_env_var("GROUP_DEPENDENCIES")
    enable_security_updates_bool = get_bool_env_var(
        "ENABLE_SECURITY_UPDATES", default=True
    )
    dry_run_bool = get_bool_env_var("DRY_RUN")

    batch_size_str = os.getenv("BATCH_SIZE")
    # int() raising on non-numeric input is deliberate: a malformed BATCH_SIZE
    # should fail loudly rather than silently run unbatched
    batch_size = int(batch_size_str) if batch_size_str else None
    if batch_size and batch_size <= 0:
        raise ValueError("BATCH_SIZE environment variable is 0 or lower")

    filter_visibility = os.getenv("FILTER_VISIBILITY")
    filter_visibility_list = []
    if filter_visibility:
        filter_visibility_set = set()
        for visibility in filter_visibility.split(","):
            if visibility.strip().lower() not in ["public", "private", "internal"]:
                raise ValueError(
                    "FILTER_VISIBILITY environment variable not 'public', 'private', or 'internal'"
                )
            filter_visibility_set.add(visibility.strip().lower())
        filter_visibility_list = sorted(list(filter_visibility_set))
    else:
        filter_visibility_list = sorted(["public", "private", "internal"])  # all

    exempt_ecosystems = os.getenv("EXEMPT_ECOSYSTEMS")
    exempt_ecosystems_list = []
    if exempt_ecosystems:
        exempt_ecosystems_list = [
            ecosystem.lower().strip() for ecosystem in exempt_ecosystems.split(",")
        ]

    project_id = os.getenv("PROJECT_ID")
    if project_id and not project_id.isnumeric():
        raise ValueError("PROJECT_ID environment variable is not numeric")

    update_existing = get_bool_env_var("UPDATE_EXISTING")

    repo_specific_exemptions_str = os.getenv("REPO_SPECIFIC_EXEMPTIONS", "")
    repo_specific_exemptions = parse_repo_specific_exemptions(
        repo_specific_exemptions_str
    )

    schedule = os.getenv("SCHEDULE", "").strip().lower()
    if schedule and schedule not in ["daily", "weekly", "monthly"]:
        raise ValueError(
            "SCHEDULE environment variable not 'daily', 'weekly', or 'monthly'"
        )
    if not schedule:
        schedule = "weekly"
    schedule_day = os.getenv("SCHEDULE_DAY", "").strip().lower()
    # SCHEDULE_DAY only makes sense for a weekly schedule
    if schedule != "weekly" and schedule_day:
        raise ValueError(
            "SCHEDULE_DAY environment variable not needed when SCHEDULE is not 'weekly'"
        )
    if (
        schedule == "weekly"
        and schedule_day
        and schedule_day
        not in [
            "monday",
            "tuesday",
            "wednesday",
            "thursday",
            "friday",
            "saturday",
            "sunday",
        ]
    ):
        raise ValueError(
            "SCHEDULE_DAY environment variable not 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', or 'sunday'"
        )

    labels_str = os.getenv("LABELS")
    labels_list = []
    if labels_str:
        labels_list = [label.lower().strip() for label in labels_str.split(",")]

    dependabot_config_file = os.getenv("DEPENDABOT_CONFIG_FILE")
    if dependabot_config_file and not os.path.exists(dependabot_config_file):
        raise ValueError(
            f"No dependabot extra configuration found. Please create one in {dependabot_config_file}"
        )

    return (
        organization,
        repositories_list,
        search_query,
        gh_app_id,
        gh_app_installation_id,
        gh_app_private_key_bytes,
        gh_app_enterprise_only,
        token,
        ghe,
        exempt_repositories_list,
        follow_up_type,
        title,
        body,
        created_after_date,
        dry_run_bool,
        commit_message,
        project_id,
        group_dependencies_bool,
        filter_visibility_list,
        batch_size,
        enable_security_updates_bool,
        exempt_ecosystems_list,
        update_existing,
        repo_specific_exemptions,
        schedule,
        schedule_day,
        team_name,
        labels_list,
        dependabot_config_file,
    )
def main():  # pragma: no cover
    """Run the main program"""

    # Get the environment variables
    # (tuple order must match env.get_env_vars()'s return tuple exactly)
    (
        organization,
        repository_list,
        search_query,
        gh_app_id,
        gh_app_installation_id,
        gh_app_private_key,
        gh_app_enterprise_only,
        token,
        ghe,
        exempt_repositories_list,
        follow_up_type,
        title,
        body,
        created_after_date,
        dry_run,
        commit_message,
        project_id,
        group_dependencies,
        filter_visibility,
        batch_size,
        enable_security_updates,
        exempt_ecosystems,
        update_existing,
        repo_specific_exemptions,
        schedule,
        schedule_day,
        team_name,
        labels,
        dependabot_config_file,
    ) = env.get_env_vars()

    # Auth to GitHub.com or GHE
    github_connection = auth.auth_to_github(
        token,
        gh_app_id,
        gh_app_installation_id,
        gh_app_private_key,
        ghe,
        gh_app_enterprise_only,
    )

    # When only GitHub App credentials were supplied, mint an installation
    # token so the raw REST/GraphQL helpers below can authenticate.
    if not token and gh_app_id and gh_app_installation_id and gh_app_private_key:
        token = auth.get_github_app_installation_token(
            ghe, gh_app_id, gh_app_private_key, gh_app_installation_id
        )

    # Set the project_global_id to None by default
    project_global_id = None

    # If Project ID is set, lookup the global project ID
    if project_id:
        # Check Organization is set as it is required for linking to a project
        if not organization:
            raise ValueError(
                "ORGANIZATION environment variable was not set. Please set it"
            )
        project_global_id = get_global_project_id(ghe, token, organization, project_id)

    # Get the repositories from the organization, team name, or list of repositories
    repos = get_repos_iterator(
        organization, team_name, repository_list, search_query, github_connection
    )

    # Setting up the action summary content
    summary_content = f"""
## 🚀 Job Summary
- **Organization:** {organization}
- **Follow Up Type:** {follow_up_type}
- **Dry Run:** {dry_run}
- **Enable Security Updates:** {enable_security_updates}\n
"""
    # Add optional parameters to the summary
    if project_id:
        project_link = f"https://github.com/orgs/{organization}/projects/{project_id}"
        summary_content += f"- **Project ID:** [{project_id}]({project_link})\n"
    if batch_size:
        summary_content += f"- **Batch Size:** {batch_size}\n"

    # Add the updated repositories table header
    summary_content += (
        "\n\n## 📋 Updated Repositories\n\n"
        "| Repository | 🔒 Security Updates Enabled | 🔄 Follow Up Type | 🔗 Link |\n"
        "| --- | --- | --- | --- |\n"
    )

    # Iterate through the repositories and open an issue/PR if dependabot is not enabled
    count_eligible = 0
    count_prs_created = 0
    for repo in repos:
        # if batch_size is defined, ensure we break if we exceed the number of eligible repos
        if batch_size and count_eligible >= batch_size:
            print(f"Batch size met at {batch_size} eligible repositories.")
            break

        # Check all the things to see if repo is eligible for a pr/issue
        if repo.full_name in exempt_repositories_list:
            print(f"Skipping {repo.full_name} (exempted)")
            continue
        if repo.archived:
            print(f"Skipping {repo.full_name} (archived)")
            continue
        if repo.visibility.lower() not in filter_visibility:
            print(f"Skipping {repo.full_name} (visibility-filtered)")
            continue

        # Find an existing dependabot config (yaml preferred over yml) so we
        # can update it in place rather than creating a duplicate file.
        existing_config = None
        filename_list = [".github/dependabot.yaml", ".github/dependabot.yml"]
        dependabot_filename_to_use = filename_list[0]  # Default to the first filename
        for filename in filename_list:
            existing_config = check_existing_config(repo, filename)
            if existing_config:
                dependabot_filename_to_use = filename
                break

        if existing_config and not update_existing:
            print(
                f"Skipping {repo.full_name} (dependabot file already exists and update_existing is False)"
            )
            continue

        if created_after_date and is_repo_created_date_before(
            repo.created_at, created_after_date
        ):
            print(f"Skipping {repo.full_name} (created after filter)")
            continue

        # Check if there is any extra configuration to be added to the dependabot file by checking the DEPENDABOT_CONFIG_FILE env variable
        if dependabot_config_file:
            yaml = ruamel.yaml.YAML()
            yaml.preserve_quotes = True
            # If running locally on a computer the local file takes precedence over the one existent on the repository
            try:
                with open(
                    dependabot_config_file, "r", encoding="utf-8"
                ) as extra_dependabot_config:
                    # NOTE: deliberately rebinds the open-file name to the
                    # parsed YAML mapping; the file handle is closed by `with`.
                    extra_dependabot_config = yaml.load(extra_dependabot_config)
            except ruamel.yaml.YAMLError as e:
                print(f"YAML indentation error: {e}")
                continue

        else:
            # If no dependabot configuration file is present set the variable empty
            extra_dependabot_config = None

        print(f"Checking {repo.full_name} for compatible package managers")
        # Try to detect package managers and build a dependabot file
        # (returns a YAML mapping, or None when nothing new was detected)
        dependabot_file = build_dependabot_file(
            repo,
            group_dependencies,
            exempt_ecosystems,
            repo_specific_exemptions,
            existing_config,
            schedule,
            schedule_day,
            labels,
            extra_dependabot_config,
        )

        yaml = ruamel.yaml.YAML()
        stream = io.StringIO()

        yaml.indent(mapping=2, sequence=4, offset=2)

        # create locally the dependabot file
        # NOTE(review): this runs before the None check below, so a repo with
        # no detected package managers still writes "null" into
        # dependabot-output.yaml — confirm whether that is intended.
        with open("dependabot-output.yaml", "w", encoding="utf-8") as yaml_file:
            yaml.dump(dependabot_file, yaml_file)

        if dependabot_file is None:
            print("\tNo (new) compatible package manager found")
            continue

        # ruamel's dump() writes into `stream` and returns None; the real
        # serialized text is recovered from the stream on the next line.
        dependabot_file = yaml.dump(dependabot_file, stream)
        dependabot_file = stream.getvalue()

        # If dry_run is set, just print the dependabot file
        if dry_run:
            if follow_up_type == "issue":
                skip = check_pending_issues_for_duplicates(title, repo)
                if not skip:
                    print("\tEligible for configuring dependabot.")
                    count_eligible += 1
                    print(f"\tConfiguration:\n {dependabot_file}")
            if follow_up_type == "pull":
                # Try to detect if the repo already has an open pull request for dependabot
                skip = check_pending_pulls_for_duplicates(title, repo)
                if not skip:
                    print("\tEligible for configuring dependabot.")
                    count_eligible += 1
                    print(f"\tConfiguration:\n {dependabot_file}")
            continue

        # Get dependabot security updates enabled if possible
        if enable_security_updates:
            if not is_dependabot_security_updates_enabled(
                ghe, repo.owner, repo.name, token
            ):
                enable_dependabot_security_updates(ghe, repo.owner, repo.name, token)

        if follow_up_type == "issue":
            skip = check_pending_issues_for_duplicates(title, repo)
            if not skip:
                count_eligible += 1
                body_issue = f"{body}\n\n```yaml\n# {dependabot_filename_to_use} \n{dependabot_file}\n```"
                issue = repo.create_issue(title, body_issue)
                print(f"\tCreated issue {issue.html_url}")
                summary_content += f"| {repo.full_name} | {'✅' if enable_security_updates else '❌'} | {follow_up_type} | [Link]({issue.html_url}) |\n"
                if project_global_id:
                    issue_id = get_global_issue_id(
                        ghe, token, organization, repo.name, issue.number
                    )
                    link_item_to_project(ghe, token, project_global_id, issue_id)
                    print(f"\tLinked issue to project {project_global_id}")
        else:
            # Try to detect if the repo already has an open pull request for dependabot
            skip = check_pending_pulls_for_duplicates(title, repo)

            # Create a dependabot.yaml file, a branch, and a PR
            if not skip:
                count_eligible += 1
                try:
                    pull = commit_changes(
                        title,
                        body,
                        repo,
                        dependabot_file,
                        commit_message,
                        dependabot_filename_to_use,
                        existing_config,
                    )
                    print(f"\tCreated pull request {pull.html_url}")
                    count_prs_created += 1
                    summary_content += (
                        f"| {repo.full_name} | "
                        f"{'✅' if enable_security_updates else '❌'} | "
                        f"{follow_up_type} | "
                        f"[Link]({pull.html_url}) |\n"
                    )
                    if project_global_id:
                        pr_id = get_global_pr_id(
                            ghe, token, organization, repo.name, pull.number
                        )
                        response = link_item_to_project(
                            ghe, token, project_global_id, pr_id
                        )
                        if response:
                            print(
                                f"\tLinked pull request to project {project_global_id}"
                            )
                except github3.exceptions.NotFoundError:
                    print("\tFailed to create pull request. Check write permissions.")
                    continue

    print(f"Done. {str(count_eligible)} repositories were eligible.")
    print(f"{str(count_prs_created)} pull requests were created.")
    # Append the summary content to the GitHub step summary file
    append_to_github_summary(summary_content)


def is_repo_created_date_before(repo_created_at: str, created_after_date: str):
    """Check if the repository was created before the created_after_date"""
    # Strip tz info so both sides of the comparison are naive datetimes;
    # returns falsy ("") when created_after_date is empty (callers guard too).
    repo_created_at_date = datetime.fromisoformat(repo_created_at).replace(tzinfo=None)
    return created_after_date and repo_created_at_date < datetime.strptime(
        created_after_date, "%Y-%m-%d"
    )


def is_dependabot_security_updates_enabled(ghe, owner, repo, access_token):
    """
    Check if Dependabot security updates are enabled at the /repos/:owner/:repo/automated-security-fixes endpoint using the requests library
    API: https://docs.github.com/en/rest/repos/repos?apiVersion=2022-11-28#check-if-automated-security-fixes-are-enabled-for-a-repository

    Returns True only when the API responds 200 with {"enabled": true};
    any non-200 response is treated as "not enabled".
    """
    api_endpoint = f"{ghe}/api/v3" if ghe else "https://api.github.com"
    url = f"{api_endpoint}/repos/{owner}/{repo}/automated-security-fixes"
    headers = {
        "Authorization": f"Bearer {access_token}",
        # NOTE(review): "london-preview" is a preview media type — confirm it
        # is still required by the target API version.
        "Accept": "application/vnd.github.london-preview+json",
    }

    response = requests.get(url, headers=headers, timeout=20)
    if response.status_code == 200:
        return response.json()["enabled"]
    return False


def check_existing_config(repo, filename):
    """
    Check if a file already exists in the
    repository and return the existing config if it does

    Args:
        repo (github3.repos.repo.Repository): The repository to check
        filename (str): The configuration filename to check

    Returns:
        github3.repos.contents.Contents | None: The existing config if it exists, otherwise None
    """
    existing_config = None
    try:
        existing_config = check_optional_file(repo, filename)
        if existing_config:
            return existing_config
    except OptionalFileNotFoundError:
        # The file does not exist and is not required,
        # so we should continue to the next one rather than raising error or logging
        pass
    return None
def enable_dependabot_security_updates(ghe, owner, repo, access_token):
    """
    Enable Dependabot security updates at the /repos/:owner/:repo/automated-security-fixes endpoint using the requests library
    API: https://docs.github.com/en/rest/repos/repos?apiVersion=2022-11-28#enable-automated-security-fixes
    """
    base_url = f"{ghe}/api/v3" if ghe else "https://api.github.com"
    endpoint = f"{base_url}/repos/{owner}/{repo}/automated-security-fixes"
    request_headers = {
        "Authorization": f"Bearer {access_token}",
        "Accept": "application/vnd.github.london-preview+json",
    }

    result = requests.put(endpoint, headers=request_headers, timeout=20)
    # 204 No Content signals success for this endpoint
    if result.status_code == 204:
        print("\tDependabot security updates enabled successfully.")
    else:
        print("\tFailed to enable Dependabot security updates.")


def get_repos_iterator(
    organization, team_name, repository_list, search_query, github_connection
):
    """Get the repositories from the organization, team_name, repository_list, or via search query"""
    # Search mode: materialize the search iterator into a list of repositories
    if search_query:
        return [
            result.repository
            for result in github_connection.search_repositories(search_query)
        ]

    # Whole-organization mode (no explicit repo list or team)
    if organization and not repository_list and not team_name:
        return github_connection.organization(organization).repositories()

    # Team mode: every repository the team can access
    if team_name and organization:
        team = github_connection.organization(organization).team_by_name(team_name)
        if team.repos_count == 0:
            print(f"Team {team_name} has no repositories")
            sys.exit(1)
        return team.repositories()

    # Explicit "owner/name" list mode
    collected = []
    for full_name in repository_list:
        parts = full_name.split("/")
        collected.append(github_connection.repository(parts[0], parts[1]))
    return collected


def check_pending_pulls_for_duplicates(title, repo) -> bool:
    """Check if there are any open pull requests for dependabot and return the bool skip"""
    duplicate = next(
        (
            pull_request
            for pull_request in repo.pull_requests(state="open")
            if pull_request.title.startswith(title)
        ),
        None,
    )
    if duplicate is None:
        return False
    print(f"\tPull request already exists: {duplicate.html_url}")
    return True


def check_pending_issues_for_duplicates(title, repo) -> bool:
    """Check if there are any open issues for dependabot and return the bool skip"""
    duplicate = next(
        (
            issue
            for issue in repo.issues(state="open")
            if issue.title.startswith(title)
        ),
        None,
    )
    if duplicate is None:
        return False
    print(f"\tIssue already exists: {duplicate.html_url}")
    return True


def commit_changes(
    title,
    body,
    repo,
    dependabot_file,
    message,
    dependabot_filename=".github/dependabot.yml",
    existing_config=None,
):
    """Commit the changes to the repo and open a pull request and return the pull request object"""
    base_branch = repo.default_branch
    # Latest commit sha on the default branch becomes the new branch's base
    base_sha = repo.ref("heads/" + base_branch).object.sha
    branch_name = "dependabot-" + str(uuid.uuid4())
    repo.create_ref("refs/heads/" + branch_name, base_sha)

    payload = dependabot_file.encode()  # the contents API expects bytes
    if existing_config:
        repo.file_contents(dependabot_filename).update(
            message=message,
            content=payload,
            branch=branch_name,
        )
    else:
        repo.create_file(
            path=dependabot_filename,
            message=message,
            content=payload,
            branch=branch_name,
        )

    return repo.create_pull(
        title=title, body=body, head=branch_name, base=repo.default_branch
    )


def get_global_project_id(ghe, token, organization, number):
    """
    Fetches the project ID from GitHub's GraphQL API.
    API: https://docs.github.com/en/graphql/guides/forming-calls-with-graphql
    """
    base_url = f"{ghe}/api/v3" if ghe else "https://api.github.com"
    graphql_url = f"{base_url}/graphql"
    auth_headers = {"Authorization": f"Bearer {token}"}
    payload = {
        "query": f'query{{organization(login: "{organization}") {{projectV2(number: {number}){{id}}}}}}'
    }

    try:
        response = requests.post(graphql_url, headers=auth_headers, json=payload, timeout=20)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        print(f"Request failed: {e}")
        return None

    try:
        return response.json()["data"]["organization"]["projectV2"]["id"]
    except KeyError as e:
        print(f"Failed to parse response: {e}")
        return None
| "query": f""" 477 | query {{ 478 | repository(owner: "{organization}", name: "{repository}") {{ 479 | issue(number: {issue_number}) {{ 480 | id 481 | }} 482 | }} 483 | }} 484 | """ 485 | } 486 | 487 | try: 488 | response = requests.post(url, headers=headers, json=data, timeout=20) 489 | response.raise_for_status() 490 | except requests.exceptions.RequestException as e: 491 | print(f"Request failed: {e}") 492 | return None 493 | 494 | try: 495 | return response.json()["data"]["repository"]["issue"]["id"] 496 | except KeyError as e: 497 | print(f"Failed to parse response: {e}") 498 | return None 499 | 500 | 501 | def get_global_pr_id(ghe, token, organization, repository, pr_number): 502 | """ 503 | Fetches the pull request ID from GitHub's GraphQL API 504 | API: https://docs.github.com/en/graphql/guides/forming-calls-with-graphql 505 | """ 506 | api_endpoint = f"{ghe}/api/v3" if ghe else "https://api.github.com" 507 | url = f"{api_endpoint}/graphql" 508 | headers = {"Authorization": f"Bearer {token}"} 509 | data = { 510 | "query": f""" 511 | query {{ 512 | repository(owner: "{organization}", name: "{repository}") {{ 513 | pullRequest(number: {pr_number}) {{ 514 | id 515 | }} 516 | }} 517 | }} 518 | """ 519 | } 520 | 521 | try: 522 | response = requests.post(url, headers=headers, json=data, timeout=20) 523 | response.raise_for_status() 524 | except requests.exceptions.RequestException as e: 525 | print(f"Request failed: {e}") 526 | return None 527 | 528 | try: 529 | return response.json()["data"]["repository"]["pullRequest"]["id"] 530 | except KeyError as e: 531 | print(f"Failed to parse response: {e}") 532 | return None 533 | 534 | 535 | def link_item_to_project(ghe, token, project_global_id, item_id): 536 | """ 537 | Links an item (issue or pull request) to a project in GitHub. 
538 | API: https://docs.github.com/en/graphql/guides/forming-calls-with-graphql 539 | """ 540 | api_endpoint = f"{ghe}/api/v3" if ghe else "https://api.github.com" 541 | url = f"{api_endpoint}/graphql" 542 | headers = {"Authorization": f"Bearer {token}"} 543 | data = { 544 | "query": f'mutation {{addProjectV2ItemById(input: {{projectId: "{project_global_id}", contentId: "{item_id}"}}) {{item {{id}}}}}}' 545 | } 546 | 547 | try: 548 | response = requests.post(url, headers=headers, json=data, timeout=20) 549 | response.raise_for_status() 550 | return response 551 | except requests.exceptions.RequestException as e: 552 | print(f"Request failed: {e}") 553 | return None 554 | 555 | 556 | def append_to_github_summary(content, summary_file="summary.md"): 557 | """ 558 | Append content to the GitHub step summary file 559 | """ 560 | if summary_file: 561 | with open(summary_file, "a", encoding="utf-8") as f: 562 | f.write(content + "\n") 563 | 564 | 565 | if __name__ == "__main__": 566 | main() # pragma: no cover 567 | -------------------------------------------------------------------------------- /.github/linters/.python-lint: -------------------------------------------------------------------------------- 1 | [MAIN] 2 | 3 | # Analyse import fallback blocks. This can be used to support both Python 2 and 4 | # 3 compatible code, which means that the block might have code that exists 5 | # only in one or another interpreter, leading to false positives when analysed. 6 | analyse-fallback-blocks=no 7 | 8 | # Clear in-memory caches upon conclusion of linting. Useful if running pylint 9 | # in a server-like mode. 10 | clear-cache-post-run=no 11 | 12 | # Load and enable all available extensions. Use --list-extensions to see a list 13 | # all available extensions. 14 | #enable-all-extensions= 15 | 16 | # In error mode, messages with a category besides ERROR or FATAL are 17 | # suppressed, and no reports are done by default. 
Error mode is compatible with 18 | # disabling specific errors. 19 | #errors-only= 20 | 21 | # Always return a 0 (non-error) status code, even if lint errors are found. 22 | # This is primarily useful in continuous integration scripts. 23 | #exit-zero= 24 | 25 | # A comma-separated list of package or module names from where C extensions may 26 | # be loaded. Extensions are loading into the active Python interpreter and may 27 | # run arbitrary code. 28 | extension-pkg-allow-list= 29 | 30 | # A comma-separated list of package or module names from where C extensions may 31 | # be loaded. Extensions are loading into the active Python interpreter and may 32 | # run arbitrary code. (This is an alternative name to extension-pkg-allow-list 33 | # for backward compatibility.) 34 | extension-pkg-whitelist= 35 | 36 | # Return non-zero exit code if any of these messages/categories are detected, 37 | # even if score is above --fail-under value. Syntax same as enable. Messages 38 | # specified are enabled, while categories only check already-enabled messages. 39 | fail-on= 40 | 41 | # Specify a score threshold under which the program will exit with error. 42 | fail-under=10 43 | 44 | # Interpret the stdin as a python script, whose filename needs to be passed as 45 | # the module_or_package argument. 46 | #from-stdin= 47 | 48 | # Files or directories to be skipped. They should be base names, not paths. 49 | ignore=CVS, 50 | .git, 51 | __pycache__, 52 | venv, 53 | .venv, 54 | 55 | # Add files or directories matching the regular expressions patterns to the 56 | # ignore-list. The regex matches against paths and can be in Posix or Windows 57 | # format. Because '\\' represents the directory delimiter on Windows systems, 58 | # it can't be used as an escape character. 59 | ignore-paths= 60 | 61 | # Files or directories matching the regular expression patterns are skipped. 62 | # The regex matches against base names, not paths. 
The default value ignores 63 | # Emacs file locks 64 | ignore-patterns=^\.# 65 | 66 | # List of module names for which member attributes should not be checked 67 | # (useful for modules/projects where namespaces are manipulated during runtime 68 | # and thus existing member attributes cannot be deduced by static analysis). It 69 | # supports qualified module names, as well as Unix pattern matching. 70 | ignored-modules= 71 | 72 | # Python code to execute, usually for sys.path manipulation such as 73 | # pygtk.require(). 74 | #init-hook= 75 | 76 | # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the 77 | # number of processors available to use, and will cap the count on Windows to 78 | # avoid hangs. 79 | jobs=1 80 | 81 | # Control the amount of potential inferred values when inferring a single 82 | # object. This can help the performance when dealing with large functions or 83 | # complex, nested conditions. 84 | limit-inference-results=100 85 | 86 | # List of plugins (as comma separated values of python module names) to load, 87 | # usually to register additional checkers. 88 | load-plugins= 89 | 90 | # Pickle collected data for later comparisons. 91 | persistent=yes 92 | 93 | # Minimum Python version to use for version dependent checks. Will default to 94 | # the version used to run pylint. 95 | py-version=3.11 96 | 97 | # Discover python modules and packages in the file system subtree. 98 | recursive=no 99 | 100 | # Add paths to the list of the source roots. Supports globbing patterns. The 101 | # source root is an absolute path or a path relative to the current working 102 | # directory used to determine a package namespace for modules located under the 103 | # source root. 104 | source-roots= 105 | 106 | # When enabled, pylint would attempt to guess common misconfiguration and emit 107 | # user-friendly hints instead of false-positive error messages. 108 | suggestion-mode=yes 109 | 110 | # Allow loading of arbitrary C extensions. 
Extensions are imported into the 111 | # active Python interpreter and may run arbitrary code. 112 | unsafe-load-any-extension=no 113 | 114 | # In verbose mode, extra non-checker-related info will be displayed. 115 | #verbose= 116 | 117 | 118 | [BASIC] 119 | 120 | # Naming style matching correct argument names. 121 | argument-naming-style=snake_case 122 | 123 | # Regular expression matching correct argument names. Overrides argument- 124 | # naming-style. If left empty, argument names will be checked with the set 125 | # naming style. 126 | #argument-rgx= 127 | 128 | # Naming style matching correct attribute names. 129 | attr-naming-style=snake_case 130 | 131 | # Regular expression matching correct attribute names. Overrides attr-naming- 132 | # style. If left empty, attribute names will be checked with the set naming 133 | # style. 134 | #attr-rgx= 135 | 136 | # Bad variable names which should always be refused, separated by a comma. 137 | bad-names=foo, 138 | bar, 139 | baz, 140 | toto, 141 | tutu, 142 | tata 143 | 144 | # Bad variable names regexes, separated by a comma. If names match any regex, 145 | # they will always be refused 146 | bad-names-rgxs= 147 | 148 | # Naming style matching correct class attribute names. 149 | class-attribute-naming-style=any 150 | 151 | # Regular expression matching correct class attribute names. Overrides class- 152 | # attribute-naming-style. If left empty, class attribute names will be checked 153 | # with the set naming style. 154 | #class-attribute-rgx= 155 | 156 | # Naming style matching correct class constant names. 157 | class-const-naming-style=UPPER_CASE 158 | 159 | # Regular expression matching correct class constant names. Overrides class- 160 | # const-naming-style. If left empty, class constant names will be checked with 161 | # the set naming style. 162 | #class-const-rgx= 163 | 164 | # Naming style matching correct class names. 
165 | class-naming-style=PascalCase 166 | 167 | # Regular expression matching correct class names. Overrides class-naming- 168 | # style. If left empty, class names will be checked with the set naming style. 169 | #class-rgx= 170 | 171 | # Naming style matching correct constant names. 172 | const-naming-style=UPPER_CASE 173 | 174 | # Regular expression matching correct constant names. Overrides const-naming- 175 | # style. If left empty, constant names will be checked with the set naming 176 | # style. 177 | #const-rgx= 178 | 179 | # Minimum line length for functions/classes that require docstrings, shorter 180 | # ones are exempt. 181 | docstring-min-length=-1 182 | 183 | # Naming style matching correct function names. 184 | function-naming-style=snake_case 185 | 186 | # Regular expression matching correct function names. Overrides function- 187 | # naming-style. If left empty, function names will be checked with the set 188 | # naming style. 189 | #function-rgx= 190 | 191 | # Good variable names which should always be accepted, separated by a comma. 192 | good-names=i, 193 | j, 194 | k, 195 | ex, 196 | Run, 197 | _ 198 | 199 | # Good variable names regexes, separated by a comma. If names match any regex, 200 | # they will always be accepted 201 | good-names-rgxs= 202 | 203 | # Include a hint for the correct naming format with invalid-name. 204 | include-naming-hint=no 205 | 206 | # Naming style matching correct inline iteration names. 207 | inlinevar-naming-style=any 208 | 209 | # Regular expression matching correct inline iteration names. Overrides 210 | # inlinevar-naming-style. If left empty, inline iteration names will be checked 211 | # with the set naming style. 212 | #inlinevar-rgx= 213 | 214 | # Naming style matching correct method names. 215 | method-naming-style=snake_case 216 | 217 | # Regular expression matching correct method names. Overrides method-naming- 218 | # style. If left empty, method names will be checked with the set naming style. 
219 | #method-rgx= 220 | 221 | # Naming style matching correct module names. 222 | module-naming-style=snake_case 223 | 224 | # Regular expression matching correct module names. Overrides module-naming- 225 | # style. If left empty, module names will be checked with the set naming style. 226 | #module-rgx= 227 | 228 | # Colon-delimited sets of names that determine each other's naming style when 229 | # the name regexes allow several styles. 230 | name-group= 231 | 232 | # Regular expression which should only match function or class names that do 233 | # not require a docstring. 234 | no-docstring-rgx=^_ 235 | 236 | # List of decorators that produce properties, such as abc.abstractproperty. Add 237 | # to this list to register other decorators that produce valid properties. 238 | # These decorators are taken in consideration only for invalid-name. 239 | property-classes=abc.abstractproperty 240 | 241 | # Regular expression matching correct type alias names. If left empty, type 242 | # alias names will be checked with the set naming style. 243 | #typealias-rgx= 244 | 245 | # Regular expression matching correct type variable names. If left empty, type 246 | # variable names will be checked with the set naming style. 247 | #typevar-rgx= 248 | 249 | # Naming style matching correct variable names. 250 | variable-naming-style=snake_case 251 | 252 | # Regular expression matching correct variable names. Overrides variable- 253 | # naming-style. If left empty, variable names will be checked with the set 254 | # naming style. 255 | #variable-rgx= 256 | 257 | 258 | [CLASSES] 259 | 260 | # Warn about protected attribute access inside special methods 261 | check-protected-access-in-special-methods=no 262 | 263 | # List of method names used to declare (i.e. assign) instance attributes. 
264 | defining-attr-methods=__init__, 265 | __new__, 266 | setUp, 267 | asyncSetUp, 268 | __post_init__ 269 | 270 | # List of member names, which should be excluded from the protected access 271 | # warning. 272 | exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit 273 | 274 | # List of valid names for the first argument in a class method. 275 | valid-classmethod-first-arg=cls 276 | 277 | # List of valid names for the first argument in a metaclass class method. 278 | valid-metaclass-classmethod-first-arg=mcs 279 | 280 | 281 | [DESIGN] 282 | 283 | # List of regular expressions of class ancestor names to ignore when counting 284 | # public methods (see R0903) 285 | exclude-too-few-public-methods= 286 | 287 | # List of qualified class names to ignore when counting class parents (see 288 | # R0901) 289 | ignored-parents= 290 | 291 | # Maximum number of arguments for function / method. 292 | max-args=5 293 | 294 | # Maximum number of attributes for a class (see R0902). 295 | max-attributes=7 296 | 297 | # Maximum number of boolean expressions in an if statement (see R0916). 298 | max-bool-expr=5 299 | 300 | # Maximum number of branch for function / method body. 301 | max-branches=12 302 | 303 | # Maximum number of locals for function / method body. 304 | max-locals=15 305 | 306 | # Maximum number of parents for a class (see R0901). 307 | max-parents=7 308 | 309 | # Maximum number of public methods for a class (see R0904). 310 | max-public-methods=25 311 | 312 | # Maximum number of return / yield for function / method body. 313 | max-returns=6 314 | 315 | # Maximum number of statements in function / method body. 316 | max-statements=50 317 | 318 | # Minimum number of public methods for a class (see R0903). 319 | min-public-methods=2 320 | 321 | 322 | [EXCEPTIONS] 323 | 324 | # Exceptions that will emit a warning when caught. 
325 | overgeneral-exceptions=builtins.BaseException,builtins.Exception 326 | 327 | 328 | [FORMAT] 329 | 330 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 331 | expected-line-ending-format= 332 | 333 | # Regexp for a line that is allowed to be longer than the limit. 334 | ignore-long-lines=^\s*(# )??$ 335 | 336 | # Number of spaces of indent required inside a hanging or continued line. 337 | indent-after-paren=4 338 | 339 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 340 | # tab). 341 | indent-string=' ' 342 | 343 | # Maximum number of characters on a single line. 344 | max-line-length=100 345 | 346 | # Maximum number of lines in a module. 347 | max-module-lines=1000 348 | 349 | # Allow the body of a class to be on the same line as the declaration if body 350 | # contains single statement. 351 | single-line-class-stmt=no 352 | 353 | # Allow the body of an if to be on the same line as the test if there is no 354 | # else. 355 | single-line-if-stmt=no 356 | 357 | 358 | [IMPORTS] 359 | 360 | # List of modules that can be imported at any level, not just the top level 361 | # one. 362 | allow-any-import-level= 363 | 364 | # Allow explicit reexports by alias from a package __init__. 365 | allow-reexport-from-package=no 366 | 367 | # Allow wildcard imports from modules that define __all__. 368 | allow-wildcard-with-all=no 369 | 370 | # Deprecated modules which should not be used, separated by a comma. 371 | deprecated-modules= 372 | 373 | # Output a graph (.gv or any supported image format) of external dependencies 374 | # to the given file (report RP0402 must not be disabled). 375 | ext-import-graph= 376 | 377 | # Output a graph (.gv or any supported image format) of all (i.e. internal and 378 | # external) dependencies to the given file (report RP0402 must not be 379 | # disabled). 
380 | import-graph= 381 | 382 | # Output a graph (.gv or any supported image format) of internal dependencies 383 | # to the given file (report RP0402 must not be disabled). 384 | int-import-graph= 385 | 386 | # Force import order to recognize a module as part of the standard 387 | # compatibility libraries. 388 | known-standard-library= 389 | 390 | # Force import order to recognize a module as part of a third party library. 391 | known-third-party=enchant 392 | 393 | # Couples of modules and preferred modules, separated by a comma. 394 | preferred-modules= 395 | 396 | 397 | [LOGGING] 398 | 399 | # The type of string formatting that logging methods do. `old` means using % 400 | # formatting, `new` is for `{}` formatting. 401 | logging-format-style=old 402 | 403 | # Logging modules to check that the string format arguments are in logging 404 | # function parameter format. 405 | logging-modules=logging 406 | 407 | 408 | [MESSAGES CONTROL] 409 | 410 | # Only show warnings with the listed confidence levels. Leave empty to show 411 | # all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, 412 | # UNDEFINED. 413 | confidence=HIGH, 414 | CONTROL_FLOW, 415 | INFERENCE, 416 | INFERENCE_FAILURE, 417 | UNDEFINED 418 | 419 | # Disable the message, report, category or checker with the given id(s). You 420 | # can either give multiple identifiers separated by comma (,) or put this 421 | # option multiple times (only on the command line, not in the configuration 422 | # file where it should appear only once). You can also use "--disable=all" to 423 | # disable everything first and then re-enable specific checks. For example, if 424 | # you want to run only the similarities checker, you can use "--disable=all 425 | # --enable=similarities". If you want to run only the classes checker, but have 426 | # no Warning level messages displayed, use "--disable=all --enable=classes 427 | # --disable=W". 
428 | disable=bad-inline-option, 429 | deprecated-pragma, 430 | duplicate-code, 431 | locally-disabled, 432 | file-ignored, 433 | import-error, 434 | line-too-long, 435 | raw-checker-failed, 436 | suppressed-message, 437 | too-few-public-methods, 438 | too-many-arguments, 439 | too-many-function-args, 440 | too-many-branches, 441 | too-many-locals, 442 | too-many-nested-blocks, 443 | too-many-positional-arguments, 444 | too-many-statements, 445 | useless-suppression, 446 | use-symbolic-message-instead, 447 | use-implicit-booleaness-not-comparison-to-string, 448 | use-implicit-booleaness-not-comparison-to-zero, 449 | wrong-import-order 450 | 451 | # Enable the message, report, category or checker with the given id(s). You can 452 | # either give multiple identifier separated by comma (,) or put this option 453 | # multiple time (only on the command line, not in the configuration file where 454 | # it should appear only once). See also the "--disable" option for examples. 455 | enable= 456 | 457 | 458 | [METHOD_ARGS] 459 | 460 | # List of qualified names (i.e., library.method) which require a timeout 461 | # parameter e.g. 'requests.api.get,requests.api.post' 462 | timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request 463 | 464 | 465 | [MISCELLANEOUS] 466 | 467 | # List of note tags to take in consideration, separated by a comma. 468 | notes=FIXME, 469 | XXX, 470 | TODO 471 | 472 | # Regular expression of note tags to take in consideration. 473 | notes-rgx= 474 | 475 | 476 | [REFACTORING] 477 | 478 | # Maximum number of nested blocks for function / method body 479 | max-nested-blocks=5 480 | 481 | # Complete name of functions that never returns. When checking for 482 | # inconsistent-return-statements if a never returning function is called then 483 | # it will be considered as an explicit return statement and no message will be 484 | # printed. 
485 | never-returning-functions=sys.exit,argparse.parse_error 486 | 487 | 488 | [REPORTS] 489 | 490 | # Python expression which should return a score less than or equal to 10. You 491 | # have access to the variables 'fatal', 'error', 'warning', 'refactor', 492 | # 'convention', and 'info' which contain the number of messages in each 493 | # category, as well as 'statement' which is the total number of statements 494 | # analyzed. This score is used by the global evaluation report (RP0004). 495 | evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) 496 | 497 | # Template used to display messages. This is a python new-style format string 498 | # used to format the message information. See doc for all details. 499 | msg-template= 500 | 501 | # Set the output format. Available formats are: text, parseable, colorized, 502 | # json2 (improved json format), json (old json format) and msvs (visual 503 | # studio). You can also give a reporter class, e.g. 504 | # mypackage.mymodule.MyReporterClass. 505 | #output-format= 506 | 507 | # Tells whether to display a full report or only the messages. 508 | reports=no 509 | 510 | # Activate the evaluation score. 511 | score=yes 512 | 513 | 514 | [SIMILARITIES] 515 | 516 | # Comments are removed from the similarity computation 517 | ignore-comments=yes 518 | 519 | # Docstrings are removed from the similarity computation 520 | ignore-docstrings=yes 521 | 522 | # Imports are removed from the similarity computation 523 | ignore-imports=yes 524 | 525 | # Signatures are removed from the similarity computation 526 | ignore-signatures=yes 527 | 528 | # Minimum lines number of a similarity. 529 | min-similarity-lines=4 530 | 531 | 532 | [SPELLING] 533 | 534 | # Limits count of emitted suggestions for spelling mistakes. 535 | max-spelling-suggestions=4 536 | 537 | # Spelling dictionary name. 
No available dictionaries : You need to install 538 | # both the python package and the system dependency for enchant to work. 539 | spelling-dict= 540 | 541 | # List of comma separated words that should be considered directives if they 542 | # appear at the beginning of a comment and should not be checked. 543 | spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: 544 | 545 | # List of comma separated words that should not be checked. 546 | spelling-ignore-words= 547 | 548 | # A path to a file that contains the private dictionary; one word per line. 549 | spelling-private-dict-file= 550 | 551 | # Tells whether to store unknown words to the private dictionary (see the 552 | # --spelling-private-dict-file option) instead of raising a message. 553 | spelling-store-unknown-words=no 554 | 555 | 556 | [STRING] 557 | 558 | # This flag controls whether inconsistent-quotes generates a warning when the 559 | # character used as a quote delimiter is used inconsistently within a module. 560 | check-quote-consistency=no 561 | 562 | # This flag controls whether the implicit-str-concat should generate a warning 563 | # on implicit string concatenation in sequences defined over several lines. 564 | check-str-concat-over-line-jumps=no 565 | 566 | 567 | [TYPECHECK] 568 | 569 | # List of decorators that produce context managers, such as 570 | # contextlib.contextmanager. Add to this list to register other decorators that 571 | # produce valid context managers. 572 | contextmanager-decorators=contextlib.contextmanager 573 | 574 | # List of members which are set dynamically and missed by pylint inference 575 | # system, and so shouldn't trigger E1101 when accessed. Python regular 576 | # expressions are accepted. 577 | generated-members= 578 | 579 | # Tells whether to warn about missing members when the owner of the attribute 580 | # is inferred to be None. 
581 | ignore-none=yes 582 | 583 | # This flag controls whether pylint should warn about no-member and similar 584 | # checks whenever an opaque object is returned when inferring. The inference 585 | # can return multiple potential results while evaluating a Python object, but 586 | # some branches might not be evaluated, which results in partial inference. In 587 | # that case, it might be useful to still emit no-member and other checks for 588 | # the rest of the inferred objects. 589 | ignore-on-opaque-inference=yes 590 | 591 | # List of symbolic message names to ignore for Mixin members. 592 | ignored-checks-for-mixins=no-member, 593 | not-async-context-manager, 594 | not-context-manager, 595 | attribute-defined-outside-init 596 | 597 | # List of class names for which member attributes should not be checked (useful 598 | # for classes with dynamically set attributes). This supports the use of 599 | # qualified names. 600 | ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace 601 | 602 | # Show a hint with possible names when a member name was not found. The aspect 603 | # of finding the hint is based on edit distance. 604 | missing-member-hint=yes 605 | 606 | # The minimum edit distance a name should have in order to be considered a 607 | # similar match for a missing member name. 608 | missing-member-hint-distance=1 609 | 610 | # The total number of similar names that should be taken in consideration when 611 | # showing a hint for a missing member. 612 | missing-member-max-choices=1 613 | 614 | # Regex pattern to define which classes are considered mixins. 615 | mixin-class-rgx=.*[Mm]ixin 616 | 617 | # List of decorators that change the signature of a decorated function. 618 | signature-mutators= 619 | 620 | 621 | [VARIABLES] 622 | 623 | # List of additional names supposed to be defined in builtins. Remember that 624 | # you should avoid defining new builtins when possible. 
625 | additional-builtins= 626 | 627 | # Tells whether unused global variables should be treated as a violation. 628 | allow-global-unused-variables=yes 629 | 630 | # List of names allowed to shadow builtins 631 | allowed-redefined-builtins= 632 | 633 | # List of strings which can identify a callback function by name. A callback 634 | # name must start or end with one of those strings. 635 | callbacks=cb_, 636 | _cb 637 | 638 | # A regular expression matching the name of dummy variables (i.e. expected to 639 | # not be used). 640 | dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ 641 | 642 | # Argument names that match this expression will be ignored. 643 | ignored-argument-names=_.*|^ignored_|^unused_ 644 | 645 | # Tells whether we should check for unused import in __init__ files. 646 | init-import=no 647 | 648 | # List of qualified module names which can have objects that can redefine 649 | # builtins. 650 | redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io 651 | -------------------------------------------------------------------------------- /test_dependabot_file.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=too-many-public-methods 2 | """Tests for the dependabot_file.py functions.""" 3 | 4 | import base64 5 | import os 6 | import unittest 7 | from unittest.mock import MagicMock, patch 8 | 9 | import github3 10 | import ruamel.yaml 11 | from dependabot_file import add_existing_ecosystem_to_exempt_list, build_dependabot_file 12 | 13 | yaml = ruamel.yaml.YAML() 14 | 15 | 16 | class TestDependabotFile(unittest.TestCase): 17 | """ 18 | Test the dependabot_file.py functions. 
19 | """ 20 | 21 | def test_not_found_error(self): 22 | """Test that the dependabot.yml file is built correctly with no package manager""" 23 | repo = MagicMock() 24 | response = MagicMock() 25 | response.status_code = 404 26 | repo.file_contents.side_effect = github3.exceptions.NotFoundError(resp=response) 27 | 28 | result = build_dependabot_file(repo, False, [], {}, None, "", "", [], None) 29 | self.assertIsNone(result) 30 | 31 | def test_build_dependabot_file_with_schedule_day(self): 32 | """Test that the dependabot.yml file is built correctly with weekly schedule day""" 33 | repo = MagicMock() 34 | filename_list = ["Gemfile", "Gemfile.lock"] 35 | 36 | for filename in filename_list: 37 | repo.file_contents.side_effect = lambda f, filename=filename: f == filename 38 | expected_result = yaml.load( 39 | b""" 40 | version: 2 41 | updates: 42 | - package-ecosystem: 'bundler' 43 | directory: '/' 44 | schedule: 45 | interval: 'weekly' 46 | day: 'tuesday' 47 | """ 48 | ) 49 | result = build_dependabot_file( 50 | repo, False, [], {}, None, "weekly", "tuesday", [], None 51 | ) 52 | self.assertEqual(result, expected_result) 53 | 54 | def test_build_dependabot_file_with_bundler(self): 55 | """Test that the dependabot.yml file is built correctly with bundler""" 56 | repo = MagicMock() 57 | filename_list = ["Gemfile", "Gemfile.lock"] 58 | 59 | for filename in filename_list: 60 | repo.file_contents.side_effect = lambda f, filename=filename: f == filename 61 | expected_result = yaml.load( 62 | b""" 63 | version: 2 64 | updates: 65 | - package-ecosystem: 'bundler' 66 | directory: '/' 67 | schedule: 68 | interval: 'weekly' 69 | """ 70 | ) 71 | result = build_dependabot_file( 72 | repo, False, [], {}, None, "weekly", "", [], None 73 | ) 74 | self.assertEqual(result, expected_result) 75 | 76 | def test_build_dependabot_file_with_existing_config_bundler_no_update(self): 77 | """Test that the dependabot.yml file is built correctly with bundler""" 78 | repo = MagicMock() 79 | 
repo.file_contents.side_effect = lambda f, filename="Gemfile": f == filename 80 | 81 | # expected_result is None because the existing config already contains the all applicable ecosystems 82 | expected_result = None 83 | existing_config = MagicMock() 84 | existing_config.content = base64.b64encode( 85 | b""" 86 | version: 2 87 | updates: 88 | - package-ecosystem: "bundler" 89 | directory: "/" 90 | schedule: 91 | interval: "weekly" 92 | commit-message: 93 | prefix: "chore(deps)" 94 | """ 95 | ) 96 | result = build_dependabot_file( 97 | repo, False, [], {}, existing_config, "weekly", "", [], None 98 | ) 99 | self.assertEqual(result, expected_result) 100 | 101 | def test_build_dependabot_file_with_2_space_indent_existing_config_bundler_with_update( 102 | self, 103 | ): 104 | """Test that the dependabot.yml file is built correctly with bundler""" 105 | repo = MagicMock() 106 | repo.file_contents.side_effect = lambda f, filename="Gemfile": f == filename 107 | 108 | # expected_result maintains existing ecosystem with custom configuration 109 | # and adds new ecosystem 110 | expected_result = yaml.load( 111 | b""" 112 | version: 2 113 | updates: 114 | - package-ecosystem: "pip" 115 | directory: "/" 116 | schedule: 117 | interval: "weekly" 118 | commit-message: 119 | prefix: "chore(deps)" 120 | - package-ecosystem: 'bundler' 121 | directory: '/' 122 | schedule: 123 | interval: 'weekly' 124 | """ 125 | ) 126 | existing_config = MagicMock() 127 | existing_config.content = base64.b64encode( 128 | b""" 129 | version: 2 130 | updates: 131 | - package-ecosystem: "pip" 132 | directory: "/" 133 | schedule: 134 | interval: "weekly" 135 | commit-message: 136 | prefix: "chore(deps)" 137 | """ 138 | ) 139 | result = build_dependabot_file( 140 | repo, False, [], {}, existing_config, "weekly", "", [], None 141 | ) 142 | self.assertEqual(result, expected_result) 143 | 144 | def test_build_dependabot_file_with_weird_space_indent_existing_config_bundler_with_update( 145 | self, 146 | ): 
147 | """Test that the dependabot.yml file is built correctly with bundler""" 148 | repo = MagicMock() 149 | repo.file_contents.side_effect = lambda f, filename="Gemfile": f == filename 150 | 151 | # expected_result maintains existing ecosystem with custom configuration 152 | # and adds new ecosystem 153 | existing_config = MagicMock() 154 | existing_config.content = base64.b64encode( 155 | b""" 156 | version: 2 157 | updates: 158 | - package-ecosystem: "pip" 159 | directory: "/" 160 | schedule: 161 | interval: "weekly" 162 | commit-message: 163 | prefix: "chore(deps)" 164 | """ 165 | ) 166 | 167 | with self.assertRaises(ruamel.yaml.YAMLError): 168 | build_dependabot_file( 169 | repo, False, [], {}, existing_config, "weekly", "", [], None 170 | ) 171 | 172 | def test_build_dependabot_file_with_incorrect_indentation_in_extra_dependabot_config_file( 173 | self, 174 | ): 175 | """Test incorrect indentation on extra_dependabot_config""" 176 | repo = MagicMock() 177 | repo.file_contents.side_effect = lambda f, filename="Gemfile": f == filename 178 | 179 | # expected_result maintains existing ecosystem with custom configuration 180 | # and adds new ecosystem 181 | extra_dependabot_config = MagicMock() 182 | extra_dependabot_config.content = base64.b64encode( 183 | b""" 184 | npm: 185 | type: 'npm' 186 | url: 'https://yourprivateregistry/npm/' 187 | username: '${{secrets.username}}' 188 | password: '${{secrets.password}}' 189 | """ 190 | ) 191 | 192 | with self.assertRaises(ruamel.yaml.YAMLError): 193 | build_dependabot_file( 194 | repo, False, [], {}, None, "weekly", "", [], extra_dependabot_config 195 | ) 196 | 197 | @patch.dict(os.environ, {"DEPENDABOT_CONFIG_FILE": "dependabot-config.yaml"}) 198 | def test_build_dependabot_file_with_extra_dependabot_config_file(self): 199 | """Test that the dependabot.yaml file is built correctly with extra configurations from extra_dependabot_config""" 200 | 201 | repo = MagicMock() 202 | repo.file_contents.side_effect = ( 203 | 
lambda f, filename="package.json": f == filename 204 | ) 205 | 206 | # expected_result maintains existing ecosystem with custom configuration 207 | # and adds new ecosystem 208 | extra_dependabot_config = yaml.load( 209 | b""" 210 | npm: 211 | type: 'npm' 212 | url: 'https://yourprivateregistry/npm/' 213 | username: '${{secrets.username}}' 214 | password: '${{secrets.password}}' 215 | """ 216 | ) 217 | 218 | expected_result = yaml.load( 219 | b""" 220 | version: 2 221 | registries: 222 | npm: 223 | type: 'npm' 224 | url: 'https://yourprivateregistry/npm/' 225 | username: '${{secrets.username}}' 226 | password: '${{secrets.password}}' 227 | updates: 228 | - package-ecosystem: "npm" 229 | directory: "/" 230 | registries: 231 | - 'npm' 232 | schedule: 233 | interval: "weekly" 234 | """ 235 | ) 236 | 237 | result = build_dependabot_file( 238 | repo, False, [], {}, None, "weekly", "", [], extra_dependabot_config 239 | ) 240 | self.assertEqual(result, expected_result) 241 | 242 | def test_build_dependabot_file_with_npm(self): 243 | """Test that the dependabot.yml file is built correctly with npm""" 244 | repo = MagicMock() 245 | filename_list = ["package.json", "package-lock.json", "yarn.lock"] 246 | 247 | for filename in filename_list: 248 | repo.file_contents.side_effect = lambda f, filename=filename: f == filename 249 | expected_result = yaml.load( 250 | b""" 251 | version: 2 252 | updates: 253 | - package-ecosystem: 'npm' 254 | directory: '/' 255 | schedule: 256 | interval: 'weekly' 257 | """ 258 | ) 259 | result = build_dependabot_file( 260 | repo, False, [], {}, None, "weekly", "", [], None 261 | ) 262 | self.assertEqual(result, expected_result) 263 | 264 | def test_build_dependabot_file_with_pip(self): 265 | """Test that the dependabot.yml file is built correctly with pip""" 266 | repo = MagicMock() 267 | filename_list = [ 268 | "requirements.txt", 269 | "Pipfile", 270 | "Pipfile.lock", 271 | "pyproject.toml", 272 | "poetry.lock", 273 | ] 274 | 275 | for filename 
in filename_list: 276 | repo.file_contents.side_effect = lambda f, filename=filename: f == filename 277 | expected_result = yaml.load( 278 | b""" 279 | version: 2 280 | updates: 281 | - package-ecosystem: 'pip' 282 | directory: '/' 283 | schedule: 284 | interval: 'weekly' 285 | """ 286 | ) 287 | result = build_dependabot_file( 288 | repo, False, [], {}, None, "weekly", "", [], None 289 | ) 290 | self.assertEqual(result, expected_result) 291 | 292 | def test_build_dependabot_file_with_cargo(self): 293 | """Test that the dependabot.yml file is built correctly with Cargo""" 294 | repo = MagicMock() 295 | filename_list = [ 296 | "Cargo.toml", 297 | "Cargo.lock", 298 | ] 299 | 300 | for filename in filename_list: 301 | repo.file_contents.side_effect = lambda f, filename=filename: f == filename 302 | expected_result = yaml.load( 303 | b""" 304 | version: 2 305 | updates: 306 | - package-ecosystem: 'cargo' 307 | directory: '/' 308 | schedule: 309 | interval: 'weekly' 310 | """ 311 | ) 312 | result = build_dependabot_file( 313 | repo, False, [], {}, None, "weekly", "", [], None 314 | ) 315 | self.assertEqual(result, expected_result) 316 | 317 | def test_build_dependabot_file_with_gomod(self): 318 | """Test that the dependabot.yml file is built correctly with Go module""" 319 | repo = MagicMock() 320 | repo.file_contents.side_effect = lambda filename: filename == "go.mod" 321 | 322 | expected_result = yaml.load( 323 | b""" 324 | version: 2 325 | updates: 326 | - package-ecosystem: 'gomod' 327 | directory: '/' 328 | schedule: 329 | interval: 'weekly' 330 | """ 331 | ) 332 | result = build_dependabot_file( 333 | repo, False, [], {}, None, "weekly", "", [], None 334 | ) 335 | self.assertEqual(result, expected_result) 336 | 337 | def test_build_dependabot_file_with_composer(self): 338 | """Test that the dependabot.yml file is built correctly with Composer""" 339 | repo = MagicMock() 340 | filename_list = [ 341 | "composer.json", 342 | "composer.lock", 343 | ] 344 | 345 | for 
filename in filename_list: 346 | repo.file_contents.side_effect = lambda f, filename=filename: f == filename 347 | expected_result = yaml.load( 348 | b""" 349 | version: 2 350 | updates: 351 | - package-ecosystem: 'composer' 352 | directory: '/' 353 | schedule: 354 | interval: 'weekly' 355 | """ 356 | ) 357 | result = build_dependabot_file( 358 | repo, False, [], {}, None, "weekly", "", [], None 359 | ) 360 | self.assertEqual(result, expected_result) 361 | 362 | def test_build_dependabot_file_with_hex(self): 363 | """Test that the dependabot.yml file is built correctly with Hex""" 364 | repo = MagicMock() 365 | filename_list = [ 366 | "mix.exs", 367 | "mix.lock", 368 | ] 369 | 370 | for filename in filename_list: 371 | repo.file_contents.side_effect = lambda f, filename=filename: f == filename 372 | expected_result = yaml.load( 373 | b""" 374 | version: 2 375 | updates: 376 | - package-ecosystem: 'mix' 377 | directory: '/' 378 | schedule: 379 | interval: 'weekly' 380 | """ 381 | ) 382 | result = build_dependabot_file( 383 | repo, False, [], {}, None, "weekly", "", [], None 384 | ) 385 | self.assertEqual(result, expected_result) 386 | 387 | def test_build_dependabot_file_with_nuget(self): 388 | """Test that the dependabot.yml file is built correctly with NuGet""" 389 | repo = MagicMock() 390 | repo.file_contents.side_effect = lambda filename: filename.endswith(".csproj") 391 | 392 | expected_result = yaml.load( 393 | b""" 394 | version: 2 395 | updates: 396 | - package-ecosystem: 'nuget' 397 | directory: '/' 398 | schedule: 399 | interval: 'weekly' 400 | """ 401 | ) 402 | result = build_dependabot_file( 403 | repo, False, [], {}, None, "weekly", "", [], None 404 | ) 405 | self.assertEqual(result, expected_result) 406 | 407 | def test_build_dependabot_file_with_docker(self): 408 | """Test that the dependabot.yml file is built correctly with Docker""" 409 | repo = MagicMock() 410 | repo.file_contents.side_effect = lambda filename: filename == "Dockerfile" 411 | 412 | 
expected_result = yaml.load( 413 | b""" 414 | version: 2 415 | updates: 416 | - package-ecosystem: 'docker' 417 | directory: '/' 418 | schedule: 419 | interval: 'weekly' 420 | """ 421 | ) 422 | result = build_dependabot_file( 423 | repo, False, [], {}, None, "weekly", "", [], None 424 | ) 425 | self.assertEqual(result, expected_result) 426 | 427 | def test_build_dependabot_file_with_maven(self): 428 | """Test that the dependabot.yml file is built correctly with maven""" 429 | repo = MagicMock() 430 | repo.file_contents.side_effect = lambda filename: filename == "pom.xml" 431 | 432 | expected_result = yaml.load( 433 | b""" 434 | version: 2 435 | updates: 436 | - package-ecosystem: 'maven' 437 | directory: '/' 438 | schedule: 439 | interval: 'weekly' 440 | """ 441 | ) 442 | result = build_dependabot_file( 443 | repo, False, [], {}, None, "weekly", "", [], None 444 | ) 445 | self.assertEqual(result, expected_result) 446 | 447 | def test_build_dependabot_file_with_gradle(self): 448 | """Test that the dependabot.yml file is built correctly with gradle""" 449 | repo = MagicMock() 450 | repo.file_contents.side_effect = lambda filename: filename == "build.gradle" 451 | 452 | expected_result = yaml.load( 453 | b""" 454 | version: 2 455 | updates: 456 | - package-ecosystem: 'gradle' 457 | directory: '/' 458 | schedule: 459 | interval: 'weekly' 460 | """ 461 | ) 462 | result = build_dependabot_file( 463 | repo, False, [], {}, None, "weekly", "", [], None 464 | ) 465 | self.assertEqual(result, expected_result) 466 | 467 | def test_build_dependabot_file_with_terraform_with_files(self): 468 | """Test that the dependabot.yml file is built correctly with Terraform""" 469 | repo = MagicMock() 470 | response = MagicMock() 471 | response.status_code = 404 472 | repo.file_contents.side_effect = github3.exceptions.NotFoundError(resp=response) 473 | repo.directory_contents.side_effect = lambda path: ( 474 | [("main.tf", None)] if path == "/" else [] 475 | ) 476 | 477 | expected_result = 
yaml.load( 478 | b""" 479 | version: 2 480 | updates: 481 | - package-ecosystem: 'terraform' 482 | directory: '/' 483 | schedule: 484 | interval: 'weekly' 485 | """ 486 | ) 487 | result = build_dependabot_file( 488 | repo, False, [], {}, None, "weekly", "", [], None 489 | ) 490 | self.assertEqual(result, expected_result) 491 | 492 | def test_build_dependabot_file_with_terraform_without_files(self): 493 | """Test that the dependabot.yml file is built correctly with Terraform""" 494 | repo = MagicMock() 495 | response = MagicMock() 496 | response.status_code = 404 497 | repo.file_contents.side_effect = github3.exceptions.NotFoundError(resp=response) 498 | 499 | # Test absence of Terraform files 500 | repo.directory_contents.side_effect = lambda path: [] if path == "/" else [] 501 | result = build_dependabot_file( 502 | repo, False, [], {}, None, "weekly", "", [], None 503 | ) 504 | self.assertIsNone(result) 505 | 506 | # Test empty repository 507 | response = MagicMock() 508 | response.status_code = 404 509 | repo.directory_contents.side_effect = github3.exceptions.NotFoundError( 510 | resp=response 511 | ) 512 | result = build_dependabot_file( 513 | repo, False, [], {}, None, "weekly", "", [], None 514 | ) 515 | self.assertIsNone(result) 516 | 517 | def test_build_dependabot_file_with_devcontainers(self): 518 | """Test that the dependabot.yml file is built correctly with devcontainers""" 519 | repo = MagicMock() 520 | response = MagicMock() 521 | response.status_code = 404 522 | repo.file_contents.side_effect = github3.exceptions.NotFoundError(resp=response) 523 | repo.directory_contents.side_effect = lambda path: ( 524 | [("devcontainer.json", None)] if path == ".devcontainer" else [] 525 | ) 526 | 527 | expected_result = yaml.load( 528 | b""" 529 | version: 2 530 | updates: 531 | - package-ecosystem: 'devcontainers' 532 | directory: '/' 533 | schedule: 534 | interval: 'weekly' 535 | """ 536 | ) 537 | result = build_dependabot_file( 538 | repo, False, [], None, 
None, "weekly", "", [], None 539 | ) 540 | self.assertEqual(result, expected_result) 541 | 542 | def test_build_dependabot_file_with_github_actions(self): 543 | """Test that the dependabot.yml file is built correctly with GitHub Actions""" 544 | repo = MagicMock() 545 | response = MagicMock() 546 | response.status_code = 404 547 | repo.file_contents.side_effect = github3.exceptions.NotFoundError(resp=response) 548 | repo.directory_contents.side_effect = lambda path: ( 549 | [("test.yml", None)] if path == ".github/workflows" else [] 550 | ) 551 | 552 | expected_result = yaml.load( 553 | b""" 554 | version: 2 555 | updates: 556 | - package-ecosystem: 'github-actions' 557 | directory: '/' 558 | schedule: 559 | interval: 'weekly' 560 | """ 561 | ) 562 | result = build_dependabot_file( 563 | repo, False, [], None, None, "weekly", "", [], None 564 | ) 565 | self.assertEqual(result, expected_result) 566 | 567 | def test_build_dependabot_file_with_github_actions_without_files(self): 568 | """Test that the dependabot.yml file is None when no YAML files are found in the .github/workflows/ directory.""" 569 | repo = MagicMock() 570 | response = MagicMock() 571 | response.status_code = 404 572 | repo.file_contents.side_effect = github3.exceptions.NotFoundError(resp=response) 573 | repo.directory_contents.side_effect = github3.exceptions.NotFoundError( 574 | resp=response 575 | ) 576 | 577 | result = build_dependabot_file( 578 | repo, False, [], None, None, "weekly", "", [], None 579 | ) 580 | self.assertIsNone(result) 581 | 582 | def test_build_dependabot_file_with_groups(self): 583 | """Test that the dependabot.yml file is built correctly with grouped dependencies""" 584 | repo = MagicMock() 585 | repo.file_contents.side_effect = lambda filename: filename == "Dockerfile" 586 | 587 | expected_result = yaml.load( 588 | b""" 589 | version: 2 590 | updates: 591 | - package-ecosystem: 'docker' 592 | directory: '/' 593 | schedule: 594 | interval: 'weekly' 595 | groups: 596 | 
production-dependencies: 597 | dependency-type: 'production' 598 | development-dependencies: 599 | dependency-type: 'development' 600 | """ 601 | ) 602 | result = build_dependabot_file(repo, True, [], {}, None, "weekly", "", [], None) 603 | self.assertEqual(result, expected_result) 604 | 605 | def test_build_dependabot_file_with_exempt_ecosystems(self): 606 | """Test that the dependabot.yml file is built correctly with exempted ecosystems""" 607 | repo = MagicMock() 608 | repo.file_contents.side_effect = lambda filename: filename == "Dockerfile" 609 | 610 | result = build_dependabot_file( 611 | repo, False, ["docker"], {}, None, "weekly", "", [], None 612 | ) 613 | self.assertIsNone(result) 614 | 615 | def test_build_dependabot_file_with_repo_specific_exempt_ecosystems(self): 616 | """Test that the dependabot.yml file is built correctly with exempted ecosystems""" 617 | repo = MagicMock() 618 | repo.full_name = "test/test" 619 | repo.file_contents.side_effect = lambda filename: filename == "Dockerfile" 620 | 621 | result = build_dependabot_file( 622 | repo, False, [], {"test/test": ["docker"]}, None, "weekly", "", [], None 623 | ) 624 | self.assertIsNone(result) 625 | 626 | def test_add_existing_ecosystem_to_exempt_list(self): 627 | """Test that existing ecosystems are added to the exempt list""" 628 | exempt_ecosystems = ["npm", "pip", "github-actions"] 629 | 630 | existing_config = { 631 | "updates": [ 632 | {"package-ecosystem": "npm"}, 633 | {"package-ecosystem": "pip"}, 634 | {"package-ecosystem": "bundler"}, 635 | ] 636 | } 637 | 638 | add_existing_ecosystem_to_exempt_list(exempt_ecosystems, existing_config) 639 | 640 | # Check new ecosystem is added to exempt list 641 | self.assertIn("bundler", exempt_ecosystems) 642 | # Keep existing ecosystems in exempt list 643 | for ecosystem in exempt_ecosystems: 644 | self.assertIn(ecosystem, exempt_ecosystems) 645 | 646 | def test_build_dependabot_file_for_multiple_repos_with_few_existing_config(self): 647 | """ 648 | 
Test the case where there are multiple repos with few existing dependabot config 649 | """ 650 | existing_config_repo = MagicMock() 651 | 652 | existing_config_repo.file_contents.side_effect = ( 653 | lambda f, filename="Gemfile": f == filename 654 | ) 655 | 656 | existing_config = MagicMock() 657 | existing_config.content = base64.b64encode( 658 | b""" 659 | version: 2 660 | updates: 661 | - package-ecosystem: 'bundler' 662 | directory: '/' 663 | schedule: 664 | interval: 'weekly' 665 | """ 666 | ) 667 | 668 | exempt_ecosystems = [] 669 | result = build_dependabot_file( 670 | existing_config_repo, 671 | False, 672 | exempt_ecosystems, 673 | {}, 674 | existing_config, 675 | "weekly", 676 | "", 677 | [], 678 | None, 679 | ) 680 | self.assertIsNone(result) 681 | 682 | no_existing_config_repo = MagicMock() 683 | filename_list = ["package.json", "package-lock.json", "yarn.lock"] 684 | for filename in filename_list: 685 | no_existing_config_repo.file_contents.side_effect = ( 686 | lambda f, filename=filename: f == filename 687 | ) 688 | yaml.preserve_quotes = True 689 | expected_result = yaml.load( 690 | b""" 691 | version: 2 692 | updates: 693 | - package-ecosystem: 'npm' 694 | directory: '/' 695 | schedule: 696 | interval: 'weekly' 697 | """ 698 | ) 699 | result = build_dependabot_file( 700 | no_existing_config_repo, 701 | False, 702 | exempt_ecosystems, 703 | {}, 704 | None, 705 | "weekly", 706 | "", 707 | [], 708 | None, 709 | ) 710 | self.assertEqual(result, expected_result) 711 | 712 | def test_check_multiple_repos_with_no_dependabot_config(self): 713 | """ 714 | Test the case where there is a single repo 715 | """ 716 | mock_repo_1 = MagicMock() 717 | mock_repo_1.file_contents.side_effect = lambda filename: filename == "go.mod" 718 | 719 | expected_result = yaml.load( 720 | b""" 721 | version: 2 722 | updates: 723 | - package-ecosystem: 'gomod' 724 | directory: '/' 725 | schedule: 726 | interval: 'weekly' 727 | """ 728 | ) 729 | exempt_ecosystems = [] 730 | 
result = build_dependabot_file( 731 | mock_repo_1, False, exempt_ecosystems, {}, None, "weekly", "", [], None 732 | ) 733 | self.assertEqual(result, expected_result) 734 | 735 | no_existing_config_repo = MagicMock() 736 | filename_list = ["package.json", "package-lock.json", "yarn.lock"] 737 | for filename in filename_list: 738 | no_existing_config_repo.file_contents.side_effect = ( 739 | lambda f, filename=filename: f == filename 740 | ) 741 | expected_result = yaml.load( 742 | b""" 743 | version: 2 744 | updates: 745 | - package-ecosystem: 'npm' 746 | directory: '/' 747 | schedule: 748 | interval: 'weekly' 749 | """ 750 | ) 751 | result = build_dependabot_file( 752 | no_existing_config_repo, 753 | False, 754 | exempt_ecosystems, 755 | {}, 756 | None, 757 | "weekly", 758 | "", 759 | [], 760 | None, 761 | ) 762 | self.assertEqual(result, expected_result) 763 | 764 | def test_build_dependabot_file_with_label(self): 765 | """Test that the dependabot.yml file is built correctly with one label set""" 766 | repo = MagicMock() 767 | filename_list = ["Gemfile", "Gemfile.lock"] 768 | 769 | for filename in filename_list: 770 | repo.file_contents.side_effect = lambda f, filename=filename: f == filename 771 | expected_result = yaml.load( 772 | b""" 773 | version: 2 774 | updates: 775 | - package-ecosystem: 'bundler' 776 | directory: '/' 777 | schedule: 778 | interval: 'weekly' 779 | labels: 780 | - "dependencies" 781 | """ 782 | ) 783 | result = build_dependabot_file( 784 | repo, False, [], {}, None, "weekly", "", ["dependencies"], None 785 | ) 786 | self.assertEqual(result, expected_result) 787 | 788 | def test_build_dependabot_file_with_labels(self): 789 | """Test that the dependabot.yml file is built correctly with multiple labels set""" 790 | repo = MagicMock() 791 | filename_list = ["Gemfile", "Gemfile.lock"] 792 | 793 | for filename in filename_list: 794 | repo.file_contents.side_effect = lambda f, filename=filename: f == filename 795 | expected_result = yaml.load( 796 
| b""" 797 | version: 2 798 | updates: 799 | - package-ecosystem: 'bundler' 800 | directory: '/' 801 | schedule: 802 | interval: 'weekly' 803 | labels: 804 | - "dependencies" 805 | - "test1" 806 | - "test2" 807 | """ 808 | ) 809 | result = build_dependabot_file( 810 | repo, 811 | False, 812 | [], 813 | {}, 814 | None, 815 | "weekly", 816 | "", 817 | ["dependencies", "test1", "test2"], 818 | None, 819 | ) 820 | self.assertEqual(result, expected_result) 821 | 822 | def test_build_dependabot_file_preserves_existing_registries(self): 823 | """Test that existing registries are preserved when adding new ecosystems""" 824 | repo = MagicMock() 825 | repo.file_contents.side_effect = lambda filename: filename == "Gemfile" 826 | 827 | # Create existing config with registries but no bundler ecosystem 828 | existing_config = MagicMock() 829 | existing_config.content = base64.b64encode( 830 | b""" 831 | version: 2 832 | registries: 833 | gradle-artifactory: 834 | type: maven-repository 835 | url: https://acme.jfrog.io/artifactory/my-gradle-registry 836 | username: octocat 837 | password: ${{secrets.MY_ARTIFACTORY_PASSWORD}} 838 | updates: 839 | - package-ecosystem: "npm" 840 | directory: "/" 841 | schedule: 842 | interval: "weekly" 843 | """ 844 | ) 845 | 846 | expected_result = yaml.load( 847 | b""" 848 | version: 2 849 | registries: 850 | gradle-artifactory: 851 | type: maven-repository 852 | url: https://acme.jfrog.io/artifactory/my-gradle-registry 853 | username: octocat 854 | password: ${{secrets.MY_ARTIFACTORY_PASSWORD}} 855 | updates: 856 | - package-ecosystem: "npm" 857 | directory: "/" 858 | schedule: 859 | interval: "weekly" 860 | - package-ecosystem: 'bundler' 861 | directory: '/' 862 | schedule: 863 | interval: 'weekly' 864 | """ 865 | ) 866 | 867 | result = build_dependabot_file( 868 | repo, False, [], {}, existing_config, "weekly", "", [], None 869 | ) 870 | self.assertEqual(result, expected_result) 871 | 872 | 873 | if __name__ == "__main__": 874 | unittest.main() 
# --- test_evergreen.py ---
"""Test the evergreen.py module."""

import unittest
import uuid
from unittest.mock import MagicMock, patch

import github3
import requests
from evergreen import (
    append_to_github_summary,
    check_existing_config,
    check_pending_issues_for_duplicates,
    check_pending_pulls_for_duplicates,
    commit_changes,
    enable_dependabot_security_updates,
    get_global_issue_id,
    get_global_pr_id,
    get_global_project_id,
    get_repos_iterator,
    is_dependabot_security_updates_enabled,
    is_repo_created_date_before,
    link_item_to_project,
)


class TestDependabotSecurityUpdates(unittest.TestCase):
    """Test the Dependabot security updates functions in evergreen.py"""

    def test_is_dependabot_security_updates_enabled(self):
        """
        Test the is_dependabot_security_updates_enabled function.

        This test checks if the is_dependabot_security_updates_enabled function correctly
        detects if Dependabot security updates are enabled.

        It mocks the requests.get method to simulate different scenarios.
        """
        owner = "my_owner"
        repo = "my_repo"
        access_token = "my_access_token"
        ghe = ""

        expected_url = (
            f"https://api.github.com/repos/{owner}/{repo}/automated-security-fixes"
        )
        expected_headers = {
            "Authorization": f"Bearer {access_token}",
            "Accept": "application/vnd.github.london-preview+json",
        }
        expected_response = {"enabled": True}

        with patch("requests.get") as mock_get:
            mock_get.return_value.status_code = 200
            mock_get.return_value.json.return_value = expected_response

            result = is_dependabot_security_updates_enabled(
                ghe, owner, repo, access_token
            )

            mock_get.assert_called_once_with(
                expected_url, headers=expected_headers, timeout=20
            )
            self.assertTrue(result)

    def test_is_dependabot_security_updates_disabled(self):
        """
        Test the is_dependabot_security_updates_enabled function when security updates are disabled.

        This test checks if the is_dependabot_security_updates_enabled function correctly
        detects if Dependabot security updates are disabled.

        It mocks the requests.get method to simulate different scenarios.
        """
        owner = "my_owner"
        repo = "my_repo"
        access_token = "my_access_token"
        ghe = ""

        expected_url = (
            f"https://api.github.com/repos/{owner}/{repo}/automated-security-fixes"
        )
        expected_headers = {
            "Authorization": f"Bearer {access_token}",
            "Accept": "application/vnd.github.london-preview+json",
        }

        with patch("requests.get") as mock_get:
            mock_get.return_value.status_code = 200
            mock_get.return_value.json.return_value = {"enabled": False}

            result = is_dependabot_security_updates_enabled(
                ghe, owner, repo, access_token
            )

            mock_get.assert_called_once_with(
                expected_url, headers=expected_headers, timeout=20
            )
            self.assertFalse(result)

    def test_is_dependabot_security_updates_not_found(self):
        """
        Test the is_dependabot_security_updates_enabled function when the endpoint is not found.

        This test checks if the is_dependabot_security_updates_enabled function correctly
        handles the case when the endpoint is not found.

        It mocks the requests.get method to simulate different scenarios.
        """
        owner = "my_owner"
        repo = "my_repo"
        access_token = "my_access_token"
        ghe = ""

        expected_url = (
            f"https://api.github.com/repos/{owner}/{repo}/automated-security-fixes"
        )
        expected_headers = {
            "Authorization": f"Bearer {access_token}",
            "Accept": "application/vnd.github.london-preview+json",
        }

        with patch("requests.get") as mock_get:
            mock_get.return_value.status_code = 404

            result = is_dependabot_security_updates_enabled(
                ghe, owner, repo, access_token
            )

            mock_get.assert_called_once_with(
                expected_url, headers=expected_headers, timeout=20
            )
            self.assertFalse(result)

    def test_enable_dependabot_security_updates(self):
        """
        Test the enable_dependabot_security_updates function.

        This test checks if the enable_dependabot_security_updates function successfully enables
        Dependabot security updates.

        It mocks the requests.put method to simulate different scenarios.
        """
        owner = "my_owner"
        repo = "my_repo"
        access_token = "my_access_token"
        ghe = ""

        expected_url = (
            f"https://api.github.com/repos/{owner}/{repo}/automated-security-fixes"
        )
        expected_headers = {
            "Authorization": f"Bearer {access_token}",
            "Accept": "application/vnd.github.london-preview+json",
        }

        with patch("requests.put") as mock_put:
            mock_put.return_value.status_code = 204

            with patch("builtins.print") as mock_print:
                enable_dependabot_security_updates(ghe, owner, repo, access_token)

                mock_put.assert_called_once_with(
                    expected_url, headers=expected_headers, timeout=20
                )
                mock_print.assert_called_once_with(
                    "\tDependabot security updates enabled successfully."
                )

    def test_enable_dependabot_security_updates_failed(self):
        """
        Test the enable_dependabot_security_updates function when enabling fails.

        This test checks if the enable_dependabot_security_updates function handles the case
        when enabling Dependabot security updates fails.

        It mocks the requests.put method to simulate different scenarios.
        """
        owner = "my_owner"
        repo = "my_repo"
        access_token = "my_access_token"
        ghe = ""

        expected_url = (
            f"https://api.github.com/repos/{owner}/{repo}/automated-security-fixes"
        )
        expected_headers = {
            "Authorization": f"Bearer {access_token}",
            "Accept": "application/vnd.github.london-preview+json",
        }

        with patch("requests.put") as mock_put:
            mock_put.return_value.status_code = 500

            with patch("builtins.print") as mock_print:
                enable_dependabot_security_updates(ghe, owner, repo, access_token)

                mock_put.assert_called_once_with(
                    expected_url, headers=expected_headers, timeout=20
                )
                mock_print.assert_called_once_with(
                    "\tFailed to enable Dependabot security updates."
                )


class TestCommitChanges(unittest.TestCase):
    """Test the commit_changes function in evergreen.py"""

    @patch("uuid.uuid4")
    def test_commit_changes(self, mock_uuid):
        """Test the commit_changes function."""
        mock_uuid.return_value = uuid.UUID(
            "12345678123456781234567812345678"
        )  # Mock UUID generation
        mock_repo = MagicMock()  # Mock repo object
        mock_repo.default_branch = "main"
        mock_repo.ref.return_value.object.sha = "abc123"  # Mock SHA for latest commit
        mock_repo.create_ref.return_value = True
        mock_repo.create_file.return_value = True
        mock_repo.create_pull.return_value = "MockPullRequest"
        dependabot_file_name = ".github/dependabot.yml"

        title = "Test Title"
        body = "Test Body"
        dependabot_file = 'dependencies:\n  - package_manager: "python"\n    directory: "/"\n    update_schedule: "live"'
        branch_name = "dependabot-12345678-1234-5678-1234-567812345678"
        commit_message = "Create " + dependabot_file_name
        result = commit_changes(
            title,
            body,
            mock_repo,
            dependabot_file,
            commit_message,
            dependabot_file_name,
        )

        # Assert that the methods were called with the correct arguments
        mock_repo.create_ref.assert_called_once_with(
            f"refs/heads/{branch_name}", "abc123"
        )
        mock_repo.create_file.assert_called_once_with(
            path=dependabot_file_name,
            message=commit_message,
            content=dependabot_file.encode(),
            branch=branch_name,
        )
        mock_repo.create_pull.assert_called_once_with(
            title=title,
            body=body,
            head=branch_name,
            base="main",
        )

        # Assert that the function returned the expected result
        self.assertEqual(result, "MockPullRequest")


class TestCheckPendingPullsForDuplicates(unittest.TestCase):
    """Test the check_pending_pulls_for_duplicates function."""

    def test_check_pending_pulls_for_duplicates_no_duplicates(self):
        """Test the check_pending_pulls_for_duplicates function where there are no duplicates to be found."""
        mock_repo = MagicMock()  # Mock repo object
        mock_pull_request = MagicMock()
        mock_pull_request.title = "not-dependabot-branch"
        mock_repo.pull_requests.return_value = [mock_pull_request]

        result = check_pending_pulls_for_duplicates("dependabot-branch", mock_repo)

        # Assert that the function returned the expected result
        self.assertFalse(result)

    def test_check_pending_pulls_for_duplicates_with_duplicates(self):
        """Test the check_pending_pulls_for_duplicates function where there are duplicates to be found."""
        mock_repo = MagicMock()  # Mock repo object
        mock_pull_request = MagicMock()
        mock_pull_request.title = "dependabot-branch"
        mock_repo.pull_requests.return_value = [mock_pull_request]

        result = check_pending_pulls_for_duplicates(mock_pull_request.title, mock_repo)

        # Assert that the function returned the expected result
        self.assertTrue(result)


class TestCheckPendingIssuesForDuplicates(unittest.TestCase):
    """Test the check_pending_issues_for_duplicates function."""

    def test_check_pending_issues_for_duplicates_no_duplicates(self):
        """Test the check_pending_issues_for_duplicates function where there are no duplicates to be found."""
        mock_issue = MagicMock()
        mock_issue.title = "Other Issue"
        mock_issue.issues.return_value = [mock_issue]

        result = check_pending_issues_for_duplicates("Enable Dependabot", mock_issue)

        mock_issue.issues.assert_called_once_with(state="open")

        # Assert that the function returned the expected result
        self.assertFalse(result)

    def test_check_pending_issues_for_duplicates_with_duplicates(self):
        """Test the check_pending_issues_for_duplicates function where there are duplicates to be found."""
        mock_issue = MagicMock()
        mock_issue.title = "Enable Dependabot"
        mock_issue.issues.return_value = [mock_issue]

        result = check_pending_issues_for_duplicates("Enable Dependabot", mock_issue)

        mock_issue.issues.assert_called_once_with(state="open")

        # Assert that the function returned the expected result
        self.assertTrue(result)


class TestGetReposIterator(unittest.TestCase):
    """Test the get_repos_iterator function in evergreen.py"""

    @patch("github3.login")
    def test_get_repos_iterator_with_organization(self, mock_github):
        """Test the get_repos_iterator function with an organization"""
        organization = "my_organization"
        repository_list = []
        search_query = ""
        github_connection = mock_github.return_value

        mock_organization = MagicMock()
        mock_repositories = MagicMock()
        mock_organization.repositories.return_value = mock_repositories
        github_connection.organization.return_value = mock_organization

        result = get_repos_iterator(
            organization, None, repository_list, search_query, github_connection
        )

        # Assert that the organization method was called with the correct argument
        github_connection.organization.assert_called_once_with(organization)

        # Assert that the repositories method was called on the organization object
        mock_organization.repositories.assert_called_once()

        # Assert that the function returned the expected result
        self.assertEqual(result, mock_repositories)

    @patch("github3.login")
    def test_get_repos_iterator_with_repository_list(self, mock_github):
        """Test the get_repos_iterator function with a repository list"""
        organization = None
        repository_list = ["org/repo1", "org/repo2"]
        search_query = ""
        github_connection = mock_github.return_value

mock_repository = MagicMock() 353 | mock_repository_list = [mock_repository, mock_repository] 354 | github_connection.repository.side_effect = mock_repository_list 355 | 356 | result = get_repos_iterator( 357 | organization, None, repository_list, search_query, github_connection 358 | ) 359 | 360 | # Assert that the repository method was called with the correct arguments for each repository in the list 361 | expected_calls = [ 362 | unittest.mock.call("org", "repo1"), 363 | unittest.mock.call("org", "repo2"), 364 | ] 365 | github_connection.repository.assert_has_calls(expected_calls) 366 | 367 | # Assert that the function returned the expected result 368 | self.assertEqual(result, mock_repository_list) 369 | 370 | @patch("github3.login") 371 | def test_get_repos_iterator_with_team(self, mock_github): 372 | """Test the get_repos_iterator function with a team""" 373 | organization = "my_organization" 374 | repository_list = [] 375 | team_name = "my_team" 376 | search_query = "" 377 | github_connection = mock_github.return_value 378 | 379 | mock_team_repositories = MagicMock() 380 | github_connection.organization.return_value.team_by_name.return_value.repositories.return_value = ( 381 | mock_team_repositories 382 | ) 383 | 384 | result = get_repos_iterator( 385 | organization, 386 | team_name, 387 | repository_list, 388 | search_query, 389 | github_connection, 390 | ) 391 | 392 | # Assert that the organization method was called with the correct argument 393 | github_connection.organization.assert_called_once_with(organization) 394 | 395 | # Assert that the team_by_name method was called on the organization object 396 | github_connection.organization.return_value.team_by_name.assert_called_once_with( 397 | team_name 398 | ) 399 | 400 | # Assert that the repositories method was called on the team object 401 | github_connection.organization.return_value.team_by_name.return_value.repositories.assert_called_once() 402 | 403 | # Assert that the function returned the 
expected result 404 | self.assertEqual(result, mock_team_repositories) 405 | 406 | @patch("github3.login") 407 | def test_get_repos_iterator_with_search_query(self, mock_github): 408 | """Test the get_repos_iterator function with a search query""" 409 | organization = "my_organization" 410 | repository_list = [] 411 | team_name = None 412 | search_query = "org:my-org is:repository archived:false" 413 | github_connection = mock_github.return_value 414 | repo1 = MagicMock() 415 | repo2 = MagicMock() 416 | 417 | # Mock the search_repositories method to return an iterator of repositories 418 | github_connection.search_repositories.return_value = [repo1, repo2] 419 | 420 | get_repos_iterator( 421 | organization, 422 | team_name, 423 | repository_list, 424 | search_query, 425 | github_connection, 426 | ) 427 | 428 | # Assert that the search_repositories method was called with the correct argument 429 | github_connection.search_repositories.assert_called_with(search_query) 430 | 431 | 432 | class TestGetGlobalProjectId(unittest.TestCase): 433 | """Test the get_global_project_id function in evergreen.py""" 434 | 435 | @patch("requests.post") 436 | def test_get_global_project_id_success(self, mock_post): 437 | """Test the get_global_project_id function when the request is successful.""" 438 | token = "my_token" 439 | organization = "my_organization" 440 | number = 123 441 | ghe = "" 442 | 443 | expected_url = "https://api.github.com/graphql" 444 | expected_headers = {"Authorization": f"Bearer {token}"} 445 | expected_data = { 446 | "query": f'query{{organization(login: "{organization}") {{projectV2(number: {number}){{id}}}}}}' 447 | } 448 | expected_response = { 449 | "data": {"organization": {"projectV2": {"id": "my_project_id"}}} 450 | } 451 | 452 | mock_post.return_value.status_code = 200 453 | mock_post.return_value.json.return_value = expected_response 454 | 455 | result = get_global_project_id(ghe, token, organization, number) 456 | 457 | 
mock_post.assert_called_once_with( 458 | expected_url, headers=expected_headers, json=expected_data, timeout=20 459 | ) 460 | self.assertEqual(result, "my_project_id") 461 | 462 | @patch("requests.post") 463 | def test_get_global_project_id_request_failed(self, mock_post): 464 | """Test the get_global_project_id function when the request fails.""" 465 | token = "my_token" 466 | organization = "my_organization" 467 | number = 123 468 | ghe = "" 469 | 470 | expected_url = "https://api.github.com/graphql" 471 | expected_headers = {"Authorization": f"Bearer {token}"} 472 | expected_data = { 473 | "query": f'query{{organization(login: "{organization}") {{projectV2(number: {number}){{id}}}}}}' 474 | } 475 | 476 | mock_post.side_effect = requests.exceptions.RequestException("Request failed") 477 | 478 | with patch("builtins.print") as mock_print: 479 | result = get_global_project_id(ghe, token, organization, number) 480 | 481 | mock_post.assert_called_once_with( 482 | expected_url, headers=expected_headers, json=expected_data, timeout=20 483 | ) 484 | mock_print.assert_called_once_with("Request failed: Request failed") 485 | self.assertIsNone(result) 486 | 487 | @patch("requests.post") 488 | def test_get_global_project_id_parse_response_failed(self, mock_post): 489 | """Test the get_global_project_id function when parsing the response fails.""" 490 | token = "my_token" 491 | organization = "my_organization" 492 | number = 123 493 | ghe = "" 494 | 495 | expected_url = "https://api.github.com/graphql" 496 | expected_headers = {"Authorization": f"Bearer {token}"} 497 | expected_data = { 498 | "query": f'query{{organization(login: "{organization}") {{projectV2(number: {number}){{id}}}}}}' 499 | } 500 | expected_response = {"data": {"organization": {"projectV2": {}}}} 501 | 502 | mock_post.return_value.status_code = 200 503 | mock_post.return_value.json.return_value = expected_response 504 | 505 | with patch("builtins.print") as mock_print: 506 | result = 
get_global_project_id(ghe, token, organization, number) 507 | 508 | mock_post.assert_called_once_with( 509 | expected_url, headers=expected_headers, json=expected_data, timeout=20 510 | ) 511 | mock_print.assert_called_once_with("Failed to parse response: 'id'") 512 | self.assertIsNone(result) 513 | 514 | 515 | class TestGetGlobalIssueId(unittest.TestCase): 516 | """Test the get_global_issue_id function in evergreen.py""" 517 | 518 | @patch("requests.post") 519 | def test_get_global_issue_id_success(self, mock_post): 520 | """Test the get_global_issue_id function for a successful request""" 521 | token = "my_token" 522 | organization = "my_organization" 523 | repository = "my_repository" 524 | issue_number = 123 525 | ghe = "" 526 | 527 | expected_response = {"data": {"repository": {"issue": {"id": "1234567890"}}}} 528 | 529 | mock_post.return_value.status_code = 200 530 | mock_post.return_value.json.return_value = expected_response 531 | 532 | result = get_global_issue_id(ghe, token, organization, repository, issue_number) 533 | 534 | mock_post.assert_called_once() 535 | self.assertEqual(result, "1234567890") 536 | 537 | @patch("requests.post") 538 | def test_get_global_issue_id_request_failed(self, mock_post): 539 | """Test the get_global_issue_id function when the request fails""" 540 | token = "my_token" 541 | organization = "my_organization" 542 | repository = "my_repository" 543 | issue_number = 123 544 | ghe = "" 545 | 546 | mock_post.side_effect = requests.exceptions.RequestException("Request failed") 547 | 548 | result = get_global_issue_id(ghe, token, organization, repository, issue_number) 549 | 550 | mock_post.assert_called_once() 551 | self.assertIsNone(result) 552 | 553 | @patch("requests.post") 554 | def test_get_global_issue_id_parse_response_failed(self, mock_post): 555 | """Test the get_global_issue_id function when parsing the response fails""" 556 | token = "my_token" 557 | organization = "my_organization" 558 | repository = "my_repository" 559 
| issue_number = 123 560 | ghe = "" 561 | 562 | expected_response = {"data": {"repository": {"issue": {}}}} 563 | 564 | mock_post.return_value.status_code = 200 565 | mock_post.return_value.json.return_value = expected_response 566 | 567 | result = get_global_issue_id(ghe, token, organization, repository, issue_number) 568 | 569 | mock_post.assert_called_once() 570 | self.assertIsNone(result) 571 | 572 | 573 | class TestGetGlobalPullRequestID(unittest.TestCase): 574 | """Test the get_global_pr_id function in evergreen.py""" 575 | 576 | @patch("requests.post") 577 | def test_get_global_pr_id_success(self, mock_post): 578 | """Test the get_global_pr_id function when the request is successful.""" 579 | # Mock the response from requests.post 580 | mock_response = MagicMock() 581 | mock_response.raise_for_status.return_value = None 582 | mock_response.json.return_value = { 583 | "data": {"repository": {"pullRequest": {"id": "test_id"}}} 584 | } 585 | mock_post.return_value = mock_response 586 | 587 | # Call the function with test data 588 | result = get_global_pr_id("", "test_token", "test_org", "test_repo", 1) 589 | 590 | # Check that the result is as expected 591 | self.assertEqual(result, "test_id") 592 | 593 | @patch("requests.post") 594 | def test_get_global_pr_id_request_exception(self, mock_post): 595 | """Test the get_global_pr_id function when the request fails.""" 596 | # Mock requests.post to raise a RequestException 597 | mock_post.side_effect = requests.exceptions.RequestException 598 | 599 | # Call the function with test data 600 | result = get_global_pr_id("", "test_token", "test_org", "test_repo", 1) 601 | 602 | # Check that the result is None 603 | self.assertIsNone(result) 604 | 605 | @patch("requests.post") 606 | def test_get_global_pr_id_key_error(self, mock_post): 607 | """Test the get_global_pr_id function when the response cannot be parsed.""" 608 | # Mock the response from requests.post 609 | mock_response = MagicMock() 610 | 
mock_response.raise_for_status.return_value = None 611 | mock_response.json.return_value = {} 612 | mock_post.return_value = mock_response 613 | 614 | # Call the function with test data 615 | result = get_global_pr_id("", "test_token", "test_org", "test_repo", 1) 616 | 617 | # Check that the result is None 618 | self.assertIsNone(result) 619 | 620 | 621 | class TestLinkItemToProject(unittest.TestCase): 622 | """Test the link_item_to_project function in evergreen.py""" 623 | 624 | @patch("requests.post") 625 | def test_link_item_to_project_success(self, mock_post): 626 | """Test linking an item to a project successfully.""" 627 | token = "my_token" 628 | project_id = "my_project_id" 629 | item_id = "my_item_id" 630 | ghe = "" 631 | 632 | expected_url = "https://api.github.com/graphql" 633 | expected_headers = {"Authorization": f"Bearer {token}"} 634 | expected_data = { 635 | "query": f'mutation {{addProjectV2ItemById(input: {{projectId: "{project_id}", contentId: "{item_id}"}}) {{item {{id}}}}}}' 636 | } 637 | 638 | mock_response = MagicMock() 639 | mock_response.status_code = 200 640 | mock_post.return_value = mock_response 641 | 642 | result = link_item_to_project(ghe, token, project_id, item_id) 643 | 644 | mock_post.assert_called_once_with( 645 | expected_url, headers=expected_headers, json=expected_data, timeout=20 646 | ) 647 | mock_response.raise_for_status.assert_called_once() 648 | 649 | # Assert that the function returned None 650 | self.assertIsNotNone(result) 651 | 652 | @patch("requests.post") 653 | def test_link_item_to_project_request_exception(self, mock_post): 654 | """Test handling a requests exception when linking an item to a project.""" 655 | token = "my_token" 656 | project_id = "my_project_id" 657 | item_id = "my_item_id" 658 | ghe = "" 659 | 660 | expected_url = "https://api.github.com/graphql" 661 | expected_headers = {"Authorization": f"Bearer {token}"} 662 | expected_data = { 663 | "query": f'mutation {{addProjectV2ItemById(input: 
{{projectId: "{project_id}", contentId: "{item_id}"}}) {{item {{id}}}}}}' 664 | } 665 | 666 | mock_post.side_effect = requests.exceptions.RequestException("Request failed") 667 | 668 | with patch("builtins.print") as mock_print: 669 | result = link_item_to_project(ghe, token, project_id, item_id) 670 | 671 | mock_post.assert_called_once_with( 672 | expected_url, headers=expected_headers, json=expected_data, timeout=20 673 | ) 674 | mock_print.assert_called_once_with("Request failed: Request failed") 675 | 676 | # Assert that the function returned None 677 | self.assertIsNone(result) 678 | 679 | 680 | class TestIsRepoCreateDateBeforeCreatedAfterDate(unittest.TestCase): 681 | """Test the is_repo_create_date_before_created_after_date function in evergreen.py""" 682 | 683 | def test_is_repo_create_date_before_created_after_date(self): 684 | """Test the repo.created_at date is before created_after_date and has timezone.""" 685 | repo_created_at = "2020-01-01T05:00:00Z" 686 | created_after_date = "2021-01-01" 687 | 688 | result = is_repo_created_date_before(repo_created_at, created_after_date) 689 | 690 | self.assertTrue(result) 691 | 692 | def test_is_repo_create_date_is_after_created_after_date(self): 693 | """Test the repo.created_at date is after created_after_date and has timezone.""" 694 | repo_created_at = "2022-01-01T05:00:00Z" 695 | created_after_date = "2021-01-01" 696 | 697 | result = is_repo_created_date_before(repo_created_at, created_after_date) 698 | 699 | self.assertFalse(result) 700 | 701 | def test_is_repo_created_date_has_no_time_zone(self): 702 | """Test the repo.created_at date is before created_after_date with no timezone.""" 703 | repo_created_at = "2020-01-01" 704 | created_after_date = "2021-01-01" 705 | 706 | result = is_repo_created_date_before(repo_created_at, created_after_date) 707 | 708 | self.assertTrue(result) 709 | 710 | def test_is_created_after_date_is_empty_string(self): 711 | """Test the repo.created_at date is after 
created_after_date.""" 712 | repo_created_at = "2020-01-01" 713 | created_after_date = "" 714 | 715 | result = is_repo_created_date_before(repo_created_at, created_after_date) 716 | 717 | self.assertFalse(result) 718 | 719 | def test_is_repo_created_date_is_before_created_after_date_without_timezone_again( 720 | self, 721 | ): 722 | """Test the repo.created_at date is before created_after_date without timezone again.""" 723 | repo_created_at = "2018-01-01" 724 | created_after_date = "2020-01-01" 725 | 726 | result = is_repo_created_date_before(repo_created_at, created_after_date) 727 | 728 | self.assertTrue(result) 729 | 730 | def test_is_repo_created_date_and_created_after_date_is_not_a_date(self): 731 | """Test the repo.created_at date and the created_after_date argument is not a date.""" 732 | repo_created_at = "2018-01-01" 733 | created_after_date = "Not a date" 734 | 735 | with self.assertRaises(ValueError): 736 | is_repo_created_date_before(repo_created_at, created_after_date) 737 | 738 | 739 | class TestCheckExistingConfig(unittest.TestCase): 740 | """ 741 | Test cases for the check_existing_config function 742 | """ 743 | 744 | def test_check_existing_config_with_existing_config(self): 745 | """ 746 | Test the case where there is an existing configuration 747 | """ 748 | mock_repo = MagicMock() 749 | filename = "dependabot.yaml" 750 | mock_repo.file_contents.return_value.size = 5 751 | 752 | result = check_existing_config(mock_repo, filename) 753 | 754 | self.assertIsNotNone(result) 755 | 756 | def test_check_existing_config_without_existing_config(self): 757 | """ 758 | Test the case where there is no existing configuration 759 | """ 760 | mock_repo = MagicMock() 761 | mock_response = MagicMock() 762 | mock_repo.file_contents.side_effect = github3.exceptions.NotFoundError( 763 | mock_response 764 | ) 765 | 766 | result = check_existing_config(mock_repo, "dependabot.yml") 767 | 768 | self.assertIsNone(result) 769 | 770 | 771 | class 
TestAppendToGithubSummary(unittest.TestCase): 772 | """Test the append_to_github_summary function in evergreen.py""" 773 | 774 | @patch("builtins.open", new_callable=unittest.mock.mock_open) 775 | def test_append_to_github_summary_with_file(self, mock_file): 776 | """Test that content is appended to the specified summary file.""" 777 | content = "Test summary content" 778 | summary_file = "summary.md" 779 | 780 | append_to_github_summary(content, summary_file) 781 | 782 | mock_file.assert_called_once_with(summary_file, "a", encoding="utf-8") 783 | mock_file().write.assert_called_once_with(content + "\n") 784 | 785 | @patch("builtins.open", new_callable=unittest.mock.mock_open) 786 | def test_append_to_github_summary_without_summary_file(self, mock_file): 787 | """Test that content is not written when summary_file is None or empty.""" 788 | content = "Test summary content" 789 | summary_file = "" 790 | 791 | append_to_github_summary(content, summary_file) 792 | 793 | mock_file.assert_not_called() 794 | 795 | @patch("builtins.open", new_callable=unittest.mock.mock_open) 796 | def test_append_to_github_summary_with_default_file(self, mock_file): 797 | """Test that content is appended to the default summary file when summary_file is not provided.""" 798 | content = "Test summary content" 799 | 800 | append_to_github_summary(content) 801 | 802 | mock_file.assert_called_once_with("summary.md", "a", encoding="utf-8") 803 | mock_file().write.assert_called_once_with(content + "\n") 804 | 805 | 806 | if __name__ == "__main__": 807 | unittest.main() 808 | --------------------------------------------------------------------------------