├── .cirrus.yml ├── .github ├── renovate.json5 └── workflows │ ├── action_helper_test.yml │ ├── cirrus-ci_retrospective.yml │ ├── release.yml │ └── ubuntu_unit_tests.yml ├── .gitignore ├── CODE-OF-CONDUCT.md ├── LICENSE ├── README.md ├── SECURITY.md ├── bin ├── install_automation.sh └── run_all_tests.sh ├── build-push ├── .install.sh ├── README.md ├── bin │ └── build-push.sh └── test │ ├── fake_buildah.sh │ ├── qemusetup.sh │ ├── run_all_tests.sh │ ├── test_context │ └── Containerfile │ ├── testbin-build-push.sh │ ├── testbuilds.sh │ └── testlib.sh ├── ci └── Dockerfile ├── cirrus-ci_artifacts ├── .gitignore ├── .install.sh ├── README.md ├── cirrus-ci_artifacts ├── cirrus-ci_artifacts.py ├── requirements.txt └── test │ ├── ccia.py │ ├── run_all_tests.sh │ └── test_cirrus-ci_artifacts.py ├── cirrus-ci_env ├── .install.sh ├── cirrus-ci_env.py └── test │ ├── actual_cirrus.yml │ ├── actual_task_names.txt │ ├── expected_cirrus.yml │ ├── expected_ti.yml │ ├── run_all_tests.sh │ ├── test_cirrus-ci_env.py │ ├── testbin-cirrus-ci_env-installer.sh │ ├── testbin-cirrus-ci_env.sh │ └── testlib.sh ├── cirrus-ci_retrospective ├── .install.sh ├── Dockerfile ├── README.md ├── bin │ ├── cirrus-ci_retrospective.sh │ └── debug.sh ├── lib │ ├── ccir_common.sh │ └── cirrus-ci_retrospective.sh └── test │ ├── run_all_tests.sh │ ├── testbin-cirrus-ci_retrospective-installer.sh │ ├── testbin-cirrus-ci_retrospective.sh │ ├── testlib-cirrus-ci_retrospective.sh │ └── testlib.sh ├── cirrus-task-map ├── cirrus-task-map └── test │ ├── cirrus-task-map.t │ └── run_all_tests.sh ├── common ├── README.md ├── bin │ ├── ooe.sh │ └── xrtry.sh ├── lib │ ├── anchors.sh │ ├── common_lib.sh │ ├── console_output.sh │ ├── defaults.sh │ ├── platform.sh │ └── utils.sh └── test │ ├── console_output_test_helper.sh │ ├── run_all_tests.sh │ ├── testbin-install_automation.sh │ ├── testlib-anchors.sh │ ├── testlib-console_output.sh │ ├── testlib-defaults.sh │ ├── testlib-platform.sh │ ├── testlib-utils.sh │ └── testlib.sh ├── default.json ├── github ├── .install.sh ├── README.md ├── lib │ ├── github.sh │ └── github_common.sh └── test │ ├── README.md │ ├── run_action_tests.sh │ ├── testlib-github.sh │ ├── testlib-github_common.sh │ └── testlib.sh ├── mac_pw_pool ├── .gitignore ├── AllocateTestDH.sh ├── Cron.sh ├── InstanceSSH.sh ├── LaunchInstances.sh ├── README.md ├── SetupInstances.sh ├── Utilization.gnuplot ├── ci_env.sh ├── html │ └── index.html ├── nightly_maintenance.sh ├── pw_lib.sh ├── service_pool.sh ├── setup.sh └── shutdown.sh └── renovate └── defaults.json5 /.cirrus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Ref: https://cirrus-ci.org/guide/writing-tasks/ 4 | 5 | # Global environment variables 6 | env: 7 | # Name of the typical destination branch for PRs. 
8 | DEST_BRANCH: "main" 9 | 10 | # Execute all unit-tests in the repo 11 | cirrus-ci/unit-test_task: 12 | only_if: &not_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' 13 | # Default task runtime environment 14 | container: &ci_container 15 | dockerfile: ci/Dockerfile 16 | cpu: 1 17 | memory: 1 18 | env: 19 | CIRRUS_CLONE_DEPTH: 0 20 | script: 21 | - git fetch --tags |& tee /tmp/test_output.log 22 | - $CIRRUS_WORKING_DIR/bin/run_all_tests.sh |& tee -a $CIRRUS_WORKING_DIR/output.log 23 | always: 24 | test_output_artifacts: 25 | path: '*.log' 26 | 27 | cirrus-ci/renovate_validation_task: 28 | only_if: *not_docs 29 | container: 30 | image: "ghcr.io/renovatebot/renovate:latest" 31 | preset_validate_script: 32 | - renovate-config-validator $CIRRUS_WORKING_DIR/renovate/defaults.json5 33 | repo_validate_script: 34 | - renovate-config-validator $CIRRUS_WORKING_DIR/.github/renovate.json5 35 | 36 | # This is the same setup as used for Buildah CI 37 | gcp_credentials: ENCRYPTED[fc95bcc9f4506a3b0d05537b53b182e104d4d3979eedbf41cf54205be6397ca0bce0831d0d47580cf578dae5776548a5] 38 | 39 | cirrus-ci/build-push_test_task: 40 | only_if: *not_docs 41 | container: *ci_container 42 | depends_on: 43 | - cirrus-ci/unit-test 44 | gce_instance: 45 | cpu: 2 46 | memory: "4Gb" 47 | disk: 200 # Gigabytes, do not set less as per gcloud warning message 48 | # re: I/O performance 49 | # This repo. is subsequently used in and for building custom VM images 50 | # in containers/automation_images. Avoid circular dependencies by using 51 | # only stock, google-managed generic image. This also avoids needing to 52 | # update custom-image last-used timestamps. 53 | image_project: centos-cloud 54 | image_family: centos-stream-9 55 | timeout_in: 30 56 | env: 57 | CIMG: quay.io/buildah/stable:latest 58 | TEST_FQIN: quay.io/buildah/do_not_use 59 | # Robot account credentials for test-push to 60 | # $TEST_FQIN registry by build-push/test/testbuilds.sh 61 | BUILDAH_USERNAME: ENCRYPTED[53fd8becb599dda19f335d65cb067c46da3f0907eb83281a10554def11efc89925f7ca145ba7436afc3c32d936575142] 62 | BUILDAH_PASSWORD: ENCRYPTED[aa6352251eba46e389e4cfc6e93eee3852008ecff67b940cba9197fd8bf95de15d498a6df2e7d5edef052e97d9b93bf0] 63 | setup_script: 64 | - dnf install -y podman 65 | - bash build-push/test/qemusetup.sh 66 | - >- 67 | podman run --detach --name=buildah 68 | --net=host --ipc=host --pid=host 69 | --cgroupns=host --privileged 70 | --security-opt label=disable 71 | --security-opt seccomp=unconfined 72 | --device /dev/fuse:rw 73 | -v $PWD:$PWD:Z -w $PWD 74 | -e BUILD_PUSH_TEST_BUILDS=true 75 | -e CIRRUS_CI -e TEST_FQIN 76 | -e BUILDAH_USERNAME -e BUILDAH_PASSWORD 77 | $CIMG 78 | sh -c 'while true ;do sleep 2h ; done' 79 | - podman exec -i buildah dnf install -y jq skopeo 80 | test_script: 81 | - podman exec -i buildah ./build-push/test/run_all_tests.sh 82 | 83 | 84 | # Represent primary Cirrus-CI based testing (Required for merge) 85 | cirrus-ci/success_task: 86 | container: *ci_container 87 | depends_on: &everything 88 | - cirrus-ci/unit-test 89 | - cirrus-ci/build-push_test 90 | - cirrus-ci/renovate_validation 91 | clone_script: mkdir -p "$CIRRUS_WORKING_DIR" 92 | script: >- 93 | echo "Required for Action Workflow: https://github.com/${CIRRUS_REPO_FULL_NAME}/actions/runs/${GITHUB_CHECK_SUITE_ID}" 94 | 95 | 96 | # Represent secondary Github Action based testing (Required for merge) 97 | # N/B: NO other task should depend on this task. Doing so will prevent 98 | # the cirrus-ci_retrospective github action.
This is because the 99 | # action triggers `on: check-suite: completed` event, which cannot 100 | # fire since the manual task has dependencies that cannot be 101 | # satisfied. 102 | github-actions/success_task: 103 | container: *ci_container 104 | # Note: ***DO NOT*** manually trigger this task under normal circumstances. 105 | # It is triggered automatically by the cirrus-ci_retrospective 106 | # Github Action. This action is responsible for testing the PR changes 107 | # to the action itself. 108 | trigger_type: manual 109 | # Only required for PRs, never tag or branch testing 110 | only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && $CIRRUS_PR != '' 111 | depends_on: *everything 112 | clone_script: mkdir -p "$CIRRUS_WORKING_DIR" 113 | script: >- 114 | echo "Triggered by Github Action Workflow: https://github.com/${CIRRUS_REPO_FULL_NAME}/actions/runs/${GITHUB_CHECK_SUITE_ID}" 115 | -------------------------------------------------------------------------------- /.github/renovate.json5: -------------------------------------------------------------------------------- 1 | /* 2 | Renovate is a service similar to GitHub Dependabot, but with 3 | (fantastically) more configuration options. So many options 4 | in fact, that if you're new I recommend skimming this cheat-sheet 5 | prior to the official documentation: 6 | 7 | https://www.augmentedmind.de/2021/07/25/renovate-bot-cheat-sheet 8 | 9 | Configuration Update/Change Procedure: 10 | 1. Make changes 11 | 2. Manually validate changes (from repo-root): 12 | 13 | podman run -it \ 14 | -v ./.github/renovate.json5:/usr/src/app/renovate.json5:z \ 15 | ghcr.io/renovatebot/renovate:latest \ 16 | renovate-config-validator 17 | 3. Commit. 18 | 19 | Configuration Reference: 20 | https://docs.renovatebot.com/configuration-options/ 21 | 22 | Monitoring Dashboard: 23 | https://app.renovatebot.com/dashboard#github/containers 24 | 25 | Note: The Renovate bot will create/manage its business on 26 | branches named 'renovate/*'. Otherwise, and by 27 | default, the only copy of this file that matters 28 | is the one on the `main` branch. No other branches 29 | will be monitored or touched in any way. 30 | */ 31 | 32 | { 33 | /************************************************* 34 | ****** Global/general configuration options ***** 35 | *************************************************/ 36 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 37 | // Re-use predefined sets of configuration options to DRY 38 | "extends": [ 39 | // https://github.com/containers/automation/blob/main/renovate/defaults.json5 40 | "github>containers/automation//renovate/defaults.json5" 41 | ], 42 | /************************************************* 43 | *** Repository-specific configuration options *** 44 | *************************************************/ 45 | } 46 | -------------------------------------------------------------------------------- /.github/workflows/action_helper_test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Perform unit-testing of the helper scripts used by github actions workflows 4 | 5 | on: [push, pull_request] 6 | 7 | # Variables required by multiple jobs/steps 8 | env: 9 | # Authoritative Cirrus-CI task to monitor for completion info of all other cirrus-ci tasks.
10 | MONITOR_TASK: 'MONITOR/TEST/VALUE' 11 | # Authoritative Github Action task (in cirrus-ci) to trigger / check for completion of _this_ workflow 12 | ACTION_TASK: 'ACTION/TEST/VALUE' 13 | HELPER_LIB_TEST: 'github/test/run_action_tests.sh' 14 | # Enables debugging of github actions itself 15 | # (see https://help.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-a-debug-message) 16 | ACTIONS_STEP_DEBUG: '${{ secrets.ACTIONS_STEP_DEBUG }}' 17 | 18 | jobs: 19 | helper_unit-test: 20 | runs-on: ubuntu-latest 21 | steps: 22 | - name: Clone the repository code 23 | uses: actions/checkout@v4 24 | with: 25 | persist-credentials: false 26 | path: ./ 27 | 28 | - name: Execute helper library unit-tests using code from PR 29 | run: | 30 | ./$HELPER_LIB_TEST 31 | 32 | event-debug: 33 | runs-on: ubuntu-latest 34 | steps: 35 | - name: Collect the originating event and result JSON 36 | run: cp "${{ github.event_path }}" ./ 37 | 38 | - name: Log colorized and formatted event JSON 39 | run: jq --indent 4 --color-output . ./event.json 40 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | on: 4 | push: 5 | # ref: https://help.github.com/en/actions/reference/events-that-trigger-workflows#example-using-multiple-events-with-activity-types-or-configuration 6 | tags: 7 | - 'v*' 8 | env: 9 | # Authoritative Cirrus-CI task to monitor for completion info of all other cirrus-ci tasks. 10 | MONITOR_TASK: 'MONITOR/TEST/VALUE' 11 | # Authoritative Github Action task (in cirrus-ci) to trigger / check for completion of _this_ workflow 12 | ACTION_TASK: 'ACTION/TEST/VALUE' 13 | HELPER_LIB_TEST: 'github/test/run_action_tests.sh' 14 | 15 | jobs: 16 | smoke: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - name: Confirm privileged registry access 20 | env: 21 | DOCKER_CONFIG_JSON: ${{secrets.DOCKER_CONFIG_JSON}} 22 | run: | 23 | set +x 24 | trap "history -c" EXIT 25 | if [[ -z "$DOCKER_CONFIG_JSON" ]]; then 26 | echo "::error::Empty/unset \$DOCKER_CONFIG_JSON for quay.io/libpod write access" 27 | exit 1 28 | fi 29 | 30 | unit-tests: # N/B: Duplicates `ubuntu_unit_tests.yml` - templating not supported 31 | runs-on: ubuntu-24.04 32 | steps: 33 | - uses: actions/checkout@v4 34 | with: 35 | # Testing installer requires a full repo. history 36 | fetch-depth: 0 37 | persist-credentials: false 38 | path: ./ 39 | 40 | - name: Install dependencies 41 | run: | 42 | sudo apt-get -qq update 43 | sudo apt-get -qq -y install libtest-differences-perl libyaml-libyaml-perl 44 | 45 | - name: Execute helper library unit-tests using code from PR 46 | run: | 47 | $GITHUB_WORKSPACE/$HELPER_LIB_TEST 48 | 49 | - name: Fetch all repository tags 50 | run: git fetch --tags --force 51 | 52 | - name: Execute all unit-tests 53 | run: $GITHUB_WORKSPACE/bin/run_all_tests.sh 54 | 55 | release: 56 | needs: 57 | - unit-tests 58 | - smoke 59 | 60 | # Don't blindly trust the 'v*' push event filter. 61 | if: startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '.') 62 | runs-on: ubuntu-latest 63 | steps: 64 | # At the time of this comment, there is NO other source or method for 65 | # obtaining the pushed tag from any function, env. var., event JSON, or 66 | # context data. 
67 | - id: get_tag 68 | name: Retrieve the tag name 69 | run: printf "TAG_NAME=%s\n" $(basename "$GITHUB_REF") >> $GITHUB_OUTPUT 70 | 71 | - id: create_release # Pre-req for upload-release-asset below 72 | name: Create a new Github Release item for tag 73 | uses: actions/create-release@v1.1.4 74 | env: 75 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 76 | with: 77 | tag_name: ${{ steps.get_tag.outputs.TAG_NAME }} 78 | release_name: ${{ steps.get_tag.outputs.TAG_NAME }} 79 | 80 | - uses: actions/checkout@v4 81 | with: 82 | fetch-depth: 0 83 | path: ./ 84 | 85 | - uses: actions/upload-release-asset@v1.0.2 86 | name: Upload the install script as the release artifact 87 | env: 88 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 89 | with: 90 | upload_url: ${{ steps.create_release.outputs.upload_url }} 91 | asset_path: ./bin/install_automation.sh 92 | asset_name: install_automation.sh 93 | asset_content_type: application/octet-stream 94 | 95 | container_image: 96 | needs: 97 | - unit-tests 98 | - smoke 99 | runs-on: ubuntu-latest 100 | env: 101 | REGISTRY: quay.io 102 | REPO_USER: libpod 103 | REPO_NAME: cirrus-ci_retrospective 104 | steps: 105 | - uses: actions/checkout@v4 106 | with: 107 | fetch-depth: 0 108 | path: ./ 109 | 110 | - name: Build the cirrus-ci_retrospective container image 111 | run: >- 112 | docker build -t $REGISTRY/$REPO_USER/$REPO_NAME:latest \ 113 | -f cirrus-ci_retrospective/Dockerfile \ 114 | --build-arg INSTALL_AUTOMATION_VERSION=0.0.0 \ 115 | ./ 116 | 117 | - name: Configure registry credentials 118 | env: 119 | DOCKER_CONFIG_JSON: ${{secrets.DOCKER_CONFIG_JSON}} 120 | run: | 121 | if [[ -z "$DOCKER_CONFIG_JSON" ]]; then 122 | echo "::error::Empty/unset \$DOCKER_CONFIG_JSON" 123 | exit 1 124 | fi 125 | trap "history -c" EXIT 126 | mkdir -p $HOME/.docker 127 | echo "$DOCKER_CONFIG_JSON" > $HOME/.docker/config.json 128 | 129 | - name: Retrieve the tag name 130 | id: get_tag 131 | run: printf "TAG_NAME=%s\n" $(basename "$GITHUB_REF" | tee /dev/stderr) >> $GITHUB_OUTPUT 132 | 133 | - name: Tag and push cirrus-ci_retrospective container image to registry 134 | run: | 135 | docker tag $REGISTRY/$REPO_USER/$REPO_NAME:latest \ 136 | $REGISTRY/$REPO_USER/$REPO_NAME:${{ steps.get_tag.outputs.TAG_NAME }} 137 | docker push $REGISTRY/$REPO_USER/$REPO_NAME:${{ steps.get_tag.outputs.TAG_NAME }} && \ 138 | docker push $REGISTRY/$REPO_USER/$REPO_NAME:latest 139 | 140 | debug: 141 | runs-on: ubuntu-latest 142 | steps: 143 | - if: always() 144 | name: Log colorized and formatted event JSON 145 | run: jq --indent 4 --color-output . 
${{ github.event_path }} 146 | 147 | - if: always() 148 | uses: actions/upload-artifact@v4.6.2 149 | name: Archive triggering event JSON 150 | with: 151 | name: event.json.zip 152 | path: ${{ github.event_path }} 153 | -------------------------------------------------------------------------------- /.github/workflows/ubuntu_unit_tests.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | automation_unit-tests: 7 | runs-on: ubuntu-24.04 8 | steps: 9 | - uses: actions/checkout@v4 10 | with: 11 | fetch-depth: 0 12 | persist-credentials: false 13 | path: ./ 14 | 15 | - name: Install dependencies 16 | run: | 17 | sudo apt-get -qq update 18 | sudo apt-get -qq -y install libtest-differences-perl libyaml-libyaml-perl 19 | 20 | - name: Fetch all repository tags 21 | run: git fetch --tags --force 22 | 23 | - name: Execute all unit-tests 24 | run: $GITHUB_WORKSPACE/bin/run_all_tests.sh 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | -------------------------------------------------------------------------------- /CODE-OF-CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## The Automation Scripts for Containers Project Community Code of Conduct 2 | 3 | The Automation Scripts for Containers Project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md). 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Automation scripts, libraries for re-use in other repositories 2 | 3 | 4 | ## Dependencies 5 | 6 | The install script and `common` subdirectory components require the following 7 | system packages (or their equivalents): 8 | 9 | * bash 10 | * core-utils 11 | * git 12 | * install 13 | 14 | 15 | ## Installation 16 | 17 | During build of an environment (VM, container image, etc), execute *any version* 18 | of [the install 19 | script](https://github.com/containers/automation/releases/download/latest/install_automation.sh), 20 | preferably as root. The script ***must*** be passed the version number of [the project 21 | release to install](https://github.com/containers/automation/releases). Alternatively 22 | it may be passed `latest` to install the HEAD of the main branch. 23 | 24 | For example, to install the `v1.1.3` release, run: 25 | ```bash 26 | ~# url='https://raw.githubusercontent.com/containers/automation/master/bin/install_automation.sh' 27 | ~# curl -sL "$url" | bash -s 1.1.3 28 | ``` 29 | 30 | To install `latest`, run: 31 | ```bash 32 | ~# url='https://raw.githubusercontent.com/containers/automation/master/bin/install_automation.sh' 33 | ~# curl -sL "$url" | bash -s latest 34 | ``` 35 | 36 | ### Alt. Installation 37 | 38 | If you're leery of piping to bash and/or a local clone of the repository is already 39 | available, the installer can be invoked with the *magic version* '0.0.0'. 40 | Note this will limit the install to the local clone (as-is). The installer script 41 | will still reach out to github.com to retrieve version information.
For example: 42 | 43 | ```bash 44 | ~# cd /path/to/clone 45 | /path/to/clone# ./bin/install_automation.sh 0.0.0 46 | ``` 47 | 48 | ### Component installation 49 | 50 | The installer may also be passed the names of one or more components to 51 | install system-wide. Available components are simply any subdirectory in the repo 52 | which contains a `.install.sh` file. For example, to install the latest `build-push` system-wide run: 53 | 54 | ```bash 55 | ~# url='https://raw.githubusercontent.com/containers/automation/master/bin/install_automation.sh' 56 | ~# curl -sL "$url" | bash -s latest build-push 57 | ``` 58 | 59 | ## Usage 60 | 61 | The basic install consists of copying the contents of the `common` (subdirectory) and 62 | the installer script into a central location on the system. Because this location 63 | can vary by platform, a global shell variable `$AUTOMATION_LIB_PATH` is established 64 | by a central configuration at install-time. It is highly recommended that all 65 | callers explicitly load and export the contents of the file 66 | `/etc/automation_environment` before making use of the common library or any 67 | components. For example: 68 | 69 | ```bash 70 | #!/bin/bash 71 | 72 | set -a 73 | if [[ -r "/etc/automation_environment" ]]; then 74 | source /etc/automation_environment 75 | fi 76 | set +a 77 | 78 | if [[ -n "$AUTOMATION_LIB_PATH" ]]; then 79 | source $AUTOMATION_LIB_PATH/common_lib.sh 80 | else 81 | ( 82 | echo "WARNING: It doesn't appear containers/automation common was installed." 83 | ) >> /dev/stderr 84 | fi 85 | 86 | ...do stuff... 87 | ``` 88 | 89 | 90 | ## Subdirectories 91 | 92 | ### `.github/workflows` 93 | 94 | Directory containing workflows for Github Actions. 95 | 96 | ### `bin` 97 | 98 | This directory contains scripts intended for execution under multiple environments, 99 | pertaining to operations on this whole repository. For example, executing all 100 | unit tests, installing components, etc. 101 | 102 | ### `build-push` 103 | 104 | Handy automation tool to help with parallel building and pushing container images, 105 | including support for multi-arch (via QEMU emulation). See the 106 | [README.md file in the subdirectory](build-push/README.md) for more information. 107 | 108 | ### `cirrus-ci_artifacts` 109 | 110 | Handy python script that may be used to download artifacts from any build, 111 | based on knowing its ID. Downloads will be stored properly nested, by task 112 | name and artifact so there are no name clashes. 113 | 114 | ### `cirrus-ci_env` 115 | 116 | Python script used to minimally parse `.cirrus.yml` tasks as written/formatted 117 | in other containers projects. This is not intended to be used directly, but 118 | called by other scripts to help extract env. var. values from matrix tasks. 119 | 120 | ### `cirrus-ci_retrospective` 121 | 122 | See the [README.md file in the subdirectory](cirrus-ci_retrospective/README.md) for more information. 123 | 124 | ### `cirrus-task-map` 125 | 126 | Handy script that parses a `.cirrus.yml` and outputs a flow-diagram to illustrate 127 | task dependencies. Useful for visualizing complex configurations, like that of 128 | `containers/podman`. 129 | 130 | ### `common` 131 | 132 | This directory contains general-purpose scripts, libraries, and their unit-tests. 133 | They're intended to be used individually or as a whole from within automation of 134 | other repositories.
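For instance, a minimal sketch of a consumer script (mirroring how this repository's own `.install.sh` files load these libraries; the `./some_required_file` path is purely illustrative):

```bash
#!/bin/bash

# Load the install-time environment to obtain $AUTOMATION_LIB_PATH.
source /etc/automation_environment

# Load individual libraries rather than all of common_lib.sh.
source "$AUTOMATION_LIB_PATH/anchors.sh"
source "$AUTOMATION_LIB_PATH/console_output.sh"

# 'die' is provided by the libraries sourced above; it prints an
# error message and exits non-zero.
[[ -r "./some_required_file" ]] || die "Missing ./some_required_file"
```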
135 | 136 | ### `github` 137 | 138 | Contains some helper scripts/libraries for using `cirrus-ci_retrospective` from 139 | within a github-actions workflow. Not intended to be used otherwise. 140 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | ## Security and Disclosure Information Policy for the Automation Scripts for Containers Project 2 | 3 | The Automation Scripts for Containers Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects. 4 | -------------------------------------------------------------------------------- /bin/run_all_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Convenience script for executing all tests in every 'test' subdirectory 4 | 5 | set -e 6 | 7 | if [[ "$CIRRUS_CI" == "true" ]]; then 8 | echo "Running under Cirrus-CI: Exporting all \$CIRRUS_* variables" 9 | # Allow tests access to details presented by Cirrus-CI 10 | for env_var in $(awk 'BEGIN{for(v in ENVIRON) print v}' | grep -E "^CIRRUS_") 11 | do 12 | echo " $env_var=${!env_var}" 13 | export $env_var="${!env_var}" 14 | done 15 | fi 16 | 17 | this_script_filepath="$(realpath $0)" 18 | runner_script_filename="$(basename $0)" 19 | 20 | for test_subdir in $(find "$(realpath $(dirname $0)/../)" -type d -name test | sort -r); do 21 | test_runner_filepath="$test_subdir/$runner_script_filename" 22 | if [[ -x "$test_runner_filepath" ]] && [[ "$test_runner_filepath" != "$this_script_filepath" ]]; then 23 | echo -e "\nExecuting $test_runner_filepath..." >> /dev/stderr 24 | $test_runner_filepath 25 | else 26 | echo -e "\nWARNING: Skipping $test_runner_filepath" >> /dev/stderr 27 | fi 28 | done 29 | 30 | echo "Successfully executed all $runner_script_filename scripts" 31 | -------------------------------------------------------------------------------- /build-push/.install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Installs 'build-push' script system-wide. NOT intended to be used directly 4 | # by humans, should only be used indirectly by running 5 | # ../bin/install_automation.sh build-push 6 | 7 | set -eo pipefail 8 | 9 | source "$AUTOMATION_LIB_PATH/anchors.sh" 10 | source "$AUTOMATION_LIB_PATH/console_output.sh" 11 | 12 | INSTALL_PREFIX=$(realpath $AUTOMATION_LIB_PATH/..) 13 | # Assume the directory this script is in, represents what is being installed 14 | INSTALL_NAME=$(basename $(dirname ${BASH_SOURCE[0]})) 15 | AUTOMATION_VERSION=$(automation_version) 16 | [[ -n "$AUTOMATION_VERSION" ]] || \ 17 | die "Could not determine version of common automation libs, was 'install_automation.sh' successful?"
18 | 19 | echo "Installing $INSTALL_NAME version $(automation_version) into $INSTALL_PREFIX" 20 | 21 | unset INST_PERM_ARG 22 | if [[ $UID -eq 0 ]]; then 23 | INST_PERM_ARG="-o root -g root" 24 | fi 25 | 26 | cd $(dirname $(realpath "${BASH_SOURCE[0]}")) 27 | install -v $INST_PERM_ARG -D -t "$INSTALL_PREFIX/bin" ./bin/* 28 | 29 | echo "Successfully installed $INSTALL_NAME" 30 | -------------------------------------------------------------------------------- /build-push/README.md: -------------------------------------------------------------------------------- 1 | # Build-push script 2 | 3 | This is a wrapper around buildah build, coupled with pre and post 4 | build commands and automatic registry server push. Its goal is to 5 | provide an abstraction layer for additional build automation. Though 6 | it may be useful on its own, this is not its primary purpose. 7 | 8 | 9 | ## Requirements 10 | 11 | * Executables for `jq`, and `buildah` (1.23 or later) are available. 12 | * Automation common-library is installed & env. var set. 13 | * Installed system-wide as per 14 | [the top-level documentation](https://github.com/containers/automation#installation) 15 | * -or- 16 | * Run directly from repository clone by first doing 17 | `export AUTOMATION_LIB_PATH=/path/to/clone/common/lib` 18 | * Optionally, the kernel may be configured to use emulation (such as QEMU) 19 | for non-native binary execution (where available and supported). See 20 | [the section below for more 21 | information](README.md#qemu-user-static-emulation). 22 | 23 | 24 | ## QEMU-user-static Emulation 25 | 26 | On platforms/distros that support it (like F34+), this is a handy 27 | way to enable non-native binary execution. It can therefore be 28 | used to build container images for other non-native architectures. 29 | Though setup may vary by distro/version, in F34 all that's needed 30 | is to install the `qemu-user-static` package. It will take care 31 | of automatically registering the emulation executables with the 32 | kernel. 33 | 34 | Otherwise, you may find these [handy/dandy scripts and 35 | container images useful](https://github.com/multiarch/qemu-user-static#multiarchqemu-user-static-images) for environments without native support (like 36 | CentOS and RHEL). However, be aware I cannot attest to the safety 37 | or quality of those binaries/images, so use them at your own risk. 38 | Something like this (as **root**): 39 | 40 | ```bash 41 | ~# install qemu user static binaries somehow 42 | ~# qemu_setup_fqin="docker.io/multiarch/qemu-user-static:latest" 43 | ~# vol_awk='{print "-v "$1":"$1""}' 44 | ~# bin_vols=$(find /usr/bin -name 'qemu-*-static' | awk -e "$vol_awk" | tr '\n' ' ') 45 | ~# podman run --rm --privileged $bin_vols $qemu_setup_fqin --reset -p yes 46 | ``` 47 | 48 | Note: You may need to alter `$vol_awk` or the `podman` command line 49 | depending on what your platform supports. 50 | 51 | 52 | ## Use in build automation 53 | 54 | This script may be useful as a uniform interface for building and pushing 55 | for multiple architectures, all in one go. A simple example would be: 56 | 57 | ```bash 58 | $ export SOME_USERNAME=foo # normally hidden/secured in the CI system 59 | $ export SOME_PASSWORD=bar # along with this password value. 60 | 61 | $ build-push.sh --arches=arm64,ppc64le,s390x quay.io/some/thing ./path/to/contextdir 62 | ``` 63 | 64 | In this case, the image `quay.io/some/thing:latest` would be built for the 65 | listed architectures, then pushed to the remote registry server.
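When iterating locally, the registry push can be skipped entirely with the `--nopush` flag (see the `--help` output and the notes below). A minimal sketch, using a purely local image name so no registry credentials are needed, much like this repository's own tests do:

```bash
# Builds for the native architecture by default; the result stays in
# local container-storage and nothing is pushed to any registry.
$ build-push.sh --nopush localhost/some/thing ./path/to/contextdir
```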
66 | 67 | ### Use in automation with additional preparation 68 | 69 | When building for multiple architectures using emulation, it's vastly 70 | more efficient to execute as few non-native RUN instructions as possible. 71 | This is supported by the `--prepcmd` option, which specifies a shell 72 | command-string to execute prior to building the image. The command-string 73 | will have access to a set of exported env. vars. for use and/or 74 | substitution (see the `--help` output for details). 75 | 76 | For example, this command string could be used to seed the build cache 77 | by pulling down previously built image of the same name: 78 | 79 | ```bash 80 | $ build-push.sh ... quay.io/test/ing --prepcmd='$RUNTIME pull $FQIN:latest' 81 | ``` 82 | 83 | In this example, the command `buildah pull quay.io/test/ing:latest` will 84 | be executed prior to the build. 85 | 86 | ### Use in automation with modified images 87 | 88 | Sometimes additional steps need to be performed after the build, to modify, 89 | inspect or additionally tag the built image before it's pushed. This could 90 | include (for example) running tests on the image, or modifying its metadata 91 | in some way. All these and more are supported by the `--modcmd` option. 92 | 93 | Simply feed it a command string to be run after a successful build. The 94 | command-string script will have access to a set of exported env. vars. 95 | for use and/or substitution (see the `--help` output for details). 96 | 97 | After executing a `--modcmd`, `build-push.sh` will take care to identify 98 | all images related to the original FQIN (minus the tag). Should 99 | additional tags be present, they will also be pushed (absent the 100 | `--nopush` flag). If any/all images are missing, they will be silently 101 | ignored. 102 | 103 | For example you could use this to only push version-tagged images, and 104 | never `latest`: 105 | 106 | ``` 107 | $ build-push.sh ... --modcmd='$RUNTIME tag $FQIN:latest $FQIN:9.8.7 && \ 108 | $RUNTIME manifest rm $FQIN:latest' 109 | ``` 110 | 111 | Note: If your `--modcmd` command or script removes **ALL** tags, and 112 | `--nopush` was **not** specified, an error message will be printed 113 | followed by a non-zero exit. This is intended to help automation 114 | catch an assumed missed-expectation. 115 | -------------------------------------------------------------------------------- /build-push/test/fake_buildah.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Need to keep track of values from 'build' to 'manifest' calls 6 | DATF='/tmp/fake_buildah.json' 7 | 8 | if [[ "$1" == "build" ]]; then 9 | echo '{"manifests":[' > $DATF 10 | for arg; do 11 | if [[ "$arg" =~ --platform= ]]; then 12 | for platarch in $(cut -d '=' -f 2 <<<"$arg" | tr ',' ' '); do 13 | arch=$(cut -d '/' -f 2 <<<"$platarch") 14 | [[ -n "$arch" ]] || continue 15 | echo "FAKEBUILDAH ($arch)" > /dev/stderr 16 | echo -n ' {"platform":{"architecture":"' >> $DATF 17 | echo -n "$arch" >> $DATF 18 | echo '"}},' >> $DATF 19 | done 20 | fi 21 | done 22 | # dummy-value to avoid dealing with JSON oddity: last item must not 23 | # end with a comma 24 | echo ' {}' >> $DATF 25 | echo ']}' >> $DATF 26 | 27 | # Tests expect to see this 28 | echo "FAKEBUILDAH $@" 29 | elif [[ "$1" == "manifest" ]]; then 30 | # validate json while outputing it 31 | jq . 
$DATF 32 | elif [[ "$1" == "info" ]]; then 33 | case "$@" in 34 | *arch*) echo "amd64" ;; 35 | *cpus*) echo "2" ;; 36 | *) exit 1 ;; 37 | esac 38 | elif [[ "$1" == "images" ]]; then 39 | echo '[{"names":["localhost/foo/bar:latest"]}]' 40 | else 41 | echo "ERROR: Unexpected arg '$1' to fake_buildah.sh" >> /dev/stderr 42 | exit 9 43 | fi 44 | -------------------------------------------------------------------------------- /build-push/test/qemusetup.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | # This script is intend for use by tests, DO NOT EXECUTE. 4 | 5 | set -eo pipefail 6 | 7 | # shellcheck disable=SC2154 8 | if [[ "$CIRRUS_CI" == "true" ]]; then 9 | # Cirrus-CI is setup (see .cirrus.yml) to run tests on CentOS 10 | # for simplicity, but it has no native qemu-user-static. For 11 | # the benefit of CI testing, cheat and use whatever random 12 | # emulators are included in the container image. 13 | 14 | # N/B: THIS IS NOT SAFE FOR PRODUCTION USE!!!!! 15 | podman run --rm --privileged \ 16 | mirror.gcr.io/multiarch/qemu-user-static:latest \ 17 | --reset -p yes 18 | elif [[ -x "/usr/bin/qemu-aarch64-static" ]]; then 19 | # TODO: Better way to determine if kernel already setup? 20 | echo "Warning: Assuming qemu-user-static is already setup" 21 | else 22 | echo "Error: System does not appear to have qemu-user-static setup" 23 | exit 1 24 | fi 25 | -------------------------------------------------------------------------------- /build-push/test/run_all_tests.sh: -------------------------------------------------------------------------------- 1 | ../../common/test/run_all_tests.sh -------------------------------------------------------------------------------- /build-push/test/test_context/Containerfile: -------------------------------------------------------------------------------- 1 | FROM registry.fedoraproject.org/fedora-minimal:latest 2 | RUN /bin/true 3 | ENTRYPOINT /bin/false 4 | # WARNING: testbuilds.sh depends on the number of build steps 5 | -------------------------------------------------------------------------------- /build-push/test/testbin-build-push.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TEST_SOURCE_DIRPATH=$(realpath $(dirname "${BASH_SOURCE[0]}")) 4 | 5 | # Load standardized test harness 6 | source $TEST_SOURCE_DIRPATH/testlib.sh || exit 1 7 | 8 | SUBJ_FILEPATH="$TEST_DIR/$SUBJ_FILENAME" 9 | TEST_CONTEXT="$TEST_SOURCE_DIRPATH/test_context" 10 | EMPTY_CONTEXT=$(mktemp -d -p '' .tmp_$(basename ${BASH_SOURCE[0]})_XXXX) 11 | export NATIVE_GOARCH=$(buildah info --format='{{.host.arch}}') 12 | 13 | test_cmd "Verify error when automation library not found" \ 14 | 2 'ERROR: Expecting \$AUTOMATION_LIB_PATH' \ 15 | bash -c "AUTOMATION_LIB_PATH='' RUNTIME=/bin/true $SUBJ_FILEPATH 2>&1" 16 | 17 | export AUTOMATION_LIB_PATH="$TEST_SOURCE_DIRPATH/../../common/lib" 18 | 19 | test_cmd "Verify error when buildah can't be found" \ 20 | 1 "ERROR: Unable to find.+/usr/local/bin" \ 21 | bash -c "RUNTIME=/bin/true $SUBJ_FILEPATH 2>&1" 22 | 23 | # These tests don't actually need to actually build/run anything 24 | export RUNTIME="$TEST_SOURCE_DIRPATH/fake_buildah.sh" 25 | 26 | test_cmd "Verify error when executed w/o any arguments" \ 27 | 1 "ERROR: Must.+required arguments." \ 28 | bash -c "$SUBJ_FILEPATH 2>&1" 29 | 30 | test_cmd "Verify error when specify partial required arguments" \ 31 | 1 "ERROR: Must.+required arguments." 
\ 32 | bash -c "$SUBJ_FILEPATH foo 2>&1" 33 | 34 | test_cmd "Verify error when executed bad Containerfile directory" \ 35 | 1 "ERROR:.+directory: 'bar'" \ 36 | bash -c "$SUBJ_FILEPATH foo bar 2>&1" 37 | 38 | test_cmd "Verify error when specify invalid FQIN" \ 39 | 1 "ERROR:.+FQIN.+foo" \ 40 | bash -c "$SUBJ_FILEPATH foo $EMPTY_CONTEXT 2>&1" 41 | 42 | test_cmd "Verify error when specify slightly invalid FQIN" \ 43 | 1 "ERROR:.+FQIN.+foo/bar" \ 44 | bash -c "$SUBJ_FILEPATH foo/bar $EMPTY_CONTEXT 2>&1" 45 | 46 | test_cmd "Verify error when executed bad context subdirectory" \ 47 | 1 "ERROR:.+Containerfile or Dockerfile: '$EMPTY_CONTEXT'" \ 48 | bash -c "$SUBJ_FILEPATH foo/bar/baz $EMPTY_CONTEXT 2>&1" 49 | 50 | # no-longer needed 51 | rm -rf "$EMPTY_CONTEXT" 52 | unset EMPTY_CONTEXT 53 | 54 | test_cmd "Verify --help output to stdout can be grepped" \ 55 | 0 "Optional Environment Variables:" \ 56 | bash -c "$SUBJ_FILEPATH --help | grep 'Optional Environment Variables:'" 57 | 58 | test_cmd "Confirm required username env. var. unset error" \ 59 | 1 "ERROR.+BAR_USERNAME" \ 60 | bash -c "$SUBJ_FILEPATH foo/bar/baz $TEST_CONTEXT 2>&1" 61 | 62 | test_cmd "Confirm required password env. var. unset error" \ 63 | 1 "ERROR.+BAR_PASSWORD" \ 64 | bash -c "BAR_USERNAME=snafu $SUBJ_FILEPATH foo/bar/baz $TEST_CONTEXT 2>&1" 65 | 66 | for arg in 'prepcmd' 'modcmd'; do 67 | test_cmd "Verify error when --$arg specified without an '='" \ 68 | 1 "ERROR:.+with '='" \ 69 | bash -c "BAR_USERNAME=snafu BAR_PASSWORD=ufans $SUBJ_FILEPATH foo/bar/baz $TEST_CONTEXT --$arg notgoingtowork 2>&1" 70 | done 71 | 72 | test_cmd "Verify numeric \$PARALLEL_JOBS is handled properly" \ 73 | 0 "FAKEBUILDAH.+--jobs=42 " \ 74 | bash -c "PARALLEL_JOBS=42 $SUBJ_FILEPATH localhost/foo/bar --nopush $TEST_CONTEXT 2>&1" 75 | 76 | test_cmd "Verify non-numeric \$PARALLEL_JOBS is handled properly" \ 77 | 0 "FAKEBUILDAH.+--jobs=[0-9]+ " \ 78 | bash -c "PARALLEL_JOBS=badvalue $SUBJ_FILEPATH localhost/foo/bar --nopush $TEST_CONTEXT 2>&1" 79 | 80 | PREPCMD='echo "#####${ARCHES}#####"' 81 | test_cmd "Verify \$ARCHES value is available to prep-command" \ 82 | 0 "#####correct horse battery staple#####.+FAKEBUILDAH.+test_context" \ 83 | bash -c "$SUBJ_FILEPATH --arches=correct,horse,battery,staple localhost/foo/bar --nopush --prepcmd='$PREPCMD' $TEST_CONTEXT 2>&1" 84 | 85 | rx="FAKEBUILDAH build \\$'--test-build-arg=one \\\"two\\\" three\\\nfour' --anotherone=foo\\\ bar" 86 | test_cmd "Verify special characters preserved in build-args" \ 87 | 0 "$rx" \ 88 | bash -c "PARALLEL_JOBS=badvalue $SUBJ_FILEPATH localhost/foo/bar $TEST_CONTEXT --test-build-arg=\"one \\\"two\\\" three 89 | four\" --nopush --anotherone=\"foo bar\" 2>&1" 90 | 91 | # A specialized non-container environment required to run these 92 | if [[ -n "$BUILD_PUSH_TEST_BUILDS" ]]; then 93 | export RUNTIME=$(type -P buildah) 94 | export PARALLEL_JOBS=$($RUNTIME info --format='{{.host.cpus}}') 95 | 96 | source $(dirname "${BASH_SOURCE[0]}")/testbuilds.sh 97 | else 98 | echo "WARNING: Set \$BUILD_PUSH_TEST_BUILDS non-empty to fully test build_push." 99 | echo "" 100 | fi 101 | 102 | # Must always happen last 103 | exit_with_status 104 | -------------------------------------------------------------------------------- /build-push/test/testbuilds.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | # This script is intended to be sourced from testbin-build-push.sh. 
4 | # Any/all other usage is virtually guaranteed to fail and/or cause 5 | # harm to the system. 6 | 7 | for varname in RUNTIME SUBJ_FILEPATH TEST_CONTEXT TEST_SOURCE_DIRPATH TEST_FQIN BUILDAH_USERNAME BUILDAH_PASSWORD; do 8 | value=${!varname} 9 | if [[ -z "$value" ]]; then 10 | echo "ERROR: Required \$$varname variable is unset/empty." 11 | exit 1 12 | fi 13 | done 14 | unset value 15 | 16 | # RUNTIME is defined by caller 17 | # shellcheck disable=SC2154 18 | $RUNTIME --version 19 | test_cmd "Confirm $(basename $RUNTIME) is available" \ 20 | 0 "buildah version .+" \ 21 | $RUNTIME --version 22 | 23 | skopeo --version 24 | test_cmd "Confirm skopeo is available" \ 25 | 0 "skopeo version .+" \ 26 | skopeo --version 27 | 28 | PREPCMD='echo "SpecialErrorMessage:$REGSERVER" >> /dev/stderr && exit 42' 29 | # SUBJ_FILEPATH and TEST_CONTEXT are defined by caller 30 | # shellcheck disable=SC2154 31 | test_cmd "Confirm error output and exit(42) from --prepcmd" \ 32 | 42 "SpecialErrorMessage:localhost" \ 33 | bash -c "$SUBJ_FILEPATH --nopush localhost/foo/bar $TEST_CONTEXT --prepcmd='$PREPCMD' 2>&1" 34 | 35 | # N/B: The following are stateful - each depends on precedding test success 36 | # and assume empty container-storage (podman system reset). 37 | 38 | test_cmd "Confirm building native-arch test image w/ --nopush" \ 39 | 0 "STEP 3/3: ENTRYPOINT /bin/false.+COMMIT" \ 40 | bash -c "A_DEBUG=1 $SUBJ_FILEPATH localhost/foo/bar $TEST_CONTEXT --nopush 2>&1" 41 | 42 | native_arch=$($RUNTIME info --format='{{.host.arch}}') 43 | test_cmd "Confirm native_arch was set to non-empty string" \ 44 | 0 "" \ 45 | test -n "$native_arch" 46 | 47 | test_cmd "Confirm built image manifest contains the native arch '$native_arch'" \ 48 | 0 "$native_arch" \ 49 | bash -c "$RUNTIME manifest inspect localhost/foo/bar:latest | jq -r '.manifests[0].platform.architecture'" 50 | 51 | test_cmd "Confirm rebuilding with same command uses cache" \ 52 | 0 "STEP 3/3.+Using cache" \ 53 | bash -c "A_DEBUG=1 $SUBJ_FILEPATH localhost/foo/bar $TEST_CONTEXT --nopush 2>&1" 54 | 55 | test_cmd "Confirm manifest-list can be removed by name" \ 56 | 0 "untagged: localhost/foo/bar:latest" \ 57 | $RUNTIME manifest rm containers-storage:localhost/foo/bar:latest 58 | 59 | test_cmd "Verify expected partial failure when passing bogus architectures" \ 60 | 125 "no image found in image index for architecture" \ 61 | bash -c "A_DEBUG=1 $SUBJ_FILEPATH --arches=correct,horse,battery,staple localhost/foo/bar --nopush $TEST_CONTEXT 2>&1" 62 | 63 | MODCMD='$RUNTIME tag $FQIN:latest $FQIN:9.8.7-testing' 64 | test_cmd "Verify --modcmd is able to tag the manifest" \ 65 | 0 "Executing mod-command" \ 66 | bash -c "A_DEBUG=1 $SUBJ_FILEPATH localhost/foo/bar $TEST_CONTEXT --nopush --modcmd='$MODCMD' 2>&1" 67 | 68 | test_cmd "Verify the tagged manifest is also present" \ 69 | 0 "[a-zA-Z0-9]+" \ 70 | bash -c "$RUNTIME images --quiet localhost/foo/bar:9.8.7-testing" 71 | 72 | test_cmd "Confirm tagged image manifest contains native arch '$native_arch'" \ 73 | 0 "$native_arch" \ 74 | bash -c "$RUNTIME manifest inspect localhost/foo/bar:9.8.7-testing | jq -r '.manifests[0].platform.architecture'" 75 | 76 | TEST_TEMP=$(mktemp -d -p '' .tmp_$(basename ${BASH_SOURCE[0]})_XXXX) 77 | 78 | test_cmd "Confirm digest can be obtained from 'latest' manifest list" \ 79 | 0 ".+" \ 80 | bash -c "$RUNTIME manifest inspect localhost/foo/bar:latest | jq -r '.manifest[0].digest' | tee $TEST_TEMP/latest_digest" 81 | 82 | test_cmd "Confirm digest can be obtained from '9.8.7-testing' manifest 
list" \ 83 | 0 ".+" \ 84 | bash -c "$RUNTIME manifest inspect localhost/foo/bar:9.8.7-testing | jq -r '.manifest[0].digest' | tee $TEST_TEMP/tagged_digest" 85 | 86 | test_cmd "Verify tagged manifest image digest matches the same in latest" \ 87 | 0 "" \ 88 | test "$(<$TEST_TEMP/tagged_digest)" == "$(<$TEST_TEMP/latest_digest)" 89 | 90 | MODCMD=' 91 | set -x; 92 | $RUNTIME images && \ 93 | $RUNTIME manifest rm $FQIN:latest && \ 94 | $RUNTIME manifest rm $FQIN:9.8.7-testing && \ 95 | echo "AllGone"; 96 | ' 97 | test_cmd "Verify --modcmd can execute command string that removes all tags" \ 98 | 0 "AllGone.*No FQIN.+to be pushed" \ 99 | bash -c "A_DEBUG=1 $SUBJ_FILEPATH --modcmd='$MODCMD' localhost/foo/bar --nopush $TEST_CONTEXT 2>&1" 100 | 101 | test_cmd "Verify previous --modcmd removed the 'latest' tagged image" \ 102 | 125 "image not known" \ 103 | $RUNTIME images --quiet containers-storage:localhost/foo/bar:latest 104 | 105 | test_cmd "Verify previous --modcmd removed the '9.8.7-testing' tagged image" \ 106 | 125 "image not known" \ 107 | $RUNTIME images --quiet containers-storage:localhost/foo/bar:9.8.7-testing 108 | 109 | FAKE_VERSION=$RANDOM 110 | MODCMD="set -ex; 111 | \$RUNTIME tag \$FQIN:latest \$FQIN:$FAKE_VERSION; 112 | \$RUNTIME manifest rm \$FQIN:latest;" 113 | # TEST_FQIN and TEST_SOURCE_DIRPATH defined by caller 114 | # shellcheck disable=SC2154 115 | test_cmd "Verify e2e workflow w/ additional build-args" \ 116 | 0 "Pushing $TEST_FQIN:$FAKE_VERSION" \ 117 | bash -c "env A_DEBUG=1 $SUBJ_FILEPATH \ 118 | --prepcmd='touch $TEST_SOURCE_DIRPATH/test_context/Containerfile' \ 119 | --modcmd='$MODCMD' \ 120 | --arches=amd64,s390x,arm64,ppc64le \ 121 | $TEST_FQIN \ 122 | $TEST_CONTEXT \ 123 | --device=/dev/fuse --label testing=true \ 124 | 2>&1" 125 | 126 | test_cmd "Verify latest tagged image was not pushed" \ 127 | 2 'reading manifest latest in quay\.io/buildah/do_not_use: manifest unknown' \ 128 | skopeo inspect docker://$TEST_FQIN:latest 129 | 130 | test_cmd "Verify architectures can be obtained from manifest list" \ 131 | 0 "" \ 132 | bash -c "$RUNTIME manifest inspect $TEST_FQIN:$FAKE_VERSION | \ 133 | jq -r '.manifests[].platform.architecture' > $TEST_TEMP/maniarches" 134 | 135 | for arch in amd64 s390x arm64 ppc64le; do 136 | test_cmd "Verify $arch architecture present in $TEST_FQIN:$FAKE_VERSION" \ 137 | 0 "" \ 138 | grep -Fqx "$arch" $TEST_TEMP/maniarches 139 | done 140 | 141 | test_cmd "Verify pushed image can be removed" \ 142 | 0 "" \ 143 | skopeo delete docker://$TEST_FQIN:$FAKE_VERSION 144 | 145 | # Cleanup 146 | rm -rf "$TEST_TEMP" 147 | -------------------------------------------------------------------------------- /build-push/test/testlib.sh: -------------------------------------------------------------------------------- 1 | ../../common/test/testlib.sh -------------------------------------------------------------------------------- /ci/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.fedoraproject.org/fedora-minimal:latest 2 | RUN microdnf update -y && \ 3 | microdnf install -y \ 4 | findutils jq git curl python3-pyyaml \ 5 | perl-YAML perl-interpreter perl-open perl-Data-TreeDumper \ 6 | perl-Test perl-Test-Simple perl-Test-Differences \ 7 | perl-YAML-LibYAML perl-FindBin \ 8 | python3 python3-virtualenv python3-pip gcc python3-devel \ 9 | python3-flake8 python3-pep8-naming python3-flake8-import-order python3-flake8-polyfill python3-mccabe python3-pep8-naming && \ 10 | microdnf clean all && \ 11 | rm 
-rf /var/cache/dnf 12 | # Required by perl 13 | ENV LC_ALL="C" \ 14 | LANG="en_US.UTF-8" 15 | -------------------------------------------------------------------------------- /cirrus-ci_artifacts/.gitignore: -------------------------------------------------------------------------------- 1 | ./testvenv/ 2 | -------------------------------------------------------------------------------- /cirrus-ci_artifacts/.install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Installs cirrus-ci_artifacts and a python virtual environment 4 | # to execute with. NOT intended to be used directly 5 | # by humans, should only be used indirectly by running 6 | # ../bin/install_automation.sh cirrus-ci_artifacts 7 | 8 | set -eo pipefail 9 | 10 | source "$AUTOMATION_LIB_PATH/anchors.sh" 11 | source "$AUTOMATION_LIB_PATH/console_output.sh" 12 | 13 | INSTALL_PREFIX=$(realpath $AUTOMATION_LIB_PATH/../) 14 | # Assume the directory this script is in, represents what is being installed 15 | INSTALL_NAME=$(basename $(dirname ${BASH_SOURCE[0]})) 16 | AUTOMATION_VERSION=$(automation_version) 17 | [[ -n "$AUTOMATION_VERSION" ]] || \ 18 | die "Could not determine version of common automation libs, was 'install_automation.sh' successful?" 19 | 20 | [[ -n "$(type -P virtualenv)" ]] || \ 21 | die "$INSTALL_NAME requires python3-virtualenv" 22 | 23 | echo "Installing $INSTALL_NAME version $(automation_version) into $INSTALL_PREFIX" 24 | 25 | unset INST_PERM_ARG 26 | if [[ $UID -eq 0 ]]; then 27 | INST_PERM_ARG="-o root -g root" 28 | fi 29 | 30 | cd $(dirname $(realpath "${BASH_SOURCE[0]}")) 31 | virtualenv --clear --download \ 32 | $AUTOMATION_LIB_PATH/ccia.venv 33 | ( 34 | source $AUTOMATION_LIB_PATH/ccia.venv/bin/activate 35 | pip3 install --requirement ./requirements.txt 36 | deactivate 37 | ) 38 | install -v $INST_PERM_ARG -m '0644' -D -t "$INSTALL_PREFIX/lib/ccia.venv/bin" \ 39 | ./cirrus-ci_artifacts.py 40 | install -v $INST_PERM_ARG -D -t "$INSTALL_PREFIX/bin" ./cirrus-ci_artifacts 41 | 42 | # Needed for installer testing 43 | echo "Successfully installed $INSTALL_NAME" 44 | -------------------------------------------------------------------------------- /cirrus-ci_artifacts/README.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | This is a small script which examines a Cirrus-CI build and downloads 4 | available artifacts in parallel, into a subdirectory tree corresponding 5 | with the Cirrus-CI build ID, followed by the task-name, artifact-name 6 | and file-path. Optionally, a regex may be provided to download only 7 | specific artifacts matching the subdirectory path. 8 | 9 | The script may be executed from a currently running Cirrus-CI build 10 | (utilizing `$CIRRUS_BUILD_ID`), but only previously uploaded artifacts 11 | will be downloaded, and the task must have a `depends_on` statement 12 | to synchronize with tasks providing expected artifacts. 13 | 14 | # Installation 15 | 16 | Install the python3 module requirements using pip3: 17 | (Note: These go into `$HOME/.local/lib/python`) 18 | 19 | ``` 20 | $ pip3 install --user --requirement ./requirements.txt 21 | ``` 22 | 23 | # Usage 24 | 25 | Create and change to the directory where artifact tree should be 26 | created. Call the script, passing in the following arguments: 27 | 28 | 1. Optional, `--verbose` prints out artifacts as they are 29 | downloaded or skipped. 30 | 2. 
The Cirrus-CI build id (required) to retrieve (doesn't need to be 31 | finished running). 32 | 3. Optional, a filter regex e.g. `'runner_stats/.*fedora.*'` to 33 | only download artifacts matching `//` 34 | -------------------------------------------------------------------------------- /cirrus-ci_artifacts/cirrus-ci_artifacts: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script wrapps cirrus-ci_artifacts.sh inside a python 4 | # virtual environment setup at install time. It should not 5 | # be executed prior to installation. 6 | 7 | set -e 8 | 9 | # This is a convenience for callers that don't separately source this first 10 | # in their automation setup. 11 | if [[ -z "$AUTOMATION_LIB_PATH" ]] && [[ -r /etc/automation_environment ]]; then 12 | source /etc/automation_environment 13 | fi 14 | 15 | if [[ -z "$AUTOMATION_LIB_PATH" ]]; then 16 | ( 17 | echo "ERROR: Expecting \$AUTOMATION_LIB_PATH to be defined with the" 18 | echo " installation directory of automation tooling." 19 | ) >> /dev/stderr 20 | exit 1 21 | fi 22 | 23 | source $AUTOMATION_LIB_PATH/ccia.venv/bin/activate 24 | exec python3 $AUTOMATION_LIB_PATH/ccia.venv/bin/cirrus-ci_artifacts.py "$@" 25 | -------------------------------------------------------------------------------- /cirrus-ci_artifacts/cirrus-ci_artifacts.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Download all artifacts from a Cirrus-CI Build into a subdirectory tree. 5 | 6 | Subdirectory naming format: /// 7 | 8 | Input arguments (in order): 9 | Build ID - string, the build containing tasks w/ artifacts to download 10 | e.g. "5790771712360448" 11 | Path RX - Optional, regular expression to match against subdirectory 12 | tree naming format. 13 | """ 14 | 15 | import asyncio 16 | import re 17 | import sys 18 | from argparse import ArgumentParser 19 | from os import makedirs 20 | from os.path import split 21 | from urllib.parse import quote, unquote 22 | 23 | # Ref: https://docs.aiohttp.org/en/stable/http_request_lifecycle.html 24 | from aiohttp import ClientSession 25 | # Ref: https://gql.readthedocs.io/en/latest/index.html 26 | # pip3 install --user --requirement ./requirements.txt 27 | # (and/or in a python virtual environment) 28 | 29 | from gql import Client as GQLClient 30 | from gql import gql 31 | from gql.transport.requests import RequestsHTTPTransport 32 | 33 | 34 | # GraphQL API URL for Cirrus-CI 35 | CCI_GQL_URL = "https://api.cirrus-ci.com/graphql" 36 | 37 | # Artifact download base-URL for Cirrus-CI. 38 | # Download URL will be formed by appending: 39 | # "////" 40 | CCI_ART_URL = "https://api.cirrus-ci.com/v1/artifact/build" 41 | 42 | # Set True when --verbose is first argument 43 | VERBOSE = False 44 | 45 | def get_tasks(gqlclient, buildId): # noqa N803 46 | """Given a build ID, return a list of task objects.""" 47 | # Ref: https://cirrus-ci.org/api/ 48 | query = gql(''' 49 | query tasksByBuildId($buildId: ID!) 
{ 50 | build(id: $buildId) { 51 | tasks { 52 | name, 53 | id, 54 | buildId, 55 | artifacts { 56 | name, 57 | files { 58 | path 59 | } 60 | } 61 | } 62 | } 63 | } 64 | ''') 65 | query_vars = {"buildId": buildId} 66 | tasks = gqlclient.execute(query, variable_values=query_vars) 67 | if "build" in tasks and tasks["build"]: 68 | b = tasks["build"] 69 | if "tasks" in b and len(b["tasks"]): 70 | return b["tasks"] 71 | raise RuntimeError(f"No tasks found for build with ID {buildId}") 72 | raise RuntimeError(f"No Cirrus-CI build found with ID {buildId}") 73 | 74 | 75 | def task_art_url_sfxs(task): 76 | """Given a task dict return list CCI_ART_URL suffixes for all artifacts.""" 77 | result = [] 78 | bid = task["buildId"] 79 | tname = quote(task["name"]) # Make safe for URLs 80 | for art in task["artifacts"]: 81 | aname = quote(art["name"]) 82 | for _file in art["files"]: 83 | fpath = quote(_file["path"]) 84 | result.append(f"{bid}/{tname}/{aname}/{fpath}") 85 | return result 86 | 87 | 88 | async def download_artifact(session, dest_path, dl_url): 89 | """Asynchronous download contents of art_url as a byte-stream.""" 90 | # Last path component assumed to be the filename 91 | makedirs(split(dest_path)[0], exist_ok=True) # os.path.split 92 | async with session.get(dl_url) as response: 93 | with open(dest_path, "wb") as dest_file: 94 | dest_file.write(await response.read()) 95 | 96 | 97 | async def download_artifacts(task, path_rx=None): 98 | """Given a task dict, download all artifacts or matches to path_rx.""" 99 | downloaded = [] 100 | skipped = [] 101 | async with ClientSession() as session: 102 | for art_url_sfx in task_art_url_sfxs(task): 103 | dest_path = unquote(art_url_sfx) # Strip off URL encoding 104 | dl_url = f"{CCI_ART_URL}/{dest_path}" 105 | if path_rx is None or bool(path_rx.search(dest_path)): 106 | if VERBOSE: 107 | print(f" Downloading '{dest_path}'") 108 | sys.stdout.flush() 109 | await download_artifact(session, dest_path, dl_url) 110 | downloaded.append(dest_path) 111 | else: 112 | if VERBOSE: 113 | print(f" Skipping '{dest_path}'") 114 | skipped.append(dest_path) 115 | return {"downloaded": downloaded, "skipped": skipped} 116 | 117 | 118 | def get_args(argv): 119 | """Return parsed argument namespace object.""" 120 | parser = ArgumentParser(prog="cirrus-ci_artifacts", 121 | description=('Download Cirrus-CI artifacts by Build ID' 122 | ' number, into a subdirectory of the form' 123 | ' //' 124 | '/')) 125 | parser.add_argument('-v', '--verbose', 126 | dest='verbose', action='store_true', default=False, 127 | help='Show "Downloaded" | "Skipped" + relative artifact file-path.') 128 | parser.add_argument('buildId', nargs=1, metavar='', type=int, 129 | help="A Cirrus-CI Build ID number.") 130 | parser.add_argument('path_rx', nargs='?', default=None, metavar='[Reg. Exp.]', 131 | help="Reg. exp. include only // matches.") 132 | return parser.parse_args(args=argv[1:]) 133 | 134 | 135 | async def download(tasks, path_rx=None): 136 | """Return results from all async operations.""" 137 | # Python docs say to retain a reference to all tasks so they aren't 138 | # "garbage-collected" while still active. 
139 | results = [] 140 | for task in tasks: 141 | if len(task["artifacts"]): 142 | results.append(asyncio.create_task(download_artifacts(task, path_rx))) 143 | await asyncio.gather(*results) 144 | return results 145 | 146 | 147 | def main(buildId, path_rx=None): # noqa: N803,D103 148 | if path_rx is not None: 149 | path_rx = re.compile(path_rx) 150 | transport = RequestsHTTPTransport(url=CCI_GQL_URL, verify=True, retries=3) 151 | with GQLClient(transport=transport, fetch_schema_from_transport=True) as gqlclient: 152 | tasks = get_tasks(gqlclient, buildId) 153 | transport.close() 154 | async_results = asyncio.run(download(tasks, path_rx)) 155 | return [r.result() for r in async_results] 156 | 157 | 158 | if __name__ == "__main__": 159 | args = get_args(sys.argv) 160 | VERBOSE = args.verbose 161 | main(args.buildId[0], args.path_rx) 162 | -------------------------------------------------------------------------------- /cirrus-ci_artifacts/requirements.txt: -------------------------------------------------------------------------------- 1 | # Producing this list was done using the following process: 2 | # 1. Create a temporary `req.txt` file containing only the basic 3 | # non-distribution provided packages, e.g. `aiohttp[speedups]`, 4 | # `PyYAML`, `gql[requests]`, `requests` (see cirrus-ci_artifacts.py, 5 | # actual requirements may have changed) 6 | # 2. From a Fedora:latest container, install python3 & python3-virtualenv 7 | # 3. Setup & activate a temporary virtual environment 8 | # 4. Execute `pip3 install --requirements req.txt` 9 | # 5. Run pip3 freeze 10 | # 6. Edit `requirements.txt`, add the `~=` specifier to each line along 11 | # with the correct two-component version number (from freeze output) 12 | # 7. In a fresh container, confirm the automation installer 13 | # functions with the cirrus-ci_artifacts component (see main README 14 | # for installer instructions) 15 | PyYAML~=6.0 16 | aiohttp[speedups]~=3.8 17 | gql[requests]~=3.3 18 | requests>=2,<3 19 | urllib3<2.4.1 20 | -------------------------------------------------------------------------------- /cirrus-ci_artifacts/test/ccia.py: -------------------------------------------------------------------------------- 1 | ../cirrus-ci_artifacts.py -------------------------------------------------------------------------------- /cirrus-ci_artifacts/test/run_all_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | TESTDIR=$(dirname ${BASH_SOURCE[0]}) 6 | 7 | if [[ "$GITHUB_ACTIONS" == "true" ]]; then 8 | echo "Lint/Style checking not supported under github actions: Skipping" 9 | exit 0 10 | fi 11 | 12 | if [[ -x $(type -P flake8-3) ]]; then 13 | cd "$TESTDIR" 14 | set -a 15 | virtualenv testvenv 16 | source testvenv/bin/activate 17 | testvenv/bin/python -m pip install --upgrade pip 18 | pip3 install --requirement ../requirements.txt 19 | set +a 20 | 21 | ./test_cirrus-ci_artifacts.py -v 22 | 23 | cd .. 24 | flake8-3 --max-line-length=100 ./cirrus-ci_artifacts.py 25 | flake8-3 --max-line-length=100 --extend-ignore=D101,D102,D103,D105 test/test_cirrus-ci_artifacts.py 26 | else 27 | echo "Can't find flake-8-3 binary, is script executing inside CI container?" 
28 | exit 1 29 | fi 30 | -------------------------------------------------------------------------------- /cirrus-ci_artifacts/test/test_cirrus-ci_artifacts.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """Verify contents of .cirrus.yml meet specific expectations.""" 4 | 5 | import asyncio 6 | import os 7 | import re 8 | import unittest 9 | from contextlib import redirect_stderr, redirect_stdout 10 | from io import StringIO 11 | from tempfile import TemporaryDirectory 12 | from unittest.mock import MagicMock, mock_open, patch 13 | 14 | import ccia 15 | 16 | import yaml 17 | 18 | 19 | def fake_makedirs(*args, **dargs): 20 | return None 21 | 22 | 23 | # Needed for testing asyncio functions and calls 24 | # ref: https://agariinc.medium.com/strategies-for-testing-async-code-in-python-c52163f2deab 25 | class AsyncMock(MagicMock): 26 | 27 | async def __call__(self, *args, **dargs): 28 | return super().__call__(*args, **dargs) 29 | 30 | 31 | class AsyncContextManager(MagicMock): 32 | 33 | async def __aenter__(self, *args, **dargs): 34 | return self.__enter__(*args, **dargs) 35 | 36 | async def __aexit__(self, *args, **dargs): 37 | return self.__exit__(*args, **dargs) 38 | 39 | 40 | class TestBase(unittest.TestCase): 41 | 42 | FAKE_CCI = "sql://fake.url.invalid/graphql" 43 | FAKE_API = "smb://fake.url.invalid/artifact" 44 | 45 | def setUp(self): 46 | ccia.VERBOSE = True 47 | patch('ccia.CCI_GQL_URL', new=self.FAKE_CCI).start() 48 | patch('ccia.CCI_ART_URL', new=self.FAKE_API).start() 49 | self.addCleanup(patch.stopall) 50 | 51 | 52 | class TestUtils(TestBase): 53 | 54 | # YAML is easier on human eyeballs 55 | # Ref: https://github.com/cirruslabs/cirrus-ci-web/blob/master/schema.graphql 56 | # type Artifacts and ArtifactFileInfo 57 | TEST_TASK_YAML = """ 58 | - &test_task 59 | name: task_1 60 | id: 1 61 | buildId: 0987654321 62 | artifacts: 63 | - name: test_art-0 64 | type: test_type-0 65 | format: art_format-0 66 | files: 67 | - path: path/test/art/0 68 | size: 0 69 | - name: test_art-1 70 | type: test_type-1 71 | format: art_format-1 72 | files: 73 | - path: path/test/art/1 74 | size: 1 75 | - path: path/test/art/2 76 | size: 2 77 | - name: test_art-2 78 | type: test_type-2 79 | format: art_format-2 80 | files: 81 | - path: path/test/art/3 82 | size: 3 83 | - path: path/test/art/4 84 | size: 4 85 | - path: path/test/art/5 86 | size: 5 87 | - path: path/test/art/6 88 | size: 6 89 | - <<: *test_task 90 | name: task_2 91 | id: 2 92 | """ 93 | TEST_TASKS = yaml.safe_load(TEST_TASK_YAML) 94 | TEST_URL_RX = re.compile(r"987654321/task_.+/test_art-.+/path/test/art/.+") 95 | 96 | def test_task_art_url_sfxs(self): 97 | for test_task in self.TEST_TASKS: 98 | actual = ccia.task_art_url_sfxs(test_task) 99 | with self.subTest(test_task=test_task): 100 | for url in actual: 101 | with self.subTest(url=url): 102 | self.assertRegex(url, self.TEST_URL_RX) 103 | 104 | # N/B: The ClientSession mock causes a (probably) harmless warning: 105 | # ResourceWarning: unclosed transport <_SelectorSocketTransport fd=7> 106 | # I have no idea how to fix or hide this, leaving it as-is. 
107 | def test_download_artifacts_all(self): 108 | for test_task in self.TEST_TASKS: 109 | with self.subTest(test_task=test_task), \ 110 | patch('ccia.download_artifact', new_callable=AsyncMock), \ 111 | patch('ccia.ClientSession', new_callable=AsyncContextManager), \ 112 | patch('ccia.makedirs', new=fake_makedirs), \ 113 | patch('ccia.open', new=mock_open()): 114 | 115 | # N/B: This makes debugging VERY difficult, comment out for pdb use 116 | fake_stdout = StringIO() 117 | fake_stderr = StringIO() 118 | with redirect_stderr(fake_stderr), redirect_stdout(fake_stdout): 119 | asyncio.run(ccia.download_artifacts(test_task)) 120 | self.assertEqual(fake_stderr.getvalue(), '') 121 | for line in fake_stdout.getvalue().splitlines(): 122 | with self.subTest(line=line): 123 | self.assertRegex(line.strip(), self.TEST_URL_RX) 124 | 125 | 126 | class TestMain(unittest.TestCase): 127 | 128 | def setUp(self): 129 | ccia.VERBOSE = True 130 | try: 131 | self.bid = os.environ["CIRRUS_BUILD_ID"] 132 | except KeyError: 133 | self.skipTest("Requires running under Cirrus-CI") 134 | self.tmp = TemporaryDirectory(prefix="test_ccia_tmp") 135 | self.cwd = os.getcwd() 136 | os.chdir(self.tmp.name) 137 | 138 | def tearDown(self): 139 | os.chdir(self.cwd) 140 | self.tmp.cleanup() 141 | 142 | def main_result_has(self, results, stdout_filepath, action="downloaded"): 143 | for result in results: 144 | for action_filepath in result[action]: 145 | if action_filepath == stdout_filepath: 146 | exists = os.path.isfile(os.path.join(self.tmp.name, action_filepath)) 147 | if "downloaded" in action: 148 | self.assertTrue(exists, 149 | msg=f"Downloaded not found: '{action_filepath}'") 150 | return 151 | # action==skipped 152 | self.assertFalse(exists, 153 | msg=f"Skipped file found: '{action_filepath}'") 154 | return 155 | self.fail(f"Expecting to find {action_filepath} entry in main()'s {action} results") 156 | 157 | def test_cirrus_ci_download_all(self): 158 | expect_rx = re.compile(f".+'{self.bid}/[^/]+/[^/]+/.+'") 159 | # N/B: This makes debugging VERY difficult, comment out for pdb use 160 | fake_stdout = StringIO() 161 | fake_stderr = StringIO() 162 | with redirect_stderr(fake_stderr), redirect_stdout(fake_stdout): 163 | import warnings 164 | warnings.filterwarnings("ignore", category=DeprecationWarning) 165 | results = ccia.main(self.bid) 166 | self.assertEqual(fake_stderr.getvalue(), '') 167 | for line in fake_stdout.getvalue().splitlines(): 168 | with self.subTest(line=line): 169 | s_line = line.lower().strip() 170 | filepath = line.split(sep="'", maxsplit=3)[1] 171 | self.assertRegex(s_line, expect_rx) 172 | if s_line.startswith("download"): 173 | self.main_result_has(results, filepath) 174 | elif s_line.startswith("skip"): 175 | self.main_result_has(results, filepath, "skipped") 176 | else: 177 | self.fail(f"Unexpected stdout line: '{s_line}'") 178 | 179 | def test_cirrus_ci_download_none(self): 180 | # N/B: This makes debugging VERY difficult, comment out for pdb use 181 | fake_stdout = StringIO() 182 | fake_stderr = StringIO() 183 | with redirect_stderr(fake_stderr), redirect_stdout(fake_stdout): 184 | results = ccia.main(self.bid, r"this-will-match-nothing") 185 | for line in fake_stdout.getvalue().splitlines(): 186 | with self.subTest(line=line): 187 | s_line = line.lower().strip() 188 | filepath = line.split(sep="'", maxsplit=3)[1] 189 | self.assertRegex(s_line, r"skipping") 190 | self.main_result_has(results, filepath, "skipped") 191 | 192 | 193 | if __name__ == "__main__": 194 | unittest.main() 195 | 
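(Editorial sketch, not a file from this repository.) The YAML fixture above and `task_art_url_sfxs()` in `cirrus-ci_artifacts.py` together define how a Cirrus-CI task record is flattened into `<buildId>/<task name>/<artifact name>/<file path>` suffixes, which double as the download URL tail and the local output sub-directory. Below is a minimal, self-contained illustration of that mapping; the `CCI_ART_URL` value here is a hypothetical placeholder, since the real constant is defined near the top of `cirrus-ci_artifacts.py`, and the task dict only mirrors the shape of the GraphQL query and the `TEST_TASK_YAML` fixture.

```python
# Illustration only: re-derives the suffix logic shown in cirrus-ci_artifacts.py above.
from urllib.parse import quote

# Hypothetical placeholder; the real base URL is the CCI_ART_URL constant in cirrus-ci_artifacts.py.
CCI_ART_URL = "https://api.cirrus-ci.com/v1/artifact/build"

# Shape mirrors the GraphQL task query and the TEST_TASK_YAML fixture used by TestUtils.
task = {
    "name": "Unit tests on fedora-33",
    "id": 1,
    "buildId": "0987654321",
    "artifacts": [
        {"name": "test_art-1",
         "files": [{"path": "path/test/art/1"},
                   {"path": "path/test/art/2"}]},
    ],
}


def task_art_url_sfxs(task):
    """Return one URL suffix (and local sub-path) per artifact file."""
    bid = task["buildId"]
    tname = quote(task["name"])  # task names may contain spaces
    return [f"{bid}/{tname}/{quote(art['name'])}/{quote(f['path'])}"
            for art in task["artifacts"] for f in art["files"]]


for sfx in task_art_url_sfxs(task):
    print(f"{CCI_ART_URL}/{sfx}")
# Suffixes produced (appended to CCI_ART_URL):
#   0987654321/Unit%20tests%20on%20fedora-33/test_art-1/path/test/art/1
#   0987654321/Unit%20tests%20on%20fedora-33/test_art-1/path/test/art/2
```

After download, `download_artifacts()` strips the URL-encoding with `unquote()` and writes each file to that same relative path under the current directory, which is what `TestMain.main_result_has()` verifies.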
-------------------------------------------------------------------------------- /cirrus-ci_env/.install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Installs cirrus-ci_env system-wide. NOT intended to be used directly 4 | # by humans, should only be used indirectly by running 5 | # ../bin/install_automation.sh cirrus-ci_env 6 | 7 | set -eo pipefail 8 | 9 | source "$AUTOMATION_LIB_PATH/anchors.sh" 10 | source "$AUTOMATION_LIB_PATH/console_output.sh" 11 | 12 | INSTALL_PREFIX=$(realpath $AUTOMATION_LIB_PATH/../) 13 | # Assume the directory this script is in, represents what is being installed 14 | INSTALL_NAME=$(basename $(dirname ${BASH_SOURCE[0]})) 15 | AUTOMATION_VERSION=$(automation_version) 16 | [[ -n "$AUTOMATION_VERSION" ]] || \ 17 | die "Could not determine version of common automation libs, was 'install_automation.sh' successful?" 18 | 19 | echo "Installing $INSTALL_NAME version $(automation_version) into $INSTALL_PREFIX" 20 | 21 | unset INST_PERM_ARG 22 | if [[ $UID -eq 0 ]]; then 23 | INST_PERM_ARG="-o root -g root" 24 | fi 25 | 26 | cd $(dirname $(realpath "${BASH_SOURCE[0]}")) 27 | install -v cirrus-ci_env.py -D "$INSTALL_PREFIX/bin/" 28 | 29 | # Needed for installer testing 30 | echo "Successfully installed $INSTALL_NAME" 31 | -------------------------------------------------------------------------------- /cirrus-ci_env/test/actual_task_names.txt: -------------------------------------------------------------------------------- 1 | APIv2 test on fedora-33 2 | Alt Arch. Cross 3 | Build Each Commit 4 | Build Without CGO 5 | Build for fedora-33 6 | Build for ubuntu-2004 7 | Build for ubuntu-2010 8 | Check Automation 9 | Docker-py Compat. 10 | Ext. services 11 | OSX Cross 12 | Optional Release Test 13 | Static Build 14 | Test Bindings 15 | Test Code Consistency 16 | Test Swagger 17 | Test build RPM 18 | Total Success 19 | Unit tests on fedora-33 20 | Unit tests on ubuntu-2004 21 | Unit tests on ubuntu-2010 22 | Upgrade test: from v1.9.0 23 | Upgrade test: from v2.0.6 24 | Upgrade test: from v2.1.1 25 | VM img. keepalive 26 | Validate fedora-33 Build 27 | Verify Release 28 | Verify Win Installer Build 29 | Windows Cross 30 | compose test on fedora-33 31 | int podman fedora-33 root container 32 | int podman fedora-33 root host 33 | int podman fedora-33 rootless host 34 | int podman ubuntu-2004 root host 35 | int podman ubuntu-2010 root host 36 | int remote fedora-33 root host 37 | int remote ubuntu-2004 root host 38 | int remote ubuntu-2010 root host 39 | machine podman fedora-33 rootless host 40 | sys podman fedora-33 root host 41 | sys podman fedora-33 rootless host 42 | sys podman ubuntu-2004 root host 43 | sys podman ubuntu-2010 root host 44 | sys remote fedora-33 root host 45 | sys remote ubuntu-2004 root host 46 | sys remote ubuntu-2010 root host 47 | -------------------------------------------------------------------------------- /cirrus-ci_env/test/expected_ti.yml: -------------------------------------------------------------------------------- 1 | APIv2 test on fedora-33: 2 | - gcevm 3 | - fedora-c6524344056676352 4 | Alt Arch. 
Cross: 5 | - gcevm 6 | - fedora-c6524344056676352 7 | Build Each Commit: 8 | - gcevm 9 | - fedora-c6524344056676352 10 | Build Without CGO: 11 | - gcevm 12 | - fedora-c6524344056676352 13 | Build for fedora-33: 14 | - gcevm 15 | - fedora-c6524344056676352 16 | Build for ubuntu-2004: 17 | - gcevm 18 | - prior-ubuntu-c6524344056676352 19 | Build for ubuntu-2010: 20 | - gcevm 21 | - ubuntu-c6524344056676352 22 | Check Automation: 23 | - container 24 | - quay.io/libpod/fedora_podman:c6524344056676352 25 | Docker-py Compat.: 26 | - gcevm 27 | - fedora-c6524344056676352 28 | Ext. services: 29 | - container 30 | - quay.io/libpod/fedora_podman:c6524344056676352 31 | OSX Cross: &blahblah 32 | - osx 33 | - catalina-base 34 | MacOS Cross: *blahblah 35 | Optional Release Test: 36 | - gcevm 37 | - fedora-c6524344056676352 38 | Static Build: 39 | - gcevm 40 | - fedora-c6524344056676352 41 | Test Bindings: 42 | - gcevm 43 | - fedora-c6524344056676352 44 | Test Code Consistency: 45 | - container 46 | - quay.io/libpod/fedora_podman:c6524344056676352 47 | Test Swagger: 48 | - gcevm 49 | - fedora-c6524344056676352 50 | Test build RPM: 51 | - gcevm 52 | - fedora-c6524344056676352 53 | Total Success: 54 | - container 55 | - quay.io/libpod/fedora_podman:c6524344056676352 56 | Unit tests on fedora-33: 57 | - gcevm 58 | - fedora-c6524344056676352 59 | Unit tests on ubuntu-2004: 60 | - gcevm 61 | - prior-ubuntu-c6524344056676352 62 | Unit tests on ubuntu-2010: 63 | - gcevm 64 | - ubuntu-c6524344056676352 65 | 'Upgrade test: from v1.9.0': 66 | - gcevm 67 | - fedora-c6524344056676352 68 | 'Upgrade test: from v2.0.6': 69 | - gcevm 70 | - fedora-c6524344056676352 71 | 'Upgrade test: from v2.1.1': 72 | - gcevm 73 | - fedora-c6524344056676352 74 | VM img. keepalive: 75 | - container 76 | - quay.io/libpod/imgts:c6524344056676352 77 | Validate fedora-33 Build: 78 | - gcevm 79 | - fedora-c6524344056676352 80 | Verify Release: 81 | - gcevm 82 | - fedora-c6524344056676352 83 | Verify Win Installer Build: 84 | - wincntnr 85 | - cirrusci/windowsservercore:2019 86 | Windows Cross: 87 | - gcevm 88 | - fedora-c6524344056676352 89 | compose test on fedora-33: 90 | - gcevm 91 | - fedora-c6524344056676352 92 | int podman fedora-33 root container: 93 | - gcevm 94 | - fedora-c6524344056676352 95 | int podman fedora-33 root host: 96 | - gcevm 97 | - fedora-c6524344056676352 98 | int podman fedora-33 rootless host: 99 | - gcevm 100 | - fedora-c6524344056676352 101 | int podman ubuntu-2004 root host: 102 | - gcevm 103 | - prior-ubuntu-c6524344056676352 104 | int podman ubuntu-2010 root host: 105 | - gcevm 106 | - ubuntu-c6524344056676352 107 | int remote fedora-33 root host: 108 | - gcevm 109 | - fedora-c6524344056676352 110 | int remote ubuntu-2004 root host: 111 | - gcevm 112 | - prior-ubuntu-c6524344056676352 113 | int remote ubuntu-2010 root host: 114 | - gcevm 115 | - ubuntu-c6524344056676352 116 | machine podman fedora-33 rootless host: 117 | - ec2vm 118 | - ami-04f37091c3ec43890 119 | sys podman fedora-33 root host: 120 | - gcevm 121 | - fedora-c6524344056676352 122 | sys podman fedora-33 rootless host: 123 | - gcevm 124 | - fedora-c6524344056676352 125 | sys podman ubuntu-2004 root host: 126 | - gcevm 127 | - prior-ubuntu-c6524344056676352 128 | sys podman ubuntu-2010 root host: 129 | - gcevm 130 | - ubuntu-c6524344056676352 131 | sys remote fedora-33 root host: 132 | - gcevm 133 | - fedora-c6524344056676352 134 | sys remote ubuntu-2004 root host: 135 | - gcevm 136 | - prior-ubuntu-c6524344056676352 137 | sys remote ubuntu-2010 
root host: 138 | - gcevm 139 | - ubuntu-c6524344056676352 140 | -------------------------------------------------------------------------------- /cirrus-ci_env/test/run_all_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cd $(dirname ${BASH_SOURCE[0]}) 6 | ./test_cirrus-ci_env.py 7 | ./testbin-cirrus-ci_env.sh 8 | ./testbin-cirrus-ci_env-installer.sh 9 | 10 | if [[ "$GITHUB_ACTIONS" == "true" ]]; then 11 | echo "Lint/Style checking not supported under github actions: Skipping" 12 | exit 0 13 | elif [[ -x $(type -P flake8-3) ]]; then 14 | cd .. 15 | flake8-3 --max-line-length=100 . 16 | flake8-3 --max-line-length=100 --extend-ignore=D101,D102 test 17 | else 18 | echo "Can't find flake-8-3 binary, is script executing inside CI container?" 19 | exit 1 20 | fi 21 | -------------------------------------------------------------------------------- /cirrus-ci_env/test/testbin-cirrus-ci_env-installer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Load standardized test harness 4 | SCRIPT_DIRPATH=$(dirname "${BASH_SOURCE[0]}") 5 | source $SCRIPT_DIRPATH/testlib.sh || exit 1 6 | 7 | # Must go through the top-level install script that chains to ../.install.sh 8 | TEST_DIR=$(realpath "$SCRIPT_DIRPATH/../") 9 | INSTALL_SCRIPT=$(realpath "$TEST_DIR/../bin/install_automation.sh") 10 | TEMPDIR=$(mktemp -p "" -d "tmpdir_cirrus-ci_env_XXXXX") 11 | 12 | test_cmd "Verify cirrus-ci_env can be installed under $TEMPDIR" \ 13 | 0 'Installation complete for.+cirrus-ci_env' \ 14 | env INSTALL_PREFIX=$TEMPDIR $INSTALL_SCRIPT 0.0.0 cirrus-ci_env 15 | 16 | test_cmd "Verify executing cirrus-ci_env.py gives 'usage' error message" \ 17 | 2 'cirrus-ci_env.py: error: the following arguments are required:' \ 18 | $TEMPDIR/automation/bin/cirrus-ci_env.py 19 | 20 | trap "rm -rf $TEMPDIR" EXIT 21 | exit_with_status 22 | -------------------------------------------------------------------------------- /cirrus-ci_env/test/testbin-cirrus-ci_env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Load standardized test harness 4 | SCRIPT_DIRPATH=$(dirname "${BASH_SOURCE[0]}") 5 | source ${SCRIPT_DIRPATH}/testlib.sh || exit 1 6 | 7 | TEST_DIR=$(realpath "$SCRIPT_DIRPATH/../") 8 | SUBJ_FILEPATH="$TEST_DIR/${SUBJ_FILENAME%.sh}.py" 9 | 10 | test_cmd "Verify no options results in help and an error-exit" \ 11 | 2 "cirrus-ci_env.py: error: the following arguments are required:" \ 12 | $SUBJ_FILEPATH 13 | 14 | test_cmd "Verify missing/invalid filename results in help and an error-exit" \ 15 | 2 "No such file or directory" \ 16 | $SUBJ_FILEPATH /path/to/not/existing/file.yml \ 17 | 18 | test_cmd "Verify missing mode-option results in help message and an error-exit" \ 19 | 2 "error: one of the arguments --list --envs --inst is required" \ 20 | $SUBJ_FILEPATH $SCRIPT_DIRPATH/actual_cirrus.yml 21 | 22 | test_cmd "Verify valid-YAML w/o tasks results in help message and an error-exit" \ 23 | 1 "ERROR: No Cirrus-CI tasks found in" \ 24 | $SUBJ_FILEPATH --list $SCRIPT_DIRPATH/expected_cirrus.yml 25 | 26 | CIRRUS=$SCRIPT_DIRPATH/actual_cirrus.yml 27 | test_cmd "Verify invalid task name results in help message and an error-exit" \ 28 | 1 "ERROR: Unknown task name 'foobarbaz' from" \ 29 | $SUBJ_FILEPATH --env foobarbaz $CIRRUS 30 | 31 | TASK_NAMES=$(<"$SCRIPT_DIRPATH/actual_task_names.txt") 32 | echo "$TASK_NAMES" | while read LINE; do 33 | test_cmd 
"Verify task '$LINE' appears in task-listing output" \ 34 | 0 "$LINE" \ 35 | $SUBJ_FILEPATH --list $CIRRUS 36 | done 37 | 38 | test_cmd "Verify inherited instance image with env. var. reference is rendered" \ 39 | 0 "container quay.io/libpod/fedora_podman:c6524344056676352" \ 40 | $SUBJ_FILEPATH --inst 'Ext. services' $CIRRUS 41 | 42 | test_cmd "Verify DISTRO_NV env. var renders correctly from test task" \ 43 | 0 'DISTRO_NV="fedora-33"' \ 44 | $SUBJ_FILEPATH --env 'int podman fedora-33 root container' $CIRRUS 45 | 46 | test_cmd "Verify VM_IMAGE_NAME env. var renders correctly from test task" \ 47 | 0 'VM_IMAGE_NAME="fedora-c6524344056676352"' \ 48 | $SUBJ_FILEPATH --env 'int podman fedora-33 root container' $CIRRUS 49 | 50 | exit_with_status 51 | -------------------------------------------------------------------------------- /cirrus-ci_env/test/testlib.sh: -------------------------------------------------------------------------------- 1 | ../../common/test/testlib.sh -------------------------------------------------------------------------------- /cirrus-ci_retrospective/.install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Installs cirrus-ci_retrospective system-wide. NOT intended to be used directly 4 | # by humans, should only be used indirectly by running 5 | # ../bin/install_automation.sh cirrus-ci_retrospective 6 | 7 | set -eo pipefail 8 | 9 | source "$AUTOMATION_LIB_PATH/anchors.sh" 10 | source "$AUTOMATION_LIB_PATH/console_output.sh" 11 | 12 | INSTALL_PREFIX=$(realpath $AUTOMATION_LIB_PATH/../) 13 | # Assume the directory this script is in, represents what is being installed 14 | INSTALL_NAME=$(basename $(dirname ${BASH_SOURCE[0]})) 15 | AUTOMATION_VERSION=$(automation_version) 16 | [[ -n "$AUTOMATION_VERSION" ]] || \ 17 | die "Could not determine version of common automation libs, was 'install_automation.sh' successful?" 
18 | 19 | echo "Installing $INSTALL_NAME version $(automation_version) into $INSTALL_PREFIX" 20 | 21 | unset INST_PERM_ARG 22 | if [[ $UID -eq 0 ]]; then 23 | INST_PERM_ARG="-o root -g root" 24 | fi 25 | 26 | cd $(dirname $(realpath "${BASH_SOURCE[0]}")) 27 | install -v $INST_PERM_ARG -D -t "$INSTALL_PREFIX/bin" ./bin/* 28 | install -v $INST_PERM_ARG -D -t "$INSTALL_PREFIX/lib" ./lib/* 29 | 30 | # Needed for installer testing 31 | echo "Successfully installed $INSTALL_NAME" 32 | -------------------------------------------------------------------------------- /cirrus-ci_retrospective/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.fedoraproject.org/fedora-minimal:latest 2 | RUN microdnf update -y && \ 3 | microdnf install -y findutils jq git curl python3 && \ 4 | microdnf clean all && \ 5 | rm -rf /var/cache/dnf 6 | # Assume build is for development/manual testing purposes by default (automation should override with fixed version) 7 | ARG INSTALL_AUTOMATION_VERSION=latest 8 | ARG INSTALL_AUTOMATION_URI=https://github.com/containers/automation/releases/latest/download/install_automation.sh 9 | ADD / /usr/src/automation 10 | RUN if [[ "$INSTALL_AUTOMATION_VERSION" == "0.0.0" ]]; then \ 11 | env INSTALL_PREFIX=/usr/share \ 12 | /usr/src/automation/bin/install_automation.sh 0.0.0 github cirrus-ci_retrospective; \ 13 | else \ 14 | curl --silent --show-error --location \ 15 | --url "$INSTALL_AUTOMATION_URI" | env INSTALL_PREFIX=/usr/share \ 16 | /bin/bash -s - "$INSTALL_AUTOMATION_VERSION" github cirrus-ci_retrospective; \ 17 | fi 18 | # Required environment variables 19 | ENV AUTOMATION_LIB_PATH="" \ 20 | GITHUB_ACTIONS="false" \ 21 | ACTIONS_STEP_DEBUG="false" \ 22 | GITHUB_EVENT_NAME="" \ 23 | GITHUB_EVENT_PATH="" \ 24 | GITHUB_TOKEN="" 25 | # Optional (recommended) environment variables 26 | ENV OUTPUT_JSON_FILE="" 27 | WORKDIR /root 28 | ENTRYPOINT ["/bin/bash", "-c", "source /etc/automation_environment && exec /usr/share/automation/bin/cirrus-ci_retrospective.sh"] 29 | -------------------------------------------------------------------------------- /cirrus-ci_retrospective/README.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | This directory contains the necessary pieces to produce a container image 4 | for execution in Github Actions workflow. Simply stated, it cross-references 5 | and provides all necessary contextual details to automate followup tasks and 6 | behaviors which are guaranteed to occur after completion of a Cirrus-CI build. 7 | 8 | For example, it can collate and process individual Cirrus-CI Task results and 9 | attachments. Determine if the Cirrus-CI build occurred on a PR, and if so 10 | provide additional feedback to the author. It could also be used to automatically 11 | and securely produce certified/signed OS Package builds following subsequent 12 | to tests passing on a tagged commit. 
13 | 14 | # Example Github Action Workflow 15 | 16 | On the 'main' (default) branch of a repository (previously setup and running 17 | tasks in Cirrus-CI), add the following file: 18 | 19 | `.github/workflows/cirrus-ci_retrospective.yml` 20 | ```yaml 21 | 22 | on: 23 | check_suite: # ALWAYS triggered from the default branch 24 | types: 25 | - completed 26 | 27 | jobs: 28 | if: github.event.check_suite.app.name == 'Cirrus CI' 29 | runs-on: ubuntu-latest 30 | steps: 31 | - name: Execute latest upstream cirrus-ci_retrospective 32 | uses: docker://quay.io/libpod/cirrus-ci_retrospective:v1.0.0 33 | env: 34 | GITHUB_TOKEN: ${{ github.token }} 35 | 36 | ...act on contents of ./cirrus-ci_retrospective.json... 37 | ``` 38 | 39 | ## Dependencies: 40 | 41 | In addition to the basic `common` requirements (see [top-level README.md](../README.md)) 42 | the following system packages (or their equivalents) are needed: 43 | 44 | * curl 45 | * jq 46 | * sed 47 | 48 | ## Usage Notes: 49 | 50 | * The trigger, `check_suite` type `completed` is the only event currently supported 51 | by the container. This is not a technical limitation however. 52 | 53 | * There is only ever one `check_suite` created per commit ID of a repository. If 54 | a build is re-run in Cirrus-CI, it will result in re-triggering the workflow. 55 | 56 | * It's possible for multiple runs of the workflow to be executing simultaneously 57 | against the same commit-id. Depending on various timing factors and external 58 | forces. For example, a branch-push and a tag-push. 59 | 60 | * The job _must_ filter on `github.event.check_suite.app.name` to avoid 61 | needlessly executing against other CI-systems Check Suites. 62 | 63 | * Implementations should utilize the version-tagged container images to provide 64 | behavior and output-format stability. 65 | 66 | ## Warning 67 | 68 | Due to security concerns, Github Actions only supports execution vs check_suite events 69 | from workflows already committed on the 'main' branch. This makes it difficult to 70 | test implementations, since they will not execute until merged. 71 | 72 | However, the output JSON does provide all the necessary details to re-create, then possibly 73 | re-execute the changes proposed in a PR. This fact is utilized in _this_ repository to 74 | perform test-executions for PRs. See the workflow file for comments on related details. 75 | 76 | 77 | # Output Decoding 78 | 79 | The output JSON is an `array` of all Cirrus-CI tasks which completed after being triggered by 80 | one of the supported mechanisms (i.e. PR push, branch push, or tag push). At the time 81 | this was written, CRON-based runs in Cirrus-CI do not trigger a `check_suite` in Github. 82 | Otherwise, based on various values in the output JSON, it is possible to objectively 83 | determine the execution context for the build. 84 | 85 | *Note*: The object nesting is backwards from what you may expect. The top-level object 86 | represents an individual `task`, but contains it's `build` object to make parsing 87 | with `jq` easier. In reality, the data model actually represents a single `build`, 88 | containing multiple `tasks`. 89 | 90 | ## After pushing to pull request number 34 91 | 92 | ```json 93 | { 94 | id: "1234567890", 95 | ...cut... 96 | "build": { 97 | "id": "0987654321" 98 | "changeIdInRepo": "679085b3f2b40797fedb60d02066b3cbc592ae4e", 99 | "branch": "pull/34", 100 | "pullRequest": 34, 101 | ...cut... 102 | } 103 | ...cut... 
104 | } 105 | ``` 106 | 107 | ## Pull request 34's `trigger_type: manual` task (not yet triggered) 108 | 109 | ```json 110 | { 111 | id: "something", 112 | ...cut... 113 | "status": "PAUSED", 114 | "automaticReRun": false, 115 | "build": { 116 | "id": "otherthing" 117 | "changeIdInRepo": "679085b3f2b40797fedb60d02066b3cbc592ae4e", 118 | "branch": "pull/34", 119 | "pullRequest": 34, 120 | } 121 | ...cut... 122 | } 123 | ``` 124 | 125 | *Important note about manual tasks:* Manually triggering an independent the task 126 | ***will not*** result in a new `check_suite`. Therefore, the cirrus-ci_retrospective 127 | action will not execute again, irrespective of pass, fail or any other manual task status. 128 | Also, if any task in Cirrus-CI is dependent on a manual task, the build itself will not 129 | conclude until the manual task is triggered and completes (pass, fail, or other). 130 | 131 | ## After merging pull request 34 into main branch (merge commit added) 132 | 133 | ```json 134 | { 135 | ...cut... 136 | "build": { 137 | "id": "foobarbaz" 138 | "changeIdInRepo": "232bae5d8ffb6082393e7543e4e53f978152f98a", 139 | "branch": "main", 140 | "pullRequest": null, 141 | ...cut... 142 | } 143 | ...cut... 144 | } 145 | ``` 146 | 147 | ## After pushing the tag `v2.2.0` on former pull request 34's HEAD 148 | 149 | ```json 150 | { 151 | id: "1234567890", 152 | ...cut... 153 | "build": { 154 | ...cut... 155 | "changeIdInRepo": "679085b3f2b40797fedb60d02066b3cbc592ae4e", 156 | "branch": "v2.2.0", 157 | "pullRequest": null, 158 | ...cut... 159 | } 160 | ...cut... 161 | } 162 | ``` 163 | 164 | ## Recommended `jq` filters for `cirrus-ci_retrospective.json` 165 | 166 | Given a "conclusion" task name in Cirrus-CI (e.g. `cirrus-ci/test_success`): 167 | 168 | * Obtain the pull number (set `null` if Cirrus-CI ran against a branch or tag) 169 | `'.[] | select(.name == "cirrus-ci/test_success") | .build.pullRequest'` 170 | 171 | * Obtain the HEAD commit ID used by Cirrus-CI for the build (always available) 172 | `'.[] | select(.name == "cirrus-ci/test_success") | .build.changeIdInRepo'` 173 | 174 | * ...todo: add more 175 | -------------------------------------------------------------------------------- /cirrus-ci_retrospective/bin/cirrus-ci_retrospective.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | # Execute inside a github action, using a completed check_suite event's JSON file 6 | # as input. Queries details about the concluded Cirrus-CI build, tasks, artifacts, 7 | # execution environment, and associated repository state. 8 | 9 | source $(dirname "${BASH_SOURCE[0]}")/../lib/$(basename "${BASH_SOURCE[0]}") 10 | 11 | if ((DEBUG)); then 12 | dbg "# Warning: Debug mode enabled: NOT cleaning up '$TMPDIR' upon exit." 
13 | else 14 | trap "rm -rf $TMPDIR" EXIT 15 | fi 16 | 17 | verify_env_vars 18 | 19 | INTERMEDIATE_OUTPUT_EXT=".json_item" 20 | OUTPUT_JSON_FILE="${OUTPUT_JSON_FILE:-$GITHUB_WORKSPACE/${SCRIPT_FILENAME%.sh}.json}" 21 | 22 | # Confirm expected triggering event and type 23 | jq --exit-status 'has("check_suite")' < "$GITHUB_EVENT_PATH" || \ 24 | die "Expecting to find a top-level 'check_suite' key in event JSON $GITHUB_EVENT_PATH" 25 | 26 | _act_typ=$(jq --compact-output --raw-output '.action' < "$GITHUB_EVENT_PATH") 27 | [[ "$_act_typ" == "completed" ]] || \ 28 | die "Expecting github action 'check_suite' event to be type 'completed', got '$_act_typ'" 29 | 30 | _filt='.check_suite.app.id' 31 | cirrus_app_id=$(jq --compact-output --raw-output "$_filt" < "$GITHUB_EVENT_PATH") 32 | dbg "# Working with Github Application ID: '$cirrus_app_id'" 33 | [[ -n "$cirrus_app_id" ]] || \ 34 | die "Expecting non-empty value from jq filter $_filt in $GITHUB_EVENT_PATH" 35 | [[ "$cirrus_app_id" -gt 0 ]] || \ 36 | die "Expecting jq filter $_filt value to be integer greater than 0, got '$cirrus_app_id'" 37 | 38 | # Guaranteed shortcut by Github API straight to actual check_suite node 39 | _filt='.check_suite.node_id' 40 | cs_node_id=$(jq --compact-output --raw-output "$_filt" < "$GITHUB_EVENT_PATH") 41 | dbg "# Working with github global node id '$cs_node_id'" 42 | [[ -n "$cs_node_id" ]] || \ 43 | die "Expecting the jq filter $_filt to be non-empty value in $GITHUB_EVENT_PATH" 44 | 45 | # Validate node is really the type expected - global node ID's can point anywhere 46 | dbg "# Checking type of object at '$cs_node_id'" 47 | # Only verification test important, discard actual output 48 | _=$(url_query_filter_test \ 49 | "$GHQL_URL" \ 50 | "{ 51 | node(id: \"$cs_node_id\") { 52 | __typename 53 | } 54 | }" \ 55 | '.data.node.__typename' \ 56 | '@@@@ = "CheckSuite"' 57 | ) 58 | 59 | # This count is needed to satisfy 'first' being a required parameter in subsequent query 60 | dbg "# Obtaining total number of check_runs present on confirmed CheckSuite object" 61 | cr_count=$(url_query_filter_test \ 62 | "$GHQL_URL" \ 63 | "{ 64 | node(id: \"$cs_node_id\") { 65 | ... on CheckSuite { 66 | checkRuns { 67 | totalCount 68 | } 69 | } 70 | } 71 | }" \ 72 | '.data.node.checkRuns.totalCount' \ 73 | '@@@@ -gt 0' \ 74 | ) 75 | 76 | # 'externalId' is the database key needed to query Cirrus-CI GraphQL API 77 | dbg "# Obtaining task names and id's for up to '$cr_count' check_runs max." 78 | task_ids=$(url_query_filter_test \ 79 | "$GHQL_URL" \ 80 | "{ 81 | node(id: \"$cs_node_id\") { 82 | ... 
on CheckSuite { 83 | checkRuns(first: $cr_count, filterBy: {appId: $cirrus_app_id}) { 84 | nodes { 85 | externalId 86 | name 87 | } 88 | } 89 | } 90 | } 91 | }" \ 92 | '.data.node.checkRuns.nodes[] | .name + ";" + .externalId + ","' \ 93 | '-n @@@@') 94 | 95 | dbg "# Clearing any unintended intermediate json files" 96 | # Warning: Using a side-effect here out of pure laziness 97 | dbg "## $(rm -fv $TMPDIR/*.$INTERMEDIATE_OUTPUT_EXT)" 98 | 99 | dbg "# Processing task names and ids" 100 | unset GITHUB_TOKEN # not needed/used for cirrus-ci query 101 | echo "$task_ids" | tr -d '",' | while IFS=';' read task_name task_id 102 | do 103 | dbg "# Cross-referencing task '$task_name' ID '$task_id' in Cirrus-CI's API:" 104 | [[ -n "$task_id" ]] || \ 105 | die "Expecting non-empty id for task '$task_name'" 106 | [[ -n "$task_name" ]] || \ 107 | die "Expecting non-empty name for task id '$task_id'" 108 | 109 | # To be slurped up into an array of json maps as a final step 110 | output_json=$(tmpfile .$INTERMEDIATE_OUTPUT_EXT) 111 | dbg "# Writing task details into '$output_json' temporarily" 112 | url_query_filter_test \ 113 | "$CCI_URL" \ 114 | "{ 115 | task(id: $task_id) { 116 | id 117 | name 118 | status 119 | automaticReRun 120 | build {id changeIdInRepo branch pullRequest status repository { 121 | owner name cloneUrl masterBranch 122 | } 123 | } 124 | artifacts {name files{path}} 125 | } 126 | }" \ 127 | '.' \ 128 | '-n @@@@' | jq --indent 4 '.data.task' > "$output_json" 129 | done 130 | 131 | dbg "# Combining all task data into JSON list as action output and into $OUTPUT_JSON_FILE" 132 | set_out_var json \ 133 | $(jq --indent 4 --slurp '.' $TMPDIR/.*$INTERMEDIATE_OUTPUT_EXT | \ 134 | tee "$OUTPUT_JSON_FILE" | jq --compact-output '.') 135 | -------------------------------------------------------------------------------- /cirrus-ci_retrospective/bin/debug.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script assists with debugging cirrus-ci_retrospective.sh 4 | # manually/locally or remotely in a github action. 5 | # 6 | # Usage: You need to export a valid $GITHUB_EVENT_PATH and $GITHUB_TOKEN value. 7 | # The referenced event file, should be the JSON file from a "check_suite" event 8 | # from a github action with a status of "completed". There should be a "debug" 9 | # github action active on this repo. to produce this as an artifact. Check 10 | # YAML files under $REPO_ROOT/.github/workflows/ for github action configs. 11 | # 12 | # The $GITHUB_TOKEN value may be a personal access token obtained from github 13 | # under settings -> developer settings -> personal access token. 14 | # When creating a new token, you need to enable the following scopes: 15 | # 16 | # read:discussion, read:enterprise, read:gpg_key, read:org, read:public_key, 17 | # read:repo_hook, read:user, repo:status, repo_deployment, user:email 18 | 19 | set -eo pipefail 20 | 21 | # Do not use magic default values set by common.sh 22 | DEBUG_MSG_PREFIX="DEBUG:" 23 | WARNING_MSG_PREFIX="WARNING:" 24 | ERROR_MSG_PREFIX="ERROR:" 25 | source "$(dirname $0)/../lib/common.sh" 26 | DEBUG_SUBJECT_FILEPATH="$SCRIPT_PATH/cirrus-ci_retrospective.sh" 27 | 28 | [[ -x "$DEBUG_SUBJECT_FILEPATH" ]] || \ 29 | die "Expecting to find $DEBUG_SUBJECT_FILEPATH executable" 30 | 31 | # usage: [extra read args] 32 | set_required_envvar() { 33 | local envvar=$1 34 | local xtra=$2 35 | 36 | if [[ -z "${!envvar}" ]]; then 37 | MSG="A non-empty value for \$$envvar is required." 
38 | warn "$MSG" 39 | # Use timeout in case of executing under unsupervised automation 40 | read -p "Please enter the value to use, within 30-seconds: " -t 30 $xtra $envvar 41 | [[ -n "${!envvar}" ]] || \ 42 | die "$MSG" 43 | fi 44 | } 45 | 46 | set_required_envvar GITHUB_TOKEN -s 47 | set_required_envvar GITHUB_EVENT_PATH 48 | 49 | _MSGPFX="Expecting $GITHUB_EVENT_PATH to contain a 'check_suite'" 50 | if ! jq .check_suite < "$GITHUB_EVENT_PATH" | head -1; then 51 | die "$_MSGPFX map." 52 | fi 53 | 54 | export GITHUB_TOKEN 55 | 56 | [[ $(jq --raw-output .check_suite.app.name < "$GITHUB_EVENT_PATH" | head -1) == "Cirrus CI" ]] || \ 57 | die "$_MSGPFX from Cirrus-CI" 58 | unset _MSGPFX 59 | 60 | export GITHUB_EVENT_PATH 61 | export GITHUB_EVENT_NAME=check_suite # Validated above 62 | export GITHUB_ACTIONS=true # Mock value from Github Actions 63 | export GITHUB_WORKSPACE="${GITHUB_WORKSPACE:-$SCRIPT_PATH/../../}" 64 | export DEBUG=1 # The purpose of this script 65 | 66 | $DEBUG_SUBJECT_FILEPATH 67 | -------------------------------------------------------------------------------- /cirrus-ci_retrospective/lib/ccir_common.sh: -------------------------------------------------------------------------------- 1 | 2 | # This library simply sources the necessary common libraries. 3 | # Not intended for direct execution 4 | AUTOMATION_LIB_PATH="${AUTOMATION_LIB_PATH:-$(dirname $(realpath ${BASH_SOURCE[0]}))/../../common/lib}" 5 | GITHUB_ACTION_LIB="${GITHUB_ACTION_LIB:-$AUTOMATION_LIB_PATH/github_common.sh}" 6 | # Allow in-place use w/o installing, e.g. for testing 7 | [[ -r "$GITHUB_ACTION_LIB" ]] || \ 8 | GITHUB_ACTION_LIB="$AUTOMATION_LIB_PATH/../../github/lib/github_common.sh" 9 | # Also loads common lib 10 | source "$GITHUB_ACTION_LIB" 11 | -------------------------------------------------------------------------------- /cirrus-ci_retrospective/lib/cirrus-ci_retrospective.sh: -------------------------------------------------------------------------------- 1 | 2 | # Library of constants and functions for the cirrus-ci_retrospective script 3 | # Not intended to be executed directly. 4 | 5 | source $(dirname "${BASH_SOURCE[0]}")/ccir_common.sh 6 | 7 | # GH GraphQL General Reference: https://developer.github.com/v4/object/ 8 | # GH CheckSuite Object Reference: https://developer.github.com/v4/object/checksuite 9 | GHQL_URL="https://api.github.com/graphql" 10 | # Cirrus-CI GrqphQL Reference: https://cirrus-ci.org/api/ 11 | CCI_URL="https://api.cirrus-ci.com/graphql" 12 | TMPDIR=$(mktemp -p '' -d "$MKTEMP_FORMAT") 13 | # Support easier unit-testing 14 | CURL=${CURL:-$(type -P curl)} 15 | 16 | # Using python3 here is a compromise for readability and 17 | # properly handling quote, control and unicode character encoding. 
18 | json_escape() { 19 | local json_string 20 | # Assume it's okay to squash repeated whitespaces inside the query 21 | json_string=$(printf '%s' "$1" | \ 22 | tr --delete '\r\n' | \ 23 | tr --squeeze-repeats '[[:space:]]' | \ 24 | python3 -c 'import sys,json; print(json.dumps(sys.stdin.read()))') 25 | # The $json_string in message is already quoted 26 | dbg "##### Escaped JSON string: $json_string" 27 | echo -n "$json_string" 28 | } 29 | 30 | # Given a GraphQL Query JSON, encode it as a GraphQL query string 31 | encode_query() { 32 | dbg "#### Encoding GraphQL Query into JSON string" 33 | [[ -n "$1" ]] || \ 34 | die "Expecting JSON string as first argument to ${FUNCNAME[0]}()" 35 | local json 36 | local quoted 37 | # Embed GraphQL as escaped string into JSON 38 | # Using printf's escaping works well 39 | quoted=$(json_escape "$1") 40 | json=$(jq --compact-output . <<<"{\"query\": $quoted}") 41 | dbg "#### Query JSON: $json" 42 | echo -n "$json" 43 | } 44 | 45 | # Get a temporary file named with the calling-function's name 46 | # Optionally, if the first argument is non-empty, use it as the file extension 47 | tmpfile() { 48 | [[ -n "${FUNCNAME[1]}" ]] || \ 49 | die "tmpfile() function expects to be called by another function." 50 | [[ -z "$1" ]] || \ 51 | local ext=".$1" 52 | mktemp -p "$TMPDIR" "$MKTEMP_FORMAT${ext}" 53 | } 54 | 55 | # Given a URL Data and optionally a token, validate then print formatted JSON string 56 | curl_post() { 57 | local url="$1" 58 | local data="$2" 59 | local token=$GITHUB_TOKEN 60 | local auth="" 61 | [[ -n "$url" ]] || \ 62 | die "Expecting non-empty url argument" 63 | [[ -n "$data" ]] || \ 64 | die "Expecting non-empty data argument" 65 | 66 | [[ -n "$token" ]] || \ 67 | dbg "### Warning: \$GITHUB_TOKEN is empty, performing unauthenticated query" >> /dev/stderr 68 | # Don't expose secrets on any command-line 69 | local headers_tmpf 70 | local headers_tmpf=$(tmpfile headers) 71 | cat << EOF > "$headers_tmpf" 72 | accept: application/vnd.github.antiope-preview+json 73 | content-type: application/json 74 | ${token:+authorization: Bearer $token} 75 | EOF 76 | 77 | # Avoid needing to pass large strings on the command-line 78 | local data_tmpf=$(tmpfile data) 79 | echo "$data" > "$data_tmpf" 80 | 81 | local curl_cmd="$CURL --silent --request POST --url $url --header @$headers_tmpf --data @$data_tmpf" 82 | dbg "### Executing '$curl_cmd'" 83 | local ret="0" 84 | $curl_cmd >> /dev/stdout || ret=$? 85 | 86 | # Don't leave secrets lying around in files 87 | rm -f "$headers_tmpf" "$data_tmpf" &> /dev/null 88 | dbg "### curl exit code '$ret'" 89 | return $ret 90 | } 91 | 92 | # Apply filter to json file while making any errors easy to debug 93 | filter_json() { 94 | local filter="$1" 95 | local json_file="$2" 96 | [[ -n "$filter" ]] || die "Expected non-empty jq filter string" 97 | [[ -r "$json_file" ]] || die "Expected readable JSON file" 98 | 99 | dbg "### Validating JSON in '$json_file'" 100 | # Confirm input json is valid and make filter problems easier to debug (below) 101 | local tmp_json_file=$(tmpfile json) 102 | if ! jq -e . < "$json_file" > "$tmp_json_file"; then 103 | rm -f "$tmp_json_file" 104 | # JQ has already shown an error message 105 | die "Error from jq relating to JSON: $(cat $json_file)" 106 | else 107 | dbg "### JSON found to be valid" 108 | # Allow re-using temporary file 109 | cp "$tmp_json_file" "$json_file" 110 | fi 111 | 112 | dbg "### Applying filter '$filter'" 113 | if ! 
jq --indent 4 "$filter" < "$json_file" > "$tmp_json_file"; then 114 | # JQ has already shown an error message 115 | rm -f "$tmp_json_file" 116 | die "Error from jq relating to JSON: $(cat $json_file)" 117 | fi 118 | 119 | dbg "### Filter applied cleanly" 120 | cp "$tmp_json_file" "$json_file" 121 | } 122 | 123 | # Name suggests parameter order and purpose 124 | # N/B: Any @@@@ appearing in test_args will be substituted with the quoted simple/raw JSON value. 125 | url_query_filter_test() { 126 | local url="$1" 127 | local query_json="$2" 128 | local filter="$3" 129 | shift 3 130 | local test_args 131 | test_args="$@" 132 | [[ -n "$url" ]] || \ 133 | die "Expecting non-empty url argument" 134 | [[ -n "$filter" ]] || \ 135 | die "Expecting non-empty filter argument" 136 | [[ -n "$query_json" ]] || \ 137 | die "Expecting non-empty query_json argument" 138 | 139 | dbg "## Submitting GraphQL Query, filtering and verifying the result" 140 | local encoded_query=$(encode_query "$query_json") 141 | local ret 142 | local curl_outputf=$(tmpfile json) 143 | 144 | ret=0 145 | curl_post "$url" "$encoded_query" > "$curl_outputf" || ret=$? 146 | dbg "## Curl output file: $curl_outputf)" 147 | [[ "$ret" -eq "0" ]] || \ 148 | die "Curl command exited with non-zero code: $ret" 149 | 150 | # Validates both JSON and filter, updates $curl_outputf 151 | filter_json "$filter" "$curl_outputf" 152 | if [[ -n "$test_args" ]]; then 153 | # The test command can only process simple, single-line strings 154 | local simplified=$(jq --compact-output --raw-output . < "$curl_outputf" | tr -d '[:space:]') 155 | # json_escape will properly quote and escape the value for safety 156 | local _test_args=$(sed -r -e "s~@@@@~$(json_escape $simplified)~" <<<"test $test_args") 157 | # Catch error coming from sed, e.g. 
if '~' happens to be in $simplified 158 | [[ -n "$_test_args" ]] || \ 159 | die "Substituting @@@@ in '$test_args'" 160 | dbg "## $_test_args" 161 | ( eval "$_test_args" ) || \ 162 | die "Test '$test_args' failed on whitespace-squashed & simplified JSON '$simplified'" 163 | fi 164 | cat "$curl_outputf" 165 | } 166 | 167 | verify_env_vars() { 168 | # N/B: The word 'Expecting' is checked for by unit-testing 169 | [[ "$GITHUB_ACTIONS" == "true" ]] || \ 170 | die "Expecting \$GITHUB_ACTIONS to be 'true', got '$GITHUB_ACTIONS'" 2 171 | 172 | [[ "$GITHUB_EVENT_NAME" = "check_suite" ]] || \ 173 | die "Expecting \$GITHUB_EVENT_NAME to be 'check_suite', got '$GITHUB_EVENT_NAME'" 2 174 | 175 | [[ -r "$GITHUB_EVENT_PATH" ]] || \ 176 | die "Expecting readable \$GITHUB_EVENT_PATH file, got '$GITHUB_EVENT_PATH'" 2 177 | 178 | [[ -n "$GITHUB_TOKEN" ]] || \ 179 | die "Expecting non-empty \$GITHUB_TOKEN, got '$GITHUB_TOKEN'" 2 180 | 181 | [[ -d "$GITHUB_WORKSPACE" ]] || \ 182 | die "Expecting \$GITHUB_WORKSPACE directory, got '$GITHUB_WORKSPACE'" 2 183 | } 184 | -------------------------------------------------------------------------------- /cirrus-ci_retrospective/test/run_all_tests.sh: -------------------------------------------------------------------------------- 1 | ../../common/test/run_all_tests.sh -------------------------------------------------------------------------------- /cirrus-ci_retrospective/test/testbin-cirrus-ci_retrospective-installer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Load standardized test harness 4 | source $(dirname "${BASH_SOURCE[0]}")/testlib.sh || exit 1 5 | 6 | # Must go through the top-level install script that chains to ../.install.sh 7 | INSTALL_SCRIPT=$(realpath "$TEST_DIR/../../bin/install_automation.sh") 8 | TEMPDIR=$(mktemp -p "" -d "tmpdir_cirrus-ci_retrospective_XXXXX") 9 | 10 | test_cmd "Verify cirrus-ci_retrospective can be installed under $TEMPDIR" \ 11 | 0 'Installation complete for.+installed cirrus-ci_retrospective' \ 12 | env INSTALL_PREFIX=$TEMPDIR $INSTALL_SCRIPT 0.0.0 github cirrus-ci_retrospective 13 | 14 | test_cmd "Verify executing cirrus-ci_retrospective.sh gives 'Expecting' error message" \ 15 | 2 '::error.+Expecting' \ 16 | env AUTOMATION_LIB_PATH=$TEMPDIR/automation/lib $TEMPDIR/automation/bin/cirrus-ci_retrospective.sh 17 | 18 | trap "rm -rf $TEMPDIR" EXIT 19 | exit_with_status 20 | -------------------------------------------------------------------------------- /cirrus-ci_retrospective/test/testbin-cirrus-ci_retrospective.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Load standardized test harness 4 | source $(dirname "${BASH_SOURCE[0]}")/testlib.sh || exit 1 5 | 6 | # Would otherwise get in the way of checking output & removing $TMPDIR 7 | DEBUG=0 8 | SUBJ_FILEPATH="$TEST_DIR/$SUBJ_FILENAME" 9 | 10 | ##### MAIN() ##### 11 | 12 | # ref: https://help.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables#default-environment-variables 13 | req_env_vars=(GITHUB_ACTIONS GITHUB_EVENT_NAME GITHUB_EVENT_PATH GITHUB_TOKEN GITHUB_WORKSPACE) 14 | # Script may actually be running under Github Actions 15 | declare -A original_values 16 | original_values[GITHUB_ACTIONS]="$GITHUB_ACTIONS" 17 | original_values[GITHUB_EVENT_NAME]="$GITHUB_EVENT_NAME" 18 | original_values[GITHUB_EVENT_PATH]="$GITHUB_EVENT_PATH" 19 | original_values[GITHUB_TOKEN]="$GITHUB_TOKEN" 20 | 
original_values[GITHUB_WORKSPACE]="$GITHUB_WORKSPACE" 21 | 22 | declare -A valid_values 23 | valid_values[GITHUB_ACTIONS]="true" 24 | valid_values[GITHUB_EVENT_NAME]="check_suite" 25 | valid_values[GITHUB_EVENT_PATH]="/etc/passwd-" 26 | valid_values[GITHUB_TOKEN]="$RANDOM" 27 | valid_values[GITHUB_WORKSPACE]="$HOME" 28 | 29 | declare -A invalid_values 30 | invalid_values[GITHUB_ACTIONS]="false" 31 | invalid_values[GITHUB_EVENT_NAME]="$RANDOM" 32 | invalid_values[GITHUB_EVENT_PATH]="$RANDOM" 33 | invalid_values[GITHUB_TOKEN]="" 34 | invalid_values[GITHUB_WORKSPACE]="/etc/passwd-" 35 | 36 | # Set all to known, valid, dummy values 37 | for required_var in $req_env_vars; do 38 | export $required_var="${valid_values[$required_var]}" 39 | done 40 | 41 | # Don't depend on the order these are checked in the subject 42 | for required_var in ${req_env_vars[@]}; do 43 | valid_value="${valid_values[$required_var]}" 44 | invalid_value="${invalid_values[$required_var]}" 45 | export $required_var="$invalid_value" 46 | test_cmd \ 47 | "Verify exeuction w/ \$$required_var='$invalid_value' (instead of '$valid_value') fails with helpful error message." \ 48 | 2 "::error.+\\\$$required_var.+'$invalid_value'" \ 49 | $SUBJ_FILEPATH 50 | export $required_var="$valid_value" 51 | done 52 | 53 | # Setup to feed test Github Action event JSON 54 | TESTTEMPDIR=$(mktemp -p '' -d tmp_${SUBJ_FILENAME}_XXXXXXXX) 55 | trap "rm -rf $TESTTEMPDIR" EXIT 56 | MOCK_EVENT_JSON_FILEPATH=$(mktemp -p "$TESTTEMPDIR" mock_event_XXXXXXXX.json) 57 | cat << EOF > "$MOCK_EVENT_JSON_FILEPATH" 58 | {} 59 | EOF 60 | 61 | export GITHUB_EVENT_PATH=$MOCK_EVENT_JSON_FILEPATH 62 | 63 | test_cmd "Verify expected error when fed empty mock event JSON file" \ 64 | 1 "::error.+check_suite.+key" \ 65 | $SUBJ_FILEPATH 66 | 67 | cat << EOF > "$MOCK_EVENT_JSON_FILEPATH" 68 | {"check_suite":{}} 69 | EOF 70 | test_cmd "Verify expected error when fed invalid check_suite value in mock event JSON file" \ 71 | 1 "::error.+check_suite.+type.+null" \ 72 | $SUBJ_FILEPATH 73 | 74 | cat << EOF > "$MOCK_EVENT_JSON_FILEPATH" 75 | {"check_suite": {}, "action": "foobar"} 76 | EOF 77 | test_cmd "Verify error and message containing incorrect value from mock event JSON file" \ 78 | 1 "::error.+check_suite.+foobar" \ 79 | $SUBJ_FILEPATH 80 | 81 | cat << EOF > "$MOCK_EVENT_JSON_FILEPATH" 82 | {"check_suite": {"app":false}, "action": "completed"} 83 | EOF 84 | test_cmd "Verify expected error when check_suite's 'app' map is wrong type in mock event JSON file" \ 85 | 5 "jq: error.+boolean.+id" \ 86 | $SUBJ_FILEPATH 87 | 88 | cat << EOF > "$MOCK_EVENT_JSON_FILEPATH" 89 | {"check_suite": {"app":{"id":null}}, "action": "completed"} 90 | EOF 91 | test_cmd "Verify expected error when 'app' id is wrong type in mock event JSON file" \ 92 | 1 "::error.+integer.+null" \ 93 | $SUBJ_FILEPATH 94 | 95 | # Must always happen last 96 | for required_var in $req_env_vars; do 97 | export $required_var="${original_values[$required_var]}" 98 | done 99 | exit_with_status 100 | -------------------------------------------------------------------------------- /cirrus-ci_retrospective/test/testlib-cirrus-ci_retrospective.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Load standardized test harness 4 | source $(dirname "${BASH_SOURCE[0]}")/testlib.sh || exit 1 5 | 6 | # Would otherwise get in the way of checking output & removing $TMPDIR 7 | DEBUG=0 8 | source "$TEST_DIR/$SUBJ_FILENAME" 9 | 10 | _TMPDIR="$TMPDIR" # some testing requires 
examining all $TMPDIR contents 11 | if [[ -d "$_TMPDIR" ]]; then 12 | trap "rm -rf $_TMPDIR" EXIT # The REAL directory to remove 13 | fi 14 | 15 | # There are many paths to die(), some specific paths need to be tested 16 | SPECIAL_DEATH_CODE=101 17 | rename_function die _die 18 | die() { 19 | echo "Caught call to die() from ${FUNCNAME[1]} with message: ${1:-NOMSG}" > /dev/stderr 20 | exit $SPECIAL_DEATH_CODE 21 | } 22 | 23 | mock_env_vars() { 24 | export GITHUB_ACTIONS=true 25 | export GITHUB_EVENT_NAME=check_suite 26 | export GITHUB_EVENT_PATH=$(tmpfile) 27 | export GITHUB_TOKEN=$RANDOM$RANDOM$RANDOM 28 | export GITHUB_WORKSPACE=$_TMPDIR 29 | } 30 | 31 | mock_curl_zero_exit() { 32 | local real_curl=$(type -P curl) 33 | if [[ "$CURL" != "$real_curl" ]]; then 34 | sed -r -i -e "s/exit .+/exit 0/g" "$CURL" 35 | else 36 | echo "Cowardly refusing to modify" > /dev/stderr 37 | exit 103 38 | fi 39 | } 40 | 41 | ##### MAIN() ##### 42 | 43 | test_cmd \ 44 | "Call verify_env_vars() w/o required values should exit with an error code and message." \ 45 | $SPECIAL_DEATH_CODE \ 46 | 'Caught call to die.+verify_env_vars' \ 47 | verify_env_vars 48 | 49 | mock_env_vars 50 | 51 | test_cmd \ 52 | "Call encode_query with an empty argument should exit with an error code and message." \ 53 | $SPECIAL_DEATH_CODE \ 54 | 'Caught call to die.+encode_query.+JSON' \ 55 | encode_query 56 | 57 | COMPLEX="{\"foo 58 | \t\"\t\t bar{\r '" 59 | for test_f in json_escape encode_query; do 60 | test_cmd \ 61 | "Call to $test_f properly handles complex string containing control-characters and embedded quotes" \ 62 | 0 \ 63 | ".*foo.*bar.*" \ 64 | $test_f "$COMPLEX" 65 | done 66 | 67 | # e.g. output 68 | # { 69 | # "query": "[] " 70 | # } 71 | test_cmd \ 72 | "Call to encode_query '[]' is formatted in the expected way" \ 73 | 0 \ 74 | '\{"query":"\[\]"\}' \ 75 | encode_query '[]' 76 | 77 | TEST_EXTENSION=foobarbaz 78 | test_cmd \ 79 | "Verify no tmpfile with testing extension '$TEST_EXTENSION' is present before the next test" \ 80 | 0 \ 81 | "" \ 82 | find "$TMPDIR" -name "*.$TEST_EXTENSION" 83 | 84 | test_cmd \ 85 | "Calling tmpfile with an argument, uses it as the file extension" \ 86 | 0 \ 87 | "$TMPDIR.+\.$TEST_EXTENSION" \ 88 | tmpfile "$TEST_EXTENSION" 89 | 90 | TEST_JSON='[{"1":2},{"3":4}]' 91 | TEST_JSON_FILE=$(mktemp -p "$_TMPDIR" TEST_JSON_XXXXXXXX) 92 | echo "$TEST_JSON" > "$TEST_JSON_FILE" 93 | test_cmd \ 94 | "Verify filter_json with invalid filter mentions jq in error" \ 95 | $SPECIAL_DEATH_CODE \ 96 | 'Caught.+filter_json.+jq' \ 97 | filter_json "!" 
"$TEST_JSON_FILE" 98 | 99 | TEST_FILT='.[1]["3"]' 100 | test_cmd \ 101 | "Verify filter_json '$TEST_FILT' '$TEST_JSON_FILE' has no output" \ 102 | 0 \ 103 | "" \ 104 | filter_json "$TEST_FILT" "$TEST_JSON_FILE" 105 | 106 | test_cmd \ 107 | "Verify final copy of '$TEST_JSON_FILE' has expected contents" \ 108 | 0 \ 109 | '^4 $' \ 110 | cat "$TEST_JSON_FILE" 111 | 112 | # Makes checking temp-files written by curl_post() easier 113 | TMPDIR=$(mktemp -d -p "$_TMPDIR" "tmpdir_curl_XXXXX") 114 | # Set up a mock for argument checking 115 | _CURL="$CURL" 116 | _CURL_EXIT=42 117 | CURL="$_TMPDIR/mock_curl.sh" # used by curl_post 118 | cat << EOF > $CURL 119 | #!/bin/bash 120 | echo "curl \$*" 121 | exit $_CURL_EXIT 122 | EOF 123 | chmod +x "$CURL" 124 | 125 | test_cmd \ 126 | "Verify curl_post() does not pass secrets on the command-line" \ 127 | $_CURL_EXIT \ 128 | "^curl.+((?!${GITHUB_TOKEN}).)*$" \ 129 | curl_post foo bar 130 | 131 | mock_curl_zero_exit 132 | 133 | test_cmd \ 134 | "Verify curl_post without an mock error, does not pass data on command-line" \ 135 | 0 \ 136 | '^curl.+((?!snafu).)*$' \ 137 | curl_post foobar snafu 138 | 139 | QUERY="foobar" 140 | OUTPUT_JSON='[null,0,1,2,3,4]' 141 | cat << EOF > $CURL 142 | #!/bin/bash 143 | set -e 144 | cat << NESTEDEOF 145 | $OUTPUT_JSON 146 | NESTEDEOF 147 | exit $_CURL_EXIT 148 | EOF 149 | 150 | TEST_URL="the://url" 151 | test_cmd \ 152 | "Verify url_query_filter_test reports errors coming from curl command" \ 153 | $SPECIAL_DEATH_CODE \ 154 | "Caught.+url_query_filter_test.+$_CURL_EXIT" \ 155 | url_query_filter_test "$TEST_URL" "$QUERY" "." 156 | 157 | mock_curl_zero_exit 158 | 159 | test_cmd \ 160 | "Verify url_query_filter_test works normally with simple JSON and test" \ 161 | 0 \ 162 | "^4 $" \ 163 | url_query_filter_test "$TEST_URL" "$QUERY" ".[-1]" "@@@@ -eq 4" 164 | 165 | test_cmd \ 166 | "Verify url_query_filter_test works with single-operand test" \ 167 | 0 \ 168 | "^null $" \ 169 | url_query_filter_test "$TEST_URL" "$QUERY" ".[0]" "-n @@@@" 170 | 171 | test_cmd \ 172 | "Verify url_query_filter_test works without any test" \ 173 | 0 \ 174 | "^0 $" \ 175 | url_query_filter_test "$TEST_URL" "$QUERY" ".[1]" 176 | 177 | test_cmd \ 178 | "Verify no calls left secrets in \$TMPDIR" \ 179 | 1 \ 180 | '' \ 181 | grep -qr "$GITHUB_TOKEN" "$TMPDIR" 182 | 183 | # Put everything back the way it was for posterity 184 | TMPDIR="$_TMPDIR" 185 | CURL="$_CURL" 186 | 187 | exit_with_status 188 | -------------------------------------------------------------------------------- /cirrus-ci_retrospective/test/testlib.sh: -------------------------------------------------------------------------------- 1 | ../../common/test/testlib.sh -------------------------------------------------------------------------------- /cirrus-task-map/test/run_all_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | testdir=$(dirname $0) 6 | 7 | for i in $testdir/*.t;do 8 | echo -e "\nExecuting $testdir/$i..." >&2 9 | $i 10 | done 11 | -------------------------------------------------------------------------------- /common/README.md: -------------------------------------------------------------------------------- 1 | # Common / general purpose scripts, libraries, and tests 2 | 3 | These should not be used directly, though they should function. It is intended for 4 | implementers to first install these scripts while setting up/configuring their 5 | testing environments. 
See the `bin/install_automation.sh` script in the repository root 6 | for details. 7 | -------------------------------------------------------------------------------- /common/bin/ooe.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script executes a command while logging all output to a temporary 4 | # file. If the command exits non-zero, then all output is sent to the console, 5 | # before returning the exit code. If the script itself fails, the exit code 121 6 | # is returned. 7 | 8 | set -eo pipefail 9 | 10 | SCRIPT_BASEDIR="$(basename $0)" 11 | 12 | badusage() { 13 | echo "Incorrect usage: $SCRIPT_BASEDIR) [options]" >> /dev/stderr 14 | echo "ERROR: $1" 15 | exit 121 16 | } 17 | 18 | COMMAND="$@" 19 | [[ -n "$COMMAND" ]] || badusage "No command specified" 20 | 21 | OUTPUT_TMPFILE="$(mktemp -p '' ${SCRIPT_BASEDIR}_output_XXXX)" 22 | output_on_error() { 23 | RET=$? 24 | set +e 25 | if [[ "$RET" -ne "0" ]] 26 | then 27 | echo "---------------------------" 28 | cat "$OUTPUT_TMPFILE" 29 | echo "[$(date --iso-8601=second)] $COMMAND" 30 | fi 31 | rm -f "$OUTPUT_TMPFILE" 32 | } 33 | trap "output_on_error" EXIT 34 | 35 | "$@" 2>&1 | while IFS='' read LINE # Preserve leading/trailing whitespace 36 | do 37 | # Every stdout and (copied) stderr line 38 | echo "[$(date --iso-8601=second)] $LINE" 39 | done >> "$OUTPUT_TMPFILE" 40 | -------------------------------------------------------------------------------- /common/bin/xrtry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | # This scripts is intended to wrap commands which occasionally fail due 6 | # to external factors like networking hiccups, service failover, load-balancing, 7 | # etc. It is not designed to handle operational failures gracefully, such as 8 | # bad (wrapped) command-line arguments, running out of local disk-space, 9 | # authZ/authN, etc. 10 | 11 | # Assume script was installed or is running in dir struct. matching repo layout. 12 | AUTOMATION_LIB_PATH="${AUTOMATION_LIB_PATH:-$(dirname ${BASH_SOURCE[0]})/../lib}" 13 | source "$AUTOMATION_LIB_PATH/anchors.sh" 14 | source "$AUTOMATION_LIB_PATH/console_output.sh" 15 | 16 | usage(){ 17 | local errmsg="$1" # optional 18 | dbg "Showing usage with errmsg='$errmsg'" 19 | msg " 20 | Usage: $SCRIPT_FILENAME [[attempts] [[sleep] [exit...]]] <--> [arg...] 21 | Arguments: 22 | attempts Total number of times to attempt . Default is 3. 23 | sleep Milliseconds to sleep between retry attempts, doubling 24 | duration each failure except the last. Must also specify 25 | [attempts]. Default is 1 second 26 | exit... One or more exit code values to consider as failure. 27 | Must also specify [attempts] and [sleep]. Default is any 28 | non-zero exit. N/B: Multiple values must be quoted! 29 | -- Required separator between any / no options, and command 30 | command Path to command to execute, cannot use a shell builtin. 31 | arg... Options and/or arguments to pass to command. 
32 | " 33 | [[ -n "$errmsg" ]] || \ 34 | die "$errmsg" # exits non-zero 35 | } 36 | 37 | attempts=3 38 | sleep_ms=1000 39 | declare -a exit_codes 40 | declare -a args=("$@") 41 | 42 | n=1 43 | for arg in attempts sleep_ms exit_codes; do 44 | if [[ "$arg" == "--" ]]; then 45 | shift 46 | break 47 | fi 48 | declare "$arg=${args[n]}" 49 | shift 50 | n=$[n+1] 51 | done 52 | 53 | ((attempts>0)) || \ 54 | usage "The number of retry attempts must be greater than 1, not '$attempts'" 55 | 56 | ((sleep_ms>10)) || \ 57 | usage "The number of milliseconds must be greater than 10, not '$sleep_ms'" 58 | 59 | for exit_code in "${exit_codes[@]}"; do 60 | if ((exit_code<0)) || ((exit_code>254)); then 61 | usage "Every exit code must be between 0-254, no '$exit_code'" 62 | fi 63 | done 64 | 65 | [[ -n "$@" ]] || \ 66 | usage "Must specify a command to execute" 67 | 68 | err_retry "$attempts" "$sleep_ms" "${exit_codes[@]}" "$@" 69 | -------------------------------------------------------------------------------- /common/lib/anchors.sh: -------------------------------------------------------------------------------- 1 | 2 | # A Library for anchoring scripts and other files relative to it's 3 | # filesystem location. Not intended be executed directly. 4 | 5 | # Absolute realpath anchors for important directory tree roots. 6 | AUTOMATION_LIB_PATH=$(realpath $(dirname "${BASH_SOURCE[0]}")) # THIS file's directory 7 | AUTOMATION_ROOT=$(realpath "$AUTOMATION_LIB_PATH/../") # THIS file's parent directory 8 | SCRIPT_PATH=$(realpath "$(dirname $0)") # Source script's directory 9 | SCRIPT_FILENAME=$(basename $0) # Source script's file 10 | MKTEMP_FORMAT=".tmp_${SCRIPT_FILENAME}_XXXXXXXX" # Helps reference source 11 | 12 | _avcache="$AUTOMATION_VERSION" # cache, DO NOT USE (except for unit-tests) 13 | automation_version() { 14 | local gitbin="$(type -P git)" 15 | if [[ -z "$_avcache" ]]; then 16 | if [[ -r "$AUTOMATION_ROOT/AUTOMATION_VERSION" ]]; then 17 | _avcache=$(<"$AUTOMATION_ROOT/AUTOMATION_VERSION") 18 | # The various installers and some unit-tests rely on git in this way 19 | elif [[ -x "$gitbin" ]] && [[ -d "$AUTOMATION_ROOT/../.git" ]]; then 20 | local gitoutput 21 | # Avoid dealing with $CWD during error conditions - do it in a sub-shell 22 | if gitoutput=$(cd "$AUTOMATION_ROOT"; $gitbin describe HEAD; exit $?); then 23 | _avcache=$gitoutput 24 | fi 25 | fi 26 | fi 27 | 28 | if [[ -n "$_avcache" ]]; then 29 | echo "$_avcache" 30 | else 31 | echo "Error determining version number" >> /dev/stderr 32 | exit 1 33 | fi 34 | } 35 | -------------------------------------------------------------------------------- /common/lib/common_lib.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This file is intended to be sourced as a short-cut to loading 4 | # all common libraries one-by-one. 5 | 6 | AUTOMATION_LIB_PATH="${AUTOMATION_LIB_PATH:-$(dirname ${BASH_SOURCE[0]})}" 7 | 8 | # Filename list must be hard-coded 9 | # When installed, other files may be present in lib directory 10 | COMMON_LIBS="anchors.sh defaults.sh platform.sh utils.sh console_output.sh" 11 | for filename in $COMMON_LIBS; do 12 | source $(dirname "$BASH_SOURCE[0]}")/$filename 13 | done 14 | -------------------------------------------------------------------------------- /common/lib/console_output.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | # A Library of contextual console output-related operations. 
4 | # Intended for use by other scripts, not to be executed directly. 5 | 6 | # shellcheck source=common/lib/defaults.sh 7 | source $(dirname $(realpath "${BASH_SOURCE[0]}"))/defaults.sh 8 | 9 | # helper, not intended for use outside this file 10 | _rel_path() { 11 | if [[ -z "$1" ]]; then 12 | echo "" 13 | else 14 | local abs_path rel_path abs_path_len rel_path_len 15 | abs_path=$(realpath "$1") 16 | rel_path=$(realpath --relative-to=. $abs_path) 17 | abs_path_len=${#abs_path} 18 | rel_path_len=${#rel_path} 19 | if ((abs_path_len <= rel_path_len)); then 20 | echo "$abs_path" 21 | else 22 | echo "$rel_path" 23 | fi 24 | fi 25 | } 26 | 27 | # helper, not intended for use outside this file 28 | _ctx() { 29 | local shortest_source_path grandparent_func 30 | # Caller's caller details 31 | shortest_source_path=$(_rel_path "${BASH_SOURCE[3]}") 32 | grandparent_func="${FUNCNAME[2]}" 33 | [[ -n "$grandparent_func" ]] || \ 34 | grandparent_func="main" 35 | echo "$shortest_source_path:${BASH_LINENO[2]} in ${FUNCNAME[3]}()" 36 | } 37 | 38 | # helper, not intended for use outside this file. 39 | _fmt_ctx() { 40 | local stars prefix message 41 | stars="************************************************" 42 | prefix="${1:-no prefix given}" 43 | message="${2:-no message given}" 44 | echo "$stars" 45 | echo "$prefix ($(_ctx))" 46 | echo "$stars" 47 | } 48 | 49 | # Print a highly-visible message to stderr. Usage: warn 50 | warn() { 51 | _fmt_ctx "$WARNING_MSG_PREFIX ${1:-no warning message given}" >> /dev/stderr 52 | } 53 | 54 | # Same as warn() but exit non-zero or with given exit code 55 | # usage: die [exit-code] 56 | die() { 57 | _fmt_ctx "$ERROR_MSG_PREFIX ${1:-no error message given}" >> /dev/stderr 58 | local exit_code=${2:-1} 59 | ((exit_code==0)) || \ 60 | exit $exit_code 61 | } 62 | 63 | dbg() { 64 | local shortest_source_path 65 | if ((A_DEBUG)); then 66 | shortest_source_path=$(_rel_path "${BASH_SOURCE[1]}") 67 | ( 68 | echo 69 | echo "$DEBUG_MSG_PREFIX ${1:-No debugging message given} ($shortest_source_path:${BASH_LINENO[0]} in ${FUNCNAME[1]}())" 70 | ) >> /dev/stderr 71 | fi 72 | } 73 | 74 | msg() { 75 | echo "${1:-No message specified}" &>> /dev/stderr 76 | } 77 | 78 | # Mimic set +x for a single command, along with calling location and line. 79 | showrun() { 80 | local -a context 81 | # Tried using readarray, it broke tests for some reason, too lazy to investigate. 82 | # shellcheck disable=SC2207 83 | context=($(caller 0)) 84 | echo "+ $* # ${context[2]}:${context[0]} in ${context[1]}()" >> /dev/stderr 85 | "$@" 86 | } 87 | 88 | # Expects stdin, indents every input line right by 4 spaces 89 | indent(){ 90 | cat - |& while IFS='' read -r LINE; do 91 | awk '{print " "$0}' <<<"$LINE" 92 | done 93 | } 94 | 95 | req_env_vars(){ 96 | dbg "Confirming non-empty vars for $*" 97 | local var_name 98 | local var_value 99 | local msgpfx 100 | for var_name in "$@"; do 101 | var_value=$(tr -d '[:space:]' <<<"${!var_name}") 102 | msgpfx="Environment variable '$var_name'" 103 | ((${#var_value}>0)) || \ 104 | die "$msgpfx is required by $(_rel_path "${BASH_SOURCE[1]}"):${FUNCNAME[1]}() but empty or entirely white-space." 105 | done 106 | } 107 | 108 | show_env_vars() { 109 | local filter_rx 110 | local env_var_names 111 | filter_rx='(^PATH$)|(^BASH_FUNC)|(^_.*)' 112 | msg "Selection of current env. vars:" 113 | if [[ -n "${SECRET_ENV_RE}" ]]; then 114 | filter_rx="${filter_rx}|$SECRET_ENV_RE" 115 | else 116 | warn "The \$SECRET_ENV_RE var. unset/empty: Not filtering sensitive names!" 
117 | fi 118 | 119 | for env_var_name in $(awk 'BEGIN{for(v in ENVIRON) print v}' | grep -Eiv "$filter_rx" | sort); do 120 | 121 | line="${env_var_name}=${!env_var_name}" 122 | msg " $line" 123 | done 124 | } 125 | -------------------------------------------------------------------------------- /common/lib/defaults.sh: -------------------------------------------------------------------------------- 1 | 2 | # Library of default env. vars. for inclusion under all contexts. 3 | # Not intended to be executed directly 4 | 5 | # Set non-'false' by nearly every CI system in existence. 6 | CI="${CI:-false}" # true: _unlikely_ human-presence at the controls. 7 | [[ $CI == "false" ]] || CI='true' # Err on the side of automation 8 | 9 | # Default to NOT running in debug-mode unless set non-zero 10 | A_DEBUG=${A_DEBUG:-0} 11 | # Conditionals like ((A_DEBUG)) easier than checking "true"/"False" 12 | ( test "$A_DEBUG" -eq 0 || test "$A_DEBUG" -ne 0 ) &>/dev/null || \ 13 | A_DEBUG=1 # assume true when non-integer 14 | 15 | # String prefixes to use when printing messages to the console 16 | DEBUG_MSG_PREFIX="${DEBUG_MSG_PREFIX:-DEBUG:}" 17 | WARNING_MSG_PREFIX="${WARNING_MSG_PREFIX:-WARNING:}" 18 | ERROR_MSG_PREFIX="${ERROR_MSG_PREFIX:-ERROR:}" 19 | -------------------------------------------------------------------------------- /common/lib/platform.sh: -------------------------------------------------------------------------------- 1 | 2 | # Library of os/platform related definitions and functions 3 | # Not intended to be executed directly 4 | 5 | OS_RELEASE_VER="${OS_RELEASE_VER:-$(source /etc/os-release; echo $VERSION_ID | tr -d '.')}" 6 | OS_RELEASE_ID="${OS_RELEASE_ID:-$(source /etc/os-release; echo $ID)}" 7 | OS_REL_VER="${OS_REL_VER:-$OS_RELEASE_ID-$OS_RELEASE_VER}" 8 | 9 | # Ensure no user-input prompts in an automation context 10 | export DEBIAN_FRONTEND="${DEBIAN_FRONTEND:-noninteractive}" 11 | # _TEST_UID only needed for unit-testing 12 | # shellcheck disable=SC2154 13 | if ((UID)) || ((_TEST_UID)); then 14 | SUDO="${SUDO:-sudo}" 15 | if [[ "$OS_RELEASE_ID" =~ (ubuntu)|(debian) ]]; then 16 | if [[ ! "$SUDO" =~ noninteractive ]]; then 17 | SUDO="$SUDO env DEBIAN_FRONTEND=$DEBIAN_FRONTEND" 18 | fi 19 | fi 20 | fi 21 | # Regex defining all CI-related env. vars. necessary for all possible 22 | # testing operations on all platforms and versions. This is necessary 23 | # to avoid needlessly passing through global/system values across 24 | # contexts, such as host->container or root->rootless user 25 | # 26 | # List of envariables which must be EXACT matches 27 | PASSTHROUGH_ENV_EXACT="${PASSTHROUGH_ENV_EXACT:-DEST_BRANCH|IMAGE_SUFFIX|DISTRO_NV|SCRIPT_BASE}" 28 | 29 | # List of envariable patterns which must match AT THE BEGINNING of the name. 30 | PASSTHROUGH_ENV_ATSTART="${PASSTHROUGH_ENV_ATSTART:-CI|TEST}" 31 | 32 | # List of envariable patterns which can match ANYWHERE in the name 33 | PASSTHROUGH_ENV_ANYWHERE="${PASSTHROUGH_ENV_ANYWHERE:-_NAME|_FQIN}" 34 | 35 | # List of expressions to exclude env. vars for security reasons 36 | SECRET_ENV_RE="${SECRET_ENV_RE:-(^PATH$)|(^BASH_FUNC)|(^_.*)|(.*PASSWORD.*)|(.*TOKEN.*)|(.*SECRET.*)}" 37 | 38 | # Return a list of environment variables that should be passed through 39 | # to lower levels (tests in containers, or via ssh to rootless). 40 | # We return the variable names only, not their values. It is up to our 41 | # caller to reference values. 
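Since `passthrough_envars()` (defined immediately below) prints only variable names, a caller is expected to dereference the values itself. A hypothetical caller sketch — the container engine and image are assumptions, and `$AUTOMATION_LIB_PATH` is assumed to already be set by the installed `environment` file:

```bash
#!/bin/bash
# Hypothetical consumer of passthrough_envars(); not part of this library.
source "$AUTOMATION_LIB_PATH/platform.sh"

declare -a env_args
for name in $(passthrough_envars); do      # names only, one per line
    env_args+=("-e" "${name}=${!name}")    # the caller dereferences each value
done

# Illustrative consumer; any command needing NAME=VALUE pairs would use the array the same way.
podman run --rm "${env_args[@]}" registry.example.com/some/test-image:latest
```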
42 | passthrough_envars() { 43 | local passthrough_env_re="(^($PASSTHROUGH_ENV_EXACT)\$)|(^($PASSTHROUGH_ENV_ATSTART))|($PASSTHROUGH_ENV_ANYWHERE)" 44 | local envar 45 | 46 | for envar in SECRET_ENV_RE PASSTHROUGH_ENV_EXACT PASSTHROUGH_ENV_ATSTART PASSTHROUGH_ENV_ANYWHERE passthrough_env_re; do 47 | if [[ -z "${!envar}" ]]; then 48 | echo "Error: Required env. var. \$$envar is unset or empty in call to passthrough_envars()" >> /dev/stderr 49 | exit 1 50 | fi 51 | done 52 | 53 | echo "Warning: Will pass env. vars. matching the following regex: 54 | $passthrough_env_re" >> /dev/stderr 55 | 56 | compgen -A variable | grep -Ev "$SECRET_ENV_RE" | grep -E "$passthrough_env_re" 57 | } 58 | 59 | # On more occasions than we'd like, it's necessary to put temporary 60 | # platform-specific workarounds in place. To help ensure they'll 61 | # actually be temporary, it's useful to place a time limit on them. 62 | # This function accepts two arguments: 63 | # - A (required) future date of the form YYYYMMDD (UTC based). 64 | # - An (optional) message string to display upon expiry of the timebomb. 65 | timebomb() { 66 | local expire="$1" 67 | 68 | if ! expr "$expire" : '[0-9]\{8\}$' > /dev/null; then 69 | echo "timebomb: '$expire' must be UTC-based and of the form YYYYMMDD" 70 | exit 1 71 | fi 72 | 73 | if [[ $(date -u +%Y%m%d) -lt $(date -u -d "$expire" +%Y%m%d) ]]; then 74 | return 75 | fi 76 | 77 | declare -a frame 78 | read -a frame < <(caller) 79 | 80 | cat << EOF >> /dev/stderr 81 | *********************************************************** 82 | * TIME BOMB EXPIRED! 83 | * 84 | * >> ${frame[1]}:${frame[0]}: ${2:-No reason given, tsk tsk} 85 | * 86 | * Temporary workaround expired on ${expire:0:4}-${expire:4:2}-${expire:6:2}. 87 | * 88 | * Please review the above source file and either remove the 89 | * workaround or, if absolutely necessary, extend it. 90 | * 91 | * Please also check for other timebombs while you're at it. 92 | *********************************************************** 93 | EOF 94 | exit 1 95 | } 96 | -------------------------------------------------------------------------------- /common/lib/utils.sh: -------------------------------------------------------------------------------- 1 | 2 | # Library of utility functions for manipulating/controlling bash-internals 3 | # Not intended to be executed directly 4 | 5 | source $(dirname $(realpath "${BASH_SOURCE[0]}"))/console_output.sh 6 | 7 | copy_function() { 8 | local src="$1" 9 | local dst="$2" 10 | [[ -n "$src" ]] || \ 11 | die "Expecting source function name to be passed as the first argument" 12 | [[ -n "$dst" ]] || \ 13 | die "Expecting destination function name to be passed as the second argument" 14 | src_def=$(declare -f "$src") || [[ -n "$src_def" ]] || \ 15 | die "Unable to find source function named ${src}()" 16 | dbg "Copying function ${src}() to ${dst}()" 17 | # First match of $src replaced by $dst 18 | eval "${src_def/$src/$dst}" 19 | } 20 | 21 | rename_function() { 22 | local from="$1" 23 | local to="$2" 24 | [[ -n "$from" ]] || \ 25 | die "Expecting current function name to be passed as the first argument" 26 | [[ -n "$to" ]] || \ 27 | die "Expecting desired function name to be passed as the second argument" 28 | dbg "Copying function ${from}() to ${to}() before unlinking ${from}()" 29 | copy_function "$from" "$to" 30 | dbg "Undefining function $from" 31 | unset -f "$from" 32 | } 33 | 34 | # Return 0 if the first argument matches any subsequent argument exactly 35 | # otherwise return 1. 
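`copy_function()` and `rename_function()` above exist mainly to support a wrap-and-delegate pattern: keep the original implementation under a private name, then redefine the public name around it (`github/lib/github_common.sh` further down does exactly this with `die()`, `warn()`, and `dbg()`). A small illustrative sketch — the timestamp decoration is an arbitrary example, not something this repo does:

```bash
#!/bin/bash
# Hypothetical wrapper sketch; assumes the installed environment already set AUTOMATION_LIB_PATH.
source "$AUTOMATION_LIB_PATH/common_lib.sh"

rename_function warn _warn      # keep the original reachable as _warn(), drop the old name
warn() {
    # Decorate every warning with a timestamp, then delegate to the original.
    _warn "[$(date --iso-8601=seconds)] ${1:-no warning message given}"
}

warn "disk space is getting low"
```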
36 | contains() { 37 | local needle="$1" 38 | local hay # one piece of the stack at a time 39 | shift 40 | #dbg "Looking for '$1' in '$@'" 41 | for hay; do [[ "$hay" == "$needle" ]] && return 0; done 42 | return 1 43 | } 44 | 45 | not_contains(){ 46 | if contains "$@"; then 47 | return 1 48 | else 49 | return 0 50 | fi 51 | } 52 | 53 | # Retry a command on a particular exit code, up to a max number of attempts, 54 | # with exponential backoff. 55 | # 56 | # Usage: err_retry 57 | # Where: 58 | # attempts: The number of attempts to make. 59 | # sleep ms: Number of milliseconds to sleep (doubles every attempt) 60 | # exit_code: Space separated list of exit codes to retry. If empty 61 | # then any non-zero code will be considered for retry. 62 | # 63 | # When the number of attempts is exhausted, exit code is 126 is returned. 64 | # 65 | # N/B: Make sure the exit_code argument is properly quoted! 66 | # 67 | # Based on work by 'Ayla Ounce ' available at: 68 | # https://gist.github.com/reacocard/28611bfaa2395072119464521d48729a 69 | err_retry() { 70 | local rc=0 71 | local attempt=0 72 | local attempts="$1" 73 | local sleep_ms="$2" 74 | local -a exit_codes 75 | ((attempts>1)) || \ 76 | die "It's nonsense to retry a command less than twice, or '$attempts'" 77 | ((sleep_ms>0)) || \ 78 | die "Refusing idiotic sleep interval of $sleep_ms" 79 | local zzzs 80 | zzzs=$(awk -e '{printf "%f", $1 / 1000}'<<<"$sleep_ms") 81 | local nzexit=0 #false 82 | local dbgspec 83 | if [[ -z "$3" ]]; then 84 | nzexit=1; # true 85 | dbgspec="non-zero" 86 | else 87 | exit_codes=("$3") 88 | dbgspec="[${exit_codes[*]}]" 89 | fi 90 | 91 | shift 3 92 | 93 | dbg "Will retry $attempts times, sleeping up to $zzzs*2^$attempts or exit code(s) $dbgspec." 94 | local print_once 95 | print_once=$(echo -n " + "; printf '%q ' "${@}") 96 | for attempt in $(seq 1 $attempts); do 97 | # Make each attempt easy to distinguish 98 | if ((nzexit)); then 99 | msg "Attempt $attempt of $attempts (retry on non-zero exit):" 100 | else 101 | msg "Attempt $attempt of $attempts (retry on exit ${exit_codes[*]}):" 102 | fi 103 | if [[ -n "$print_once" ]]; then 104 | msg "$print_once" 105 | print_once="" 106 | fi 107 | "$@" && rc=$? || rc=$? # work with set -e or +e 108 | msg "exit($rc)" |& indent 1 # Make easy to debug 109 | 110 | if ((nzexit)) && ((rc==0)); then 111 | dbg "Success! $rc==0" |& indent 1 112 | return 0 113 | elif ((nzexit==0)) && not_contains $rc "${exit_codes[@]}"; then 114 | dbg "Success! ($rc not in [${exit_codes[*]}])" |& indent 1 115 | return $rc 116 | elif ((attempt> /dev/stderr 10 | ./$testscript 11 | done 12 | -------------------------------------------------------------------------------- /common/test/testbin-install_automation.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Unit-tests for installation script with common scripts/libraries. 
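Before the installer unit-tests continue, a usage sketch for `err_retry()` defined above in `common/lib/utils.sh`. The URLs are placeholders; the curl exit-code meanings (6 = could not resolve host, 7 = connection failed) are standard curl codes, not anything this library defines:

```bash
#!/bin/bash
# Hypothetical err_retry() usage; assumes AUTOMATION_LIB_PATH is already set.
source "$AUTOMATION_LIB_PATH/common_lib.sh"

# Up to 5 attempts, 2000ms initial sleep (doubling each failure); an empty
# exit-code list means any non-zero exit is retried.
err_retry 5 2000 "" curl --fail --silent --location -O https://example.com/artifact.tar.gz

# Retry only when curl exits 6 or 7; any other code (including 0) is returned immediately.
err_retry 3 1000 "6 7" curl --fail --silent --location -O https://example.com/artifact.tar.gz
```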
4 | # Also verifies test script is derived from library filename 5 | 6 | TEST_DIR=$(realpath "$(dirname ${BASH_SOURCE[0]})/../../bin") 7 | source $(dirname ${BASH_SOURCE[0]})/testlib.sh || exit 1 8 | INSTALLER_FILEPATH="$TEST_DIR/$SUBJ_FILENAME" 9 | TEST_INSTALL_ROOT=$(mktemp -p '' -d "testing_$(basename $0)_XXXXXXXX") 10 | trap "rm -rf $TEST_INSTALL_ROOT" EXIT 11 | 12 | # Receives special treatment in the installer script 13 | export INSTALL_PREFIX="$TEST_INSTALL_ROOT/testing" 14 | 15 | test_cmd \ 16 | "The installer exits non-zero with a helpful message when run without a version argument" \ 17 | 2 "Error.+version.+install.+\0.\0.\0" \ 18 | $INSTALLER_FILEPATH 19 | 20 | test_cmd \ 21 | "The installer detects an argument which is clearly not a symantic version number" \ 22 | 4 "Error.+not.+valid version number" \ 23 | $INSTALLER_FILEPATH "not a version number" 24 | 25 | test_cmd \ 26 | "The installer exits non-zero with a helpful message about an non-existent version" \ 27 | 128 "fatal.+v99.99.99.*not found" \ 28 | $INSTALLER_FILEPATH 99.99.99 29 | 30 | test_cmd \ 31 | "The installer successfully installs the oldest tag" \ 32 | 0 "installer version 'v1.0.0'.+exec.+AUTOMATION_REPO_BRANCH=main.+Installation complete" \ 33 | $INSTALLER_FILEPATH 1.0.0 34 | 35 | test_cmd \ 36 | "The oldest installed installer's default branch was modified" \ 37 | 0 "" \ 38 | grep -Eqm1 '^AUTOMATION_REPO_BRANCH=.+main' "$INSTALL_PREFIX/automation/bin/$SUBJ_FILENAME" 39 | 40 | test_cmd \ 41 | "The installer detects incompatible future installer source version by an internal mechanism" \ 42 | 10 "Error.+incompatible.+99.99.99" \ 43 | env _MAGIC_JUJU=TESTING$(uuidgen)TESTING $INSTALLER_FILEPATH 99.99.99 44 | 45 | test_cmd \ 46 | "The installer successfully installs and configures into \$INSTALL_PREFIX" \ 47 | 0 "Installation complete" \ 48 | $INSTALLER_FILEPATH 0.0.0 49 | 50 | for required_file in environment AUTOMATION_VERSION; do 51 | test_cmd \ 52 | "The installer created the file $required_file in $INSTALL_PREFIX/automation" \ 53 | 0 "" \ 54 | test -r "$INSTALL_PREFIX/automation/$required_file" 55 | done 56 | 57 | test_cmd \ 58 | "The installer correctly removes/reinstalls \$TEST_INSTALL_ROOT" \ 59 | 0 "Warning: Removing existing installed version" \ 60 | $INSTALLER_FILEPATH 0.0.0 61 | 62 | test_cmd \ 63 | "The re-installed version has AUTOMATION_VERSION file matching the current version" \ 64 | 0 "$(git describe HEAD)" \ 65 | cat "$INSTALL_PREFIX/automation/AUTOMATION_VERSION" 66 | 67 | test_cmd \ 68 | "The installer script doesn't redirect to 'stderr' anywhere." 
\ 69 | 1 "" \ 70 | grep -q '> /dev/stderr' $INSTALLER_FILEPATH 71 | 72 | load_example_environment() { 73 | local _args="$@" 74 | # Don't disturb testing 75 | ( 76 | source "$INSTALL_PREFIX/automation/environment" || return 99 77 | echo "AUTOMATION_LIB_PATH ==> ${AUTOMATION_LIB_PATH:-UNDEFINED}" 78 | echo "PATH ==> ${PATH:-EMPTY}" 79 | [[ -z "$_args" ]] || A_DEBUG=1 $_args 80 | ) 81 | } 82 | 83 | execute_in_example_environment() { 84 | load_example_environment "$@" 85 | } 86 | 87 | test_cmd \ 88 | "The example environment defines AUTOMATION_LIB_PATH" \ 89 | 0 "AUTOMATION_LIB_PATH ==> $INSTALL_PREFIX/automation/lib" \ 90 | load_example_environment 91 | 92 | test_cmd \ 93 | "The example environment appends to \$PATH" \ 94 | 0 "PATH ==> .+:$INSTALL_PREFIX/automation/bin" \ 95 | load_example_environment 96 | 97 | test_cmd \ 98 | "The installed installer, can update itself to the latest upstream version" \ 99 | 0 "Finalizing successful installation of version v" \ 100 | execute_in_example_environment $SUBJ_FILENAME latest 101 | 102 | # Ensure cleanup 103 | rm -rf $TEST_INSTALL_ROOT 104 | 105 | # Must be last call 106 | exit_with_status 107 | -------------------------------------------------------------------------------- /common/test/testlib-anchors.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Unit-tests for library script in the current directory 4 | # Also verifies test script is derived from library filename 5 | 6 | source $(dirname ${BASH_SOURCE[0]})/testlib.sh || exit 1 7 | source "$TEST_DIR/$SUBJ_FILENAME" || exit 2 8 | 9 | test_cmd "Library $SUBJ_FILENAME is not executable" \ 10 | 0 "" \ 11 | test ! -x "$SCRIPT_PATH/$SUBJ_FILENAME" 12 | 13 | test_cmd "The unit-test and library files not in same directory" \ 14 | 0 "" \ 15 | test "$AUTOMATION_LIB_PATH" != "$SCRIPT_PATH" 16 | 17 | test_cmd "This common unit-test is in test subdir relative ti AUTOMATION_ROOT" \ 18 | 0 "$AUTOMATION_ROOT/test" \ 19 | echo "$SCRIPT_PATH" 20 | 21 | test_cmd "The repository root is above \$AUTOMATION_ROOT and contains a .git directory" \ 22 | 0 "" \ 23 | test -d "$AUTOMATION_ROOT/../.git" 24 | 25 | for path_var in AUTOMATION_LIB_PATH AUTOMATION_ROOT SCRIPT_PATH; do 26 | test_cmd "\$$path_var is defined and non-empty: ${!path_var}" \ 27 | 0 "" \ 28 | test -n "${!path_var}" 29 | test_cmd "\$$path_var refers to existing directory" \ 30 | 0 "" \ 31 | test -d "${!path_var}" 32 | done 33 | 34 | test_cmd "Able to create a temporary directory using \$MKTEMP_FORMAT that references script name" \ 35 | 0 "removed.+$SCRIPT_FILENAME" \ 36 | rm -rvf $(mktemp -p '' -d "$MKTEMP_FORMAT") 37 | 38 | test_cmd "There is no AUTOMATION_VERSION file in \$AUTOMATION_ROOT before testing automation_version()" \ 39 | 1 "" \ 40 | test -r "$AUTOMATION_ROOT/AUTOMATION_VERSION" 41 | 42 | TEMPDIR=$(mktemp -p '' -d testing_${SCRIPT_FILENAME}_XXXXXXXX) 43 | trap "rm -rf $TEMPDIR" EXIT 44 | 45 | cat << EOF > "$TEMPDIR/git" 46 | #!/bin/bash -e 47 | echo "99.99.99" 48 | EOF 49 | chmod +x "$TEMPDIR/git" 50 | 51 | test_cmd "Mock git returns expected output" \ 52 | 0 "99.99.99" \ 53 | $TEMPDIR/git 54 | 55 | actual_path=$PATH 56 | export PATH=$TEMPDIR:$PATH:$TEMPDIR 57 | _avcache="" # ugly, but necessary to not pollute other test results 58 | test_cmd "Without AUTOMATION_VERSION file, automation_version() uses mock git" \ 59 | 0 "99.99.99" \ 60 | automation_version 61 | 62 | echo -e "#!/bin/bash\nexit 99" > "$TEMPDIR/git" 63 | 64 | test_cmd "Modified mock git exits with expected error 
code" \ 65 | 99 "" \ 66 | $TEMPDIR/git 67 | 68 | _avcache="" 69 | test_cmd "Without AUTOMATION_VERSION file, a git error causes automation_version() to error" \ 70 | 1 "Error determining version number" \ 71 | automation_version 72 | 73 | ln -sf /usr/bin/* $TEMPDIR/ 74 | ln -sf /bin/* $TEMPDIR/ 75 | rm -f "$TEMPDIR/git" 76 | export PATH=$TEMPDIR 77 | _avcache="" 78 | test_cmd "Without git or AUTOMATION_VERSION file automation_version() errors"\ 79 | 1 "Error determining version number" \ 80 | automation_version 81 | unset PATH 82 | export PATH=$actual_path 83 | 84 | # ensure cleanup 85 | rm -rf $TEMPDIR 86 | 87 | # Must be last call 88 | exit_with_status 89 | -------------------------------------------------------------------------------- /common/test/testlib-console_output.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIRPATH=$(dirname ${BASH_SOURCE[0]}) 4 | source $SCRIPT_DIRPATH/testlib.sh || exit 1 5 | source "$TEST_DIR"/"$SUBJ_FILENAME" || exit 2 6 | 7 | test_message_text="This is the test text for a console_output library unit-test" 8 | 9 | basic_tests() { 10 | local _fname=$1 11 | local _exp_exit=$2 12 | local _exp_word=$3 13 | 14 | [[ "$_fname" == "dbg" ]] || \ 15 | test_cmd "At least 5-stars are shown on call to $_fname function" \ 16 | $_exp_exit "\*{5}" \ 17 | $_fname "$test_message_text" 18 | 19 | test_cmd "The word '$_exp_word' appears on call to $_fname function" \ 20 | $_exp_exit "$_exp_word" \ 21 | $_fname "$test_message_text" 22 | 23 | test_cmd \ 24 | "A default message is shown when none provided" \ 25 | $_exp_exit "$_exp_word.+\w+" \ 26 | $_fname 27 | 28 | test_cmd "The message text appears on call to $_fname message" \ 29 | $_exp_exit "$test_message_text" \ 30 | $_fname "$test_message_text" 31 | 32 | test_cmd "The message text includes a the file, line number and testing function reference" \ 33 | $_exp_exit '\.sh:[[:digit:]]+ in .+\(\)' \ 34 | $_fname "$test_message_text" 35 | } 36 | 37 | for fname in warn die; do 38 | exp_exit=0 39 | exp_word="WARNING" 40 | if [[ "$fname" == "die" ]]; then 41 | exp_exit=1 42 | exp_word="ERROR" 43 | fi 44 | basic_tests $fname $exp_exit $exp_word 45 | done 46 | 47 | # Function requires stdin, must execute in subshell by test_cmd 48 | export -f indent 49 | # test_cmd whitespace-squashes output but this function's purpose is producing whitespace 50 | TEST_STRING="The quick brown fox jumped to the right by N-spaces" 51 | EXPECTED_SUM="334676ca13161af1fd95249239bb415b3d30eee7f78b39c59f9af5437989b724" 52 | test_cmd "The indent function correctly indents 4x number of spaces indicated" \ 53 | 0 "$EXPECTED_SUM" \ 54 | bash -c "echo '$TEST_STRING' | indent | sha256sum" 55 | 56 | EXPECTED_SUM="764865c67f4088dd19981733d88287e1e196e71bef317092dcb6cb9ff101a319" 57 | test_cmd "The indent function indents it's own output" \ 58 | 0 "$EXPECTED_SUM" \ 59 | bash -c "echo '$TEST_STRING' | indent | indent | sha256sum" 60 | 61 | A_DEBUG=0 62 | test_cmd \ 63 | "The dbg function has no output when \$A_DEBUG is zero and no message is given" \ 64 | 0 "" \ 65 | dbg 66 | 67 | test_cmd \ 68 | "The dbg function has no output when \$A_DEBUG is zero and a test message is given" \ 69 | 0 "" \ 70 | dbg "$test_message_text" 71 | 72 | A_DEBUG=1 73 | basic_tests dbg 0 DEBUG 74 | A_DEBUG=0 75 | 76 | test_cmd \ 77 | "All primary output functions include the expected context information" \ 78 | 0 " 79 | DEBUG: Test dbg message (console_output_test_helper.sh:21 in main()) 80 | \*+ 81 | WARNING: Test warning 
message (console_output_test_helper.sh:22 in main()) 82 | \*+ 83 | Test msg message 84 | \*+ 85 | ERROR: Test die message (console_output_test_helper.sh:24 in main()) 86 | \*+ 87 | 88 | DEBUG: Test dbg message (console_output_test_helper.sh:15 in test_function()) 89 | \*+ 90 | WARNING: Test warning message (console_output_test_helper.sh:16 in test_function()) 91 | \*+ 92 | Test msg message 93 | \*+ 94 | ERROR: Test die message (console_output_test_helper.sh:18 in test_function()) 95 | \*+ 96 | " \ 97 | bash "$SCRIPT_DIRPATH/console_output_test_helper.sh" 98 | 99 | export VAR1=foo VAR2=bar VAR3=baz 100 | test_cmd \ 101 | "The req_env_vars function has no output for all non-empty vars" \ 102 | 0 "" \ 103 | req_env_vars VAR1 VAR2 VAR3 104 | 105 | unset VAR2 106 | test_cmd \ 107 | "The req_env_vars function catches an empty VAR2 value" \ 108 | 1 "Environment variable 'VAR2' is required" \ 109 | req_env_vars VAR1 VAR2 VAR3 110 | 111 | VAR1=" 112 | " 113 | test_cmd \ 114 | "The req_env_vars function catches a whitespace-full VAR1 value" \ 115 | 1 "Environment variable 'VAR1' is required" \ 116 | req_env_vars VAR1 VAR2 VAR3 117 | 118 | unset VAR1 VAR2 VAR3 119 | test_cmd \ 120 | "The req_env_vars function shows the source file/function of caller and error" \ 121 | 1 "testlib.sh:test_cmd()" \ 122 | req_env_vars VAR1 VAR2 VAR3 123 | 124 | unset SECRET_ENV_RE 125 | test_cmd \ 126 | "The show_env_vars function issues warning when \$SECRET_ENV_RE is unset/empty" \ 127 | 0 "SECRET_ENV_RE var. unset/empty" \ 128 | show_env_vars 129 | 130 | export UPPERCASE="@@@MAGIC@@@" 131 | export super_secret="@@@MAGIC@@@" 132 | export nOrMaL_vAr="@@@MAGIC@@@" 133 | for var_name in UPPERCASE super_secret nOrMaL_vAr; do 134 | test_cmd \ 135 | "Without secret filtering, expected $var_name value is shown" \ 136 | 0 "${var_name}=${!var_name}" \ 137 | show_env_vars 138 | done 139 | 140 | export SECRET_ENV_RE='(.+SECRET.*)|(uppercase)|(mal_var)' 141 | TMPFILE=$(mktemp -p '' ".$(basename ${BASH_SOURCE[0]})_tmp_XXXX") 142 | #trap "rm -f $TMPFILE" EXIT # FIXME 143 | ( show_env_vars 2>&1 ) >> "$TMPFILE" 144 | test_cmd \ 145 | "With case-insensitive secret filtering, no magic values shown in output" \ 146 | 1 ""\ 147 | grep -q 'UPPERCASE=@@@MAGIC@@@' "$TMPFILE" 148 | 149 | unset env_vars SECRET_ENV_RE UPPERCASE super_secret nOrMaL_vAr 150 | 151 | test_cmd \ 152 | "The showrun function executes /bin/true as expected" \ 153 | 0 "\+ /bin/true # \./testlib.sh:97 in test_cmd"\ 154 | showrun /bin/true 155 | 156 | test_cmd \ 157 | "The showrun function executes /bin/false as expected" \ 158 | 1 "\+ /bin/false # \./testlib.sh:97 in test_cmd"\ 159 | showrun /bin/false 160 | 161 | test_cmd \ 162 | "The showrun function can call itself" \ 163 | 0 "\+ /bin/true # .*console_output.sh:[0-9]+ in showrun" \ 164 | showrun showrun /bin/true 165 | 166 | # script is set +e 167 | exit_with_status 168 | -------------------------------------------------------------------------------- /common/test/testlib-defaults.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source $(dirname ${BASH_SOURCE[0]})/testlib.sh || exit 1 4 | 5 | # CI must only/always be either 'true' or 'false'. 6 | # Usage: test_ci 7 | test_ci() { 8 | local prev_CI="$CI" 9 | CI="$1" 10 | source "$TEST_DIR"/"$SUBJ_FILENAME" 11 | test_cmd "Defaults library successfully (re-)loaded" \ 12 | 0 "" \ 13 | test "$?" 
-eq 0 14 | test_cmd "\$CI='$1' becomes 'true' or 'false'" \ 15 | 0 "" \ 16 | test "$CI" = "true" -o "$CI" = "false" 17 | test_cmd "\$CI value '$2' was expected" \ 18 | 0 "" \ 19 | test "$CI" = "$2" 20 | CI="$prev_CI" 21 | } 22 | 23 | # A_DEBUG must default to 0 or non-zero 24 | # usage: [initial_value] 25 | test_debug() { 26 | local exp_non_zero=$1 27 | local init_value="$2" 28 | [[ -z "$init_value" ]] || \ 29 | A_DEBUG=$init_value 30 | local desc_pfx="The \$A_DEBUG env. var initialized '$init_value', after loading library is" 31 | 32 | source "$TEST_DIR"/"$SUBJ_FILENAME" 33 | if ((exp_non_zero)); then 34 | test_cmd "$desc_pfx non-zero" \ 35 | 0 "" \ 36 | test "$A_DEBUG" -ne 0 37 | else 38 | test_cmd "$desc_pfx zero" \ 39 | 0 "" \ 40 | test "$A_DEBUG" -eq 0 41 | fi 42 | } 43 | 44 | test_ci "" "false" 45 | test_ci "$RANDOM" "true" 46 | test_ci "FoObAr" "true" 47 | test_ci "false" "false" 48 | test_ci "true" "true" 49 | 50 | test_debug 0 51 | test_debug 0 0 52 | test_debug 1 1 53 | test_debug 1 true 54 | test_debug 1 false 55 | 56 | # script is set +e 57 | exit_with_status 58 | -------------------------------------------------------------------------------- /common/test/testlib-platform.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Unit-tests for library script in the current directory 4 | # Also verifies test script is derived from library filename 5 | 6 | # shellcheck source-path=./ 7 | source $(dirname ${BASH_SOURCE[0]})/testlib.sh || exit 1 8 | # Must be statically defined, 'source-path' directive can't work here. 9 | # shellcheck source=../lib/platform.sh disable=SC2154 10 | source "$TEST_DIR/$SUBJ_FILENAME" || exit 2 11 | 12 | # For whatever reason, SCRIPT_PATH cannot be resolved. 13 | # shellcheck disable=SC2154 14 | test_cmd "Library $SUBJ_FILENAME is not executable" \ 15 | 0 "" \ 16 | test ! -x "$SCRIPT_PATH/$SUBJ_FILENAME" 17 | 18 | for var in OS_RELEASE_VER OS_RELEASE_ID OS_REL_VER; do 19 | test_cmd "The variable \$$var is defined and non-empty" \ 20 | 0 "" \ 21 | test -n "${!var}" 22 | done 23 | 24 | for var in OS_RELEASE_VER OS_REL_VER; do 25 | NODOT=$(tr -d '.' <<<"${!var}") 26 | test_cmd "The '.' character does not appear in \$$var" \ 27 | 0 "" \ 28 | test "$NODOT" == "${!var}" 29 | done 30 | 31 | for OS_RELEASE_ID in 'debian' 'ubuntu'; do 32 | ( 33 | export _TEST_UID=$RANDOM # Normally $UID is read-only 34 | # Must be statically defined, 'source-path' directive can't work here. 35 | # shellcheck source=../lib/platform.sh disable=SC2154 36 | source "$TEST_DIR/$SUBJ_FILENAME" || exit 2 37 | 38 | # The point of this test is to confirm it's defined 39 | # shellcheck disable=SC2154 40 | test_cmd "The '\$SUDO' env. var. is non-empty when \$_TEST_UID is non-zero" \ 41 | 0 "" \ 42 | test -n "$SUDO" 43 | 44 | test_cmd "The '\$SUDO' env. var. contains 'noninteractive' when '\$_TEST_UID' is non-zero" \ 45 | 0 "noninteractive" \ 46 | echo "$SUDO" 47 | ) 48 | done 49 | 50 | test_cmd "The passthrough_envars() func. has output by default." \ 51 | 0 ".+" \ 52 | passthrough_envars 53 | 54 | ( 55 | # Confirm defaults may be overriden 56 | PASSTHROUGH_ENV_EXACT="FOOBARBAZ" 57 | PASSTHROUGH_ENV_ATSTART="FOO" 58 | PASSTHROUGH_ENV_ANYWHERE="BAR" 59 | export FOOBARBAZ="testing" 60 | 61 | test_cmd "The passthrough_envars() func. w/ overriden expr. only prints name of test variable." 
\ 62 | 0 "FOOBARBAZ" \ 63 | passthrough_envars 64 | ) 65 | 66 | # Test from a mostly empty environment to limit possibility of expr mismatch flakes 67 | declare -a printed_envs 68 | readarray -t printed_envs <<<$(env --ignore-environment PATH="$PATH" FOOBARBAZ="testing" \ 69 | SECRET_ENV_RE="(^PATH$)|(^BASH_FUNC)|(^_.*)|(FOOBARBAZ)|(SECRET_ENV_RE)" \ 70 | CI="true" AUTOMATION_LIB_PATH="/path/to/some/place" \ 71 | bash -c "source $TEST_DIR/$SUBJ_FILENAME && passthrough_envars") 72 | 73 | test_cmd "The passthrough_envars() func. w/ overriden \$SECRET_ENV_RE hides test variable." \ 74 | 1 "0" \ 75 | expr match "${printed_envs[*]}" '.*FOOBARBAZ.*' 76 | 77 | test_cmd "The passthrough_envars() func. w/ overriden \$SECRET_ENV_RE returns CI variable." \ 78 | 0 "[1-9]+[0-9]*" \ 79 | expr match "${printed_envs[*]}" '.*CI.*' 80 | 81 | test_cmd "timebomb() function requires at least one argument" \ 82 | 1 "must be UTC-based and of the form YYYYMMDD" \ 83 | timebomb 84 | 85 | TZ=UTC12 \ 86 | test_cmd "timebomb() function ignores TZ and compares < UTC-forced current date" \ 87 | 1 "TIME BOMB EXPIRED" \ 88 | timebomb $(TZ=UTC date +%Y%m%d) 89 | 90 | test_cmd "timebomb() alerts user when no description given" \ 91 | 1 "No reason given" \ 92 | timebomb 00010101 93 | 94 | EXPECTED_REASON="test${RANDOM}test" 95 | test_cmd "timebomb() gives reason when one was provided" \ 96 | 1 "$EXPECTED_REASON" \ 97 | timebomb 00010101 "$EXPECTED_REASON" 98 | 99 | # Must be last call 100 | exit_with_status 101 | -------------------------------------------------------------------------------- /common/test/testlib-utils.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source $(dirname ${BASH_SOURCE[0]})/testlib.sh || exit 1 4 | source "$TEST_DIR"/"$SUBJ_FILENAME" || exit 2 5 | 6 | test_function_one(){ 7 | echo "This is test function one" 8 | } 9 | 10 | test_function_two(){ 11 | echo "This is test function two" 12 | } 13 | 14 | test_cmd "The copy_function produces no output, while copying test_function_two" \ 15 | 0 "" \ 16 | copy_function test_function_two test_function_three 17 | 18 | # test_cmd executes the command-under-test inside a sub-shell 19 | copy_function test_function_two test_function_three 20 | test_cmd "The copy of test_function_two has identical behavior as two." 
\ 21 | 0 "This is test function two" \ 22 | test_function_three 23 | 24 | test_cmd "The rename_function produces no output, while renaming test_function_one" \ 25 | 0 "" \ 26 | rename_function test_function_one test_function_three 27 | 28 | # "" 29 | rename_function test_function_one test_function_three 30 | test_cmd "The rename_function removed the source function" \ 31 | 127 "command not found" \ 32 | test_function_one 33 | 34 | test_cmd "The behavior of test_function_three matches renamed test_function_one" \ 35 | 0 "This is test function one" \ 36 | test_function_three 37 | 38 | test_cmd "The contains function operates as expected for the normal case" \ 39 | 0 "" \ 40 | contains 3 1 2 3 4 5 41 | 42 | test_cmd "The contains function operates as expected for the negative case" \ 43 | 1 "" \ 44 | contains 42 1 2 3 4 5 45 | 46 | test_cmd "The contains function operates as expected despite whitespace" \ 47 | 0 "" \ 48 | contains 'foo bar' "foobar" "foo" "foo bar" "bar" 49 | 50 | test_cmd "The contains function operates as expected despite whitespace, negative case" \ 51 | 1 "" \ 52 | contains 'foo bar' "foobar" "foo" "baz" "bar" 53 | 54 | test_cmd "The err_retry function retries three times for true + exit(0)" \ 55 | 126 "Attempt 3 of 3" \ 56 | err_retry 3 10 0 true 57 | 58 | test_cmd "The err_retry function retries three times for false, exit(1)" \ 59 | 126 "Attempt 3 of 3" \ 60 | err_retry 3 10 1 false 61 | 62 | test_cmd "The err_retry function catches an exit 42 in [1, 2, 3, 42, 99, 100, 101]" \ 63 | 42 "exit.+42" \ 64 | err_retry 3 10 "1 2 3 42 99 100 101" exit 42 65 | 66 | test_cmd "The err_retry function retries 2 time for exit 42 in [1, 2, 3, 99, 100, 101]" \ 67 | 42 "exit.+42" \ 68 | err_retry 2 10 "1 2 3 99 100 101" exit 42 69 | 70 | test_cmd "The err_retry function retries 1 time for false, non-zero exit" \ 71 | 1 "Attempt 2 of 2" \ 72 | err_retry 2 10 "" false 73 | 74 | # script is set +e 75 | exit_with_status 76 | -------------------------------------------------------------------------------- /common/test/testlib.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Library of functions and values used by other unit-testing scripts. 4 | # Not intended for direct execution. 5 | 6 | # Set non-zero to enable 7 | TEST_DEBUG=${TEST_DEBUG:-0} 8 | 9 | # Test subject filename and directory name are derived from test-script filename 10 | SUBJ_FILENAME=$(basename $0) 11 | if [[ "$SUBJ_FILENAME" =~ "testlib-" ]]; then 12 | SUBJ_FILENAME="${SUBJ_FILENAME#testlib-}" 13 | TEST_DIR="${TEST_DIR:-$(dirname $0)/../lib}" 14 | elif [[ "$SUBJ_FILENAME" =~ "testbin-" ]]; then 15 | SUBJ_FILENAME="${SUBJ_FILENAME#testbin-}" 16 | TEST_DIR="${TEST_DIR:-$(dirname $0)/../bin}" 17 | else 18 | echo "Unable to handle script filename/prefix '$SUBJ_FILENAME'" 19 | exit 9 20 | fi 21 | 22 | # Always run all tests, and keep track of failures. 
23 | FAILURE_COUNT=0 24 | 25 | # Duplicated from common/lib/utils.sh to not create any circular dependencies 26 | copy_function() { 27 | local src="$1" 28 | local dst="$2" 29 | test -n "$(declare -f "$1")" || return 30 | eval "${_/$1/$2}" 31 | } 32 | 33 | rename_function() { 34 | local from="$1" 35 | local to="$2" 36 | copy_function "$@" || return 37 | unset -f "$1" 38 | } 39 | 40 | # Assume test script is set +e and this will be the last call 41 | exit_with_status() { 42 | if ((FAILURE_COUNT)); then 43 | echo "Total Failures: $FAILURE_COUNT" 44 | else 45 | echo "All tests passed" 46 | fi 47 | set -e # Force exit with exit code 48 | test "$FAILURE_COUNT" -eq 0 49 | } 50 | 51 | # Used internally by test_cmd to assist debugging and output file cleanup 52 | _test_report() { 53 | local msg="$1" 54 | local inc_fail="$2" 55 | local outf="$3" 56 | 57 | if ((inc_fail)); then 58 | let 'FAILURE_COUNT++' 59 | echo -n "fail - " 60 | else 61 | echo -n "pass - " 62 | fi 63 | 64 | echo -n "$msg" 65 | 66 | if [[ -r "$outf" ]]; then 67 | # Ignore output when successful 68 | if ((inc_fail)) || ((TEST_DEBUG)); then 69 | echo " (output follows)" 70 | cat "$outf" 71 | fi 72 | rm -f "$outf" "$outf.oneline" 73 | fi 74 | echo -e '\n' # Makes output easier to read 75 | } 76 | 77 | # Execute a test command or shell function, capture it's output and verify expectations. 78 | # usage: test_cmd [args...] 79 | # Notes: Expected exit code is not checked if blank. Expected output will be verified blank 80 | # if regex is empty. Otherwise, regex checks whitespace-squashed output. 81 | test_cmd() { 82 | echo "Testing: ${1:-WARNING: No Test description given}" 83 | local e_exit="$2" 84 | local e_out_re="$3" 85 | shift 3 86 | 87 | if ((TEST_DEBUG)); then 88 | echo "# $@" > /dev/stderr 89 | fi 90 | 91 | # Using grep vs file safer than shell builtin test 92 | local a_out_f=$(mktemp -p '' "tmp_${FUNCNAME[0]}_XXXXXXXX") 93 | local a_exit=0 94 | 95 | # Use a sub-shell to capture possible function exit call and all output 96 | set -o pipefail 97 | ( set -e; "$@" 0<&- |& tee "$a_out_f" | tr -s '[:space:]' ' ' &> "${a_out_f}.oneline") 98 | a_exit="$?" 
99 | if ((TEST_DEBUG)); then 100 | echo "Command/Function call exited with code: $a_exit" 101 | fi 102 | 103 | if [[ -n "$e_exit" ]] && [[ $e_exit -ne $a_exit ]]; then 104 | _test_report "Expected exit-code $e_exit but received $a_exit while executing $1" "1" "$a_out_f" 105 | elif [[ -z "$e_out_re" ]] && [[ -n "$(<$a_out_f)" ]]; then 106 | _test_report "Expecting no output from $*" "1" "$a_out_f" 107 | elif [[ -n "$e_out_re" ]]; then 108 | if ((TEST_DEBUG)); then 109 | echo "Received $(wc -l $a_out_f | awk '{print $1}') output lines of $(wc -c $a_out_f | awk '{print $1}') bytes total" 110 | fi 111 | if grep -Eq "$e_out_re" "${a_out_f}.oneline"; then 112 | _test_report "Command $1 exited as expected with expected output" "0" "$a_out_f" 113 | else 114 | _test_report "Expecting regex '$e_out_re' match to (whitespace-squashed) output" "1" "$a_out_f" 115 | fi 116 | else # Pass 117 | _test_report "Command $1 exited as expected ($a_exit)" "0" "$a_out_f" 118 | fi 119 | } 120 | -------------------------------------------------------------------------------- /default.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "github>containers/automation//renovate/defaults.json5" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /github/.install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Installs common Github Action utilities system-wide. NOT intended to be used directly 4 | # by humans, should only be used indirectly by running 5 | # ../bin/install_automation.sh github 6 | 7 | set -eo pipefail 8 | 9 | source "$AUTOMATION_LIB_PATH/anchors.sh" 10 | source "$AUTOMATION_LIB_PATH/console_output.sh" 11 | 12 | INSTALL_PREFIX=$(realpath $AUTOMATION_LIB_PATH/..) 13 | # Assume the directory this script is in, represents what is being installed 14 | INSTALL_NAME=$(basename $(dirname ${BASH_SOURCE[0]})) 15 | AUTOMATION_VERSION=$(automation_version) 16 | [[ -n "$AUTOMATION_VERSION" ]] || \ 17 | die "Could not determine version of common automation libs, was 'install_automation.sh' successful?" 18 | 19 | echo "Installing $INSTALL_NAME version $(automation_version) into $INSTALL_PREFIX" 20 | 21 | unset INST_PERM_ARG 22 | if [[ $UID -eq 0 ]]; then 23 | INST_PERM_ARG="-o root -g root" 24 | fi 25 | 26 | cd $(dirname $(realpath "${BASH_SOURCE[0]}")) 27 | install -v $INST_PERM_ARG -D -t "$INSTALL_PREFIX/lib" ./lib/* 28 | 29 | # Needed for installer testing 30 | cat <>"./environment" 31 | # Added on $(date --iso-8601=minutes) by 'github' subcomponent installer 32 | export GITHUB_ACTION_LIB=$INSTALL_PREFIX/lib/github.sh 33 | EOF 34 | echo "Successfully installed $INSTALL_NAME" 35 | -------------------------------------------------------------------------------- /github/README.md: -------------------------------------------------------------------------------- 1 | ## Common Github Action scripts/libraries 2 | 3 | This subdirectory contains scripts, libraries, and tests for common 4 | Github Action operations. They depend heavily on the `common` 5 | subdirectory in the repository root. 
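With `test_cmd()` and `exit_with_status()` defined above, a new unit test is mostly boilerplate. A minimal sketch of a hypothetical `common/test/testlib-example.sh` exercising an imaginary `common/lib/example.sh` that provides `some_function()` — the subject file, function name, and expected error text are all assumptions:

```bash
#!/bin/bash
# Hypothetical common/test/testlib-example.sh following the conventions above.

source $(dirname ${BASH_SOURCE[0]})/testlib.sh || exit 1
source "$TEST_DIR/$SUBJ_FILENAME" || exit 2   # testlib- prefix -> loads ../lib/example.sh

test_cmd "Library $SUBJ_FILENAME is not executable" \
    0 "" \
    test ! -x "$TEST_DIR/$SUBJ_FILENAME"

test_cmd "some_function() dies with a helpful message when called without arguments" \
    1 "Expecting.+argument" \
    some_function

# Must be the last call
exit_with_status
```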
6 | -------------------------------------------------------------------------------- /github/lib/github.sh: -------------------------------------------------------------------------------- 1 | 2 | # This file is intended for sourcing by the cirrus-ci_retrospective workflow 3 | # It should not be used under any other context. 4 | 5 | source $(dirname ${BASH_SOURCE[0]})/github_common.sh || exit 1 6 | 7 | # Cirrus-CI Build status codes that represent completion 8 | COMPLETE_STATUS_RE='FAILED|COMPLETED|ABORTED|ERRORED' 9 | 10 | # Shell variables used throughout this workflow 11 | prn= 12 | tid= 13 | sha= 14 | tst= 15 | was_pr='false' 16 | do_intg='false' 17 | 18 | dbg_ccir() { 19 | dbg "Shell variables set:" 20 | dbg "Cirrus-CI ran on pr: $was_pr" 21 | dbg "Monitor PR Number: ${prn}" 22 | dbg "Monitor SHA: ${sha}" 23 | dbg "Action Task ID was: ${tid}" 24 | dbg "Action Task Status: ${tst}" 25 | dbg "Do integration testing: ${do_intg}" 26 | } 27 | 28 | # usage: load_ccir 29 | load_ccir() { 30 | local dirpath="$1" 31 | local ccirjson="$1/cirrus-ci_retrospective.json" 32 | 33 | [[ -d "$dirpath" ]] || \ 34 | die "Expecting a directory path '$dirpath'" 35 | [[ -r "$ccirjson" ]] || \ 36 | die "Can't read file '$ccirjson'" 37 | 38 | [[ -n "$MONITOR_TASK" ]] || \ 39 | die "Expecting \$MONITOR_TASK to be non-empty" 40 | [[ -n "$ACTION_TASK" ]] || \ 41 | die "Expecting \$MONITOR_TASK to be non-empty" 42 | 43 | dbg "--Loading Cirrus-CI monitoring task $MONITOR_TASK--" 44 | dbg "$(jq --indent 4 '.[] | select(.name == "'${MONITOR_TASK}'")' $ccirjson)" 45 | bst=$(jq --raw-output '.[] | select(.name == "'${MONITOR_TASK}'") | .build.status' "$ccirjson") 46 | prn=$(jq --raw-output '.[] | select(.name == "'${MONITOR_TASK}'") | .build.pullRequest' "$ccirjson") 47 | sha=$(jq --raw-output '.[] | select(.name == "'${MONITOR_TASK}'") | .build.changeIdInRepo' "$ccirjson") 48 | 49 | dbg "--Loadinng Cirrus-CI action task $ACTION_TASK--" 50 | dbg "$(jq --indent 4 '.[] | select(.name == "'${ACTION_TASK}'")' $ccirjson)" 51 | tid=$(jq --raw-output '.[] | select(.name == "'${ACTION_TASK}'") | .id' "$ccirjson") 52 | tst=$(jq --raw-output '.[] | select(.name == "'${ACTION_TASK}'") | .status' "$ccirjson") 53 | 54 | for var in bst prn sha; do 55 | [[ -n "${!var}" ]] || \ 56 | die "Expecting \$$var to be non-empty after loading $ccirjson" 42 57 | done 58 | 59 | was_pr='false' 60 | do_intg='false' 61 | if [[ -n "$prn" ]] && [[ "$prn" != "null" ]] && [[ $prn -gt 0 ]]; then 62 | dbg "Detected pull request $prn" 63 | was_pr='true' 64 | # Don't race vs another cirrus-ci build triggered _after_ GH action workflow started 65 | # since both may share the same check_suite. e.g. task re-run or manual-trigger 66 | if echo "$bst" | grep -E -q "$COMPLETE_STATUS_RE"; then 67 | if [[ -n "$tst" ]] && [[ "$tst" == "PAUSED" ]]; then 68 | dbg "Detected action status $tst" 69 | do_intg='true' 70 | fi 71 | else 72 | warn "Unexpected build status '$bst', was a task re-run or manually triggered?" 73 | fi 74 | fi 75 | dbg_ccir 76 | } 77 | 78 | set_ccir() { 79 | for varname in prn tid sha tst was_pr do_intg; do 80 | set_out_var $varname "${!varname}" 81 | done 82 | } 83 | -------------------------------------------------------------------------------- /github/lib/github_common.sh: -------------------------------------------------------------------------------- 1 | 2 | # This file is intended for sourcing by github action workflows 3 | # It should not be used under any other context. 
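A sketch of how the functions above are expected to be driven from the cirrus-ci_retrospective workflow. The task names, the workspace path, and the presence of an already-downloaded `cirrus-ci_retrospective.json` are assumptions for illustration; `$GITHUB_ACTION_LIB` comes from the `environment` file written by `github/.install.sh`:

```bash
#!/bin/bash
# Hypothetical workflow step body; assumes the installed environment file was sourced.
set -eo pipefail
source "$GITHUB_ACTION_LIB"               # github.sh, which sources github_common.sh

export MONITOR_TASK="cirrus-ci/success"   # illustrative task names
export ACTION_TASK="github-actions/success"

# $GITHUB_WORKSPACE must already contain cirrus-ci_retrospective.json
load_ccir "$GITHUB_WORKSPACE"
set_ccir   # exposes prn, tid, sha, tst, was_pr and do_intg as step outputs
```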
4 | 5 | # Important paths defined here 6 | AUTOMATION_LIB_PATH="${AUTOMATION_LIB_PATH:-$(realpath $(dirname ${BASH_SOURCE[0]})/../../common/lib)}" 7 | 8 | source $AUTOMATION_LIB_PATH/common_lib.sh || exit 1 9 | 10 | # Wrap the die() function to add github-action sugar that identifies file 11 | # & line number within the UI, before exiting non-zero. 12 | rename_function die _die 13 | die() { 14 | # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-error-message 15 | local ERROR_MSG_PREFIX 16 | ERROR_MSG_PREFIX="::error file=${BASH_SOURCE[1]},line=${BASH_LINENO[0]}::" 17 | _die "$@" 18 | } 19 | 20 | # Wrap the warn() function to add github-action sugar that identifies file 21 | # & line number within the UI. 22 | rename_function warn _warn 23 | warn() { 24 | local WARNING_MSG_PREFIX 25 | # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-warning-message 26 | WARNING_MSG_PREFIX="::warning file=${BASH_SOURCE[1]},line=${BASH_LINENO[0]}::" 27 | _warn "$@" 28 | } 29 | 30 | # Idomatic debug messages in github-actions are worse than useless. They do 31 | # not embed file/line information. They are completely hidden unless 32 | # the $ACTIONS_STEP_DEBUG step or job variable is set 'true'. If setting 33 | # this variable as a secret, can have unintended conseuqences: 34 | # https://docs.github.com/en/actions/monitoring-and-troubleshooting-workflows/using-workflow-run-logs#viewing-logs-to-diagnose-failures 35 | # Wrap the dbg() function to add github-action sugar at the "notice" level 36 | # so that it may be observed in output by regular users without danger. 37 | rename_function dbg _dbg 38 | dbg() { 39 | # When set true, simply enable automation library debugging. 40 | if [[ "${ACTIONS_STEP_DEBUG:-false}" == 'true' ]]; then export A_DEBUG=1; fi 41 | 42 | # notice-level messages actually show up in the UI use them for debugging 43 | # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-notice-message 44 | local DEBUG_MSG_PREFIX 45 | DEBUG_MSG_PREFIX="::notice file=${BASH_SOURCE[1]},line=${BASH_LINENO[0]}::" 46 | _dbg "$@" 47 | } 48 | 49 | # usage: set_out_var [value...] 50 | set_out_var() { 51 | A_DEBUG=0 req_env_vars GITHUB_OUTPUT 52 | name=$1 53 | shift 54 | value="$@" 55 | [[ -n $name ]] || \ 56 | die "Expecting first parameter to be non-empty value for the output variable name" 57 | dbg "Setting Github Action step output variable '$name' to '$value'" 58 | # Special string recognized by Github Actions 59 | # Ref: https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-output-parameter 60 | echo "$name=$value" >> $GITHUB_OUTPUT 61 | } 62 | -------------------------------------------------------------------------------- /github/test/README.md: -------------------------------------------------------------------------------- 1 | # WARNING 2 | 3 | These tests absolutely must be run by github actions. They will 4 | not function outside of that specific environment. 
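What the wrapped functions above actually emit inside a workflow step, sketched with illustrative messages and output-variable names (the Actions runner supplies `$GITHUB_OUTPUT`; `$AUTOMATION_LIB_PATH` is assumed set by the installed `environment` file):

```bash
#!/bin/bash
# Hypothetical step body demonstrating the github_common.sh wrappers.
source "$AUTOMATION_LIB_PATH/github_common.sh"

A_DEBUG=1
dbg  "probing the environment"   # ::notice line; shown only when A_DEBUG=1 or ACTIONS_STEP_DEBUG=true
warn "cache miss, rebuilding"    # ::warning file=...,line=...:: cache miss, rebuilding

set_out_var artifact_count 42    # appends "artifact_count=42" to $GITHUB_OUTPUT
```

A later step can then read the value back as `${{ steps.<step-id>.outputs.artifact_count }}`.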
5 | -------------------------------------------------------------------------------- /github/test/run_action_tests.sh: -------------------------------------------------------------------------------- 1 | ../../common/test/run_all_tests.sh -------------------------------------------------------------------------------- /github/test/testlib-github.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source $(dirname $BASH_SOURCE[0])/testlib.sh 4 | 5 | # This is necessary when executing from a Github Action workflow so it ignores 6 | # all magic output tokens 7 | echo "::stop-commands::TESTING" 8 | trap "echo '::TESTING::'" EXIT 9 | 10 | test_cmd "The library $TEST_DIR/$SUBJ_FILENAME loads" \ 11 | 0 '' \ 12 | source $TEST_DIR/$SUBJ_FILENAME 13 | 14 | source $TEST_DIR/$SUBJ_FILENAME || exit 1 # can't continue w/o loaded library 15 | 16 | test_cmd 'These tests are running in a github actions workflow environment' \ 17 | 0 '' \ 18 | test "$GITHUB_ACTIONS" == "true" 19 | 20 | test_cmd 'Default shell variables are initialized empty/false' \ 21 | 0 '^falsefalse$' \ 22 | echo -n "${prn}${tid}${sha}${tst}${was_pr}${do_intg}" 23 | 24 | # Remaining tests all require debugging output to be enabled 25 | A_DEBUG=1 26 | 27 | test_cmd 'The debugging function does not throw any errors and redirects to notice-level output' \ 28 | 0 '::notice' \ 29 | dbg_ccir 30 | 31 | test_cmd "The \$MONITOR_TASK variable is defined an non-empty" \ 32 | 0 '^.+' \ 33 | echo -n "$MONITOR_TASK" 34 | 35 | test_cmd "The \$ACTION_TASK variable is defined an non-empty" \ 36 | 0 '^.+' \ 37 | echo -n "$ACTION_TASK" 38 | 39 | MONITOR_TASK=TEST_MONITOR_TASK_NAME 40 | ACTION_TASK=TEST_ACTION_TASK_NAME 41 | TESTTEMPDIR=$(mktemp -p '' -d "tmp_${SUBJ_FILENAME}_XXXXXXXX") 42 | trap "rm -rf $TESTTEMPDIR" EXIT 43 | 44 | # usage: write_ccir 45 | write_ccir() { 46 | local id=$1 47 | local pullRequest=$2 48 | local changeIdInRepo=$3 49 | local action_status=$4 50 | local monitor_status=$5 51 | 52 | build_section="\"build\": { 53 | \"id\": \"1234567890\", 54 | \"changeIdInRepo\": \"$changeIdInRepo\", 55 | \"branch\": \"pull/$pullRequest\", 56 | \"pullRequest\": $pullRequest, 57 | \"status\": \"COMPLETED\" 58 | }" 59 | 60 | cat << EOF > $TESTTEMPDIR/cirrus-ci_retrospective.json 61 | [ 62 | { 63 | "id": "$id", 64 | "name": "$MONITOR_TASK", 65 | "status": "$monitor_status", 66 | "automaticReRun": false, 67 | $build_section 68 | }, 69 | { 70 | "id": "$id", 71 | "name": "$ACTION_TASK", 72 | "status": "$action_status", 73 | "automaticReRun": false, 74 | $build_section 75 | } 76 | ] 77 | EOF 78 | if ((TEST_DEBUG)); then 79 | echo "Wrote JSON:" 80 | cat $TESTTEMPDIR/cirrus-ci_retrospective.json 81 | fi 82 | } 83 | 84 | write_ccir 10 12 13 14 15 85 | # usage: write_ccir 86 | for regex in '"id": "10"' $MONITOR_TASK $ACTION_TASK '"branch": "pull/12"' \ 87 | '"changeIdInRepo": "13"' '"pullRequest": 12' '"status": "14"' \ 88 | '"status": "15"'; do 89 | test_cmd "Verify test JSON can load with test values from $TESTTEMPDIR, and match '$regex'" \ 90 | 0 "$regex" \ 91 | load_ccir "$TESTTEMPDIR" 92 | done 93 | 94 | # Remaining tests all require debugging output disabled 95 | A_DEBUG=0 96 | 97 | write_ccir 1 2 3 PAUSED COMPLETED 98 | load_ccir "$TESTTEMPDIR" 99 | for var in was_pr do_intg; do 100 | test_cmd "Verify JSON for a pull request sets \$$var=true" \ 101 | 0 '^true' \ 102 | echo ${!var} 103 | done 104 | 105 | for stat in COMPLETED ABORTED FAILED YOMAMA SUCCESS SUCCESSFUL FAILURE; do 106 | write_ccir 1 
2 3 $stat COMPLETED 107 | load_ccir "$TESTTEMPDIR" 108 | test_cmd "Verify JSON for a pull request sets \$do_intg=false when action status is $stat" \ 109 | 0 '^false' \ 110 | echo $do_intg 111 | 112 | write_ccir 1 2 3 PAUSED $stat 113 | load_ccir "$TESTTEMPDIR" 114 | test_cmd "Verify JSON for a pull request sets \$do_intg=true when monitor status is $stat" \ 115 | 0 '^true' \ 116 | echo $do_intg 117 | done 118 | 119 | for pr in "true" "false" "null" "0"; do 120 | write_ccir 1 "$pr" 3 PAUSED COMPLETED 121 | load_ccir "$TESTTEMPDIR" 122 | test_cmd "Verify \$do_intg=false and \$was_pr=false when JSON sets pullRequest=$pr" \ 123 | 0 '^falsefalse' \ 124 | echo ${do_intg}${was_pr} 125 | done 126 | 127 | # Must be the last command in this file 128 | exit_with_status 129 | -------------------------------------------------------------------------------- /github/test/testlib-github_common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source $(dirname $BASH_SOURCE[0])/testlib.sh 4 | 5 | # This is necessary when executing from a Github Action workflow so it ignores 6 | # all magic output sugar. 7 | _MAGICTOKEN="TEST${RANDOM}TEST" # must be randomly generated / unguessable 8 | echo "::stop-commands::$_MAGICTOKEN" 9 | trap "echo '::$_MAGICTOKEN::'" EXIT 10 | 11 | unset ACTIONS_STEP_DEBUG 12 | unset A_DEBUG 13 | source $TEST_DIR/$SUBJ_FILENAME || exit 1 # can't continue w/o loaded library 14 | 15 | test_cmd "No debug message shows when A_DEBUG and ACTIONS_STEP_DEBUG are undefined" \ 16 | 0 '' \ 17 | dbg 'This debug message should not appear' 18 | 19 | export A_DEBUG=1 20 | test_cmd "A debug notice message shows when A_DEBUG is true" \ 21 | 0 '::notice file=.+,line=.+:: This is a debug message' \ 22 | dbg "This is a debug message" 23 | unset A_DEBUG 24 | 25 | export ACTIONS_STEP_DEBUG="true" 26 | test_cmd "A debug notice message shows when ACTIONS_STEP_DEBUG is true" \ 27 | 0 '::notice file=.+,line=.+:: This is also a debug message' \ 28 | dbg "This is also a debug message" 29 | unset ACTIONS_STEP_DEBUG 30 | unset A_DEBUG 31 | 32 | test_cmd "Warning messages contain github-action sugar." \ 33 | 0 '::warning file=.+,line=.+:: This is a test warning message' \ 34 | warn 'This is a test warning message' 35 | 36 | test_cmd "Error messages contain github-action sugar." 
\ 37 | 0 '::error file=.+,line=.+:: This is a test error message' \ 38 | die 'This is a test error message' 0 39 | 40 | unset GITHUB_OUTPUT_FUDGED 41 | if [[ -z "$GITHUB_OUTPUT" ]]; then 42 | # Not executing under github-actions 43 | GITHUB_OUTPUT=$(mktemp -p '' tmp_$(basename ${BASH_SOURCE[0]})_XXXX) 44 | GITHUB_OUTPUT_FUDGED=1 45 | fi 46 | 47 | test_cmd "The set_out_var function normally produces no output" \ 48 | 0 '' \ 49 | set_out_var TESTING_NAME TESTING VALUE 50 | 51 | export A_DEBUG=1 52 | test_cmd "The set_out_var function is debugable" \ 53 | 0 "::notice file=.+line=.+:: Setting Github.+'DEBUG_TESTING_NAME' to 'DEBUGGING TESTING VALUE'" \ 54 | set_out_var DEBUG_TESTING_NAME DEBUGGING TESTING VALUE 55 | unset A_DEBUG 56 | 57 | test_cmd "Previous set_out_var function properly sets a step-output value" \ 58 | 0 'TESTING_NAME=TESTING VALUE' \ 59 | cat $GITHUB_OUTPUT 60 | 61 | # Must be the last commands in this file 62 | if ((GITHUB_OUTPUT_FUDGED)); then rm -f "$GITHUB_OUTPUT"; fi 63 | exit_with_status 64 | -------------------------------------------------------------------------------- /github/test/testlib.sh: -------------------------------------------------------------------------------- 1 | ../../common/test/testlib.sh -------------------------------------------------------------------------------- /mac_pw_pool/.gitignore: -------------------------------------------------------------------------------- 1 | /Cron.log 2 | /utilization.csv 3 | /dh_status.txt* 4 | /pw_status.txt* 5 | /html/utilization.png* 6 | -------------------------------------------------------------------------------- /mac_pw_pool/AllocateTestDH.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is intended for use by humans to allocate a dedicated-host 4 | # and create an instance on it for testing purposes. When executed, 5 | # it will create a temporary clone of the repository with the necessary 6 | # modifications to manipulate the test host. It's the user's responsibility 7 | # to cleanup this directory after manually removing the instance (see below). 8 | # 9 | # **Note**: Due to Apple/Amazon restrictions on the removal of these 10 | # resources, cleanup must be done manually. You will need to shutdown and 11 | # terminate the instance, then wait 24-hours before releasing the 12 | # dedicated-host. The hosts cost money w/n an instance is running. 13 | # 14 | # The script assumes: 15 | # 16 | # * The current $USER value reflects your actual identity such that 17 | # the test instance may be labeled appropriatly for auditing. 18 | # * The `aws` CLI tool is installed on $PATH. 19 | # * Appropriate `~/.aws/credentials` credentials are setup. 20 | # * The us-east-1 region is selected in `~/.aws/config`. 21 | # * The $POOLTOKEN env. var. is set to value available from 22 | # https://cirrus-ci.com/pool/1cf8c7f7d7db0b56aecd89759721d2e710778c523a8c91c7c3aaee5b15b48d05 23 | # * The local ssh-agent is able to supply the appropriate private key (stored in BW). 24 | 25 | set -eo pipefail 26 | 27 | # shellcheck source-path=SCRIPTDIR 28 | source $(dirname ${BASH_SOURCE[0]})/pw_lib.sh 29 | 30 | # Support debugging all mac_pw_pool scripts or only this one 31 | I_DEBUG="${I_DEBUG:0}" 32 | if ((I_DEBUG)); then 33 | X_DEBUG=1 34 | warn "Debugging enabled." 
35 | fi 36 | 37 | dbg "\$USER=$USER" 38 | 39 | [[ -n "$USER" ]] || \ 40 | die "The variable \$USER must not be empty" 41 | 42 | [[ -n "$POOLTOKEN" ]] || \ 43 | die "The variable \$POOLTOKEN must not be empty" 44 | 45 | INST_NAME="${USER}Testing" 46 | LIB_DIRNAME=$(realpath --relative-to=$REPO_DIRPATH $LIB_DIRPATH) 47 | # /tmp is usually a tmpfs, don't let an accidental reboot ruin 48 | # access to a test DH/instance for a developer. 49 | TMP_CLONE_DIRPATH="/var/tmp/${LIB_DIRNAME}_${INST_NAME}" 50 | 51 | dbg "\$TMP_CLONE_DIRPATH=$TMP_CLONE_DIRPATH" 52 | 53 | if [[ -d "$TMP_CLONE_DIRPATH" ]]; then 54 | die "Found existing '$TMP_CLONE_DIRPATH', assuming in-use/relevant; If not, manual cleanup is required." 55 | fi 56 | 57 | msg "Creating temporary clone dir and transfering any uncommited files." 58 | 59 | git clone --no-local --no-hardlinks --depth 1 --single-branch --no-tags --quiet "file://$REPO_DIRPATH" "$TMP_CLONE_DIRPATH" 60 | declare -a uncommited_filepaths 61 | readarray -t uncommited_filepaths <<<$( 62 | pushd "$REPO_DIRPATH" &> /dev/null 63 | # Obtaining uncommited relative staged filepaths 64 | git diff --name-only HEAD 65 | # Obtaining uncommited relative unstaged filepaths 66 | git ls-files . --exclude-standard --others 67 | popd &> /dev/null 68 | ) 69 | 70 | dbg "Copying \$uncommited_filepaths[*]=${uncommited_filepaths[*]}" 71 | 72 | for uncommited_file in "${uncommited_filepaths[@]}"; do 73 | uncommited_file_src="$REPO_DIRPATH/$uncommited_file" 74 | uncommited_file_dest="$TMP_CLONE_DIRPATH/$uncommited_file" 75 | uncommited_file_dest_parent=$(dirname "$uncommited_file_dest") 76 | #dbg "Working on uncommited file '$uncommited_file_src'" 77 | if [[ -r "$uncommited_file_src" ]]; then 78 | mkdir -p "$uncommited_file_dest_parent" 79 | #dbg "$uncommited_file_src -> $uncommited_file_dest" 80 | cp -a "$uncommited_file_src" "$uncommited_file_dest" 81 | fi 82 | done 83 | 84 | declare -a modargs 85 | # Format: 86 | modargs=( 87 | # Necessary to prevent in-production macs from trying to use testing instance 88 | "DH_REQ_VAL $INST_NAME $DH_REQ_VAL" 89 | # Necessary to make test dedicated host stand out when auditing the set in the console 90 | "DH_PFX $INST_NAME $DH_PFX" 91 | # The default launch template name includes $DH_PFX, ensure the production template name is used. 92 | # N/B: The old/unmodified pw_lib.sh is still loaded for the running script 93 | "TEMPLATE_NAME $TEMPLATE_NAME Cirrus${DH_PFX}PWinstance" 94 | # Permit developer to use instance for up to 3 days max (orphan vm cleaning process will nail it after that). 95 | "PW_MAX_HOURS 72 $PW_MAX_HOURS" 96 | # Permit developer to execute as many Cirrus-CI tasks as they want w/o automatic shutdown. 97 | "PW_MAX_TASKS 9999 $PW_MAX_TASKS" 98 | ) 99 | 100 | for modarg in "${modargs[@]}"; do 101 | set -- $modarg # Convert the "tuple" into the param args $1 $2... 102 | dbg "Modifying pw_lib.sh \$$1 definition to '$2' (was '$3')" 103 | sed -i -r -e "s/^$1=.*/$1=\"$2\"/" "$TMP_CLONE_DIRPATH/$LIB_DIRNAME/pw_lib.sh" 104 | # Ensure future script invocations use the new values 105 | unset $1 106 | done 107 | 108 | cd "$TMP_CLONE_DIRPATH/$LIB_DIRNAME" 109 | source ./pw_lib.sh 110 | 111 | # Before going any further, make sure there isn't an existing 112 | # dedicated-host named ${INST_NAME}-0. If there is, it can 113 | # be re-used instead of failing the script outright. 114 | existing_dh_json=$(mktemp -p "." 
dh_allocate_XXXXX.json) 115 | $AWS ec2 describe-hosts --filter "Name=tag:Name,Values=${INST_NAME}-0" --query 'Hosts[].HostId' > "$existing_dh_json" 116 | if grep -Fqx '[]' "$existing_dh_json"; then 117 | 118 | msg "Creating the dedicated host '${INST_NAME}-0'" 119 | declare dh_allocate_json 120 | dh_allocate_json=$(mktemp -p "." dh_allocate_XXXXX.json) 121 | 122 | declare -a awsargs 123 | # Word-splitting of $AWS is desireable 124 | # shellcheck disable=SC2206 125 | awsargs=( 126 | $AWS 127 | ec2 allocate-hosts 128 | --availability-zone us-east-1a 129 | --instance-type mac2.metal 130 | --auto-placement off 131 | --host-recovery off 132 | --host-maintenance off 133 | --quantity 1 134 | --tag-specifications 135 | "ResourceType=dedicated-host,Tags=[{Key=Name,Value=${INST_NAME}-0},{Key=$DH_REQ_TAG,Value=$DH_REQ_VAL},{Key=PWPoolReady,Value=true},{Key=automation,Value=false}]" 136 | ) 137 | 138 | # N/B: Apple/Amazon require min allocation time of 24hours! 139 | dbg "Executing: ${awsargs[*]}" 140 | "${awsargs[@]}" > "$dh_allocate_json" || \ 141 | die "Provisioning new dedicated host $INST_NAME failed. Manual debugging & cleanup required." 142 | 143 | dbg $(jq . "$dh_allocate_json") 144 | dhid=$(jq -r -e '.HostIds[0]' "$dh_allocate_json") 145 | [[ -n "$dhid" ]] || \ 146 | die "Obtaining DH ID of new host. Manual debugging & cleanup required." 147 | 148 | # There's a small delay between allocating the dedicated host and LaunchInstances.sh 149 | # being able to interact with it. There's no sensible way to monitor for this state :( 150 | sleep 3s 151 | else # A dedicated host already exists 152 | dhid=$(jq -r -e '.[0]' "$existing_dh_json") 153 | fi 154 | 155 | # Normally allocation is fairly instant, but not always. Confirm we're able to actually 156 | # launch a mac instance onto the dedicated host. 157 | for ((attempt=1 ; attempt < 11 ; attempt++)); do 158 | msg "Attempt #$attempt launching a new instance on dedicated host" 159 | ./LaunchInstances.sh --force 160 | if grep -E "^${INST_NAME}-0 i-" dh_status.txt; then 161 | attempt=-1 # signal success 162 | break 163 | fi 164 | sleep 1s 165 | done 166 | 167 | [[ "$attempt" -eq -1 ]] || \ 168 | die "Failed to use LaunchInstances.sh. Manual debugging & cleanup required." 169 | 170 | # At this point the script could call SetupInstances.sh in another loop 171 | # but it takes about 20-minutes to complete. Also, the developer may 172 | # not need it, they may simply want to ssh into the instance to poke 173 | # around. i.e. they don't need to run any Cirrus-CI jobs on the test 174 | # instance. 175 | warn "---" 176 | warn "NOT copying/running setup.sh to new instance (in case manual activities are desired)." 177 | warn "---" 178 | 179 | w="PLEASE REMEMBER TO terminate instance, wait two hours, then 180 | remove the dedicated-host in the web console, or run 181 | 'aws ec2 release-hosts --host-ids=$dhid'." 182 | 183 | msg "---" 184 | msg "Dropping you into a shell inside a temp. repo clone: 185 | ($TMP_CLONE_DIRPATH/$LIB_DIRNAME)" 186 | msg "---" 187 | msg "Once it finishes booting (5m), you may use './InstanceSSH.sh ${INST_NAME}-0' 188 | to access it. Otherwise to fully setup the instance for Cirrus-CI, you need 189 | to execute './SetupInstances.sh' repeatedly until the ${INST_NAME}-0 line in 190 | 'pw_status.txt' includes the text 'complete alive'. That process can take 20+ 191 | minutes. 
Once alive, you may then use Cirrus-CI to test against this specific 192 | instance with any 'persistent_worker' task having a label of 193 | '$DH_REQ_TAG=$DH_REQ_VAL' set." 194 | msg "---" 195 | warn "$w" 196 | 197 | export POOLTOKEN # ensure availability in sub-shell 198 | bash -l 199 | 200 | warn "$w" 201 | -------------------------------------------------------------------------------- /mac_pw_pool/Cron.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Intended to be run from $HOME/devel/automation/mac_pw_pool/ 4 | # using a crontab like: 5 | 6 | # # Every date/timestamp in PW Pool management is UTC-relative 7 | # # make cron do the same for consistency. 8 | # CRON_TZ=UTC 9 | # 10 | # PATH=/home/shared/.local/bin:/home/shared/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin 11 | # 12 | # # Keep log from filling up disk & make sure webserver is running 13 | # # (5am UTC is during a CI-activity lull) 14 | # 59 4 * * * $HOME/devel/automation/mac_pw_pool/nightly_maintenance.sh &>> $CRONLOG 15 | # 16 | # # PW Pool management (usage drop-off from 03:00-15:00 UTC) 17 | # POOLTOKEN= 18 | # CRONLOG=/home/shared/devel/automation/mac_pw_pool/Cron.log 19 | # */5 * * * * /home/shared/devel/automation/mac_pw_pool/Cron.sh &>> $CRONLOG 20 | 21 | # shellcheck disable=SC2154 22 | [ "${FLOCKER}" != "$0" ] && exec env FLOCKER="$0" flock -e -w 300 "$0" "$0" "$@" || : 23 | 24 | # shellcheck source=./pw_lib.sh 25 | source $(dirname "${BASH_SOURCE[0]}")/pw_lib.sh 26 | 27 | cd $SCRIPT_DIRPATH || die "Cannot enter '$SCRIPT_DIRPATH'" 28 | 29 | # SSH agent required to provide key for accessing workers 30 | # Started with `ssh-agent -s > /run/user/$UID/ssh-agent.env` 31 | # followed by adding/unlocking the necessary keys. 32 | # shellcheck disable=SC1090 33 | source /run/user/$UID/ssh-agent.env 34 | 35 | date -u -Iminutes 36 | now_minutes=$(date -u +%M) 37 | 38 | if (($now_minutes%10==0)); then 39 | $SCRIPT_DIRPATH/LaunchInstances.sh 40 | echo "Exit: $?" 41 | fi 42 | 43 | $SCRIPT_DIRPATH/SetupInstances.sh 44 | echo "Exit: $?" 45 | 46 | [[ -r "$PWSTATE" ]] || \ 47 | die "Can't read $PWSTATE to generate utilization data." 48 | 49 | uzn_file="$SCRIPT_DIRPATH/utilization.csv" 50 | # Run input through `date` to validate values are usable timestamps 51 | timestamp=$(date -u -Iseconds -d \ 52 | $(grep -E '^# SetupInstances\.sh run ' "$PWSTATE" | \ 53 | awk '{print $4}')) 54 | pw_state=$(grep -E -v '^($|#+| +)' "$PWSTATE") 55 | n_workers=$(grep 'complete alive' <<<"$pw_state" | wc -l) 56 | n_tasks=$(awk "BEGIN{B=0} /${DH_PFX}-[0-9]+ complete alive/{B+=\$4} END{print B}" <<<"$pw_state") 57 | n_taskf=$(awk "BEGIN{E=0} /${DH_PFX}-[0-9]+ complete alive/{E+=\$5} END{print E}" <<<"$pw_state") 58 | printf "%s,%i,%i,%i\n" "$timestamp" "$n_workers" "$n_tasks" "$n_taskf" | tee -a "$uzn_file" 59 | 60 | # Prevent uncontrolled growth of utilization.csv. Assume this script 61 | # runs every $interval minutes, keep only $history_hours worth of data. 
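# For example, with the values below: 36 hours * (60 / 5) samples per hour = 432 lines retained.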
62 | interval_minutes=5 63 | history_hours=36 64 | lines_per_hour=$((60/$interval_minutes)) 65 | max_uzn_lines=$(($history_hours * $lines_per_hour)) 66 | tail -n $max_uzn_lines "$uzn_file" > "${uzn_file}.tmp" 67 | mv "${uzn_file}.tmp" "$uzn_file" 68 | 69 | # If possible, generate the webpage utilization graph 70 | gnuplot -c Utilization.gnuplot || true 71 | -------------------------------------------------------------------------------- /mac_pw_pool/InstanceSSH.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | # Helper for humans to access an existing instance. It depends on: 6 | # 7 | # * You know the instance-id or name. 8 | # * All requirements listed in the top `LaunchInstances.sh` comment. 9 | # * The local ssh-agent is able to supply the appropriate private key. 10 | 11 | # shellcheck source-path=SCRIPTDIR 12 | source $(dirname ${BASH_SOURCE[0]})/pw_lib.sh 13 | 14 | SSH="ssh $SSH_ARGS" # N/B: library default nulls stdin 15 | if nc -z localhost 5900; then 16 | # Enable access to VNC if it's running 17 | # ref: https://repost.aws/knowledge-center/ec2-mac-instance-gui-access 18 | SSH+=" -L 5900:localhost:5900" 19 | fi 20 | 21 | [[ -n "$1" ]] || \ 22 | die "Must provide EC2 instance ID as first argument" 23 | 24 | case "$1" in 25 | i-*) 26 | inst_json=$($AWS ec2 describe-instances --instance-ids "$1") ;; 27 | *) 28 | inst_json=$($AWS ec2 describe-instances --filter "Name=tag:Name,Values=$1") ;; 29 | esac 30 | 31 | shift 32 | 33 | pub_dns=$(jq -r -e '.Reservations?[0]?.Instances?[0]?.PublicDnsName?' <<<"$inst_json") 34 | if [[ -z "$pub_dns" ]] || [[ "$pub_dns" == "null" ]]; then 35 | die "Instance '$1' does not exist, or does not have a public DNS address allocated (yet)." 36 | fi 37 | 38 | echo "+ $SSH ec2-user@$pub_dns $*" >> /dev/stderr 39 | exec $SSH ec2-user@$pub_dns "$@" 40 | -------------------------------------------------------------------------------- /mac_pw_pool/README.md: -------------------------------------------------------------------------------- 1 | # Cirrus-CI persistent worker maintenance 2 | 3 | These scripts are intended to be used from a repository clone, 4 | by cron, on an always-on cloud machine. They make a lot of 5 | other assumptions, some of which may not be well documented. 6 | Please see the comments at the top of each script for more 7 | detailed/specific information. 8 | 9 | ## Prerequisites 10 | 11 | * The `aws` binary present somewhere on `$PATH`. 12 | * Standard AWS `credentials` and `config` files exist under `~/.aws` 13 | and set the region to `us-east-1`. 14 | * A copy of the ssh-key referenced by the `CirrusMacM1PWinstance` launch template 15 | under "Assumptions" below. 16 | * The ssh-key has been added to a running ssh-agent. 17 | * The running ssh-agent's sh-compatible env. vars. are stored in 18 | `/run/user/$UID/ssh-agent.env` 19 | * The env. var. `POOLTOKEN` is set to the Cirrus-CI persistent worker pool 20 | token value. 21 | 22 | ## Assumptions 23 | 24 | * You've read all scripts in this directory, generally follow 25 | their purpose, and meet any requirements stated within the 26 | header comment. 27 | * You've read the [private documentation](https://docs.google.com/document/d/1PX6UyqDDq8S72Ko9qe_K3zoV2XZNRQjGxPiWEkFmQQ4/edit) 28 | and understand the safety/security section. 29 | * You have permissions to access all referenced AWS resources. 30 | * There are one or more dedicated hosts allocated and they have set: 31 | * A name tag like `MacM1-<number>` (NO SPACES!) 
32 | * The `mac2` instance family 33 | * The `mac2.metal` instance type 34 | * Disabled "Instance auto-placement", "Host recovery", and "Host maintenance" 35 | * Quantity: 1 36 | * Tags: `automation=false`, `purpose=prod`, and `PWPoolReady=true` 37 | * The EC2 `CirrusMacM1PWinstance` instance-template exists and sets: 38 | * Shutdown-behavior: terminate 39 | * Same "key pair" referenced under `Prerequisites` 40 | * All other required instance parameters complete 41 | * A user-data script that shuts down the instance after 2 days. 42 | 43 | ## Operation (Theory) 44 | 45 | The goal is to maintain sufficient alive/running/working instances 46 | to service most Cirrus-CI tasks pointing at the pool. This is 47 | best achieved with slower maintenance of hosts compared to setup 48 | of ready instances. This is because hosts can be inaccessible for 49 | up to 2 hours, but instances come up in ~10-20m, ready to run tasks. 50 | 51 | Either hosts and/or instances may be removed from management by 52 | setting "false" or removing their `PWPoolReady=true` tag. Otherwise, 53 | the pool should be maintained by installing the crontab lines 54 | indicated in the `Cron.sh` script. 55 | 56 | Cirrus-CI will assign tasks (specially) targeted at the pool, to an 57 | instance with a running listener (`cirrus worker run` process). If 58 | there are none, the task will queue forever (there might be a 24-hour 59 | timeout, I can't remember). From a PR perspective, there is little 60 | control over which instance you get. It could easily be one where 61 | a previous task barfed all over and rendered unusable. 62 | 63 | ## Initialization 64 | 65 | It is assumed that neither the `Cron.sh` nor any related maintenance 66 | scripts are installed (in crontab) or currently running. 67 | 68 | Once several dedicated hosts have been manually created, they 69 | should initially have no instances on them. If left alone, the 70 | maintenance scripts will eventually bring them all up, however 71 | complete creation and setup will take many hours. This may be 72 | bypassed by *manually* running `LaunchInstances.sh --force`. 73 | 74 | In order to prevent all the instances from being recycled at the same 75 | (future) time, the shutdown time installed by `SetupInstances.sh` also 76 | needs to be adjusted. The operator should first wait about 20 minutes 77 | for all new instances to fully boot. Followed by a call to 78 | `SetupInstances.sh --force`. 79 | 80 | Now the `Cron.sh` cron-job may be installed, enabled and started. 81 | 82 | ## Manual Testing 83 | 84 | Verifying changes to these scripts / cron-job must be done manually. 85 | To support this, every dedicated host and instance has a `purpose` 86 | tag, which must correspond to the value indicated in `pw_lib.sh` 87 | and in the target repo `.cirrus.yml`. To test script and/or 88 | CI changes: 89 | 90 | 1. Make sure you have locally met all requirements spelled out in the 91 | header-comment of `AllocateTestDH.sh`. 92 | 1. Execute `AllocateTestDH.sh`. It will operate out of a temporary 93 | clone of the repository to prevent pushing required test-modifications 94 | upstream. 95 | 1. Repeatedly execute `SetupInstances.sh`. It will update `pw_status.txt` 96 | with any warnings/errors. When successful, lines will include 97 | the host name, "complete", and "alive" status strings. 98 | 1. If instance debugging is needed, the `InstanceSSH.sh` script may be 99 | used. Simply pass the name of the host you want to access. 
Every 100 | instance should have a `setup.log` file in the `ec2-user` homedir. There 101 | should also be `/private/tmp/<name>-worker.log` with entries from the 102 | pool listener process. 103 | 1. To test CI changes against the test instance(s), push a PR that includes 104 | `.cirrus.yml` changes to the task's `persistent_worker` dictionary's 105 | `purpose` attribute. Set the value the same as the tag in step 1. 106 | 1. When you're done with all testing, terminate the instance. Then wait 107 | a full 24-hours before "releasing" the dedicated host. Both operations 108 | can be performed using the AWS EC2 WebUI. Please remember to do the 109 | release step, as the $-clock continues to run while it's allocated. 110 | 111 | Note: Instances are set to auto-terminate on shutdown. They should 112 | self shutdown after 24-hours automatically. After termination for 113 | any cause, there's about a 2-hour waiting period before a new instance 114 | can be allocated. The `LaunchInstances.sh` script is able to deal with this 115 | properly. 116 | 117 | 118 | ## Script Debugging Hints 119 | 120 | * On each MacOS instance: 121 | * The pool listener process (running as the worker user) keeps a log under `/private/tmp`. The 122 | file includes the registered name of the worker. For example, on MacM1-7 you would find `/private/tmp/MacM1-7-worker.log`. 123 | This log shows tasks taken on, completed, and any errors reported back from Cirrus-CI internals. 124 | * In the ec2-user's home directory is a `setup.log` file. This stores the output from executing 125 | `setup.sh`. It also contains any warnings/errors from the (very important) `service_pool.sh` script - which should 126 | _always_ be running in the background. 127 | * There are several drop-files in the `ec2-user` home directory which are checked by `SetupInstances.sh` 128 | to record state. If removed, along with `setup.log`, the script will re-execute (a possibly newer version of) `setup.sh`. 129 | * On the management host: 130 | * Automated operations are set up and run by `Cron.sh`, and logged to `Cron.log`. When running scripts manually, `Cron.sh` 131 | can serve as a template for the intended order of operations. 132 | * Critical operations are protected by a mandatory, exclusive file lock on `mac_pw_pool/Cron.sh`. Should 133 | there be a deadlock, management of the pool (by `Cron.sh`) will stop. However, the effects of this will not be observed 134 | until workers begin hitting their lifetime and/or task limits. 135 | * Without intervention, the `nightly_maintenance.sh` script will update the containers/automation repo clone on the 136 | management VM. This happens if the repo becomes out of sync by more than 7 days (or as defined in the script). 137 | When the repo is updated, the `pw_pool_web` container will be restarted. The container will also be restarted if it's 138 | found to not be running. 139 | -------------------------------------------------------------------------------- /mac_pw_pool/Utilization.gnuplot: -------------------------------------------------------------------------------- 1 | 2 | # Intended to be run like: `gnuplot -p -c Utilization.gnuplot` 3 | # Requires a file named `utilization.csv` produced by commands 4 | # in `Cron.sh`. 
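# Each utilization.csv record is: timestamp,workers,started-tasks,finished-tasks
# e.g. (illustrative values only): 2024-01-01T03:05:00+00:00,8,42,40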
5 | # 6 | # Format Ref: http://gnuplot.info/docs_5.5/Overview.html 7 | 8 | set terminal png enhanced rounded size 1400,800 nocrop 9 | set output 'html/utilization.png' 10 | 11 | set title "Persistent Workers & Utilization" 12 | 13 | set xdata time 14 | set timefmt "%Y-%m-%dT%H:%M:%S+00:00" 15 | set xtics nomirror rotate timedate 16 | set xlabel "time/date" 17 | set xrange [(system("date -u -Iseconds -d '26 hours ago'")):(system("date -u -Iseconds"))] 18 | 19 | set ylabel "Workers Online" 20 | set ytics border nomirror numeric 21 | # Not practical to look up $DH_PFX from pw_lib.sh 22 | set yrange [0:(system("grep -E '^[a-zA-Z0-9]+-[0-9]' dh_status.txt | wc -l") * 1.5)] 23 | 24 | set y2label "Worker Utilization" 25 | set y2tics border nomirror numeric 26 | set y2range [0:100] 27 | 28 | set datafile separator comma 29 | set grid 30 | 31 | plot 'utilization.csv' using 1:2 axis x1y1 title "Workers" pt 7 ps 2, \ 32 | '' using 1:((($3-$4)/$2)*100) axis x1y2 title "Utilization" with lines lw 2 33 | -------------------------------------------------------------------------------- /mac_pw_pool/ci_env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script drops the caller into a bash shell inside an environment 4 | # substantially similar to a Cirrus-CI task running on this host. 5 | # The envars below may require adjustment to better fit them to 6 | # current/ongoing development in podman's .cirrus.yml 7 | 8 | set -eo pipefail 9 | 10 | # Not running as the pool worker user 11 | if [[ "$USER" == "ec2-user" ]]; then 12 | PWINST=$(curl -sSLf http://instance-data/latest/meta-data/tags/instance/Name) 13 | PWUSER=$PWINST-worker 14 | 15 | if [[ ! -d "/Users/$PWUSER" ]]; then 16 | echo "Warning: Instance hasn't been set up. Assuming caller will tend to this." 17 | sudo sysadminctl -addUser $PWUSER 18 | fi 19 | 20 | sudo install -o $PWUSER "${BASH_SOURCE[0]}" "/Users/$PWUSER/" 21 | exec sudo su -c "/Users/$PWUSER/$(basename ${BASH_SOURCE[0]})" - $PWUSER 22 | fi 23 | 24 | # Export all CI-critical envars defined below 25 | set -a 26 | 27 | CIRRUS_SHELL="/bin/bash" 28 | CIRRUS_TASK_ID="0123456789" 29 | CIRRUS_WORKING_DIR="$HOME/ci/task-${CIRRUS_TASK_ID}" 30 | 31 | GOPATH="$CIRRUS_WORKING_DIR/.go" 32 | GOCACHE="$CIRRUS_WORKING_DIR/.go/cache" 33 | GOENV="$CIRRUS_WORKING_DIR/.go/support" 34 | 35 | CONTAINERS_MACHINE_PROVIDER="applehv" 36 | 37 | MACHINE_IMAGE="https://fedorapeople.org/groups/podman/testing/applehv/arm64/fedora-coreos-38.20230925.dev.0-applehv.aarch64.raw.gz" 38 | 39 | GINKGO_TAGS="remote exclude_graphdriver_btrfs btrfs_noversion exclude_graphdriver_devicemapper containers_image_openpgp remote" 40 | 41 | DEBUG_MACHINE="1" 42 | 43 | ORIGINAL_HOME="$HOME" 44 | HOME="$HOME/ci" 45 | TMPDIR="/private/tmp/ci" 46 | mkdir -p "$TMPDIR" "$CIRRUS_WORKING_DIR" 47 | 48 | # Drop caller into the CI-like environment 49 | cd "$CIRRUS_WORKING_DIR" 50 | bash -il 51 | -------------------------------------------------------------------------------- /mac_pw_pool/html/index.html: --------------------------------------------------------------------------------
[index.html markup was stripped in this dump; the recoverable text is the page title "Cirrus-CI Persistent Workers" and a "Documentation" link]
-------------------------------------------------------------------------------- /mac_pw_pool/nightly_maintenance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | cd $(dirname "${BASH_SOURCE[0]}") 6 | 7 | SCRIPTNAME="$(basename ${BASH_SOURCE[0]})" 8 | WEB_IMG="docker.io/library/nginx:latest" 9 | CRONLOG="Cron.log" 10 | CRONSCRIPT="Cron.sh" 11 | KEEP_LINES=10000 12 | REFRESH_REPO_EVERY=7 # days 13 | 14 | # Do not use, these are needed to control script execution. 15 | _CNTNAME=pw_pool_web 16 | _FLOCKER="${_FLOCKER:-notlocked}" 17 | _RESTARTED_SCRIPT="${_RESTARTED_SCRIPT:-0}" 18 | 19 | if [[ ! -r "$CRONLOG" ]] || [[ ! -r "$CRONSCRIPT" ]] || [[ ! -d "../.git" ]]; then 20 | echo "ERROR: $SCRIPTNAME not executing from correct directory" >> /dev/stderr 21 | exit 1 22 | fi 23 | 24 | relaunch_web_container() { 25 | # Assume code change or image update, restart container. 26 | ( 27 | # Prevent podman and/or sub-processes from inheriting the lock FD. 28 | # This would deadlock all future runs of this script or Cron.sh 29 | # Can't use `flock --close ...` here because it "hangs" in this context. 30 | for fd_nr in $(/bin/ls /proc/self/fd/); do 31 | [[ $fd_nr -ge 3 ]] || \ 32 | continue 33 | # Bash doesn't allow direct substitution of the FD number 34 | eval "exec $fd_nr>&-" 35 | done 36 | 37 | set -x 38 | 39 | podman run --replace --name "$_CNTNAME" -d --rm --pull=newer -p 8080:80 \ 40 | -v $HOME/devel/automation/mac_pw_pool/html:/usr/share/nginx/html:ro,Z \ 41 | $WEB_IMG 42 | ) 43 | echo "$SCRIPTNAME restarted pw_pool_web container" 44 | } 45 | 46 | # Don't perform maintenance while $CRONSCRIPT is running 47 | [[ "${_FLOCKER}" != "$CRONSCRIPT" ]] && exec env _FLOCKER="$CRONSCRIPT" flock -e -w 300 "$CRONSCRIPT" "$0" "$@" || : 48 | echo "$SCRIPTNAME running at $(date -u -Iseconds)" 49 | 50 | if ! ((_RESTARTED_SCRIPT)); then 51 | today=$(date -u +%d) 52 | if ((today%REFRESH_REPO_EVERY)); then 53 | git remote update && git reset --hard origin/main 54 | # maintain the same flock 55 | echo "$SCRIPTNAME updated code after $REFRESH_REPO_EVERY days, restarting script..." 56 | env _RESTARTED_SCRIPT=1 _FLOCKER=$_FLOCKER "$0" "$@" 57 | exit $? # all done 58 | fi 59 | fi 60 | 61 | tail -n $KEEP_LINES $CRONLOG > ${CRONLOG}.tmp && mv ${CRONLOG}.tmp $CRONLOG 62 | echo "$SCRIPTNAME rotated log" 63 | 64 | # Always restart web-container when code changes, otherwise only if required 65 | if ((_RESTARTED_SCRIPT)); then 66 | relaunch_web_container 67 | else 68 | podman container exists "$_CNTNAME" || relaunch_web_container 69 | fi 70 | -------------------------------------------------------------------------------- /mac_pw_pool/pw_lib.sh: -------------------------------------------------------------------------------- 1 | 2 | # This library is intended to be sourced by other scripts inside this 3 | # directory. All other usage contexts may lead to unintended outcomes. 4 | # only the IDs differ. Assumes the sourcing script defines a `dbg()` 5 | # function. 6 | 7 | SCRIPT_FILENAME=$(basename "$0") # N/B: Caller's arg0, not this library file path. 8 | SCRIPT_DIRPATH=$(dirname "$0") 9 | LIB_DIRPATH=$(dirname "${BASH_SOURCE[0]}") 10 | REPO_DIRPATH=$(realpath "$LIB_DIRPATH/../") 11 | TEMPDIR=$(mktemp -d -p '' "${SCRIPT_FILENAME}_XXXXX.tmp") 12 | trap "rm -rf '$TEMPDIR'" EXIT 13 | 14 | # Dedicated host name prefix; Actual name will have a "-" (number) appended. 
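# e.g. with DH_PFX=MacM1 the dedicated hosts (and their instances) are named MacM1-0, MacM1-1, etc.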
15 | # N/B: ${DH_PFX}- _MUST_ match dedicated host names as listed in dh_status.txt 16 | # using the regex ^[a-zA-Z0-9]+-[0-9] (see Utilization.gnuplot) 17 | DH_PFX="MacM1" 18 | 19 | # Only manage dedicated hosts with the following tag & value 20 | DH_REQ_TAG="purpose" 21 | DH_REQ_VAL="prod" 22 | 23 | # Path to file recording the most recent state of each dedicated host. 24 | # Format is simply one line per dedicated host, with its name, instance id, and start 25 | # date/time separated by a space. Exceptional conditions are recorded as comments 26 | # with the name and details. File is refreshed/overwritten each time the script runs 27 | # without any fatal/uncaught command-errors. Intended for reference by humans 28 | # and/or other tooling. 29 | DHSTATE="${DHSTATE:-$LIB_DIRPATH/dh_status.txt}" 30 | 31 | # Similar to $DHSTATE but records the status of each instance. Format is 32 | # instance name, setup status, listener status, # started tasks, # finished tasks, 33 | # or the word 'error' indicating a fault accessing the remote worker logfile. 34 | # Optionally, there may be a final comment field, beginning with a # and text 35 | # suggesting where there may be a fault. 36 | # Possible status field values are as follows: 37 | # setup - started, complete, disabled, error 38 | # listener - alive, dead, disabled, error 39 | PWSTATE="${PWSTATE:-$LIB_DIRPATH/pw_status.txt}" 40 | 41 | # At maximum possible creation-speed, there's approx. 2 hours of time between 42 | # an instance going down and another being up and running again. Since 43 | # instances are all shut down/terminated on pre-set timers, it would hurt 44 | # pool availability if multiple instances all went down at the same time. 45 | # Therefore, host and instance creations will be staggered according 46 | # to this interval. 47 | CREATE_STAGGER_HOURS=2 48 | 49 | # Instance shutdown controls (assumes terminate-on-shutdown behavior) 50 | PW_MAX_HOURS=24 # Since successful configuration 51 | PW_MAX_TASKS=24 # Logged by listener (N/B: Log can be manipulated by tasks!) 52 | PW_MIN_ALIVE=3 # Bypass enforcement of $PW_MAX_TASKS if <= alive/operating workers 53 | 54 | # How long to wait for setup.sh to finish running (drop a .setup.done file) 55 | # before forcibly terminating. 56 | SETUP_MAX_SECONDS=2400 # Typical time ~10 minutes, use 2x safety-factor. 57 | 58 | # Name of launch template. Current/default version will be used. 59 | # https://us-east-1.console.aws.amazon.com/ec2/home?region=us-east-1#LaunchTemplates: 60 | TEMPLATE_NAME="${TEMPLATE_NAME:-Cirrus${DH_PFX}PWinstance}" 61 | 62 | # Path to scripts to copy/execute on Darwin instances 63 | SETUP_SCRIPT="$LIB_DIRPATH/setup.sh" 64 | SPOOL_SCRIPT="$LIB_DIRPATH/service_pool.sh" 65 | SHDWN_SCRIPT="$LIB_DIRPATH/shutdown.sh" 66 | CIENV_SCRIPT="$LIB_DIRPATH/ci_env.sh" 67 | 68 | # Set to 1 to enable debugging 69 | X_DEBUG="${X_DEBUG:-0}" 70 | 71 | # AWS CLI command and general args 72 | AWS="aws --no-paginate --output=json --color=off --no-cli-pager --no-cli-auto-prompt" 73 | 74 | # Common ssh/scp arguments 75 | SSH_ARGS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o CheckHostIP=no -F /dev/null -o LogLevel=ERROR -o ConnectTimeout=13" 76 | # ssh/scp commands to run w/ arguments 77 | SSH="${SSH:-ssh -n $SSH_ARGS}" # N/B: default nulls stdin 78 | SCP="${SCP:-scp -q $SSH_ARGS}" 79 | 80 | # Indentation to prefix msg/warn/die messages with to assist humans in understanding context. 81 | _I="${_I:-}" 82 | 83 | # Print details $1 (defaults to 1) calls above the caller in the stack. 
84 | # usage e.g. $(ctx 0) - print details about current function 85 | # $(ctx) - print details about current function's caller 86 | # $(ctx 2) - print details about current function's caller's caller. 87 | ctx() { 88 | local above level 89 | above=${1:-1} 90 | level=$((1+$above)) 91 | script=$(basename ${BASH_SOURCE[$level]}) 92 | echo "($script:${FUNCNAME[$level]}():${BASH_LINENO[$above]})" 93 | } 94 | 95 | msg() { echo "${_I}${1:-No text message provided}"; } 96 | warn() { echo "${1:-No warning message provided}" | awk -e '{print "'"${_I}"'WARNING: "$0}' >> /dev/stderr; } 97 | die() { echo "${1:-No error message provided}" | awk -e '{print "'"${_I}"'ERROR: "$0}' >> /dev/stderr; exit 1; } 98 | dbg() { 99 | if ((X_DEBUG)); then 100 | msg "${1:-No debug message provided} $(ctx 1)" | awk -e '{print "'"${_I}"'DEBUG: "$0}' >> /dev/stderr 101 | fi 102 | } 103 | 104 | # Obtain a JSON string value by running the provided query filter (arg 1) on 105 | # JSON file (arg 2). Return non-zero on jq error (1), or if value is empty 106 | # or null (2). Otherwise print value and return 0. 107 | jq_errf="$TEMPDIR/jq_error.output" 108 | json_query() { 109 | local value 110 | local indent=" " 111 | dbg "jq filter $1 112 | $indent on $(basename $2) $(ctx)" 113 | if ! value=$(jq -r "$1" "$2" 2>"$jq_errf"); then 114 | dbg "$indent error: $(<$jq_errf)" 115 | return 1 116 | fi 117 | 118 | if [[ -z "$value" ]] || [[ "$value" == "null" ]]; then 119 | dbg "$indent result: Empty or null" 120 | return 2 121 | fi 122 | 123 | dbg "$indent result: '$value'" 124 | echo "$value" 125 | return 0 126 | } 127 | -------------------------------------------------------------------------------- /mac_pw_pool/service_pool.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Launch Cirrus-CI PW Pool listener & manager process. 4 | # Intended to be called once from setup.sh on M1 Macs. 5 | # Expects configuration filepath to be passed as the first argument. 6 | # Expects the number of hours until shutdown (and self-termination) 7 | # as the second argument. 8 | 9 | set -o pipefail 10 | 11 | msg() { echo "##### ${1:-No message provided}"; } 12 | die() { echo "ERROR: ${1:-No error message provided}"; exit 1; } 13 | 14 | for varname in PWCFG PWUSER PWREADYURL PWREADY; do 15 | varval="${!varname}" 16 | [[ -n "$varval" ]] || \ 17 | die "Env. var. \$$varname is unset/empty." 18 | done 19 | 20 | [[ "$USER" == "ec2-user" ]] || \ 21 | die "Expecting to execute as 'ec2-user'." 22 | 23 | # All operations assume this CWD 24 | cd $HOME 25 | 26 | # For whatever reason, when this script is run through ssh, the default 27 | # environment isn't loaded automatically. 28 | . /etc/profile 29 | 30 | # This can be leftover under certain conditions 31 | # shellcheck disable=SC2154 32 | sudo pkill -u $PWUSER -f "cirrus worker run" || true 33 | 34 | # Configuring a launchd agent to run the worker process is a major 35 | # PITA and seems to require rebooting the instance. Work around 36 | # this with a really hacky loop masquerading as a system service. 37 | # envar exported to us 38 | # shellcheck disable=SC2154 39 | while [[ -r $PWCFG ]] && [[ "$PWREADY" == "true" ]]; do # Remove file or change tag to shutdown this "service" 40 | # The $PWUSER has access to kill its own listener, or it could crash. 41 | if ! pgrep -u $PWUSER -f -q "cirrus worker run"; then 42 | # FIXME: CI Tasks will execute as $PWUSER and ordinarily would have 43 | # read access to $PWCFG file containing $POOLTOKEN. 
While not 44 | # disastrous, it's desirable to not leak potentially sensitive 45 | # values. Work around this by keeping the file unreadable by 46 | # $PWUSER except for a brief period while starting up. 47 | sudo chmod 0644 $PWCFG 48 | msg "$(date -u -Iseconds) Starting PW pool listener as $PWUSER" 49 | # This is intended for user's setup.log 50 | # shellcheck disable=SC2024 51 | sudo su -l $PWUSER -c "/opt/homebrew/bin/cirrus worker run --file $PWCFG &" >>setup.log 2>&1 & 52 | sleep 10 # eek! 53 | sudo chmod 0600 $PWCFG 54 | fi 55 | 56 | # This can fail on occasion for some reason 57 | # envar exported to us 58 | # shellcheck disable=SC2154 59 | if ! PWREADY=$(curl -sSLf $PWREADYURL); then 60 | PWREADY="recheck" 61 | fi 62 | 63 | # Avoid re-launch busy-wait 64 | sleep 10 65 | 66 | # Second-chance 67 | if [[ "$PWREADY" == "recheck" ]] && ! PWREADY=$(curl -sSLf $PWREADYURL); then 68 | msg "Failed twice to obtain PWPoolReady instance tag. Disabling listener." 69 | rm -f "$PWCFG" 70 | break 71 | fi 72 | done 73 | 74 | set +e 75 | 76 | msg "Configuration file not readable; PWPoolReady tag '$PWREADY'." 77 | msg "Terminating $PWUSER PW pool listener process" 78 | # N/B: This will _not_ stop the cirrus agent (i.e. a running task) 79 | sudo pkill -u $PWUSER -f "cirrus worker run" 80 | -------------------------------------------------------------------------------- /mac_pw_pool/shutdown.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Script intended to be called by automation only. 4 | # Should never be called from any other context. 5 | 6 | # Log on the off-chance it somehow helps somebody debug something one day 7 | ( 8 | 9 | echo "Starting ${BASH_SOURCE[0]} at $(date -u -Iseconds)" 10 | 11 | PWNAME=$(uname -n) 12 | PWUSER=$PWNAME-worker 13 | 14 | if id -u "$PWUSER" &> /dev/null; then 15 | # Try to not reboot while a CI task is running. 16 | # Cirrus-CI imposes a hard-timeout of 2-hours. 17 | now=$(date -u +%s) 18 | timeout_at=$((now+60*60*2)) 19 | echo "Waiting up to 2 hours for any pre-existing cirrus agent (i.e. running task)" 20 | while pgrep -u $PWUSER -q -f "cirrus-ci-agent"; do 21 | if [[ $(date -u +%s) -gt $timeout_at ]]; then 22 | echo "Timeout waiting for cirrus-ci-agent to terminate" 23 | break 24 | fi 25 | echo "Found cirrus-ci-agent still running, waiting..." 26 | sleep 60 27 | done 28 | fi 29 | 30 | echo "Initiating shutdown at $(date -u -Iseconds)" 31 | 32 | # This script is run with a sleep in front of it 33 | # as a workaround for darwin's shutdown-command 34 | # terminal weirdness. 35 | 36 | sudo shutdown -h now "Automatic instance recycling" 37 | 38 | ) < /dev/null >> setup.log 2>&1 39 | -------------------------------------------------------------------------------- /renovate/defaults.json5: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Validate this file before committing with (from repository root): 4 | 5 | podman run -it \ 6 | -v ./renovate/defaults.json5:/usr/src/app/renovate.json5:z \ 7 | ghcr.io/renovatebot/renovate:latest \ 8 | renovate-config-validator 9 | 10 | and/or use the pre-commit hook: https://github.com/renovatebot/pre-commit-hooks 11 | */ 12 | 13 | { 14 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 15 | "description": "This is a basic preset intended\ 16 | for reuse to reduce the amount of boiler-plate\ 17 | configuration that otherwise would need to be\ 18 | duplicated. 
It should be referenced from other\ 19 | repositories' renovate config under the 'extends'\ 20 | section as: github>containers/automation//renovate/defaults.json5\ 21 | (optionally with a '#X.Y.Z' version-tag suffix).", 22 | 23 | /************************************************* 24 | ****** Global/general configuration options ***** 25 | *************************************************/ 26 | 27 | // Re-use predefined sets of configuration options to DRY 28 | "extends": [ 29 | // https://docs.renovatebot.com/presets-config/#configbase 30 | "config:recommended", 31 | 32 | // https://docs.renovatebot.com/presets-default/#gitsignoff 33 | ":gitSignOff", 34 | 35 | // Always rebase dep. update PRs from `main` when PR is stale 36 | ":rebaseStalePrs" 37 | ], 38 | 39 | // The default setting is ambiguous, explicitly base schedules on UTC 40 | "timezone": "UTC", 41 | 42 | // Don't swamp CI, rate-limit opening of PRs w/in schedule limits. 43 | "prHourlyLimit": 1, 44 | 45 | // Make renovate PRs stand out from the crowd 46 | "labels": ["dependencies"], 47 | 48 | // Default setting is an "empty" schedule. Explicitly set this 49 | // such that security-alert PRs may be opened immediately. 50 | "vulnerabilityAlerts": { 51 | // Distinguish PRs from regular dependency updates 52 | "labels": ["dependencies", "security"], 53 | 54 | // Force-enable renovate management of deps. which are otherwise 55 | // disabled. Note: Does not apply to any "ignorePaths" list, nor 56 | // any deps. disabled via `packageRules` in this block 57 | // (last-match wins rule). 58 | "enabled": true, 59 | 60 | // Note: As of 2024-06-25 indirect golang dependency handling is 61 | // broken in Renovate, and disabled by default. This affects 62 | // vulnerabilityAlerts in that if the dep is 'indirect' no PR 63 | // will ever open, it must be handled manually. Attempting 64 | // to enable indirect deps (for golang) in this section will 65 | // not work, it will always be overridden by the global golang 66 | // indirect dep. setting. 67 | }, 68 | 69 | // On a busy repo, automatic-rebasing will swamp the CI system. 70 | // Turn it off here, then allow individual repos. to override/enable 71 | // it as appropriate. 72 | "rebaseWhen": "never", 73 | 74 | /************************************************** 75 | ***** Manager-specific configuration options ***** 76 | **************************************************/ 77 | 78 | "customManagers": [ 79 | // Track the latest CI VM images by tag on the containers/automation_images 80 | // repo. Propose updates when a newer tag is available compared to what is 81 | // referenced in a repo's .cirrus.yml file. 82 | { 83 | "customType": "regex", 84 | "fileMatch": "^.cirrus.yml$", 85 | // Expected version format: c<IMG_SFX> 86 | // For example `c20230120t152650z-f37f36u2204` 87 | "matchStrings": ["c(?<currentValue>20\\d{6}t\\d{6}z-\\w+)"], 88 | "depNameTemplate": "containers/automation_images", 89 | "datasourceTemplate": "github-tags", 90 | "versioningTemplate": "loose", 91 | "autoReplaceStringTemplate": "c{{{newVersion}}}" 92 | }, 93 | 94 | // For skopeo and podman, manage the golangci-lint version as 95 | // referenced in their Makefile. 96 | { 97 | "customType": "regex", 98 | "fileMatch": "^Makefile$", 99 | // make ignores whitespace around the value, make renovate do the same. 
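// An illustrative Makefile line this regex should match (the version number is hypothetical):
//   GOLANGCI_LINT_VERSION := 1.56.2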
100 | "matchStrings": [ 101 | "GOLANGCI_LINT_VERSION\\s+:=\\s+(?.+)\\s*" 102 | ], 103 | "depNameTemplate": "golangci/golangci-lint", 104 | "datasourceTemplate": "github-releases", 105 | "versioningTemplate": "semver-coerced", 106 | // Podman's installer script will puke if there's a 'v' prefix, as represented 107 | // in upstream golangci/golangci-lint releases. 108 | "extractVersionTemplate": "v(?.+)" 109 | } 110 | ], 111 | 112 | /************************************************* 113 | ***** Language-specific configuration options **** 114 | **************************************************/ 115 | 116 | // ***** ATTENTION WARNING CAUTION DANGER ***** // 117 | // Go versions 1.21 and later will AUTO-UPDATE based on _module_ 118 | // _requirements_. ref: https://go.dev/doc/toolchain Because 119 | // many different projects covered by this config, build under 120 | // different distros and distro-versions, golang version consistency 121 | // is desireable across build outputs. In golang 1.21 and later, 122 | // it's possible to pin the version in each project using the 123 | // toolchain go.mod directive. This should be done to prevent 124 | // unwanted auto-updates. 125 | // Ref: Upstream discussion https://github.com/golang/go/issues/65847 126 | "constraints": {"go": "1.23"}, 127 | 128 | // N/B: LAST MATCHING RULE WINS, match statems are ANDed together. 129 | // https://docs.renovatebot.com/configuration-options/#packagerules 130 | "packageRules": [ 131 | /************************************************* 132 | ****** Rust-specific configuration options ******* 133 | **************************************************/ 134 | { 135 | "matchCategories": ["rust"], 136 | // Update both Cargo.toml and Cargo.lock when possible 137 | // i.e. bump the range even if the new version satisfies the existing range. 138 | // https://docs.renovatebot.com/configuration-options/#rangestrategy 139 | "rangeStrategy": "bump" 140 | }, 141 | 142 | { 143 | "matchCategories": ["rust"], 144 | "matchPackageNames": ["serde", "clap"], 145 | // Update both Cargo.toml and Cargo.lock when possible 146 | "rangeStrategy": "bump", 147 | // These packages roll updates far too often, slow them down. 148 | // Ref: https://github.com/containers/netavark/issues/772 149 | "schedule": ["after 1am and before 11am on the first day of the month"] 150 | }, 151 | 152 | /************************************************* 153 | ****** Python-specific configuration options ***** 154 | **************************************************/ 155 | { 156 | "matchCategories": ["python"], 157 | // Preserve (but continue to upgrade) any existing SemVer ranges. 158 | "rangeStrategy": "replace" 159 | }, 160 | 161 | /************************************************* 162 | ****** Golang-specific configuration options ***** 163 | **************************************************/ 164 | { 165 | "matchCategories": ["golang"], 166 | // disabled by default, safe to enable since "tidy" enforced by CI. 167 | "postUpdateOptions": ["gomodTidy"], 168 | // In case a version in use is retracted, allow going backwards. 169 | // N/B: This is NOT compatible with pseudo versions, see below. 170 | "rollbackPrs": false, 171 | // Preserve (but continue to upgrade) any existing SemVer ranges. 172 | "rangeStrategy": "replace" 173 | }, 174 | 175 | // Golang pseudo-version packages will spam with every Commit ID change. 176 | // Limit update frequency. 
177 | { 178 | "matchCategories": ["golang"], 179 | "matchUpdateTypes": ["digest"], 180 | "schedule": ["after 1am and before 11am on the first day of the month"] 181 | }, 182 | 183 | // Package version retraction (https://go.dev/ref/mod#go-mod-file-retract) 184 | // is broken in Renovate. And no repo should use these retracted versions. 185 | // ref: https://github.com/renovatebot/renovate/issues/13012 186 | { 187 | "matchCategories": ["golang"], 188 | "matchPackageNames": ["github.com/containers/common"], 189 | // Both v1.0.0 and v1.0.1 should be ignored. 190 | "allowedVersions": "!/v((1.0.0)|(1.0.1))$/" 191 | }, 192 | 193 | // Skip updating the go.mod toolchain directive, humans will manage this. 194 | { 195 | "matchCategories": ["golang"], 196 | "matchDepTypes": ["toolchain"], 197 | "enabled": false 198 | }, 199 | 200 | /************************************************* 201 | ************ CI configuration options ************ 202 | **************************************************/ 203 | 204 | // Github-action updates cannot consistently be tested in a PR. 205 | // This is caused by an unfixable architecture-flaw: Execution 206 | // context always depends on trigger, and we (obviously) can't know 207 | // that ahead of time for all workflows. Abandon all hope and 208 | // mark github-action dep. update PRs '[skip-ci]' 209 | { 210 | "matchManagers": ["github-actions"], 211 | "matchDepTypes": ["action"], 212 | "commitMessagePrefix": "[skip-ci]" 213 | }, 214 | 215 | // Group together all CI VM image updates into a single PR. This is needed 216 | // to handle the case where an IMG_SFX is mentioned in a comment. For 217 | // example, flagging an important TODO or FIXME item. Or, where CI VM 218 | // images are split across multiple IMG_SFX values that all need to be updated. 219 | { 220 | "matchManagers": ["custom.regex"], 221 | "matchFileNames": [".cirrus.yml"], 222 | "groupName": "CI VM Image", 223 | // Somebody(s) need to check image update PRs as soon as they open. 224 | "reviewers": ["Luap99"], 225 | // Don't wait, roll out CI VM Updates immediately 226 | "schedule": ["at any time"] 227 | }, 228 | ] 229 | } 230 | --------------------------------------------------------------------------------
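A usage sketch (not part of the repository itself): a downstream repo consumes the preset above from its `.github/renovate.json5` roughly like this, where `X.Y.Z` stands for whichever release tag of containers/automation is wanted:

{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": ["github>containers/automation//renovate/defaults.json5#X.Y.Z"]
}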