├── .codespellignore ├── ci ├── lib.sh ├── shellcheck.sh ├── tag_latest.sh ├── make.sh ├── make_container_images.sh ├── Containerfile └── validate.sh ├── .dockerignore ├── IMG_SFX ├── get_ci_vm ├── good_repo_test │ ├── dot_cirrus.yml │ ├── dot_git.tar.gz │ ├── README.md │ ├── .cirrus.yml │ ├── hack │ │ └── get_ci_vm.sh │ └── uninit_gcloud.output ├── good_repo_test_v2 │ ├── dot_cirrus.yml │ ├── mock_ec2_key │ ├── mock_ec2_key.pub │ ├── README.md │ ├── uninit_aws.output │ ├── .cirrus.yml │ ├── ami_search.json │ └── hack │ │ └── get_ci_vm.sh ├── Containerfile ├── setup.sh ├── bad_repo_test │ └── hack │ │ └── get_ci_vm.sh └── test.sh ├── .gitignore ├── .codespelldict ├── base_images ├── cloud-init │ ├── fedora │ │ └── cloud.cfg.d │ │ │ ├── 40_enable_root.cfg │ │ │ └── 40_defuser.cfg │ └── debian │ │ └── cloud.cfg.d │ │ ├── 40_defuser.cfg │ │ └── 40_enable_root.cfg ├── no_dash.dat ├── .gitignore ├── fedora-cloud-init.service ├── debian_base-setup.sh ├── fedora_base-setup.sh └── cloud.yml ├── .codespellrc ├── CODE-OF-CONDUCT.md ├── gcsupld ├── Containerfile ├── README.md ├── entrypoint.sh └── test.sh ├── win_images ├── enable-rdp-userdata.xml ├── bootstrap.ps1 ├── win-lib.ps1 ├── win_packaging.ps1 ├── win_finalization.ps1 └── win-server-wsl.yml ├── imgprune ├── Containerfile ├── README.md └── entrypoint.sh ├── SECURITY.md ├── .cirrus.star ├── imgobsolete ├── Containerfile ├── README.md └── entrypoint.sh ├── imgts ├── google-cloud-sdk.repo ├── README.md ├── Containerfile ├── lib_entrypoint.sh └── entrypoint.sh ├── image_builder ├── google-cloud-sdk.repo ├── install_packages.txt ├── Containerfile ├── setup.sh ├── install_packages.sh └── gce.yml ├── orphanvms ├── Containerfile ├── README.md ├── entrypoint.sh ├── _gce └── _ec2 ├── skopeo_cidev ├── README.md ├── Containerfile ├── packages.txt └── setup.sh ├── get_packer_version.sh ├── gcpprojects.txt ├── ccia ├── fake_manifests │ ├── Image-builder image │ │ └── manifest │ │ │ └── image_builder │ │ │ └── manifest.json │ ├── 
fedora Cache Image │ │ └── manifest │ │ │ └── cache_images │ │ │ └── manifest.json │ ├── ubuntu Base Image │ │ └── manifest │ │ │ └── base_images │ │ │ └── manifest.json │ ├── ubuntu Cache Image │ │ └── manifest │ │ │ └── cache_images │ │ │ └── manifest.json │ ├── fedora-aws Base Image │ │ └── manifest │ │ │ └── base_images │ │ │ └── manifest.json │ ├── build-push Cache Image │ │ └── manifest │ │ │ └── cache_images │ │ │ └── manifest.json │ ├── fedora-aws Cache Image │ │ └── manifest │ │ │ └── cache_images │ │ │ └── manifest.json │ ├── fedora-aws-arm64 Base Image │ │ └── manifest │ │ │ └── base_images │ │ │ └── manifest.json │ ├── fedora-netavark Cache Image │ │ └── manifest │ │ │ └── cache_images │ │ │ └── manifest.json │ ├── fedora-podman-aws-arm64 Cache Image │ │ └── manifest │ │ │ └── cache_images │ │ │ └── manifest.json │ ├── fedora-netavark-aws-arm64 Cache Image │ │ └── manifest │ │ │ └── cache_images │ │ │ └── manifest.json │ └── fedora Base Image │ │ └── manifest │ │ └── base_images │ │ └── manifest.json ├── README.md ├── test.sh └── Containerfile ├── make-user-data.sh ├── podman ├── setup.sh └── Containerfile ├── .pre-commit-hooks.yaml ├── .github ├── workflows │ ├── check_cirrus_cron.yml │ ├── orphan_vms.yml │ └── pr_image_id.yml ├── renovate.json5 └── actions │ └── bin │ └── create_image_table.py ├── check-imgsfx.sh ├── cache_images ├── build-push_packaging.sh ├── rawhide_setup.sh ├── fedora_setup.sh ├── debian_setup.sh ├── fedora-netavark_packaging.sh ├── debian_packaging.sh ├── fedora_packaging.sh └── cloud.yml ├── dot_pre-commit-config.yaml.example ├── get_fedora_url.sh ├── systemd_banish.sh └── README-simplified.md /.codespellignore: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ci/lib.sh: -------------------------------------------------------------------------------- 1 | ../lib.sh 
-------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | tmp 3 | -------------------------------------------------------------------------------- /IMG_SFX: -------------------------------------------------------------------------------- 1 | 20251211t152018z-f43f42d14 2 | -------------------------------------------------------------------------------- /get_ci_vm/good_repo_test/dot_cirrus.yml: -------------------------------------------------------------------------------- 1 | .cirrus.yml -------------------------------------------------------------------------------- /get_ci_vm/good_repo_test_v2/dot_cirrus.yml: -------------------------------------------------------------------------------- 1 | .cirrus.yml -------------------------------------------------------------------------------- /get_ci_vm/good_repo_test_v2/mock_ec2_key: -------------------------------------------------------------------------------- 1 | blahblahblah 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | */*.json 2 | /.cache 3 | .pre-commit-config.yaml 4 | -------------------------------------------------------------------------------- /get_ci_vm/good_repo_test_v2/mock_ec2_key.pub: -------------------------------------------------------------------------------- 1 | blahblahblah 2 | -------------------------------------------------------------------------------- /get_ci_vm/good_repo_test_v2/README.md: -------------------------------------------------------------------------------- 1 | ../good_repo_test/README.md -------------------------------------------------------------------------------- /.codespelldict: -------------------------------------------------------------------------------- 1 | IMGSFX,IMG-SFX->IMG_SFX 2 | 
Dockerfile->Containerfile 3 | -------------------------------------------------------------------------------- /base_images/cloud-init/fedora/cloud.cfg.d/40_enable_root.cfg: -------------------------------------------------------------------------------- 1 | disable_root: 0 2 | -------------------------------------------------------------------------------- /base_images/cloud-init/debian/cloud.cfg.d/40_defuser.cfg: -------------------------------------------------------------------------------- 1 | ../../fedora/cloud.cfg.d/40_defuser.cfg -------------------------------------------------------------------------------- /base_images/cloud-init/debian/cloud.cfg.d/40_enable_root.cfg: -------------------------------------------------------------------------------- 1 | ../../fedora/cloud.cfg.d/40_enable_root.cfg -------------------------------------------------------------------------------- /base_images/no_dash.dat: -------------------------------------------------------------------------------- 1 | Name: dash/sh 2 | Template: dash/sh 3 | Value: false 4 | Owners: dash 5 | Flags: seen 6 | -------------------------------------------------------------------------------- /.codespellrc: -------------------------------------------------------------------------------- 1 | [codespell] 2 | ignore-words = .codespellignore 3 | dictionary = .codespelldict 4 | quiet-level = 3 5 | -------------------------------------------------------------------------------- /base_images/.gitignore: -------------------------------------------------------------------------------- 1 | *json 2 | packer 3 | packer*zip 4 | packer_cache 5 | cidata* 6 | meta-data 7 | user-data 8 | -------------------------------------------------------------------------------- /get_ci_vm/good_repo_test/dot_git.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/containers/automation_images/HEAD/get_ci_vm/good_repo_test/dot_git.tar.gz 
-------------------------------------------------------------------------------- /base_images/cloud-init/fedora/cloud.cfg.d/40_defuser.cfg: -------------------------------------------------------------------------------- 1 | # Avoid adding any users to the system by default. GCP OSLogin will do it at runtime. 2 | users: {} 3 | -------------------------------------------------------------------------------- /get_ci_vm/good_repo_test/README.md: -------------------------------------------------------------------------------- 1 | This directory is setup for testing, changes to any of it's contents 2 | may cause unwanted side-effects. Please understand the test.sh script 3 | before making any changes. 4 | -------------------------------------------------------------------------------- /CODE-OF-CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## The Automation Images Project Community Code of Conduct 2 | 3 | The Automation Images Project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md). 
4 | -------------------------------------------------------------------------------- /gcsupld/Containerfile: -------------------------------------------------------------------------------- 1 | FROM imgts:latest 2 | 3 | COPY /gcsupld/entrypoint.sh /usr/local/bin/entrypoint.sh 4 | RUN chmod 755 /usr/local/bin/entrypoint.sh 5 | 6 | # These are required at runtime 7 | ENV FROM_FILEPATH="" \ 8 | TO_GCSURI="" 9 | -------------------------------------------------------------------------------- /win_images/enable-rdp-userdata.xml: -------------------------------------------------------------------------------- 1 | 2 | Set-ItemProperty -Path "HKLM:\System\CurrentControlSet\Control\Terminal Server" -Name "fDenyTSConnections" -Value 0 3 | Enable-NetFirewallRule -DisplayGroup "Remote Desktop" 4 | 5 | -------------------------------------------------------------------------------- /imgprune/Containerfile: -------------------------------------------------------------------------------- 1 | FROM imgts:latest 2 | 3 | COPY /imgprune/entrypoint.sh /usr/local/bin/entrypoint.sh 4 | RUN chmod 755 /usr/local/bin/entrypoint.sh 5 | 6 | # These are only needed by imgts 7 | ENV IMGNAMES="" \ 8 | BUILDID="" \ 9 | REPOREF="" 10 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | ## Security and Disclosure Information Policy for the Automation Images Project 2 | 3 | The Automation Images Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects. 4 | -------------------------------------------------------------------------------- /.cirrus.star: -------------------------------------------------------------------------------- 1 | # 2 | # Lang. ref: https://github.com/bazelbuild/starlark/blob/master/spec.md#contents 3 | # Impl. 
ref: https://cirrus-ci.org/guide/programming-tasks/ 4 | load("cirrus", "fs") 5 | 6 | def main(): 7 | return { 8 | "env": { 9 | "IMG_SFX": fs.read("IMG_SFX").strip() 10 | }, 11 | } 12 | -------------------------------------------------------------------------------- /imgobsolete/Containerfile: -------------------------------------------------------------------------------- 1 | FROM imgts:latest 2 | 3 | COPY /imgobsolete/entrypoint.sh /usr/local/bin/entrypoint.sh 4 | RUN chmod 755 /usr/local/bin/entrypoint.sh 5 | 6 | # Env. vars set to "__unknown__" are required to be set by the caller 7 | ENV AWSINI="__unknown__" \ 8 | IMGNAMES="" \ 9 | BUILDID="" \ 10 | REPOREF="" 11 | -------------------------------------------------------------------------------- /imgts/google-cloud-sdk.repo: -------------------------------------------------------------------------------- 1 | # Copy-pasted from https://cloud.google.com/sdk/docs/install#red-hatfedoracentos 2 | 3 | [google-cloud-cli] 4 | name=Google Cloud CLI 5 | baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el9-x86_64 6 | enabled=1 7 | gpgcheck=1 8 | repo_gpgcheck=0 9 | gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg 10 | -------------------------------------------------------------------------------- /image_builder/google-cloud-sdk.repo: -------------------------------------------------------------------------------- 1 | # Copy-pasted from https://cloud.google.com/sdk/docs/install#red-hatfedoracentos 2 | 3 | [google-cloud-cli] 4 | name=Google Cloud CLI 5 | baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el9-x86_64 6 | enabled=1 7 | gpgcheck=1 8 | repo_gpgcheck=0 9 | gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg 10 | -------------------------------------------------------------------------------- /orphanvms/Containerfile: -------------------------------------------------------------------------------- 1 | FROM imgts:latest 2 | 3 | COPY 
/orphanvms/entrypoint.sh /orphanvms/_gce /orphanvms/_ec2 /usr/local/bin/ 4 | RUN chmod 755 /usr/local/bin/entrypoint.sh 5 | 6 | # Clear unneeded requirements, add GCPPROJECTS and AWSINI as required 7 | ENV IMGNAMES="" \ 8 | BUILDID="" \ 9 | REPOREF="" \ 10 | GCPPROJECTS="__unknown__" \ 11 | AWSINI="__unknown__" 12 | -------------------------------------------------------------------------------- /get_ci_vm/good_repo_test_v2/uninit_aws.output: -------------------------------------------------------------------------------- 1 | This is fake output for testing. 2 | 3 | Name Value Type Location 4 | ---- ----- ---- -------- 5 | profile automation_images manual --profile 6 | 7 | The config profile (automation_images) could not be found 8 | This is fake output for testing. 9 | -------------------------------------------------------------------------------- /skopeo_cidev/README.md: -------------------------------------------------------------------------------- 1 | # Skopeo CI/Dev image 2 | 3 | The contents of this directory are intended to be utilized via 4 | the `Makefile` at the top of the repository. Typically as 5 | `make skopeo_cidev IMG_SFX=`. The resultant image is utilized 6 | as part of the [skopeo project's](https://github.com/containers/skopeo) 7 | development and CI automation. It should not be used outside 8 | of those contexts. 
9 | -------------------------------------------------------------------------------- /get_ci_vm/good_repo_test/.cirrus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | aws_test_task: 4 | ec2_instance: 5 | image: ami-1234567890 6 | 7 | google_test_task: 8 | gce_instance: 9 | image_name: test-image-name 10 | 11 | container_test_task: 12 | container: 13 | image: something 14 | 15 | windows_container_test_task: 16 | windows_container: 17 | image: cirrusci/windowsservercore:2019 18 | -------------------------------------------------------------------------------- /get_ci_vm/good_repo_test_v2/.cirrus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | aws_test_task: 4 | env: 5 | EC2_INST_TYPE: bigone.supervm 6 | ec2_instance: 7 | image: fedora-podman-aws-arm64-c5495735033528320 8 | type: ${EC2_INST_TYPE} 9 | 10 | google_test_task: 11 | gce_instance: 12 | image_name: test-image-name 13 | 14 | container_test_task: 15 | container: 16 | image: something 17 | -------------------------------------------------------------------------------- /get_packer_version.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | # This script is intended to be executed from the Makefile. 4 | # It allows the .cirrus.yml definition of PACKER_VERSION to 5 | # act as the single source of truth for this value. 6 | 7 | cd $(dirname "${BASH_SOURCE[0]}") || exit 8 | YML_LINE=$(grep -Em1 '^\s+PACKER_VERSION:' .cirrus.yml) 9 | VER_VAL=$(awk '{print $3}' <<<"$YML_LINE" | tr -d "\"'[:space:]") 10 | echo -n "$VER_VAL" 11 | -------------------------------------------------------------------------------- /gcpprojects.txt: -------------------------------------------------------------------------------- 1 | # This is a listing of Google Cloud Platform Project IDs for 2 | # orphan VM monitoring and possibly other automation tasks. 
3 | # Note: CI VM images produced by this repo are all stored within 4 | # the libpod-218412 project (in addition to some AWS EC2) 5 | buildah 6 | conmon-222014 7 | containers-build-source-image 8 | libpod-218412 9 | netavark-2021 10 | oci-seccomp-bpf-hook 11 | skopeo 12 | storage-240716 13 | udica-247612 14 | -------------------------------------------------------------------------------- /image_builder/install_packages.txt: -------------------------------------------------------------------------------- 1 | buildah 2 | bash-completion 3 | curl 4 | findutils 5 | gawk 6 | genisoimage 7 | git 8 | google-cloud-cli 9 | jq 10 | libvirt 11 | libvirt-admin 12 | libvirt-client 13 | libvirt-daemon 14 | libxcrypt-compat 15 | make 16 | openssh 17 | openssl 18 | podman 19 | python3 20 | python3-pyyaml 21 | qemu-img 22 | qemu-kvm 23 | rng-tools 24 | rootfiles 25 | rsync 26 | sed 27 | skopeo 28 | tar 29 | unzip 30 | util-linux 31 | vim 32 | -------------------------------------------------------------------------------- /ccia/fake_manifests/Image-builder image/manifest/image_builder/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "builds": [ 3 | { 4 | "name": "image-builder", 5 | "builder_type": "googlecompute", 6 | "build_time": 1658173915, 7 | "files": null, 8 | "artifact_id": "image-builder-5419329914142720", 9 | "packer_run_uuid": "243ae2b1-d4b4-4917-9883-a96c516a2c39", 10 | "custom_data": { 11 | "IMG_SFX": "5419329914142720" 12 | } 13 | } 14 | ], 15 | "last_run_uuid": "243ae2b1-d4b4-4917-9883-a96c516a2c39" 16 | } 17 | -------------------------------------------------------------------------------- /imgprune/README.md: -------------------------------------------------------------------------------- 1 | A container image for maintaining the collection of 2 | deprecated VM images disused by CI/CD projects. 
Images 3 | marked deprecated are pruned (deleted) by this image 4 | once they surpass a certain age since last-used. 5 | 6 | * `GCPJSON` - Contents of the service-account JSON key file. 7 | * `GCPNAME` - Complete Name (fake e-mail address) of the service account. 8 | * `GCPPROJECT` - Project ID of the GCP project. 9 | 10 | Example build (from repository root): 11 | 12 | ```bash 13 | make imgprune IMG_SFX=example 14 | ``` 15 | -------------------------------------------------------------------------------- /ccia/fake_manifests/fedora Cache Image/manifest/cache_images/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "builds": [ 3 | { 4 | "name": "fedora", 5 | "builder_type": "googlecompute", 6 | "build_time": 1658176163, 7 | "files": null, 8 | "artifact_id": "fedora-c5419329914142720", 9 | "packer_run_uuid": "30833ff6-05df-ee1e-4378-57991d592136", 10 | "custom_data": { 11 | "IMG_SFX": "5419329914142720", 12 | "STAGE": "cache" 13 | } 14 | } 15 | ], 16 | "last_run_uuid": "30833ff6-05df-ee1e-4378-57991d592136" 17 | } 18 | -------------------------------------------------------------------------------- /ccia/fake_manifests/ubuntu Base Image/manifest/base_images/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "builds": [ 3 | { 4 | "name": "ubuntu", 5 | "builder_type": "googlecompute", 6 | "build_time": 1658175167, 7 | "files": null, 8 | "artifact_id": "ubuntu-b5419329914142720", 9 | "packer_run_uuid": "238ce64e-cb7d-4c1b-38ff-3e0eb9e3939a", 10 | "custom_data": { 11 | "IMG_SFX": "5419329914142720", 12 | "STAGE": "base" 13 | } 14 | } 15 | ], 16 | "last_run_uuid": "238ce64e-cb7d-4c1b-38ff-3e0eb9e3939a" 17 | } 18 | -------------------------------------------------------------------------------- /ccia/fake_manifests/ubuntu Cache Image/manifest/cache_images/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"builds": [ 3 | { 4 | "name": "ubuntu", 5 | "builder_type": "googlecompute", 6 | "build_time": 1658176053, 7 | "files": null, 8 | "artifact_id": "ubuntu-c5419329914142720", 9 | "packer_run_uuid": "553ec5c0-7b09-e06d-2837-b272204696d1", 10 | "custom_data": { 11 | "IMG_SFX": "5419329914142720", 12 | "STAGE": "cache" 13 | } 14 | } 15 | ], 16 | "last_run_uuid": "553ec5c0-7b09-e06d-2837-b272204696d1" 17 | } 18 | -------------------------------------------------------------------------------- /orphanvms/README.md: -------------------------------------------------------------------------------- 1 | A container image to help identify possibly orphaned 2 | VM instances. Deliberately avoids producing any output 3 | if no instances are identified. 4 | 5 | * `GCPPROJECTS` - Whitespace separated Project IDs to check. 6 | * `GCPJSON` - Contents of the service-account JSON key file. N/B: Must have 7 | 'Compute Read' role for all listed `$GCPPROJECTS`. 8 | * `GCPNAME` - Complete Name (fake e-mail address) of the service account. 
9 | 10 | Example build (from repository root): 11 | 12 | ```bash 13 | make orphanvms IMG_SFX=example 14 | ``` 15 | -------------------------------------------------------------------------------- /ccia/fake_manifests/fedora-aws Base Image/manifest/base_images/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "builds": [ 3 | { 4 | "name": "fedora-aws", 5 | "builder_type": "amazon-ebs", 6 | "build_time": 1658175765, 7 | "files": null, 8 | "artifact_id": "us-east-1:ami-000448bd70242ba3c", 9 | "packer_run_uuid": "193dbe11-9c6b-e0b0-efc9-dc0e8fbf98dc", 10 | "custom_data": { 11 | "IMG_SFX": "5419329914142720", 12 | "STAGE": "base" 13 | } 14 | } 15 | ], 16 | "last_run_uuid": "193dbe11-9c6b-e0b0-efc9-dc0e8fbf98dc" 17 | } 18 | -------------------------------------------------------------------------------- /ccia/fake_manifests/build-push Cache Image/manifest/cache_images/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "builds": [ 3 | { 4 | "name": "build-push", 5 | "builder_type": "googlecompute", 6 | "build_time": 1658175996, 7 | "files": null, 8 | "artifact_id": "build-push-c5419329914142720", 9 | "packer_run_uuid": "250b8705-ce4d-7844-7181-f1181dd7e04c", 10 | "custom_data": { 11 | "IMG_SFX": "5419329914142720", 12 | "STAGE": "cache" 13 | } 14 | } 15 | ], 16 | "last_run_uuid": "250b8705-ce4d-7844-7181-f1181dd7e04c" 17 | } 18 | -------------------------------------------------------------------------------- /ccia/fake_manifests/fedora-aws Cache Image/manifest/cache_images/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "builds": [ 3 | { 4 | "name": "fedora-aws", 5 | "builder_type": "amazon-ebs", 6 | "build_time": 1658176592, 7 | "files": null, 8 | "artifact_id": "us-east-1:ami-0442ccd2bb66504b7", 9 | "packer_run_uuid": "df4c911b-80a3-27ee-a513-4b6e29c1c906", 10 | "custom_data": { 11 | "IMG_SFX": 
"5419329914142720", 12 | "STAGE": "cache" 13 | } 14 | } 15 | ], 16 | "last_run_uuid": "df4c911b-80a3-27ee-a513-4b6e29c1c906" 17 | } 18 | -------------------------------------------------------------------------------- /ci/shellcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is intended to be executed by humans or automation. 4 | # It simply provides a one-command way of executing shellcheck 5 | # in a uniform way 6 | 7 | set -e 8 | 9 | cd $(realpath $(dirname "$0")/../) 10 | shellcheck --color=always --format=tty \ 11 | --shell=bash --external-sources \ 12 | --enable add-default-case,avoid-nullary-conditions,check-unassigned-uppercase \ 13 | --exclude SC2046,SC2034,SC2090,SC2064 \ 14 | --wiki-link-count=0 --severity=warning \ 15 | ./*.sh ./*/*.sh 16 | 17 | echo "PASS" 18 | -------------------------------------------------------------------------------- /ccia/fake_manifests/fedora-aws-arm64 Base Image/manifest/base_images/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "builds": [ 3 | { 4 | "name": "fedora-aws-arm64", 5 | "builder_type": "amazon-ebs", 6 | "build_time": 1658175464, 7 | "files": null, 8 | "artifact_id": "us-east-1:ami-0f5f268182775a8c2", 9 | "packer_run_uuid": "e4a389da-e1dc-35db-ef32-361e890e4b30", 10 | "custom_data": { 11 | "IMG_SFX": "5419329914142720", 12 | "STAGE": "base" 13 | } 14 | } 15 | ], 16 | "last_run_uuid": "e4a389da-e1dc-35db-ef32-361e890e4b30" 17 | } 18 | -------------------------------------------------------------------------------- /ccia/fake_manifests/fedora-netavark Cache Image/manifest/cache_images/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "builds": [ 3 | { 4 | "name": "fedora-netavark", 5 | "builder_type": "googlecompute", 6 | "build_time": 1658176148, 7 | "files": null, 8 | "artifact_id": "fedora-netavark-c5419329914142720", 9 | 
"packer_run_uuid": "d95c8118-3970-4a73-d348-692a5a3371a3", 10 | "custom_data": { 11 | "IMG_SFX": "5419329914142720", 12 | "STAGE": "cache" 13 | } 14 | } 15 | ], 16 | "last_run_uuid": "d95c8118-3970-4a73-d348-692a5a3371a3" 17 | } 18 | -------------------------------------------------------------------------------- /base_images/fedora-cloud-init.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Initial cloud-init job (metadata service crawler) 3 | DefaultDependencies=no 4 | Wants=cloud-init-local.service 5 | After=cloud-init-local.service 6 | Wants=google-network-daemon.service 7 | After=google-network-daemon.service 8 | Before=systemd-user-sessions.service 9 | 10 | [Service] 11 | Type=oneshot 12 | ExecStart=/usr/bin/cloud-init init 13 | RemainAfterExit=yes 14 | TimeoutSec=0 15 | 16 | # Output needs to appear in instance console output 17 | StandardOutput=journal+console 18 | 19 | [Install] 20 | WantedBy=cloud-init.target 21 | -------------------------------------------------------------------------------- /ccia/fake_manifests/fedora-podman-aws-arm64 Cache Image/manifest/cache_images/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "builds": [ 3 | { 4 | "name": "fedora-podman-aws-arm64", 5 | "builder_type": "amazon-ebs", 6 | "build_time": 1658176346, 7 | "files": null, 8 | "artifact_id": "us-east-1:ami-051a5e8dad587bf22", 9 | "packer_run_uuid": "7e742dec-035c-6b95-8793-c464b2a6ac0f", 10 | "custom_data": { 11 | "IMG_SFX": "5419329914142720", 12 | "STAGE": "cache" 13 | } 14 | } 15 | ], 16 | "last_run_uuid": "7e742dec-035c-6b95-8793-c464b2a6ac0f" 17 | } 18 | -------------------------------------------------------------------------------- /ccia/fake_manifests/fedora-netavark-aws-arm64 Cache Image/manifest/cache_images/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "builds": [ 3 | { 4 | 
"name": "fedora-netavark-aws-arm64", 5 | "builder_type": "amazon-ebs", 6 | "build_time": 1658176335, 7 | "files": null, 8 | "artifact_id": "us-east-1:ami-07a339e76f84afa7b", 9 | "packer_run_uuid": "b8a09332-800a-09c2-ba0e-2564e6e52f76", 10 | "custom_data": { 11 | "IMG_SFX": "5419329914142720", 12 | "STAGE": "cache" 13 | } 14 | } 15 | ], 16 | "last_run_uuid": "b8a09332-800a-09c2-ba0e-2564e6e52f76" 17 | } 18 | -------------------------------------------------------------------------------- /ccia/fake_manifests/fedora Base Image/manifest/base_images/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "builds": [ 3 | { 4 | "name": "fedora", 5 | "builder_type": "qemu", 6 | "build_time": 1658175535, 7 | "files": [ 8 | { 9 | "name": "fedora-b5419329914142720", 10 | "size": 0 11 | } 12 | ], 13 | "artifact_id": "", 14 | "packer_run_uuid": "b1ec41bd-c395-45d6-96cf-f03a1ff2c894", 15 | "custom_data": { 16 | "IMG_SFX": "5419329914142720", 17 | "STAGE": "base" 18 | } 19 | } 20 | ], 21 | "last_run_uuid": "b1ec41bd-c395-45d6-96cf-f03a1ff2c894" 22 | } 23 | -------------------------------------------------------------------------------- /make-user-data.sh: -------------------------------------------------------------------------------- 1 | 2 | # This script is utilized by Makefile, it's not intended to be run by humans 3 | 4 | set -eo pipefail 5 | 6 | if [[ ! -r "cidata.ssh.pub" ]]; then 7 | echo "ERROR: Expectinbg to find the file $PWD/cidata.ssh.pub existing and readable. 
8 | " 9 | exit 1 10 | fi 11 | 12 | cat < user-data 13 | #cloud-config 14 | timezone: US/Central 15 | growpart: 16 | mode: auto 17 | disable_root: false 18 | ssh_pwauth: True 19 | ssh_import_id: [root] 20 | ssh_authorized_keys: 21 | - $(cat cidata.ssh.pub) 22 | users: 23 | - name: root 24 | primary-group: root 25 | homedir: /root 26 | system: true 27 | EOF 28 | -------------------------------------------------------------------------------- /skopeo_cidev/Containerfile: -------------------------------------------------------------------------------- 1 | ARG BASE_NAME=fedora 2 | ARG BASE_TAG=latest 3 | FROM ${BASE_NAME}:${BASE_TAG} 4 | 5 | # Required to keep perl & other tooling happy 6 | ENV LC_ALL="C" 7 | 8 | COPY /packages.txt /root/ 9 | RUN dnf -y update && \ 10 | dnf -y install $(sed -r -e '/^#/d' -e '/^$/d' /root/packages.txt) && \ 11 | dnf -y upgrade && \ 12 | dnf clean all 13 | 14 | ENV REG_REPO="https://github.com/docker/distribution.git" \ 15 | REG_COMMIT_SCHEMA1="ec87e9b6971d831f0eff752ddb54fb64693e51cd" \ 16 | OSO_REPO="https://github.com/openshift/origin.git" \ 17 | OSO_TAG="v1.5.0-alpha.3" 18 | 19 | COPY /setup.sh /root/ 20 | RUN bash /root/setup.sh 21 | -------------------------------------------------------------------------------- /imgts/README.md: -------------------------------------------------------------------------------- 1 | A container image for tracking automation metadata. 2 | This is used to update last-used timestamps on 3 | VM images to prevent them from being pruned. 4 | 5 | Required environment variables: 6 | * `GCPJSON` - Contents of the service-account JSON key file. 7 | * `GCPNAME` - Complete Name (fake e-mail address) of the service account. 8 | * `GCPPROJECT` - Project ID of the GCP project. 9 | * `IMGNAMES` - Whitespace separated list of image names to update. 10 | * `BUILDID` - Cirrus CI build ("job") ID number for auditing purposes. 11 | * `REPOREF` - Repository name that ran the build. 
12 | 13 | Example build (from repository root): 14 | 15 | ```bash 16 | make imgts IMG_SFX=example 17 | ``` 18 | -------------------------------------------------------------------------------- /imgobsolete/README.md: -------------------------------------------------------------------------------- 1 | A container image for maintaining the collection of 2 | VM images used by CI/CD on several projects. Acts upon 3 | metadata maintained by the `imgts` container. Images 4 | found to be disused, are marked obsolete (deprecated). 5 | A future process is responsible for pruning the obsolete 6 | images. This workflow provides for a recovery option 7 | should an image be erroneously obsoleted. 8 | 9 | * `GCPJSON` - Contents of the service-account JSON key file. 10 | * `GCPNAME` - Complete Name (fake e-mail address) of the service account. 11 | * `GCPPROJECT` - Project ID of the GCP project. 12 | 13 | Example build (from repository root): 14 | 15 | ```bash 16 | make imgobsolete IMG_SFX=example 17 | ``` 18 | -------------------------------------------------------------------------------- /skopeo_cidev/packages.txt: -------------------------------------------------------------------------------- 1 | # general deps 2 | docker-distribution 3 | git 4 | golang 5 | golang-github-cpuguy83-md2man 6 | make 7 | 8 | # Skopeo documentation building 9 | btrfs-progs-devel 10 | device-mapper-devel 11 | golang 12 | gpgme-devel 13 | make 14 | 15 | # storage deps 16 | btrfs-progs-devel 17 | device-mapper-devel 18 | 19 | # gpgme bindings deps 20 | libassuan-devel 21 | gnupg 22 | gpgme-devel 23 | 24 | # htpasswd for system tests 25 | httpd-tools 26 | 27 | # OpenShift deps 28 | bats 29 | bsdtar 30 | device-mapper 31 | docker 32 | e2fsprogs 33 | ethtool 34 | findutils 35 | golint 36 | hostname 37 | iproute 38 | iptables 39 | jq 40 | lsof 41 | nmap-ncat 42 | openssl 43 | podman 44 | runc 45 | socat 46 | tar 47 | tree 48 | util-linux 49 | wget 50 | which 51 | xfsprogs 52 | 
-------------------------------------------------------------------------------- /podman/setup.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | # This script is intended to be executed as part of the container 4 | # image build process. Using it under any other context is virtually 5 | # guarantied to cause you much pain and suffering. 6 | 7 | set -eo pipefail 8 | 9 | SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}") 10 | SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") 11 | REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") 12 | # shellcheck source=./lib.sh 13 | source "$REPO_DIRPATH/lib.sh" 14 | 15 | if [[ "$OS_RELEASE_ID" == "debian" ]]; then 16 | bash base_images/debian_base-setup.sh 17 | bash cache_images/debian_setup.sh 18 | elif [[ "$OS_RELEASE_ID" == "fedora" ]]; then 19 | bash base_images/fedora_base-setup.sh 20 | bash cache_images/fedora_setup.sh 21 | else 22 | die "Unknown/unsupported Distro '$OS_RELEASE_ID'" 23 | fi 24 | -------------------------------------------------------------------------------- /gcsupld/README.md: -------------------------------------------------------------------------------- 1 | A container image for uploading a file to Google Cloud Storage 2 | (GCS). It requires the caller to posess both a service-account 3 | credentials file, volume-mount the file to be uploaded, and 4 | provide the full destination URI. The `` must 5 | already exist, and `` may include a pseudo-path and/or 6 | object filename. 7 | 8 | Required environment variables: 9 | * `GCPJSON` - Contents of the service-account JSON key file. 10 | * `GCPNAME` - Complete Name (fake e-mail address) of the service account. 11 | * `GCPPROJECT` - Project ID of the GCP project. 12 | * `FROM_FILEPATH` - Full path to volume-mounted file to upload. 
13 | * `TO_GCSURI` - Destination URI in the format `gs:///` 14 | 15 | Example build (from repository root): 16 | 17 | ```bash 18 | make gcsupld IMG_SFX=example 19 | ``` 20 | -------------------------------------------------------------------------------- /.pre-commit-hooks.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Ref: https://pre-commit.com/#creating-new-hooks 4 | - id: check-imgsfx 5 | name: Check IMG_SFX for accidental reuse. 6 | description: | 7 | Every PR intended to produce CI VM or container images must update 8 | the `IMG_SFX` file via `make IMG_SFX`. The exact value will be 9 | validated against global suffix usage (encoded as tags on the 10 | `imgts` container image). This pre-commit hook verifies on every 11 | push, the IMG_SFX file's value has not been pushed previously. 12 | It's intended as a simple/imperfect way to save developers time 13 | by avoiding force-pushes that will most certainly fail validation. 14 | entry: ./check-imgsfx.sh 15 | language: system 16 | exclude: '.*' # Not examining any specific file/dir/link 17 | always_run: true # ignore no matching files 18 | fail_fast: true 19 | pass_filenames: false 20 | stages: ["pre-push"] 21 | -------------------------------------------------------------------------------- /get_ci_vm/good_repo_test/hack/get_ci_vm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # 4 | # This file is used by the integration testing scripts, 5 | # it should never be used under any other circumstance. 6 | # 7 | # get_ci_vm APIv1 container entrypoint calls into this script 8 | # to obtain required repo. specific configuration options. 
9 | if [[ "$1" == "--config" ]]; then 10 | cat < 27 | mkdir -p /tmp/artifacts 28 | podman run -it --rm -v /tmp/artifacts:/data -w /data ccia $BID --verbose 29 | ls -laR /tmp/artifacts 30 | ``` 31 | -------------------------------------------------------------------------------- /check-imgsfx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # 2024-01-25 esm 4 | # 2024-06-28 cevich 5 | # 6 | # This script is intended to be used by the `pre-commit` utility, or it may 7 | # be manually copied (or symlinked) as local `.git/hooks/pre-push` file. 8 | # It's purpose is to keep track of image-suffix values which have already 9 | # been pushed, to avoid them being immediately rejected by CI validation. 10 | # To use it with the `pre-commit` utility, simply add something like this 11 | # to your `.pre-commit-config.yaml`: 12 | # 13 | # --- 14 | # repos: 15 | # - repo: https://github.com/containers/automation_images.git 16 | # rev: 17 | # hooks: 18 | # - id: check-imgsfx 19 | 20 | set -eo pipefail 21 | 22 | # Ensure CWD is the repo root 23 | cd $(dirname "${BASH_SOURCE[0]}") 24 | imgsfx=$(&2 31 | echo "Please rerun 'make IMG_SFX'" >&2 32 | exit 1 33 | fi 34 | fi 35 | 36 | echo $imgsfx >>$imgsfx_history 37 | -------------------------------------------------------------------------------- /get_ci_vm/setup.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | # This script is intended to be executed as part of the container 4 | # image build process. Using it under any other context is virtually 5 | # guaranteed to cause you much pain and suffering. 
6 | 7 | set -xeo pipefail 8 | 9 | SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}") 10 | SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") 11 | REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") 12 | # shellcheck source=./lib.sh 13 | source "$REPO_DIRPATH/lib.sh" 14 | 15 | declare -a PKGS 16 | PKGS=( \ 17 | aws-cli 18 | coreutils 19 | curl 20 | gawk 21 | git 22 | jq 23 | openssh-client 24 | python3 25 | py3-yaml 26 | py3-pip 27 | ) 28 | 29 | apk update 30 | apk upgrade 31 | apk add --no-cache "${PKGS[@]}" 32 | rm -rf /var/cache/apk/* 33 | 34 | aws --version # Confirm that aws actually runs 35 | 36 | install_automation_tooling cirrus-ci_env 37 | 38 | # For testing updates from a personal branch, use something like this 39 | #TMPDIR=$(mktemp -d) 40 | #BRANCH=fix_osx_again 41 | #git clone -b $BRANCH https://github.com/cevich/automation.git "$TMPDIR" 42 | #env INSTALL_PREFIX=/usr/share $TMPDIR/bin/install_automation.sh 0.0.0 cirrus-ci_env 43 | #rm -rf "$TMPDIR" 44 | -------------------------------------------------------------------------------- /image_builder/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | # This script is called by packer on a vanilla CentOS VM, to setup the image 5 | # used for building images FROM base images. It's not intended to be used 6 | # outside of this context. 7 | 8 | set -e 9 | 10 | SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}") 11 | SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") 12 | REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") 13 | 14 | # Run as quickly as possible after boot 15 | # unless building a container 16 | ((CONTAINER)) || \ 17 | /bin/bash $REPO_DIRPATH/systemd_banish.sh 18 | 19 | # shellcheck source=./lib.sh 20 | source "$REPO_DIRPATH/lib.sh" 21 | 22 | PACKER_VERSION=$(bash $REPO_DIRPATH/get_packer_version.sh) 23 | $SUDO env PACKER_VERSION=$PACKER_VERSION \ 24 | /bin/bash "$SCRIPT_DIRPATH/install_packages.sh" 25 | 26 | # Unnecessary inside a container 27 | if ! 
((CONTAINER)); then 28 | $SUDO systemctl enable rngd 29 | 30 | # Enable nested-virt 31 | $SUDO tee /etc/modprobe.d/kvm-nested.conf < 2 | # Allow powershell scripts to execute from fs 3 | Set-ExecutionPolicy Unrestricted -Scope LocalMachine -Force -ErrorAction Ignore 4 | $ErrorActionPreference = "stop" 5 | 6 | # Remove any existing WinRM HTTP listener 7 | Remove-Item -Path WSMan:\Localhost\listener\listener* -Recurse 8 | 9 | # Create a self-signed certificate for https 10 | $Cert = New-SelfSignedCertificate -CertstoreLocation Cert:\LocalMachine\My -DnsName "packer" 11 | 12 | # Configure WinRM over https to allow packer to manage 13 | cmd.exe /c winrm set "winrm/config" '@{MaxTimeoutms="1800000"}' 14 | cmd.exe /c winrm set "winrm/config/service/auth" '@{Basic="true"}' 15 | cmd.exe /c winrm set "winrm/config/client/auth" '@{Basic="true"}' 16 | cmd.exe /c winrm create "winrm/config/listener?Address=*+Transport=HTTPS" "@{Port=`"5986`";Hostname=`"packer`";CertificateThumbprint=`"$($Cert.Thumbprint)`"}" 17 | cmd.exe /c netsh advfirewall firewall set rule group="remote administration" new enable=yes 18 | cmd.exe /c netsh firewall add portopening TCP 5986 "Port 5986" 19 | # Start and Enable WinRM after restarts (required as part of provisioning process) 20 | cmd.exe /c sc config winrm start= auto 21 | cmd.exe /c net start winrm 22 | 23 | -------------------------------------------------------------------------------- /ccia/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is intended to be executed in the ccia container by 4 | # Cirrus-CI. Any other usage or environment could lead to negative 5 | # outcomes. 
6 | 7 | set -eo pipefail 8 | 9 | SCRIPT_DIRPATH=$(dirname $(realpath "${BASH_SOURCE[0]}")) 10 | source $SCRIPT_DIRPATH/../lib.sh 11 | 12 | req_env_vars CIRRUS_CI CIRRUS_BUILD_ID CIRRUS_WORKING_DIR 13 | 14 | echo "Installing test tooling" 15 | ooe.sh microdnf install -y coreutils jq 16 | 17 | cd /tmp/ 18 | 19 | echo "Confirming current build task manifests can be downloaded." 20 | ( 21 | set -x 22 | # shellcheck disable=SC2154 23 | $CCIABIN --verbose $CIRRUS_BUILD_ID '.*/manifest.json' 24 | ) 25 | 26 | # It's possible the PR did not produce any manifest.json files 27 | if ! dled=$(find ./$CIRRUS_BUILD_ID -name manifest.json | wc -l) || ((dled==0)); then 28 | mkdir -p ./$CIRRUS_BUILD_ID 29 | cp -a $SCRIPT_DIRPATH/fake_manifests/* ./$CIRRUS_BUILD_ID 30 | fi 31 | 32 | echo "Confirming any downloaded manifests can be parsed into a build list" 33 | ( 34 | set -x 35 | cd /tmp 36 | find ./$CIRRUS_BUILD_ID -type f -name 'manifest.json' -print0 | \ 37 | xargs --null jq -e '.builds[]' | \ 38 | jq -e -s '.' | \ 39 | jq -e '{"builds": .}' 40 | ) 41 | -------------------------------------------------------------------------------- /ci/tag_latest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | if [[ -z "$CI" ]] || [[ "$CI" != "true" ]] || [[ -z "$IMG_SFX" ]]; then 6 | echo "This script is intended to be run by CI and nowhere else." 
7 | exit 1 8 | fi 9 | 10 | # This envar is set by the CI system 11 | # shellcheck disable=SC2154 12 | if [[ "$CIRRUS_CHANGE_MESSAGE" =~ .*CI:DOCS.* ]]; then 13 | echo "This script must never tag anything after a [CI:DOCS] PR merge" 14 | exit 0 15 | fi 16 | 17 | # Ensure no secrets leak via debugging var expansion 18 | set +x 19 | # This secret envar is set by the CI system 20 | # shellcheck disable=SC2154 21 | echo "$REG_PASSWORD" | \ 22 | skopeo login --password-stdin --username "$REG_USERNAME" "$REGPFX" 23 | 24 | declare -a imgnames 25 | imgnames=( imgts imgobsolete imgprune gcsupld get_ci_vm orphanvms ccia ) 26 | # A [CI:TOOLING] build doesn't produce CI VM images 27 | if [[ ! "$CIRRUS_CHANGE_MESSAGE" =~ .*CI:TOOLING.* ]]; then 28 | imgnames+=( skopeo_cidev fedora_podman prior-fedora_podman ) 29 | fi 30 | 31 | for imgname in "${imgnames[@]}"; do 32 | echo "##### Tagging $imgname -> latest" 33 | # IMG_SFX is defined by CI system 34 | # shellcheck disable=SC2154 35 | skopeo copy "docker://$REGPFX/$imgname:c${IMG_SFX}" "docker://$REGPFX/${imgname}:latest" 36 | done 37 | -------------------------------------------------------------------------------- /orphanvms/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script is set as, and intended to run as the `orphanvms` container's 4 | # entrypoint. 
It searches for active VM instances with an "old" `creation` 5 | # timestamp - where "old" is a completely arbitrary several days :) 6 | 7 | set -eo pipefail 8 | 9 | # shellcheck source=imgts/lib_entrypoint.sh 10 | source /usr/local/bin/lib_entrypoint.sh 11 | 12 | # set this to 1 to enable 13 | A_DEBUG="${A_DEBUG:-0}" 14 | if ((A_DEBUG)); then msg "Warning: Debugging is enabled"; fi 15 | 16 | req_env_vars GCPJSON GCPNAME GCPPROJECT GCPPROJECTS AWSINI 17 | 18 | NOW=$(date +%s) 19 | TOO_OLD='3 days ago' # Detect Friday Orphans on Monday 20 | EVERYTHING=${EVERYTHING:-0} # set to '1' for testing 21 | DRY_RUN=${DRY_RUN:-0} 22 | if ((EVERYTHING)); then 23 | DRY_RUN=1 24 | TOO_OLD="3 seconds ago" 25 | fi 26 | # Anything older than this is "too old" 27 | THRESHOLD=$(date --date="$TOO_OLD" --iso-8601=minute) 28 | 29 | dbg() { 30 | if ((A_DEBUG)); then 31 | ( 32 | echo 33 | # There's lots of looping going on in this script with left-justified output. 34 | # Offset debugging messages so they have more context. 35 | echo " ${1:-No debugging message given}" 36 | ) > /dev/stderr 37 | fi 38 | } 39 | 40 | # shellcheck source=orphanvms/gce 41 | . /usr/local/bin/_gce 42 | 43 | # shellcheck source=orphanvms/ec2 44 | . 
/usr/local/bin/_ec2 45 | -------------------------------------------------------------------------------- /imgts/Containerfile: -------------------------------------------------------------------------------- 1 | ARG CENTOS_STREAM_RELEASE=9 2 | FROM quay.io/centos/centos:stream${CENTOS_STREAM_RELEASE} 3 | 4 | # Only needed for installing build-time dependencies 5 | COPY /imgts/google-cloud-sdk.repo /etc/yum.repos.d/google-cloud-sdk.repo 6 | RUN dnf -y update && \ 7 | dnf -y install epel-release && \ 8 | dnf -y install python3 jq libxcrypt-compat && \ 9 | dnf -y install google-cloud-sdk && \ 10 | dnf clean all 11 | 12 | # https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html 13 | ARG AWSURL="https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" 14 | RUN dnf -y install unzip glibc groff-base less && \ 15 | dnf clean all && \ 16 | cd /tmp && \ 17 | curl --fail --location -O "${AWSURL}" && \ 18 | unzip awscli*.zip && \ 19 | ./aws/install -i /usr/local/share/aws-cli -b /usr/local/bin && \ 20 | rm -rf awscli*.zip ./aws 21 | 22 | # Env. vars set to "__unknown__" are required to be set by the caller; 23 | # Except, an AWSINI value is required if EC2IMGNAMES is non-empty. 24 | ENV GCPJSON="__unknown__" \ 25 | GCPNAME="__unknown__" \ 26 | GCPPROJECT="__unknown__" \ 27 | IMGNAMES="__unknown__" \ 28 | BUILDID="__unknown__" \ 29 | REPOREF="__unknown__" \ 30 | EC2IMGNAMES="" \ 31 | AWSINI="" 32 | 33 | COPY ["/imgts/entrypoint.sh", "/imgts/lib_entrypoint.sh", "/usr/local/bin/"] 34 | RUN chmod 755 /usr/local/bin/entrypoint.sh 35 | 36 | ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] 37 | -------------------------------------------------------------------------------- /gcsupld/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script is set as, and intended to run as the `gcsupld` container's 4 | # entrypoint. 
It simply authenticates to google cloud, then utilizes 5 | # google-cloud-sdk utility to upload the file specified by `$FROM_FILENAME` 6 | # to the bucket/object URI specified in `$TO_GCSURI`. 7 | 8 | set -eo pipefail 9 | 10 | # shellcheck source=imgts/lib_entrypoint.sh 11 | source /usr/local/bin/lib_entrypoint.sh 12 | 13 | req_env_vars GCPJSON GCPNAME GCPPROJECT FROM_FILEPATH TO_GCSURI 14 | 15 | # shellcheck disable=SC2154 16 | msg "Will upload '$FROM_FILEPATH' to '$TO_GCSURI'" 17 | 18 | # shellcheck disable=SC2154 19 | slash_count=$(tr -c -d '/'<<<"$TO_GCSURI" | wc -m) 20 | # shellcheck disable=SC2154 21 | if [[ ! -r "$FROM_FILEPATH" ]]; then 22 | die "Source file not found: $FROM_FILEPATH" 23 | elif [[ -L "$FROM_FILEPATH" ]]; then 24 | die "Source file must not be a symlink: $FROM_FILEPATH" 25 | elif [[ $slash_count -gt 3 ]]; then 26 | die "Subdirectories ($slash_count > 3) in destination filename not supported: $TO_GCSURI" 27 | fi 28 | 29 | gcloud_init 30 | 31 | # The -e option needed to avoid uploading "empty" files 32 | # The -c option needed to return error code on upload failure 33 | gsutil cp -c -e "$FROM_FILEPATH" "$TO_GCSURI" 34 | 35 | msg "Upload complete, file now available for download at:" 36 | # term. codes present in displayed URI will break testing 37 | echo " https://storage.googleapis.com/${TO_GCSURI:5}" 38 | -------------------------------------------------------------------------------- /cache_images/build-push_packaging.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is called from build-push_setup.sh by packer. It's not intended 4 | # to be used outside of those contexts. It assumes the lib.sh library has 5 | # already been sourced, and that all "ground-up" package-related activity 6 | # needs to be done, including repository setup and initial update. 
7 | 8 | set -e 9 | 10 | SCRIPT_FILEPATH=$(realpath "$0") 11 | SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") 12 | REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") 13 | 14 | # shellcheck source=./lib.sh 15 | source "$REPO_DIRPATH/lib.sh" 16 | 17 | # packer and/or a --build-arg define this envar value uniformly 18 | # for both VM and container image build workflows. 19 | req_env_vars PACKER_BUILD_NAME 20 | 21 | msg "Updating/Installing repos and packages for $OS_REL_VER" 22 | 23 | bigto ooe.sh $SUDO dnf update -y 24 | 25 | INSTALL_PACKAGES=(\ 26 | buildah 27 | git 28 | jq 29 | podman 30 | python3-pip 31 | qemu-user-static 32 | skopeo 33 | unzip 34 | ) 35 | 36 | echo "Installing general build/test dependencies" 37 | bigto $SUDO dnf install -y "${INSTALL_PACKAGES[@]}" 38 | 39 | # It was observed in F33, dnf install doesn't always get you the latest/greatest 40 | lilto $SUDO dnf update -y 41 | 42 | # Re-install would append to this, making a mess. 43 | $SUDO rm -f /etc/automation_environment 44 | # Re-install the latest version with the 'build-push' component 45 | install_automation_tooling latest build-push 46 | -------------------------------------------------------------------------------- /ccia/Containerfile: -------------------------------------------------------------------------------- 1 | ARG BASE_NAME=registry.fedoraproject.org/fedora-minimal 2 | # FIXME FIXME FIXME! 
2023-11-16: revert "38" to "latest" 3 | # ...38 is because as of this moment, latest is 39, which 4 | # has python-3.12, which causes something to barf: 5 | # aiohttp/_websocket.c:3744:45: error: ‘PyLongObject’ {aka ‘struct _longobject’} has no member named ‘ob_digit’ 6 | # Possible cause: https://github.com/cython/cython/issues/5238 7 | ARG BASE_TAG=38 8 | FROM ${BASE_NAME}:${BASE_TAG} as updated_base 9 | 10 | RUN microdnf upgrade -y && \ 11 | microdnf clean all 12 | 13 | ENV _RUNTIME_DEPS="bash python3" 14 | ENV _BUILD_DEPS="coreutils curl git python3 python3-pip python3-virtualenv python3-devel gcc g++" 15 | 16 | 17 | FROM updated_base as builder 18 | 19 | RUN microdnf install -y ${_RUNTIME_DEPS} ${_BUILD_DEPS} && \ 20 | export INSTALL_PREFIX=/usr/share && \ 21 | curl -sL \ 22 | https://raw.githubusercontent.com/containers/automation/main/bin/install_automation.sh | \ 23 | bash -s latest cirrus-ci_artifacts 24 | 25 | 26 | FROM updated_base as final 27 | 28 | RUN microdnf install -y ${_RUNTIME_DEPS} && \ 29 | microdnf clean all 30 | 31 | COPY --from=builder /usr/share/automation /usr/share/automation 32 | COPY --from=builder /etc/automation_environment /etc/automation_environment 33 | 34 | # Env. is used by test.sh script. 35 | ENV CCIABIN=/usr/share/automation/bin/cirrus-ci_artifacts 36 | ENTRYPOINT ["/usr/share/automation/bin/cirrus-ci_artifacts"] 37 | -------------------------------------------------------------------------------- /cache_images/rawhide_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is called by packer on the rawhide VM, to update and reboot using 4 | # the rawhide kernel. It's not intended to be used outside of this context. 
5 | 6 | set -e 7 | 8 | SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}") 9 | SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") 10 | REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") 11 | 12 | # shellcheck source=./lib.sh 13 | source "$REPO_DIRPATH/lib.sh" 14 | 15 | # packer and/or a --build-arg define this envar value uniformly 16 | # for both VM and container image build workflows. 17 | req_env_vars PACKER_BUILD_NAME 18 | 19 | warn "Upgrading Fedora '$OS_RELEASE_VER' to rawhide, this might break." 20 | # shellcheck disable=SC2154 21 | warn "If so, this script may be found in the repo. as '$SCRIPT_DIRPATH/$SCRIPT_FILENAME'." 22 | 23 | # Show what's happening 24 | set -x 25 | 26 | # Rawhide often has GPG issues, don't bother checking 27 | $SUDO sed -i -r -e 's/^gpgcheck=.+/gpgcheck=False/' /etc/dnf/dnf.conf 28 | $SUDO sed -i -r -e 's/^gpgcheck=.+/gpgcheck=0/' /etc/yum.repos.d/*.repo 29 | # Called as `dnf5` here to confirm "old" dnf has been replaced. 30 | $SUDO dnf5 -y distro-sync --releasever=rawhide --allowerasing 31 | $SUDO dnf5 upgrade -y 32 | 33 | # A shared fedora_packaging.sh script is called next that doesn't always support dnf5 34 | $SUDO ln -s $(type -P dnf5) /usr/local/bin/dnf 35 | 36 | # Packer will try to run 'cache_images/fedora_setup.sh' next, make sure the system 37 | # is actually running rawhide (and verify it boots). 38 | $SUDO reboot 39 | -------------------------------------------------------------------------------- /image_builder/install_packages.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | # This script is intended to be executed as part of the container 4 | # image build process. Using it under any other context is virtually 5 | # guarantied to cause you much pain and suffering. 
6 | 7 | set -eo pipefail 8 | 9 | SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}") 10 | SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") 11 | INST_PKGS_FP="$SCRIPT_DIRPATH/install_packages.txt" 12 | REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") 13 | 14 | # shellcheck source=./lib.sh 15 | source "$REPO_DIRPATH/lib.sh" 16 | 17 | [[ -r "$INST_PKGS_FP" ]] || \ 18 | die "Expecting to find a copy of the file $INST_PKGS_FP" 19 | 20 | # shellcheck disable=SC2154 21 | [[ -n "$PACKER_VERSION" ]] || \ 22 | die "Expecting a non-empty \$PACKER_VERSION value" 23 | 24 | dnf update -y 25 | dnf -y install epel-release 26 | # Allow erasing pre-installed curl-minimal package 27 | dnf install -y --allowerasing $(<"$INST_PKGS_FP") 28 | 29 | # As of 2024-04-24 installing the EPEL `awscli` package results in error: 30 | # nothing provides python3.9dist(docutils) >= 0.10 31 | # Grab the binary directly from amazon instead 32 | # https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html 33 | AWSURL="https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" 34 | cd /tmp 35 | curl --fail --location -O "${AWSURL}" 36 | # There's little reason to see every single file extracted 37 | unzip -q awscli*.zip 38 | ./aws/install -i /usr/local/share/aws-cli -b /usr/local/bin 39 | rm -rf awscli*.zip ./aws 40 | 41 | install_automation_tooling 42 | -------------------------------------------------------------------------------- /dot_pre-commit-config.yaml.example: -------------------------------------------------------------------------------- 1 | # See https://pre-commit.com for more information 2 | # See https://pre-commit.com/hooks.html for more hooks 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v4.6.0 6 | hooks: 7 | - id: trailing-whitespace 8 | - id: end-of-file-fixer 9 | - id: check-yaml 10 | - id: check-added-large-files 11 | - id: check-symlinks 12 | - id: mixed-line-ending 13 | - id: no-commit-to-branch 14 | args: [--branch, main] 15 | - repo: 
https://github.com/codespell-project/codespell 16 | rev: v2.3.0 17 | hooks: 18 | - id: codespell 19 | args: [--config, .codespellrc] 20 | - repo: https://github.com/jumanjihouse/pre-commit-hooks 21 | rev: 3.0.0 22 | hooks: 23 | - id: forbid-binary 24 | exclude: > 25 | (?x)^( 26 | get_ci_vm/good_repo_test/dot_git.tar.gz 27 | )$ 28 | - id: script-must-have-extension 29 | - id: shellcheck 30 | # These come from ci/shellcheck.sh 31 | args: 32 | - --color=always 33 | - --format=tty 34 | - --shell=bash 35 | - --external-sources 36 | - --enable=add-default-case,avoid-nullary-conditions,check-unassigned-uppercase 37 | - --exclude=SC2046,SC2034,SC2090,SC2064 38 | - --wiki-link-count=0 39 | - --severity=warning 40 | - repo: https://github.com/containers/automation_images.git 41 | rev: 2e5a2acfe21cc4b13511b453733b8875e592ad9c 42 | hooks: 43 | - id: check-imgsfx 44 | -------------------------------------------------------------------------------- /get_ci_vm/good_repo_test/uninit_gcloud.output: -------------------------------------------------------------------------------- 1 | Mock Google Cloud SDK [0.0.0] 2 | 3 | Platform: [Linux, x86_64] uname_result(system='Linux', node='6365688aa3d9', release='5.10.11-100.fc32.x86_64', version='#1 SMP Wed Jan 27 15:20:29 UTC 2021', machine='x86_64', processor='') 4 | Locale: (None, None) 5 | Python Version: [3.8.8 (default, Mar 15 2021, 13:10:14) [GCC 10.2.1 20201203]] 6 | Python Location: [/usr/bin/python3] 7 | Site Packages: [Disabled] 8 | 9 | Installation Root: [/google-cloud-sdk] 10 | Installed Components: 11 | bq: [2.0.65] 12 | gsutil: [4.60] 13 | core: [2021.03.19] 14 | System PATH: [/google-cloud-sdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin] 15 | Python PATH: [/google-cloud-sdk/lib/third_party:/google-cloud-sdk/lib:/usr/lib/python38.zip:/usr/lib/python3.8:/usr/lib/python3.8/lib-dynload] 16 | Cloud SDK on PATH: [True] 17 | Kubectl on PATH: [False] 18 | 19 | Installation Properties: 
[/google-cloud-sdk/properties] 20 | User Config Directory: [/root/.config/gcloud] 21 | Active Configuration Name: [default] 22 | Active Configuration Path: [/root/.config/gcloud/configurations/config_default] 23 | 24 | Account: [None] 25 | Project: [None] 26 | 27 | Current Properties: 28 | [component_manager] 29 | disable_update_check: [true] 30 | [core] 31 | disable_usage_reporting: [true] 32 | [metrics] 33 | environment: [github_docker_image] 34 | 35 | Logs Directory: [/root/.config/gcloud/logs] 36 | Last Log File: [/root/.config/gcloud/logs/2021.03.23/16.31.00.068510.log] 37 | 38 | git: [git version 2.30.2] 39 | ssh: [OpenSSH_8.4p1, OpenSSL 1.1.1k 25 Mar 2021] 40 | -------------------------------------------------------------------------------- /gcsupld/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is only intended to be executed by Cirrus-CI in order 4 | # to test the functionality of the freshly built gcsupld container. 5 | # Any other usage is unlikely to function properly. 6 | 7 | #Note: Assumed these are set properly in .cirrus.yml- $GCPJSON $GCPNAME $GCPPROJECT 8 | 9 | set -eo pipefail 10 | 11 | SCRIPT_FILEPATH=$(realpath "$0") 12 | SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") 13 | REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") 14 | 15 | # shellcheck source=./lib.sh 16 | source "$REPO_DIRPATH/lib.sh" 17 | 18 | # Guarantee the filename is unique per-test-run to prevent 19 | # any clashes. Assume the bucket holding the test files will 20 | # prune them regularly. 
21 | # shellcheck disable=SC2154 22 | FROM_FILEPATH="testfile_${IMG_SFX}" 23 | TO_GCSURI="gs://libpod-pr-releases/${FROM_FILEPATH}" 24 | 25 | echo "Creating test-data file" 26 | expected=$(date --iso-8601=seconds) 27 | echo "$expected" > $FROM_FILEPATH 28 | 29 | echo "Executing gcsupld entrypoint script" 30 | output=$(/usr/local/bin/entrypoint.sh |& tee /dev/stderr) 31 | echo "(exit $?)" 32 | 33 | echo "Confirming gsutil reported 'Operation Completed'" 34 | grep -iq 'operation completed'<<<"$output" 35 | 36 | echo "Confirming the URL to download the file was output by entrypoint script" 37 | actual_uri=$(tail -3<<<"$output" | grep -Exo -m 1 '\s+https://.+' | tr -d '[:blank:]' ) 38 | test -n "$actual_uri" 39 | 40 | echo "Downloading contents of '$actual_uri'" 41 | actual=$(curl --silent --location --fail "$actual_uri") 42 | 43 | echo "Confirming downloaded data matches expectations" 44 | set -x 45 | test "$expected" == "$actual" 46 | -------------------------------------------------------------------------------- /win_images/win-lib.ps1: -------------------------------------------------------------------------------- 1 | 2 | $ErrorActionPreference = "stop" 3 | 4 | Set-ExecutionPolicy Bypass -Scope Process -Force 5 | 6 | function Check-Exit { 7 | param( 8 | [parameter(ValueFromRemainingArguments = $true)] 9 | [string[]] $codes = @(0) 10 | ) 11 | if ($LASTEXITCODE -eq $null) { 12 | return 13 | } 14 | 15 | foreach ($code in $codes) { 16 | if ($LASTEXITCODE -eq $code) { 17 | return 18 | } 19 | } 20 | 21 | Exit $LASTEXITCODE 22 | } 23 | 24 | # Retry installation on failure or 5-minute timeout (for all packages) 25 | function retryInstall { 26 | param([Parameter(ValueFromRemainingArguments)] [string[]] $pkgs) 27 | 28 | foreach ($pkg in $pkgs) { 29 | for ($retries = 0; ; $retries++) { 30 | if ($retries -gt 5) { 31 | throw "Could not install package $pkg" 32 | } 33 | 34 | if ($pkg -match '(.[^\@]+)@(.+)') { 35 | $pkg = @("--version", $Matches.2, $Matches.1) 36 | } 37 | 38 | 
# Chocolatey best practices as of 2024-04: 39 | # https://docs.chocolatey.org/en-us/choco/commands/#scripting-integration-best-practices-style-guide 40 | # Some of those are suboptimal, e.g., using "upgrade" to mean "install", 41 | # hardcoding a specific API URL. We choose to reject those. 42 | choco install $pkg -y --allow-downgrade --execution-timeout=300 43 | if ($LASTEXITCODE -eq 0) { 44 | break 45 | } 46 | Write-Host "Error installing, waiting before retry..." 47 | Start-Sleep -Seconds 6 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /ci/make.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eo pipefail 4 | 5 | # This script is intended to be used by Cirrus-CI, from the VM 6 | # built by the 'image_builder' Makefile target in this repo. 7 | # It's purpose is simply to verify & configure the runtime 8 | # environment from data provided by CI, and call the make 9 | # with the first argument passed to this script. 10 | 11 | SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}") 12 | SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") 13 | REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") 14 | 15 | # shellcheck source=./lib.sh 16 | source "$REPO_DIRPATH/lib.sh" 17 | 18 | # shellcheck disable=SC2154 19 | if [[ -z "$CI" ]] || [[ "$CI" != "true" ]] || [[ "$CIRRUS_CI" != "$CI" ]]; then 20 | die "Unexpected \$CI=$CI and/or \$CIRRUS_CI=$CIRRUS_CI" 21 | elif [[ -z "$IMG_SFX" ]] || [[ -z "$PACKER_BUILDS" ]]; then 22 | die "Required non-empty values for \$IMG_SFX=$IMG_SFX and \$PACKER_BUILDS=$PACKER_BUILDS" 23 | elif [[ -z "$1" ]]; then 24 | die "Build stage name is required as the first argument" 25 | fi 26 | 27 | if skip_on_pr_label; then 28 | exit 0 # skip build 29 | fi 30 | 31 | set_gac_filepath 32 | 33 | # Not all builds need this. 
34 | if [[ -n "$AWS_INI" ]]; then 35 | set_aws_filepath 36 | fi 37 | 38 | id 39 | # FIXME: ssh-keygen seems to fail to create keys with Permission denied 40 | # in the base_images make target, I have no idea why but all CI jobs are 41 | # broken because of this. Let's try without selinux. 42 | if [[ "$(getenforce)" == "Enforcing" ]]; then 43 | setenforce 0 44 | fi 45 | 46 | set -x 47 | cd "$REPO_DIRPATH" 48 | export IMG_SFX=$IMG_SFX 49 | export PACKER_BUILDS=$PACKER_BUILDS 50 | make ${1} 51 | -------------------------------------------------------------------------------- /get_ci_vm/good_repo_test_v2/hack/get_ci_vm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # 4 | # This file is used by the integration testing scripts, 5 | # it should never be used under any other circumstance. 6 | 7 | set -eu 8 | 9 | in_get_ci_vm() { 10 | # shellcheck disable=SC2154 11 | if ((GET_CI_VM==0)); then 12 | echo "Error: $1 is not intended for use in this context" 13 | exit 2 14 | fi 15 | } 16 | 17 | # get_ci_vm APIv1 container entrypoint calls into this script 18 | # to obtain required repo. specific configuration options. 19 | if [[ "$1" == "--config" ]]; then 20 | case "$GET_CI_VM" in 21 | 1) 22 | cat < $OUTPUT 28 | 29 | # Ref: https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#deprecating_an_image 30 | $GCLOUD compute instances list --project=$gcpproject --format="$FORMAT" --filter="$FILTER" | \ 31 | while read name lastStartTimestamp labels 32 | do 33 | dbg "VM $name started $lastStartTimestamp labeled $labels" 34 | if [[ -z "$name" ]] || [[ -z "$lastStartTimestamp" ]]; then 35 | dbg "IGNORING EMPTY NAME OR TIMESTAMP" 36 | continue 37 | fi 38 | started_at=$(date --date=$lastStartTimestamp +%s) 39 | age_days=$((($NOW - $started_at) / (60 * 60 * 24))) 40 | # running in a child-process, must buffer into file. 
41 | line="* VM $name running $age_days days" 42 | if [[ -n "$labels" ]]; then 43 | line+=" with labels '$labels'" 44 | fi 45 | dbg "FLAGGING VM AS ORPHANED" 46 | echo "$line" >> $OUTPUT 47 | done 48 | 49 | if [[ $(wc -l $OUTPUT | awk '{print $1}') -gt 1 ]]; then 50 | dbg "The following will be part of a notification e-mail for ($gcpproject):" 51 | cat $OUTPUT 52 | fi 53 | done 54 | -------------------------------------------------------------------------------- /get_fedora_url.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is intended to be called by the Makefile, not by 4 | # humans. This implies certain otherwise "odd" behaviors, such 5 | # as exiting with no std-output if there was an error. It expects 6 | # to be called with three arguments: 7 | # 1. The type of url to retrieve, `image` or `checksum`. 8 | # 2. The architecture, `x86_64` or `aarch64` 9 | # 3. The Fedora release, 'rawhide' or a release number. 
10 | 11 | set -eo pipefail 12 | 13 | URL_BASE="https://dl.fedoraproject.org/pub/fedora/linux" 14 | CURL="curl --location --silent --fail --show-error" 15 | 16 | url_type="$1" 17 | arch_name="$2" 18 | fed_rel="$3" 19 | 20 | die() { echo "ERROR: ${1:-No error message provided}" > /dev/stderr; exit 1; } 21 | 22 | msg() { echo "${1:-No error message provided}" > /dev/stderr; } 23 | 24 | usage_sfx=" " 25 | 26 | [[ "$#" -eq 3 ]] || \ 27 | die "Expecting exactly 3 arguments: $usage_sfx" 28 | 29 | tmpfile=$(mktemp -p '' tmp.$(basename ${BASH_SOURCE[0]}).XXXX) 30 | trap "rm -f $tmpfile" EXIT 31 | 32 | stage_tree="development" 33 | if [[ "$fed_rel" != "rawhide" ]] && \ 34 | $CURL "${URL_BASE}/releases/$fed_rel" &>/dev/null 35 | then 36 | stage_tree="releases" 37 | fi 38 | 39 | cloud_download_url="${URL_BASE}/$stage_tree/$fed_rel/Cloud/$arch_name/images" 40 | dbg_msg_sfx="'$arch_name' arch Fedora '$fed_rel' release '$url_type' from '$cloud_download_url'" 41 | 42 | # Show usage again to help catch argument order / spelling mistakes. 43 | $CURL -o "$tmpfile" "$cloud_download_url" || \ 44 | die "Fetching download listing for $dbg_msg_sfx. 45 | Was argument form valid: $usage_sfx" 46 | 47 | targets=$(sed -ne 's/^.*href=\"\(fedora[^\"]\+\)\".*$/\1/ip' <$tmpfile) 48 | targets_oneline=$(tr -s '[:blank:]' ' '<<<"$targets") 49 | [[ -n "$targets" ]] || \ 50 | die "Did not find any fedora targets: $dbg_msg_sfx" 51 | 52 | # Sometimes "rawhide" is spelled "Rawhide" 53 | by_release=$(grep -iw "$fed_rel" <<<"$targets" || true) 54 | [[ -n "$by_release" ]] || \ 55 | die "Did not find target among '$targets_oneline)': $dbg_msg_sfx" 56 | 57 | by_arch=$(grep -iw "$arch_name" <<<"$by_release" || true) 58 | [[ -n "$by_arch" ]] || \ 59 | die "Did not find arch among $by_release" 60 | 61 | if [[ "$url_type" == "image" ]]; then 62 | extension=qcow2 63 | elif [[ "$url_type" == "checksum" ]]; then 64 | extension=CHECKSUM 65 | else 66 | die "Unknown/unsupported url type: '$url_type'." 
67 | fi 68 | 69 | # Support both '.CHECKSUM' and '-CHECKSUM' at the end 70 | filename=$(grep -E -i -m 1 -- "$extension$" <<<"$by_arch" || true) 71 | [[ -n "$filename" ]] || \ 72 | die "No '$extension' targets among $by_arch" 73 | 74 | echo "$cloud_download_url/$filename" 75 | -------------------------------------------------------------------------------- /.github/renovate.json5: -------------------------------------------------------------------------------- 1 | /* 2 | Renovate is a service similar to GitHub Dependabot. 3 | 4 | Please Manually validate any changes to this file with: 5 | 6 | podman run -it \ 7 | -v ./.github/renovate.json5:/usr/src/app/renovate.json5:z \ 8 | ghcr.io/renovatebot/renovate:latest \ 9 | renovate-config-validator 10 | 11 | Configuration Reference: 12 | https://docs.renovatebot.com/configuration-options/ 13 | 14 | Monitoring Dashboard: 15 | https://app.renovatebot.com/dashboard#github/containers 16 | 17 | Note: The Renovate bot will create/manage its business on 18 | branches named 'renovate/*'. The only copy of this 19 | file that matters is the one on the `main` branch. 20 | */ 21 | 22 | { 23 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 24 | 25 | /************************************************* 26 | ****** Global/general configuration options ***** 27 | *************************************************/ 28 | 29 | // Re-use predefined sets of configuration options to DRY 30 | "extends": [ 31 | // https://github.com/containers/automation/blob/main/renovate/defaults.json5 32 | "github>containers/automation//renovate/defaults.json5", 33 | 34 | // This repo builds images, don't try to manage them. 35 | "docker:disable" 36 | ], 37 | 38 | // Don't build CI VM images for dep. 
update PRs (by default) 39 | "commitMessagePrefix": "[CI:DOCS]", 40 | 41 | "customManagers": [ 42 | // Manage updates to the common automation library version 43 | { 44 | "customType": "regex", 45 | "fileMatch": "^lib.sh$", 46 | "matchStrings": ["INSTALL_AUTOMATION_VERSION=\"(?.+)\""], 47 | "depNameTemplate": "containers/automation", 48 | "datasourceTemplate": "github-tags", 49 | "versioningTemplate": "semver-coerced", 50 | // "v" included in tag, but should not be used in lib.sh 51 | "extractVersionTemplate": "^v(?.+)$" 52 | } 53 | ], 54 | 55 | // N/B: LAST MATCHING RULE WINS, match statems are ANDed together. 56 | "packageRules": [ 57 | // When automation library version updated, full CI VM image build 58 | // is needed, along with some other overrides not required in 59 | // (for example) github-action updates. 60 | { 61 | "matchManagers": ["custom.regex"], 62 | "matchFileNames": ["lib.sh"], 63 | "schedule": ["at any time"], 64 | "commitMessagePrefix": null, 65 | "draftPR": true, 66 | "prBodyNotes": [ 67 | "\ 68 | {{#if isMajor}}\ 69 | :warning: Changes are **likely** required for build-scripts and/or downstream CI VM \ 70 | image users. Please check very carefully. :warning:\ 71 | {{else}}\ 72 | :warning: Changes may be required for build-scripts and/or downstream CI VM \ 73 | image users. Please double-check. :warning:\ 74 | {{/if}}" 75 | ] 76 | } 77 | ] 78 | } 79 | -------------------------------------------------------------------------------- /skopeo_cidev/setup.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | # This script is used by the Containerfile when building an image. 4 | # It should NEVER ever (EVER!) be used under any other circumstances 5 | # (nor set as executable). 
6 | 7 | set -e 8 | 9 | declare -a req_vars 10 | req_vars=(\ 11 | REG_REPO 12 | REG_COMMIT_SCHEMA1 13 | OSO_REPO 14 | OSO_TAG 15 | ) 16 | for req_var in "${req_vars[@]}"; do 17 | if [[ -z "${!req_var}" ]]; then 18 | echo "ERROR: Required variable \$$req_var is unset or empty." 19 | exit 1 20 | fi 21 | done 22 | 23 | GOPATH=$(mktemp -d -p '' ".tmp_$(basename ${BASH_SOURCE[0]})_XXXXXXXX") 24 | REG_GOSRC="$GOPATH/src/github.com/docker/distribution" \ 25 | OSO_GOSRC="$GOPATH/src/github.com/openshift/origin" 26 | 27 | # All golang code built here pre-dates support of go modules 28 | export GO111MODULE=off 29 | 30 | # Workaround unnecessary swap-enabling shenanagains in openshift-origin build 31 | export OS_BUILD_SWAP_DISABLE=1 32 | 33 | # Make debugging easier 34 | set -x 35 | 36 | # This comes in from the Containerfile 37 | # shellcheck disable=SC2154 38 | git clone "$REG_REPO" "$REG_GOSRC" 39 | cd "$REG_GOSRC" 40 | 41 | # Don't pollute the environment 42 | ( 43 | # This is required to be set like this by the build system 44 | export GOPATH="$PWD/Godeps/_workspace:$GOPATH" 45 | # This comes in from the Containerfile 46 | # shellcheck disable=SC2154 47 | git checkout -q "$REG_COMMIT_SCHEMA1" 48 | go build -o /usr/local/bin/registry-v2-schema1 \ 49 | github.com/docker/distribution/cmd/registry 50 | ) 51 | 52 | # These come in from the Containerfile 53 | # shellcheck disable=SC2154 54 | git clone --depth 1 -b "$OSO_TAG" "$OSO_REPO" "$OSO_GOSRC" 55 | cd "$OSO_GOSRC" 56 | 57 | # Edit out a "go < 1.5" check which works incorrectly with go >= 1.10. 58 | sed -i -e 's/\[\[ "\${go_version\[2]}" < "go1.5" ]]/false/' ./hack/common.sh 59 | 60 | # Fix a bug in 'options' line processing of resolv.conf when an option is 61 | # 8 characters long. This can happen if/when systemd-resolved adds 'trust-ad'. 
62 | sed -i '/== "attempts:"/s/ 8 / 9 /' vendor/github.com/miekg/dns/clientconfig.go 63 | 64 | # Backport https://github.com/ugorji/go/commit/8286c2dc986535d23e3fad8d3e816b9dd1e5aea6 65 | # Go ≥ 1.22 panics with a base64 encoding using duplicated characters. 66 | sed -i -e 's,"encoding/base64","encoding/base32", ; s,base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__"),base32.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef"),' vendor/github.com/ugorji/go/codec/gen.go 67 | 68 | make build 69 | make all WHAT=cmd/dockerregistry 70 | cp -a ./_output/local/bin/linux/*/* /usr/local/bin/ 71 | cp ./images/dockerregistry/config.yml /atomic-registry-config.yml 72 | mkdir /registry 73 | 74 | # When script unsuccessful, leave this behind for debugging 75 | # Removing these two items _significantly_ reduces the image size. 76 | rm -rf $GOPATH $(go env GOCACHE) 77 | -------------------------------------------------------------------------------- /base_images/debian_base-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is intended to be run by packer, inside a Debian VM. 4 | # Its purpose is to configure the VM for importing into google cloud, 5 | # so that it will boot in GCE and be accessible for further use. 6 | 7 | set -eo pipefail 8 | 9 | SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}") 10 | SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") 11 | REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") 12 | 13 | # Run as quickly as possible after boot 14 | /bin/bash $REPO_DIRPATH/systemd_banish.sh 15 | 16 | # shellcheck source=./lib.sh 17 | source "$REPO_DIRPATH/lib.sh" 18 | 19 | # Cloud-networking in general can sometimes be flaky. 20 | # Increase Apt's tolerance levels. 
21 | cat << EOF | $SUDO tee -a /etc/apt/apt.conf.d/99timeouts 22 | // Added during CI VM image build 23 | Acquire::Retries "3"; 24 | Acquire::http::timeout "300"; 25 | Acquire::https::timeout "300"; 26 | EOF 27 | 28 | echo "Switch sources to Debian Unstable (SID)" 29 | cat << EOF | $SUDO tee /etc/apt/sources.list 30 | deb http://deb.debian.org/debian/ unstable main 31 | deb-src http://deb.debian.org/debian/ unstable main 32 | EOF 33 | 34 | declare -a PKGS 35 | PKGS=( \ 36 | coreutils 37 | curl 38 | cloud-init 39 | gawk 40 | openssh-client 41 | openssh-server 42 | rng-tools5 43 | software-properties-common 44 | ) 45 | 46 | echo "Updating package source lists" 47 | ( set -x; $SUDO apt-get -q -y update; ) 48 | 49 | # Only deps for automation tooling 50 | ( set -x; $SUDO apt-get -q -y install git ) 51 | install_automation_tooling 52 | # Ensure automation library is loaded 53 | source "$REPO_DIRPATH/lib.sh" 54 | 55 | # Workaround 12->13 forward-incompatible change in grub scripts. 56 | # Without this, updating to the SID kernel may fail. 57 | echo "Upgrading grub-common" 58 | ( set -x; $SUDO apt-get -q -y upgrade grub-common; ) 59 | 60 | echo "Upgrading to SID" 61 | ( set -x; $SUDO apt-get -q -y full-upgrade; ) 62 | echo "Installing basic, necessary packages." 63 | ( set -x; $SUDO apt-get -q -y install "${PKGS[@]}"; ) 64 | 65 | # compatibility / usefullness of all automated scripting (which is bash-centric) 66 | ( set -x; $SUDO DEBCONF_DB_OVERRIDE='File{'$SCRIPT_DIRPATH/no_dash.dat'}' \ 67 | dpkg-reconfigure dash; ) 68 | 69 | # Ref: https://wiki.debian.org/DebianReleases 70 | # CI automation needs an OS version/release number for a variety of uses. 71 | # However, After switching to Unstable/SID, the value from the usual source 72 | # is not available. Simply use the value passed through packer by the Makefile. 73 | req_env_vars DEBIAN_RELEASE 74 | # shellcheck disable=SC2154 75 | warn "Setting '$DEBIAN_RELEASE' as the release number for CI-automation purposes." 
76 | ( set -x; echo "VERSION_ID=\"$DEBIAN_RELEASE\"" | \ 77 | $SUDO tee -a /etc/os-release; ) 78 | 79 | if ! ((CONTAINER)); then 80 | custom_cloud_init 81 | ( set -x; $SUDO systemctl enable rngd; ) 82 | 83 | # Cloud-config fails to enable this for some reason or another 84 | ( set -x; $SUDO sed -i -r \ 85 | -e 's/^PermitRootLogin no/PermitRootLogin prohibit-password/' \ 86 | /etc/ssh/sshd_config; ) 87 | fi 88 | 89 | finalize 90 | -------------------------------------------------------------------------------- /cache_images/fedora-netavark_packaging.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is called from fedora_setup.sh and various Dockerfiles. 4 | # It's not intended to be used outside of those contexts. It assumes the lib.sh 5 | # library has already been sourced, and that all "ground-up" package-related activity 6 | # needs to be done, including repository setup and initial update. 7 | 8 | set -e 9 | 10 | SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}") 11 | SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") 12 | REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") 13 | 14 | # shellcheck source=./lib.sh 15 | source "$REPO_DIRPATH/lib.sh" 16 | 17 | msg "Updating/Installing repos and packages for $OS_REL_VER" 18 | 19 | bigto ooe.sh $SUDO dnf update -y 20 | 21 | INSTALL_PACKAGES=(\ 22 | automake 23 | bats 24 | bind-utils 25 | bridge-utils 26 | btrfs-progs-devel 27 | bzip2 28 | conntrack-tools 29 | curl 30 | dbus-daemon 31 | dnsmasq 32 | findutils 33 | firewalld 34 | gcc 35 | gcc-c++ 36 | git 37 | golang 38 | gpgme-devel 39 | gzip 40 | hostname 41 | iproute 42 | iptables 43 | iputils 44 | jq 45 | kernel-devel 46 | kernel-modules 47 | libassuan-devel 48 | libseccomp-devel 49 | make 50 | nftables 51 | nmap-ncat 52 | openssl 53 | openssl-devel 54 | podman 55 | policycoreutils 56 | protobuf-devel 57 | rsync 58 | sed 59 | socat 60 | systemd-devel 61 | tar 62 | time 63 | wireguard-tools 64 | xz 65 | zip 
66 | ) 67 | 68 | EXARG="--exclude=cargo --exclude=rust" 69 | 70 | msg "Installing general build/test dependencies" 71 | bigto $SUDO dnf install -y $EXARG "${INSTALL_PACKAGES[@]}" 72 | 73 | # It was observed in F33, dnf install doesn't always get you the latest/greatest. 74 | lilto $SUDO dnf update -y $EXARG 75 | 76 | msg "Initializing upstream rust environment." 77 | export CARGO_HOME="/var/cache/cargo" # must match .cirrus.yml in netavark repo 78 | $SUDO mkdir -p $CARGO_HOME 79 | # Lock onto the stable toolchain for this image build 80 | export RUSTUP_TOOLCHAIN=stable 81 | # CI Runtime takes care of recovering $CARGO_HOME/env 82 | curl https://sh.rustup.rs -sSf | \ 83 | $SUDO env RUSTUP_TOOLCHAIN=$RUSTUP_TOOLCHAIN CARGO_HOME=$CARGO_HOME \ 84 | sh -s -- -y -v 85 | # need PATH updated so SUDO can find 'rustup' binary 86 | . $CARGO_HOME/env 87 | $SUDO env PATH=$PATH CARGO_HOME=$CARGO_HOME rustup default stable 88 | if [[ $(uname -m) == "aarch64" ]]; then 89 | $SUDO env PATH=$PATH CARGO_HOME=$CARGO_HOME rustup target add aarch64-unknown-linux-gnu 90 | fi 91 | 92 | msg "Install tool to generate man pages" 93 | $SUDO go install github.com/cpuguy83/go-md2man/v2@latest 94 | $SUDO install /root/go/bin/go-md2man /usr/local/bin/ 95 | 96 | # Downstream users of this image are specifically testing netavark & aardvark-dns 97 | # code changes. We want to start with using the RPMs because they deal with any 98 | # dependency issues. However, we don't actually want the binaries present on 99 | # the system, because: 100 | # 1) They will be compiled from source at runtime 101 | # 2) The file locations may change 102 | # 3) We never want testing ambiguity WRT which binary is under test. 
103 | msg "Clobbering netavark & aardvark RPM files" 104 | remove_netavark_aardvark_files 105 | -------------------------------------------------------------------------------- /systemd_banish.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This is intended to be executed stand-alone, on a Fedora or Debian VM 4 | # by automation. Alternatively, it may be executed with the '--list' 5 | # option to return the list of systemd units defined for disablement 6 | # (useful for testing). 7 | 8 | set +e # Not all of these exist on every platform 9 | 10 | # Setting noninteractive is critical, apt-get can hang w/o it. 11 | if [[ "$UID" -ne 0 ]]; then 12 | export SUDO="sudo env DEBIAN_FRONTEND=noninteractive" 13 | fi 14 | 15 | EVIL_UNITS="cron crond atd apt-daily-upgrade apt-daily fstrim motd-news systemd-tmpfiles-clean update-notifier-download mlocate-updatedb plocate-updatedb" 16 | 17 | if [[ "$1" == "--list" ]] 18 | then 19 | echo "$EVIL_UNITS" 20 | exit 0 21 | fi 22 | 23 | echo "Disabling periodic services that could destabilize automation:" 24 | for unit in $EVIL_UNITS 25 | do 26 | echo "Banishing $unit (ignoring errors)" 27 | ( 28 | $SUDO systemctl stop $unit 29 | $SUDO systemctl disable $unit 30 | $SUDO systemctl disable $unit.timer 31 | $SUDO systemctl mask $unit 32 | $SUDO systemctl mask $unit.timer 33 | ) &> /dev/null 34 | done 35 | 36 | # Sigh, for Debian the above isn't enough. There are also periodic apt jobs. 37 | EAAD="/etc/apt/apt.conf.d" 38 | PERIODIC_APT_RE='^(APT::Periodic::.+")1"\;' 39 | if [[ -d "$EAAD" ]]; then 40 | echo "Disabling all periodic packaging activity" 41 | for filename in $($SUDO ls -1 $EAAD); do \ 42 | echo "Checking/Patching $filename" 43 | $SUDO sed -i -r -e "s/$PERIODIC_APT_RE/"'\10"\;/' "$EAAD/$filename"; done 44 | fi 45 | 46 | # Early 2023: https://github.com/containers/podman/issues/16973 47 | # 48 | # We see countless instances of "lookup cdn03.quay.io" flakes. 
49 | # Disabling the systemd resolver (Podman #17505) seems to have almost 50 | # eliminated those -- the exceptions are early-on steps that run 51 | # before that happens. 52 | # 53 | # Opinions differ on the merits of systemd-resolve, but the fact is 54 | # it breaks our CI testing. Here we disable it for all VMs. 55 | # shellcheck disable=SC2154 56 | if ! ((CONTAINER)); then 57 | nsswitch=/etc/authselect/nsswitch.conf 58 | if [[ -e $nsswitch ]]; then 59 | if grep -q -E 'hosts:.*resolve' $nsswitch; then 60 | echo "Disabling systemd-resolved" 61 | $SUDO sed -i -e 's/^\(hosts: *\).*/\1files dns myhostname/' $nsswitch 62 | $SUDO systemctl disable --now systemd-resolved 63 | $SUDO rm -f /etc/resolv.conf 64 | 65 | # NetworkManager may already be running, or it may not.... 66 | $SUDO systemctl start NetworkManager 67 | sleep 1 68 | $SUDO systemctl restart NetworkManager 69 | 70 | # ...and it may create resolv.conf upon start/restart, or it 71 | # may not. Keep restarting until it does. (Yes, I realize 72 | # this is cargocult thinking. Don't care. Not worth the effort 73 | # to diagnose and solve properly.) 74 | retries=10 75 | while ! 
test -e /etc/resolv.conf;do 76 | retries=$((retries - 1)) 77 | if [[ $retries -eq 0 ]]; then 78 | die "Timed out waiting for resolv.conf" 79 | fi 80 | $SUDO systemctl restart NetworkManager 81 | sleep 5 82 | done 83 | fi 84 | fi 85 | fi 86 | -------------------------------------------------------------------------------- /win_images/win-server-wsl.yml: -------------------------------------------------------------------------------- 1 | variables: 2 | # Naming suffix for images to prevent clashes 3 | IMG_SFX: 4 | 5 | # Allows providing handy cross-reference to the build log 6 | CIRRUS_TASK_ID: "{{env `CIRRUS_TASK_ID`}}" 7 | 8 | 9 | builders: 10 | - type: amazon-ebs 11 | name: win-server-wsl 12 | source_ami_filter: 13 | filters: 14 | name: &win_release "Windows_Server-2022-English-Full-Base*" 15 | root-device-type: ebs 16 | virtualization-type: hvm 17 | most_recent: true 18 | owners: 19 | - amazon 20 | # While this image should run on metal, we can build it on smaller/cheaper systems 21 | instance_type: t3.large 22 | force_deregister: true # Remove AMI with same name if exists 23 | force_delete_snapshot: true # Also remove snapshots of force-removed AMI 24 | # Note that we do not set shutdown_behavior to terminate, as a clean shutdown is required 25 | # for windows provisioning to complete successfully. 
26 | communicator: winrm 27 | winrm_username: Administrator # AWS provisions Administrator, unlike GCE 28 | winrm_insecure: true 29 | winrm_use_ssl: true 30 | winrm_timeout: 25m 31 | # Script that runs on server start, needed to prep and enable winrm 32 | user_data_file: '{{template_dir}}/bootstrap.ps1' 33 | # Required for network access, must be the 'default' group used by Cirrus-CI 34 | security_group_id: "sg-042c75677872ef81c" 35 | ami_name: &ami_name '{{build_name}}-c{{user `IMG_SFX`}}' 36 | ami_description: 'Built in https://cirrus-ci.com/task/{{user `CIRRUS_TASK_ID`}}' 37 | launch_block_device_mappings: 38 | - device_name: '/dev/sda1' 39 | volume_size: 200 40 | volume_type: 'gp3' 41 | iops: 6000 42 | delete_on_termination: true 43 | # These are critical and used by security-polciy to enforce instance launch limits. 44 | tags: &awstags 45 | # EC2 expects "Name" to be capitalized 46 | Name: *ami_name 47 | src: '{{.SourceAMI}}' 48 | automation: 'true' 49 | release: *win_release 50 | run_tags: *awstags 51 | run_volume_tags: *awstags 52 | snapshot_tags: *awstags 53 | # This is necessary for security - The CI service accounts are not permitted 54 | # to use AMI's from any other account, including public ones. 55 | ami_users: 56 | - &accountid '449134212816' 57 | 58 | 59 | provisioners: 60 | - type: powershell 61 | inline: 62 | - '$ErrorActionPreference = "stop"' 63 | - 'New-Item -Path "c:\" -Name "temp" -ItemType "directory" -Force' 64 | - 'New-Item -Path "c:\temp" -Name "automation_images" -ItemType "directory" -Force' 65 | - type: 'file' 66 | source: '{{ pwd }}/' 67 | destination: "c:\\temp\\automation_images\\" 68 | - type: powershell 69 | inline: 70 | - 'c:\temp\automation_images\win_images\win_packaging.ps1' 71 | # Several installed items require a reboot, do that now in case it would 72 | # cause a problem with final image preperations. 
73 | - type: windows-restart 74 | - type: powershell 75 | inline: 76 | - 'c:\temp\automation_images\win_images\win_finalization.ps1' 77 | 78 | 79 | post-processors: 80 | - type: 'manifest' 81 | output: '{{template_dir}}/manifest.json' # Collected by Cirrus-CI 82 | strip_path: true 83 | custom_data: 84 | IMG_SFX: '{{ user `IMG_SFX` }}' 85 | STAGE: cache 86 | TASK: '{{user `CIRRUS_TASK_ID`}}' 87 | -------------------------------------------------------------------------------- /ci/validate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is intended to be run by Cirrus-CI to validate PR 4 | # content prior to building any images. It should not be run 5 | # under any other context. 6 | 7 | set -eo pipefail 8 | 9 | SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}") 10 | SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") 11 | REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") 12 | 13 | # shellcheck source=./lib.sh 14 | source "$REPO_DIRPATH/lib.sh" 15 | 16 | req_env_vars CIRRUS_PR CIRRUS_PR_TITLE CIRRUS_USER_PERMISSION CIRRUS_BASE_BRANCH 17 | 18 | show_env_vars 19 | 20 | # die() will add a reference to this file and line number. 21 | [[ "$CIRRUS_CI" == "true" ]] || \ 22 | die "This script is only/ever intended to be run by Cirrus-CI." 23 | 24 | # This is imperfect security-wise, but attempt to catch an accidental 25 | # change in Cirrus-CI Repository settings. Namely the hard-to-read 26 | # "slider" that enables non-contributors to run jobs. We don't want 27 | # that on this repo, ever. because there are sensitive secrets in use. 
28 | # This variable is set by CI and validated non-empty above 29 | # shellcheck disable=SC2154 30 | if [[ "$CIRRUS_USER_PERMISSION" != "write" ]] && [[ "$CIRRUS_USER_PERMISSION" != "admin" ]]; then 31 | die "CI Execution not supported with permission level '$CIRRUS_USER_PERMISSION'" 32 | fi 33 | 34 | for target in image_builder/gce.json base_images/cloud.json \ 35 | cache_images/cloud.json win_images/win-server-wsl.json; do 36 | if ! make $target; then 37 | die "Running 'make $target' failed, please validate input YAML files." 38 | fi 39 | done 40 | 41 | ### The following checks only apply if validating a PR 42 | if [[ -z "$CIRRUS_PR" ]]; then 43 | echo "Not validating IMG_SFX changes outside of a PR" 44 | exit 0 45 | fi 46 | 47 | # For Docs-only PRs, no further checks are needed 48 | # Variable is defined by Cirrus-CI at runtime 49 | # shellcheck disable=SC2154 50 | if [[ "$CIRRUS_PR_TITLE" =~ CI:DOCS ]]; then 51 | msg "This looks like a docs-only PR, skipping further validation checks." 52 | exit 0 53 | fi 54 | 55 | # Fix "Not a valid object name main" error from Cirrus's 56 | # incomplete checkout. 57 | git remote update origin 58 | # Determine where PR branched off of $CIRRUS_BASE_BRANCH 59 | # shellcheck disable=SC2154 60 | base_sha=$(git merge-base origin/${CIRRUS_BASE_BRANCH:-main} HEAD) 61 | 62 | if ! git diff --name-only ${base_sha}..HEAD | grep -q IMG_SFX; then 63 | die "Every PR that builds images must include an updated IMG_SFX file. 64 | Simply run 'make IMG_SFX', commit the result, and re-push." 65 | else 66 | IMG_SFX="$(<./IMG_SFX)" 67 | # IMG_SFX was modified vs PR's base-branch, confirm version moved forward 68 | v_prev=$(git show ${base_sha}:IMG_SFX 2>&1 || true) 69 | # Verify new IMG_SFX value always version-sorts later than previous value. 70 | # This prevents screwups due to local timezone, bad, or unset clocks, etc. 
71 | new_img_ver=$(awk -F 't' '{print $1"."$2}'<<<"$IMG_SFX" | cut -dz -f1) 72 | old_img_ver=$(awk -F 't' '{print $1"."$2}'<<<"$v_prev" | cut -dz -f1) 73 | # Version-sorting of date/time mimics the way renovate will compare values 74 | # see https://github.com/containers/automation/blob/main/renovate/defaults.json5 75 | latest_img_ver=$(echo -e "$new_img_ver\n$old_img_ver" | sort -V | tail -1) 76 | [[ "$latest_img_ver" == "$new_img_ver" ]] || \ 77 | die "Updated IMG_SFX '$IMG_SFX' appears to be older than previous 78 | value '$v_prev'. Please check your local clock and try again." 79 | 80 | # IMG_SFX values need to change for every image build, even within the 81 | # same PR. Attempt to catch re-use of a tag before starting the lengthy 82 | # build process (which will fail on a duplicate). Check the imgts image 83 | # simply because it builds very early in cirrus-ci and cannot be skipped 84 | # with a "no_*" label. 85 | existing_tags=$(skopeo list-tags docker://quay.io/libpod/imgts | jq -r -e '.Tags[]') 86 | if grep -q "$IMG_SFX" <<<"$existing_tags"; then 87 | echo "It's highly likely the IMG_SFX '$IMG_SFX' is being re-used." 88 | echo "Don't do this. Run 'make IMG_SFX', commit the result, and re-push". 89 | exit 1 90 | fi 91 | fi 92 | -------------------------------------------------------------------------------- /base_images/fedora_base-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is intended to be run by packer, inside a Fedora VM. 4 | # Its purpose is to configure the VM for importing into google cloud, 5 | # so that it will boot in GCE and be accessible for further use. 
6 | 7 | set -eo pipefail 8 | 9 | SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}") 10 | SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") 11 | REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") 12 | 13 | # Run as quickly as possible after boot 14 | /bin/bash $REPO_DIRPATH/systemd_banish.sh 15 | 16 | # shellcheck source=./lib.sh 17 | source "$REPO_DIRPATH/lib.sh" 18 | 19 | declare -a PKGS 20 | PKGS=(rng-tools git coreutils cloud-init) 21 | if ! ((CONTAINER)); then 22 | # Packer defines this automatically for us 23 | # shellcheck disable=SC2154 24 | if [[ "$PACKER_BUILD_NAME" =~ "aws" ]]; then 25 | echo "WARN: AWS EC2 Instance Connect not supported on Fedora, use cloud-init." 26 | PKGS+=(policycoreutils-python-utils policycoreutils) 27 | else # GCP image 28 | PKGS+=(google-compute-engine-oslogin) 29 | if ((OS_RELEASE_VER<35)); then 30 | PKGS+=(google-compute-engine-tools) 31 | else 32 | PKGS+=(google-compute-engine-guest-configs google-guest-agent) 33 | fi 34 | fi 35 | fi 36 | 37 | # The Fedora CI VM base images are built using nested-virt with 38 | # limited resources available. Further, cloud-networking in 39 | # general can sometimes be flaky. Increase DNF's tolerance 40 | # levels. 41 | cat << EOF | $SUDO tee -a /etc/dnf/dnf.conf 42 | 43 | # Added during CI VM image build 44 | minrate=100 45 | timeout=60 46 | EOF 47 | 48 | $SUDO dnf makecache 49 | $SUDO dnf -y update 50 | $SUDO dnf -y install "${PKGS[@]}" 51 | # Occasionally following an install, there are more updates available. 52 | # This may be due to activation of suggested/recommended dependency resolution. 53 | $SUDO dnf -y update 54 | 55 | if ! ((CONTAINER)); then 56 | $SUDO systemctl enable rngd 57 | fi 58 | 59 | install_automation_tooling 60 | 61 | if ! 
((CONTAINER)); then 62 | custom_cloud_init 63 | 64 | # Be kind to humans, indicate where generated files came from 65 | sourcemsg="### File generated during VM Image build by $(basename $SCRIPT_FILEPATH)" 66 | 67 | # The mechanism used by Cirrus-CI to execute tasks on the system is through an 68 | # "agent" process launched as a GCP VM startup-script (from 'user-data'). 69 | # This agent is responsible for cloning the repository and executing all task 70 | # scripts and other operations. Therefore, on SELinux-enforcing systems, the 71 | # service must be labeled properly to ensure its child processes can 72 | # run with the proper contexts. 73 | METADATA_SERVICE_CTX=unconfined_u:unconfined_r:unconfined_t:s0 74 | if [[ "$PACKER_BUILD_NAME" =~ "aws" ]]; then 75 | echo "Setting AWS startup service (for Cirrus-CI agent) SELinux unconfined" 76 | # AWS relies on cloud-init to run a user-data startup script. Manual 77 | # observation showed this happens in the cloud-final service. 78 | METADATA_SERVICE_PATH=systemd/system/cloud-final.service 79 | # This is necessary to prevent permission-denied errors on service-start 80 | # and also on the off-chance the package gets updated and context reset. 
81 | $SUDO semanage fcontext --add --type bin_t /usr/bin/cloud-init 82 | # This used restorecon before so we don't have to specify the file_contexts.local 83 | # manually, however with f42 that stopped working: https://bugzilla.redhat.com/show_bug.cgi?id=2360183 84 | $SUDO setfiles -v /etc/selinux/targeted/contexts/files/file_contexts.local /usr/bin/cloud-init 85 | else # GCP Image 86 | echo "Setting GCP startup service (for Cirrus-CI agent) SELinux unconfined" 87 | # ref: https://cloud.google.com/compute/docs/startupscript 88 | METADATA_SERVICE_PATH=systemd/system/google-startup-scripts.service 89 | fi 90 | echo "$sourcemsg" | $SUDO tee -a /etc/$METADATA_SERVICE_PATH 91 | sed -r -e \ 92 | "s/^Type=oneshot/Type=oneshot\nSELinuxContext=$METADATA_SERVICE_CTX/" \ 93 | /lib/$METADATA_SERVICE_PATH | $SUDO tee -a /etc/$METADATA_SERVICE_PATH 94 | fi 95 | 96 | finalize 97 | -------------------------------------------------------------------------------- /.github/actions/bin/create_image_table.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """Parse $GITHUB_WORKSPACE/built_images.json into MD table in $GITHUB_ENV.""" 4 | 5 | # Note: This script is exclusively intended to be used by the 6 | # pr_image_id.yml github-actions workflow. Any use outside that 7 | # context is unlikely to function as intended. 
8 | 9 | import json 10 | import os 11 | import sys 12 | 13 | 14 | def msg(msg, newline=True): 15 | """Print msg to stderr with optional newline.""" 16 | nl = "" 17 | if newline: 18 | nl = "\n" 19 | sys.stderr.write(f"{msg}{nl}") 20 | sys.stderr.flush() 21 | 22 | 23 | def stage_sort(item): 24 | """Return sorting-key for build-image-json item.""" 25 | if item["stage"] == "import": 26 | return str("0010" + item["name"]) 27 | elif item["stage"] == "base": 28 | return str("0020" + item["name"]) 29 | elif item["stage"] == "cache": 30 | return str("0030" + item["name"]) 31 | else: 32 | return str("0100" + item["name"]) 33 | 34 | 35 | if "GITHUB_ENV" not in os.environ: 36 | raise KeyError("Error: $GITHUB_ENV is undefined.") 37 | 38 | cirrus_ci_build_id = None 39 | github_workspace = os.environ.get("GITHUB_WORKSPACE", ".") 40 | 41 | # File written by a previous workflow step 42 | with open(f"{github_workspace}/built_images.json") as bij: 43 | msg(f"Reading image build data from {bij.name}:") 44 | data = [] 45 | for build in json.load(bij): # list of build data maps 46 | stage = build.get("stage", False) 47 | name = build.get("name", False) 48 | sfx = build.get("sfx", False) 49 | task = build.get("task", False) 50 | if bool(stage) and bool(name) and bool(sfx) and bool(task): 51 | image_suffix = f"{stage[0]}{sfx}" 52 | data.append( 53 | dict(stage=stage, name=name, image_suffix=image_suffix, task=task) 54 | ) 55 | if cirrus_ci_build_id is None: 56 | cirrus_ci_build_id = sfx 57 | msg(f"Including '{stage}' stage build '{name}' for task '{task}'.") 58 | else: 59 | msg(f"Skipping '{stage}' stage build '{name}' for task '{task}'.") 60 | 61 | url = "https://cirrus-ci.com/task" 62 | lines = [] 63 | data.sort(key=stage_sort) 64 | for item in data: 65 | image_suffix = item["image_suffix"] 66 | # Base-images should never actually be used, but it may be helpful 67 | # to have them in the list in case some debugging is needed. 
68 | if item["stage"] != "cache": 69 | image_suffix = "do-not-use" 70 | lines.append( 71 | "|*{0}*|[{1}]({2})|`{3}`|\n".format( 72 | item["stage"], 73 | item["name"], 74 | "{0}/{1}".format(url, item["task"]), 75 | image_suffix, 76 | ) 77 | ) 78 | 79 | 80 | # This is the mechanism required to set an multi-line env. var. 81 | # value to be consumed by future workflow steps. 82 | with open(os.environ["GITHUB_ENV"], "a") as ghenv, open( 83 | f"{github_workspace}/images.md", "w" 84 | ) as mdfile, open(f"{github_workspace}/images.json", "w") as images_json: 85 | 86 | env_header = "IMAGE_TABLE< /dev/null 155 | 156 | # Buildah CI does conformance testing vs the most recent Docker version. 157 | # FIXME: As of 7-2023, there is no 'trixie' dist for docker. Fix the next lines once that changes. 158 | #docker_debian_release=$(source /etc/os-release; echo "$VERSION_CODENAME") 159 | docker_debian_release="bookworm" 160 | 161 | echo "deb https://download.docker.com/linux/debian $docker_debian_release stable" | \ 162 | ooe.sh $SUDO tee /etc/apt/sources.list.d/docker.list &> /dev/null 163 | 164 | if ((CONTAINER==0)) && [[ ${#DOWNLOAD_PACKAGES[@]} -gt 0 ]]; then 165 | $SUDO apt-get clean # no reason to keep previous downloads around 166 | # Needed to install .deb files + resolve dependencies 167 | lilto $SUDO apt-get -q -y update 168 | echo "Downloading packages for optional installation at runtime." 
169 | $SUDO ln -s /var/cache/apt/archives "$PACKAGE_DOWNLOAD_DIR" 170 | bigto $SUDO apt-get -q -y install --download-only "${DOWNLOAD_PACKAGES[@]}" 171 | fi 172 | -------------------------------------------------------------------------------- /.github/workflows/orphan_vms.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Format Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions 4 | 5 | # Required to un-FUBAR default ${{github.workflow}} value 6 | name: check_orphan_vms 7 | 8 | on: 9 | # Note: This only applies to the default branch. 10 | schedule: 11 | # Nobody is around to respond to weekend e-mails 12 | - cron: '59 23 * * 0-4' 13 | # Debug: Allow triggering job manually in github-actions WebUI 14 | workflow_dispatch: {} 15 | 16 | env: 17 | # Debug-mode can reveal secrets, only enable by a secret value. 18 | # Ref: https://docs.github.com/en/actions/managing-workflow-runs/enabling-debug-logging#enabling-runner-diagnostic-logging 19 | ACTIONS_STEP_DEBUG: '${{ secrets.ACTIONS_STEP_DEBUG }}' 20 | ACTIONS_RUNNER_DEBUG: '${{ secrets.ACTIONS_RUNNER_DEBUG }}' 21 | # CSV listing of e-mail addresses for delivery failure or error notices 22 | RCPTCSV: rh.container.bot@gmail.com,podman-monitor@lists.podman.io 23 | 24 | jobs: 25 | orphan_vms: 26 | runs-on: ubuntu-latest 27 | steps: 28 | - uses: actions/checkout@v6 29 | with: 30 | persist-credentials: false 31 | 32 | # Avoid duplicating cron-fail_addrs.csv 33 | - uses: actions/checkout@v6 34 | with: 35 | repository: containers/podman 36 | path: '_podman' 37 | persist-credentials: false 38 | 39 | - name: Collect listing of orphaned VMs 40 | env: 41 | GCPNAME: ${{ secrets.GCPNAME }} 42 | GCPJSON: ${{ secrets.GCPJSON }} 43 | AWSINI: ${{ secrets.AWSINI }} 44 | GCPPROJECT: 'libpod-218412' 45 | run: | 46 | export GCPNAME GCPJSON AWSINI GCPPROJECT 47 | export GCPPROJECTS=$(grep -E -vx '^#+.*$' 
$GITHUB_WORKSPACE/gcpprojects.txt | tr -s '[:space:]' ' ') 48 | podman run --rm \ 49 | -e GCPNAME -e GCPJSON -e AWSINI -e GCPPROJECT -e GCPPROJECTS \ 50 | quay.io/libpod/orphanvms:latest \ 51 | > /tmp/orphanvms_output.txt 52 | 53 | - if: always() 54 | uses: actions/upload-artifact@v5 55 | with: 56 | name: orphanvms_output 57 | path: /tmp/orphanvms_output.txt 58 | 59 | - name: Count number of orphaned VMs 60 | id: orphans 61 | run: | 62 | count=$(grep -E -x '\* VM .+' /tmp/orphanvms_output.txt | wc -l) 63 | # Assist with debugging job (step-outputs are otherwise hidden) 64 | printf "Orphan VMs count:%d\n" $count 65 | if [[ "$count" =~ ^[0-9]+$ ]]; then 66 | printf "count=%d\n" $count >> $GITHUB_OUTPUT 67 | else 68 | printf "count=0\n" >> $GITHUB_OUTPUT 69 | fi 70 | 71 | - if: steps.orphans.outputs.count > 0 72 | shell: bash 73 | run: | 74 | set -eo pipefail 75 | ( 76 | echo "Detected ${{ steps.orphans.outputs.count }} Orphan VM(s):" 77 | echo "" 78 | cat /tmp/orphanvms_output.txt 79 | echo "" 80 | echo "# Source: ${{ github.workflow }} workflow on ${{ github.repository }}." 81 | # Separate content from sendgrid.com automatic footer. 
82 | echo "" 83 | echo "" 84 | ) > /tmp/email_body.txt 85 | 86 | - if: steps.orphans.outputs.count > 0 87 | name: Send orphan notification e-mail 88 | # Ref: https://github.com/dawidd6/action-send-mail 89 | uses: dawidd6/action-send-mail@v3.12.0 90 | with: 91 | server_address: ${{ secrets.ACTION_MAIL_SERVER }} 92 | server_port: 465 93 | username: ${{ secrets.ACTION_MAIL_USERNAME }} 94 | password: ${{ secrets.ACTION_MAIL_PASSWORD }} 95 | subject: Orphaned CI VMs detected 96 | to: ${{env.RCPTCSV}} 97 | from: ${{ secrets.ACTION_MAIL_SENDER }} 98 | body: file:///tmp/email_body.txt 99 | 100 | - if: failure() 101 | name: Send error notification e-mail 102 | uses: dawidd6/action-send-mail@v3.12.0 103 | with: 104 | server_address: ${{secrets.ACTION_MAIL_SERVER}} 105 | server_port: 465 106 | username: ${{secrets.ACTION_MAIL_USERNAME}} 107 | password: ${{secrets.ACTION_MAIL_PASSWORD}} 108 | subject: Github workflow error on ${{github.repository}} 109 | to: ${{env.RCPTCSV}} 110 | from: ${{secrets.ACTION_MAIL_SENDER}} 111 | body: "Job failed: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}" 112 | -------------------------------------------------------------------------------- /imgts/lib_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | RED="\e[1;31m" 6 | YEL="\e[1;33m" 7 | NOR="\e[0m" 8 | SENTINEL="__unknown__" # default set in Containerfile 9 | # Disable all input prompts 10 | # https://cloud.google.com/sdk/docs/scripting-gcloud 11 | GCLOUD="gcloud --quiet" 12 | # https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-options.html#cli-configure-options-list 13 | AWS="aws --cli-connect-timeout 30 --cli-read-timeout 30 --no-paginate" 14 | 15 | die() { 16 | EXIT=$1 17 | shift 18 | MSG="$*" 19 | echo -e "${RED}ERROR: $MSG${NOR}" 20 | exit "$EXIT" 21 | } 22 | 23 | # Similar to die() but it ignores the first parameter (exit code) 24 | # to allow direct use 
in place of an (otherwise) die() call. 25 | warn() { 26 | IGNORE=$1 27 | shift 28 | MSG="$*" 29 | echo -e "${RED}WARNING: $MSG${NOR}" 30 | } 31 | 32 | # Hilight messages not coming from a shell command 33 | msg() { 34 | echo -e "${YEL}${1:-NoMessageGiven}${NOR}" 35 | } 36 | 37 | # Pass in a list of one or more envariable names; exit non-zero with 38 | # helpful error message if any value is empty 39 | req_env_vars() { 40 | for i; do 41 | if [[ -z "${!i}" ]] 42 | then 43 | die 1 "entrypoint.sh requires \$$i to be non-empty." 44 | elif [[ "${!i}" == "$SENTINEL" ]] 45 | then 46 | die 2 "entrypoint.sh requires \$$i to be explicitly set." 47 | fi 48 | done 49 | } 50 | 51 | gcloud_init() { 52 | req_env_vars GCPJSON GCPPROJECT 53 | set +xe 54 | if [[ -n "$1" ]] && [[ -r "$1" ]] 55 | then 56 | TMPF="$1" 57 | else 58 | TMPF=$(mktemp -p '' .XXXXXXXX) 59 | trap "rm -f $TMPF &> /dev/null" EXIT 60 | # Required variable must be set by caller 61 | # shellcheck disable=SC2154 62 | echo "$GCPJSON" > $TMPF 63 | fi 64 | unset GCPJSON 65 | # Required variable must be set by caller 66 | # shellcheck disable=SC2154 67 | $GCLOUD auth activate-service-account --project="$GCPPROJECT" --key-file="$TMPF" || \ 68 | die 5 "Authentication error, please verify \$GCPJSON contents" 69 | rm -f $TMPF &> /dev/null || true # ignore any read-only error 70 | trap - EXIT 71 | } 72 | 73 | aws_init() { 74 | req_env_vars AWSINI 75 | set +xe 76 | if [[ -n "$1" ]] && [[ -r "$1" ]] 77 | then 78 | TMPF="$1" 79 | else 80 | TMPF=$(mktemp -p '' .XXXXXXXX) 81 | fi 82 | # shellcheck disable=SC2154 83 | echo "$AWSINI" > $TMPF 84 | unset AWSINI 85 | export AWS_SHARED_CREDENTIALS_FILE=$TMPF 86 | } 87 | 88 | # Obsolete and Prune search-loops runs in a sub-process, 89 | # therefor count must be recorded in file. 
IMGCOUNT=$(mktemp -p '' imgcount.XXXXXX)
echo "0" > "$IMGCOUNT"
# Increment the (file-backed) processed-image counter by one.
count_image() {
    local count
    count=$(<"$IMGCOUNT")
    # Arithmetic expansion instead of deprecated `let` (shellcheck SC2219)
    count=$((count + 1))
    echo "$count" > "$IMGCOUNT"
}

# Cirrus-CI supports multiple methods when specifying an EC2 image
# to use.  This function supports either of them as its only argument:
# Either a literal "ami-*" value, or the value of a "Name" tag to
# search for.  In the former-case, the "ami-*" value will simply
# be printed to stdout.  In the latter case, the newest image
# found by a name-tag search will be printed to stdout.
get_ec2_ami() {
    local image="$1"
    local _awsoutput _name_filter _result_filter
    local -a _awscmd

    _name_filter="Name=name,Values='$image'"
    _result_filter='.Images | map(select(.State == "available")) | sort_by(.CreationDate) | reverse | .[0].ImageId'
    # Word-splitting for $AWS is desired
    # shellcheck disable=SC2206
    _awscmd=(\
        $AWS ec2 describe-images --owners self
        --filters "$_name_filter" --output json
    )

    req_env_vars image AWS

    # Direct image specification, nothing to do.
    # N/B: '%s' format prevents a '%' in the value being mis-parsed (SC2059)
    if [[ "$image" =~ ^ami-.+ ]]; then printf '%s' "$image"; return 0; fi

    # Empty $AWSCLI input to jq will NOT trigger its `-e`, so double-check.
    if _awsoutput=$("${_awscmd[@]}") && [[ -n "$_awsoutput" ]] && \
       _ami_id=$(jq -r -e "$_result_filter"<<<"$_awsoutput") && \
       [[ -n "$_ami_id" ]]
    then
        printf '%s' "$_ami_id"
    else
        warn "Could not find an available AMI with name-tag '$image': $_awsoutput"
        return 1
    fi
}

# Takes a tag-name string as the first argument, and a JSON-object (mapping)
# (bash-string) as the second.  If the JSON object contains a "TAGS" key,
# and its value is a list of "Key"/"Value" objects, retrieve and print the
# value associated with a tag-name key, if it exists.  Otherwise print nothing
# and return 1.  Example input JSON:
# {
#     ...ignored stuff...
#     "TAGS": [
#         { "Key": "Foo",
#           "Value": "Bar"
#         }
#     ]
# }
get_tag_value() {
    local tag=$1
    local json=$2
    req_env_vars tag json
    # Careful, there may not be any tag-list at all.
    local tag_filter=".[]? | select(.Key == \"$tag\").Value"
    local tags value

    # There may not be any TAGS key at all.
    if tags=$(jq -e ".TAGS?"<<<"$json"); then
        # All tags are optional, the one we're looking for may not be set
        if value=$(jq -e -r "$tag_filter"<<<"$tags") && [[ -n "$value" ]]; then
            printf '%s' "$value"
            return 0
        fi
    fi
    return 1
}
--------------------------------------------------------------------------------
/orphanvms/_ec2:
--------------------------------------------------------------------------------


# This script is intended to be sourced by entrypoint.sh.  It contains
# all the Elastic-Compute-Cloud specific definitions and behaviors.
# Anything written to stdout will end up in a notification e-mail.

# This is just creating a temporary file and setting a env. var.  It should
# not produce any output (which may needlessly trigger an e-mail).
aws_init

# Override this for debugging
AWS="${AWS:-aws}"

OUTPUT=$(mktemp -p '' orphanvms_awsec2_XXXXX)

# It's simpler to compare unix times
UNIX_THRESHOLD=$(date --date "$THRESHOLD" +%s)
# EC2 instances can be in several transitory "meta" states, including
# "terminated" (deleted).  This script only cares about "running" instances.
EC2_FILTER="Name=instance-state-name,Values=running"
# aws cli returns a giant blob of JSON with all kinds of details we don't care about.
# Help cut down the amount of crap we need to sort through.
23 | EC2_QUERY="Reservations[*].Instances[*].{ID:InstanceId,TAGS:Tags,START:LaunchTime}" 24 | 25 | echo "Orphaned AWS EC2 VMs:" > $OUTPUT 26 | 27 | # Returns an empty list when nothing is found, otherwise returns items indicated 28 | # in $EC2_QUERY, each inside a (useless) single-item list, inside another list. 29 | if ! aws_output=$(aws ec2 describe-instances --no-paginate --output json --filter "$EC2_FILTER" --query "$EC2_QUERY"); then 30 | die 1 "Querying running EC2 instances: $aws_output" 31 | fi 32 | 33 | # Unroll the (useless) inner lists, if outer list is empty no instances were found. 34 | if ! simple_inst_list=$(jq -e '[.[][]]'<<<"$aws_output"); then 35 | # Debug the original output in case it's more helpful 36 | dbg "No EC2 instances found: $aws_output" 37 | exit 0 38 | fi 39 | 40 | # I don't expect there will ever be more than maybe 0-20 instances at any time. 41 | for instance_index in $(seq 1 $(jq -e 'length'<<<"$simple_inst_list")); do 42 | instance=$(jq -e ".[$instance_index - 1]"<<<"$simple_inst_list") 43 | # aws commands require an instance ID 44 | instid=$(jq -r ".ID"<<<"$instance") 45 | # A Name-tag isn't guaranteed, default to stupid, unreadable, generated ID 46 | name=$instid 47 | if name_tag=$(get_tag_value "Name" "$instance"); then 48 | # This is MUCH more human-friendly and easier to find in the WebUI. 49 | # If it was an instance leaked by Cirrus-CI, it may even include the 50 | # task number which leaked it. 51 | name=$name_tag 52 | fi 53 | 54 | # The `START` (a.k.a. `LaunchTime`) value is documented as ISO 8601 format, 55 | # forced to the UTC zone with a (useless) microseconds appended. I found 56 | # `jq` cannot parse the microseconds part properly, but `date` seems happy 57 | # to accept it. 58 | if ! 
started_at=$(date --utc --date "$(jq -r -e ".START"<<<"$instance")" +%s); then
        # N/B: die() requires an exit-code as its first argument; omitting it
        # previously made `exit "$EXIT"` fail on a non-numeric value and
        # dropped the first word of the message.
        die 1 "Error extracting start time from instance JSON: '$instance'"
    fi
    age_days=$((($NOW - $started_at) / (60 * 60 * 24)))
    if [[ $started_at -gt $UNIX_THRESHOLD ]]; then
        dbg "Ignoring instance '$name' (too new)"
        continue
    fi

    dbg "Examining EC2 instance '$name', '$age_days' days old"

    if [[ $(get_tag_value "persistent" "$instance" || true) == "true" ]]; then
        dbg "Found instance '$name' marked persistent=true, ignoring it."
        continue
    fi

    # First part of the status line item to append in the e-mail
    line="* VM $name running $age_days days"

    # It would be nice to list all the tags like we do for GCE VMs,
    # but it's a PITA to do for AWS in a human-readable format.
    # Only print this handy-one (set by get_ci_vm) if it's there.
    if inuseby_tag=$(get_tag_value "in-use-by" "$instance"); then
        dbg "Found instance '$name' tagged in-use-by=$inuseby_tag."
        line+="; likely get_ci_vm, in-use-by=$inuseby_tag"
    elif ((DRY_RUN==0)); then  # NOT a persistent or a get_ci_vm instance
        # Around Jun/Jul '23 an annoyingly steady stream of EC2 orphans were
        # reported to Cirrus-support.  They've taken actions to resolve,
        # but the failure-modes are many and complex.  Since most of the EC2
        # instances are rather expensive to keep needlessly running, and manual
        # cleanup is annoying, try to terminate them automatically.
        dbg "Attempting to terminate instance '$name'"

        # Operation runs asynchronously, no error reported for already terminated instance.
        # Any stdout/stderr here would make the eventual e-mail unreadable.
        if ! termout=$(aws ec2 terminate-instances --no-paginate --output json --instance-ids "$instid" 2>&1)
        then
            echo "::error::Auto-term.
of '$instid' failed, 'aws' output: $termout" > /dev/stderr 96 | 97 | # Catch rare TOCTOU race, instance was running, terminated, and pruned while looping. 98 | # (terminated instances stick around for a while until purged automatically) 99 | if [[ "$termout" =~ InvalidInstanceID ]]; then 100 | line+="; auto-term. failed, instance vanished" 101 | else # Something else horrible broke, let the operators know. 102 | line+="; auto-term. failed, see GHA workflow log" 103 | fi 104 | else 105 | dbg "Successful term. command output: '$termout'" 106 | # At this point, the script could sit around in a poll-loop, waiting to confirm 107 | # the `$termout` JSON contains `CurrentState: { Code: 48, Name: terminated }`. 108 | # However this could take _minutes_, and there may be a LOT of instances left 109 | # to process. Do the next best thing: Hope the termination eventually works, 110 | # but also let the operator know an attempt was made. 111 | line+="; probably successful auto-termination" 112 | fi 113 | else # no in-use-by tag, DRY_RUN==1 114 | dbg "DRY_RUN: Would normally have tried to terminate instance '$name' (ID $instid)" 115 | fi 116 | 117 | echo "$line" >> "$OUTPUT" 118 | done 119 | 120 | dbg "The following will be part of a notification e-mail:" 121 | 122 | # Don't count the "Orphaned AWS EC2 VMs:" header-line 123 | if [[ $(wc -l $OUTPUT | awk '{print $1}') -gt 1 ]]; then 124 | cat $OUTPUT 125 | fi 126 | -------------------------------------------------------------------------------- /imgprune/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script is set as, and intended to run as the `imgprune` container's 4 | # entrypoint. It is largely based on the imgobsolete's entrypoint script 5 | # but with some important/subtle differences. It searches for deprecated 6 | # VM images with deletion-metadata some time in the past. 
Some number of 7 | # these images are randomly selected and then permanently deleted. 8 | 9 | set -e 10 | 11 | # shellcheck source=imgts/lib_entrypoint.sh 12 | source /usr/local/bin/lib_entrypoint.sh 13 | 14 | req_env_vars GCPJSON GCPNAME GCPPROJECT AWSINI IMG_SFX 15 | 16 | gcloud_init 17 | 18 | # Set this to 1 for testing 19 | DRY_RUN="${DRY_RUN:-0}" 20 | # For safety's sake limit nr deletions 21 | DELETE_LIMIT=50 22 | ABOUTNOW=$(date --iso-8601=date) # precision is not needed for this use 23 | # Format Ref: https://cloud.google.com/sdk/gcloud/reference/topic/formats 24 | # Field list from `gcloud compute images list --limit=1 --format=text` 25 | FORMAT='value[quote](name,selfLink,deprecated.state,deprecated.deleted,labels)' 26 | # Required variable set by caller 27 | # shellcheck disable=SC2154 28 | PROJRE="/v1/projects/$GCPPROJECT/global/" 29 | # Filter Ref: https://cloud.google.com/sdk/gcloud/reference/topic/filters 30 | # Note: deprecated.delete comes from --delete-in (from imgobsolete container) 31 | FILTER="selfLink~$PROJRE AND deprecated.state=OBSOLETE AND deprecated.deleted<$ABOUTNOW" 32 | TODELETE=$(mktemp -p '' todelete.XXXXXX) 33 | 34 | msg "Searching for obsolete GCP images using filter:${NOR} $FILTER" 35 | # Ref: https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#deprecating_an_image 36 | $GCLOUD compute images list --show-deprecated \ 37 | --format="$FORMAT" --filter="$FILTER" | \ 38 | while read name selfLink dep_state del_date labels 39 | do 40 | count_image 41 | reason="" 42 | permanent=$(grep -E --only-matching --max-count=1 --ignore-case 'permanent=true' <<< $labels || true) 43 | [[ -z "$permanent" ]] || \ 44 | die 1 "Refusing to delete a deprecated image labeled permanent=true. Please use gcloud utility to set image active, then research the cause of deprecation." 
45 | [[ "$dep_state" == "OBSOLETE" ]] || \ 46 | die 1 "Unexpected depreciation-state encountered for $name: $dep_state; labels: $labels" 47 | 48 | # Any image matching the currently in-use IMG_SFX must always be preserved. 49 | # Values are defined in cirrus.yml 50 | # shellcheck disable=SC2154 51 | if [[ "$name" =~ $IMG_SFX ]]; then 52 | msg " Skipping current (latest) image $name" 53 | continue 54 | fi 55 | 56 | reason="Obsolete as of $del_date; labels: $labels" 57 | echo "GCP $name $reason" >> $TODELETE 58 | done 59 | 60 | msg "Searching for deprecated EC2 images prior to${NOR} $ABOUTNOW" 61 | aws_init 62 | 63 | # The AWS cli returns a huge blob of data we mostly don't need. 64 | # # Use query statement to simplify the results. N/B: The get_tag_value() 65 | # # function expects to find a "TAGS" item w/ list value. 66 | ami_query='Images[*].{ID:ImageId,TAGS:Tags,DEP:DeprecationTime,SNAP:BlockDeviceMappings[0].Ebs.SnapshotId}' 67 | all_amis=$($AWS ec2 describe-images --owners self --query "$ami_query") 68 | nr_amis=$(jq -r -e length<<<"$all_amis") 69 | 70 | req_env_vars all_amis nr_amis 71 | for (( i=nr_amis ; i ; i-- )); do 72 | count_image 73 | unset ami ami_id dep snap permanent 74 | ami=$(jq -r -e ".[$((i-1))]"<<<"$all_amis") 75 | ami_id=$(jq -r -e ".ID"<<<"$ami") 76 | dep=$(jq -r -e ".DEP"<<<"$ami") 77 | if [[ "$dep" == null ]] || [[ -z "$dep" ]]; then continue; fi 78 | dep_ymd=$(date --date="$dep" --iso-8601=date) 79 | snap=$(jq -r -e ".SNAP"<<<$ami) 80 | 81 | if permanent=$(get_tag_value "permanent" "$ami") && \ 82 | [[ "$permanent" == "true" ]] 83 | then 84 | warn 0 "Found permanent image '$ami_id' with deprecation '$dep_ymd'. Clearing deprecation date." 85 | $AWS ec2 disable-image-deprecation --image-id "$ami_id" > /dev/null 86 | continue 87 | fi 88 | 89 | unset name 90 | if ! 
name=$(get_tag_value "Name" "$ami"); then 91 | warn 0 " EC2 AMI ID '$ami_id' is missing a 'Name' tag" 92 | fi 93 | 94 | # Any image matching the currently in-use IMG_SFX 95 | # must always be preserved. 96 | if [[ "$name" =~ $IMG_SFX ]]; then 97 | warn 0 " Retaining current (latest) image $name id $ami_id" 98 | $AWS ec2 disable-image-deprecation --image-id "$ami_id" > /dev/null 99 | continue 100 | fi 101 | 102 | if [[ $(echo -e "$ABOUTNOW\n$dep_ymd" | sort | tail -1) == "$ABOUTNOW" ]]; then 103 | reason="Obsolete as of '$dep_ymd'; snap=$snap" 104 | echo "EC2 $ami_id $reason" >> $TODELETE 105 | fi 106 | done 107 | 108 | COUNT=$(<"$IMGCOUNT") 109 | CANDIDATES=$(wc -l <$TODELETE) 110 | msg "########################################################################" 111 | msg "Deleting up to $DELETE_LIMIT random image candidates ($CANDIDATES/$COUNT total)::" 112 | 113 | # Require a minimum number of images to exist 114 | if [[ "$CANDIDATES" -lt $DELETE_LIMIT ]] 115 | then 116 | die 0 "Safety-net Insufficient images ($CANDIDATES) to process deletions ($DELETE_LIMIT required)" 117 | fi 118 | 119 | sort --random-sort $TODELETE | tail -$DELETE_LIMIT | \ 120 | while read -r cloud image_name reason; do 121 | 122 | msg "Deleting $cloud $image_name:${NOR} $reason" 123 | if ((DRY_RUN)); then 124 | msg "Dry-run: No changes made" 125 | elif [[ "$cloud" == "GCP" ]]; then 126 | $GCLOUD compute images delete $image_name 127 | elif [[ "$cloud" == "EC2" ]]; then 128 | # Snapshot ID's always start with 'snap-' followed by a hexadecimal string 129 | snap_id=$(echo "$reason" | sed -r -e 's/.* snap=(snap-[a-f0-9]+).*/\1/') 130 | [[ -n "$snap_id" ]] || \ 131 | die 1 "Failed to parse EC2 snapshot ID for '$image_name' from string: '$reason'" 132 | # Because it aims to be as helpful and useful as possible, not all failure conditions 133 | # result in a non-zero exit >:( 134 | unset output 135 | output=$($AWS ec2 deregister-image --image-id "$image_name") 136 | [[ ! 
"$output" =~ An\ error\ occurred ]] || \ 137 | die 1 "$output" 138 | 139 | msg " ...deleting snapshot $snap_id:${NOR} (formerly used by $image_name)" 140 | output=$($AWS ec2 delete-snapshot --snapshot-id "$snap_id") 141 | [[ ! "$output" =~ An\ error\ occurred ]] || \ 142 | die 1 "$output" 143 | else 144 | die 1 "Unknown/Unsupported cloud '$cloud' record encountered in \$TODELETE file" 145 | fi 146 | done 147 | -------------------------------------------------------------------------------- /cache_images/fedora_packaging.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is called from fedora_setup.sh and various Dockerfiles. 4 | # It's not intended to be used outside of those contexts. It assumes the lib.sh 5 | # library has already been sourced, and that all "ground-up" package-related activity 6 | # needs to be done, including repository setup and initial update. 7 | 8 | set -e 9 | 10 | SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}") 11 | SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") 12 | REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") 13 | 14 | # shellcheck source=./lib.sh 15 | source "$REPO_DIRPATH/lib.sh" 16 | 17 | # packer and/or a --build-arg define this envar value uniformly 18 | # for both VM and container image build workflows. 19 | req_env_vars PACKER_BUILD_NAME 20 | 21 | # Only enable updates-testing on all 'latest' Fedora images (except rawhide) 22 | # as a matter of general policy. Historically there have been many 23 | # problems with non-uniform behavior when both supported Fedora releases 24 | # receive container-related dependency updates at the same time. Since 25 | # the 'prior' release has the shortest support lifetime, keep it's behavior 26 | # stable by only using released updates. 27 | # shellcheck disable=SC2154 28 | if [[ "$PACKER_BUILD_NAME" == "fedora" ]] && [[ ! 
"$PACKER_BUILD_NAME" =~ "prior" ]]; then 29 | warn "Enabling updates-testing repository for $PACKER_BUILD_NAME" 30 | lilto ooe.sh $SUDO dnf install -y 'dnf-command(config-manager)' 31 | lilto ooe.sh $SUDO dnf config-manager setopt updates-testing.enabled=1 32 | else 33 | warn "NOT enabling updates-testing repository for $PACKER_BUILD_NAME" 34 | fi 35 | 36 | msg "Updating/Installing repos and packages for $OS_REL_VER" 37 | 38 | bigto ooe.sh $SUDO dnf update -y 39 | 40 | INSTALL_PACKAGES=(\ 41 | autoconf 42 | automake 43 | bash-completion 44 | bats 45 | bridge-utils 46 | btrfs-progs-devel 47 | buildah 48 | bzip2 49 | catatonit 50 | conmon 51 | containernetworking-plugins 52 | containers-common 53 | criu 54 | crun 55 | crun-wasm 56 | curl 57 | device-mapper-devel 58 | dnsmasq 59 | docker-distribution 60 | e2fsprogs-devel 61 | emacs-nox 62 | fakeroot 63 | file 64 | findutils 65 | fuse3 66 | fuse3-devel 67 | gcc 68 | gh 69 | git 70 | git-daemon 71 | glib2-devel 72 | glibc-devel 73 | glibc-langpack-en 74 | glibc-static 75 | gnupg 76 | go-md2man 77 | golang 78 | golang-google-grpc 79 | golang-google-protobuf 80 | gpgme 81 | gpgme-devel 82 | grubby 83 | hostname 84 | httpd-tools 85 | iproute 86 | iptables 87 | jq 88 | koji 89 | krb5-workstation 90 | libassuan 91 | libassuan-devel 92 | libblkid-devel 93 | libcap-devel 94 | libffi-devel 95 | libgpg-error-devel 96 | libmsi1 97 | libnet 98 | libnet-devel 99 | libnl3-devel 100 | libseccomp 101 | libseccomp-devel 102 | libselinux-devel 103 | libtool 104 | libxml2-devel 105 | libxslt-devel 106 | lsof 107 | make 108 | man-db 109 | msitools 110 | nfs-utils 111 | nmap-ncat 112 | openssl 113 | openssl-devel 114 | ostree-devel 115 | pandoc 116 | parallel 117 | passt 118 | perl-Clone 119 | perl-FindBin 120 | pigz 121 | pkgconfig 122 | podman 123 | podman-remote 124 | pre-commit 125 | procps-ng 126 | protobuf 127 | protobuf-c 128 | protobuf-c-devel 129 | protobuf-devel 130 | python3-fedora-distro-aliases 131 | python3-koji-cli-plugins 
132 | redhat-rpm-config 133 | rpcbind 134 | rsync 135 | runc 136 | sed 137 | ShellCheck 138 | skopeo 139 | slirp4netns 140 | socat 141 | sqlite-libs 142 | sqlite-devel 143 | squashfs-tools 144 | tar 145 | time 146 | unzip 147 | vim 148 | wget 149 | which 150 | xz 151 | zip 152 | zlib-devel 153 | zstd 154 | ) 155 | 156 | if [[ "$PACKER_BUILD_NAME" =~ fedora ]] && [[ "$OS_REL_VER" -ge 43 ]]; then 157 | INSTALL_PACKAGES+=( \ 158 | podman-sequoia 159 | ) 160 | # Rawhide images don't need these packages 161 | elif [[ "$PACKER_BUILD_NAME" =~ fedora ]]; then 162 | INSTALL_PACKAGES+=( \ 163 | python-pip-wheel 164 | python-setuptools-wheel 165 | python-toml 166 | python3-wheel 167 | python3-PyYAML 168 | python3-coverage 169 | python3-dateutil 170 | python3-devel 171 | python3-docker 172 | python3-fixtures 173 | python3-libselinux 174 | python3-libsemanage 175 | python3-libvirt 176 | python3-pip 177 | python3-psutil 178 | python3-pylint 179 | python3-pyxdg 180 | python3-requests 181 | python3-requests-mock 182 | ) 183 | fi 184 | 185 | # When installing during a container-build, having this present 186 | # will seriously screw up future dnf operations in very non-obvious ways. 187 | # bpftrace is only needed on the host as containers cannot run ebpf 188 | # programs anyway and it is very big so we should not bloat the container 189 | # images unnecessarily. 190 | if ! ((CONTAINER)); then 191 | INSTALL_PACKAGES+=( \ 192 | bpftrace 193 | composefs 194 | container-selinux 195 | fuse-overlayfs 196 | libguestfs-tools 197 | selinux-policy-devel 198 | policycoreutils 199 | ) 200 | 201 | # Extra packages needed by podman-machine-os 202 | INSTALL_PACKAGES+=( \ 203 | podman-machine 204 | osbuild 205 | osbuild-tools 206 | osbuild-ostree 207 | xfsprogs 208 | e2fsprogs 209 | ) 210 | fi 211 | 212 | 213 | # Download these package files, but don't install them; Any tests 214 | # wishing to, may install them using their native tools at runtime. 
215 | DOWNLOAD_PACKAGES=(\ 216 | parallel 217 | podman-docker 218 | python3-devel 219 | python3-pip 220 | python3-pytest 221 | python3-virtualenv 222 | ) 223 | 224 | msg "Installing general build/test dependencies" 225 | bigto $SUDO dnf install -y "${INSTALL_PACKAGES[@]}" 226 | 227 | msg "Downloading packages for optional installation at runtime, as needed." 228 | $SUDO mkdir -p "$PACKAGE_DOWNLOAD_DIR" 229 | cd "$PACKAGE_DOWNLOAD_DIR" 230 | lilto ooe.sh $SUDO dnf install -y 'dnf-command(download)' 231 | lilto $SUDO dnf download -y --resolve "${DOWNLOAD_PACKAGES[@]}" 232 | # Also cache the current/latest version of minikube 233 | # for use in some specialized testing. 234 | # Ref: https://minikube.sigs.k8s.io/docs/start/ 235 | $SUDO curl --fail --silent --location -O \ 236 | https://storage.googleapis.com/minikube/releases/latest/minikube-latest.x86_64.rpm 237 | cd - 238 | 239 | # Occasionally following an install, there are more updates available. 240 | # This may be due to activation of suggested/recommended dependency resolution. 241 | lilto $SUDO dnf update -y 242 | -------------------------------------------------------------------------------- /imgts/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script is set as, and intended to run as the `imgts` container's 4 | # entrypoint. It's purpose is to operate on a list of VM Images, adding 5 | # metadata to each. It must be executed alongside any repository's 6 | # automation, which produces or uses GCP VMs and/or AWS EC2 instances. 7 | # 8 | # N/B: Timestamp updating is not required for AWS EC2 images as they 9 | # have a 'LastLaunchedTime' attribute which is updated automatically. 10 | # However, updating their permanent=true tag (when appropriate) and 11 | # a reference to the build ID and repo name are all useful. 
12 | 13 | set -e 14 | 15 | # shellcheck source=imgts/lib_entrypoint.sh 16 | source /usr/local/bin/lib_entrypoint.sh 17 | 18 | req_env_vars GCPJSON GCPNAME GCPPROJECT IMGNAMES BUILDID REPOREF 19 | 20 | # Set this to 1 for testing 21 | DRY_RUN="${DRY_RUN:-0}" 22 | 23 | # These must be defined by the cirrus-ci job using the container 24 | # shellcheck disable=SC2154 25 | ARGS=(\ 26 | "--update-labels=last-used=$(date +%s)" 27 | "--update-labels=build-id=$BUILDID" 28 | "--update-labels=repo-ref=$REPOREF" 29 | "--update-labels=project=$GCPPROJECT" 30 | ) 31 | 32 | # Must be defined by the cirrus-ci job using the container 33 | # shellcheck disable=SC2154 34 | [[ -n "$IMGNAMES" ]] || \ 35 | die 1 "No \$IMGNAMES were specified." 36 | 37 | # Under some runtime conditions, not all images may be available 38 | REQUIRE_ALL=${REQUIRE_ALL:-1} 39 | 40 | # Don't allow one bad apple to ruin the whole batch 41 | ERRIMGS='' 42 | 43 | # It's possible for multiple simultaneous label updates to clash 44 | CLASHMSG='Labels fingerprint either invalid or resource labels have changed' 45 | 46 | # This function accepts a single argument: A Cirrus-CI build ID. The 47 | # function looks up the build from Cirrus-CI to determine if it occured 48 | # on a non-main branch. If so the function returns zero. Otherwise, it 49 | # returns 1 for executions on behalf of the `main` branch, all PRs and 50 | # all tags. It will fully exit non-zero in case of any error. 51 | is_release_branch_image(){ 52 | local buildId api query result prefix branch tag 53 | buildId=$1 54 | api="https://api.cirrus-ci.com/graphql" 55 | query="{ 56 | \"query\": \"query { 57 | build(id: $buildId) { 58 | branch 59 | tag 60 | pullRequest 61 | } 62 | }\" 63 | }" 64 | 65 | # This is mandatory, must never be unset, empty, or shorter than an actual ID. 66 | # Normally about 16-characters long. 
67 | if ((${#buildId}<14)); then 68 | die 1 "Empty/invalid BuildId '$buildId' passed to is_release_branch_image()" 69 | fi 70 | 71 | prefix=".data.build" 72 | result=$(curl --silent --location \ 73 | --request POST --data @- --url "$api" <<<"$query") \ 74 | || \ 75 | die 2 "Error communicating with GraphQL API $api: $result" 76 | 77 | # Any problems with the GraphQL reply or mismatch of the JSON 78 | # structure (specified in query) is an error that operators should 79 | # be made aware of. 80 | if ! jq -e "$prefix" <<<"$result" &> /dev/null; then 81 | die 3 "Response from Cirrus API query '$query' has unexpected/invalid JSON structure: 82 | $result" 83 | fi 84 | 85 | # Cirrus-CI always sets some branch value for all execution contexts 86 | if ! branch=$(jq -e --raw-output "${prefix}.branch" <<<"$result"); then 87 | die 4 "Empty/null branch value returned for build '$buildId': 88 | $result" 89 | fi 90 | 91 | # This value will be empty/null for PRs and branch builds 92 | tag=$(jq --raw-output "${prefix}.tag" <<<"$result" | sed 's/null//g') 93 | 94 | # Cirrus-CI sets `branch=pull/#` for pull-requests, dependabot creates 95 | if [[ -z "$tag" && "$branch" =~ ^(v|release-)v?[0-9]+.* ]]; then 96 | msg "Found build $buildId for release branch '$branch'." 97 | return 0 98 | fi 99 | 100 | msg "Found build '$buildId' for non-release branch '$branch' and/or tag '$tag' (may be empty)." 101 | return 1 102 | } 103 | 104 | unset SET_PERM 105 | if is_release_branch_image $BUILDID; then 106 | ARGS+=("--update-labels=permanent=true") 107 | SET_PERM=1 108 | fi 109 | 110 | if ((DRY_RUN)); then 111 | GCLOUD="echo $GCLOUD" 112 | AWS="echo $AWS" 113 | DRPREFIX="DRY-RUN: " 114 | else 115 | # This outputs a status message to stderr 116 | gcloud_init 117 | fi 118 | 119 | # Must be defined by the cirrus-ci job using the container 120 | # shellcheck disable=SC2154 121 | for image in $IMGNAMES 122 | do 123 | if ! 
OUTPUT=$($GCLOUD compute images update "$image" "${ARGS[@]}" 2>&1); then 124 | msg "$OUTPUT" 125 | if grep -iq "$CLASHMSG" <<<"$OUTPUT"; then 126 | # Updating the 'last-used' label is most important. 127 | # Assume clashing update did this for us. 128 | msg "Warning: Detected simultaneous label update, ignoring clash." 129 | continue 130 | fi 131 | msg "Detected update error for '$image'" > /dev/stderr 132 | ERRIMGS+=" $image" 133 | else 134 | # Display the URI to the updated image for reference 135 | if ((SET_PERM)); then 136 | msg "${DRPREFIX}IMAGE $image MARKED FOR PERMANENT RETENTION" 137 | else 138 | msg "${DRPREFIX}Updated image $image last-used timestamp" 139 | fi 140 | fi 141 | done 142 | 143 | # Not all repos use EC2 instances, only touch AWS if both 144 | # EC2IMGNAMES and AWSINI are set. 145 | if [[ -n "$EC2IMGNAMES" ]]; then 146 | msg "---" 147 | req_env_vars AWSINI BUILDID REPOREF 148 | 149 | if ! ((DRY_RUN)); then 150 | aws_init 151 | # aws_init() has no output because that would break in other contexts. 152 | msg "Activated AWS CLI for service acount." 153 | fi 154 | 155 | for image in $EC2IMGNAMES; do 156 | if ((DRY_RUN)); then 157 | # AWS=echo; no lookup will actually happen 158 | amiid="dry-run-$image" 159 | elif ! amiid=$(get_ec2_ami "$image"); then 160 | ERRIMGS+=" $image" 161 | continue 162 | fi 163 | 164 | # AWS deliberately left unquoted for intentional word-splitting. 165 | # N/B: For $DRY_RUN==1: AWS=echo 166 | # shellcheck disable=SC2206 167 | awscmd=(\ 168 | $AWS ec2 create-tags 169 | --resources "$amiid" --tags 170 | "Key=build-id,Value=$BUILDID" 171 | "Key=repo-ref,Value=$REPOREF" 172 | ) 173 | if ((SET_PERM)); then 174 | awscmd+=("Key=permanent,Value=true") 175 | fi 176 | 177 | if ! OUTPUT=$("${awscmd[@]}"); then 178 | ERRIMGS+=" $image" 179 | elif ((SET_PERM)); then 180 | msg "${DRPREFIX}IMAGE $image ($amiid) MARKED FOR PERMANENT RETENTION" 181 | else 182 | msg "${DRPREFIX}Updated image $image ($amiid) metadata." 
183 | fi 184 | 185 | # Ensure image wasn't previously marked as deprecated. Ignore 186 | # confirmation output. 187 | $AWS ec2 disable-image-deprecation --image-id "$amiid" > /dev/null 188 | done 189 | fi 190 | 191 | if [[ -n "$ERRIMGS" ]]; then 192 | die_or_warn=die 193 | ((REQUIRE_ALL)) || die_or_warn=warn 194 | $die_or_warn 2 "Failed to update one or more image timestamps: $ERRIMGS" 195 | fi 196 | -------------------------------------------------------------------------------- /.github/workflows/pr_image_id.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Use the latest published version of the cirrus-ci_retrospective container 4 | # to determine the execution context of _this_ workflow run. If it is a 5 | # pull request, post the to-be VM/Container image ID string as a comment. 6 | 7 | on: 8 | check_suite: # ALWAYS triggered from the default branch 9 | # Ref: https://help.github.com/en/actions/reference/events-that-trigger-workflows#check-suite-event-check_suite 10 | types: 11 | - completed 12 | 13 | jobs: 14 | comment_image_id: 15 | # Do not execute for other github applications, only works with cirrus-ci 16 | if: github.event.check_suite.app.name == 'Cirrus CI' 17 | runs-on: ubuntu-latest 18 | env: 19 | # This is the last component of the gist URL 20 | # i.e. https://gist.github.com// 21 | built_images_gist_id: f505b6fb78db279855862e035629f8aa 22 | steps: 23 | - name: Execute latest upstream cirrus-ci_retrospective 24 | uses: docker://quay.io/libpod/cirrus-ci_retrospective:latest 25 | env: 26 | GITHUB_TOKEN: ${{ github.token }} 27 | # Writes $GITHUB_WORKSPACE/cirrus-ci_retrospective.json 28 | 29 | - name: Debug cirrus-ci_retrospective JSON 30 | run: jq --indent 4 --color-output . 
$GITHUB_WORKSPACE/cirrus-ci_retrospective.json 31 | 32 | - name: Load JSON into github workflow output variables 33 | id: retro 34 | run: | 35 | ccirjson=$GITHUB_WORKSPACE/cirrus-ci_retrospective.json 36 | prn=$(jq --raw-output \ 37 | '.[] | select(.name == "'success'") | .build.pullRequest' \ 38 | "$ccirjson") 39 | bid=$(jq --raw-output \ 40 | '.[] | select(.name == "'success'") | .build.id' \ 41 | "$ccirjson") 42 | status=$(jq --raw-output \ 43 | '.[] | select(.name == "'success'") | .status' \ 44 | "$ccirjson") 45 | 46 | if [[ -n "$prn" ]] && \ 47 | [[ "$prn" != "null" ]] && \ 48 | [[ $prn -gt 0 ]] && \ 49 | [[ "$status" == "COMPLETED" ]] 50 | then 51 | printf "prn=%s\n" "$prn" >> $GITHUB_OUTPUT 52 | printf "bid=%s\n" "$bid" >> $GITHUB_OUTPUT 53 | printf "is_pr=%s\n" "true" >> $GITHUB_OUTPUT 54 | else 55 | printf "prn=%s\n" "0" >> $GITHUB_OUTPUT 56 | printf "bid=%s\n" "0" >> $GITHUB_OUTPUT 57 | printf "is_pr=%s\n" "false" >> $GITHUB_OUTPUT 58 | fi 59 | 60 | - if: steps.retro.outputs.is_pr == 'true' 61 | uses: actions/checkout@v6 62 | with: 63 | persist-credentials: false 64 | 65 | - if: steps.retro.outputs.is_pr == 'true' 66 | name: Retrieve and process any manifest artifacts 67 | # Use the CCIA image produced by the `Build Tooling images` 68 | # task of the PR we're looking at. This allows testing 69 | # of changes to the CCIA container before merging into `main` 70 | # (where this workflow runs from). If that should fail, 71 | # fall back to using the latest built CCIA image. 
72 | run: | 73 | PODMAN="podman run --rm -v $GITHUB_WORKSPACE:/data -w /data" 74 | $PODMAN quay.io/libpod/ccia:latest --verbose "${{ steps.retro.outputs.bid }}" ".*/manifest.json" 75 | 76 | - if: steps.retro.outputs.is_pr == 'true' 77 | name: Count the number of manifest.json files downloaded 78 | id: manifests 79 | run: | 80 | dled=$(find $GITHUB_WORKSPACE -type f -name 'manifest.json' -not -path '*fake_manifests/*/manifest.json' | wc -l) 81 | if [[ "$dled" =~ ^[0-9]+$ ]]; then 82 | printf "count=%s\n" "$dled" >> $GITHUB_OUTPUT 83 | else 84 | printf "count=0\n" >> $GITHUB_OUTPUT 85 | fi 86 | 87 | - if: steps.manifests.outputs.count > 0 88 | name: Extract build details from manifest files 89 | env: 90 | FLTR: >- 91 | {"stage": .builds[].custom_data.STAGE, 92 | "name": .builds[].name, 93 | "sfx": .builds[].custom_data.IMG_SFX, 94 | "task": .builds[].custom_data.TASK} 95 | run: | 96 | cd $GITHUB_WORKSPACE 97 | find ./ -type f -name 'manifest.json' -print0 | \ 98 | xargs --null jq -e -c "$FLTR" | \ 99 | jq -e -s '.' > ./built_images.json 100 | 101 | - if: steps.manifests.outputs.count > 0 102 | name: Debug built_images.json contents 103 | run: | 104 | jq --color-output . $GITHUB_WORKSPACE/built_images.json 105 | 106 | - if: steps.manifests.outputs.count > 0 107 | id: body 108 | name: Format PR-comment body 109 | # Consumes $GITHUB_WORKSPACE/built_images.json 110 | run: .github/actions/bin/create_image_table.py 111 | 112 | - if: steps.manifests.outputs.count > 0 113 | name: Debug images.md contents 114 | # Produced by create_image_table.py 115 | run: cat $GITHUB_WORKSPACE/images.md 116 | 117 | - if: steps.manifests.outputs.count > 0 118 | name: Debug images.json contents 119 | # Produced by create_image_table.py 120 | run: jq --color-output . 
$GITHUB_WORKSPACE/images.json 121 | 122 | # jungwinter/comment cannot consume a file as comment input 123 | - if: steps.manifests.outputs.count > 0 124 | name: Debug PR comment markdown 125 | # Use a here-document to display to avoid any 126 | # problems with passing special-characters into echo 127 | # The quoted-EOD prevents any shell interpretation. 128 | run: | 129 | cat <<"EOD" 130 | ${{ env.IMAGE_TABLE }} 131 | EOD 132 | 133 | - if: steps.manifests.outputs.count > 0 134 | name: Post PR comment with image name/id table 135 | uses: thollander/actions-comment-pull-request@v3 136 | with: 137 | pr-number: '${{ steps.retro.outputs.prn }}' 138 | message: | 139 | ${{ env.IMAGE_TABLE }} 140 | 141 | # Ref: https://github.com/marketplace/actions/deploy-to-gist 142 | - if: steps.manifests.outputs.count > 0 143 | name: Publish image name/id MD table to gist 144 | uses: exuanbo/actions-deploy-gist@v1.1.4 145 | with: 146 | token: ${{ secrets.IMG_GIST_TOKEN }} 147 | gist_id: ${{ env.built_images_gist_id }} 148 | file_path: images.md 149 | file_type: text 150 | - if: steps.manifests.outputs.count > 0 151 | name: Publish image name/id JSON table to gist 152 | uses: exuanbo/actions-deploy-gist@v1.1.4 153 | with: 154 | token: ${{ secrets.IMG_GIST_TOKEN }} 155 | gist_id: ${{ env.built_images_gist_id }} 156 | file_path: images.json 157 | file_type: text 158 | -------------------------------------------------------------------------------- /get_ci_vm/bad_repo_test/hack/get_ci_vm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | RED="\e[1;36;41m" 6 | YEL="\e[1;33;44m" 7 | NOR="\e[0m" 8 | USAGE_WARNING=" 9 | ${YEL}WARNING: This will not work without local sudo access to run podman,${NOR} 10 | ${YEL}and prior authorization to use the oci-seccomp-bpf-hook GCP project. 
Also,${NOR} 11 | ${YEL}possession of the proper ssh private key is required.${NOR} 12 | " 13 | # TODO: Many/most of these values should come from .cirrus.yml 14 | ZONE="us-central1-c" 15 | CPUS="2" 16 | MEMORY="4Gb" 17 | DISK="200" 18 | PROJECT="oci-seccomp-bpf-hook" 19 | GOSRC="/var/tmp/go/src/github.com/containers/oci-seccomp-bpf-hook" 20 | GCLOUD_IMAGE=${GCLOUD_IMAGE:-quay.io/cevich/gcloud_centos:latest} 21 | GCLOUD_SUDO=${GCLOUD_SUDO-sudo} 22 | SSHUSER="root" 23 | 24 | # Shared tmp directory between container and us 25 | TMPDIR=$(mktemp -d --tmpdir $(basename $0)_tmpdir_XXXXXX) 26 | 27 | SECCOMPHOOKROOT=$(realpath "$(dirname $0)/../") 28 | # else: Assume $PWD is the root of the oci-seccomp-bpf-hook repository 29 | [[ "$SECCOMPHOOKROOT" != "/" ]] || SECCOMPHOOKROOT=$PWD 30 | 31 | # Command shortcuts save some typing (assumes $SECCOMPHOOKROOT is subdir of $HOME) 32 | PGCLOUD="$GCLOUD_SUDO podman run -it --rm -e AS_ID=$UID -e AS_USER=$USER --security-opt label=disable -v $TMPDIR:$HOME -v $HOME/.config/gcloud:$HOME/.config/gcloud -v $HOME/.config/gcloud/ssh:$HOME/.ssh -v $SECCOMPHOOKROOT:$SECCOMPHOOKROOT $GCLOUD_IMAGE --configuration=oci-seccomp-bpf-hook --project=$PROJECT" 33 | SCP_CMD="$PGCLOUD compute scp --zone=$ZONE" 34 | 35 | 36 | showrun() { 37 | if [[ "$1" == "--background" ]] 38 | then 39 | shift 40 | # Properly escape any nested spaces, so command can be copy-pasted 41 | echo '+ '$(printf " %q" "$@")' &' > /dev/stderr 42 | "$@" & 43 | echo -e "${RED}${NOR}" 44 | else 45 | echo '+ '$(printf " %q" "$@") > /dev/stderr 46 | "$@" 47 | fi 48 | } 49 | 50 | cleanup() { 51 | RET=$? 
52 | set +e 53 | wait 54 | 55 | # Not always called from an exit handler, but should always exit when called 56 | exit $RET 57 | } 58 | trap cleanup EXIT 59 | 60 | delvm() { 61 | echo -e "\n" 62 | echo -e "\n${YEL}Offering to Delete $VMNAME ${RED}(Might take a minute or two)${NOR}" 63 | echo -e "\n${YEL}Note: It's safe to answer N, then re-run script again later.${NOR}" 64 | showrun $CLEANUP_CMD # prompts for Yes/No 65 | cleanup 66 | } 67 | 68 | image_hints() { 69 | _BIS=$(grep -E -m 1 '_BUILT_IMAGE_SUFFIX:[[:space:]+"[[:print:]]+"' \ 70 | "$SECCOMPHOOKROOT/.cirrus.yml" | cut -d: -f 2 | tr -d '"[:blank:]') 71 | grep -E '[[:space:]]+[[:alnum:]].+_CACHE_IMAGE_NAME:[[:space:]+"[[:print:]]+"' \ 72 | "$SECCOMPHOOKROOT/.cirrus.yml" | cut -d: -f 2 | tr -d '"[:blank:]' | \ 73 | sed -r -e "s/\\\$[{]_BUILT_IMAGE_SUFFIX[}]/$_BIS/" | sort -u 74 | } 75 | 76 | show_usage() { 77 | echo -e "\n${RED}ERROR: $1${NOR}" 78 | echo -e "${YEL}Usage: $(basename $0) ${NOR}" 79 | echo "" 80 | if [[ -r ".cirrus.yml" ]] 81 | then 82 | echo -e "${YEL}Some possible image_name values (from .cirrus.yml):${NOR}" 83 | image_hints 84 | echo "" 85 | fi 86 | exit 1 87 | } 88 | 89 | get_env_vars() { 90 | python3 -c ' 91 | import yaml 92 | env=yaml.load(open(".cirrus.yml"), Loader=yaml.SafeLoader)["env"] 93 | keys=[k for k in env if "ENCRYPTED" not in str(env[k])] 94 | for k,v in env.items(): 95 | v=str(v) 96 | if "ENCRYPTED" not in v: 97 | print("{0}=\"{1}\"".format(k, v)) 98 | ' 99 | } 100 | 101 | parse_args(){ 102 | echo -e "$USAGE_WARNING" 103 | 104 | if [[ "$USER" =~ "root" ]] 105 | then 106 | show_usage "This script must be run as a regular user." 107 | fi 108 | 109 | ENVS="$(get_env_vars)" 110 | IMAGE_NAME="$1" 111 | if [[ -z "$IMAGE_NAME" ]] 112 | then 113 | show_usage "No image-name specified." 
114 | fi 115 | 116 | SETUP_CMD="env $ENVS $GOSRC/contrib/cirrus/setup.sh" 117 | VMNAME="${VMNAME:-${USER}-${IMAGE_NAME}}" 118 | CREATE_CMD="$PGCLOUD compute instances create --zone=$ZONE --image-project=libpod-218412 --image=${IMAGE_NAME} --custom-cpu=$CPUS --custom-memory=$MEMORY --boot-disk-size=$DISK --labels=in-use-by=$USER $VMNAME" 119 | SSH_CMD="$PGCLOUD compute ssh --zone=$ZONE $SSHUSER@$VMNAME" 120 | CLEANUP_CMD="$PGCLOUD compute instances delete --zone=$ZONE --delete-disks=all $VMNAME" 121 | } 122 | 123 | ##### main 124 | 125 | [[ "${SECCOMPHOOKROOT%%${SECCOMPHOOKROOT##$HOME}}" == "$HOME" ]] || \ 126 | show_usage "Repo clone must be sub-dir of $HOME: $SECCOMPHOOKROOT" 127 | 128 | cd "$SECCOMPHOOKROOT" 129 | 130 | parse_args "$@" 131 | 132 | # Ensure mount-points and data directories exist on host as $USER. Also prevents 133 | # permission-denied errors during cleanup() b/c `sudo podman` created mount-points 134 | # owned by root. 135 | mkdir -p $TMPDIR/${SECCOMPHOOKROOT##$HOME} 136 | mkdir -p $TMPDIR/.ssh 137 | mkdir -p {$HOME,$TMPDIR}/.config/gcloud/ssh 138 | chmod 700 {$HOME,$TMPDIR}/.config/gcloud/ssh $TMPDIR/.ssh 139 | 140 | cd $SECCOMPHOOKROOT 141 | 142 | # Attempt to determine if named 'oci-seccomp-bpf-hook' gcloud configuration exists 143 | showrun $PGCLOUD info > $TMPDIR/gcloud-info 144 | if grep -E -q "Account:.*None" $TMPDIR/gcloud-info 145 | then 146 | echo -e "\n${YEL}WARNING: Can't find gcloud configuration for 'oci-seccomp-bpf-hook', running init.${NOR}" 147 | echo -e " ${RED}Please choose '#1: Re-initialize' and 'login' if asked.${NOR}" 148 | echo -e " ${RED}Please set Compute Region and Zone (if asked) to 'us-central1-b'.${NOR}" 149 | echo -e " ${RED}DO NOT set any password for the generated ssh key.${NOR}" 150 | showrun $PGCLOUD init --project=$PROJECT --console-only --skip-diagnostics 151 | 152 | # Verify it worked (account name == someone@example.com) 153 | $PGCLOUD info > $TMPDIR/gcloud-info-after-init 154 | if grep -E -q 
"Account:.*None" $TMPDIR/gcloud-info-after-init 155 | then 156 | echo -e "${RED}ERROR: Could not initialize 'oci-seccomp-bpf-hook' configuration in gcloud.${NOR}" 157 | exit 5 158 | fi 159 | 160 | # If this is the only config, make it the default to avoid persistent warnings from gcloud 161 | [[ -r "$HOME/.config/gcloud/configurations/config_default" ]] || \ 162 | ln "$HOME/.config/gcloud/configurations/config_oci-seccomp-bpf-hook" \ 163 | "$HOME/.config/gcloud/configurations/config_default" 164 | fi 165 | 166 | # Couldn't make rsync work with gcloud's ssh wrapper: ssh-keys generated on the fly 167 | TARBALL=$VMNAME.tar.bz2 168 | echo -e "\n${YEL}Packing up local repository into a tarball.${NOR}" 169 | showrun --background tar cjf $TMPDIR/$TARBALL --warning=no-file-changed --exclude-vcs-ignores -C $SECCOMPHOOKROOT . 170 | 171 | trap delvm INT # Allow deleting VM if CTRL-C during create 172 | # This fails if VM already exists: permit this usage to re-init 173 | echo -e "\n${YEL}Trying to create a VM named $VMNAME\n${RED}(might take a minute/two. 
Errors ignored).${NOR}" 174 | showrun $CREATE_CMD || true # allow re-running commands below when "delete: N" 175 | 176 | # Any subsequent failure should prompt for VM deletion 177 | trap delvm EXIT 178 | 179 | echo -e "\n${YEL}Retrying for 30s for ssh port to open (may give some errors)${NOR}" 180 | trap 'COUNT=9999' INT 181 | ATTEMPTS=10 182 | for (( COUNT=1 ; COUNT <= $ATTEMPTS ; COUNT++ )) 183 | do 184 | if $SSH_CMD --command "true"; then break; else sleep 3s; fi 185 | done 186 | if (( COUNT > $ATTEMPTS )) 187 | then 188 | echo -e "\n${RED}Failed${NOR}" 189 | exit 7 190 | fi 191 | echo -e "${YEL}Got it${NOR}" 192 | 193 | echo -e "\n${YEL}Removing and re-creating $GOSRC on $VMNAME.${NOR}" 194 | showrun $SSH_CMD --command "rm -rf $GOSRC" 195 | showrun $SSH_CMD --command "mkdir -p $GOSRC" 196 | 197 | echo -e "\n${YEL}Transferring tarball to $VMNAME.${NOR}" 198 | wait 199 | showrun $SCP_CMD $HOME/$TARBALL $SSHUSER@$VMNAME:/tmp/$TARBALL 200 | 201 | echo -e "\n${YEL}Unpacking tarball into $GOSRC on $VMNAME.${NOR}" 202 | showrun $SSH_CMD --command "tar xjf /tmp/$TARBALL -C $GOSRC" 203 | 204 | echo -e "\n${YEL}Removing tarball on $VMNAME.${NOR}" 205 | showrun $SSH_CMD --command "rm -f /tmp/$TARBALL" 206 | 207 | echo -e "\n${YEL}Executing environment setup${NOR}" 208 | showrun $SSH_CMD --command "$SETUP_CMD" 209 | 210 | VMIP=$($PGCLOUD compute instances describe $VMNAME --format='get(networkInterfaces[0].accessConfigs[0].natIP)') 211 | 212 | echo -e "\n${YEL}Connecting to $VMNAME${NOR}\nPublic IP Address: $VMIP\n${RED}(option to delete VM upon logout).${NOR}\n" 213 | showrun $SSH_CMD -- -t "cd $GOSRC && exec env $ENVS bash -il" 214 | -------------------------------------------------------------------------------- /cache_images/cloud.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | variables: # Empty value means it must be passed in on command-line 4 | # For produced images, to prevent name clashes 5 | IMG_SFX: 6 | 7 | 
# Required for ssh private key access 8 | TEMPDIR: 9 | 10 | # GCE Project ID where images will be produced 11 | GCP_PROJECT_ID: "libpod-218412" 12 | 13 | # Required path to service account credentials file 14 | GAC_FILEPATH: "{{env `GAC_FILEPATH`}}" 15 | 16 | # Allows providing handy cross-reference to the build log 17 | CIRRUS_TASK_ID: "{{env `CIRRUS_TASK_ID`}}" 18 | 19 | # See Makefile for definitions 20 | FEDORA_RELEASE: "{{env `FEDORA_RELEASE`}}" 21 | PRIOR_FEDORA_RELEASE: "{{env `PRIOR_FEDORA_RELEASE`}}" 22 | RAWHIDE_RELEASE: "{{env `RAWHIDE_RELEASE`}}" 23 | DEBIAN_RELEASE: "{{env `DEBIAN_RELEASE`}}" 24 | 25 | builders: 26 | - &gce_hosted_image 27 | name: 'debian' 28 | type: 'googlecompute' 29 | # N/B: This implies base images always built with same IMG_SFX. 30 | source_image: '{{ build_name }}-b{{user `IMG_SFX`}}' 31 | # Prefix IMG_SFX with "c" so this is never confused with a base_image name 32 | image_name: '{{ build_name }}-c{{user `IMG_SFX`}}' 33 | image_family: '{{ build_name }}-cache' 34 | image_description: 'Built in https://cirrus-ci.com/task/{{user `CIRRUS_TASK_ID`}}' 35 | project_id: '{{user `GCP_PROJECT_ID`}}' 36 | account_file: '{{user `GAC_FILEPATH`}}' 37 | zone: 'us-central1-a' 38 | disk_size: 20 # REQUIRED: Runtime allocation > this value 39 | disable_default_service_account: true 40 | labels: &gce_labels # For the VM 41 | sfx: '{{user `IMG_SFX`}}' 42 | src: '{{ build_name }}-b{{user `IMG_SFX` }}' 43 | release: 'debian-{{user `DEBIAN_RELEASE` | clean_resource_name}}' 44 | stage: cache 45 | ssh_username: packer # arbitrary, packer will create & setup w/ temp. 
keypair 46 | ssh_pty: 'true' 47 | temporary_key_pair_type: ed25519 48 | ssh_clear_authorized_keys: true 49 | # Permit running nested VM's to support specialized testing 50 | image_licenses: ["projects/vm-options/global/licenses/enable-vmx"] 51 | 52 | - <<: *gce_hosted_image 53 | name: 'rawhide' 54 | # The latest fedora base image will be "upgraded" to rawhide 55 | source_image: 'fedora-b{{user `IMG_SFX`}}' 56 | labels: 57 | <<: *gce_labels 58 | src: 'fedora-b{{user `IMG_SFX` }}' 59 | release: 'rawhide-{{user `RAWHIDE_RELEASE`}}' 60 | 61 | - <<: *gce_hosted_image 62 | name: 'fedora' 63 | labels: &fedora_gce_labels 64 | <<: *gce_labels 65 | release: 'fedora-{{user `FEDORA_RELEASE`}}' 66 | 67 | - <<: *gce_hosted_image 68 | name: 'prior-fedora' 69 | labels: *fedora_gce_labels 70 | 71 | - &aux_fed_img 72 | <<: *gce_hosted_image 73 | name: 'build-push' 74 | source_image: 'fedora-b{{user `IMG_SFX`}}' 75 | source_image_family: 'fedora-base' 76 | labels: *fedora_gce_labels 77 | 78 | - <<: *aux_fed_img 79 | name: 'fedora-netavark' 80 | 81 | # ref: https://www.packer.io/plugins/builders/amazon/ebs 82 | - &fedora-aws 83 | name: 'fedora-aws' 84 | type: 'amazon-ebs' 85 | instance_type: 'm5zn.metal' 86 | source_ami_filter: # Will fail if >1 or no AMI found 87 | owners: 88 | # Docs are wrong, specifying the Account ID required to make AMIs private. 89 | # The Account ID is hard-coded here out of expediency, since passing in 90 | # more packer args from the command-line (in Makefile) is non-trivial. 91 | - &accountid '449134212816' 92 | # It's necessary to 'search' for the base-image by these criteria. If 93 | # more than one image is found, Packer will fail the build (and display 94 | # the conflicting AMI IDs). 
95 | filters: &ami_filters 96 | architecture: 'x86_64' 97 | image-type: 'machine' 98 | is-public: 'false' 99 | name: '{{build_name}}-b{{user `IMG_SFX`}}' 100 | root-device-type: 'ebs' 101 | state: 'available' 102 | virtualization-type: 'hvm' 103 | # In case of packer problem or ungraceful exit, don't wait for shutdown. 104 | # This doesn't always work properly, sometimes leaving EC2 instances in 105 | # a 'stopped' instead of terminated state :( 106 | shutdown_behavior: 'terminate' 107 | # If something goes wrong, remove the broken AMI. 108 | force_deregister: true # Remove AMI with same name if exists 109 | force_delete_snapshot: true # Also remove snapshots of force-removed AMI 110 | # Required for network access, must be the 'default' group used by Cirrus-CI 111 | security_group_id: "sg-042c75677872ef81c" 112 | # Prefix IMG_SFX with "c" so this is never confused with a base_image 113 | ami_name: '{{build_name}}-c{{user `IMG_SFX`}}' 114 | ami_description: 'Built in https://cirrus-ci.com/task/{{user `CIRRUS_TASK_ID`}}' 115 | ebs_optimized: true 116 | launch_block_device_mappings: 117 | - device_name: '/dev/sda1' 118 | volume_size: 200 119 | volume_type: 'gp2' 120 | delete_on_termination: true 121 | # These are critical and used by security-policy to enforce instance launch limits. 
122 | tags: &ami_tags 123 | # EC2 expects "Name" tag to be capitalized 124 | Name: '{{build_name}}-c{{user `IMG_SFX`}}' 125 | sfx: '{{user `IMG_SFX`}}' 126 | src: '{{.SourceAMI}}' # Generated AMI ID looked up at runtime 127 | automation: 'true' 128 | stage: 'cache' 129 | arch: 'x86_64' 130 | release: 'fedora-{{user `FEDORA_RELEASE`}}' 131 | run_tags: *ami_tags 132 | run_volume_tags: *ami_tags 133 | snapshot_tags: *ami_tags 134 | # Also required to make AMI private 135 | ami_users: 136 | - *accountid 137 | ssh_username: 'root' 138 | ssh_clear_authorized_keys: true 139 | # N/B: Required Packer >= 1.8.0 140 | # https://github.com/hashicorp/packer/issues/10074#issuecomment-1070469367 141 | temporary_key_pair_type: 'ed25519' 142 | 143 | - <<: *fedora-aws 144 | name: 'fedora-netavark-aws-arm64' 145 | source_ami_filter: 146 | owners: 147 | - *accountid 148 | filters: 149 | <<: *ami_filters 150 | architecture: 'arm64' 151 | name: 'fedora-aws-arm64-b{{user `IMG_SFX`}}' 152 | instance_type: 't4g.medium' # arm64 type 153 | tags: &netavark_tags 154 | <<: *ami_tags 155 | Name: '{{build_name}}-c{{user `IMG_SFX`}}' 156 | arch: 'arm64' 157 | run_tags: *netavark_tags 158 | run_volume_tags: *netavark_tags 159 | snapshot_tags: *netavark_tags 160 | 161 | - <<: *fedora-aws 162 | name: 'fedora-podman-aws-arm64' 163 | source_ami_filter: 164 | owners: 165 | - *accountid 166 | filters: 167 | <<: *ami_filters 168 | architecture: 'arm64' 169 | name: 'fedora-aws-arm64-b{{user `IMG_SFX`}}' 170 | instance_type: 't4g.medium' # arm64 type 171 | tags: &podman_tags 172 | <<: *ami_tags 173 | Name: '{{build_name}}-c{{user `IMG_SFX`}}' 174 | arch: 'arm64' 175 | run_tags: *podman_tags 176 | run_volume_tags: *podman_tags 177 | snapshot_tags: *podman_tags 178 | 179 | provisioners: 180 | - type: 'shell' 181 | inline: 182 | - 'set -e' 183 | - 'mkdir -p /var/tmp/automation_images' 184 | 185 | - type: 'file' 186 | source: '{{ pwd }}/' 187 | destination: "/var/tmp/automation_images" 188 | 189 | - only: 
['rawhide'] 190 | type: 'shell' 191 | expect_disconnect: true # VM will be rebooted at end of script 192 | inline: 193 | - 'set -e' 194 | - '/bin/bash /var/tmp/automation_images/cache_images/rawhide_setup.sh' 195 | 196 | - except: ['debian'] 197 | type: 'shell' 198 | inline: 199 | - 'set -e' 200 | - '/bin/bash /var/tmp/automation_images/cache_images/fedora_setup.sh' 201 | 202 | - only: ['debian'] 203 | type: 'shell' 204 | inline: 205 | - 'set -e' 206 | - 'env DEBIAN_FRONTEND=noninteractive /bin/bash /var/tmp/automation_images/cache_images/debian_setup.sh' 207 | 208 | post-processors: 209 | # This is critical for human-interaction. Contents will be used 210 | # to provide the image names and IDs to the user's PR. 211 | - - type: 'manifest' # writes packer-manifest.json 212 | output: 'cache_images/manifest.json' 213 | strip_path: true 214 | custom_data: 215 | IMG_SFX: '{{ user `IMG_SFX` }}' 216 | STAGE: 'cache' 217 | TASK: '{{user `CIRRUS_TASK_ID`}}' 218 | -------------------------------------------------------------------------------- /base_images/cloud.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | variables: # Empty value means it must be passed in on command-line 4 | # GCE Project ID where images will be produced 5 | GCP_PROJECT_ID: "libpod-218412" 6 | # Pre-existing google storage bucket w/ very short lifecycle enabled 7 | XFERBUCKET: "packer-import" 8 | # Required path to service account credentials file 9 | GAC_FILEPATH: "{{env `GAC_FILEPATH`}}" 10 | # Required for presenting output from qemu builders 11 | TTYDEV: 12 | # Required for 'make clean' support and not clobbering a memory-backed /tmp 13 | TEMPDIR: 14 | # Naming suffix for images to prevent clashes 15 | IMG_SFX: 16 | 17 | # Allows providing handy cross-reference to the build log 18 | CIRRUS_TASK_ID: "{{env `CIRRUS_TASK_ID`}}" 19 | 20 | # See Makefile for definitions 21 | FEDORA_RELEASE: "{{env `FEDORA_RELEASE`}}" 22 | FEDORA_IMAGE_URL: "{{env 
`FEDORA_IMAGE_URL`}}" 23 | FEDORA_CSUM_URL: "{{env `FEDORA_CSUM_URL`}}" 24 | 25 | PRIOR_FEDORA_RELEASE: "{{env `PRIOR_FEDORA_RELEASE`}}" 26 | PRIOR_FEDORA_IMAGE_URL: "{{env `PRIOR_FEDORA_IMAGE_URL`}}" 27 | PRIOR_FEDORA_CSUM_URL: "{{env `PRIOR_FEDORA_CSUM_URL`}}" 28 | 29 | DEBIAN_RELEASE: "{{env `DEBIAN_RELEASE`}}" 30 | DEBIAN_BASE_FAMILY: "{{env `DEBIAN_BASE_FAMILY`}}" 31 | 32 | 33 | builders: 34 | - name: 'debian' 35 | type: 'googlecompute' 36 | # Prefix IMG_SFX with "b" so this is never confused with a cache_image name 37 | image_name: '{{build_name}}-b{{user `IMG_SFX`}}' 38 | image_family: '{{build_name}}-base' 39 | image_description: 'Built in https://cirrus-ci.com/task/{{user `CIRRUS_TASK_ID`}}' 40 | source_image_family: '{{user `DEBIAN_BASE_FAMILY`}}' 41 | source_image_project_id: 'debian-cloud' 42 | project_id: '{{user `GCP_PROJECT_ID`}}' 43 | # Can't use env. var for this, googlecompute-import only supports filepath 44 | account_file: '{{user `GAC_FILEPATH`}}' 45 | zone: 'us-central1-a' 46 | disk_size: 20 47 | # Identify the instance 48 | labels: &imgcpylabels 49 | sfx: '{{user `IMG_SFX`}}' 50 | src: '{{user `DEBIAN_BASE_FAMILY`}}' 51 | stage: 'base' 52 | arch: 'x86_64' 53 | release: 'debian-{{user `DEBIAN_RELEASE` | clean_resource_name}}' 54 | # Gotcha: https://www.packer.io/docs/builders/googlecompute#gotchas 55 | ssh_username: 'packer' 56 | temporary_key_pair_type: ed25519 57 | ssh_clear_authorized_keys: true 58 | 59 | - &qemu_virt 60 | name: 'fedora' 61 | type: 'qemu' 62 | accelerator: "kvm" 63 | qemu_binary: '/usr/libexec/qemu-kvm' # Unique to CentOS, not fedora :( 64 | memory: 12288 65 | iso_url: '{{user `FEDORA_IMAGE_URL`}}' 66 | disk_image: true 67 | format: "raw" 68 | disk_size: 10240 69 | iso_checksum: 'file:{{user `FEDORA_CSUM_URL`}}' 70 | vm_name: "disk.raw" # actually qcow2, name required for post-processing 71 | output_directory: '{{ user `TEMPDIR` }}/{{build_name}}' # /.disk.raw 72 | boot_wait: '5s' 73 | shutdown_command: 'shutdown -h now' 74 
| headless: true 75 | # qemu_binary: "/usr/libexec/qemu-kvm" 76 | qemuargs: # List-of-list format required to override packer-generated args 77 | - - "-display" 78 | - "none" 79 | - - "-device" 80 | - "virtio-rng-pci" 81 | - - "-chardev" 82 | - "file,id=pts,path={{user `TTYDEV`}}" 83 | - - "-device" 84 | - "isa-serial,chardev=pts" 85 | - - "-netdev" 86 | - "user,id=net0,hostfwd=tcp::{{ .SSHHostPort }}-:22" 87 | - - "-device" 88 | - "virtio-net,netdev=net0" 89 | cd_label: "cidata" 90 | cd_files: 91 | - '{{user `TEMPDIR`}}/meta-data' 92 | - '{{user `TEMPDIR`}}/user-data' 93 | communicator: 'ssh' 94 | pause_before_connecting: '10s' 95 | ssh_private_key_file: '{{ user `TEMPDIR` }}/cidata.ssh' 96 | ssh_disable_agent_forwarding: true 97 | ssh_username: 'root' 98 | ssh_timeout: '5m' 99 | vnc_bind_address: 0.0.0.0 100 | 101 | - <<: *qemu_virt 102 | name: 'prior-fedora' 103 | iso_url: '{{user `PRIOR_FEDORA_IMAGE_URL`}}' 104 | iso_checksum: 'file:{{user `PRIOR_FEDORA_CSUM_URL`}}' 105 | 106 | # ref: https://www.packer.io/plugins/builders/amazon/ebs 107 | - &fedora-aws 108 | name: 'fedora-aws' 109 | type: 'amazon-ebs' 110 | source_ami_filter: 111 | # Many of these search filter values (like account ID and name) aren't publicized 112 | # anywhere. They were found by examining AWS EC2 AMIs published/referenced from 113 | # the AWS sections on https://fedoraproject.org/cloud/download 114 | owners: 115 | - &fedora_accountid 125523088429 116 | most_recent: true # Required b/c >1 search result likely to be returned 117 | filters: &ami_filters 118 | architecture: 'x86_64' 119 | image-type: 'machine' 120 | is-public: 'true' 121 | name: 'Fedora-Cloud-Base*-{{user `FEDORA_RELEASE`}}-*' 122 | root-device-type: 'ebs' 123 | state: 'available' 124 | virtualization-type: 'hvm' 125 | instance_type: 'm5zn.metal' 126 | # In case of packer problem or ungraceful exit, don't wait for shutdown. 
127 | # This doesn't always work properly, sometimes leaving EC2 instances in 128 | # a 'stopped' instead of terminated state :( 129 | shutdown_behavior: 'terminate' 130 | # If something goes wrong, remove the broken AMI. 131 | force_deregister: true # Remove AMI with same name if exists 132 | force_delete_snapshot: true # Also remove snapshots of force-removed AMI 133 | # Required for network access, must be the 'default' group used by Cirrus-CI 134 | security_group_id: "sg-042c75677872ef81c" 135 | # Prefix IMG_SFX with "b" so this is never confused with a cache_image 136 | ami_name: &ami_name '{{build_name}}-b{{user `IMG_SFX`}}' 137 | ami_description: 'Built in https://cirrus-ci.com/task/{{user `CIRRUS_TASK_ID`}}' 138 | ebs_optimized: true 139 | launch_block_device_mappings: 140 | - device_name: '/dev/sda1' 141 | volume_size: 20 142 | volume_type: 'gp2' 143 | delete_on_termination: true 144 | # These are critical and used by security-policy to enforce instance launch limits. 145 | tags: &awstags 146 | <<: *imgcpylabels 147 | # EC2 expects "Name" to be capitalized 148 | Name: *ami_name 149 | src: '{{.SourceAMI}}' 150 | automation: 'true' 151 | release: 'fedora-{{user `FEDORA_RELEASE`}}' 152 | run_tags: *awstags 153 | run_volume_tags: *awstags 154 | snapshot_tags: *awstags 155 | # This is necessary for security - The CI service accounts are not permitted 156 | # to use AMIs from any other account, including public ones. 
157 | ami_users: 158 | - &accountid '449134212816' 159 | ssh_username: 'fedora' 160 | ssh_clear_authorized_keys: true 161 | # N/B: Required Packer >= 1.8.0 162 | # https://github.com/hashicorp/packer/issues/10074#issuecomment-1070469367 163 | temporary_key_pair_type: 'ed25519' 164 | 165 | - <<: *fedora-aws 166 | name: 'fedora-aws-arm64' 167 | source_ami_filter: 168 | owners: 169 | - *fedora_accountid 170 | most_recent: true # Required b/c >1 search result likely to be returned 171 | filters: 172 | <<: *ami_filters 173 | architecture: 'arm64' 174 | instance_type: 't4g.medium' # arm64 type 175 | tags: &awsarm64tags 176 | <<: *awstags 177 | arch: 'arm64' 178 | run_tags: *awsarm64tags 179 | run_volume_tags: *awsarm64tags 180 | snapshot_tags: *awsarm64tags 181 | 182 | 183 | provisioners: # Debian images come bundled with GCE integrations provisioned 184 | - type: 'shell' 185 | inline: 186 | - 'set -e' 187 | - 'mkdir -p /var/tmp/automation_images' 188 | 189 | - type: 'file' 190 | source: '{{ pwd }}/' 191 | destination: '/var/tmp/automation_images/' 192 | 193 | - except: ['debian'] 194 | type: 'shell' 195 | inline: 196 | - 'set -e' 197 | - '/bin/bash /var/tmp/automation_images/base_images/fedora_base-setup.sh' 198 | 199 | - only: ['debian'] 200 | type: 'shell' 201 | inline: 202 | - 'set -e' 203 | - 'env DEBIAN_FRONTEND=noninteractive DEBIAN_RELEASE={{user `DEBIAN_RELEASE`}} /bin/bash /var/tmp/automation_images/base_images/debian_base-setup.sh' 204 | 205 | post-processors: 206 | # Must be double-nested to guarantee execution order 207 | - - only: ['fedora', 'prior-fedora'] 208 | type: "compress" 209 | output: '{{ user `TEMPDIR` }}/{{build_name}}/disk.raw.tar.gz' 210 | format: '.tar.gz' 211 | compression_level: 9 212 | - &gcp_import 213 | only: ['fedora'] 214 | type: "googlecompute-import" 215 | project_id: '{{user `GCP_PROJECT_ID`}}' 216 | account_file: '{{user `GAC_FILEPATH`}}' 217 | bucket: '{{user `XFERBUCKET`}}' 218 | gcs_object_name: '{{build_name}}-{{user 
`IMG_SFX`}}.tar.gz' 219 | image_name: "fedora-b{{user `IMG_SFX`}}" 220 | image_family: '{{build_name}}-base' 221 | image_description: 'Built in https://cirrus-ci.com/task/{{user `CIRRUS_TASK_ID`}}' 222 | image_labels: 223 | <<: *imgcpylabels 224 | src: 'fedoraproject' 225 | release: 'fedora-{{user `FEDORA_RELEASE`}}' 226 | - <<: *gcp_import 227 | only: ['prior-fedora'] 228 | image_name: "prior-fedora-b{{user `IMG_SFX`}}" 229 | image_family: '{{build_name}}-base' 230 | image_description: '{{user `PRIOR_FEDORA_IMAGE_URL`}}' 231 | image_labels: 232 | <<: *imgcpylabels 233 | src: 'fedoraproject' 234 | release: 'fedora-{{user `PRIOR_FEDORA_RELEASE`}}' 235 | # This is critical, especially for the aws builders. 236 | # Producing the cache-images from these base images 237 | # needs to lookup the runtime-produced AMI ID. 238 | - type: 'manifest' 239 | output: 'base_images/manifest.json' # Collected by Cirrus-CI 240 | strip_path: true 241 | custom_data: 242 | IMG_SFX: '{{ user `IMG_SFX` }}' 243 | STAGE: base 244 | TASK: '{{user `CIRRUS_TASK_ID`}}' 245 | -------------------------------------------------------------------------------- /imgobsolete/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script is set as, and intended to run as the `imgobsolete` container's 4 | # entrypoint. It searches for non-deprecated VM images with missing/invalid 5 | # metadata and those with an "old" `last-used` timestamp. Some number of 6 | # these images are randomly selected and made obsolete. They are also 7 | # marked with deletion-metadata some time in the future. 
8 | 9 | set -eo pipefail 10 | 11 | # shellcheck source=imgts/lib_entrypoint.sh 12 | source /usr/local/bin/lib_entrypoint.sh 13 | 14 | req_env_vars GCPJSON GCPNAME GCPPROJECT AWSINI IMG_SFX 15 | 16 | gcloud_init 17 | 18 | # Set this to 1 for testing 19 | DRY_RUN="${DRY_RUN:-0}" 20 | OBSOLETE_LIMIT=50 21 | THEFUTURE=$(date --date='+1 hour' +%s) 22 | TOO_OLD_DAYS='30' 23 | TOO_OLD_DESC="$TOO_OLD_DAYS days ago" 24 | THRESHOLD=$(date --date="$TOO_OLD_DESC" +%s) 25 | # Format Ref: https://cloud.google.com/sdk/gcloud/reference/topic/formats 26 | FORMAT='value[quote](name,selfLink,creationTimestamp,status,labels)' 27 | # Required variable set by caller 28 | # shellcheck disable=SC2154 29 | PROJRE="/v1/projects/$GCPPROJECT/global/" 30 | # Filter Ref: https://cloud.google.com/sdk/gcloud/reference/topic/filters 31 | # shellcheck disable=SC2154 32 | FILTER="selfLink~$PROJRE AND creationTimestamp<$(date --date="$TOO_OLD_DESC" --iso-8601=date)" 33 | TOOBSOLETE=$(mktemp -p '' toobsolete.XXXXXX) 34 | 35 | msg "${RED}Searching GCP images for candidates using filter: $FILTER" 36 | # Ref: https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#deprecating_an_image 37 | $GCLOUD compute images list --format="$FORMAT" --filter="$FILTER" | \ 38 | while read name selfLink creationTimestamp status labels 39 | do 40 | count_image 41 | reason="" 42 | created_ymd=$(date --date=$creationTimestamp --iso-8601=date) 43 | permanent=$(grep -E --only-matching --max-count=1 --ignore-case 'permanent=true' <<< $labels || true) 44 | last_used=$(grep -E --only-matching --max-count=1 'last-used=[[:digit:]]+' <<< $labels || true) 45 | 46 | LABELSFX="labels: '$labels'" 47 | 48 | # Any image marked with a `permanent=true` label should be retained forever. 49 | # Typically this will be due to it's use by CI in a release-branch. The images 50 | # `repo-ref` and `build-id` labels should provide clues as to where it's 51 | # required (may be multiple repos.) 
- for any future auditing purposes. 52 | if [[ -n "$permanent" ]]; then 53 | msg "Retaining forever $name | $labels" 54 | continue 55 | fi 56 | 57 | # Any image matching the currently in-use IMG_SFX must always be preserved 58 | # Value is defined in cirrus.yml 59 | # shellcheck disable=SC2154 60 | if [[ "$name" =~ $IMG_SFX ]]; then 61 | msg "Retaining current (latest) image $name | $labels" 62 | continue 63 | fi 64 | 65 | # No label was set 66 | if [[ -z "$last_used" ]] 67 | then # image lacks any tracking labels 68 | reason="Missing 'last-used' metadata; $LABELSFX" 69 | echo "GCP $name $reason" >> $TOOBSOLETE 70 | continue 71 | fi 72 | 73 | last_used_timestamp=$(date --date=@$(cut -d= -f2 <<< $last_used || true) +%s || true) 74 | last_used_ymd=$(date --date=@$last_used_timestamp --iso-8601=date) 75 | # Validate label contents 76 | if [[ -z "$last_used_timestamp" ]] || \ 77 | [[ "$last_used_timestamp" -ge "$THEFUTURE" ]] 78 | then 79 | reason="Missing/invalid last-used timestamp: '$last_used_timestamp'; $LABELSFX" 80 | echo "GCP $name $reason" >> $TOOBSOLETE 81 | continue 82 | fi 83 | 84 | # Image is actually too old 85 | if [[ "$last_used_timestamp" -le $THRESHOLD ]] 86 | then 87 | reason="Used over $TOO_OLD_DESC on $last_used_ymd; $LABELSFX" 88 | echo "GCP $name $reason" >> $TOOBSOLETE 89 | continue 90 | fi 91 | 92 | msg "Retaining $name | $created_ymd | $status | $labels" 93 | done 94 | 95 | 96 | msg "${RED}Searching for obsolete EC2 images unused as of: ${NOR}$TOO_OLD_DESC" 97 | aws_init 98 | 99 | # The AWS cli returns a huge blob of data we mostly don't need. 100 | # Use query statement to simplify the results. N/B: The get_tag_value() 101 | # function expects to find a "TAGS" item w/ list value. 
102 | ami_query='Images[*].{ID:ImageId,CREATED:CreationDate,STATE:State,TAGS:Tags,DEP:DeprecationTime}' 103 | all_amis=$($AWS ec2 describe-images --owners self --query "$ami_query") 104 | nr_amis=$(jq -r -e length<<<"$all_amis") 105 | 106 | # For whatever reason, the last time the image was used is not 107 | # provided in 'aws ec2 describe-images...' result, a separate 108 | # command must be used. For images without any `lastLaunchedTime` 109 | # (lower-case l) attribute, the simple --query will return an 110 | # empty-value and zero-exit instead of an absent key. 111 | # N/B: The result data uses `LastLaunchedTime` (upper-case L) because 112 | # AWS loves to keep us on our toes. 113 | lltcmd=(\ 114 | aws ec2 describe-image-attribute --attribute lastLaunchedTime 115 | --query "LastLaunchedTime" --image-id 116 | ) 117 | 118 | req_env_vars all_amis nr_amis 119 | for (( i=nr_amis ; i ; i-- )); do 120 | unset ami ami_id state created created_ymd name name_tag dep 121 | ami=$(jq -r -e ".[$((i-1))]"<<<"$all_amis") 122 | ami_id=$(jq -r -e ".ID"<<<"$ami") 123 | state=$(jq -r -e ".STATE"<<<"$ami") 124 | created=$(jq -r -e ".CREATED"<<<"$ami") 125 | created_ymd=$(date --date="$created" --iso-8601=date) 126 | dep=$(jq -r -e ".DEP"<<<"$ami") 127 | 128 | unset tags 129 | # The name-tag is easier on human eys if one is set. 
130 | name="$ami_id" 131 | if name_tag=$(get_tag_value "Name" "$ami"); then 132 | name="$name_tag" 133 | tags="Name=$name_tag" 134 | fi 135 | 136 | for tag in permanent build-id repo-ref automation; do 137 | if [[ -z "$tags" ]]; then 138 | tags="$tag=" 139 | else 140 | tags+=",$tag=" 141 | fi 142 | 143 | unset tagval 144 | if tagval=$(get_tag_value "$tag" "$ami"); then 145 | tags+="$tagval" 146 | fi 147 | done 148 | 149 | unset automation permanent reason 150 | automation=$(grep -E --only-matching --max-count=1 \ 151 | --ignore-case 'automation=true' <<< $tags || true) 152 | permanent=$(grep -E --only-matching --max-count=1 \ 153 | --ignore-case 'permanent=true' <<< $tags || true) 154 | 155 | if [[ -n "$permanent" ]]; then 156 | msg "Retaining forever $name | $tags" 157 | # Permanent AMIs should never ever have a deprecation date set 158 | $AWS ec2 disable-image-deprecation --image-id "$ami_id" > /dev/null 159 | continue 160 | fi 161 | 162 | # Any image matching the currently in-use IMG_SFX 163 | # must always be preserved. 
Values are defined in cirrus.yml 164 | # shellcheck disable=SC2154 165 | if [[ "$name" =~ $IMG_SFX ]]; then 166 | msg "Retaining current (latest) image $name | $tags" 167 | continue 168 | fi 169 | 170 | # For IAM (security) policy, an "automation" tag is always required 171 | if [[ -z "$automation" ]] 172 | then 173 | reason="Missing 'automation' metadata; Tags: $tags" 174 | echo "EC2 $ami_id $reason" >> $TOOBSOLETE 175 | continue 176 | fi 177 | 178 | unset lltvalue last_used_timestamp last_used_ymd 179 | if lltvalue=$("${lltcmd[@]}" $ami_id | jq -r -e ".Value") && [[ -n "$lltvalue" ]]; then 180 | last_used_timestamp=$(date --date="$lltvalue" +%s) 181 | last_used_ymd=$(date --date="@$last_used_timestamp" --iso-8601=date) 182 | tags+=",lastLaunchedTime=$last_used_ymd" 183 | else 184 | reason="Missing 'lastLaunchedTime' metadata; Tags: $tags" 185 | echo "EC2 $ami_id $reason" >> $TOOBSOLETE 186 | continue 187 | fi 188 | 189 | if [[ "$last_used_timestamp" -le $THRESHOLD ]]; then 190 | reason="Used over $TOO_OLD_DESC on $last_used_ymd; Tags: $tags" 191 | echo "EC2 $ami_id $reason" >> $TOOBSOLETE 192 | continue 193 | else 194 | msg "Retaining $ami_id | $created_ymd | $state | $tags" 195 | if [[ "$dep" != "null" ]]; then 196 | msg " Removing previously set AMI deprecation timestamp: $dep" 197 | # Ignore confirmation output. 198 | $AWS ec2 disable-image-deprecation --image-id "$ami_id" > /dev/null 199 | fi 200 | fi 201 | done 202 | 203 | COUNT=$(<"$IMGCOUNT") 204 | CANDIDATES=$(wc -l <$TOOBSOLETE) 205 | msg "########################################################################" 206 | msg "Obsoleting $OBSOLETE_LIMIT random image candidates ($CANDIDATES/$COUNT total):" 207 | 208 | # Require a minimum number of images to exist. Also if there is some 209 | # horrible scripting accident, this limits the blast-radius. 
210 | if [[ "$CANDIDATES" -lt $OBSOLETE_LIMIT ]] 211 | then 212 | die 0 "Safety-net Insufficient images ($CANDIDATES) to process ($OBSOLETE_LIMIT required)" 213 | fi 214 | 215 | # Don't let one bad apple ruin the whole bunch 216 | ERRORS=0 217 | sort --random-sort $TOOBSOLETE | tail -$OBSOLETE_LIMIT | \ 218 | while read -r cloud image_name reason; do 219 | 220 | msg "Obsoleting $cloud image $image_name:${NOR} $reason" 221 | if ((DRY_RUN)); then 222 | msg " Dry-run: No changes made" 223 | elif [[ "$cloud" == "GCP" ]]; then 224 | # Ref: https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#deprecating_an_image 225 | # Note: --delete-in creates deprecated.delete(from imgobsolete container) 226 | # The imgprune container is required to actually delete the image. 227 | $GCLOUD compute images deprecate $image_name \ 228 | --state=OBSOLETE --delete-in="${TOO_OLD_DAYS}d" \ 229 | || ERRORS=$((ERRORS+1)) 230 | elif [[ "$cloud" == "EC2" ]]; then 231 | # Note: Image will be automatically deleted in 30 days unless manual 232 | # intervention performed. The imgprune container is NOT used for AWS 233 | # image pruning! 234 | if ! status=$($AWS ec2 enable-image-deprecation --image-id "$image_name" \ 235 | --deprecate-at $(date --utc --date "+$TOO_OLD_DAYS days" \ 236 | --iso-8601=date)); then 237 | ERRORS=$((ERRORS+1)) 238 | elif [[ $(jq -r -e ".Return"<<<"$status") != "true" ]]; then 239 | ERRORS=$((ERRORS+1)) 240 | fi 241 | else 242 | die 1 "Unknown/Unsupported cloud '$cloud' record encountered in \$TOOBSOLETE file" 243 | fi 244 | done 245 | 246 | if ((ERRORS)); then 247 | die 1 "Updating $ERRORS images failed (see above)." 
248 | fi 249 | -------------------------------------------------------------------------------- /get_ci_vm/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is only intended to be executed by Cirrus-CI, in 4 | # a container, in order to test the functionality of the freshly 5 | # built get_ci_vm container. Any other usage is unlikely to 6 | # function properly. 7 | # 8 | # Example podman command for local testing, using a locally-built 9 | # container image, from top-level repo. directory: 10 | # 11 | # podman run -it --rm -e TESTING_ENTRYPOINT=true -e AI_PATH=$PWD \ 12 | # -e CIRRUS_WORKING_DIR=$PWD -v $PWD:$PWD:O -w $PWD \ 13 | # --entrypoint=get_ci_vm/test.sh get_ci_vm:latest 14 | 15 | set -eo pipefail 16 | 17 | SCRIPT_FILEPATH=$(realpath "$0") 18 | SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH") 19 | REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../") 20 | 21 | # shellcheck source=./lib.sh 22 | source "$REPO_DIRPATH/lib.sh" 23 | 24 | # Set this non-zero to print test-debug info. 
TEST_DEBUG=0
FAILURE_COUNT=0

# Print the overall pass/fail summary, then exit the script with zero
# status iff FAILURE_COUNT recorded no failures.
exit_with_status() {
    if ((FAILURE_COUNT)); then
        echo "Total Failures: $FAILURE_COUNT"
    else
        echo "All tests passed"
    fi
    set -e # Force exit with exit code
    test "$FAILURE_COUNT" -eq 0
}

# Used internally by testf to assist debugging and output file cleanup
_test_report() {
    local msg="$1"
    local inc_fail="$2"
    local outf="$3"

    if ((inc_fail)); then
        # N/B: Plain arithmetic expansion instead of `let 'FAILURE_COUNT++'`.
        # A post-increment from 0 makes `let` return non-zero, which would
        # abort the whole script should this ever execute under `set -e`.
        FAILURE_COUNT=$((FAILURE_COUNT + 1))
        echo -n "fail - "
    else
        echo -n "pass - "
    fi

    echo -n "$msg"

    if [[ -r "$outf" ]]; then
        # Ignore output when successful
        if ((inc_fail)) || ((TEST_DEBUG)); then
            echo " (output follows)"
            cat "$outf"
        fi
        rm -f "$outf" "$outf.oneline"
    fi
    echo -e '\n' # Makes output easier to read
}

# Execute an entrypoint.sh function in isolation, after first calling an
# optional harness function to arrange the environment.
# Capture output and verify exit code and stdout/stderr contents.
# usage: testf <description> <harness> <exp. exit> <exp. output regex> <function> [args...]
# Notes: Exit code not checked if blank. Expected output will be verified blank
# if regex is empty. Otherwise, regex checks whitespace-squashed output.
testf() {
    echo "Testing: ${1:-WARNING: No Test description given}"
    local harness=$2
    local e_exit="$3"
    local e_out_re="$4"
    shift 4

    if ((TEST_DEBUG)); then
        # shellcheck disable=SC2145
        echo "# $@" > /dev/stderr
    fi

    # Using grep -E vs file safer than shell builtin test
    local a_out_f
    local a_exit=0
    a_out_f=$(mktemp -p '' "tmp_${FUNCNAME[0]}_XXXXXXXX")

    # Use a sub-shell to isolate tests from each other
    set -o pipefail
    # Note: since \$_TMPDIR is defined/set in subshell, this is going to
    # leak them like crazy.  Ignore this since tests should only be running
    # inside a container anyway.
    (
        set -eo pipefail
        # shellcheck source=get_ci_vm/entrypoint.sh disable=SC2154
        source "$AI_PATH/get_ci_vm/entrypoint.sh"
        status() { /bin/true; } # not normally useful for testing
        if [[ -n "$harness" ]]; then "$harness"; fi
        "$@" 0<&- |& tee "$a_out_f" | tr -s '[:space:]' ' ' > "${a_out_f}.oneline"
    )
    a_exit="$?"
    if ((TEST_DEBUG)); then
        echo "Command/Function call exited with code: $a_exit"
    fi

    if [[ -n "$e_exit" ]] && [[ $e_exit -ne $a_exit ]]; then
        _test_report "Expected exit-code $e_exit but received $a_exit while executing $1" "1" "$a_out_f"
    elif [[ -z "$e_out_re" ]] && [[ -n "$(<$a_out_f)" ]]; then
        _test_report "Expecting no output from $*" "1" "$a_out_f"
    elif [[ -n "$e_out_re" ]]; then
        if ((TEST_DEBUG)); then
            echo "Received $(wc -l $a_out_f | awk '{print $1}') output lines of $(wc -c $a_out_f | awk '{print $1}') bytes total"
        fi
        if grep -E -q "$e_out_re" "${a_out_f}.oneline"; then
            _test_report "Command $1 exited as expected with expected output" "0" "$a_out_f"
        else
            _test_report "Expecting regex '$e_out_re' match to (whitespace-squashed) output" "1" "$a_out_f"
        fi
    else # Pass
        _test_report "Command $1 exited as expected ($a_exit)" "0" "$a_out_f"
    fi
}


### MAIN

# Check some basic items first and mimic 'testf' output
PASS_MSG=$'pass - Command exited as expected (0)\n'

msg "Testing: Verify \$CIRRUS_WORKING_DIR is non-empty"
req_env_vars CIRRUS_WORKING_DIR
msg "$PASS_MSG"

msg "Testing: Verify \$TESTING_ENTRYPOINT is non-empty"
req_env_vars TESTING_ENTRYPOINT
msg "$PASS_MSG"

msg "Testing: Verify \$AI_PATH is non-empty"
req_env_vars AI_PATH
msg "$PASS_MSG"

set +e

# usage: testf <description> <harness> <exp. exit> <exp. output regex> <function> [args...]
# Each testf call: <description> <harness-function> <expected-exit> \
#                  <expected-output-regex> <entrypoint-function> [args...]
testf "Verify \$AI_PATH/get_ci_vm/entrypoint.sh loads w/o status output" \
    "" 0 "" \
    status

# --- init() behavior with various repo. fixtures ---

name_root() { NAME="root"; SRCDIR="/tmp"; }
testf "Verify init() fails when \$NAME is root" \
    name_root 1 "Running as root not supported" \
    init

# CIRRUS_WORKING_DIR verified non-empty
# shellcheck disable=SC2154
BAD_TEST_REPO="$CIRRUS_WORKING_DIR/get_ci_vm/bad_repo_test"
bad_repo() { NAME="foobar"; SRCDIR="$BAD_TEST_REPO"; }
testf "Verify init() w/ old/unsupported repo." \
    bad_repo 1 "not compatible" \
    init

GOOD_TEST_REPO="$CIRRUS_WORKING_DIR/get_ci_vm/good_repo_test"
good_repo() { NAME="foobar"; SRCDIR="$GOOD_TEST_REPO"; }
testf "Verify init() w/ apiv1 compatible repo." \
    good_repo 0 "" \
    init

GOOD_TEST_REPO_V2="$CIRRUS_WORKING_DIR/get_ci_vm/good_repo_test_v2"
good_repo_v2() { NAME="snafu"; SRCDIR="$GOOD_TEST_REPO_V2"; }
testf "Verify init() w/ apiv2 compatible repo." \
    good_repo_v2 0 "" \
    init

# --- get_inst_image() task-name lookups (CIRRUS_TASK=--list) ---

good_init() {
    NAME="foobar"
    SRCDIR="$GOOD_TEST_REPO"
    CIRRUS_TASK="--list"
    init
}
testf "Verify get_inst_image() returns expected google task name" \
    good_init 0 "google_test" \
    get_inst_image

testf "Verify get_inst_image() returns expected aws task name" \
    good_init 0 "aws_test" \
    get_inst_image

testf "Verify get_inst_image() returns expected container task name" \
    good_init 0 "container_test" \
    get_inst_image

# --- GCE flavor: mocked gcloud, uninitialized-credentials path ---

# Mock stand-in for the real gcloud binary; echoes its arguments then
# replays canned "not logged in" output from the fixture repo.
mock_uninit_gcloud() {
    # Don't preserve arguments to make checking easier
    # shellcheck disable=SC2145
    echo "gcloud $@"
    cat $GOOD_TEST_REPO/uninit_gcloud.output
    return 0
}

# Harness: point entrypoint.sh at the apiv1 fixture repo. with the
# uninitialized-gcloud mock installed as $GCLOUD.
mock_uninit_gcevm() {
    NAME="foobar"
    SRCDIR="$GOOD_TEST_REPO"
    CIRRUS_TASK="google_test"
    GCLOUD="mock_uninit_gcloud"
    READING_DELAY="0.1s"
    init
    get_inst_image
}

UTC_LOCAL_TEST="-0500"
testf "Verify mock 'gcevm' w/o creds attempts to initialize" \
    mock_uninit_gcevm 1 \
    "WARNING:.+valid GCP credentials.+gcloud.+init.+Mock Google.+ERROR: Unable.+credentials" \
    init_gcevm

# --- GCE flavor: mocked gcloud, initialized path ---

mock_gcloud() {
    # Don't preserve arguments to make checking easier
    # shellcheck disable=SC2145
    echo "gcloud $@"
    return 0
}

# Harness: same fixture repo. but with a "working" gcloud mock.
mock_init_gcevm() {
    NAME="foobar"
    SRCDIR="$GOOD_TEST_REPO"
    CIRRUS_TASK="google_test"
    GCLOUD="mock_gcloud"
    READING_DELAY="0.1s"
    init
    get_inst_image
}


# Simulated UTC timezone should trigger the zone-override warning path.
UTC_LOCAL_TEST="-0000"
testf "Verify mock 'gcevm' w/ UTC TZ initializes with delay and warning" \
    mock_init_gcevm 0 'WARNING:.+override \$GCLOUD_ZONE to' \
    init_gcevm

UTC_LOCAL_TEST="-0500"
testf "Verify mock 'gcevm' w/ central TZ initializes as expected" \
    mock_init_gcevm 0 "Winning lottery-number checksum: 0" \
    init_gcevm

# --- GCE flavor: full mocked workflow over a temporary git fixture ---

mock_gcevm_workflow() {
    init_gcevm
    create_vm
    make_ci_env_script
    make_setup_tarball
    setup_vm
}
# Don't confuse the actual repo. by nesting another repo inside
tar -xzf "$GOOD_TEST_REPO/dot_git.tar.gz" -C "$GOOD_TEST_REPO" .git
# Ignore ownership security checks
git config --system --add safe.directory $GOOD_TEST_REPO
# Setup should tarball new files in the repo.
echo "testing" > "$GOOD_TEST_REPO/uncommited_file"
# Setup should tarball changed files in the repo.
echo -e "\n\ntest file changes\n\n" >> "$GOOD_TEST_REPO/README.md"
# Setup should ignore a removed file
git rm -f "$GOOD_TEST_REPO/uninit_gcloud.output"
# The goal is to match key elements and sequences in the mock output,
# without overly burdening future development.
workflow_regex="\
.*gcloud.+--configuration=automation_images\
.*--image-project=automation_images\
.*--image=test-image-name\
.*foobar-test-image-name\
.*Cloning into\
.*README.md\
.*Ignoring uncommited removed.+uninit_gcloud.output\
.*uncommited_file\
.*Switched to a new branch\
.*gcloud.+compute scp.+root@foobar-test-image-name:/tmp/\
.*gcloud.+compute ssh.+tar.+setup.tar.gz\
.*gcloud.+compute ssh.+chmod.+ci_env.sh\
.*gcloud.+compute ssh.+/root/ci_env.sh.+get_ci_vm.sh --setup"

testf "Verify mock 'gcevm' flavor main() workflow produces expected output" \
    mock_init_gcevm \
    0 "$workflow_regex" \
    mock_gcevm_workflow

# prevent repo. in repo. problems + stray test files
rm -rf "$GOOD_TEST_REPO/.git" "$GOOD_TEST_REPO/uncommited_file"

# --- EC2 flavor: mocked aws cli, uninitialized-credentials path ---

# Mock stand-in for the aws cli; echoes arguments then replays canned
# "no credentials" output from the apiv2 fixture repo.  Non-zero return
# simulates the credential failure.
mock_uninit_aws() {
    # Don't preserve arguments to make checking easier
    # shellcheck disable=SC2145
    echo "aws $@"
    cat $GOOD_TEST_REPO_V2/uninit_aws.output
    return 1
}

mock_uninit_ec2vm() {
    NAME="mctestface"
    SRCDIR="$GOOD_TEST_REPO_V2"
    CIRRUS_TASK="aws_test"
    AWSCLI="mock_uninit_aws"
    init
    get_inst_image
}

testf "Verify mock 'ec2vm' w/o creds attempts to initialize" \
    mock_uninit_ec2vm 1 \
    "WARNING: AWS.+ssh.+initialize" \
    init_ec2vm

# --- EC2 flavor: mocked aws cli, initialized path + AMI selection ---

# Mock aws cli that answers 'describe-images' queries with canned fixture
# JSON and echoes any other invocation verbatim.
mock_init_aws() {
    # Only care if string is present
    # shellcheck disable=SC2199
    if [[ "$@" =~ describe-images ]]; then
        cat $GOOD_TEST_REPO_V2/ami_search.json
    else
        # Don't preserve arguments to make checking easier
        # shellcheck disable=SC2145
        echo "aws $@"
    fi
}

mock_init_ec2vm() {
    NAME="mctestface"
    SRCDIR="$GOOD_TEST_REPO_V2"
    CIRRUS_TASK="aws_test"
    AWSCLI="mock_init_aws"
    EC2_SSH_KEY="$GOOD_TEST_REPO_V2/mock_ec2_key"
    SSH_CMD=true
    init
    get_inst_image
}

testf "Verify mock initialized 'ec2vm' is satisfied with test setup" \
    mock_init_ec2vm 0 "" \
    init_ec2vm

# Wrapper so testf can observe the AMI id chosen by select_ec2_inst_image()
# (which only sets $INST_IMAGE, printing nothing itself).
print_select_ec2_inst_image() {
    export A_DEBUG=1
    select_ec2_inst_image
    echo "$INST_IMAGE"
}

testf "Verify AMI selection by name tag with from fake describe-images data" \
    mock_init_ec2vm 0 "ami-newest" \
    print_select_ec2_inst_image

# TODO: Add more EC2 tests

# Must be called last
exit_with_status