├── .github ├── scripts │ ├── download-gcc-bpf.sh │ └── tmpfsify-workspace.sh └── workflows │ ├── gcc-bpf.yml │ ├── kernel-build-test.yml │ ├── kernel-build.yml │ ├── kernel-test.yml │ ├── lint.yaml │ ├── scripts_tests.yml │ ├── test-prepare-incremental-build-action.yml │ └── test.yml ├── .gitignore ├── INDEX ├── LICENSE ├── LICENSE.BSD-2-Clause ├── LICENSE.LGPL-2.1 ├── README.md ├── ansible ├── README.md ├── inventory_example.yml ├── playbook.yml └── roles │ ├── base │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ ├── setup-Debian.yml │ │ └── setup-RedHat.yml │ └── vars │ │ ├── Debian.yml │ │ └── RedHat.yml │ ├── qemu-user-static │ ├── README.md │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml │ └── runner │ ├── README.md │ ├── defaults │ └── main.yml │ ├── files │ ├── actions-runner-watchdog.sh │ ├── app_token.sh │ └── gh_token_generator.sh │ ├── handlers │ └── main.yml │ └── tasks │ └── main.yml ├── build-bpf-gcc ├── README.md ├── action.yml ├── build-and-install.sh └── latest-snapshots.sh ├── build-linux ├── action.yml └── build.sh ├── build-samples ├── action.yml └── build_samples.sh ├── build-scx-selftests ├── README.md ├── action.yml └── build.sh ├── build-selftests ├── action.yml └── build_selftests.sh ├── ci ├── diffs │ └── .keep └── vmtest │ └── configs │ ├── DENYLIST │ ├── DENYLIST.aarch64 │ ├── DENYLIST.rc │ ├── DENYLIST.s390x │ ├── DENYLIST.test_progs-bpf_gcc │ ├── DENYLIST.x86_64 │ ├── run-vmtest.env │ ├── run_veristat.kernel.cfg │ ├── run_veristat.meta.cfg │ └── veristat_meta.cfg ├── download-vmlinux ├── action.yml └── run.sh ├── get-linux-source ├── action.yml └── checkout_latest_kernel.sh ├── get-llvm-version └── action.yml ├── helpers.sh ├── patch-kernel ├── action.yml └── patch_kernel.sh ├── prepare-incremental-build ├── README.md ├── action.yml ├── get-commit-metadata.sh └── prepare-incremental-builds.sh ├── prepare-rootfs ├── action.yml ├── run.sh └── run_vmtest.sh ├── rootfs ├── Makefile ├── mkrootfs_debian.sh ├── mkrootfs_tweak.sh └── s390x-self-hosted-builder │ ├── README.md │ ├── actions-runner-libbpf.Dockerfile │ ├── actions-runner-libbpf.service │ ├── fs │ └── usr │ │ └── bin │ │ ├── actions-runner │ │ └── entrypoint │ └── qemu-user-static.service ├── run-qemu ├── action.yml └── run.sh ├── run-vmtest ├── README.md ├── action.yml ├── helpers.sh ├── normalize-paths-for-github.sh ├── normalize_bpf_test_names.py ├── prepare-bpf-selftests.sh ├── print_test_summary.py ├── run-bpf-selftests.sh ├── run-scx-selftests.sh ├── run.sh ├── tests │ ├── normalize_bpf_test_names │ │ ├── expected-output.txt │ │ ├── input.txt │ │ └── run-test.sh │ ├── print_test_summary │ │ ├── expected-output.txt │ │ ├── expected-summary.txt │ │ ├── run-test.sh │ │ └── test_progs.json │ └── run-all-tests.sh └── vmtest-init.sh ├── setup-build-env ├── action.yml ├── build_pahole.sh ├── install_clang.sh ├── install_cross_compilation_toolchain.sh └── install_packages.sh └── tar-artifacts ├── README.md ├── action.yml └── tar-artifacts.sh /.github/scripts/download-gcc-bpf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | GCC_BPF_RELEASE_GH_REPO=$1 6 | INSTALL_DIR=$(realpath $2) 7 | 8 | cd /tmp 9 | 10 | tag=$(gh release list -L 1 -R ${GCC_BPF_RELEASE_GH_REPO} --json tagName -q .[].tagName) 11 | if [[ -z "$tag" ]]; then 12 | echo "Could not find latest GCC BPF release at ${GCC_BPF_RELEASE_GH_REPO}" 13 | exit 1 14 | fi 15 | 16 | 
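# Note: release asset naming is assumed to follow the "<tag>.tar.zst"
# convention relied upon by the url construction just below. As an
# illustration (hypothetical tag, not a real release):
#
#   tag=gcc-bpf-v1.2.3
#   url=https://github.com/<owner>/<repo>/releases/download/gcc-bpf-v1.2.3/gcc-bpf-v1.2.3.tar.zst
#
# A release that ships its tarball under a different name will make the
# wget below fail.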
url="https://github.com/${GCC_BPF_RELEASE_GH_REPO}/releases/download/${tag}/${tag}.tar.zst" 17 | echo "Downloading $url" 18 | wget -q "$url" 19 | 20 | tarball=${tag}.tar.zst 21 | dir=$(tar tf $tarball | head -1 || true) 22 | 23 | echo "Extracting $tarball ..." 24 | tar -I zstd -xf $tarball && rm -f $tarball 25 | 26 | rm -rf $INSTALL_DIR 27 | mv -v $dir $INSTALL_DIR 28 | 29 | cd - 30 | 31 | -------------------------------------------------------------------------------- /.github/scripts/tmpfsify-workspace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x -euo pipefail 4 | 5 | TMPFS_SIZE=20 # GB 6 | MEM_TOTAL=$(awk '/MemTotal/ {print int($2/1024)}' /proc/meminfo) 7 | 8 | # sanity check: total mem is at least double TMPFS_SIZE 9 | if [ $MEM_TOTAL -lt $(($TMPFS_SIZE*1024*2)) ]; then 10 | echo "tmpfsify-workspace.sh: will not allocate tmpfs, total memory is too low (${MEM_TOTAL}MB)" 11 | exit 0 12 | fi 13 | 14 | dir="$(basename "$GITHUB_WORKSPACE")" 15 | cd "$(dirname "$GITHUB_WORKSPACE")" 16 | mv "${dir}" "${dir}.backup" 17 | mkdir "${dir}" 18 | sudo mount -t tmpfs -o size=${TMPFS_SIZE}G tmpfs "${dir}" 19 | rsync -a "${dir}.backup/" "${dir}" 20 | cd - 21 | 22 | -------------------------------------------------------------------------------- /.github/workflows/gcc-bpf.yml: -------------------------------------------------------------------------------- 1 | name: Testing GCC BPF compiler 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | runs_on: 7 | required: true 8 | type: string 9 | arch: 10 | required: true 11 | type: string 12 | gcc_version: 13 | required: true 14 | type: string 15 | llvm_version: 16 | required: true 17 | type: string 18 | toolchain: 19 | required: true 20 | type: string 21 | toolchain_full: 22 | required: true 23 | type: string 24 | download_sources: 25 | required: true 26 | type: boolean 27 | 28 | jobs: 29 | test: 30 | name: GCC BPF 31 | runs-on: >- 32 | ${{ 33 | contains(fromJSON(inputs.runs_on), 'codebuild') 34 | && format('codebuild-bpf-ci-{0}-{1}', github.run_id, github.run_attempt) 35 | || fromJSON(inputs.runs_on) 36 | }} 37 | env: 38 | ARCH: ${{ inputs.arch }} 39 | BPF_NEXT_BASE_BRANCH: 'master' 40 | GCC_BPF_INSTALL_DIR: ${{ github.workspace }}/gcc-bpf 41 | GCC_BPF_RELEASE_REPO: 'theihor/gcc-bpf' 42 | KBUILD_OUTPUT: ${{ github.workspace }}/src/kbuild-output 43 | REPO_ROOT: ${{ github.workspace }}/src 44 | 45 | steps: 46 | 47 | - uses: actions/checkout@v4 48 | 49 | - if: ${{ inputs.download_sources }} 50 | name: Download bpf-next tree 51 | uses: ./get-linux-source 52 | with: 53 | dest: ${{ env.REPO_ROOT }} 54 | rev: ${{ env.BPF_NEXT_BASE_BRANCH }} 55 | 56 | - if: ${{ ! 
inputs.download_sources }} 57 | name: Checkout ${{ github.repository }} to ./src 58 | uses: actions/checkout@v4 59 | with: 60 | path: 'src' 61 | 62 | - uses: ./patch-kernel 63 | with: 64 | patches-root: '${{ github.workspace }}/ci/diffs' 65 | repo-root: ${{ env.REPO_ROOT }} 66 | 67 | - uses: actions/download-artifact@v4 68 | with: 69 | name: vmlinux-${{ inputs.arch }}-${{ inputs.toolchain_full }} 70 | path: ${{ env.REPO_ROOT }} 71 | 72 | - name: Untar artifacts 73 | working-directory: ${{ env.REPO_ROOT }} 74 | run: zstd -d -T0 vmlinux-${{ inputs.arch }}-${{ inputs.toolchain_full }}.tar.zst --stdout | tar -xf - 75 | 76 | - name: Setup build environment 77 | uses: ./setup-build-env 78 | with: 79 | arch: ${{ inputs.arch }} 80 | gcc-version: ${{ inputs.gcc_version }} 81 | llvm-version: ${{ inputs.llvm_version }} 82 | 83 | - name: Download GCC BPF compiler 84 | shell: bash 85 | env: 86 | GH_TOKEN: ${{ github.token }} 87 | run: .github/scripts/download-gcc-bpf.sh ${{ env.GCC_BPF_RELEASE_REPO }} ${{ env.GCC_BPF_INSTALL_DIR }} 88 | 89 | - name: Build selftests/bpf/test_progs-bpf_gcc 90 | uses: ./build-selftests 91 | env: 92 | BPF_GCC: ${{ env.GCC_BPF_INSTALL_DIR }} 93 | MAX_MAKE_JOBS: 32 94 | SELFTESTS_BPF_TARGETS: 'test_progs-bpf_gcc' 95 | with: 96 | arch: ${{ inputs.arch }} 97 | kernel-root: ${{ env.REPO_ROOT }} 98 | llvm-version: ${{ inputs.llvm_version }} 99 | toolchain: ${{ inputs.toolchain }} 100 | -------------------------------------------------------------------------------- /.github/workflows/kernel-build-test.yml: -------------------------------------------------------------------------------- 1 | name: Reusable Build/Test/Veristat workflow 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | arch: 7 | required: true 8 | type: string 9 | description: The architecture to build against, e.g x86_64, aarch64, s390x... 10 | toolchain_full: 11 | required: true 12 | type: string 13 | description: The toolchain and for llvm, its version, e.g gcc, llvm-15 14 | toolchain: 15 | required: true 16 | type: string 17 | description: The toolchain, e.g gcc, llvm 18 | runs_on: 19 | required: true 20 | type: string 21 | description: The runners to run the test on. This is a json string representing an array of labels. 22 | build_runs_on: 23 | required: true 24 | type: string 25 | description: The runners to run the builds on. This is a json string representing an array of labels. 26 | gcc_version: 27 | required: true 28 | type: string 29 | description: GCC version to install 30 | llvm_version: 31 | required: true 32 | type: string 33 | description: LLVM version to install 34 | kernel: 35 | required: true 36 | type: string 37 | description: The kernel to run the test against. For KPD this is always LATEST, which runs against a newly built kernel. 38 | tests: 39 | required: true 40 | type: string 41 | description: A serialized json array with the tests to be running, it must follow the json-matrix format, https://www.jitsejan.com/use-github-actions-with-json-file-as-matrix 42 | run_tests: 43 | required: true 44 | type: boolean 45 | description: Whether or not to run the test job. 46 | download_sources: 47 | required: true 48 | type: boolean 49 | description: Whether to download the linux sources into the working directory. 50 | default: false 51 | build_release: 52 | required: true 53 | type: boolean 54 | description: Build selftests with -O2 optimization in addition to non-optimized build. 
55 | default: false 56 | 57 | jobs: 58 | 59 | # Build kernel and selftest 60 | build: 61 | uses: ./.github/workflows/kernel-build.yml 62 | with: 63 | arch: ${{ inputs.arch }} 64 | toolchain_full: ${{ inputs.toolchain_full }} 65 | toolchain: ${{ inputs.toolchain }} 66 | runs_on: ${{ inputs.build_runs_on }} 67 | gcc_version: ${{ inputs.gcc_version }} 68 | llvm_version: ${{ inputs.llvm_version }} 69 | kernel: ${{ inputs.kernel }} 70 | download_sources: ${{ inputs.download_sources }} 71 | 72 | build-release: 73 | if: ${{ inputs.build_release }} 74 | uses: ./.github/workflows/kernel-build.yml 75 | with: 76 | arch: ${{ inputs.arch }} 77 | toolchain_full: ${{ inputs.toolchain_full }} 78 | toolchain: ${{ inputs.toolchain }} 79 | runs_on: ${{ inputs.build_runs_on }} 80 | gcc_version: ${{ inputs.gcc_version }} 81 | llvm_version: ${{ inputs.llvm_version }} 82 | kernel: ${{ inputs.kernel }} 83 | download_sources: ${{ inputs.download_sources }} 84 | release: true 85 | 86 | test: 87 | if: ${{ inputs.run_tests }} 88 | uses: ./.github/workflows/kernel-test.yml 89 | # Setting name to test here to avoid lengthy autogenerated names due to matrix 90 | # e.g build-and-test x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc 91 | name: "test" 92 | needs: [build] 93 | strategy: 94 | fail-fast: false 95 | matrix: ${{ fromJSON(inputs.tests) }} 96 | with: 97 | arch: ${{ inputs.arch }} 98 | toolchain_full: ${{ inputs.toolchain_full }} 99 | runs_on: ${{ inputs.runs_on }} 100 | kernel: ${{ inputs.kernel }} 101 | test: ${{ matrix.test }} 102 | continue_on_error: ${{ toJSON(matrix.continue_on_error) }} 103 | timeout_minutes: ${{ matrix.timeout_minutes }} 104 | 105 | gcc-bpf: 106 | name: 'GCC BPF' 107 | if: ${{ inputs.arch == 'x86_64' }} 108 | uses: ./.github/workflows/gcc-bpf.yml 109 | needs: [build] 110 | with: 111 | # GCC BPF does not need /dev/kvm, so use the "build" runners 112 | runs_on: ${{ inputs.build_runs_on }} 113 | arch: ${{ inputs.arch }} 114 | gcc_version: ${{ inputs.gcc_version }} 115 | llvm_version: ${{ inputs.llvm_version }} 116 | toolchain: ${{ inputs.toolchain }} 117 | toolchain_full: ${{ inputs.toolchain_full }} 118 | download_sources: ${{ inputs.download_sources }} 119 | 120 | -------------------------------------------------------------------------------- /.github/workflows/kernel-build.yml: -------------------------------------------------------------------------------- 1 | 2 | name: Reusable build workflow 3 | 4 | on: 5 | workflow_call: 6 | inputs: 7 | arch: 8 | required: true 9 | type: string 10 | description: The architecture to build against, e.g x86_64, aarch64, s390x... 11 | toolchain_full: 12 | required: true 13 | type: string 14 | description: The toolchain and for llvm, its version, e.g gcc, llvm-15 15 | toolchain: 16 | required: true 17 | type: string 18 | description: The toolchain, e.g gcc, llvm 19 | runs_on: 20 | required: true 21 | type: string 22 | description: The runners to run the test on. This is a json string representing an array of labels. 23 | gcc_version: 24 | required: true 25 | type: string 26 | description: GCC version to install 27 | llvm_version: 28 | required: true 29 | type: string 30 | description: LLVM version to install 31 | kernel: 32 | required: true 33 | type: string 34 | description: The kernel to run the test against. For KPD this is always LATEST, which runs against a newly built kernel. 
35 |       download_sources:
36 |         required: true
37 |         type: boolean
38 |         description: Whether to download the linux sources into the working directory.
39 |         default: false
40 |       release:
41 |         required: false
42 |         type: boolean
43 |         description: Build selftests with -O2 optimization.
44 |         default: false
45 | 
46 | jobs:
47 |   build:
48 |     name: build kernel and selftests ${{ inputs.release && '-O2' || '' }}
49 |     # To run on CodeBuild, the runs-on value must correspond to the AWS
50 |     # CodeBuild project associated with the kernel-patches webhook.
51 |     # However, matrix.py passes just a 'codebuild' string.
52 |     runs-on: >-
53 |       ${{
54 |         contains(fromJSON(inputs.runs_on), 'codebuild')
55 |         && format('codebuild-bpf-ci-{0}-{1}', github.run_id, github.run_attempt)
56 |         || fromJSON(inputs.runs_on)
57 |       }}
58 |     env:
59 |       ARTIFACTS_ARCHIVE: "vmlinux-${{ inputs.arch }}-${{ inputs.toolchain_full }}.tar.zst"
60 |       BPF_NEXT_BASE_BRANCH: 'master'
61 |       BPF_NEXT_FETCH_DEPTH: 64 # A bit of history is needed to facilitate incremental builds
62 |       CROSS_COMPILE: ${{ inputs.arch != 'x86_64' && 'true' || '' }}
63 |       # BUILD_SCHED_EXT_SELFTESTS: ${{ inputs.arch == 'x86_64' || inputs.arch == 'aarch64' && 'true' || '' }}
64 |       KBUILD_OUTPUT: ${{ github.workspace }}/kbuild-output
65 |       KERNEL: ${{ inputs.kernel }}
66 |       KERNEL_ROOT: ${{ github.workspace }}
67 |       REPO_PATH: ""
68 |       REPO_ROOT: ${{ github.workspace }}
69 |       RUNNER_TYPE: ${{ contains(fromJSON(inputs.runs_on), 'codebuild') && 'codebuild' || 'default' }}
70 |     steps:
71 |       - uses: actions/checkout@v4
72 |         with:
73 |           fetch-depth: ${{ inputs.download_sources && 1 || env.BPF_NEXT_FETCH_DEPTH }}
74 | 
75 |       - if: ${{ env.RUNNER_TYPE == 'codebuild' }}
76 |         shell: bash
77 |         run: .github/scripts/tmpfsify-workspace.sh
78 | 
79 |       - if: ${{ inputs.download_sources }}
80 |         name: Download bpf-next tree
81 |         env:
82 |           FETCH_DEPTH: ${{ env.BPF_NEXT_FETCH_DEPTH }}
83 |         uses: ./get-linux-source
84 |         with:
85 |           dest: '.kernel'
86 |           rev: ${{ env.BPF_NEXT_BASE_BRANCH }}
87 | 
88 |       - uses: ./prepare-incremental-build
89 |         with:
90 |           repo-root: ${{ inputs.download_sources && '.kernel' || env.REPO_ROOT }}
91 |           base-branch: >-
92 |             ${{ inputs.download_sources && env.BPF_NEXT_BASE_BRANCH
93 |                 || github.event_name == 'pull_request' && github.base_ref
94 |                 || github.ref_name
95 |             }}
96 |           arch: ${{ inputs.arch }}
97 |           toolchain_full: ${{ inputs.toolchain_full }}
98 |           kbuild-output: ${{ env.KBUILD_OUTPUT }}
99 |       - if: ${{ inputs.download_sources }}
100 |         name: Move linux source in place
101 |         shell: bash
102 |         run: |
103 |           cd .kernel
104 |           rm -rf .git
105 |           mv -t .. $(ls -A)
106 |           cd ..
107 |           rmdir .kernel
108 |       - uses: ./patch-kernel
109 | 
110 |         with:
111 |           patches-root: '${{ github.workspace }}/ci/diffs'
112 |           repo-root: ${{ env.REPO_ROOT }}
113 | 
114 |       - name: Setup build environment
115 |         uses: ./setup-build-env
116 |         with:
117 |           arch: ${{ inputs.arch }}
118 |           gcc-version: ${{ inputs.gcc_version }}
119 |           llvm-version: ${{ inputs.llvm_version }}
120 |           pahole: master
121 | 
122 |       # We have to set up qemu+binfmt in order to enable cross-compilation of selftests.
123 |       # During the selftests build, the freshly built bpftool is executed.
124 |       # On self-hosted bare-metal hosts binfmt is pre-configured.
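      # A sketch of what the two steps below give us (assuming an aarch64
      # cross-build on an x86_64 CodeBuild host): once qemu+binfmt are
      # registered, a foreign-arch ELF can be executed transparently, e.g.
      #
      #   $ file bpftool
      #   bpftool: ELF 64-bit LSB pie executable, ARM aarch64, ...
      #   $ ./bpftool version    # runs via qemu-aarch64 thanks to binfmt_misc
      #
      # Without the binfmt registration, executing bpftool during the
      # selftests build would fail with "Exec format error".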
125 | - if: ${{ env.RUNNER_TYPE == 'codebuild' && env.CROSS_COMPILE }} 126 | name: Set up docker 127 | uses: docker/setup-docker-action@v4 128 | - if: ${{ env.RUNNER_TYPE == 'codebuild' && env.CROSS_COMPILE }} 129 | name: Setup binfmt and qemu 130 | uses: docker/setup-qemu-action@v3 131 | with: 132 | image: tonistiigi/binfmt:qemu-v9.2.0 133 | 134 | - name: Build kernel image 135 | uses: ./build-linux 136 | with: 137 | arch: ${{ inputs.arch }} 138 | toolchain: ${{ inputs.toolchain }} 139 | kbuild-output: ${{ env.KBUILD_OUTPUT }} 140 | max-make-jobs: 32 141 | llvm-version: ${{ inputs.llvm_version }} 142 | 143 | - name: Build selftests/bpf 144 | uses: ./build-selftests 145 | env: 146 | MAX_MAKE_JOBS: 32 147 | RELEASE: ${{ inputs.release && '1' || '' }} 148 | with: 149 | arch: ${{ inputs.arch }} 150 | kernel-root: ${{ env.KERNEL_ROOT }} 151 | llvm-version: ${{ inputs.llvm_version }} 152 | toolchain: ${{ inputs.toolchain }} 153 | 154 | - if: ${{ env.BUILD_SCHED_EXT_SELFTESTS }} 155 | name: Build selftests/sched_ext 156 | uses: ./build-scx-selftests 157 | with: 158 | kbuild-output: ${{ env.KBUILD_OUTPUT }} 159 | repo-root: ${{ env.REPO_ROOT }} 160 | arch: ${{ inputs.arch }} 161 | toolchain: ${{ inputs.toolchain }} 162 | llvm-version: ${{ inputs.llvm_version }} 163 | max-make-jobs: 32 164 | 165 | - if: ${{ github.event_name != 'push' }} 166 | name: Build samples 167 | uses: ./build-samples 168 | with: 169 | arch: ${{ inputs.arch }} 170 | toolchain: ${{ inputs.toolchain }} 171 | kbuild-output: ${{ env.KBUILD_OUTPUT }} 172 | max-make-jobs: 32 173 | llvm-version: ${{ inputs.llvm_version }} 174 | - name: Tar artifacts 175 | id: tar-artifacts 176 | uses: ./tar-artifacts 177 | env: 178 | ARCHIVE_BPF_SELFTESTS: 'true' 179 | ARCHIVE_MAKE_HELPERS: 'true' 180 | ARCHIVE_SCHED_EXT_SELFTESTS: ${{ env.BUILD_SCHED_EXT_SELFTESTS }} 181 | with: 182 | arch: ${{ inputs.arch }} 183 | archive: ${{ env.ARTIFACTS_ARCHIVE }} 184 | kbuild-output: ${{ env.KBUILD_OUTPUT }} 185 | repo-root: ${{ env.REPO_ROOT }} 186 | - if: ${{ github.event_name != 'push' }} 187 | name: Remove KBUILD_OUTPUT content 188 | shell: bash 189 | run: | 190 | # Remove $KBUILD_OUTPUT to prevent cache creation for pull requests. 191 | # Only on pushed changes are build artifacts actually cached, because 192 | # of github.com/actions/cache's cache isolation logic. 193 | rm -rf "${KBUILD_OUTPUT}" 194 | - uses: actions/upload-artifact@v4 195 | with: 196 | name: vmlinux-${{ inputs.arch }}-${{ inputs.toolchain_full }}${{ inputs.release && '-release' || '' }} 197 | if-no-files-found: error 198 | path: ${{ env.ARTIFACTS_ARCHIVE }} 199 | -------------------------------------------------------------------------------- /.github/workflows/kernel-test.yml: -------------------------------------------------------------------------------- 1 | name: Reusable test workflow 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | arch: 7 | required: true 8 | type: string 9 | description: The architecture to build against, e.g x86_64, aarch64, s390x... 10 | toolchain_full: 11 | required: true 12 | type: string 13 | description: The toolchain and for llvm, its version, e.g gcc, llvm-15 14 | runs_on: 15 | required: true 16 | type: string 17 | description: The runners to run the test on. This is a json string representing an array of labels. 18 | kernel: 19 | required: true 20 | type: string 21 | description: The kernel to run the test against. For KPD this is always LATEST, which runs against a newly built kernel. 
22 |     test:
23 |       required: true
24 |       type: string
25 |       description: The test to run in the vm, e.g test_progs, test_maps, test_progs_no_alu32...
26 |     continue_on_error:
27 |       required: true
28 |       type: string
29 |       description: Whether to continue on error. This is typically set to true for parallel tests which are currently known to fail; we don't want them to fail the whole CI run.
30 |     timeout_minutes:
31 |       required: true
32 |       type: number
33 |       description: In case a test runs for too long, after how many minutes shall we time out and error.
34 | 
35 | jobs:
36 |   test:
37 |     name: ${{ inputs.test }} on ${{ inputs.arch }} with ${{ inputs.toolchain_full }}
38 |     runs-on: ${{ fromJSON(inputs.runs_on) }}
39 |     timeout-minutes: 100
40 |     env:
41 |       ARCH: ${{ inputs.arch }}
42 |       KERNEL: ${{ inputs.kernel }}
43 |       REPO_ROOT: ${{ github.workspace }}
44 |       REPO_PATH: ""
45 |       # https://github.com/actions/runner/issues/1483#issuecomment-1031671517
46 |       # booleans are weird in GH.
47 |       CONTINUE_ON_ERROR: ${{ inputs.continue_on_error }}
48 |       DEPLOYMENT: ${{ github.repository == 'kernel-patches/bpf' && 'prod' || 'rc' }}
49 |       ALLOWLIST_FILE: /tmp/allowlist
50 |       DENYLIST_FILE: /tmp/denylist
51 |     steps:
52 |       - uses: actions/checkout@v4
53 | 
54 |       - uses: actions/download-artifact@v4
55 |         with:
56 |           name: vmlinux-${{ inputs.arch }}-${{ inputs.toolchain_full }}
57 |           path: .
58 | 
59 |       - name: Untar artifacts
60 |         # zstd is installed by default in the runner images.
61 |         run: zstd -d -T0 vmlinux-${{ inputs.arch }}-${{ inputs.toolchain_full }}.tar.zst --stdout | tar -xf -
62 | 
63 |       - name: Run selftests
64 |         uses: ./run-vmtest
65 |         # https://github.com/actions/runner/issues/1483#issuecomment-1031671517
66 |         # booleans are weird in GH.
67 |         continue-on-error: ${{ fromJSON(env.CONTINUE_ON_ERROR) }}
68 |         timeout-minutes: ${{ inputs.timeout_minutes }}
69 |         env:
70 |           ARCH: ${{ inputs.arch }}
71 |           DEPLOYMENT: ${{ env.DEPLOYMENT }}
72 |           KERNEL_TEST: ${{ inputs.test }}
73 |           SELFTESTS_BPF: ${{ github.workspace }}/selftests/bpf
74 |           VMTEST_CONFIGS: ${{ github.workspace }}/ci/vmtest/configs
75 |           TEST_PROGS_TRAFFIC_MONITOR: ${{ inputs.arch == 'x86_64' && 'true' || '' }}
76 |           TEST_PROGS_WATCHDOG_TIMEOUT: 600
77 |         with:
78 |           arch: ${{ inputs.arch }}
79 |           vmlinuz: '${{ github.workspace }}/vmlinuz'
80 |           kernel-root: ${{ env.REPO_ROOT }}
81 |           max-cpu: 8
82 |           kernel-test: ${{ inputs.test }}
83 |           # Here we must use kbuild-output local to the repo, because
84 |           # it was extracted from the artifacts.
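          # After "Untar artifacts" above, the workspace is expected to
          # contain roughly (a sketch; the exact contents come from the
          # tar-artifacts action):
          #
          #   $GITHUB_WORKSPACE/
          #   ├── vmlinuz          <- kernel image passed via 'vmlinuz'
          #   ├── selftests/bpf/   <- SELFTESTS_BPF
          #   └── kbuild-output/   <- passed below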
85 | kbuild-output: ${{ env.REPO_ROOT }}/kbuild-output 86 | 87 | - if: ${{ always() }} 88 | uses: actions/upload-artifact@v4 89 | with: 90 | name: tmon-logs-${{ inputs.arch }}-${{ inputs.toolchain_full }}-${{ inputs.test }} 91 | if-no-files-found: ignore 92 | path: /tmp/tmon_pcap/* 93 | -------------------------------------------------------------------------------- /.github/workflows/lint.yaml: -------------------------------------------------------------------------------- 1 | name: "lint" 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - master 8 | 9 | jobs: 10 | shellcheck: 11 | name: ShellCheck 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout repository 15 | uses: actions/checkout@v4 16 | - name: Run ShellCheck 17 | uses: ludeeus/action-shellcheck@master 18 | env: 19 | SHELLCHECK_OPTS: -S error 20 | -------------------------------------------------------------------------------- /.github/workflows/scripts_tests.yml: -------------------------------------------------------------------------------- 1 | name: "Test actions" 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | push: 7 | branches: 8 | - master 9 | 10 | jobs: 11 | test_run_vmtest: 12 | name: Test run-vmtest 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout repository 16 | uses: actions/checkout@v4 17 | - name: Execute run-vmtest/tests 18 | run: ./run-vmtest/tests/run-all-tests.sh 19 | -------------------------------------------------------------------------------- /.github/workflows/test-prepare-incremental-build-action.yml: -------------------------------------------------------------------------------- 1 | name: Workflow to test prepare-incremental-build action 2 | 3 | on: 4 | push: 5 | paths: 6 | - 'prepare-incremental-build/**' 7 | - '.github/**' 8 | workflow_dispatch: 9 | inputs: 10 | arch: 11 | required: true 12 | type: string 13 | default: x86_64 14 | toolchain_full: 15 | required: true 16 | type: string 17 | default: llvm-17 18 | runs_on: 19 | required: true 20 | type: string 21 | default: ubuntu-24.04 22 | kernel-repo: 23 | required: true 24 | type: string 25 | default: kernel-patches/bpf 26 | kernel-branch: 27 | required: true 28 | type: string 29 | default: bpf-next 30 | 31 | jobs: 32 | test-job: 33 | name: Test prepare-incremental-build action 34 | runs-on: ${{ inputs.runs_on || 'ubuntu-24.04' }} 35 | timeout-minutes: 100 36 | env: 37 | ARCH: ${{ inputs.arch || 'x86_64' }} 38 | TOOLCHAIN_FULL: ${{ inputs.toolchain_full || 'llvm-17' }} 39 | KERNEL_REPO: ${{ inputs.kernel-repo || 'kernel-patches/bpf' }} 40 | KERNEL_BRANCH: ${{ inputs.kernel-branch || 'bpf-next' }} 41 | REPO_ROOT: ${{ github.workspace }}/linux 42 | KBUILD_OUTPUT: ${{ github.workspace }}/kbuild-output 43 | steps: 44 | 45 | - uses: actions/checkout@v4 46 | name: Checkout libbpf/ci 47 | 48 | - uses: actions/checkout@v4 49 | name: Checkout ${{ env.KERNEL_REPO }} 50 | with: 51 | repository: ${{ env.KERNEL_REPO }} 52 | path: ${{ env.REPO_ROOT }} 53 | 54 | - name: Run prepare-incremental-build action 55 | uses: ./prepare-incremental-build 56 | with: 57 | repo-root: ${{ env.REPO_ROOT }} 58 | base-branch: ${{ env.KERNEL_BRANCH }} 59 | arch: ${{ env.ARCH }} 60 | toolchain_full: ${{ env.TOOLCHAIN_FULL }} 61 | kbuild-output: ${{ env.KBUILD_OUTPUT }} 62 | cache-key-prefix: 'test-prepare-incremental-build-action' 63 | 64 | - name: Put something in KBUILD_OUTPUT to trigger caching 65 | shell: bash 66 | run: | 67 | mkdir -p $KBUILD_OUTPUT/path/to 68 | echo "$ARCH $TOOLCHAIN_FULL $KERNEL" > $KBUILD_OUTPUT/path/to/output.txt 69 
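      # A sketch of how the cache round-trip can be verified (assuming the
      # cache key matches on a re-run of this workflow): the file written
      # above should be restored from the cache, e.g.
      #
      #   cat "$KBUILD_OUTPUT/path/to/output.txt"
      #   # expected to print something like: x86_64 llvm-17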
| 70 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: bpf-ci 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - main 8 | 9 | concurrency: 10 | group: ci-test-${{ github.ref_name }} 11 | cancel-in-progress: true 12 | 13 | jobs: 14 | build-and-test: 15 | strategy: 16 | matrix: 17 | runs_on: ["ubuntu-24.04"] 18 | build_runs_on: ["ubuntu-24.04"] 19 | arch: ["x86_64"] 20 | kernel_compiler: ["gcc", "llvm"] 21 | gcc_version: [14] 22 | llvm_version: [18] 23 | kernel: ["LATEST"] 24 | build_release: [false] 25 | tests: 26 | - include: 27 | - {"test": "test_progs", "continue_on_error": false, "timeout_minutes": 360} 28 | - {"test": "test_progs_no_alu32", "continue_on_error": false, "timeout_minutes": 360} 29 | - {"test": "test_verifier", "continue_on_error": false, "timeout_minutes": 360} 30 | - {"test": "test_maps", "continue_on_error": false, "timeout_minutes": 360} 31 | # - {"test": "sched_ext", "continue_on_error": false, "timeout_minutes": 360} 32 | # - {"test": "test_progs-bpf_gcc", "continue_on_error": false, "timeout_minutes": 360} 33 | fail-fast: false 34 | 35 | name: ${{ matrix.arch }} ${{ matrix.kernel_compiler }}-${{ matrix.kernel_compiler == 'gcc' && matrix.gcc_version || matrix.llvm_version }} 36 | uses: ./.github/workflows/kernel-build-test.yml 37 | permissions: 38 | id-token: write 39 | contents: read 40 | 41 | with: 42 | arch: ${{ matrix.arch }} 43 | toolchain: ${{ matrix.kernel_compiler }} 44 | toolchain_full: ${{ matrix.kernel_compiler }}-${{ matrix.kernel_compiler == 'gcc' && matrix.gcc_version || matrix.llvm_version }} 45 | runs_on: ${{ toJSON(matrix.runs_on) }} 46 | build_runs_on: ${{ toJSON(matrix.build_runs_on) }} 47 | gcc_version: ${{ matrix.gcc_version }} 48 | llvm_version: ${{ matrix.llvm_version }} 49 | kernel: ${{ matrix.kernel }} 50 | tests: ${{ toJSON(matrix.tests) }} 51 | run_tests: true 52 | download_sources: true 53 | build_release: ${{ matrix.build_release }} 54 | 55 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.idea 2 | -------------------------------------------------------------------------------- /INDEX: -------------------------------------------------------------------------------- 1 | x86_64/libbpf-vmtest-rootfs-2024.08.22-noble-amd64.tar.zst https://libbpf-ci.s3.us-west-1.amazonaws.com/libbpf-vmtest-rootfs-2024.08.22-noble-amd64.tar.zst 2 | x86_64/vmlinux-4.9.0.zst https://libbpf-ci.s3-us-west-1.amazonaws.com/x86_64/vmlinux-4.9.0.zst 3 | x86_64/vmlinux-5.5.0.zst https://libbpf-ci.s3-us-west-1.amazonaws.com/x86_64/vmlinux-5.5.0.zst 4 | x86_64/vmlinuz-5.5.0 https://libbpf-ci.s3-us-west-1.amazonaws.com/x86_64/vmlinuz-5.5.0 5 | x86_64/vmlinuz-4.9.0 https://libbpf-ci.s3-us-west-1.amazonaws.com/x86_64/vmlinuz-4.9.0 6 | s390x/libbpf-vmtest-rootfs-2024.08.22-noble-s390x.tar.zst https://libbpf-ci.s3.us-west-1.amazonaws.com/libbpf-vmtest-rootfs-2024.08.22-noble-s390x.tar.zst 7 | aarch64/libbpf-vmtest-rootfs-2024.08.22-noble-arm64.tar.zst https://libbpf-ci.s3.us-west-1.amazonaws.com/libbpf-vmtest-rootfs-2024.08.22-noble-arm64.tar.zst 8 | riscv64/libbpf-vmtest-rootfs-2024.09.09-noble-riscv64.tar.zst https://libbpf-ci.s3.us-west-1.amazonaws.com/libbpf-vmtest-rootfs-2024.09.09-noble-riscv64.tar.zst 9 | 
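# (Annotation, not part of the INDEX format.) Each line above maps an
# artifact path to its download URL, so an entry can be fetched with
# standard tools, e.g.:
#
#   url=$(awk '$1 == "x86_64/vmlinuz-5.5.0" {print $2}' INDEX)
#   wget -q "$url" -O vmlinuz-5.5.0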
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | LGPL-2.1 OR BSD-2-Clause 2 | -------------------------------------------------------------------------------- /LICENSE.BSD-2-Clause: -------------------------------------------------------------------------------- 1 | Valid-License-Identifier: BSD-2-Clause 2 | SPDX-URL: https://spdx.org/licenses/BSD-2-Clause.html 3 | Usage-Guide: 4 | To use the BSD 2-clause "Simplified" License put the following SPDX 5 | tag/value pair into a comment according to the placement guidelines in 6 | the licensing rules documentation: 7 | SPDX-License-Identifier: BSD-2-Clause 8 | License-Text: 9 | 10 | Copyright (c) 2015 The Libbpf Authors. All rights reserved. 11 | 12 | Redistribution and use in source and binary forms, with or without 13 | modification, are permitted provided that the following conditions are met: 14 | 15 | 1. Redistributions of source code must retain the above copyright notice, 16 | this list of conditions and the following disclaimer. 17 | 18 | 2. Redistributions in binary form must reproduce the above copyright 19 | notice, this list of conditions and the following disclaimer in the 20 | documentation and/or other materials provided with the distribution. 21 | 22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 26 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 | POSSIBILITY OF SUCH DAMAGE. 33 | -------------------------------------------------------------------------------- /LICENSE.LGPL-2.1: -------------------------------------------------------------------------------- 1 | Valid-License-Identifier: LGPL-2.1 2 | Valid-License-Identifier: LGPL-2.1+ 3 | SPDX-URL: https://spdx.org/licenses/LGPL-2.1.html 4 | Usage-Guide: 5 | To use this license in source code, put one of the following SPDX 6 | tag/value pairs into a comment according to the placement 7 | guidelines in the licensing rules documentation. 8 | For 'GNU Lesser General Public License (LGPL) version 2.1 only' use: 9 | SPDX-License-Identifier: LGPL-2.1 10 | For 'GNU Lesser General Public License (LGPL) version 2.1 or any later 11 | version' use: 12 | SPDX-License-Identifier: LGPL-2.1+ 13 | License-Text: 14 | 15 | GNU LESSER GENERAL PUBLIC LICENSE 16 | Version 2.1, February 1999 17 | 18 | Copyright (C) 1991, 1999 Free Software Foundation, Inc. 19 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 20 | 21 | Everyone is permitted to copy and distribute verbatim copies of this 22 | license document, but changing it is not allowed. 23 | 24 | [This is the first released version of the Lesser GPL. It also counts as 25 | the successor of the GNU Library Public License, version 2, hence the 26 | version number 2.1.] 
27 | 28 | Preamble 29 | 30 | The licenses for most software are designed to take away your freedom to 31 | share and change it. By contrast, the GNU General Public Licenses are 32 | intended to guarantee your freedom to share and change free software--to 33 | make sure the software is free for all its users. 34 | 35 | This license, the Lesser General Public License, applies to some specially 36 | designated software packages--typically libraries--of the Free Software 37 | Foundation and other authors who decide to use it. You can use it too, but 38 | we suggest you first think carefully about whether this license or the 39 | ordinary General Public License is the better strategy to use in any 40 | particular case, based on the explanations below. 41 | 42 | When we speak of free software, we are referring to freedom of use, not 43 | price. Our General Public Licenses are designed to make sure that you have 44 | the freedom to distribute copies of free software (and charge for this 45 | service if you wish); that you receive source code or can get it if you 46 | want it; that you can change the software and use pieces of it in new free 47 | programs; and that you are informed that you can do these things. 48 | 49 | To protect your rights, we need to make restrictions that forbid 50 | distributors to deny you these rights or to ask you to surrender these 51 | rights. These restrictions translate to certain responsibilities for you if 52 | you distribute copies of the library or if you modify it. 53 | 54 | For example, if you distribute copies of the library, whether gratis or for 55 | a fee, you must give the recipients all the rights that we gave you. You 56 | must make sure that they, too, receive or can get the source code. If you 57 | link other code with the library, you must provide complete object files to 58 | the recipients, so that they can relink them with the library after making 59 | changes to the library and recompiling it. And you must show them these 60 | terms so they know their rights. 61 | 62 | We protect your rights with a two-step method: (1) we copyright the 63 | library, and (2) we offer you this license, which gives you legal 64 | permission to copy, distribute and/or modify the library. 65 | 66 | To protect each distributor, we want to make it very clear that there is no 67 | warranty for the free library. Also, if the library is modified by someone 68 | else and passed on, the recipients should know that what they have is not 69 | the original version, so that the original author's reputation will not be 70 | affected by problems that might be introduced by others. 71 | 72 | Finally, software patents pose a constant threat to the existence of any 73 | free program. We wish to make sure that a company cannot effectively 74 | restrict the users of a free program by obtaining a restrictive license 75 | from a patent holder. Therefore, we insist that any patent license obtained 76 | for a version of the library must be consistent with the full freedom of 77 | use specified in this license. 78 | 79 | Most GNU software, including some libraries, is covered by the ordinary GNU 80 | General Public License. This license, the GNU Lesser General Public 81 | License, applies to certain designated libraries, and is quite different 82 | from the ordinary General Public License. We use this license for certain 83 | libraries in order to permit linking those libraries into non-free 84 | programs. 
85 | 86 | When a program is linked with a library, whether statically or using a 87 | shared library, the combination of the two is legally speaking a combined 88 | work, a derivative of the original library. The ordinary General Public 89 | License therefore permits such linking only if the entire combination fits 90 | its criteria of freedom. The Lesser General Public License permits more lax 91 | criteria for linking other code with the library. 92 | 93 | We call this license the "Lesser" General Public License because it does 94 | Less to protect the user's freedom than the ordinary General Public 95 | License. It also provides other free software developers Less of an 96 | advantage over competing non-free programs. These disadvantages are the 97 | reason we use the ordinary General Public License for many 98 | libraries. However, the Lesser license provides advantages in certain 99 | special circumstances. 100 | 101 | For example, on rare occasions, there may be a special need to encourage 102 | the widest possible use of a certain library, so that it becomes a de-facto 103 | standard. To achieve this, non-free programs must be allowed to use the 104 | library. A more frequent case is that a free library does the same job as 105 | widely used non-free libraries. In this case, there is little to gain by 106 | limiting the free library to free software only, so we use the Lesser 107 | General Public License. 108 | 109 | In other cases, permission to use a particular library in non-free programs 110 | enables a greater number of people to use a large body of free 111 | software. For example, permission to use the GNU C Library in non-free 112 | programs enables many more people to use the whole GNU operating system, as 113 | well as its variant, the GNU/Linux operating system. 114 | 115 | Although the Lesser General Public License is Less protective of the users' 116 | freedom, it does ensure that the user of a program that is linked with the 117 | Library has the freedom and the wherewithal to run that program using a 118 | modified version of the Library. 119 | 120 | The precise terms and conditions for copying, distribution and modification 121 | follow. Pay close attention to the difference between a "work based on the 122 | library" and a "work that uses the library". The former contains code 123 | derived from the library, whereas the latter must be combined with the 124 | library in order to run. 125 | 126 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 127 | 128 | 0. This License Agreement applies to any software library or other program 129 | which contains a notice placed by the copyright holder or other 130 | authorized party saying it may be distributed under the terms of this 131 | Lesser General Public License (also called "this License"). Each 132 | licensee is addressed as "you". 133 | 134 | A "library" means a collection of software functions and/or data 135 | prepared so as to be conveniently linked with application programs 136 | (which use some of those functions and data) to form executables. 137 | 138 | The "Library", below, refers to any such software library or work which 139 | has been distributed under these terms. A "work based on the Library" 140 | means either the Library or any derivative work under copyright law: 141 | that is to say, a work containing the Library or a portion of it, either 142 | verbatim or with modifications and/or translated straightforwardly into 143 | another language. 
(Hereinafter, translation is included without 144 | limitation in the term "modification".) 145 | 146 | "Source code" for a work means the preferred form of the work for making 147 | modifications to it. For a library, complete source code means all the 148 | source code for all modules it contains, plus any associated interface 149 | definition files, plus the scripts used to control compilation and 150 | installation of the library. 151 | 152 | Activities other than copying, distribution and modification are not 153 | covered by this License; they are outside its scope. The act of running 154 | a program using the Library is not restricted, and output from such a 155 | program is covered only if its contents constitute a work based on the 156 | Library (independent of the use of the Library in a tool for writing 157 | it). Whether that is true depends on what the Library does and what the 158 | program that uses the Library does. 159 | 160 | 1. You may copy and distribute verbatim copies of the Library's complete 161 | source code as you receive it, in any medium, provided that you 162 | conspicuously and appropriately publish on each copy an appropriate 163 | copyright notice and disclaimer of warranty; keep intact all the notices 164 | that refer to this License and to the absence of any warranty; and 165 | distribute a copy of this License along with the Library. 166 | 167 | You may charge a fee for the physical act of transferring a copy, and 168 | you may at your option offer warranty protection in exchange for a fee. 169 | 170 | 2. You may modify your copy or copies of the Library or any portion of it, 171 | thus forming a work based on the Library, and copy and distribute such 172 | modifications or work under the terms of Section 1 above, provided that 173 | you also meet all of these conditions: 174 | 175 | a) The modified work must itself be a software library. 176 | 177 | b) You must cause the files modified to carry prominent notices stating 178 | that you changed the files and the date of any change. 179 | 180 | c) You must cause the whole of the work to be licensed at no charge to 181 | all third parties under the terms of this License. 182 | 183 | d) If a facility in the modified Library refers to a function or a table 184 | of data to be supplied by an application program that uses the 185 | facility, other than as an argument passed when the facility is 186 | invoked, then you must make a good faith effort to ensure that, in 187 | the event an application does not supply such function or table, the 188 | facility still operates, and performs whatever part of its purpose 189 | remains meaningful. 190 | 191 | (For example, a function in a library to compute square roots has a 192 | purpose that is entirely well-defined independent of the 193 | application. Therefore, Subsection 2d requires that any 194 | application-supplied function or table used by this function must be 195 | optional: if the application does not supply it, the square root 196 | function must still compute square roots.) 197 | 198 | These requirements apply to the modified work as a whole. If 199 | identifiable sections of that work are not derived from the Library, and 200 | can be reasonably considered independent and separate works in 201 | themselves, then this License, and its terms, do not apply to those 202 | sections when you distribute them as separate works. 
But when you 203 | distribute the same sections as part of a whole which is a work based on 204 | the Library, the distribution of the whole must be on the terms of this 205 | License, whose permissions for other licensees extend to the entire 206 | whole, and thus to each and every part regardless of who wrote it. 207 | 208 | Thus, it is not the intent of this section to claim rights or contest 209 | your rights to work written entirely by you; rather, the intent is to 210 | exercise the right to control the distribution of derivative or 211 | collective works based on the Library. 212 | 213 | In addition, mere aggregation of another work not based on the Library 214 | with the Library (or with a work based on the Library) on a volume of a 215 | storage or distribution medium does not bring the other work under the 216 | scope of this License. 217 | 218 | 3. You may opt to apply the terms of the ordinary GNU General Public 219 | License instead of this License to a given copy of the Library. To do 220 | this, you must alter all the notices that refer to this License, so that 221 | they refer to the ordinary GNU General Public License, version 2, 222 | instead of to this License. (If a newer version than version 2 of the 223 | ordinary GNU General Public License has appeared, then you can specify 224 | that version instead if you wish.) Do not make any other change in these 225 | notices. 226 | 227 | Once this change is made in a given copy, it is irreversible for that 228 | copy, so the ordinary GNU General Public License applies to all 229 | subsequent copies and derivative works made from that copy. 230 | 231 | This option is useful when you wish to copy part of the code of the 232 | Library into a program that is not a library. 233 | 234 | 4. You may copy and distribute the Library (or a portion or derivative of 235 | it, under Section 2) in object code or executable form under the terms 236 | of Sections 1 and 2 above provided that you accompany it with the 237 | complete corresponding machine-readable source code, which must be 238 | distributed under the terms of Sections 1 and 2 above on a medium 239 | customarily used for software interchange. 240 | 241 | If distribution of object code is made by offering access to copy from a 242 | designated place, then offering equivalent access to copy the source 243 | code from the same place satisfies the requirement to distribute the 244 | source code, even though third parties are not compelled to copy the 245 | source along with the object code. 246 | 247 | 5. A program that contains no derivative of any portion of the Library, but 248 | is designed to work with the Library by being compiled or linked with 249 | it, is called a "work that uses the Library". Such a work, in isolation, 250 | is not a derivative work of the Library, and therefore falls outside the 251 | scope of this License. 252 | 253 | However, linking a "work that uses the Library" with the Library creates 254 | an executable that is a derivative of the Library (because it contains 255 | portions of the Library), rather than a "work that uses the 256 | library". The executable is therefore covered by this License. Section 6 257 | states terms for distribution of such executables. 258 | 259 | When a "work that uses the Library" uses material from a header file 260 | that is part of the Library, the object code for the work may be a 261 | derivative work of the Library even though the source code is 262 | not. 
Whether this is true is especially significant if the work can be 263 | linked without the Library, or if the work is itself a library. The 264 | threshold for this to be true is not precisely defined by law. 265 | 266 | If such an object file uses only numerical parameters, data structure 267 | layouts and accessors, and small macros and small inline functions (ten 268 | lines or less in length), then the use of the object file is 269 | unrestricted, regardless of whether it is legally a derivative 270 | work. (Executables containing this object code plus portions of the 271 | Library will still fall under Section 6.) 272 | 273 | Otherwise, if the work is a derivative of the Library, you may 274 | distribute the object code for the work under the terms of Section 275 | 6. Any executables containing that work also fall under Section 6, 276 | whether or not they are linked directly with the Library itself. 277 | 278 | 6. As an exception to the Sections above, you may also combine or link a 279 | "work that uses the Library" with the Library to produce a work 280 | containing portions of the Library, and distribute that work under terms 281 | of your choice, provided that the terms permit modification of the work 282 | for the customer's own use and reverse engineering for debugging such 283 | modifications. 284 | 285 | You must give prominent notice with each copy of the work that the 286 | Library is used in it and that the Library and its use are covered by 287 | this License. You must supply a copy of this License. If the work during 288 | execution displays copyright notices, you must include the copyright 289 | notice for the Library among them, as well as a reference directing the 290 | user to the copy of this License. Also, you must do one of these things: 291 | 292 | a) Accompany the work with the complete corresponding machine-readable 293 | source code for the Library including whatever changes were used in 294 | the work (which must be distributed under Sections 1 and 2 above); 295 | and, if the work is an executable linked with the Library, with the 296 | complete machine-readable "work that uses the Library", as object 297 | code and/or source code, so that the user can modify the Library and 298 | then relink to produce a modified executable containing the modified 299 | Library. (It is understood that the user who changes the contents of 300 | definitions files in the Library will not necessarily be able to 301 | recompile the application to use the modified definitions.) 302 | 303 | b) Use a suitable shared library mechanism for linking with the 304 | Library. A suitable mechanism is one that (1) uses at run time a copy 305 | of the library already present on the user's computer system, rather 306 | than copying library functions into the executable, and (2) will 307 | operate properly with a modified version of the library, if the user 308 | installs one, as long as the modified version is interface-compatible 309 | with the version that the work was made with. 310 | 311 | c) Accompany the work with a written offer, valid for at least three 312 | years, to give the same user the materials specified in Subsection 313 | 6a, above, for a charge no more than the cost of performing this 314 | distribution. 315 | 316 | d) If distribution of the work is made by offering access to copy from a 317 | designated place, offer equivalent access to copy the above specified 318 | materials from the same place. 
319 | 320 | e) Verify that the user has already received a copy of these materials 321 | or that you have already sent this user a copy. 322 | 323 | For an executable, the required form of the "work that uses the Library" 324 | must include any data and utility programs needed for reproducing the 325 | executable from it. However, as a special exception, the materials to be 326 | distributed need not include anything that is normally distributed (in 327 | either source or binary form) with the major components (compiler, 328 | kernel, and so on) of the operating system on which the executable runs, 329 | unless that component itself accompanies the executable. 330 | 331 | It may happen that this requirement contradicts the license restrictions 332 | of other proprietary libraries that do not normally accompany the 333 | operating system. Such a contradiction means you cannot use both them 334 | and the Library together in an executable that you distribute. 335 | 336 | 7. You may place library facilities that are a work based on the Library 337 | side-by-side in a single library together with other library facilities 338 | not covered by this License, and distribute such a combined library, 339 | provided that the separate distribution of the work based on the Library 340 | and of the other library facilities is otherwise permitted, and provided 341 | that you do these two things: 342 | 343 | a) Accompany the combined library with a copy of the same work based on 344 | the Library, uncombined with any other library facilities. This must 345 | be distributed under the terms of the Sections above. 346 | 347 | b) Give prominent notice with the combined library of the fact that part 348 | of it is a work based on the Library, and explaining where to find 349 | the accompanying uncombined form of the same work. 350 | 351 | 8. You may not copy, modify, sublicense, link with, or distribute the 352 | Library except as expressly provided under this License. Any attempt 353 | otherwise to copy, modify, sublicense, link with, or distribute the 354 | Library is void, and will automatically terminate your rights under this 355 | License. However, parties who have received copies, or rights, from you 356 | under this License will not have their licenses terminated so long as 357 | such parties remain in full compliance. 358 | 359 | 9. You are not required to accept this License, since you have not signed 360 | it. However, nothing else grants you permission to modify or distribute 361 | the Library or its derivative works. These actions are prohibited by law 362 | if you do not accept this License. Therefore, by modifying or 363 | distributing the Library (or any work based on the Library), you 364 | indicate your acceptance of this License to do so, and all its terms and 365 | conditions for copying, distributing or modifying the Library or works 366 | based on it. 367 | 368 | 10. Each time you redistribute the Library (or any work based on the 369 | Library), the recipient automatically receives a license from the 370 | original licensor to copy, distribute, link with or modify the Library 371 | subject to these terms and conditions. You may not impose any further 372 | restrictions on the recipients' exercise of the rights granted 373 | herein. You are not responsible for enforcing compliance by third 374 | parties with this License. 375 | 376 | 11. 
If, as a consequence of a court judgment or allegation of patent 377 | infringement or for any other reason (not limited to patent issues), 378 | conditions are imposed on you (whether by court order, agreement or 379 | otherwise) that contradict the conditions of this License, they do not 380 | excuse you from the conditions of this License. If you cannot 381 | distribute so as to satisfy simultaneously your obligations under this 382 | License and any other pertinent obligations, then as a consequence you 383 | may not distribute the Library at all. For example, if a patent license 384 | would not permit royalty-free redistribution of the Library by all 385 | those who receive copies directly or indirectly through you, then the 386 | only way you could satisfy both it and this License would be to refrain 387 | entirely from distribution of the Library. 388 | 389 | If any portion of this section is held invalid or unenforceable under 390 | any particular circumstance, the balance of the section is intended to 391 | apply, and the section as a whole is intended to apply in other 392 | circumstances. 393 | 394 | It is not the purpose of this section to induce you to infringe any 395 | patents or other property right claims or to contest validity of any 396 | such claims; this section has the sole purpose of protecting the 397 | integrity of the free software distribution system which is implemented 398 | by public license practices. Many people have made generous 399 | contributions to the wide range of software distributed through that 400 | system in reliance on consistent application of that system; it is up 401 | to the author/donor to decide if he or she is willing to distribute 402 | software through any other system and a licensee cannot impose that 403 | choice. 404 | 405 | This section is intended to make thoroughly clear what is believed to 406 | be a consequence of the rest of this License. 407 | 408 | 12. If the distribution and/or use of the Library is restricted in certain 409 | countries either by patents or by copyrighted interfaces, the original 410 | copyright holder who places the Library under this License may add an 411 | explicit geographical distribution limitation excluding those 412 | countries, so that distribution is permitted only in or among countries 413 | not thus excluded. In such case, this License incorporates the 414 | limitation as if written in the body of this License. 415 | 416 | 13. The Free Software Foundation may publish revised and/or new versions of 417 | the Lesser General Public License from time to time. Such new versions 418 | will be similar in spirit to the present version, but may differ in 419 | detail to address new problems or concerns. 420 | 421 | Each version is given a distinguishing version number. If the Library 422 | specifies a version number of this License which applies to it and "any 423 | later version", you have the option of following the terms and 424 | conditions either of that version or of any later version published by 425 | the Free Software Foundation. If the Library does not specify a license 426 | version number, you may choose any version ever published by the Free 427 | Software Foundation. 428 | 429 | 14. If you wish to incorporate parts of the Library into other free 430 | programs whose distribution conditions are incompatible with these, 431 | write to the author to ask for permission. 
For software which is 432 | copyrighted by the Free Software Foundation, write to the Free Software 433 | Foundation; we sometimes make exceptions for this. Our decision will be 434 | guided by the two goals of preserving the free status of all 435 | derivatives of our free software and of promoting the sharing and reuse 436 | of software generally. 437 | 438 | NO WARRANTY 439 | 440 | 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 441 | FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 442 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 443 | PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER 444 | EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 445 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE 446 | ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH 447 | YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL 448 | NECESSARY SERVICING, REPAIR OR CORRECTION. 449 | 450 | 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 451 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 452 | REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR 453 | DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL 454 | DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY 455 | (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED 456 | INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF 457 | THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR 458 | OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 459 | 460 | END OF TERMS AND CONDITIONS 461 | 462 | How to Apply These Terms to Your New Libraries 463 | 464 | If you develop a new library, and you want it to be of the greatest 465 | possible use to the public, we recommend making it free software that 466 | everyone can redistribute and change. You can do so by permitting 467 | redistribution under these terms (or, alternatively, under the terms of the 468 | ordinary General Public License). 469 | 470 | To apply these terms, attach the following notices to the library. It is 471 | safest to attach them to the start of each source file to most effectively 472 | convey the exclusion of warranty; and each file should have at least the 473 | "copyright" line and a pointer to where the full notice is found. 474 | 475 | one line to give the library's name and an idea of what it does. 476 | Copyright (C) year name of author 477 | 478 | This library is free software; you can redistribute it and/or modify it 479 | under the terms of the GNU Lesser General Public License as published by 480 | the Free Software Foundation; either version 2.1 of the License, or (at 481 | your option) any later version. 482 | 483 | This library is distributed in the hope that it will be useful, but WITHOUT 484 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 485 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License 486 | for more details. 487 | 488 | You should have received a copy of the GNU Lesser General Public License 489 | along with this library; if not, write to the Free Software Foundation, 490 | Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add 491 | information on how to contact you by electronic and paper mail. 
492 | 493 | You should also get your employer (if you work as a programmer) or your 494 | school, if any, to sign a "copyright disclaimer" for the library, if 495 | necessary. Here is a sample; alter the names: 496 | 497 | Yoyodyne, Inc., hereby disclaims all copyright interest in 498 | the library `Frob' (a library for tweaking knobs) written 499 | by James Random Hacker. 500 | 501 | signature of Ty Coon, 1 April 1990 502 | Ty Coon, President of Vice 503 | That's all there is to it! 504 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/libbpf/ci/badd5122055e76450a092ed66db10c95f47337e4/README.md -------------------------------------------------------------------------------- /ansible/README.md: -------------------------------------------------------------------------------- 1 | ## Install `ansible` 2 | 3 | ``` 4 | sudo dnf install -y ansible 5 | ``` 6 | 7 | 8 | ## Inventory 9 | 10 | The [inventory](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) is where we define our hosts, host groups, and possibly variables. 11 | 12 | Use an inventory similar to [inventory_example.yml](inventory_example.yml). 13 | 14 | After updating the GitHub token, you can run the following commands: 15 | 16 | Run the playbook against `s390x` only, and in check mode (`-C`): 17 | ``` 18 | ansible-playbook -i ~/inventory.yml ansible/playbook.yml -C --limit s390x 19 | ``` 20 | 21 | Run the playbook against all hosts in the `repo_and_org_runner` group, in check mode, and display the change diff (`-D`): 22 | 23 | ``` 24 | ansible-playbook -i ~/inventory.yml ansible/playbook.yml -C -D --limit repo_and_org_runner 25 | ``` 26 | 27 | 28 | Same, but against all hosts: 29 | ``` 30 | ansible-playbook -i ~/inventory.yml ansible/playbook.yml -C -D 31 | ``` 32 | 33 | To actually apply the changes, remove `-C`.
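The playbook tags each role (`base`, `qemu`, `runner`; see [playbook.yml](playbook.yml)), so once a dry run looks good, a single role can also be applied in isolation. For example, to apply only the `runner` role on the `s390x` hosts:

```
ansible-playbook -i ~/inventory.yml ansible/playbook.yml -D --limit s390x --tags runner
```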
34 | 35 | -------------------------------------------------------------------------------- /ansible/inventory_example.yml: -------------------------------------------------------------------------------- 1 | all: 2 | children: 3 | linuxone: 4 | children: 5 | s390x: 6 | children: 7 | repo_and_org_runner: 8 | hosts: 9 | ci_runner_s390x_1: null 10 | ci_runner_s390x_3: null 11 | vars: 12 | runner_repo_list: 13 | - name: a-github-org 14 | instances: 1 15 | gh_app_id: a-github-app-id 16 | - name: a-github-org/a-github-repo 17 | instances: 1 18 | hosts: 19 | ci_runner_s390x_1: 20 | ansible_host: 192.0.2.1 21 | ci_runner_s390x_2: 22 | ansible_host: 192.0.2.2 23 | ci_runner_s390x_3: 24 | ansible_host: 192.0.2.3 25 | vars: 26 | ansible_user: host_user 27 | runner_docker_healthcheck: |- 28 | --health-cmd='(ss -ntp -H dport = :443 | grep -q ESTAB) || exit 1' \ 29 | --health-start-period=60s --health-interval=30s \ 30 | --health-timeout=5s --health-retries=3 31 | runner_repo_list: 32 | - name: a-github-org 33 | instances: 1 34 | gh_app_id: a-github-app-id 35 | aws: 36 | children: 37 | x86_64: 38 | hosts: 39 | ci_runner_x86_64_1: 40 | ansible_host: 192.0.2.11 41 | ci_runner_x86_64_2: 42 | ansible_host: 192.0.2.12 43 | aarch64: 44 | hosts: 45 | ci_runner_arm64_1: 46 | ansible_host: 192.0.2.21 47 | ci_runner_arm64_2: 48 | ansible_host: 192.0.2.22 49 | vars: 50 | runner_repo_list: 51 | - name: a-github-org 52 | instances: 8 53 | gh_app_id: a-github-app-id 54 | vars: 55 | ansible_user: host_user 56 | runner_repo_list: 57 | - name: a-github-org 58 | instances: 10 59 | gh_app_id: a-github-app-id 60 | runner_prefix: some-prefixes 61 | vars: 62 | ansible_ssh_common_args: -o 'ProxyCommand ......' 63 | runner_libbpf_ci_repo_url: https://github.com/libbpf/ci 64 | runner_libbpf_ci_repo_branch: master 65 | runner_gh_apps: 66 | - name: kernel-patches-runner 67 | id: a-github-app-id 68 | secret: '{{ lookup(''ansible.builtin.pipe'', ''command to get a-github-app-id secret'') }}' 69 | runner_gh_token_default: '{{ lookup(''ansible.builtin.pipe'', ''command to get the default github-user-token'') }}' 70 | runner_gh_user_default: kernel-patches-bot 71 | runner_gh_tokens: 72 | a-github-org/a-github-repo: '{{ lookup(''ansible.builtin.pipe'', ''command to get a repo specific github-user-token'') }}' 73 | 74 | -------------------------------------------------------------------------------- /ansible/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | roles: 4 | - role: base 5 | tags: [base] 6 | - role: qemu-user-static 7 | tags: [qemu] 8 | - role: runner 9 | tags: [runner] -------------------------------------------------------------------------------- /ansible/roles/base/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role: base 2 | 3 | ## Description 4 | 5 | This role is used to install basic packages that may be required by any deployment. 6 | 7 | Some default packages that apply to both RedHat-based and Debian-based systems are set in the [defaults/main.yml](defaults/main.yml) file. 8 | 9 | Each distro that uses different package names has a file under [vars/](vars/) with a list of packages (example: `docker.io` for Debian, `podman-docker` for RedHat).
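A distro vars file is nothing more than the distro-specific package list that gets merged into `__base_packages` by [tasks/main.yml](tasks/main.yml); for example, [vars/Debian.yml](vars/Debian.yml) is simply:

```
---
__base_distro_packages:
  - docker.io
```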
10 | 11 | It also provides handlers that can be useful to other roles, such as: 12 | - `"reset systemd failed"`: runs `systemctl reset-failed` 13 | - `"reload systemd daemon"`: essentially runs `systemctl daemon-reload` 14 | 15 | This role is typically evaluated first. 16 | -------------------------------------------------------------------------------- /ansible/roles/base/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | __base_packages: 3 | - curl 4 | - git 5 | - jq 6 | - python3-pip 7 | - vim -------------------------------------------------------------------------------- /ansible/roles/base/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Just force systemd to reread configs 3 | become: yes 4 | ansible.builtin.systemd: 5 | daemon_reload: yes 6 | listen: "reload systemd daemon" 7 | 8 | - name: systemctl reset-failed 9 | become: yes 10 | ansible.builtin.shell: 11 | "systemctl reset-failed" 12 | listen: "reset systemd failed" -------------------------------------------------------------------------------- /ansible/roles/base/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include OS-specific variables. 3 | include_vars: "{{ ansible_os_family }}.yml" 4 | 5 | - name: Build package list 6 | set_fact: 7 | base_packages: "{{ __base_packages + __base_distro_packages }}" 8 | 9 | - include_tasks: "setup-{{ ansible_os_family }}.yml" -------------------------------------------------------------------------------- /ansible/roles/base/tasks/setup-Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install base packages 3 | become: true 4 | apt: 5 | state: present 6 | name: "{{ base_packages }}" 7 | update_cache: yes 8 | tags: [install] 9 | 10 | - name: Gather the package facts 11 | ansible.builtin.package_facts: 12 | 13 | # Auditd is spamming the logs when the workers are busy.
14 | # Disable for now 15 | - name: Disable auditd 16 | become: true 17 | ansible.builtin.systemd: 18 | name: auditd 19 | state: stopped 20 | enabled: no 21 | masked: yes 22 | when: "'auditd' in ansible_facts.packages" -------------------------------------------------------------------------------- /ansible/roles/base/tasks/setup-RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install base packages 3 | become: true 4 | package: 5 | state: present 6 | name: "{{ base_packages }}" 7 | update_cache: yes 8 | tags: [install] 9 | 10 | - name: Start docker 11 | become: true 12 | service: 13 | name: docker 14 | state: started -------------------------------------------------------------------------------- /ansible/roles/base/vars/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | __base_distro_packages: 4 | - docker.io -------------------------------------------------------------------------------- /ansible/roles/base/vars/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | __base_distro_packages: 4 | - "{{ 'podman-docker' if ansible_distribution != 'Amazon' else 'docker' }}" 5 | 6 | -------------------------------------------------------------------------------- /ansible/roles/qemu-user-static/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role: qemu-user-static 2 | 3 | ## Description 4 | 5 | This is needed in order to run containers of a different architecture via QEMU user emulation. 6 | 7 | See https://github.com/multiarch/qemu-user-static for more details. 8 | 9 | The role merely creates the needed systemd unit file and ensures the service is started once.
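Concretely, with the role defaults below, the generated unit registers the binfmt handlers by running the qus container once at boot (after first resetting any stale registrations with `-s -- -r`):

```
docker run --rm --interactive --privileged aptman/qus:d7.1 -s -- -p
```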
10 | -------------------------------------------------------------------------------- /ansible/roles/qemu-user-static/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | qus_container_name: aptman/qus 3 | qus_container_tag: d7.1 -------------------------------------------------------------------------------- /ansible/roles/qemu-user-static/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Systemd execution of non-native binaries with QEMU 3 | become: true 4 | copy: 5 | dest: /etc/systemd/system/qemu-user-static.service 6 | content: | 7 | [Unit] 8 | Description=Support for transparent execution of non-native binaries with QEMU user emulation 9 | 10 | [Service] 11 | Type=oneshot 12 | RemainAfterExit=yes 13 | # https://github.com/multiarch/qemu-user-static does not support host arch != x86_64 14 | # see https://github.com/multiarch/qemu-user-static/issues/174 15 | # In the meantime use https://dbhi.github.io/qus/ 16 | # The command below is a stupid trick to ensure that the systemd units for binfmt (proc-sys-fs-binfmt_misc.{auto,}mount) are kicked and properly mount the FS before we run qemu-user-static 17 | ExecStartPre=-/bin/ls /proc/sys/fs/binfmt_misc 18 | ExecStartPre=/usr/bin/docker run --rm --interactive --privileged {{ qus_container_name }}:{{ qus_container_tag }} -s -- -r 19 | ExecStart=/usr/bin/docker run --rm --interactive --privileged {{ qus_container_name }}:{{ qus_container_tag }} -s -- -p 20 | 21 | [Install] 22 | WantedBy=multi-user.target 23 | mode: 0644 24 | owner: root 25 | group: root 26 | register: qemu_user_static 27 | 28 | - name: Mask systemd-binfmt service 29 | become: true 30 | systemd: 31 | name: systemd-binfmt 32 | state: stopped 33 | masked: yes 34 | 35 | # Building the image relies on this having started. Force starting the service when this file changes. 36 | - name: start qemu-user-static 37 | become: yes 38 | service: 39 | name: "qemu-user-static" 40 | state: started 41 | enabled: true 42 | daemon_reload: yes 43 | when: qemu_user_static.changed -------------------------------------------------------------------------------- /ansible/roles/runner/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role: runner 2 | 3 | ## Description 4 | 5 | This Ansible role configures [github action self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners). 6 | 7 | The self-hosted runners will register with GitHub at the repository level. 8 | 9 | This role currently installs runners suitable for running on the s390x architecture as described in https://github.com/libbpf/libbpf/tree/master/ci/rootfs/s390x-self-hosted-builder . 10 | 11 | The role syncs with the libbpf CI repository, where all the files needed to build the docker image reside. 12 | It then sets up the runner configurations and systemd service units. 13 | 14 | ## Requirements 15 | 16 | This role requires the ``qemu-user-static`` role to be executed beforehand, and `docker` must be installed. 17 | 18 | ## Role variables 19 | 20 | All variables which can be overridden are stored in the [defaults/main.yml](defaults/main.yml) file as well as in the table below. 21 | 22 | | Name | Default Value | Description | 23 | | ---- | ------------- | ----------- | 24 | | `runner_base_dir` | /etc/actions-runner | The base directory where the actions runner config will go.
| 25 | | `runner_libbpf_ci_repo_url` | https://github.com/libbpf/ci.git | The libbpf CI repository from which to fetch the s390x runner installation artifacts. | 26 | | `runner_libbpf_ci_repo_branch` | master | Which branch to check out. | 27 | | `runner_repo_list` | [ {name: kernel-patches/bpf, instances: 2}, {name: kernel-patches/vmtest, instances: 1} ] | List of name/instances dictionaries: `name` is the repository to attach to, `instances` is the number of runners to run on a single host. | 28 | | `runner_gh_tokens` | {'foo/bar': 'foo/bar token'} | Dictionary of repository names and their associated tokens. | 29 | | `runner_gh_token_default` | "replace with your token" | The default token to use for authenticating the runner. Used if no entry for the repository is found in `runner_gh_tokens`. | 30 | 31 | ## Example 32 | 33 | Example playbook: 34 | 35 | ``` 36 | --- 37 | - hosts: all 38 | vars: 39 | - runner_gh_token_default: ghp_token 40 | - runner_gh_tokens: 41 | myowner/myrepo: ghp_token2 42 | - runner_repo_list: 43 | - {name: myowner2/myrepo2, instances: 2} 44 | - {name: myowner/myrepo, instances: 1} 45 | roles: 46 | - role: base 47 | tags: [base] 48 | - role: qemu-user-static 49 | tags: [qemu] 50 | when: ansible_architecture == "s390x" 51 | - role: runner 52 | tags: [runner] 53 | when: ansible_architecture == "s390x" 54 | ``` 55 | -------------------------------------------------------------------------------- /ansible/roles/runner/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | runner_base_dir: /etc/actions-runner 3 | runner_workers: 1 4 | runner_prefix: "" 5 | runner_workdir: /tmp/work 6 | runner_tmpfs_options: 7 | - exec 8 | runner_gh_token_default: replace with your token 9 | runner_gh_user_default: replace with your GH user 10 | runner_docker_tag: main 11 | runner_docker_ubuntu_version: noble 12 | runner_docker_image_url: ghcr.io/kernel-patches/runner 13 | runner_docker_healthcheck: "" 14 | runner_docker_mount_volume: false 15 | runner_gh_tokens: 16 | owner/repository: gh token for owner/repository 17 | runner_libbpf_ci_repo_url: https://github.com/libbpf/ci.git 18 | runner_libbpf_ci_repo_branch: master 19 | runner_repo_list: 20 | - {name: owner1/repository1, instances: 2} 21 | - {name: owner1/repository2, instances: 1} 22 | - {name: owner2/repository2, instances: 1} 23 | 24 | #runner_gh_apps: 25 | # - name: runner-app-name 26 | # id: 1234 27 | # secret: | 28 | # ---START PEM SECRET--- 29 | # ... 30 | # ...
31 | # ---END PEM SECRET--- -------------------------------------------------------------------------------- /ansible/roles/runner/files/actions-runner-watchdog.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Stop unhealthy containers; systemd will then restart the actions-runner container 4 | 5 | docker ps --filter 'health=unhealthy' --format '{{.ID}} {{.Names}}' | while read -r container_id container_name 6 | do 7 | echo "Container ${container_id} (${container_name}) is unhealthy, stopping it" 8 | docker exec "$container_id" kill -INT -- -1 9 | docker wait "$container_id" 10 | done 11 | -------------------------------------------------------------------------------- /ansible/roles/runner/files/app_token.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Request an ACCESS_TOKEN to be used by a GitHub App. 4 | # Environment variables that need to be set up: 5 | # * APP_ID, the GitHub app's ID 6 | # * APP_PRIVATE_KEY, the content of the GitHub app's private key in PEM format. 7 | # 8 | # https://github.com/orgs/community/discussions/24743#discussioncomment-3245300 9 | # 10 | 11 | set -o pipefail 12 | 13 | _GITHUB_HOST=${GITHUB_HOST:="github.com"} 14 | 15 | # If URL is not github.com then use the enterprise api endpoint 16 | if [[ ${GITHUB_HOST} = "github.com" ]]; then 17 | URI="https://api.${_GITHUB_HOST}" 18 | else 19 | URI="https://${_GITHUB_HOST}/api/v3" 20 | fi 21 | 22 | API_VERSION=v3 23 | API_HEADER="Accept: application/vnd.github.${API_VERSION}+json" 24 | CONTENT_LENGTH_HEADER="Content-Length: 0" 25 | APP_INSTALLATIONS_URI="${URI}/app/installations" 26 | 27 | 28 | # JWT parameters based on 29 | # https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#authenticating-as-a-github-app 30 | # 31 | # JWT token issuance and expiration parameters 32 | JWT_IAT_DRIFT=60 33 | JWT_EXP_DELTA=600 34 | 35 | JWT_JOSE_HEADER='{ 36 | "alg": "RS256", 37 | "typ": "JWT" 38 | }' 39 | 40 | 41 | build_jwt_payload() { 42 | now=$(date +%s) 43 | iat=$((now - JWT_IAT_DRIFT)) 44 | jq -c \ 45 | --arg iat_str "${iat}" \ 46 | --arg exp_delta_str "${JWT_EXP_DELTA}" \ 47 | --arg app_id_str "${APP_ID}" \ 48 | ' 49 | ($iat_str | tonumber) as $iat 50 | | ($exp_delta_str | tonumber) as $exp_delta 51 | | ($app_id_str | tonumber) as $app_id 52 | | .iat = $iat 53 | | .exp = ($iat + $exp_delta) 54 | | .iss = $app_id 55 | ' <<< "{}" | tr -d '\n' 56 | } 57 | 58 | base64url() { 59 | base64 | tr '+/' '-_' | tr -d '=\n' 60 | } 61 | 62 | rs256_sign() { 63 | openssl dgst -binary -sha256 -sign <(echo "$1") 64 | } 65 | 66 | request_access_token() { 67 | jwt_payload=$(build_jwt_payload) 68 | encoded_jwt_parts=$(base64url <<<"${JWT_JOSE_HEADER}").$(base64url <<<"${jwt_payload}") 69 | encoded_mac=$(echo -n "$encoded_jwt_parts" | rs256_sign "${APP_PRIVATE_KEY}" | base64url) 70 | generated_jwt="${encoded_jwt_parts}.${encoded_mac}" 71 | 72 | auth_header="Authorization: Bearer ${generated_jwt}" 73 | 74 | app_installations_response=$(curl -sX GET \ 75 | -H "${auth_header}" \ 76 | -H "${API_HEADER}" \ 77 | ${APP_INSTALLATIONS_URI} \ 78 | ) 79 | access_token_url=$(echo "$app_installations_response" | jq --raw-output '.[] | select (.app_id == '"${APP_ID}"') .access_tokens_url') 80 | curl -sX POST \ 81 | -H "${CONTENT_LENGTH_HEADER}" \ 82 | -H "${auth_header}" \ 83 | -H "${API_HEADER}" \ 84 | "${access_token_url}" | \ 85 | jq --raw-output .token 86 | } 87 | 88 | request_access_token 89 |
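# Example invocation (the app ID and key file below are placeholders), mirroring
# how gh_token_generator.sh drives this script:
#
#   APP_ID=1234 APP_PRIVATE_KEY="$(cat app_priv.pem)" ./app_token.sh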
-------------------------------------------------------------------------------- /ansible/roles/runner/files/gh_token_generator.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | SCRIPT_DIR=$(dirname "$0") 5 | APP_ID=$1 6 | APP_PRIVATE_KEY=$2 7 | DST_FILE="$3" 8 | 9 | ACCESS_TOKEN="$(APP_ID="${APP_ID}" APP_PRIVATE_KEY="$(<"${APP_PRIVATE_KEY}")" "${SCRIPT_DIR}/app_token.sh")" 10 | echo "ACCESS_TOKEN=${ACCESS_TOKEN}" > "${DST_FILE}" 11 | -------------------------------------------------------------------------------- /ansible/roles/runner/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "restart actions-runners" 3 | become: yes 4 | ansible.builtin.service: 5 | name: actions-runner-{{ item.0.normalized }}@{{ item.1 }} 6 | state: restarted 7 | # Find the systemd resources that were modified and extract the "normalized" attributes 8 | loop: "{{ runner_projects.results | selectattr('changed', 'equalto', true) | map(attribute='item') | subelements('workers') }}" 9 | 10 | - name: "restart docker_pruning timer" 11 | become: yes 12 | ansible.builtin.service: 13 | name: docker_pruning.timer 14 | state: restarted 15 | 16 | - name: "restart actions-runner-watchdog timer" 17 | become: yes 18 | ansible.builtin.service: 19 | name: actions-runner-watchdog.timer 20 | state: restarted 21 | 22 | - name: "restart runner" 23 | become: yes 24 | ansible.builtin.service: 25 | name: runner 26 | state: restarted -------------------------------------------------------------------------------- /ansible/roles/runner/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Used by ansible modules later 4 | - name: Install docker pip on Amazon Linux 5 | become: yes 6 | ansible.builtin.pip: 7 | name: 8 | # AL2 python's version is compiled against openssl 1.0, urllib3>=2.0 needs openssl 1.1.1 9 | - urllib3<2.0 10 | - docker 11 | extra_args: --user 12 | executable: pip3 13 | when: ansible_distribution == 'Amazon' 14 | 15 | - name: Install python3-docker on Ubuntu 16 | become: yes 17 | ansible.builtin.apt: 18 | state: present 19 | name: python3-docker 20 | update_cache: yes 21 | when: ansible_os_family == 'Debian' 22 | 23 | - name: Create runner directory 24 | become: yes 25 | ansible.builtin.file: 26 | path: "{{ runner_base_dir }}" 27 | state: directory 28 | mode: 0755 29 | 30 | - name: Copy GH application token scripts 31 | become: yes 32 | ansible.builtin.copy: 33 | src: "{{ item }}" 34 | dest: "{{ runner_base_dir }}/{{ item }}" 35 | mode: 0755 36 | owner: root 37 | group: root 38 | with_items: 39 | - app_token.sh 40 | - gh_token_generator.sh 41 | 42 | - name: Write App private key 43 | no_log: true 44 | become: yes 45 | ansible.builtin.copy: 46 | content: "{{ item.secret }}" 47 | dest: "{{ runner_base_dir }}/{{ item.name }}_{{ item.id }}_priv.pem" 48 | mode: 0700 49 | owner: root 50 | group: root 51 | with_items: "{{ runner_gh_apps |default([]) }}" 52 | 53 | - name: Set runner env 54 | become: yes 55 | ansible.builtin.copy: 56 | dest: "{{ runner_base_dir }}/runner_unit.env" 57 | content: | 58 | DOCKER_TAG={{ runner_docker_tag }}-{{ runner_docker_ubuntu_version }}-{{ ansible_architecture }} 59 | mode: 0700 60 | owner: root 61 | group: root 62 | 63 | # Generate runners' facts by inferring a normalized repo/org name from the repo name, 64 | # and generate a list of workers, so we can easily iterate over that list.
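# For example, {name: 'owner/repo', instances: 2} becomes
# {name: 'owner/repo', instances: 2, normalized: 'owner-repo', workers: [0, 1], gh_app_id: 0},
# and later tasks loop over "{{ runners | subelements('workers') }}", i.e. one item per worker.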
65 | # Setting a default `gh_app_id` to 0. If `gh_app_id` is defined in the item, it will be 66 | # overriden by `combine(item)`, otherwise it will provide a "bogus" value that will never 67 | # match and actual GH app ID, but allows for the rest of the code to check for `gh_app_id` 68 | # vs checking its existence. 69 | - name: Generate runners facts 70 | set_fact: 71 | runners: "{{ runners|default([]) + [ {'gh_app_id': 0, 'normalized': item.name | replace('/','-'), 'workers': range(item.instances) | list } | combine(item) ] }}" 72 | with_items: "{{ runner_repo_list }}" 73 | 74 | - name: Set runner name prefix 75 | set_fact: 76 | runner_name_prefix: "{{ '%s-' | format(runner_prefix) if runner_prefix }}{{ ansible_hostname }}" 77 | 78 | # When running on Amazon Linux hosts, we override the runner_name_prefix with the ec2's instance ID. 79 | # When testing in Amazon Linux VMs, the `Load ec2 metadata facts` task will fail and we will fallback 80 | # on using hostname. 81 | - name: Set runner_name_prefix to instance ID for Amazon hosts 82 | block: 83 | - name: Load ec2 metadata facts 84 | amazon.aws.ec2_metadata_facts: 85 | 86 | - name: Set runner name prefix with instance ID 87 | set_fact: 88 | runner_name_prefix: "{{ '%s-' | format(runner_prefix) if runner_prefix }}{{ ansible_ec2_instance_id }}" 89 | when: ansible_distribution == 'Amazon' 90 | ignore_errors: yes 91 | 92 | - name: Generate runner env 93 | become: yes 94 | ansible.builtin.copy: 95 | dest: "{{ runner_base_dir }}/actions-runner-{{ item.0.normalized }}-{{ 'worker-%02d.env' | format(item.1) }}" 96 | content: | 97 | ACCESS_TOKEN={{ runner_gh_tokens[item.0.name] | default(runner_gh_token_default) }} 98 | RUNNER_WORKDIR={{ runner_workdir }} 99 | LABELS={{ ansible_architecture }},docker-{{ runner_docker_ubuntu_version }}-{{ runner_docker_tag }} 100 | EPHEMERAL=true 101 | {% if '/' in item.0.name %} 102 | {# The presence of a / in the name signifies that we have a repo name, otherwise we assume an organization name. #} 103 | REPO_URL=https://github.com/{{ item.0.name }} 104 | {% else %} 105 | RUNNER_SCOPE=org 106 | ORG_NAME={{ item.0.name }} 107 | {% endif %} 108 | RUNNER_NAME_PREFIX={{ runner_name_prefix }}-{{ 'worker-%02d' | format(item.1) }} 109 | RANDOM_RUNNER_SUFFIX=true 110 | DISABLE_AUTO_UPDATE=true 111 | loop: "{{ runners | subelements('workers') }}" 112 | 113 | - name: Docker GHCR login 114 | become: yes 115 | docker_login: 116 | registry: ghcr.io 117 | username: "{{ runner_gh_user_default }}" 118 | password: "{{ runner_gh_token_default }}" 119 | 120 | - name: Check /dev/kvm exists 121 | ansible.builtin.stat: 122 | path: /dev/kvm 123 | register: has_kvm 124 | 125 | - name: Set runner.service 126 | become: yes 127 | ansible.builtin.copy: 128 | dest: "/etc/systemd/system/actions-runner-{{ item.normalized }}@.service" 129 | # The use of `namespace` is needed. See Jinja scoping: https://jinja.palletsprojects.com/en/3.1.x/templates/#assignments 130 | content: | 131 | {% set ns = namespace(ghapp=undefined) %} 132 | {% for runner_gh_app in runner_gh_apps if runner_gh_app.id == item.gh_app_id %} 133 | {%- set ns.ghapp = runner_gh_app %} 134 | {%- endfor %} 135 | [Unit] 136 | Description=Ephemeral GitHub Actions Runner Container for {{ item.name }} - %i 137 | After=docker.service 138 | Requires=docker.service 139 | 140 | [Service] 141 | TimeoutStartSec=0 142 | Restart=always 143 | RestartPreventExitStatus=199 144 | EnvironmentFile={{ runner_base_dir }}/runner_unit.env 145 | # Optionally loaded file. 
Use this to override per runner environment 146 | EnvironmentFile=-{{ runner_base_dir }}/runner_unit-%i.env 147 | ExecStartPre=-/usr/bin/docker stop %p-%i 148 | ExecStartPre=-/usr/bin/docker rm %p-%i 149 | ExecStartPre=-/usr/bin/docker pull {{ runner_docker_image_url }}:${DOCKER_TAG} 150 | {% if ns.ghapp is defined %} 151 | ExecStartPre=-{{ runner_base_dir }}/gh_token_generator.sh {{ ns.ghapp.id }} "{{ runner_base_dir }}/{{ ns.ghapp.name }}_{{ ns.ghapp.id }}_priv.pem" "{{ runner_base_dir }}/actions-runner-{{ item.normalized }}-worker-%i-ghtoken.env" 152 | {% endif %} 153 | ExecStart=/usr/bin/docker run {{ '--device=/dev/kvm' if has_kvm.stat.exists }} \ 154 | {% if runner_docker_healthcheck %} 155 | {{ runner_docker_healthcheck }} \ 156 | {% endif %} 157 | {% if runner_docker_mount_volume %} 158 | --volume=actions-runner-{{ item.normalized }}-worker-%i:{{ runner_workdir }} \ 159 | {% else %} 160 | {{ '--tmpfs %s:%s' | format(runner_workdir, runner_tmpfs_options | join(',')) }} \ 161 | {% endif %} 162 | --rm \ 163 | --env-file "{{ runner_base_dir }}/actions-runner-{{ item.normalized }}-worker-%i.env" \ 164 | {% if ns.ghapp is defined %} 165 | --env-file "{{ runner_base_dir }}/actions-runner-{{ item.normalized }}-worker-%i-ghtoken.env" \ 166 | {% endif %} 167 | --name %p-%i \ 168 | {{ runner_docker_image_url }}:${DOCKER_TAG} 169 | 170 | [Install] 171 | WantedBy=multi-user.target 172 | mode: 0700 173 | owner: root 174 | group: root 175 | loop: "{{ runners }}" 176 | notify: 177 | - reload systemd daemon 178 | 179 | - name: Start and enable runner services 180 | become: yes 181 | ansible.builtin.service: 182 | name: "{{ 'actions-runner-%s@%02d' | format(item.0.normalized, item.1) }}" 183 | state: started 184 | enabled: yes 185 | loop: "{{ runners | subelements('workers') }}" 186 | 187 | # Docker pruning 188 | - name: Set docker pruning service 189 | become: yes 190 | ansible.builtin.copy: 191 | dest: /etc/systemd/system/docker_pruning.service 192 | content: | 193 | [Unit] 194 | Description=Prune unused docker resources 195 | Wants=docker_pruning.timer 196 | 197 | [Service] 198 | Type=oneshot 199 | ExecStart=/usr/bin/docker system prune --volumes -f 200 | mode: 0644 201 | owner: root 202 | group: root 203 | notify: 204 | - reload systemd daemon 205 | - restart docker_pruning timer 206 | 207 | 208 | - name: Set docker pruning timer 209 | become: yes 210 | ansible.builtin.copy: 211 | dest: /etc/systemd/system/docker_pruning.timer 212 | content: | 213 | [Unit] 214 | Description=Run docker_pruning service daily 215 | 216 | [Timer] 217 | # Run at 8:00 UTC daily 218 | OnCalendar=*-*-* 08:00:00 219 | [Install] 220 | WantedBy=timers.target 221 | mode: 0644 222 | owner: root 223 | group: root 224 | notify: 225 | - reload systemd daemon 226 | - restart docker_pruning timer 227 | 228 | - name: Enable docker pruning timer 229 | become: yes 230 | ansible.builtin.service: 231 | name: docker_pruning.timer 232 | enabled: yes 233 | state: started 234 | 235 | # Watchdog 236 | - name: Actions runner watchdog script 237 | become: yes 238 | ansible.builtin.copy: 239 | src: actions-runner-watchdog.sh 240 | dest: /usr/local/sbin/actions-runner-watchdog.sh 241 | mode: 0700 242 | owner: root 243 | group: root 244 | 245 | - name: Set actions runner watchdog timer 246 | become: yes 247 | ansible.builtin.copy: 248 | dest: /etc/systemd/system/actions-runner-watchdog.timer 249 | content: | 250 | [Unit] 251 | Description=Run actions-runner-watchdog service regularly 252 | 253 | [Timer] 254 | # Run two minutes after previous 
run completes 255 | OnBootSec=1m 256 | OnUnitInactiveSec=2m 257 | [Install] 258 | WantedBy=timers.target 259 | mode: 0644 260 | owner: root 261 | group: root 262 | notify: 263 | - reload systemd daemon 264 | - restart actions-runner-watchdog timer 265 | 266 | - name: Set actions runner watchdog service 267 | become: yes 268 | ansible.builtin.copy: 269 | dest: /etc/systemd/system/actions-runner-watchdog.service 270 | content: | 271 | [Unit] 272 | Description=Kill unhealthy actions runner containers 273 | Wants=actions-runner-watchdog.timer 274 | 275 | [Service] 276 | Type=oneshot 277 | ExecStart=/usr/local/sbin/actions-runner-watchdog.sh 278 | mode: 0644 279 | owner: root 280 | group: root 281 | notify: 282 | - reload systemd daemon 283 | - restart actions-runner-watchdog timer 284 | 285 | - name: Enable actions runner watchdog timer 286 | become: yes 287 | ansible.builtin.service: 288 | name: actions-runner-watchdog.timer 289 | enabled: yes 290 | state: started 291 | -------------------------------------------------------------------------------- /build-bpf-gcc/README.md: -------------------------------------------------------------------------------- 1 | # Build BPF GCC 2 | 3 | This action grabs the latest GCC 15 source code snapshot from 4 | https://gcc.gnu.org/pub/gcc/snapshots, as well as the most recent 5 | binutils snapshot, builds GCC for the BPF backend, and installs it into a 6 | specified directory. 7 | 8 | The resulting artifacts are cached with 9 | [actions/cache](https://github.com/actions/cache), using the snapshot 10 | names as a key. 11 | 12 | ## Required inputs 13 | 14 | * `install-dir` - Path to the directory where built binaries are going to be installed, or restored from cache. 15 | 16 | -------------------------------------------------------------------------------- /build-bpf-gcc/action.yml: -------------------------------------------------------------------------------- 1 | name: 'Build BPF GCC' 2 | description: 'Fetch the latest GCC and binutils snapshots, build GCC and install it into the target directory' 3 | inputs: 4 | install-dir: 5 | description: "Path to the GCC installation directory" 6 | required: true 7 | 8 | runs: 9 | using: "composite" 10 | steps: 11 | 12 | - name: Determine latest snapshots 13 | id: latest-snapshots 14 | shell: bash 15 | run: ${GITHUB_ACTION_PATH}/latest-snapshots.sh 16 | 17 | - uses: actions/cache@v4 18 | id: cache 19 | with: 20 | path: ${{ inputs.install-dir }} 21 | key: BPF-GCC-${{ steps.latest-snapshots.outputs.GCC_BASENAME }}-${{ steps.latest-snapshots.outputs.BINUTILS_BASENAME }} 22 | 23 | - if: steps.cache.outputs.cache-hit != 'true' 24 | name: Build BPF GCC 25 | shell: bash 26 | run: | 27 | ${GITHUB_ACTION_PATH}/build-and-install.sh ${{ inputs.install-dir }} 28 | 29 | -------------------------------------------------------------------------------- /build-bpf-gcc/build-and-install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | source ${GITHUB_ACTION_PATH}/../helpers.sh 5 | 6 | INSTALLDIR=$(realpath $1) 7 | 8 | if [ -f ${GITHUB_ACTION_PATH}/.env ]; then 9 | source ${GITHUB_ACTION_PATH}/.env 10 | else 11 | echo "${GITHUB_ACTION_PATH}/.env is not found; it is supposed to be produced by latest-snapshots.sh" 12 | exit 1 13 | fi 14 | 15 | foldable start download_tarballs "Downloading $BINUTILS_URL and $GCC_URL" 16 | 17 | test -f $BINUTILS_TARBALL || wget -q $BINUTILS_URL 18 | test -f $GCC_TARBALL || wget -q $GCC_URL 19 | 20 | foldable end download_tarballs 21 | 22 | foldable start build_binutils
"Building $BINUTILS_BASENAME" 23 | 24 | if [ ! -f "${INSTALLDIR}/${BINUTILS_BASENAME}.built" ]; then 25 | tar xJf $BINUTILS_TARBALL 26 | mkdir -p ${BINUTILS_BASENAME}/build-bpf 27 | cd ${BINUTILS_BASENAME}/build-bpf 28 | ../configure --target=bpf-unknown-none --prefix=$INSTALLDIR 29 | make -j$(nproc) && make install 30 | touch ${INSTALLDIR}/${BINUTILS_BASENAME}.built 31 | cd - 32 | fi 33 | 34 | foldable end build_binutils 35 | 36 | foldable start build_gcc "Building $GCC_BASENAME" 37 | 38 | if [ ! -f "${INSTALLDIR}/${GCC_BASENAME}.built" ]; then 39 | tar xJf $GCC_TARBALL 40 | cd ${GCC_BASENAME} 41 | ./contrib/download_prerequisites 42 | cd - 43 | mkdir -p ${GCC_BASENAME}/build-bpf 44 | cd ${GCC_BASENAME}/build-bpf 45 | ../configure --target=bpf-unknown-none --prefix=$INSTALLDIR 46 | make -j$(nproc) && make install 47 | touch ${INSTALLDIR}/${GCC_BASENAME}.built 48 | cd - 49 | fi 50 | 51 | foldable end build_gcc 52 | 53 | exit 0 54 | -------------------------------------------------------------------------------- /build-bpf-gcc/latest-snapshots.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | BINUTILS_TARBALL=$(wget https://snapshots.sourceware.org/binutils/trunk/latest/src/sha512.sum -O - -o /dev/null | awk '{print $2}') 5 | GCC_TARBALL=$(wget https://gcc.gnu.org/pub/gcc/snapshots/LATEST-15 -O - -o /dev/null | grep -E 'gcc-15-[0-9]+.tar.xz' | sed -e 's/.*\(gcc-15-[^<]*\).*/\1/') 6 | 7 | BINUTILS_URL="https://snapshots.sourceware.org/binutils/trunk/latest/src/$BINUTILS_TARBALL" 8 | GCC_URL="https://gcc.gnu.org/pub/gcc/snapshots/LATEST-15/$GCC_TARBALL" 9 | 10 | BINUTILS_BASENAME=$(basename $BINUTILS_TARBALL .tar.xz) 11 | GCC_BASENAME=$(basename $GCC_TARBALL .tar.xz) 12 | 13 | cat > ${GITHUB_ACTION_PATH}/.env <> "$GITHUB_OUTPUT" 23 | echo "GCC_BASENAME=${GCC_BASENAME}" >> "$GITHUB_OUTPUT" 24 | 25 | exit 0 26 | -------------------------------------------------------------------------------- /build-linux/action.yml: -------------------------------------------------------------------------------- 1 | name: 'build linux' 2 | description: 'Build Linux' 3 | inputs: 4 | arch: 5 | description: 'arch' 6 | required: true 7 | toolchain: 8 | description: 'what toolchain to use' 9 | default: 'gcc' 10 | kbuild-output: 11 | description: 'relative or absolute path to use for storing build artifacts' 12 | required: true 13 | max-make-jobs: 14 | description: 'Maximum number of jobs to use when running make (e.g argument to -j). 
Default: 4*nproc' 15 | default: '' 16 | llvm-version: 17 | description: 'llvm version' 18 | required: false 19 | default: '16' 20 | runs: 21 | using: "composite" 22 | steps: 23 | - name: build linux 24 | shell: bash 25 | run: | 26 | kbuild_output="$(realpath ${{ inputs.kbuild-output }})" 27 | export LLVM_VERSION=${{ inputs.llvm-version }} 28 | ${GITHUB_ACTION_PATH}/build.sh "${{ inputs.arch }}" "${{ inputs.toolchain }}" "${kbuild_output}" 29 | env: 30 | MAX_MAKE_JOBS: ${{ inputs.max-make-jobs }} 31 | -------------------------------------------------------------------------------- /build-linux/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | THISDIR="$(cd $(dirname $0) && pwd)" 6 | 7 | source "${THISDIR}"/../helpers.sh 8 | 9 | TARGET_ARCH="$1" 10 | TOOLCHAIN="$2" 11 | export KBUILD_OUTPUT="$3" 12 | 13 | ARCH="$(platform_to_kernel_arch "${TARGET_ARCH}")" 14 | CROSS_COMPILE="" 15 | 16 | if [[ "${TARGET_ARCH}" != "$(uname -m)" ]] 17 | then 18 | CROSS_COMPILE="${TARGET_ARCH}-linux-gnu-" 19 | fi 20 | 21 | if [ $TOOLCHAIN = "llvm" ]; then 22 | export LLVM="-$LLVM_VERSION" 23 | TOOLCHAIN="llvm-$LLVM_VERSION" 24 | fi 25 | 26 | foldable start build_kernel "Building kernel with $TOOLCHAIN" 27 | 28 | # $1 - path to config file to create/overwrite 29 | cat_kernel_config() { 30 | cat ${GITHUB_WORKSPACE}/tools/testing/selftests/bpf/config \ 31 | ${GITHUB_WORKSPACE}/tools/testing/selftests/bpf/config.vm \ 32 | ${GITHUB_WORKSPACE}/tools/testing/selftests/bpf/config.${TARGET_ARCH} \ 33 | ${GITHUB_WORKSPACE}/tools/testing/selftests/sched_ext/config \ 34 | ${GITHUB_WORKSPACE}/ci/vmtest/configs/config \ 35 | ${GITHUB_WORKSPACE}/ci/vmtest/configs/config.${TARGET_ARCH} 2> /dev/null > "${1}" 36 | } 37 | 38 | mkdir -p "${KBUILD_OUTPUT}" 39 | if [ -f "${KBUILD_OUTPUT}"/.config ]; then 40 | kbuild_tmp="$(mktemp -d)" 41 | cat_kernel_config ${kbuild_tmp}/.config && : 42 | 43 | # Generate a fully blown config to determine whether anything changed. 
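# (olddefconfig expands the merged config fragments into a complete .config,
#  filling every unspecified option with its default, so a plain diff against
#  the existing .config tells us whether the configuration actually changed.)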
44 | KBUILD_OUTPUT="${kbuild_tmp}" make ARCH="${ARCH}" CROSS_COMPILE="${CROSS_COMPILE}" olddefconfig 45 | 46 | if diff -q "${kbuild_tmp}"/.config "${KBUILD_OUTPUT}"/.config > /dev/null; then 47 | echo "Existing kernel configuration is up-to-date" 48 | else 49 | echo "Using updated kernel configuration; diff:" 50 | diff -u "${KBUILD_OUTPUT}"/.config "${kbuild_tmp}"/.config && : 51 | 52 | mv "${kbuild_tmp}"/.config "${KBUILD_OUTPUT}"/.config 53 | fi 54 | rm -rf "${kbuild_tmp}" 55 | else 56 | cat_kernel_config "${KBUILD_OUTPUT}"/.config && : 57 | fi 58 | 59 | make ARCH="${ARCH}" CROSS_COMPILE="${CROSS_COMPILE}" olddefconfig 60 | make ARCH="${ARCH}" CROSS_COMPILE="${CROSS_COMPILE}" -j $(kernel_build_make_jobs) all || ( 61 | echo "Build failed; falling back to full rebuild" 62 | make clean; make ARCH="${ARCH}" CROSS_COMPILE="${CROSS_COMPILE}" -j $(kernel_build_make_jobs) all 63 | ) 64 | 65 | foldable end build_kernel 66 | -------------------------------------------------------------------------------- /build-samples/action.yml: -------------------------------------------------------------------------------- 1 | name: 'build samples/bpf' 2 | description: 'Build samples/bpf' 3 | inputs: 4 | kernel: 5 | description: 'kernel version' 6 | default: 'LATEST' 7 | toolchain: 8 | description: 'what toolchain to use' 9 | default: 'gcc' 10 | kbuild-output: 11 | description: 'relative or absolute path to use for storing build artifacts' 12 | required: true 13 | max-make-jobs: 14 | description: 'Maximum number of jobs to use when running make (e.g argument to -j). Default: 4*nproc' 15 | default: '' 16 | llvm-version: 17 | description: 'llvm version' 18 | required: false 19 | default: '16' 20 | arch: 21 | description: 'arch' 22 | required: true 23 | 24 | runs: 25 | using: "composite" 26 | steps: 27 | - name: build samples/bpf 28 | shell: bash 29 | run: | 30 | kbuild_output="$(realpath ${{ inputs.kbuild-output }})" 31 | export LLVM_VERSION=${{ inputs.llvm-version }} 32 | ${GITHUB_ACTION_PATH}/build_samples.sh "${{ inputs.arch }}" "${{ inputs.kernel }}" "${{ inputs.toolchain }}" "${kbuild_output}" 33 | env: 34 | MAX_MAKE_JOBS: ${{ inputs.max-make-jobs }} 35 | -------------------------------------------------------------------------------- /build-samples/build_samples.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | THISDIR="$(cd $(dirname $0) && pwd)" 6 | 7 | source "${THISDIR}"/../helpers.sh 8 | 9 | TARGET_ARCH="$1" 10 | KERNEL="$2" 11 | TOOLCHAIN="$3" 12 | export KBUILD_OUTPUT="$4" 13 | 14 | ARCH="$(platform_to_kernel_arch ${TARGET_ARCH})" 15 | CROSS_COMPILE="" 16 | 17 | if [[ "${TARGET_ARCH}" != "$(uname -m)" ]] 18 | then 19 | CROSS_COMPILE="${TARGET_ARCH}-linux-gnu-" 20 | fi 21 | 22 | if [ $TOOLCHAIN = "llvm" ]; then 23 | export LLVM="-$LLVM_VERSION" 24 | TOOLCHAIN="llvm-$LLVM_VERSION" 25 | fi 26 | 27 | foldable start build_samples "Building samples with $TOOLCHAIN" 28 | 29 | if [[ "${KERNEL}" = 'LATEST' ]]; then 30 | VMLINUX_H= 31 | else 32 | VMLINUX_H=${THISDIR}/vmlinux.h 33 | fi 34 | 35 | make headers_install 36 | make \ 37 | ARCH="${ARCH}" \ 38 | CROSS_COMPILE="${CROSS_COMPILE}" \ 39 | CLANG=clang-${LLVM_VERSION} \ 40 | OPT=opt-${LLVM_VERSION} \ 41 | LLC=llc-${LLVM_VERSION} \ 42 | LLVM_DIS=llvm-dis-${LLVM_VERSION} \ 43 | LLVM_OBJCOPY=llvm-objcopy-${LLVM_VERSION} \ 44 | LLVM_READELF=llvm-readelf-${LLVM_VERSION} \ 45 | LLVM_STRIP=llvm-strip-${LLVM_VERSION} \ 46 | VMLINUX_BTF="${KBUILD_OUTPUT}/vmlinux" \ 47 | 
VMLINUX_H="${VMLINUX_H}" \ 48 | -C "${REPO_ROOT}/${REPO_PATH}/samples/bpf" \ 49 | -j $(kernel_build_make_jobs) 50 | 51 | foldable end build_samples 52 | -------------------------------------------------------------------------------- /build-scx-selftests/README.md: -------------------------------------------------------------------------------- 1 | # Build selftests/sched_ext 2 | 3 | This action builds selftests/sched_ext given a kernel build 4 | output. Kernel build configuration is supposed to include necessary 5 | flags (i.e. `tools/testing/selftests/sched_ext/config`). 6 | 7 | The action is expected to be executed by a workflow with access to the 8 | Linux kernel repository. 9 | 10 | ## Required inputs 11 | 12 | * `kbuild-output` - Path to the kernel build output. 13 | * `repo-root` - Path to the root of the Linux kernel repository. 14 | * `arch` - Kernel build architecture. 15 | * `toolchain` - Toolchain name: `gcc` (default) or `llvm`. 16 | 17 | ## Optional inputs 18 | * `llvm-version` - LLVM version, used when `toolchain` is `llvm`. Default: `16`. 19 | * `max-make-jobs` - Maximum number of jobs to use when running make (e.g argument to -j). Default: 4*nproc. 20 | 21 | -------------------------------------------------------------------------------- /build-scx-selftests/action.yml: -------------------------------------------------------------------------------- 1 | name: 'Build selftests/sched_ext' 2 | inputs: 3 | kbuild-output: 4 | description: 'Path to the kernel build output' 5 | required: true 6 | repo-root: 7 | description: "Path to the root of the kernel repository" 8 | required: true 9 | arch: 10 | description: 'arch' 11 | required: true 12 | toolchain: 13 | description: 'gcc or llvm' 14 | default: 'gcc' 15 | required: true 16 | llvm-version: 17 | description: 'llvm version' 18 | required: false 19 | default: '16' 20 | max-make-jobs: 21 | description: 'Maximum number of jobs to use when running make (e.g argument to -j). Default: 4*nproc' 22 | default: '' 23 | 24 | runs: 25 | using: "composite" 26 | steps: 27 | - name: Build selftests/sched_ext 28 | env: 29 | KBUILD_OUTPUT: ${{ inputs.kbuild-output }} 30 | MAX_MAKE_JOBS: ${{ inputs.max-make-jobs }} 31 | REPO_ROOT: ${{ inputs.repo-root || github.workspace }} 32 | shell: bash 33 | run: 34 | ${GITHUB_ACTION_PATH}/build.sh "${{ inputs.arch }}" "${{ inputs.toolchain }}" "${{ inputs.llvm-version }}" 35 | -------------------------------------------------------------------------------- /build-scx-selftests/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | source "${GITHUB_ACTION_PATH}/../helpers.sh" 6 | 7 | TARGET_ARCH=$1 8 | TOOLCHAIN=$2 9 | LLVM_VERSION=$3 10 | 11 | ARCH="$(platform_to_kernel_arch ${TARGET_ARCH})" 12 | CROSS_COMPILE="" 13 | 14 | if [[ "${TARGET_ARCH}" != "$(uname -m)" ]] 15 | then 16 | CROSS_COMPILE="${TARGET_ARCH}-linux-gnu-" 17 | fi 18 | 19 | if [[ $TOOLCHAIN = "llvm" ]]; then 20 | export LLVM="-$LLVM_VERSION" 21 | TOOLCHAIN="llvm-$LLVM_VERSION" 22 | fi 23 | 24 | foldable start build_selftests "Building selftests/sched_ext with $TOOLCHAIN" 25 | 26 | MAKE_OPTS=$(cat < /dev/null; then 96 | # This URL contains a mapping from file names to URLs where 97 | # those files can be downloaded. 
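# Each INDEX entry is tab-separated: "<file name>\t<download URL>".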
98 | declare -gA URLS 99 | while IFS=$'\t' read -r name url; do 100 | URLS["$name"]="$url" 101 | done < <(cat "${GITHUB_ACTION_PATH}/../INDEX") 102 | fi 103 | } 104 | 105 | matching_kernel_releases() { 106 | local pattern="$1" 107 | { 108 | for file in "${!URLS[@]}"; do 109 | if [[ $file =~ ^vmlinux-(.*).zst$ ]]; then 110 | release="${BASH_REMATCH[1]}" 111 | case "$release" in 112 | $pattern) 113 | # sort -V handles rc versions properly 114 | # if we use "~" instead of "-". 115 | echo "${release//-rc/~rc}" 116 | ;; 117 | esac 118 | fi 119 | done 120 | } | sort -rV | sed 's/~rc/-rc/g' 121 | } 122 | 123 | download() { 124 | local file="$1" 125 | cache_urls 126 | if [[ ! -v URLS[$file] ]]; then 127 | echo "$file not found" >&2 128 | return 1 129 | fi 130 | echo "Downloading $file..." >&2 131 | curl -Lf "${URLS[$file]}" "${@:2}" 132 | } 133 | 134 | if (( LIST )); then 135 | cache_urls 136 | matching_kernel_releases "$KERNELRELEASE" 137 | exit 0 138 | fi 139 | 140 | # Only go to the network if it's actually a glob pattern. 141 | if [[ -v BUILDDIR ]]; then 142 | KERNELRELEASE="$(make -C "$BUILDDIR" -s kernelrelease)" 143 | elif [[ ! $KERNELRELEASE =~ ^([^\\*?[]|\\[*?[])*\\?$ ]]; then 144 | # We need to cache the list of URLs outside of the command 145 | # substitution, which happens in a subshell. 146 | cache_urls 147 | KERNELRELEASE="$(matching_kernel_releases "$KERNELRELEASE" | head -1)" 148 | if [[ -z $KERNELRELEASE ]]; then 149 | echo "No matching kernel release found" >&2 150 | exit 1 151 | fi 152 | fi 153 | 154 | echo "Kernel release: $KERNELRELEASE" >&2 155 | echo 156 | 157 | foldable start vmlinux_setup "Preparing Linux image" 158 | 159 | tmp= 160 | ARCH_DIR="$DIR/x86_64" 161 | mkdir -p "$ARCH_DIR" 162 | mnt="$(mktemp -d -p "$DIR" mnt.XXXXXXXXXX)" 163 | 164 | cleanup() { 165 | if [[ -n $tmp ]]; then 166 | rm -f "$tmp" || true 167 | fi 168 | if mountpoint -q "$mnt"; then 169 | sudo umount "$mnt" || true 170 | fi 171 | if [[ -d "$mnt" ]]; then 172 | rmdir "$mnt" || true 173 | fi 174 | } 175 | trap cleanup EXIT 176 | 177 | if [[ -v BUILDDIR ]]; then 178 | vmlinuz="$BUILDDIR/$(make -C "$BUILDDIR" -s image_name)" 179 | else 180 | vmlinuz="${ARCH_DIR}/vmlinuz-${KERNELRELEASE}" 181 | if [[ ! -e $vmlinuz ]]; then 182 | tmp="$(mktemp "$vmlinuz.XXX.part")" 183 | download "${ARCH}/vmlinuz-${KERNELRELEASE}" -o "$tmp" 184 | mv "$tmp" "$vmlinuz" 185 | tmp= 186 | fi 187 | fi 188 | 189 | # Install vmlinux. 
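# (The vmlinux image is stored zstd-compressed, so stream-decompress it into the workspace.)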
190 | vmlinux="${GITHUB_WORKSPACE}/vmlinux" 191 | download "${ARCH}/vmlinux-${KERNELRELEASE}.zst" | zstd -d > "$vmlinux" 192 | 193 | echo $vmlinux 194 | 195 | foldable end vmlinux_setup 196 | -------------------------------------------------------------------------------- /get-linux-source/action.yml: -------------------------------------------------------------------------------- 1 | name: 'get-linux-source' 2 | description: 'Download linux source' 3 | inputs: 4 | repo: 5 | description: 'source repo' 6 | default: 'https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git' 7 | required: false 8 | rev: 9 | description: 'source rev' 10 | default: 'master' 11 | required: true 12 | dest: 13 | description: 'destination directory' 14 | required: true 15 | runs: 16 | using: "composite" 17 | steps: 18 | - name: Get bpf-next source 19 | shell: bash 20 | run: | 21 | export KERNEL_ORIGIN='${{ inputs.repo }}' 22 | export KERNEL_BRANCH='${{ inputs.rev }}' 23 | export REPO_PATH='${{ inputs.dest }}' 24 | ${{ github.action_path }}/checkout_latest_kernel.sh 25 | -------------------------------------------------------------------------------- /get-linux-source/checkout_latest_kernel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -xeuo pipefail 4 | 5 | FETCH_DEPTH=${FETCH_DEPTH:-1} 6 | 7 | echo KERNEL_ORIGIN = ${KERNEL_ORIGIN} 8 | echo KERNEL_BRANCH = ${KERNEL_BRANCH} 9 | echo REPO_PATH = ${REPO_PATH} 10 | 11 | if [ -d "${REPO_PATH}" ]; then 12 | echo "${REPO_PATH} directory already exists, will not download kernel sources" 13 | exit 1 14 | fi 15 | 16 | mkdir -p "${REPO_PATH}" 17 | cd "${REPO_PATH}" 18 | 19 | git init 20 | git remote add origin ${KERNEL_ORIGIN} 21 | git fetch --depth=${FETCH_DEPTH} origin ${KERNEL_BRANCH} 22 | git checkout FETCH_HEAD 23 | -------------------------------------------------------------------------------- /get-llvm-version/action.yml: -------------------------------------------------------------------------------- 1 | name: 'get-llvm-version' 2 | description: 'Retrieve the LLVM version to use' 3 | outputs: 4 | version: 5 | description: "LLVM version" 6 | value: ${{ steps.llvm-version.outputs.version }} 7 | runs: 8 | using: "composite" 9 | steps: 10 | - id: llvm-version 11 | shell: bash 12 | run: echo "version=$(source $GITHUB_ACTION_PATH/../helpers.sh; llvm_default_version)" >> $GITHUB_OUTPUT 13 | -------------------------------------------------------------------------------- /helpers.sh: -------------------------------------------------------------------------------- 1 | # shellcheck shell=bash 2 | 3 | # $1 - start or end 4 | # $2 - fold identifier, no spaces 5 | # $3 - fold section description 6 | foldable() { 7 | local YELLOW='\033[1;33m' 8 | local NOCOLOR='\033[0m' 9 | if [ $1 = "start" ]; then 10 | line="::group::$2" 11 | if [ ! 
-z "${3:-}" ]; then 12 | line="$line - ${YELLOW}$3${NOCOLOR}" 13 | fi 14 | else 15 | line="::endgroup::" 16 | fi 17 | echo -e "$line" 18 | } 19 | 20 | __print() { 21 | local TITLE="" 22 | if [[ -n $2 ]]; then 23 | TITLE=" title=$2" 24 | fi 25 | echo "::$1${TITLE}::$3" 26 | } 27 | 28 | # $1 - title 29 | # $2 - message 30 | print_error() { 31 | __print error $1 $2 32 | } 33 | 34 | # $1 - title 35 | # $2 - message 36 | print_notice() { 37 | __print notice $1 $2 38 | } 39 | 40 | # No arguments 41 | llvm_default_version() { 42 | echo "17" 43 | } 44 | 45 | # No arguments 46 | llvm_latest_version() { 47 | echo "19" 48 | } 49 | 50 | # No arguments 51 | kernel_build_make_jobs() { 52 | # returns the number of processes to use when building kernel/selftests/samples 53 | # default to 4*nproc if MAX_MAKE_JOBS is not defined 54 | smp=$((4*$(nproc))) 55 | MAX_MAKE_JOBS=${MAX_MAKE_JOBS:-$smp} 56 | echo $(( smp > MAX_MAKE_JOBS ? MAX_MAKE_JOBS : smp )) 57 | } 58 | 59 | distrib_codename() { 60 | DISTRIB_CODENAME="noble" 61 | test -f /etc/lsb-release && . /etc/lsb-release 62 | echo "${DISTRIB_CODENAME}" 63 | } 64 | 65 | # Convert a platform (as returned by uname -m) to the kernel 66 | # arch (as expected by ARCH= env). 67 | platform_to_kernel_arch() { 68 | case $1 in 69 | s390x) 70 | echo "s390" 71 | ;; 72 | aarch64) 73 | echo "arm64" 74 | ;; 75 | riscv64) 76 | echo "riscv" 77 | ;; 78 | x86_64) 79 | echo "x86" 80 | ;; 81 | *) 82 | echo "$1" 83 | ;; 84 | esac 85 | } 86 | 87 | # Convert a platform (as returned by uname -m) to its debian equivalent. 88 | platform_to_deb_arch() { 89 | case $1 in 90 | aarch64) 91 | echo "arm64" 92 | ;; 93 | x86_64) 94 | echo "amd64" 95 | ;; 96 | *) 97 | echo "$1" 98 | ;; 99 | esac 100 | } 101 | -------------------------------------------------------------------------------- /patch-kernel/action.yml: -------------------------------------------------------------------------------- 1 | name: 'patch kernel tree' 2 | description: 'patch kernel' 3 | inputs: 4 | patches-root: 5 | description: "Directory to find patches" 6 | required: true 7 | default: ./ci/diffs 8 | repo-root: 9 | description: "Directory to apply patch to" 10 | required: true 11 | default: ${{ github.workspace }} 12 | runs: 13 | using: "composite" 14 | steps: 15 | - name: apply temporary patches 16 | shell: bash 17 | run: cd ${{ inputs.repo-root }} && ${GITHUB_ACTION_PATH}/patch_kernel.sh "${{ inputs.patches-root }}" 18 | -------------------------------------------------------------------------------- /patch-kernel/patch_kernel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | DIFF_DIR=$1 5 | 6 | for ext in diff patch; do 7 | if ls ${DIFF_DIR}/*.${ext} 1>/dev/null 2>&1; then 8 | for file in ${DIFF_DIR}/*.${ext}; do 9 | if patch --dry-run -N --silent -p1 -s < "${file}" 2>/dev/null; then 10 | patch -s -p1 < "${file}" 2>/dev/null 11 | echo "Successfully applied ${file}!" 12 | else 13 | echo "Failed to apply ${file}, skipping!" 
14 | fi 15 | done 16 | fi 17 | done 18 | -------------------------------------------------------------------------------- /prepare-incremental-build/README.md: -------------------------------------------------------------------------------- 1 | [![Action test](https://github.com/libbpf/ci/actions/workflows/test-prepare-incremental-build-action.yml/badge.svg)](https://github.com/libbpf/ci/actions/workflows/test-prepare-incremental-build-action.yml) 2 | 3 | # Prepare incremental build 4 | 5 | This action uses [actions/cache](https://github.com/actions/cache) in combination with custom scripts to save kernel build output from previous workflow runs to facilitate incremental builds. 6 | 7 | ## Required inputs 8 | 9 | * `repo-root` - Path to the root of the Linux kernel repository. 10 | * `base-branch` - Branch of the kernel repository. This is used to find the commit hash for cache lookup. 11 | * `arch` - Kernel build architecture. Part of the cache key. 12 | * `toolchain_full` - Toolchain name, such as `llvm-17`. Part of the cache key. 13 | * `kbuild-output` - Path to the directory where the kernel build output is saved or restored to. This is passed as `path` to [actions/cache](https://github.com/actions/cache?tab=readme-ov-file#inputs). 14 | 15 | ## Optional inputs 16 | * `cache-key-prefix` - Prefix for the cache key. Defaults to `kbuild-output`. -------------------------------------------------------------------------------- /prepare-incremental-build/action.yml: -------------------------------------------------------------------------------- 1 | name: 'Prepare incremental kernel build' 2 | description: 'Pull cached kernel build output from previous runs and prepare the repo for incremental build' 3 | inputs: 4 | repo-root: 5 | description: "Path to the root of the kernel repository" 6 | required: true 7 | base-branch: 8 | description: "Branch for cache lookup" 9 | required: true 10 | arch: 11 | required: true 12 | type: string 13 | description: "Part of cache lookup key" 14 | toolchain_full: 15 | required: true 16 | type: string 17 | description: "Part of cache lookup key" 18 | kbuild-output: 19 | required: true 20 | type: string 21 | description: "Path to KBUILD_OUTPUT" 22 | cache-key-prefix: 23 | required: false 24 | type: string 25 | default: "kbuild-output" 26 | 27 | runs: 28 | using: "composite" 29 | steps: 30 | 31 | - name: Get commit meta-data for cache lookup 32 | id: get-commit-metadata 33 | working-directory: ${{ inputs.repo-root }} 34 | shell: bash 35 | run: ${GITHUB_ACTION_PATH}/get-commit-metadata.sh ${{ inputs.base-branch }} 36 | 37 | - name: Pull recent KBUILD_OUTPUT contents 38 | uses: actions/cache@v4 39 | with: 40 | path: ${{ inputs.kbuild-output }} 41 | key: ${{ inputs.cache-key-prefix }}-${{ inputs.arch }}-${{ inputs.toolchain_full }}-${{ steps.get-commit-metadata.outputs.branch }}-${{ steps.get-commit-metadata.outputs.timestamp }}-${{ steps.get-commit-metadata.outputs.commit }} 42 | restore-keys: | 43 | ${{ inputs.cache-key-prefix }}-${{ inputs.arch }}-${{ inputs.toolchain_full }}-${{ steps.get-commit-metadata.outputs.branch }}-${{ steps.get-commit-metadata.outputs.timestamp }}- 44 | ${{ inputs.cache-key-prefix }}-${{ inputs.arch }}-${{ inputs.toolchain_full }}-${{ steps.get-commit-metadata.outputs.branch }}- 45 | ${{ inputs.cache-key-prefix }}-${{ inputs.arch }}-${{ inputs.toolchain_full }}- 46 | 47 | - name: Prepare incremental build 48 | working-directory: ${{ inputs.repo-root }} 49 | env: 50 | KBUILD_OUTPUT: ${{ inputs.kbuild-output }} 51 | shell: bash 52 | run:
${GITHUB_ACTION_PATH}/prepare-incremental-builds.sh ${{ steps.get-commit-metadata.outputs.commit }} || echo "Prepare incremental build script failed! Continue anyway..." 53 | 54 | -------------------------------------------------------------------------------- /prepare-incremental-build/get-commit-metadata.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eux 4 | 5 | branch=$1 6 | 7 | echo "branch=${branch}" >> "${GITHUB_OUTPUT}" 8 | 9 | git fetch --quiet --prune --no-tags --depth=1 --no-recurse-submodules \ 10 | origin "+refs/heads/${branch}:refs/remotes/origin/${branch}" 11 | commit=$(git rev-parse "origin/${branch}") 12 | 13 | timestamp_utc="$(TZ=utc git show --format='%cd' --no-patch --date=iso-strict-local "${commit}")" 14 | 15 | echo "timestamp=${timestamp_utc}" >> "${GITHUB_OUTPUT}" 16 | echo "commit=${commit}" >> "${GITHUB_OUTPUT}" 17 | echo "Most recent ${branch} commit is ${commit}" 18 | 19 | -------------------------------------------------------------------------------- /prepare-incremental-build/prepare-incremental-builds.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | commit_id="${1}" 6 | 7 | # $1 - the SHA-1 to fetch and check out 8 | fetch_and_checkout() { 9 | local build_base_sha 10 | 11 | build_base_sha="${1}" 12 | # If cached artifacts became stale for one reason or another, we 13 | # may not have the build base SHA available. Fetch it and retry. 14 | git fetch origin "${build_base_sha}" && git checkout --quiet "${build_base_sha}" 15 | } 16 | 17 | # $1 - value of KBUILD_OUTPUT 18 | clear_cache_artifacts() { 19 | local output_dir 20 | 21 | output_dir="${1}" 22 | echo "Unable to find earlier upstream ref. Discarding KBUILD_OUTPUT contents..." 23 | rm --recursive --force "${output_dir}" 24 | mkdir "${output_dir}" 25 | false 26 | } 27 | 28 | # $1 - value of KBUILD_OUTPUT 29 | # $2 - current time in ISO 8601 format 30 | restore_source_code_times() { 31 | local build_output 32 | local current_time 33 | local src_time 34 | local obj_time 35 | 36 | build_output="${1}" 37 | current_time="${2}" 38 | src_time="$(date --iso-8601=ns --date="${current_time} - 2 minutes")" 39 | obj_time="$(date --iso-8601=ns --date="${current_time} - 1 minute")" 40 | 41 | git ls-files | xargs --max-args=10000 touch -m --no-create --date="${src_time}" 42 | find "${build_output}" -type f | xargs --max-args=10000 touch -m --no-create --date="${obj_time}" 43 | git checkout --quiet - 44 | echo "Adjusted src and obj time stamps relative to system time" 45 | } 46 | 47 | mkdir --parents "${KBUILD_OUTPUT}" 48 | current_time="$(date --iso-8601=ns)" 49 | 50 | if [ -f "${KBUILD_OUTPUT}/.build-base-sha" ]; then 51 | build_base_sha="$(cat "${KBUILD_OUTPUT}/.build-base-sha")" 52 | echo "Setting up base build state for ${build_base_sha}" 53 | 54 | ( 55 | git checkout --quiet "${build_base_sha}" \ 56 | || fetch_and_checkout "${build_base_sha}" \ 57 | || clear_cache_artifacts "${KBUILD_OUTPUT}" 58 | ) && restore_source_code_times "${KBUILD_OUTPUT}" "${current_time}" 59 | else 60 | echo "No previous build data found" 61 | fi 62 | 63 | echo -n "${commit_id}" > "${KBUILD_OUTPUT}/.build-base-sha" 64 | -------------------------------------------------------------------------------- /prepare-rootfs/action.yml: -------------------------------------------------------------------------------- 1 | name: 'prepare-rootfs' 2 | description: 'build rootfs 3 | 1. 
download base root img to /tmp/root.img (or other path, if provided) 4 | 2. either copy "/vmlinux" or download "vmlinux" to "/boot" in image 5 | 3. (FIXME) either copy "/vmlinuz" from build dir or download "vmlinuz" to workspace root 6 | 4. copy hardcoded "selftests" and "ci/vmtest" directory to "/$PROJECT_NAME" in root image 7 | ' 8 | inputs: 9 | project-name: 10 | description: 'project name' 11 | required: true 12 | arch: 13 | description: 'arch' 14 | required: true 15 | kernel: 16 | description: 'kernel version; use LATEST to use the most recent version' 17 | required: true 18 | kernel-root: 19 | description: 'kernel source dir' 20 | required: true 21 | kbuild-output: 22 | description: 'relative or absolute path to use for storing build artifacts' 23 | required: true 24 | image-output: 25 | description: 'path where to store the generated image' 26 | required: true 27 | test: 28 | description: 'the name of the test to run' 29 | default: '' 30 | required: false 31 | runs: 32 | using: "composite" 33 | steps: 34 | - run: | 35 | export REPO_ROOT="${{ github.workspace }}" 36 | export KERNEL="${{ inputs.kernel }}" 37 | export KERNEL_ROOT="${{ inputs.kernel-root }}" 38 | kbuild_output="$(realpath ${{ inputs.kbuild-output }})" 39 | $GITHUB_ACTION_PATH/run_vmtest.sh "${kbuild_output}" ${{ inputs.image-output }} ${{ inputs.test }} 40 | shell: bash 41 | env: 42 | PROJECT_NAME: ${{ inputs.project-name }} 43 | TARGET_ARCH: ${{ inputs.arch }} 44 | -------------------------------------------------------------------------------- /prepare-rootfs/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | trap 'exit 2' ERR 5 | 6 | source $(cd $(dirname $0) && pwd)/../helpers.sh 7 | 8 | usage () { 9 | USAGE_STRING="usage: $0 [-k KERNELRELEASE|-b DIR] [--source DIR] [[-r ROOTFSVERSION] [-fo]|-I] [-Si] [-d DIR] IMG 10 | $0 [-k KERNELRELEASE] -l 11 | $0 -h 12 | 13 | Run "${PROJECT_NAME}" tests in a virtual machine. 14 | 15 | This exits with status 0 on success, 1 if the virtual machine ran successfully 16 | but tests failed, and 2 if we encountered a fatal error. 17 | 18 | This script uses sudo to work around a libguestfs bug. 19 | 20 | Arguments: 21 | IMG path of virtual machine disk image to create 22 | 23 | Versions: 24 | -k, --kernel=KERNELRELEASE 25 | kernel release to test. This is a glob pattern; the 26 | newest (sorted by version number) release that matches 27 | the pattern is used (default: newest available release) 28 | 29 | --source DIR path to the kernel source directory 30 | 31 | -b, --build DIR use the kernel built in the given directory. This option 32 | cannot be combined with -k 33 | 34 | -r, --rootfs=ROOTFSVERSION 35 | version of root filesystem to use (default: newest 36 | available version) 37 | 38 | Setup: 39 | -f, --force overwrite IMG if it already exists 40 | 41 | -o, --one-shot one-shot mode. By default, this script saves a clean copy 42 | of the downloaded root filesystem image and vmlinux and 43 | makes a copy (reflinked, when possible) for executing the 44 | virtual machine. This allows subsequent runs to skip 45 | downloading these files. If this option is given, the 46 | root filesystem image and vmlinux are always 47 | re-downloaded and are not saved. This option implies -f 48 | 49 | -s, --setup-cmd setup commands run on VM boot. Whitespace characters 50 | should be escaped with preceding '\'. 51 | 52 | -I, --skip-image skip creating the disk image; use the existing one at 53 | IMG. 
This option cannot be combined with -r, -f, or -o 54 | 55 | -S, --skip-source skip copying the source files and init scripts 56 | 57 | Miscellaneous: 58 | -i, --interactive interactive mode. Boot the virtual machine into an 59 | interactive shell instead of automatically running tests 60 | 61 | -d, --dir=DIR working directory to use for downloading and caching 62 | files (default: current working directory) 63 | 64 | -l, --list list available kernel releases instead of running tests. 65 | The list may be filtered with -k 66 | 67 | -h, --help display this help message and exit" 68 | 69 | case "$1" in 70 | out) 71 | echo "$USAGE_STRING" 72 | exit 0 73 | ;; 74 | err) 75 | echo "$USAGE_STRING" >&2 76 | exit 2 77 | ;; 78 | esac 79 | } 80 | 81 | TEMP=$(getopt -o 'k:b:r:fos:ISid:lh' --long 'kernel:,source:,build:,rootfs:,force,one-shot,setup-cmd:,skip-image,skip-source,interactive,dir:,list,help' -n "$0" -- "$@") 82 | eval set -- "$TEMP" 83 | unset TEMP 84 | 85 | unset KERNELRELEASE 86 | unset KERNELSRC 87 | unset BUILDDIR 88 | unset ROOTFSVERSION 89 | unset IMG 90 | unset SETUPCMD 91 | FORCE=0 92 | ONESHOT=0 93 | SKIPIMG=0 94 | SKIPSOURCE=0 95 | APPEND="" 96 | DIR="$PWD" 97 | LIST=0 98 | 99 | # When SOURCE_FULLCOPY=1, copy all files that aren't listed in git exclusions. 100 | # This doesn't work well for an entire kernel tree, which is why it 101 | # defaults to 0 (copy only the selftests and ci/vmtest directories). 102 | SOURCE_FULLCOPY=${SOURCE_FULLCOPY:-0} 103 | 104 | while true; do 105 | case "$1" in 106 | -k|--kernel) 107 | KERNELRELEASE="$2" 108 | shift 2 109 | ;; 110 | --source) 111 | KERNELSRC="$2" 112 | shift 2 113 | ;; 114 | -b|--build) 115 | BUILDDIR="$2" 116 | shift 2 117 | ;; 118 | -r|--rootfs) 119 | ROOTFSVERSION="$2" 120 | shift 2 121 | ;; 122 | -f|--force) 123 | FORCE=1 124 | shift 125 | ;; 126 | -o|--one-shot) 127 | ONESHOT=1 128 | FORCE=1 129 | shift 130 | ;; 131 | -s|--setup-cmd) 132 | SETUPCMD="$2" 133 | shift 2 134 | ;; 135 | -I|--skip-image) 136 | SKIPIMG=1 137 | shift 138 | ;; 139 | -S|--skip-source) 140 | SKIPSOURCE=1 141 | shift 142 | ;; 143 | -i|--interactive) 144 | APPEND=" single" 145 | shift 146 | ;; 147 | -d|--dir) 148 | DIR="$2" 149 | shift 2 150 | ;; 151 | -l|--list) 152 | LIST=1; shift 153 | ;; 154 | -h|--help) 155 | usage out 156 | ;; 157 | --) 158 | shift 159 | break 160 | ;; 161 | *) 162 | usage err 163 | ;; 164 | esac 165 | done 166 | if [[ -v BUILDDIR ]]; then 167 | if [[ -v KERNELRELEASE ]]; then 168 | usage err 169 | fi 170 | elif [[ ! -v KERNELRELEASE ]]; then 171 | KERNELRELEASE='*' 172 | fi 173 | if [[ $SKIPIMG -ne 0 && ( -v ROOTFSVERSION || $FORCE -ne 0 ) ]]; then 174 | usage err 175 | fi 176 | if (( LIST )); then 177 | if [[ $# -ne 0 || -v BUILDDIR || -v ROOTFSVERSION || $FORCE -ne 0 || 178 | $SKIPIMG -ne 0 || $SKIPSOURCE -ne 0 || -n $APPEND ]]; then 179 | usage err 180 | fi 181 | else 182 | if [[ $# -ne 1 ]]; then 183 | usage err 184 | fi 185 | IMG="${!OPTIND}" 186 | fi 187 | if [[ "${SOURCE_FULLCOPY}" == "1" ]]; then 188 | img_size=8G 189 | else 190 | img_size=2G 191 | fi 192 | 193 | unset URLS 194 | cache_urls() { 195 | if ! declare -p URLS &> /dev/null; then 196 | # The INDEX file contains a mapping from file names to URLs where 197 | # those files can be downloaded. 
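# Each line of INDEX is a tab-separated (name, url) pair, consumed by the read loop below; e.g. (hypothetical entry): "x86_64/vmlinux-5.15.zst\thttps://example.com/x86_64/vmlinux-5.15.zst".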
198 | declare -gA URLS 199 | while IFS=$'\t' read -r name url; do 200 | URLS["$name"]="$url" 201 | done < <(cat "${GITHUB_ACTION_PATH}/../INDEX") 202 | fi 203 | } 204 | 205 | matching_kernel_releases() { 206 | local pattern="$1" 207 | { 208 | for file in "${!URLS[@]}"; do 209 | if [[ $file =~ ^${TARGET_ARCH}/vmlinux-(.*).zst$ ]]; then 210 | release="${BASH_REMATCH[1]}" 211 | case "$release" in 212 | $pattern) 213 | # sort -V handles rc versions properly 214 | # if we use "~" instead of "-". 215 | echo "${release//-rc/~rc}" 216 | ;; 217 | esac 218 | fi 219 | done 220 | } | sort -rV | sed 's/~rc/-rc/g' 221 | } 222 | 223 | newest_rootfs_version() { 224 | { 225 | for file in "${!URLS[@]}"; do 226 | if [[ $file =~ ^${TARGET_ARCH}/${PROJECT_NAME}-vmtest-rootfs-(.*)\.tar\.zst$ ]]; then 227 | echo "${BASH_REMATCH[1]}" 228 | fi 229 | done 230 | } | sort -rV | head -1 231 | } 232 | 233 | download() { 234 | local file="$1" 235 | cache_urls 236 | if [[ ! -v URLS[$file] ]]; then 237 | echo "$file not found" >&2 238 | return 1 239 | fi 240 | echo "Downloading $file..." >&2 241 | curl -Lf "${URLS[$file]}" "${@:2}" 242 | } 243 | 244 | set_nocow() { 245 | touch "$@" 246 | chattr +C "$@" >/dev/null 2>&1 || true 247 | } 248 | 249 | cp_img() { 250 | set_nocow "$2" 251 | cp --reflink=auto "$1" "$2" 252 | } 253 | 254 | create_rootfs_img() { 255 | local path="$1" 256 | set_nocow "$path" 257 | truncate -s "$img_size" "$path" 258 | mkfs.ext4 -q "$path" 259 | } 260 | 261 | download_rootfs() { 262 | local rootfsversion="$1" 263 | download "${TARGET_ARCH}/${PROJECT_NAME}-vmtest-rootfs-$rootfsversion.tar.zst" | 264 | zstd -d 265 | } 266 | 267 | tar_in() { 268 | local dst_path="$1" 269 | # guestfish --remote does not forward file descriptors, which prevents 270 | # us from using `tar-in -` or bash process substitution. We don't want 271 | # to copy all the data into a temporary file, so use a FIFO. 272 | tmp=$(mktemp -d) 273 | mkfifo "$tmp/fifo" 274 | cat >"$tmp/fifo" & 275 | local cat_pid=$! 276 | guestfish --remote tar-in "$tmp/fifo" "$dst_path" 277 | wait "$cat_pid" 278 | rm -r "$tmp" 279 | tmp= 280 | } 281 | 282 | if (( LIST )); then 283 | cache_urls 284 | matching_kernel_releases "$KERNELRELEASE" 285 | exit 0 286 | fi 287 | 288 | if [[ $FORCE -eq 0 && $SKIPIMG -eq 0 && -e $IMG ]]; then 289 | echo "$IMG already exists; use -f to overwrite it or -I to reuse it" >&2 290 | exit 1 291 | fi 292 | 293 | # Only go to the network if it's actually a glob pattern. 294 | if [[ -v BUILDDIR ]]; then 295 | KERNELRELEASE="$(KBUILD_OUTPUT="${BUILDDIR}" make -C "${KERNELSRC:-$BUILDDIR}" -s kernelrelease)" 296 | elif [[ ! $KERNELRELEASE =~ ^([^\\*?[]|\\[*?[])*\\?$ ]]; then 297 | # We need to cache the list of URLs outside of the command 298 | # substitution, which happens in a subshell. 299 | cache_urls 300 | KERNELRELEASE="$(matching_kernel_releases "$KERNELRELEASE" | head -1)" 301 | if [[ -z $KERNELRELEASE ]]; then 302 | echo "No matching kernel release found" >&2 303 | exit 1 304 | fi 305 | fi 306 | if [[ $SKIPIMG -eq 0 && ! 
-v ROOTFSVERSION ]]; then 307 | cache_urls 308 | ROOTFSVERSION="$(newest_rootfs_version)" 309 | fi 310 | 311 | foldable start vmlinux_setup "Preparing Linux image" 312 | 313 | echo "Kernel release: $KERNELRELEASE" >&2 314 | echo 315 | 316 | if (( SKIPIMG )); then 317 | echo "Not extracting root filesystem" >&2 318 | else 319 | echo "Root filesystem version: $ROOTFSVERSION" >&2 320 | fi 321 | echo "Disk image: $IMG" >&2 322 | 323 | tmp= 324 | ARCH_DIR="$DIR/${TARGET_ARCH}" 325 | mkdir -p "$ARCH_DIR" 326 | 327 | cleanup() { 328 | if [[ -n $tmp ]]; then 329 | rm -rf "$tmp" || true 330 | fi 331 | guestfish --remote exit 2>/dev/null || true 332 | } 333 | trap cleanup EXIT 334 | 335 | if [[ -v BUILDDIR ]]; then 336 | vmlinuz="$BUILDDIR/$(KBUILD_OUTPUT="${BUILDDIR}" make -C "${KERNELSRC:-$BUILDDIR}" -s image_name)" 337 | else 338 | vmlinuz="${ARCH_DIR}/vmlinuz-${KERNELRELEASE}" 339 | if [[ ! -e $vmlinuz ]]; then 340 | tmp="$(mktemp "$vmlinuz.XXX.part")" 341 | download "${TARGET_ARCH}/vmlinuz-${KERNELRELEASE}" -o "$tmp" 342 | mv "$tmp" "$vmlinuz" 343 | tmp= 344 | fi 345 | fi 346 | cp "$vmlinuz" "$GITHUB_WORKSPACE"/vmlinuz 347 | 348 | # Mount and set up the rootfs image. Use a persistent guestfish session in 349 | # order to avoid the startup overhead. 350 | # Work around https://bugs.launchpad.net/fuel/+bug/1467579. 351 | sudo chmod +r /boot/vmlinuz* || true 352 | eval "$(guestfish --listen)" 353 | if (( ONESHOT )); then 354 | rm -f "$IMG" 355 | create_rootfs_img "$IMG" 356 | guestfish --remote \ 357 | add "$IMG" label:img : \ 358 | launch : \ 359 | mount /dev/disk/guestfs/img / 360 | download_rootfs "$ROOTFSVERSION" | tar_in / 361 | else 362 | if (( ! SKIPIMG )); then 363 | rootfs_img="${ARCH_DIR}/${PROJECT_NAME}-vmtest-rootfs-${ROOTFSVERSION}.img" 364 | 365 | if [[ ! -e $rootfs_img ]]; then 366 | tmp="$(mktemp "$rootfs_img.XXX.part")" 367 | set_nocow "$tmp" 368 | truncate -s "$img_size" "$tmp" 369 | mkfs.ext4 -q "$tmp" 370 | 371 | # libguestfs supports hotplugging only with a libvirt 372 | # backend, which we are not using here, so handle the 373 | # temporary image in a separate session. 374 | download_rootfs "$ROOTFSVERSION" | 375 | guestfish -a "$tmp" tar-in - / 376 | 377 | mv "$tmp" "$rootfs_img" 378 | tmp= 379 | fi 380 | 381 | rm -f "$IMG" 382 | cp_img "$rootfs_img" "$IMG" 383 | fi 384 | guestfish --remote \ 385 | add "$IMG" label:img : \ 386 | launch : \ 387 | mount /dev/disk/guestfs/img / 388 | fi 389 | 390 | # Install vmlinux. 391 | vmlinux="/boot/vmlinux-${KERNELRELEASE}" 392 | if [[ -v BUILDDIR || $ONESHOT -eq 0 ]]; then 393 | if [[ -v BUILDDIR ]]; then 394 | source_vmlinux="${BUILDDIR}/vmlinux" 395 | else 396 | source_vmlinux="${ARCH_DIR}/vmlinux-${KERNELRELEASE}" 397 | if [[ ! -e $source_vmlinux ]]; then 398 | tmp="$(mktemp "$source_vmlinux.XXX.part")" 399 | download "${TARGET_ARCH}/vmlinux-${KERNELRELEASE}.zst" | zstd -dfo "$tmp" 400 | mv "$tmp" "$source_vmlinux" 401 | tmp= 402 | fi 403 | fi 404 | else 405 | source_vmlinux="${ARCH_DIR}/vmlinux-${KERNELRELEASE}" 406 | download "${TARGET_ARCH}/vmlinux-${KERNELRELEASE}.zst" | zstd -d >"$source_vmlinux" 407 | fi 408 | echo "Copying vmlinux..." >&2 409 | guestfish --remote \ 410 | upload "$source_vmlinux" "$vmlinux" : \ 411 | chmod 644 "$vmlinux" 412 | 413 | foldable end vmlinux_setup 414 | 415 | foldable start copy_files "Copying files..." 416 | 417 | if (( SKIPSOURCE )); then 418 | echo "Not copying source files..." >&2 419 | else 420 | echo "Copying source files..." >&2 421 | # Copy the source files in. 
422 | guestfish --remote \ 423 | mkdir-p "/${PROJECT_NAME}" : \ 424 | chmod 0755 "/${PROJECT_NAME}" 425 | if [[ "${SOURCE_FULLCOPY}" == "1" ]]; then 426 | git ls-files -z | tar --null --files-from=- -c | tar_in "/${PROJECT_NAME}" 427 | else 428 | guestfish --remote \ 429 | mkdir-p "/${PROJECT_NAME}/selftests" : \ 430 | chmod 0755 "/${PROJECT_NAME}/selftests" : \ 431 | mkdir-p "/${PROJECT_NAME}/ci" : \ 432 | chmod 0755 "/${PROJECT_NAME}/ci" 433 | tar -C "${REPO_ROOT}/selftests" -c bpf | tar_in "/${PROJECT_NAME}/selftests" 434 | tar -C "${REPO_ROOT}/ci" -c vmtest | tar_in "/${PROJECT_NAME}" 435 | fi 436 | fi 437 | 438 | tmp=$(mktemp) 439 | cat > "$tmp" << "HERE" 440 | #!/bin/sh 441 | 442 | echo 'Skipping setup commands' 443 | echo vm_start:0 > /exitstatus 444 | chmod 644 /exitstatus 445 | HERE 446 | 447 | # Create the init scripts. 448 | if [[ -n ${SETUPCMD:-} ]]; then 449 | # Unescape whitespace characters. 450 | setup_cmd=$(sed 's/\(\\\)\([[:space:]]\)/\2/g' <<< "${SETUPCMD}") 451 | kernel="${KERNELRELEASE}" 452 | if [[ -v BUILDDIR ]]; then kernel='latest'; fi 453 | setup_envvars="export KERNEL=${kernel}" 454 | cat > "$tmp" << HERE 455 | #!/bin/bash 456 | set -eu 457 | echo 'Running setup commands' 458 | ${setup_envvars} 459 | set +e 460 | ${setup_cmd}; exitstatus=\$? 461 | echo -e '$(foldable start collect_status "Collect status")' 462 | set -e 463 | # If setup command did not write its exit status to /exitstatus, do it now 464 | if [[ ! -s /exitstatus ]]; then 465 | echo setup_cmd:\$exitstatus > /exitstatus 466 | fi 467 | chmod 644 /exitstatus 468 | echo -e '$(foldable end collect_status)' 469 | echo -e '$(foldable start shutdown Shutdown)' 470 | HERE 471 | fi 472 | 473 | guestfish --remote \ 474 | upload "$tmp" /etc/rcS.d/S50-run-tests : \ 475 | chmod 755 /etc/rcS.d/S50-run-tests 476 | 477 | cat > "$tmp" << "HERE" 478 | #!/bin/sh 479 | 480 | rm -f /shutdown-status 481 | echo "clean" > /shutdown-status 482 | chmod 644 /shutdown-status 483 | 484 | poweroff 485 | HERE 486 | guestfish --remote \ 487 | upload "$tmp" /etc/rcS.d/S99-poweroff : \ 488 | chmod 755 /etc/rcS.d/S99-poweroff 489 | rm "$tmp" 490 | tmp= 491 | 492 | guestfish --remote exit 493 | 494 | foldable end copy_files 495 | -------------------------------------------------------------------------------- /prepare-rootfs/run_vmtest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | THISDIR="$(cd $(dirname $0) && pwd)" 6 | 7 | source "${THISDIR}"/../helpers.sh 8 | 9 | KBUILD_OUTPUT="${1}" 10 | IMAGE="${2}" 11 | TEST="${3:-}" 12 | 13 | foldable start env "Setup env" 14 | sudo apt-get update 15 | sudo apt-get install -y libguestfs-tools zstd 16 | foldable end env 17 | 18 | USER=$(whoami) 19 | if [[ ${USER} != 'root' ]]; then 20 | foldable start adduser_to_kvm "Add user ${USER}" 21 | sudo adduser "${USER}" kvm 22 | foldable end adduser_to_kvm 23 | fi 24 | 25 | VMTEST_SETUPCMD="export GITHUB_WORKFLOW=${GITHUB_WORKFLOW:-}; export PROJECT_NAME=${PROJECT_NAME}; /${PROJECT_NAME}/vmtest/run_selftests.sh ${TEST}" 26 | # Escape whitespace characters. 
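# e.g. (illustrative) 'export FOO=1; /x/run.sh' becomes 'export\ FOO=1;\ /x/run.sh'; run.sh unescapes it again before writing the init script.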
27 | setup_cmd=$(sed 's/\([[:space:]]\)/\\\1/g' <<< "${VMTEST_SETUPCMD}") 28 | 29 | if [[ "${KERNEL}" = 'LATEST' ]]; then 30 | "${THISDIR}"/run.sh --build "${KBUILD_OUTPUT}" --source "${KERNEL_ROOT}" -o -d ~ -s "${setup_cmd}" "${IMAGE}" 31 | else 32 | "${THISDIR}"/run.sh -k "${KERNEL}*" -o -d ~ -s "${setup_cmd}" "${IMAGE}" 33 | fi 34 | -------------------------------------------------------------------------------- /rootfs/Makefile: -------------------------------------------------------------------------------- 1 | ARCHS = amd64 arm64 s390x riscv64 2 | DISTROS = noble 3 | 4 | .PHONY: all 5 | all: 6 | @for distro in $(DISTROS) ; do \ 7 | for arch in $(ARCHS) ; do \ 8 | sudo ./mkrootfs_debian.sh --arch "$${arch}" --distro "$${distro}" ; \ 9 | done \ 10 | done 11 | 12 | -------------------------------------------------------------------------------- /rootfs/mkrootfs_debian.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script builds a Debian root filesystem image for testing libbpf in a 3 | # virtual machine. Requires debootstrap >= 1.0.95 and zstd. 4 | 5 | # Use e.g. ./mkrootfs_debian.sh --arch=s390x to generate a rootfs for a 6 | # foreign architecture. Requires configured binfmt_misc, e.g. using 7 | # Debian/Ubuntu's qemu-user-binfmt package or 8 | # https://github.com/multiarch/qemu-user-static. 9 | # Any arguments that need to be passed to `debootstrap` should be passed after 10 | # `--`, e.g. ./mkrootfs_debian.sh --arch=s390x -- --foo=bar 11 | 12 | set -e -u -o pipefail 13 | 14 | # table of Debian arch <-> GNU arch matches 15 | CPUTABLE="${CPUTABLE:-/usr/share/dpkg/cputable}" 16 | 17 | deb_arch=$(dpkg --print-architecture) 18 | distro="bullseye" 19 | 20 | function usage() { 21 | echo "Usage: $0 [-a | --arch architecture] [-d | --distro distribution] [-h | --help] 22 | 23 | Build a Debian chroot filesystem image for testing libbpf in a virtual machine. 24 | By default build an image for the architecture of the host running the script. 25 | 26 | -a | --arch: architecture to build the image for. Default (${deb_arch}) 27 | -d | --distro: distribution to build. Default (${distro}) 28 | " 29 | } 30 | 31 | function error() { 32 | echo "ERROR: ${1}" >&2 33 | } 34 | 35 | function debian_to_gnu() { 36 | # Function to convert an architecture in Debian to its GNU equivalent, 37 | # e.g. amd64 -> x86_64 38 | # CPUTABLE contains a list of debian_arch\tgnu_arch per line 39 | # If the first field matches, print the second one. 40 | awk -v deb_arch="$1" '$1 ~ deb_arch {print $2}' "${CPUTABLE}" 41 | } 42 | 43 | function qemu_static() { 44 | # Given a Debian architecture find the location of the matching 45 | # qemu-${gnu_arch}-static binary. 46 | gnu_arch=$(debian_to_gnu "${1}") 47 | if [ "$deb_arch" == "ppc64el" ]; then 48 | gnu_arch="ppc64le" 49 | fi 50 | echo "qemu-${gnu_arch}-static" 51 | } 52 | 53 | function check_requirements() { 54 | # Checks that all necessary packages are installed on the system. 55 | # Prints an error message explaining what is missing and exits. 56 | 57 | local deb_arch=$1 58 | local err=0 59 | 60 | # Check that we can translate from Debian arch to GNU arch. 61 | if [[ ! -e "${CPUTABLE}" ]] 62 | then 63 | error "${CPUTABLE} not found on your system. Make sure dpkg package is installed." 64 | err=1 65 | fi 66 | 67 | # Check that the architecture is supported by Debian. 68 | if [[ -z $(debian_to_gnu "${deb_arch}") ]] 69 | then 70 | error "${deb_arch} is not a supported architecture." 
71 | err=1 72 | fi 73 | 74 | # Check that we can install the root image for a foreign arch. 75 | qemu=$(qemu_static "${deb_arch}") 76 | if ! command -v "${qemu}" &> /dev/null 77 | then 78 | error "${qemu} binary not found on your system. Make sure qemu-user-static package is installed." 79 | err=1 80 | fi 81 | 82 | # Check that debootstrap is installed. 83 | if ! command -v debootstrap &> /dev/null 84 | then 85 | error "debootstrap binary not found on your system. Make sure debootstrap package is installed." 86 | err=1 87 | fi 88 | 89 | # Check we are root. 90 | if [[ "$(id -u)" != 0 ]]; then 91 | error "$0 must run as root" 92 | err=1 93 | fi 94 | 95 | if [[ ${err} -ne 0 ]] 96 | then 97 | exit 1 98 | fi 99 | } 100 | 101 | TEMP=$(getopt -l "arch:,distro:,help" -o "a:d:h" -- "$@") 102 | if [ $? -ne 0 ]; then 103 | usage; exit 1 104 | fi 105 | 106 | eval set -- "${TEMP}" 107 | unset TEMP 108 | 109 | while true; do 110 | case "$1" in 111 | --arch | -a) 112 | deb_arch="$2" 113 | shift 2 114 | ;; 115 | --distro | -d) 116 | distro="$2" 117 | shift 2 118 | ;; 119 | --help | -h) 120 | usage 121 | exit 122 | ;; 123 | --) 124 | shift 125 | break 126 | ;; 127 | *) 128 | break 129 | ;; 130 | esac 131 | done 132 | 133 | 134 | check_requirements "${deb_arch}" 135 | 136 | # Print out the commands being run to make it easier to troubleshoot breakages. 137 | set -x 138 | 139 | # Create a working directory and schedule its deletion. 140 | root=$(mktemp -d -p "$PWD") 141 | trap '[[ "$?" -eq "0" ]] && rm -r "$root"' EXIT 142 | 143 | # Install packages. 144 | packages=( 145 | binutils 146 | busybox 147 | elfutils 148 | ethtool 149 | iproute2 150 | iptables 151 | keyutils 152 | libcap2 153 | libelf1t64 154 | openssl 155 | strace 156 | zlib1g 157 | ) 158 | packages=$(IFS=, && echo "${packages[*]}") 159 | 160 | # Stage 1 161 | debootstrap --include="$packages" \ 162 | --components=main,universe \ 163 | --foreign \ 164 | --variant=minbase \ 165 | --arch="${deb_arch}" \ 166 | "$@" \ 167 | "${distro}" \ 168 | "$root" 169 | 170 | qemu=$(which $(qemu_static ${deb_arch})) 171 | 172 | cp "${qemu}" "${root}/usr/bin" 173 | 174 | # Stage 2 175 | chroot "${root}" /debootstrap/debootstrap --second-stage 176 | 177 | # Remove the init scripts (tests use their own). Also remove various 178 | # unnecessary files in order to save space. 179 | rm -rf \ 180 | "$root"/etc/rcS.d \ 181 | "$root"/usr/share/{doc,info,locale,man,zoneinfo} \ 182 | "$root"/var/cache/apt/archives/* \ 183 | "$root"/var/lib/apt/lists/* \ 184 | "${root}/usr/bin/$(basename "${qemu}")" 185 | 186 | # Apply common tweaks. 187 | "$(dirname "$0")"/mkrootfs_tweak.sh "$root" 188 | 189 | # Save the result. 190 | name="libbpf-vmtest-rootfs-$(date +%Y.%m.%d)-${distro}-${deb_arch}.tar.zst" 191 | rm -f "$name" 192 | tar -C "$root" -c . | zstd -T0 -19 -o "$name" 193 | -------------------------------------------------------------------------------- /rootfs/mkrootfs_tweak.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script prepares a mounted root filesystem for testing libbpf in a virtual 3 | # machine. 
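# Invoked by mkrootfs_debian.sh as: mkrootfs_tweak.sh "$root", where $1 is the path to the unpacked root filesystem.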
4 | set -e -u -x -o pipefail 5 | root=$1 6 | shift 7 | 8 | chroot "${root}" /bin/busybox --install 9 | 10 | cat > "$root/etc/inittab" << "EOF" 11 | ::sysinit:/etc/init.d/rcS 12 | ::ctrlaltdel:/sbin/reboot 13 | ::shutdown:/sbin/swapoff -a 14 | ::shutdown:/bin/umount -a -r 15 | ::restart:/sbin/init 16 | EOF 17 | chmod 644 "$root/etc/inittab" 18 | 19 | mkdir -m 755 -p "$root/etc/init.d" "$root/etc/rcS.d" 20 | cat > "$root/etc/rcS.d/S10-mount" << "EOF" 21 | #!/bin/sh 22 | 23 | set -eux 24 | 25 | /bin/mount proc /proc -t proc 26 | 27 | # Mount devtmpfs if not mounted 28 | if [[ -z "$(/bin/mount -t devtmpfs)" ]]; then 29 | /bin/mount devtmpfs /dev -t devtmpfs 30 | fi 31 | 32 | /bin/mount sysfs /sys -t sysfs 33 | /bin/mount bpffs /sys/fs/bpf -t bpf 34 | /bin/mount debugfs /sys/kernel/debug -t debugfs 35 | 36 | # Symlink /dev/fd to /proc/self/fd so process substitution works. 37 | [[ -e /dev/fd ]] || ln -s /proc/self/fd /dev/fd 38 | 39 | echo 'Listing currently mounted file systems' 40 | /bin/mount 41 | EOF 42 | chmod 755 "$root/etc/rcS.d/S10-mount" 43 | 44 | cat > "$root/etc/rcS.d/S40-network" << "EOF" 45 | #!/bin/sh 46 | 47 | set -eux 48 | 49 | ip link set lo up 50 | EOF 51 | chmod 755 "$root/etc/rcS.d/S40-network" 52 | 53 | cat > "$root/etc/init.d/rcS" << "EOF" 54 | #!/bin/sh 55 | 56 | set -eux 57 | 58 | for path in /etc/rcS.d/S*; do 59 | [ -x "$path" ] && "$path" 60 | done 61 | EOF 62 | chmod 755 "$root/etc/init.d/rcS" 63 | 64 | chmod 755 "$root" 65 | -------------------------------------------------------------------------------- /rootfs/s390x-self-hosted-builder/README.md: -------------------------------------------------------------------------------- 1 | # IBM Z self-hosted builder 2 | 3 | libbpf CI uses an IBM-provided z15 self-hosted builder. There are no IBM Z 4 | builds of the GitHub (GH) Actions runner, and stable qemu-user has problems with .NET 5 | apps, so the builder runs the x86_64 runner version with qemu-user built from 6 | the master branch. 7 | 8 | We currently support runners for the following repositories: 9 | * libbpf/libbpf 10 | * kernel-patches/bpf 11 | * kernel-patches/vmtest 12 | 13 | The instructions below apply directly to libbpf, and require minor 14 | modifications for the kernel-patches repos. Currently, the qemu-user-static Docker 15 | image is shared between all GitHub runners, but a separate actions-runner-\* 16 | service / Docker image is created for each runner type. 17 | 18 | ## Configuring the builder. 19 | 20 | ### Install prerequisites. 21 | 22 | ``` 23 | $ sudo apt install -y docker.io # Ubuntu 24 | ``` 25 | 26 | ### Add services. 27 | 28 | ``` 29 | $ sudo cp *.service /etc/systemd/system/ 30 | $ sudo systemctl daemon-reload 31 | ``` 32 | 33 | ### Create a config file. 34 | 35 | ``` 36 | $ sudo tee /etc/actions-runner-libbpf 37 | repo=/ 38 | access_token= 39 | runner_name= 40 | ``` 41 | 42 | The access token should have the repo scope; consult 43 | https://docs.github.com/en/rest/reference/actions#create-a-registration-token-for-a-repository 44 | for details. 45 | 46 | ### Autostart the x86_64 emulation support. 47 | 48 | This step is important: you will not be able to build the Docker container 49 | without this service running. If the container build fails, make sure the 50 | service is running properly. 51 | 52 | ``` 53 | $ sudo systemctl enable --now qemu-user-static 54 | ``` 55 | 56 | ### Autostart the runner. 
57 | 58 | ``` 59 | $ sudo systemctl enable --now actions-runner-libbpf 60 | ``` 61 | 62 | ## Rebuilding the image 63 | 64 | In order to update the `iiilinuxibmcom/actions-runner-libbpf` image, e.g. to 65 | get the latest OS security fixes, use the following commands: 66 | 67 | ``` 68 | $ sudo docker build \ 69 | --pull \ 70 | -f actions-runner-libbpf.Dockerfile \ 71 | -t iiilinuxibmcom/actions-runner-libbpf \ 72 | . 73 | $ sudo systemctl restart actions-runner-libbpf 74 | ``` 75 | 76 | ## Removing persistent data 77 | 78 | The `actions-runner-libbpf` service stores various temporary data, such as 79 | runner registration information, work directories and logs, in the 80 | `actions-runner-libbpf` volume. In order to remove it and start from scratch, 81 | e.g. when upgrading the runner or switching it to a different repository, use 82 | the following commands: 83 | 84 | ``` 85 | $ sudo systemctl stop actions-runner-libbpf 86 | $ sudo docker rm -f actions-runner-libbpf 87 | $ sudo docker volume rm actions-runner-libbpf 88 | ``` 89 | 90 | ## Troubleshooting 91 | 92 | In order to check if the service is running, use the following command: 93 | 94 | ``` 95 | $ sudo systemctl status 96 | ``` 97 | 98 | In order to get logs for the service: 99 | 100 | ``` 101 | $ journalctl -u 102 | ``` 103 | 104 | In order to check which containers are currently active: 105 | 106 | ``` 107 | $ sudo docker ps 108 | ``` 109 | -------------------------------------------------------------------------------- /rootfs/s390x-self-hosted-builder/actions-runner-libbpf.Dockerfile: -------------------------------------------------------------------------------- 1 | # Self-Hosted IBM Z Github Actions Runner. 2 | 3 | # Temporary image: amd64 dependencies. 4 | FROM amd64/ubuntu:20.04 as ld-prefix 5 | ENV DEBIAN_FRONTEND=noninteractive 6 | RUN apt-get update && apt-get -y install ca-certificates libicu66 libssl1.1 7 | 8 | # Main image. 9 | FROM s390x/ubuntu:20.04 10 | 11 | # Packages for libbpf testing that are not installed by .github/actions/setup. 12 | ENV DEBIAN_FRONTEND=noninteractive 13 | RUN apt-get update && apt-get -y install \ 14 | bc \ 15 | bison \ 16 | cmake \ 17 | cpu-checker \ 18 | curl \ 19 | wget \ 20 | flex \ 21 | git \ 22 | jq \ 23 | linux-image-generic \ 24 | qemu-system-s390x \ 25 | rsync \ 26 | software-properties-common \ 27 | sudo \ 28 | tree \ 29 | zstd \ 30 | iproute2 \ 31 | iputils-ping 32 | 33 | # amd64 dependencies. 34 | COPY --from=ld-prefix / /usr/x86_64-linux-gnu/ 35 | RUN ln -fs ../lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 /usr/x86_64-linux-gnu/lib64/ 36 | RUN ln -fs /etc/resolv.conf /usr/x86_64-linux-gnu/etc/ 37 | ENV QEMU_LD_PREFIX=/usr/x86_64-linux-gnu 38 | 39 | # amd64 Github Actions Runner. 40 | ARG version=2.298.2 41 | RUN useradd -m actions-runner 42 | RUN echo "actions-runner ALL=(ALL) NOPASSWD: ALL" >>/etc/sudoers 43 | RUN echo "Defaults env_keep += \"DEBIAN_FRONTEND\"" >>/etc/sudoers 44 | RUN usermod -a -G kvm actions-runner 45 | USER actions-runner 46 | ENV USER=actions-runner 47 | WORKDIR /home/actions-runner 48 | RUN curl -L https://github.com/actions/runner/releases/download/v${version}/actions-runner-linux-x64-${version}.tar.gz | tar -xz 49 | VOLUME /home/actions-runner 50 | 51 | # Scripts. 
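# fs/ mirrors the target filesystem layout: fs/usr/bin/entrypoint and fs/usr/bin/actions-runner are installed into /usr/bin and referenced by the ENTRYPOINT/CMD below.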
52 | COPY fs/ / 53 | ENTRYPOINT ["/usr/bin/entrypoint"] 54 | CMD ["/usr/bin/actions-runner"] 55 | -------------------------------------------------------------------------------- /rootfs/s390x-self-hosted-builder/actions-runner-libbpf.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Self-Hosted IBM Z Github Actions Runner 3 | Wants=qemu-user-static 4 | After=qemu-user-static 5 | StartLimitIntervalSec=0 6 | 7 | [Service] 8 | Type=simple 9 | Restart=always 10 | ExecStart=/usr/bin/docker run \ 11 | --device=/dev/kvm \ 12 | --env-file=/etc/actions-runner-libbpf \ 13 | --init \ 14 | --interactive \ 15 | --name=actions-runner-libbpf \ 16 | --rm \ 17 | --volume=actions-runner-libbpf:/home/actions-runner \ 18 | iiilinuxibmcom/actions-runner-libbpf 19 | ExecStop=/bin/sh -c "docker exec actions-runner-libbpf kill -INT -- -1" 20 | ExecStop=/bin/sh -c "docker wait actions-runner-libbpf" 21 | ExecStop=/bin/sh -c "docker rm actions-runner-libbpf" 22 | 23 | [Install] 24 | WantedBy=multi-user.target 25 | -------------------------------------------------------------------------------- /rootfs/s390x-self-hosted-builder/fs/usr/bin/actions-runner: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Ephemeral runner startup script. 5 | # 6 | # Expects the following environment variables: 7 | # 8 | # - repo=/ 9 | # - access_token= 10 | # - runner_name= 11 | 12 | set -e -u 13 | 14 | # Check the cached registration token. 15 | token_file=registration-token.json 16 | set +e 17 | expires_at=$(jq --raw-output .expires_at "$token_file" 2>/dev/null) 18 | status=$? 19 | set -e 20 | if [[ $status -ne 0 || $(date +%s) -ge $(date -d "$expires_at" +%s) ]]; then 21 | # Refresh the cached registration token. 22 | curl \ 23 | -X POST \ 24 | -H "Accept: application/vnd.github.v3+json" \ 25 | -H "Authorization: token $access_token" \ 26 | "https://api.github.com/repos/$repo/actions/runners/registration-token" \ 27 | -o "$token_file" 28 | fi 29 | 30 | # (Re-)register the runner. 31 | registration_token=$(jq --raw-output .token "$token_file") 32 | ./config.sh remove --token "$registration_token" || true 33 | ./config.sh \ 34 | --url "https://github.com/$repo" \ 35 | --token "$registration_token" \ 36 | --labels z15,s390x \ 37 | --replace \ 38 | --unattended \ 39 | --name "$runner_name" \ 40 | --ephemeral 41 | 42 | # Run one job. 43 | ./run.sh 44 | -------------------------------------------------------------------------------- /rootfs/s390x-self-hosted-builder/fs/usr/bin/entrypoint: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Container entrypoint that waits for all spawned processes. 5 | # 6 | 7 | set -e -u 8 | 9 | # /dev/kvm has host permissions, fix it. 10 | if [ -e /dev/kvm ]; then 11 | sudo chown root:kvm /dev/kvm 12 | fi 13 | 14 | # Create a FIFO and start reading from its read end. 15 | tempdir=$(mktemp -d "/tmp/done.XXXXXXXXXX") 16 | trap 'rm -r "$tempdir"' EXIT 17 | done="$tempdir/pipe" 18 | mkfifo "$done" 19 | cat "$done" & waiter=$! 20 | 21 | # Start the workload. Its descendants will inherit the FIFO's write end. 22 | status=0 23 | if [ "$#" -eq 0 ]; then 24 | bash 9>"$done" || status=$? 25 | else 26 | "$@" 9>"$done" || status=$? 27 | fi 28 | 29 | # When the workload and all of its descendants exit, the FIFO's write end will 30 | # be closed and `cat "$done"` will exit. Wait until it happens. 
This is needed 31 | # in order to handle SelfUpdater, which the workload may start in background 32 | # before exiting. 33 | wait "$waiter" 34 | 35 | exit "$status" 36 | -------------------------------------------------------------------------------- /rootfs/s390x-self-hosted-builder/qemu-user-static.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Support for transparent execution of non-native binaries with QEMU user emulation 3 | 4 | [Service] 5 | Type=oneshot 6 | # The source code for iiilinuxibmcom/qemu-user-static is at https://github.com/iii-i/qemu-user-static/tree/v6.1.0-1 7 | # TODO: replace it with multiarch/qemu-user-static once version >6.1 is available 8 | ExecStart=/usr/bin/docker run --rm --interactive --privileged iiilinuxibmcom/qemu-user-static --reset -p yes 9 | 10 | [Install] 11 | WantedBy=multi-user.target 12 | -------------------------------------------------------------------------------- /run-qemu/action.yml: -------------------------------------------------------------------------------- 1 | name: 'run qemu' 2 | description: 'Run qemu with given rootfs and print summary' 3 | inputs: 4 | arch: 5 | description: 'arch' 6 | required: true 7 | img: 8 | description: 'img path' 9 | required: true 10 | vmlinuz: 11 | description: 'vmlinuz path' 12 | required: true 13 | kernel-root: 14 | description: 'kernel source dir' 15 | default: '.' 16 | max-cpu: 17 | description: 'Maximum number of CPUs allocated to a VM (regardless of the number of CPUs available on the host). Default is unset, i.e. it will default to the number of CPUs on the host.' 18 | default: '' 19 | kernel-test: 20 | description: 'Test to run' 21 | default: '' 22 | output-dir: 23 | description: | 24 | Some sub-commands produce an output dir within the VM file system (/command_output/). 25 | If this option is set, that dir's content will be copied to the corresponding location. 26 | default: '' 27 | runs: 28 | using: "composite" 29 | steps: 30 | - name: install qemu tools 31 | shell: bash 32 | run: | 33 | source "${GITHUB_ACTION_PATH}/../helpers.sh" 34 | foldable start install_qemu "Installing QEMU tools" 35 | sudo apt-get update && sudo apt-get install -y cpu-checker qemu-kvm qemu-utils qemu-system-x86 qemu-system-s390x qemu-system-arm 36 | foldable end install_qemu 37 | - name: test 38 | shell: bash 39 | env: 40 | VMLINUZ: ${{ inputs.vmlinuz }} 41 | IMG: ${{ inputs.img }} 42 | KERNEL_ROOT: ${{ inputs.kernel-root }} 43 | MAX_CPU: ${{ inputs.max-cpu }} 44 | KERNEL_TEST: ${{ inputs.kernel-test }} 45 | OUTPUT_DIR: ${{ inputs.output-dir }} 46 | run: | 47 | ARCH="${{ inputs.arch }}" ${GITHUB_ACTION_PATH}/run.sh 48 | -------------------------------------------------------------------------------- /run-qemu/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | trap 'exit 2' ERR 5 | 6 | source $(cd $(dirname $0) && pwd)/../helpers.sh 7 | 8 | foldable start bpftool_checks "Running bpftool checks..." 9 | bpftool_exitstatus=0 10 | 11 | MAX_CPU=${MAX_CPU:-$(nproc)} 12 | 13 | # bpftool checks are aimed at checking type names, documentation, shell 14 | # completion etc. against the current kernel, so only run on LATEST. 15 | if [[ "${KERNEL}" = 'LATEST' ]]; then 16 | # "&& true" does not change the return code (it is not executed if the 17 | # Python script fails), but it prevents the trap on ERR set at the top 18 | # of this file from triggering on failure. 
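# (Illustrative: with "trap 'exit 2' ERR", a bare failing command aborts the script, while "cmd && true" does not -- and $? on the next line still holds cmd's exit code.)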
19 | "${REPO_ROOT}/${KERNEL_ROOT}/tools/testing/selftests/bpf/test_bpftool_synctypes.py" && true 20 | bpftool_exitstatus=$? 21 | if [[ $bpftool_exitstatus -eq 0 ]]; then 22 | echo "bpftool checks passed successfully." 23 | else 24 | echo "bpftool checks returned ${bpftool_exitstatus}." 25 | fi 26 | else 27 | echo "bpftool checks skipped." 28 | fi 29 | 30 | bpftool_exitstatus="bpftool:${bpftool_exitstatus}" 31 | foldable end bpftool_checks 32 | 33 | foldable start vm_init "Starting virtual machine..." 34 | 35 | echo "Starting VM with $(nproc) CPUs..." 36 | 37 | APPEND=${APPEND:-} 38 | 39 | case "$ARCH" in 40 | s390x) 41 | qemu="qemu-system-s390x" 42 | console="ttyS1" 43 | smp=2 44 | kvm_accel="-enable-kvm" 45 | tcg_accel="-machine accel=tcg" 46 | ;; 47 | x86_64) 48 | qemu="qemu-system-x86_64" 49 | console="ttyS0,115200" 50 | smp=$(nproc) 51 | kvm_accel="-cpu kvm64 -enable-kvm" 52 | tcg_accel="-cpu qemu64 -machine accel=tcg" 53 | ;; 54 | aarch64) 55 | qemu="qemu-system-aarch64" 56 | console="ttyAMA0,115200" 57 | smp=$(nproc) 58 | kvm_accel="-cpu host -enable-kvm -machine virt,gic-version=3,accel=kvm:tcg" 59 | tcg_accel="-cpu cortex-a72 -machine virt,accel=tcg" 60 | ;; 61 | 62 | *) 63 | echo "Unsupported architecture" 64 | exit 1 65 | ;; 66 | esac 67 | 68 | if kvm-ok ; then 69 | accel=$kvm_accel 70 | else 71 | accel=$tcg_accel 72 | fi 73 | 74 | smp=$(( $smp > $MAX_CPU ? $MAX_CPU : $smp )) 75 | 76 | if [[ -n ${KERNEL_TEST} ]]; then 77 | APPEND+=" run_tests=${KERNEL_TEST}" 78 | fi 79 | 80 | CACHE_OPT=",cache=none" 81 | img_fs=$(df --output=fstype "${IMG}" | sed 1d) 82 | if [[ ${img_fs} == "tmpfs" ]]; then 83 | CACHE_OPT="" 84 | fi 85 | 86 | "$qemu" -nodefaults --no-reboot -nographic \ 87 | -chardev stdio,id=char0,mux=on,signal=off,logfile=boot.log \ 88 | -serial chardev:char0 \ 89 | ${accel} -smp "$smp" -m 8G \ 90 | -drive file="$IMG",format=raw,index=1,media=disk,if=virtio${CACHE_OPT} \ 91 | -kernel "$VMLINUZ" -append "root=/dev/vda rw console=$console panic=-1 sysctl.vm.panic_on_oom=1 $APPEND" 92 | 93 | exitfile="${bpftool_exitstatus}\n" 94 | exitfile+="$(guestfish --ro -a "$IMG" -i cat /exitstatus 2>/dev/null)" 95 | exitstatus="$(echo -e "$exitfile" | awk --field-separator ':' \ 96 | 'BEGIN { s=0 } { if ($2) {s=1} } END { print s }')" 97 | 98 | if [[ "$exitstatus" =~ ^[0-9]+$ ]]; then 99 | printf '\nTests exit status: %s\n' "$exitstatus" >&2 100 | else 101 | printf '\nCould not read tests exit status ("%s")\n' "$exitstatus" >&2 102 | exitstatus=1 103 | fi 104 | 105 | foldable end shutdown 106 | 107 | # Try to collect json summary from VM 108 | if [[ -n ${KERNEL_TEST} && ${KERNEL_TEST} =~ test_progs* ]] 109 | then 110 | guestfish --ro -a "$IMG" -i download "/${KERNEL_TEST}.json" "${KERNEL_TEST}.json" 111 | if [ $? 
] 112 | then 113 | ## Job summary 114 | "${GITHUB_ACTION_PATH}/../run-vmtest/print_test_summary.py" -s "${GITHUB_STEP_SUMMARY}" -j "${KERNEL_TEST}.json" 115 | fi 116 | fi 117 | 118 | # Try to collect $IMG:/command_output to OUTPUT_DIR 119 | if [[ -n "${OUTPUT_DIR}" ]] 120 | then 121 | mkdir -p "${OUTPUT_DIR}" 122 | guestfish --ro --add "$IMG" --inspector tar-out /command_output/ - \ 123 | | tar --extract --file - --directory="${OUTPUT_DIR}" 124 | fi 125 | 126 | # Final summary - Don't use a fold, keep it visible 127 | echo -e "\033[1;33mTest Results:\033[0m" 128 | echo -e "$exitfile" | while read result; do 129 | testgroup=${result%:*} 130 | status=${result#*:} 131 | # Print final result for each group of tests 132 | if [[ "$status" -eq 0 ]]; then 133 | printf "%20s: \033[1;32mPASS\033[0m\n" "$testgroup" 134 | else 135 | printf "%20s: \033[1;31mFAIL\033[0m (returned %s)\n" "$testgroup" "$status" 136 | fi 137 | done 138 | 139 | shutdownstatus="$(guestfish --ro -a "$IMG" -i cat /shutdown-status 2>/dev/null)" 140 | if [[ "${shutdownstatus}" == "clean" ]]; then 141 | printf "%20s: \033[1;32mCLEAN\033[0m\n" "shutdown" 142 | else 143 | printf "%20s: \033[1;31mNOT CLEAN\033[0m" "shutdown" 144 | exitstatus=1 145 | fi 146 | 147 | exit "$exitstatus" 148 | -------------------------------------------------------------------------------- /run-vmtest/README.md: -------------------------------------------------------------------------------- 1 | # Run vmtest 2 | 3 | This action is designed to run Linux Kernel BPF selftests. 4 | 5 | It expects kernel binaries as well as test runner binaries as input, 6 | and executes the test runners with a given kernel using the [vmtest 7 | tool](https://github.com/danobi/vmtest). 8 | 9 | In summary the action performs the following: 10 | * Download specified vmtest release 11 | * Install qemu and other dependencies (assuming Ubuntu environment) 12 | * Configure access to [/dev/kvm](https://en.wikipedia.org/wiki/Kernel-based_Virtual_Machine) 13 | * Execute run.sh 14 | * Set up the environment variables 15 | * Choose runner scripts 16 | * Run vmtest 17 | * Collect and test results and report 18 | 19 | Note that behavior of the running scripts is tunable mostly by the 20 | environment variables. 21 | 22 | ## Required inputs 23 | 24 | * `arch` - Target architecture 25 | 26 | ## Optional inputs 27 | 28 | * `kernel-root` (default: current dir) - path to the root of Linux Kernel source tree 29 | * `kernel-test` - controls what test runners are executed 30 | * can be a name of a test runner program, such as `test_progs` 31 | * if not set, all known test runners will run one-by-one, see `run-bpf-selftests.sh` 32 | * if set to `sched_ext`, then `run-scx-selftests.sh` is executed 33 | * `max-cpu` - limit number of cpus to use 34 | * `vmlinuz` - path to the kernel bzImage, passed to vmtest 35 | * if not specified, `$VMLINUZ` var is checked 36 | * if `$VMLINUZ` is not set, the script will attempt to run `make -s 37 | image_name` to find the image 38 | * `output-dir` - path for test runner summaries and veristat output 39 | * `kbuild-output` (default: `./kbuild-output`) - path to Linux Kernel binaries, aka `$KBUILD_OUTPUT` 40 | * `vmtest-release` - release version name of the vmtest tool 41 | 42 | ## run-vmtest.env 43 | 44 | There are a couple of scripts, as well as code in the 45 | `run-bpf-selftests.sh` that handles ALLOWLIST and DENYLIST, which are 46 | very important in context of CI. 
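As a preview (the variables it exports are explained in the next paragraphs), a minimal hypothetical `run-vmtest.env` could look like this, using the list files under `ci/vmtest/configs`:

```
# Sketch of $VMTEST_CONFIGS/run-vmtest.env; the exact paths are illustrative.
ARCH=$(uname -m)
export SELFTESTS_BPF_ALLOWLIST_FILES=""
export SELFTESTS_BPF_DENYLIST_FILES="${VMTEST_CONFIGS}/DENYLIST|${VMTEST_CONFIGS}/DENYLIST.${ARCH}"
```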
47 | 48 | Typically there is a need for granular lists, such as per-arch, 49 | per-kernel-version, or per-test-runner lists. So it is often necessary 50 | to pre-process a number of lists into one, which is then passed to the 51 | test runner. 52 | 53 | To avoid copy-pasting merging scripts between users of the action, a 54 | special file `$VMTEST_CONFIGS/run-vmtest.env` is sourced by 55 | `run.sh`. 56 | 57 | run-vmtest.env is expected to export `SELFTESTS_BPF_ALLOWLIST_FILES` 58 | and `SELFTESTS_BPF_DENYLIST_FILES`, which are pipe-separated lists of 59 | files. These variables are then used by `prepare-bpf-selftests.sh` to 60 | produce the final allow/denylist passed to the runners. If these variables 61 | are not set, `prepare-bpf-selftests.sh` does nothing. 62 | 63 | See `ci/vmtest/configs/run-vmtest.env` for an example. 64 | 65 | -------------------------------------------------------------------------------- /run-vmtest/action.yml: -------------------------------------------------------------------------------- 1 | name: 'run vmtest' 2 | description: 'Run vmtest and print summary' 3 | inputs: 4 | arch: 5 | description: 'arch' 6 | required: true 7 | vmlinuz: 8 | description: | 9 | If passed, this vmlinuz will be used. Otherwise vmlinuz will be 10 | searched via make -s image_name in kbuild-output. 11 | required: false 12 | kernel-root: 13 | description: 'kernel source dir' 14 | default: '.' 15 | max-cpu: 16 | description: 'Maximum number of CPUs allocated to a VM (regardless of the number of CPUs available on the host). Default is unset, i.e. it will default to the number of CPUs on the host.' 17 | default: '' 18 | kernel-test: 19 | description: 'Test to run' 20 | default: '' 21 | output-dir: 22 | description: | 23 | Some sub-commands produce an output dir within the VM file system (/command_output/). 24 | If this option is set, that dir's content will be copied to the corresponding location. 25 | default: '' 26 | kbuild-output: 27 | description: 'Path to KBUILD_OUTPUT' 28 | required: false 29 | default: 'kbuild-output' 30 | vmtest-release: 31 | description: 'Release version of vmtest tool to use' 32 | required: false 33 | default: 'v0.15.0' 34 | 35 | runs: 36 | using: "composite" 37 | steps: 38 | - name: Download vmtest 39 | shell: bash 40 | run: | 41 | VMTEST_URL="https://github.com/danobi/vmtest/releases/download/${{ inputs.vmtest-release }}/vmtest-$(uname -m)" 42 | sudo curl -L $VMTEST_URL -o /usr/bin/vmtest 43 | sudo chmod 755 /usr/bin/vmtest 44 | - name: install qemu tools and selftest dependencies 45 | shell: bash 46 | run: | 47 | source "${GITHUB_ACTION_PATH}/../helpers.sh" 48 | foldable start install_qemu "Installing QEMU tools" 49 | # need gawk to support `--field-separator` 50 | sudo apt-get update && sudo apt-get install -y cpu-checker qemu-kvm qemu-utils qemu-system-x86 qemu-system-s390x qemu-system-arm qemu-guest-agent \ 51 | ethtool keyutils iptables libpcap-dev \ 52 | gawk 53 | foldable end install_qemu 54 | - name: Configure KVM group perms 55 | shell: bash 56 | run: | 57 | source "${GITHUB_ACTION_PATH}/../helpers.sh" 58 | foldable start config_kvm "Configuring KVM permissions" 59 | # Only configure kvm perms if kvm is available 60 | if [[ -e /dev/kvm && ! 
-w /dev/kvm ]]; then 61 | echo "Updating KVM permissions" 62 | echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules 63 | sudo udevadm control --reload-rules 64 | sudo udevadm trigger --name-match=kvm 65 | fi 66 | foldable end config_kvm 67 | - name: Run vmtest 68 | shell: bash 69 | env: 70 | ARCH: ${{ inputs.arch }} 71 | KBUILD_OUTPUT: ${{ inputs.kbuild-output }} 72 | KERNEL_ROOT: ${{ inputs.kernel-root }} 73 | KERNEL_TEST: ${{ inputs.kernel-test }} 74 | MAX_CPU: ${{ inputs.max-cpu }} 75 | OUTPUT_DIR: ${{ inputs.output-dir }} 76 | VMLINUZ: ${{ inputs.vmlinuz || '' }} 77 | run: | 78 | ${GITHUB_ACTION_PATH}/run.sh 79 | 80 | -------------------------------------------------------------------------------- /run-vmtest/helpers.sh: -------------------------------------------------------------------------------- 1 | # shellcheck shell=bash 2 | 3 | # $1 - start or end 4 | # $2 - fold identifier, no spaces 5 | # $3 - fold section description 6 | foldable() { 7 | local YELLOW='\033[1;33m' 8 | local NOCOLOR='\033[0m' 9 | if [ $1 = "start" ]; then 10 | line="::group::$2" 11 | if [ ! -z "${3:-}" ]; then 12 | line="$line - ${YELLOW}$3${NOCOLOR}" 13 | fi 14 | else 15 | line="::endgroup::" 16 | fi 17 | echo -e "$line" 18 | } 19 | 20 | __print() { 21 | local TITLE="" 22 | if [[ -n $2 ]]; then 23 | TITLE=" title=$2" 24 | fi 25 | echo "::$1${TITLE}::$3" 26 | } 27 | 28 | # $1 - title 29 | # $2 - message 30 | print_error() { 31 | __print error $1 $2 32 | } 33 | 34 | # $1 - title 35 | # $2 - message 36 | print_notice() { 37 | __print notice $1 $2 38 | } 39 | 40 | read_lists() { 41 | (for path in "$@"; do 42 | if [[ -s "$path" ]]; then 43 | cat "$path" 44 | fi; 45 | done) | cut -d'#' -f1 | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' | tr -s '\n' ',' 46 | } 47 | -------------------------------------------------------------------------------- /run-vmtest/normalize-paths-for-github.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | # Github's actions/upload-artifact has restriction on the characters that can be used in a path. 6 | # Invalid characters include: 7 | # Double quote ", 8 | # Colon :, 9 | # Less than <, 10 | # Greater than >, 11 | # Vertical bar |, 12 | # Asterisk *, 13 | # Question mark ?, 14 | # Carriage return \r, 15 | # Line feed \n 16 | 17 | DIR=$1 18 | if [[ ! -d "$DIR" ]]; then 19 | exit 0 20 | fi 21 | 22 | normalize() { 23 | local path="$1" 24 | echo -n "$path" | tr '":><|*?\r\n' '_________' 25 | } 26 | 27 | find "$DIR" -depth | while read -r path; do 28 | if [[ "$path" == "$DIR" ]]; then 29 | continue 30 | fi 31 | dirname=$(dirname "$path") 32 | basename=$(basename "$path") 33 | new_basename=$(normalize "$basename") 34 | if [[ "$basename" != "$new_basename" ]]; then 35 | new_path="${dirname}/${new_basename}" 36 | mv -v "$path" "$new_path" 37 | fi 38 | done 39 | -------------------------------------------------------------------------------- /run-vmtest/normalize_bpf_test_names.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | # This script normalizes a list of selftests/bpf test names. These 4 | # lists have the following format: 5 | # * each line indicates a test name 6 | # * a line can contain an end-line comment, starting with # 7 | # 8 | # The test names may contain spaces, commas, and potentially other characters. 
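# For example (hypothetical input), the concatenated lines
#     a_test/subtest2   # from one denylist
#     a_test            # from another denylist
#     a_test
# normalize to the single entry "a_test".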
9 | # 10 | # The purpose of this script is to take in a composite allow/denylist 11 | # (usually produced as an append of multiple different lists) and 12 | # transform it into one clean deduplicated list. 13 | # 14 | # In addition to dedup of tests by exact match, subtests are taken 15 | # into account. For example, one source denylist may contain a test 16 | # "a_test/subtest2", and another may contain simply "a_test". In such 17 | # a case "a_test" indicates that no subtest of "a_test" should run, and 18 | # so "a_test/subtest2" shouldn't be in the list. 19 | # 20 | # The result is printed to stdout. 21 | 22 | import sys 23 | 24 | def clean_line(line: str) -> str: 25 | line = line.split('#')[0] # remove comment 26 | line = line.strip() # strip whitespace 27 | return line 28 | 29 | def read_clean_and_sort_input(file) -> list[str]: 30 | input = [] 31 | for line in file: 32 | line = clean_line(line) 33 | if len(line) == 0: 34 | continue 35 | input.append(line) 36 | input.sort() 37 | return input 38 | 39 | # Deduplicate subtests and yield the next unique test name 40 | def next_test(lines: list[str]): 41 | 42 | if not lines: 43 | return 44 | 45 | prev = lines[0] 46 | 47 | def is_subtest(line: str) -> bool: 48 | return ('/' in line) and ('/' not in prev) and line.startswith(prev + '/') 49 | 50 | yield lines[0] 51 | for line in lines[1:]: 52 | if prev == line or is_subtest(line): 53 | continue 54 | yield line 55 | prev = line 56 | 57 | 58 | if __name__ == '__main__': 59 | 60 | if len(sys.argv) != 2: 61 | print("Usage: normalize_bpf_test_names.py <file>", file=sys.stderr) 62 | sys.exit(1) 63 | 64 | lines = [] 65 | with open(sys.argv[1]) as file: 66 | lines = read_clean_and_sort_input(file) 67 | 68 | for line in next_test(lines): 69 | print(line) 70 | 71 | -------------------------------------------------------------------------------- /run-vmtest/prepare-bpf-selftests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | if [[ -z "${SELFTESTS_BPF_ALLOWLIST_FILES:-}" && -z "${SELFTESTS_BPF_DENYLIST_FILES:-}" ]]; then 6 | exit 0 7 | fi 8 | 9 | function merge_test_lists_into() { 10 | local out="$1" 11 | shift 12 | local files=("$@") 13 | 14 | local list=$(mktemp) 15 | echo -n > "$list" 16 | # append all the source lists into one 17 | for file in "${files[@]}"; do 18 | if [[ -f "$file" ]]; then 19 | echo "Include $file" 20 | cat "$file" >> "$list" 21 | fi 22 | done 23 | 24 | # then normalize the list of test names 25 | $GITHUB_ACTION_PATH/normalize_bpf_test_names.py "$list" > "$out" 26 | rm "$list" 27 | } 28 | 29 | # Read arrays from pipe-separated strings 30 | IFS="|" read -r -a ALLOWLIST_FILES <<< "${SELFTESTS_BPF_ALLOWLIST_FILES:-}" 31 | IFS="|" read -r -a DENYLIST_FILES <<< "${SELFTESTS_BPF_DENYLIST_FILES:-}" 32 | 33 | merge_test_lists_into "${ALLOWLIST_FILE}" "${ALLOWLIST_FILES[@]}" 34 | merge_test_lists_into "${DENYLIST_FILE}" "${DENYLIST_FILES[@]}" 35 | 36 | exit 0 37 | -------------------------------------------------------------------------------- /run-vmtest/print_test_summary.py: -------------------------------------------------------------------------------- 1 | #!/bin/python3 2 | # prints a summary of the tests to both the console and the job summary: 3 | # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-job-summary 4 | # 5 | # To test the output of the GH test summary: 6 | # python3 run-vmtest/print_test_summary.py -j run-vmtest/fixtures/test_progs.json -s /dev/stderr > /dev/null 7 | # To 
test the output of the console: 8 | # python3 run-vmtest/print_test_summary.py -j run-vmtest/fixtures/test_progs.json -s /dev/stderr 2> /dev/null 9 | 10 | import argparse 11 | import json 12 | import os 13 | import sys 14 | 15 | def parse_args(): 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument( 18 | "-j", 19 | "--json-summary", 20 | required=True, 21 | metavar="FILE", 22 | help="test_progs's json summary file", 23 | ) 24 | parser.add_argument( 25 | "-s", 26 | "--step-summary", 27 | required=True, 28 | metavar="FILE", 29 | help="Github step summary file", 30 | ) 31 | parser.add_argument( 32 | "-a", "--append", action="store_true", help="Append to github step summary file" 33 | ) 34 | return parser.parse_args() 35 | 36 | 37 | def notice(text: str) -> str: 38 | return f"::notice::{text}" 39 | 40 | 41 | def error(text: str) -> str: 42 | return f"::error::{text}" 43 | 44 | 45 | def markdown_summary(json_summary: dict): 46 | return f"""- :heavy_check_mark: Success: {json_summary['success']}/{json_summary['success_subtest']} 47 | - :next_track_button: Skipped: {json_summary['skipped']} 48 | - :x: Failed: {json_summary['failed']}""" 49 | 50 | 51 | def console_summary(json_summary: dict): 52 | return f"Success: {json_summary['success']}/{json_summary['success_subtest']}, Skipped: {json_summary['skipped']}, Failed: {json_summary['failed']}" 53 | 54 | 55 | def log_gh_summary(file, text: str): 56 | print(text, file=file) 57 | 58 | 59 | def log_console(text: str): 60 | print(text) 61 | 62 | 63 | def group(text: str, title: str = "", error: bool = False) -> str: 64 | if error and title: 65 | title = f"\033[1;31mError:\033[0m {title}" 66 | return f"""::group::{title} 67 | {text} 68 | ::endgroup::""" 69 | 70 | 71 | def test_error_console_log(test_error: str, test_message: str) -> str: 72 | error_msg = error(test_error) 73 | if test_message: 74 | error_msg += "\n" + test_message.strip() 75 | return group(error_msg, title=test_error, error=True) 76 | else: 77 | return error_msg 78 | 79 | def error_die(msg: str): 80 | print("print_test_summary.py: {}".format(msg), file=sys.stderr) 81 | exit(0) 82 | 83 | if __name__ == "__main__": 84 | args = parse_args() 85 | step_open_mode = "a" if args.append else "w" 86 | str_summary = None 87 | json_summary = None 88 | 89 | if not os.path.exists(args.json_summary): 90 | error_die("Could not find {}".format(args.json_summary)) 91 | elif os.stat(args.json_summary).st_size == 0: 92 | error_die("{} is empty".format(args.json_summary)) 93 | 94 | with open(args.json_summary, "r") as f: 95 | str_summary = f.read() 96 | 97 | try: 98 | json_summary = json.loads(str_summary) 99 | except json.JSONDecodeError: 100 | error_die("{} is not a valid JSON\n{}".format(args.json_summary, str_summary)) 101 | 102 | with open(args.step_summary, step_open_mode) as f: 103 | log_gh_summary(f, "# Tests summary") 104 | log_gh_summary(f, markdown_summary(json_summary)) 105 | 106 | log_console(notice(console_summary(json_summary))) 107 | 108 | for test in json_summary["results"]: 109 | test_name = test["name"] 110 | test_number = test["number"] 111 | if test["failed"]: 112 | test_log = f"#{test_number} {test_name}" 113 | log_gh_summary(f, test_log) 114 | log_console(test_error_console_log(test_log, test["message"])) 115 | 116 | for subtest in test["subtests"]: 117 | if subtest["failed"]: 118 | subtest_log = f"#{test_number}/{subtest['number']} {test_name}/{subtest['name']}" 119 | log_gh_summary(f, subtest_log) 120 | log_console(test_error_console_log(subtest_log, 
subtest["message"])) 121 | -------------------------------------------------------------------------------- /run-vmtest/run-bpf-selftests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is expected to be executed by vmtest program (a qemu 4 | # wrapper). By default vmtest mounts working directory to /mnt/vmtest, 5 | # which is why this path is often assumed in the script. The working 6 | # directory is usually (although not necessarily) the 7 | # $GITHUB_WORKSPACE of the Github Action workflow, calling 8 | # libbpf/ci/run-vmtest action. 9 | # See also action.yml and run.sh 10 | # 11 | # The script executes the tests within $SELFTESTS_BPF directory. 12 | # Runners passed as arguments are executed. In case of no arguments, 13 | # all test runners are executed. 14 | 15 | set -euo pipefail 16 | 17 | source "$(cd "$(dirname "$0")" && pwd)/helpers.sh" 18 | 19 | ARCH=$(uname -m) 20 | 21 | export SELFTESTS_BPF=${SELFTESTS_BPF:-/mnt/vmtest/selftests/bpf} 22 | 23 | STATUS_FILE=${STATUS_FILE:-/mnt/vmtest/exitstatus} 24 | OUTPUT_DIR=${OUTPUT_DIR:-/mnt/vmtest} 25 | 26 | test_progs_helper() { 27 | local selftest="test_progs${1}" 28 | local args=("$2") 29 | 30 | if [ "${args[0]}" == "" ]; then 31 | args=("${args[@]:1}") 32 | fi 33 | 34 | json_file=${selftest/-/_} 35 | if [ "$2" == "-j" ] 36 | then 37 | json_file+="_parallel" 38 | fi 39 | json_file="${OUTPUT_DIR}/${json_file}.json" 40 | 41 | args+=(${TEST_PROGS_WATCHDOG_TIMEOUT:+-w$TEST_PROGS_WATCHDOG_TIMEOUT}) 42 | args+=(${ALLOWLIST_FILE:+-a@$ALLOWLIST_FILE}) 43 | args+=(${DENYLIST_FILE:+-d@$DENYLIST_FILE}) 44 | args+=(-J "${json_file}") 45 | args+=(${TEST_PROGS_TRAFFIC_MONITOR:+-m '*'}) 46 | 47 | foldable start ${selftest} "Testing ${selftest}" 48 | echo "./${selftest}" "${args[@]}" 49 | # "&& true" does not change the return code (it is not executed 50 | # if the Python script fails), but it prevents exiting on a 51 | # failure due to the "set -e". 52 | ./${selftest} "${args[@]}" && true 53 | echo "${selftest}:$?" >>"${STATUS_FILE}" 54 | foldable end ${selftest} 55 | } 56 | 57 | test_progs() { 58 | test_progs_helper "" "" 59 | } 60 | 61 | test_progs_parallel() { 62 | test_progs_helper "" "-j" 63 | } 64 | 65 | test_progs_no_alu32() { 66 | test_progs_helper "-no_alu32" "" 67 | } 68 | 69 | test_progs_no_alu32_parallel() { 70 | test_progs_helper "-no_alu32" "-j" 71 | } 72 | 73 | test_progs_cpuv4() { 74 | test_progs_helper "-cpuv4" "" 75 | } 76 | 77 | test_maps() { 78 | foldable start test_maps "Testing test_maps" 79 | taskset 0xF ./test_maps && true 80 | echo "test_maps:$?" >>"${STATUS_FILE}" 81 | foldable end test_maps 82 | } 83 | 84 | test_verifier() { 85 | foldable start test_verifier "Testing test_verifier" 86 | ./test_verifier && true 87 | echo "test_verifier:$?" 
>>"${STATUS_FILE}" 88 | foldable end test_verifier 89 | } 90 | 91 | test_progs-bpf_gcc() { 92 | test_progs_helper "-bpf_gcc" "" 93 | } 94 | 95 | export VERISTAT_CONFIGS=${VERISTAT_CONFIGS:-/mnt/vmtest/ci/vmtest/configs} 96 | export WORKING_DIR=$(pwd) # veristat config expects this variable 97 | 98 | run_veristat_helper() { 99 | local mode="${1}" 100 | 101 | # Make veristat commands visible in the log 102 | if [ -o xtrace ]; then 103 | xtrace_was_on="1" 104 | else 105 | xtrace_was_on="" 106 | set -x 107 | fi 108 | 109 | ( 110 | # shellcheck source=ci/vmtest/configs/run_veristat.default.cfg 111 | # shellcheck source=ci/vmtest/configs/run_veristat.meta.cfg 112 | source "${VERISTAT_CONFIGS}/run_veristat.${mode}.cfg" 113 | pushd "${VERISTAT_OBJECTS_DIR}" 114 | 115 | "${SELFTESTS_BPF}/veristat" -o csv -q -e file,prog,verdict,states ${VERISTAT_OBJECTS_GLOB} > \ 116 | "${OUTPUT_DIR}/${VERISTAT_OUTPUT}" 117 | 118 | echo "run_veristat_${mode}:$?" >> ${STATUS_FILE} 119 | popd 120 | ) 121 | 122 | # Hide commands again 123 | if [ -z "$xtrace_was_on" ]; then 124 | set +x 125 | fi 126 | 127 | } 128 | 129 | run_veristat_kernel() { 130 | foldable start run_veristat_kernel "Running veristat.kernel" 131 | run_veristat_helper "kernel" 132 | foldable end run_veristat_kernel 133 | } 134 | 135 | run_veristat_meta() { 136 | foldable start run_veristat_meta "Running veristat.meta" 137 | run_veristat_helper "meta" 138 | foldable end run_veristat_meta 139 | } 140 | 141 | foldable end vm_init 142 | 143 | foldable start kernel_config "Kconfig" 144 | zcat /proc/config.gz 145 | foldable end kernel_config 146 | 147 | 148 | if [ -f "${ALLOWLIST_FILE:-}" ]; then 149 | foldable start allowlist "Print ALLOWLIST" 150 | cat "${ALLOWLIST_FILE}" 151 | foldable end allowlist 152 | else 153 | echo "ALLOWLIST_FILE=${ALLOWLIST_FILE:-} is not set or does not exist" 154 | fi 155 | 156 | if [ -f "${DENYLIST_FILE:-}" ]; then 157 | foldable start denylist "Print DENYLIST" 158 | cat "${DENYLIST_FILE}" 159 | foldable end denylist 160 | else 161 | echo "DENYLIST_FILE=${DENYLIST_FILE:-} is not set or does not exist" 162 | fi 163 | 164 | cd $SELFTESTS_BPF 165 | 166 | declare -a TEST_NAMES=($@) 167 | # if we don't have any test name provided to the script, we run all tests. 168 | if [ ${#TEST_NAMES[@]} -eq 0 ]; then 169 | test_progs 170 | test_progs_no_alu32 171 | test_progs_cpuv4 172 | test_maps 173 | test_verifier 174 | if [ -f test_progs-bpf_gcc ]; then 175 | test_progs-bpf_gcc 176 | fi 177 | else 178 | # else we run the tests passed as command-line arguments and through boot 179 | # parameter. 
180 | for test_name in "${TEST_NAMES[@]}"; do 181 | "${test_name}" 182 | done 183 | fi 184 | -------------------------------------------------------------------------------- /run-vmtest/run-scx-selftests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | SELFTESTS_DIR="${KERNEL_ROOT}/selftests/sched_ext" 6 | STATUS_FILE=/mnt/vmtest/exitstatus 7 | 8 | cd "${SELFTESTS_DIR}" 9 | 10 | echo "Executing selftests/sched_ext/runner" 11 | echo "runner output is being written to runner.log" 12 | 13 | ./runner "$@" > runner.log 2>&1 & wait 14 | 15 | echo "runner finished, checking results" 16 | echo "[...]" 17 | tail -n 16 runner.log 18 | 19 | failed=$(tail -n 16 runner.log | grep "FAILED" | awk '{print $2}') 20 | 21 | if [ "$failed" -gt 0 ]; then 22 | echo "Tests failed, dumping full runner log and dmesg" 23 | 24 | echo "-------- runner.log start --------" 25 | cat runner.log 26 | echo "-------- runner.log end ----------" 27 | 28 | echo "-------- dmesg start --------" 29 | dmesg -H 30 | echo "-------- dmesg end ----------" 31 | fi 32 | 33 | echo "selftests/sched_ext:$failed" >> "${STATUS_FILE}" 34 | 35 | exit 0 36 | 37 | -------------------------------------------------------------------------------- /run-vmtest/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | trap 'exit 2' ERR 5 | 6 | source "${GITHUB_ACTION_PATH}/../helpers.sh" 7 | 8 | export ARCH=${ARCH:-$(uname -m)} 9 | 10 | export VMLINUZ=${VMLINUZ:-} 11 | if [[ ! -f "${VMLINUZ}" ]]; then 12 | echo "Could not find VMLINUZ=\"$VMLINUZ\", searching with make -s image_name" 13 | karch=$(platform_to_kernel_arch $ARCH) 14 | image_name=$(ARCH=${karch} make -C ${KERNEL_ROOT} -s image_name) 15 | export VMLINUZ=$(realpath ${KBUILD_OUTPUT})/${image_name} 16 | fi 17 | 18 | if [[ ! -f "${VMLINUZ}" ]]; then 19 | echo "Could not find VMLINUZ (compressed kernel binary), exiting" 20 | exit 2 21 | fi 22 | 23 | # Create a symlink to vmlinux from a "standard" location 24 | # See btf__load_vmlinux_btf() in libbpf 25 | export VMLINUX=${VMLINUX:-"$KBUILD_OUTPUT/vmlinux"} 26 | if [[ -f "${VMLINUX}" ]]; then 27 | VMLINUX_VERSION="$(strings ${VMLINUX} | grep -m 1 'Linux version' | awk '{print $3}')" || true 28 | sudo mkdir -p /usr/lib/debug/boot 29 | sudo ln -sf "${VMLINUX}" "/usr/lib/debug/boot/vmlinux-${VMLINUX_VERSION}" 30 | else 31 | echo "Could not find VMLINUX=\"$VMLINUX\", exiting" 32 | exit 2 33 | fi 34 | 35 | RUN_BPFTOOL_CHECKS=${RUN_BPFTOOL_CHECKS:-} 36 | if [[ -z "${RUN_BPFTOOL_CHECKS}" \ 37 | && "${KERNEL}" = 'LATEST' \ 38 | && "$KERNEL_TEST" != "sched_ext" ]]; 39 | then 40 | RUN_BPFTOOL_CHECKS=true 41 | fi 42 | 43 | VMTEST_CONFIGS=${VMTEST_CONFIGS:-} 44 | if [[ -n "$VMTEST_CONFIGS" && -f "${VMTEST_CONFIGS}/run-vmtest.env" ]]; 45 | then 46 | source "${VMTEST_CONFIGS:-}/run-vmtest.env" 47 | fi 48 | 49 | VMTEST_SCRIPT=${VMTEST_SCRIPT:-} 50 | if [[ -z "$VMTEST_SCRIPT" && "$KERNEL_TEST" == "sched_ext" ]]; 51 | then 52 | VMTEST_SCRIPT="${GITHUB_ACTION_PATH}/run-scx-selftests.sh" 53 | elif [[ -z "$VMTEST_SCRIPT" ]]; 54 | then 55 | ${GITHUB_ACTION_PATH}/prepare-bpf-selftests.sh 56 | VMTEST_SCRIPT="${GITHUB_ACTION_PATH}/run-bpf-selftests.sh" 57 | fi 58 | 59 | # clear exitstatus file 60 | echo -n > exitstatus 61 | 62 | foldable start bpftool_checks "Running bpftool checks..." 63 | 64 | # bpftool checks are aimed at checking type names, documentation, shell 65 | # completion etc. 
against the current kernel, so only run on LATEST. 66 | if [[ -n "${RUN_BPFTOOL_CHECKS}" ]]; then 67 | bpftool_exitstatus=0 68 | # "&& true" does not change the return code (it is not executed if the 69 | # Python script fails), but it prevents the ERR trap set at the top 70 | # of this file from triggering on failure. 71 | "${KERNEL_ROOT}/tools/testing/selftests/bpf/test_bpftool_synctypes.py" && true 72 | bpftool_exitstatus=$? 73 | if [[ $bpftool_exitstatus -eq 0 ]]; then 74 | echo "bpftool checks passed successfully." 75 | else 76 | echo "bpftool checks returned ${bpftool_exitstatus}." 77 | fi 78 | echo "bpftool:${bpftool_exitstatus}" >> exitstatus 79 | else 80 | echo "bpftool checks skipped." 81 | fi 82 | 83 | foldable end bpftool_checks 84 | 85 | foldable start vmtest "Starting virtual machine..." 86 | 87 | # Tests may be comma-separated. The selftests script expects them space-separated on the CLI. 88 | TEST_RUNNERS=$(echo ${KERNEL_TEST} | tr -s ',' ' ') 89 | vmtest -k "${VMLINUZ}" --kargs "panic=-1 sysctl.vm.panic_on_oom=1" \ 90 | "${GITHUB_ACTION_PATH}/vmtest-init.sh && \ 91 | cd '${GITHUB_WORKSPACE}' && \ 92 | ${VMTEST_SCRIPT} ${TEST_RUNNERS}" 93 | 94 | # fixup traffic monitoring log paths if present 95 | PCAP_DIR=/tmp/tmon_pcap 96 | ${GITHUB_ACTION_PATH}/normalize-paths-for-github.sh "$PCAP_DIR" 97 | 98 | foldable end vmtest 99 | 100 | foldable start collect_status "Collecting exit status" 101 | 102 | exitfile="$(cat exitstatus 2>/dev/null)" 103 | exitstatus="$(echo -e "$exitfile" | awk --field-separator ':' \ 104 | 'BEGIN { s=0 } { if ($2) {s=1} } END { print s }')" 105 | 106 | if [[ "$exitstatus" =~ ^[0-9]+$ ]]; then 107 | printf '\nTests exit status: %s\n' "$exitstatus" >&2 108 | else 109 | printf '\nCould not read tests exit status ("%s")\n' "$exitstatus" >&2 110 | exitstatus=1 111 | fi 112 | 113 | foldable end collect_status 114 | 115 | SUMMARIES=$(find . 
-maxdepth 1 -name "test_*.json") 116 | for summary in ${SUMMARIES}; do 117 | if [ -f "${summary}" ]; then 118 | "${GITHUB_ACTION_PATH}/print_test_summary.py" -a -s "${GITHUB_STEP_SUMMARY}" -j "${summary}" 119 | fi 120 | done 121 | 122 | # Final summary - Don't use a fold, keep it visible 123 | echo -e "\033[1;33mTest Results:\033[0m" 124 | echo -e "$exitfile" | while read -r result; do 125 | testgroup=${result%:*} 126 | status=${result#*:} 127 | # Print final result for each group of tests 128 | if [[ "$status" -eq 0 ]]; then 129 | printf "%20s: \033[1;32mPASS\033[0m\n" "$testgroup" 130 | else 131 | printf "%20s: \033[1;31mFAIL\033[0m (returned %s)\n" "$testgroup" "$status" 132 | fi 133 | done 134 | 135 | exit "$exitstatus" 136 | -------------------------------------------------------------------------------- /run-vmtest/tests/normalize_bpf_test_names/expected-output.txt: -------------------------------------------------------------------------------- 1 | blah_test4 2 | test1 3 | test2/subtest1 4 | test2/subtest2 with a space 5 | test2/subtest3 with a space, and a comma 6 | test2/subtest4 7 | test3/subtest1 with a space, and a comma 8 | test4 9 | test4_blah 10 | test5 11 | test6 12 | test7/subtest123 13 | -------------------------------------------------------------------------------- /run-vmtest/tests/normalize_bpf_test_names/input.txt: -------------------------------------------------------------------------------- 1 | test1 2 | test2/subtest1 3 | test2/subtest2 with a space 4 | test2/subtest3 with a space, and a comma 5 | test2/subtest4 # with comment 6 | test3/subtest1 with a space, and a comma # and a comment 7 | test4 8 | blah_test4 9 | test4_blah 10 | test4/subtest # should be deleted, because test4 exists 11 | test5/subtest1 12 | test5/subtest2 13 | test6 14 | test7/subtest123 15 | test5 # should consume test5/subtest1 and test5/subtest2 16 | 17 | 18 | ## some blank lines, should be removed 19 | 20 | 21 | -------------------------------------------------------------------------------- /run-vmtest/tests/normalize_bpf_test_names/run-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | GITHUB_ACTION_PATH=$(realpath ../..) 
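# Note: the relative path above assumes the test is launched from its own
# directory, which is what run-all-tests.sh does (it cd's into each test
# directory before invoking run-test.sh).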
6 | 7 | rm -f output.txt 8 | $GITHUB_ACTION_PATH/normalize_bpf_test_names.py input.txt > output.txt 2>&1 9 | diff expected-output.txt output.txt 10 | 11 | -------------------------------------------------------------------------------- /run-vmtest/tests/print_test_summary/expected-summary.txt: -------------------------------------------------------------------------------- 1 | # Tests summary 2 | - :heavy_check_mark: Success: 29/23 3 | - :next_track_button: Skipped: 3 4 | - :x: Failed: 28 5 | #10 bpf_cookie 6 | #10/2 bpf_cookie/multi_kprobe_link_api 7 | #10/3 bpf_cookie/multi_kprobe_attach_api 8 | #10/8 bpf_cookie/lsm 9 | #15 bpf_mod_race 10 | #15/1 bpf_mod_race/ksym (used_btfs UAF) 11 | #15/2 bpf_mod_race/kfunc (kfunc_btf_tab UAF) 12 | #36 cgroup_hierarchical_stats 13 | #61 deny_namespace 14 | #61/1 deny_namespace/unpriv_userns_create_no_bpf 15 | #73 fexit_stress 16 | #83 get_func_ip_test 17 | #99 kfunc_dynptr_param 18 | #99/1 kfunc_dynptr_param/dynptr_data_null 19 | #99/4 kfunc_dynptr_param/dynptr_data_null 20 | #100 kprobe_multi_bench_attach 21 | #100/1 kprobe_multi_bench_attach/kernel 22 | #100/2 kprobe_multi_bench_attach/modules 23 | #101 kprobe_multi_test 24 | #101/1 kprobe_multi_test/skel_api 25 | #101/2 kprobe_multi_test/link_api_addrs 26 | #101/3 kprobe_multi_test/link_api_syms 27 | #101/4 kprobe_multi_test/attach_api_pattern 28 | #101/5 kprobe_multi_test/attach_api_addrs 29 | #101/6 kprobe_multi_test/attach_api_syms 30 | #108 libbpf_get_fd_by_id_opts 31 | #114 linked_list 32 | #114/1 linked_list/kptr_missing_lock_push_front 33 | #114/2 linked_list/kptr_missing_lock_push_back 34 | #114/3 linked_list/kptr_missing_lock_pop_front 35 | #114/4 linked_list/kptr_missing_lock_pop_back 36 | #114/5 linked_list/global_missing_lock_push_front 37 | #114/6 linked_list/global_missing_lock_push_back 38 | #114/7 linked_list/global_missing_lock_pop_front 39 | #114/8 linked_list/global_missing_lock_pop_back 40 | #114/9 linked_list/map_missing_lock_push_front 41 | #114/10 linked_list/map_missing_lock_push_back 42 | #114/11 linked_list/map_missing_lock_pop_front 43 | #114/12 linked_list/map_missing_lock_pop_back 44 | #114/13 linked_list/inner_map_missing_lock_push_front 45 | #114/14 linked_list/inner_map_missing_lock_push_back 46 | #114/15 linked_list/inner_map_missing_lock_pop_front 47 | #114/16 linked_list/inner_map_missing_lock_pop_back 48 | #114/17 linked_list/kptr_kptr_incorrect_lock_push_front 49 | #114/18 linked_list/kptr_global_incorrect_lock_push_front 50 | #114/19 linked_list/kptr_map_incorrect_lock_push_front 51 | #114/20 linked_list/kptr_inner_map_incorrect_lock_push_front 52 | #114/21 linked_list/kptr_kptr_incorrect_lock_push_back 53 | #114/22 linked_list/kptr_global_incorrect_lock_push_back 54 | #114/23 linked_list/kptr_map_incorrect_lock_push_back 55 | #114/24 linked_list/kptr_inner_map_incorrect_lock_push_back 56 | #114/25 linked_list/kptr_kptr_incorrect_lock_pop_front 57 | #114/26 linked_list/kptr_global_incorrect_lock_pop_front 58 | #114/27 linked_list/kptr_map_incorrect_lock_pop_front 59 | #114/28 linked_list/kptr_inner_map_incorrect_lock_pop_front 60 | #114/29 linked_list/kptr_kptr_incorrect_lock_pop_back 61 | #114/30 linked_list/kptr_global_incorrect_lock_pop_back 62 | #114/31 linked_list/kptr_map_incorrect_lock_pop_back 63 | #114/32 linked_list/kptr_inner_map_incorrect_lock_pop_back 64 | #114/33 linked_list/global_kptr_incorrect_lock_push_front 65 | #114/34 linked_list/global_global_incorrect_lock_push_front 66 | #114/35 linked_list/global_map_incorrect_lock_push_front 67 | 
#114/36 linked_list/global_inner_map_incorrect_lock_push_front 68 | #114/37 linked_list/global_kptr_incorrect_lock_push_back 69 | #114/38 linked_list/global_global_incorrect_lock_push_back 70 | #114/39 linked_list/global_map_incorrect_lock_push_back 71 | #114/40 linked_list/global_inner_map_incorrect_lock_push_back 72 | #114/41 linked_list/global_kptr_incorrect_lock_pop_front 73 | #114/42 linked_list/global_global_incorrect_lock_pop_front 74 | #114/43 linked_list/global_map_incorrect_lock_pop_front 75 | #114/44 linked_list/global_inner_map_incorrect_lock_pop_front 76 | #114/45 linked_list/global_kptr_incorrect_lock_pop_back 77 | #114/46 linked_list/global_global_incorrect_lock_pop_back 78 | #114/47 linked_list/global_map_incorrect_lock_pop_back 79 | #114/48 linked_list/global_inner_map_incorrect_lock_pop_back 80 | #114/49 linked_list/map_kptr_incorrect_lock_push_front 81 | #114/50 linked_list/map_global_incorrect_lock_push_front 82 | #114/51 linked_list/map_map_incorrect_lock_push_front 83 | #114/52 linked_list/map_inner_map_incorrect_lock_push_front 84 | #114/53 linked_list/map_kptr_incorrect_lock_push_back 85 | #114/54 linked_list/map_global_incorrect_lock_push_back 86 | #114/55 linked_list/map_map_incorrect_lock_push_back 87 | #114/56 linked_list/map_inner_map_incorrect_lock_push_back 88 | #114/57 linked_list/map_kptr_incorrect_lock_pop_front 89 | #114/58 linked_list/map_global_incorrect_lock_pop_front 90 | #114/59 linked_list/map_map_incorrect_lock_pop_front 91 | #114/60 linked_list/map_inner_map_incorrect_lock_pop_front 92 | #114/61 linked_list/map_kptr_incorrect_lock_pop_back 93 | #114/62 linked_list/map_global_incorrect_lock_pop_back 94 | #114/63 linked_list/map_map_incorrect_lock_pop_back 95 | #114/64 linked_list/map_inner_map_incorrect_lock_pop_back 96 | #114/65 linked_list/inner_map_kptr_incorrect_lock_push_front 97 | #114/66 linked_list/inner_map_global_incorrect_lock_push_front 98 | #114/67 linked_list/inner_map_map_incorrect_lock_push_front 99 | #114/68 linked_list/inner_map_inner_map_incorrect_lock_push_front 100 | #114/69 linked_list/inner_map_kptr_incorrect_lock_push_back 101 | #114/70 linked_list/inner_map_global_incorrect_lock_push_back 102 | #114/71 linked_list/inner_map_map_incorrect_lock_push_back 103 | #114/72 linked_list/inner_map_inner_map_incorrect_lock_push_back 104 | #114/73 linked_list/inner_map_kptr_incorrect_lock_pop_front 105 | #114/74 linked_list/inner_map_global_incorrect_lock_pop_front 106 | #114/75 linked_list/inner_map_map_incorrect_lock_pop_front 107 | #114/76 linked_list/inner_map_inner_map_incorrect_lock_pop_front 108 | #114/77 linked_list/inner_map_kptr_incorrect_lock_pop_back 109 | #114/78 linked_list/inner_map_global_incorrect_lock_pop_back 110 | #114/79 linked_list/inner_map_map_incorrect_lock_pop_back 111 | #114/80 linked_list/inner_map_inner_map_incorrect_lock_pop_back 112 | #114/81 linked_list/map_compat_kprobe 113 | #114/82 linked_list/map_compat_kretprobe 114 | #114/83 linked_list/map_compat_tp 115 | #114/84 linked_list/map_compat_perf 116 | #114/85 linked_list/map_compat_raw_tp 117 | #114/86 linked_list/map_compat_raw_tp_w 118 | #114/87 linked_list/obj_type_id_oor 119 | #114/88 linked_list/obj_new_no_composite 120 | #114/89 linked_list/obj_new_no_struct 121 | #114/90 linked_list/obj_drop_non_zero_off 122 | #114/91 linked_list/new_null_ret 123 | #114/92 linked_list/obj_new_acq 124 | #114/93 linked_list/use_after_drop 125 | #114/94 linked_list/ptr_walk_scalar 126 | #114/95 linked_list/direct_read_lock 127 | #114/96 
linked_list/direct_write_lock 128 | #114/97 linked_list/direct_read_head 129 | #114/98 linked_list/direct_write_head 130 | #114/99 linked_list/direct_read_node 131 | #114/100 linked_list/direct_write_node 132 | #114/101 linked_list/use_after_unlock_push_front 133 | #114/102 linked_list/use_after_unlock_push_back 134 | #114/103 linked_list/double_push_front 135 | #114/104 linked_list/double_push_back 136 | #114/105 linked_list/no_node_value_type 137 | #114/106 linked_list/incorrect_value_type 138 | #114/107 linked_list/incorrect_node_var_off 139 | #114/108 linked_list/incorrect_node_off1 140 | #114/109 linked_list/incorrect_node_off2 141 | #114/110 linked_list/no_head_type 142 | #114/111 linked_list/incorrect_head_var_off1 143 | #114/112 linked_list/incorrect_head_var_off2 144 | #114/113 linked_list/incorrect_head_off1 145 | #114/114 linked_list/incorrect_head_off2 146 | #114/115 linked_list/pop_front_off 147 | #114/116 linked_list/pop_back_off 148 | #114/117 linked_list/btf: too many locks 149 | #114/118 linked_list/btf: missing lock 150 | #114/119 linked_list/btf: bad offset 151 | #114/120 linked_list/btf: missing contains: 152 | #114/121 linked_list/btf: missing struct 153 | #114/122 linked_list/btf: missing node 154 | #114/123 linked_list/btf: node incorrect type 155 | #114/124 linked_list/btf: multiple bpf_list_node with name b 156 | #114/125 linked_list/btf: owning | owned AA cycle 157 | #114/126 linked_list/btf: owning | owned ABA cycle 158 | #114/129 linked_list/btf: owning | owned -> owning | owned -> owned 159 | #114/130 linked_list/btf: owning -> owning | owned -> owning | owned -> owned 160 | #114/131 linked_list/btf: list_node and rb_node in same struct 161 | #124 lru_bug 162 | #135 module_attach 163 | #136 module_fentry_shadow 164 | #137 mptcp 165 | #137/1 mptcp/base 166 | #164 rcu_read_lock 167 | #164/1 rcu_read_lock/success 168 | #164/2 rcu_read_lock/rcuptr_acquire 169 | #169 ringbuf 170 | #169/2 ringbuf/ringbuf_map_key 171 | #175 setget_sockopt 172 | #213 task_local_storage 173 | #213/3 task_local_storage/recursion 174 | #222 test_bprm_opts 175 | #224 test_ima 176 | #225 test_local_storage 177 | #226 test_lsm 178 | #231 timer 179 | #239 tracing_struct 180 | #240 trampoline_count 181 | #248 user_ringbuf 182 | #248/2 user_ringbuf/test_user_ringbuf_post_misaligned 183 | #248/3 user_ringbuf/test_user_ringbuf_post_producer_wrong_offset 184 | #248/4 user_ringbuf/test_user_ringbuf_post_larger_than_ringbuf_sz 185 | #248/5 user_ringbuf/test_user_ringbuf_basic 186 | #248/6 user_ringbuf/test_user_ringbuf_sample_full_ring_buffer 187 | #248/7 user_ringbuf/test_user_ringbuf_post_alignment_autoadjust 188 | #248/8 user_ringbuf/test_user_ringbuf_overfill 189 | #248/9 user_ringbuf/test_user_ringbuf_discards_properly_ignored 190 | #248/10 user_ringbuf/test_user_ringbuf_loop 191 | #248/11 user_ringbuf/test_user_ringbuf_msg_protocol 192 | #248/12 user_ringbuf/test_user_ringbuf_blocking_reserve 193 | #279 verify_pkcs7_sig 194 | -------------------------------------------------------------------------------- /run-vmtest/tests/print_test_summary/run-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | GITHUB_ACTION_PATH=$(realpath ../..) 
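# Note: -s summary.txt below captures the markdown step summary, while the
# console output on stdout goes to output.txt; each file is then diffed
# against its expected fixture.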
6 | 7 | rm -f output.txt summary.txt 8 | 9 | $GITHUB_ACTION_PATH/print_test_summary.py -j test_progs.json -s summary.txt > output.txt 2>&1 10 | 11 | diff expected-summary.txt summary.txt 12 | diff expected-output.txt output.txt 13 | 14 | -------------------------------------------------------------------------------- /run-vmtest/tests/run-all-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | for tst in $(find . -name run-test.sh); do 6 | t_dir=$(dirname "$tst") 7 | cd "$t_dir" 8 | ./$(basename "$tst") \ 9 | && echo "$t_dir ok" \ 10 | || (echo "$t_dir failed" && exit 1) 11 | cd - > /dev/null 12 | done 13 | 14 | -------------------------------------------------------------------------------- /run-vmtest/vmtest-init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | /bin/mount bpffs /sys/fs/bpf -t bpf 4 | ip link set lo up 5 | 6 | -------------------------------------------------------------------------------- /setup-build-env/action.yml: -------------------------------------------------------------------------------- 1 | name: 'setup build env' 2 | description: 'Setup build env' 3 | inputs: 4 | pahole: 5 | description: 'pahole rev/tag/branch' 6 | required: true 7 | default: 'master' 8 | pahole-origin: 9 | description: 'pahole repo' 10 | required: true 11 | default: 'https://git.kernel.org/pub/scm/devel/pahole/pahole.git' 12 | llvm-version: 13 | description: 'llvm version' 14 | required: false 15 | default: '16' 16 | gcc-version: 17 | required: false 18 | default: '13' 19 | arch: 20 | description: 'arch' 21 | required: true 22 | runs: 23 | using: "composite" 24 | steps: 25 | - name: Setup environment 26 | shell: bash 27 | env: 28 | GCC_VERSION: ${{ inputs.gcc-version }} 29 | run: | 30 | ${GITHUB_ACTION_PATH}/install_packages.sh 31 | - name: Install clang 32 | shell: bash 33 | env: 34 | LLVM_VERSION: ${{ inputs.llvm-version }} 35 | run: | 36 | ${GITHUB_ACTION_PATH}/install_clang.sh 37 | - name: Install pahole 38 | shell: bash 39 | env: 40 | PAHOLE_BRANCH: ${{ inputs.pahole }} 41 | PAHOLE_ORIGIN: ${{ inputs.pahole-origin }} 42 | run: | 43 | ${GITHUB_ACTION_PATH}/build_pahole.sh 44 | echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-}:/usr/local/lib" >> $GITHUB_ENV 45 | - name: Install cross compilation toolchain 46 | shell: bash 47 | env: 48 | GCC_VERSION: ${{ inputs.gcc-version }} 49 | run: | 50 | ${GITHUB_ACTION_PATH}/install_cross_compilation_toolchain.sh ${{ inputs.arch }} 51 | -------------------------------------------------------------------------------- /setup-build-env/build_pahole.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | PAHOLE_BRANCH=${PAHOLE_BRANCH:-master} 6 | PAHOLE_ORIGIN=${PAHOLE_ORIGIN:-https://git.kernel.org/pub/scm/devel/pahole/pahole.git} 7 | 8 | if [ "$PAHOLE_BRANCH" == "none" ]; then 9 | echo "WARNING: will not build and install pahole, because 'pahole: none' was passed to the action call" 10 | exit 0 11 | fi 12 | 13 | source $(cd $(dirname $0) && pwd)/../helpers.sh 14 | 15 | foldable start build_pahole "Building pahole" 16 | 17 | sudo apt-get update -y 18 | sudo apt-get install -y --no-install-recommends elfutils libelf-dev libdw-dev 19 | 20 | CWD=$(pwd) 21 | 22 | mkdir -p pahole 23 | cd pahole 24 | git init 25 | git remote add origin ${PAHOLE_ORIGIN} 26 | git fetch --depth=1 origin "${PAHOLE_BRANCH}" 27 | git checkout "${PAHOLE_BRANCH}" 28 | 29 | mkdir -p 
build 30 | cd build 31 | cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -D__LIB=lib .. 32 | make -j$((4*$(nproc))) 33 | sudo make install 34 | 35 | export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-}:/usr/local/lib 36 | ldd $(which pahole) 37 | pahole --version 38 | 39 | foldable end build_pahole 40 | -------------------------------------------------------------------------------- /setup-build-env/install_clang.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | THISDIR="$(cd "$(dirname "$0")" && pwd)" 5 | source "${THISDIR}"/../helpers.sh 6 | 7 | foldable start install_clang "Install LLVM ${LLVM_VERSION}" 8 | 9 | curl -O https://apt.llvm.org/llvm.sh 10 | chmod +x llvm.sh 11 | sudo ./llvm.sh ${LLVM_VERSION} 12 | 13 | foldable end install_clang 14 | -------------------------------------------------------------------------------- /setup-build-env/install_cross_compilation_toolchain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Installs the necessary toolchain to cross compile for ${TARGET_ARCH} 3 | set -euo pipefail 4 | 5 | THISDIR="$(cd "$(dirname "$0")" && pwd)" 6 | 7 | source "${THISDIR}"/../helpers.sh 8 | 9 | TARGET_ARCH="$1" 10 | 11 | foldable start install_crosscompile "Installing Cross-Compilation toolchain" 12 | 13 | if [[ "${TARGET_ARCH}" == "$(uname -m)" ]]; then 14 | echo "Nothing to do. Target arch is the same as host arch: ${TARGET_ARCH}" 15 | exit 0 16 | fi 17 | 18 | source /etc/os-release 19 | 20 | DEB_ARCH="$(platform_to_deb_arch "${TARGET_ARCH}")" 21 | DEB_HOST_ARCH="$(dpkg --print-architecture)" 22 | UBUNTU_CODENAME=${VERSION_CODENAME:-noble} 23 | GCC_VERSION=${GCC_VERSION:-14} 24 | 25 | cat <