├── images
│   ├── hook-docker
│   │   ├── go.sum
│   │   ├── go.mod
│   │   ├── main_test.go
│   │   ├── Dockerfile
│   │   └── main.go
│   ├── hook-embedded
│   │   ├── docker
│   │   │   └── .keep
│   │   ├── images
│   │   │   └── .keep
│   │   ├── images_tar
│   │   │   └── .keep
│   │   ├── Dockerfile
│   │   ├── images.txt.example
│   │   ├── images-mount.sh
│   │   └── pull-images.sh
│   ├── hook-bootkit
│   │   ├── Dockerfile
│   │   ├── registry.go
│   │   ├── go.mod
│   │   ├── main.go
│   │   └── registry_test.go
│   ├── hook-containerd
│   │   ├── etc
│   │   │   └── containerd
│   │   │       ├── runtime-config.toml
│   │   │       └── config.toml
│   │   └── Dockerfile
│   ├── hook-udev
│   │   └── Dockerfile
│   ├── hook-runc
│   │   └── Dockerfile
│   └── hook-acpid
│       └── Dockerfile
├── .dockerignore
├── kernel
│   ├── .dockerignore
│   ├── download.sh
│   └── Dockerfile
├── .github
│   ├── CODEOWNERS
│   ├── mergify.yml
│   ├── settings.yml
│   └── dependabot.yml
├── .yamllint
├── .editorconfig
├── files
│   ├── dhcpcd.conf
│   ├── setup-dns.sh
│   ├── dhcp.sh
│   ├── vlan.sh
│   └── static-network.sh
├── .gitignore
├── CONTRIBUTING.md
├── RELEASING.md
├── contrib
│   └── tag-release.sh
├── linuxkit-templates
│   ├── peg.template.yaml
│   └── hook.template.yaml
├── bash
│   ├── cli.sh
│   ├── bootable
│   │   ├── fat32-image.sh
│   │   ├── grub.sh
│   │   └── rpi.sh
│   ├── shellcheck.sh
│   ├── inventory.sh
│   ├── hook-lk-containers.sh
│   ├── kernel
│   │   ├── kernel_armbian.sh
│   │   └── kernel_default.sh
│   ├── bootable-media.sh
│   ├── kernel.sh
│   ├── common.sh
│   ├── docker.sh
│   └── json-matrix.sh
├── docs
│   └── DCO.md
├── .golangci.yml
├── LICENSE
└── README.md

/images/hook-docker/go.sum:
--------------------------------------------------------------------------------
 1 | 
--------------------------------------------------------------------------------
/images/hook-embedded/docker/.keep:
--------------------------------------------------------------------------------
 1 | 
--------------------------------------------------------------------------------
/images/hook-embedded/images/.keep:
--------------------------------------------------------------------------------
 1 | 
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
 1 | *
 2 | !shell.nix
 3 | 
--------------------------------------------------------------------------------
/images/hook-embedded/images_tar/.keep:
--------------------------------------------------------------------------------
 1 | 
--------------------------------------------------------------------------------
/images/hook-docker/go.mod:
--------------------------------------------------------------------------------
 1 | module github.com/tinkerbell/hook/hook-docker
 2 | 
 3 | go 1.18
 4 | 
--------------------------------------------------------------------------------
/kernel/.dockerignore:
--------------------------------------------------------------------------------
 1 | .dockerignore
 2 | .gitignore
 3 | Dockerfile*
 4 | Makefile
 5 | README.md
 6 | !Dockerfile.autogen.helper*
 7 | 
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
 1 | /.github/settings.yml @chrisdoherty4 @jacobweinstock
 2 | /.github/CODEOWNERS @chrisdoherty4 @jacobweinstock
 3 | 
--------------------------------------------------------------------------------
/images/hook-bootkit/Dockerfile:
--------------------------------------------------------------------------------
 1 | FROM golang:1.24-alpine AS dev
 2 | COPY . /src/
 3 | WORKDIR /src
 4 | RUN go mod download
 5 | RUN CGO_ENABLED=0 go build -a -ldflags '-s -w -extldflags "-static"' -o /bootkit
 6 | 
 7 | FROM scratch
 8 | COPY --from=dev /bootkit .
 9 | ENTRYPOINT ["/bootkit"]
10 | 
--------------------------------------------------------------------------------
/images/hook-containerd/etc/containerd/runtime-config.toml:
--------------------------------------------------------------------------------
1 | # This file is used by linuxkit/init to configure the startup of containerd
2 | # https://github.com/linuxkit/linuxkit/blob/master/pkg/init/cmd/service/system_init.go
3 | stdout = "/var/log/containerd.out.log"
4 | stderr = "/var/log/containerd.err.log"
--------------------------------------------------------------------------------
/.yamllint:
--------------------------------------------------------------------------------
 1 | ---
 2 | extends: default
 3 | 
 4 | rules:
 5 |   braces:
 6 |     max-spaces-inside: 1
 7 |   brackets:
 8 |     max-spaces-inside: 1
 9 |   comments: disable
10 |   comments-indentation: disable
11 |   document-start: disable
12 |   line-length:
13 |     level: warning
14 |     max: 160
15 |     allow-non-breakable-inline-mappings: true
16 |   truthy: disable
17 | 
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
 1 | [*]
 2 | charset = utf-8
 3 | end_of_line = lf
 4 | indent_style = tab
 5 | indent_size = 4
 6 | trim_trailing_whitespace = true
 7 | insert_final_newline = true
 8 | 
 9 | [*.sh]
10 | shell_variant = bash
11 | binary_next_line = false
12 | switch_case_indent = true
13 | ij_shell_switch_cases_indented = true
14 | space_redirects = true
15 | keep_padding = false
16 | function_next_line = false
--------------------------------------------------------------------------------
/images/hook-embedded/Dockerfile:
--------------------------------------------------------------------------------
 1 | FROM scratch
 2 | ENTRYPOINT []
 3 | WORKDIR /
 4 | COPY ./images/ /etc/embedded-images/
 5 | # the name 001 is important as it determines the order in which the scripts are executed;
 6 | # we need this mounting to happen before the other init.d scripts run so that
 7 | # the mount points are available to them.
 8 | COPY ./images-mount.sh /etc/init.d/001-images-mount.sh
 9 | CMD []
10 | 
--------------------------------------------------------------------------------
/images/hook-udev/Dockerfile:
--------------------------------------------------------------------------------
 1 | FROM debian:12-slim AS base
 2 | 
 3 | RUN DEBIAN_FRONTEND=noninteractive apt update && \
 4 |     DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends udev && \
 5 |     apt-get clean autoclean && \
 6 |     apt-get autoremove --yes && \
 7 |     rm -rf /var/lib/apt /var/lib/dpkg /var/lib/cache /var/lib/log /var/cache/* /usr/lib/apt/* /usr/share/*
 8 | 
 9 | CMD ["/etc/init.d/udev", "start"]
10 | 
--------------------------------------------------------------------------------
/files/dhcpcd.conf:
--------------------------------------------------------------------------------
1 | # Default values for dhcpcd from linuxkit/dhcpcd:v0.8 with `allowinterfaces en*` removed
2 | # This allows the `--allowinterfaces` flag of dhcpcd to specify the allowed interfaces. 
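# Illustrative example (not part of this file): files/dhcp.sh in this repo invokes
# dhcpcd with something like `--allowinterfaces "e*"`, which is how the interface
# filtering removed from this config is supplied at runtime instead.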
3 | hostname
4 | clientid
5 | persistent
6 | option rapid_commit
7 | option domain_name_servers, domain_name, domain_search, host_name
8 | option classless_static_routes
9 | option ntp_servers
10 | option interface_mtu
11 | require dhcp_server_identifier
12 | slaac private
13 | nodelay
14 | noarp
15 | waitip 4
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | bin/
 2 | dbg/
 3 | dist/
 4 | .env
 5 | !/hook-bootkit/
 6 | !/hook-docker/
 7 | hook*.*.yaml
 8 | !linuxkit-templates
 9 | out/
10 | cache/
11 | bootable/
12 | !bash/bootable
13 | *.swp
14 | .idea
15 | kernel/Dockerfile.autogen.*
16 | images/hook-embedded/images/*
17 | !images/hook-embedded/images/.keep
18 | images/hook-embedded/images.txt
19 | images/hook-embedded/docker/*
20 | !images/hook-embedded/docker/.keep
21 | images/hook-embedded/images_tar/*
22 | !images/hook-embedded/images_tar/.keep
23 | scratch/
24 | 
--------------------------------------------------------------------------------
/images/hook-embedded/images.txt.example:
--------------------------------------------------------------------------------
 1 | # This is an example file. It explains the required format.
 2 | # For the actual file, you must remove all the comments.
 3 | # The format is: the source image, a single space, an optional additional tag for the source image, a single space, and true or false indicating whether to remove the original tag.
 4 | #
 5 | # for example:
 6 | quay.io/tinkerbell/tink-worker:v0.10.0
 7 | quay.io/tinkerbell/tink-worker:v0.10.0 tink-worker:v0.10.0 true
 8 | quay.io/tinkerbell/actions/image2disk embedded/actions/image2disk
 9 | quay.io/tinkerbell/actions/cexec 127.0.0.1/embedded/actions/cexec true
10 | 
--------------------------------------------------------------------------------
/images/hook-embedded/images-mount.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/sh
 2 | 
 3 | exec 3>&1 4>&2
 4 | trap 'exec 2>&4 1>&3' 0 1 2 3
 5 | exec 1>/var/log/embedded-images.log 2>&1
 6 | 
 7 | set -xeuo pipefail
 8 | 
 9 | # We can't have a LinuxKit "init" container dump its file contents to /var,
10 | # because the init process overwrites /var and the contents are lost.
11 | # Instead, we have the init container, with all the Docker images, dump its contents to /etc/embedded-images.
12 | # Then we bind mount /etc/embedded-images to /run/images (/var/run is symlinked to /run) and make sure it's
13 | # read/write. This allows the DinD container to bind mount /var/run/images to /var/lib/docker, so the Docker
14 | # images are available right away and /var/lib/docker is writable. 
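# To illustrate the chain described above (paths as documented, nothing new added):
#   /etc/embedded-images  (populated at image build time)
#     -> bind-mounted read/write at /run/images  (reachable as /var/run/images)
#       -> bind-mounted by the DinD container at /var/lib/docker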
15 | mkdir -p /run/images 16 | mount -o bind,rw /etc/embedded-images/ /run/images 17 | mount -o remount,rw /run/images 18 | -------------------------------------------------------------------------------- /.github/mergify.yml: -------------------------------------------------------------------------------- 1 | merge_queue: 2 | max_parallel_checks: 1 3 | queue_rules: 4 | - name: default 5 | queue_conditions: 6 | - base=main 7 | - or: 8 | - "#approved-reviews-by>=1" 9 | - author=jacobweinstock 10 | - "#changes-requested-reviews-by=0" 11 | - "#review-requested=0" 12 | - check-success=DCO 13 | - check-success~=^Prepare .* 14 | - check-success~=^LinuxKit .* 15 | - check-success~=^Hook .* 16 | - check-success~=^Kernel .* 17 | - label!=do-not-merge 18 | - label=ready-to-merge 19 | merge_conditions: 20 | # Conditions to get out of the queue (= merged) 21 | - check-success=DCO 22 | - check-success~=^Prepare .* 23 | - check-success~=^LinuxKit .* 24 | - check-success~=^Hook .* 25 | - check-success~=^Kernel .* 26 | commit_message_template: | 27 | {{ title }} (#{{ number }}) 28 | 29 | {{ body }} 30 | merge_method: merge 31 | 32 | pull_request_rules: 33 | - name: Automatic merge on approval 34 | conditions: [] 35 | actions: 36 | queue: 37 | -------------------------------------------------------------------------------- /.github/settings.yml: -------------------------------------------------------------------------------- 1 | # Collaborators: give specific users access to this repository. 2 | # See https://docs.github.com/en/rest/reference/repos#add-a-repository-collaborator for available options 3 | collaborators: 4 | # Maintainers, should also be added to the .github/CODEOWNERS file as owners of this settings.yml file. 5 | - username: chrisdoherty4 6 | permission: maintain 7 | - username: jacobweinstock 8 | permission: maintain 9 | # Approvers 10 | # Reviewers 11 | 12 | # Note: `permission` is only valid on organization-owned repositories. 13 | # The permission to grant the collaborator. Can be one of: 14 | # * `pull` - can pull, but not push to or administer this repository. 15 | # * `push` - can pull and push, but not administer this repository. 16 | # * `admin` - can pull, push and administer this repository. 17 | # * `maintain` - Recommended for project managers who need to manage the repository without access to sensitive or destructive actions. 18 | # * `triage` - Recommended for contributors who need to proactively manage issues and pull requests without write access. 19 | -------------------------------------------------------------------------------- /images/hook-bootkit/registry.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/distribution/reference" 5 | "golang.org/x/text/unicode/norm" 6 | ) 7 | 8 | // useAuth determines if authentication should be used for pulling the given image. 9 | // It compares the registry hostname extracted from the image reference against the 10 | // configured registry hostname to ensure exact matching and prevent security vulnerabilities 11 | // from substring matching attacks and homograph attacks using Unicode normalization. 
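// For example (illustrative values, not taken from the codebase):
//
//	useAuth("10.0.0.1:5000/tink-worker:latest", "10.0.0.1:5000")               // true: hosts match exactly
//	useAuth("registry.example.com.attacker.io/img:v1", "registry.example.com") // false: exact host match is required
//	useAuth("docker.io/library/alpine:3.22", "")                               // false: no registry host configured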
12 | func useAuth(imageRef, registryHost string) bool {
13 | 	if registryHost == "" {
14 | 		return false
15 | 	}
16 | 
17 | 	pnn, err := reference.ParseNormalizedNamed(imageRef)
18 | 	if err != nil {
19 | 		return false
20 | 	}
21 | 	imageHost := reference.Domain(pnn)
22 | 
23 | 	// Apply Unicode normalization to prevent homograph attacks
24 | 	// Use NFC (Canonical Decomposition followed by Canonical Composition)
25 | 	// to ensure consistent Unicode representation
26 | 	imageH := norm.NFC.String(imageHost)
27 | 	registryH := norm.NFC.String(registryHost)
28 | 
29 | 	return imageH == registryH
30 | }
31 | 
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
 1 | ## Hello Contributors!
 2 | 
 3 | Thanks for your interest!
 4 | We're so glad you're here.
 5 | 
 6 | ### Important Resources
 7 | 
 8 | #### bugs: [https://github.com/tinkerbell/hook/issues](https://github.com/tinkerbell/hook/issues)
 9 | 
10 | ### Code of Conduct
11 | 
12 | Please read and understand the code of conduct found [here](https://github.com/tinkerbell/.github/blob/main/CODE_OF_CONDUCT.md).
13 | 
14 | ### DCO Sign Off
15 | 
16 | Please read and understand the DCO found [here](docs/DCO.md).
17 | 
18 | ### Environment Details
19 | 
20 | Building is handled by a bash script; see [build.sh](build.sh) for details.
21 | 
22 | ### How to Submit Change Requests
23 | 
24 | Please submit change requests and/or features via [Issues](https://github.com/tinkerbell/hook/issues).
25 | There's no guarantee it'll be changed, but you never know until you try.
26 | We'll try to add comments as soon as possible, though.
27 | 
28 | ### How to Report a Bug
29 | 
30 | Bugs are problems in code, in the functionality of an application, or in its UI design; you can submit them through [Issues](https://github.com/tinkerbell/hook/issues).
31 | 
32 | ## Code Style Guides
--------------------------------------------------------------------------------
/images/hook-docker/main_test.go:
--------------------------------------------------------------------------------
 1 | package main
 2 | 
 3 | import (
 4 | 	"bytes"
 5 | 	"errors"
 6 | 	"os"
 7 | 	"testing"
 8 | )
 9 | 
10 | func TestWriteToDisk(t *testing.T) {
11 | 	tests := map[string]struct {
12 | 		cfg     dockerConfig
13 | 		want    []byte
14 | 		wantErr error
15 | 	}{
16 | 		"success": {cfg: dockerConfig{Debug: false, LogDriver: "json-file"}, want: []byte(`{"debug":false,"log-driver":"json-file"}`)},
17 | 		"success - empty struct": {cfg: dockerConfig{}, want: []byte(`{"debug":false}`)},
18 | 	}
19 | 	for name, tt := range tests {
20 | 		t.Run(name, func(t *testing.T) {
21 | 			// Create a temporary directory
22 | 			dir, err := os.MkdirTemp("", "hook-docker")
23 | 			if err != nil {
24 | 				t.Fatal(err)
25 | 			}
26 | 			defer os.RemoveAll(dir)
27 | 			loc := dir + "/daemon.json" // write inside the temp dir so RemoveAll cleans it up
28 | 
29 | 			err = tt.cfg.writeToDisk(loc)
30 | 			if !errors.Is(err, tt.wantErr) {
31 | 				t.Fatalf("got err %v, want %v", err, tt.wantErr)
32 | 			}
33 | 
34 | 			if tt.wantErr == nil {
35 | 				got, err := os.ReadFile(loc)
36 | 				if err != nil {
37 | 					t.Fatal(err)
38 | 				}
39 | 
40 | 				if !bytes.Equal(got, tt.want) {
41 | 					t.Fatalf("\ngot:\n %s\nwant:\n %s", got, tt.want)
42 | 				}
43 | 			}
44 | 		})
45 | 	}
46 | }
47 | 
--------------------------------------------------------------------------------
/RELEASING.md:
--------------------------------------------------------------------------------
 1 | # Releasing
 2 | 
 3 | For version v0.x.y:
 4 | 
 5 | ## Prerequisites
 6 | 
 7 | 1. Update the `VERSION`, `VERSION_ID`, and `PRETTY_NAME` values in the `hook.yaml` file under `files -> "- path: etc/os-release"` to use `0.x.y`
 8 | 
 9 | ```bash
10 | make update-os-release NEW_VERSION=0.x.y
11 | ```
12 | 
13 | 1. Commit, push, PR, and merge the version changes
14 | 
15 | ```bash
16 | git commit -sm "Update version to v0.x.y" hook.yaml
17 | ```
18 | 
19 | ## Release Process
20 | 
21 | 1. Create the annotated tag
22 | 
23 | > NOTE: To use your GPG signature when pushing the tag, use `SIGN_TAG=1 ./contrib/tag-release.sh v0.x.y` instead
24 | 
25 | ```bash
26 | ./contrib/tag-release.sh v0.x.y
27 | ```
28 | 
29 | 1. Push the tag to the GitHub repository. This will automatically trigger a [GitHub Action](https://github.com/tinkerbell/hook/actions) to create a release.
30 | 
31 | > NOTE: `origin` should be the name of the remote pointing to `github.com/tinkerbell/hook`
32 | 
33 | ```bash
34 | git push origin v0.x.y
35 | ```
36 | 
37 | 1. Review the release on GitHub.
38 | 
39 | ### Permissions
40 | 
41 | Releasing requires a particular set of permissions.
42 | 
43 | - Tag push access to the GitHub repository
44 | 
--------------------------------------------------------------------------------
/images/hook-runc/Dockerfile:
--------------------------------------------------------------------------------
 1 | # Dockerfile to build linuxkit/runc for linuxkit
 2 | FROM alpine:3.22 AS alpine
 3 | RUN \
 4 |     apk add \
 5 |     bash \
 6 |     gcc \
 7 |     git \
 8 |     go=1.24.4-r0 \
 9 |     libc-dev \
10 |     libseccomp-dev \
11 |     libseccomp-static \
12 |     linux-headers \
13 |     make \
14 |     && true
15 | ENV GOPATH=/go PATH=$PATH:/go/bin GO111MODULE=off
16 | ENV RUNC_COMMIT=v1.3.0
17 | RUN mkdir -p $GOPATH/src/github.com/opencontainers && \
18 |     cd $GOPATH/src/github.com/opencontainers && \
19 |     git clone https://github.com/opencontainers/runc.git
20 | WORKDIR $GOPATH/src/github.com/opencontainers/runc
21 | RUN git checkout $RUNC_COMMIT
22 | RUN make static BUILDTAGS="seccomp" EXTRA_FLAGS="-buildmode pie" EXTRA_LDFLAGS="-s -w -extldflags \\\"-fno-PIC -static\\\""
23 | RUN cp runc /usr/bin/
24 | 
25 | RUN mkdir -p /etc/init.d && ln -s /usr/bin/service /etc/init.d/010-onboot
26 | RUN mkdir -p /etc/shutdown.d && ln -s /usr/bin/service /etc/shutdown.d/010-onshutdown
27 | 
28 | FROM scratch
29 | WORKDIR /
30 | ENTRYPOINT []
31 | COPY --from=alpine /usr/bin/runc /usr/bin/
32 | COPY --from=alpine /etc/init.d/ /etc/init.d/
33 | COPY --from=alpine /etc/shutdown.d/ /etc/shutdown.d/
34 | COPY --from=alpine /etc/apk /etc/apk/
35 | COPY --from=alpine /lib/apk /lib/apk/
36 | 
--------------------------------------------------------------------------------
/images/hook-docker/Dockerfile:
--------------------------------------------------------------------------------
 1 | FROM golang:1.24-alpine AS dev
 2 | COPY . 
/src/ 3 | WORKDIR /src 4 | RUN CGO_ENABLED=0 go build -a -ldflags '-s -w -extldflags "-static"' -o /hook-docker 5 | 6 | FROM docker:28.2.2-dind AS docker 7 | RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/community" >> /etc/apk/repositories 8 | RUN apk update && apk add kexec-tools binutils && rm -rf /var/cache/apk/* 9 | # Won't use docker-buildx nor docker-compose 10 | RUN rm -rf /usr/local/libexec/docker/cli-plugins 11 | # Strip some large binaries 12 | RUN strip /usr/local/bin/docker /usr/local/bin/dockerd /usr/local/bin/docker-proxy /usr/local/bin/runc /usr/local/bin/containerd /usr/local/bin/containerd-shim-runc-v2 13 | # Purge binutils package after stripping 14 | RUN apk del binutils 15 | 16 | FROM alpine:3.22 17 | COPY --from=dev /hook-docker . 18 | COPY --from=docker /usr/local/bin/docker-init /usr/local/bin/docker /usr/local/bin/dockerd /usr/local/bin/docker-proxy /usr/local/bin/runc /usr/local/bin/ 19 | COPY --from=docker /usr/local/bin/containerd /usr/local/bin/containerd-shim-runc-v2 /usr/local/bin/ 20 | COPY --from=docker /usr/local/bin/dockerd-entrypoint.sh /usr/local/bin/dockerd-entrypoint.sh 21 | 22 | RUN apk add --no-cache ca-certificates iptables openssl 23 | 24 | ENTRYPOINT ["/hook-docker"] 25 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | day: "monday" 8 | time: "04:39" 9 | timezone: "America/New_York" 10 | reviewers: 11 | - chrisdoherty4 12 | - jacobweinstock 13 | open-pull-requests-limit: 10 14 | 15 | - package-ecosystem: "gomod" 16 | directory: "/" 17 | schedule: 18 | interval: "weekly" 19 | day: "monday" 20 | time: "03:52" 21 | timezone: "America/New_York" 22 | reviewers: 23 | - chrisdoherty4 24 | - jacobweinstock 25 | open-pull-requests-limit: 10 26 | 27 | - package-ecosystem: "gomod" 28 | directory: "/" 29 | schedule: 30 | interval: "weekly" 31 | day: "thursday" 32 | time: "03:52" 33 | timezone: "America/New_York" 34 | reviewers: 35 | - chrisdoherty4 36 | - jacobweinstock 37 | open-pull-requests-limit: 10 38 | 39 | - package-ecosystem: "docker" 40 | directory: "/" 41 | schedule: 42 | interval: "weekly" 43 | day: "monday" 44 | time: "04:22" 45 | timezone: "America/New_York" 46 | reviewers: 47 | - chrisdoherty4 48 | - jacobweinstock 49 | open-pull-requests-limit: 10 50 | -------------------------------------------------------------------------------- /contrib/tag-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit -o nounset -o pipefail 4 | 5 | if [ -z "${1-}" ]; then 6 | echo "Must specify new tag" 7 | exit 1 8 | fi 9 | 10 | new_tag=${1-} 11 | [[ $new_tag =~ ^v[0-9]*\.[0-9]*\.[0-9]*$ ]] || ( 12 | echo "Tag must be in the form of vX.Y.Z" 13 | exit 1 14 | ) 15 | 16 | if [[ $(git symbolic-ref HEAD) != refs/heads/main ]] && [[ -z ${ALLOW_NON_MAIN-} ]]; then 17 | echo "Must be on main branch" >&2 18 | exit 1 19 | fi 20 | if [[ $(git describe --dirty) != $(git describe) ]]; then 21 | echo "Repo must be in a clean state" >&2 22 | exit 1 23 | fi 24 | 25 | git fetch --all 26 | 27 | last_tag=$(git describe --abbrev=0) 28 | last_tag_commit=$(git rev-list -n1 "$last_tag") 29 | last_specific_tag=$(git tag --contains="$last_tag_commit" | grep -E "^v[0-9]*\.[0-9]*\.[0-9]*$" | tail -n 1) 30 | 
last_specific_tag_commit=$(git rev-list -n1 "$last_specific_tag") 31 | if [[ $last_specific_tag_commit == $(git rev-list -n1 HEAD) ]]; then 32 | echo "No commits since last tag" >&2 33 | exit 1 34 | fi 35 | 36 | if [[ -n ${SIGN_TAG-} ]]; then 37 | git tag -s -m "${new_tag}" "${new_tag}" &>/dev/null && echo "created signed tag ${new_tag}" >&2 && exit 38 | else 39 | git tag -a -m "${new_tag}" "${new_tag}" &>/dev/null && echo "created annotated tag ${new_tag}" >&2 && exit 40 | fi 41 | -------------------------------------------------------------------------------- /files/setup-dns.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # This script is intended to be run on the HookOS/Linuxkit host so it must use /bin/sh. 4 | # No other shells are available on the host. 5 | 6 | # modified from alpine setup-dns 7 | # apk add alpine-conf 8 | 9 | exec 3>&1 4>&2 10 | trap 'exec 2>&4 1>&3' 0 1 2 3 11 | exec 1>/var/log/setup-dns.log 2>&1 12 | 13 | while getopts "d:n:h" opt; do 14 | case $opt in 15 | d) DOMAINNAME="$OPTARG";; 16 | n) NAMESERVERS="$OPTARG";; 17 | esac 18 | done 19 | shift $(($OPTIND - 1)) 20 | 21 | 22 | conf="${ROOT}resolv.conf" 23 | 24 | if [ -f "$conf" ] ; then 25 | domain=$(awk '/^domain/ {print $2}' $conf) 26 | dns=$(awk '/^nameserver/ {printf "%s ",$2}' $conf) 27 | elif fqdn="$(get_fqdn)" && [ -n "$fqdn" ]; then 28 | domain="$fqdn" 29 | fi 30 | 31 | if [ -n "$DOMAINNAME" ]; then 32 | domain="$DOMAINNAME" 33 | fi 34 | 35 | if [ -n "$NAMESERVERS" ] || [ $# -gt 0 ];then 36 | dns="$NAMESERVERS" 37 | fi 38 | 39 | if [ -n "$domain" ]; then 40 | mkdir -p "${conf%/*}" 41 | echo "search $domain" > $conf 42 | fi 43 | 44 | if [ -n "$dns" ] || [ $# -gt 0 ] && [ -f "$conf" ]; then 45 | sed -i -e '/^nameserver/d' $conf 46 | fi 47 | for i in $dns $@; do 48 | mkdir -p "${conf%/*}" 49 | echo "nameserver $i" >> $conf 50 | done 51 | -------------------------------------------------------------------------------- /files/dhcp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # This script will run the dhcp client. If `vlan_id=` in `/proc/cmdline` has a value, it will run the dhcp client only on the 4 | # VLAN interface. 5 | # This script accepts an input parameter of true or false. 6 | # true: run the dhcp client with the one shot option 7 | # false: run the dhcp client as a service 8 | set -x 9 | 10 | run_dhcp_client() { 11 | one_shot="$1" 12 | al="e*" 13 | 14 | vlan_id=$(sed -n 's/.* vlan_id=\([0-9]*\).*/\1/p' /proc/cmdline) 15 | if [ -n "$vlan_id" ]; then 16 | al="e*.*" 17 | fi 18 | 19 | if [ "$one_shot" = "true" ]; then 20 | # always return true for the one shot dhcp call so it doesn't block Hook from starting up. 21 | # the --nobackground is not used here because when it is used, dhcpcd doesn't honor the --timeout option 22 | # and waits indefinitely for a response. For one shot, we want to timeout after the 30 second default. 23 | /sbin/dhcpcd -f /dhcpcd.conf --allowinterfaces "${al}" -1 || true 24 | # use busybox's ntpd to set the time after getting an IP address; don't fail 25 | echo 'sleep 1 second before calling ntpd' && sleep 1 26 | /usr/sbin/ntpd -n -q -dd -p pool.ntp.org || true 27 | else 28 | /sbin/dhcpcd --nobackground -f /dhcpcd.conf --allowinterfaces "${al}" 29 | fi 30 | 31 | } 32 | 33 | if [ -f /run/network/interfaces ] || [ -f /var/run/network/interfaces ]; then 34 | echo "the /run/network/interfaces file or /var/run/network/interfaces file exists, so static IP's are in use. 
not running the dhcp client." 35 | exit 0 36 | fi 37 | 38 | # we always return true so that a failure here doesn't block the next container service from starting. Ideally, we always 39 | # want the getty service to start so we can debug failures. 40 | run_dhcp_client "$1" || true 41 | -------------------------------------------------------------------------------- /linuxkit-templates/peg.template.yaml: -------------------------------------------------------------------------------- 1 | # a 'peg' is not a 'hook', instead, just a bare LinuxKit image. It does use Hook's kernel though. 2 | # this exists so we can experiment with newer LK versions and the dind interaction. 3 | # EXCLUSIVELY for development, this should never be released. 4 | kernel: 5 | image: "${HOOK_KERNEL_IMAGE}" 6 | cmdline: irrelevant=here 7 | #ucode: intel-ucode.cpio 8 | init: 9 | - linuxkit/init:45a1ad5919f0b6acf0f0cf730e9434abfae11fe6 10 | - linuxkit/runc:6062483d748609d505f2bcde4e52ee64a3329f5f 11 | - linuxkit/containerd:e7a92d9f3282039eac5fb1b07cac2b8664cbf0ad 12 | #- linuxkit/ca-certificates:5aaa343474e5ac3ac01f8b917e82efb1063d80ff 13 | #- linuxkit/firmware:8def159583422181ddee3704f7024ecb9c02d348 14 | onboot: 15 | - name: rngd1 16 | image: linuxkit/rngd:cdb919e4aee49fed0bf6075f0a104037cba83c39 17 | command: [ "/sbin/rngd", "-1" ] 18 | - name: sysctl 19 | image: linuxkit/sysctl:5a374e4bf3e5a7deeacff6571d0f30f7ea8f56db 20 | - name: modprobe 21 | image: linuxkit/modprobe:ab5ac4d5e7e7a5f2d103764850f7846b69230676 22 | command: [ "modprobe", "cdc_ncm" ] # for usb ethernet dongles 23 | - name: dhcpcd 24 | image: linuxkit/dhcpcd:e9e3580f2de00e73e7b316a007186d22fea056ee 25 | command: [ "/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1" ] 26 | services: 27 | #- name: rngd 28 | # image: linuxkit/rngd:cdb919e4aee49fed0bf6075f0a104037cba83c39 29 | - name: getty 30 | image: linuxkit/getty:5d86a2ce2d890c14ab66b13638dcadf74f29218b 31 | capabilities: 32 | - all 33 | binds.add: 34 | - /:/host2 35 | env: 36 | - INSECURE=true 37 | - name: sshd 38 | image: linuxkit/sshd:75f399fbfb6455dfccd4cb30543d0b4b494d28c8 39 | binds.add: 40 | - /root/.ssh:/root/.ssh 41 | files: 42 | - path: root/.ssh/authorized_keys 43 | source: ~/.ssh/id_rsa.pub 44 | mode: "0600" 45 | optional: true -------------------------------------------------------------------------------- /images/hook-bootkit/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/tinkerbell/hook/hook-bootkit 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.24.1 6 | 7 | require ( 8 | github.com/cenkalti/backoff/v4 v4.3.0 9 | github.com/distribution/reference v0.6.0 10 | github.com/docker/docker v28.3.2+incompatible 11 | github.com/go-logr/logr v1.4.3 12 | github.com/go-logr/zerologr v1.2.3 13 | github.com/rs/zerolog v1.34.0 14 | golang.org/x/text v0.27.0 15 | ) 16 | 17 | require ( 18 | github.com/Microsoft/go-winio v0.6.2 // indirect 19 | github.com/containerd/errdefs v1.0.0 // indirect 20 | github.com/containerd/errdefs/pkg v0.3.0 // indirect 21 | github.com/containerd/log v0.1.0 // indirect 22 | github.com/docker/go-connections v0.5.0 // indirect 23 | github.com/docker/go-units v0.5.0 // indirect 24 | github.com/felixge/httpsnoop v1.0.4 // indirect 25 | github.com/go-logr/stdr v1.2.2 // indirect 26 | github.com/gogo/protobuf v1.3.2 // indirect 27 | github.com/mattn/go-colorable v0.1.14 // indirect 28 | github.com/mattn/go-isatty v0.0.20 // indirect 29 | github.com/moby/docker-image-spec v1.3.1 // indirect 30 | 
github.com/moby/sys/atomicwriter v0.1.0 // indirect 31 | github.com/moby/term v0.5.2 // indirect 32 | github.com/morikuni/aec v1.0.0 // indirect 33 | github.com/opencontainers/go-digest v1.0.0 // indirect 34 | github.com/opencontainers/image-spec v1.1.1 // indirect 35 | github.com/pkg/errors v0.9.1 // indirect 36 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 37 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect 38 | go.opentelemetry.io/otel v1.37.0 // indirect 39 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.37.0 // indirect 40 | go.opentelemetry.io/otel/metric v1.37.0 // indirect 41 | go.opentelemetry.io/otel/trace v1.37.0 // indirect 42 | golang.org/x/net v0.42.0 // indirect 43 | golang.org/x/sys v0.34.0 // indirect 44 | golang.org/x/time v0.12.0 // indirect 45 | gotest.tools/v3 v3.5.2 // indirect 46 | ) 47 | -------------------------------------------------------------------------------- /kernel/download.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -xeuo pipefail 4 | 5 | # This script downloads the Linux kernel source code and verifies it using GPG. 6 | 7 | function verify() { 8 | local kernel_sha256_sums="$1" 9 | local kernel_version="$2" 10 | local kernel_source="$3" 11 | local kernel_pgp2_sign="$4" 12 | 13 | curl -fsSL ${kernel_sha256_sums} -o sha256sums.asc 14 | [ -f linux-${kernel_version}.tar.xz ] || curl -fsSLO ${kernel_source} 15 | gpg2 -q --import keys.asc 16 | gpg2 --verify sha256sums.asc 17 | KERNEL_SHA256=$(grep linux-${kernel_version}.tar.xz sha256sums.asc | cut -d ' ' -f 1) 18 | echo "${KERNEL_SHA256} linux-${kernel_version}.tar.xz" | sha256sum -c - 19 | if [ $? -ne 0 ]; then 20 | return 1 21 | fi 22 | # Verify the signature of the kernel source 23 | [ -f linux-${kernel_version}.tar ] || xz -T 0 -d linux-${kernel_version}.tar.xz 24 | curl -fsSLO ${kernel_pgp2_sign} 25 | gpg2 --verify linux-${kernel_version}.tar.sign linux-${kernel_version}.tar 26 | if [ $? -ne 0 ]; then 27 | return 1 28 | fi 29 | } 30 | 31 | function extract() { 32 | local kernel_version="$1" 33 | 34 | if [ -d linux-${kernel_version} ]; then 35 | echo "Directory linux-${kernel_version} already exists, skipping extraction." 36 | else 37 | tar --absolute-names -xf linux-${kernel_version}.tar 38 | rm -rf ./linux 39 | mv ./linux-${kernel_version} ./linux 40 | fi 41 | } 42 | 43 | # Main script execution 44 | function main() { 45 | local kernel_version="$1" 46 | local kernel_source="$2" 47 | local kernel_sha256_sums="$3" 48 | local kernel_pgp2_sign="$4" 49 | local kernel_source_backup="$5" 50 | local kernel_sha256_sums_backup="$6" 51 | local kernel_pgp2_sign_backup="$7" 52 | 53 | verify "${kernel_sha256_sums}" "${kernel_version}" "${kernel_source}" "${kernel_pgp2_sign}" || \ 54 | verify "${kernel_sha256_sums_backup}" "${kernel_version}" "${kernel_source_backup}" "${kernel_pgp2_sign_backup}" 55 | 56 | extract "${kernel_version}" 57 | } 58 | 59 | main "$@" -------------------------------------------------------------------------------- /bash/cli.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function parse_command_line_arguments() { 4 | declare -A -g CLI_PARSED_CMDLINE_PARAMS=() 5 | declare -a -g CLI_NON_PARAM_ARGS=() 6 | 7 | # loop over the arguments & parse them out 8 | local arg 9 | for arg in "${@}"; do 10 | if [[ "${arg}" == *=* ]]; then # contains an equal sign. it's a param. 
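# For example (illustrative, hypothetical parameter): an argument like
#   LINUXKIT_ISO_EFI=yes
# is split below into param_name='LINUXKIT_ISO_EFI' and param_value='yes',
# validated, and then exported into the environment by the loop further down.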
11 | 			local param_name param_value param_value_desc
12 | 			param_name=${arg%%=*}
13 | 			param_value=${arg#*=} # strip through the first '=' only, so values may themselves contain '='
14 | 			param_value_desc="${param_value:-(empty)}"
15 | 			# Sanity check for the param name; it must be a valid bash variable name.
16 | 			if [[ "${param_name}" =~ ^[a-zA-Z_][a-zA-Z0-9_]*$ ]]; then
17 | 				CLI_PARSED_CMDLINE_PARAMS["${param_name}"]="${param_value}" # For current run.
18 | 				# log debug "Command line: parsed parameter '$param_name' to" "${param_value_desc}"
19 | 			else
20 | 				log error "Invalid command line parameter '${param_name}=${param_value_desc}'"
21 | 				exit 8
22 | 			fi
23 | 		elif [[ "x${arg}x" != "xx" ]]; then # not a param, not empty, store it in the non-param array for later usage
24 | 			local non_param_value="${arg}"
25 | 			local non_param_value_desc="${non_param_value:-(empty)}"
26 | 			log debug "Command line: non-param argument" "'${non_param_value_desc}'"
27 | 			CLI_NON_PARAM_ARGS+=("${non_param_value}")
28 | 		fi
29 | 	done
30 | 
31 | 	# Loop over the dictionary and apply the values to the environment.
32 | 	for param_name in "${!CLI_PARSED_CMDLINE_PARAMS[@]}"; do
33 | 		local param_value param_value_desc
34 | 		# get the current value from the environment
35 | 		current_env_value_desc="${!param_name-(unset)}"
36 | 		current_env_value_desc="${current_env_value_desc:-(empty)}"
37 | 		# get the new value from the dictionary
38 | 		param_value="${CLI_PARSED_CMDLINE_PARAMS[${param_name}]}"
39 | 		param_value_desc="${param_value:-(empty)}"
40 | 
41 | 		log info "Applying cmdline param to environment" "'$param_name': '${current_env_value_desc}' --> '${param_value_desc}'"
42 | 		# use `declare -g` to make it global, and -x to export it, we're in a function.
43 | 		eval "declare -g -x $param_name=\"$param_value\""
44 | 	done
45 | 
46 | 	return 0
47 | }
--------------------------------------------------------------------------------
/images/hook-acpid/Dockerfile:
--------------------------------------------------------------------------------
 1 | # We are building a static acpid binary from source because the linuxkit/acpid image
 2 | # does not work
 3 | FROM alpine:3.22 AS alpine
 4 | 
 5 | # Install build dependencies
 6 | RUN apk add --no-cache \
 7 |     gcc \
 8 |     musl-dev \
 9 |     make \
10 |     git \
11 |     autoconf \
12 |     automake \
13 |     libtool \
14 |     linux-headers \
15 |     wget \
16 |     xz \
17 |     patch \
18 |     busybox-static \
19 |     # Install the dynamically built acpid so that we can get the handler script and event files
20 |     acpid
21 | 
22 | # Download and build acpid
23 | ENV ACPID_VERSION=2.0.34
24 | RUN wget https://sourceforge.net/projects/acpid2/files/acpid-${ACPID_VERSION}.tar.xz/download -O acpid-${ACPID_VERSION}.tar.xz && \
25 |     tar -xf acpid-${ACPID_VERSION}.tar.xz
26 | 
27 | WORKDIR /acpid-${ACPID_VERSION}
28 | 
29 | # Fix musl compatibility - replace stat64/fstat64 with stat/fstat
30 | RUN sed -i 's/struct stat64/struct stat/g' sock.c && \
31 |     sed -i 's/fstat64/fstat/g' sock.c
32 | 
33 | # Build static binary with musl-compatible flags
34 | RUN ./configure \
35 |     --enable-static \
36 |     --disable-shared \
37 |     CFLAGS="-D_GNU_SOURCE -Os" \
38 |     LDFLAGS="-static" && \
39 |     make && \
40 |     strip acpid && \
41 |     cp acpid /usr/bin/
42 | 
43 | # Verify it's statically linked
44 | RUN ldd /usr/bin/acpid 2>&1 | grep -q "not a dynamic executable" || echo "Warning: not statically linked"
45 | 
46 | # Copy BusyBox static binary and create poweroff symlink
47 | RUN mkdir -p /stage/bin && cp /bin/busybox.static /bin/busybox && \
48 |     ln -s /bin/busybox /stage/bin/poweroff && \
49 |     ln -s /bin/busybox /stage/bin/logger && \
50 |     # This is needed for the acpid handler scripts (/etc/acpi/handler.sh, /etc/acpi/events/anything) to work
51 |     ln -s /bin/busybox /stage/bin/sh
52 | 
53 | FROM scratch
54 | WORKDIR /
55 | ENTRYPOINT []
56 | COPY --from=alpine /usr/bin/acpid /usr/bin/
57 | COPY --from=alpine /etc/acpi/events/anything /etc/acpi/events/anything
58 | COPY --from=alpine /etc/acpi/handler.sh /etc/acpi/handler.sh
59 | COPY --from=alpine /bin/busybox /bin/busybox
60 | COPY --from=alpine /stage/ /
61 | CMD ["/usr/bin/acpid", "-f", "-d"]
--------------------------------------------------------------------------------
/docs/DCO.md:
--------------------------------------------------------------------------------
 1 | # DCO Sign Off
 2 | 
 3 | All authors to the project retain copyright to their work. However, to ensure
 4 | that they are only submitting work that they have rights to, we are requiring
 5 | everyone to acknowledge this by signing their work.
 6 | 
 7 | Since this signature indicates your rights to the contribution and
 8 | certifies the statements below, it must contain your real name and
 9 | email address. Various forms of noreply email address must not be used.
10 | 
11 | Any copyright notices in this repository should specify the authors as "The
12 | project authors".
13 | 
14 | To sign your work, just add a line like this at the end of your commit message:
15 | 
16 | ```text
17 | Signed-off-by: Jess Owens <jess.owens@example.com>
18 | ```
19 | 
20 | This can easily be done with the `--signoff` option to `git commit`.
21 | 
22 | By doing this you state that you can certify the following (from [https://developercertificate.org/][1]):
23 | 
24 | ```text
25 | Developer Certificate of Origin
26 | Version 1.1
27 | 
28 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
29 | 1 Letterman Drive
30 | Suite D4700
31 | San Francisco, CA, 94129
32 | 
33 | Everyone is permitted to copy and distribute verbatim copies of this
34 | license document, but changing it is not allowed.
35 | 
36 | 
37 | Developer's Certificate of Origin 1.1
38 | 
39 | By making a contribution to this project, I certify that:
40 | 
41 | (a) The contribution was created in whole or in part by me and I
42 |     have the right to submit it under the open source license
43 |     indicated in the file; or
44 | 
45 | (b) The contribution is based upon previous work that, to the best
46 |     of my knowledge, is covered under an appropriate open source
47 |     license and I have the right under that license to submit that
48 |     work with modifications, whether created in whole or in part
49 |     by me, under the same open source license (unless I am
50 |     permitted to submit under a different license), as indicated
51 |     in the file; or
52 | 
53 | (c) The contribution was provided directly to me by some other
54 |     person who certified (a), (b) or (c) and I have not modified
55 |     it.
56 | 
57 | (d) I understand and agree that this project and the contribution
58 |     are public and that a record of the contribution (including all
59 |     personal information I submit with it, including my sign-off) is
60 |     maintained indefinitely and may be redistributed consistent with
61 |     this project or the open source license(s) involved. 
62 | ``` 63 | -------------------------------------------------------------------------------- /images/hook-containerd/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.22 AS builder 2 | 3 | ARG TARGETPLATFORM 4 | 5 | # checkout and compile containerd 6 | # Update `FROM` in `pkg/containerd/Dockerfile`, `pkg/init/Dockerfile` and 7 | # `test/pkg/containerd/Dockerfile` when changing this. 8 | ENV CONTAINERD_REPO=https://github.com/containerd/containerd.git 9 | 10 | ENV CONTAINERD_COMMIT=v2.1.3 11 | ENV NERDCTL_VERSION=2.1.2 12 | ENV GOPATH=/go 13 | RUN apk add go=1.24.6-r0 git 14 | RUN mkdir -p $GOPATH/src/github.com/containerd && \ 15 | cd $GOPATH/src/github.com/containerd && \ 16 | git clone https://github.com/containerd/containerd.git && \ 17 | cd $GOPATH/src/github.com/containerd/containerd && \ 18 | git checkout $CONTAINERD_COMMIT 19 | RUN apk add --no-cache btrfs-progs-dev gcc libc-dev linux-headers make libseccomp-dev 20 | WORKDIR $GOPATH/src/github.com/containerd/containerd 21 | RUN make binaries STATIC=1 EXTRA_FLAGS="-buildmode pie" EXTRA_LDFLAGS='-w -s -extldflags "-fno-PIC -static"' BUILDTAGS="static_build no_devmapper" 22 | 23 | # install nerdctl 24 | RUN if [ "$TARGETPLATFORM" = "linux/amd64" ]; then ARCHITECTURE=amd64; elif [ "$TARGETPLATFORM" = "linux/arm64" ]; then ARCHITECTURE=arm64; else ARCHITECTURE=amd64; fi \ 25 | && wget https://github.com/containerd/nerdctl/releases/download/v${NERDCTL_VERSION}/nerdctl-${NERDCTL_VERSION}-linux-${ARCHITECTURE}.tar.gz \ 26 | && tar -zxvf nerdctl-${NERDCTL_VERSION}-linux-${ARCHITECTURE}.tar.gz -C /usr/local/bin/ 27 | 28 | RUN cp bin/containerd bin/ctr bin/containerd-shim-runc-v2 /usr/bin/ 29 | RUN strip /usr/bin/containerd /usr/bin/ctr /usr/bin/containerd-shim-runc-v2 30 | RUN mkdir -p /opt/containerd 31 | 32 | FROM scratch AS containerd-dev 33 | ENTRYPOINT [] 34 | WORKDIR / 35 | COPY --from=builder /usr/bin/containerd /usr/bin/ctr /usr/bin/containerd-shim-runc-v2 /usr/bin/ 36 | COPY --from=builder /go/src/github.com/containerd/containerd /go/src/github.com/containerd/containerd 37 | COPY --from=builder /usr/local/bin/nerdctl /usr/bin/ 38 | COPY --from=builder /opt/containerd/ /opt/containerd/ 39 | 40 | # Dockerfile to build linuxkit/containerd for linuxkit 41 | FROM alpine:3.22 AS alpine 42 | 43 | RUN apk add tzdata binutils 44 | RUN mkdir -p /etc/init.d && ln -s /usr/bin/service /etc/init.d/020-containerd 45 | 46 | FROM containerd-dev 47 | 48 | FROM scratch 49 | ENTRYPOINT [] 50 | WORKDIR / 51 | COPY --from=containerd-dev /usr/bin/containerd /usr/bin/ctr /usr/bin/containerd-shim-runc-v2 /usr/bin/ 52 | COPY --from=containerd-dev /usr/bin/nerdctl /usr/bin/ 53 | COPY --from=containerd-dev /opt/containerd/ /opt/containerd/ 54 | COPY --from=alpine /usr/share/zoneinfo/UTC /etc/localtime 55 | COPY --from=alpine /etc/init.d/ /etc/init.d/ 56 | COPY etc etc/ 57 | COPY --from=alpine /etc/apk /etc/apk/ 58 | COPY --from=alpine /lib/apk /lib/apk/ 59 | -------------------------------------------------------------------------------- /files/vlan.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # This script is intended to be run on the HookOS/Linuxkit host so it must use /bin/sh. 4 | # No other shells are available on the host. 
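#
# Illustrative example (hypothetical values): booting with the kernel arguments
#   vlan_id=100 hw_addr=de:ad:be:ef:fe:ed
# makes this script find the NIC whose MAC is de:ad:be:ef:fe:ed (say, eth0) and
# create and bring up a VLAN interface named eth0.100 on it.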
 5 | 
 6 | exec 3>&1 4>&2
 7 | trap 'exec 2>&4 1>&3' 0 1 2 3
 8 | exec 1>/var/log/vlan.log 2>&1
 9 | 
10 | set -e # exit on error
11 | 
12 | # This script will set up VLAN interfaces if `vlan_id=xxxx` in `/proc/cmdline` has a value.
13 | # It will use the MAC address specified in `hw_addr=` to find the interface to add the VLAN to.
14 | 
15 | parse_from_cmdline() {
16 | 	local key="${1}"
17 | 	local cmdline
18 | 	local value
19 | 
20 | 	# Read the contents of /proc/cmdline
21 | 	cmdline=$(cat /proc/cmdline)
22 | 
23 | 	# Use grep to find the ${key}= parameter and awk to extract its value
24 | 	value=$(echo "$cmdline" | grep -o "${key}=[^ ]*" | awk -F= '{print $2}')
25 | 
26 | 	# Check if parameter was found
27 | 	if [ -n "$value" ]; then
28 | 		echo "$value"
29 | 		return 0
30 | 	else
31 | 		echo "${key}= parameter not found in /proc/cmdline" >&2
32 | 		return 1
33 | 	fi
34 | }
35 | 
36 | get_interface_name() {
37 | 	local mac=$1
38 | 	for interface in /sys/class/net/*; do
39 | 		if [ -f "$interface/address" ]; then
40 | 			if [ "$(cat "$interface/address")" == "$mac" ]; then
41 | 				echo "$(basename "$interface")"
42 | 				return 0
43 | 			fi
44 | 		fi
45 | 	done
46 | 	return 1
47 | }
48 | 
49 | function add_vlan_interface() {
50 | 	# check if vlan_id is set in the kernel commandline, otherwise return.
51 | 	if ! parse_from_cmdline vlan_id; then
52 | 		echo "No vlan_id=xxxx set in kernel commandline; no VLAN handling." >&2
53 | 		return
54 | 	fi
55 | 
56 | 	# check if hw_addr is set in the kernel commandline; if not, log it and continue (ifname will stay empty below).
57 | 	if ! parse_from_cmdline hw_addr; then
58 | 		echo "No hw_addr=xx:xx:xx:xx:xx:xx set in kernel commandline." >&2
59 | 	fi
60 | 
61 | 	echo "Starting VLAN handling, parsing..." >&2
62 | 
63 | 	vlan_id="$(parse_from_cmdline vlan_id)"
64 | 	hw_addr="$(parse_from_cmdline hw_addr)"
65 | 
66 | 	echo "VLAN handling - vlan_id: '${vlan_id}', hw_addr: '${hw_addr}'" >&2
67 | 
68 | 	if [ -n "$vlan_id" ]; then
69 | 		if [ -n "$hw_addr" ]; then
70 | 			echo "VLAN handling - vlan_id: '${vlan_id}', hw_addr: '${hw_addr}', searching for interface..." >&2
71 | 			ifname="$(get_interface_name ${hw_addr})"
72 | 			echo "VLAN handling - vlan_id: '${vlan_id}', hw_addr: '${hw_addr}', found interface: '${ifname}'" >&2
73 | 		else
74 | 			echo "VLAN handling - vlan_id: '${vlan_id}', hw_addr: '${hw_addr}', no hw_addr found in kernel commandline; cannot set vlan interface." >&2
75 | 			ifname=""
76 | 		fi
77 | 
78 | 		if [ -n "$ifname" ]; then
79 | 			echo "VLAN handling - vlan_id: '${vlan_id}', hw_addr: '${hw_addr}', adding VLAN interface..." >&2
80 | 			ip link set dev "${ifname}" up || true
81 | 			ip link add link "${ifname}" name "${ifname}.${vlan_id}" type vlan id "${vlan_id}" || true
82 | 			ip link set "${ifname}.${vlan_id}" up || true
83 | 			echo "VLAN handling - vlan_id: '${vlan_id}', hw_addr: '${hw_addr}', added VLAN interface: '${ifname}.${vlan_id}'" >&2
84 | 			return 0
85 | 		else
86 | 			echo "VLAN handling - vlan_id: '${vlan_id}', hw_addr: '${hw_addr}', no interface found for hw_addr." >&2
87 | 			return 3
88 | 		fi
89 | 
90 | 	else
91 | 		echo "VLAN handling - vlan_id: '${vlan_id}', hw_addr: '${hw_addr}', no vlan_id found in kernel commandline." >&2
92 | 		return 1
93 | 	fi
94 | }
95 | 
96 | # we always return true so that a failure here doesn't block the next container service from starting. Ideally, we always
97 | # want the getty service to start so we can debug failures.
98 | add_vlan_interface || true
99 | echo "Done with VLAN handling." >&2
100 | 
101 | # @TODO: debugging since I seem to have machines hanging here; dump some info
102 | echo "Running 'ip link show'..." 
103 | ip link show || true 104 | exit 0 105 | -------------------------------------------------------------------------------- /bash/bootable/fat32-image.sh: -------------------------------------------------------------------------------- 1 | function create_image_fat32_root_from_dir() { 2 | declare output_dir="${1}" 3 | declare output_filename="${2}" 4 | declare fat32_root_dir="${3}" 5 | declare partition_type="${partition_type:-"gpt"}" # or, "msdos" 6 | declare esp_partitition="${esp_partitition:-"no"}" # or, "yes" -- only for GPT; mark the fat32 partition as an ESP or not 7 | declare output_image="${output_dir}/${output_filename}" 8 | 9 | # Show whats about to be done 10 | log info "Creating FAT32 image '${output_image}' from '${fat32_root_dir}'..." 11 | log info "Partition type: ${partition_type}; ESP partition: ${esp_partitition}" 12 | 13 | # Create a Dockerfile; install parted and mtools 14 | mkdir -p "bootable" 15 | declare dockerfile_helper_filename="undefined.sh" 16 | produce_dockerfile_helper_apt_oras "bootable/" # will create the helper script in bootable/ directory; sets helper_name 17 | 18 | # Lets create a Dockerfile that will be used to create the FAT32 image 19 | cat <<- MKFAT32_SCRIPT > "bootable/Dockerfile.autogen.helper.mkfat32.sh" 20 | #!/bin/bash 21 | set -e 22 | set -x 23 | 24 | # Hack: transform the initramfs using mkimage to a u-boot image # @TODO refactor this out of here 25 | ls -lah /work/input 26 | if [[ -f /work/input/uinitrd.wanted ]]; then 27 | mkimage -A arm64 -O linux -T ramdisk -C gzip -n uInitrd -d /work/input/initramfs /work/input/uinitrd 28 | rm -f /work/input/initramfs /work/input/uinitrd.wanted 29 | ls -lah /work/input/uinitrd 30 | fi 31 | 32 | # Hack: boot.cmd -> boot.scr 33 | if [ -f /work/input/boot.cmd ]; then 34 | echo "Converting boot.cmd to boot.scr..." 35 | mkimage -C none -A arm -T script -d /work/input/boot.cmd /work/input/boot.scr 36 | fi 37 | 38 | # Calculate the size of the image 39 | # a) take the size, in megabytes, of /work/input directory 40 | # b) add 32mb to it, 16 for the offset and 16 for extra files user might wanna put there 41 | declare -i size_mb 42 | size_mb="\$(du -s -BM /work/input | cut -f 1 | tr -d 'M')" 43 | size_mb="\$((size_mb + 32))" 44 | echo "Size of the image: \${size_mb}M" 1>&2 45 | 46 | truncate -s \${size_mb}M /output/fat32.img 47 | parted /output/fat32.img mklabel ${partition_type} 48 | parted -a optimal /output/fat32.img mkpart primary fat32 16MiB 100% 49 | if [ "${partition_type}" == "gpt" ] && [ "${esp_partitition}" == "yes" ]; then 50 | parted /output/fat32.img set 1 esp on; 51 | fi 52 | mformat -i /output/fat32.img@@16M -F -v HOOK :: 53 | mcopy -i /output/fat32.img@@16M -s /work/input/* :: 54 | # list all the files in the fat32.img 55 | mdir -i /output/fat32.img@@16M -s 56 | 57 | parted /output/fat32.img print 58 | if [ "${partition_type}" == "gpt" ]; then 59 | sgdisk --print /output/fat32.img 60 | sgdisk --info=1 /output/fat32.img 61 | fi 62 | 63 | mv -v /output/fat32.img /output/${output_filename} 64 | MKFAT32_SCRIPT 65 | 66 | # Lets create a Dockerfile that will be used to obtain the artifacts needed, using ORAS binary 67 | declare -g mkfat32_dockerfile="bootable/Dockerfile.autogen.mkfat32" 68 | log info "Creating Dockerfile '${mkfat32_dockerfile}'... 
" 69 | cat <<- MKFAT32_DOCKERFILE > "${mkfat32_dockerfile}" 70 | FROM debian:stable AS builder 71 | # Call the helper to install curl, oras, parted, and mtools 72 | ADD ./${dockerfile_helper_filename} /apt-oras-helper.sh 73 | RUN bash /apt-oras-helper.sh parted mtools u-boot-tools gdisk 74 | ADD ./${fat32_root_dir} /work/input 75 | ADD ./Dockerfile.autogen.helper.mkfat32.sh /Dockerfile.autogen.helper.mkfat32.sh 76 | WORKDIR /output 77 | RUN bash /Dockerfile.autogen.helper.mkfat32.sh 78 | FROM scratch 79 | COPY --from=builder /output/* / 80 | MKFAT32_DOCKERFILE 81 | 82 | # Now, build the Dockerfile and output the fat32 image directly 83 | log info "Building Dockerfile for fat32 image and outputting directly to '${output_image}'..." 84 | docker buildx build --output "type=local,dest=${output_dir}" "--progress=${DOCKER_BUILDX_PROGRESS_TYPE}" -f "${mkfat32_dockerfile}" bootable 85 | 86 | # Ensure the output image is named correctly; grab its size 87 | if [ -f "${output_image}" ]; then 88 | declare fat32img_size 89 | fat32img_size="$(du -h "${output_image}" | cut -f 1)" 90 | log info "Built fat32 image '${output_image}' (${fat32img_size})" 91 | else 92 | log error "Failed to build fat32 image, missing '${output_image}'" 93 | exit 1 94 | fi 95 | } 96 | -------------------------------------------------------------------------------- /files/static-network.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # This script is intended to be run on the HookOS/Linuxkit host so it must use /bin/sh. 4 | # No other shells are available on the host. 5 | 6 | # this script will statically configure a single network interface based on the ipam= parameter 7 | # passed in the kernel command line. The ipam parameter is a colon separated string with the following fields: 8 | # ipam=:::::::: 9 | # Example: ipam=de-ad-be-ef-fe-ed::192.168.2.193:255.255.255.0:192.168.2.1:myserver:1.1.1.1,8.8.8.8::132.163.97.1,132.163.96.1 10 | # the mac address format requires it to be hyphen separated. 11 | 12 | exec 3>&1 4>&2 13 | trap 'exec 2>&4 1>&3' 0 1 2 3 14 | exec 1>/var/log/network_config.log 2>&1 15 | 16 | set -xeuo pipefail 17 | 18 | # Define the location of the interfaces file 19 | INTERFACES_FILE="/var/run/network/interfaces" 20 | 21 | parse_ipam_from_cmdline() { 22 | local cmdline 23 | local ipam_value 24 | 25 | # Read the contents of /proc/cmdline 26 | cmdline=$(cat /proc/cmdline) 27 | 28 | # Use grep to find the ipam= parameter and awk to extract its value 29 | ipam_value=$(echo "$cmdline" | grep -o 'ipam=[^ ]*' | awk -F= '{print $2}') 30 | 31 | # Check if ipam= parameter was found 32 | if [ -n "$ipam_value" ]; then 33 | echo "$ipam_value" 34 | return 0 35 | else 36 | echo "ipam= parameter not found in /proc/cmdline" >&2 37 | return 1 38 | fi 39 | } 40 | 41 | # Function to get interface name from MAC address 42 | # TODO(jacobweinstock): if a vlan id is provided we should match for the vlan interface 43 | get_interface_name() { 44 | local mac=$1 45 | for interface in /sys/class/net/*; do 46 | if [ -f "$interface/address" ]; then 47 | if [ "$(cat "$interface/address")" == "$mac" ]; then 48 | echo "$(basename "$interface")" 49 | return 0 50 | fi 51 | fi 52 | done 53 | return 1 54 | } 55 | 56 | convert_hyphen_to_colon() { 57 | echo "$1" | tr '-' ':' 58 | } 59 | 60 | ipam=$(parse_ipam_from_cmdline) 61 | if [ $? 
-ne 0 ]; then 62 | echo "Failed to get IPAM value, not statically configuring network" 63 | cat /proc/cmdline 64 | exit 0 65 | fi 66 | echo "IPAM value: $ipam" 67 | 68 | mkdir -p $(dirname "$INTERFACES_FILE") 69 | 70 | # Parse the IPAM string 71 | IFS=':' read -r mac vlan_id ip netmask gateway hostname dns search_domains ntp < "$INTERFACES_FILE" 131 | 132 | echo "Network configuration has been written to $INTERFACES_FILE" 133 | 134 | # Run ifup on the interface 135 | ifup -v -a -i "$INTERFACES_FILE" 136 | 137 | # setup DNS 138 | ROOT=/run/resolvconf/ setup-dns -d "$search_domains" "$dns" 139 | -------------------------------------------------------------------------------- /kernel/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stable AS kernel-source-unpacked 2 | ENV DEBIAN_FRONTEND=noninteractive 3 | 4 | # crossbuild-essentials are pretty heavy; here we install for both architecures to maximize Docker layer hit cache rate during development, but only one will be used 5 | RUN set -x && apt -o "Dpkg::Use-Pty=0" -y update && \ 6 | apt -o "Dpkg::Use-Pty=0" -y install curl xz-utils gnupg2 flex bison libssl-dev libelf-dev bc libncurses-dev kmod make \ 7 | crossbuild-essential-amd64 crossbuild-essential-arm64 && \ 8 | apt -o "Dpkg::Use-Pty=0" -y clean 9 | 10 | ARG KERNEL_MAJOR_V 11 | ARG KERNEL_VERSION 12 | ARG KERNEL_SOURCE=https://www.kernel.org/pub/linux/kernel/${KERNEL_MAJOR_V}/linux-${KERNEL_VERSION}.tar.xz 13 | ARG KERNEL_SHA256_SUMS=https://www.kernel.org/pub/linux/kernel/${KERNEL_MAJOR_V}/sha256sums.asc 14 | ARG KERNEL_PGP2_SIGN=https://www.kernel.org/pub/linux/kernel/${KERNEL_MAJOR_V}/linux-${KERNEL_VERSION}.tar.sign 15 | # Backup URLs in case the main ones are down or point releases are not available yet. 16 | ARG KERNEL_SOURCE_BACKUP=https://cdn.kernel.org/pub/linux/kernel/${KERNEL_MAJOR_V}/linux-${KERNEL_VERSION}.tar.xz 17 | ARG KERNEL_SHA256_SUMS_BACKUP=https://cdn.kernel.org/pub/linux/kernel/${KERNEL_MAJOR_V}/sha256sums.asc 18 | ARG KERNEL_PGP2_SIGN_BACKUP=https://cdn.kernel.org/pub/linux/kernel/${KERNEL_MAJOR_V}/linux-${KERNEL_VERSION}.tar.sign 19 | 20 | # PGP keys: 589DA6B1 (greg@kroah.com) & 6092693E (autosigner@kernel.org) & 00411886 (torvalds@linux-foundation.org) 21 | COPY /keys.asc /keys.asc 22 | 23 | # Download and verify kernel 24 | COPY download.sh /download.sh 25 | RUN /download.sh ${KERNEL_VERSION} ${KERNEL_SOURCE} ${KERNEL_SHA256_SUMS} ${KERNEL_PGP2_SIGN} ${KERNEL_SOURCE_BACKUP} ${KERNEL_SHA256_SUMS_BACKUP} ${KERNEL_PGP2_SIGN_BACKUP} 26 | 27 | FROM kernel-source-unpacked AS kernel-with-config 28 | 29 | ARG INPUT_DEFCONFIG 30 | ARG KERNEL_ARCH 31 | ARG KERNEL_CROSS_COMPILE 32 | 33 | ENV KERNEL_ARCH=${KERNEL_ARCH} 34 | ENV ARCH=${KERNEL_ARCH} 35 | ENV CROSS_COMPILE=${KERNEL_CROSS_COMPILE} 36 | ENV KCFLAGS="-fdiagnostics-color=always -fno-pie" 37 | ENV KBUILD_BUILD_USER="hook" 38 | ENV KBUILD_BUILD_HOST="tinkerbell" 39 | 40 | # Copy just the defconfig needed for this build 41 | WORKDIR /linux 42 | COPY /configs/${INPUT_DEFCONFIG} /linux/.config 43 | 44 | # Kernel config; copy the correct defconfig as .config, and run olddefconfig 45 | RUN set -x && make "ARCH=${KERNEL_ARCH}" olddefconfig 46 | 47 | # Use this stage to run kernel configuration tasks like menuconfig / savedefconfig etc with: 48 | # docker buildx build --load --progress=plain --build-arg KERNEL_VERSION=5.10.212 --build-arg KERNEL_SERIES=5.10.y -t hook-kernel:builder --target kernel-configurator . 
49 | # docker run -it -v "$(pwd)":/out-config hook-kernel:builder 50 | # Otherwise, since this stage is not referenced anywhere during normal build, it is completely skipped 51 | FROM kernel-with-config AS kernel-configurator 52 | VOLUME /host 53 | 54 | 55 | FROM kernel-with-config AS kernel-build 56 | 57 | ARG KERNEL_OUTPUT_IMAGE 58 | 59 | RUN mkdir /out 60 | 61 | RUN sed -i 's/#define COMMAND_LINE_SIZE 2048/#define COMMAND_LINE_SIZE 4096/' arch/x86/include/asm/setup.h 62 | 63 | # Kernel build. ENVs in previous stages are inherited; thus ARCH, CROSS_COMPILE, KCFLAGS, KBUILD_BUILD_USER, KBUILD_BUILD_HOST are available 64 | RUN set -x && \ 65 | echo "Cross compiler: ${CROSS_COMPILE}" && \ 66 | make -j"$(getconf _NPROCESSORS_ONLN)" && \ 67 | cp ${KERNEL_OUTPUT_IMAGE} /out/kernel && \ 68 | cp System.map /out 69 | 70 | 71 | # Modules, from lib/modules go into kernel.tar (will be extracted in root filesystem by linuxkit) 72 | RUN set -x && \ 73 | make -s -j"$(getconf _NPROCESSORS_ONLN)" INSTALL_MOD_PATH=/tmp/kernel-modules modules_install && \ 74 | ( DVER=$(basename $(find /tmp/kernel-modules/lib/modules/ -mindepth 1 -maxdepth 1)) && \ 75 | cd /tmp/kernel-modules/lib/modules/$DVER && \ 76 | rm -f build source ) && \ 77 | ( cd /tmp/kernel-modules && tar cf /out/kernel.tar . ) 78 | 79 | # For arches that have DTB's, eg arm64; they go separately into dtbs.tar; for arches that don't (x86), an empty dtbs.tar is created 80 | RUN set -x && \ 81 | mkdir -p /tmp/kernel-dtb && \ 82 | case "$KERNEL_ARCH" in \ 83 | arm64) \ 84 | echo "Building DTBs for arm64" && \ 85 | make -s -j"$(getconf _NPROCESSORS_ONLN)" INSTALL_DTBS_PATH=/tmp/kernel-dtb dtbs_install; \ 86 | ;; \ 87 | *) \ 88 | echo "No DTBs for $KERNEL_ARCH"; \ 89 | ;; \ 90 | esac && \ 91 | ( cd /tmp/kernel-dtb && tar czvf /out/dtbs.tar.gz . 
) 92 | 93 | FROM scratch 94 | ENTRYPOINT [] 95 | CMD [] 96 | WORKDIR / 97 | COPY --from=kernel-build /out/* / 98 | -------------------------------------------------------------------------------- /images/hook-docker/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | "os/exec" 8 | "path/filepath" 9 | "strings" 10 | "time" 11 | ) 12 | 13 | type tinkConfig struct { 14 | syslogHost string 15 | insecureRegistries []string 16 | httpProxy string 17 | httpsProxy string 18 | noProxy string 19 | } 20 | 21 | type dockerConfig struct { 22 | Debug bool `json:"debug"` 23 | LogDriver string `json:"log-driver,omitempty"` 24 | LogOpts map[string]string `json:"log-opts,omitempty"` 25 | InsecureRegistries []string `json:"insecure-registries,omitempty"` 26 | } 27 | 28 | func run() error { 29 | // Parse the kernel cmdline to find the syslog host, insecure registries and proxy settings 30 | content, err := os.ReadFile("/proc/cmdline") 31 | if err != nil { 32 | return err 33 | } 34 | cmdLines := strings.Split(string(content), " ") 35 | cfg := parseCmdLine(cmdLines) 36 | 37 | fmt.Println("Starting the Docker Engine") 38 | 39 | d := dockerConfig{ 40 | Debug: true, 41 | LogDriver: "syslog", 42 | LogOpts: map[string]string{ 43 | "syslog-address": fmt.Sprintf("udp://%v:514", cfg.syslogHost), 44 | }, 45 | InsecureRegistries: cfg.insecureRegistries, 46 | } 47 | path := "/etc/docker" 48 | // Create the directory for the docker config 49 | err = os.MkdirAll(path, 0o755) // 0o755 rather than os.ModeDir, which carries no permission bits 50 | if err != nil { 51 | return err 52 | } 53 | if err := d.writeToDisk(filepath.Join(path, "daemon.json")); err != nil { 54 | return fmt.Errorf("failed to write docker config: %w", err) 55 | } 56 | // Build the command, and execute 57 | // cmd := exec.Command("/usr/local/bin/docker-init", "/usr/local/bin/dockerd") 58 | cmd := exec.Command("sh", "-c", "/usr/local/bin/dockerd-entrypoint.sh") 59 | cmd.Stdout = os.Stdout 60 | cmd.Stderr = os.Stderr 61 | 62 | myEnvs := make([]string, 0, 4) 63 | myEnvs = append(myEnvs, fmt.Sprintf("HTTP_PROXY=%s", cfg.httpProxy)) 64 | myEnvs = append(myEnvs, fmt.Sprintf("HTTPS_PROXY=%s", cfg.httpsProxy)) 65 | myEnvs = append(myEnvs, fmt.Sprintf("NO_PROXY=%s", cfg.noProxy)) 66 | // We set this so that the dockerd-entrypoint.sh will run docker with TLS enabled. 67 | // This is needed as the docker daemon is listening on 0.0.0.0 and it's not straightforward 68 | // to reconfigure this. Enabling TLS will block remote access to the docker daemon for now. 69 | myEnvs = append(myEnvs, "DOCKER_TLS_CERTDIR=/certs") 70 | 71 | cmd.Env = append(os.Environ(), myEnvs...) 72 | 73 | err = cmd.Run() 74 | if err != nil { 75 | return err 76 | } 77 | return nil 78 | } 79 | 80 | func main() { 81 | fmt.Println("Starting Docker") 82 | go rebootWatch() 83 | for { 84 | if err := run(); err != nil { 85 | fmt.Println("error starting up Docker", err) 86 | fmt.Println("will retry in 10 seconds") 87 | time.Sleep(10 * time.Second) 88 | } 89 | } 90 | } 91 | 92 | // writeToDisk writes the dockerConfig to loc. 93 | func (d dockerConfig) writeToDisk(loc string) error { 94 | b, err := json.Marshal(d) 95 | if err != nil { 96 | return fmt.Errorf("unable to marshal docker config: %w", err) 97 | } 98 | if err := os.WriteFile(loc, b, 0o600); err != nil { 99 | return fmt.Errorf("error writing daemon.json: %w", err) 100 | } 101 | 102 | return nil 103 | } 104 | 105 | // parseCmdLine will parse the command line.
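// For illustration (values are hypothetical), a /proc/cmdline containing:
//
//	syslog_host=192.168.0.2 insecure_registries=10.0.0.1:5000,10.0.0.2:5000 HTTP_PROXY=http://proxy:3128
//
// yields syslogHost="192.168.0.2", insecureRegistries=["10.0.0.1:5000", "10.0.0.2:5000"] and
// httpProxy="http://proxy:3128"; keys not listed in the switch below are ignored.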
106 | func parseCmdLine(cmdLines []string) (cfg tinkConfig) { 107 | for i := range cmdLines { 108 | cmdLine := strings.SplitN(cmdLines[i], "=", 2) 109 | if len(cmdLine) != 2 { // skip entries without a value; indexing cmdLine[1] below would panic otherwise 110 | continue 111 | } 112 | 113 | switch cmd := strings.TrimSpace(cmdLine[0]); cmd { 114 | case "syslog_host": 115 | cfg.syslogHost = strings.TrimSpace(cmdLine[1]) 116 | case "insecure_registries": 117 | cfg.insecureRegistries = strings.Split(strings.TrimSpace(cmdLine[1]), ",") 118 | case "HTTP_PROXY": 119 | cfg.httpProxy = strings.TrimSpace(cmdLine[1]) 120 | case "HTTPS_PROXY": 121 | cfg.httpsProxy = strings.TrimSpace(cmdLine[1]) 122 | case "NO_PROXY": 123 | cfg.noProxy = strings.TrimSpace(cmdLine[1]) 124 | } 125 | } 126 | return cfg 127 | } 128 | 129 | func rebootWatch() { 130 | fmt.Println("Starting Reboot Watcher") 131 | 132 | // Forever loop 133 | for { 134 | if fileExists("/worker/reboot") { 135 | cmd := exec.Command("/sbin/reboot") 136 | cmd.Stdout = os.Stdout 137 | cmd.Stderr = os.Stderr 138 | err := cmd.Run() 139 | if err != nil { 140 | fmt.Printf("error calling /sbin/reboot: %v\n", err) 141 | time.Sleep(time.Second) 142 | continue 143 | } 144 | break 145 | } 146 | // Wait one second before looking for file 147 | time.Sleep(time.Second) 148 | } 149 | fmt.Println("Rebooting") 150 | } 151 | 152 | func fileExists(filename string) bool { 153 | info, err := os.Stat(filename) 154 | if err != nil { // any stat error (not only IsNotExist) means info may be nil and must not be dereferenced 155 | return false 156 | } 157 | return !info.IsDir() 158 | } 159 | -------------------------------------------------------------------------------- /images/hook-embedded/pull-images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is used to build container images that are embedded in HookOS. 4 | # When HookOS boots up, the DinD container will have all the images in its cache.
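# A minimal usage sketch (image names and tags are illustrative; see images.txt.example in this directory):
#
#   printf '%s\n' 'alpine:3.20 embedded/alpine:latest true' > images.txt
#   ./pull-images.sh amd64 docker:dind
#
# Each images.txt line is read as: <image to pull> [<extra tag to add> [<remove original tag: true|false>]]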
5 | 6 | set -euo pipefail 7 | 8 | function docker_save_image() { 9 | local image="$1" 10 | local output_dir="$2" 11 | local output_file="${output_dir}/$(echo "${image}" | tr '/' '-')" 12 | 13 | docker save -o "${output_file}".tar "${image}" 14 | } 15 | 16 | function docker_pull_image() { 17 | local image="$1" 18 | local arch="${2-amd64}" 19 | 20 | docker pull --platform=linux/"${arch}" "${image}" 21 | } 22 | 23 | function docker_remove_image() { 24 | local image="$1" 25 | 26 | docker rmi "${image}" || true 27 | } 28 | 29 | function trap_handler() { 30 | local dind_container="$1" 31 | 32 | if [[ "${remove_dind_container}" == "true" ]]; then 33 | docker rm -f "${dind_container}" &> /dev/null 34 | else 35 | echo "DinD container NOT removed, please remove it manually" 36 | fi 37 | } 38 | 39 | function main() { 40 | local dind_container="$1" 41 | local images_file="$2" 42 | local arch="$3" 43 | local dind_container_image="$4" 44 | 45 | # Pull the images 46 | while IFS=" " read -r first_image image_tag || [ -n "${first_image}" ] ; do 47 | echo -e "----------------------- $first_image -----------------------" 48 | # Remove the image if it exists so that the image pulls the correct architecture 49 | docker_remove_image "${first_image}" 50 | docker_pull_image "${first_image}" "${arch}" 51 | done < "${images_file}" 52 | 53 | # Save the images 54 | local output_dir="${PWD}/images_tar" 55 | mkdir -p "${output_dir}" 56 | while IFS=" " read -r first_image image_tag || [ -n "${first_image}" ] ; do 57 | docker_save_image "${first_image}" "${output_dir}" 58 | done < "${images_file}" 59 | 60 | export remove_dind_container="true" 61 | # as this function may be called multiple times, we need to ensure the container is removed 62 | trap "trap_handler ${dind_container}" RETURN 63 | # we're using set -e so the trap on RETURN will not be executed when a command fails 64 | trap "trap_handler ${dind_container}" EXIT 65 | 66 | # start DinD container 67 | # To avoid the ownership of the src bind mount directory (./images/) changing to root, 68 | # we don't bind mount to /var/lib/docker in the container: the DinD container runs as root and 69 | # would change the permissions of the bind mount directory (images/) to root. 70 | echo -e "Starting DinD container" 71 | echo -e "-----------------------" 72 | docker run -d --privileged --name "${dind_container}" -v "${PWD}/images_tar":/images_tar -v "${PWD}"/images/:/var/lib/docker-embedded/ "${dind_container_image}" 73 | 74 | # wait until the docker daemon is ready 75 | until docker exec "${dind_container}" docker info &> /dev/null; do 76 | sleep 1 77 | if [[ $(docker inspect -f '{{.State.Status}}' "${dind_container}") == "exited" ]]; then 78 | echo "DinD container exited unexpectedly" 79 | docker logs "${dind_container}" 80 | exit 1 81 | fi 82 | done 83 | 84 | # As hook-docker uses the overlay2 storage driver, the DinD container must use the overlay2 storage driver too. 85 | # Make sure the overlay2 storage driver is used by the DinD container. 86 | # The VFS storage driver might get used if /var/lib/docker in the DinD container cannot be used by overlay2. 87 | storage_driver=$(docker exec "${dind_container}" docker info --format '{{.Driver}}') 88 | if [[ "${storage_driver}" != "overlay2" ]]; then 89 | export remove_dind_container="false" 90 | echo "DinD container is not using overlay2 storage driver, storage driver detected: ${storage_driver}" 91 | exit 1 92 | fi 93 | 94 | # remove the contents of /var/lib/docker-embedded so that any previous images are removed. 
Without this it seems to cause boot issues. 95 | docker exec "${dind_container}" sh -c "rm -rf /var/lib/docker-embedded/*" 96 | 97 | # Load the images 98 | for image_file in "${output_dir}"/*; do 99 | echo -e "Loading image: ${image_file}" 100 | docker exec "${dind_container}" docker load -i "/images_tar/$(basename "${image_file}")" 101 | done 102 | 103 | # clean up tar files 104 | rm -rf "${output_dir}"/* 105 | 106 | # Create any tags for the images and remove any original tags 107 | while IFS=" " read -r first_image image_tag remove_original || [ -n "${first_image}" ] ; do 108 | if [[ "${image_tag}" != "" ]]; then 109 | docker exec "${dind_container}" docker tag "${first_image}" "${image_tag}" 110 | if [[ "${remove_original}" == "true" ]]; then 111 | docker exec "${dind_container}" docker rmi "${first_image}" 112 | fi 113 | fi 114 | done < "${images_file}" 115 | 116 | # We need to copy /var/lib/docker to /var/lib/docker-embedded in order for HookOS to use the Docker images in its build. 117 | docker exec "${dind_container}" sh -c "cp -a /var/lib/docker/* /var/lib/docker-embedded/" 118 | } 119 | 120 | arch="${1-amd64}" 121 | dind_container_name="hookos-dind" 122 | images_file="images.txt" 123 | dind_container_image="${2-docker:dind}" 124 | main "${dind_container_name}" "${images_file}" "${arch}" "${dind_container_image}" 125 | -------------------------------------------------------------------------------- /bash/shellcheck.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function download_prepare_shellcheck_bin() { 4 | declare SHELLCHECK_VERSION=${SHELLCHECK_VERSION:-"0.10.0"} # https://github.com/koalaman/shellcheck/releases 5 | log info "Preparing shellcheck binary for version v${SHELLCHECK_VERSION}..." 6 | 7 | declare bash_machine="${BASH_VERSINFO[5]}" 8 | declare shellcheck_os="" shellcheck_arch="" 9 | case "$bash_machine" in 10 | *darwin*) shellcheck_os="darwin" ;; 11 | *linux*) shellcheck_os="linux" ;; 12 | *) 13 | log error "unknown os: $bash_machine" 14 | exit 3 15 | ;; 16 | esac 17 | 18 | case "$bash_machine" in 19 | *aarch64*) shellcheck_arch="aarch64" ;; 20 | *x86_64*) shellcheck_arch="x86_64" ;; 21 | *) 22 | log error "unknown arch: $bash_machine" 23 | exit 2 24 | ;; 25 | esac 26 | 27 | declare shellcheck_fn="shellcheck-v${SHELLCHECK_VERSION}.${shellcheck_os}.${shellcheck_arch}" 28 | declare shellcheck_fn_tarxz="${shellcheck_fn}.tar.xz" 29 | declare DOWN_URL="https://github.com/koalaman/shellcheck/releases/download/v${SHELLCHECK_VERSION}/${shellcheck_fn_tarxz}" 30 | declare -g -r SHELLCHECK_BIN="${CACHE_DIR}/${shellcheck_fn}" 31 | 32 | if [[ ! -f "${SHELLCHECK_BIN}" ]]; then 33 | log info "Cache miss for shellcheck binary, downloading..." 
34 | log debug "bash_machine: ${bash_machine}" 35 | log debug "Down URL: ${DOWN_URL}" 36 | log debug "SHELLCHECK_BIN: ${SHELLCHECK_BIN}" 37 | curl -sL "${DOWN_URL}" -o "${SHELLCHECK_BIN}.tar.xz" 38 | tar -xf "${SHELLCHECK_BIN}.tar.xz" -C "${CACHE_DIR}" "shellcheck-v${SHELLCHECK_VERSION}/shellcheck" 39 | mv "${CACHE_DIR}/shellcheck-v${SHELLCHECK_VERSION}/shellcheck" "${SHELLCHECK_BIN}" 40 | rm -rf "${CACHE_DIR}/shellcheck-v${SHELLCHECK_VERSION}" "${SHELLCHECK_BIN}.tar.xz" 41 | chmod +x "${SHELLCHECK_BIN}" 42 | fi 43 | 44 | declare -g SHELLCHECK_ACTUAL_VERSION="unknown" 45 | SHELLCHECK_ACTUAL_VERSION="$("${SHELLCHECK_BIN}" --version | grep "^version")" 46 | declare -g -r SHELLCHECK_ACTUAL_VERSION="${SHELLCHECK_ACTUAL_VERSION}" 47 | log debug "SHELLCHECK_ACTUAL_VERSION: ${SHELLCHECK_ACTUAL_VERSION}" 48 | 49 | return 0 50 | } 51 | 52 | # Same, but for shellfmt 53 | function download_prepare_shellfmt_bin() { 54 | declare SHELLFMT_VERSION=${SHELLFMT_VERSION:-"3.10.0"} # https://github.com/mvdan/sh/releases/ 55 | log info "Preparing shellfmt binary for version v${SHELLFMT_VERSION}..." 56 | 57 | declare bash_machine="${BASH_VERSINFO[5]}" 58 | declare shellfmt_os="" shellfmt_arch="" 59 | case "$bash_machine" in 60 | *darwin*) shellfmt_os="darwin" ;; 61 | *linux*) shellfmt_os="linux" ;; 62 | *) 63 | log error "unknown os: $bash_machine" 64 | exit 3 65 | ;; 66 | esac 67 | 68 | case "$bash_machine" in 69 | *aarch64*) shellfmt_arch="arm64" ;; 70 | *x86_64*) shellfmt_arch="amd64" ;; 71 | *) 72 | log error "unknown arch: $bash_machine" 73 | exit 2 74 | ;; 75 | esac 76 | 77 | declare shellfmt_fn="shfmt_v${SHELLFMT_VERSION}_${shellfmt_os}_${shellfmt_arch}" 78 | declare DOWN_URL="https://github.com/mvdan/sh/releases/download/v${SHELLFMT_VERSION}/${shellfmt_fn}" 79 | declare -g -r SHELLFMT_BIN="${CACHE_DIR}/${shellfmt_fn}" 80 | 81 | if [[ ! -f "${SHELLFMT_BIN}" ]]; then 82 | log info "Cache miss for shellfmt binary, downloading..." 83 | log debug "bash_machine: ${bash_machine}" 84 | log debug "Down URL: ${DOWN_URL}" 85 | log debug "SHELLFMT_BIN: ${SHELLFMT_BIN}" 86 | curl -sL "${DOWN_URL}" -o "${SHELLFMT_BIN}.tmp" 87 | chmod +x "${SHELLFMT_BIN}.tmp" 88 | mv "${SHELLFMT_BIN}.tmp" "${SHELLFMT_BIN}" 89 | fi 90 | 91 | declare -g SHELLFMT_ACTUAL_VERSION="unknown" 92 | SHELLFMT_ACTUAL_VERSION="$("${SHELLFMT_BIN}" --version)" 93 | declare -g -r SHELLFMT_ACTUAL_VERSION="${SHELLFMT_ACTUAL_VERSION}" 94 | log debug "SHELLFMT_ACTUAL_VERSION: ${SHELLFMT_ACTUAL_VERSION}" 95 | } 96 | 97 | function run_shellcheck() { 98 | declare -a params=() 99 | params+=(--check-sourced --color=always --external-sources --format=tty --shell=bash --severity=style) # warning is the default 100 | 101 | log info "Running shellcheck ${SHELLCHECK_ACTUAL_VERSION} against 'build.sh', please wait..." 102 | log debug "All shellcheck params: " "${params[@]}" 103 | 104 | if "${SHELLCHECK_BIN}" "${params[@]}" build.sh; then 105 | log info "Shellcheck detected no problems in bash code." 106 | else 107 | log error "Shellcheck detected problems in bash code; check output above." 108 | exit 1 109 | fi 110 | } 111 | 112 | function run_shellfmt() { 113 | log info "Running shellfmt ${SHELLFMT_ACTUAL_VERSION} against all bash files, please wait..." 
114 | declare -a all_bash_files=() 115 | all_bash_files+=("build.sh") # The root build script 116 | mapfile -t -O "${#all_bash_files[@]}" all_bash_files < <(find bash/ -type f -name "*.sh") # All .sh under bash/; -O appends after build.sh instead of clearing the array 117 | log debug "All bash files: ${all_bash_files[*]}" 118 | 119 | # First, run shellfmt with --diff: it will exit with an error if changes are needed. 120 | if ! "${SHELLFMT_BIN}" --diff "${all_bash_files[@]}"; then 121 | log warn "Shellfmt detected deviations in bash code formatting." 122 | log info "Re-running shellfmt with --write to fix the deviations..." 123 | if ! "${SHELLFMT_BIN}" --write "${all_bash_files[@]}"; then 124 | log error "Shellfmt failed to fix deviations in bash code formatting." 125 | exit 66 126 | else 127 | log info "Shellfmt fixed deviations in bash code formatting." 128 | fi 129 | exit 1 # So CI breaks 130 | fi 131 | log info "Shellfmt detected no deviations in bash code formatting." 132 | return 0 133 | } 134 | -------------------------------------------------------------------------------- /bash/bootable/grub.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function list_bootable_grub() { 4 | : "${bootable_info['BOOTABLE_ID']:?"bootable_info['BOOTABLE_ID'] is unset"}" 5 | declare -g -A bootable_boards=() 6 | declare board_name="${bootable_info['BOOTABLE_ID']}-generic" 7 | bootable_boards["${board_name}"]="NOT=used" # A single, generic "board" for all grub bootables 8 | } 9 | 10 | function build_bootable_grub() { 11 | : "${kernel_info['DOCKER_ARCH']:?"kernel_info['DOCKER_ARCH'] is unset"}" 12 | : "${bootable_info['INVENTORY_ID']:?"bootable_info['INVENTORY_ID'] is unset"}" 13 | : "${OUTPUT_ID:?"OUTPUT_ID is unset"}" 14 | 15 | declare hook_id="${bootable_info['INVENTORY_ID']}" 16 | declare bootable_img="bootable_grub_${OUTPUT_ID}.img" 17 | declare kernel_command_line="console=tty0 console=${bootable_info['SERIAL_CONSOLE']}" 18 | 19 | declare has_dtbs="${bootable_info['DTB']}" 20 | [[ -z "${has_dtbs}" ]] && has_dtbs="no" 21 | 22 | declare -A hook_files=( 23 | ["kernel"]="vmlinuz-${OUTPUT_ID}" 24 | ["initrd"]="initramfs-${OUTPUT_ID}" 25 | ) 26 | 27 | if [[ "${has_dtbs}" == "yes" ]]; then 28 | hook_files["dtbs"]="dtbs-${OUTPUT_ID}.tar.gz" 29 | fi 30 | 31 | # Check if all the required files are present; if not, give instructions on how to build kernel and hook $hook_id 32 | for file in "${!hook_files[@]}"; do 33 | if [[ ! 
-f "out/hook/${hook_files[$file]}" ]]; then 34 | log error "Required file 'out/hook/${hook_files[$file]}' not found; please build the kernel and hook ${hook_id} first: ./build.sh kernel ${hook_id} && ./build.sh build ${hook_id}" 35 | exit 1 36 | fi 37 | done 38 | 39 | log info "Building grub bootable for hook ${hook_id}" 40 | 41 | # Prepare the base working directory in bootable/ 42 | declare bootable_dir="grub" 43 | declare bootable_base_dir="bootable/${bootable_dir}" 44 | rm -rf "${bootable_base_dir}" 45 | mkdir -p "${bootable_base_dir}" 46 | 47 | # Prepare a directory that will be the root of the FAT32 partition 48 | declare fat32_root_dir="${bootable_base_dir}/fat32-root" 49 | mkdir -p "${fat32_root_dir}" 50 | 51 | # Kernel and initrd go directly in the root of the FAT32 partition 52 | cp -p "${debug_dash_v[@]}" "out/hook/${hook_files['kernel']}" "${fat32_root_dir}/vmlinuz" 53 | cp -p "${debug_dash_v[@]}" "out/hook/${hook_files['initrd']}" "${fat32_root_dir}/initrd.img" 54 | 55 | # Handle DTBs 56 | if [[ "${has_dtbs}" == "yes" ]]; then 57 | mkdir -p "${fat32_root_dir}/dtb" 58 | tar -C "${fat32_root_dir}/dtb" --strip-components=1 -xzf "out/hook/${hook_files["dtbs"]}" 59 | fi 60 | 61 | # Grab the GRUB binaries from the LinuxKit Docker images 62 | declare grub_arch="${kernel_info['DOCKER_ARCH']}" 63 | declare grub_linuxkit_image="linuxkit/grub-dev:4184bd7644a0edf73d4fe8a55171fe06f4b4d738" # See https://github.com/linuxkit/linuxkit/blob/master/tools/grub/Dockerfile for the latest 64 | declare fat32_efi_dir="${fat32_root_dir}/EFI/BOOT" 65 | mkdir -p "${fat32_efi_dir}" 66 | 67 | download_grub_binaries_from_linuxkit_docker_images "${fat32_efi_dir}" "${grub_arch}" "${grub_linuxkit_image}" 68 | 69 | declare -g -a bootable_tinkerbell_kernel_params=() 70 | fill_array_bootable_tinkerbell_kernel_parameters "efi-${grub_arch}" 71 | declare tinkerbell_args="${bootable_tinkerbell_kernel_params[*]}" 72 | 73 | cat <<- GRUB_CFG > "${fat32_efi_dir}/grub.cfg" 74 | set timeout=0 75 | set gfxpayload=text 76 | menuentry 'Tinkerbell Hook ${grub_arch}' { 77 | linux /vmlinuz ${kernel_command_line} ${tinkerbell_args} 78 | initrd /initrd.img 79 | } 80 | GRUB_CFG 81 | bat_language="Plain text" log_file_bat "${fat32_efi_dir}/grub.cfg" "info" "Produced GRUB grub.cfg" 82 | 83 | # Show the state 84 | log_tree "${bootable_base_dir}" "debug" "State of the bootable directory" 85 | 86 | # Use a Dockerfile to assemble a GPT image, with a single FAT32 partition, containing the files in the fat32-root directory 87 | # This is common across all GPT-based bootable media; the only difference is the ESP flag, which is set for UEFI bootable media. 88 | esp_partitition="yes" \ 89 | create_image_fat32_root_from_dir "${bootable_base_dir}" "${bootable_img}" "${bootable_dir}/fat32-root" 90 | 91 | log info "Done building grub bootable for hook ${hook_id}" 92 | output_bootable_media "${bootable_base_dir}/${bootable_img}" "hook-bootable-grub-${OUTPUT_ID}.img" 93 | 94 | return 0 95 | } 96 | 97 | function download_grub_binaries_from_linuxkit_docker_images() { 98 | declare output_dir="${1}" 99 | declare arch="${2}" 100 | declare image="${3}" 101 | log info "Grabbing GRUB bins for arch '${arch}' from image '${image}'..." 102 | 103 | # Lets create a Dockerfile that will be used to obtain the artifacts needed 104 | declare -g grub_grabber_dockerfile="bootable/Dockerfile.autogen.grub_grabber" 105 | log info "Creating Dockerfile '${grub_grabber_dockerfile}'... 
" 106 | 107 | cat <<- GRUB_GRABBER_DOCKERFILE > "${grub_grabber_dockerfile}" 108 | FROM --platform=linux/${arch} ${image} AS grub-build-${arch} 109 | FROM scratch 110 | COPY --from=grub-build-${arch} /*.EFI / 111 | GRUB_GRABBER_DOCKERFILE 112 | 113 | # Now, build the Dockerfile and output the fat32 image directly 114 | log info "Building Dockerfile for GRUB grabber and outputting directly to '${output_dir}'..." 115 | docker buildx build --output "type=local,dest=${output_dir}" "--progress=${DOCKER_BUILDX_PROGRESS_TYPE}" -f "${grub_grabber_dockerfile}" bootable 116 | 117 | log info "Done, GRUB binaries are in ${output_dir}" 118 | log_tree "${output_dir}" "debug" "State of the GRUB binaries directory" 119 | } 120 | -------------------------------------------------------------------------------- /bash/bootable/rpi.sh: -------------------------------------------------------------------------------- 1 | function list_bootable_rpi_firmware() { 2 | declare -g -A bootable_boards=() 3 | bootable_boards["rpi"]="NOT=used" 4 | } 5 | 6 | function build_bootable_rpi_firmware() { 7 | : "${bootable_info['INVENTORY_ID']:?"bootable_info['INVENTORY_ID'] is unset"}" 8 | 9 | declare hook_id="${bootable_info['INVENTORY_ID']}" 10 | 11 | declare -A hook_files=( 12 | ["kernel"]="vmlinuz-${hook_id}" 13 | ["initrd"]="initramfs-${hook_id}" 14 | ["dtbs"]="dtbs-${hook_id}.tar.gz" 15 | ) 16 | 17 | # Check if all the required files are present; if not, give instructions on how to build kernel and hook $hook_id 18 | for file in "${!hook_files[@]}"; do 19 | if [[ ! -f "out/hook/${hook_files[$file]}" ]]; then 20 | log error "Required file 'out/hook/${hook_files[$file]}' not found; please build the kernel and hook ${hook_id} first: ./build.sh kernel ${hook_id} && ./build.sh build ${hook_id}" 21 | exit 1 22 | fi 23 | done 24 | 25 | log info "Building rpi for hook ${hook_id}" 26 | 27 | # Prepare the base working directory in bootable/ 28 | declare bootable_dir="rpi" 29 | declare bootable_base_dir="bootable/${bootable_dir}" 30 | rm -rf "${bootable_base_dir}" 31 | mkdir -p "${bootable_base_dir}" 32 | 33 | # Prepare a directory that will be the root of the FAT32 partition 34 | declare fat32_root_dir="${bootable_base_dir}/fat32-root" 35 | mkdir -p "${fat32_root_dir}" 36 | 37 | # Kernel and initrd go directly in the root of the FAT32 partition 38 | cp -p "${debug_dash_v[@]}" "out/hook/vmlinuz-${hook_id}" "${fat32_root_dir}/vmlinuz" 39 | cp -p "${debug_dash_v[@]}" "out/hook/initramfs-${hook_id}" "${fat32_root_dir}/initrd.img" 40 | 41 | # Handle DTBs for rpi 42 | mkdir -p "${fat32_root_dir}/dtb" 43 | tar -C "${fat32_root_dir}/dtb" --strip-components=1 -xzf "out/hook/dtbs-${hook_id}.tar.gz" 44 | log_tree "${fat32_root_dir}" "debug" "State of the FAT32 directory pre-moving DTBs" 45 | 46 | # RPi: put DTBs directly in the fat32-root directory; overlays go into a subdirectory 47 | mv "${debug_dash_v[@]}" "${fat32_root_dir}/dtb/overlays" "${fat32_root_dir}/overlays" 48 | mv "${debug_dash_v[@]}" "${fat32_root_dir}/dtb/broadcom"/*.dtb "${fat32_root_dir}/" 49 | rm -rf "${fat32_root_dir}/dtb" 50 | log_tree "${fat32_root_dir}" "debug" "State of the FAT32 directory post-moving DTBs" 51 | 52 | # Write the Raspberry Pi firmware files 53 | rpi_write_binary_firmware_from_rpi_foundation "${fat32_root_dir}" 54 | rpi_write_config_txt "${fat32_root_dir}" 55 | rpi_write_cmdline_txt "${fat32_root_dir}" 56 | 57 | # Use a Dockerfile to assemble a GPT image, with a single FAT32 partition, containing the files in the fat32-root directory 58 | # This is common 
across all GPT-based bootable media; the only difference is the ESP flag, which is set for UEFI bootable media but not for Rockchip/RaspberryPi 59 | # The u-boot binaries are written _later_ in the process, after the image is created, using Armbian's helper scripts. 60 | create_image_fat32_root_from_dir "${bootable_base_dir}" "bootable-media-rpi.img" "${bootable_dir}/fat32-root" 61 | 62 | log info "Done building rpi bootable for hook ${hook_id}" 63 | output_bootable_media "${bootable_base_dir}/bootable-media-rpi.img" "hook-bootable-rpi.img" 64 | 65 | return 0 66 | 67 | } 68 | 69 | function rpi_write_binary_firmware_from_rpi_foundation() { 70 | declare rpi_firmware_base_url="https://raw.githubusercontent.com/raspberrypi/firmware/refs/tags/1.20241126/boot/" 71 | 72 | declare fat32_root_dir="${1}" 73 | declare -a rpi_firmware_files=( 74 | "bootcode.bin" 75 | "fixup4cd.dat" 76 | "fixup4.dat" 77 | "fixup4db.dat" 78 | "fixup4x.dat" 79 | "fixup_cd.dat" 80 | "fixup.dat" 81 | "fixup_db.dat" 82 | "fixup_x.dat" 83 | "LICENCE.broadcom" 84 | "start4cd.elf" 85 | "start4db.elf" 86 | "start4.elf" 87 | "start4x.elf" 88 | "start_cd.elf" 89 | "start_db.elf" 90 | "start.elf" 91 | "start_x.elf" 92 | ) 93 | # Download the Raspberry Pi firmware files from the Raspberry Pi Foundation's GitHub repo: 94 | for file in "${rpi_firmware_files[@]}"; do 95 | log info "Downloading ${file}..." 96 | curl -sL -o "${fat32_root_dir}/${file}" "${rpi_firmware_base_url}/${file}" 97 | done 98 | 99 | return 0 100 | } 101 | 102 | function rpi_write_config_txt() { 103 | declare fat32_root_dir="${1}" 104 | cat <<- RPI_CONFIG_TXT > "${fat32_root_dir}/config.txt" 105 | # For more options and information see http://rptl.io/configtxt 106 | auto_initramfs=1 107 | # bootloader logs to serial, second stage 108 | enable_uart=1 109 | # disable Bluetooth, as having it enabled causes issues with the serial console due to fake Broadcom UART 110 | dtoverlay=disable-bt 111 | dtoverlay=vc4-kms-v3d 112 | max_framebuffers=2 113 | disable_fw_kms_setup=1 114 | disable_overscan=1 115 | arm_boost=1 116 | [cm4] 117 | otg_mode=1 118 | [cm5] 119 | dtoverlay=dwc2,dr_mode=host 120 | [all] 121 | kernel=vmlinuz 122 | initramfs initrd.img followkernel 123 | arm_64bit=1 124 | RPI_CONFIG_TXT 125 | 126 | bat_language="ini" log_file_bat "${fat32_root_dir}/config.txt" "info" "Produced rpi config.txt" 127 | } 128 | 129 | function rpi_write_cmdline_txt() { 130 | declare -g -a bootable_tinkerbell_kernel_params=() 131 | fill_array_bootable_tinkerbell_kernel_parameters "rpi" 132 | declare tinkerbell_args="${bootable_tinkerbell_kernel_params[*]}" 133 | 134 | declare fat32_root_dir="${1}" 135 | cat <<- RPI_CMDLINE_TXT > "${fat32_root_dir}/cmdline.txt" 136 | console=tty1 console=ttyAMA0,115200 loglevel=7 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory ${tinkerbell_args} 137 | RPI_CMDLINE_TXT 138 | 139 | log_file_bat "${fat32_root_dir}/cmdline.txt" "info" "Produced rpi cmdline.txt" 140 | } 141 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | run: 2 | # The default runtime timeout is 1m, which doesn't work well on Github Actions. 3 | timeout: 4m 4 | 5 | # NOTE: This file is populated by the lint-install tool. Local adjustments may be overwritten. 
6 | linters-settings: 7 | cyclop: 8 | # NOTE: This is a very high transitional threshold 9 | max-complexity: 37 10 | package-average: 34.0 11 | skip-tests: true 12 | 13 | gocognit: 14 | # NOTE: This is a very high transitional threshold 15 | min-complexity: 98 16 | 17 | dupl: 18 | threshold: 200 19 | 20 | goconst: 21 | min-len: 4 22 | min-occurrences: 5 23 | ignore-tests: true 24 | 25 | gosec: 26 | excludes: 27 | - G107 # Potential HTTP request made with variable url 28 | - G204 # Subprocess launched with function call as argument or cmd arguments 29 | - G404 # Use of weak random number generator (math/rand instead of crypto/rand) 30 | 31 | errorlint: 32 | # these are still common in Go: for instance, exit errors. 33 | asserts: false 34 | # Forcing %w in error wrapping forces authors to make errors part of their package APIs. Making 35 | # an error part of a package API should be a conscious decision by the author. 36 | # Also see Hyrum's Law. 37 | errorf: false 38 | 39 | exhaustive: 40 | default-signifies-exhaustive: true 41 | 42 | nestif: 43 | min-complexity: 8 44 | 45 | nolintlint: 46 | require-explanation: true 47 | allow-unused: false 48 | require-specific: true 49 | 50 | revive: 51 | ignore-generated-header: true 52 | severity: warning 53 | rules: 54 | - name: atomic 55 | - name: blank-imports 56 | - name: bool-literal-in-expr 57 | - name: confusing-naming 58 | - name: constant-logical-expr 59 | - name: context-as-argument 60 | - name: context-keys-type 61 | - name: deep-exit 62 | - name: defer 63 | - name: range-val-in-closure 64 | - name: range-val-address 65 | - name: dot-imports 66 | - name: error-naming 67 | - name: error-return 68 | - name: error-strings 69 | - name: errorf 70 | - name: exported 71 | - name: identical-branches 72 | - name: if-return 73 | - name: import-shadowing 74 | - name: increment-decrement 75 | - name: indent-error-flow 76 | - name: indent-error-flow 77 | - name: package-comments 78 | - name: range 79 | - name: receiver-naming 80 | - name: redefines-builtin-id 81 | - name: superfluous-else 82 | - name: struct-tag 83 | - name: time-naming 84 | - name: unexported-naming 85 | - name: unexported-return 86 | - name: unnecessary-stmt 87 | - name: unreachable-code 88 | - name: unused-parameter 89 | - name: var-declaration 90 | - name: var-naming 91 | - name: unconditional-recursion 92 | - name: waitgroup-by-value 93 | 94 | staticcheck: 95 | go: "1.17" 96 | 97 | unused: 98 | go: "1.17" 99 | 100 | output: 101 | sort-results: true 102 | 103 | linters: 104 | disable-all: true 105 | enable: 106 | - asciicheck 107 | - bodyclose 108 | - cyclop 109 | - deadcode 110 | - dogsled 111 | - dupl 112 | - durationcheck 113 | - errcheck 114 | - errname 115 | - errorlint 116 | - exhaustive 117 | - exportloopref 118 | - forcetypeassert 119 | - gocognit 120 | - goconst 121 | - gocritic 122 | - godot 123 | - gofmt 124 | - gofumpt 125 | - gosec 126 | - goheader 127 | - goimports 128 | - goprintffuncname 129 | - gosimple 130 | - govet 131 | - ifshort 132 | - importas 133 | - ineffassign 134 | - makezero 135 | - misspell 136 | - nakedret 137 | - nestif 138 | - nilerr 139 | - noctx 140 | - nolintlint 141 | - predeclared 142 | # disabling for the initial iteration of the linting tool 143 | # - promlinter 144 | - revive 145 | - rowserrcheck 146 | - sqlclosecheck 147 | - staticcheck 148 | - structcheck 149 | - stylecheck 150 | - thelper 151 | - tparallel 152 | - typecheck 153 | - unconvert 154 | - unparam 155 | - unused 156 | - varcheck 157 | - wastedassign 158 | - whitespace 159 
| 160 | # Disabled linters, due to being misaligned with Go practices 161 | # - exhaustivestruct 162 | # - gochecknoglobals 163 | # - gochecknoinits 164 | # - goconst 165 | # - godox 166 | # - goerr113 167 | # - gomnd 168 | # - lll 169 | # - nlreturn 170 | # - testpackage 171 | # - wsl 172 | # Disabled linters, due to not being relevant to our code base: 173 | # - maligned 174 | # - prealloc "For most programs usage of prealloc will be a premature optimization." 175 | # Disabled linters due to bad error messages or bugs 176 | # - tagliatelle 177 | 178 | issues: 179 | # Excluding configuration per-path, per-linter, per-text and per-source 180 | exclude-rules: 181 | - path: _test\.go 182 | linters: 183 | - dupl 184 | - errcheck 185 | - forcetypeassert 186 | - gocyclo 187 | - gosec 188 | - noctx 189 | 190 | - path: .*cmd.* 191 | linters: 192 | - noctx 193 | 194 | - path: main\.go 195 | linters: 196 | - noctx 197 | 198 | - path: .*cmd.* 199 | text: "deep-exit" 200 | 201 | - path: main\.go 202 | text: "deep-exit" 203 | 204 | # This check is of questionable value 205 | - linters: 206 | - tparallel 207 | text: "call t.Parallel on the top level as well as its subtests" 208 | 209 | # Don't hide lint issues just because there are many of them 210 | max-same-issues: 0 211 | max-issues-per-linter: 0 212 | -------------------------------------------------------------------------------- /bash/inventory.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function produce_kernels_flavours_inventory() { 4 | declare -g -A inventory_dict=() 5 | declare -g -A bootable_inventory_dict=() 6 | 7 | produce_default_kernel_inventory 8 | produce_armbian_kernel_inventory 9 | 10 | # if a function `produce_custom_kernel_inventory` exists, call it. 11 | if type -t produce_custom_kernel_inventory &> /dev/null; then 12 | log info "Custom kernel inventory function found, calling it." 
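		# A sketch of what such a function could look like (the ID and values are hypothetical),
		# mirroring the define_id calls of the built-in inventories below:
		#   function produce_custom_kernel_inventory() {
		#       define_id "hook-custom-amd64" METHOD='default' ARCH='x86_64' TAG='custom' \
		#           KERNEL_MAJOR='6' KERNEL_MINOR='6' KCONFIG='generic'
		#   }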
13 | produce_custom_kernel_inventory 14 | fi 15 | 16 | # extract keys & make readonly 17 | declare -g -a -r inventory_ids=("${!inventory_dict[@]}") # extract the _keys_ from the inventory_dict 18 | declare -g -A -r inventory_dict # make inventory_dict readonly 19 | declare -g -a -r bootable_inventory_ids=("${!bootable_inventory_dict[@]}") # extract the _keys_ from the bootable_inventory_dict 20 | declare -g -A -r bootable_inventory_dict # make bootable_inventory_dict readonly 21 | 22 | return 0 23 | } 24 | 25 | function produce_default_kernel_inventory() { 26 | ##### METHOD=default; Hook's own kernel, in kernel/ directory 27 | ## Hook default kernel, source code stored in `kernel` dir in this repo -- currently 5.10.y 28 | define_id "hook-default-amd64" METHOD='default' ARCH='x86_64' TAG='standard' SUPPORTS_ISO='yes' \ 29 | KERNEL_MAJOR='5' KERNEL_MINOR='10' KCONFIG='generic' 30 | add_bootable_id "grub-amd64" HANDLER='grub' SERIAL_CONSOLE='ttyS0' TAG='standard' 31 | 32 | define_id "hook-default-arm64" METHOD='default' ARCH='aarch64' TAG='standard' SUPPORTS_ISO='yes' \ 33 | KERNEL_MAJOR='5' KERNEL_MINOR='10' KCONFIG='generic' 34 | add_bootable_id "grub-arm64" HANDLER='grub' SERIAL_CONSOLE='ttyAMA0' TAG='standard' 35 | 36 | ## A 'peg' is not really a 'hook': it's for development purposes, testing new LK versions and simpler LK configurations using the default kernel 37 | define_id "peg-default-amd64" METHOD='default' ARCH='x86_64' TAG='dev' \ 38 | USE_KERNEL_ID='hook-default-amd64' TEMPLATE='peg' LINUXKIT_VERSION='1.2.0' \ 39 | KERNEL_MAJOR='5' KERNEL_MINOR='10' KCONFIG='generic' 40 | 41 | ## development purposes: trying out kernel 6.6.y 42 | define_id "hook-latest-lts-amd64" METHOD='default' ARCH='x86_64' TAG='lts' SUPPORTS_ISO='yes' \ 43 | KERNEL_MAJOR='6' KERNEL_MINOR='6' KCONFIG='generic' FORCE_OUTPUT_ID='latest-lts' 44 | add_bootable_id "grub-latest-lts-amd64" SERIAL_CONSOLE='ttyS0' HANDLER='grub' TAG='lts' 45 | 46 | define_id "hook-latest-lts-arm64" METHOD='default' ARCH='aarch64' TAG='lts' SUPPORTS_ISO='yes' \ 47 | KERNEL_MAJOR='6' KERNEL_MINOR='6' KCONFIG='generic' FORCE_OUTPUT_ID='latest-lts' 48 | add_bootable_id "grub-latest-lts-arm64" SERIAL_CONSOLE='ttyAMA0' HANDLER='grub' TAG='lts' 49 | } 50 | 51 | ##### METHOD=armbian; Foreign kernels, taken from Armbian's OCI repos. Those are "exotic" kernels for certain SoCs. 52 | # edge = (release candidates or stable but rarely LTS, more aggressive patching) 53 | # current = (LTS kernels, stable-ish patching) 54 | # vendor/legacy = (vendor/BSP kernels, stable patching, NOT mainline, not frequently rebased) 55 | # Check https://github.com/orgs/armbian/packages?tab=packages&q=kernel- for possibilities 56 | # nb: when ARMBIAN_KERNEL_VERSION is not set, the first tag returned is used (high traffic, low cache hit rate); 57 | # one might set e.g. ARMBIAN_KERNEL_VERSION='6.7.10-xxxx' to use a fixed version. 
58 | function produce_armbian_kernel_inventory() { 59 | ### SBC-oriented: 60 | ## Armbian meson64 (Amlogic) edge Khadas VIM3/3L, Radxa Zero/2, LibreComputer Potatos, and many more 61 | define_id "armbian-meson64-edge" METHOD='armbian' ARCH='aarch64' TAG='armbian-sbc' ARMBIAN_KERNEL_ARTIFACT='kernel-meson64-edge' 62 | add_bootable_id "uboot-aml" HANDLER='armbian_uboot_amlogic' TAG='armbian-sbc' UBOOT_TYPE='extlinux' CONSOLE_EXTRA_ARGS=',115200' # all meson64, mainline kernel and u-boot, uses extlinux to boot 63 | 64 | ## Armbian bcm2711 (Broadcom) current, from RaspberryPi Foundation with many CNCF-landscape fixes and patches; for the RaspberryPi 3b+/4b/5 65 | define_id "armbian-bcm2711-current" METHOD='armbian' ARCH='aarch64' TAG='armbian-sbc' ARMBIAN_KERNEL_ARTIFACT='kernel-bcm2711-current' 66 | add_bootable_id "rpi" HANDLER='rpi_firmware' TAG='armbian-sbc' 67 | 68 | ## Armbian rockchip64 (Rockchip) edge, for many rk356x/3399 SoCs. As of late December 2024, also for rk3588. 69 | define_id "armbian-rockchip64-edge" METHOD='armbian' ARCH='aarch64' TAG='armbian-sbc' ARMBIAN_KERNEL_ARTIFACT='kernel-rockchip64-edge' 70 | add_bootable_id "uboot-rk" HANDLER='armbian_uboot_rockchip' TAG='armbian-sbc' UBOOT_TYPE='extlinux' CONSOLE_EXTRA_ARGS=',1500000' # rk3588, mainline u-boot, uses extlinux to boot 71 | 72 | ## Armbian rk35xx (Rockchip) vendor, for rk3566, rk3568, rk3588, rk3588s SoCs -- 6.1-rkr4.1 - BSP / vendor kernel, roughly equivalent to Android's 6.1.84 73 | # Use with edk2 (v0.9.1+) or mainline u-boot + EFI: matches the DT included in https://github.com/edk2-porting/edk2-rk3588 _after_ v0.9.1 74 | define_id "armbian-rk35xx-vendor" METHOD='armbian' ARCH='aarch64' TAG='armbian-sbc' ARMBIAN_KERNEL_ARTIFACT='kernel-rk35xx-vendor' 75 | add_bootable_id "uboot-rk35xx-vendor" HANDLER='armbian_uboot_rockchip_vendor' TAG='armbian-sbc' CONSOLE_EXTRA_ARGS=',1500000' 76 | 77 | ### Armbian mainline Generic UEFI kernels, for EFI capable machines might use those: 78 | ## Armbian generic edge UEFI kernel for arm64 79 | define_id "armbian-uefi-arm64-edge" METHOD='armbian' ARCH='aarch64' TAG='standard armbian-uefi' ARMBIAN_KERNEL_ARTIFACT='kernel-arm64-edge' 80 | add_bootable_id "grub-armbian-uefi-arm64" HANDLER='grub' SERIAL_CONSOLE='ttyAMA0' DTB='yes' TAG='standard' 81 | 82 | ## Armbian generic edge UEFI kernel (Armbian calls it x86) 83 | define_id "armbian-uefi-x86-edge" METHOD='armbian' ARCH='x86_64' TAG='standard armbian-uefi' ARMBIAN_KERNEL_ARTIFACT='kernel-x86-edge' 84 | add_bootable_id "grub-armbian-uefi-amd64" HANDLER='grub' SERIAL_CONSOLE='ttyS0' TAG='standard' 85 | } 86 | -------------------------------------------------------------------------------- /bash/hook-lk-containers.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function build_all_hook_linuxkit_containers() { 4 | log info "Building all LinuxKit containers..." 5 | : "${DOCKER_ARCH:?"ERROR: DOCKER_ARCH is not defined"}" 6 | 7 | # when adding new container builds here you'll also want to add them to the 8 | # `linuxkit_build` function in the linuxkit.sh file. 
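	# For instance, a new container would be wired in with one more call like those below
	# (the hook-example name and template variable are hypothetical):
	#   build_hook_linuxkit_container hook-example "HOOK_CONTAINER_EXAMPLE_IMAGE" "${EXPORT_LK_CONTAINERS}" "${EXPORT_LK_CONTAINERS_DIR}"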
9 | # NOTE: linuxkit containers must be in the images/ directory 10 | build_hook_linuxkit_container hook-bootkit "HOOK_CONTAINER_BOOTKIT_IMAGE" "${EXPORT_LK_CONTAINERS}" "${EXPORT_LK_CONTAINERS_DIR}" 11 | build_hook_linuxkit_container hook-docker "HOOK_CONTAINER_DOCKER_IMAGE" "${EXPORT_LK_CONTAINERS}" "${EXPORT_LK_CONTAINERS_DIR}" 12 | build_hook_linuxkit_container hook-udev "HOOK_CONTAINER_UDEV_IMAGE" "${EXPORT_LK_CONTAINERS}" "${EXPORT_LK_CONTAINERS_DIR}" 13 | build_hook_linuxkit_container hook-acpid "HOOK_CONTAINER_ACPID_IMAGE" "${EXPORT_LK_CONTAINERS}" "${EXPORT_LK_CONTAINERS_DIR}" 14 | build_hook_linuxkit_container hook-containerd "HOOK_CONTAINER_CONTAINERD_IMAGE" "${EXPORT_LK_CONTAINERS}" "${EXPORT_LK_CONTAINERS_DIR}" 15 | build_hook_linuxkit_container hook-runc "HOOK_CONTAINER_RUNC_IMAGE" "${EXPORT_LK_CONTAINERS}" "${EXPORT_LK_CONTAINERS_DIR}" 16 | build_hook_linuxkit_container hook-embedded "HOOK_CONTAINER_EMBEDDED_IMAGE" "${EXPORT_LK_CONTAINERS}" "${EXPORT_LK_CONTAINERS_DIR}" 17 | } 18 | 19 | function build_hook_linuxkit_container() { 20 | declare container_dir="${1}" 21 | declare template_var="${2}" # key into hook_template_vars; effectively an output variable 22 | declare container_base_dir="images" 23 | declare export_container_images="${3:-false}" 24 | declare export_container_images_dir="${4:-/tmp}" 25 | 26 | # Let's hash the contents of the directory and use that as a tag 27 | declare container_files_hash 28 | # NOTE: linuxkit containers must be in the images/ directory 29 | container_files_hash="$(find "${container_base_dir}/${container_dir}" -type f -print0 | LC_ALL=C sort -z | xargs -0 sha256sum | sha256sum | cut -d' ' -f1)" 30 | declare container_files_hash_short="${container_files_hash:0:8}" 31 | 32 | declare container_oci_ref="${HOOK_LK_CONTAINERS_OCI_BASE}${container_dir}:${container_files_hash_short}-${DOCKER_ARCH}" 33 | log info "Consider building LK container ${container_oci_ref} from ${container_base_dir}/${container_dir} for platform ${DOCKER_ARCH}" 34 | hook_template_vars["${template_var}"]="${container_oci_ref}" # set the template var for envsubst 35 | 36 | # If the image is in the local docker cache, skip building 37 | log debug "Checking if image ${container_oci_ref} exists in the local Docker image cache" 38 | if [[ -n "$(docker images -q "${container_oci_ref}")" ]]; then 39 | log info "Image ${container_oci_ref} exists in the local Docker image cache, skipping build" 40 | # we try to push here because a previous build may have created the image 41 | # this is the case for GitHub Actions CI because we build PRs on the same self-hosted runner 42 | push_hook_linuxkit_container "${container_oci_ref}" 43 | 44 | # If export_container_images=yes then export images as tar.gzs to export_container_images_dir 45 | # This is mainly for CI to be able to pass built images between jobs 46 | if [[ "${export_container_images}" == "yes" ]]; then 47 | save_docker_image_to_tar_gz "${container_oci_ref}" "${export_container_images_dir}" 48 | fi 49 | return 0 50 | fi 51 | 52 | # Check if we can pull the image from registry; if so, skip the build. 
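	# Since the tag embeds a short hash of the container directory contents (container_files_hash_short above),
	# a successful pull means these exact inputs were already built and pushed.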
53 | log debug "Checking if image ${container_oci_ref} can be pulled from remote registry" 54 | if docker pull "${container_oci_ref}"; then 55 | log info "Image ${container_oci_ref} pulled from remote registry, skipping build" 56 | # If export_container_images=yes then export images as tar.gzs to export_container_images_dir 57 | # This is mainly for CI to be able to pass built images between jobs 58 | if [[ "${export_container_images}" == "yes" ]]; then 59 | save_docker_image_to_tar_gz "${container_oci_ref}" "${export_container_images_dir}" 60 | fi 61 | return 0 62 | fi 63 | 64 | # If environment DO_BUILD_LK_CONTAINERS=no, we're being asked NOT to build this. Exit with an error. 65 | if [[ "${DO_BUILD_LK_CONTAINERS}" == "no" ]]; then 66 | log error "DO_BUILD_LK_CONTAINERS is set to 'no'; not building ${container_oci_ref}" 67 | exit 9 68 | fi 69 | 70 | log info "Building ${container_oci_ref} from ${container_base_dir}/${container_dir} for platform ${DOCKER_ARCH}" 71 | ( 72 | cd "${container_base_dir}/${container_dir}" || exit 1 73 | docker buildx build --load "--progress=${DOCKER_BUILDX_PROGRESS_TYPE}" -t "${container_oci_ref}" --platform "linux/${DOCKER_ARCH}" . 74 | ) 75 | 76 | log info "Built ${container_oci_ref} from ${container_base_dir}/${container_dir} for platform ${DOCKER_ARCH}" 77 | 78 | push_hook_linuxkit_container "${container_oci_ref}" 79 | 80 | # If export_container_images=yes then export images as tar.gzs to export_container_images_dir 81 | # This is mainly for CI to be able to pass built images between jobs 82 | if [[ "${export_container_images}" == "yes" ]]; then 83 | save_docker_image_to_tar_gz "${container_oci_ref}" "${export_container_images_dir}" 84 | fi 85 | 86 | return 0 87 | } 88 | 89 | function save_docker_image_to_tar_gz() { 90 | declare container_oci_ref="${1}" 91 | declare export_dir="${2:-/tmp}" 92 | 93 | # Create the export directory if it doesn't exist 94 | mkdir -p "${export_dir}" 95 | 96 | # Save the Docker image as a tar.gz file 97 | docker save "${container_oci_ref}" | gzip > "${export_dir}/$(basename "${container_oci_ref}" | sed 's/:/-/g').tar.gz" 98 | log info "Saved Docker image ${container_oci_ref} to ${export_dir}/$(basename "${container_oci_ref}" | sed 's/:/-/g').tar.gz" 99 | } 100 | 101 | function push_hook_linuxkit_container() { 102 | declare container_oci_ref="${1}" 103 | 104 | # Push the image to the registry, if DO_PUSH is set to yes 105 | if [[ "${DO_PUSH}" == "yes" ]]; then 106 | docker push "${container_oci_ref}" || { 107 | log error "Failed to push ${container_oci_ref} to registry" 108 | exit 33 109 | } 110 | else 111 | log info "Skipping push of ${container_oci_ref} to registry; set DO_PUSH=yes to push." 112 | fi 113 | } 114 | -------------------------------------------------------------------------------- /bash/kernel/kernel_armbian.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | declare -g ARMBIAN_BASE_ORAS_REF="${ARMBIAN_BASE_ORAS_REF:-"ghcr.io/armbian/os"}" 4 | 5 | function obtain_kernel_output_id_armbian() { 6 | : "${inventory_id:?"ERROR: inventory_id is not defined"}" 7 | # output ID is just the inventory_id 8 | declare -g OUTPUT_ID="${inventory_id}" 9 | } 10 | 11 | function calculate_kernel_version_armbian() { 12 | : "${inventory_id:?"ERROR: inventory_id is not defined"}" 13 | log info "Calculating version of Armbian kernel..." 
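	# Armbian kernel tags look like "6.1.84-Sxxxx" (cf. the matching regex below); the part before
	# the dash becomes ARMBIAN_KERNEL_MAJOR_MINOR_POINT further down.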
14 | 15 | declare -g ARMBIAN_KERNEL_BASE_ORAS_REF="${ARMBIAN_BASE_ORAS_REF}/${ARMBIAN_KERNEL_ARTIFACT}" 16 | 17 | # If ARMBIAN_KERNEL_VERSION is unset, use the latest available kernel tag 18 | if [[ -z "${ARMBIAN_KERNEL_VERSION}" ]]; then 19 | log info "ARMBIAN_KERNEL_VERSION is unset, obtaining the most recently pushed-to tag of ${ARMBIAN_KERNEL_BASE_ORAS_REF}" 20 | declare latest_tag_for_docker_image 21 | get_latest_tag_for_docker_image_using_skopeo "${ARMBIAN_KERNEL_BASE_ORAS_REF}" ".\-S..." # regex to match the tag, like "6.1.84-Sxxxx" 22 | ARMBIAN_KERNEL_VERSION="${latest_tag_for_docker_image}" 23 | log info "Using most recent Armbian kernel tag: ${ARMBIAN_KERNEL_VERSION}" 24 | fi 25 | 26 | obtain_kernel_output_id_armbian 27 | 28 | declare -g ARMBIAN_KERNEL_FULL_ORAS_REF_DEB_TAR="${ARMBIAN_KERNEL_BASE_ORAS_REF}:${ARMBIAN_KERNEL_VERSION}" 29 | declare -g ARMBIAN_KERNEL_MAJOR_MINOR_POINT="unknown" 30 | ARMBIAN_KERNEL_MAJOR_MINOR_POINT="$(echo -n "${ARMBIAN_KERNEL_VERSION}" | cut -d "-" -f 1)" 31 | log info "ARMBIAN_KERNEL_MAJOR_MINOR_POINT: ${ARMBIAN_KERNEL_MAJOR_MINOR_POINT}" 32 | 33 | # A helper script, as escaping bash into a RUN command in Dockerfile is a pain; included in input_hash later 34 | declare dockerfile_helper_filename="undefined.sh" 35 | produce_dockerfile_helper_apt_oras "kernel/" # will create the helper script in kernel/ directory; sets dockerfile_helper_filename 36 | 37 | # Let's create a Dockerfile that will be used to obtain the artifacts needed, using ORAS binary 38 | declare -g ARMBIAN_KERNEL_DOCKERFILE="kernel/Dockerfile.autogen.armbian.${inventory_id}" 39 | echo "Creating Dockerfile '${ARMBIAN_KERNEL_DOCKERFILE}'... " 40 | cat <<- ARMBIAN_ORAS_DOCKERFILE > "${ARMBIAN_KERNEL_DOCKERFILE}" 41 | FROM debian:stable AS downloader 42 | # Call the helper to install curl, oras, and dpkg-dev 43 | ADD ./${dockerfile_helper_filename} /apt-oras-helper.sh 44 | RUN bash /apt-oras-helper.sh 45 | 46 | FROM downloader AS downloaded 47 | 48 | # create the output dir 49 | WORKDIR /armbian/output 50 | RUN echo getting kernel from ${ARMBIAN_KERNEL_FULL_ORAS_REF_DEB_TAR} 51 | 52 | WORKDIR /armbian 53 | 54 | # Pull the image from oras. This will contain a .tar file... 55 | RUN oras pull "${ARMBIAN_KERNEL_FULL_ORAS_REF_DEB_TAR}" 56 | 57 | # ... extract the .tar file to get .deb packages in the "global" subdir... 58 | RUN tar -xvf *.tar 59 | WORKDIR /armbian/global 60 | 61 | # ... extract the contents of the .deb packages linux-image-* ... 62 | RUN dpkg-deb --extract linux-image-*.deb /armbian/image 63 | 64 | WORKDIR /armbian/image 65 | 66 | # Get the kernel image... 67 | RUN cp -v boot/vmlinuz* /armbian/output/kernel 68 | 69 | # Create a tarball with the modules in lib. 70 | # Important: this tarball needs to have permissions for the root directory included! Otherwise linuxkit rootfs will have the wrong permissions on / (root) 71 | WORKDIR /armbian/modules_only 72 | RUN mv /armbian/image/lib /armbian/modules_only/ 73 | RUN echo "Before cleaning: " && du -h -d 10 -x lib/modules | sort -h | tail -n 20 74 | # Trim the kernel modules to save space; hopefully your required hardware is not included here 75 | RUN rm -rf ./lib/modules/*/kernel/drivers/net/wireless ./lib/modules/*/kernel/sound ./lib/modules/*/kernel/drivers/media 76 | RUN rm -rf ./lib/modules/*/kernel/drivers/infiniband 77 | RUN echo "After cleaning: " && du -h -d 10 -x lib/modules | sort -h | tail -n 20 78 | RUN tar -cf /armbian/output/kernel.tar . 
79 | 80 | # Create a tarball with the dtbs in usr/lib/linux-image-* 81 | WORKDIR /armbian/image 82 | RUN { cd usr/lib/linux-image-* || { echo "No DTBS for this arch, empty tar..." && mkdir -p usr/lib/linux-image-no-dtbs && cd usr/lib/linux-image-* ; } ; } && pwd && du -h -d 1 . && tar -czf /armbian/output/dtbs.tar.gz . && ls -lah /armbian/output/dtbs.tar.gz 83 | 84 | # Show the contents of the output dir 85 | WORKDIR /armbian/output 86 | RUN ls -lahtS 87 | 88 | # Output layer should be in the layout expected by LinuxKit (dtbs.tar.gz is ignored) 89 | FROM scratch 90 | COPY --from=downloaded /armbian/output/* / 91 | ARMBIAN_ORAS_DOCKERFILE 92 | 93 | declare input_hash="" short_input_hash="" 94 | input_hash="$(cat "${ARMBIAN_KERNEL_DOCKERFILE}" "kernel/${dockerfile_helper_filename}" | sha256sum - | cut -d ' ' -f 1)" 95 | short_input_hash="${input_hash:0:8}" 96 | kernel_oci_version="${ARMBIAN_KERNEL_MAJOR_MINOR_POINT}-${short_input_hash}" 97 | armbian_type="${inventory_id#"armbian-"}" # remove the 'armbian-' prefix from inventory_id, but keep the rest. "uefi" has "current/edge" and "arm64/x86" variants. 98 | kernel_oci_image="${HOOK_KERNEL_OCI_BASE}-armbian:${kernel_oci_version}-${armbian_type}" 99 | log info "kernel_oci_version: ${kernel_oci_version}" 100 | log info "kernel_oci_image: ${kernel_oci_image}" 101 | } 102 | 103 | function build_kernel_armbian() { 104 | log info "Building armbian kernel from deb-tar at ${ARMBIAN_KERNEL_FULL_ORAS_REF_DEB_TAR}" 105 | log info "Will build Dockerfile ${ARMBIAN_KERNEL_DOCKERFILE}" 106 | 107 | # Don't specify platform, our Dockerfile is multiarch, thus you can build x86 kernels on arm64 hosts and vice-versa ... 108 | docker buildx build --load "--progress=${DOCKER_BUILDX_PROGRESS_TYPE}" -t "${kernel_oci_image}" -f "${ARMBIAN_KERNEL_DOCKERFILE}" kernel 109 | # .. but enforce the target arch for LK in the final image via dump/edit-manifests/reimport trick 110 | ensure_docker_image_architecture "${kernel_oci_image}" "${kernel_info['DOCKER_ARCH']}" 111 | } 112 | 113 | function configure_kernel_armbian() { 114 | log error "Can't configure Armbian kernels from Hook, since they're prebuilt externally." 115 | log warn "Armbian kernel configs are at https://github.com/armbian/build/tree/main/config/kernel" 116 | exit 3 117 | } 118 | -------------------------------------------------------------------------------- /bash/bootable-media.sh: -------------------------------------------------------------------------------- 1 | function build_bootable_media() { 2 | log debug "build_bootable_media called with: '${*}'" 3 | 4 | declare -r -g bootable_id="${1}" # read-only variable from here 5 | 6 | # Check if the bootable_id is set, otherwise bomb 7 | if [[ -z "${bootable_id}" ]]; then 8 | log error "No bootable_id specified; please specify one of: ${bootable_inventory_ids[*]}" 9 | exit 1 10 | fi 11 | 12 | declare -g -A bootable_info=() 13 | get_bootable_info_dict "${bootable_id}" 14 | 15 | # Dump the bootable_info dict 16 | log debug "bootable_info: $(declare -p bootable_info)" 17 | 18 | # Get the kernel info from the bootable_info INVENTORY_ID 19 | declare -g -A kernel_info=() 20 | declare -g inventory_id="${bootable_info['INVENTORY_ID']}" 21 | get_kernel_info_dict "${inventory_id}" 22 | log debug "kernel_info: $(declare -p kernel_info)" 23 | set_kernel_vars_from_info_dict 24 | kernel_obtain_output_id # sets OUTPUT_ID 25 | 26 | # A few scenarios we want to support: 27 | # A) UEFI bootable media; GPT + ESP, FAT32, GRUB, kernel/initrd, grub.conf + some kernel command line. 
28 | # B) RPi 3b/4/5 bootable media; GPT, non-ESP partition, FAT32, kernel/initrd, config.txt, cmdline.txt + some kernel command line. 29 | # C) Rockchip bootable media; GPT, non-ESP partition, FAT32, extlinux.conf + some kernel command line; write u-boot bin on top of GPT via Armbian sh 30 | # D) Amlogic bootable media; MBR, FAT32, extlinux.conf + some kernel command line; write u-boot bin on top of MBR via Armbian sh 31 | 32 | # General process: 33 | # Obtain extra variables from environment (BOARD/BRANCH for armbian); optional. 34 | # Obtain the latest Armbian u-boot version from the OCI registry, using Skopeo. 35 | # 1) (C/D) Obtain the u-boot artifact binaries using ORAS, given the version above; massage using Docker and extract the binaries. 36 | # 1) (A) Obtain grub somehow; LinuxKit has the binaries ready-to-go in a Docker image. 37 | # 1) (B) Obtain the rpi firmware files (bootcode.bin, start.elf, fixup.dat) from the RaspberryPi Foundation 38 | # 2) Prepare the FAT32 contents; kernel/initrd, grub.conf, config.txt, cmdline.txt, extlinux.conf depending on scenario 39 | # 3) Create a GPT+ESP, GPT+non-ESP, or MBR partition table image with the contents of the FAT32 (use libguestfs) 40 | # 4) For the scenarios with u-boot, write u-boot binaries to the correct offsets in the image. 41 | 42 | # @TODO: possibly make sure the kernel and lk are built before delegating? 43 | 44 | # Call the bootable build function 45 | declare bootable_build_func="${bootable_info['BOOTABLE_BUILD_FUNC']}" 46 | log info "Calling bootable build function: ${bootable_build_func}" 47 | "${bootable_build_func}" 48 | 49 | } 50 | 51 | function get_bootable_info_dict() { 52 | declare bootable="${1}" 53 | declare bootable_data_str="${bootable_inventory_dict[${bootable}]}" 54 | if [[ -z "${bootable_data_str}" ]]; then 55 | log error "No bootable data found for '${bootable}'; valid ones are: ${bootable_inventory_ids[*]} " 56 | exit 1 57 | fi 58 | log debug "Bootable data for '${bootable}': ${bootable_data_str}" 59 | declare -g -A bootable_info 60 | eval "bootable_info=(${bootable_data_str})" 61 | 62 | # Post process; calculate bash function names given the handler 63 | bootable_info['BOOTABLE_LIST_FUNC']="list_bootable_${bootable_info['HANDLER']}" 64 | bootable_info['BOOTABLE_BUILD_FUNC']="build_bootable_${bootable_info['HANDLER']}" 65 | 66 | # Ensure bootable_info has a valid TAG 67 | if [[ -z "${bootable_info['TAG']}" ]]; then 68 | log error "No TAG found for bootable '${bootable}'" 69 | exit 1 70 | fi 71 | } 72 | 73 | function output_bootable_media() { 74 | declare input_file="${1}" 75 | declare output_fn="${2}" 76 | declare full_output_fn="out/${output_fn}.xz" 77 | 78 | # If CARD_DEVICE is set, write the image to the device; otherwise, compress it 79 | if [[ -n "${CARD_DEVICE}" ]]; then 80 | write_image_to_device "${input_file}" "${CARD_DEVICE}" 81 | log info "Wrote image file ${input_file} to device ${CARD_DEVICE}; done." 82 | return 0 83 | fi 84 | 85 | declare human_size_input_file="" 86 | human_size_input_file="$(du -h "${input_file}" | awk '{print $1}')" 87 | 88 | # Use pixz to compress the image; use all CPU cores, default compression level 89 | log info "Compressing image file ${input_file} (${human_size_input_file}) to ${full_output_fn} -- wait..." 
90 | pixz -i "${input_file}" -o "${full_output_fn}" 91 | 92 | declare human_size_output_file="" 93 | human_size_output_file="$(du -h "${full_output_fn}" | awk '{print $1}')" 94 | log info "Compressed image file to ${full_output_fn} (${human_size_output_file})" 95 | 96 | return 0 97 | } 98 | 99 | function write_image_to_device() { 100 | local image_file="${1}" 101 | local device="${2}" 102 | if [[ -b "${device}" && -f "${image_file}" ]]; then 103 | log info "Writing image file ${image_file} to device ${device}" 104 | pv -p -b -r -c -N "dd" "${image_file}" | dd "of=${device}" bs=1M iflag=fullblock oflag=direct status=none 105 | log info "Waiting for fsync()..." 106 | sync 107 | else 108 | if [[ -n ${device} ]]; then 109 | log error "Device ${device} not found or image file ${image_file} not found" 110 | exit 3 111 | fi 112 | fi 113 | } 114 | 115 | function fill_array_bootable_tinkerbell_kernel_parameters() { 116 | declare -g -a bootable_tinkerbell_kernel_params=() # output global var 117 | declare -r board_id="${1}" # board_id is the first argument 118 | 119 | declare TINK_WORKER_IMAGE="${TINK_WORKER_IMAGE:-"ghcr.io/tinkerbell/tink-agent:latest"}" 120 | declare TINK_TLS="${TINK_TLS:-"false"}" 121 | declare TINK_GRPC_PORT="${TINK_GRPC_PORT:-"42113"}" 122 | declare TINK_SERVER="${TINK_SERVER:-"tinkerbell"}" # export TINK_SERVER="192.168.66.75" 123 | declare WORKER_ID="${WORKER_ID:-"${board_id}"}" # export WORKER_ID="11:22:33:44:55:66" 124 | 125 | log info "WORKER_ID is set to '${WORKER_ID}'" 126 | log info "TINK_WORKER_IMAGE is set to '${TINK_WORKER_IMAGE}'" 127 | log info "TINK_SERVER is set to '${TINK_SERVER}'" 128 | log info "TINK_TLS is set to '${TINK_TLS}'" 129 | log info "TINK_GRPC_PORT is set to '${TINK_GRPC_PORT}'" 130 | 131 | bootable_tinkerbell_kernel_params+=( 132 | "worker_id=${WORKER_ID}" 133 | "tink_worker_image=${TINK_WORKER_IMAGE}" 134 | "grpc_authority=${TINK_SERVER}:${TINK_GRPC_PORT}" 135 | "tinkerbell_tls=${TINK_TLS}" 136 | "syslog_host=${TINK_SERVER}" 137 | ) 138 | } 139 | -------------------------------------------------------------------------------- /bash/kernel.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function obtain_kernel_data_from_id() { 4 | declare -g -A kernel_info=() 5 | declare -g kernel_oci_version="" kernel_oci_image="" 6 | 7 | log debug "Obtaining kernel data for kernel ID: '${1}'" 8 | 9 | get_kernel_info_dict "${1}" 10 | set_kernel_vars_from_info_dict 11 | kernel_calculate_version 12 | 13 | return 0 14 | } 15 | 16 | function kernel_obtain_output_id() { 17 | log debug "Running obtain output id method: ${kernel_info[OUTPUT_ID_FUNC]}" 18 | "${kernel_info[OUTPUT_ID_FUNC]}" 19 | 20 | return 0 21 | 22 | } 23 | 24 | function kernel_calculate_version() { 25 | log debug "Running calculate version method: ${kernel_info[VERSION_FUNC]}" 26 | "${kernel_info[VERSION_FUNC]}" 27 | 28 | return 0 29 | } 30 | 31 | function kernel_build() { 32 | if [[ "${FORCE_BUILD_KERNEL:-"no"}" == "no" ]]; then 33 | # determine if it is already available in the OCI registry; if so, just pull and skip building/pushing 34 | if docker pull "${kernel_oci_image}"; then 35 | log info "Kernel image ${kernel_oci_image} already in registry; skipping build." 36 | log info "Set FORCE_BUILD_KERNEL=yes to force a build; use DO_PUSH=yes to also push after build." 
37 | if [[ "${EXPORT_KERNEL_IMAGE}" == "yes" ]]; then 38 | log info "Exporting kernel image ${kernel_oci_image} to ${EXPORT_KERNEL_IMAGE_DIR}" 39 | save_docker_image_to_tar_gz "${kernel_oci_image}" "${EXPORT_KERNEL_IMAGE_DIR}" 40 | fi 41 | exit 0 42 | fi 43 | fi 44 | 45 | log debug "Kernel build method: ${kernel_info[BUILD_FUNC]}" 46 | "${kernel_info[BUILD_FUNC]}" 47 | 48 | # Push it to the OCI registry; this discards the os/arch information that BuildKit generates 49 | if [[ "${DO_PUSH:-"no"}" == "yes" ]]; then 50 | log info "Kernel built; pushing to ${kernel_oci_image}" 51 | docker push "${kernel_oci_image}" 52 | else 53 | log info "DO_PUSH not 'yes', not pushing." 54 | fi 55 | 56 | if [[ "${EXPORT_KERNEL_IMAGE}" == "yes" ]]; then 57 | log info "Exporting kernel image ${kernel_oci_image} to ${EXPORT_KERNEL_IMAGE_DIR}" 58 | save_docker_image_to_tar_gz "${kernel_oci_image}" "${EXPORT_KERNEL_IMAGE_DIR}" 59 | fi 60 | } 61 | 62 | function kernel_configure_interactive() { 63 | # bail if not interactive (stdin is a terminal) 64 | [[ ! -t 0 ]] && log error "not interactive, can't configure" && exit 1 65 | 66 | log debug "Configuring a kernel with $*" 67 | 68 | log debug "Kernel config method: ${kernel_info[CONFIG_FUNC]}" 69 | "${kernel_info[CONFIG_FUNC]}" "$@" 70 | } 71 | 72 | function resolve_latest_kernel_version_lts() { # Produces KERNEL_POINT_RELEASE 73 | declare -i cache_valid=0 74 | 75 | # As the point release can and does change frequently, Users can specify if they 76 | # want to use the latest known point release version. This allows users to build 77 | # HookOS using an existing kernel container image from the registry. This only works with 78 | # unauthenticated registries. 79 | if [[ -n "${USE_LATEST_BUILT_KERNEL}" ]]; then 80 | reg="$(echo "${HOOK_KERNEL_OCI_BASE}" | cut -d'/' -f1)" 81 | repo="$(echo "${HOOK_KERNEL_OCI_BASE}" | cut -d'/' -f2-)" 82 | # expected format is: 6.6.32-14b8be17 (major.minor.point-hash) 83 | latest_point_release="$(curl -sL "https://${reg}/v2/${repo}/tags/list" | jq -r ".tags[]" | grep -e "^${KERNEL_MAJOR}.${KERNEL_MINOR}" | sort -V | tail -n1 | cut -d"-" -f1 | cut -d"." -f3)" 84 | log info "Using latest point release from registry ${HOOK_KERNEL_OCI_BASE} for kernel ${KERNEL_MAJOR}.${KERNEL_MINOR}: ${latest_point_release}" 85 | KERNEL_POINT_RELEASE="${latest_point_release}" 86 | return 0 87 | fi 88 | 89 | if [[ -f "${CACHE_DIR}/kernel-releases.json" ]]; then 90 | log debug "Found disk cached kernel-releases.json" 91 | # if the cache is older than 2 hours, refresh it 92 | if [[ "$(find "${CACHE_DIR}/kernel-releases.json" -mmin +120)" ]]; then 93 | log info "Cached kernel-releases.json is older than 2 hours, will refresh..." 94 | else 95 | log info "Using cached kernel-releases.json" 96 | cache_valid=1 97 | fi 98 | fi 99 | 100 | # if no valid cache found, grab for kernel.org 101 | if [[ ${cache_valid} -eq 0 ]]; then 102 | log info "Fetching kernel releases JSON info from kernel.org..." 103 | curl -sL "https://www.kernel.org/releases.json" -o "${CACHE_DIR}/kernel-releases.json" 104 | fi 105 | 106 | # shellcheck disable=SC2002 # cat is not useless. my cat's stylistic 107 | POINT_RELEASE_TRI="$(cat "${CACHE_DIR}/kernel-releases.json" | jq -r ".releases[].version" | grep -v -e "^next\-" -e "\-rc" | grep -e "^${KERNEL_MAJOR}\.${KERNEL_MINOR}\.")" 108 | POINT_RELEASE="$(echo "${POINT_RELEASE_TRI}" | cut -d '.' 
-f 3)" 109 | log debug "POINT_RELEASE_TRI: ${POINT_RELEASE_TRI}" 110 | log debug "POINT_RELEASE: ${POINT_RELEASE}" 111 | KERNEL_POINT_RELEASE="${KERNEL_POINT_RELEASE:-"${POINT_RELEASE}"}" 112 | } 113 | 114 | function get_kernel_info_dict() { 115 | declare kernel="${1}" 116 | declare kernel_data_str="${inventory_dict[${kernel}]}" 117 | if [[ -z "${kernel_data_str}" ]]; then 118 | log error "No kernel data found for '${kernel}'; valid ones are: ${inventory_ids[*]} " 119 | exit 1 120 | fi 121 | log debug "Kernel data for '${kernel}': ${kernel_data_str}" 122 | eval "kernel_info=(${kernel_data_str})" 123 | # Post process 124 | kernel_info['BUILD_FUNC']="build_kernel_${kernel_info['METHOD']}" 125 | kernel_info['VERSION_FUNC']="calculate_kernel_version_${kernel_info['METHOD']}" 126 | kernel_info['OUTPUT_ID_FUNC']="obtain_kernel_output_id_${kernel_info['METHOD']}" 127 | kernel_info['CONFIG_FUNC']="configure_kernel_${kernel_info['METHOD']}" 128 | 129 | # Defaults for optional settings 130 | kernel_info['TEMPLATE']="${kernel_info['TEMPLATE']:-"hook"}" 131 | kernel_info['LINUXKIT_VERSION']="${kernel_info['LINUXKIT_VERSION']:-"${LINUXKIT_VERSION_DEFAULT}"}" 132 | 133 | # Ensure kernel_info a valid TAG 134 | if [[ -z "${kernel_info['TAG']}" ]]; then 135 | log error "No TAG found for kernel '${kernel}'" 136 | exit 1 137 | fi 138 | 139 | # convert ARCH (x86_64, aarch64) to docker-ARCH (amd64, arm64) 140 | case "${kernel_info['ARCH']}" in 141 | "x86_64") kernel_info['DOCKER_ARCH']="amd64" ;; 142 | "aarch64" | "arm64") kernel_info['DOCKER_ARCH']="arm64" ;; 143 | *) log error "ARCH ${kernel_info['ARCH']} not supported" && exit 1 ;; 144 | esac 145 | } 146 | 147 | function set_kernel_vars_from_info_dict() { 148 | # Loop over the keys in kernel_info dictionary 149 | for key in "${!kernel_info[@]}"; do 150 | declare -g "${key}"="${kernel_info[${key}]}" 151 | log debug "Set ${key} to ${kernel_info[${key}]}" 152 | done 153 | } 154 | 155 | function get_host_docker_arch() { 156 | declare -g host_docker_arch="unknown" 157 | # convert ARCH (x86_64, aarch64) to docker-ARCH (amd64, arm64) 158 | case "$(uname -m)" in 159 | "x86_64") host_docker_arch="amd64" ;; 160 | "aarch64" | "arm64") host_docker_arch="arm64" ;; 161 | *) log error "ARCH $(uname -m) not supported" && exit 1 ;; 162 | esac 163 | return 0 164 | } 165 | -------------------------------------------------------------------------------- /bash/kernel/kernel_default.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | function obtain_kernel_output_id_default() { 6 | : "${inventory_id:?"ERROR: inventory_id is not defined"}" 7 | # The default kernel output id is just the arch (for compatibility with the old hook) 8 | # One can override with FORCE_OUTPUT_ID, which will be prepended to ARCH. 9 | # If that is not set, and KCONFIG != generic, an output will be generated with KCONFIG, MAJOR, MINOR, ARCH. 10 | # Lastly if using USE_KERNEL_ID, that will be used instead of the default inventory_id. 
11 | declare -g OUTPUT_ID="${ARCH}" 12 | if [[ "x${FORCE_OUTPUT_ID}x" != "xx" ]]; then 13 | declare -g OUTPUT_ID="${FORCE_OUTPUT_ID}-${ARCH}" 14 | elif [[ "${KCONFIG}" != "generic" ]]; then 15 | OUTPUT_ID="${KCONFIG}-${KERNEL_MAJOR}.${KERNEL_MINOR}.y-${ARCH}" 16 | elif [[ -n "${USE_KERNEL_ID}" ]]; then 17 | OUTPUT_ID="${inventory_id}" 18 | fi 19 | } 20 | 21 | function calculate_kernel_version_default() { 22 | # Make sure inventory_id is defined or exit with an error; using a one liner 23 | : "${inventory_id:?"ERROR: inventory_id is not defined"}" 24 | log debug "Starting calculate_kernel_version_default for inventory_id='${inventory_id}'" 25 | 26 | # Calculate the input DEFCONFIG 27 | declare -g INPUT_DEFCONFIG="${KCONFIG}-${KERNEL_MAJOR}.${KERNEL_MINOR}.y-${ARCH}" 28 | if [[ ! -f "kernel/configs/${INPUT_DEFCONFIG}" ]]; then 29 | log error "kernel/configs/${INPUT_DEFCONFIG} does not exist, check inputs/envs" 30 | exit 1 31 | fi 32 | 33 | obtain_kernel_output_id_default # Sets OUTPUT_ID 34 | 35 | # Calculate the KERNEL_ARCH from ARCH; also what is the cross-compiler package needed for the arch 36 | declare -g KERNEL_ARCH="" KERNEL_CROSS_COMPILE_PKGS="" KERNEL_OUTPUT_IMAGE="" 37 | case "${ARCH}" in 38 | "x86_64") 39 | KERNEL_ARCH="x86" 40 | KERNEL_CROSS_COMPILE_PKGS="crossbuild-essential-amd64" 41 | KERNEL_CROSS_COMPILE="x86_64-linux-gnu-" 42 | KERNEL_OUTPUT_IMAGE="arch/x86_64/boot/bzImage" 43 | ;; 44 | "aarch64" | "arm64") 45 | KERNEL_ARCH="arm64" 46 | KERNEL_CROSS_COMPILE_PKGS="crossbuild-essential-arm64" 47 | KERNEL_CROSS_COMPILE="aarch64-linux-gnu-" 48 | KERNEL_OUTPUT_IMAGE="arch/arm64/boot/Image" 49 | ;; 50 | *) log error "ERROR: ARCH ${ARCH} not supported" && exit 1 ;; 51 | esac 52 | 53 | # Grab the latest version from kernel.org 54 | declare -g KERNEL_POINT_RELEASE="${KERNEL_POINT_RELEASE:-""}" 55 | resolve_latest_kernel_version_lts 56 | 57 | # Calculate a version and hash for the OCI image 58 | # Hash the Dockerfile and the input defconfig together 59 | declare input_hash="" short_input_hash="" 60 | input_hash="$(cat "kernel/configs/${INPUT_DEFCONFIG}" "kernel/Dockerfile" | sha256sum - | cut -d ' ' -f 1)" 61 | short_input_hash="${input_hash:0:8}" 62 | kernel_oci_version="${KERNEL_MAJOR}.${KERNEL_MINOR}.${KERNEL_POINT_RELEASE}-${short_input_hash}" 63 | kernel_oci_image="${HOOK_KERNEL_OCI_BASE}:${kernel_oci_version}" 64 | 65 | # Log the obtained version & images to stderr 66 | log info "Kernel arch: ${KERNEL_ARCH} (for ARCH ${ARCH})" 67 | log info "Kernel version: ${KERNEL_MAJOR}.${KERNEL_MINOR}.${KERNEL_POINT_RELEASE}" 68 | log info "Kernel OCI version: ${kernel_oci_version}" 69 | log info "Kernel OCI image: ${kernel_oci_image}" 70 | log info "Kernel cross-compiler: ${KERNEL_CROSS_COMPILE} (in pkgs ${KERNEL_CROSS_COMPILE_PKGS})" 71 | } 72 | 73 | function common_build_args_kernel_default() { 74 | build_args+=( 75 | "--build-arg" "KERNEL_OUTPUT_IMAGE=${KERNEL_OUTPUT_IMAGE}" 76 | "--build-arg" "KERNEL_CROSS_COMPILE_PKGS=${KERNEL_CROSS_COMPILE_PKGS}" # This is not used in the Dockerfile, to maximize cache hits 77 | "--build-arg" "KERNEL_CROSS_COMPILE=${KERNEL_CROSS_COMPILE}" 78 | "--build-arg" "KERNEL_ARCH=${KERNEL_ARCH}" 79 | "--build-arg" "KERNEL_MAJOR=${KERNEL_MAJOR}" 80 | "--build-arg" "KERNEL_MAJOR_V=v${KERNEL_MAJOR}.x" 81 | "--build-arg" "KERNEL_MINOR=${KERNEL_MINOR}" 82 | "--build-arg" "KERNEL_VERSION=${KERNEL_MAJOR}.${KERNEL_MINOR}.${KERNEL_POINT_RELEASE}" 83 | "--build-arg" "KERNEL_SERIES=${KERNEL_MAJOR}.${KERNEL_MINOR}.y" 84 | "--build-arg" 
"KERNEL_POINT_RELEASE=${KERNEL_POINT_RELEASE}" 85 | "--build-arg" "INPUT_DEFCONFIG=${INPUT_DEFCONFIG}" 86 | ) 87 | } 88 | 89 | function configure_kernel_default() { 90 | log info "Configuring default kernel: $*" 91 | 92 | declare -a build_args=() 93 | common_build_args_kernel_default 94 | log info "Will configure with: ${build_args[*]}" 95 | 96 | declare configurator_image="${kernel_oci_image}-configurator" 97 | 98 | # Build the config stage 99 | log info "Building kernel-configurator Dockerfile stage..." 100 | ( 101 | cd kernel 102 | # Build the "kernel-configurator" target from the Dockerfile; tag it separately 103 | docker buildx build --load "--progress=${DOCKER_BUILDX_PROGRESS_TYPE}" "${build_args[@]}" --target kernel-configurator -t "${configurator_image}" . 104 | ) 105 | log info "Built kernel-configurator Dockerfile stage..." 106 | 107 | if [[ "$1" == "one-shot" ]]; then 108 | log info "Running one-shot configuration, modifying ${INPUT_DEFCONFIG} ..." 109 | ( 110 | cd kernel 111 | # Run the built container; mount kernel/configs as /host; run config directly and extract from container 112 | docker run -it --rm -v "$(pwd)/configs:/host" "${configurator_image}" bash "-c" "make menuconfig && make savedefconfig && cp -v defconfig /host/${INPUT_DEFCONFIG}" 113 | ) 114 | log info "Kernel config finished. File ${INPUT_DEFCONFIG} is modified in your local copy." 115 | else 116 | log info "Starting an interactive shell in Dockerfile kernel-configurator stage..." 117 | ( 118 | cd kernel 119 | # Run the built container; mount kernel/configs as /host 120 | cat <<- INSTRUCTIONS 121 | *** Starting a shell in the Docker kernel-configurator stage. 122 | *** The config ${INPUT_DEFCONFIG} is already in place in .config (and already expanded). 123 | *** You can run "make menuconfig" to interactively configure the kernel. 124 | *** After configuration, you should run "make savedefconfig" to obtain a "defconfig" file. 125 | *** You can then run "cp -v defconfig /host/${INPUT_DEFCONFIG}" to copy it to the build host for commiting. 126 | INSTRUCTIONS 127 | docker run -it --rm -v "$(pwd)/configs:/host" "${configurator_image}" bash 128 | ) 129 | fi 130 | return 0 131 | } 132 | 133 | function build_kernel_default() { 134 | log info "Building default kernel" 135 | declare -a build_args=() 136 | common_build_args_kernel_default 137 | log info "Will build with: ${build_args[*]}" 138 | 139 | # Don't specify platform, our Dockerfile is multiarch, thus you can build x86 kernels in arm64 hosts and vice-versa ... 140 | docker buildx build --load "--progress=${DOCKER_BUILDX_PROGRESS_TYPE}" "${build_args[@]}" -t "${kernel_oci_image}" -f kernel/Dockerfile kernel 141 | # .. but enforce the target arch for LK in the final image via dump/edit-manifests/reimport trick 142 | ensure_docker_image_architecture "${kernel_oci_image}" "${kernel_info['DOCKER_ARCH']}" 143 | } 144 | -------------------------------------------------------------------------------- /bash/common.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # logger utility, output ANSI-colored messages to stderr; first argument is level (debug/info/warn/error), all other arguments are the message. 
4 | declare -A log_colors=(["debug"]="0;36" ["info"]="0;32" ["notice"]="1;32" ["warn"]="1;33" ["error"]="1;31") 5 | declare -A log_emoji=(["debug"]="🐛" ["info"]="🌿" ["notice"]="🌱" ["warn"]="🚸" ["error"]="🚨") 6 | declare -A log_gha_levels=(["notice"]="notice" ["warn"]="warning" ["error"]="error") 7 | function log() { 8 | declare level="${1}" 9 | shift 10 | [[ "${level}" == "debug" && "${DEBUG}" != "yes" ]] && return # Skip debugs unless DEBUG=yes is set in the environment 11 | # If running on GitHub Actions, and level exists in log_gha_levels... 12 | if [[ -n "${GITHUB_ACTIONS}" && -n "${log_gha_levels[${level}]}" ]]; then 13 | echo "::${log_gha_levels[${level}]} ::${*}" >&2 14 | fi 15 | # Normal output 16 | declare color="\033[${log_colors[${level}]}m" 17 | declare emoji="${log_emoji[${level}]}" 18 | declare ansi_reset="\033[0m" 19 | level=$(printf "%-5s" "${level}") # pad to 5 characters before printing 20 | echo -e "${emoji} ${ansi_reset}[${color}${level}${ansi_reset}] ${color}${*}${ansi_reset}" >&2 21 | } 22 | 23 | # Helper for debugging directory trees; 24 | function log_tree() { 25 | declare directory="${1}" 26 | shift 27 | declare level="${1}" 28 | [[ "${level}" == "debug" && "${DEBUG}" != "yes" ]] && return # Skip debugs unless DEBUG=yes is set in the environment 29 | log "${@}" "-- directory ${directory}:" 30 | if command -v tree > /dev/null; then 31 | tree "${directory}" 32 | else 33 | log "${level}" "'tree' utility not installed; install it to see directory structure in logs." 34 | fi 35 | } 36 | 37 | # Helper for showing the contents of a file (using bat, if installed) 38 | function log_file_bat() { 39 | declare file="${1}" 40 | shift 41 | declare level="${1}" 42 | shift 43 | [[ "${level}" == "debug" && "${DEBUG}" != "yes" ]] && return # Skip debugs unless DEBUG=yes is set in the environment 44 | log "${level}" "${@}" "-- file ${file}:" 45 | declare extra_bat_args=() 46 | if [[ -n "${bat_language}" ]]; then 47 | extra_bat_args+=("--language=${bat_language}") 48 | fi 49 | if command -v bat > /dev/null; then 50 | bat --color=always --paging=never "${extra_bat_args[@]}" "${file}" 51 | elif command -v batcat > /dev/null; then 52 | batcat --color=always --paging=never "${extra_bat_args[@]}" "${file}" 53 | else 54 | log "${level}" "'bat' utility not installed; install it to see file contents in logs." 55 | fi 56 | } 57 | 58 | function install_dependencies() { 59 | declare extra="${1}" 60 | 61 | declare -a debian_pkgs=() 62 | declare -a brew_pkgs=() 63 | 64 | command -v jq > /dev/null || { 65 | debian_pkgs+=("jq") 66 | brew_pkgs+=("jq") 67 | } 68 | 69 | command -v pigz > /dev/null || { 70 | debian_pkgs+=("pigz") 71 | brew_pkgs+=("pigz") 72 | } 73 | 74 | command -v envsubst > /dev/null || { 75 | debian_pkgs+=("gettext-base") 76 | brew_pkgs+=("gettext") 77 | } 78 | 79 | if [[ "${extra}" == "bootable-media" ]]; then 80 | command -v pixz > /dev/null || { 81 | debian_pkgs+=("pixz") 82 | brew_pkgs+=("pixz") 83 | } 84 | 85 | command -v pv > /dev/null || { 86 | debian_pkgs+=("pv") 87 | brew_pkgs+=("pv") 88 | } 89 | fi 90 | 91 | if [[ "$(uname)" == "Darwin" ]]; then 92 | command -v gtar > /dev/null || brew_pkgs+=("gnu-tar") 93 | command -v greadlink > /dev/null || brew_pkgs+=("coreutils") 94 | command -v gsed > /dev/null || brew_pkgs+=("gnu-sed") 95 | fi 96 | 97 | # If more than zero entries in the array, install 98 | if [[ ${#debian_pkgs[@]} -gt 0 ]]; then 99 | # If running on Debian or Ubuntu... 
100 | if [[ -f /etc/debian_version ]]; then 101 | log info "Installing apt dependencies: ${debian_pkgs[*]}" 102 | sudo DEBIAN_FRONTEND=noninteractive apt -o "Dpkg::Use-Pty=0" -y update 103 | sudo DEBIAN_FRONTEND=noninteractive apt -o "Dpkg::Use-Pty=0" -y install "${debian_pkgs[@]}" 104 | elif [[ "$(uname)" == "Darwin" ]]; then 105 | log info "Skipping Debian deps installation for Darwin..." 106 | else 107 | log error "Don't know how to install the equivalent of Debian packages *on the host*: ${debian_pkgs[*]} -- teach me!" 108 | fi 109 | else 110 | log info "All deps found, no apt installs necessary on host." 111 | fi 112 | 113 | if [[ "$(uname)" == "Darwin" ]]; then 114 | if [[ ${#brew_pkgs[@]} -gt 0 ]]; then 115 | log info "Detected Darwin, assuming 'brew' is available: running 'brew install ${brew_pkgs[*]}'" 116 | brew install "${brew_pkgs[@]}" 117 | fi 118 | 119 | if [[ "${extra}" == "" ]]; then # Do not to this if extra dependencies are being installed 120 | # Re-export PATH with the gnu-version of coreutils, tar, and sed 121 | declare brew_prefix 122 | brew_prefix="$(brew --prefix)" 123 | export PATH="${brew_prefix}/opt/gnu-sed/libexec/gnubin:${brew_prefix}/opt/gnu-tar/libexec/gnubin:${brew_prefix}/opt/coreutils/libexec/gnubin:${PATH}" 124 | log debug "Darwin; PATH is now: ${PATH}" 125 | fi 126 | fi 127 | 128 | return 0 129 | } 130 | 131 | # utility used by inventory.sh to define a kernel/flavour with less-terrible syntax. 132 | function define_id() { 133 | declare id="${1}" 134 | shift 135 | 136 | declare -A dict=() 137 | declare arg 138 | for arg in "$@"; do 139 | if [[ "${arg}" == *=* ]]; then # contains an equal sign. it's a param. 140 | local param_name param_value 141 | param_name=${arg%%=*} 142 | param_value=${arg##*=} 143 | dict["${param_name}"]="${param_value}" # For current run. 144 | else 145 | log error "Unknown argument to define, id=${id}: '${arg}'" 146 | exit 10 147 | fi 148 | done 149 | 150 | # Sanity checking: METHOD, ARCH and TAG are required. 151 | if [[ -z "${dict['METHOD']}" || -z "${dict['ARCH']}" || -z "${dict['TAG']}" ]]; then 152 | log error "Flavour definition for id=${id} is missing METHOD, ARCH or TAG" 153 | exit 11 154 | fi 155 | 156 | declare str_dict 157 | str_dict="$(declare -p dict)" # bash high sorcery; get a string representation of the dict 158 | str_dict="${str_dict#*"dict=("}" # remove 'declare -A dict=(' from the string 159 | str_dict="${str_dict%?}" # remove the last character, which is a ")" 160 | log debug "str dict for id=${id}: ${str_dict}" # this _will_ go wrong, so add a debug 161 | 162 | # eval it into the inventory_dict dict 163 | eval "inventory_dict[${id}]='${str_dict}'" 164 | 165 | # declare a global with the id of the last-added kernel, for add_bootable_id's convenience 166 | declare -g last_defined_id="${id}" 167 | 168 | return 0 169 | } 170 | 171 | function add_bootable_id() { 172 | declare id="${1}" 173 | shift 174 | 175 | declare -A dict=() 176 | declare arg 177 | for arg in "$@"; do 178 | if [[ "${arg}" == *=* ]]; then # contains an equal sign. it's a param. 179 | local param_name param_value 180 | param_name=${arg%%=*} 181 | param_value=${arg##*=} 182 | dict["${param_name}"]="${param_value}" # For current run. 
183 | else 184 | log error "Unknown argument to define, id=${id}: '${arg}'" 185 | exit 10 186 | fi 187 | done 188 | 189 | # if dict["INVENTORY_ID"] is not defined, set it to the last defined id 190 | if [[ -z "${dict['INVENTORY_ID']}" ]]; then 191 | dict["INVENTORY_ID"]="${last_defined_id}" 192 | fi 193 | 194 | dict["BOOTABLE_ID"]="${id}" 195 | 196 | # Sanity checking: METHOD, ARCH and TAG are required. 197 | if [[ -z "${dict['HANDLER']}" || -z "${dict['TAG']}" ]]; then 198 | log error "Bootable definition for id=${id} is missing HANDLER or TAG" 199 | exit 11 200 | fi 201 | 202 | declare str_dict 203 | str_dict="$(declare -p dict)" # bash high sorcery; get a string representation of the dict 204 | str_dict="${str_dict#*"dict=("}" # remove 'declare -A dict=(' from the string 205 | str_dict="${str_dict%?}" # remove the last character, which is a ")" 206 | log debug "str dict for id=${id}: ${str_dict}" # this _will_ go wrong, so add a debug 207 | 208 | # eval it into the inventory_dict dict 209 | eval "bootable_inventory_dict[${id}]='${str_dict}'" 210 | 211 | return 0 212 | } 213 | -------------------------------------------------------------------------------- /images/hook-containerd/etc/containerd/config.toml: -------------------------------------------------------------------------------- 1 | # default containerd configuration file, generated via `containerd config default` 2 | version = 3 3 | root = '/var/lib/containerd' 4 | state = '/run/containerd' 5 | temp = '' 6 | disabled_plugins = ["io.containerd.grpc.v1.cri","io.containerd.internal.v1.opt"] 7 | required_plugins = [] 8 | oom_score = 0 9 | imports = [] 10 | 11 | [grpc] 12 | address = '/run/containerd/containerd.sock' 13 | tcp_address = '' 14 | tcp_tls_ca = '' 15 | tcp_tls_cert = '' 16 | tcp_tls_key = '' 17 | uid = 0 18 | gid = 0 19 | max_recv_message_size = 16777216 20 | max_send_message_size = 16777216 21 | 22 | [ttrpc] 23 | address = '' 24 | uid = 0 25 | gid = 0 26 | 27 | [debug] 28 | address = '' 29 | uid = 0 30 | gid = 0 31 | level = '' 32 | format = '' 33 | 34 | [metrics] 35 | address = '' 36 | grpc_histogram = false 37 | 38 | [plugins] 39 | [plugins.'io.containerd.cri.v1.images'] 40 | snapshotter = 'overlayfs' 41 | disable_snapshot_annotations = true 42 | discard_unpacked_layers = false 43 | max_concurrent_downloads = 3 44 | concurrent_layer_fetch_buffer = 0 45 | image_pull_progress_timeout = '5m0s' 46 | image_pull_with_sync_fs = false 47 | stats_collect_period = 10 48 | use_local_image_pull = false 49 | 50 | [plugins.'io.containerd.cri.v1.images'.pinned_images] 51 | sandbox = 'registry.k8s.io/pause:3.10' 52 | 53 | [plugins.'io.containerd.cri.v1.images'.registry] 54 | config_path = '' 55 | 56 | [plugins.'io.containerd.cri.v1.images'.image_decryption] 57 | key_model = 'node' 58 | 59 | [plugins.'io.containerd.cri.v1.runtime'] 60 | enable_selinux = false 61 | selinux_category_range = 1024 62 | max_container_log_line_size = 16384 63 | disable_apparmor = false 64 | restrict_oom_score_adj = false 65 | disable_proc_mount = false 66 | unset_seccomp_profile = '' 67 | tolerate_missing_hugetlb_controller = true 68 | disable_hugetlb_controller = true 69 | device_ownership_from_security_context = false 70 | ignore_image_defined_volumes = false 71 | netns_mounts_under_state_dir = false 72 | enable_unprivileged_ports = true 73 | enable_unprivileged_icmp = true 74 | enable_cdi = true 75 | cdi_spec_dirs = ['/etc/cdi', '/var/run/cdi'] 76 | drain_exec_sync_io_timeout = '0s' 77 | ignore_deprecation_warnings = [] 78 | 79 | 
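# (runc, via the io.containerd.runc.v2 shim, is the only runtime configured below)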
[plugins.'io.containerd.cri.v1.runtime'.containerd] 80 | default_runtime_name = 'runc' 81 | ignore_blockio_not_enabled_errors = false 82 | ignore_rdt_not_enabled_errors = false 83 | 84 | [plugins.'io.containerd.cri.v1.runtime'.containerd.runtimes] 85 | [plugins.'io.containerd.cri.v1.runtime'.containerd.runtimes.runc] 86 | runtime_type = 'io.containerd.runc.v2' 87 | runtime_path = '' 88 | pod_annotations = [] 89 | container_annotations = [] 90 | privileged_without_host_devices = false 91 | privileged_without_host_devices_all_devices_allowed = false 92 | cgroup_writable = false 93 | base_runtime_spec = '' 94 | cni_conf_dir = '' 95 | cni_max_conf_num = 0 96 | snapshotter = '' 97 | sandboxer = 'podsandbox' 98 | io_type = '' 99 | 100 | [plugins.'io.containerd.cri.v1.runtime'.containerd.runtimes.runc.options] 101 | BinaryName = '' 102 | CriuImagePath = '' 103 | CriuWorkPath = '' 104 | IoGid = 0 105 | IoUid = 0 106 | NoNewKeyring = false 107 | Root = '' 108 | ShimCgroup = '' 109 | 110 | [plugins.'io.containerd.cri.v1.runtime'.cni] 111 | bin_dir = '' 112 | bin_dirs = ['/opt/cni/bin'] 113 | conf_dir = '/etc/cni/net.d' 114 | max_conf_num = 1 115 | setup_serially = false 116 | conf_template = '' 117 | ip_pref = '' 118 | use_internal_loopback = false 119 | 120 | [plugins.'io.containerd.differ.v1.erofs'] 121 | mkfs_options = [] 122 | 123 | [plugins.'io.containerd.gc.v1.scheduler'] 124 | pause_threshold = 0.02 125 | deletion_threshold = 0 126 | mutation_threshold = 100 127 | schedule_delay = '0s' 128 | startup_delay = '100ms' 129 | 130 | [plugins.'io.containerd.grpc.v1.cri'] 131 | disable_tcp_service = true 132 | stream_server_address = '127.0.0.1' 133 | stream_server_port = '0' 134 | stream_idle_timeout = '4h0m0s' 135 | enable_tls_streaming = false 136 | 137 | [plugins.'io.containerd.grpc.v1.cri'.x509_key_pair_streaming] 138 | tls_cert_file = '' 139 | tls_key_file = '' 140 | 141 | [plugins.'io.containerd.image-verifier.v1.bindir'] 142 | bin_dir = '/opt/containerd/image-verifier/bin' 143 | max_verifiers = 10 144 | per_verifier_timeout = '10s' 145 | 146 | [plugins.'io.containerd.internal.v1.opt'] 147 | path = '/opt/containerd' 148 | 149 | [plugins.'io.containerd.internal.v1.tracing'] 150 | 151 | [plugins.'io.containerd.metadata.v1.bolt'] 152 | content_sharing_policy = 'shared' 153 | no_sync = false 154 | 155 | [plugins.'io.containerd.monitor.container.v1.restart'] 156 | interval = '10s' 157 | 158 | [plugins.'io.containerd.monitor.task.v1.cgroups'] 159 | no_prometheus = false 160 | 161 | [plugins.'io.containerd.nri.v1.nri'] 162 | disable = false 163 | socket_path = '/var/run/nri/nri.sock' 164 | plugin_path = '/opt/nri/plugins' 165 | plugin_config_path = '/etc/nri/conf.d' 166 | plugin_registration_timeout = '5s' 167 | plugin_request_timeout = '2s' 168 | disable_connections = false 169 | 170 | [plugins.'io.containerd.runtime.v2.task'] 171 | platforms = ['linux/amd64'] 172 | 173 | [plugins.'io.containerd.service.v1.diff-service'] 174 | default = ['walking'] 175 | sync_fs = false 176 | 177 | [plugins.'io.containerd.service.v1.tasks-service'] 178 | blockio_config_file = '' 179 | rdt_config_file = '' 180 | 181 | [plugins.'io.containerd.shim.v1.manager'] 182 | env = [] 183 | 184 | [plugins.'io.containerd.snapshotter.v1.blockfile'] 185 | root_path = '' 186 | scratch_file = '' 187 | fs_type = '' 188 | mount_options = [] 189 | recreate_scratch = false 190 | 191 | [plugins.'io.containerd.snapshotter.v1.btrfs'] 192 | root_path = '' 193 | 194 | [plugins.'io.containerd.snapshotter.v1.erofs'] 195 | root_path = '' 196 | 
ovl_mount_options = [] 197 | enable_fsverity = false 198 | 199 | [plugins.'io.containerd.snapshotter.v1.native'] 200 | root_path = '' 201 | 202 | [plugins.'io.containerd.snapshotter.v1.overlayfs'] 203 | root_path = '' 204 | upperdir_label = false 205 | sync_remove = false 206 | slow_chown = false 207 | mount_options = [] 208 | 209 | [plugins.'io.containerd.snapshotter.v1.zfs'] 210 | root_path = '' 211 | 212 | [plugins.'io.containerd.tracing.processor.v1.otlp'] 213 | 214 | [plugins.'io.containerd.transfer.v1.local'] 215 | max_concurrent_downloads = 3 216 | concurrent_layer_fetch_buffer = 0 217 | max_concurrent_uploaded_layers = 3 218 | check_platform_supported = false 219 | config_path = '' 220 | 221 | [cgroup] 222 | path = '' 223 | 224 | [timeouts] 225 | 'io.containerd.timeout.bolt.open' = '0s' 226 | 'io.containerd.timeout.cri.defercleanup' = '1m0s' 227 | 'io.containerd.timeout.metrics.shimstats' = '2s' 228 | 'io.containerd.timeout.shim.cleanup' = '5s' 229 | 'io.containerd.timeout.shim.load' = '5s' 230 | 'io.containerd.timeout.shim.shutdown' = '3s' 231 | 'io.containerd.timeout.task.state' = '2s' 232 | 233 | [stream_processors] 234 | [stream_processors.'io.containerd.ocicrypt.decoder.v1.tar'] 235 | accepts = ['application/vnd.oci.image.layer.v1.tar+encrypted'] 236 | returns = 'application/vnd.oci.image.layer.v1.tar' 237 | path = 'ctd-decoder' 238 | args = ['--decryption-keys-path', '/etc/containerd/ocicrypt/keys'] 239 | env = ['OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf'] 240 | 241 | [stream_processors.'io.containerd.ocicrypt.decoder.v1.tar.gzip'] 242 | accepts = ['application/vnd.oci.image.layer.v1.tar+gzip+encrypted'] 243 | returns = 'application/vnd.oci.image.layer.v1.tar+gzip' 244 | path = 'ctd-decoder' 245 | args = ['--decryption-keys-path', '/etc/containerd/ocicrypt/keys'] 246 | env = ['OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf'] 247 | -------------------------------------------------------------------------------- /bash/docker.sh: -------------------------------------------------------------------------------- 1 | function check_docker_daemon_for_sanity() { 2 | # LinuxKit is a bit confused about `docker context list` when you're using a non-default context. 3 | # Let's obtain the currect socket from the current context and explicitly export it. 4 | # This allows working on machines with Docker Desktop, colima, and other run-linux-in-a-VM solutions. 5 | declare current_context_docker_socket="" current_docker_context_name="" 6 | current_docker_context_name="$(docker context show)" 7 | current_context_docker_socket="$(docker context inspect "${current_docker_context_name}" | jq -r '.[0].Endpoints.docker.Host')" 8 | log info "Current Docker context ('${current_docker_context_name}') socket: '${current_context_docker_socket}'" 9 | 10 | log debug "Setting DOCKER_HOST to '${current_context_docker_socket}'" 11 | export DOCKER_HOST="${current_context_docker_socket}" 12 | 13 | # Hide Docker, Inc spamming "What's next" et al. 14 | export DOCKER_CLI_HINTS=false 15 | 16 | # Shenanigans to go around error control & capture output in the same effort, 'docker info' is slow. 17 | declare docker_info docker_buildx_version 18 | docker_info="$({ docker info 2> /dev/null && echo "DOCKER_INFO_OK"; } || true)" 19 | 20 | if [[ ! "${docker_info}" =~ "DOCKER_INFO_OK" ]]; then 21 | log error "'docker info' failed. Is Docker installed & your user in the correct group?" 
22 | exit 3
23 | fi
24 | 
25 | docker_buildx_version="$({ echo "${docker_info}" | grep -i -e "buildx:" || true; } | cut -d ":" -f 2 | xargs echo -n)"
26 | log debug "Docker Buildx version" "${docker_buildx_version}"
27 | 
28 | if [[ -z "${docker_buildx_version}" ]]; then
29 | log error "'docker info' indicates there's no buildx installed. Please install docker buildx."
30 | exit 4
31 | fi
32 | 
33 | # Once we know docker is sane, hook up a function that helps us trace invocations.
34 | function docker() {
35 | log debug "--> docker $*"
36 | command docker "$@"
37 | }
38 | 
39 | }
40 | 
41 | # Utility to pull skopeo itself from SKOPEO_IMAGE; checks the local Docker cache and skips if found
42 | function pull_skopeo_image_if_not_in_local_docker_cache() {
43 | # Check if the image is already in the local Docker cache
44 | if docker image inspect "${SKOPEO_IMAGE}" &> /dev/null; then
45 | log info "Skopeo image ${SKOPEO_IMAGE} is already in the local Docker cache; skipping pull."
46 | return 0
47 | fi
48 | 
49 | log info "Pulling Skopeo image ${SKOPEO_IMAGE}..."
50 | 
51 | pull_docker_image_from_remote_with_retries "${SKOPEO_IMAGE}"
52 | }
53 | 
54 | # Utility to get the most recent tag for a given image, using Skopeo; no retries, a failure is fatal.
55 | # Sets the value of outer-scope variable latest_tag_for_docker_image, so declare it there.
56 | # If extra arguments are present after the image, they are used to grep the tags.
57 | function get_latest_tag_for_docker_image_using_skopeo() {
58 | declare image="$1"
59 | shift
60 | latest_tag_for_docker_image="undetermined_tag"
61 | 
62 | # Pull separately to avoid tty hell in the subshell below
63 | pull_skopeo_image_if_not_in_local_docker_cache
64 | 
65 | # if extra arguments are present, use them to grep the tags
66 | if [[ -n "$*" ]]; then
67 | latest_tag_for_docker_image="$(docker run "${SKOPEO_IMAGE}" list-tags "docker://${image}" | jq -r ".Tags[]" | grep "${@}" | tail -1)"
68 | else
69 | latest_tag_for_docker_image="$(docker run "${SKOPEO_IMAGE}" list-tags "docker://${image}" | jq -r ".Tags[]" | tail -1)"
70 | fi
71 | log info "Found latest tag: '${latest_tag_for_docker_image}' for image '${image}'"
72 | }
73 | 
74 | # Utility to pull from remote, with retries.
75 | function pull_docker_image_from_remote_with_retries() {
76 | declare image="$1"
77 | declare -i retries=3
78 | declare -i retry_delay=5
79 | declare -i retry_count=0
80 | 
81 | while [[ ${retry_count} -lt ${retries} ]]; do
82 | if docker pull "${image}"; then
83 | log info "Successfully pulled ${image}"
84 | return 0
85 | else
86 | log warn "Failed to pull ${image}; retrying in ${retry_delay} seconds..."
87 | sleep "${retry_delay}"
88 | ((retry_count += 1))
89 | fi
90 | done
91 | 
92 | log error "Failed to pull ${image} after ${retries} retries."
93 | exit 1 94 | } 95 | 96 | # Helper script, for common task of installing packages on a Debian Dockerfile 97 | # always includes curl and downloads ORAS binary too 98 | # takes the relative directory to write the helper to 99 | # sets outer scope dockerfile_helper_filename with the name of the file for the Dockerfile (does not include the directory) 100 | function produce_dockerfile_helper_apt_oras() { 101 | declare target_dir="$1" 102 | declare helper_name="apt-oras-helper.sh" 103 | dockerfile_helper_filename="Dockerfile.autogen.helper.${helper_name}" # this is negated in .dockerignore 104 | 105 | declare fn="${target_dir}${dockerfile_helper_filename}" 106 | cat <<- 'DOWNLOAD_HELPER_SCRIPT' > "${fn}" 107 | #!/bin/bash 108 | set -e 109 | declare oras_version="1.2.2" # See https://github.com/oras-project/oras/releases 110 | # determine the arch to download from current arch 111 | declare oras_arch="unknown" 112 | case "$(uname -m)" in 113 | "x86_64") oras_arch="amd64" ;; 114 | "aarch64" | "arm64") oras_arch="arm64" ;; 115 | *) log error "ERROR: ARCH $(uname -m) not supported by ORAS? check https://github.com/oras-project/oras/releases" && exit 1 ;; 116 | esac 117 | declare oras_down_url="https://github.com/oras-project/oras/releases/download/v${oras_version}/oras_${oras_version}_linux_${oras_arch}.tar.gz" 118 | export DEBIAN_FRONTEND=noninteractive 119 | apt-get -qq -o "Dpkg::Use-Pty=0" update || apt-get -o "Dpkg::Use-Pty=0" update 120 | apt-get -qq install -o "Dpkg::Use-Pty=0" -q -y curl "${@}" || apt-get install -o "Dpkg::Use-Pty=0" -q -y curl "${@}" 121 | curl -sL -o /oras.tar.gz ${oras_down_url} 122 | tar -xvf /oras.tar.gz -C /usr/local/bin/ oras 123 | rm -rf /oras.tar.gz 124 | chmod +x /usr/local/bin/oras 125 | echo -n "ORAS version: " && oras version 126 | DOWNLOAD_HELPER_SCRIPT 127 | log debug "Created apt-oras helper script '${fn}'" 128 | } 129 | 130 | # A huge hack to force the architecture of a Docker image to a specific value. 131 | # This is required for the LinuxKit kernel images: LK expects them to have the correct arch, despite the 132 | # actual contents always being the same. Docker's buildkit tags a locally built image with the host arch. 133 | # Thus change the host arch to the expected arch in the image's manifests via a dump/reimport. 134 | function ensure_docker_image_architecture() { 135 | declare kernel_oci_image="$1" 136 | declare expected_arch="$2" 137 | 138 | # If the host arch is the same as the expected arch, no need to do anything 139 | if [[ "$(uname -m)" == "${expected_arch}" ]]; then 140 | log info "Host architecture is already ${expected_arch}, no need to rewrite Docker image ${kernel_oci_image}" 141 | return 0 142 | fi 143 | 144 | log info "Rewriting Docker image ${kernel_oci_image} to architecture ${expected_arch}, wait..." 
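# The arch lives in the image's small OCI config/manifest JSON blobs, e.g. {"architecture":"amd64","os":"linux",...};
# conveniently "amd64" and "arm64" are both 5 characters, which is what the sed's five-dot match further below relies on.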
145 | 146 | # Create a temporary directory, use mktemp 147 | declare -g tmpdir 148 | tmpdir="$(mktemp -d)" 149 | log debug "Created temporary directory: ${tmpdir}" 150 | 151 | # Export the image to a tarball 152 | docker save -o "${tmpdir}/original.tar" "${kernel_oci_image}" 153 | 154 | # Untag the hostarch image 155 | docker rmi "${kernel_oci_image}" 156 | 157 | # Create a working dir under the tmpdir 158 | mkdir -p "${tmpdir}/working" 159 | 160 | # Extract the tarball into the working dir 161 | tar -xf "${tmpdir}/original.tar" -C "${tmpdir}/working" 162 | log debug "Extracted tarball to ${tmpdir}/working" 163 | 164 | # Remove the original tarball 165 | rm -f "${tmpdir}/original.tar" 166 | 167 | declare working_blobs_dir="${tmpdir}/working/blobs/sha256" 168 | 169 | # Find all files under working_blobs_dir which are smaller than 2048 bytes 170 | # Use mapfile to create an array of files 171 | declare -a small_files 172 | mapfile -t small_files < <(find "${working_blobs_dir}" -type f -size -2048c) 173 | log debug "Found small blob files: ${small_files[*]}" 174 | 175 | # Replace the architecture in each of the small files 176 | for file in "${small_files[@]}"; do 177 | log debug "Replacing architecture in ${file}" 178 | sed -i "s|\"architecture\":\".....\"|\"architecture\":\"${expected_arch}\"|g" "${file}" # 🤮 179 | done 180 | 181 | # Create a new tarball with the modified files 182 | tar -cf "${tmpdir}/modified.tar" -C "${tmpdir}/working" . 183 | log debug "Created modified tarball: ${tmpdir}/modified.tar" 184 | 185 | # Remove the working directory 186 | rm -rf "${tmpdir}/working" 187 | 188 | # Import the modified tarball back into the local cache 189 | docker load -i "${tmpdir}/modified.tar" 190 | 191 | # Remove the temporary directory, completely 192 | rm -rf "${tmpdir}" 193 | 194 | log info "Rewrote Docker image ${kernel_oci_image} to architecture ${expected_arch}." 195 | } 196 | -------------------------------------------------------------------------------- /bash/json-matrix.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ## Implements JSON generation for GitHub Actions matrixes. 4 | ## nb: this is an example of what not to use bash for. 5 | ## The same code in Python would be 10x shorter and run infinitely faster. 6 | ## Please be patient while reading this. 
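# For orientation, one (hypothetical) kernel matrix entry produced below looks like:
#   {"kernel":"default-arm64","arch":"aarch64","docker_arch":"arm64","build_iso":"yes","runner":["ubuntu-latest"],"gha_cache":"yes"}
# GHA then fans out one job per entry via strategy.matrix / fromJSON.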
7 | 8 | # TAG selection, using environment variables: 9 | # - CI_TAGS should be a space-separated list of tags to include in the matrixes 10 | # - Those will be matched against each flavor's TAG 11 | # - Any flavor with a matching tag will be included in the matrixes 12 | 13 | # GH Runner selection, using environment variables (in order of specificity, for AMD64, same applies to ARM64 variant): 14 | ## Kernels: (1) 15 | # - CI_RUNNER_KERNEL_AMD64 is the runner to use for amd64 kernel builds 16 | # - CI_RUNNER_KERNEL is the runner to use for all kernels 17 | # - CI_RUNNER_AMD64 is the runner to use for all AMD64 things 18 | # - CI_RUNNER is the runner to use for everything 19 | ## LinuxKit/Hook: (2) 20 | # - CI_RUNNER_LK_AMD64 is the runner to use for amd64 linuxkit builds 21 | # - CI_RUNNER_LK is the runner to use for all linuxkit builds 22 | # - CI_RUNNER_AMD64 is the runner to use for all AMD64 things 23 | # - CI_RUNNER is the runner to use for everything 24 | ## LK containers (hook-bootkit, hook-docker, hook-udev): (3): 25 | # - CI_RUNNER_LK_CONTAINERS_AMD64 is the runner to use for amd64 linuxkit containers builds 26 | # - CI_RUNNER_LK_CONTAINERS is the runner to use for all linuxkit containers builds 27 | # - CI_RUNNER_AMD64 is the runner to use for all AMD64 things 28 | # - CI_RUNNER is the runner to use for everything 29 | 30 | function output_gha_matrixes() { 31 | declare -r CI_TAGS="${CI_TAGS:-"standard dev armbian-sbc armbian-uefi"}" 32 | # shellcheck disable=SC2206 # yes, we want to split 33 | declare -g -a -r CI_TAGS_ARRAY=(${CI_TAGS}) 34 | log info "CI_TAGS: ${CI_TAGS_ARRAY[*]}" 35 | 36 | declare -A all_arches=() # accumulator for every arch that is selected to be built 37 | 38 | declare full_json="" 39 | prepare_json_matrix "KERNEL" # sets full_json and adds to all_arches 40 | declare kernels_json="${full_json}" 41 | 42 | declare full_json="" 43 | prepare_json_matrix "LK" # sets full_json and adds to all_arches 44 | declare lk_hooks_json="${full_json}" 45 | 46 | declare full_json="" 47 | prepare_json_matrix_lkcontainers "LK_CONTAINERS" # reads all_arches's keys and sets full_json 48 | declare lkcontainers_json="${full_json}" 49 | 50 | declare full_json="" 51 | prepare_json_matrix_bootable "BOOTABLE" # lists boards under bootables 52 | declare bootable_json="${full_json}" 53 | 54 | log info "kernels_json to: ${kernels_json}" 55 | log info "lk_hooks_json to: ${lk_hooks_json}" 56 | log info "lkcontainers_json to: ${lkcontainers_json}" 57 | log info "bootable_json to: ${bootable_json}" 58 | 59 | # If under GHA, set a GHA output variable. 60 | if [[ -n "${GITHUB_OUTPUT}" ]]; then 61 | # shellcheck disable=SC2129 # no, thanks, shellcheck. 
62 | echo "kernels_json=${kernels_json}" >> "${GITHUB_OUTPUT}" 63 | echo "lk_hooks_json=${lk_hooks_json}" >> "${GITHUB_OUTPUT}" 64 | echo "lkcontainers_json=${lkcontainers_json}" >> "${GITHUB_OUTPUT}" 65 | echo "bootable_json=${bootable_json}" >> "${GITHUB_OUTPUT}" 66 | fi 67 | 68 | echo -n "${bootable_json}" # output the hooks matrix to stdout, for cli/jq etc 69 | #echo -n "${lk_hooks_json}" # output the hooks matrix to stdout, for cli/jq etc 70 | } 71 | 72 | function prepare_json_matrix() { 73 | declare -r matrix_type="${1}" 74 | 75 | declare -a json_items=() 76 | declare kernel 77 | for kernel in "${inventory_ids[@]}"; do 78 | declare -A kernel_info 79 | get_kernel_info_dict "${kernel}" 80 | 81 | if [[ "${matrix_type}" == "KERNEL" ]]; then # special case for kernel builds, if USE_KERNEL_ID is set, skip this kernel 82 | if [[ -n "${kernel_info[USE_KERNEL_ID]}" ]]; then 83 | log info "Skipping build of kernel '${kernel}' due to it having USE_KERNEL_ID set to '${kernel_info[USE_KERNEL_ID]}'" 84 | continue 85 | fi 86 | fi 87 | 88 | if json_matrix_tag_match "${kernel_info[TAG]}"; then 89 | declare runner="unknown-runner" 90 | runner="$(json_matrix_find_runner "${matrix_type}" "${kernel_info[DOCKER_ARCH]}")" 91 | declare gha_cache="yes" # always use GH cache; hitting DockerHub for linuxkit images is prone to rate limiting 92 | 93 | all_arches["${kernel_info[DOCKER_ARCH]}"]=1 94 | json_items+=("{\"kernel\":\"${kernel}\",\"arch\":\"${kernel_info[ARCH]}\",\"docker_arch\":\"${kernel_info[DOCKER_ARCH]}\",\"build_iso\":\"${kernel_info[SUPPORTS_ISO]}\",\"runner\":${runner},\"gha_cache\":\"${gha_cache}\"}") 95 | fi 96 | done 97 | 98 | prepare_json_array_to_json 99 | return 0 100 | } 101 | 102 | function prepare_json_matrix_bootable() { 103 | declare -r matrix_type="${1}" 104 | 105 | declare -a json_items=() 106 | declare bootable_id 107 | for bootable_id in "${bootable_inventory_ids[@]}"; do 108 | declare -g -A bootable_info=() 109 | get_bootable_info_dict "${bootable_id}" 110 | 111 | declare -g -A kernel_info=() 112 | get_kernel_info_dict "${bootable_info['INVENTORY_ID']}" 113 | 114 | if json_matrix_tag_match "${bootable_info[TAG]}"; then 115 | declare runner="unknown-runner" 116 | runner="$(json_matrix_find_runner "${matrix_type}" "${kernel_info[DOCKER_ARCH]}")" 117 | 118 | declare bootable_list_func="${bootable_info['BOOTABLE_LIST_FUNC']}" 119 | if [[ -z "${bootable_list_func}" ]]; then 120 | log error "No BOOTABLE_LIST_FUNC found for bootable '${bootable_id}'" 121 | exit 1 122 | fi 123 | 124 | declare -A bootable_boards=() 125 | log debug "Calling bootable list function: ${bootable_list_func}" 126 | "${bootable_list_func}" 127 | declare -a bootable_board_ids=("${!bootable_boards[@]}") 128 | 129 | declare bootable_board_id 130 | for bootable_board_id in "${bootable_board_ids[@]}"; do 131 | declare board_opts="${bootable_boards[${bootable_board_id}]}" 132 | json_items+=("{\"kernel\":\"${bootable_info['INVENTORY_ID']}\",\"bootable\":\"${bootable_id}\",\"arch\":\"${kernel_info[ARCH]}\",\"docker_arch\":\"${kernel_info[DOCKER_ARCH]}\",\"board_id\":\"${bootable_board_id}\",\"board_opts\":\"${board_opts}\",\"runner\":${runner}}") 133 | done 134 | fi 135 | done 136 | 137 | prepare_json_array_to_json 138 | return 0 139 | } 140 | 141 | function prepare_json_matrix_lkcontainers() { 142 | declare -r matrix_type="${1}" 143 | declare -a unique_arches=("${!all_arches[@]}") # get an array with the KEYS of all_arches dict 144 | declare -a json_items=() 145 | declare kernel 146 | for one_arch in 
"${unique_arches[@]}"; do 147 | declare runner="unknown-runner" 148 | runner="$(json_matrix_find_runner "${matrix_type}" "${one_arch}")" 149 | json_items+=("{\"docker_arch\":\"${one_arch}\",\"runner\":${runner}}") 150 | done 151 | prepare_json_array_to_json 152 | return 0 153 | } 154 | 155 | # takes json_items array, outputs full_json single-line string; massage the array into JSON (comma handling) 156 | function prepare_json_array_to_json() { 157 | declare this_json="[" 158 | declare -i counter=0 159 | declare json_item 160 | for json_item in "${json_items[@]}"; do 161 | this_json+="${json_item}" 162 | [[ $counter -lt $((${#json_items[@]} - 1)) ]] && this_json+="," # append a comma if not the last element 163 | counter+=1 164 | done 165 | this_json+="]" 166 | if [[ "${skip_jq:-"no"}" == "yes" ]]; then 167 | full_json="${this_json}" 168 | return 0 169 | fi 170 | log debug "Raw json before jq: ${this_json}" 171 | full_json="$(echo "${this_json}" | jq -c .)" # Pass it through jq for correctness check & compaction 172 | return 0 173 | } 174 | 175 | # This is probably the slowest bash code ever written 176 | function json_matrix_tag_match() { 177 | declare current_tags="${1}" 178 | # shellcheck disable=SC2206 # we want to split the string into an array, thanks 179 | declare -a current_tags_array=(${current_tags}) 180 | # if any of current_tags_array in in CI_TAGS_ARRAY, we've a match 181 | for tag in "${current_tags_array[@]}"; do 182 | for ci_tag in "${CI_TAGS_ARRAY[@]}"; do 183 | if [[ "${tag}" == "${ci_tag}" ]]; then 184 | log debug "Tag '${tag}' matches CI_TAG '${ci_tag}'..." 185 | return 0 186 | fi 187 | done 188 | done 189 | log debug "No tags matched." 190 | return 1 191 | } 192 | 193 | function json_matrix_find_runner() { 194 | declare matrix_type="${1}" 195 | declare docker_arch="${2}" 196 | declare runner="ubuntu-latest" 197 | #log debug "Finding runner for matrix type '${matrix_type}' and docker arch '${docker_arch}'" 198 | declare -a vars_to_try=("CI_RUNNER_${matrix_type^^}_${docker_arch^^}" "CI_RUNNER_${matrix_type^^}" "CI_RUNNER_${docker_arch^^}" "CI_RUNNER") 199 | for var in "${vars_to_try[@]}"; do 200 | log debug "Checking var '${var}'" 201 | if [[ -n "${!var}" && "x${!var}x" != "xx" ]]; then # if var is set, and not empty... 
202 | log debug "Found runner '${!var}' for matrix type '${matrix_type}' and docker arch '${docker_arch}' via var '${var}'" 203 | runner="${!var}" 204 | break 205 | fi 206 | done 207 | log debug "Found runner '${runner}' for matrix type '${matrix_type}' and docker arch '${docker_arch}'" 208 | 209 | # shellcheck disable=SC2206 # split by spaces, make it a json array 210 | declare -a json_items_bare=(${runner}) 211 | # wrap each json_items array item in double quotes 212 | declare -a json_items=() 213 | for item in "${json_items_bare[@]}"; do 214 | json_items+=("\"${item}\"") 215 | done 216 | declare full_json="" 217 | skip_jq="yes" prepare_json_array_to_json # skip jq; this is only a json fragment 218 | echo -n "${full_json}" 219 | return 0 220 | } 221 | -------------------------------------------------------------------------------- /images/hook-bootkit/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "encoding/base64" 7 | "encoding/json" 8 | "errors" 9 | "fmt" 10 | "io" 11 | "os" 12 | "os/signal" 13 | "path" 14 | "strings" 15 | "syscall" 16 | "time" 17 | 18 | "github.com/cenkalti/backoff/v4" 19 | "github.com/docker/docker/api/types/container" 20 | "github.com/docker/docker/api/types/image" 21 | "github.com/docker/docker/api/types/mount" 22 | "github.com/docker/docker/api/types/registry" 23 | "github.com/docker/docker/client" 24 | "github.com/go-logr/logr" 25 | "github.com/go-logr/zerologr" 26 | "github.com/rs/zerolog" 27 | ) 28 | 29 | type tinkWorkerConfig struct { 30 | // Registry configuration 31 | registry string 32 | username string 33 | password string 34 | 35 | // Tink Server GRPC address:port 36 | grpcAuthority string 37 | 38 | // Worker ID 39 | workerID string 40 | 41 | // tinkWorkerImage is the Tink worker image location. 42 | tinkWorkerImage string 43 | 44 | // tinkServerTLS is whether or not to use TLS for tink-server communication. 45 | tinkServerTLS string 46 | 47 | // tinkServerInsecureTLS is whether or not to use insecure TLS for tink-server communication; only applies is TLS itself is on 48 | tinkServerInsecureTLS string 49 | 50 | httpProxy string 51 | httpsProxy string 52 | noProxy string 53 | } 54 | 55 | func main() { 56 | ctx, done := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGHUP, syscall.SIGTERM) 57 | defer done() 58 | log := defaultLogger("debug") 59 | log.Info("starting BootKit: the tink-worker bootstrapper") 60 | 61 | for { 62 | if errors.Is(ctx.Err(), context.Canceled) { 63 | log.Info("context cancellation received, exiting") 64 | return 65 | } 66 | if err := run(ctx, log); err != nil { 67 | log.Error(err, "bootstrapping tink-worker failed") 68 | log.Info("will retry in 5 seconds") 69 | time.Sleep(5 * time.Second) 70 | continue 71 | } 72 | break 73 | } 74 | log.Info("BootKit: the tink-worker bootstrapper finished") 75 | } 76 | 77 | // TODO(jacobweinstock): clean up func run(). 78 | // 1. read /proc/cmdline 79 | // 2. parse and populate tinkConfig from contents of /proc/cmdline 80 | // 3. do validation/sanitization on tinkConfig 81 | // 4. setup docker client 82 | // 4. configure any registry auth 83 | // 5. pull tink-worker image 84 | // 6. remove any existing tink-worker container 85 | // 7. setup tink-worker container config 86 | // 8. create tink-worker container 87 | // 9. start tink-worker container 88 | // 10. 
check that the tink-worker container is running 89 | 90 | func run(ctx context.Context, log logr.Logger) error { 91 | content, err := os.ReadFile("/proc/cmdline") 92 | if err != nil { 93 | return err 94 | } 95 | cmdLines := strings.Split(string(content), " ") 96 | cfg := parseCmdLine(cmdLines) 97 | // Generate the path to the tink-worker 98 | var imageName string 99 | if cfg.registry != "" { 100 | imageName = path.Join(cfg.registry, "tink-worker:latest") 101 | } 102 | if cfg.tinkWorkerImage != "" { 103 | imageName = cfg.tinkWorkerImage 104 | } 105 | if imageName == "" { 106 | return fmt.Errorf("cannot pull image for tink-worker, 'docker_registry' and/or 'tink_worker_image' NOT specified in /proc/cmdline") 107 | } 108 | 109 | // Give time for Docker to start 110 | // Alternatively we watch for the socket being created 111 | log.Info("setting up the Docker client") 112 | 113 | os.Setenv("HTTP_PROXY", cfg.httpProxy) 114 | os.Setenv("HTTPS_PROXY", cfg.httpsProxy) 115 | os.Setenv("NO_PROXY", cfg.noProxy) 116 | // Create Docker client with API (socket) 117 | cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) 118 | if err != nil { 119 | return err 120 | } 121 | 122 | log.Info("Pulling image", "imageName", imageName) 123 | authConfig := registry.AuthConfig{ 124 | Username: cfg.username, 125 | Password: strings.TrimSuffix(cfg.password, "\n"), 126 | } 127 | 128 | encodedJSON, err := json.Marshal(authConfig) 129 | if err != nil { 130 | return err 131 | } 132 | 133 | authStr := base64.URLEncoding.EncodeToString(encodedJSON) 134 | 135 | pullOpts := image.PullOptions{} 136 | if useAuth(imageName, cfg.registry) { 137 | pullOpts.RegistryAuth = authStr 138 | } 139 | var out io.ReadCloser 140 | imagePullOperation := func() error { 141 | // with embedded images, the tink worker could potentially already exist 142 | // in the local Docker image cache. And the image name could be something 143 | // unreachable via the network (for example: 127.0.0.1/embedded/tink-worker). 144 | // Because of this we check if the image already exists and don't return an 145 | // error if the image does not exist and the pull fails. 
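// In short: a failed pull is swallowed when a local copy of the image already exists;
// otherwise backoff.Retry (below) re-attempts this closure with exponential backoff before giving up.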
146 | var imageExists bool 147 | if _, _, err := cli.ImageInspectWithRaw(ctx, imageName); err == nil { 148 | imageExists = true 149 | } 150 | out, err = cli.ImagePull(ctx, imageName, pullOpts) 151 | if err != nil && !imageExists { 152 | log.Error(err, "image pull failure", "imageName", imageName) 153 | return err 154 | } 155 | return nil 156 | } 157 | if err := backoff.Retry(imagePullOperation, backoff.NewExponentialBackOff()); err != nil { 158 | return err 159 | } 160 | 161 | if out != nil { 162 | buf := bufio.NewScanner(out) 163 | for buf.Scan() { 164 | structured := make(map[string]interface{}) 165 | if err := json.Unmarshal(buf.Bytes(), &structured); err != nil { 166 | log.Info("image pull logs", "output", buf.Text()) 167 | } else { 168 | log.Info("image pull logs", "logs", structured) 169 | } 170 | 171 | } 172 | if err := out.Close(); err != nil { 173 | log.Error(err, "closing image pull logs failed") 174 | } 175 | } 176 | 177 | log.Info("Removing any existing tink-worker container") 178 | if err := removeTinkWorkerContainer(ctx, cli); err != nil { 179 | return fmt.Errorf("failed to remove existing tink-worker container: %w", err) 180 | } 181 | 182 | log.Info("Creating tink-worker container") 183 | tinkContainer := &container.Config{ 184 | Image: imageName, 185 | Env: []string{ 186 | fmt.Sprintf("DOCKER_REGISTRY=%s", cfg.registry), 187 | fmt.Sprintf("REGISTRY_USERNAME=%s", cfg.username), 188 | fmt.Sprintf("REGISTRY_PASSWORD=%s", cfg.password), 189 | fmt.Sprintf("TINKERBELL_GRPC_AUTHORITY=%s", cfg.grpcAuthority), 190 | fmt.Sprintf("TINKERBELL_TLS=%s", cfg.tinkServerTLS), 191 | fmt.Sprintf("TINKERBELL_INSECURE_TLS=%s", cfg.tinkServerInsecureTLS), 192 | fmt.Sprintf("WORKER_ID=%s", cfg.workerID), 193 | fmt.Sprintf("ID=%s", cfg.workerID), 194 | fmt.Sprintf("HTTP_PROXY=%s", cfg.httpProxy), 195 | fmt.Sprintf("HTTPS_PROXY=%s", cfg.httpsProxy), 196 | fmt.Sprintf("NO_PROXY=%s", cfg.noProxy), 197 | }, 198 | AttachStdout: true, 199 | AttachStderr: true, 200 | } 201 | 202 | tinkHostConfig := &container.HostConfig{ 203 | Mounts: []mount.Mount{ 204 | { 205 | Type: mount.TypeBind, 206 | Source: "/worker", 207 | Target: "/worker", 208 | }, 209 | { 210 | Type: mount.TypeBind, 211 | Source: "/var/run/docker.sock", 212 | Target: "/var/run/docker.sock", 213 | }, 214 | }, 215 | NetworkMode: "host", 216 | Privileged: true, 217 | } 218 | resp, err := cli.ContainerCreate(ctx, tinkContainer, tinkHostConfig, nil, nil, "tink-worker") 219 | if err != nil { 220 | return fmt.Errorf("creating tink-worker container failed: %w", err) 221 | } 222 | 223 | log.Info("Starting tink-worker container") 224 | if err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { 225 | return fmt.Errorf("starting tink-worker container failed: %w", err) 226 | } 227 | 228 | time.Sleep(time.Second * 3) 229 | // if tink-worker is not running return error so we try again 230 | if err := checkContainerRunning(ctx, cli, resp.ID); err != nil { 231 | return fmt.Errorf("checking if tink-worker container is running failed: %w", err) 232 | } 233 | 234 | return nil 235 | } 236 | 237 | // checkContainerRunning checks if the tink-worker container is running. 
238 | func checkContainerRunning(ctx context.Context, cli *client.Client, containerID string) error { 239 | inspect, err := cli.ContainerInspect(ctx, containerID) 240 | if err != nil { 241 | return err 242 | } 243 | if !inspect.State.Running { 244 | return fmt.Errorf("tink-worker container is not running") 245 | } 246 | return nil 247 | } 248 | 249 | // removeTinkWorkerContainer removes the tink-worker container if it exists. 250 | func removeTinkWorkerContainer(ctx context.Context, cli *client.Client) error { 251 | cs, err := cli.ContainerList(ctx, container.ListOptions{All: true}) 252 | if err != nil { 253 | return fmt.Errorf("listing containers, in order to find an existing tink-worker container, failed: %w", err) 254 | } 255 | for _, c := range cs { 256 | for _, n := range c.Names { 257 | if n == "/tink-worker" { 258 | if err := cli.ContainerRemove(ctx, c.ID, container.RemoveOptions{Force: true}); err != nil { 259 | return fmt.Errorf("removing existing tink-worker container failed: %w", err) 260 | } 261 | } 262 | } 263 | } 264 | return nil 265 | } 266 | 267 | // parseCmdLine will parse the command line. 268 | // These values follow what Boots sends to the auto.ipxe Script. 269 | // https://github.com/tinkerbell/boots/blob/main/ipxe/hook.go 270 | func parseCmdLine(cmdLines []string) (cfg tinkWorkerConfig) { 271 | for i := range cmdLines { 272 | cmdLine := strings.SplitN(strings.TrimSpace(cmdLines[i]), "=", 2) 273 | if len(cmdLine) == 0 { 274 | continue 275 | } 276 | 277 | switch cmd := cmdLine[0]; cmd { 278 | case "docker_registry": 279 | cfg.registry = cmdLine[1] 280 | case "registry_username": 281 | cfg.username = cmdLine[1] 282 | case "registry_password": 283 | cfg.password = cmdLine[1] 284 | case "grpc_authority": 285 | cfg.grpcAuthority = cmdLine[1] 286 | case "worker_id": 287 | cfg.workerID = cmdLine[1] 288 | case "tink_worker_image": 289 | cfg.tinkWorkerImage = cmdLine[1] 290 | case "tinkerbell_tls": 291 | cfg.tinkServerTLS = cmdLine[1] 292 | case "tinkerbell_insecure_tls": 293 | cfg.tinkServerInsecureTLS = cmdLine[1] 294 | case "HTTP_PROXY": 295 | cfg.httpProxy = cmdLine[1] 296 | case "HTTPS_PROXY": 297 | cfg.httpsProxy = cmdLine[1] 298 | case "NO_PROXY": 299 | cfg.noProxy = cmdLine[1] 300 | } 301 | } 302 | return cfg 303 | } 304 | 305 | // defaultLogger is a zerolog logr implementation. 
306 | func defaultLogger(level string) logr.Logger { 307 | zl := zerolog.New(os.Stdout) 308 | zl = zl.With().Caller().Timestamp().Logger() 309 | var l zerolog.Level 310 | switch level { 311 | case "debug": 312 | l = zerolog.DebugLevel 313 | default: 314 | l = zerolog.InfoLevel 315 | } 316 | zl = zl.Level(l) 317 | 318 | return zerologr.New(&zl) 319 | } 320 | -------------------------------------------------------------------------------- /images/hook-bootkit/registry_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestShouldUseAuth(t *testing.T) { 8 | tests := map[string]struct { 9 | imageRef string 10 | registryHost string 11 | expected bool 12 | description string 13 | }{ 14 | // Positive cases - should use auth 15 | "exact match": { 16 | imageRef: "registry.example.com/namespace/image:tag", 17 | registryHost: "registry.example.com", 18 | expected: true, 19 | description: "Exact hostname match should use auth", 20 | }, 21 | "exact match with port": { 22 | imageRef: "registry.example.com:5000/image:tag", 23 | registryHost: "registry.example.com:5000", 24 | expected: true, 25 | description: "Exact hostname and port match should use auth", 26 | }, 27 | "localhost with port": { 28 | imageRef: "localhost:5000/image:tag", 29 | registryHost: "localhost:5000", 30 | expected: true, 31 | description: "Localhost with port should match exactly", 32 | }, 33 | "docker hub image with configured host": { 34 | imageRef: "ubuntu:20.04", 35 | registryHost: "docker.io", 36 | expected: true, 37 | description: "Docker Hub image should match when explicitly configured", 38 | }, 39 | "docker hub image with namespace": { 40 | imageRef: "library/ubuntu:20.04", 41 | registryHost: "docker.io", 42 | expected: true, 43 | description: "Docker Hub image with namespace should match when configured", 44 | }, 45 | "docker hub user image": { 46 | imageRef: "username/image:tag", 47 | registryHost: "docker.io", 48 | expected: true, 49 | description: "Docker Hub user image should match when configured", 50 | }, 51 | "ip address registry": { 52 | imageRef: "192.168.1.100:5000/image:tag", 53 | registryHost: "192.168.1.100:5000", 54 | expected: true, 55 | description: "IP address registry should match exactly", 56 | }, 57 | "ipv6 address without port": { 58 | imageRef: "[::1]/image:tag", 59 | registryHost: "[::1]", 60 | expected: true, 61 | description: "IPv6 address without port should match exactly", 62 | }, 63 | "ipv6 full address without port": { 64 | imageRef: "[2001:db8::1]/image:tag", 65 | registryHost: "[2001:db8::1]", 66 | expected: true, 67 | description: "Full IPv6 address without port should match exactly", 68 | }, 69 | "complex path": { 70 | imageRef: "registry.example.com/deep/nested/path/image:tag", 71 | registryHost: "registry.example.com", 72 | expected: true, 73 | description: "Complex path should match when registry is exact", 74 | }, 75 | "registry with https scheme": { 76 | imageRef: "registry.example.com/image:tag", 77 | registryHost: "https://registry.example.com", 78 | expected: false, 79 | description: "registry with https scheme is not a valid registry reference", 80 | }, 81 | "registry with http scheme": { 82 | imageRef: "registry.example.com:5000/image:tag", 83 | registryHost: "http://registry.example.com:5000", 84 | expected: false, 85 | description: "registry with http scheme is not a valid registry reference", 86 | }, 87 | 88 | // Security test cases - should NOT use auth (prevent 
exploitation) 89 | "substring attack - malicious registry": { 90 | imageRef: "malicious-registry.example.com.evil.com/image:tag", 91 | registryHost: "registry.example.com", 92 | expected: false, 93 | description: "Should not match when target registry is substring of malicious hostname", 94 | }, 95 | "substring attack - path injection": { 96 | imageRef: "evil.com/registry.example.com/image:tag", 97 | registryHost: "registry.example.com", 98 | expected: false, 99 | description: "Should not match when target registry appears in path", 100 | }, 101 | "substring attack - domain prefix": { 102 | imageRef: "sub.registry.example.com/image:tag", 103 | registryHost: "registry.example.com", 104 | expected: false, 105 | description: "Should not match subdomains", 106 | }, 107 | "substring attack - port manipulation": { 108 | imageRef: "registry.example.com.evil:443/image:tag", 109 | registryHost: "registry.example.com", 110 | expected: false, 111 | description: "Should not match when target is substring with malicious port", 112 | }, 113 | "substring attack - different port": { 114 | imageRef: "registry.example.com:9999/image:tag", 115 | registryHost: "registry.example.com:5000", 116 | expected: false, 117 | description: "Should not match when ports are different", 118 | }, 119 | "substring attack - unicode normalization": { 120 | imageRef: "registrу.example.com/image:tag", // Contains Cyrillic 'у' instead of 'y' 121 | registryHost: "registry.example.com", 122 | expected: false, 123 | description: "Should not match when Unicode characters are used to obfuscate registry", 124 | }, 125 | "typosquatting attack": { 126 | imageRef: "registr.example.com/image:tag", // Similar to registry.example.com 127 | registryHost: "registry.example.com", 128 | expected: false, 129 | description: "Should not match similar domain names to prevent typosquatting", 130 | }, 131 | "subdomain hijack attempt": { 132 | imageRef: "evil.registry.example.com/image:tag", // Attacker controls subdomain 133 | registryHost: "registry.example.com", 134 | expected: false, 135 | description: "Should not match when attacker controls subdomain", 136 | }, 137 | "port confusion attack": { 138 | imageRef: "registry.example.com:80/image:tag", // Attacker uses different port 139 | registryHost: "registry.example.com:443", 140 | expected: false, 141 | description: "Should not match when attacker uses different port to bypass auth", 142 | }, 143 | "path traversal attempt": { 144 | imageRef: "evil.com/../registry.example.com/image:tag", // Attacker attempts path traversal 145 | registryHost: "registry.example.com", 146 | expected: false, 147 | description: "Should not match when attacker attempts path traversal in hostname", 148 | }, 149 | "invalid registry format": { 150 | imageRef: "registry.example.com/image:tag", 151 | registryHost: "registry.example.com:abc", // Invalid port format 152 | expected: false, 153 | description: "Should not match when registry host has invalid port format", 154 | }, 155 | "invalid ipv6 address": { 156 | imageRef: "[::1]/image:tag", 157 | registryHost: "[::1:5000", // Malformed IPv6 address 158 | expected: false, 159 | description: "Should not match when registry host has malformed IPv6 address", 160 | }, 161 | "homograph attack simulation": { 162 | imageRef: "registrу.example.com/image:tag", // Contains Cyrillic 'у' instead of 'y' 163 | registryHost: "registry.example.com", 164 | expected: false, 165 | description: "Should not match when attacker uses similar-looking Unicode characters", 166 | }, 167 | 168 | // 
Edge cases
169 | "docker hub image - docker.io explicitly configured": {
170 | imageRef: "ubuntu:20.04",
171 | registryHost: "docker.io",
172 | expected: true,
173 | description: "Should match Docker Hub when explicitly configured",
174 | },
175 | "docker hub image - private registry configured": {
176 | imageRef: "ubuntu:20.04",
177 | registryHost: "registry.example.com",
178 | expected: false,
179 | description: "Should not use private registry auth for Docker Hub images",
180 | },
181 | "empty registry host": {
182 | imageRef: "registry.example.com/image:tag",
183 | registryHost: "",
184 | expected: false,
185 | description: "Should not use auth when no registry is configured",
186 | },
187 | "empty image ref": {
188 | imageRef: "",
189 | registryHost: "registry.example.com",
190 | expected: false,
191 | description: "Should not use auth for empty image reference",
192 | },
193 | "case sensitivity": {
194 | imageRef: "Registry.Example.Com/image:tag",
195 | registryHost: "registry.example.com",
196 | expected: false,
197 | description: "Should be case sensitive for security",
198 | },
199 | }
200 | 
201 | for name, tt := range tests {
202 | t.Run(name, func(t *testing.T) {
203 | result := useAuth(tt.imageRef, tt.registryHost)
204 | if result != tt.expected {
205 | t.Errorf("useAuth(%q, %q) = %v, expected %v - %s",
206 | tt.imageRef, tt.registryHost, result, tt.expected, tt.description)
207 | }
208 | })
209 | }
210 | }
211 | 
212 | // TestSecurityScenarios tests specific security scenarios to ensure the implementation
213 | // is resistant to various attack vectors.
214 | func TestSecurityScenarios(t *testing.T) {
215 | scenarios := []struct {
216 | name string
217 | imageRef string
218 | registryHost string
219 | shouldAuth bool
220 | description string
221 | }{
222 | {
223 | name: "typosquatting attack",
224 | imageRef: "registr.example.com/malware:latest",
225 | registryHost: "registry.example.com",
226 | shouldAuth: false,
227 | description: "Attacker uses similar domain name",
228 | },
229 | {
230 | name: "subdomain hijack attempt",
231 | imageRef: "evil.registry.example.com/image:latest",
232 | registryHost: "registry.example.com",
233 | shouldAuth: false,
234 | description: "Attacker controls subdomain",
235 | },
236 | {
237 | name: "homograph attack simulation",
238 | imageRef: "registrу.example.com/image:latest", // Contains Cyrillic 'у' instead of 'y'
239 | registryHost: "registry.example.com",
240 | shouldAuth: false,
241 | description: "Attacker uses similar-looking Unicode characters",
242 | },
243 | {
244 | name: "port confusion",
245 | imageRef: "registry.example.com:80/image:latest",
246 | registryHost: "registry.example.com:443",
247 | shouldAuth: false,
248 | description: "Attacker uses different port to bypass auth",
249 | },
250 | {
251 | name: "path traversal attempt",
252 | imageRef: "evil.com/../registry.example.com/image:latest",
253 | registryHost: "registry.example.com",
254 | shouldAuth: false,
255 | description: "Attacker attempts path traversal in hostname",
256 | },
257 | }
258 | 
259 | for _, scenario := range scenarios {
260 | t.Run(scenario.name, func(t *testing.T) {
261 | result := useAuth(scenario.imageRef, scenario.registryHost)
262 | if result != scenario.shouldAuth {
263 | t.Errorf("Security test failed: %s\n"+
264 | "useAuth(%q, %q) = %v, expected %v",
265 | scenario.description, scenario.imageRef, scenario.registryHost, result, scenario.shouldAuth)
266 | }
267 | })
268 | }
269 | }
270 | 
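The behaviour these tests pin down boils down to one rule: credentials are only attached when the parsed registry host of the image reference is an exact, case-sensitive match for the configured registry. A minimal sketch of such a check, assuming the `github.com/distribution/reference` package (the actual implementation lives in `images/hook-bootkit/registry.go` and may differ):

```go
package main

import (
	// Assumption: reference parsing via the distribution project's package;
	// the real registry.go may use a different mechanism.
	"github.com/distribution/reference"
)

// useAuth reports whether credentials configured for registryHost should be
// sent when pulling imageRef. Only an exact, case-sensitive match of the
// parsed registry domain is accepted: no substring, subdomain, or port-less
// matching, which is what defeats the substring, typosquatting, and port
// confusion attacks exercised by the tests above.
func useAuth(imageRef, registryHost string) bool {
	if imageRef == "" || registryHost == "" {
		return false
	}
	named, err := reference.ParseNormalizedNamed(imageRef)
	if err != nil {
		// Unparseable references (malformed ports, path traversal
		// attempts, non-ASCII homographs) never get credentials.
		return false
	}
	// Domain() yields "docker.io" for bare images like "ubuntu:20.04", so
	// Docker Hub only matches when it is explicitly configured.
	return reference.Domain(named) == registryHost
}
```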
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /linuxkit-templates/hook.template.yaml: -------------------------------------------------------------------------------- 1 | # Attention, this is a template that is run through envsubst, but with a specific list of variables to replace. Thus: 2 | # - HOOK_VERSION: ${HOOK_VERSION} (a simple 1.2.3 string, no 'v') 3 | # - HOOK_KERNEL_IMAGE: ${HOOK_KERNEL_IMAGE} 4 | # - HOOK_KERNEL_ID: ${HOOK_KERNEL_ID} 5 | # - HOOK_KERNEL_VERSION: ${HOOK_KERNEL_VERSION} 6 | # - HOOK_CONTAINER_BOOTKIT_IMAGE: ${HOOK_CONTAINER_BOOTKIT_IMAGE} 7 | # - HOOK_CONTAINER_DOCKER_IMAGE: ${HOOK_CONTAINER_DOCKER_IMAGE} 8 | # - HOOK_CONTAINER_UDEV_IMAGE: ${HOOK_CONTAINER_UDEV_IMAGE} 9 | # - HOOK_CONTAINER_ACPID_IMAGE: ${HOOK_CONTAINER_ACPID_IMAGE} 10 | # - HOOK_CONTAINER_CONTAINERD_IMAGE: ${HOOK_CONTAINER_CONTAINERD_IMAGE} 11 | # - HOOK_CONTAINER_RUNC_IMAGE: ${HOOK_CONTAINER_RUNC_IMAGE} 12 | # - HOOK_CONTAINER_EMBEDDED_IMAGE: ${HOOK_CONTAINER_EMBEDDED_IMAGE} 13 | # - Other variables are not replaced: for example this is a literal dollarsign-SOMETHING: $SOMETHING and with braces: ${SOMETHING} 14 | 15 | kernel: 16 | image: "${HOOK_KERNEL_IMAGE}" 17 | # The cmdline is a random string with 1024 characters. It is used for binary patching in the ISO image. 18 | # The kernel and initrd don't use this cmdline. The bootloader that loads the kernel determines the cmdline that will be used. 
19 | cmdline: "464vn90e7rbj08xbwdjejmdf4it17c5zfzjyfhthbh19eij201hjgit021bmpdb9ctrc87x2ymc8e7icu4ffi15x1hah9iyaiz38ckyap8hwx2vt5rm44ixv4hau8iw718q5yd019um5dt2xpqqa2rjtdypzr5v1gun8un110hhwp8cex7pqrh2ivh0ynpm4zkkwc8wcn367zyethzy7q8hzudyeyzx3cgmxqbkh825gcak7kxzjbgjajwizryv7ec1xm2h0hh7pz29qmvtgfjj1vphpgq1zcbiiehv52wrjy9yq473d9t1rvryy6929nk435hfx55du3ih05kn5tju3vijreru1p6knc988d4gfdz28eragvryq5x8aibe5trxd0t6t7jwxkde34v6pj1khmp50k6qqj3nzgcfzabtgqkmeqhdedbvwf3byfdma4nkv3rcxugaj2d0ru30pa2fqadjqrtjnv8bu52xzxv7irbhyvygygxu1nt5z4fh9w1vwbdcmagep26d298zknykf2e88kumt59ab7nq79d8amnhhvbexgh48e8qc61vq2e9qkihzt1twk1ijfgw70nwizai15iqyted2dt9gfmf2gg7amzufre79hwqkddc1cd935ywacnkrnak6r7xzcz7zbmq3kt04u2hg1iuupid8rt4nyrju51e6uejb2ruu36g9aibmz3hnmvazptu8x5tyxk820g2cdpxjdij766bt2n3djur7v623a2v44juyfgz80ekgfb9hkibpxh3zgknw8a34t4jifhf116x15cei9hwch0fye3xyq0acuym8uhitu5evc4rag3ui0fny3qg4kju7zkfyy8hwh537urd5uixkzwu5bdvafz4jmv7imypj543xg5em8jk8cgk7c4504xdd5e4e71ihaumt6u5u2t1w7um92fepzae8p0vq93wdrd1756npu1pziiur1payc7kmdwyxg3hj5n4phxbc29x0tcddamjrwt260b0w"
20 | 
21 | init:
22 | # this init container sha has support for volumes
23 | - linuxkit/init:v1.1.0
24 | - "${HOOK_CONTAINER_RUNC_IMAGE}"
25 | - "${HOOK_CONTAINER_CONTAINERD_IMAGE}"
26 | - linuxkit/ca-certificates:v1.0.0
27 | - linuxkit/firmware:24402a25359c7bc290f7fc3cd23b6b5f0feb32a5 # "Some" firmware from Linuxkit pkg; see https://github.com/linuxkit/linuxkit/blob/master/pkg/firmware/Dockerfile
28 | - "${HOOK_CONTAINER_EMBEDDED_IMAGE}"
29 | 
30 | onboot:
31 | - name: rngd1
32 | image: linuxkit/rngd:v1.0.0
33 | command: [ "/sbin/rngd", "-1" ]
34 | 
35 | - name: sysctl
36 | image: linuxkit/sysctl:v1.0.0
37 | 
38 | - name: sysfs
39 | image: linuxkit/sysfs:v1.0.0
40 | 
41 | - name: modprobe
42 | image: linuxkit/modprobe:v1.0.0
43 | command: [ "modprobe", "cdc_ncm" ] # for usb ethernet dongles
44 | 
45 | - name: udev
46 | image: "${HOOK_CONTAINER_UDEV_IMAGE}"
47 | capabilities:
48 | - all
49 | binds:
50 | - /dev:/dev
51 | - /sys:/sys
52 | - /lib/modules:/lib/modules
53 | - /run:/run
54 | rootfsPropagation: shared
55 | devices:
56 | - path: all
57 | type: b
58 | 
59 | - name: dhcpcd-once
60 | image: linuxkit/dhcpcd:v1.0.0
61 | command: [ "/etc/ip/dhcp.sh", "true" ] # 2nd parameter is one-shot true/false: true for onboot, false for services
62 | #capabilities.add:
63 | # - CAP_SYS_TIME # for ntp one-shot no-max-offset after ntpd, for hardware missing RTCs that boot in 1970
64 | capabilities:
65 | - all
66 | binds.add:
67 | - /var/lib/dhcpcd:/var/lib/dhcpcd
68 | - /run:/run
69 | - /etc/ip/dhcp.sh:/etc/ip/dhcp.sh
70 | - /dhcpcd.conf:/dhcpcd.conf
71 | runtime:
72 | mkdir:
73 | - /var/lib/dhcpcd
74 | 
75 | services:
76 | - name: rngd
77 | image: linuxkit/rngd:v1.0.0
78 | 
79 | - name: ntpd
80 | image: linuxkit/openntpd:v1.0.0
81 | 
82 | - name: udev # as a service; so system reacts to changes in devices
83 | image: "${HOOK_CONTAINER_UDEV_IMAGE}"
84 | command: [ "/lib/systemd/systemd-udevd", "--debug" ]
85 | capabilities: [ all ]
86 | binds: [ /dev:/dev, /sys:/sys, /lib/modules:/lib/modules ]
87 | rootfsPropagation: shared
88 | net: host
89 | pid: host
90 | devices:
91 | - path: all
92 | type: b
93 | - path: all
94 | type: c
95 | 
96 | - name: acpi
97 | image: "${HOOK_CONTAINER_ACPID_IMAGE}"
98 | capabilities:
99 | - all
100 | pid: host
101 | binds.add:
102 | - /dev:/dev
103 | - /var/run:/var/run
104 | devices:
105 | - path: all
106 | type: b
107 | - path: all
108 | type: c
109 | 
110 | - name: getty
111 | image: linuxkit/getty:v1.0.0
112 | capabilities:
113 | - all
114 | binds.add:
115 | - /etc/profile.d/local.sh:/etc/profile.d/local.sh
116 | - /etc/securetty:/etc/securetty
117 | - /etc/motd:/etc/motd
118 | - /etc/os-release:/etc/os-release
119 | - /:/host_root
120 | - /run:/run
121 | - /dev:/dev
122 | - /dev/console:/dev/console
123 | - /usr/bin/nerdctl:/usr/bin/nerdctl
124 | env:
125 | - INSECURE=true
126 | devices:
127 | - path: all
128 | type: b
129 | - path: all
130 | type: c
131 | - path: "/dev/console"
132 | type: c
133 | major: 5
134 | minor: 1
135 | mode: "0666"
136 | - path: "/dev/tty0"
137 | type: c
138 | major: 4
139 | minor: 0
140 | mode: "0666"
141 | - path: "/dev/tty1"
142 | type: c
143 | major: 4
144 | minor: 1
145 | mode: "0666"
146 | - path: "/dev/ttyS0"
147 | type: c
148 | major: 4
149 | minor: 64
150 | mode: "0666"
151 | - path: "/dev/ttyS1"
152 | type: c
153 | major: 4
154 | minor: 65
155 | mode: "0666"
156 | - path: "/dev/ttyS2"
157 | type: c
158 | major: 4
159 | minor: 66
160 | mode: "0666"
161 | - path: "/dev/ttyAMA0"
162 | type: c
163 | major: 204
164 | minor: 64
165 | mode: "0666"
166 | - path: "/dev/ttyAMA1"
167 | type: c
168 | major: 204
169 | minor: 65
170 | mode: "0666"
171 | - path: "/dev/ttyAML0"
172 | type: c
173 | major: 243
174 | minor: 0
175 | mode: "0666"
176 | - path: "/dev/ttyAML1"
177 | type: c
178 | major: 243
179 | minor: 1
180 | mode: "0666"
181 | - path: "/dev/ttyUSB0"
182 | type: c
183 | major: 188
184 | minor: 0
185 | mode: "0666"
186 | - path: "/dev/ttyUSB1"
187 | type: c
188 | major: 188
189 | minor: 1
190 | mode: "0666"
191 | 
192 | - name: hook-docker
193 | image: "${HOOK_CONTAINER_DOCKER_IMAGE}"
194 | capabilities:
195 | - all
196 | net: host
197 | pid: host
198 | mounts:
199 | - type: cgroup2
200 | options: [ "rw", "nosuid", "noexec", "nodev", "relatime" ]
201 | destination: /sys/fs/cgroup
202 | binds.add:
203 | - /dev/console:/dev/console
204 | - /dev:/dev
205 | - /etc/resolv.conf:/etc/resolv.conf
206 | - /lib/modules:/lib/modules
207 | - /var/run/docker:/var/run
208 | - /var/run/images:/var/lib/docker
209 | - /var/run/worker:/worker
210 | - /:/host_root
211 | runtime:
212 | mkdir:
213 | - /var/run/images
214 | - /var/run/docker
215 | - /var/run/worker
216 | devices:
217 | - path: all
218 | type: b
219 | - path: all
220 | type: c
221 | 
222 | - name: hook-bootkit
223 | image: "${HOOK_CONTAINER_BOOTKIT_IMAGE}"
224 | capabilities:
225 | - all
226 | net: host
227 | mounts:
228 | - type: cgroup2
229 | options: [ "rw", "nosuid", "noexec", "nodev", "relatime" ]
230 | destination: /sys/fs/cgroup
231 | binds:
232 | - /var/run/docker:/var/run
233 | runtime:
234 | mkdir:
235 | - /var/run/docker
236 | 
237 | - name: dhcpcd-daemon
238 | image: linuxkit/dhcpcd:v1.0.0
239 | command: [ "/etc/ip/dhcp.sh", "false" ] # 2nd parameter is one-shot true/false: true for onboot, false for services
240 | #capabilities.add:
241 | # - CAP_SYS_TIME # for ntp one-shot no-max-offset after ntpd, for hardware missing RTCs that boot in 1970
242 | capabilities:
243 | - all
244 | binds.add:
245 | - /var/lib/dhcpcd:/var/lib/dhcpcd
246 | - /run:/run
247 | - /etc/ip/dhcp.sh:/etc/ip/dhcp.sh
248 | - /dhcpcd.conf:/dhcpcd.conf
249 | runtime:
250 | mkdir:
251 | - /var/lib/dhcpcd
252 | 
253 | #SSH_SERVER - name: sshd
254 | #SSH_SERVER image: linuxkit/sshd:v1.0.0
255 | #SSH_SERVER binds.add:
256 | #SSH_SERVER - /etc/profile.d/local.sh:/etc/profile.d/local.sh
257 | #SSH_SERVER - /root/.ssh/authorized_keys:/root/.ssh/authorized_keys
258 | #SSH_SERVER - /usr/bin/nerdctl:/usr/bin/nerdctl
259 | #SSH_SERVER - 
/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt 260 | #SSH_SERVER - /:/host_root 261 | 262 | 263 | files: 264 | - path: etc/profile.d/local.sh 265 | contents: | 266 | alias docker='nerdctl -n services.linuxkit exec -it hook-docker docker' 267 | alias docker-shell='nerdctl -n services.linuxkit exec -it hook-docker sh' 268 | export PS1='HookOS ${HOOK_VERSION}:\w\$ ' 269 | # only print WARNING or higher kernel messages to console 270 | echo 4 > /proc/sys/kernel/printk 271 | mode: "0644" 272 | 273 | - path: etc/motd 274 | mode: "0644" 275 | # This is ANSI Regular font 276 | contents: | 277 | Welcome to HookOS! Your Tinkerbell operating system installation environment. 278 | 279 | ██ ██ ██ ██████ ███████ 280 | ██ ██ ██████ ██████ ██ ██ ██ ██ ██ 281 | ███████ ██ ██ ██ ██ █████ ██ ██ ███████ 282 | ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ 283 | ██ ██ ██████ ██████ ██ ██ ██████ ███████ 284 | 285 | - Hook flavor ${HOOK_KERNEL_ID} version ${HOOK_VERSION} / Kernel version ${HOOK_KERNEL_VERSION} 286 | - Use `docker` commands to access the tink worker/agent container and workflow action containers. 287 | - Logs are located in the `/var/log/` directory. 288 | - Use `docker logs tink-worker --follow` to watch the worker logs. 289 | 290 | - path: etc/os-release 291 | mode: "0444" 292 | contents: | 293 | NAME="HookOS" 294 | VERSION=${HOOK_VERSION} 295 | ID=hookos 296 | VERSION_ID=${HOOK_VERSION} 297 | PRETTY_NAME="HookOS ${HOOK_KERNEL_ID} v${HOOK_VERSION}/k${HOOK_KERNEL_VERSION}" 298 | ANSI_COLOR="1;34" 299 | HOME_URL="https://github.com/tinkerbell/hook" 300 | 301 | # Putting scripts in /etc/init.d/ allows them to be run at boot time by the init system 302 | - path: etc/init.d/002-vlan.sh 303 | source: "files/vlan.sh" 304 | mode: "0777" 305 | 306 | # Putting scripts in /etc/init.d/ allows them to be run at boot time by the init system 307 | - path: etc/init.d/003-static-network.sh 308 | source: "files/static-network.sh" 309 | mode: "0777" 310 | 311 | # This makes the script available in the host $PATH 312 | - path: sbin/setup-dns 313 | source: "files/setup-dns.sh" 314 | mode: "0777" 315 | 316 | - path: etc/ip/dhcp.sh 317 | source: "files/dhcp.sh" 318 | mode: "0777" 319 | 320 | - path: dhcpcd.conf 321 | source: "files/dhcpcd.conf" 322 | mode: "0644" 323 | 324 | - path: etc/securetty 325 | contents: | 326 | console 327 | tty0 328 | tty1 329 | tty2 330 | tty3 331 | tty4 332 | tty5 333 | tty6 334 | tty7 335 | tty8 336 | tty9 337 | tty10 338 | tty11 339 | hvc0 340 | ttyS0 341 | ttyS1 342 | ttyS2 343 | ttyAMA0 344 | ttyAMA1 345 | ttyAML0 346 | ttyAML1 347 | ttyUSB0 348 | ttyUSB1 349 | ttyUSB2 350 | 351 | #SSH_SERVER - path: root/.ssh/authorized_keys 352 | #SSH_SERVER source: ~/.ssh/id_rsa.pub 353 | #SSH_SERVER mode: "0600" 354 | #SSH_SERVER optional: true 355 | 356 | trust: 357 | org: 358 | - linuxkit 359 | - library 360 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hook 2 | 3 | Hook is the Tinkerbell Installation Environment for bare-metal. 4 | It runs in-memory, installs operating system, and handles deprovisioning. 5 | 6 | ## Motivation 7 | 8 | One of the [Tinkerbell] components is the Operating System Installation Environment (OSIE). 9 | The Tinkerbell project originally used [OSIE] as its default OSIE. 10 | That implementation was open-sourced by Equinix Metal as is and was difficult to modify/extend. 
11 | (Not to mention confusing: [OSIE] is our OSIE, Hook is a new OSIE, and you can have your very own OSIE too.)
12 | We started this project for the following reasons:
13 | 
14 | - Because we like to hack on the Kernel!
15 | - Tinkerbell architecture leaves an open door in terms of the OSIE you can use; one is provided by default for simplicity, but users can write their own.
16 | This is an implementation to validate that the model works (it does! That is why we are here.)
17 | - The CI/CD build time for [OSIE] was ~1h on average.
18 | - The [OSIE] build process was not standardised, which matters for an open-source project because it causes friction for contributors.
19 | This project, as highlighted later in this page, uses [LinuxKit].
20 | It gives us:
21 | - Documentation about how the building phase works
22 | - A clear and defined CLI and [specification] (YAML)
23 | - A shared community that is supportive
24 | - LinuxKit cross-compiles for many architectures
25 | - Different output formats: ISO, init ramdisk, aws, docker, rpi3... see [formats].
26 | - It was not easy to explain to the Tinkerbell community how [OSIE] works and the components it is made of.
27 | A lot of the components were Equinix Metal-specific and are not strictly needed in Tinkerbell.
28 | 
29 | ## Architecture
30 | 
31 | The hook project aims to provide an "in-place" swappable set of files (`kernel`/`initramfs`) that can be used to function as the Tinkerbell OSIE.
32 | The key aims of this new project:
33 | 
34 | - Immutable output
35 | - Batteries included (but swappable if needed)
36 | - Ease of build (subsequent builds of hook are ~47 seconds)
37 | - Lean / simple design
38 | - Clean base to build upon
39 | 
40 | The hook project predominantly uses [linuxkit] as the toolkit that produces repeatable and straightforward builds of the entire in-memory operating system.
41 | The linuxkit project combines a Linux kernel with a number of additional container images to produce a Linux Operating System with just the right amount of functionality (no less / no more).
42 | We have built upon the minimal set of components:
43 | 
44 | - containerd (the engine to start/stop all other components in a LinuxKit OS)
45 | - dhcpcd (for network access)
46 | - ntpd (network time)
47 | - rngd (random number generation for entropy)
48 | 
49 | To this minimal build, we've added our own set of containers that provide the functionality needed for a `tink-worker` to run successfully:
50 | 
51 | ### hook-docker
52 | 
53 | The `hook-docker` container builds upon the upstream `dind` (docker-in-docker) container.
54 | It adds the functionality to retrieve the certificates needed for the docker engine to communicate with the Tinkerbell repository **before** it starts the docker engine.
55 | The docker engine is exposed through `/var/run/docker.sock`, which is bind-mounted so that the `hook-bootkit` container can access it.
56 | 
57 | ### hook-bootkit
58 | 
59 | The `hook-bootkit` container parses `/proc/cmdline` and queries the metadata service to retrieve the specific configuration with which tink-worker should be started for the current (correct) machine.
60 | It then talks to the `hook-docker` engine API through the shared `/var/run/docker.sock`, asking the engine to run the `tink-worker:latest` container.
61 | `tink-worker:latest` will in turn execute the workflow/actions associated with that machine.
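To make that flow concrete, here is a minimal, illustrative sketch (not the actual implementation; see `parseCmdLine` in `images/hook-bootkit/main.go` for the real thing) of the first step: reading `/proc/cmdline` and splitting it into `key=value` parameters. The printed keys (`grpc_authority`, `worker_id`) are real parameters handled by hook-bootkit:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// /proc/cmdline is a single line of space-separated parameters, e.g.
	// "console=ttyS0 grpc_authority=192.0.2.1:42113 worker_id=de:ad:be:ef:00:01".
	raw, err := os.ReadFile("/proc/cmdline")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	params := map[string]string{}
	for _, field := range strings.Fields(string(raw)) {
		kv := strings.SplitN(field, "=", 2)
		if len(kv) != 2 {
			continue // bare flags like "quiet" carry no value
		}
		params[kv[0]] = kv[1]
	}
	fmt.Println("tink server:", params["grpc_authority"])
	fmt.Println("worker id:", params["worker_id"])
}
```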
62 | 
63 | ## Developer/builder guide
64 | 
65 | ### Introduction / recently changed
66 | 
67 | > This refers to the 0.9.0 version, compared to 0.8.1.
68 | 
69 | - Replaces the emulated Alpine kernel build with a Debian-based cross-compiling build
70 | - Much faster building. Emulating x86_64 on arm64 is very slow, and vice-versa.
71 | - Replaces kernel .configs with the `defconfig` versions, via Kbuild's `make savedefconfig`
72 | - Replaces Git-SHA1-based image versioning ("current revision") with content-based hashing.
73 | - This way, there's much higher cache reuse, and new versions are pushed only when components actually change (caveat emptor)
74 | - Should allow people to develop Hook without having to build a kernel, depending on CI frequency and luck.
75 | - Introduces multiple "flavors" of hook. Instead of being restricted to two hardcoded flavors (x86_64 and aarch64, built from source), we can now define multiple flavors, each with an ID and version/configure/build methods.
76 | - the `hook-default-amd64` and `hook-default-arm64` kernels are equivalent to the two original ones.
77 | - the `armbian-` prefixed kernels are actually Armbian kernels for more exotic arm64 SBCs, or Armbian's generic UEFI kernels for both arches. Those are very fast to "build" since Armbian publishes their .deb packages in OCI images, and here we
78 | just download and massage them into the format required by Linuxkit.
79 | - `hook.yaml` is replaced with `hook.template.yaml`, which is templated via a limited-var invocation of `envsubst`; only the kernel image and the arch actually differ per flavor.
80 | - Auto-updating of the kernel via kernel.org's JSON endpoint (of course, this only works for LTS or recent-enough stable kernels). You can opt out and use a fixed version.
81 | - Auto-updating of Armbian kernels via OCI tag listing with `skopeo`. You can opt out and use a fixed version.
82 | - DTB-producing kernel builds (aarch64) produce a `dtbs.tar.gz` artifact together with the initrd and vmlinuz. DTBs are not used by Hook or Tinkerbell right now, but will be necessary for some SBCs.
83 | 
84 | ### Flavor / `id`
85 | 
86 | The Hook build system is designed to handle multiple flavors.
87 | A flavor mostly equates with a specific Linux kernel, a LinuxKit version, and a LinuxKit YAML configuration template.
88 | The "default" flavor ids are `hook-default-amd64` and `hook-default-arm64`, which use a kernel that is built and configured from source by the Hook build system.
89 | Other flavors use foreign kernels from other distributions to cater for special needs.
90 | 
91 | There is an inventory of all available flavors in the [bash/inventory.sh](bash/inventory.sh) file.
92 | 
93 | ### Command line interface (`build.sh`)
94 | 
95 | The `build.sh` script is the main entry point for building a Hook flavor.
96 | The general syntax of the CLI is:
97 | 
98 | `./build.sh [<command>] [<flavor-id>] [<env_var>=<value>...]`
99 | 
100 | Where:
101 | 
102 | - `<command>`, if not specified, defaults to `build`
103 | - `<flavor-id>`, if not specified, defaults to `hook-default-amd64` (or the arm64 variant, if running on an arm64 host); the full list is defined in [bash/inventory.sh](bash/inventory.sh)
104 | - `<env_var>=<value>` pairs set environment variables (similar to `make`) and can come in any position in the command line.
105 | 
106 | So, just running `./build.sh` will build the default flavor for the host architecture.
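For example, `./build.sh kernel hook-default-arm64` builds just the arm64 default kernel, and `./build.sh qemu hook-default-amd64 TINK_SERVER=192.0.2.1 MAC=de:ad:be:ef:00:01 DEBUG=yes` builds the default amd64 flavor and boots it in QEMU against a Tinkerbell server (the IP and MAC values here are placeholders; the variables are described under the environment variables section below).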
107 | 
108 | Other commands are:
109 | 
110 | - `kernel <flavor-id>`: builds the kernel for the specified flavor
111 | - for `default` ids, this will build the kernel from source
112 | - for other methods, usually this will download & massage the kernels from a distro's packages
113 | - `config <flavor-id>`: runs kernel configuration for the specified flavor.
114 | - this only works for the default flavors; foreign kernels are configured elsewhere.
115 | - it will open an interactive menuconfig session where you can change kernel config options; after exiting, `savedefconfig` will be run and the resulting file copied back to the host, ready for commit.
116 | - `build <flavor-id>`: builds the Hook flavor. The kernel must either be available for pulling or have been built locally beforehand.
117 | - `qemu <flavor-id>`: builds the Hook flavor and runs it in QEMU.
118 | - this accepts the `MAC=<mac>` and `TINK_SERVER=<ip>` env vars; see below
119 | 
120 | Other, less common commands are:
121 | 
122 | - `kernel-config-shell <flavor-id>`: prepares an interactive Docker shell for advanced kernel .config operations.
123 | - `shellcheck`: runs shellcheck on all bash scripts in the project and exits with an error if any issues are found.
124 | - `linuxkit-containers`: builds the LinuxKit containers for the specified architecture.
125 | 
126 | #### Environment variables for building/testing
127 | 
128 | Using the `<env_var>=<value>` syntax, you can set environment variables that will be used by the build system.
129 | Of course, you may also set them in the environment before running the script (this is heavily used by the GitHub Actions build workflow).
130 | 
131 | The most important environment variables are:
132 | 
133 | - general, applies to most commands:
134 | - `DEBUG=yes`: set this to get lots of debugging messages, which can make understanding the build and finding problems easier.
135 | - `HOOK_VERSION`: the Hook version; ends up in `/etc/os-release` and on the screen at boot.
136 | - `HOOK_KERNEL_OCI_BASE`: OCI base coordinates for the kernel images.
137 | - `HOOK_LK_CONTAINERS_OCI_BASE`: OCI base coordinates for the LinuxKit containers.
138 | - `CACHE_DIR`: directory where the build system will cache downloaded files. Relative to the project root.
139 | - `USE_LATEST_BUILT_KERNEL`: set this to `yes` to use the latest built kernel from `quay.io/tinkerbell/hook-kernel`.
140 | - `LINUXKIT_ISO`: set this to `yes` to build an ISO image instead of a kernel and initrd.
141 | - exclusively for the `qemu` command:
142 | - `TINK_SERVER=<ip>`: the IP address of the Tinkerbell GRPC server. No default.
143 | - `MAC=<mac-address>`: the MAC address of the machine that will be provisioned. No default.
144 | - and also:
145 | - `TINK_WORKER_IMAGE`, defaults to `"quay.io/tinkerbell/tink-worker:latest"`
146 | - `TINK_TLS`, defaults to `false`
147 | - `TINK_GRPC_PORT`, defaults to `42113`
148 | 
149 | ### CI (GitHub Actions)
150 | 
151 | - There's a distributed GitHub Actions build workflow `"matrix"`.
152 | - The bash build system produces JSON objects that drive the matrix stages:
153 | - The first matrix is per-arch and builds all the containers whose source is hosted in this repo (bootkit, docker, udev)
154 | - The second matrix is per-flavor(/kernel) and builds the kernel
155 | - The third matrix, which depends on the other two, is per-flavor(/kernel): it builds Hook itself (via LinuxKit) and uploads a .tar.gz to GH artifacts
156 | 
157 | The `gha-matrix` CLI command prepares a set of JSON outputs for the GitHub Actions matrix workflow, based on the inventory and certain environment variables:
158 | 
159 | - The `CI_RUNNER_*` variables are used to determine the GH Actions runners (self-hosted or GH-hosted) that are used for each step. See [bash/json-matrix.sh](bash/json-matrix.sh) for details.
160 | - `CI_TAGS`, a space-separated list of tags that will be used to filter the inventory.
161 | - `DOCKER_ARCH` is used by the `linuxkit-containers` command to build the containers for the specified architecture.
162 | - `DO_PUSH`: `yes` or `no`, will push the built containers to the OCI registry; defaults to `no`.
163 | 
164 | ### Embedding container images into the DinD (docker-in-docker) container, also known as [hook-docker](images/hook-docker/)
165 | 
166 | For use cases where container images need to be already available in Docker, the following steps can be taken to embed container images into hook-docker (DinD):
167 | 
168 | > Note: This is optional; no container images are embedded by default.
169 | 
170 | > Note: This will increase the overall size of HookOS. As HookOS is an in-memory OS, make sure that the size increase works for the machines you are provisioning.
171 | 
172 | 1. Create a file named `images.txt` in the [images/hook-embedded/](images/hook-embedded/) directory.
173 | 1. Populate this `images.txt` file with the list of images to be embedded. See [images/hook-embedded/images.txt.example](images/hook-embedded/images.txt.example) for details on the required file format.
174 | 1. Change directories to [images/hook-embedded/](images/hook-embedded/) and run the [`pull-images.sh`](images/hook-embedded/pull-images.sh) script when building amd64 images, or [`pull-images.sh arm64`](images/hook-embedded/pull-images.sh) when building arm64 images. Read the comments at the top of the script for more details.
175 | 1. Change directories to the root of the HookOS repository and run `sudo ./build.sh build ...` to build the HookOS kernel and ramdisk. FYI, `sudo` is needed as DinD changes file ownership to root.
176 | 
177 | ### Build system TO-DO list
178 | 
179 | - [ ] `make debug` functionality (sshd enabled) was lost in the Makefile -> bash transition.
180 | 
181 | [formats]: https://github.com/linuxkit/linuxkit/blob/master/README.md#booting-and-testing
182 | [linuxkit]: https://github.com/linuxkit/linuxkit
183 | [osie]: https://github.com/tinkerbell/osie
184 | [specification]: https://github.com/linuxkit/linuxkit/blob/master/docs/yaml.md
185 | [tinkerbell]: https://tinkerbell.org
186 | --------------------------------------------------------------------------------