├── .gitignore ├── network.png ├── tp-link-sg108e-802-1q-vlan-configuration.png ├── tp-link-sg108e-802-1q-vlan-pvid-configuration.png ├── grafana-datasources.yml ├── actions ├── reboot │ ├── Dockerfile │ └── provision.sh ├── provision.sh ├── reset-uefi-boot │ ├── Dockerfile │ ├── provision.sh │ └── entrypoint.sh ├── clonezilla-restoredisk │ ├── provision.sh │ ├── Dockerfile │ └── entrypoint.sh └── flatcar-install │ ├── provision.sh │ └── Dockerfile ├── meshcommander └── Dockerfile ├── workers ├── provision.sh ├── hardware-odyssey.json ├── hardware-desktop-mini.json ├── hardware-nuc.json ├── provision-nuc.sh ├── provision-odyssey.sh ├── provision-desktop-mini.sh ├── provision-virtual.sh └── provision-rpis.sh ├── provision-meshcommander.sh ├── provision-docker-hub-auth.sh ├── templates ├── provision.sh ├── debian │ ├── provision.sh │ ├── workflow-template.yml │ └── provision-workflow.sh ├── ubuntu │ ├── provision.sh │ ├── workflow-template.yml │ └── provision-workflow.sh ├── proxmox-ve │ ├── provision.sh │ ├── workflow-template.yml │ └── provision-workflow.sh ├── flatcar-linux │ ├── provision.sh │ ├── flatcar-linux-config.yml │ ├── provision-workflow.sh │ └── workflow-template.yml ├── windows-2022 │ ├── provision.sh │ ├── workflow-template.yml │ └── provision-workflow.sh └── hello-world │ ├── provision-workflow.sh │ └── provision.sh ├── provision-docker-compose.sh ├── loki-config.yml ├── provision-go.sh ├── provision-grafana.sh ├── provision-nfs-server.sh ├── provision-docker-buildx.sh ├── provision-portainer.sh ├── provision-tinkerbell-tink.sh ├── provision-loki.sh ├── provision-debian-osie.sh ├── provision-debian-boots.sh ├── provision-docker.sh ├── provision-tinkerbell.sh ├── summary.sh ├── provision-base.sh ├── tink-helpers.source.sh ├── Vagrantfile ├── README.md └── network.uxf /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant/ 2 | tmp/ 3 | *.img 4 | *.log 5 | *.zip 6 | 
-------------------------------------------------------------------------------- /network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgl/tinkerbell-vagrant/HEAD/network.png -------------------------------------------------------------------------------- /tp-link-sg108e-802-1q-vlan-configuration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgl/tinkerbell-vagrant/HEAD/tp-link-sg108e-802-1q-vlan-configuration.png -------------------------------------------------------------------------------- /tp-link-sg108e-802-1q-vlan-pvid-configuration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rgl/tinkerbell-vagrant/HEAD/tp-link-sg108e-802-1q-vlan-pvid-configuration.png -------------------------------------------------------------------------------- /grafana-datasources.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | datasources: 3 | - name: Loki 4 | type: loki 5 | access: proxy 6 | url: @@loki_base_url@@ 7 | jsonData: 8 | maxLines: 1000 9 | -------------------------------------------------------------------------------- /actions/reboot/Dockerfile: -------------------------------------------------------------------------------- 1 | # NB YES its overkill to use a container to implement a reboot. 
2 | # see https://github.com/tinkerbell/tink/issues/71 3 | FROM busybox 4 | 5 | ENTRYPOINT ["touch", "/worker/reboot"] 6 | -------------------------------------------------------------------------------- /actions/provision.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | cd /vagrant/actions 4 | 5 | bash reboot/provision.sh 6 | bash reset-uefi-boot/provision.sh 7 | bash clonezilla-restoredisk/provision.sh 8 | bash flatcar-install/provision.sh 9 | -------------------------------------------------------------------------------- /meshcommander/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14-alpine3.14 2 | RUN npm install meshcommander@0.9.1-a 3 | EXPOSE 4000 4 | # TODO use tini or s6 init to start meshcommander. 5 | CMD ["/node_modules/.bin/meshcommander", "--any", "--port", "4000"] 6 | -------------------------------------------------------------------------------- /actions/reset-uefi-boot/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | 3 | RUN apt-get update && \ 4 | apt-get install -y \ 5 | efibootmgr && \ 6 | rm -rf /var/lib/apt/lists/* 7 | 8 | COPY --chmod=755 entrypoint.sh / 9 | 10 | ENTRYPOINT ["/entrypoint.sh"] -------------------------------------------------------------------------------- /workers/provision.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | cd /vagrant/workers 4 | 5 | bash provision-virtual.sh 6 | bash provision-rpis.sh 7 | bash provision-nuc.sh 8 | bash provision-desktop-mini.sh 9 | bash provision-odyssey.sh 10 | 11 | source /vagrant/tink-helpers.source.sh 12 | tink hardware get 13 | -------------------------------------------------------------------------------- /provision-meshcommander.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | cd /vagrant/meshcommander 4 | 5 | docker build -t meshcommander . 6 | 7 | docker run \ 8 | -d \ 9 | --restart unless-stopped \ 10 | --name meshcommander \ 11 | -p 4000:4000 \ 12 | -e NODE_ENV=production \ 13 | meshcommander 14 | -------------------------------------------------------------------------------- /actions/clonezilla-restoredisk/provision.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | source /vagrant/tink-helpers.source.sh 4 | 5 | cd "$(dirname "${BASH_SOURCE[0]}")" 6 | 7 | # create the action image. 8 | docker build -t $TINKERBELL_HOST_IP/clonezilla-restoredisk . 9 | docker push $TINKERBELL_HOST_IP/clonezilla-restoredisk 10 | -------------------------------------------------------------------------------- /provision-docker-hub-auth.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | install -m 700 -d ~/.docker 5 | install -m 600 /dev/null ~/.docker/config.json 6 | cat >~/.docker/config.json </etc/profile.d/go.sh <<'EOF' 20 | #[[ "$-" != *i* ]] && return 21 | export PATH="$PATH:/usr/local/go/bin" 22 | export PATH="$PATH:$HOME/go/bin" 23 | EOF 24 | -------------------------------------------------------------------------------- /templates/hello-world/provision-workflow.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | source /vagrant/tink-helpers.source.sh 4 | 5 | cd "$(dirname "${BASH_SOURCE[0]}")" 6 | 7 | hardware_hostname="$1" 8 | 9 | # find the hardware with the given hostname. 10 | hardware_mac="$(get-hardware-mac "$hardware_hostname")" 11 | 12 | # find the template id. 
13 | template_id="$(tink template get --format json | jq -r '.data[] | select(.name=="hello-world") | .id')" 14 | 15 | # delete the workflows associated with the hardware. 16 | delete-hardware-workflows "$hardware_hostname" 17 | 18 | # create the workflow. 19 | hardware="$(jo device_1=$hardware_mac)" 20 | workflow_output="$(tink workflow create --template "$template_id" --hardware "$hardware")" 21 | workflow_id="$(echo "$workflow_output" | perl -n -e '/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/ && print $1')" 22 | tink workflow get "$workflow_id" 23 | -------------------------------------------------------------------------------- /workers/hardware-odyssey.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "$worker_id", 3 | "metadata": { 4 | "facility": { 5 | "facility_code": "onprem" 6 | }, 7 | "instance": { 8 | "hostname": "$worker_name" 9 | }, 10 | "state": "" 11 | }, 12 | "network": { 13 | "interfaces": [ 14 | { 15 | "dhcp": { 16 | "arch": "x86_64", 17 | "uefi": true, 18 | "mac": "$worker_mac_address", 19 | "ip": { 20 | "address": "$worker_ip_address", 21 | "netmask": "255.255.255.0", 22 | "gateway": "$provisioner_ip_address" 23 | }, 24 | "lease_time": 86400, 25 | "name_servers": ["1.1.1.1", "1.0.0.1"], 26 | "hostname": "$worker_name", 27 | "iface_name": "eth0" 28 | }, 29 | "netboot": { 30 | "allow_pxe": true, 31 | "allow_workflow": true 32 | } 33 | } 34 | ] 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /workers/hardware-desktop-mini.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "$worker_id", 3 | "metadata": { 4 | "facility": { 5 | "facility_code": "onprem" 6 | }, 7 | "instance": { 8 | "hostname": "$worker_name" 9 | }, 10 | "state": "" 11 | }, 12 | "network": { 13 | "interfaces": [ 14 | { 15 | "dhcp": { 16 | "arch": "x86_64", 17 | "uefi": true, 18 | "mac": "$worker_mac_address", 
19 | "ip": { 20 | "address": "$worker_ip_address", 21 | "netmask": "255.255.255.0", 22 | "gateway": "$provisioner_ip_address" 23 | }, 24 | "lease_time": 86400, 25 | "name_servers": ["1.1.1.1", "1.0.0.1"], 26 | "hostname": "$worker_name", 27 | "iface_name": "eth0" 28 | }, 29 | "netboot": { 30 | "allow_pxe": true, 31 | "allow_workflow": true 32 | } 33 | } 34 | ] 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /workers/hardware-nuc.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "$worker_id", 3 | "metadata": { 4 | "facility": { 5 | "facility_code": "onprem" 6 | }, 7 | "instance": { 8 | "hostname": "$worker_name" 9 | }, 10 | "state": "" 11 | }, 12 | "network": { 13 | "interfaces": [ 14 | { 15 | "dhcp": { 16 | "arch": "x86_64", 17 | "uefi": $worker_efi_boot, 18 | "mac": "$worker_mac_address", 19 | "ip": { 20 | "address": "$worker_ip_address", 21 | "netmask": "255.255.255.0", 22 | "gateway": "$provisioner_ip_address" 23 | }, 24 | "lease_time": 86400, 25 | "name_servers": ["1.1.1.1", "1.0.0.1"], 26 | "hostname": "$worker_name", 27 | "iface_name": "eth0" 28 | }, 29 | "netboot": { 30 | "allow_pxe": true, 31 | "allow_workflow": true 32 | } 33 | } 34 | ] 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /provision-grafana.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | loki_ip_address="${1:-10.3.0.2}" 5 | 6 | # see https://github.com/grafana/grafana/releases 7 | # see https://hub.docker.com/r/grafana/grafana/tags 8 | grafana_version="8.3.1" 9 | 10 | mkdir -p grafana/datasources 11 | cd grafana 12 | 13 | # configure grafana. 
14 | # see https://grafana.com/docs/grafana/latest/administration/configure-docker/ 15 | # see https://grafana.com/docs/grafana/latest/administration/provisioning/#datasources 16 | # see https://grafana.com/docs/grafana/latest/datasources/loki/#configure-the-data-source-with-provisioning 17 | sed -E "s,@@loki_base_url@@,http://$loki_ip_address:3100,g" /vagrant/grafana-datasources.yml \ 18 | >datasources/datasources.yml 19 | 20 | # start grafana. 21 | # see https://grafana.com/docs/grafana/latest/installation/docker/ 22 | docker run \ 23 | -d \ 24 | --restart unless-stopped \ 25 | --name grafana \ 26 | -p 3000:3000 \ 27 | -v $PWD/datasources:/etc/grafana/provisioning/datasources \ 28 | grafana/grafana:$grafana_version 29 | -------------------------------------------------------------------------------- /provision-nfs-server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | source /vagrant/tink-helpers.source.sh 4 | 5 | ip_address="$(ip add show eth1 | perl -n -e '/^\s+inet (\d+(\.\d+)+)/ && print $1')" 6 | 7 | # 8 | # provision the NFS server. 9 | # see exports(5). 10 | 11 | apt-get install -y nfs-kernel-server 12 | 13 | # configure the images export. 14 | install -d -o nobody -g nogroup -m 700 /var/nfs/images 15 | chmod 755 /root # NB our images come from a sub-directry in this tree. 
16 | install -d $TINKERBELL_STATE_WEBROOT_PATH/images 17 | cat >>/etc/fstab </etc/exports.d/images.exports </dev/null 30 | authentication_token="$(http \ 31 | --ignore-stdin \ 32 | POST \ 33 | localhost:9000/api/auth \ 34 | Username=admin \ 35 | Password=abracadabra \ 36 | | jq -r .jwt)" 37 | http \ 38 | --ignore-stdin \ 39 | --form \ 40 | POST \ 41 | localhost:9000/api/endpoints \ 42 | "Authorization: Bearer $authentication_token" \ 43 | Name=docker \ 44 | EndpointCreationType=1 \ 45 | >/dev/null 46 | -------------------------------------------------------------------------------- /templates/debian/provision-workflow.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | source /vagrant/tink-helpers.source.sh 4 | 5 | cd "$(dirname "${BASH_SOURCE[0]}")" 6 | 7 | hardware_hostname="$1" 8 | boot_device="${2:-/dev/sda}" 9 | img_url="http://$TINKERBELL_HOST_IP:8080/images/debian-amd64/" 10 | img_url="nfs://$TINKERBELL_HOST_IP/var/nfs/images/debian-amd64" 11 | 12 | # create the image. 13 | # NB this image can created from https://github.com/rgl/debian-vagrant. 14 | install-vagrant-box-clonezilla-image debian-11-uefi-amd64 debian-amd64 15 | 16 | # find the hardware with the given hostname. 17 | hardware_mac="$(get-hardware-mac "$hardware_hostname")" 18 | 19 | # find the template id. 20 | template_id="$(tink template get --format json | jq -r '.data[] | select(.name=="debian") | .id')" 21 | 22 | # delete the workflows associated with the hardware. 23 | delete-hardware-workflows "$hardware_hostname" 24 | 25 | # create the workflow associated with the hardware. 
26 | hardware="$(jo device_1="$hardware_mac" img_url="$img_url" boot_device="$boot_device")" 27 | workflow_output="$(tink workflow create --template "$template_id" --hardware "$hardware")" 28 | workflow_id="$(echo "$workflow_output" | perl -n -e '/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/ && print $1')" 29 | tink workflow get "$workflow_id" 30 | -------------------------------------------------------------------------------- /templates/ubuntu/provision-workflow.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | source /vagrant/tink-helpers.source.sh 4 | 5 | cd "$(dirname "${BASH_SOURCE[0]}")" 6 | 7 | hardware_hostname="$1" 8 | boot_device="${2:-/dev/sda}" 9 | #img_url="http://$TINKERBELL_HOST_IP:8080/images/ubuntu-amd64.raw.gz" 10 | img_url="nfs://$TINKERBELL_HOST_IP/var/nfs/images/ubuntu-amd64" 11 | 12 | # create the image. 13 | # NB this image can created from https://github.com/rgl/ubuntu-vagrant. 14 | install-vagrant-box-clonezilla-image ubuntu-20.04-uefi-amd64 ubuntu-amd64 15 | 16 | # find the hardware with the given hostname. 17 | hardware_mac="$(get-hardware-mac "$hardware_hostname")" 18 | 19 | # find the template id. 20 | template_id="$(tink template get --format json | jq -r '.data[] | select(.name=="ubuntu") | .id')" 21 | 22 | # delete the workflows associated with the hardware. 23 | delete-hardware-workflows "$hardware_hostname" 24 | 25 | # create the workflow associated with the hardware. 
26 | hardware="$(jo device_1="$hardware_mac" img_url="$img_url" boot_device="$boot_device")" 27 | workflow_output="$(tink workflow create --template "$template_id" --hardware "$hardware")" 28 | workflow_id="$(echo "$workflow_output" | perl -n -e '/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/ && print $1')" 29 | tink workflow get "$workflow_id" 30 | -------------------------------------------------------------------------------- /templates/windows-2022/provision-workflow.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | source /vagrant/tink-helpers.source.sh 4 | 5 | cd "$(dirname "${BASH_SOURCE[0]}")" 6 | 7 | hardware_hostname="$1" 8 | boot_device="${2:-/dev/sda}" 9 | img_url="http://$TINKERBELL_HOST_IP:8080/images/windows-2022-amd64/" 10 | img_url="nfs://$TINKERBELL_HOST_IP/var/nfs/images/windows-2022-amd64" 11 | 12 | # create the image. 13 | # NB this image can created from https://github.com/rgl/windows-vagrant. 14 | install-vagrant-box-clonezilla-image windows-2022-uefi-amd64 windows-2022-amd64 15 | 16 | # find the hardware with the given hostname. 17 | hardware_mac="$(get-hardware-mac "$hardware_hostname")" 18 | 19 | # find the template id. 20 | template_id="$(tink template get --format json | jq -r '.data[] | select(.name=="windows-2022") | .id')" 21 | 22 | # delete the workflows associated with the hardware. 23 | delete-hardware-workflows "$hardware_hostname" 24 | 25 | # create the workflow associated with the hardware. 
26 | hardware="$(jo device_1="$hardware_mac" img_url="$img_url" boot_device="$boot_device")" 27 | workflow_output="$(tink workflow create --template "$template_id" --hardware "$hardware")" 28 | workflow_id="$(echo "$workflow_output" | perl -n -e '/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/ && print $1')" 29 | tink workflow get "$workflow_id" 30 | -------------------------------------------------------------------------------- /templates/proxmox-ve/provision-workflow.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | source /vagrant/tink-helpers.source.sh 4 | 5 | cd "$(dirname "${BASH_SOURCE[0]}")" 6 | 7 | hardware_hostname="$1" 8 | boot_device="${2:-/dev/sda}" 9 | img_url="http://$TINKERBELL_HOST_IP:8080/images/proxmox-ve-amd64.raw.gz" 10 | #img_url="nfs://$TINKERBELL_HOST_IP/images/proxmox-ve-amd64/" 11 | 12 | # create the image. 13 | # NB this image can created from https://github.com/rgl/proxmox-ve. 14 | install-vagrant-box-raw-image proxmox-ve-uefi-amd64 proxmox-ve-amd64 15 | #install-vagrant-box-clonezilla-image proxmox-ve-uefi-amd64 proxmox-ve-amd64 16 | 17 | # find the hardware with the given hostname. 18 | hardware_mac="$(get-hardware-mac "$hardware_hostname")" 19 | 20 | # find the template id. 21 | template_id="$(tink template get --format json | jq -r '.data[] | select(.name=="proxmox-ve") | .id')" 22 | 23 | # delete the workflows associated with the hardware. 24 | delete-hardware-workflows "$hardware_hostname" 25 | 26 | # create the workflow associated with the hardware. 
27 | hardware="$(jo device_1="$hardware_mac" img_url="$img_url" boot_device="$boot_device")" 28 | workflow_output="$(tink workflow create --template "$template_id" --hardware "$hardware")" 29 | workflow_id="$(echo "$workflow_output" | perl -n -e '/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/ && print $1')" 30 | tink workflow get "$workflow_id" 31 | -------------------------------------------------------------------------------- /provision-tinkerbell-tink.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | source /vagrant/tink-helpers.source.sh 4 | 5 | # checkout the tinkerbell tink repository. 6 | if [ ! -d ~/tinkerbell-tink ]; then 7 | cd ~ 8 | git clone --no-checkout https://github.com/tinkerbell/tink.git tinkerbell-tink 9 | cd tinkerbell-tink 10 | git checkout -f b72ab0bd24a47ef3c42d682f9ab80e3825655a82 # 2021-12-03T22:35:03Z 11 | fi 12 | 13 | # build tink-worker. 14 | # see https://github.com/tinkerbell/tink/pull/549 tink-worker: do not attach the action container stdout/stderr to the current process when --capture-action-logs=false 15 | # see https://github.com/tinkerbell/tink/pull/552 Upgrade to PostgreSQL 14 16 | cd ~/tinkerbell-tink 17 | go install golang.org/x/tools/cmd/goimports 18 | go install golang.org/x/tools/cmd/stringer 19 | rm -rf bin 20 | ln -sf "$(go env GOPATH)/bin" bin 21 | make cmd/tink-worker/tink-worker-linux-amd64 cmd/tink-worker/tink-worker-linux-arm64 22 | docker buildx build \ 23 | --tag $TINKERBELL_HOST_IP/tink-worker \ 24 | --output type=registry \ 25 | --platform linux/amd64,linux/arm64 \ 26 | --progress plain \ 27 | cmd/tink-worker 28 | docker manifest inspect $TINKERBELL_HOST_IP/tink-worker 29 | 30 | # ensure tink-worker will not be re-installed by compose. 
31 | sed -i -E 's,^(quay\.io/tinkerbell/tink-worker:.+),#\1,g' ~/tinkerbell-sandbox/deploy/compose/registry/registry_images.txt 32 | -------------------------------------------------------------------------------- /provision-loki.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | # see https://github.com/grafana/loki/releases 5 | # see https://hub.docker.com/r/grafana/loki/tags 6 | loki_version="2.4.1" 7 | 8 | # destroy the existing loki container and data. 9 | docker rm --force loki && rm -rf ~/loki && mkdir ~/loki 10 | 11 | cd ~/loki 12 | 13 | cp /vagrant/loki-config.yml . 14 | 15 | # see https://grafana.com/docs/loki/latest/installation/docker/ 16 | # see https://grafana.com/docs/loki/latest/configuration/examples/#complete-local-config 17 | # see https://hub.docker.com/r/grafana/loki 18 | # see https://github.com/grafana/loki 19 | docker run \ 20 | -d \ 21 | --restart unless-stopped \ 22 | --name loki \ 23 | -p 3100:3100 \ 24 | -v "$PWD:/etc/loki" \ 25 | grafana/loki:$loki_version \ 26 | -config.file=/etc/loki/loki-config.yml 27 | 28 | # wait for loki to be ready. 29 | # see https://grafana.com/docs/loki/latest/api/ 30 | bash -euc 'while [ "$(wget -qO- http://localhost:3100/ready)" != "ready" ]; do sleep 5; done' 31 | #wget -qO- http://localhost:3100/metrics 32 | #wget -qO- http://localhost:3100/config | yq eval - 33 | 34 | # install logcli. 
35 | # see https://grafana.com/docs/loki/latest/getting-started/logcli/ 36 | wget -q https://github.com/grafana/loki/releases/download/v$loki_version/logcli-linux-amd64.zip 37 | unzip logcli-linux-amd64.zip 38 | install -m 755 logcli-linux-amd64 /usr/local/bin/logcli 39 | rm logcli-linux-amd64* 40 | -------------------------------------------------------------------------------- /templates/flatcar-linux/provision-workflow.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | source /vagrant/tink-helpers.source.sh 4 | cd "$(dirname "${BASH_SOURCE[0]}")" 5 | 6 | hardware_hostname="$1" 7 | boot_device="${2:-/dev/sda}" 8 | provisioner_ip_address="$TINKERBELL_HOST_IP" 9 | 10 | # install the flatcar linux configuration to ignition file transpiler. 11 | if [ ! -f tmp/ct ]; then 12 | mkdir -p tmp 13 | wget -qO tmp/ct.tmp https://github.com/coreos/container-linux-config-transpiler/releases/download/v0.9.0/ct-v0.9.0-x86_64-unknown-linux-gnu 14 | chmod +x tmp/ct.tmp 15 | mv tmp/ct{.tmp,} 16 | fi 17 | 18 | # find the hardware with the given hostname. 19 | hardware_mac="$(get-hardware-mac "$hardware_hostname")" 20 | 21 | # find the template id. 22 | template_id="$(tink template get --format json | jq -r '.data[] | select(.name=="flatcar-linux") | .id')" 23 | 24 | # delete the workflows associated with the hardware. 25 | delete-hardware-workflows "$hardware_hostname" 26 | 27 | # create the workflow associated with the hardware. 
28 | ignition="$(cat flatcar-linux-config.yml | CORE_SSH_PUBLIC_KEY="$(cat /vagrant/tmp/id_rsa.pub)" DOLLAR='$' envsubst | ./tmp/ct | base64 -w0)" 29 | hardware="$(jo device_1="$hardware_mac" ignition="$ignition" boot_device="$boot_device")" 30 | workflow_output="$(tink workflow create --template "$template_id" --hardware "$hardware")" 31 | workflow_id="$(echo "$workflow_output" | perl -n -e '/([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/ && print $1')" 32 | tink workflow get "$workflow_id" 33 | -------------------------------------------------------------------------------- /templates/flatcar-linux/workflow-template.yml: -------------------------------------------------------------------------------- 1 | version: "0.1" 2 | name: flatcar-linux 3 | global_timeout: 1800 4 | tasks: 5 | - name: flatcar-install 6 | worker: '{{.device_1}}' 7 | volumes: 8 | - /dev:/dev 9 | - /sys/firmware/efi/efivars:/sys/firmware/efi/efivars 10 | - /worker:/worker 11 | actions: 12 | - name: reset-uefi-boot 13 | image: reset-uefi-boot 14 | - name: create-ignition 15 | image: flatcar-install 16 | command: 17 | - sh 18 | - -c 19 | - echo '{{.ignition}}' | base64 -d >/workflow/ignition.json 20 | - name: install-os 21 | image: flatcar-install 22 | command: 23 | # NB flatcar-linux partitions the disk as (partx --show /dev/sda): 24 | # sda1: EFI-SYSTEM 25 | # sda2: BIOS-BOOT 26 | # sda3: USR-A 27 | # sda4: USR-B 28 | # sda6: OEM (ignition is stored here as config.ign) 29 | # sda7: OEM-CONFIG 30 | # sda9: ROOT 31 | # see https://docs.flatcar-linux.org/os/installing-to-disk/ 32 | # see https://github.com/flatcar-linux/docs/blob/master/os/installing-to-disk.md 33 | # see https://github.com/flatcar-linux/init/blob/flatcar-master/bin/flatcar-install 34 | - /usr/local/bin/flatcar-install 35 | - -v 36 | - -d 37 | - '{{.boot_device}}' 38 | - -C 39 | - stable 40 | - -i 41 | - /workflow/ignition.json 42 | - name: reboot 43 | image: reboot 44 | 
-------------------------------------------------------------------------------- /actions/clonezilla-restoredisk/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | function die { 5 | echo -- "ERROR: $@" 6 | exit 1 7 | } 8 | 9 | if [ ! -v IMG_URL ]; then 10 | die 'The IMG_URL environment variable is not defined' 11 | fi 12 | 13 | if [ ! -v DEST_DEVICE ]; then 14 | die 'The DEST_DEVICE environment variable is not defined' 15 | fi 16 | 17 | CLONEZILLA_OCSROOT=/ocs 18 | CLONEZILLA_IMAGE_NAME="$(basename "$IMG_URL")" 19 | CLONEZILLA_IMAGE_MOUNT_POINT="$CLONEZILLA_OCSROOT/$CLONEZILLA_IMAGE_NAME" 20 | 21 | # mount the image. 22 | install -d $CLONEZILLA_IMAGE_MOUNT_POINT 23 | case "$IMG_URL" in 24 | http:*) 25 | # NB clonezilla over httpdirfs is very slow. 26 | httpdirfs -o ro $IMG_URL $CLONEZILLA_IMAGE_MOUNT_POINT 27 | ;; 28 | nfs:*) 29 | mount \ 30 | -t nfs4 \ 31 | -o ro,noatime,nolock \ 32 | "$(echo "$IMG_URL" | perl -n -e '/^nfs:\/\/(.+?)(\/.+)/ && print "$1:$2"')" \ 33 | $CLONEZILLA_IMAGE_MOUNT_POINT 34 | ;; 35 | *) 36 | die 'Unsupported IMG_URL scheme' 37 | ;; 38 | esac 39 | find $CLONEZILLA_IMAGE_MOUNT_POINT 40 | 41 | # show the mounts. 42 | mount 43 | 44 | # restore the image. 45 | # NB we do not use --check-sha1sum because its too time consuming. 46 | # TODO trim the free space. 47 | ocs-sr \ 48 | --batch \ 49 | --nogui \ 50 | --ocsroot $CLONEZILLA_OCSROOT \ 51 | --skip-check-restorable-r \ 52 | restoredisk \ 53 | $CLONEZILLA_IMAGE_NAME \ 54 | $(basename $DEST_DEVICE) 55 | 56 | # umount the image. 57 | umount $CLONEZILLA_IMAGE_MOUNT_POINT 58 | -------------------------------------------------------------------------------- /actions/flatcar-install/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | 3 | # install dependencies and parted for troubleshooting purposes. 
4 | # see toolset at https://github.com/flatcar-linux/init/blob/flatcar-master/bin/flatcar-install#L20 5 | RUN apt-get update && \ 6 | apt-get install -y \ 7 | btrfs-progs \ 8 | gawk \ 9 | gpg \ 10 | udev \ 11 | wget \ 12 | parted && \ 13 | rm -rf /var/lib/apt/lists/* 14 | 15 | # TODO have everything inside this container and do not go to the internet when installing flatcar? 16 | # NOPE instead we should mirror everything because the container will be 17 | # downloaded to the memory of the worker, which might not be enough. instead, 18 | # it should be streamed from the nginx server (like osie) to the disk. 19 | # flatcar-install -f file should do the trick to install. 20 | # where file is downloaded from https://${CHANNEL_ID}.release.flatcar-linux.net/${BOARD} 21 | # e.g. https://stable.release.flatcar-linux.net/amd64-usr/current/version.txt 22 | # e.g. https://stable.release.flatcar-linux.net/amd64-usr/current/flatcar_production_image.bin.bz2 23 | # TODO https://github.com/flatcar-linux/init/issues/20 24 | # see https://docs.flatcar-linux.org/os/installing-to-disk/ 25 | # see https://github.com/flatcar-linux/docs/blob/master/os/installing-to-disk.md 26 | # see https://github.com/flatcar-linux/init/blob/flatcar-master/bin/flatcar-install 27 | RUN wget \ 28 | -qO /usr/local/bin/flatcar-install \ 29 | https://raw.githubusercontent.com/flatcar-linux/init/flatcar-master/bin/flatcar-install && \ 30 | chmod +x /usr/local/bin/flatcar-install 31 | -------------------------------------------------------------------------------- /actions/reset-uefi-boot/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | # show the current mounts. 5 | mount 6 | 7 | # show the current uefi boot status (before modifying it). 8 | efibootmgr -v 9 | 10 | # delete all the boot options. 11 | # NB if we do not set any boot option, the firmware will recover/discover them 12 | # at the next boot. 
unfortunately, in my test HP EliteDesk 800 35W G2 13 | # Desktop Mini, this requires an extra reboot, which messes with the 14 | # ethernet speed by switching it to 10 Mbps. so, we also execute grub to 15 | # install the boot option. 16 | efibootmgr \ 17 | | perl -n -e '/^Boot([0-9A-F]{4})/ && print "$1\n"' \ 18 | | xargs -I% efibootmgr --quiet --delete-bootnum --bootnum % 19 | 20 | # install grub (using the target os grub-install binary). 21 | if [ -v BOOT_DEVICE ] && [ -v UEFI_DEVICE ] && [ -v ROOT_DEVICE ]; then 22 | # mount the root and uefi devices. 23 | target='/mnt/target' 24 | mkdir -p $target 25 | mount $ROOT_DEVICE $target 26 | mount $UEFI_DEVICE $target/boot/efi 27 | 28 | # bind mount the required mount points. 29 | required_mount_points=(dev proc sys sys/firmware/efi/efivars) 30 | for p in ${required_mount_points[@]}; do 31 | mount --bind "/$p" "$target/$p" 32 | done 33 | 34 | # install grub. 35 | chroot $target /usr/sbin/grub-install $BOOT_DEVICE 36 | 37 | # umount the required mount points (in reverse order). 38 | for (( i=${#required_mount_points[@]}-1; i>=0; i-- )); do 39 | p="${required_mount_points[i]}" 40 | umount "$target/$p" 41 | done 42 | 43 | # umount the root and uefi devices (in reverse order). 44 | umount $target/boot/efi 45 | umount $target 46 | fi 47 | 48 | # show the current uefi boot status. 49 | efibootmgr -v 50 | -------------------------------------------------------------------------------- /workers/provision-nuc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | source /vagrant/tink-helpers.source.sh 4 | 5 | cd "$(dirname "${BASH_SOURCE[0]}")" 6 | 7 | export provisioner_ip_address="$TINKERBELL_HOST_IP" 8 | worker_ip_address_prefix="$(echo $provisioner_ip_address | cut -d "." -f 1).$(echo $provisioner_ip_address | cut -d "." -f 2).$(echo $provisioner_ip_address | cut -d "." -f 3)" 9 | 10 | # create the hardware information about our workers. 
11 | # see https://docs.tinkerbell.org/hardware-data/ 12 | # see Hardware type at https://github.com/tinkerbell/boots/blob/9b8953aca0dc54be52f49a2062b0b2dbc6356283/packet/models.go#L54-L75 13 | # see DiscoveryTinkerbellV1 type at https://github.com/tinkerbell/boots/blob/9b8953aca0dc54be52f49a2062b0b2dbc6356283/packet/models_tinkerbell.go#L16-L20 14 | # see HardwareTinkerbellV1 type at https://github.com/tinkerbell/boots/blob/9b8953aca0dc54be52f49a2062b0b2dbc6356283/packet/models_tinkerbell.go#L22-L27 15 | # see Arch at https://github.com/tinkerbell/boots/blob/9b8953aca0dc54be52f49a2062b0b2dbc6356283/dhcp/pxe.go#L61-L71 16 | workers=( 17 | "c0:3f:d5:6c:b7:5a $worker_ip_address_prefix.13 nuc true" # my nuc physical machine. 18 | ) 19 | for worker in "${workers[@]}"; do 20 | export worker_mac_address="$(echo "$worker" | awk '{print $1}')" 21 | export worker_ip_address="$(echo "$worker" | awk '{print $2}')" 22 | export worker_name="$(echo "$worker" | awk '{print $3}')" 23 | export worker_efi_boot="$(echo "$worker" | awk '{print $4}')" 24 | export worker_id="00000000-0000-4000-8000-$(echo -n "$worker_mac_address" | tr -d :)" 25 | 26 | # create the hardware. 27 | cat hardware-nuc.json | DOLLAR='$' envsubst | tink hardware push 28 | 29 | # show the hardware. 30 | tink hardware mac "$worker_mac_address" 31 | 32 | # create the workflow. 33 | bash ../templates/flatcar-linux/provision-workflow.sh "$worker_name" 34 | done 35 | -------------------------------------------------------------------------------- /provision-debian-osie.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | source /vagrant/tink-helpers.source.sh 4 | 5 | # install dependencies. 6 | apt-get install -y p7zip-full 7 | 8 | # replace the current osie. 9 | osie_current="$TINKERBELL_STATE_WEBROOT_PATH/misc/osie/current" 10 | if [ ! 
-d "$osie_current.orig" ]; then 11 | mv "$osie_current" "$osie_current.orig" 12 | fi 13 | rm -rf "$osie_current" 14 | mkdir -p "$osie_current" 15 | pushd "$osie_current" 16 | while read arch parch; do 17 | if [ -f /vagrant/tmp/tinkerbell-debian-osie-$arch.iso ]; then 18 | 7z x /vagrant/tmp/tinkerbell-debian-osie-$arch.iso live/ 19 | mv live/vmlinuz-*-$arch vmlinuz-$parch 20 | mv live/initrd.img-*-$arch initramfs-$parch 21 | mv live/filesystem.squashfs filesystem-$parch.squashfs 22 | rm -rf live 23 | ln -sf config.sh config-$parch.sh 24 | fi 25 | done <<'EOF' 26 | amd64 x86_64 27 | arm64 aarch64 28 | EOF 29 | # create the live-config hook script that we can use to config the osie. 30 | cat >config.sh <<'SCRIPT_EOF' 31 | #!/bin/bash 32 | set -euo pipefail 33 | 34 | # NB you can see this script logs with journalctl -u live-config. 35 | # NB this file is executed synchronously by the live-config service 36 | # (/lib/systemd/system/live-config.service) from the 37 | # /lib/live/config/9990-hooks hook script. 38 | # NB the systemd basic.target is only executed after this script 39 | # finishes (live-config.service has the WantedBy=basic.target 40 | # setting). 41 | # NB normal services like containerd/dockerd/sshd are only be started 42 | # after this script finishes. 43 | 44 | function get-param { 45 | cat /proc/cmdline | tr ' ' '\n' | grep "^$1=" | sed -E 's,.+=(.*),\1,g' 46 | } 47 | 48 | #systemctl disable --now tink-worker 49 | SCRIPT_EOF 50 | popd 51 | pushd ~/tinkerbell-sandbox/deploy/compose 52 | yq eval --inplace 'del(.services.osie-work)' docker-compose.yml 53 | popd 54 | -------------------------------------------------------------------------------- /provision-debian-boots.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | source /vagrant/tink-helpers.source.sh 4 | 5 | # checkout the tinkerbell boots repository. 6 | if [ ! 
-d ~/tinkerbell-boots ]; then 7 | cd ~ 8 | git clone --no-checkout https://github.com/rgl/tinkerbell-boots.git tinkerbell-boots 9 | cd tinkerbell-boots 10 | git checkout -f rgl-debian-osie 11 | fi 12 | 13 | # install dependencies. 14 | apt-get install -y gcc-aarch64-linux-gnu build-essential liblzma-dev 15 | 16 | # build. 17 | cd ~/tinkerbell-boots 18 | go install golang.org/x/tools/cmd/goimports 19 | go install golang.org/x/tools/cmd/stringer 20 | go install github.com/golang/mock/mockgen 21 | rm -rf bin 22 | ln -sf "$(go env GOPATH)/bin" bin 23 | if [ ! -f ipxe/ipxe/ipxe.efi ]; then # do not rebuild when its already there. 24 | if [ ! -d /vagrant/tmp/ipxe ]; then # do not rebuild when its cached in the host. 25 | make ipxe 26 | rm -rf /vagrant/tmp/ipxe 27 | mkdir -p /vagrant/tmp/ipxe 28 | cp ipxe/ipxe/*.{efi,kpxe} /vagrant/tmp/ipxe 29 | else 30 | cp /vagrant/tmp/ipxe/*.{efi,kpxe} ipxe/ipxe/ 31 | fi 32 | fi 33 | sed -i -E 's,^(cmd/boots/boots: .+) ipxe (.+),\1 \2,g' rules.mk # remove the ipxe dep (we've built or copied it over in the previous step and do not want to build it again). 34 | make cmd/boots/boots-linux-amd64 cmd/boots/boots-linux-arm64 35 | docker buildx build \ 36 | --tag $TINKERBELL_HOST_IP/debian-boots \ 37 | --output type=registry \ 38 | --platform linux/amd64,linux/arm64 \ 39 | --progress plain \ 40 | . 41 | docker manifest inspect $TINKERBELL_HOST_IP/debian-boots 42 | 43 | # install and restart tinkerbell. 
44 | cd ~/tinkerbell-sandbox/deploy/compose 45 | sed -i -E "s,(BOOTS_SERVER_IMAGE)=.*,\\1=$TINKERBELL_HOST_IP/debian-boots,g" .env 46 | source .env 47 | docker compose rm --stop --force boots 48 | docker rmi --force $TINKERBELL_HOST_IP/debian-boots 49 | docker compose up --quiet-pull --detach 50 | -------------------------------------------------------------------------------- /workers/provision-odyssey.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | source /vagrant/tink-helpers.source.sh 4 | 5 | cd "$(dirname "${BASH_SOURCE[0]}")" 6 | 7 | export provisioner_ip_address="$TINKERBELL_HOST_IP" 8 | worker_ip_address_prefix="$(echo $provisioner_ip_address | cut -d "." -f 1).$(echo $provisioner_ip_address | cut -d "." -f 2).$(echo $provisioner_ip_address | cut -d "." -f 3)" 9 | 10 | # create the hardware information about our workers. 11 | # see https://docs.tinkerbell.org/hardware-data/ 12 | # see Hardware type at https://github.com/tinkerbell/boots/blob/9b8953aca0dc54be52f49a2062b0b2dbc6356283/packet/models.go#L54-L75 13 | # see DiscoveryTinkerbellV1 type at https://github.com/tinkerbell/boots/blob/9b8953aca0dc54be52f49a2062b0b2dbc6356283/packet/models_tinkerbell.go#L16-L20 14 | # see HardwareTinkerbellV1 type at https://github.com/tinkerbell/boots/blob/9b8953aca0dc54be52f49a2062b0b2dbc6356283/packet/models_tinkerbell.go#L22-L27 15 | # see Arch at https://github.com/tinkerbell/boots/blob/9b8953aca0dc54be52f49a2062b0b2dbc6356283/dhcp/pxe.go#L61-L71 16 | # NB remotely manage this with meshcommander that is running at the provisioner port 4000 and connect to this machine AMT at http://10.3.0.14:16992. 
17 | workers=( 18 | # Seeed Studio Odyssey X86J4105 board https://wiki.seeedstudio.com/ODYSSEY-X86J4105/ 19 | # see https://github.com/rgl/seeedstudio-odyssey-x86j4105-notes 20 | "00:e0:4c:01:93:a8 $worker_ip_address_prefix.15 odyssey" 21 | ) 22 | for worker in "${workers[@]}"; do 23 | export worker_mac_address="$(echo "$worker" | awk '{print $1}')" 24 | export worker_ip_address="$(echo "$worker" | awk '{print $2}')" 25 | export worker_name="$(echo "$worker" | awk '{print $3}')" 26 | export worker_id="00000000-0000-4000-8000-$(echo -n "$worker_mac_address" | tr -d :)" 27 | 28 | # create the hardware. 29 | cat hardware-odyssey.json | DOLLAR='$' envsubst | tink hardware push 30 | 31 | # show the hardware. 32 | tink hardware mac "$worker_mac_address" 33 | 34 | # create the workflow. 35 | bash ../templates/hello-world/provision-workflow.sh "$worker_name" 36 | done 37 | -------------------------------------------------------------------------------- /workers/provision-desktop-mini.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | source /vagrant/tink-helpers.source.sh 4 | 5 | cd "$(dirname "${BASH_SOURCE[0]}")" 6 | 7 | export provisioner_ip_address="$TINKERBELL_HOST_IP" 8 | worker_ip_address_prefix="$(echo $provisioner_ip_address | cut -d "." -f 1).$(echo $provisioner_ip_address | cut -d "." -f 2).$(echo $provisioner_ip_address | cut -d "." -f 3)" 9 | 10 | # create the hardware information about our workers. 
11 | # see https://docs.tinkerbell.org/hardware-data/ 12 | # see Hardware type at https://github.com/tinkerbell/boots/blob/9b8953aca0dc54be52f49a2062b0b2dbc6356283/packet/models.go#L54-L75 13 | # see DiscoveryTinkerbellV1 type at https://github.com/tinkerbell/boots/blob/9b8953aca0dc54be52f49a2062b0b2dbc6356283/packet/models_tinkerbell.go#L16-L20 14 | # see HardwareTinkerbellV1 type at https://github.com/tinkerbell/boots/blob/9b8953aca0dc54be52f49a2062b0b2dbc6356283/packet/models_tinkerbell.go#L22-L27 15 | # see Arch at https://github.com/tinkerbell/boots/blob/9b8953aca0dc54be52f49a2062b0b2dbc6356283/dhcp/pxe.go#L61-L71 16 | # NB remotely manage this with meshcommander that is running at the provisioner port 4000 and connect to this machine AMT at http://10.3.0.14:16992. 17 | workers=( 18 | # HP EliteDesk 800 35W G2 Desktop Mini https://support.hp.com/us-en/product/hp-elitedesk-800-35w-g2-desktop-mini-pc/7633266 19 | # see https://github.com/rgl/intel-amt-notes 20 | "ec:b1:d7:71:ff:f3 $worker_ip_address_prefix.14 dm" 21 | ) 22 | for worker in "${workers[@]}"; do 23 | export worker_mac_address="$(echo "$worker" | awk '{print $1}')" 24 | export worker_ip_address="$(echo "$worker" | awk '{print $2}')" 25 | export worker_name="$(echo "$worker" | awk '{print $3}')" 26 | export worker_id="00000000-0000-4000-8000-$(echo -n "$worker_mac_address" | tr -d :)" 27 | 28 | # create the hardware. 29 | cat hardware-desktop-mini.json | DOLLAR='$' envsubst | tink hardware push 30 | 31 | # show the hardware. 32 | tink hardware mac "$worker_mac_address" 33 | 34 | # create the workflow. 35 | bash ../templates/hello-world/provision-workflow.sh "$worker_name" 36 | done 37 | -------------------------------------------------------------------------------- /provision-docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | 4 | docker_version='20.10.11' 5 | 6 | # prevent apt-get et al from asking questions. 
7 | # NB even with this, you'll still get some warnings that you can ignore: 8 | # dpkg-preconfigure: unable to re-open stdin: No such file or directory 9 | export DEBIAN_FRONTEND=noninteractive 10 | 11 | # make sure the package index cache is up-to-date before installing anything. 12 | apt-get update 13 | 14 | # install docker. 15 | # see https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/#install-using-the-repository 16 | apt-get install -y apt-transport-https software-properties-common 17 | wget -qO- https://download.docker.com/linux/ubuntu/gpg | apt-key add - 18 | add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" 19 | apt-get update 20 | apt-cache madison docker-ce 21 | docker_version="$(apt-cache madison docker-ce | awk "/$docker_version~/{print \$3}")" 22 | apt-get install -y "docker-ce=$docker_version" "docker-ce-cli=$docker_version" containerd.io 23 | 24 | # configure it. 25 | systemctl stop docker 26 | cat >/etc/docker/daemon.json <<'EOF' 27 | { 28 | "experimental": false, 29 | "debug": false, 30 | "features": { 31 | "buildkit": true 32 | }, 33 | "log-driver": "journald", 34 | "labels": [ 35 | "os=linux" 36 | ], 37 | "hosts": [ 38 | "fd://" 39 | ] 40 | } 41 | EOF 42 | # start docker without any command line flags as its entirely configured from daemon.json. 43 | install -d /etc/systemd/system/docker.service.d 44 | cat >/etc/systemd/system/docker.service.d/override.conf <<'EOF' 45 | [Service] 46 | ExecStart= 47 | ExecStart=/usr/bin/dockerd 48 | EOF 49 | systemctl daemon-reload 50 | systemctl start docker 51 | 52 | # let the vagrant user manage docker. 53 | usermod -aG docker vagrant 54 | 55 | # kick the tires. 
56 | ctr version 57 | docker version 58 | docker info 59 | docker network ls 60 | ip link 61 | bridge link 62 | #docker run --rm hello-world 63 | #docker run --rm alpine ping -c1 8.8.8.8 64 | #docker run --rm debian:10 ping -c1 8.8.8.8 65 | #docker run --rm debian:10-slim cat /etc/os-release 66 | #docker run --rm ubuntu:20.04 cat /etc/os-release 67 | -------------------------------------------------------------------------------- /provision-tinkerbell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # abort this script on errors. 3 | set -euxo pipefail 4 | 5 | provisioner_ip_address="${1:-10.3.0.2}"; shift || true 6 | 7 | # install tinkerbell. 8 | # see https://docs.tinkerbell.org/setup/on-bare-metal-with-docker/ 9 | # see https://github.com/tinkerbell/sandbox 10 | # see https://github.com/rgl/tinkerbell-tink 11 | tinkerbell_repository='https://github.com/tinkerbell/sandbox.git' 12 | tinkerbell_version='1e0157f57aba422c9e919d0865d68763f89bc215' # 2021-12-02T17:47:48Z 13 | cd ~ 14 | git clone --no-checkout $tinkerbell_repository tinkerbell-sandbox 15 | cd tinkerbell-sandbox 16 | git checkout -f $tinkerbell_version 17 | cd deploy/compose 18 | sed -i -E "s,(TINKERBELL_HOST_IP)=.*,\\1=$provisioner_ip_address,g" .env 19 | yq eval --inplace 'del(.services.create-tink-records)' docker-compose.yml 20 | yq eval --inplace 'del(.services.ubuntu-image-setup)' docker-compose.yml 21 | yq eval --inplace 'del(.services.osie-bootloader.depends_on.ubuntu-image-setup)' docker-compose.yml 22 | yq eval --inplace '.services.osie-bootloader.volumes += ["./osie-bootloader/nginx-templates:/etc/nginx/templates:ro"]' docker-compose.yml 23 | install -d osie-bootloader/nginx-templates 24 | # NB autoindex is required by httpdirfs to mount an http filesystem. 
25 | cat >osie-bootloader/nginx-templates/default.conf.template <<'EOF' 26 | server { 27 | location / { 28 | root /usr/share/nginx/html; 29 | autoindex on; 30 | } 31 | } 32 | EOF 33 | docker compose pull --quiet 34 | docker compose run tls-gen 35 | docker compose up --quiet-pull --detach registry 36 | 37 | # trust the tinkerbell registry ca. 38 | # NB this is required for docker to push our images to the tinkerbell registry. 39 | # NB we must restart docker for it to pick up the new certificate. 40 | # NB we must restart docker before we start tinkerbell, as we cannot interrupt 41 | # the images-to-local-registry service before it finishes. 42 | source .env 43 | docker compose cp registry:/certs/onprem/ca-crt.pem /usr/local/share/ca-certificates/tinkerbell.crt 44 | update-ca-certificates 45 | systemctl restart docker 46 | docker login $TINKERBELL_HOST_IP --username admin --password-stdin </etc/sudoers.d/env_keep_apt 6 | chmod 440 /etc/sudoers.d/env_keep_apt 7 | export DEBIAN_FRONTEND=noninteractive 8 | 9 | # make sure grub can be installed in the current root disk. 10 | # NB these answers were obtained (after installing grub-pc) with: 11 | # 12 | # #sudo debconf-show grub-pc 13 | # sudo apt-get install debconf-utils 14 | # # this way you can see the comments: 15 | # sudo debconf-get-selections 16 | # # this way you can just see the values needed for debconf-set-selections: 17 | # sudo debconf-get-selections | grep -E '^grub-pc.+\s+' | sort 18 | debconf-set-selections </etc/sysctl.d/98-disable-ipv6.conf <<'EOF' 26 | net.ipv6.conf.all.disable_ipv6 = 1 27 | net.ipv6.conf.default.disable_ipv6 = 1 28 | net.ipv6.conf.lo.disable_ipv6 = 1 29 | EOF 30 | systemctl restart procps 31 | sed -i -E 's,(GRUB_CMDLINE_LINUX=.+)",\1 ipv6.disable=1",' /etc/default/grub 32 | update-grub2 33 | 34 | 35 | # upgrade the system. 36 | apt-get update 37 | apt-get dist-upgrade -y 38 | 39 | 40 | # 41 | # install tcpdump for being able to capture network traffic. 
42 | 43 | apt-get install -y tcpdump 44 | 45 | 46 | # 47 | # install vim. 48 | 49 | apt-get install -y --no-install-recommends vim 50 | cat >/etc/vim/vimrc.local <<'EOF' 51 | syntax on 52 | set background=dark 53 | set esckeys 54 | set ruler 55 | set laststatus=2 56 | set nobackup 57 | EOF 58 | 59 | 60 | # 61 | # configure the shell. 62 | 63 | cat >/etc/profile.d/login.sh <<'EOF' 64 | [[ "$-" != *i* ]] && return # bail when not running interactively. 65 | export EDITOR=vim 66 | export PAGER=less 67 | alias l='ls -lF --color' 68 | alias ll='l -a' 69 | alias h='history 25' 70 | alias j='jobs -l' 71 | EOF 72 | 73 | cat >/etc/inputrc <<'EOF' 74 | set input-meta on 75 | set output-meta on 76 | set show-all-if-ambiguous on 77 | set completion-ignore-case on 78 | "\e[A": history-search-backward 79 | "\e[B": history-search-forward 80 | "\eOD": backward-word 81 | "\eOC": forward-word 82 | EOF 83 | 84 | cat >~/.bash_aliases <<'EOF' 85 | source /vagrant/tink-helpers.source.sh 86 | EOF 87 | 88 | cat >~/.bash_history <<'EOF' 89 | provision-workflow debian uefi && watch-hardware-workflows uefi 90 | watch-hardware-workflows uefi 91 | etherwake -i eth1 c0:3f:d5:6c:b7:5a 92 | provision-workflow hello-world rpi1 && watch-hardware-workflows rpi1 93 | watch-hardware-workflows rpi1 94 | ssh pi@rpi1.test 95 | ansible -f 10 -b -m command -a 'vcgencmd measure_temp' cluster 96 | source /opt/ansible/bin/activate && cd /home/vagrant/rpi-cluster 97 | EOF 98 | 99 | # configure the vagrant user home. 100 | su vagrant -c bash <<'EOF-VAGRANT' 101 | set -euxo pipefail 102 | 103 | install -d -m 750 ~/.ssh 104 | cat /vagrant/tmp/id_rsa.pub /vagrant/tmp/id_rsa.pub >>~/.ssh/authorized_keys 105 | 106 | cat >~/.bash_history <<'EOF' 107 | ssh pi@rpi1.test 108 | sudo su -l 109 | EOF 110 | EOF-VAGRANT 111 | 112 | 113 | # 114 | # setup NAT. 115 | # see https://help.ubuntu.com/community/IptablesHowTo 116 | 117 | apt-get install -y iptables iptables-persistent 118 | 119 | # enable IPv4 forwarding. 
120 | sysctl net.ipv4.ip_forward=1 121 | sed -i -E 's,^\s*#?\s*(net.ipv4.ip_forward=).+,\11,g' /etc/sysctl.conf 122 | 123 | # NAT through eth0. 124 | # NB use something like -s 10.10.10/24 to limit to a specific network. 125 | iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE 126 | 127 | # load iptables rules on boot. 128 | iptables-save >/etc/iptables/rules.v4 129 | 130 | # get the MAC vendor list (used by dhcp-lease-list(8)). 131 | # NB the upstream is at http://standards-oui.ieee.org/oui/oui.txt 132 | # BUT linuxnet.ca version is better taken care of. 133 | wget -qO- https://linuxnet.ca/ieee/oui.txt.bz2 | bzcat >/usr/local/etc/oui.txt 134 | 135 | 136 | # 137 | # provision the stgt iSCSI target (aka iSCSI server). 138 | # see tgtd(8) 139 | # see http://stgt.sourceforge.net/ 140 | # see https://tools.ietf.org/html/rfc7143 141 | # TODO use http://linux-iscsi.org/ instead? 142 | # TODO increase the nic MTU to be more iSCSI friendly. 143 | # TODO use a dedicated VLAN for storage traffic. make it have higher priority than the others at the switch? 144 | 145 | apt-get install -y --no-install-recommends tgt 146 | systemctl status tgt 147 | 148 | 149 | # 150 | # provision useful tools. 151 | 152 | apt-get install -y jq jo 153 | apt-get install -y curl 154 | apt-get install -y httpie 155 | apt-get install -y unzip 156 | apt-get install -y python3-tabulate python3-pem 157 | apt-get install -y --no-install-recommends git 158 | apt-get install -y make patch 159 | apt-get install -y qemu-utils 160 | apt-get install -y pigz 161 | 162 | # install yq. 163 | wget -qO- https://github.com/mikefarah/yq/releases/download/v4.12.2/yq_linux_amd64.tar.gz | tar xz 164 | install yq_linux_amd64 /usr/local/bin/yq 165 | rm yq_linux_amd64 166 | 167 | # etherwake lets us power-on a machine by sending a Wake-on-LAN (WOL) 168 | # magic packet to its ethernet card. 169 | # e.g. etherwake -i eth1 c0:3f:d5:6c:b7:5a 170 | apt-get install -y etherwake 171 | 172 | # install clonezilla. 
173 | apt-get install -y --no-install-recommends clonezilla partclone zstd 174 | 175 | # configure the system to automatically load nbd. 176 | # NB this is required to use qemu-nbd. 177 | cat >/etc/modules-load.d/nbd.conf <<'EOF' 178 | nbd 179 | EOF 180 | cat >/etc/modprobe.d/nbd.conf <<'EOF' 181 | options nbd max_part=8 182 | EOF 183 | modprobe nbd 184 | -------------------------------------------------------------------------------- /tink-helpers.source.sh: -------------------------------------------------------------------------------- 1 | source ~/tinkerbell-sandbox/deploy/compose/.env 2 | 3 | TINKERBELL_STATE_WEBROOT_PATH="$HOME/tinkerbell-sandbox/deploy/compose/state/webroot" 4 | 5 | function tink { 6 | # NB its unfortunate that this will output the following to stderr: 7 | # Flag based client configuration failed with err: fetch cert: Get "http://127.0.0.1:42114/cert" 8 | # but there is no universal workaround for it... we have to wait 9 | # for an upstream fix. 10 | # see https://github.com/tinkerbell/tink/issues/524 11 | docker exec -i compose-tink-cli-1 tink "$@" 12 | } 13 | 14 | # recreate the given template and workflow. 15 | # e.g. 
provision-workflow hello-world bios 16 | function provision-workflow { 17 | local template_name="$1"; shift 18 | local hardware_hostname="$1"; shift || true 19 | bash "/vagrant/templates/$template_name/provision.sh" \ 20 | && bash "/vagrant/templates/$template_name/provision-workflow.sh" "$hardware_hostname" "$@" 21 | } 22 | 23 | function delete-template { 24 | local template_name="$1" 25 | tink template get --format json | jq -r --arg name "$template_name" '.data[] | select(.name==$name) | .id' | while read template_id; do 26 | tink template delete "$template_id" 27 | done 28 | } 29 | 30 | # TODO possible change get-hardware-workflows, get-hardware-mac and 31 | # provision-workflow.sh depending on the outcome of 32 | # https://github.com/tinkerbell/tink/issues/550 33 | function get-hardware-workflows { 34 | docker exec -i compose-db-1 psql -U tinkerbell -A -t <>'device_1' as mac 42 | from 43 | workflow 44 | where 45 | deleted_at is null 46 | ) as w 47 | inner join 48 | ( 49 | select 50 | id as hardware_id, 51 | jsonb_array_elements(data->'network'->'interfaces')->'dhcp'->>'mac' as mac, 52 | (data->>'metadata')::jsonb->'instance'->>'hostname' as hostname 53 | from 54 | hardware 55 | ) as h 56 | on 57 | w.mac=h.mac 58 | where 59 | h.hostname='$1' 60 | EOF 61 | } 62 | 63 | function get-hardware-mac { 64 | local hardware_hostname="$1" 65 | (docker exec -i compose-db-1 psql -U tinkerbell -A -t | awk -F '|' '{print $2}') <'network'->'interfaces')->'dhcp'->>'mac' as mac, 69 | (data->>'metadata')::json->'instance'->>'hostname' as hostname 70 | from 71 | hardware 72 | where 73 | (data->>'metadata')::json->'instance'->>'hostname'='$1' 74 | EOF 75 | } 76 | 77 | # delete the workflows associated with the hardware. 78 | # NB we do not need to really delete the existing workflows. they are only 79 | # applied once. but deleting them makes things easier to follow. 80 | # NB workflows are not really deleted from the database, they are only 81 | # marked as deleted. 
82 | function delete-hardware-workflows { 83 | local hardware_hostname="$1" 84 | get-hardware-workflows "$hardware_hostname" | awk -F '|' '{print $1}' | while read workflow_id; do 85 | tink workflow delete "$workflow_id" 86 | done 87 | } 88 | 89 | function watch-hardware-workflows { 90 | local hardware_hostname="$1" 91 | workflow_ids="$(get-hardware-workflows "$hardware_hostname" | awk -F '|' '{print $1}')" 92 | [ -z "$workflow_ids" ] && echo "the $hardware_hostname hardware does not have any workflow" && return 93 | watch " 94 | echo \"$workflow_ids\" | while read workflow_id; do 95 | docker exec -i compose-tink-cli-1 tink workflow state \$workflow_id 96 | docker exec -i compose-tink-cli-1 tink workflow events \$workflow_id 97 | done 98 | " 99 | } 100 | 101 | function install-vagrant-box-raw-image { 102 | local VAGRANT_BOX_IMAGE_PATH=/vagrant-boxes/$1/0/libvirt/box.img 103 | local IMAGE_NAME=$2 104 | local IMAGE_PATH="$TINKERBELL_STATE_WEBROOT_PATH/images/$IMAGE_NAME.raw.gz" 105 | #local IMAGE_PATH="$TINKERBELL_STATE_WEBROOT_PATH/images/$IMAGE_NAME.raw.zs" # TODO see https://github.com/tinkerbell/hub/issues/65 106 | 107 | if [ ! -f "$VAGRANT_BOX_IMAGE_PATH" ]; then 108 | echo "WARNING: $VAGRANT_BOX_IMAGE_PATH does not exist. skipping creating the $RAW_IMAGE_NAME image." 109 | exit 0 110 | fi 111 | 112 | # convert the vagrant box to a compressed raw image. 113 | if [ ! 
-f "$IMAGE_PATH" ] || [ "$VAGRANT_BOX_IMAGE_PATH" -nt "$IMAGE_PATH" ]; then 114 | local IMAGE_RAW_CACHE_PATH="/vagrant/tmp/$IMAGE_NAME.raw" 115 | local IMAGE_CACHE_PATH="$IMAGE_RAW_CACHE_PATH.gz" 116 | #local IMAGE_CACHE_PATH="$IMAGE_RAW_CACHE_PATH.zs" 117 | qemu-img convert -W -O raw "$VAGRANT_BOX_IMAGE_PATH" "$IMAGE_RAW_CACHE_PATH" 118 | pigz --stdout "$IMAGE_RAW_CACHE_PATH">"$IMAGE_CACHE_PATH.tmp" 119 | #zstd -T0 -o "$IMAGE_CACHE_PATH.tmp" "$IMAGE_RAW_CACHE_PATH" 120 | mv "$IMAGE_CACHE_PATH.tmp" "$IMAGE_CACHE_PATH" 121 | rm "$IMAGE_RAW_CACHE_PATH" 122 | install -d "$(dirname "$IMAGE_PATH")" 123 | cp "$IMAGE_CACHE_PATH" "$IMAGE_PATH" 124 | du -h $IMAGE_PATH 125 | fi 126 | } 127 | 128 | function install-vagrant-box-clonezilla-image { 129 | local VAGRANT_BOX_IMAGE_PATH=/vagrant-boxes/$1/0/libvirt/box.img 130 | local CLONEZILLA_IMAGE_NAME=$2 131 | local CLONEZILLA_IMAGE_PATH=/vagrant/tmp/$CLONEZILLA_IMAGE_NAME 132 | 133 | if [ ! -f "$VAGRANT_BOX_IMAGE_PATH" ]; then 134 | echo "WARNING: $VAGRANT_BOX_IMAGE_PATH does not exist. skipping creating the $CLONEZILLA_IMAGE_NAME image." 135 | exit 0 136 | fi 137 | 138 | # convert the vagrant box to a clonezilla image. 139 | if [ ! 
-f "$CLONEZILLA_IMAGE_PATH/SHA1SUMS" ] || [ "$VAGRANT_BOX_IMAGE_PATH" -nt "$CLONEZILLA_IMAGE_PATH/SHA1SUMS" ]; then 140 | qemu-img info $VAGRANT_BOX_IMAGE_PATH 141 | qemu-nbd --read-only --connect /dev/nbd0 $VAGRANT_BOX_IMAGE_PATH 142 | parted --script /dev/nbd0 print 143 | rm -rf $CLONEZILLA_IMAGE_PATH 144 | ocs-sr \ 145 | --batch \ 146 | --nogui \ 147 | --ocsroot /vagrant/tmp \ 148 | --use-partclone \ 149 | --clone-hidden-data \ 150 | --pzstd-compress \ 151 | --skip-check-restorable \ 152 | --gen-sha1sum \ 153 | savedisk \ 154 | $CLONEZILLA_IMAGE_NAME \ 155 | nbd0 156 | qemu-nbd --disconnect /dev/nbd0 157 | du -h $CLONEZILLA_IMAGE_PATH 158 | fi 159 | 160 | # you can restore the image with: 161 | # qemu-img create -f qcow2 $CLONEZILLA_IMAGE_PATH-test.qcow2 60G 162 | # qemu-img info $CLONEZILLA_IMAGE_PATH-test.qcow2 163 | # qemu-nbd --connect /dev/nbd1 $CLONEZILLA_IMAGE_PATH-test.qcow2 164 | # ocs-sr \ 165 | # --batch \ 166 | # --nogui \ 167 | # --ocsroot /vagrant/tmp \ 168 | # --skip-check-restorable-r \ 169 | # --check-sha1sum \ 170 | # restoredisk \ 171 | # $CLONEZILLA_IMAGE_NAME \ 172 | # nbd1 173 | # parted --script /dev/nbd1 print 174 | # qemu-nbd --disconnect /dev/nbd1 175 | 176 | # copy the clonezilla image to the tinkerbell webroot. 177 | install -d "$TINKERBELL_STATE_WEBROOT_PATH/images" 178 | rsync \ 179 | --archive \ 180 | --no-owner \ 181 | --no-group \ 182 | --chmod Du=rwx,Dg=rx,Do=rx,Fu=rw,Fg=r,Fo=r \ 183 | --delete \ 184 | $CLONEZILLA_IMAGE_PATH \ 185 | "$TINKERBELL_STATE_WEBROOT_PATH/images" 186 | } 187 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # configure the virtual machines network to use an already configured bridge. 2 | # NB this must be used for connecting to the external switch. 
3 | $provisioner_bridge_name = 'br-rpi' 4 | $provisioner_ip_address = '10.3.0.2' 5 | 6 | # uncomment the next two lines to configure the virtual machines network 7 | # to use a new private network that is only available inside the host. 8 | # NB this must be used for NOT connecting to the external switch. 9 | #$provisioner_bridge_name = nil 10 | #$provisioner_ip_address = '10.11.12.2' 11 | 12 | # to make sure the nodes are created sequentially, we 13 | # have to force a --no-parallel execution. 14 | ENV['VAGRANT_NO_PARALLEL'] = 'yes' 15 | 16 | # enable typed triggers. 17 | # NB this is needed to modify the libvirt domain scsi controller model to virtio-scsi. 18 | ENV['VAGRANT_EXPERIMENTAL'] = 'typed_triggers' 19 | 20 | require 'open3' 21 | 22 | # get the docker hub auth from the host ~/.docker/config.json file. 23 | def get_docker_hub_auth 24 | config_path = File.expand_path '~/.docker/config.json' 25 | return nil unless File.exists? config_path 26 | config = JSON.load File.read(config_path) 27 | return nil unless config.has_key?('auths') && config['auths'].has_key?('https://index.docker.io/v1/') 28 | config['auths']['https://index.docker.io/v1/']['auth'] 29 | end 30 | DOCKER_HUB_AUTH = get_docker_hub_auth 31 | 32 | Vagrant.configure('2') do |config| 33 | config.vm.box = 'ubuntu-20.04-amd64' 34 | 35 | config.vm.provider :libvirt do |lv, config| 36 | lv.memory = 4*1024 37 | lv.cpus = 4 38 | lv.cpu_mode = 'host-passthrough' 39 | # lv.nested = true 40 | lv.keymap = 'pt' 41 | lv.disk_bus = 'scsi' 42 | lv.disk_device = 'sda' 43 | lv.disk_driver :discard => 'unmap', :cache => 'unsafe' 44 | # NB vagrant-libvirt does not yet support urandom; but since tinkerbell 45 | # built iPXE takes too much time to leave the "Initialising devices" 46 | # phase we modify this to urandom in the trigger below. 
47 | lv.random :model => 'random' 48 | config.vm.synced_folder '.', '/vagrant', type: 'nfs', nfs_version: '4.2', nfs_udp: false 49 | config.vm.synced_folder "#{ENV['HOME']}/.vagrant.d/boxes", '/vagrant-boxes', mount_options: ['ro'], type: 'nfs', nfs_version: '4.2', nfs_udp: false 50 | config.trigger.before :'VagrantPlugins::ProviderLibvirt::Action::StartDomain', type: :action do |trigger| 51 | trigger.ruby do |env, machine| 52 | # modify the scsi controller model to virtio-scsi. 53 | # see https://github.com/vagrant-libvirt/vagrant-libvirt/pull/692 54 | # see https://github.com/vagrant-libvirt/vagrant-libvirt/issues/999 55 | stdout, stderr, status = Open3.capture3( 56 | 'virt-xml', machine.id, 57 | '--edit', 'type=scsi', 58 | '--controller', 'model=virtio-scsi') 59 | if status.exitstatus != 0 60 | raise "failed to run virt-xml to modify the scsi controller model. status=#{status.exitstatus} stdout=#{stdout} stderr=#{stderr}" 61 | end 62 | # modify the random model to use the urandom backend device. 63 | stdout, stderr, status = Open3.capture3( 64 | 'virt-xml', machine.id, 65 | '--edit', 66 | '--rng', '/dev/urandom') 67 | if status.exitstatus != 0 68 | raise "failed to run virt-xml to modify the random backend device. 
status=#{status.exitstatus} stdout=#{stdout} stderr=#{stderr}" 69 | end 70 | end 71 | end 72 | end 73 | 74 | config.vm.define :provisioner do |config| 75 | config.vm.hostname = 'provisioner' 76 | if $provisioner_bridge_name 77 | config.vm.network :public_network, 78 | ip: $provisioner_ip_address, 79 | dev: $provisioner_bridge_name, 80 | mode: 'bridge', 81 | type: 'bridge' 82 | else 83 | config.vm.network :private_network, 84 | ip: $provisioner_ip_address, 85 | libvirt__dhcp_enabled: false, 86 | libvirt__forward_mode: 'none' 87 | end 88 | config.trigger.before :up do |trigger| 89 | trigger.run = { 90 | inline: '''bash -euc \' 91 | file_paths=( 92 | ~/.ssh/id_rsa.pub 93 | ../tinkerbell-debian-osie/tinkerbell-debian-osie-amd64.iso 94 | ../tinkerbell-debian-osie/tinkerbell-debian-osie-arm64.iso 95 | ) 96 | for file_path in "${file_paths[@]}"; do 97 | if [ -f $file_path ]; then 98 | mkdir -p tmp 99 | rsync $file_path tmp 100 | fi 101 | done 102 | \' 103 | ''' 104 | } 105 | end 106 | config.vm.provision :shell, path: 'provision-base.sh' 107 | config.vm.provision :shell, path: 'provision-docker.sh' 108 | config.vm.provision :shell, path: 'provision-docker-hub-auth.sh', env: {'DOCKER_HUB_AUTH' => DOCKER_HUB_AUTH} 109 | config.vm.provision :shell, path: 'provision-docker-compose.sh' 110 | config.vm.provision :shell, path: 'provision-portainer.sh' 111 | config.vm.provision :shell, path: 'provision-loki.sh' 112 | config.vm.provision :shell, path: 'provision-grafana.sh', args: [$provisioner_ip_address] 113 | config.vm.provision :shell, path: 'provision-meshcommander.sh' 114 | config.vm.provision :shell, path: 'provision-go.sh' 115 | config.vm.provision :shell, path: 'provision-tinkerbell.sh', args: [$provisioner_ip_address] 116 | config.vm.provision :shell, path: 'provision-docker-buildx.sh' 117 | config.vm.provision :shell, path: 'provision-debian-boots.sh' 118 | config.vm.provision :shell, path: 'provision-debian-osie.sh' 119 | config.vm.provision :shell, path: 
'provision-tinkerbell-tink.sh' 120 | config.vm.provision :shell, path: 'provision-nfs-server.sh' 121 | config.vm.provision :shell, path: 'actions/provision.sh' 122 | config.vm.provision :shell, path: 'templates/provision.sh' 123 | config.vm.provision :shell, path: 'workers/provision.sh' 124 | config.vm.provision :shell, name: 'Summary', path: 'summary.sh', run: 'always' 125 | end 126 | 127 | ['bios', 'uefi'].each_with_index do |firmware, i| 128 | config.vm.define firmware do |config| 129 | config.vm.box = nil 130 | if $provisioner_bridge_name 131 | config.vm.network :public_network, 132 | dev: $provisioner_bridge_name, 133 | mac: "08002700000#{i+1}", 134 | mode: 'bridge', 135 | type: 'bridge', 136 | auto_config: false 137 | else 138 | config.vm.network :private_network, 139 | # NB this ip is not really used by the VM; its used by 140 | # vagrant-libvirt to find the network to which it 141 | # will attach this VM to. 142 | ip: $provisioner_ip_address, 143 | mac: "08002700000#{i+1}", 144 | auto_config: false 145 | end 146 | config.vm.provider :libvirt do |lv, config| 147 | lv.loader = '/usr/share/ovmf/OVMF.fd' if firmware == 'uefi' 148 | lv.memory = 4*1024 149 | lv.boot 'hd' 150 | lv.boot 'network' 151 | lv.mgmt_attach = false 152 | lv.graphics_type = 'spice' 153 | lv.video_type = 'qxl' 154 | lv.input :type => 'tablet', :bus => 'usb' 155 | lv.channel :type => 'unix', :target_name => 'org.qemu.guest_agent.0', :target_type => 'virtio' 156 | lv.channel :type => 'spicevmc', :target_name => 'com.redhat.spice.0', :target_type => 'virtio' 157 | # set some BIOS settings that will help us identify this particular machine. 
158 | # 159 | # QEMU | Linux 160 | # --------------------+---------------------------------------------- 161 | # type=1,manufacturer | /sys/devices/virtual/dmi/id/sys_vendor 162 | # type=1,product | /sys/devices/virtual/dmi/id/product_name 163 | # type=1,version | /sys/devices/virtual/dmi/id/product_version 164 | # type=1,serial | /sys/devices/virtual/dmi/id/product_serial 165 | # type=1,sku | dmidecode 166 | # type=1,uuid | /sys/devices/virtual/dmi/id/product_uuid 167 | # type=3,manufacturer | /sys/devices/virtual/dmi/id/chassis_vendor 168 | # type=3,family | /sys/devices/virtual/dmi/id/chassis_type 169 | # type=3,version | /sys/devices/virtual/dmi/id/chassis_version 170 | # type=3,serial | /sys/devices/virtual/dmi/id/chassis_serial 171 | # type=3,asset | /sys/devices/virtual/dmi/id/chassis_asset_tag 172 | [ 173 | 'type=1,manufacturer=your vendor name here', 174 | 'type=1,product=your product name here', 175 | 'type=1,version=your product version here', 176 | "type=1,serial=your product serial number here #{i+1}", 177 | 'type=1,sku=your product SKU here', 178 | "type=1,uuid=00000000-0000-4000-8000-00000000000#{i+1}", 179 | 'type=3,manufacturer=your chassis vendor name here', 180 | #'type=3,family=1', # TODO why this does not work on qemu from ubuntu 18.04? 
181 | 'type=3,version=your chassis version here', 182 | "type=3,serial=your chassis serial number here #{i+1}", 183 | "type=3,asset=your chassis asset tag here #{i+1}", 184 | ].each do |value| 185 | lv.qemuargs :value => '-smbios' 186 | lv.qemuargs :value => value 187 | end 188 | lv.storage :file, :size => '65G', :device => 'sda', :bus => 'scsi', :discard => 'unmap', :detect_zeroes => 'unmap', :cache => 'unsafe' 189 | end 190 | end 191 | end 192 | end 193 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | This is a [Vagrant](https://www.vagrantup.com/) Environment for playing with [Tinkerbell](https://tinkerbell.org/) for provisioning AMD64 and ARM64 (e.g. Raspberry Pi) machines. 2 | 3 | # Usage 4 | 5 | This `provisioner` environment is essentially running all the Tinkerbell [components](https://tinkerbell.org/components/) inside a single virtual machine. 6 | 7 | In order for it to work you need to connect the `provisioner` virtual network to a physical network that reaches the physical machines. 8 | 9 | I'm using Ubuntu 20.04 as the host, qemu/kvm/libvirt as the hypervisor, and a [tp-link tl-sg108e](https://www.tp-link.com/en/business-networking/easy-smart-switch/tl-sg108e/) switch. 10 | 11 | **NB** You can also use this vagrant environment without the switch (see the [Vagrantfile](Vagrantfile)). 12 | 13 | The network is connected as: 14 | 15 | ![](network.png) 16 | 17 | The tp-link tl-sg108e switch is configured with [rgl/ansible-collection-tp-link-easy-smart-switch](https://github.com/rgl/ansible-collection-tp-link-easy-smart-switch) as: 18 | 19 | ![](tp-link-sg108e-802-1q-vlan-configuration.png) 20 | ![](tp-link-sg108e-802-1q-vlan-pvid-configuration.png) 21 | 22 | **NB** this line of switches is somewhat insecure as, at least, its configuration protocol (UDP port 29808 and TCP port 80) uses clear text messages. 
For more information see [How I can gain control of your TP-LINK home switch](https://www.pentestpartners.com/security-blog/how-i-can-gain-control-of-your-tp-link-home-switch/) and [Information disclosure vulnerability in TP-Link Easy Smart switches](https://www.chrisdcmoore.co.uk/post/tplink-easy-smart-switch-vulnerabilities/). 23 | 24 | The host network is configured by netplan with `/etc/netplan/config.yaml` as: 25 | 26 | ```yaml 27 | network: 28 | version: 2 29 | renderer: networkd 30 | ethernets: 31 | enp3s0: 32 | link-local: [] 33 | addresses: 34 | - 10.1.0.1/24 35 | - 192.168.0.254/24 36 | bridges: 37 | # NB this is equivalent of executing: 38 | # ip link add name br-rpi type bridge 39 | # ip addr flush dev br-rpi 40 | # ip addr add dev br-rpi 10.3.0.1/24 41 | # ip link set dev br-rpi up 42 | # ip addr ls dev br-rpi 43 | # ip -d link show dev br-rpi 44 | # ip route 45 | # NB later, you can remove with: 46 | # ip link set dev br-rpi down 47 | # ip link delete dev br-rpi 48 | br-rpi: 49 | link-local: [] 50 | addresses: 51 | - 10.3.0.1/24 52 | interfaces: 53 | - vlan.rpi 54 | vlans: 55 | vlan.wan: 56 | id: 2 57 | link: enp3s0 58 | link-local: [] 59 | addresses: 60 | - 192.168.1.1/24 61 | gateway4: 192.168.1.254 62 | nameservers: 63 | addresses: 64 | # cloudflare+apnic public dns resolvers. 65 | # see https://en.wikipedia.org/wiki/1.1.1.1 66 | - "1.1.1.1" 67 | - "1.0.0.1" 68 | # google public dns resolvers. 
69 | # see https://en.wikipedia.org/wiki/8.8.8.8 70 | #- "8.8.8.8" 71 | #- "8.8.4.4" 72 | # NB this is equivalent of executing: 73 | # ip link add link enp3s0 vlan.rpi type vlan proto 802.1q id 2 74 | # ip link set dev vlan.rpi up 75 | # ip -d link show dev vlan.rpi 76 | # NB later, you can remove with: 77 | # ip link set dev vlan.rpi down 78 | # ip link delete dev vlan.rpi 79 | vlan.rpi: 80 | id: 3 81 | link: enp3s0 82 | link-local: [] 83 | ``` 84 | 85 | **NB** For more information about VLANs see the [IEEE 802.1Q VLAN Tutorial](http://www.microhowto.info/tutorials/802.1q.html). 86 | 87 | Build and install the [Ubuntu Linux vagrant box](https://github.com/rgl/ubuntu-vagrant). 88 | 89 | Build [Debian OSIE](https://github.com/rgl/tinkerbell-debian-osie) in `../tinkerbell-debian-osie`. 90 | 91 | Optionally, build and install the following vagrant boxes (which must be using 92 | the UEFI variant): 93 | 94 | * [Debian](https://github.com/rgl/debian-vagrant) 95 | * [Proxmox VE](https://github.com/rgl/proxmox-ve) 96 | * [Ubuntu](https://github.com/rgl/ubuntu-vagrant) 97 | * [Windows 2022](https://github.com/rgl/windows-vagrant) 98 | 99 | Login into docker hub to have a [higher rate limits](https://www.docker.com/increase-rate-limits). 100 | 101 | Launch the `provisioner` with: 102 | 103 | ```bash 104 | # NB this takes about 30m in my machine. YMMV. 
105 | vagrant up --no-destroy-on-error --no-tty provisioner 106 | ``` 107 | 108 | Enter the `provisioner` machine, and tail the relevant logs with: 109 | 110 | ```bash 111 | vagrant ssh provisioner 112 | sudo -i 113 | cd ~/tinkerbell-sandbox/deploy/compose 114 | docker compose logs --follow tink-server boots nginx 115 | ``` 116 | 117 | In another terminal, launch the `uefi` worker machine with: 118 | 119 | ```bash 120 | vagrant up --no-destroy-on-error --no-tty uefi 121 | ``` 122 | 123 | In another terminal, watch the workflow progress with: 124 | 125 | ```bash 126 | vagrant ssh provisioner 127 | sudo -i 128 | watch-hardware-workflows uefi 129 | ``` 130 | 131 | You should eventually see something alike: 132 | 133 | ``` 134 | +----------------------+--------------------------------------+ 135 | | FIELD NAME | VALUES | 136 | +----------------------+--------------------------------------+ 137 | | Workflow ID | dc2ff4c3-13b1-11ec-a4c5-0242ac1a0004 | 138 | | Workflow Progress | 100% | 139 | | Current Task | hello-world | 140 | | Current Action | info | 141 | | Current Worker | 00000000-0000-4000-8000-080027000001 | 142 | | Current Action State | STATE_SUCCESS | 143 | +----------------------+--------------------------------------+ 144 | +--------------------------------------+-------------+-------------+----------------+---------------------------------+---------------+ 145 | | WORKER ID | TASK NAME | ACTION NAME | EXECUTION TIME | MESSAGE | ACTION STATUS | 146 | +--------------------------------------+-------------+-------------+----------------+---------------------------------+---------------+ 147 | | 00000000-0000-4000-8000-080027000001 | hello-world | hello-world | 0 | Started execution | STATE_RUNNING | 148 | | 00000000-0000-4000-8000-080027000001 | hello-world | hello-world | 3 | finished execution successfully | STATE_SUCCESS | 149 | | 00000000-0000-4000-8000-080027000001 | hello-world | info | 0 | Started execution | STATE_RUNNING | 150 | | 
00000000-0000-4000-8000-080027000001 | hello-world | info | 0 | finished execution successfully | STATE_SUCCESS | 151 | +--------------------------------------+-------------+-------------+----------------+---------------------------------+---------------+ 152 | ``` 153 | 154 | **NB** After a workflow action is executed, `tink-worker` will not re-execute it, even if you reboot the worker. You must create a new workflow, e.g. `provision-workflow hello-world uefi && watch-hardware-workflows uefi`. 155 | 156 | You can see the worker and action logs from Grafana Explore (its address is displayed at the end of the provisioning). 157 | 158 | From within the worker machine, you can query the metadata endpoint: 159 | 160 | **NB** this endpoint returns the data set in the `TODO` field of the particular worker `hardware` document. 161 | 162 | ```bash 163 | metadata_url="$(cat /proc/cmdline | tr ' ' '\n' | sed -nE 's,^tinkerbell=(.+),\1:50061/metadata,p')" 164 | wget -qO- "$metadata_url" 165 | ``` 166 | 167 | Then repeat the process with the `bios` worker machine. 
168 | 169 | To execute a more realistic workflow, you can install one of the following: 170 | 171 | ```bash 172 | provision-workflow debian uefi && watch-hardware-workflows uefi 173 | provision-workflow flatcar-linux uefi && watch-hardware-workflows uefi 174 | provision-workflow proxmox-ve uefi && watch-hardware-workflows uefi 175 | provision-workflow ubuntu uefi && watch-hardware-workflows uefi 176 | provision-workflow windows-2022 uefi && watch-hardware-workflows uefi 177 | ``` 178 | 179 | See which containers are running in the `provisioner` machine: 180 | 181 | ```bash 182 | vagrant ssh provisioner 183 | sudo -i 184 | # see https://docs.docker.com/engine/reference/commandline/ps/#formatting 185 | python3 <<'EOF' 186 | import io 187 | import json 188 | import subprocess 189 | from tabulate import tabulate 190 | 191 | def info(): 192 | p = subprocess.Popen( 193 | ('docker', 'ps', '-a', '--no-trunc', '--format', '{{.ID}}'), 194 | stdout=subprocess.PIPE, 195 | stderr=subprocess.STDOUT) 196 | for id in (l.rstrip("\r\n") for l in io.TextIOWrapper(p.stdout)): 197 | p = subprocess.Popen( 198 | ('docker', 'inspect', id), 199 | stdout=subprocess.PIPE, 200 | stderr=subprocess.STDOUT) 201 | for c in json.load(p.stdout): 202 | yield (c['Name'], c['Config']['Image'], c['Image']) 203 | 204 | print(tabulate(sorted(info()), headers=('ContainerName', 'ImageName', 'ImageId'))) 205 | EOF 206 | ``` 207 | 208 | At the time of writing these were the containers running by default: 209 | 210 | ```plain 211 | ContainerName ImageName ImageId 212 | ----------------------------------- ---------------------------------------- ----------------------------------------------------------------------- 213 | /compose-boots-1 10.3.0.2/debian-boots sha256:397e3206222130ada624953220e8cb38c66365a4e31df7ce808f639c9a141599 214 | /compose-db-1 postgres:14-alpine sha256:eb82a397daaf176f244e990aa6f550422a764a88759f43e641c3a1323953deb7 215 | /compose-hegel-1 quay.io/tinkerbell/hegel:sha-89cb9dc8 
sha256:23c22f0bb8779fb4b0fdab8384937c54afbbed6b45aefb3554f2d54cb2c7cffa 216 | /compose-images-to-local-registry-1 quay.io/containers/skopeo:latest sha256:9f5c670462ec0dc756fe52ec6c4d080f62c01a0003b982d48bb8218f877a456a 217 | /compose-osie-bootloader-1 nginx:alpine sha256:b46db85084b80a87b94cc930a74105b74763d0175e14f5913ea5b07c312870f8 218 | /compose-osie-work-1 bash:4.4 sha256:bc8b0716d7386a05b5b3d04276cc7d8d608138be723fbefd834b5e75db6a6aeb 219 | /compose-registry-1 registry:2.7.1 sha256:b8604a3fe8543c9e6afc29550de05b36cd162a97aa9b2833864ea8a5be11f3e2 220 | /compose-registry-auth-1 httpd:2 sha256:ad17c88403e2cedd27963b98be7f04bd3f903dfa7490586de397d0404424936d 221 | /compose-tink-cli-1 quay.io/tinkerbell/tink-cli:sha-3743d31e sha256:8c90de15e97362a708cde2c59d3a261f73e3a4242583a54222b5e18d4070acaf 222 | /compose-tink-server-1 quay.io/tinkerbell/tink:sha-3743d31e sha256:fb21c42c067588223b87a5c1f1d9b2892f863bfef29ce5fcd8ba755cfa0a990b 223 | /compose-tink-server-migration-1 quay.io/tinkerbell/tink:sha-3743d31e sha256:fb21c42c067588223b87a5c1f1d9b2892f863bfef29ce5fcd8ba755cfa0a990b 224 | /compose-tls-gen-1 cfssl/cfssl sha256:655abf144edde793a3ff1bc883cc82ca61411efb35d0d403a52f202c9c3cd377 225 | /compose_tls-gen_run_67135735bbb3 cfssl/cfssl sha256:655abf144edde793a3ff1bc883cc82ca61411efb35d0d403a52f202c9c3cd377 226 | /grafana grafana/grafana:8.2.5 sha256:ddfae340d0681fe1a10582b06a2e8ae402196df9d429f0c1cefbe8dedca73cf0 227 | /loki grafana/loki:2.4.1 sha256:e3e722f23de3fdbb8608dcf1f8824dec62cba65bbfd5ab5ad095eed2d7c5872a 228 | /meshcommander meshcommander sha256:aff2fc5004fb7f77b1a14a82c35af72e941fa33715e66c2eab5a5d253820d4bb 229 | /portainer portainer/portainer-ce:2.9.2 sha256:a1c22f3d250fda6b357aa7d2148dd333a698805dd2878a08eb8f055ca8fb4e99 230 | ``` 231 | 232 | Those containers were started with docker compose and you can use it to 233 | inspect the tinkerbell containers: 234 | 235 | ```bash 236 | vagrant ssh provisioner 237 | sudo -i 238 | cd 
~/tinkerbell-sandbox/deploy/compose 239 | docker compose ps 240 | docker compose logs -f 241 | ``` 242 | 243 | You can also use the [Portainer](https://github.com/portainer/portainer) 244 | application at the address that is displayed after the vagrant environment 245 | is launched (e.g. at `http://10.3.0.2:9000`). 246 | 247 | # Tinkerbell Debian OSIE 248 | 249 | This vagrant environment uses the [Debian based OSIE](https://github.com/rgl/tinkerbell-debian-osie) 250 | instead of the [LinuxKit (aka Hook) based OSIE](https://github.com/tinkerbell/hook). 251 | 252 | You can log in to it using the `osie` username and password. 253 | 254 | # Raspberry Pi 255 | 256 | Install the RPI4-UEFI-IPXE firmware into an sd-card as described at 257 | https://github.com/rgl/rpi4-uefi-ipxe. 258 | 259 | Insert an external disk (e.g. a USB flash drive or USB SSD) to use as the target of 260 | your Tinkerbell Action. 261 | 262 | # Intel NUC 263 | 264 | You can [use the Intel Integrator Toolkit ITK6.efi EFI application](https://downloadmirror.intel.com/29345/eng/Intel%20Integrator%20Toolkit%20User%20Guide.pdf) to set the SMBIOS properties. 265 | 266 | # Troubleshooting 267 | 268 | ## Network Packet Capture 269 | 270 | You can see all the network traffic from within the provisioner by running: 271 | 272 | ```bash 273 | vagrant ssh-config provisioner >tmp/provisioner-ssh-config.conf 274 | # NB this ignores the following ports: 275 | # 22: SSH 276 | # 16992: AMT HTTP 277 | # 16994: AMT Redirection/TCP 278 | # 4000: MeshCommander 279 | wireshark -k -i <(ssh -F tmp/provisioner-ssh-config.conf provisioner 'sudo tcpdump -s 0 -U -n -i eth1 -w - not tcp port 22 and not port 16992 and not port 16994 and not port 4000') 280 | ``` 281 | 282 | You can also do it from the host by capturing traffic from the `br-rpi` or `vlan.rpi` interface. 
283 | 284 | ## Database 285 | 286 | Tinkerbell uses the [tinkerbell](https://github.com/tinkerbell/tink/tree/main/db/migration) 287 | PostgreSQL database, you can access its console with, e.g.: 288 | 289 | ```bash 290 | vagrant ssh provisioner 291 | sudo -i 292 | docker exec -i compose-db-1 psql -U tinkerbell -c '\dt' 293 | docker exec -i compose-db-1 psql -U tinkerbell -c '\d hardware' 294 | docker exec -i compose-db-1 psql -U tinkerbell -c 'select * from template' 295 | docker exec -i compose-db-1 psql -U tinkerbell -c 'select * from workflow' 296 | docker exec -i compose-db-1 psql -U tinkerbell -c 'select * from workflow_event order by created_at desc' 297 | ``` 298 | 299 | # Notes 300 | 301 | * All workflow actions run as `--privileged` containers. 302 | 303 | # Reference 304 | 305 | * [IEEE 802.1Q VLAN Tutorial](http://www.microhowto.info/tutorials/802.1q.html) 306 | * [ContainerSolutions/tinkerbell-rpi4-workflow](https://github.com/ContainerSolutions/tinkerbell-rpi4-workflow/tree/rpi4-tinkerbell-uefi) 307 | -------------------------------------------------------------------------------- /network.uxf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 10 4 | 5 | UMLClass 6 | 7 | 800 8 | 120 9 | 1280 10 | 160 11 | 12 | <<switch>> 13 | tp-link sg108e 14 | 15 | 16 | 17 | UMLClass 18 | 19 | 800 20 | 180 21 | 160 22 | 100 23 | 24 | <<port>> 25 | port: 1 26 | pvid: 2 27 | untagged vlan id: 2 28 | 29 | 30 | 31 | UMLClass 32 | 33 | 960 34 | 180 35 | 160 36 | 100 37 | 38 | <<port>> 39 | port: 2 40 | pvid: 1 41 | untagged vlan id: 1 42 | tagged vlan id: 2, 3 43 | 44 | 45 | 46 | UMLClass 47 | 48 | 560 49 | 120 50 | 180 51 | 160 52 | 53 | <<switch>> 54 | ISP switch 55 | 56 | 57 | 58 | Relation 59 | 60 | 640 61 | 270 62 | 260 63 | 60 64 | 65 | lt=- 66 | 10.0;10.0;10.0;40.0;240.0;40.0;240.0;10.0 67 | 68 | 69 | UMLClass 70 | 71 | 560 72 | 0 73 | 180 74 | 40 75 | 76 | Internet 77 | 78 | 79 | 80 | Relation 81 | 82 | 640 83 | 30 84 | 30 
85 | 110 86 | 87 | lt=- 88 | 10.0;10.0;10.0;90.0 89 | 90 | 91 | UMLClass 92 | 93 | 1440 94 | 670 95 | 160 96 | 70 97 | 98 | <<raspberry pi>> 99 | model: 4b 100 | hostname: rpi1 101 | 102 | 103 | 104 | Relation 105 | 106 | 1510 107 | 270 108 | 30 109 | 420 110 | 111 | lt=- 112 | 10.0;10.0;10.0;400.0 113 | 114 | 115 | UMLClass 116 | 117 | 1120 118 | 180 119 | 160 120 | 100 121 | 122 | <<port>> 123 | port: 3 124 | pvid: 3 125 | untagged vlan id: 3 126 | 127 | 128 | 129 | UMLClass 130 | 131 | 1280 132 | 180 133 | 160 134 | 100 135 | 136 | <<port>> 137 | port: 4 138 | pvid: 3 139 | untagged vlan id: 3 140 | 141 | 142 | 143 | UMLClass 144 | 145 | 1440 146 | 180 147 | 160 148 | 100 149 | 150 | <<port>> 151 | port: 5 152 | pvid: 3 153 | untagged vlan id: 3 154 | 155 | 156 | 157 | UMLClass 158 | 159 | 1600 160 | 180 161 | 160 162 | 100 163 | 164 | <<port>> 165 | port: 6 166 | pvid: 3 167 | untagged vlan id: 3 168 | 169 | 170 | 171 | UMLClass 172 | 173 | 1760 174 | 180 175 | 160 176 | 100 177 | 178 | <<port>> 179 | port: 7 180 | pvid: 3 181 | untagged vlan id: 3 182 | 183 | 184 | 185 | UMLClass 186 | 187 | 1920 188 | 180 189 | 160 190 | 100 191 | 192 | <<port>> 193 | port: 8 194 | pvid: 3 195 | untagged vlan id: 3 196 | 197 | 198 | 199 | UMLClass 200 | 201 | 10 202 | 340 203 | 1120 204 | 1080 205 | 206 | <<pc>> 207 | hostname: host 208 | 209 | 210 | 211 | Relation 212 | 213 | 1030 214 | 270 215 | 30 216 | 130 217 | 218 | lt=- 219 | 10.0;10.0;10.0;110.0 220 | 221 | 222 | UMLClass 223 | 224 | 700 225 | 480 226 | 120 227 | 80 228 | 229 | <<bridge>> 230 | name: br-rpi 231 | ip: 10.3.0.1 232 | 233 | 234 | 235 | UMLClass 236 | 237 | 790 238 | 600 239 | 320 240 | 800 241 | 242 | <<vm>> 243 | name: provisioner 244 | eth1 ip: 10.3.0.2 245 | 246 | 247 | 248 | UMLClass 249 | 250 | 980 251 | 380 252 | 130 253 | 60 254 | 255 | <<nic>> 256 | name: enp3s0 257 | 258 | 259 | 260 | UMLClass 261 | 262 | 840 263 | 480 264 | 120 265 | 80 266 | 267 | <<vlan>> 268 | name: vlan.rpi 269 | vlan id: 3 
270 | 271 | 272 | 273 | Relation 274 | 275 | 890 276 | 430 277 | 170 278 | 70 279 | 280 | lt=- 281 | 150.0;10.0;150.0;30.0;10.0;30.0;10.0;50.0 282 | 283 | 284 | UMLNote 285 | 286 | 1160 287 | 590 288 | 60 289 | 80 290 | 291 | DHCP 292 | TFTP 293 | NFS 294 | NAT 295 | bg=blue 296 | fg=black 297 | 298 | 299 | 300 | Relation 301 | 302 | 1100 303 | 610 304 | 80 305 | 30 306 | 307 | lt=. 308 | 60.0;10.0;10.0;10.0 309 | 310 | 311 | Relation 312 | 313 | 1080 314 | 690 315 | 380 316 | 50 317 | 318 | lt=-> 319 | <<mount>> 320 | rootfs 321 | 360.0;20.0;10.0;20.0 322 | 323 | 324 | UMLClass 325 | 326 | 810 327 | 680 328 | 280 329 | 60 330 | 331 | <<nfs share>> 332 | path: /srv/nfs/rpi1/root 333 | 334 | 335 | 336 | 337 | UMLClass 338 | 339 | 810 340 | 1110 341 | 280 342 | 60 343 | 344 | <<container>> 345 | image: tinkerbell/tink-cli 346 | name: compose-tink-cli-1 347 | 348 | 349 | 350 | UMLClass 351 | 352 | 810 353 | 750 354 | 280 355 | 150 356 | 357 | <<container>> 358 | image: tinkerbell/boots 359 | name: compose-boots-1 360 | 361 | 362 | 363 | UMLClass 364 | 365 | 810 366 | 1180 367 | 280 368 | 60 369 | 370 | <<container>> 371 | image: tinkerbell/hegel 372 | name: compose-hegel-1 373 | 374 | 375 | 376 | UMLClass 377 | 378 | 810 379 | 980 380 | 280 381 | 120 382 | 383 | <<container>> 384 | image: tinkerbell/tink 385 | name: compose-tink-server-1 386 | 387 | 388 | 389 | UMLClass 390 | 391 | 810 392 | 910 393 | 280 394 | 60 395 | 396 | <<container>> 397 | image: postgres:10-alpine 398 | name: compose-db-1 399 | 400 | 401 | 402 | UMLClass 403 | 404 | 810 405 | 1250 406 | 280 407 | 60 408 | 409 | <<container>> 410 | image: registry 411 | name: compose-registry-1 412 | 413 | 414 | 415 | UMLClass 416 | 417 | 810 418 | 1320 419 | 280 420 | 60 421 | 422 | <<container>> 423 | image: nginx:alpine 424 | name: compose-osie-bootloader-1 425 | 426 | 427 | 428 | Relation 429 | 430 | 1080 431 | 750 432 | 370 433 | 40 434 | 435 | lt=-() 436 | r2=dhcp/udp/0.0.0.0:67 437 | 438 | 
10.0;20.0;350.0;20.0 439 | 440 | 441 | Relation 442 | 443 | 1080 444 | 780 445 | 370 446 | 40 447 | 448 | lt=-() 449 | r2=tftp/udp/10.3.0.2:69 450 | 451 | 10.0;20.0;350.0;20.0 452 | 453 | 454 | Relation 455 | 456 | 1080 457 | 840 458 | 370 459 | 40 460 | 461 | lt=-() 462 | r2=ipxe-script/http/tcp/10.3.0.2:80 463 | 464 | 10.0;20.0;350.0;20.0 465 | 466 | 467 | Relation 468 | 469 | 1080 470 | 810 471 | 370 472 | 40 473 | 474 | lt=-() 475 | r2=syslog/udp/10.3.0.2:514 476 | 477 | 10.0;20.0;350.0;20.0 478 | 479 | 480 | Relation 481 | 482 | 1080 483 | 1180 484 | 300 485 | 40 486 | 487 | lt=-() 488 | r2=tcp/0.0.0.0:42115 489 | 490 | 10.0;20.0;280.0;20.0 491 | 492 | 493 | Relation 494 | 495 | 1080 496 | 1210 497 | 300 498 | 40 499 | 500 | lt=-() 501 | r2=http/tcp/0.0.0.0:50061 502 | 503 | 10.0;20.0;280.0;20.0 504 | 505 | 506 | Relation 507 | 508 | 1080 509 | 1250 510 | 300 511 | 40 512 | 513 | lt=-() 514 | r2=https/tcp/0.0.0.0:443 515 | 516 | 10.0;20.0;280.0;20.0 517 | 518 | 519 | Relation 520 | 521 | 480 522 | 850 523 | 350 524 | 110 525 | 526 | lt=)- 527 | 528 | 90.0;80.0;20.0;80.0;10.0;70.0;10.0;20.0;20.0;10.0;330.0;10.0 529 | 530 | 531 | Relation 532 | 533 | 580 534 | 910 535 | 250 536 | 40 537 | 538 | lt=()- 539 | r1=pgsql/tcp/:5432 540 | 541 | 10.0;20.0;230.0;20.0 542 | 543 | 544 | Relation 545 | 546 | 580 547 | 1030 548 | 250 549 | 40 550 | 551 | lt=()- 552 | r1=http/tcp/:42114 553 | 554 | 10.0;20.0;230.0;20.0 555 | 556 | 557 | Relation 558 | 559 | 580 560 | 1180 561 | 250 562 | 40 563 | 564 | lt=()- 565 | r1=grpc/tcp/:42115 566 | 567 | 10.0;20.0;230.0;20.0 568 | 569 | 570 | Relation 571 | 572 | 480 573 | 910 574 | 350 575 | 340 576 | 577 | lt=)- 578 | 579 | 90.0;20.0;20.0;20.0;10.0;30.0;10.0;310.0;20.0;320.0;330.0;320.0 580 | 581 | 582 | Relation 583 | 584 | 480 585 | 910 586 | 350 587 | 190 588 | 589 | lt=)- 590 | 591 | 90.0;20.0;20.0;20.0;10.0;30.0;10.0;160.0;20.0;170.0;330.0;170.0 592 | 593 | 594 | Relation 595 | 596 | 530 597 | 990 598 | 300 599 | 160 600 | 601 
| lt=)- 602 | 603 | 40.0;20.0;20.0;20.0;10.0;30.0;10.0;130.0;20.0;140.0;280.0;140.0 604 | 605 | 606 | Relation 607 | 608 | 1080 609 | 880 610 | 350 611 | 420 612 | 613 | lt=-( 614 | 615 | 10.0;10.0;320.0;10.0;330.0;20.0;330.0;380.0;320.0;390.0;300.0;390.0 616 | 617 | 618 | Relation 619 | 620 | 530 621 | 880 622 | 300 623 | 160 624 | 625 | lt=)- 626 | 627 | 40.0;130.0;20.0;130.0;10.0;120.0;10.0;20.0;20.0;10.0;280.0;10.0 628 | 629 | 630 | Relation 631 | 632 | 1080 633 | 1320 634 | 300 635 | 40 636 | 637 | lt=-() 638 | r2=http/tcp/10.3.0.2:8080 639 | 640 | 10.0;20.0;280.0;20.0 641 | 642 | 643 | UMLClass 644 | 645 | 410 646 | 600 647 | 360 648 | 120 649 | 650 | <<vm>> 651 | name: bios 652 | id: 00000000-0000-4000-8000-000000000001 653 | mac: 08:00:27:00:00:01 654 | eth0 ip: 10.3.0.11 655 | firmware: bios 656 | 657 | 658 | 659 | Relation 660 | 661 | 580 662 | 550 663 | 200 664 | 70 665 | 666 | lt=- 667 | 180.0;10.0;180.0;30.0;10.0;30.0;10.0;50.0 668 | 669 | 670 | UMLClass 671 | 672 | 30 673 | 600 674 | 360 675 | 120 676 | 677 | <<vm>> 678 | name: uefi 679 | id: 00000000-0000-4000-8000-000000000002 680 | mac: 08:00:27:00:00:02 681 | eth0 ip: 10.3.0.12 682 | firmware: uefi 683 | 684 | 685 | 686 | Relation 687 | 688 | 580 689 | 990 690 | 250 691 | 40 692 | 693 | lt=()- 694 | r1=grpc/tcp/:42113 695 | 696 | 10.0;20.0;230.0;20.0 697 | 698 | 699 | UMLClass 700 | 701 | 980 702 | 480 703 | 130 704 | 80 705 | 706 | <<vlan>> 707 | name: vlan.wan 708 | vlan id: 2 709 | ip: 192.168.1.1 710 | 711 | 712 | 713 | Relation 714 | 715 | 1030 716 | 430 717 | 30 718 | 70 719 | 720 | lt=- 721 | 10.0;10.0;10.0;50.0 722 | 723 | 724 | Relation 725 | 726 | 200 727 | 550 728 | 580 729 | 70 730 | 731 | lt=- 732 | 560.0;10.0;560.0;30.0;10.0;30.0;10.0;50.0 733 | 734 | 735 | Relation 736 | 737 | 810 738 | 510 739 | 50 740 | 30 741 | 742 | lt=- 743 | 10.0;10.0;30.0;10.0 744 | 745 | 746 | Relation 747 | 748 | 750 749 | 550 750 | 190 751 | 70 752 | 753 | lt=- 754 | 
10.0;10.0;10.0;30.0;170.0;30.0;170.0;50.0 755 | 756 | 757 | --------------------------------------------------------------------------------