├── .gitignore
├── provision-wireguard.sh
├── provision-k9s.sh
├── argocd
│   ├── providers.tf
│   └── main.tf
├── .vscode
│   └── settings.json
├── provision-terraform.sh
├── provision-reloader.sh
├── provision-network.sh
├── provision-etcdctl.sh
├── provision-helmfile.sh
├── renovate.json5
├── provision-containerd-shim-spin-v2.sh
├── provision-helm.sh
├── provision-k3s-registries.sh
├── provision-containerd-configuration.sh
├── provision-kube-vip.sh
├── provision-metallb.sh
├── provision-k3s-agent.sh
├── provision-base.sh
├── provision-crossplane.sh
├── provision-gitlab-runner.sh
├── example-spin.yml
├── provision-cert-manager.sh
├── example.yml
├── provision-zot.sh
├── provision-k8s-dashboard.sh
├── provision-argocd.sh
├── renovate.sh
├── README.md
├── provision-k3s-server.sh
└── Vagrantfile

/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant/
2 | tmp/
3 | .terraform/
4 | .terraform.lock.hcl
5 | *.tfstate*
6 | *.log
--------------------------------------------------------------------------------
/provision-wireguard.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -euxo pipefail
3 |
4 | # NB this is not really required. we only install it to have the wg tool to
5 | # quickly see the wireguard configuration.
6 | apt-get install -y wireguard
--------------------------------------------------------------------------------
/provision-k9s.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -euxo pipefail
3 |
4 | k9s_version="${1:-v0.32.5}"; shift || true
5 |
6 | # download and install.
7 | wget -qO- "https://github.com/derailed/k9s/releases/download/$k9s_version/k9s_Linux_amd64.tar.gz" \
8 |   | tar xzf - k9s
9 | install -m 755 k9s /usr/local/bin/
10 | rm k9s
11 |
12 | # try it.
13 | k9s version
--------------------------------------------------------------------------------
/argocd/providers.tf:
--------------------------------------------------------------------------------
1 | # see https://github.com/hashicorp/terraform
2 | terraform {
3 |   required_version = "1.9.4"
4 |   required_providers {
5 |     # see https://registry.terraform.io/providers/oboukili/argocd
6 |     # see https://github.com/argoproj-labs/terraform-provider-argocd
7 |     argocd = {
8 |       source  = "oboukili/argocd"
9 |       version = "6.1.1"
10 |     }
11 |   }
12 | }
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 |   "cSpell.words": [
3 |     "argocd",
4 |     "buildx",
5 |     "configmap",
6 |     "containerd",
7 |     "crictl",
8 |     "crossplane",
9 |     "daemonset",
10 |     "Distro",
11 |     "ethernets",
12 |     "healthz",
13 |     "httpie",
14 |     "KUBECONFIG",
15 |     "macaddress",
16 |     "netplan",
17 |     "networkd",
18 |     "OIDC",
19 |     "ruilopes",
20 |     "traefik",
21 |     "upbound"
22 |   ]
23 | }
--------------------------------------------------------------------------------
/provision-terraform.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -euxo pipefail
3 |
4 | terraform_version="${1:-1.9.4}"; shift || true
5 |
6 | # install dependencies.
7 | apt-get install -y unzip
8 |
9 | # install terraform.
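# NB a hedged aside (not part of the original script): hashicorp also
# publishes a terraform_${terraform_version}_SHA256SUMS file next to each
# release zip, so the download below could additionally be verified with
# something like:
#   wget -qO- "https://releases.hashicorp.com/terraform/$terraform_version/terraform_${terraform_version}_SHA256SUMS" \
#     | grep linux_amd64.zip \
#     | (cd /tmp && sha256sum --check -)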
10 | artifact_url="https://releases.hashicorp.com/terraform/$terraform_version/terraform_${terraform_version}_linux_amd64.zip" 11 | artifact_path="/tmp/$(basename $artifact_url)" 12 | wget -qO $artifact_path $artifact_url 13 | unzip -o $artifact_path -d /usr/local/bin 14 | rm $artifact_path 15 | CHECKPOINT_DISABLE=1 terraform version 16 | -------------------------------------------------------------------------------- /provision-reloader.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | reloader_chart_version="${1:-1.0.121}"; shift || true 5 | 6 | # add the stakater reloader repository. 7 | # see https://github.com/stakater/reloader 8 | # see https://artifacthub.io/packages/helm/stakater/reloader 9 | helm repo add stakater https://stakater.github.io/stakater-charts 10 | helm repo update 11 | 12 | echo 'Setting the reloader values...' 13 | cat >reloader-values.yml </etc/sysctl.d/99-ipv6.conf <<'EOF' 8 | net.ipv6.conf.all.disable_ipv6 = 1 9 | EOF 10 | sysctl -p -f /etc/sysctl.d/99-ipv6.conf 11 | 12 | # set network. 13 | # NB the system must be rebooted for this to take effect. this is required when 14 | # running by vagrant, since we cannot reconfigure the network under it. 15 | # instead, we reboot the machine from a vagrant provisioner. 16 | cat >/etc/network/interfaces </etc/profile.d/etcdctl.sh <<'EOF' 17 | export ETCDCTL_CACERT=/var/lib/rancher/k3s/server/tls/etcd/server-ca.crt 18 | export ETCDCTL_CERT=/var/lib/rancher/k3s/server/tls/etcd/server-client.crt 19 | export ETCDCTL_KEY=/var/lib/rancher/k3s/server/tls/etcd/server-client.key 20 | EOF 21 | -------------------------------------------------------------------------------- /provision-helmfile.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | # 5 | # deploy helmfile. 6 | 7 | helmfile_version="${1:-0.167.1}"; shift || true 8 | 9 | # install helmfile. 10 | # see https://github.com/helmfile/helmfile#installation 11 | echo "installing helmfile $helmfile_version..." 12 | case `uname -m` in 13 | x86_64) 14 | wget -qO- "https://github.com/helmfile/helmfile/releases/download/v$helmfile_version/helmfile_${helmfile_version}_linux_amd64.tar.gz" | tar xzf - --strip-components=0 helmfile 15 | ;; 16 | aarch64) 17 | wget -qO- "https://github.com/helmfile/helmfile/releases/download/v$helmfile_version/helmfile_${helmfile_version}_linux_arm64.tar.gz" | tar xzf - --strip-components=0 helmfile 18 | ;; 19 | esac 20 | install helmfile /usr/local/bin 21 | rm helmfile 22 | 23 | # kick the tires. 24 | printf "#\n# helmfile version\n#\n" 25 | helmfile version 26 | -------------------------------------------------------------------------------- /renovate.json5: -------------------------------------------------------------------------------- 1 | // see https://docs.renovatebot.com/templates/ 2 | // see https://docs.renovatebot.com/modules/manager/ 3 | // see https://docs.renovatebot.com/modules/manager/regex/ 4 | // see https://docs.renovatebot.com/configuration-options/ 5 | { 6 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 7 | "regexManagers": [ 8 | // default datasources. 9 | { 10 | "fileMatch": [ 11 | "Vagrantfile", 12 | "Dockerfile", 13 | "\\.sh$", 14 | "\\.tf$", 15 | ], 16 | "matchStrings": [ 17 | "# renovate: datasource=(?[^:]+?) 
depName=(?.+?)( versioning=(?.+?))?( extractVersion=(?.+?))?( registryUrl=(?.+?))?\\s.+?[:=]\\s*[\"']?(?.+?)[\"']?\\s" 18 | ], 19 | "versioningTemplate": "{{#if versioning}}{{{versioning}}}{{else}}semver-coerced{{/if}}", 20 | "extractVersionTemplate": "{{#if extractVersion}}{{{extractVersion}}}{{else}}^v?(?.+)${{/if}}" 21 | } 22 | ] 23 | } -------------------------------------------------------------------------------- /provision-containerd-shim-spin-v2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | # see https://github.com/spinkube/containerd-shim-spin 5 | # renovate: datasource=github-releases depName=spinkube/containerd-shim-spin 6 | CONTAINERD_SHIM_SPIN_VERSION='0.15.0' 7 | 8 | # bail when already installed. 9 | if [ -x /usr/local/bin/containerd-shim-spin-v2 ]; then 10 | # e.g. Version: 0.15.0 11 | actual_version="$(/usr/local/bin/containerd-shim-spin-v2 -v | perl -ne '/^\s*Version: (.+)/ && print $1')" 12 | if [ "$actual_version" == "$CONTAINERD_SHIM_SPIN_VERSION" ]; then 13 | echo 'ANSIBLE CHANGED NO' 14 | exit 0 15 | fi 16 | fi 17 | 18 | # download and install. 19 | containerd_shim_spin_url="https://github.com/spinkube/containerd-shim-spin/releases/download/v${CONTAINERD_SHIM_SPIN_VERSION}/containerd-shim-spin-v2-linux-x86_64.tar.gz" 20 | t="$(mktemp -q -d --suffix=.containerd-shim-spin)" 21 | wget -qO- "$containerd_shim_spin_url" | tar xzf - -C "$t" 22 | install -m 755 "$t/containerd-shim-spin-v2" /usr/local/bin/ 23 | rm -rf "$t" 24 | -------------------------------------------------------------------------------- /provision-helm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | # 5 | # deploy helm. 6 | 7 | helm_version="${1:-v3.15.3}"; shift || true 8 | 9 | # install dependencies. 10 | apt-get install -y --no-install-recommends git-core 11 | 12 | # install helm. 13 | # see https://helm.sh/docs/intro/install/ 14 | echo "installing helm $helm_version client..." 15 | case `uname -m` in 16 | x86_64) 17 | wget -qO- "https://get.helm.sh/helm-$helm_version-linux-amd64.tar.gz" | tar xzf - --strip-components=1 linux-amd64/helm 18 | ;; 19 | aarch64) 20 | wget -qO- "https://get.helm.sh/helm-$helm_version-linux-arm64.tar.gz" | tar xzf - --strip-components=1 linux-arm64/helm 21 | ;; 22 | esac 23 | install helm /usr/local/bin 24 | rm helm 25 | 26 | # install the bash completion script. 27 | helm completion bash >/usr/share/bash-completion/completions/helm 28 | 29 | # install the helm-diff plugin. 30 | # NB this is especially useful for helmfile. 31 | helm plugin install https://github.com/databus23/helm-diff 32 | 33 | # kick the tires. 34 | printf "#\n# helm version\n#\n" 35 | helm version 36 | -------------------------------------------------------------------------------- /provision-k3s-registries.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | registry_mirror_url="http://registry.$(hostname --domain)" 5 | registries=' 6 | docker.io 7 | registry.k8s.io 8 | ghcr.io 9 | quay.io 10 | registry.gitlab.com 11 | ' 12 | 13 | # configure the registries. 14 | # NB this rewrite configuration ends-up in the containerd configuration, but, 15 | # only works because k3s is using a custom fork of containerd. 
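# NB as an illustrative (assumed) example of the effect: with a mirror rewrite
# that maps "^(.*)$" to "mirror/docker.io/$1", containerd pulls
# docker.io/library/nginx from $registry_mirror_url as
# mirror/docker.io/library/nginx, which matches the mirror/<registry>/<name>
# sync layout referenced in provision-zot.sh.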
16 | # see https://docs.k3s.io/installation/private-registry#rewrites 17 | # see /var/lib/rancher/k3s/agent/etc/containerd/config.toml 18 | # see https://github.com/k3s-io/k3s/blob/v1.30.3+k3s1/pkg/agent/templates/templates_linux.go 19 | # see https://github.com/k3s-io/k3s/pull/3064 20 | # see https://github.com/rancher/rke2/issues/741 21 | # see https://github.com/containerd/containerd/pull/5171 22 | install -d /etc/rancher/k3s 23 | cat >/etc/rancher/k3s/registries.yaml <>/etc/rancher/k3s/registries.yaml </var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl <<'EOF' 24 | {{ template "base" . }} 25 | 26 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.spin] 27 | runtime_type = "io.containerd.spin.v2" 28 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.spin.options] 29 | BinaryName = "/usr/local/bin/containerd-shim-spin-v2" 30 | EOF 31 | -------------------------------------------------------------------------------- /provision-kube-vip.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | kube_vip_version="${1:-v0.8.2}"; shift || true 5 | vip="${1:-10.11.0.30}"; shift || true 6 | # TODO use a specific version after https://github.com/kube-vip/kube-vip/issues/769 is addressed. 7 | kube_vip_rbac_url="https://kube-vip.io/manifests/rbac.yaml" 8 | kube_vip_image="ghcr.io/kube-vip/kube-vip:$kube_vip_version" 9 | fqdn="$(hostname --fqdn)" 10 | k3s_fqdn="s.$(hostname --domain)" 11 | k3s_url="https://$k3s_fqdn:6443" 12 | 13 | # load the IPVS kernel modules. 14 | cat >/etc/modules-load.d/ipvs.conf <<'EOF' 15 | ip_vs 16 | ip_vs_rr 17 | EOF 18 | for m in $(cat /etc/modules-load.d/ipvs.conf); do 19 | modprobe $m 20 | done 21 | 22 | # install kube-vip. 23 | # NB this creates a HA VIP (L2 IPVS) for the k8s control-plane k3s/api-server. 24 | # see https://kube-vip.io/docs/usage/k3s/ 25 | # see https://kube-vip.io/docs/installation/daemonset/ 26 | # see https://kube-vip.io/docs/about/architecture/ 27 | ctr image pull "$kube_vip_image" 28 | ( 29 | wget -qO- "$kube_vip_rbac_url" 30 | echo --- 31 | ctr run --rm --net-host "$kube_vip_image" vip \ 32 | /kube-vip \ 33 | manifest \ 34 | daemonset \ 35 | --arp \ 36 | --interface eth1 \ 37 | --address "$vip" \ 38 | --inCluster \ 39 | --taint \ 40 | --controlplane \ 41 | --enableLoadBalancer \ 42 | --leaderElection 43 | ) | kubectl apply -f - 44 | 45 | # wait until $k3s_url is available. 46 | while ! wget \ 47 | --quiet \ 48 | --spider \ 49 | --ca-certificate=/var/lib/rancher/k3s/server/tls/server-ca.crt \ 50 | --certificate=/var/lib/rancher/k3s/server/tls/client-admin.crt \ 51 | --private-key=/var/lib/rancher/k3s/server/tls/client-admin.key \ 52 | "$k3s_url"; do sleep 5; done 53 | -------------------------------------------------------------------------------- /provision-metallb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | metallb_chart_version="${1:-6.3.10}"; shift || true 5 | lb_ip_range="${1:-10.11.0.50-10.11.0.250}"; shift || true 6 | 7 | # add the bitnami helm charts repository. 8 | helm repo add bitnami https://charts.bitnami.com/bitnami 9 | helm repo update 10 | 11 | # install. 
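# NB a minimal sketch of what the layer-2 setup amounts to (hedged: the actual
# settings are passed through metallb-values.yml below and the resource names
# here are illustrative): metallb answers ARP for the addresses in
# $lb_ip_range, which the upstream CRDs express as:
#   apiVersion: metallb.io/v1beta1
#   kind: IPAddressPool
#   metadata: {name: default, namespace: metallb-system}
#   spec: {addresses: [10.11.0.50-10.11.0.250]}
#   ---
#   apiVersion: metallb.io/v1beta1
#   kind: L2Advertisement
#   metadata: {name: default, namespace: metallb-system}
#   spec: {ipAddressPools: [default]}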
12 | # see https://artifacthub.io/packages/helm/bitnami/metallb 13 | # see https://metallb.universe.tf/configuration/k3s/ 14 | # see https://metallb.universe.tf/configuration/#layer-2-configuration 15 | # see https://metallb.universe.tf/community/#code-organization 16 | # see https://github.com/bitnami/charts/tree/master/bitnami/metallb 17 | # see https://kind.sigs.k8s.io/docs/user/loadbalancer/ 18 | cat >metallb-values.yml </etc/motd <<'EOF' 14 | 15 | _ ____ 16 | | | |___ \ 17 | | | __ __) |___ 18 | | |/ /|__ /usr/share/bash-completion/completions/crictl 54 | kubectl completion bash >/usr/share/bash-completion/completions/kubectl 55 | 56 | # list runnnig pods. 57 | crictl pods 58 | 59 | # list running containers. 60 | crictl ps 61 | ctr containers ls 62 | 63 | # show listening ports. 64 | ss -n --tcp --listening --processes 65 | 66 | # show network routes. 67 | ip route 68 | 69 | # show memory info. 70 | free 71 | 72 | # show versions. 73 | crictl version 74 | ctr version 75 | -------------------------------------------------------------------------------- /argocd/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # see https://github.com/bitnami/charts/tree/main/bitnami/nginx 3 | # see https://artifacthub.io/packages/helm/bitnami/nginx 4 | # renovate: datasource=docker depName=bitnamicharts/nginx 5 | nginx_chart_version = "18.1.8" 6 | } 7 | 8 | # see https://registry.terraform.io/providers/oboukili/argocd/latest/docs/resources/project 9 | resource "argocd_project" "example" { 10 | metadata { 11 | name = "example" 12 | namespace = "argocd" 13 | } 14 | spec { 15 | source_repos = ["*"] 16 | destination { 17 | server = "*" 18 | namespace = "*" 19 | } 20 | cluster_resource_whitelist { 21 | group = "*" 22 | kind = "*" 23 | } 24 | } 25 | } 26 | 27 | # see https://argo-cd.readthedocs.io/en/stable/user-guide/helm/ 28 | # see https://artifacthub.io/packages/helm/bitnami/nginx 29 | # see https://github.com/bitnami/charts/tree/main/bitnami/nginx 30 | # see https://github.com/argoproj/argocd-example-apps 31 | # see https://registry.terraform.io/providers/oboukili/argocd/latest/docs/resources/application 32 | resource "argocd_application" "nginx" { 33 | metadata { 34 | name = "nginx" 35 | namespace = "argocd" 36 | } 37 | 38 | wait = true 39 | 40 | spec { 41 | project = argocd_project.example.id 42 | 43 | destination { 44 | name = "in-cluster" 45 | namespace = "default" 46 | } 47 | 48 | source { 49 | repo_url = "registry-1.docker.io/bitnamicharts" 50 | chart = "nginx" 51 | target_revision = local.nginx_chart_version 52 | helm { 53 | values = yamlencode({ 54 | serverBlock = <<-EOS 55 | server { 56 | listen 0.0.0.0:8080; 57 | location / { 58 | return 200 "nginx: Hello, World!\n"; 59 | } 60 | } 61 | EOS 62 | }) 63 | } 64 | } 65 | 66 | sync_policy { 67 | automated { 68 | prune = true 69 | self_heal = true 70 | allow_empty = true 71 | } 72 | retry { 73 | limit = "5" 74 | backoff { 75 | duration = "30s" 76 | max_duration = "2m" 77 | factor = "2" 78 | } 79 | } 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /provision-base.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | extra_hosts="$1"; shift || true 5 | 6 | # set the extra hosts. 
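# NB $extra_hosts is passed in from the Vagrantfile EXTRA_HOSTS constant and
# contains lines like:
#   10.11.0.4 registry.example.test
#   10.11.0.30 s.example.test
#   10.10.9.99 gitlab.example.com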
7 | cat >>/etc/hosts </etc/vim/vimrc.local <<'EOF' 51 | syntax on 52 | set background=dark 53 | set esckeys 54 | set ruler 55 | set laststatus=2 56 | set nobackup 57 | EOF 58 | 59 | # configure the shell. 60 | cat >/etc/profile.d/login.sh <<'EOF' 61 | [[ "$-" != *i* ]] && return 62 | export EDITOR=vim 63 | export PAGER=less 64 | alias l='ls -lF --color' 65 | alias ll='l -a' 66 | alias h='history 25' 67 | alias j='jobs -l' 68 | EOF 69 | 70 | cat >/etc/inputrc <<'EOF' 71 | set input-meta on 72 | set output-meta on 73 | set show-all-if-ambiguous on 74 | set completion-ignore-case on 75 | "\e[A": history-search-backward 76 | "\e[B": history-search-forward 77 | "\eOD": backward-word 78 | "\eOC": forward-word 79 | EOF 80 | 81 | # install arp-scan. 82 | # arp-scan lets us discover nodes in the local network. 83 | # e.g. arp-scan --localnet --interface eth1 84 | apt-get install -y --no-install-recommends arp-scan 85 | 86 | # install useful tools. 87 | apt-get install -y --no-install-recommends \ 88 | tcpdump \ 89 | traceroute \ 90 | iptables \ 91 | ipvsadm \ 92 | ipset 93 | -------------------------------------------------------------------------------- /provision-crossplane.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | crossplane_chart_version="${1:-1.16.0}"; shift || true 5 | crossplane_provider_aws_s3_version="${1:-1.11.0}"; shift || true 6 | 7 | # add the crossplane helm charts repository. 8 | helm repo add crossplane https://charts.crossplane.io/stable 9 | helm repo update 10 | 11 | # search the chart and app versions, e.g.: in this case we are using: 12 | # NAME CHART VERSION APP VERSION DESCRIPTION 13 | # crossplane/crossplane 1.16.0 1.16.0 Crossplane is an open source Kubernetes add-on ... 14 | helm search repo crossplane/crossplane --versions | head -10 15 | 16 | # set the configuration. 17 | # NB the default values are described at: 18 | # https://github.com/crossplane/crossplane/tree/master/cluster/charts/crossplane/values.yaml 19 | # NB make sure you are seeing the same version of the chart that you are installing. 20 | # see https://docs.crossplane.io/v1.16/software/install/#customize-the-crossplane-helm-chart 21 | cat >crossplane-values.yml <gitlab-runner-values.yml <cert-manager-values.yml < /vagrant/tmp/ingress-ca-crt.pem 92 | 93 | # trust the ingress ca. 
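# NB once update-ca-certificates (below) has run, tools that use the system
# trust store (e.g. curl, wget) will accept certificates issued by this ca;
# the trust can be spot-checked with, e.g.:
#   openssl verify /usr/local/share/ca-certificates/ingress.crt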
94 | install /vagrant/tmp/ingress-ca-crt.pem /usr/local/share/ca-certificates/ingress.crt 95 | update-ca-certificates 96 | -------------------------------------------------------------------------------- /example.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # see https://kubernetes.io/docs/concepts/services-networking/ingress/ 3 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#ingress-v1-networking-k8s-io 4 | apiVersion: networking.k8s.io/v1 5 | kind: Ingress 6 | metadata: 7 | name: example 8 | spec: 9 | rules: 10 | - host: example.example.test 11 | http: 12 | paths: 13 | - path: / 14 | pathType: Prefix 15 | backend: 16 | service: 17 | name: example 18 | port: 19 | name: web 20 | --- 21 | # see https://kubernetes.io/docs/concepts/services-networking/service/#type-clusterip 22 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#service-v1-core 23 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#serviceport-v1-core 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: example 28 | spec: 29 | type: ClusterIP 30 | selector: 31 | app: example 32 | ports: 33 | - name: web 34 | port: 80 35 | protocol: TCP 36 | targetPort: web 37 | --- 38 | # see https://kubernetes.io/docs/concepts/workloads/controllers/deployment/ 39 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deployment-v1-apps 40 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#podtemplatespec-v1-core 41 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#container-v1-core 42 | apiVersion: apps/v1 43 | kind: Deployment 44 | metadata: 45 | name: example 46 | spec: 47 | replicas: 1 48 | selector: 49 | matchLabels: 50 | app: example 51 | template: 52 | metadata: 53 | labels: 54 | app: example 55 | spec: 56 | enableServiceLinks: false 57 | containers: 58 | # see https://github.com/rgl/example-docker-buildx-go 59 | # see https://hub.docker.com/repository/docker/ruilopes/example-docker-buildx-go 60 | - name: example 61 | image: ruilopes/example-docker-buildx-go:v1.11.0 62 | args: 63 | - -listen=0.0.0.0:9000 64 | env: 65 | # see https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/ 66 | # see https://github.com/kubernetes/kubernetes/blob/v1.30.2/test/e2e/common/node/downwardapi.go 67 | - name: EXAMPLE_NODE_NAME 68 | valueFrom: 69 | fieldRef: 70 | fieldPath: spec.nodeName 71 | - name: EXAMPLE_POD_NAMESPACE 72 | valueFrom: 73 | fieldRef: 74 | fieldPath: metadata.namespace 75 | - name: EXAMPLE_POD_NAME 76 | valueFrom: 77 | fieldRef: 78 | fieldPath: metadata.name 79 | - name: EXAMPLE_POD_UID 80 | valueFrom: 81 | fieldRef: 82 | fieldPath: metadata.uid 83 | - name: EXAMPLE_POD_IP 84 | valueFrom: 85 | fieldRef: 86 | fieldPath: status.podIP 87 | ports: 88 | - name: web 89 | containerPort: 9000 90 | resources: 91 | requests: 92 | memory: 20Mi 93 | cpu: '0.1' 94 | limits: 95 | memory: 20Mi 96 | cpu: '0.1' 97 | securityContext: 98 | allowPrivilegeEscalation: false 99 | capabilities: 100 | drop: 101 | - ALL 102 | readOnlyRootFilesystem: true 103 | runAsNonRoot: true 104 | seccompProfile: 105 | type: RuntimeDefault 106 | -------------------------------------------------------------------------------- /provision-zot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | zot_version="${1:-2.1.1}" 5 | 6 | 
zot_domain="$(hostname --fqdn)" 7 | zot_url="http://$zot_domain" 8 | 9 | # NB you can see all the currently used container images with: 10 | # kubectl get pods --all-namespaces -o go-template --template '{{range .items}}{{range .spec.containers}}{{printf "%s\n" .image}}{{end}}{{end}}' | sort -u 11 | registries=' 12 | docker.io 13 | registry.k8s.io 14 | ghcr.io 15 | quay.io 16 | registry.gitlab.com 17 | ' 18 | 19 | # add the zot user. 20 | groupadd --system zot 21 | adduser \ 22 | --system \ 23 | --disabled-login \ 24 | --no-create-home \ 25 | --gecos '' \ 26 | --ingroup zot \ 27 | --home /opt/zot \ 28 | zot 29 | install -m 750 -o zot -g zot -d /opt/zot 30 | 31 | # download and install. 32 | zot_dist_url="https://github.com/project-zot/zot/releases/download/v$zot_version/zot-linux-amd64" 33 | zli_dist_url="https://github.com/project-zot/zot/releases/download/v$zot_version/zli-linux-amd64" 34 | zot_dist_path="/vagrant/tmp/zot-$zot_version-$(basename "$zot_dist_url")" 35 | zli_dist_path="/vagrant/tmp/zot-$zot_version-$(basename "$zli_dist_url")" 36 | if [ ! -f "$zot_dist_path" ]; then 37 | wget -qO "$zot_dist_path" "$zot_dist_url" 38 | fi 39 | if [ ! -f "$zli_dist_path" ]; then 40 | wget -qO "$zli_dist_path" "$zli_dist_url" 41 | fi 42 | install -m 755 -d /opt/zot/bin 43 | install -m 750 -g zot -d /opt/zot/conf 44 | install -m 750 -o zot -g zot -d /opt/zot/data 45 | install -m 755 "$zot_dist_path" /opt/zot/bin/zot 46 | install -m 755 "$zli_dist_path" /opt/zot/bin/zli 47 | ln -sf /opt/zot/bin/zli /usr/local/bin/zli 48 | 49 | # # install the certificates. 50 | # install -m 440 -g zot /vagrant/tmp/tls/example-ca/$zot_domain-crt.pem /opt/zot/conf/crt.pem 51 | # install -m 440 -g zot /vagrant/tmp/tls/example-ca/$zot_domain-key.pem /opt/zot/conf/key.pem 52 | 53 | # create the configuration file. 54 | # NB examples: 55 | # # use the upstream registry. 56 | # regctl tag ls registry.k8s.io/pause 57 | # regctl image inspect registry.k8s.io/pause 58 | # # use the zot mirror registry. 59 | # regctl image export registry.test/mirror/registry.k8s.io/pause pause.tar 60 | # regctl image inspect registry.test/mirror/registry.k8s.io/pause 61 | # regctl tag ls registry.test/mirror/registry.k8s.io/pause 62 | # see https://zotregistry.dev/v2.1.1/articles/mirroring/ 63 | # see https://zotregistry.dev/v2.1.1/admin-guide/admin-configuration/#syncing-and-mirroring-registries 64 | cat >/opt/zot/conf/config.yaml <>/opt/zot/conf/config.yaml </etc/systemd/system/zot.service </usr/share/bash-completion/completions/zli 135 | zli config add main "$zot_url" 136 | zli config --list 137 | zli image list --config main 138 | -------------------------------------------------------------------------------- /provision-k8s-dashboard.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | kubernetes_dashboard_chart_version="${1:-v7.5.0}"; shift || true 5 | kubernetes_dashboard_fqdn="kubernetes-dashboard.$(hostname --domain)" 6 | 7 | # create the kubernetes-dashboard tls secret. 
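# NB the manifest applied below is elided from this dump; presumably (an
# assumption) it requests the tls certificate from cert-manager. once issued,
# the result can be listed with, e.g.:
#   kubectl -n kubernetes-dashboard get secret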
8 | # see https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/ 9 | kubectl create namespace kubernetes-dashboard 10 | kubectl apply -n kubernetes-dashboard -f - <kubernetes-dashboard-values.yml </vagrant/tmp/admin-token.txt 132 | -------------------------------------------------------------------------------- /provision-argocd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | argocd_cli_version="${1:-2.12.0}"; shift || true 5 | argocd_chart_version="${1:-7.4.3}"; shift || true 6 | argocd_fqdn="argocd.$(hostname --domain)" 7 | 8 | # create the argocd-server tls secret. 9 | # NB argocd-server will automatically reload this secret. 10 | # NB alternatively we could set the server.certificate.enabled helm value. but 11 | # that does not allow us to fully customize the certificate (e.g. subject). 12 | # see https://github.com/argoproj/argo-helm/blob/argo-cd-7.4.3/charts/argo-cd/templates/argocd-server/certificate.yaml 13 | # see https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/ 14 | kubectl create namespace argocd 15 | kubectl apply -n argocd -f - <argocd-values.yml < /vagrant/tmp/argocd-admin-password.txt 160 | 161 | # verify the certificates. 162 | # NB to further troubleshoot, add the -debug -tlsextdebug cli arguments. 163 | endpoints=( 164 | 'argocd.example.test:443' 165 | 'argocd-repo-server.argocd.svc:8081' 166 | # NB dex verification is commented because we have not configured dex, as 167 | # such, there is not endpoint listening, so we cannot verify the 168 | # certificate. 169 | #'argocd-dex-server.argocd.svc:5556' 170 | ) 171 | for endpoint in "${endpoints[@]}"; do 172 | h="${endpoint%:*}" 173 | kubectl -n argocd exec --stdin deployment/argocd-server -- bash -eux </dev/null \ 180 | | openssl x509 -noout -text 181 | # verify certificate. 182 | openssl s_client \ 183 | -connect "$endpoint" \ 184 | -servername "$h" \ 185 | -showcerts \ 186 | -verify 100 \ 187 | -verify_return_error \ 188 | -CAfile <(echo "$(cat /vagrant/tmp/ingress-ca-crt.pem)") 189 | EOF 190 | done 191 | 192 | # configure argocd. 193 | export ARGOCD_SERVER="$argocd_fqdn" 194 | export ARGOCD_AUTH_USERNAME="admin" 195 | export ARGOCD_AUTH_PASSWORD="$(cat /vagrant/tmp/argocd-admin-password.txt)" 196 | export CHECKPOINT_DISABLE=1 197 | export TF_LOG=DEBUG # TF_LOG can be one of: ERROR, WARN, INFO, DEBUG, TRACE. 198 | export TF_LOG_PATH=terraform.log 199 | pushd /vagrant/argocd 200 | rm -f terraform.tfstate* terraform*.log 201 | terraform init 202 | terraform apply -auto-approve \ 203 | | tee terraform-apply.log 204 | popd 205 | -------------------------------------------------------------------------------- /renovate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | # this executes renovate against the local repository. 5 | # NB this uses a temporary gitea instance because running renovate against a 6 | # local directory not (yet?) supported. 
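# NB the overall flow below is: start gitea in docker, create a renovate user
# and an api token for it, push this repository into gitea, run renovate
# against that repository, and finally mine the renovate json log for the
# dependency and update information.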
7 | # see https://github.com/renovatebot/renovate/issues/3609 8 | 9 | export RENOVATE_USERNAME='renovate' 10 | export RENOVATE_NAME='Renovate Bot' 11 | export RENOVATE_PASSWORD='password' 12 | gitea_container_name="$(basename "$(dirname "$(realpath "${BASH_SOURCE[0]}")")")-renovate-gitea" 13 | 14 | # see https://hub.docker.com/r/gitea/gitea/tags 15 | # renovate: datasource=docker depName=gitea/gitea 16 | gitea_version='1.22.1' 17 | 18 | # see https://hub.docker.com/r/renovate/renovate/tags 19 | # renovate: datasource=docker depName=renovate/renovate 20 | renovate_version='38.27.0' 21 | 22 | # clean. 23 | echo 'Deleting existing Gitea...' 24 | docker rm --force "$gitea_container_name" >/dev/null 2>&1 25 | echo 'Deleting existing temporary files...' 26 | rm -f tmp/renovate-* 27 | install -d tmp 28 | 29 | # start gitea in background. 30 | # see https://docs.gitea.io/en-us/config-cheat-sheet/ 31 | # see https://github.com/go-gitea/gitea/releases 32 | # see https://github.com/go-gitea/gitea/blob/v1.22.1/docker/root/etc/s6/gitea/setup 33 | echo 'Starting Gitea...' 34 | docker run \ 35 | --detach \ 36 | --name "$gitea_container_name" \ 37 | -v /etc/timezone:/etc/timezone:ro \ 38 | -v /etc/localtime:/etc/localtime:ro \ 39 | -e SECRET_KEY=abracadabra \ 40 | -p 3000 \ 41 | "gitea/gitea:$gitea_version" \ 42 | >/dev/null 43 | gitea_addr="$(docker port "$gitea_container_name" 3000 | head -1)" 44 | gitea_url="http://$gitea_addr" 45 | export RENOVATE_ENDPOINT="$gitea_url" 46 | export GIT_PUSH_REPOSITORY="http://$RENOVATE_USERNAME:$RENOVATE_PASSWORD@$gitea_addr/$RENOVATE_USERNAME/test.git" 47 | 48 | # wait for gitea to be ready. 49 | echo "Waiting for Gitea to be ready at $gitea_url..." 50 | GITEA_URL="$gitea_url" bash -euc 'while [ -z "$(wget -qO- "$GITEA_URL/api/v1/version" | jq -r ".version | select(.!=null)")" ]; do sleep 5; done' 51 | 52 | # create user in gitea. 53 | echo "Creating Gitea $RENOVATE_USERNAME user..." 54 | docker exec --user git "$gitea_container_name" gitea admin user create \ 55 | --admin \ 56 | --email "$RENOVATE_USERNAME@example.com" \ 57 | --username "$RENOVATE_USERNAME" \ 58 | --password "$RENOVATE_PASSWORD" 59 | curl \ 60 | --silent \ 61 | --show-error \ 62 | --fail-with-body \ 63 | -u "$RENOVATE_USERNAME:$RENOVATE_PASSWORD" \ 64 | -X 'PATCH' \ 65 | -H 'Accept: application/json' \ 66 | -H 'Content-Type: application/json' \ 67 | -d "{\"full_name\":\"$RENOVATE_NAME\"}" \ 68 | "$gitea_url/api/v1/user/settings" \ 69 | | jq \ 70 | > /dev/null 71 | 72 | # create the user personal access token. 73 | # see https://docs.gitea.io/en-us/api-usage/ 74 | # see https://docs.gitea.io/en-us/oauth2-provider/#scopes 75 | # see https://try.gitea.io/api/swagger#/user/userCreateToken 76 | echo "Creating Gitea $RENOVATE_USERNAME user personal access token..." 77 | curl \ 78 | --silent \ 79 | --show-error \ 80 | --fail-with-body \ 81 | -u "$RENOVATE_USERNAME:$RENOVATE_PASSWORD" \ 82 | -X POST \ 83 | -H "Content-Type: application/json" \ 84 | -d '{"name": "renovate", "scopes": ["read:user", "write:issue", "write:repository"]}' \ 85 | "$gitea_url/api/v1/users/$RENOVATE_USERNAME/tokens" \ 86 | | jq -r .sha1 \ 87 | >tmp/renovate-gitea-token.txt 88 | 89 | # try the token. 90 | echo "Trying the Gitea $RENOVATE_USERNAME user personal access token..." 
91 | RENOVATE_TOKEN="$(cat tmp/renovate-gitea-token.txt)" 92 | export RENOVATE_TOKEN 93 | curl \ 94 | --silent \ 95 | --show-error \ 96 | --fail-with-body \ 97 | -H "Authorization: token $RENOVATE_TOKEN" \ 98 | -H 'Accept: application/json' \ 99 | "$gitea_url/api/v1/version" \ 100 | | jq \ 101 | > /dev/null 102 | 103 | # create remote repository in gitea. 104 | echo "Creating Gitea $RENOVATE_USERNAME test repository..." 105 | curl \ 106 | --silent \ 107 | --show-error \ 108 | --fail-with-body \ 109 | -u "$RENOVATE_USERNAME:$RENOVATE_PASSWORD" \ 110 | -X POST \ 111 | -H 'Accept: application/json' \ 112 | -H 'Content-Type: application/json' \ 113 | -d '{"name": "test"}' \ 114 | "$gitea_url/api/v1/user/repos" \ 115 | | jq \ 116 | > /dev/null 117 | 118 | # push the code to local gitea repository. 119 | # NB running renovate locally is not yet supported. 120 | # see https://github.com/renovatebot/renovate/issues/3609 121 | echo "Pushing local repository to Gitea $RENOVATE_USERNAME test repository..." 122 | git push --force "$GIT_PUSH_REPOSITORY" 123 | 124 | # see https://docs.renovatebot.com/modules/platform/gitea/ 125 | # see https://docs.renovatebot.com/self-hosted-configuration/#dryrun 126 | # see https://github.com/renovatebot/renovate/blob/main/docs/usage/examples/self-hosting.md 127 | # see https://github.com/renovatebot/renovate/tree/main/lib/modules/datasource 128 | # see https://github.com/renovatebot/renovate/tree/main/lib/modules/versioning 129 | RENOVATE_TOKEN="$(cat tmp/renovate-gitea-token.txt)" 130 | export RENOVATE_TOKEN 131 | # NB these can also be passed as raw positional arguments to docker run. 132 | export RENOVATE_REPOSITORIES="$RENOVATE_USERNAME/test" 133 | # see https://docs.github.com/en/rest/rate-limit#get-rate-limit-status-for-the-authenticated-user 134 | # see https://github.com/settings/tokens 135 | # NB this is only used for authentication. the token should not have any scope enabled. 136 | #export GITHUB_COM_TOKEN='TODO-YOUR-TOKEN' 137 | # let renovate create all the required pull requests. 138 | # see https://docs.renovatebot.com/configuration-options/#prhourlylimit 139 | # see https://docs.renovatebot.com/configuration-options/#prconcurrentlimit 140 | export RENOVATE_PR_HOURLY_LIMIT='0' 141 | export RENOVATE_PR_CONCURRENT_LIMIT='0' 142 | echo 'Running renovate...' 143 | # NB use --dry-run=lookup for not modifying the repository (e.g. for not 144 | # creating pull requests). 145 | docker run \ 146 | --rm \ 147 | --tty \ 148 | --interactive \ 149 | --net host \ 150 | --env GITHUB_COM_TOKEN \ 151 | --env RENOVATE_ENDPOINT \ 152 | --env RENOVATE_TOKEN \ 153 | --env RENOVATE_REPOSITORIES \ 154 | --env RENOVATE_PR_HOURLY_LIMIT \ 155 | --env RENOVATE_PR_CONCURRENT_LIMIT \ 156 | --env LOG_LEVEL=debug \ 157 | --env LOG_FORMAT=json \ 158 | "renovate/renovate:$renovate_version" \ 159 | --platform=gitea \ 160 | --git-url=endpoint \ 161 | >tmp/renovate-log.json 162 | 163 | echo 'Getting results...' 164 | # extract the errors. 165 | jq 'select(.err)' tmp/renovate-log.json >tmp/renovate-errors.json 166 | # extract the result from the renovate log. 167 | jq 'select(.msg == "packageFiles with updates") | .config' tmp/renovate-log.json >tmp/renovate-result.json 168 | # extract all the dependencies. 169 | jq 'to_entries[].value[] | {packageFile,dep:.deps[]}' tmp/renovate-result.json >tmp/renovate-dependencies.json 170 | # extract the dependencies that have updates. 
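# NB each line of tmp/renovate-dependencies.json is a single json object of
# the shape (illustrative, abbreviated):
#   {"packageFile": "Vagrantfile",
#    "dep": {"depName": "helm/helm", "currentValue": "v3.15.3", "updates": [...]}}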
171 | jq 'select((.dep.updates | length) > 0)' tmp/renovate-dependencies.json >tmp/renovate-dependencies-updates.json 172 | 173 | # helpers. 174 | function show-title { 175 | echo 176 | echo '#' 177 | echo "# $1" 178 | echo '#' 179 | echo 180 | } 181 | 182 | # show errors. 183 | if [ "$(jq --slurp length tmp/renovate-errors.json)" -ne '0' ]; then 184 | show-title errors 185 | jq . tmp/renovate-errors.json 186 | fi 187 | 188 | # show dependencies. 189 | function show-dependencies { 190 | show-title "$1" 191 | ( 192 | printf 'packageFile\tdatasource\tdepName\tcurrentValue\tnewVersions\tskipReason\twarnings\n' 193 | jq \ 194 | -r \ 195 | '[ 196 | .packageFile, 197 | .dep.datasource, 198 | .dep.depName, 199 | .dep.currentValue, 200 | (.dep | select(.updates) | .updates | map(.newVersion) | join(" | ")), 201 | .dep.skipReason, 202 | (.dep | select(.warnings) | .warnings | map(.message) | join(" | ")) 203 | ] | @tsv' \ 204 | "$2" \ 205 | | sort 206 | ) | column -t -s "$(printf \\t)" 207 | } 208 | show-dependencies 'Dependencies' tmp/renovate-dependencies.json 209 | show-dependencies 'Dependencies Updates' tmp/renovate-dependencies-updates.json 210 | 211 | # show the gitea project. 212 | show-title "See PRs at $gitea_url/$RENOVATE_USERNAME/test/pulls (you can login as $RENOVATE_USERNAME:$RENOVATE_PASSWORD)" 213 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # About 2 | 3 | This is a [k3s](https://github.com/k3s-io/k3s) kubernetes cluster playground wrapped in a Vagrant environment. 4 | 5 | # Usage 6 | 7 | Configure the host machine `hosts` file with: 8 | 9 | ``` 10 | 10.11.0.4 registry.example.test 11 | 10.11.0.30 s.example.test 12 | 10.11.0.50 traefik.example.test 13 | 10.11.0.50 kubernetes-dashboard.example.test 14 | 10.11.0.50 kubernetes-hello.example.test 15 | 10.11.0.50 argocd.example.test 16 | ``` 17 | 18 | Install the base [Debian 12 (Bookworm) vagrant box](https://github.com/rgl/debian-vagrant). 19 | 20 | Optionally, start the [rgl/gitlab-vagrant](https://github.com/rgl/gitlab-vagrant) environment at `../gitlab-vagrant`. If you do this, this environment will have the [gitlab-runner helm chart](https://docs.gitlab.com/runner/install/kubernetes.html) installed in the k8s cluster. 21 | 22 | Optionally, connect the environment to the physical network through the host `br-lan` bridge. 
The environment assumes that the host bridge was configured as: 23 | 24 | ```bash 25 | sudo -i 26 | # review the configuration in the files at /etc/netplan and replace them all 27 | # with a single configuration file: 28 | ls -laF /etc/netplan 29 | upstream_interface=eth0 30 | upstream_mac=$(ip link show $upstream_interface | perl -ne '/ether ([^ ]+)/ && print $1') 31 | cat >/etc/netplan/00-config.yaml < tmp/kubernetes-hello.yml 134 | kubectl apply -f tmp/kubernetes-hello.yml 135 | kubectl rollout status daemonset/kubernetes-hello 136 | kubectl get ingresses,services,pods,daemonset 137 | kubernetes_hello_ip="$(kubectl get ingress/kubernetes-hello -o json | jq -r .status.loadBalancer.ingress[0].ip)" 138 | kubernetes_hello_fqdn="$(kubectl get ingress/kubernetes-hello -o json | jq -r .spec.rules[0].host)" 139 | kubernetes_hello_url="http://$kubernetes_hello_fqdn" 140 | echo "kubernetes_hello_url: $kubernetes_hello_url" 141 | curl --resolve "$kubernetes_hello_fqdn:80:$kubernetes_hello_ip" "$kubernetes_hello_url" 142 | kubectl delete -f tmp/kubernetes-hello.yml 143 | ``` 144 | 145 | Access the example `nginx` ArgoCD application service (managed by ArgoCD as the 146 | [`nginx` ArgoCD Application](argocd/main.tf)): 147 | 148 | ```bash 149 | nginx_ip="$(kubectl get service/nginx -o json | jq -r .status.loadBalancer.ingress[0].ip)" 150 | nginx_url="http://$nginx_ip" 151 | echo "nginx_url: $nginx_url" 152 | curl "$nginx_url" 153 | ``` 154 | 155 | List this repository dependencies (and which have newer versions): 156 | 157 | ```bash 158 | GITHUB_COM_TOKEN='YOUR_GITHUB_PERSONAL_TOKEN' ./renovate.sh 159 | ``` 160 | 161 | ## Traefik Dashboard 162 | 163 | Access the Traefik Dashboard at: 164 | 165 | https://traefik.example.test/dashboard/ 166 | 167 | ## Rancher Server 168 | 169 | Access the Rancher Server at: 170 | 171 | https://s.example.test:6443 172 | 173 | **NB** This is a proxy to the k8s API server (which is running in port 6444). 174 | 175 | **NB** You must use the client certificate that is inside the `tmp/admin.conf`, 176 | `tmp/*.pem`, or `/etc/rancher/k3s/k3s.yaml` (inside the `s1` machine) file. 177 | 178 | Access the rancher server using the client certificate with httpie: 179 | 180 | ```bash 181 | http \ 182 | --verify tmp/default-ca-crt.pem \ 183 | --cert tmp/default-crt.pem \ 184 | --cert-key tmp/default-key.pem \ 185 | https://s.example.test:6443 186 | ``` 187 | 188 | Or with curl: 189 | 190 | ```bash 191 | curl \ 192 | --cacert tmp/default-ca-crt.pem \ 193 | --cert tmp/default-crt.pem \ 194 | --key tmp/default-key.pem \ 195 | https://s.example.test:6443 196 | ``` 197 | 198 | ## Kubernetes Dashboard 199 | 200 | Access the Kubernetes Dashboard at: 201 | 202 | https://kubernetes-dashboard.example.test 203 | 204 | Then select `Token` and use the contents of `tmp/admin-token.txt` as the token. 205 | 206 | You can also launch the kubernetes API server proxy in background: 207 | 208 | ```bash 209 | export KUBECONFIG=$PWD/tmp/admin.conf 210 | kubectl proxy & 211 | ``` 212 | 213 | And access the kubernetes dashboard at: 214 | 215 | http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ 216 | 217 | ## K9s Dashboard 218 | 219 | The [K9s](https://github.com/derailed/k9s) console UI dashboard is also 220 | installed in the server node. 
You can access it by running: 221 | 222 | ```bash 223 | vagrant ssh s1 224 | sudo su -l 225 | k9s 226 | ``` 227 | 228 | ## Zot Registry 229 | 230 | The [Zot Registry](https://zotregistry.dev) is installed in the registry 231 | node and can be accessed at: 232 | 233 | http://registry.example.test 234 | 235 | ## Argo CD 236 | 237 | Get the `admin` user password: 238 | 239 | ```bash 240 | echo "Argo CD admin password: $(cat tmp/argocd-admin-password.txt)" 241 | ``` 242 | 243 | Access the web interface: 244 | 245 | https://argocd.example.test 246 | 247 | Show the configuration: 248 | 249 | ```bash 250 | kubectl get -n argocd configmap/argocd-cmd-params-cm -o yaml 251 | ``` 252 | 253 | ## Crossplane 254 | 255 | Set the AWS credentials secret: 256 | 257 | ```bash 258 | # NB for testing purposes, you can copy these from the AWS Management Console. 259 | cat >tmp/aws-credentials.txt <<'EOF' 260 | [default] 261 | aws_access_key_id = 262 | aws_secret_access_key = 263 | #aws_session_token = 264 | EOF 265 | export KUBECONFIG=$PWD/tmp/admin.conf 266 | kubectl delete secret/aws-credentials \ 267 | --namespace crossplane-system 268 | kubectl create secret generic aws-credentials \ 269 | --namespace crossplane-system \ 270 | --from-file credentials=tmp/aws-credentials.txt 271 | ``` 272 | 273 | Create an S3 bucket: 274 | 275 | ```bash 276 | # see https://marketplace.upbound.io/providers/upbound/provider-aws-s3/v1.11.0/resources/s3.aws.upbound.io/Bucket/v1beta2 277 | # NB Bucket is cluster scoped. 278 | # see kubectl get crd buckets.s3.aws.upbound.io -o yaml 279 | export KUBECONFIG=$PWD/tmp/admin.conf 280 | kubectl create -f - <<'EOF' 281 | apiVersion: s3.aws.upbound.io/v1beta2 282 | kind: Bucket 283 | metadata: 284 | name: crossplane-hello-world 285 | spec: 286 | forProvider: 287 | region: eu-west-1 288 | tags: 289 | owner: rgl 290 | providerConfigRef: 291 | name: default 292 | EOF 293 | ``` 294 | 295 | List the created bucket: 296 | 297 | ```bash 298 | kubectl get buckets 299 | ``` 300 | 301 | Describe the created bucket: 302 | 303 | ```bash 304 | kubectl describe bucket/crossplane-hello-world 305 | ``` 306 | 307 | Using the AWS CLI, list the S3 buckets: 308 | 309 | ```bash 310 | AWS_CONFIG_FILE=tmp/aws-credentials.txt aws s3 ls 311 | ``` 312 | 313 | Delete the created bucket: 314 | 315 | ```bash 316 | kubectl delete bucket/crossplane-hello-world 317 | ``` 318 | 319 | # Notes 320 | 321 | * k3s has a custom k8s authenticator module that does user authentication from `/var/lib/rancher/k3s/server/cred/passwd`. 
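For example, you can take a read-only peek at it inside the `s1` machine (the file format is k3s-internal and may change between releases):

```bash
vagrant ssh s1
sudo cat /var/lib/rancher/k3s/server/cred/passwd
```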
322 | 323 | # Reference 324 | 325 | * [k3s Installation and Configuration Options](https://rancher.com/docs/k3s/latest/en/installation/install-options/) 326 | * [k3s Advanced Options and Configuration](https://rancher.com/docs/k3s/latest/en/advanced/) 327 | * [k3s Under the Hood: Building a Product-grade Lightweight Kubernetes Distro (KubeCon NA 2019)](https://www.youtube.com/watch?v=-HchRyqNtkU) 328 | -------------------------------------------------------------------------------- /provision-k3s-server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | k3s_command="$1"; shift 5 | k3s_channel="${1:-latest}"; shift 6 | k3s_version="${1:-v1.30.3+k3s1}"; shift 7 | k3s_token="$1"; shift 8 | flannel_backend="$1"; shift 9 | ip_address="$1"; shift 10 | krew_version="${1:-v0.4.4}"; shift || true # NB see https://github.com/kubernetes-sigs/krew 11 | taint="${1:-1}"; shift || true 12 | fqdn="$(hostname --fqdn)" 13 | k3s_fqdn="s.$(hostname --domain)" 14 | k3s_url="https://$k3s_fqdn:6443" 15 | 16 | # configure the motd. 17 | # NB this was generated at http://patorjk.com/software/taag/#p=display&f=Big&t=k3s%0Aserver. 18 | # it could also be generated with figlet.org. 19 | cat >/etc/motd <<'EOF' 20 | 21 | _ ____ 22 | | | |___ \ 23 | | | __ __) |___ 24 | | |/ /|__ /var/lib/rancher/k3s/server/manifests/traefik-local.yaml <<'EOF' 177 | apiVersion: traefik.containo.us/v1alpha1 178 | kind: IngressRoute 179 | metadata: 180 | name: traefik 181 | spec: 182 | entryPoints: 183 | - websecure 184 | routes: 185 | - match: Host(`traefik.example.test`) && (PathPrefix(`/dashboard`) || PathPrefix(`/api`)) 186 | kind: Rule 187 | services: 188 | - name: api@internal 189 | kind: TraefikService 190 | EOF 191 | 192 | # install the krew kubectl package manager. 193 | echo "installing the krew $krew_version kubectl package manager..." 194 | apt-get install -y --no-install-recommends git-core 195 | wget -qO- "https://github.com/kubernetes-sigs/krew/releases/download/$krew_version/krew-linux_amd64.tar.gz" | tar xzf - ./krew-linux_amd64 196 | wget -q "https://github.com/kubernetes-sigs/krew/releases/download/$krew_version/krew.yaml" 197 | ./krew-linux_amd64 install --manifest=krew.yaml 198 | rm krew-linux_amd64 199 | cat >/etc/profile.d/krew.sh <<'EOF' 200 | export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH" 201 | EOF 202 | source /etc/profile.d/krew.sh 203 | kubectl krew version 204 | 205 | # install the bash completion scripts. 206 | crictl completion bash >/usr/share/bash-completion/completions/crictl 207 | kubectl completion bash >/usr/share/bash-completion/completions/kubectl 208 | 209 | # symlink the default kubeconfig path so local tools like k9s can easily 210 | # find it without exporting the KUBECONFIG environment variable. 211 | ln -s /etc/rancher/k3s/k3s.yaml ~/.kube/config 212 | 213 | # save kubeconfig in the host. 
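# NB from the host, the kubeconfig saved below can then be used as, e.g.:
#   export KUBECONFIG=$PWD/tmp/admin.conf
#   kubectl get nodes -o wide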
214 | # NB the default users are generated at https://github.com/k3s-io/k3s/blob/v1.30.3+k3s1/pkg/daemons/control/deps/deps.go#L233-L261 215 | # and saved at /var/lib/rancher/k3s/server/cred/passwd 216 | mkdir -p /vagrant/tmp 217 | python3 - <1\.30\..+) 37 | K3S_VERSION = 'v1.30.3+k3s1' 38 | 39 | # see https://github.com/kube-vip/kube-vip/releases 40 | # renovate: datasource=github-releases depName=kube-vip/kube-vip 41 | KUBE_VIP_VERSION = 'v0.8.2' 42 | 43 | # see https://github.com/helm/helm/releases 44 | # renovate: datasource=github-releases depName=helm/helm 45 | HELM_VERSION = 'v3.15.3' 46 | 47 | # see https://github.com/helmfile/helmfile/releases 48 | # renovate: datasource=github-releases depName=helmfile/helmfile 49 | HELMFILE_VERSION = '0.167.1' 50 | 51 | # see https://github.com/kubernetes/dashboard/releases 52 | # renovate: datasource=helm depName=kubernetes-dashboard registryUrl=https://kubernetes.github.io/dashboard 53 | K8S_DASHBOARD_CHART_VERSION = 'v7.5.0' 54 | 55 | # see https://github.com/derailed/k9s/releases 56 | # renovate: datasource=github-releases depName=derailed/k9s 57 | K9S_VERSION = 'v0.32.5' 58 | 59 | # see https://github.com/kubernetes-sigs/krew/releases 60 | # renovate: datasource=github-releases depName=kubernetes-sigs/krew 61 | KREW_VERSION = 'v0.4.4' 62 | 63 | # see https://github.com/etcd-io/etcd/releases 64 | # NB make sure you use a version compatible with k3s. 65 | # renovate: datasource=github-releases depName=etcd-io/etcd 66 | ETCDCTL_VERSION = 'v3.5.15' 67 | 68 | # see https://artifacthub.io/packages/helm/bitnami/metallb 69 | # renovate: datasource=helm depName=metallb registryUrl=https://charts.bitnami.com/bitnami 70 | METALLB_CHART_VERSION = '6.3.10' # app version: 0.14.8 71 | 72 | # see https://www.terraform.io/downloads.html 73 | # see https://github.com/hashicorp/terraform/releases 74 | # renovate: datasource=github-releases depName=hashicorp/terraform 75 | TERRAFORM_VERSION = '1.9.4' 76 | 77 | # see https://artifacthub.io/packages/helm/cert-manager/cert-manager 78 | # renovate: datasource=helm depName=cert-manager registryUrl=https://charts.jetstack.io 79 | CERT_MANAGER_CHART_VERSION = '1.15.2' # app version: 1.15.2 80 | 81 | # see https://github.com/stakater/reloader 82 | # see https://artifacthub.io/packages/helm/stakater/reloader 83 | # renovate: datasource=helm depName=reloader registryUrl=https://stakater.github.io/stakater-charts 84 | RELOADER_CHART_VERSION = '1.0.121' # app version: 1.0.121 85 | 86 | # see https://gitlab.com/gitlab-org/charts/gitlab-runner/-/tags 87 | # renovate: datasource=helm depName=gitlab-runner registryUrl=https://charts.gitlab.io 88 | GITLAB_RUNNER_CHART_VERSION = '0.67.1' 89 | 90 | # link to the gitlab-vagrant environment (https://github.com/rgl/gitlab-vagrant running at ../gitlab-vagrant). 91 | GITLAB_FQDN = 'gitlab.example.com' 92 | GITLAB_IP = '10.10.9.99' 93 | 94 | # see https://github.com/argoproj/argo-cd/releases 95 | # renovate: datasource=github-releases depName=argoproj/argo-cd 96 | ARGOCD_CLI_VERSION = '2.12.0' 97 | 98 | # see https://artifacthub.io/packages/helm/argo/argo-cd 99 | # see https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd 100 | # renovate: datasource=helm depName=argo-cd registryUrl=https://argoproj.github.io/argo-helm 101 | ARGOCD_CHART_VERSION = '7.4.3' # app version 2.12.0. 
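# NB constants like the ones above are kept up-to-date by renovate: the regex
# manager in renovate.json5 matches the "# renovate: datasource=... depName=..."
# comment together with the "NAME = 'version'" assignment that follows it, and
# proposes a pull request when a newer version is published.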
102 | 103 | # see https://artifacthub.io/packages/helm/crossplane/crossplane 104 | # see https://github.com/crossplane/crossplane/tree/master/cluster/charts/crossplane 105 | # see https://github.com/crossplane/crossplane/releases 106 | # renovate: datasource=github-releases depName=crossplane/crossplane 107 | CROSSPLANE_CHART_VERSION = '1.16.0' # app version 1.16.0. 108 | 109 | # see https://marketplace.upbound.io/providers/upbound/provider-aws-s3 110 | # see https://github.com/upbound/provider-aws 111 | # renovate: datasource=github-releases depName=upbound/provider-aws 112 | CROSSPLANE_PROVIDER_AWS_S3_VERSION = '1.11.0' 113 | 114 | # set the flannel backend. use one of: 115 | # * host-gw: non-secure network (needs ethernet (L2) connectivity between nodes). 116 | # * vxlan: non-secure network (needs UDP (L3) connectivity between nodes). 117 | # * wireguard-native: secure network (needs UDP (L3) connectivity between nodes). 118 | FLANNEL_BACKEND = 'host-gw' 119 | #FLANNEL_BACKEND = 'vxlan' 120 | #FLANNEL_BACKEND = 'wireguard-native' 121 | 122 | NUMBER_OF_SERVER_NODES = 1 123 | NUMBER_OF_AGENT_NODES = 1 124 | 125 | BRIDGE_NAME = nil 126 | REGISTRY_FQDN = 'registry.example.test' 127 | REGISTRY_IP = '10.11.0.4' 128 | SERVER_FQDN = 's.example.test' 129 | SERVER_VIP = '10.11.0.30' 130 | FIRST_SERVER_NODE_IP = '10.11.0.31' 131 | FIRST_AGENT_NODE_IP = '10.11.0.41' 132 | LB_IP_RANGE = '10.11.0.50-10.11.0.69' 133 | 134 | # connect to the physical network through the host br-lan bridge. 135 | # BRIDGE_NAME = 'br-lan' 136 | # REGISTRY_IP = '192.168.1.4' 137 | # SERVER_VIP = '192.168.1.30' 138 | # FIRST_SERVER_NODE_IP = '192.168.1.31' 139 | # FIRST_AGENT_NODE_IP = '192.168.1.41' 140 | # LB_IP_RANGE = '192.168.1.50-192.168.1.69' 141 | 142 | SERVER_NODES = generate_nodes(FIRST_SERVER_NODE_IP, NUMBER_OF_SERVER_NODES, 's') 143 | AGENT_NODES = generate_nodes(FIRST_AGENT_NODE_IP, NUMBER_OF_AGENT_NODES, 'a') 144 | K3S_TOKEN = get_or_generate_k3s_token 145 | 146 | EXTRA_HOSTS = """ 147 | #{REGISTRY_IP} #{REGISTRY_FQDN} 148 | #{SERVER_VIP} #{SERVER_FQDN} 149 | #{GITLAB_IP} #{GITLAB_FQDN} 150 | """ 151 | 152 | # provision common tools between servers and agents. 153 | def provision_common(config, role, n) 154 | config.vm.provision 'shell', path: 'provision-helm.sh', args: [HELM_VERSION] # NB k3s also has a HelmChart CRD. 155 | config.vm.provision 'shell', path: 'provision-helmfile.sh', args: [HELMFILE_VERSION] 156 | config.vm.provision 'shell', path: 'provision-k9s.sh', args: [K9S_VERSION] 157 | end 158 | 159 | # provision the user workloads when running in the last agent or server (iif 160 | # there are no agents). 
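# NB e.g. with the defaults (NUMBER_OF_SERVER_NODES = 1 and
# NUMBER_OF_AGENT_NODES = 1) the user workloads are provisioned from the a1
# node; with NUMBER_OF_AGENT_NODES = 0 they would instead be provisioned from
# the last server node.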
161 | def provision_user_workloads(config, role, n) 162 | if (role == 'agent' && n == NUMBER_OF_AGENT_NODES) || (role == 'server' && n == NUMBER_OF_SERVER_NODES && NUMBER_OF_AGENT_NODES == 0) then 163 | env = { 164 | 'KUBECONFIG' => '/vagrant/tmp/admin.conf', 165 | } 166 | config.vm.provision 'shell', path: 'provision-terraform.sh', args: [TERRAFORM_VERSION], env: env 167 | config.vm.provision 'shell', path: 'provision-cert-manager.sh', args: [CERT_MANAGER_CHART_VERSION], env: env 168 | config.vm.provision 'shell', path: 'provision-reloader.sh', args: [RELOADER_CHART_VERSION], env: env 169 | config.vm.provision 'shell', path: 'provision-k8s-dashboard.sh', args: [K8S_DASHBOARD_CHART_VERSION], env: env 170 | config.vm.provision 'shell', path: 'provision-gitlab-runner.sh', args: [GITLAB_RUNNER_CHART_VERSION, GITLAB_FQDN, GITLAB_IP], env: env 171 | config.vm.provision 'shell', path: 'provision-argocd.sh', args: [ARGOCD_CLI_VERSION, ARGOCD_CHART_VERSION], env: env 172 | config.vm.provision 'shell', path: 'provision-crossplane.sh', args: [CROSSPLANE_CHART_VERSION, CROSSPLANE_PROVIDER_AWS_S3_VERSION], env: env 173 | end 174 | end 175 | 176 | Vagrant.configure(2) do |config| 177 | config.vm.box = 'debian-12-amd64' 178 | 179 | config.vm.provider 'libvirt' do |lv, config| 180 | lv.cpus = 2 181 | lv.cpu_mode = 'host-passthrough' 182 | lv.nested = true 183 | lv.keymap = 'pt' 184 | lv.disk_bus = 'scsi' 185 | lv.disk_device = 'sda' 186 | lv.disk_driver :discard => 'unmap', :cache => 'unsafe' 187 | lv.machine_virtual_size = 16 188 | # NB vagrant-libvirt does not yet support urandom. but we'll modify this to 189 | # urandom in the trigger bellow. 190 | lv.random :model => 'random' 191 | config.vm.synced_folder '.', '/vagrant', type: 'nfs', nfs_version: '4.2', nfs_udp: false 192 | config.trigger.before :'VagrantPlugins::ProviderLibvirt::Action::StartDomain', type: :action do |trigger| 193 | trigger.ruby do |env, machine| 194 | # modify the random model to use the urandom backend device. 195 | stdout, stderr, status = Open3.capture3( 196 | 'virt-xml', machine.id, 197 | '--edit', 198 | '--rng', '/dev/urandom') 199 | if status.exitstatus != 0 200 | raise "failed to run virt-xml to modify the random backend device. status=#{status.exitstatus} stdout=#{stdout} stderr=#{stderr}" 201 | end 202 | # modify the scsi controller model to virtio-scsi. 203 | # see https://github.com/vagrant-libvirt/vagrant-libvirt/pull/692 204 | # see https://github.com/vagrant-libvirt/vagrant-libvirt/issues/999 205 | stdout, stderr, status = Open3.capture3( 206 | 'virt-xml', machine.id, 207 | '--edit', 'type=scsi', 208 | '--controller', 'model=virtio-scsi') 209 | if status.exitstatus != 0 210 | raise "failed to run virt-xml to modify the scsi controller model. 
status=#{status.exitstatus} stdout=#{stdout} stderr=#{stderr}" 211 | end 212 | end 213 | end 214 | end 215 | 216 | config.vm.define 'registry' do |config| 217 | config.vm.provider 'libvirt' do |lv, config| 218 | lv.memory = 2*1024 219 | end 220 | config.vm.hostname = REGISTRY_FQDN 221 | if BRIDGE_NAME 222 | config.vm.network :public_network, mode: 'bridge', type: 'bridge', dev: BRIDGE_NAME, ip: REGISTRY_IP, auto_config: false 223 | config.vm.provision 'shell', path: 'provision-network.sh', args: [REGISTRY_IP] 224 | config.vm.provision 'reload' 225 | else 226 | config.vm.network :private_network, ip: REGISTRY_IP, libvirt__forward_mode: 'none', libvirt__dhcp_enabled: false 227 | end 228 | config.vm.provision 'shell', path: 'provision-base.sh', args: [EXTRA_HOSTS] 229 | config.vm.provision 'shell', path: 'provision-zot.sh', args: [ZOT_VERSION] 230 | end 231 | 232 | SERVER_NODES.each do |name, fqdn, ip_address, n| 233 | config.vm.define name do |config| 234 | config.vm.provider 'libvirt' do |lv, config| 235 | lv.memory = 2*1024 236 | end 237 | config.vm.hostname = fqdn 238 | if BRIDGE_NAME 239 | config.vm.network :public_network, mode: 'bridge', type: 'bridge', dev: BRIDGE_NAME, ip: ip_address, auto_config: false 240 | config.vm.provision 'shell', path: 'provision-network.sh', args: [ip_address] 241 | config.vm.provision 'reload' 242 | else 243 | config.vm.network :private_network, ip: ip_address, libvirt__forward_mode: 'none', libvirt__dhcp_enabled: false 244 | end 245 | config.vm.provision 'shell', path: 'provision-base.sh', args: [EXTRA_HOSTS] 246 | config.vm.provision 'shell', path: 'provision-wireguard.sh' 247 | config.vm.provision 'shell', path: 'provision-etcdctl.sh', args: [ETCDCTL_VERSION] 248 | config.vm.provision 'shell', path: 'provision-containerd-shim-spin-v2.sh' 249 | config.vm.provision 'shell', path: 'provision-containerd-configuration.sh' 250 | config.vm.provision 'shell', path: 'provision-k3s-registries.sh' 251 | config.vm.provision 'shell', path: 'provision-k3s-server.sh', args: [ 252 | n == 1 ? 
"cluster-init" : "cluster-join", 253 | K3S_CHANNEL, 254 | K3S_VERSION, 255 | K3S_TOKEN, 256 | FLANNEL_BACKEND, 257 | ip_address, 258 | KREW_VERSION, 259 | NUMBER_OF_AGENT_NODES > 0 && '1' || '0', 260 | ] 261 | provision_common(config, 'server', n) 262 | if n == 1 263 | config.vm.provision 'shell', path: 'provision-kube-vip.sh', args: [KUBE_VIP_VERSION, SERVER_VIP] 264 | config.vm.provision 'shell', path: 'provision-metallb.sh', args: [METALLB_CHART_VERSION, LB_IP_RANGE] 265 | end 266 | provision_user_workloads(config, 'server', n) 267 | end 268 | end 269 | 270 | AGENT_NODES.each do |name, fqdn, ip_address, n| 271 | config.vm.define name do |config| 272 | config.vm.provider 'libvirt' do |lv, config| 273 | lv.memory = 2*1024 274 | end 275 | config.vm.hostname = fqdn 276 | if BRIDGE_NAME 277 | config.vm.network :public_network, mode: 'bridge', type: 'bridge', dev: BRIDGE_NAME, ip: ip_address, auto_config: false 278 | config.vm.provision 'shell', path: 'provision-network.sh', args: [ip_address] 279 | config.vm.provision 'reload' 280 | else 281 | config.vm.network :private_network, ip: ip_address, libvirt__forward_mode: 'none', libvirt__dhcp_enabled: false 282 | end 283 | config.vm.provision 'shell', path: 'provision-base.sh', args: [EXTRA_HOSTS] 284 | config.vm.provision 'shell', path: 'provision-wireguard.sh' 285 | config.vm.provision 'shell', path: 'provision-containerd-shim-spin-v2.sh' 286 | config.vm.provision 'shell', path: 'provision-containerd-configuration.sh' 287 | config.vm.provision 'shell', path: 'provision-k3s-registries.sh' 288 | config.vm.provision 'shell', path: 'provision-k3s-agent.sh', args: [ 289 | K3S_CHANNEL, 290 | K3S_VERSION, 291 | K3S_TOKEN, 292 | ip_address 293 | ] 294 | provision_common(config, 'agent', n) 295 | provision_user_workloads(config, 'agent', n) 296 | end 297 | end 298 | 299 | config.trigger.before :up do |trigger| 300 | trigger.only_on = 'registry' 301 | trigger.run = { 302 | inline: '''bash -euc \' 303 | install -d tmp 304 | artifacts=( 305 | ../gitlab-vagrant/tmp/gitlab.example.com-crt.pem 306 | ../gitlab-vagrant/tmp/gitlab.example.com-crt.der 307 | ../gitlab-vagrant/tmp/gitlab-runner-authentication-token-kubernetes-k3s.json 308 | ) 309 | for artifact in "${artifacts[@]}"; do 310 | if [ -f $artifact ]; then 311 | rsync $artifact tmp 312 | fi 313 | done 314 | \' 315 | ''' 316 | } 317 | end 318 | end 319 | --------------------------------------------------------------------------------