├── packer ├── meta-data ├── setup_ubuntu.sh ├── DEVELOPMENT.md ├── kh-klipper.dockerfile ├── README.md ├── user-data ├── install_virtualbox.sh ├── create-support-bundle.sh ├── virtualbox_ami_builder.pkr.hcl ├── studio_ami.pkr.hcl ├── setup_root.sh ├── studio_virtualbox.pkr.hcl ├── helm3.sh └── k3s.sh ├── .github ├── CODEOWNERS ├── renovate.json └── workflows │ ├── build_virtualbox_builder_ami.yml │ └── build_studio_images.yml ├── .prettierrc ├── .gitignore ├── README.md └── LICENSE /packer/meta-data: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @0x2b3bfa0 @dreadatour @shcheklein 2 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | --- 2 | proseWrap: always 3 | printWidth: 80 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.sw? 
2 | *.tfstate 3 | .terraform 4 | .terraform.lock.hcl 5 | .env 6 | minio/ 7 | tls.key 8 | tls.crt 9 | .idea/ 10 | /packer/build/ 11 | -------------------------------------------------------------------------------- /packer/setup_ubuntu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PS4='studio-selfhosted:setup_ubuntu.sh: ' 4 | set -eux 5 | set -o pipefail 6 | 7 | # Add Helm Iterative Repository 8 | helm repo add iterative https://helm.iterative.ai 9 | -------------------------------------------------------------------------------- /packer/DEVELOPMENT.md: -------------------------------------------------------------------------------- 1 | # Development 2 | To build the AMI and publish it to the AWS marketplace, use the following commands: 3 | 4 | ```bash 5 | packer init studio_ami.pkr.hcl 6 | packer build studio_ami.pkr.hcl 7 | ``` 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Studio Selfhosted 2 | 3 | This repository contains internal Packer scripts for building Studio Self-hosted 4 | 5 | If you would like to install Studio Self-hosted, see https://dvc.org/doc/studio/self-hosting/installation/aws-ami 6 | -------------------------------------------------------------------------------- /packer/kh-klipper.dockerfile: -------------------------------------------------------------------------------- 1 | FROM rancher/klipper-helm:v0.7.7-build20230403 2 | 3 | RUN helm_v3 repo add ingress-nginx https://kubernetes.github.io/ingress-nginx && \ 4 | helm_v3 pull ingress-nginx/ingress-nginx --version 4.6.0 && \ 5 | tar -zxvf ingress-nginx-4.6.0.tgz 6 | -------------------------------------------------------------------------------- /packer/README.md: -------------------------------------------------------------------------------- 1 | # Studio Selfhosted AMI 2 | 3 | This AMI contains 
K3s along with a preconfigured ingress-nginx reverse proxy. 4 | 5 | ## Documentation 6 | 7 | Documentation about installing AMI is available under dvc.org [docs](https://dvc.org/doc/studio/selfhosted/installation/aws-ami). 8 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": ["config:recommended"], 4 | "schedule": ["before 12am on Wednesday"], 5 | "assigneesFromCodeOwners": true, 6 | "labels": ["dependencies"], 7 | "rebaseWhen": "conflicted", 8 | "configMigration": true 9 | } 10 | -------------------------------------------------------------------------------- /packer/user-data: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | autoinstall: 3 | version: 1 4 | early-commands: 5 | - ping -c 5 1.1.1.1 6 | late-commands: 7 | - ping -c 5 1.1.1.1 8 | locale: en_US 9 | timezone: UTC 10 | storage: 11 | swap: 12 | size: 0 13 | layout: 14 | name: direct 15 | identity: 16 | hostname: studio-selfhosted 17 | username: ubuntu 18 | password: $6$O530CsnyO/PNQqqY$yRofSd/TbMr7vFEb72mSkSZSKS2OmnbGwQ2Hk4nE9Q3bOGc5NO1gkVOdvAgRVR5ewawHaR/rdCsgR35TAZqk90 19 | ssh: 20 | install-server: true 21 | allow-pw: yes 22 | user-data: 23 | disable_root: false 24 | -------------------------------------------------------------------------------- /packer/install_virtualbox.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | uptime 3 | export DEBIAN_FRONTEND=noninteractive 4 | 5 | #Download 6 | curl https://www.virtualbox.org/download/oracle_vbox_2016.asc | gpg --dearmor > oracle_vbox_2016.gpg 7 | curl https://www.virtualbox.org/download/oracle_vbox.asc | gpg --dearmor > oracle_vbox.gpg 8 | 9 | #Install on system 10 | sudo install -o root -g root -m 644 oracle_vbox_2016.gpg 
/etc/apt/trusted.gpg.d/ 11 | sudo install -o root -g root -m 644 oracle_vbox.gpg /etc/apt/trusted.gpg.d/ 12 | 13 | echo "deb [arch=amd64] http://download.virtualbox.org/virtualbox/debian $(lsb_release -sc) contrib" | sudo tee /etc/apt/sources.list.d/virtualbox.list 14 | 15 | sudo apt-get update 16 | sudo apt-get install linux-headers-$(uname -r) dkms -y 17 | sudo apt-get install virtualbox-7.0 -y 18 | 19 | cd ~/ 20 | VER=$(curl -s https://download.virtualbox.org/virtualbox/LATEST.TXT) 21 | wget https://download.virtualbox.org/virtualbox/$VER/Oracle_VM_VirtualBox_Extension_Pack-$VER.vbox-extpack 22 | 23 | sudo VBoxManage extpack install Oracle_VM_VirtualBox_Extension_Pack-*.vbox-extpack --accept-license=33d7284dc4a0ece381196fda3cfe2ed0e1e8e7ed7f27b9a9ebc4ee22e24bd23c 24 | 25 | curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add - 26 | sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" -y 27 | sudo apt-get update && sudo apt-get install packer -y 28 | 29 | sudo apt-get install -y unzip 30 | 31 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" 32 | unzip awscliv2.zip 33 | sudo ./aws/install -i /usr/local/aws-cli -b /usr/local/bin 34 | aws --version 35 | 36 | git clone https://github.com/iterative/studio-selfhosted.git 37 | -------------------------------------------------------------------------------- /.github/workflows/build_virtualbox_builder_ami.yml: -------------------------------------------------------------------------------- 1 | name: Build Virtualbox builder AMI 2 | on: 3 | workflow_dispatch: 4 | pull_request: 5 | branches: [ "main" ] 6 | paths: 7 | - packer/virtualbox_ami_builder.pkr.hcl 8 | - packer/install_virtualbox.sh 9 | - .github/workflows/build_virtualbox_ami.yml 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.ref }} # Cancel in-progress jobs or runs for current workflow 12 | permissions: 13 | contents: read 14 | id-token: write 15 | 
packages: write 16 | jobs: 17 | build-virtualbox-ami-builder: 18 | environment: aws 19 | runs-on: ubuntu-latest 20 | defaults: 21 | run: 22 | working-directory: packer 23 | steps: 24 | - uses: actions/checkout@v4 25 | - uses: aws-actions/configure-aws-credentials@v4 26 | with: 27 | aws-region: us-west-1 28 | role-to-assume: arn:aws:iam::260760892802:role/studio-selfhosted-packer 29 | role-duration-seconds: 3600 30 | 31 | - name: Setup `packer` 32 | uses: hashicorp/setup-packer@main 33 | id: setup 34 | with: 35 | version: "1.10.3" 36 | 37 | - name: Run `packer init` 38 | id: init 39 | run: "packer init ./virtualbox_ami_builder.pkr.hcl" 40 | 41 | - name: Run `packer fmt -diff` 42 | id: fmt 43 | run: "packer fmt -diff -recursive ." 44 | 45 | - name: Run `packer validate` 46 | id: validate 47 | run: "packer validate ./virtualbox_ami_builder.pkr.hcl" 48 | 49 | - name: Run `packer build` 50 | id: build 51 | run: "packer build ./virtualbox_ami_builder.pkr.hcl" 52 | env: 53 | PKR_VAR_skip_create_ami: ${{ github.event_name != 'pull_request' && 'false' || 'true' }} 54 | 55 | 56 | -------------------------------------------------------------------------------- /packer/create-support-bundle.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | NAMESPACE=studio 5 | LOG_DIR=/tmp/studio-support 6 | 7 | usage () { 8 | echo "Usage: $0 [OPTIONS]" 9 | echo 10 | echo "OPTIONS:" 11 | echo " --namespace " 12 | } 13 | 14 | while [ $# -ne 0 ]; do 15 | case $1 in 16 | --namespace) 17 | shift 1 18 | NAMESPACE=$1 19 | shift 1 20 | ;; 21 | -h|--help) 22 | usage 23 | exit 0 24 | ;; 25 | *) 26 | usage 27 | exit 0 28 | ;; 29 | esac 30 | done 31 | 32 | mkdir -p "$LOG_DIR" 33 | 34 | k8s_cm_hostname_to_url() { 35 | kubectl get configmap/studio --namespace $NAMESPACE -o=jsonpath="{.data.$1}" | sed -E 's#^(https?://)?([^:/?]+).*#\2#' 36 | } 37 | 38 | get_logs() { 39 | POD_NAME=$(kubectl get pods --namespace $NAMESPACE -l 
"app.kubernetes.io/name=studio-$1,app.kubernetes.io/instance=studio" -o jsonpath="{.items[0].metadata.name}") 40 | kubectl logs $POD_NAME --namespace $NAMESPACE 41 | } 42 | 43 | get_hostname() { 44 | nslookup "$1" 45 | dig "$1" 46 | } 47 | 48 | # Kubernetes 49 | get_logs backend > "$LOG_DIR/backend.txt" 50 | get_logs beat > "$LOG_DIR/beat.txt" 51 | get_logs ui > "$LOG_DIR/ui.txt" 52 | get_logs worker > "$LOG_DIR/worker.txt" 53 | 54 | # DNS resolution 55 | STUDIO_HOSTNAME=$(k8s_cm_hostname_to_url "UI_URL") 56 | get_hostname "$STUDIO_HOSTNAME" > "$LOG_DIR/dns_studio.txt" 57 | 58 | GITLAB_HOSTNAME=$(k8s_cm_hostname_to_url "GITLAB_URL") 59 | if [ -n "$GITLAB_HOSTNAME" ]; then 60 | get_hostname "$GITLAB_HOSTNAME" > "$LOG_DIR/dns_gitlab.txt" 61 | fi 62 | 63 | GITHUB_HOSTNAME=$(k8s_cm_hostname_to_url "GITHUB_URL") 64 | if [ -n "$GITHUB_HOSTNAME" ]; then 65 | get_hostname "$GITHUB_HOSTNAME" > "$LOG_DIR/dns_github.txt" 66 | fi 67 | 68 | BITBUCKET_HOSTNAME=$(k8s_cm_hostname_to_url "BITBUCKET_URL") 69 | if [ -n "$BITBUCKET_HOSTNAME" ]; then 70 | get_hostname "$BITBUCKET_HOSTNAME" > "$LOG_DIR/dns_bitbucket.txt" 71 | fi 72 | 73 | tar -zcvf /tmp/studio-support.tar.gz "$LOG_DIR" 74 | rm -rf "$LOG_DIR" 75 | -------------------------------------------------------------------------------- /packer/virtualbox_ami_builder.pkr.hcl: -------------------------------------------------------------------------------- 1 | packer { 2 | required_plugins { 3 | amazon = { 4 | version = ">= 1.0.0" 5 | source = "github.com/hashicorp/amazon" 6 | } 7 | } 8 | } 9 | 10 | variables { 11 | image_name = "studio-virtualbox-builder" 12 | image_description = "Studio Virtualbox Builder - {{isotime `2006-01-02`}}" 13 | aws_build_region = "us-west-1" 14 | aws_build_instance = "c6a.large" 15 | aws_build_ubuntu_image = "*ubuntu-*-22.04-amd64-server-*" 16 | skip_create_ami = false 17 | } 18 | 19 | locals { 20 | aws_tags = { 21 | ManagedBy = "packer" 22 | Name = var.image_name 23 | Environment = "prod" 24 | 
BuildDate = "{{isotime `2006-01-02`}}" 25 | } 26 | 27 | } 28 | 29 | data "amazon-ami" "ubuntu" { 30 | region = var.aws_build_region 31 | owners = ["099720109477"] 32 | most_recent = true 33 | 34 | filters = { 35 | name = "ubuntu/images/${var.aws_build_ubuntu_image}" 36 | root-device-type = "ebs" 37 | virtualization-type = "hvm" 38 | } 39 | } 40 | 41 | source "amazon-ebs" "source" { 42 | ami_groups = ["all"] 43 | ami_name = var.skip_create_ami ? "studio-virtualbox-builder {{isotime `2006-01-02_15-04-05`}}" : var.image_name 44 | ami_description = var.image_description 45 | ami_regions = ["us-west-1"] 46 | skip_create_ami = var.skip_create_ami 47 | 48 | region = var.aws_build_region 49 | spot_price = "0.2" 50 | spot_instance_types = [var.aws_build_instance] 51 | # instance_type = var.aws_build_instance 52 | 53 | source_ami = data.amazon-ami.ubuntu.id 54 | ssh_username = "ubuntu" 55 | 56 | force_delete_snapshot = !var.skip_create_ami 57 | force_deregister = !var.skip_create_ami 58 | 59 | tags = local.aws_tags 60 | run_tags = local.aws_tags 61 | run_volume_tags = local.aws_tags 62 | 63 | temporary_security_group_source_public_ip = true 64 | } 65 | 66 | build { 67 | sources = ["source.amazon-ebs.source"] 68 | 69 | provisioner "shell" { 70 | inline = [ 71 | "mkdir /home/ubuntu/.studio_install", 72 | ] 73 | } 74 | 75 | provisioner "file" { 76 | source = "install_virtualbox.sh" 77 | destination = "/home/ubuntu/install_virtualbox.sh" 78 | } 79 | 80 | provisioner "shell" { 81 | inline = ["/usr/bin/cloud-init status --wait"] 82 | } 83 | 84 | provisioner "shell" { 85 | binary = false 86 | execute_command = "{{ .Vars }} sudo -E -S '{{ .Path }}'" 87 | expect_disconnect = true 88 | inline = [ 89 | "apt-get update", 90 | "apt-get --yes dist-upgrade", 91 | "apt-get clean", 92 | "apt-get install --yes ntp", 93 | ] 94 | inline_shebang = "/bin/sh -e" 95 | skip_clean = false 96 | start_retry_timeout = "5m" 97 | } 98 | 99 | # Install script running as 'root' 100 | provisioner "shell" { 
101 | inline = ["sudo reboot"] 102 | start_retry_timeout = "5m" 103 | expect_disconnect = true 104 | } 105 | 106 | # Install script running as 'root' 107 | provisioner "shell" { 108 | inline = ["sudo bash /home/ubuntu/install_virtualbox.sh"] 109 | start_retry_timeout = "5m" 110 | expect_disconnect = true 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /packer/studio_ami.pkr.hcl: -------------------------------------------------------------------------------- 1 | packer { 2 | required_plugins { 3 | amazon = { 4 | version = ">= 1.3.2" 5 | source = "github.com/hashicorp/amazon" 6 | } 7 | } 8 | } 9 | 10 | variables { 11 | image_name = "studio-selfhosted" 12 | image_description = "Iterative Studio Selfhosted - {{isotime `2006-01-02`}}" 13 | aws_build_region = "us-east-1" 14 | aws_build_instance = "c6a.large" 15 | aws_build_ubuntu_image = "*ubuntu-*-22.04-amd64-server-*" 16 | skip_create_ami = true 17 | kh_klipper_tag = "latest" 18 | } 19 | 20 | locals { 21 | aws_tags = { 22 | ManagedBy = "packer" 23 | Name = var.image_name 24 | Environment = "prod" 25 | BuildDate = "{{isotime `2006-01-02`}}" 26 | } 27 | 28 | aws_release_regions = [ 29 | "af-south-1", 30 | "ap-east-1", 31 | "ap-northeast-1", 32 | "ap-northeast-2", 33 | "ap-northeast-3", 34 | "ap-south-1", 35 | "ap-south-2", 36 | "ap-southeast-1", 37 | "ap-southeast-2", 38 | "ap-southeast-3", 39 | "ap-southeast-4", 40 | "ca-central-1", 41 | "eu-central-1", 42 | "eu-central-2", 43 | "eu-north-1", 44 | "eu-south-1", 45 | "eu-south-2", 46 | "eu-west-1", 47 | "eu-west-2", 48 | "eu-west-3", 49 | "me-central-1", 50 | "me-south-1", 51 | "sa-east-1", 52 | "us-east-1", 53 | "us-east-2", 54 | "us-west-1", 55 | "us-west-2", 56 | ] 57 | } 58 | 59 | data "amazon-ami" "ubuntu" { 60 | region = var.aws_build_region 61 | owners = ["099720109477"] 62 | most_recent = true 63 | 64 | filters = { 65 | name = "ubuntu/images/${var.aws_build_ubuntu_image}" 66 | root-device-type = "ebs" 67 | 
virtualization-type = "hvm" 68 | } 69 | 70 | # assume_role { 71 | # role_arn = var.aws_role_arn 72 | # session_name = var.aws_role_session_name 73 | # } 74 | } 75 | 76 | source "amazon-ebs" "source" { 77 | ami_groups = ["all"] 78 | ami_name = var.skip_create_ami ? "studio-selfhosted {{isotime `2006-01-02_15-04-05`}}" : var.image_name 79 | ami_description = var.image_description 80 | ami_regions = local.aws_release_regions 81 | skip_create_ami = var.skip_create_ami 82 | 83 | region = var.aws_build_region 84 | spot_price = "0.2" 85 | spot_instance_types = [var.aws_build_instance] 86 | # instance_type = var.aws_build_instance 87 | 88 | source_ami = data.amazon-ami.ubuntu.id 89 | ssh_username = "ubuntu" 90 | 91 | force_delete_snapshot = !var.skip_create_ami 92 | # force_deregister = !var.skip_create_ami 93 | 94 | tags = local.aws_tags 95 | run_tags = local.aws_tags 96 | run_volume_tags = local.aws_tags 97 | 98 | temporary_security_group_source_public_ip = true 99 | } 100 | 101 | build { 102 | sources = ["source.amazon-ebs.source"] 103 | 104 | provisioner "shell" { 105 | inline = [ 106 | "mkdir /home/ubuntu/.studio_install", 107 | ] 108 | } 109 | 110 | provisioner "file" { 111 | source = "k3s.sh" 112 | destination = "/home/ubuntu/.studio_install/k3s.sh" 113 | } 114 | 115 | provisioner "file" { 116 | source = "helm3.sh" 117 | destination = "/home/ubuntu/.studio_install/helm3.sh" 118 | } 119 | 120 | provisioner "file" { 121 | source = "create-support-bundle.sh" 122 | destination = "/home/ubuntu/.studio_install/create-support-bundle" 123 | } 124 | 125 | provisioner "file" { 126 | destination = "/home/ubuntu/.studio_install/setup_root.sh" 127 | content = templatefile("setup_root.sh", { 128 | kh_klipper_tag = var.kh_klipper_tag 129 | }) 130 | } 131 | 132 | provisioner "file" { 133 | source = "setup_ubuntu.sh" 134 | destination = "/home/ubuntu/.studio_install/setup_ubuntu.sh" 135 | } 136 | 137 | provisioner "shell" { 138 | inline = ["/usr/bin/cloud-init status --wait"] 139 | 
} 140 | 141 | # Install script running as 'root' 142 | provisioner "shell" { 143 | inline = ["sudo bash /home/ubuntu/.studio_install/setup_root.sh"] 144 | } 145 | 146 | # Install script running as 'ubuntu' 147 | provisioner "shell" { 148 | inline = ["bash /home/ubuntu/.studio_install/setup_ubuntu.sh"] 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /packer/setup_root.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PS4='studio-selfhosted:setup_root.sh: ' 4 | set -eux 5 | set -o pipefail 6 | 7 | if [ "$EUID" -ne 0 ] 8 | then echo "Please run as root" 9 | exit 10 | fi 11 | 12 | export DEBIAN_FRONTEND=noninteractive 13 | 14 | # Install K3s - script uploaded with packer 15 | export K3S_VERSION="v1.29.4+k3s1" 16 | export K3S_KUBECONFIG_MODE="644" 17 | export INSTALL_K3S_VERSION="$K3S_VERSION" 18 | export INSTALL_K3S_SKIP_START="true" 19 | export INSTALL_K3S_EXEC="" 20 | export INSTALL_K3S_EXEC="$INSTALL_K3S_EXEC --disable=traefik" 21 | export INSTALL_K3S_EXEC="$INSTALL_K3S_EXEC --kubelet-arg kube-reserved=cpu=500m,memory=1Gi,ephemeral-storage=1Gi" 22 | export INSTALL_K3S_EXEC="$INSTALL_K3S_EXEC --kubelet-arg system-reserved=cpu=500m,memory=1Gi,ephemeral-storage=1Gi" 23 | export INSTALL_K3S_EXEC="$INSTALL_K3S_EXEC --kubelet-arg eviction-hard=memory.available<0.5Gi,nodefs.available<10%" 24 | sh /home/ubuntu/.studio_install/k3s.sh - 25 | echo KUBECONFIG="/etc/rancher/k3s/k3s.yaml" >> /etc/environment 26 | 27 | # Install k9s 28 | export K9S_VERSION=v0.27.3 29 | cd /tmp 30 | curl --silent -L https://github.com/derailed/k9s/releases/download/$K9S_VERSION/k9s_Linux_amd64.tar.gz -o /tmp/k9s_Linux_amd64.tar.gz 31 | echo "b0eb5fb0decedbee5b6bd415f72af8ce6135ffb8128f9709bc7adcd5cbfa690b k9s_Linux_amd64.tar.gz" > /tmp/k9s.sha256 32 | sha256sum -c /tmp/k9s.sha256 33 | tar -zxvf /tmp/k9s_Linux_amd64.tar.gz -C /tmp 34 | mv /tmp/k9s /usr/local/bin/ 35 | cd /root 36 | 37 | 
# Install Ingress Nginx 38 | mkdir -p /var/lib/rancher/k3s/server/manifests/ 39 | cat << YAML >> /var/lib/rancher/k3s/server/manifests/ingress-nginx.yaml 40 | apiVersion: v1 41 | kind: Namespace 42 | metadata: 43 | name: ingress-nginx 44 | --- 45 | apiVersion: helm.cattle.io/v1 46 | kind: HelmChart 47 | metadata: 48 | name: ingress-nginx 49 | namespace: kube-system 50 | spec: 51 | chart: /home/klipper-helm/ingress-nginx-4.6.0.tgz 52 | jobImage: ghcr.io/iterative/studio-selfhosted/kh-klipper-cache:${kh_klipper_tag} 53 | targetNamespace: ingress-nginx 54 | --- 55 | apiVersion: helm.cattle.io/v1 56 | kind: HelmChartConfig 57 | metadata: 58 | name: ingress-nginx 59 | namespace: kube-system 60 | spec: 61 | valuesContent: |- 62 | controller: 63 | watchIngressWithoutClass: true 64 | image: 65 | digest: "" 66 | admissionWebhooks: 67 | patch: 68 | image: 69 | digest: "" 70 | YAML 71 | 72 | cat << YAML >> /var/lib/rancher/k3s/server/manifests/studio.yaml 73 | apiVersion: v1 74 | kind: Namespace 75 | metadata: 76 | name: studio 77 | YAML 78 | 79 | # Install Helm - script uploaded with packer 80 | bash /home/ubuntu/.studio_install/helm3.sh 81 | 82 | # Add Helm Iterative Repository 83 | helm repo add iterative https://helm.iterative.ai 84 | 85 | # Copy the support bundle script 86 | cp /home/ubuntu/.studio_install/create-support-bundle /usr/local/bin/create-support-bundle 87 | chmod +x /usr/local/bin/create-support-bundle 88 | 89 | # Cache Images in K3s 90 | 91 | ## Air-Gap Install https://docs.k3s.io/installation/airgap#prepare-the-images-directory-and-k3s-binary 92 | mkdir -p /var/lib/rancher/k3s/agent/images/ 93 | curl "https://github.com/k3s-io/k3s/releases/download/$K3S_VERSION/k3s-airgap-images-amd64.tar" -L -o /var/lib/rancher/k3s/agent/images/k3s-airgap-images-amd64.tar 94 | 95 | ## Download Docker and fetch ingress-nginx related images 96 | curl -fsSL https://get.docker.com -o get-docker.sh 97 | sh get-docker.sh 98 | 99 | docker pull -q 
ghcr.io/iterative/studio-selfhosted/kh-klipper-cache:${kh_klipper_tag} 100 | docker save ghcr.io/iterative/studio-selfhosted/kh-klipper-cache:${kh_klipper_tag} -o kh-klipper.tar 101 | 102 | docker pull -q registry.k8s.io/ingress-nginx/controller:v1.7.0 103 | docker save registry.k8s.io/ingress-nginx/controller:v1.7.0 -o ingress-nginx-controller.tar 104 | 105 | docker pull -q registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230312-helm-chart-4.5.2-28-g66a760794 106 | docker save registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230312-helm-chart-4.5.2-28-g66a760794 -o ingress-nginx-controller-kube-webhook-certgen.tar 107 | 108 | mv *.tar /var/lib/rancher/k3s/agent/images/ 109 | 110 | ## Cleanup docker 111 | apt-get purge docker* -y 112 | rm -rf get-docker.sh /var/lib/docker/ 113 | apt-get autoremove -y 114 | apt-get clean -y 115 | -------------------------------------------------------------------------------- /packer/studio_virtualbox.pkr.hcl: -------------------------------------------------------------------------------- 1 | packer { 2 | required_version = ">= 1.7.0, < 2.0.0" 3 | 4 | required_plugins { 5 | virtualbox = { 6 | source = "github.com/hashicorp/virtualbox" 7 | version = ">= 1.0.0, < 2.0.0" 8 | } 9 | } 10 | } 11 | 12 | variables { 13 | cpus = 4 14 | memory = 8192 15 | disk_size = "50000" 16 | headless = false 17 | iso_path_external = "https://releases.ubuntu.com/releases/jammy" 18 | iso_file = "ubuntu-22.04.4-live-server-amd64.iso" 19 | iso_checksum = "sha256:45f873de9f8cb637345d6e66a583762730bbea30277ef7b32c9c3bd6700a32b2" 20 | keep_registered = false 21 | packer_cache_dir = "${env("PACKER_CACHE_DIR")}" 22 | skip_export = false 23 | ssh_username = "ubuntu" 24 | ssh_password = "ubuntu" 25 | ssh_port = "22" 26 | vm_name = "{{isotime `2006-01-02_15-04`}}_studio-selfhosted" 27 | kh_klipper_tag = "latest" 28 | } 29 | 30 | 31 | # The "legacy_isotime" function has been provided for backwards compatability, 32 | # but we recommend switching to the 
timestamp and formatdate functions. 33 | locals { 34 | output_directory = "build/" 35 | } 36 | 37 | source "virtualbox-iso" "vbox" { 38 | boot_command = [ 39 | "", 40 | "c", 41 | "set gfxpayload=keep ", 42 | "linux /casper/vmlinuz ", 43 | "autoinstall ds='nocloud-net;s=http://{{ .HTTPIP }}:{{.HTTPPort}}/' --- ", 44 | "initrd /casper/initrd ", 45 | "boot" 46 | ] 47 | boot_wait = "1s" 48 | bundle_iso = false 49 | cpus = var.cpus 50 | disk_size = var.disk_size 51 | format = "ova" 52 | guest_additions_mode = "disable" 53 | guest_os_type = "Ubuntu_64" 54 | # hard_drive_discard = false 55 | # hard_drive_interface = "sata" 56 | # hard_drive_nonrotational = false 57 | headless = var.headless 58 | host_port_min = 2222 59 | host_port_max = 4444 60 | http_directory = "./" 61 | http_port_min = 8000 62 | http_port_max = 9000 63 | iso_checksum = var.iso_checksum 64 | # iso_interface = "sata" 65 | iso_target_extension = "iso" 66 | iso_target_path = "${regex_replace(var.packer_cache_dir, "^$", "/tmp")}/${var.iso_file}" 67 | iso_urls = [ 68 | "${var.iso_path_external}/${var.iso_file}" 69 | ] 70 | keep_registered = var.keep_registered 71 | memory = var.memory 72 | output_directory = local.output_directory 73 | post_shutdown_delay = "0s" 74 | shutdown_command = "echo '${var.ssh_password}' | sudo -E -S poweroff" 75 | shutdown_timeout = "10m" 76 | skip_export = var.skip_export 77 | skip_nat_mapping = false 78 | ssh_agent_auth = false 79 | ssh_clear_authorized_keys = true 80 | ssh_disable_agent_forwarding = false 81 | ssh_file_transfer_method = "scp" 82 | ssh_handshake_attempts = 100 83 | ssh_keep_alive_interval = "5s" 84 | ssh_username = var.ssh_username 85 | ssh_password = var.ssh_password 86 | ssh_port = var.ssh_port 87 | ssh_pty = false 88 | ssh_timeout = "30m" 89 | vboxmanage = [ 90 | ["modifyvm", "{{ .Name }}", "--rtc-use-utc", "on"], 91 | ["modifyvm", "{{ .Name }}", "--nat-localhostreachable1", "on"], 92 | ] 93 | virtualbox_version_file = "/tmp/.vbox_version" 94 | vm_name = 
var.vm_name 95 | vrdp_bind_address = "0.0.0.0" 96 | vrdp_port_min = 5900 97 | vrdp_port_max = 6000 98 | } 99 | 100 | build { 101 | 102 | sources = ["source.virtualbox-iso.vbox"] 103 | 104 | provisioner "shell" { 105 | binary = false 106 | execute_command = "echo '${var.ssh_password}' | {{ .Vars }} sudo -E -S '{{ .Path }}'" 107 | expect_disconnect = true 108 | inline = [ 109 | "cloud-init status --wait" 110 | ] 111 | inline_shebang = "/bin/sh -e" 112 | skip_clean = false 113 | start_retry_timeout = "5m" 114 | } 115 | 116 | provisioner "shell" { 117 | binary = false 118 | execute_command = "echo '${var.ssh_password}' | {{ .Vars }} sudo -E -S '{{ .Path }}'" 119 | expect_disconnect = true 120 | inline = [ 121 | "apt-get update", 122 | "apt-get --yes dist-upgrade", 123 | "apt-get clean", 124 | "apt-get install --yes ntp", 125 | "echo '${var.ssh_username} ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers" 126 | ] 127 | inline_shebang = "/bin/sh -e" 128 | skip_clean = false 129 | start_retry_timeout = "5m" 130 | } 131 | # Install script running as 'root' 132 | provisioner "shell" { 133 | inline = [ 134 | "mkdir /home/ubuntu/.studio_install", 135 | ] 136 | } 137 | 138 | provisioner "file" { 139 | source = "k3s.sh" 140 | destination = "/home/ubuntu/.studio_install/k3s.sh" 141 | } 142 | 143 | provisioner "file" { 144 | source = "helm3.sh" 145 | destination = "/home/ubuntu/.studio_install/helm3.sh" 146 | } 147 | 148 | provisioner "file" { 149 | source = "create-support-bundle.sh" 150 | destination = "/home/ubuntu/.studio_install/create-support-bundle" 151 | } 152 | 153 | provisioner "file" { 154 | destination = "/home/ubuntu/.studio_install/setup_root.sh" 155 | content = templatefile("setup_root.sh", { 156 | kh_klipper_tag = var.kh_klipper_tag 157 | }) 158 | } 159 | 160 | provisioner "file" { 161 | source = "setup_ubuntu.sh" 162 | destination = "/home/ubuntu/.studio_install/setup_ubuntu.sh" 163 | } 164 | 165 | provisioner "shell" { 166 | inline = ["/usr/bin/cloud-init status --wait"] 
167 | } 168 | 169 | # Install script running as 'root' 170 | provisioner "shell" { 171 | inline = ["sudo bash /home/ubuntu/.studio_install/setup_root.sh"] 172 | } 173 | 174 | # Install script running as 'ubuntu' 175 | provisioner "shell" { 176 | inline = ["bash /home/ubuntu/.studio_install/setup_ubuntu.sh"] 177 | } 178 | 179 | } 180 | -------------------------------------------------------------------------------- /.github/workflows/build_studio_images.yml: -------------------------------------------------------------------------------- 1 | name: Build Studio Images 2 | on: 3 | schedule: 4 | - cron: '0 12 1 * *' # Once a month at 12:00 UTC 5 | workflow_dispatch: 6 | repository_dispatch: 7 | types: [ helm-release ] 8 | pull_request: 9 | branches: [ "main" ] 10 | paths: 11 | - packer/** 12 | - .github/workflows/build_studio_images.yml 13 | permissions: 14 | contents: read 15 | id-token: write 16 | packages: write 17 | jobs: 18 | build-kh-klipper-docker: 19 | runs-on: ubuntu-latest 20 | defaults: 21 | run: 22 | working-directory: packer 23 | permissions: 24 | contents: read 25 | packages: write 26 | # This is used to complete the identity challenge 27 | # with sigstore/fulcio when running outside of PRs. 
28 | id-token: write 29 | steps: 30 | - name: Checkout repository 31 | uses: actions/checkout@v4 32 | 33 | # Workaround: https://github.com/docker/build-push-action/issues/461 34 | - name: Setup Docker buildx 35 | uses: docker/setup-buildx-action@5138f76647652447004da686b2411557eaf65f33 36 | 37 | # Login against a Docker registry except on PR 38 | # https://github.com/docker/login-action 39 | - name: Log into registry ${{ env.REGISTRY }} 40 | uses: docker/login-action@70fccc794acd729b2b22dd6a326895f286447728 41 | with: 42 | registry: ghcr.io 43 | username: ${{ github.actor }} 44 | password: ${{ github.token }} 45 | 46 | # Build and push Docker image with Buildx (don't push on PR) 47 | # https://github.com/docker/build-push-action 48 | - name: Build and push Docker image 49 | id: build-and-push 50 | uses: docker/build-push-action@2a53c6ccda456d31fb62eedc658aae06e238b7bd 51 | with: 52 | context: . 53 | file: packer/kh-klipper.dockerfile 54 | push: true 55 | tags: 56 | ghcr.io/${{ github.repository }}/kh-klipper-cache:${{ github.event_name != 'pull_request' && 'latest' || github.event.pull_request.head.sha }} 57 | 58 | create-virtualbox-runner: 59 | environment: aws 60 | runs-on: ubuntu-latest 61 | steps: 62 | - uses: actions/checkout@v4 63 | - name: Standard CML setup 64 | uses: iterative/setup-cml@v1 65 | with: 66 | version: latest 67 | - uses: aws-actions/configure-aws-credentials@v4 68 | with: 69 | aws-region: us-west-1 70 | role-to-assume: arn:aws:iam::260760892802:role/studio-selfhosted-packer 71 | role-duration-seconds: 3600 72 | - name: Create Runner 73 | env: 74 | REPO_TOKEN: ${{ secrets.REPO_TOKEN }} 75 | run: | 76 | cml runner \ 77 | --name="studio-virtualbox-builder-${{ github.run_id }}" \ 78 | --cloud-metadata="actions_link=https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" \ 79 | --cloud-startup-script=$(echo 'curl https://github.com/${{ github.actor }}.keys >> /home/ubuntu/.ssh/authorized_keys' | base64 -w 0) \ 80 | --single 
\ 81 | --labels=studio-selfhosted-virtualbox-builder \ 82 | --cloud=aws \ 83 | --cloud-image="studio-virtualbox-builder" \ 84 | --cloud-region=us-west-1 \ 85 | --cloud-type=c5n.metal \ 86 | --cloud-aws-security-group=sg-0da3ac1cdc286dd4e \ 87 | --cloud-aws-subnet=subnet-01c5f8c843b5c7759 \ 88 | --cloud-hdd-size 100 89 | 90 | build-studio-aws-ami: 91 | needs: 92 | - build-kh-klipper-docker 93 | environment: aws 94 | runs-on: ubuntu-latest 95 | defaults: 96 | run: 97 | working-directory: packer 98 | steps: 99 | - uses: actions/checkout@v4 100 | - uses: aws-actions/configure-aws-credentials@v4 101 | with: 102 | aws-region: us-west-1 103 | role-to-assume: arn:aws:iam::260760892802:role/studio-selfhosted-packer 104 | role-duration-seconds: 3600 105 | 106 | - name: Setup `packer` 107 | uses: hashicorp/setup-packer@main 108 | id: setup 109 | with: 110 | version: "1.8.6" 111 | 112 | - name: Run `packer init` 113 | id: init 114 | run: "packer init ./studio_ami.pkr.hcl" 115 | 116 | - name: Run `packer fmt -diff` 117 | id: fmt 118 | run: "packer fmt -diff -recursive ." 
119 | 120 | - name: Run `packer validate` 121 | id: validate 122 | run: "packer validate ./studio_ami.pkr.hcl" 123 | 124 | - name: Run `packer build` 125 | id: build 126 | run: "packer build ./studio_ami.pkr.hcl" 127 | env: 128 | PKR_VAR_skip_create_ami: ${{ github.event_name != 'pull_request' && 'false' || 'true' }} 129 | PKR_VAR_kh_klipper_tag: ${{ github.event_name != 'pull_request' && 'latest' || github.event.pull_request.head.sha }} 130 | 131 | build-studio-virtualbox-image: 132 | needs: 133 | - create-virtualbox-runner 134 | - build-kh-klipper-docker 135 | runs-on: [studio-selfhosted-virtualbox-builder] 136 | environment: aws 137 | timeout-minutes: 60 138 | defaults: 139 | run: 140 | working-directory: packer 141 | steps: 142 | - uses: actions/checkout@v4 143 | - uses: aws-actions/configure-aws-credentials@v4 144 | with: 145 | aws-region: us-west-1 146 | role-to-assume: arn:aws:iam::260760892802:role/studio-selfhosted-packer 147 | role-duration-seconds: 3600 148 | 149 | - name: CML Version Test 150 | run: | 151 | which cml 152 | cml --version 153 | 154 | - name: Setup `packer` 155 | uses: hashicorp/setup-packer@main 156 | id: setup 157 | with: 158 | version: "1.8.6" 159 | 160 | - name: Run `packer init` 161 | id: init 162 | run: "packer init ./studio_virtualbox.pkr.hcl" 163 | 164 | - name: Run `packer fmt -diff` 165 | id: fmt 166 | run: "packer fmt -diff -recursive ." 
167 | 168 | - name: Run `packer validate` 169 | id: validate 170 | run: "packer validate ./studio_virtualbox.pkr.hcl" 171 | 172 | - name: Run `packer build` 173 | id: build 174 | run: "packer build ./studio_virtualbox.pkr.hcl" 175 | env: 176 | PKR_VAR_kh_klipper_tag: ${{ github.event_name != 'pull_request' && 'latest' || github.event.pull_request.head.sha }} 177 | PKR_VAR_headless: true 178 | 179 | - name: Zip artifacts 180 | id: compress 181 | run: gzip build/* 182 | 183 | - name: Upload VirtualBox image to S3 184 | id: upload 185 | if: ${{ github.event_name != 'pull_request' }} 186 | run: | 187 | aws s3 cp build/*.gz s3://iterative-studio-selfhosted/virtualbox/ --storage-class STANDARD_IA --region us-east-2 188 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2017-2023 Iterative, Inc. 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /packer/helm3.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright The Helm Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | # The install script is based off of the MIT-licensed script from glide, 18 | # the package manager for Go: https://github.com/Masterminds/glide.sh/blob/master/get 19 | 20 | : ${BINARY_NAME:="helm"} 21 | : ${USE_SUDO:="true"} 22 | : ${DEBUG:="false"} 23 | : ${VERIFY_CHECKSUM:="true"} 24 | : ${VERIFY_SIGNATURES:="false"} 25 | : ${HELM_INSTALL_DIR:="/usr/local/bin"} 26 | : ${GPG_PUBRING:="pubring.kbx"} 27 | 28 | HAS_CURL="$(type "curl" &> /dev/null && echo true || echo false)" 29 | HAS_WGET="$(type "wget" &> /dev/null && echo true || echo false)" 30 | HAS_OPENSSL="$(type "openssl" &> /dev/null && echo true || echo false)" 31 | HAS_GPG="$(type "gpg" &> /dev/null && echo true || echo false)" 32 | HAS_GIT="$(type "git" &> /dev/null && echo true || echo false)" 33 | 34 | # initArch discovers the architecture for this system. 35 | initArch() { 36 | ARCH=$(uname -m) 37 | case $ARCH in 38 | armv5*) ARCH="armv5";; 39 | armv6*) ARCH="armv6";; 40 | armv7*) ARCH="arm";; 41 | aarch64) ARCH="arm64";; 42 | x86) ARCH="386";; 43 | x86_64) ARCH="amd64";; 44 | i686) ARCH="386";; 45 | i386) ARCH="386";; 46 | esac 47 | } 48 | 49 | # initOS discovers the operating system for this system. 50 | initOS() { 51 | OS=$(echo `uname`|tr '[:upper:]' '[:lower:]') 52 | 53 | case "$OS" in 54 | # Minimalist GNU for Windows 55 | mingw*|cygwin*) OS='windows';; 56 | esac 57 | } 58 | 59 | # runs the given command as root (detects if we are root already) 60 | runAsRoot() { 61 | if [ $EUID -ne 0 -a "$USE_SUDO" = "true" ]; then 62 | sudo "${@}" 63 | else 64 | "${@}" 65 | fi 66 | } 67 | 68 | # verifySupported checks that the os/arch combination is supported for 69 | # binary builds, as well whether or not necessary tools are present. 70 | verifySupported() { 71 | local supported="darwin-amd64\ndarwin-arm64\nlinux-386\nlinux-amd64\nlinux-arm\nlinux-arm64\nlinux-ppc64le\nlinux-s390x\nwindows-amd64" 72 | if ! 
echo "${supported}" | grep -q "${OS}-${ARCH}"; then 73 | echo "No prebuilt binary for ${OS}-${ARCH}." 74 | echo "To build from source, go to https://github.com/helm/helm" 75 | exit 1 76 | fi 77 | 78 | if [ "${HAS_CURL}" != "true" ] && [ "${HAS_WGET}" != "true" ]; then 79 | echo "Either curl or wget is required" 80 | exit 1 81 | fi 82 | 83 | if [ "${VERIFY_CHECKSUM}" == "true" ] && [ "${HAS_OPENSSL}" != "true" ]; then 84 | echo "In order to verify checksum, openssl must first be installed." 85 | echo "Please install openssl or set VERIFY_CHECKSUM=false in your environment." 86 | exit 1 87 | fi 88 | 89 | if [ "${VERIFY_SIGNATURES}" == "true" ]; then 90 | if [ "${HAS_GPG}" != "true" ]; then 91 | echo "In order to verify signatures, gpg must first be installed." 92 | echo "Please install gpg or set VERIFY_SIGNATURES=false in your environment." 93 | exit 1 94 | fi 95 | if [ "${OS}" != "linux" ]; then 96 | echo "Signature verification is currently only supported on Linux." 97 | echo "Please set VERIFY_SIGNATURES=false or verify the signatures manually." 98 | exit 1 99 | fi 100 | fi 101 | 102 | if [ "${HAS_GIT}" != "true" ]; then 103 | echo "[WARNING] Could not find git. It is required for plugin installation." 104 | fi 105 | } 106 | 107 | # checkDesiredVersion checks if the desired version is available. 
108 | checkDesiredVersion() { 109 | if [ "x$DESIRED_VERSION" == "x" ]; then 110 | # Get tag from release URL 111 | local latest_release_url="https://github.com/helm/helm/releases" 112 | if [ "${HAS_CURL}" == "true" ]; then 113 | TAG=$(curl -Ls $latest_release_url | grep 'href="/helm/helm/releases/tag/v3.[0-9]*.[0-9]*\"' | sed -E 's/.*\/helm\/helm\/releases\/tag\/(v[0-9\.]+)".*/\1/g' | head -1) 114 | elif [ "${HAS_WGET}" == "true" ]; then 115 | TAG=$(wget $latest_release_url -O - 2>&1 | grep 'href="/helm/helm/releases/tag/v3.[0-9]*.[0-9]*\"' | sed -E 's/.*\/helm\/helm\/releases\/tag\/(v[0-9\.]+)".*/\1/g' | head -1) 116 | fi 117 | else 118 | TAG=$DESIRED_VERSION 119 | fi 120 | } 121 | 122 | # checkHelmInstalledVersion checks which version of helm is installed and 123 | # if it needs to be changed. 124 | checkHelmInstalledVersion() { 125 | if [[ -f "${HELM_INSTALL_DIR}/${BINARY_NAME}" ]]; then 126 | local version=$("${HELM_INSTALL_DIR}/${BINARY_NAME}" version --template="{{ .Version }}") 127 | if [[ "$version" == "$TAG" ]]; then 128 | echo "Helm ${version} is already ${DESIRED_VERSION:-latest}" 129 | return 0 130 | else 131 | echo "Helm ${TAG} is available. Changing from version ${version}." 132 | return 1 133 | fi 134 | else 135 | return 1 136 | fi 137 | } 138 | 139 | # downloadFile downloads the latest binary package and also the checksum 140 | # for that binary. 
141 | downloadFile() { 142 | HELM_DIST="helm-$TAG-$OS-$ARCH.tar.gz" 143 | DOWNLOAD_URL="https://get.helm.sh/$HELM_DIST" 144 | CHECKSUM_URL="$DOWNLOAD_URL.sha256" 145 | HELM_TMP_ROOT="$(mktemp -dt helm-installer-XXXXXX)" 146 | HELM_TMP_FILE="$HELM_TMP_ROOT/$HELM_DIST" 147 | HELM_SUM_FILE="$HELM_TMP_ROOT/$HELM_DIST.sha256" 148 | echo "Downloading $DOWNLOAD_URL" 149 | if [ "${HAS_CURL}" == "true" ]; then 150 | curl -SsL "$CHECKSUM_URL" -o "$HELM_SUM_FILE" 151 | curl -SsL "$DOWNLOAD_URL" -o "$HELM_TMP_FILE" 152 | elif [ "${HAS_WGET}" == "true" ]; then 153 | wget -q -O "$HELM_SUM_FILE" "$CHECKSUM_URL" 154 | wget -q -O "$HELM_TMP_FILE" "$DOWNLOAD_URL" 155 | fi 156 | } 157 | 158 | # verifyFile verifies the SHA256 checksum of the binary package 159 | # and the GPG signatures for both the package and checksum file 160 | # (depending on settings in environment). 161 | verifyFile() { 162 | if [ "${VERIFY_CHECKSUM}" == "true" ]; then 163 | verifyChecksum 164 | fi 165 | if [ "${VERIFY_SIGNATURES}" == "true" ]; then 166 | verifySignatures 167 | fi 168 | } 169 | 170 | # installFile installs the Helm binary. 171 | installFile() { 172 | HELM_TMP="$HELM_TMP_ROOT/$BINARY_NAME" 173 | mkdir -p "$HELM_TMP" 174 | tar xf "$HELM_TMP_FILE" -C "$HELM_TMP" 175 | HELM_TMP_BIN="$HELM_TMP/$OS-$ARCH/helm" 176 | echo "Preparing to install $BINARY_NAME into ${HELM_INSTALL_DIR}" 177 | runAsRoot cp "$HELM_TMP_BIN" "$HELM_INSTALL_DIR/$BINARY_NAME" 178 | echo "$BINARY_NAME installed into $HELM_INSTALL_DIR/$BINARY_NAME" 179 | } 180 | 181 | # verifyChecksum verifies the SHA256 checksum of the binary package. 182 | verifyChecksum() { 183 | printf "Verifying checksum... " 184 | local sum=$(openssl sha1 -sha256 ${HELM_TMP_FILE} | awk '{print $2}') 185 | local expected_sum=$(cat ${HELM_SUM_FILE}) 186 | if [ "$sum" != "$expected_sum" ]; then 187 | echo "SHA sum of ${HELM_TMP_FILE} does not match. Aborting." 188 | exit 1 189 | fi 190 | echo "Done." 
191 | } 192 | 193 | # verifySignatures obtains the latest KEYS file from GitHub main branch 194 | # as well as the signature .asc files from the specific GitHub release, 195 | # then verifies that the release artifacts were signed by a maintainer's key. 196 | verifySignatures() { 197 | printf "Verifying signatures... " 198 | local keys_filename="KEYS" 199 | local github_keys_url="https://raw.githubusercontent.com/helm/helm/main/${keys_filename}" 200 | if [ "${HAS_CURL}" == "true" ]; then 201 | curl -SsL "${github_keys_url}" -o "${HELM_TMP_ROOT}/${keys_filename}" 202 | elif [ "${HAS_WGET}" == "true" ]; then 203 | wget -q -O "${HELM_TMP_ROOT}/${keys_filename}" "${github_keys_url}" 204 | fi 205 | local gpg_keyring="${HELM_TMP_ROOT}/keyring.gpg" 206 | local gpg_homedir="${HELM_TMP_ROOT}/gnupg" 207 | mkdir -p -m 0700 "${gpg_homedir}" 208 | local gpg_stderr_device="/dev/null" 209 | if [ "${DEBUG}" == "true" ]; then 210 | gpg_stderr_device="/dev/stderr" 211 | fi 212 | gpg --batch --quiet --homedir="${gpg_homedir}" --import "${HELM_TMP_ROOT}/${keys_filename}" 2> "${gpg_stderr_device}" 213 | gpg --batch --no-default-keyring --keyring "${gpg_homedir}/${GPG_PUBRING}" --export > "${gpg_keyring}" 214 | local github_release_url="https://github.com/helm/helm/releases/download/${TAG}" 215 | if [ "${HAS_CURL}" == "true" ]; then 216 | curl -SsL "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" -o "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" 217 | curl -SsL "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" -o "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" 218 | elif [ "${HAS_WGET}" == "true" ]; then 219 | wget -q -O "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" 220 | wget -q -O "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" "${github_release_url}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" 221 | fi 222 | local error_text="If you think this 
might be a potential security issue," 223 | error_text="${error_text}\nplease see here: https://github.com/helm/community/blob/master/SECURITY.md" 224 | local num_goodlines_sha=$(gpg --verify --keyring="${gpg_keyring}" --status-fd=1 "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256.asc" 2> "${gpg_stderr_device}" | grep -c -E '^\[GNUPG:\] (GOODSIG|VALIDSIG)') 225 | if [[ ${num_goodlines_sha} -lt 2 ]]; then 226 | echo "Unable to verify the signature of helm-${TAG}-${OS}-${ARCH}.tar.gz.sha256!" 227 | echo -e "${error_text}" 228 | exit 1 229 | fi 230 | local num_goodlines_tar=$(gpg --verify --keyring="${gpg_keyring}" --status-fd=1 "${HELM_TMP_ROOT}/helm-${TAG}-${OS}-${ARCH}.tar.gz.asc" 2> "${gpg_stderr_device}" | grep -c -E '^\[GNUPG:\] (GOODSIG|VALIDSIG)') 231 | if [[ ${num_goodlines_tar} -lt 2 ]]; then 232 | echo "Unable to verify the signature of helm-${TAG}-${OS}-${ARCH}.tar.gz!" 233 | echo -e "${error_text}" 234 | exit 1 235 | fi 236 | echo "Done." 237 | } 238 | 239 | # fail_trap is executed if an error occurs. 240 | fail_trap() { 241 | result=$? 242 | if [ "$result" != "0" ]; then 243 | if [[ -n "$INPUT_ARGUMENTS" ]]; then 244 | echo "Failed to install $BINARY_NAME with the arguments provided: $INPUT_ARGUMENTS" 245 | help 246 | else 247 | echo "Failed to install $BINARY_NAME" 248 | fi 249 | echo -e "\tFor support, go to https://github.com/helm/helm." 250 | fi 251 | cleanup 252 | exit $result 253 | } 254 | 255 | # testVersion tests the installed client to make sure it is working. 256 | testVersion() { 257 | set +e 258 | HELM="$(command -v $BINARY_NAME)" 259 | if [ "$?" = "1" ]; then 260 | echo "$BINARY_NAME not found. Is $HELM_INSTALL_DIR on your "'$PATH?' 261 | exit 1 262 | fi 263 | set -e 264 | } 265 | 266 | # help provides possible cli installation arguments 267 | help () { 268 | echo "Accepted cli arguments are:" 269 | echo -e "\t[--help|-h ] ->> prints this help" 270 | echo -e "\t[--version|-v ] . 
When not defined it fetches the latest release from GitHub" 271 | echo -e "\te.g. --version v3.0.0 or -v canary" 272 | echo -e "\t[--no-sudo] ->> install without sudo" 273 | } 274 | 275 | # cleanup temporary files to avoid https://github.com/helm/helm/issues/2977 276 | cleanup() { 277 | if [[ -d "${HELM_TMP_ROOT:-}" ]]; then 278 | rm -rf "$HELM_TMP_ROOT" 279 | fi 280 | } 281 | 282 | # Execution 283 | 284 | #Stop execution on any error 285 | trap "fail_trap" EXIT 286 | set -e 287 | 288 | # Set debug if desired 289 | if [ "${DEBUG}" == "true" ]; then 290 | set -x 291 | fi 292 | 293 | # Parsing input arguments (if any) 294 | export INPUT_ARGUMENTS="${@}" 295 | set -u 296 | while [[ $# -gt 0 ]]; do 297 | case $1 in 298 | '--version'|-v) 299 | shift 300 | if [[ $# -ne 0 ]]; then 301 | export DESIRED_VERSION="${1}" 302 | else 303 | echo -e "Please provide the desired version. e.g. --version v3.0.0 or -v canary" 304 | exit 0 305 | fi 306 | ;; 307 | '--no-sudo') 308 | USE_SUDO="false" 309 | ;; 310 | '--help'|-h) 311 | help 312 | exit 0 313 | ;; 314 | *) exit 1 315 | ;; 316 | esac 317 | shift 318 | done 319 | set +u 320 | 321 | initArch 322 | initOS 323 | verifySupported 324 | checkDesiredVersion 325 | if ! checkHelmInstalledVersion; then 326 | downloadFile 327 | verifyFile 328 | installFile 329 | fi 330 | testVersion 331 | cleanup 332 | -------------------------------------------------------------------------------- /packer/k3s.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | set -o noglob 4 | 5 | # Usage: 6 | # curl ... | ENV_VAR=... sh - 7 | # or 8 | # ENV_VAR=... ./install.sh 9 | # 10 | # Example: 11 | # Installing a server without traefik: 12 | # curl ... | INSTALL_K3S_EXEC="--disable=traefik" sh - 13 | # Installing an agent to point at a server: 14 | # curl ... 
| K3S_TOKEN=xxx K3S_URL=https://server-url:6443 sh - 15 | # 16 | # Environment variables: 17 | # - K3S_* 18 | # Environment variables which begin with K3S_ will be preserved for the 19 | # systemd service to use. Setting K3S_URL without explicitly setting 20 | # a systemd exec command will default the command to "agent", and we 21 | # enforce that K3S_TOKEN is also set. 22 | # 23 | # - INSTALL_K3S_SKIP_DOWNLOAD 24 | # If set to true will not download k3s hash or binary. 25 | # 26 | # - INSTALL_K3S_FORCE_RESTART 27 | # If set to true will always restart the K3s service 28 | # 29 | # - INSTALL_K3S_SYMLINK 30 | # If set to 'skip' will not create symlinks, 'force' will overwrite, 31 | # default will symlink if command does not exist in path. 32 | # 33 | # - INSTALL_K3S_SKIP_ENABLE 34 | # If set to true will not enable or start k3s service. 35 | # 36 | # - INSTALL_K3S_SKIP_START 37 | # If set to true will not start k3s service. 38 | # 39 | # - INSTALL_K3S_VERSION 40 | # Version of k3s to download from github. Will attempt to download from the 41 | # stable channel if not specified. 42 | # 43 | # - INSTALL_K3S_COMMIT 44 | # Commit of k3s to download from temporary cloud storage. 45 | # * (for developer & QA use) 46 | # 47 | # - INSTALL_K3S_PR 48 | # PR build of k3s to download from Github Artifacts. 
49 | # * (for developer & QA use) 50 | # 51 | # - INSTALL_K3S_BIN_DIR 52 | # Directory to install k3s binary, links, and uninstall script to, or use 53 | # /usr/local/bin as the default 54 | # 55 | # - INSTALL_K3S_BIN_DIR_READ_ONLY 56 | # If set to true will not write files to INSTALL_K3S_BIN_DIR, forces 57 | # setting INSTALL_K3S_SKIP_DOWNLOAD=true 58 | # 59 | # - INSTALL_K3S_SYSTEMD_DIR 60 | # Directory to install systemd service and environment files to, or use 61 | # /etc/systemd/system as the default 62 | # 63 | # - INSTALL_K3S_EXEC or script arguments 64 | # Command with flags to use for launching k3s in the systemd service, if 65 | # the command is not specified will default to "agent" if K3S_URL is set 66 | # or "server" if not. The final systemd command resolves to a combination 67 | # of EXEC and script args ($@). 68 | # 69 | # The following commands result in the same behavior: 70 | # curl ... | INSTALL_K3S_EXEC="--disable=traefik" sh -s - 71 | # curl ... | INSTALL_K3S_EXEC="server --disable=traefik" sh -s - 72 | # curl ... | INSTALL_K3S_EXEC="server" sh -s - --disable=traefik 73 | # curl ... | sh -s - server --disable=traefik 74 | # curl ... | sh -s - --disable=traefik 75 | # 76 | # - INSTALL_K3S_NAME 77 | # Name of systemd service to create, will default from the k3s exec command 78 | # if not specified. If specified the name will be prefixed with 'k3s-'. 79 | # 80 | # - INSTALL_K3S_TYPE 81 | # Type of systemd service to create, will default from the k3s exec command 82 | # if not specified. 83 | # 84 | # - INSTALL_K3S_SELINUX_WARN 85 | # If set to true will continue if k3s-selinux policy is not found. 86 | # 87 | # - INSTALL_K3S_SKIP_SELINUX_RPM 88 | # If set to true will skip automatic installation of the k3s RPM. 89 | # 90 | # - INSTALL_K3S_CHANNEL_URL 91 | # Channel URL for fetching k3s download URL. 92 | # Defaults to 'https://update.k3s.io/v1-release/channels'. 
#
# - INSTALL_K3S_CHANNEL
#   Channel to use for fetching k3s download URL.
#   Defaults to 'stable'.

GITHUB_URL=https://github.com/k3s-io/k3s/releases
GITHUB_PR_URL=""
STORAGE_URL=https://k3s-ci-builds.s3.amazonaws.com
DOWNLOADER=

# --- helper functions for logs ---
info()
{
    echo '[INFO] ' "$@"
}
warn()
{
    echo '[WARN] ' "$@" >&2
}
fatal()
{
    echo '[ERROR] ' "$@" >&2
    exit 1
}

# --- fatal if no systemd or openrc ---
# Sets HAS_OPENRC or HAS_SYSTEMD; these globals steer service-file creation
# and enable/start logic later in the script.
verify_system() {
    if [ -x /sbin/openrc-run ]; then
        HAS_OPENRC=true
        return
    fi
    if [ -x /bin/systemctl ] || type systemctl > /dev/null 2>&1; then
        HAS_SYSTEMD=true
        return
    fi
    fatal 'Can not find systemd or openrc to use as a process supervisor for k3s'
}

# --- add quotes to command arguments ---
# Wraps each argument in single quotes, escaping embedded single quotes
# ('foo' -> \''foo'\'' style), one quoted argument per output line.
quote() {
    for arg in "$@"; do
        printf '%s\n' "$arg" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/'/"
    done
}

# --- add indentation and trailing slash to quoted args ---
# Produces a backslash-continued, tab-indented argument list suitable for
# embedding in the generated systemd/openrc service file.
quote_indent() {
    printf ' \\\n'
    for arg in "$@"; do
        printf '\t%s \\\n' "$(quote "$arg")"
    done
}

# --- escape most punctuation characters, except quotes, forward slash, and space ---
escape() {
    printf '%s' "$@" | sed -e 's/\([][!#$%&()*;<=>?\_`{|}]\)/\\\1/g;'
}

# --- escape double quotes ---
escape_dq() {
    printf '%s' "$@" | sed -e 's/"/\\"/g'
}

# --- ensures $K3S_URL is empty or begins with https://, exiting fatally otherwise ---
verify_k3s_url() {
    case "${K3S_URL}" in
        "")
            ;;
        https://*)
            ;;
        *)
            fatal "Only https:// URLs are supported for K3S_URL (have ${K3S_URL})"
            ;;
    esac
}

# --- define needed environment variables ---
# Globals written: CMD_K3S, CMD_K3S_EXEC, SYSTEM_NAME, SUDO, SYSTEMD_TYPE,
# BIN_DIR, SYSTEMD_DIR, SERVICE_K3S, UNINSTALL_K3S_SH, KILLALL_K3S_SH,
# FILE_K3S_SERVICE, FILE_K3S_ENV, PRE_INSTALL_HASHES,
# INSTALL_K3S_CHANNEL_URL, INSTALL_K3S_CHANNEL.
setup_env() {
    # --- use command args if passed or create default ---
    case "$1" in
        # --- if we only have flags discover if command should be server or agent ---
        (-*|"")
            if [ -z "${K3S_URL}" ]; then
                CMD_K3S=server
            else
                if [ -z "${K3S_TOKEN}" ] && [ -z "${K3S_TOKEN_FILE}" ]; then
                    fatal "Defaulted k3s exec command to 'agent' because K3S_URL is defined, but K3S_TOKEN or K3S_TOKEN_FILE is not defined."
                fi
                CMD_K3S=agent
            fi
        ;;
        # --- command is provided ---
        (*)
            CMD_K3S=$1
            shift
        ;;
    esac

    verify_k3s_url

    CMD_K3S_EXEC="${CMD_K3S}$(quote_indent "$@")"

    # --- use systemd name if defined or create default ---
    if [ -n "${INSTALL_K3S_NAME}" ]; then
        SYSTEM_NAME=k3s-${INSTALL_K3S_NAME}
    else
        if [ "${CMD_K3S}" = server ]; then
            SYSTEM_NAME=k3s
        else
            SYSTEM_NAME=k3s-${CMD_K3S}
        fi
    fi

    # --- check for invalid characters in system name ---
    # valid_chars replaces each disallowed character with '^'; the second sed
    # blanks everything else so invalid_chars acts as a caret "pointer" line
    # under SYSTEM_NAME in the fatal message.
    valid_chars=$(printf '%s' "${SYSTEM_NAME}" | sed -e 's/[][!#$%&()*;<=>?\_`{|}/[:space:]]/^/g;' )
    if [ "${SYSTEM_NAME}" != "${valid_chars}" ]; then
        invalid_chars=$(printf '%s' "${valid_chars}" | sed -e 's/[^^]/ /g')
        fatal "Invalid characters for system name:
            ${SYSTEM_NAME}
            ${invalid_chars}"
    fi

    # --- use sudo if we are not already root ---
    SUDO=sudo
    if [ $(id -u) -eq 0 ]; then
        SUDO=
    fi

    # --- use systemd type if defined or create default ---
    if [ -n "${INSTALL_K3S_TYPE}" ]; then
        SYSTEMD_TYPE=${INSTALL_K3S_TYPE}
    else
        SYSTEMD_TYPE=notify
    fi

    # --- use binary install directory if defined or create default ---
    if [ -n "${INSTALL_K3S_BIN_DIR}" ]; then
        BIN_DIR=${INSTALL_K3S_BIN_DIR}
    else
        # --- use /usr/local/bin if root can write to it, otherwise use /opt/bin if it exists
        BIN_DIR=/usr/local/bin
        if ! $SUDO sh -c "touch ${BIN_DIR}/k3s-ro-test && rm -rf ${BIN_DIR}/k3s-ro-test"; then
            if [ -d /opt/bin ]; then
                BIN_DIR=/opt/bin
            fi
        fi
    fi

    # --- use systemd directory if defined or create default ---
    if [ -n "${INSTALL_K3S_SYSTEMD_DIR}" ]; then
        SYSTEMD_DIR="${INSTALL_K3S_SYSTEMD_DIR}"
    else
        SYSTEMD_DIR=/etc/systemd/system
    fi

    # --- set related files from system name ---
    SERVICE_K3S=${SYSTEM_NAME}.service
    UNINSTALL_K3S_SH=${UNINSTALL_K3S_SH:-${BIN_DIR}/${SYSTEM_NAME}-uninstall.sh}
    KILLALL_K3S_SH=${KILLALL_K3S_SH:-${BIN_DIR}/k3s-killall.sh}

    # --- use service or environment location depending on systemd/openrc ---
    if [ "${HAS_SYSTEMD}" = true ]; then
        FILE_K3S_SERVICE=${SYSTEMD_DIR}/${SERVICE_K3S}
        FILE_K3S_ENV=${SYSTEMD_DIR}/${SERVICE_K3S}.env
    elif [ "${HAS_OPENRC}" = true ]; then
        $SUDO mkdir -p /etc/rancher/k3s
        FILE_K3S_SERVICE=/etc/init.d/${SYSTEM_NAME}
        FILE_K3S_ENV=/etc/rancher/k3s/${SYSTEM_NAME}.env
    fi

    # --- get hash of config & exec for currently installed k3s ---
    # Captured now so service_enable_and_start can later detect whether the
    # install actually changed anything before restarting the service.
    PRE_INSTALL_HASHES=$(get_installed_hashes)

    # --- if bin directory is read only skip download ---
    if [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ]; then
        INSTALL_K3S_SKIP_DOWNLOAD=true
    fi

    # --- setup channel values
    INSTALL_K3S_CHANNEL_URL=${INSTALL_K3S_CHANNEL_URL:-'https://update.k3s.io/v1-release/channels'}
    INSTALL_K3S_CHANNEL=${INSTALL_K3S_CHANNEL:-'stable'}
}

# --- check if skip download environment variable set ---
# Returns success when INSTALL_K3S_SKIP_DOWNLOAD is 'true' or 'binary'.
can_skip_download_binary() {
    if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ] && [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != binary ]; then
        return 1
    fi
}

# Returns success when INSTALL_K3S_SKIP_DOWNLOAD is 'true' or 'selinux'.
can_skip_download_selinux() {
    if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ] && [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != selinux ]; then
        return 1
    fi
}

# --- verify an executable k3s binary is installed ---
verify_k3s_is_executable() {
    if [ ! -x ${BIN_DIR}/k3s ]; then
        fatal "Executable k3s binary not found at ${BIN_DIR}/k3s"
    fi
}

# --- set arch and suffix, fatal if architecture not supported ---
# Sets ARCH (normalized) and SUFFIX (release-asset name suffix, empty for amd64).
setup_verify_arch() {
    if [ -z "$ARCH" ]; then
        ARCH=$(uname -m)
    fi
    case $ARCH in
        amd64)
            ARCH=amd64
            SUFFIX=
            ;;
        x86_64)
            ARCH=amd64
            SUFFIX=
            ;;
        arm64)
            ARCH=arm64
            SUFFIX=-${ARCH}
            ;;
        s390x)
            ARCH=s390x
            SUFFIX=-${ARCH}
            ;;
        aarch64)
            ARCH=arm64
            SUFFIX=-${ARCH}
            ;;
        arm*)
            ARCH=arm
            SUFFIX=-${ARCH}hf
            ;;
        *)
            fatal "Unsupported architecture $ARCH"
    esac
}

# --- verify existence of network downloader executable ---
verify_downloader() {
    # Return failure if it doesn't exist or is no executable
    [ -x "$(command -v $1)" ] || return 1

    # Set verified executable as our downloader program and return success
    DOWNLOADER=$1
    return 0
}

# --- create temporary directory and cleanup when done ---
# Sets TMP_DIR/TMP_HASH/TMP_ZIP/TMP_BIN and installs an INT/EXIT trap that
# removes the directory while preserving the original exit code.
setup_tmp() {
    TMP_DIR=$(mktemp -d -t k3s-install.XXXXXXXXXX)
    TMP_HASH=${TMP_DIR}/k3s.hash
    TMP_ZIP=${TMP_DIR}/k3s.zip
    TMP_BIN=${TMP_DIR}/k3s.bin
    cleanup() {
        code=$?
        set +e
        trap - EXIT
        rm -rf ${TMP_DIR}
        exit $code
    }
    trap cleanup INT EXIT
}

# --- use desired k3s version if defined or find version from channel ---
# Precedence: INSTALL_K3S_PR > INSTALL_K3S_COMMIT > INSTALL_K3S_VERSION >
# channel lookup (resolved by following the channel URL redirect).
get_release_version() {
    if [ -n "${INSTALL_K3S_PR}" ]; then
        VERSION_K3S="PR ${INSTALL_K3S_PR}"
        get_pr_artifact_url
    elif [ -n "${INSTALL_K3S_COMMIT}" ]; then
        VERSION_K3S="commit ${INSTALL_K3S_COMMIT}"
    elif [ -n "${INSTALL_K3S_VERSION}" ]; then
        VERSION_K3S=${INSTALL_K3S_VERSION}
    else
        info "Finding release for channel ${INSTALL_K3S_CHANNEL}"
        version_url="${INSTALL_K3S_CHANNEL_URL}/${INSTALL_K3S_CHANNEL}"
        case $DOWNLOADER in
            curl)
                # The channel server redirects to the release tag; the version is
                # the last path component of the effective URL.
                VERSION_K3S=$(curl -w '%{url_effective}' -L -s -S ${version_url} -o /dev/null | sed -e 's|.*/||')
                ;;
            wget)
                VERSION_K3S=$(wget -SqO /dev/null ${version_url} 2>&1 | grep -i Location | sed -e 's|.*/||')
                ;;
            *)
                fatal "Incorrect downloader executable '$DOWNLOADER'"
                ;;
        esac
    fi
    info "Using ${VERSION_K3S} as release"
}

# --- get k3s-selinux version ---
# Queries the GitHub releases API (3 attempts) for the newest k3s-selinux RPM
# matching rpm_target; falls back to a pinned default on failure.
# NOTE(review): '{1..3}' and '[ ... == ... ]' below are bashisms — confirm this
# script is always run under bash (or a compatible shell), not strict POSIX sh.
get_k3s_selinux_version() {
    available_version="k3s-selinux-1.2-2.${rpm_target}.noarch.rpm"
    info "Finding available k3s-selinux versions"

    # run verify_downloader in case it binary installation was skipped
    verify_downloader curl || verify_downloader wget || fatal 'Can not find curl or wget for downloading files'

    case $DOWNLOADER in
        curl)
            DOWNLOADER_OPTS="-s"
            ;;
        wget)
            DOWNLOADER_OPTS="-q -O -"
            ;;
        *)
            fatal "Incorrect downloader executable '$DOWNLOADER'"
            ;;
    esac
    for i in {1..3}; do
        set +e
        if [ "${rpm_channel}" = "testing" ]; then
            version=$(timeout 5 ${DOWNLOADER} ${DOWNLOADER_OPTS} https://api.github.com/repos/k3s-io/k3s-selinux/releases | grep browser_download_url | awk '{ print $2 }' | grep -oE "[^\/]+${rpm_target}\.noarch\.rpm" | head -n 1)
        else
            version=$(timeout 5 ${DOWNLOADER} ${DOWNLOADER_OPTS} https://api.github.com/repos/k3s-io/k3s-selinux/releases/latest | grep browser_download_url | awk '{ print $2 }' | grep -oE "[^\/]+${rpm_target}\.noarch\.rpm")
        fi
        set -e
        if [ "${version}" != "" ]; then
            break
        fi
        sleep 1
    done
    if [ "${version}" == "" ]; then
        warn "Failed to get available versions of k3s-selinux..defaulting to ${available_version}"
        return
    fi
    available_version=${version}
}

# --- download from github url ---
download() {
    [ $# -eq 2 ] || fatal 'download needs exactly 2 arguments'
    set +e
    case $DOWNLOADER in
        curl)
            curl -o $1 -sfL $2
            ;;
        wget)
            wget -qO $1 $2
            ;;
        *)
            fatal "Incorrect executable '$DOWNLOADER'"
            ;;
    esac

    # Abort if download command failed
    [ $? -eq 0 ] || fatal 'Download failed'
    set -e
}

# --- download hash from github url ---
# Leaves the expected sha256 for k3s${SUFFIX} in HASH_EXPECTED.
download_hash() {
    if [ -n "${INSTALL_K3S_PR}" ]; then
        info "Downloading hash ${GITHUB_PR_URL}"
        curl -o ${TMP_ZIP} -H "Authorization: Bearer $GITHUB_TOKEN" -L ${GITHUB_PR_URL}
        unzip -p ${TMP_ZIP} k3s.sha256sum > ${TMP_HASH}
    else
        if [ -n "${INSTALL_K3S_COMMIT}" ]; then
            HASH_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}.sha256sum
        else
            HASH_URL=${GITHUB_URL}/download/${VERSION_K3S}/sha256sum-${ARCH}.txt
        fi
        info "Downloading hash ${HASH_URL}"
        download ${TMP_HASH} ${HASH_URL}
    fi
    HASH_EXPECTED=$(grep " k3s${SUFFIX}$" ${TMP_HASH})
    HASH_EXPECTED=${HASH_EXPECTED%%[[:blank:]]*}
}

# --- check hash against installed version ---
installed_hash_matches() {
    if [ -x ${BIN_DIR}/k3s ]; then
        HASH_INSTALLED=$(sha256sum ${BIN_DIR}/k3s)
        HASH_INSTALLED=${HASH_INSTALLED%%[[:blank:]]*}
        if [ "${HASH_EXPECTED}" = "${HASH_INSTALLED}" ]; then
            return
        fi
    fi
    return 1
}

# Use the GitHub API to identify the artifact associated with a given PR
# Requires jq and GITHUB_TOKEN; sets GITHUB_PR_URL to the artifact download URL.
get_pr_artifact_url() {
    GITHUB_API_URL=https://api.github.com/repos/k3s-io/k3s

    # Check if jq is installed
    if ! [ -x "$(command -v jq)" ]; then
        echo "jq is required to use INSTALL_K3S_PR. Please install jq and try again"
        exit 1
    fi

    if [ -z "${GITHUB_TOKEN}" ]; then
        fatal "Installing PR builds requires GITHUB_TOKEN with k3s-io/k3s repo authorization"
    fi

    # GET request to the GitHub API to retrieve the latest commit SHA from the pull request
    COMMIT_ID=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" "$GITHUB_API_URL/pulls/$INSTALL_K3S_PR" | jq -r '.head.sha')

    # GET request to the GitHub API to retrieve the Build workflow associated with the commit
    wf_raw=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" "$GITHUB_API_URL/commits/$COMMIT_ID/check-runs")
    build_workflow=$(printf "%s" "$wf_raw" | jq -r '.check_runs[] | select(.name == "build / Build")')

    # Extract the Run ID from the build workflow and lookup artifacts associated with the run
    RUN_ID=$(echo "$build_workflow" | jq -r ' .details_url' | awk -F'/' '{print $(NF-2)}')

    # Extract the artifact ID for the "k3s" artifact
    artifacts=$(curl -s -H "Authorization: Bearer $GITHUB_TOKEN" "$GITHUB_API_URL/actions/runs/$RUN_ID/artifacts")
    artifacts_url=$(echo "$artifacts" | jq -r '.artifacts[] | select(.name == "k3s") | .archive_download_url')
    GITHUB_PR_URL=$artifacts_url
}

# --- download binary from github url ---
download_binary() {
    if [ -n "${INSTALL_K3S_PR}" ]; then
        # Since Binary and Hash are zipped together, check if TMP_ZIP already exists
        if ! [ -f ${TMP_ZIP} ]; then
            info "Downloading K3s artifact ${GITHUB_PR_URL}"
            curl -o ${TMP_ZIP} -H "Authorization: Bearer $GITHUB_TOKEN" -L ${GITHUB_PR_URL}
        fi
        # extract k3s binary from zip
        unzip -p ${TMP_ZIP} k3s > ${TMP_BIN}
        return
    elif [ -n "${INSTALL_K3S_COMMIT}" ]; then
        BIN_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}
    else
        BIN_URL=${GITHUB_URL}/download/${VERSION_K3S}/k3s${SUFFIX}
    fi
    info "Downloading binary ${BIN_URL}"
    download ${TMP_BIN} ${BIN_URL}
}

# --- verify downloaded binary hash ---
verify_binary() {
    info "Verifying binary download"
    HASH_BIN=$(sha256sum ${TMP_BIN})
    HASH_BIN=${HASH_BIN%%[[:blank:]]*}
    if [ "${HASH_EXPECTED}" != "${HASH_BIN}" ]; then
        fatal "Download sha256 does not match ${HASH_EXPECTED}, got ${HASH_BIN}"
    fi
}

# --- setup permissions and move binary to system directory ---
setup_binary() {
    chmod 755 ${TMP_BIN}
    info "Installing k3s to ${BIN_DIR}/k3s"
    $SUDO chown root:root ${TMP_BIN}
    $SUDO mv -f ${TMP_BIN} ${BIN_DIR}/k3s
}

# --- setup selinux policy ---
# Detects the distro family from /etc/os-release, picks the matching RPM repo
# target/channel, installs the k3s-selinux policy, and labels the k3s binary.
setup_selinux() {
    case ${INSTALL_K3S_CHANNEL} in
        *testing)
            rpm_channel=testing
            ;;
        *latest)
            rpm_channel=latest
            ;;
        *)
            rpm_channel=stable
            ;;
    esac

    rpm_site="rpm.rancher.io"
    if [ "${rpm_channel}" = "testing" ]; then
        rpm_site="rpm-testing.rancher.io"
    fi

    # Sourcing os-release populates ID, ID_LIKE, VARIANT_ID, VERSION_ID.
    [ -r /etc/os-release ] && . /etc/os-release
    if [ `expr "${ID_LIKE}" : ".*suse.*"` != 0 ]; then
        rpm_target=sle
        rpm_site_infix=microos
        package_installer=zypper
        if [ "${ID_LIKE:-}" = suse ] && ( [ "${VARIANT_ID:-}" = sle-micro ] || [ "${ID:-}" = sle-micro ] ); then
            rpm_target=sle
            rpm_site_infix=slemicro
            package_installer=zypper
        fi
    elif [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ]; then
        rpm_target=coreos
        rpm_site_infix=coreos
        package_installer=rpm-ostree
    elif [ "${VERSION_ID%%.*}" = "7" ]; then
        rpm_target=el7
        rpm_site_infix=centos/7
        package_installer=yum
    elif [ "${VERSION_ID%%.*}" = "8" ] || [ "${VERSION_ID%%.*}" -gt "36" ]; then
        # VERSION_ID > 36 covers recent Fedora releases, which use el8 packages.
        rpm_target=el8
        rpm_site_infix=centos/8
        package_installer=yum
    else
        rpm_target=el9
        rpm_site_infix=centos/9
        package_installer=yum
    fi

    if [ "${package_installer}" = "rpm-ostree" ] && [ -x /bin/yum ]; then
        package_installer=yum
    fi

    if [ "${package_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then
        package_installer=dnf
    fi

    policy_hint="please install:
    ${package_installer} install -y container-selinux
    ${package_installer} install -y https://${rpm_site}/k3s/${rpm_channel}/common/${rpm_site_infix}/noarch/${available_version}
"

    if [ "$INSTALL_K3S_SKIP_SELINUX_RPM" = true ] || can_skip_download_selinux || [ ! -d /usr/share/selinux ]; then
        info "Skipping installation of SELinux RPM"
        return
    fi

    get_k3s_selinux_version
    install_selinux_rpm ${rpm_site} ${rpm_channel} ${rpm_target} ${rpm_site_infix}

    # Policy failures are fatal unless explicitly downgraded to warnings
    # (INSTALL_K3S_SELINUX_WARN) or on CoreOS-like systems.
    policy_error=fatal
    if [ "$INSTALL_K3S_SELINUX_WARN" = true ] || [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ]; then
        policy_error=warn
    fi

    if ! $SUDO chcon -u system_u -r object_r -t container_runtime_exec_t ${BIN_DIR}/k3s >/dev/null 2>&1; then
        if $SUDO grep '^\s*SELINUX=enforcing' /etc/selinux/config >/dev/null 2>&1; then
            $policy_error "Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, ${policy_hint}"
        fi
    elif [ ! -f /usr/share/selinux/packages/k3s.pp ]; then
        if [ -x /usr/sbin/transactional-update ] || [ "${ID_LIKE:-}" = coreos ] || [ "${VARIANT_ID:-}" = coreos ]; then
            warn "Please reboot your machine to activate the changes and avoid data loss."
        else
            $policy_error "Failed to find the k3s-selinux policy, ${policy_hint}"
        fi
    fi
}

# Install the k3s-selinux RPM from the Rancher repo.
# Positional args: $1 rpm_site, $2 rpm_channel, $3 rpm_target, $4 rpm_site_infix.
install_selinux_rpm() {
    if [ -r /etc/redhat-release ] || [ -r /etc/centos-release ] || [ -r /etc/oracle-release ] || [ -r /etc/fedora-release ] || [ "${ID_LIKE%%[ ]*}" = "suse" ]; then
        repodir=/etc/yum.repos.d
        if [ -d /etc/zypp/repos.d ]; then
            repodir=/etc/zypp/repos.d
        fi
        set +o noglob
        $SUDO rm -f ${repodir}/rancher-k3s-common*.repo
        set -o noglob
        if [ -r /etc/redhat-release ] && [ "${3}" = "el7" ]; then
            $SUDO yum install -y yum-utils
            $SUDO yum-config-manager --enable rhel-7-server-extras-rpms
        fi
        $SUDO tee ${repodir}/rancher-k3s-common.repo >/dev/null << EOF
[rancher-k3s-common-${2}]
name=Rancher K3s Common (${2})
baseurl=https://${1}/k3s/${2}/common/${4}/noarch
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://${1}/public.key
EOF
        case ${3} in
        sle)
            rpm_installer="zypper --gpg-auto-import-keys"
            if [ "${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then
                transactional_update_run="transactional-update --no-selfupdate -d run"
                rpm_installer="transactional-update --no-selfupdate -d run ${rpm_installer}"
                : "${INSTALL_K3S_SKIP_START:=true}"
            fi
            # create the /var/lib/rpm-state in SLE systems to fix the prein selinux macro
            ${transactional_update_run} mkdir -p /var/lib/rpm-state
            ;;
        coreos)
            rpm_installer="rpm-ostree --idempotent"
            # rpm_install_extra_args="--apply-live"
            : "${INSTALL_K3S_SKIP_START:=true}"
            ;;
        *)
            rpm_installer="yum"
            ;;
        esac
        if [ "${rpm_installer}" = "yum" ] && [ -x /usr/bin/dnf ]; then
            rpm_installer=dnf
        fi
        if rpm -q --quiet k3s-selinux; then
            # remove k3s-selinux module before upgrade to allow container-selinux to upgrade safely
            if check_available_upgrades container-selinux ${3} && check_available_upgrades k3s-selinux ${3}; then
                MODULE_PRIORITY=$($SUDO semodule --list=full | grep k3s | cut -f1 -d" ")
                if [ -n "${MODULE_PRIORITY}" ]; then
                    $SUDO semodule -X $MODULE_PRIORITY -r k3s || true
                fi
            fi
        fi
        # shellcheck disable=SC2086
        $SUDO ${rpm_installer} install -y "k3s-selinux"
    fi
    return
}

# Return 0 if package $1 has an available upgrade on target family $2.
check_available_upgrades() {
    set +e
    case ${2} in
    sle)
        available_upgrades=$($SUDO zypper -q -t -s 11 se -s -u --type package $1 | tail -n 1 | grep -v "No matching" | awk '{print $3}')
        ;;
    coreos)
        # currently rpm-ostree does not support search functionality https://github.com/coreos/rpm-ostree/issues/1877
        ;;
    *)
        available_upgrades=$($SUDO yum -q --refresh list $1 --upgrades | tail -n 1 | awk '{print $2}')
        ;;
    esac
    set -e
    if [ -n "${available_upgrades}" ]; then
        return 0
    fi
    return 1
}
# --- download and verify k3s ---
download_and_verify() {
    if can_skip_download_binary; then
        info 'Skipping k3s download and verify'
        verify_k3s_is_executable
        return
    fi

    setup_verify_arch
    verify_downloader curl || verify_downloader wget || fatal 'Can not find curl or wget for downloading files'
    setup_tmp
    get_release_version
    download_hash

    if installed_hash_matches; then
        info 'Skipping binary downloaded, installed k3s matches hash'
        return
    fi

    download_binary
    verify_binary
    setup_binary
}

# --- add additional utility links ---
create_symlinks() {
    [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
    [ "${INSTALL_K3S_SYMLINK}" = skip ] && return

    for cmd in kubectl crictl ctr; do
        if [ ! -e ${BIN_DIR}/${cmd} ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then
            which_cmd=$(command -v ${cmd} 2>/dev/null || true)
            if [ -z "${which_cmd}" ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then
                info "Creating ${BIN_DIR}/${cmd} symlink to k3s"
                $SUDO ln -sf k3s ${BIN_DIR}/${cmd}
            else
                info "Skipping ${BIN_DIR}/${cmd} symlink to k3s, command exists in PATH at ${which_cmd}"
            fi
        else
            info "Skipping ${BIN_DIR}/${cmd} symlink to k3s, already exists"
        fi
    done
}

# --- create killall script ---
# The '<< \EOF' quoted delimiter means the entire killall script below is
# written verbatim — nothing is expanded at install time.
create_killall() {
    [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
    info "Creating killall script ${KILLALL_K3S_SH}"
    $SUDO tee ${KILLALL_K3S_SH} >/dev/null << \EOF
#!/bin/sh
[ $(id -u) -eq 0 ] || exec sudo $0 $@

for bin in /var/lib/rancher/k3s/data/**/bin/; do
    [ -d $bin ] && export PATH=$PATH:$bin:$bin/aux
done

set -x

for service in /etc/systemd/system/k3s*.service; do
    [ -s $service ] && systemctl stop $(basename $service)
done

for service in /etc/init.d/k3s*; do
    [ -x $service ] && $service stop
done

pschildren() {
    ps -e -o ppid= -o pid= | \
    sed -e 's/^\s*//g; s/\s\s*/\t/g;' | \
    grep -w "^$1" | \
    cut -f2
}

pstree() {
    for pid in $@; do
        echo $pid
        for child in $(pschildren $pid); do
            pstree $child
        done
    done
}

killtree() {
    kill -9 $(
        { set +x; } 2>/dev/null;
        pstree $@;
        set -x;
    ) 2>/dev/null
}

remove_interfaces() {
    # Delete network interface(s) that match 'master cni0'
    ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do
        iface=${iface%%@*}
        [ -z "$iface" ] || ip link delete $iface
    done

    # Delete cni related interfaces
    ip link delete cni0
    ip link delete flannel.1
    ip link delete flannel-v6.1
    ip link delete kube-ipvs0
    ip link delete flannel-wg
    ip link delete flannel-wg-v6

    # Restart tailscale
    if [ -n "$(command -v tailscale)" ]; then
        tailscale set --advertise-routes=
    fi
}

getshims() {
    ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1
}

killtree $({ set +x; } 2>/dev/null; getshims; set -x)

do_unmount_and_remove() {
    set +x
    while read -r _ path _; do
        case "$path" in $1*) echo "$path" ;; esac
    done < /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount -f "$0" && rm -rf "$0"'
    set -x
}

do_unmount_and_remove '/run/k3s'
do_unmount_and_remove '/var/lib/rancher/k3s'
do_unmount_and_remove '/var/lib/kubelet/pods'
do_unmount_and_remove '/var/lib/kubelet/plugins'
do_unmount_and_remove '/run/netns/cni-'

# Remove CNI namespaces
ip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns delete

remove_interfaces

rm -rf /var/lib/cni/
iptables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | iptables-restore
ip6tables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | ip6tables-restore
EOF
    $SUDO chmod 755 ${KILLALL_K3S_SH}
    $SUDO chown root:root ${KILLALL_K3S_SH}
}

# --- create uninstall script ---
# Unescaped ${...} references in this heredoc expand NOW (install time) and are
# baked into the uninstall script; \$-escaped ones expand when it later runs.
create_uninstall() {
    [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
    info "Creating uninstall script ${UNINSTALL_K3S_SH}"
    $SUDO tee ${UNINSTALL_K3S_SH} >/dev/null << EOF
#!/bin/sh
set -x
[ \$(id -u) -eq 0 ] || exec sudo \$0 \$@

${KILLALL_K3S_SH}

if command -v systemctl; then
    systemctl disable ${SYSTEM_NAME}
    systemctl reset-failed ${SYSTEM_NAME}
    systemctl daemon-reload
fi
if command -v rc-update; then
    rc-update delete ${SYSTEM_NAME} default
fi

rm -f ${FILE_K3S_SERVICE}
rm -f ${FILE_K3S_ENV}

remove_uninstall() {
    rm -f ${UNINSTALL_K3S_SH}
}
trap remove_uninstall EXIT

if (ls ${SYSTEMD_DIR}/k3s*.service || ls /etc/init.d/k3s*) >/dev/null 2>&1; then
    set +x; echo 'Additional k3s services installed, skipping uninstall of k3s'; set -x
    exit
fi

for cmd in kubectl crictl ctr; do
    if [ -L ${BIN_DIR}/\$cmd ]; then
        rm -f ${BIN_DIR}/\$cmd
    fi
done

rm -rf /etc/rancher/k3s
rm -rf /run/k3s
rm -rf /run/flannel
rm -rf /var/lib/rancher/k3s
rm -rf /var/lib/kubelet
rm -f ${BIN_DIR}/k3s
rm -f ${KILLALL_K3S_SH}

if type yum >/dev/null 2>&1; then
    yum remove -y k3s-selinux
    rm -f /etc/yum.repos.d/rancher-k3s-common*.repo
elif type rpm-ostree >/dev/null 2>&1; then
    rpm-ostree uninstall k3s-selinux
    rm -f /etc/yum.repos.d/rancher-k3s-common*.repo
elif type zypper >/dev/null 2>&1; then
    uninstall_cmd="zypper remove -y k3s-selinux"
    if [ "\${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then
        uninstall_cmd="transactional-update --no-selfupdate -d run \$uninstall_cmd"
    fi
    \$uninstall_cmd
    rm -f /etc/zypp/repos.d/rancher-k3s-common*.repo
fi
EOF
    $SUDO chmod 755 ${UNINSTALL_K3S_SH}
    $SUDO chown root:root ${UNINSTALL_K3S_SH}
}

# --- disable current service if loaded --
systemd_disable() {
    $SUDO systemctl disable ${SYSTEM_NAME} >/dev/null 2>&1 || true
    $SUDO rm -f /etc/systemd/system/${SERVICE_K3S} || true
    $SUDO rm -f /etc/systemd/system/${SERVICE_K3S}.env || true
}

# --- capture current env and create file containing k3s_ variables ---
# Snapshots K3S_*/CONTAINERD_* and (case-insensitive) proxy variables from the
# current environment into the service env file, mode 0600.
create_env_file() {
    info "env: Creating environment file ${FILE_K3S_ENV}"
    $SUDO touch ${FILE_K3S_ENV}
    $SUDO chmod 0600 ${FILE_K3S_ENV}
    sh -c export | while read x v; do echo $v; done | grep -E '^(K3S|CONTAINERD)_' | $SUDO tee ${FILE_K3S_ENV} >/dev/null
    sh -c export | while read x v; do echo $v; done | grep -Ei '^(NO|HTTP|HTTPS)_PROXY' | $SUDO tee -a ${FILE_K3S_ENV} >/dev/null
}

# --- write systemd service file ---
create_systemd_service_file() {
    info "systemd: Creating service file ${FILE_K3S_SERVICE}"
    $SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
Wants=network-online.target
After=network-online.target

[Install]
WantedBy=multi-user.target

[Service]
Type=${SYSTEMD_TYPE}
EnvironmentFile=-/etc/default/%N
EnvironmentFile=-/etc/sysconfig/%N
EnvironmentFile=-${FILE_K3S_ENV}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s
ExecStartPre=/bin/sh -xc '! /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service 2>/dev/null'
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=${BIN_DIR}/k3s \\
    ${CMD_K3S_EXEC}

EOF
}

# --- write openrc service file ---
# Also installs a logrotate config for the service log.
create_openrc_service_file() {
    LOG_FILE=/var/log/${SYSTEM_NAME}.log

    info "openrc: Creating service file ${FILE_K3S_SERVICE}"
    $SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF
#!/sbin/openrc-run

depend() {
    after network-online
    want cgroups
}

start_pre() {
    rm -f /tmp/k3s.*
}

supervisor=supervise-daemon
name=${SYSTEM_NAME}
command="${BIN_DIR}/k3s"
command_args="$(escape_dq "${CMD_K3S_EXEC}")
    >>${LOG_FILE} 2>&1"

output_log=${LOG_FILE}
error_log=${LOG_FILE}

pidfile="/var/run/${SYSTEM_NAME}.pid"
respawn_delay=5
respawn_max=0

set -o allexport
if [ -f /etc/environment ]; then . /etc/environment; fi
if [ -f ${FILE_K3S_ENV} ]; then . ${FILE_K3S_ENV}; fi
set +o allexport
EOF
    $SUDO chmod 0755 ${FILE_K3S_SERVICE}

    $SUDO tee /etc/logrotate.d/${SYSTEM_NAME} >/dev/null << EOF
${LOG_FILE} {
	missingok
	notifempty
	copytruncate
}
EOF
}

# --- write systemd or openrc service file ---
create_service_file() {
    [ "${HAS_SYSTEMD}" = true ] && create_systemd_service_file && restore_systemd_service_file_context
    [ "${HAS_OPENRC}" = true ] && create_openrc_service_file
    return 0
}

# Restore SELinux file contexts on the freshly written service/env files.
restore_systemd_service_file_context() {
    $SUDO restorecon -R -i ${FILE_K3S_SERVICE} 2>/dev/null || true
    $SUDO restorecon -R -i ${FILE_K3S_ENV} 2>/dev/null || true
}

# --- get hashes of the current k3s bin and service files
get_installed_hashes() {
    $SUDO sha256sum ${BIN_DIR}/k3s ${FILE_K3S_SERVICE} ${FILE_K3S_ENV} 2>&1 || true
}

# --- enable and start systemd service ---
systemd_enable() {
    info "systemd: Enabling ${SYSTEM_NAME} unit"
    $SUDO systemctl enable ${FILE_K3S_SERVICE} >/dev/null
    $SUDO systemctl daemon-reload >/dev/null
}

systemd_start() {
    info "systemd: Starting ${SYSTEM_NAME}"
    $SUDO systemctl restart ${SYSTEM_NAME}
}

# --- enable and start openrc service ---
openrc_enable() {
    info "openrc: Enabling ${SYSTEM_NAME} service for default runlevel"
    $SUDO rc-update add ${SYSTEM_NAME} default >/dev/null
}

openrc_start() {
    info "openrc: Starting ${SYSTEM_NAME}"
    $SUDO ${FILE_K3S_SERVICE} restart
}

# Return 0 if the host's $1-save/$1-restore (iptables/ip6tables) tools exist
# and do not mangle the existing CNI rules.
has_working_xtables() {
    if $SUDO sh -c "command -v \"$1-save\"" 1> /dev/null && $SUDO sh -c "command -v \"$1-restore\"" 1> /dev/null; then
        if $SUDO $1-save 2>/dev/null | grep -q '^-A CNI-HOSTPORT-MASQ -j MASQUERADE$'; then
            warn "Host $1-save/$1-restore tools are incompatible with existing rules"
        else
            return 0
        fi
    else
        info "Host $1-save/$1-restore tools not found"
    fi
    return 1
}

# --- startup systemd or openrc service ---
# Restarts the service only when the installed files actually changed (compares
# PRE_INSTALL_HASHES against a fresh hash) or INSTALL_K3S_FORCE_RESTART is set.
service_enable_and_start() {
    if [ -f "/proc/cgroups" ] && [ "$(grep memory /proc/cgroups | while read -r n n n enabled; do echo $enabled; done)" -eq 0 ];
    then
        info 'Failed to find memory cgroup, you may need to add "cgroup_memory=1 cgroup_enable=memory" to your linux cmdline (/boot/cmdline.txt on a Raspberry Pi)'
    fi

    [ "${INSTALL_K3S_SKIP_ENABLE}" = true ] && return

    [ "${HAS_SYSTEMD}" = true ] && systemd_enable
    [ "${HAS_OPENRC}" = true ] && openrc_enable

    [ "${INSTALL_K3S_SKIP_START}" = true ] && return

    POST_INSTALL_HASHES=$(get_installed_hashes)
    if [ "${PRE_INSTALL_HASHES}" = "${POST_INSTALL_HASHES}" ] && [ "${INSTALL_K3S_FORCE_RESTART}" != true ]; then
        info 'No change detected so skipping service start'
        return
    fi

    # Flush stale KUBE-/flannel rules before (re)starting k3s.
    for XTABLES in iptables ip6tables; do
        if has_working_xtables ${XTABLES}; then
            $SUDO ${XTABLES}-save 2>/dev/null | grep -v KUBE- | grep -iv flannel | $SUDO ${XTABLES}-restore
        fi
    done

    [ "${HAS_SYSTEMD}" = true ] && systemd_start
    [ "${HAS_OPENRC}" = true ] && openrc_start
    return 0
}

# --- re-evaluate args to include env command ---
# Re-parses INSTALL_K3S_EXEC (escaped) plus the original argv (quoted) into
# the positional parameters consumed by setup_env.
eval set -- $(escape "${INSTALL_K3S_EXEC}") $(quote "$@")

# --- run the install process --
{
    verify_system
    setup_env "$@"
    download_and_verify
    setup_selinux
    create_symlinks
    create_killall
    create_uninstall
    systemd_disable
    create_env_file
    create_service_file
    service_enable_and_start
}