├── .cr.yaml ├── .github ├── ISSUE_TEMPLATE │ └── bug_report.md └── workflows │ ├── build.yml │ ├── chart.yml │ ├── release-delete.yml │ ├── release.yml │ └── test.yaml ├── .gitignore ├── .golangci.yml ├── .goreleaser.yaml ├── LICENSE ├── Makefile ├── README.md ├── charts └── k3k │ ├── .helmignore │ ├── Chart.yaml │ ├── crds │ ├── k3k.io_clusters.yaml │ └── k3k.io_virtualclusterpolicies.yaml │ ├── templates │ ├── _helpers.tpl │ ├── deployment.yaml │ ├── rbac.yaml │ ├── service.yaml │ └── serviceaccount.yaml │ └── values.yaml ├── cli ├── cmds │ ├── cluster.go │ ├── cluster_create.go │ ├── cluster_create_flags.go │ ├── cluster_delete.go │ ├── cluster_list.go │ ├── kubeconfig.go │ ├── policy.go │ ├── policy_create.go │ ├── policy_delete.go │ ├── policy_list.go │ ├── root.go │ └── table_printer.go └── main.go ├── docs ├── advanced-usage.md ├── architecture.md ├── cli │ ├── cli-docs.md │ └── genclidoc.go ├── crds │ ├── config.yaml │ └── crd-docs.md ├── development.md ├── howtos │ └── airgap.md ├── images │ └── architecture │ │ ├── shared-mode.excalidraw │ │ ├── shared-mode.png │ │ ├── virtual-mode.excalidraw │ │ └── virtual-mode.png └── virtualclusterpolicy.md ├── examples ├── clusterset.yaml ├── multiple-servers.yaml └── single-server.yaml ├── go.mod ├── go.sum ├── k3k-kubelet ├── README.md ├── config.go ├── controller │ ├── configmap.go │ ├── handler.go │ ├── persistentvolumeclaims.go │ ├── pod.go │ ├── secret.go │ ├── service.go │ └── webhook │ │ └── pod.go ├── kubelet.go ├── main.go ├── provider │ ├── collectors │ │ └── kubelet_resource_metrics.go │ ├── configure.go │ ├── node.go │ ├── provider.go │ ├── provider_test.go │ ├── token.go │ └── util.go └── translate │ └── host.go ├── main.go ├── package ├── Dockerfile.k3k └── Dockerfile.k3k-kubelet ├── pkg ├── apis │ └── k3k.io │ │ ├── register.go │ │ └── v1alpha1 │ │ ├── doc.go │ │ ├── register.go │ │ ├── types.go │ │ └── zz_generated.deepcopy.go ├── buildinfo │ └── buildinfo.go ├── controller │ ├── certs │ │ └── certs.go │ ├── cluster │ │ ├── agent │ │ │ ├── agent.go │ │ │ ├── shared.go │ │ │ ├── shared_test.go │ │ │ ├── virtual.go │ │ │ └── virtual_test.go │ │ ├── cluster.go │ │ ├── cluster_finalize.go │ │ ├── cluster_suite_test.go │ │ ├── cluster_test.go │ │ ├── pod.go │ │ ├── server │ │ │ ├── bootstrap │ │ │ │ └── bootstrap.go │ │ │ ├── config.go │ │ │ ├── ingress.go │ │ │ ├── server.go │ │ │ ├── service.go │ │ │ └── template.go │ │ └── token.go │ ├── controller.go │ ├── kubeconfig │ │ └── kubeconfig.go │ └── policy │ │ ├── namespace.go │ │ ├── networkpolicy.go │ │ ├── policy.go │ │ ├── policy_suite_test.go │ │ └── policy_test.go └── log │ └── zap.go ├── scripts ├── build └── generate └── tests ├── cluster_network_test.go ├── cluster_test.go ├── common_test.go ├── k8s_restclientgetter_test.go └── tests_suite_test.go /.cr.yaml: -------------------------------------------------------------------------------- 1 | release-name-template: chart-{{ .Version }} 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | 12 | **Environmental Info:** 13 | Host Cluster Version: 14 | 15 | 16 | Node(s) CPU architecture, OS, and Version: 17 | 18 | 19 | Host Cluster Configuration: 20 | 21 | 22 | K3K Cluster Configuration: 23 | 24 | 25 | **Describe the bug:** 26 | 27 
| 28 | **Steps To Reproduce:** 29 | - Created a cluster with `k3k create`: 30 | 31 | **Expected behavior:** 32 | 33 | 34 | **Actual behavior:** 35 | 36 | 37 | **Additional context / logs:** 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - name: Checkout code 18 | uses: actions/checkout@v4 19 | 20 | - name: Set up Go 21 | uses: actions/setup-go@v5 22 | with: 23 | go-version-file: go.mod 24 | 25 | - name: Set up QEMU 26 | uses: docker/setup-qemu-action@v3 27 | 28 | - name: Run GoReleaser 29 | uses: goreleaser/goreleaser-action@v6 30 | with: 31 | distribution: goreleaser 32 | version: v2 33 | args: --clean --snapshot 34 | env: 35 | REPO: ${{ github.repository }} 36 | REGISTRY: "" 37 | -------------------------------------------------------------------------------- /.github/workflows/chart.yml: -------------------------------------------------------------------------------- 1 | name: Chart 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | tags: 7 | - "chart-*" 8 | 9 | permissions: 10 | contents: write 11 | 12 | jobs: 13 | chart-release: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout code 17 | uses: actions/checkout@v4 18 | with: 19 | fetch-depth: 0 20 | 21 | - name: Check tag 22 | if: github.event_name == 'push' 23 | run: | 24 | pushed_tag=$(echo ${{ github.ref_name }} | sed "s/chart-//") 25 | chart_tag=$(yq .version charts/k3k/Chart.yaml) 26 | 27 | echo pushed_tag=${pushed_tag} chart_tag=${chart_tag} 28 | [ "${pushed_tag}" == "${chart_tag}" ] 29 | 30 | - name: Configure Git 31 | run: | 32 | git config user.name "$GITHUB_ACTOR" 33 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com" 34 | 35 | - name: Install Helm 36 | uses: azure/setup-helm@v4 37 | env: 38 | GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 39 | 40 | - name: Run chart-releaser 41 | uses: helm/chart-releaser-action@v1.6.0 42 | with: 43 | config: .cr.yaml 44 | env: 45 | CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 46 | -------------------------------------------------------------------------------- /.github/workflows/release-delete.yml: -------------------------------------------------------------------------------- 1 | name: Release - Delete Draft 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | tag: 7 | type: string 8 | description: The tag of the release 9 | 10 | permissions: 11 | contents: write 12 | packages: write 13 | 14 | env: 15 | GH_TOKEN: ${{ github.token }} 16 | 17 | jobs: 18 | release-delete: 19 | runs-on: ubuntu-latest 20 | 21 | steps: 22 | - name: Check tag 23 | if: inputs.tag == '' 24 | run: echo "::error::Missing tag from input" && exit 1 25 | 26 | - name: Checkout code 27 | uses: actions/checkout@v4 28 | 29 | - name: Check if release is draft 30 | run: | 31 | CURRENT_TAG=${{ inputs.tag }} 32 | isDraft=$(gh release view ${CURRENT_TAG} --json isDraft --jq ".isDraft") 33 | if [ "$isDraft" = true ]; then 34 | echo "Release ${CURRENT_TAG} is draft" 35 | else 36 | echo "::error::Cannot delete non-draft release" && exit 1 37 | fi 38 | 39 | - name: Delete packages from Github Container Registry 40 | run: | 41 | CURRENT_TAG=${{ inputs.tag }} 42 | echo "Deleting packages with tag ${CURRENT_TAG}" 43 | 44 | JQ_QUERY=".[] | 
select(.metadata.container.tags[] == \"${CURRENT_TAG}\")" 45 | 46 | for package in k3k k3k-kubelet 47 | do 48 | echo "Deleting ${package} image" 49 | PACKAGE_TO_DELETE=$(gh api /user/packages/container/${package}/versions --jq "${JQ_QUERY}") 50 | echo $PACKAGE_TO_DELETE | jq 51 | 52 | PACKAGE_ID=$(echo $PACKAGE_TO_DELETE | jq .id) 53 | echo "Deleting ${PACKAGE_ID}" 54 | gh api --method DELETE /user/packages/container/${package}/versions/${PACKAGE_ID} 55 | done 56 | 57 | - name: Delete Github release 58 | run: | 59 | CURRENT_TAG=${{ inputs.tag }} 60 | echo "Deleting release ${CURRENT_TAG}" 61 | gh release delete ${CURRENT_TAG} 62 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*" 7 | workflow_dispatch: 8 | inputs: 9 | commit: 10 | type: string 11 | description: Checkout a specific commit 12 | 13 | permissions: 14 | contents: write 15 | packages: write 16 | id-token: write 17 | 18 | jobs: 19 | release: 20 | runs-on: ubuntu-latest 21 | 22 | steps: 23 | - name: Checkout code 24 | uses: actions/checkout@v4 25 | with: 26 | fetch-depth: 0 27 | fetch-tags: true 28 | 29 | - name: Checkout code at the specific commit 30 | if: inputs.commit != '' 31 | run: git checkout ${{ inputs.commit }} 32 | 33 | - name: Set up Go 34 | uses: actions/setup-go@v5 35 | with: 36 | go-version-file: go.mod 37 | 38 | - name: Set up QEMU 39 | uses: docker/setup-qemu-action@v3 40 | 41 | - name: "Read secrets" 42 | uses: rancher-eio/read-vault-secrets@main 43 | if: github.repository_owner == 'rancher' 44 | with: 45 | secrets: | 46 | secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials username | DOCKER_USERNAME ; 47 | secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials password | DOCKER_PASSWORD ; 48 | 49 | # Manually dispatched workflows (or forks) will use ghcr.io 50 | - name: Setup ghcr.io 51 | if: github.event_name == 'workflow_dispatch' || github.repository_owner != 'rancher' 52 | run: | 53 | echo "REGISTRY=ghcr.io" >> $GITHUB_ENV 54 | echo "DOCKER_USERNAME=${{ github.actor }}" >> $GITHUB_ENV 55 | echo "DOCKER_PASSWORD=${{ github.token }}" >> $GITHUB_ENV 56 | 57 | - name: Login to container registry 58 | uses: docker/login-action@v3 59 | with: 60 | registry: ${{ env.REGISTRY }} 61 | username: ${{ env.DOCKER_USERNAME }} 62 | password: ${{ env.DOCKER_PASSWORD }} 63 | 64 | # If the tag does not exist the workflow was manually triggered. 
65 | # That means we are creating temporary nightly builds, with a "fake" local tag 66 | - name: Check release tag 67 | id: release-tag 68 | run: | 69 | CURRENT_TAG=$(git describe --tag --always --match="v[0-9]*") 70 | 71 | if git show-ref --tags ${CURRENT_TAG} --quiet; then 72 | echo "tag ${CURRENT_TAG} already exists"; 73 | else 74 | echo "tag ${CURRENT_TAG} does not exist" 75 | git tag ${CURRENT_TAG} 76 | fi 77 | 78 | echo "CURRENT_TAG=${CURRENT_TAG}" >> "$GITHUB_OUTPUT" 79 | 80 | - name: Run GoReleaser 81 | uses: goreleaser/goreleaser-action@v6 82 | with: 83 | distribution: goreleaser 84 | version: v2 85 | args: --clean 86 | env: 87 | GITHUB_TOKEN: ${{ github.token }} 88 | GORELEASER_CURRENT_TAG: ${{ steps.release-tag.outputs.CURRENT_TAG }} 89 | REGISTRY: ${{ env.REGISTRY }} 90 | REPO: ${{ github.repository }} 91 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | pull_request: 6 | workflow_dispatch: 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | lint: 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - name: Checkout code 17 | uses: actions/checkout@v4 18 | 19 | - uses: actions/setup-go@v5 20 | with: 21 | go-version-file: go.mod 22 | 23 | - name: golangci-lint 24 | uses: golangci/golangci-lint-action@v6 25 | with: 26 | args: --timeout=5m 27 | version: v1.64 28 | 29 | tests: 30 | runs-on: ubuntu-latest 31 | 32 | steps: 33 | - name: Checkout code 34 | uses: actions/checkout@v4 35 | 36 | - uses: actions/setup-go@v5 37 | with: 38 | go-version-file: go.mod 39 | 40 | - name: Validate 41 | run: make validate 42 | 43 | - name: Run unit tests 44 | run: make test-unit 45 | 46 | tests-e2e: 47 | runs-on: ubuntu-latest 48 | 49 | steps: 50 | - name: Checkout code 51 | uses: actions/checkout@v4 52 | with: 53 | fetch-depth: 0 54 | fetch-tags: true 55 | 56 | - uses: actions/setup-go@v5 57 | with: 58 | go-version-file: go.mod 59 | 60 | - name: Validate 61 | run: make validate 62 | 63 | - name: Install Ginkgo 64 | run: go install github.com/onsi/ginkgo/v2/ginkgo 65 | 66 | - name: Build and package 67 | run: | 68 | make build 69 | make package 70 | 71 | # add k3kcli to $PATH 72 | echo "${{ github.workspace }}/bin" >> $GITHUB_PATH 73 | 74 | - name: Check k3kcli 75 | run: k3kcli -v 76 | 77 | - name: Run e2e tests 78 | run: make test-e2e 79 | 80 | - name: Archive k3s logs 81 | uses: actions/upload-artifact@v4 82 | if: always() 83 | with: 84 | name: k3s-logs 85 | path: /tmp/k3s.log 86 | 87 | - name: Archive k3k logs 88 | uses: actions/upload-artifact@v4 89 | if: always() 90 | with: 91 | name: k3k-logs 92 | path: /tmp/k3k.log 93 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.dapper 2 | /.cache 3 | /bin 4 | /dist 5 | *.swp 6 | .idea 7 | .vscode/ 8 | __debug* 9 | *-kubeconfig.yaml 10 | .envtest 11 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | linters: 2 | enable: 3 | # default linters 4 | - errcheck 5 | - gosimple 6 | - govet 7 | - ineffassign 8 | - staticcheck 9 | - unused 10 | 11 | # extra 12 | - misspell 13 | - wsl 14 | -------------------------------------------------------------------------------- /.goreleaser.yaml: 
-------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | release: 4 | draft: true 5 | replace_existing_draft: true 6 | prerelease: auto 7 | 8 | before: 9 | hooks: 10 | - go mod tidy 11 | - go generate ./... 12 | 13 | builds: 14 | - id: k3k 15 | env: 16 | - CGO_ENABLED=0 17 | goos: 18 | - linux 19 | goarch: 20 | - "amd64" 21 | - "arm64" 22 | - "s390x" 23 | ldflags: 24 | - -w -s # strip debug info and symbol table 25 | - -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}" 26 | 27 | - id: k3k-kubelet 28 | main: ./k3k-kubelet 29 | binary: k3k-kubelet 30 | env: 31 | - CGO_ENABLED=0 32 | goos: 33 | - linux 34 | goarch: 35 | - "amd64" 36 | - "arm64" 37 | - "s390x" 38 | ldflags: 39 | - -w -s # strip debug info and symbol table 40 | - -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}" 41 | 42 | - id: k3kcli 43 | main: ./cli 44 | binary: k3kcli 45 | env: 46 | - CGO_ENABLED=0 47 | goarch: 48 | - "amd64" 49 | - "arm64" 50 | ldflags: 51 | - -w -s # strip debug info and symbol table 52 | - -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}" 53 | 54 | archives: 55 | - format: binary 56 | name_template: >- 57 | {{ .Binary }}-{{- .Os }}-{{ .Arch }} 58 | {{- if .Arm }}v{{ .Arm }}{{ end }} 59 | format_overrides: 60 | - goos: windows 61 | format: zip 62 | 63 | # For the image_templates we are using the following expression to build images for the correct registry 64 | # {{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }} 65 | # 66 | # REGISTRY= -> rancher/k3k:vX.Y.Z 67 | # REGISTRY=ghcr.io -> ghcr.io/rancher/k3k:latest:vX.Y.Z 68 | # 69 | dockers: 70 | # k3k amd64 71 | - use: buildx 72 | goarch: amd64 73 | ids: 74 | - k3k 75 | - k3kcli 76 | dockerfile: "package/Dockerfile.k3k" 77 | skip_push: false 78 | image_templates: 79 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-amd64" 80 | build_flag_templates: 81 | - "--build-arg=BIN_K3K=k3k" 82 | - "--build-arg=BIN_K3KCLI=k3kcli" 83 | - "--pull" 84 | - "--platform=linux/amd64" 85 | 86 | # k3k arm64 87 | - use: buildx 88 | goarch: arm64 89 | ids: 90 | - k3k 91 | - k3kcli 92 | dockerfile: "package/Dockerfile.k3k" 93 | skip_push: false 94 | image_templates: 95 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-arm64" 96 | build_flag_templates: 97 | - "--build-arg=BIN_K3K=k3k" 98 | - "--build-arg=BIN_K3KCLI=k3kcli" 99 | - "--pull" 100 | - "--platform=linux/arm64" 101 | 102 | # k3k-kubelet amd64 103 | - use: buildx 104 | goarch: amd64 105 | ids: 106 | - k3k-kubelet 107 | dockerfile: "package/Dockerfile.k3k-kubelet" 108 | skip_push: false 109 | image_templates: 110 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-amd64" 111 | build_flag_templates: 112 | - "--build-arg=BIN_K3K_KUBELET=k3k-kubelet" 113 | - "--pull" 114 | - "--platform=linux/amd64" 115 | 116 | # k3k-kubelet arm64 117 | - use: buildx 118 | goarch: arm64 119 | ids: 120 | - k3k-kubelet 121 | dockerfile: "package/Dockerfile.k3k-kubelet" 122 | skip_push: false 123 | image_templates: 124 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-arm64" 125 | build_flag_templates: 126 | - "--build-arg=BIN_K3K_KUBELET=k3k-kubelet" 127 | - "--pull" 128 | - "--platform=linux/arm64" 129 | 130 | docker_manifests: 131 | # k3k 132 | - name_template: "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}" 133 | image_templates: 134 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ 
end }}{{ .Env.REPO }}:{{ .Tag }}-amd64" 135 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-arm64" 136 | 137 | # k3k-kubelet 138 | - name_template: "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}" 139 | image_templates: 140 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-amd64" 141 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-arm64" 142 | 143 | changelog: 144 | sort: asc 145 | filters: 146 | exclude: 147 | - "^docs:" 148 | - "^test:" 149 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | REPO ?= rancher 3 | VERSION ?= $(shell git describe --tags --always --dirty --match="v[0-9]*") 4 | 5 | ## Dependencies 6 | 7 | GOLANGCI_LINT_VERSION := v1.64.8 8 | GINKGO_VERSION ?= v2.21.0 9 | ENVTEST_VERSION ?= v0.0.0-20250505003155-b6c5897febe5 10 | ENVTEST_K8S_VERSION := 1.31.0 11 | CRD_REF_DOCS_VER ?= v0.1.0 12 | 13 | GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION) 14 | GINKGO ?= go run github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION) 15 | CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER) 16 | 17 | ENVTEST ?= go run sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION) 18 | ENVTEST_DIR ?= $(shell pwd)/.envtest 19 | export KUBEBUILDER_ASSETS ?= $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(ENVTEST_DIR) -p path) 20 | 21 | 22 | .PHONY: all 23 | all: version generate build package ## Run 'make' or 'make all' to run 'version', 'generate', 'build' and 'package' 24 | 25 | .PHONY: version 26 | version: ## Print the current version 27 | @echo $(VERSION) 28 | 29 | .PHONY: build 30 | build: ## Build the K3k binaries (k3k, k3k-kubelet and k3kcli) 31 | @VERSION=$(VERSION) ./scripts/build 32 | 33 | .PHONY: package 34 | package: package-k3k package-k3k-kubelet ## Package the k3k and k3k-kubelet Docker images 35 | 36 | .PHONY: package-% 37 | package-%: 38 | docker build -f package/Dockerfile.$* \ 39 | -t $(REPO)/$*:$(VERSION) \ 40 | -t $(REPO)/$*:latest \ 41 | -t $(REPO)/$*:dev . 42 | 43 | .PHONY: push 44 | push: push-k3k push-k3k-kubelet ## Push the K3k images to the registry 45 | 46 | .PHONY: push-% 47 | push-%: 48 | docker push $(REPO)/$*:$(VERSION) 49 | docker push $(REPO)/$*:latest 50 | docker push $(REPO)/$*:dev 51 | 52 | .PHONY: test 53 | test: ## Run all the tests 54 | $(GINKGO) -v -r --label-filter=$(label-filter) 55 | 56 | .PHONY: test-unit 57 | test-unit: ## Run the unit tests (skips the e2e) 58 | $(GINKGO) -v -r --skip-file=tests/* 59 | 60 | .PHONY: test-controller 61 | test-controller: ## Run the controller tests (pkg/controller) 62 | $(GINKGO) -v -r pkg/controller 63 | 64 | .PHONY: test-e2e 65 | test-e2e: ## Run the e2e tests 66 | $(GINKGO) -v -r tests 67 | 68 | .PHONY: generate 69 | generate: ## Generate the CRD specs 70 | go generate ./... 
71 | 72 | .PHONY: docs 73 | docs: ## Build the CRDs and CLI docs 74 | $(CRD_REF_DOCS) --config=./docs/crds/config.yaml \ 75 | --renderer=markdown \ 76 | --source-path=./pkg/apis/k3k.io/v1alpha1 \ 77 | --output-path=./docs/crds/crd-docs.md 78 | @go run ./docs/cli/genclidoc.go 79 | 80 | .PHONY: lint 81 | lint: ## Find any linting issues in the project 82 | $(GOLANGCI_LINT) run --timeout=5m 83 | 84 | .PHONY: validate 85 | validate: generate docs ## Validate the project checking for any dependency or doc mismatch 86 | $(GINKGO) unfocus 87 | go mod tidy 88 | git status --porcelain 89 | git --no-pager diff --exit-code 90 | 91 | .PHONY: install 92 | install: ## Install K3k with Helm on the targeted Kubernetes cluster 93 | helm upgrade --install --namespace k3k-system --create-namespace \ 94 | --set image.repository=$(REPO)/k3k \ 95 | --set image.tag=$(VERSION) \ 96 | --set sharedAgent.image.repository=$(REPO)/k3k-kubelet \ 97 | --set sharedAgent.image.tag=$(VERSION) \ 98 | k3k ./charts/k3k/ 99 | 100 | .PHONY: help 101 | help: ## Show this help. 102 | @egrep -h '\s##\s' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m %-30s\033[0m %s\n", $$1, $$2}' 103 | -------------------------------------------------------------------------------- /charts/k3k/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /charts/k3k/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: k3k 3 | description: A Helm chart for K3K 4 | type: application 5 | version: 0.3.2 6 | appVersion: v0.3.2 7 | -------------------------------------------------------------------------------- /charts/k3k/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "k3k.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "k3k.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 
28 | */}} 29 | {{- define "k3k.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "k3k.labels" -}} 37 | helm.sh/chart: {{ include "k3k.chart" . }} 38 | {{ include "k3k.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "k3k.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "k3k.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "k3k.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "k3k.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /charts/k3k/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "k3k.fullname" . }} 5 | labels: 6 | {{- include "k3k.labels" . | nindent 4 }} 7 | namespace: {{ .Release.Namespace }} 8 | spec: 9 | replicas: {{ .Values.replicaCount }} 10 | selector: 11 | matchLabels: 12 | {{- include "k3k.selectorLabels" . | nindent 6 }} 13 | template: 14 | metadata: 15 | labels: 16 | {{- include "k3k.selectorLabels" . | nindent 8 }} 17 | spec: 18 | containers: 19 | - image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" 20 | imagePullPolicy: {{ .Values.image.pullPolicy }} 21 | name: {{ .Chart.Name }} 22 | env: 23 | - name: CLUSTER_CIDR 24 | value: {{ .Values.host.clusterCIDR }} 25 | - name: SHARED_AGENT_IMAGE 26 | value: "{{ .Values.sharedAgent.image.repository }}:{{ default .Chart.AppVersion .Values.sharedAgent.image.tag }}" 27 | - name: SHARED_AGENT_PULL_POLICY 28 | value: {{ .Values.sharedAgent.image.pullPolicy }} 29 | - name: K3S_IMAGE 30 | value: {{ .Values.k3sServer.image.repository }} 31 | - name: K3S_IMAGE_PULL_POLICY 32 | value: {{ .Values.k3sServer.image.pullPolicy }} 33 | ports: 34 | - containerPort: 8080 35 | name: https 36 | protocol: TCP 37 | - containerPort: 9443 38 | name: https-webhook 39 | protocol: TCP 40 | serviceAccountName: {{ include "k3k.serviceAccountName" . }} 41 | -------------------------------------------------------------------------------- /charts/k3k/templates/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: {{ include "k3k.fullname" . }} 5 | labels: 6 | {{- include "k3k.labels" . | nindent 4 }} 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: cluster-admin 11 | subjects: 12 | - kind: ServiceAccount 13 | name: {{ include "k3k.serviceAccountName" . }} 14 | namespace: {{ .Release.Namespace }} 15 | --- 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | kind: ClusterRole 18 | metadata: 19 | name: {{ include "k3k.fullname" . 
}}-node-proxy 20 | rules: 21 | - apiGroups: 22 | - "" 23 | resources: 24 | - "nodes" 25 | - "nodes/proxy" 26 | verbs: 27 | - "get" 28 | - "list" 29 | --- 30 | kind: ClusterRoleBinding 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | metadata: 33 | name: {{ include "k3k.fullname" . }}-node-proxy 34 | roleRef: 35 | kind: ClusterRole 36 | name: {{ include "k3k.fullname" . }}-node-proxy 37 | apiGroup: rbac.authorization.k8s.io 38 | -------------------------------------------------------------------------------- /charts/k3k/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: k3k-webhook 5 | labels: 6 | {{- include "k3k.labels" . | nindent 4 }} 7 | namespace: {{ .Release.Namespace }} 8 | spec: 9 | ports: 10 | - port: 443 11 | protocol: TCP 12 | name: https-webhook 13 | targetPort: 9443 14 | selector: 15 | {{- include "k3k.selectorLabels" . | nindent 6 }} 16 | -------------------------------------------------------------------------------- /charts/k3k/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "k3k.serviceAccountName" . }} 6 | labels: 7 | {{- include "k3k.labels" . | nindent 4 }} 8 | namespace: {{ .Release.Namespace }} 9 | {{- end }} 10 | -------------------------------------------------------------------------------- /charts/k3k/values.yaml: -------------------------------------------------------------------------------- 1 | replicaCount: 1 2 | 3 | image: 4 | repository: rancher/k3k 5 | tag: "" 6 | pullPolicy: "" 7 | 8 | imagePullSecrets: [] 9 | nameOverride: "" 10 | fullnameOverride: "" 11 | 12 | host: 13 | # clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy, if not set 14 | # the controller will collect the PodCIDRs of all the nodes on the system. 15 | clusterCIDR: "" 16 | 17 | serviceAccount: 18 | # Specifies whether a service account should be created 19 | create: true 20 | # The name of the service account to use. 
21 | # If not set and create is true, a name is generated using the fullname template 22 | name: "" 23 | 24 | # configuration related to the shared agent mode in k3k 25 | sharedAgent: 26 | image: 27 | repository: "rancher/k3k-kubelet" 28 | tag: "" 29 | pullPolicy: "" 30 | # image registry configuration related to the k3s server 31 | k3sServer: 32 | image: 33 | repository: "rancher/k3s" 34 | pullPolicy: "" 35 | -------------------------------------------------------------------------------- /cli/cmds/cluster.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "github.com/urfave/cli/v2" 5 | ) 6 | 7 | func NewClusterCmd(appCtx *AppContext) *cli.Command { 8 | return &cli.Command{ 9 | Name: "cluster", 10 | Usage: "cluster command", 11 | Subcommands: []*cli.Command{ 12 | NewClusterCreateCmd(appCtx), 13 | NewClusterDeleteCmd(appCtx), 14 | NewClusterListCmd(appCtx), 15 | }, 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /cli/cmds/cluster_create.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "net/url" 7 | "strings" 8 | "time" 9 | 10 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 11 | k3kcluster "github.com/rancher/k3k/pkg/controller/cluster" 12 | "github.com/rancher/k3k/pkg/controller/kubeconfig" 13 | "github.com/sirupsen/logrus" 14 | "github.com/urfave/cli/v2" 15 | v1 "k8s.io/api/core/v1" 16 | apierrors "k8s.io/apimachinery/pkg/api/errors" 17 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 18 | "k8s.io/apimachinery/pkg/util/wait" 19 | clientcmdapi "k8s.io/client-go/tools/clientcmd/api" 20 | "k8s.io/client-go/util/retry" 21 | "k8s.io/utils/ptr" 22 | ) 23 | 24 | type CreateConfig struct { 25 | token string 26 | clusterCIDR string 27 | serviceCIDR string 28 | servers int 29 | agents int 30 | serverArgs cli.StringSlice 31 | agentArgs cli.StringSlice 32 | serverEnvs cli.StringSlice 33 | agentEnvs cli.StringSlice 34 | persistenceType string 35 | storageClassName string 36 | version string 37 | mode string 38 | kubeconfigServerHost string 39 | policy string 40 | } 41 | 42 | func NewClusterCreateCmd(appCtx *AppContext) *cli.Command { 43 | createConfig := &CreateConfig{} 44 | createFlags := NewCreateFlags(createConfig) 45 | 46 | return &cli.Command{ 47 | Name: "create", 48 | Usage: "Create new cluster", 49 | UsageText: "k3kcli cluster create [command options] NAME", 50 | Action: createAction(appCtx, createConfig), 51 | Flags: WithCommonFlags(appCtx, createFlags...), 52 | HideHelpCommand: true, 53 | } 54 | } 55 | 56 | func createAction(appCtx *AppContext, config *CreateConfig) cli.ActionFunc { 57 | return func(clx *cli.Context) error { 58 | ctx := context.Background() 59 | client := appCtx.Client 60 | 61 | if clx.NArg() != 1 { 62 | return cli.ShowSubcommandHelp(clx) 63 | } 64 | 65 | name := clx.Args().First() 66 | if name == k3kcluster.ClusterInvalidName { 67 | return errors.New("invalid cluster name") 68 | } 69 | 70 | if config.mode == string(v1alpha1.SharedClusterMode) && config.agents != 0 { 71 | return errors.New("invalid flag, --agents flag is only allowed in virtual mode") 72 | } 73 | 74 | namespace := appCtx.Namespace(name) 75 | 76 | if err := createNamespace(ctx, client, namespace, config.policy); err != nil { 77 | return err 78 | } 79 | 80 | if strings.Contains(config.version, "+") { 81 | orig := config.version 82 | config.version = strings.Replace(config.version, "+", 
"-", -1) 83 | logrus.Warnf("Invalid K3s docker reference version: '%s'. Using '%s' instead", orig, config.version) 84 | } 85 | 86 | if config.token != "" { 87 | logrus.Info("Creating cluster token secret") 88 | 89 | obj := k3kcluster.TokenSecretObj(config.token, name, namespace) 90 | 91 | if err := client.Create(ctx, &obj); err != nil { 92 | return err 93 | } 94 | } 95 | 96 | logrus.Infof("Creating cluster [%s] in namespace [%s]", name, namespace) 97 | 98 | cluster := newCluster(name, namespace, config) 99 | 100 | cluster.Spec.Expose = &v1alpha1.ExposeConfig{ 101 | NodePort: &v1alpha1.NodePortConfig{}, 102 | } 103 | 104 | // add Host IP address as an extra TLS-SAN to expose the k3k cluster 105 | url, err := url.Parse(appCtx.RestConfig.Host) 106 | if err != nil { 107 | return err 108 | } 109 | 110 | host := strings.Split(url.Host, ":") 111 | if config.kubeconfigServerHost != "" { 112 | host = []string{config.kubeconfigServerHost} 113 | } 114 | 115 | cluster.Spec.TLSSANs = []string{host[0]} 116 | 117 | if err := client.Create(ctx, cluster); err != nil { 118 | if apierrors.IsAlreadyExists(err) { 119 | logrus.Infof("Cluster [%s] already exists", name) 120 | } else { 121 | return err 122 | } 123 | } 124 | 125 | logrus.Infof("Extracting Kubeconfig for [%s] cluster", name) 126 | 127 | logrus.Infof("waiting for cluster to be available..") 128 | 129 | // retry every 5s for at most 2m, or 25 times 130 | availableBackoff := wait.Backoff{ 131 | Duration: 5 * time.Second, 132 | Cap: 2 * time.Minute, 133 | Steps: 25, 134 | } 135 | 136 | cfg := kubeconfig.New() 137 | 138 | var kubeconfig *clientcmdapi.Config 139 | 140 | if err := retry.OnError(availableBackoff, apierrors.IsNotFound, func() error { 141 | kubeconfig, err = cfg.Extract(ctx, client, cluster, host[0]) 142 | return err 143 | }); err != nil { 144 | return err 145 | } 146 | 147 | return writeKubeconfigFile(cluster, kubeconfig) 148 | } 149 | } 150 | 151 | func newCluster(name, namespace string, config *CreateConfig) *v1alpha1.Cluster { 152 | cluster := &v1alpha1.Cluster{ 153 | ObjectMeta: metav1.ObjectMeta{ 154 | Name: name, 155 | Namespace: namespace, 156 | }, 157 | TypeMeta: metav1.TypeMeta{ 158 | Kind: "Cluster", 159 | APIVersion: "k3k.io/v1alpha1", 160 | }, 161 | Spec: v1alpha1.ClusterSpec{ 162 | Servers: ptr.To(int32(config.servers)), 163 | Agents: ptr.To(int32(config.agents)), 164 | ClusterCIDR: config.clusterCIDR, 165 | ServiceCIDR: config.serviceCIDR, 166 | ServerArgs: config.serverArgs.Value(), 167 | AgentArgs: config.agentArgs.Value(), 168 | ServerEnvs: env(config.serverEnvs.Value()), 169 | AgentEnvs: env(config.agentEnvs.Value()), 170 | Version: config.version, 171 | Mode: v1alpha1.ClusterMode(config.mode), 172 | Persistence: v1alpha1.PersistenceConfig{ 173 | Type: v1alpha1.PersistenceMode(config.persistenceType), 174 | StorageClassName: ptr.To(config.storageClassName), 175 | }, 176 | }, 177 | } 178 | if config.storageClassName == "" { 179 | cluster.Spec.Persistence.StorageClassName = nil 180 | } 181 | 182 | if config.token != "" { 183 | cluster.Spec.TokenSecretRef = &v1.SecretReference{ 184 | Name: k3kcluster.TokenSecretName(name), 185 | Namespace: namespace, 186 | } 187 | } 188 | 189 | return cluster 190 | } 191 | 192 | func env(envSlice []string) []v1.EnvVar { 193 | var envVars []v1.EnvVar 194 | 195 | for _, env := range envSlice { 196 | keyValue := strings.Split(env, "=") 197 | if len(keyValue) != 2 { 198 | logrus.Fatalf("incorrect value for environment variable %s", env) 199 | } 200 | 201 | envVars = append(envVars, v1.EnvVar{ 202 
| Name: keyValue[0], 203 | Value: keyValue[1], 204 | }) 205 | } 206 | 207 | return envVars 208 | } 209 | -------------------------------------------------------------------------------- /cli/cmds/cluster_create_flags.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 7 | "github.com/urfave/cli/v2" 8 | ) 9 | 10 | func NewCreateFlags(config *CreateConfig) []cli.Flag { 11 | return []cli.Flag{ 12 | &cli.IntFlag{ 13 | Name: "servers", 14 | Usage: "number of servers", 15 | Destination: &config.servers, 16 | Value: 1, 17 | Action: func(ctx *cli.Context, value int) error { 18 | if value <= 0 { 19 | return errors.New("invalid number of servers") 20 | } 21 | return nil 22 | }, 23 | }, 24 | &cli.IntFlag{ 25 | Name: "agents", 26 | Usage: "number of agents", 27 | Destination: &config.agents, 28 | }, 29 | &cli.StringFlag{ 30 | Name: "token", 31 | Usage: "token of the cluster", 32 | Destination: &config.token, 33 | }, 34 | &cli.StringFlag{ 35 | Name: "cluster-cidr", 36 | Usage: "cluster CIDR", 37 | Destination: &config.clusterCIDR, 38 | }, 39 | &cli.StringFlag{ 40 | Name: "service-cidr", 41 | Usage: "service CIDR", 42 | Destination: &config.serviceCIDR, 43 | }, 44 | &cli.StringFlag{ 45 | Name: "persistence-type", 46 | Usage: "persistence mode for the nodes (dynamic, ephemeral, static)", 47 | Value: string(v1alpha1.DynamicPersistenceMode), 48 | Destination: &config.persistenceType, 49 | Action: func(ctx *cli.Context, value string) error { 50 | switch v1alpha1.PersistenceMode(value) { 51 | case v1alpha1.EphemeralPersistenceMode, v1alpha1.DynamicPersistenceMode: 52 | return nil 53 | default: 54 | return errors.New(`persistence-type should be one of "dynamic", "ephemeral" or "static"`) 55 | } 56 | }, 57 | }, 58 | &cli.StringFlag{ 59 | Name: "storage-class-name", 60 | Usage: "storage class name for dynamic persistence type", 61 | Destination: &config.storageClassName, 62 | }, 63 | &cli.StringSliceFlag{ 64 | Name: "server-args", 65 | Usage: "servers extra arguments", 66 | Destination: &config.serverArgs, 67 | }, 68 | &cli.StringSliceFlag{ 69 | Name: "agent-args", 70 | Usage: "agents extra arguments", 71 | Destination: &config.agentArgs, 72 | }, 73 | &cli.StringSliceFlag{ 74 | Name: "server-envs", 75 | Usage: "servers extra Envs", 76 | Destination: &config.serverEnvs, 77 | }, 78 | &cli.StringSliceFlag{ 79 | Name: "agent-envs", 80 | Usage: "agents extra Envs", 81 | Destination: &config.agentEnvs, 82 | }, 83 | &cli.StringFlag{ 84 | Name: "version", 85 | Usage: "k3s version", 86 | Destination: &config.version, 87 | }, 88 | &cli.StringFlag{ 89 | Name: "mode", 90 | Usage: "k3k mode type (shared, virtual)", 91 | Destination: &config.mode, 92 | Value: "shared", 93 | Action: func(ctx *cli.Context, value string) error { 94 | switch value { 95 | case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode): 96 | return nil 97 | default: 98 | return errors.New(`mode should be one of "shared" or "virtual"`) 99 | } 100 | }, 101 | }, 102 | &cli.StringFlag{ 103 | Name: "kubeconfig-server", 104 | Usage: "override the kubeconfig server host", 105 | Destination: &config.kubeconfigServerHost, 106 | }, 107 | &cli.StringFlag{ 108 | Name: "policy", 109 | Usage: "The policy to create the cluster in", 110 | Destination: &config.policy, 111 | }, 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /cli/cmds/cluster_delete.go: 
-------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | 7 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 8 | k3kcluster "github.com/rancher/k3k/pkg/controller/cluster" 9 | "github.com/rancher/k3k/pkg/controller/cluster/agent" 10 | "github.com/sirupsen/logrus" 11 | "github.com/urfave/cli/v2" 12 | v1 "k8s.io/api/core/v1" 13 | apierrors "k8s.io/apimachinery/pkg/api/errors" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | "k8s.io/apimachinery/pkg/types" 16 | ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" 17 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 18 | ) 19 | 20 | var keepData bool 21 | 22 | func NewClusterDeleteCmd(appCtx *AppContext) *cli.Command { 23 | return &cli.Command{ 24 | Name: "delete", 25 | Usage: "Delete an existing cluster", 26 | UsageText: "k3kcli cluster delete [command options] NAME", 27 | Action: delete(appCtx), 28 | Flags: WithCommonFlags(appCtx, &cli.BoolFlag{ 29 | Name: "keep-data", 30 | Usage: "keeps persistence volumes created for the cluster after deletion", 31 | Destination: &keepData, 32 | }), 33 | HideHelpCommand: true, 34 | } 35 | } 36 | 37 | func delete(appCtx *AppContext) cli.ActionFunc { 38 | return func(clx *cli.Context) error { 39 | ctx := context.Background() 40 | client := appCtx.Client 41 | 42 | if clx.NArg() != 1 { 43 | return cli.ShowSubcommandHelp(clx) 44 | } 45 | 46 | name := clx.Args().First() 47 | if name == k3kcluster.ClusterInvalidName { 48 | return errors.New("invalid cluster name") 49 | } 50 | 51 | namespace := appCtx.Namespace(name) 52 | 53 | logrus.Infof("Deleting [%s] cluster in namespace [%s]", name, namespace) 54 | 55 | cluster := v1alpha1.Cluster{ 56 | ObjectMeta: metav1.ObjectMeta{ 57 | Name: name, 58 | Namespace: namespace, 59 | }, 60 | } 61 | // keep bootstrap secrets and tokens if --keep-data flag is passed 62 | if keepData { 63 | // skip removing tokenSecret 64 | if err := RemoveOwnerReferenceFromSecret(ctx, k3kcluster.TokenSecretName(cluster.Name), client, cluster); err != nil { 65 | return err 66 | } 67 | 68 | // skip removing webhook secret 69 | if err := RemoveOwnerReferenceFromSecret(ctx, agent.WebhookSecretName(cluster.Name), client, cluster); err != nil { 70 | return err 71 | } 72 | } else { 73 | matchingLabels := ctrlclient.MatchingLabels(map[string]string{"cluster": cluster.Name, "role": "server"}) 74 | listOpts := ctrlclient.ListOptions{Namespace: cluster.Namespace} 75 | matchingLabels.ApplyToList(&listOpts) 76 | deleteOpts := &ctrlclient.DeleteAllOfOptions{ListOptions: listOpts} 77 | 78 | if err := client.DeleteAllOf(ctx, &v1.PersistentVolumeClaim{}, deleteOpts); err != nil { 79 | return ctrlclient.IgnoreNotFound(err) 80 | } 81 | } 82 | 83 | if err := client.Delete(ctx, &cluster); err != nil { 84 | return ctrlclient.IgnoreNotFound(err) 85 | } 86 | 87 | return nil 88 | } 89 | } 90 | 91 | func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1alpha1.Cluster) error { 92 | var secret v1.Secret 93 | 94 | key := types.NamespacedName{ 95 | Name: name, 96 | Namespace: cluster.Namespace, 97 | } 98 | 99 | if err := cl.Get(ctx, key, &secret); err != nil { 100 | if apierrors.IsNotFound(err) { 101 | logrus.Warnf("%s secret is not found", name) 102 | return nil 103 | } 104 | 105 | return err 106 | } 107 | 108 | if controllerutil.HasControllerReference(&secret) { 109 | if err := controllerutil.RemoveOwnerReference(&cluster, &secret, cl.Scheme()); err != nil { 110 | 
return err 111 | } 112 | 113 | return cl.Update(ctx, &secret) 114 | } 115 | 116 | return nil 117 | } 118 | -------------------------------------------------------------------------------- /cli/cmds/cluster_list.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 7 | "github.com/urfave/cli/v2" 8 | apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 9 | "k8s.io/apimachinery/pkg/types" 10 | "k8s.io/cli-runtime/pkg/printers" 11 | ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" 12 | ) 13 | 14 | func NewClusterListCmd(appCtx *AppContext) *cli.Command { 15 | return &cli.Command{ 16 | Name: "list", 17 | Usage: "List all the existing cluster", 18 | UsageText: "k3kcli cluster list [command options]", 19 | Action: list(appCtx), 20 | Flags: WithCommonFlags(appCtx), 21 | HideHelpCommand: true, 22 | } 23 | } 24 | 25 | func list(appCtx *AppContext) cli.ActionFunc { 26 | return func(clx *cli.Context) error { 27 | ctx := context.Background() 28 | client := appCtx.Client 29 | 30 | if clx.NArg() > 0 { 31 | return cli.ShowSubcommandHelp(clx) 32 | } 33 | 34 | var clusters v1alpha1.ClusterList 35 | if err := client.List(ctx, &clusters, ctrlclient.InNamespace(appCtx.namespace)); err != nil { 36 | return err 37 | } 38 | 39 | crd := &apiextensionsv1.CustomResourceDefinition{} 40 | if err := client.Get(ctx, types.NamespacedName{Name: "clusters.k3k.io"}, crd); err != nil { 41 | return err 42 | } 43 | 44 | items := toPointerSlice(clusters.Items) 45 | table := createTable(crd, items) 46 | 47 | printer := printers.NewTablePrinter(printers.PrintOptions{WithNamespace: true}) 48 | 49 | return printer.PrintObj(table, clx.App.Writer) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /cli/cmds/kubeconfig.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "context" 5 | "net/url" 6 | "os" 7 | "path/filepath" 8 | "strings" 9 | "time" 10 | 11 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 12 | "github.com/rancher/k3k/pkg/controller" 13 | "github.com/rancher/k3k/pkg/controller/certs" 14 | "github.com/rancher/k3k/pkg/controller/kubeconfig" 15 | "github.com/sirupsen/logrus" 16 | "github.com/urfave/cli/v2" 17 | apierrors "k8s.io/apimachinery/pkg/api/errors" 18 | "k8s.io/apimachinery/pkg/types" 19 | "k8s.io/apiserver/pkg/authentication/user" 20 | "k8s.io/client-go/tools/clientcmd" 21 | clientcmdapi "k8s.io/client-go/tools/clientcmd/api" 22 | "k8s.io/client-go/util/retry" 23 | ) 24 | 25 | var ( 26 | name string 27 | cn string 28 | org cli.StringSlice 29 | altNames cli.StringSlice 30 | expirationDays int64 31 | configName string 32 | kubeconfigServerHost string 33 | generateKubeconfigFlags = []cli.Flag{ 34 | &cli.StringFlag{ 35 | Name: "name", 36 | Usage: "cluster name", 37 | Destination: &name, 38 | }, 39 | &cli.StringFlag{ 40 | Name: "config-name", 41 | Usage: "the name of the generated kubeconfig file", 42 | Destination: &configName, 43 | }, 44 | &cli.StringFlag{ 45 | Name: "cn", 46 | Usage: "Common name (CN) of the generated certificates for the kubeconfig", 47 | Destination: &cn, 48 | Value: controller.AdminCommonName, 49 | }, 50 | &cli.StringSliceFlag{ 51 | Name: "org", 52 | Usage: "Organization name (ORG) of the generated certificates for the kubeconfig", 53 | Value: &org, 54 | }, 55 | &cli.StringSliceFlag{ 56 | Name: "altNames", 57 | 
Usage: "altNames of the generated certificates for the kubeconfig", 58 | Value: &altNames, 59 | }, 60 | &cli.Int64Flag{ 61 | Name: "expiration-days", 62 | Usage: "Expiration date of the certificates used for the kubeconfig", 63 | Destination: &expirationDays, 64 | Value: 356, 65 | }, 66 | &cli.StringFlag{ 67 | Name: "kubeconfig-server", 68 | Usage: "override the kubeconfig server host", 69 | Destination: &kubeconfigServerHost, 70 | Value: "", 71 | }, 72 | } 73 | ) 74 | 75 | func NewKubeconfigCmd(appCtx *AppContext) *cli.Command { 76 | return &cli.Command{ 77 | Name: "kubeconfig", 78 | Usage: "Manage kubeconfig for clusters", 79 | Subcommands: []*cli.Command{ 80 | NewKubeconfigGenerateCmd(appCtx), 81 | }, 82 | } 83 | } 84 | 85 | func NewKubeconfigGenerateCmd(appCtx *AppContext) *cli.Command { 86 | return &cli.Command{ 87 | Name: "generate", 88 | Usage: "Generate kubeconfig for clusters", 89 | SkipFlagParsing: false, 90 | Action: generate(appCtx), 91 | Flags: WithCommonFlags(appCtx, generateKubeconfigFlags...), 92 | } 93 | } 94 | 95 | func generate(appCtx *AppContext) cli.ActionFunc { 96 | return func(clx *cli.Context) error { 97 | ctx := context.Background() 98 | client := appCtx.Client 99 | 100 | clusterKey := types.NamespacedName{ 101 | Name: name, 102 | Namespace: appCtx.Namespace(name), 103 | } 104 | 105 | var cluster v1alpha1.Cluster 106 | 107 | if err := client.Get(ctx, clusterKey, &cluster); err != nil { 108 | return err 109 | } 110 | 111 | url, err := url.Parse(appCtx.RestConfig.Host) 112 | if err != nil { 113 | return err 114 | } 115 | 116 | host := strings.Split(url.Host, ":") 117 | if kubeconfigServerHost != "" { 118 | host = []string{kubeconfigServerHost} 119 | 120 | if err := altNames.Set(kubeconfigServerHost); err != nil { 121 | return err 122 | } 123 | } 124 | 125 | certAltNames := certs.AddSANs(altNames.Value()) 126 | 127 | orgs := org.Value() 128 | if orgs == nil { 129 | orgs = []string{user.SystemPrivilegedGroup} 130 | } 131 | 132 | cfg := kubeconfig.KubeConfig{ 133 | CN: cn, 134 | ORG: orgs, 135 | ExpiryDate: time.Hour * 24 * time.Duration(expirationDays), 136 | AltNames: certAltNames, 137 | } 138 | 139 | logrus.Infof("waiting for cluster to be available..") 140 | 141 | var kubeconfig *clientcmdapi.Config 142 | 143 | if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error { 144 | kubeconfig, err = cfg.Extract(ctx, client, &cluster, host[0]) 145 | return err 146 | }); err != nil { 147 | return err 148 | } 149 | 150 | return writeKubeconfigFile(&cluster, kubeconfig) 151 | } 152 | } 153 | 154 | func writeKubeconfigFile(cluster *v1alpha1.Cluster, kubeconfig *clientcmdapi.Config) error { 155 | if configName == "" { 156 | configName = cluster.Namespace + "-" + cluster.Name + "-kubeconfig.yaml" 157 | } 158 | 159 | pwd, err := os.Getwd() 160 | if err != nil { 161 | return err 162 | } 163 | 164 | logrus.Infof(`You can start using the cluster with: 165 | 166 | export KUBECONFIG=%s 167 | kubectl cluster-info 168 | `, filepath.Join(pwd, configName)) 169 | 170 | kubeconfigData, err := clientcmd.Write(*kubeconfig) 171 | if err != nil { 172 | return err 173 | } 174 | 175 | return os.WriteFile(configName, kubeconfigData, 0644) 176 | } 177 | -------------------------------------------------------------------------------- /cli/cmds/policy.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "github.com/urfave/cli/v2" 5 | ) 6 | 7 | func NewPolicyCmd(appCtx *AppContext) *cli.Command { 8 | return 
&cli.Command{ 9 | Name: "policy", 10 | Usage: "policy command", 11 | Subcommands: []*cli.Command{ 12 | NewPolicyCreateCmd(appCtx), 13 | NewPolicyDeleteCmd(appCtx), 14 | NewPolicyListCmd(appCtx), 15 | }, 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /cli/cmds/policy_create.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | 7 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 8 | "github.com/rancher/k3k/pkg/controller/policy" 9 | "github.com/sirupsen/logrus" 10 | "github.com/urfave/cli/v2" 11 | v1 "k8s.io/api/core/v1" 12 | apierrors "k8s.io/apimachinery/pkg/api/errors" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/types" 15 | "sigs.k8s.io/controller-runtime/pkg/client" 16 | ) 17 | 18 | type VirtualClusterPolicyCreateConfig struct { 19 | mode string 20 | } 21 | 22 | func NewPolicyCreateCmd(appCtx *AppContext) *cli.Command { 23 | config := &VirtualClusterPolicyCreateConfig{} 24 | 25 | createFlags := []cli.Flag{ 26 | &cli.StringFlag{ 27 | Name: "mode", 28 | Usage: "The allowed mode type of the policy", 29 | Destination: &config.mode, 30 | Value: "shared", 31 | Action: func(ctx *cli.Context, value string) error { 32 | switch value { 33 | case string(v1alpha1.VirtualClusterMode), string(v1alpha1.SharedClusterMode): 34 | return nil 35 | default: 36 | return errors.New(`mode should be one of "shared" or "virtual"`) 37 | } 38 | }, 39 | }, 40 | } 41 | 42 | return &cli.Command{ 43 | Name: "create", 44 | Usage: "Create new policy", 45 | UsageText: "k3kcli policy create [command options] NAME", 46 | Action: policyCreateAction(appCtx, config), 47 | Flags: WithCommonFlags(appCtx, createFlags...), 48 | HideHelpCommand: true, 49 | } 50 | } 51 | 52 | func policyCreateAction(appCtx *AppContext, config *VirtualClusterPolicyCreateConfig) cli.ActionFunc { 53 | return func(clx *cli.Context) error { 54 | ctx := context.Background() 55 | client := appCtx.Client 56 | 57 | if clx.NArg() != 1 { 58 | return cli.ShowSubcommandHelp(clx) 59 | } 60 | 61 | policyName := clx.Args().First() 62 | 63 | _, err := createPolicy(ctx, client, v1alpha1.ClusterMode(config.mode), policyName) 64 | 65 | return err 66 | } 67 | } 68 | 69 | func createNamespace(ctx context.Context, client client.Client, name, policyName string) error { 70 | ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}} 71 | 72 | if policyName != "" { 73 | ns.Labels = map[string]string{ 74 | policy.PolicyNameLabelKey: policyName, 75 | } 76 | } 77 | 78 | if err := client.Get(ctx, types.NamespacedName{Name: name}, ns); err != nil { 79 | if !apierrors.IsNotFound(err) { 80 | return err 81 | } 82 | 83 | logrus.Infof(`Creating namespace [%s]`, name) 84 | 85 | if err := client.Create(ctx, ns); err != nil { 86 | return err 87 | } 88 | } 89 | 90 | return nil 91 | } 92 | 93 | func createPolicy(ctx context.Context, client client.Client, mode v1alpha1.ClusterMode, policyName string) (*v1alpha1.VirtualClusterPolicy, error) { 94 | logrus.Infof("Creating policy [%s]", policyName) 95 | 96 | policy := &v1alpha1.VirtualClusterPolicy{ 97 | ObjectMeta: metav1.ObjectMeta{ 98 | Name: policyName, 99 | }, 100 | TypeMeta: metav1.TypeMeta{ 101 | Kind: "VirtualClusterPolicy", 102 | APIVersion: "k3k.io/v1alpha1", 103 | }, 104 | Spec: v1alpha1.VirtualClusterPolicySpec{ 105 | AllowedMode: mode, 106 | }, 107 | } 108 | 109 | if err := client.Create(ctx, policy); err != nil { 110 | if 
!apierrors.IsAlreadyExists(err) { 111 | return nil, err 112 | } 113 | 114 | logrus.Infof("Policy [%s] already exists", policyName) 115 | } 116 | 117 | return policy, nil 118 | } 119 | -------------------------------------------------------------------------------- /cli/cmds/policy_delete.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | 7 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 8 | k3kcluster "github.com/rancher/k3k/pkg/controller/cluster" 9 | "github.com/sirupsen/logrus" 10 | "github.com/urfave/cli/v2" 11 | apierrors "k8s.io/apimachinery/pkg/api/errors" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | ) 14 | 15 | func NewPolicyDeleteCmd(appCtx *AppContext) *cli.Command { 16 | return &cli.Command{ 17 | Name: "delete", 18 | Usage: "Delete an existing policy", 19 | UsageText: "k3kcli policy delete [command options] NAME", 20 | Action: policyDeleteAction(appCtx), 21 | Flags: WithCommonFlags(appCtx), 22 | HideHelpCommand: true, 23 | } 24 | } 25 | 26 | func policyDeleteAction(appCtx *AppContext) cli.ActionFunc { 27 | return func(clx *cli.Context) error { 28 | ctx := context.Background() 29 | client := appCtx.Client 30 | 31 | if clx.NArg() != 1 { 32 | return cli.ShowSubcommandHelp(clx) 33 | } 34 | 35 | name := clx.Args().First() 36 | if name == k3kcluster.ClusterInvalidName { 37 | return errors.New("invalid cluster name") 38 | } 39 | 40 | namespace := appCtx.Namespace(name) 41 | 42 | logrus.Infof("Deleting policy in namespace [%s]", namespace) 43 | 44 | policy := &v1alpha1.VirtualClusterPolicy{ 45 | ObjectMeta: metav1.ObjectMeta{ 46 | Name: "default", 47 | Namespace: namespace, 48 | }, 49 | } 50 | 51 | if err := client.Delete(ctx, policy); err != nil { 52 | if apierrors.IsNotFound(err) { 53 | logrus.Warnf("Policy not found in namespace [%s]", namespace) 54 | } else { 55 | return err 56 | } 57 | } 58 | 59 | return nil 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /cli/cmds/policy_list.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 7 | "github.com/urfave/cli/v2" 8 | apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 9 | "k8s.io/apimachinery/pkg/types" 10 | "k8s.io/cli-runtime/pkg/printers" 11 | ) 12 | 13 | func NewPolicyListCmd(appCtx *AppContext) *cli.Command { 14 | return &cli.Command{ 15 | Name: "list", 16 | Usage: "List all the existing policies", 17 | UsageText: "k3kcli policy list [command options]", 18 | Action: policyList(appCtx), 19 | Flags: WithCommonFlags(appCtx), 20 | HideHelpCommand: true, 21 | } 22 | } 23 | 24 | func policyList(appCtx *AppContext) cli.ActionFunc { 25 | return func(clx *cli.Context) error { 26 | ctx := context.Background() 27 | client := appCtx.Client 28 | 29 | if clx.NArg() > 0 { 30 | return cli.ShowSubcommandHelp(clx) 31 | } 32 | 33 | var policies v1alpha1.VirtualClusterPolicyList 34 | if err := client.List(ctx, &policies); err != nil { 35 | return err 36 | } 37 | 38 | crd := &apiextensionsv1.CustomResourceDefinition{} 39 | if err := client.Get(ctx, types.NamespacedName{Name: "virtualclusterpolicies.k3k.io"}, crd); err != nil { 40 | return err 41 | } 42 | 43 | items := toPointerSlice(policies.Items) 44 | table := createTable(crd, items) 45 | 46 | printer := printers.NewTablePrinter(printers.PrintOptions{}) 47 | 48 | return 
printer.PrintObj(table, clx.App.Writer) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /cli/cmds/root.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 7 | "github.com/rancher/k3k/pkg/buildinfo" 8 | "github.com/sirupsen/logrus" 9 | "github.com/urfave/cli/v2" 10 | apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 13 | "k8s.io/client-go/rest" 14 | "k8s.io/client-go/tools/clientcmd" 15 | "sigs.k8s.io/controller-runtime/pkg/client" 16 | ) 17 | 18 | type AppContext struct { 19 | RestConfig *rest.Config 20 | Client client.Client 21 | 22 | // Global flags 23 | Debug bool 24 | Kubeconfig string 25 | namespace string 26 | } 27 | 28 | func NewApp() *cli.App { 29 | appCtx := &AppContext{} 30 | 31 | app := cli.NewApp() 32 | app.Name = "k3kcli" 33 | app.Usage = "CLI for K3K" 34 | app.Flags = WithCommonFlags(appCtx) 35 | 36 | app.Before = func(clx *cli.Context) error { 37 | if appCtx.Debug { 38 | logrus.SetLevel(logrus.DebugLevel) 39 | } 40 | 41 | restConfig, err := loadRESTConfig(appCtx.Kubeconfig) 42 | if err != nil { 43 | return err 44 | } 45 | 46 | scheme := runtime.NewScheme() 47 | _ = clientgoscheme.AddToScheme(scheme) 48 | _ = v1alpha1.AddToScheme(scheme) 49 | _ = apiextensionsv1.AddToScheme(scheme) 50 | 51 | ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme}) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | appCtx.RestConfig = restConfig 57 | appCtx.Client = ctrlClient 58 | 59 | return nil 60 | } 61 | 62 | app.Version = buildinfo.Version 63 | cli.VersionPrinter = func(cCtx *cli.Context) { 64 | fmt.Println("k3kcli Version: " + buildinfo.Version) 65 | } 66 | 67 | app.Commands = []*cli.Command{ 68 | NewClusterCmd(appCtx), 69 | NewPolicyCmd(appCtx), 70 | NewKubeconfigCmd(appCtx), 71 | } 72 | 73 | return app 74 | } 75 | 76 | func (ctx *AppContext) Namespace(name string) string { 77 | if ctx.namespace != "" { 78 | return ctx.namespace 79 | } 80 | 81 | return "k3k-" + name 82 | } 83 | 84 | func loadRESTConfig(kubeconfig string) (*rest.Config, error) { 85 | loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() 86 | configOverrides := &clientcmd.ConfigOverrides{} 87 | 88 | if kubeconfig != "" { 89 | loadingRules.ExplicitPath = kubeconfig 90 | } 91 | 92 | kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) 93 | 94 | return kubeConfig.ClientConfig() 95 | } 96 | 97 | func WithCommonFlags(appCtx *AppContext, flags ...cli.Flag) []cli.Flag { 98 | commonFlags := []cli.Flag{ 99 | &cli.BoolFlag{ 100 | Name: "debug", 101 | Usage: "Turn on debug logs", 102 | Destination: &appCtx.Debug, 103 | EnvVars: []string{"K3K_DEBUG"}, 104 | }, 105 | &cli.StringFlag{ 106 | Name: "kubeconfig", 107 | Usage: "kubeconfig path", 108 | Destination: &appCtx.Kubeconfig, 109 | DefaultText: "$HOME/.kube/config or $KUBECONFIG if set", 110 | }, 111 | &cli.StringFlag{ 112 | Name: "namespace", 113 | Usage: "namespace to create the k3k cluster in", 114 | Aliases: []string{"n"}, 115 | Destination: &appCtx.namespace, 116 | }, 117 | } 118 | 119 | return append(commonFlags, flags...) 
120 | } 121 | -------------------------------------------------------------------------------- /cli/cmds/table_printer.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | "k8s.io/apimachinery/pkg/runtime" 7 | "k8s.io/client-go/util/jsonpath" 8 | ) 9 | 10 | // createTable creates a table to print from the printerColumn defined in the CRD spec, plus the name at the beginning 11 | func createTable[T runtime.Object](crd *apiextensionsv1.CustomResourceDefinition, objs []T) *metav1.Table { 12 | printerColumns := getPrinterColumnsFromCRD(crd) 13 | 14 | return &metav1.Table{ 15 | TypeMeta: metav1.TypeMeta{APIVersion: "meta.k8s.io/v1", Kind: "Table"}, 16 | ColumnDefinitions: convertToTableColumns(printerColumns), 17 | Rows: createTableRows(objs, printerColumns), 18 | } 19 | } 20 | 21 | func getPrinterColumnsFromCRD(crd *apiextensionsv1.CustomResourceDefinition) []apiextensionsv1.CustomResourceColumnDefinition { 22 | printerColumns := []apiextensionsv1.CustomResourceColumnDefinition{ 23 | {Name: "Name", Type: "string", Format: "name", Description: "Name of the Resource", JSONPath: ".metadata.name"}, 24 | } 25 | 26 | for _, version := range crd.Spec.Versions { 27 | if version.Name == "v1alpha1" { 28 | printerColumns = append(printerColumns, version.AdditionalPrinterColumns...) 29 | break 30 | } 31 | } 32 | 33 | return printerColumns 34 | } 35 | 36 | func convertToTableColumns(printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []metav1.TableColumnDefinition { 37 | var columnDefinitions []metav1.TableColumnDefinition 38 | 39 | for _, col := range printerColumns { 40 | columnDefinitions = append(columnDefinitions, metav1.TableColumnDefinition{ 41 | Name: col.Name, 42 | Type: col.Type, 43 | Format: col.Format, 44 | Description: col.Description, 45 | Priority: col.Priority, 46 | }) 47 | } 48 | 49 | return columnDefinitions 50 | } 51 | 52 | func createTableRows[T runtime.Object](objs []T, printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []metav1.TableRow { 53 | var rows []metav1.TableRow 54 | 55 | for _, obj := range objs { 56 | objMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&obj) 57 | if err != nil { 58 | rows = append(rows, metav1.TableRow{Cells: []any{""}}) 59 | continue 60 | } 61 | 62 | rows = append(rows, metav1.TableRow{ 63 | Cells: buildRowCells(objMap, printerColumns), 64 | Object: runtime.RawExtension{Object: obj}, 65 | }) 66 | } 67 | 68 | return rows 69 | } 70 | 71 | func buildRowCells(objMap map[string]any, printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []any { 72 | var cells []any 73 | 74 | for _, printCol := range printerColumns { 75 | j := jsonpath.New(printCol.Name) 76 | 77 | err := j.Parse("{" + printCol.JSONPath + "}") 78 | if err != nil { 79 | cells = append(cells, "") 80 | continue 81 | } 82 | 83 | results, err := j.FindResults(objMap) 84 | if err != nil || len(results) == 0 || len(results[0]) == 0 { 85 | cells = append(cells, "") 86 | continue 87 | } 88 | 89 | cells = append(cells, results[0][0].Interface()) 90 | } 91 | 92 | return cells 93 | } 94 | 95 | func toPointerSlice[T any](v []T) []*T { 96 | var vPtr = make([]*T, len(v)) 97 | 98 | for i := range v { 99 | vPtr[i] = &v[i] 100 | } 101 | 102 | return vPtr 103 | } 104 | -------------------------------------------------------------------------------- /cli/main.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/rancher/k3k/cli/cmds" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | func main() { 11 | app := cmds.NewApp() 12 | if err := app.Run(os.Args); err != nil { 13 | logrus.Fatal(err) 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /docs/advanced-usage.md: -------------------------------------------------------------------------------- 1 | # Advanced Usage 2 | 3 | This document provides advanced usage information for k3k, including detailed use cases and explanations of the `Cluster` resource fields for customization. 4 | 5 | ## Customizing the Cluster Resource 6 | 7 | The `Cluster` resource provides a variety of fields for customizing the behavior of your virtual clusters. You can check the [CRD documentation](./crds/crd-docs.md) for the full specs. 8 | 9 | **Note:** Most of these customization options can also be configured using the `k3kcli` tool. Refer to the [k3kcli](./cli/cli-docs.md) documentation for more details. 10 | 11 | 12 | 13 | This example creates a "shared" mode K3k cluster with: 14 | 15 | - 3 servers 16 | - K3s version v1.31.3-k3s1 17 | - Custom network configuration 18 | - Deployment on specific nodes with the `nodeSelector` 19 | - `kube-api` exposed using an ingress 20 | - Custom K3s `serverArgs` 21 | - ETCD data persisted using a `PVC` 22 | 23 | 24 | ```yaml 25 | apiVersion: k3k.io/v1alpha1 26 | kind: Cluster 27 | metadata: 28 | name: my-virtual-cluster 29 | namespace: my-namespace 30 | spec: 31 | mode: shared 32 | version: v1.31.3-k3s1 33 | servers: 3 34 | tlsSANs: 35 | - my-cluster.example.com 36 | nodeSelector: 37 | disktype: ssd 38 | expose: 39 | ingress: 40 | ingressClassName: nginx 41 | annotations: 42 | nginx.ingress.kubernetes.io/ssl-passthrough: "true" 43 | nginx.ingress.kubernetes.io/backend-protocol: "true" 44 | nginx.ingress.kubernetes.io/ssl-redirect: "HTTPS" 45 | clusterCIDR: 10.42.0.0/16 46 | serviceCIDR: 10.43.0.0/16 47 | clusterDNS: 10.43.0.10 48 | serverArgs: 49 | - --tls-san=my-cluster.example.com 50 | persistence: 51 | type: dynamic 52 | storageClassName: local-path 53 | ``` 54 | 55 | 56 | ### `mode` 57 | 58 | The `mode` field specifies the cluster provisioning mode, which can be either `shared` or `virtual`. The default mode is `shared`. 59 | 60 | * **`shared` mode:** In this mode, the virtual cluster shares the host cluster's resources and networking. This mode is suitable for lightweight workloads and development environments where isolation is not a primary concern. 61 | * **`virtual` mode:** In this mode, the virtual cluster runs as a separate K3s cluster within the host cluster. This mode provides stronger isolation and is suitable for production workloads or when dedicated resources are required. 62 | 63 | 64 | ### `version` 65 | 66 | The `version` field specifies the Kubernetes version to be used by the virtual nodes. If not specified, K3k will use the same K3s version as the host cluster. For example, if the host cluster is running Kubernetes v1.31.3, K3k will use the corresponding K3s version (e.g., `v1.31.3-k3s1`). 67 | 68 | 69 | ### `servers` 70 | 71 | The `servers` field specifies the number of K3s server nodes to deploy for the virtual cluster. The default value is 1. 72 | 73 | 74 | ### `agents` 75 | 76 | The `agents` field specifies the number of K3s agent nodes to deploy for the virtual cluster. The default value is 0. 
77 | 78 | **Note:** In `shared` mode, this field is ignored, as the Virtual Kubelet acts as the agent, and there are no K3s worker nodes. 79 | 80 | 81 | ### `nodeSelector` 82 | 83 | The `nodeSelector` field allows you to specify a node selector that will be applied to all server/agent pods. In `shared` mode, the node selector will also be applied to the workloads. 84 | 85 | 86 | ### `expose` 87 | 88 | The `expose` field contains options for exposing the API server of the virtual cluster. By default, the API server is only exposed as a `ClusterIP`, which is relatively secure but difficult to access from outside the cluster. 89 | 90 | You can use the `expose` field to enable exposure via `NodePort`, `LoadBalancer`, or `Ingress`. 91 | 92 | In this example we are exposing the Cluster with a Nginx ingress-controller, that has to be configured with the `--enable-ssl-passthrough` flag. 93 | 94 | 95 | ### `clusterCIDR` 96 | 97 | The `clusterCIDR` field specifies the CIDR range for the pods of the cluster. The default value is `10.42.0.0/16` in shared mode, and `10.52.0.0/16` in virtual mode. 98 | 99 | 100 | ### `serviceCIDR` 101 | 102 | The `serviceCIDR` field specifies the CIDR range for the services in the cluster. The default value is `10.43.0.0/16` in shared mode, and `10.53.0.0/16` in virtual mode. 103 | 104 | **Note:** In `shared` mode, the `serviceCIDR` should match the host cluster's `serviceCIDR` to prevent conflicts and in `virtual` mode both `serviceCIDR` and `clusterCIDR` should be different than the host cluster. 105 | 106 | 107 | ### `clusterDNS` 108 | 109 | The `clusterDNS` field specifies the IP address for the CoreDNS service. It needs to be in the range provided by `serviceCIDR`. The default value is `10.43.0.10`. 110 | 111 | 112 | ### `serverArgs` 113 | 114 | The `serverArgs` field allows you to specify additional arguments to be passed to the K3s server pods. 115 | 116 | ## Using the cli 117 | 118 | You can check the [k3kcli documentation](./cli/cli-docs.md) for the full specs. 119 | 120 | ### No storage provider: 121 | 122 | * Ephemeral Storage: 123 | 124 | ```bash 125 | k3kcli cluster create --persistence-type ephemeral my-cluster 126 | ``` 127 | 128 | *Important Notes:* 129 | 130 | * Using `--persistence-type ephemeral` will result in data loss if the nodes are restarted. 131 | 132 | * It is highly recommended to use `--persistence-type dynamic` with a configured storage class. -------------------------------------------------------------------------------- /docs/cli/cli-docs.md: -------------------------------------------------------------------------------- 1 | # NAME 2 | 3 | k3kcli - CLI for K3K 4 | 5 | # SYNOPSIS 6 | 7 | k3kcli 8 | 9 | ``` 10 | [--debug] 11 | [--kubeconfig]=[value] 12 | [--namespace|-n]=[value] 13 | ``` 14 | 15 | **Usage**: 16 | 17 | ``` 18 | k3kcli [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...] 
19 | ``` 20 | 21 | # GLOBAL OPTIONS 22 | 23 | **--debug**: Turn on debug logs 24 | 25 | **--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set) 26 | 27 | **--namespace, -n**="": namespace to create the k3k cluster in 28 | 29 | 30 | # COMMANDS 31 | 32 | ## cluster 33 | 34 | cluster command 35 | 36 | ### create 37 | 38 | Create new cluster 39 | 40 | >k3kcli cluster create [command options] NAME 41 | 42 | **--agent-args**="": agents extra arguments 43 | 44 | **--agent-envs**="": agents extra Envs 45 | 46 | **--agents**="": number of agents (default: 0) 47 | 48 | **--cluster-cidr**="": cluster CIDR 49 | 50 | **--debug**: Turn on debug logs 51 | 52 | **--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set) 53 | 54 | **--kubeconfig-server**="": override the kubeconfig server host 55 | 56 | **--mode**="": k3k mode type (shared, virtual) (default: "shared") 57 | 58 | **--namespace, -n**="": namespace to create the k3k cluster in 59 | 60 | **--persistence-type**="": persistence mode for the nodes (dynamic, ephemeral, static) (default: "dynamic") 61 | 62 | **--policy**="": The policy to create the cluster in 63 | 64 | **--server-args**="": servers extra arguments 65 | 66 | **--server-envs**="": servers extra Envs 67 | 68 | **--servers**="": number of servers (default: 1) 69 | 70 | **--service-cidr**="": service CIDR 71 | 72 | **--storage-class-name**="": storage class name for dynamic persistence type 73 | 74 | **--token**="": token of the cluster 75 | 76 | **--version**="": k3s version 77 | 78 | ### delete 79 | 80 | Delete an existing cluster 81 | 82 | >k3kcli cluster delete [command options] NAME 83 | 84 | **--debug**: Turn on debug logs 85 | 86 | **--keep-data**: keeps persistence volumes created for the cluster after deletion 87 | 88 | **--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set) 89 | 90 | **--namespace, -n**="": namespace to create the k3k cluster in 91 | 92 | ### list 93 | 94 | List all the existing cluster 95 | 96 | >k3kcli cluster list [command options] 97 | 98 | **--debug**: Turn on debug logs 99 | 100 | **--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set) 101 | 102 | **--namespace, -n**="": namespace to create the k3k cluster in 103 | 104 | ## policy 105 | 106 | policy command 107 | 108 | ### create 109 | 110 | Create new policy 111 | 112 | >k3kcli policy create [command options] NAME 113 | 114 | **--debug**: Turn on debug logs 115 | 116 | **--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set) 117 | 118 | **--mode**="": The allowed mode type of the policy (default: "shared") 119 | 120 | **--namespace, -n**="": namespace to create the k3k cluster in 121 | 122 | ### delete 123 | 124 | Delete an existing policy 125 | 126 | >k3kcli policy delete [command options] NAME 127 | 128 | **--debug**: Turn on debug logs 129 | 130 | **--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set) 131 | 132 | **--namespace, -n**="": namespace to create the k3k cluster in 133 | 134 | ### list 135 | 136 | List all the existing policies 137 | 138 | >k3kcli policy list [command options] 139 | 140 | **--debug**: Turn on debug logs 141 | 142 | **--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set) 143 | 144 | **--namespace, -n**="": namespace to create the k3k cluster in 145 | 146 | ## kubeconfig 147 | 148 | Manage kubeconfig for clusters 149 | 150 | ### generate 151 | 152 | 
Generate kubeconfig for clusters 153 | 154 | **--altNames**="": altNames of the generated certificates for the kubeconfig 155 | 156 | **--cn**="": Common name (CN) of the generated certificates for the kubeconfig (default: "system:admin") 157 | 158 | **--config-name**="": the name of the generated kubeconfig file 159 | 160 | **--debug**: Turn on debug logs 161 | 162 | **--expiration-days**="": Expiration date of the certificates used for the kubeconfig (default: 356) 163 | 164 | **--kubeconfig**="": kubeconfig path (default: $HOME/.kube/config or $KUBECONFIG if set) 165 | 166 | **--kubeconfig-server**="": override the kubeconfig server host 167 | 168 | **--name**="": cluster name 169 | 170 | **--namespace, -n**="": namespace to create the k3k cluster in 171 | 172 | **--org**="": Organization name (ORG) of the generated certificates for the kubeconfig 173 | -------------------------------------------------------------------------------- /docs/cli/genclidoc.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path" 7 | 8 | "github.com/rancher/k3k/cli/cmds" 9 | ) 10 | 11 | func main() { 12 | // Instantiate the CLI application 13 | app := cmds.NewApp() 14 | 15 | // Generate the Markdown documentation 16 | md, err := app.ToMarkdown() 17 | if err != nil { 18 | fmt.Println("Error generating documentation:", err) 19 | os.Exit(1) 20 | } 21 | 22 | wd, err := os.Getwd() 23 | if err != nil { 24 | fmt.Println(err) 25 | os.Exit(1) 26 | } 27 | 28 | outputFile := path.Join(wd, "docs/cli/cli-docs.md") 29 | 30 | err = os.WriteFile(outputFile, []byte(md), 0644) 31 | if err != nil { 32 | fmt.Println("Error generating documentation:", err) 33 | os.Exit(1) 34 | } 35 | 36 | fmt.Println("Documentation generated at " + outputFile) 37 | } 38 | -------------------------------------------------------------------------------- /docs/crds/config.yaml: -------------------------------------------------------------------------------- 1 | processor: 2 | # RE2 regular expressions describing type fields that should be excluded from the generated documentation. 3 | ignoreFields: 4 | - "status$" 5 | - "TypeMeta$" 6 | 7 | render: 8 | # Version of Kubernetes to use when generating links to Kubernetes API documentation. 9 | kubernetesVersion: "1.31" 10 | -------------------------------------------------------------------------------- /docs/development.md: -------------------------------------------------------------------------------- 1 | # Development 2 | 3 | 4 | ## Prerequisites 5 | 6 | To start developing K3k you will need: 7 | 8 | - Go 9 | - Docker 10 | - Helm 11 | - A running Kubernetes cluster 12 | 13 | 14 | ### TLDR 15 | 16 | ```shell 17 | #!/bin/bash 18 | 19 | set -euo pipefail 20 | 21 | # These environment variables configure the image repository and tag. 
22 | export REPO=ghcr.io/myuser 23 | export VERSION=dev-$(date -u '+%Y%m%d%H%M') 24 | 25 | make 26 | make push 27 | make install 28 | ``` 29 | 30 | ### Makefile 31 | 32 | To see all the available Make commands, you can run `make help`, e.g.: 33 | 34 | ``` 35 | -> % make help 36 | all Run 'make' or 'make all' to run 'version', 'generate', 'build' and 'package' 37 | version Print the current version 38 | build Build the K3k binaries (k3k, k3k-kubelet and k3kcli) 39 | package Package the k3k and k3k-kubelet Docker images 40 | push Push the K3k images to the registry 41 | test Run all the tests 42 | test-unit Run the unit tests (skips the e2e) 43 | test-controller Run the controller tests (pkg/controller) 44 | test-e2e Run the e2e tests 45 | generate Generate the CRDs specs 46 | docs Build the CRDs and CLI docs 47 | lint Find any linting issues in the project 48 | validate Validate the project checking for any dependency or doc mismatch 49 | install Install K3k with Helm on the targeted Kubernetes cluster 50 | help Show this help. 51 | ``` 52 | 53 | ### Build 54 | 55 | To build the needed binaries (`k3k`, `k3k-kubelet` and the `k3kcli`) and package the images, you can simply run `make`. 56 | 57 | By default, the `rancher` repository will be used, but you can customize this to your registry with the `REPO` env var: 58 | 59 | ``` 60 | REPO=ghcr.io/userorg make 61 | ``` 62 | 63 | To customize the tag, you can also explicitly set the `VERSION` env var: 64 | 65 | ``` 66 | VERSION=dev-$(date -u '+%Y%m%d%H%M') make 67 | ``` 68 | 69 | 70 | ### Push 71 | 72 | You will need to push the built images to your registry, and you can use the `make push` command to do this. 73 | 74 | 75 | ### Install 76 | 77 | Once you have your images available, you can install K3k with the `make install` command. This will use `helm` to install the release. 78 | 79 | 80 | ## Tests 81 | 82 | To run the tests, you can just run `make test`, or one of the other available "sub-tests" targets (`test-unit`, `test-controller`, `test-e2e`). 83 | 84 | We use [Ginkgo](https://onsi.github.io/ginkgo/) and [`envtest`](https://book.kubebuilder.io/reference/envtest) for testing the controllers. 85 | 86 | The required binaries for `envtest` are installed with [`setup-envtest`](https://pkg.go.dev/sigs.k8s.io/controller-runtime/tools/setup-envtest), in the `.envtest` folder. 87 | 88 | 89 | ## CRDs and Docs 90 | 91 | We are using Kubebuilder and `controller-gen` to build the needed CRDs. To generate the specs, you can run `make generate`. 92 | 93 | Remember also to update the CRDs documentation by running the `make docs` command. 94 | 95 | ## How to install k3k on k3d 96 | 97 | This document provides a guide on how to install k3k on [k3d](https://k3d.io). 98 | 99 | ### Installing k3d 100 | 101 | Since k3d uses Docker under the hood, we need to expose the ports on the host that we'll then use for the NodePort in virtual cluster creation. 102 | 103 | Create the k3d cluster in the following way: 104 | 105 | ```bash 106 | k3d cluster create k3k -p "30000-30010:30000-30010@server:0" 107 | ``` 108 | 109 | With this syntax, ports 30000 to 30010 will be exposed on the host. 110 | 111 | ### Install k3k 112 | 113 | Now install k3k as usual: 114 | 115 | ```bash 116 | helm repo update 117 | helm install --namespace k3k-system --create-namespace k3k k3k/k3k 118 | ``` 119 | 120 | ### Create a virtual cluster 121 | 122 | Once the k3k controller is up and running, create a namespace in which to create our first virtual cluster. 
123 | 124 | ```bash 125 | kubectl create ns k3k-mycluster 126 | ``` 127 | 128 | Create then the virtual cluster exposing through NodePort one of the ports that we set up in the previous step: 129 | 130 | ```bash 131 | cat <` | K3k controller image (replace `` with the desired version) | 23 | | `rancher/k3k-kubelet:` | K3k agent image for shared mode | 24 | | `rancher/k3s:` | K3s server/agent image for virtual clusters | 25 | 26 | Load these images into your internal (air-gapped) registry. 27 | 28 | --- 29 | 30 | ## 2. Configure Helm Chart for Air Gap installation 31 | 32 | Update the `values.yaml` file in the K3k Helm chart with air gap settings: 33 | 34 | ```yaml 35 | image: 36 | repository: rancher/k3k 37 | tag: "" # Specify the version tag 38 | pullPolicy: "" # Optional: "IfNotPresent", "Always", etc. 39 | 40 | sharedAgent: 41 | image: 42 | repository: rancher/k3k-kubelet 43 | tag: "" # Specify the version tag 44 | pullPolicy: "" # Optional 45 | 46 | k3sServer: 47 | image: 48 | repository: rancher/k3s 49 | pullPolicy: "" # Optional 50 | ``` 51 | 52 | These values enforce the use of internal image repositories for the K3k controller, the agent and the server. 53 | 54 | **Note** : All virtual clusters will use automatically those settings. 55 | 56 | --- 57 | 58 | ## 3. Enforce Registry in Virtual Clusters 59 | 60 | When creating a virtual cluster, use the `--system-default-registry` flag to ensure all system components (e.g., CoreDNS) pull from your internal registry: 61 | 62 | ```bash 63 | k3kcli cluster create \ 64 | --server-args "--system-default-registry=registry.internal.domain" \ 65 | my-cluster 66 | ``` 67 | 68 | This flag is passed directly to the K3s server in the virtual cluster, influencing all system workload image pulls. 69 | [K3s Server CLI Reference](https://docs.k3s.io/cli/server#k3s-server-cli-help) 70 | 71 | --- 72 | 73 | ## 4. Specify K3s Version for Virtual Clusters 74 | 75 | K3k allows specifying the K3s version used in each virtual cluster: 76 | 77 | ```bash 78 | k3kcli cluster create \ 79 | --k3s-version v1.29.4+k3s1 \ 80 | my-cluster 81 | ``` 82 | 83 | - If omitted, the **host cluster’s K3s version** will be used by default, which might not exist if it's not part of the air gap package. 84 | -------------------------------------------------------------------------------- /docs/images/architecture/shared-mode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/k3k/5758b880a518be93bba2edabceefd8e2a33c72e0/docs/images/architecture/shared-mode.png -------------------------------------------------------------------------------- /docs/images/architecture/virtual-mode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/k3k/5758b880a518be93bba2edabceefd8e2a33c72e0/docs/images/architecture/virtual-mode.png -------------------------------------------------------------------------------- /docs/virtualclusterpolicy.md: -------------------------------------------------------------------------------- 1 | # VirtualClusterPolicy 2 | 3 | The VirtualClusterPolicy Custom Resource in K3k provides a way to define and enforce consistent configurations, security settings, and resource management rules for your virtual clusters and the Namespaces they operate within. 
4 | 5 | By using VCPs, administrators can centrally manage these aspects, reducing manual configuration, ensuring alignment with organizational standards, and enhancing the overall security and operational consistency of the K3k environment. 6 | 7 | ## Core Concepts 8 | 9 | ### What is a VirtualClusterPolicy? 10 | 11 | A `VirtualClusterPolicy` is a cluster-scoped Kubernetes Custom Resource that specifies a set of rules and configurations. These policies are then applied to K3k virtual clusters (`Cluster` resources) operating within Kubernetes Namespaces that are explicitly bound to a VCP. 12 | 13 | ### Binding a Policy to a Namespace 14 | 15 | To apply a `VirtualClusterPolicy` to one or more Namespaces (and thus to all K3k `Cluster` resources within those Namespaces), you need to label the desired Namespace(s). Add the following label to your Namespace metadata: 16 | 17 | `policy.k3k.io/policy-name: <policy-name>` 18 | 19 | **Example: Labeling a Namespace** 20 | 21 | ```yaml 22 | apiVersion: v1 23 | kind: Namespace 24 | metadata: 25 | name: my-app-namespace 26 | labels: 27 | policy.k3k.io/policy-name: "standard-dev-policy" 28 | ``` 29 | 30 | In this example, `my-app-namespace` will adhere to the rules defined in the `VirtualClusterPolicy` named `standard-dev-policy`. Multiple Namespaces can be bound to the same policy for uniform configuration, or different Namespaces can be bound to distinct policies. 31 | 32 | It's also important to note what happens when a Namespace's policy binding changes. If a Namespace is unbound from a VirtualClusterPolicy (by removing the `policy.k3k.io/policy-name` label), K3k will clean up and remove the resources (such as ResourceQuotas, LimitRanges, and managed Namespace labels) that were originally applied by that policy. Similarly, if the label is changed to bind the Namespace to a new VirtualClusterPolicy, K3k will first remove the resources associated with the old policy before applying the configurations from the new one, ensuring a clean transition. 33 | 34 | ### Default Policy Values 35 | 36 | If you create a `VirtualClusterPolicy` without specifying any `spec` fields (e.g., using `k3kcli policy create my-default-policy`), it will be created with default settings. Currently, this includes `spec.allowedMode` being set to `"shared"`. 37 | 38 | ```yaml 39 | # Example of a minimal VCP (after creation with defaults) 40 | apiVersion: k3k.io/v1alpha1 41 | kind: VirtualClusterPolicy 42 | metadata: 43 | name: my-default-policy 44 | spec: 45 | allowedMode: shared 46 | ``` 47 | 48 | ## Key Capabilities & Examples 49 | 50 | A `VirtualClusterPolicy` can configure several aspects of the Namespaces it's bound to and the virtual clusters operating within them. 51 | 52 | ### 1. Restricting Allowed Virtual Cluster Modes (`AllowedMode`) 53 | 54 | You can restrict the `mode` (e.g., "shared" or "virtual") in which K3k `Cluster` resources can be provisioned within bound Namespaces. If a `Cluster` is created in a bound Namespace with a mode not allowed in `allowedMode`, its creation might proceed but an error should be reported in the `Cluster` resource's status. 55 | 56 | **Example:** Allow only "shared" mode clusters. 57 | 58 | ```yaml 59 | apiVersion: k3k.io/v1alpha1 60 | kind: VirtualClusterPolicy 61 | metadata: 62 | name: shared-only-policy 63 | spec: 64 | allowedMode: shared 65 | 66 | ``` 67 | 68 | You can also specify this using the CLI: `k3kcli policy create --mode shared shared-only-policy` (or `--mode virtual`). 69 | 70 | ### 2. 
Defining Resource Quotas (`quota`) 71 | 72 | You can define resource consumption limits for bound Namespaces by specifying a `ResourceQuota`. K3k will create a `ResourceQuota` object in each bound Namespace with the provided specifications. 73 | 74 | **Example:** Set CPU, memory, and pod limits. 75 | 76 | ```yaml 77 | apiVersion: k3k.io/v1alpha1 78 | kind: VirtualClusterPolicy 79 | metadata: 80 | name: quota-policy 81 | spec: 82 | quota: 83 | hard: 84 | cpu: "10" 85 | memory: "20Gi" 86 | pods: "10" 87 | ``` 88 | 89 | ### 3. Setting Limit Ranges (`limit`) 90 | 91 | You can define default resource requests/limits and min/max constraints for containers running in bound Namespaces by specifying a `LimitRange`. K3k will create a `LimitRange` object in each bound Namespace. 92 | 93 | **Example:** Define default CPU requests/limits and min/max CPU. 94 | 95 | ```yaml 96 | apiVersion: k3k.io/v1alpha1 97 | kind: VirtualClusterPolicy 98 | metadata: 99 | name: limit-policy 100 | spec: 101 | limit: 102 | limits: 103 | - default: 104 | cpu: "500m" 105 | defaultRequest: 106 | cpu: "500m" 107 | max: 108 | cpu: "1" 109 | min: 110 | cpu: "100m" 111 | type: Container 112 | ``` 113 | 114 | ### 4. Managing Network Isolation (`disableNetworkPolicy`) 115 | 116 | By default, K3k creates a `NetworkPolicy` in bound Namespaces to provide network isolation for virtual clusters (especially in shared mode). You can disable the creation of this default policy. 117 | 118 | **Example:** Disable the default NetworkPolicy. 119 | 120 | ```yaml 121 | apiVersion: k3k.io/v1alpha1 122 | kind: VirtualClusterPolicy 123 | metadata: 124 | name: no-default-netpol-policy 125 | spec: 126 | disableNetworkPolicy: true 127 | ``` 128 | 129 | ### 5. Enforcing Pod Security Admission (`podSecurityAdmissionLevel`) 130 | 131 | You can enforce Pod Security Standards (PSS) by specifying a Pod Security Admission (PSA) level. K3k will apply the corresponding PSA labels to each bound Namespace. The allowed values are `privileged`, `baseline`, `restricted`, and this will add labels like `pod-security.kubernetes.io/enforce: ` to the bound Namespace. 132 | 133 | **Example:** Enforce the "baseline" PSS level. 134 | 135 | ```yaml 136 | apiVersion: k3k.io/v1alpha1 137 | kind: VirtualClusterPolicy 138 | metadata: 139 | name: baseline-psa-policy 140 | spec: 141 | podSecurityAdmissionLevel: baseline 142 | ``` 143 | 144 | ## Further Reading 145 | 146 | * For a complete reference of all `VirtualClusterPolicy` spec fields, see the [API Reference for VirtualClusterPolicy](./crds/crd-docs.md#virtualclusterpolicy). 147 | * To understand how VCPs fit into the overall K3k system, see the [Architecture](./architecture.md) document. 
148 | -------------------------------------------------------------------------------- /examples/clusterset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k3k.io/v1alpha1 2 | kind: VirtualClusterPolicy 3 | metadata: 4 | name: policy-example 5 | # spec: 6 | # disableNetworkPolicy: false 7 | # allowedMode: "shared" 8 | # podSecurityAdmissionLevel: "baseline" 9 | # defaultPriorityClass: "lowpriority" 10 | -------------------------------------------------------------------------------- /examples/multiple-servers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k3k.io/v1alpha1 2 | kind: Cluster 3 | metadata: 4 | name: example1 5 | spec: 6 | mode: "shared" 7 | servers: 1 8 | agents: 3 9 | token: test 10 | version: v1.26.0-k3s2 11 | clusterCIDR: 10.30.0.0/16 12 | serviceCIDR: 10.31.0.0/16 13 | clusterDNS: 10.30.0.10 14 | serverArgs: 15 | - "--write-kubeconfig-mode=777" 16 | expose: 17 | ingress: 18 | enabled: true 19 | ingressClassName: "nginx" 20 | -------------------------------------------------------------------------------- /examples/single-server.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k3k.io/v1alpha1 2 | kind: Cluster 3 | metadata: 4 | name: single-server 5 | spec: 6 | mode: "shared" 7 | servers: 1 8 | agents: 3 9 | token: test 10 | version: v1.26.0-k3s2 11 | clusterCIDR: 10.30.0.0/16 12 | serviceCIDR: 10.31.0.0/16 13 | clusterDNS: 10.30.0.10 14 | serverArgs: 15 | - "--write-kubeconfig-mode=777" 16 | expose: 17 | ingress: 18 | enabled: true 19 | ingressClassName: "nginx" 20 | -------------------------------------------------------------------------------- /k3k-kubelet/README.md: -------------------------------------------------------------------------------- 1 | ## Virtual Kubelet 2 | 3 | This package provides an implementation of a virtual cluster node using [virtual-kubelet](https://github.com/virtual-kubelet/virtual-kubelet). 4 | 5 | The implementation is based on several projects, including: 6 | - [Virtual Kubelet](https://github.com/virtual-kubelet/virtual-kubelet) 7 | - [Kubectl](https://github.com/kubernetes/kubectl) 8 | - [Client-go](https://github.com/kubernetes/client-go) 9 | - [Azure-Aci](https://github.com/virtual-kubelet/azure-aci) 10 | 11 | ## Overview 12 | 13 | This project creates a node that registers itself in the virtual cluster. When workloads are scheduled to this node, it simply creates/updates the workload on the host cluster. 14 | 15 | ## Usage 16 | 17 | Build/push the image (from the root of rancher/k3k) using: 18 | 19 | ``` 20 | make build 21 | docker buildx build -f package/Dockerfile.k3k-kubelet . -t $REPO/$IMAGE:$TAG 22 | ``` 23 | 24 | When running, it is recommended to deploy a k3k cluster with 1 server (with `--disable-agent` as a server arg) and no agents (so that the workloads can only be scheduled on the virtual node/host cluster). 25 | 26 | After the image is built, it should be deployed with the following ENV vars set: 27 | - `CLUSTER_NAME` should be the name of the cluster. 28 | - `CLUSTER_NAMESPACE` should be the namespace the cluster is running in. 29 | - `HOST_KUBECONFIG` should be the path on the local filesystem (in container) to a kubeconfig for the host cluster (likely stored in a secret/mounted as a volume). 30 | - `VIRT_KUBECONFIG` should be the path on the local filesystem (in container) to a kubeconfig for the virtual cluster (likely stored in a secret/mounted as a volume). 
31 | - `VIRT_POD_IP` should be the IP that the container is accessible from. 32 | 33 | This project is still under development and there are many features yet to be implemented, but it can run a basic nginx pod. 34 | 35 | -------------------------------------------------------------------------------- /k3k-kubelet/config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "os" 6 | 7 | "gopkg.in/yaml.v2" 8 | ) 9 | 10 | // config has all virtual-kubelet startup options 11 | type config struct { 12 | ClusterName string `yaml:"clusterName,omitempty"` 13 | ClusterNamespace string `yaml:"clusterNamespace,omitempty"` 14 | ServiceName string `yaml:"serviceName,omitempty"` 15 | Token string `yaml:"token,omitempty"` 16 | AgentHostname string `yaml:"agentHostname,omitempty"` 17 | HostConfigPath string `yaml:"hostConfigPath,omitempty"` 18 | VirtualConfigPath string `yaml:"virtualConfigPath,omitempty"` 19 | KubeletPort string `yaml:"kubeletPort,omitempty"` 20 | ServerIP string `yaml:"serverIP,omitempty"` 21 | Version string `yaml:"version,omitempty"` 22 | } 23 | 24 | func (c *config) unmarshalYAML(data []byte) error { 25 | var conf config 26 | 27 | if err := yaml.Unmarshal(data, &conf); err != nil { 28 | return err 29 | } 30 | 31 | if c.ClusterName == "" { 32 | c.ClusterName = conf.ClusterName 33 | } 34 | 35 | if c.ClusterNamespace == "" { 36 | c.ClusterNamespace = conf.ClusterNamespace 37 | } 38 | 39 | if c.HostConfigPath == "" { 40 | c.HostConfigPath = conf.HostConfigPath 41 | } 42 | 43 | if c.VirtualConfigPath == "" { 44 | c.VirtualConfigPath = conf.VirtualConfigPath 45 | } 46 | 47 | if c.KubeletPort == "" { 48 | c.KubeletPort = conf.KubeletPort 49 | } 50 | 51 | if c.AgentHostname == "" { 52 | c.AgentHostname = conf.AgentHostname 53 | } 54 | 55 | if c.ServiceName == "" { 56 | c.ServiceName = conf.ServiceName 57 | } 58 | 59 | if c.Token == "" { 60 | c.Token = conf.Token 61 | } 62 | 63 | if c.ServerIP == "" { 64 | c.ServerIP = conf.ServerIP 65 | } 66 | 67 | if c.Version == "" { 68 | c.Version = conf.Version 69 | } 70 | 71 | return nil 72 | } 73 | 74 | func (c *config) validate() error { 75 | if c.ClusterName == "" { 76 | return errors.New("cluster name is not provided") 77 | } 78 | 79 | if c.ClusterNamespace == "" { 80 | return errors.New("cluster namespace is not provided") 81 | } 82 | 83 | if c.AgentHostname == "" { 84 | return errors.New("agent Hostname is not provided") 85 | } 86 | 87 | return nil 88 | } 89 | 90 | func (c *config) parse(path string) error { 91 | if _, err := os.Stat(path); os.IsNotExist(err) { 92 | return nil 93 | } 94 | 95 | b, err := os.ReadFile(path) 96 | if err != nil { 97 | return err 98 | } 99 | 100 | return c.unmarshalYAML(b) 101 | } 102 | -------------------------------------------------------------------------------- /k3k-kubelet/controller/handler.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | 8 | "github.com/rancher/k3k/k3k-kubelet/translate" 9 | k3klog "github.com/rancher/k3k/pkg/log" 10 | v1 "k8s.io/api/core/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/runtime/schema" 13 | ctrl "sigs.k8s.io/controller-runtime" 14 | "sigs.k8s.io/controller-runtime/pkg/client" 15 | "sigs.k8s.io/controller-runtime/pkg/manager" 16 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 17 | ) 18 | 19 | type ControllerHandler struct { 20 | sync.RWMutex 21 | // Mgr is the 
manager used to run new controllers - from the virtual cluster 22 | Mgr manager.Manager 23 | // Scheme is the scheme used to run new controllers - from the virtual cluster 24 | Scheme runtime.Scheme 25 | // HostClient is the client used to communicate with the host cluster 26 | HostClient client.Client 27 | // VirtualClient is the client used to communicate with the virtual cluster 28 | VirtualClient client.Client 29 | // Translator is the translator that will be used to adjust objects before they 30 | // are made on the host cluster 31 | Translator translate.ToHostTranslator 32 | // Logger is the logger that the controller will use to log errors 33 | Logger *k3klog.Logger 34 | // controllers are the controllers which are currently running 35 | controllers map[schema.GroupVersionKind]updateableReconciler 36 | } 37 | 38 | // updateableReconciler is a reconciler that only syncs specific resources (by name/namespace). This list can 39 | // be altered through the Add and Remove methods 40 | type updateableReconciler interface { 41 | reconcile.Reconciler 42 | AddResource(ctx context.Context, namespace string, name string) error 43 | RemoveResource(ctx context.Context, namespace string, name string) error 44 | } 45 | 46 | func (c *ControllerHandler) AddResource(ctx context.Context, obj client.Object) error { 47 | c.RLock() 48 | 49 | controllers := c.controllers 50 | if controllers != nil { 51 | if r, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()]; ok { 52 | err := r.AddResource(ctx, obj.GetNamespace(), obj.GetName()) 53 | c.RUnlock() 54 | 55 | return err 56 | } 57 | } 58 | 59 | // we need to manually lock/unlock since we intned on write locking to add a new controller 60 | c.RUnlock() 61 | 62 | var r updateableReconciler 63 | 64 | switch obj.(type) { 65 | case *v1.Secret: 66 | r = &SecretSyncer{ 67 | HostClient: c.HostClient, 68 | VirtualClient: c.VirtualClient, 69 | // TODO: Need actual function 70 | TranslateFunc: func(s *v1.Secret) (*v1.Secret, error) { 71 | // note that this doesn't do any type safety - fix this 72 | // when generics work 73 | c.Translator.TranslateTo(s) 74 | // Remove service-account-token types when synced to the host 75 | if s.Type == v1.SecretTypeServiceAccountToken { 76 | s.Type = v1.SecretTypeOpaque 77 | } 78 | return s, nil 79 | }, 80 | Logger: c.Logger, 81 | } 82 | case *v1.ConfigMap: 83 | r = &ConfigMapSyncer{ 84 | HostClient: c.HostClient, 85 | VirtualClient: c.VirtualClient, 86 | // TODO: Need actual function 87 | TranslateFunc: func(s *v1.ConfigMap) (*v1.ConfigMap, error) { 88 | c.Translator.TranslateTo(s) 89 | return s, nil 90 | }, 91 | Logger: c.Logger, 92 | } 93 | default: 94 | // TODO: Technically, the configmap/secret syncers are relatively generic, and this 95 | // logic could be used for other types. 96 | return fmt.Errorf("unrecognized type: %T", obj) 97 | } 98 | 99 | err := ctrl.NewControllerManagedBy(c.Mgr). 100 | For(&v1.ConfigMap{}). 
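// Note: the controller watch is registered for ConfigMaps here even when the reconciler r above was built for a Secret.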
101 | Complete(r) 102 | 103 | if err != nil { 104 | return fmt.Errorf("unable to start configmap controller: %w", err) 105 | } 106 | 107 | c.Lock() 108 | if c.controllers == nil { 109 | c.controllers = map[schema.GroupVersionKind]updateableReconciler{} 110 | } 111 | 112 | c.controllers[obj.GetObjectKind().GroupVersionKind()] = r 113 | 114 | c.Unlock() 115 | 116 | return r.AddResource(ctx, obj.GetNamespace(), obj.GetName()) 117 | } 118 | 119 | func (c *ControllerHandler) RemoveResource(ctx context.Context, obj client.Object) error { 120 | // since we aren't adding a new controller, we don't need to lock 121 | c.RLock() 122 | ctrl, ok := c.controllers[obj.GetObjectKind().GroupVersionKind()] 123 | c.RUnlock() 124 | 125 | if !ok { 126 | return fmt.Errorf("no controller found for gvk %s", obj.GetObjectKind().GroupVersionKind()) 127 | } 128 | 129 | return ctrl.RemoveResource(ctx, obj.GetNamespace(), obj.GetName()) 130 | } 131 | -------------------------------------------------------------------------------- /k3k-kubelet/controller/persistentvolumeclaims.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rancher/k3k/k3k-kubelet/translate" 7 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 8 | "github.com/rancher/k3k/pkg/log" 9 | v1 "k8s.io/api/core/v1" 10 | apierrors "k8s.io/apimachinery/pkg/api/errors" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/types" 13 | ctrl "sigs.k8s.io/controller-runtime" 14 | ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" 15 | "sigs.k8s.io/controller-runtime/pkg/controller" 16 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 17 | "sigs.k8s.io/controller-runtime/pkg/manager" 18 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 19 | ) 20 | 21 | const ( 22 | pvcController = "pvc-syncer-controller" 23 | pvcFinalizerName = "pvc.k3k.io/finalizer" 24 | ) 25 | 26 | type PVCReconciler struct { 27 | virtualClient ctrlruntimeclient.Client 28 | hostClient ctrlruntimeclient.Client 29 | clusterName string 30 | clusterNamespace string 31 | Scheme *runtime.Scheme 32 | HostScheme *runtime.Scheme 33 | logger *log.Logger 34 | Translator translate.ToHostTranslator 35 | } 36 | 37 | // AddPVCSyncer adds persistentvolumeclaims syncer controller to k3k-kubelet 38 | func AddPVCSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error { 39 | translator := translate.ToHostTranslator{ 40 | ClusterName: clusterName, 41 | ClusterNamespace: clusterNamespace, 42 | } 43 | // initialize a new Reconciler 44 | reconciler := PVCReconciler{ 45 | virtualClient: virtMgr.GetClient(), 46 | hostClient: hostMgr.GetClient(), 47 | Scheme: virtMgr.GetScheme(), 48 | HostScheme: hostMgr.GetScheme(), 49 | logger: logger.Named(pvcController), 50 | Translator: translator, 51 | clusterName: clusterName, 52 | clusterNamespace: clusterNamespace, 53 | } 54 | 55 | return ctrl.NewControllerManagedBy(virtMgr). 56 | For(&v1.PersistentVolumeClaim{}). 57 | WithOptions(controller.Options{ 58 | MaxConcurrentReconciles: maxConcurrentReconciles, 59 | }). 
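// PersistentVolumeClaim events in the virtual cluster drive reconciliation; the reconciler mirrors each claim onto the host cluster.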
60 | Complete(&reconciler) 61 | } 62 | 63 | func (r *PVCReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { 64 | log := r.logger.With("Cluster", r.clusterName, "PersistentVolumeClaim", req.NamespacedName) 65 | 66 | var ( 67 | virtPVC v1.PersistentVolumeClaim 68 | cluster v1alpha1.Cluster 69 | ) 70 | 71 | if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil { 72 | return reconcile.Result{}, err 73 | } 74 | 75 | // handling persistent volume sync 76 | if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPVC); err != nil { 77 | return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err) 78 | } 79 | 80 | syncedPVC := r.pvc(&virtPVC) 81 | if err := controllerutil.SetControllerReference(&cluster, syncedPVC, r.HostScheme); err != nil { 82 | return reconcile.Result{}, err 83 | } 84 | 85 | // handle deletion 86 | if !virtPVC.DeletionTimestamp.IsZero() { 87 | // deleting the synced service if exists 88 | if err := r.hostClient.Delete(ctx, syncedPVC); !apierrors.IsNotFound(err) { 89 | return reconcile.Result{}, err 90 | } 91 | // remove the finalizer after cleaning up the synced service 92 | if controllerutil.RemoveFinalizer(&virtPVC, pvcFinalizerName) { 93 | if err := r.virtualClient.Update(ctx, &virtPVC); err != nil { 94 | return reconcile.Result{}, err 95 | } 96 | } 97 | 98 | return reconcile.Result{}, nil 99 | } 100 | 101 | // Add finalizer if it does not exist 102 | if controllerutil.AddFinalizer(&virtPVC, pvcFinalizerName) { 103 | if err := r.virtualClient.Update(ctx, &virtPVC); err != nil { 104 | return reconcile.Result{}, err 105 | } 106 | } 107 | 108 | // create the pvc on host 109 | log.Info("creating the persistent volume for the first time on the host cluster") 110 | 111 | // note that we dont need to update the PVC on the host cluster, only syncing the PVC to allow being 112 | // handled by the host cluster. 
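// IgnoreAlreadyExists keeps retries idempotent: if a previous reconcile already created this claim on the host, the AlreadyExists error is discarded and reconciliation succeeds.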
113 | return reconcile.Result{}, ctrlruntimeclient.IgnoreAlreadyExists(r.hostClient.Create(ctx, syncedPVC)) 114 | } 115 | 116 | func (r *PVCReconciler) pvc(obj *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim { 117 | hostPVC := obj.DeepCopy() 118 | r.Translator.TranslateTo(hostPVC) 119 | 120 | return hostPVC 121 | } 122 | -------------------------------------------------------------------------------- /k3k-kubelet/controller/pod.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rancher/k3k/k3k-kubelet/translate" 7 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 8 | "github.com/rancher/k3k/pkg/log" 9 | v1 "k8s.io/api/core/v1" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/types" 13 | "k8s.io/component-helpers/storage/volume" 14 | ctrl "sigs.k8s.io/controller-runtime" 15 | ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" 16 | "sigs.k8s.io/controller-runtime/pkg/controller" 17 | "sigs.k8s.io/controller-runtime/pkg/manager" 18 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 19 | ) 20 | 21 | const ( 22 | podController = "pod-pvc-controller" 23 | pseudoPVLabel = "pod.k3k.io/pseudoPV" 24 | ) 25 | 26 | type PodReconciler struct { 27 | virtualClient ctrlruntimeclient.Client 28 | hostClient ctrlruntimeclient.Client 29 | clusterName string 30 | clusterNamespace string 31 | Scheme *runtime.Scheme 32 | HostScheme *runtime.Scheme 33 | logger *log.Logger 34 | Translator translate.ToHostTranslator 35 | } 36 | 37 | // AddPodPVCController adds pod controller to k3k-kubelet 38 | func AddPodPVCController(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error { 39 | translator := translate.ToHostTranslator{ 40 | ClusterName: clusterName, 41 | ClusterNamespace: clusterNamespace, 42 | } 43 | // initialize a new Reconciler 44 | reconciler := PodReconciler{ 45 | virtualClient: virtMgr.GetClient(), 46 | hostClient: hostMgr.GetClient(), 47 | Scheme: virtMgr.GetScheme(), 48 | HostScheme: hostMgr.GetScheme(), 49 | logger: logger.Named(podController), 50 | Translator: translator, 51 | clusterName: clusterName, 52 | clusterNamespace: clusterNamespace, 53 | } 54 | 55 | return ctrl.NewControllerManagedBy(virtMgr). 56 | For(&v1.Pod{}). 57 | WithOptions(controller.Options{ 58 | MaxConcurrentReconciles: maxConcurrentReconciles, 59 | }). 
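// Every Pod event in the virtual cluster triggers a reconcile, but only pods that reference a PersistentVolumeClaim are acted on.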
60 | Complete(&reconciler) 61 | } 62 | 63 | func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { 64 | log := ctrl.LoggerFrom(ctx).WithValues("cluster", r.clusterName, "clusterNamespace", r.clusterNamespace) 65 | 66 | var ( 67 | virtPod v1.Pod 68 | cluster v1alpha1.Cluster 69 | ) 70 | 71 | if err := r.hostClient.Get(ctx, types.NamespacedName{Name: r.clusterName, Namespace: r.clusterNamespace}, &cluster); err != nil { 72 | return reconcile.Result{}, err 73 | } 74 | 75 | // handling pod 76 | if err := r.virtualClient.Get(ctx, req.NamespacedName, &virtPod); err != nil { 77 | return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err) 78 | } 79 | 80 | // reconcile pods with pvcs 81 | for _, vol := range virtPod.Spec.Volumes { 82 | if vol.PersistentVolumeClaim != nil { 83 | log.Info("Handling pod with pvc") 84 | 85 | if err := r.reconcilePodWithPVC(ctx, &virtPod, vol.PersistentVolumeClaim); err != nil { 86 | return reconcile.Result{}, err 87 | } 88 | } 89 | } 90 | 91 | return reconcile.Result{}, nil 92 | } 93 | 94 | // reconcilePodWithPVC will make sure to create a fake PV for each PVC for any pod so that it can be scheduled on the virtual-kubelet 95 | // and then created on the host, the PV is not synced to the host cluster. 96 | func (r *PodReconciler) reconcilePodWithPVC(ctx context.Context, pod *v1.Pod, pvcSource *v1.PersistentVolumeClaimVolumeSource) error { 97 | log := ctrl.LoggerFrom(ctx).WithValues("PersistentVolumeClaim", pvcSource.ClaimName) 98 | 99 | var pvc v1.PersistentVolumeClaim 100 | 101 | key := types.NamespacedName{ 102 | Name: pvcSource.ClaimName, 103 | Namespace: pod.Namespace, 104 | } 105 | 106 | if err := r.virtualClient.Get(ctx, key, &pvc); err != nil { 107 | return ctrlruntimeclient.IgnoreNotFound(err) 108 | } 109 | 110 | log.Info("Creating pseudo Persistent Volume") 111 | 112 | pv := r.pseudoPV(&pvc) 113 | if err := r.virtualClient.Create(ctx, pv); err != nil { 114 | return ctrlruntimeclient.IgnoreAlreadyExists(err) 115 | } 116 | 117 | orig := pv.DeepCopy() 118 | pv.Status = v1.PersistentVolumeStatus{ 119 | Phase: v1.VolumeBound, 120 | } 121 | 122 | if err := r.virtualClient.Status().Patch(ctx, pv, ctrlruntimeclient.MergeFrom(orig)); err != nil { 123 | return err 124 | } 125 | 126 | log.Info("Patch the status of PersistentVolumeClaim to Bound") 127 | 128 | pvcPatch := pvc.DeepCopy() 129 | if pvcPatch.Annotations == nil { 130 | pvcPatch.Annotations = make(map[string]string) 131 | } 132 | 133 | pvcPatch.Annotations[volume.AnnBoundByController] = "yes" 134 | pvcPatch.Annotations[volume.AnnBindCompleted] = "yes" 135 | pvcPatch.Status.Phase = v1.ClaimBound 136 | pvcPatch.Status.AccessModes = pvcPatch.Spec.AccessModes 137 | 138 | return r.virtualClient.Status().Update(ctx, pvcPatch) 139 | } 140 | 141 | func (r *PodReconciler) pseudoPV(obj *v1.PersistentVolumeClaim) *v1.PersistentVolume { 142 | var storageClass string 143 | 144 | if obj.Spec.StorageClassName != nil { 145 | storageClass = *obj.Spec.StorageClassName 146 | } 147 | 148 | return &v1.PersistentVolume{ 149 | ObjectMeta: metav1.ObjectMeta{ 150 | Name: obj.Name, 151 | Labels: map[string]string{ 152 | pseudoPVLabel: "true", 153 | }, 154 | Annotations: map[string]string{ 155 | volume.AnnBoundByController: "true", 156 | volume.AnnDynamicallyProvisioned: "k3k-kubelet", 157 | }, 158 | }, 159 | TypeMeta: metav1.TypeMeta{ 160 | Kind: "PersistentVolume", 161 | APIVersion: "v1", 162 | }, 163 | Spec: v1.PersistentVolumeSpec{ 164 | PersistentVolumeSource: 
v1.PersistentVolumeSource{ 165 | FlexVolume: &v1.FlexPersistentVolumeSource{ 166 | Driver: "pseudopv", 167 | }, 168 | }, 169 | StorageClassName: storageClass, 170 | VolumeMode: obj.Spec.VolumeMode, 171 | PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete, 172 | AccessModes: obj.Spec.AccessModes, 173 | Capacity: obj.Spec.Resources.Requests, 174 | ClaimRef: &v1.ObjectReference{ 175 | APIVersion: obj.APIVersion, 176 | UID: obj.UID, 177 | ResourceVersion: obj.ResourceVersion, 178 | Kind: obj.Kind, 179 | Namespace: obj.Namespace, 180 | Name: obj.Name, 181 | }, 182 | }, 183 | } 184 | } 185 | -------------------------------------------------------------------------------- /k3k-kubelet/controller/service.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rancher/k3k/k3k-kubelet/translate" 7 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 8 | "github.com/rancher/k3k/pkg/log" 9 | 10 | v1 "k8s.io/api/core/v1" 11 | apierrors "k8s.io/apimachinery/pkg/api/errors" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | "k8s.io/apimachinery/pkg/types" 14 | ctrl "sigs.k8s.io/controller-runtime" 15 | ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" 16 | "sigs.k8s.io/controller-runtime/pkg/controller" 17 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 18 | "sigs.k8s.io/controller-runtime/pkg/manager" 19 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 20 | ) 21 | 22 | const ( 23 | serviceSyncerController = "service-syncer-controller" 24 | maxConcurrentReconciles = 1 25 | serviceFinalizerName = "service.k3k.io/finalizer" 26 | ) 27 | 28 | type ServiceReconciler struct { 29 | virtualClient ctrlruntimeclient.Client 30 | hostClient ctrlruntimeclient.Client 31 | clusterName string 32 | clusterNamespace string 33 | Scheme *runtime.Scheme 34 | HostScheme *runtime.Scheme 35 | logger *log.Logger 36 | Translator translate.ToHostTranslator 37 | } 38 | 39 | // AddServiceSyncer adds service syncer controller to the manager of the virtual cluster 40 | func AddServiceSyncer(ctx context.Context, virtMgr, hostMgr manager.Manager, clusterName, clusterNamespace string, logger *log.Logger) error { 41 | translator := translate.ToHostTranslator{ 42 | ClusterName: clusterName, 43 | ClusterNamespace: clusterNamespace, 44 | } 45 | // initialize a new Reconciler 46 | reconciler := ServiceReconciler{ 47 | virtualClient: virtMgr.GetClient(), 48 | hostClient: hostMgr.GetClient(), 49 | Scheme: virtMgr.GetScheme(), 50 | HostScheme: hostMgr.GetScheme(), 51 | logger: logger.Named(serviceSyncerController), 52 | Translator: translator, 53 | clusterName: clusterName, 54 | clusterNamespace: clusterNamespace, 55 | } 56 | 57 | return ctrl.NewControllerManagedBy(virtMgr). 58 | For(&v1.Service{}). 59 | WithOptions(controller.Options{ 60 | MaxConcurrentReconciles: maxConcurrentReconciles, 61 | }). 
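// Service events in the virtual cluster drive reconciliation; the built-in kubernetes and kube-dns services are skipped by the reconciler.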
62 | Complete(&reconciler) 63 | } 64 | 65 | func (s *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { 66 | log := s.logger.With("Cluster", s.clusterName, "Service", req.NamespacedName) 67 | 68 | if req.Name == "kubernetes" || req.Name == "kube-dns" { 69 | return reconcile.Result{}, nil 70 | } 71 | 72 | var ( 73 | virtService v1.Service 74 | hostService v1.Service 75 | cluster v1alpha1.Cluster 76 | ) 77 | // getting the cluster for setting the controller reference 78 | if err := s.hostClient.Get(ctx, types.NamespacedName{Name: s.clusterName, Namespace: s.clusterNamespace}, &cluster); err != nil { 79 | return reconcile.Result{}, err 80 | } 81 | 82 | if err := s.virtualClient.Get(ctx, req.NamespacedName, &virtService); err != nil { 83 | return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err) 84 | } 85 | 86 | syncedService := s.service(&virtService) 87 | if err := controllerutil.SetControllerReference(&cluster, syncedService, s.HostScheme); err != nil { 88 | return reconcile.Result{}, err 89 | } 90 | 91 | // handle deletion 92 | if !virtService.DeletionTimestamp.IsZero() { 93 | // deleting the synced service if exists 94 | if err := s.hostClient.Delete(ctx, syncedService); err != nil { 95 | return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err) 96 | } 97 | 98 | // remove the finalizer after cleaning up the synced service 99 | if controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) { 100 | controllerutil.RemoveFinalizer(&virtService, serviceFinalizerName) 101 | 102 | if err := s.virtualClient.Update(ctx, &virtService); err != nil { 103 | return reconcile.Result{}, err 104 | } 105 | } 106 | 107 | return reconcile.Result{}, nil 108 | } 109 | 110 | // Add finalizer if it does not exist 111 | if !controllerutil.ContainsFinalizer(&virtService, serviceFinalizerName) { 112 | controllerutil.AddFinalizer(&virtService, serviceFinalizerName) 113 | 114 | if err := s.virtualClient.Update(ctx, &virtService); err != nil { 115 | return reconcile.Result{}, err 116 | } 117 | } 118 | // create or update the service on host 119 | if err := s.hostClient.Get(ctx, types.NamespacedName{Name: syncedService.Name, Namespace: s.clusterNamespace}, &hostService); err != nil { 120 | if apierrors.IsNotFound(err) { 121 | log.Info("creating the service for the first time on the host cluster") 122 | return reconcile.Result{}, s.hostClient.Create(ctx, syncedService) 123 | } 124 | 125 | return reconcile.Result{}, err 126 | } 127 | 128 | log.Info("updating service on the host cluster") 129 | 130 | return reconcile.Result{}, s.hostClient.Update(ctx, syncedService) 131 | } 132 | 133 | func (s *ServiceReconciler) service(obj *v1.Service) *v1.Service { 134 | hostService := obj.DeepCopy() 135 | s.Translator.TranslateTo(hostService) 136 | // don't sync finalizers to the host 137 | return hostService 138 | } 139 | -------------------------------------------------------------------------------- /k3k-kubelet/controller/webhook/pod.go: -------------------------------------------------------------------------------- 1 | package webhook 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "strconv" 8 | "strings" 9 | 10 | "github.com/rancher/k3k/pkg/controller/cluster/agent" 11 | "github.com/rancher/k3k/pkg/log" 12 | admissionregistrationv1 "k8s.io/api/admissionregistration/v1" 13 | v1 "k8s.io/api/core/v1" 14 | apierrors "k8s.io/apimachinery/pkg/api/errors" 15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 16 | "k8s.io/apimachinery/pkg/runtime" 17 | 
"k8s.io/apimachinery/pkg/types" 18 | "k8s.io/utils/ptr" 19 | ctrl "sigs.k8s.io/controller-runtime" 20 | ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" 21 | "sigs.k8s.io/controller-runtime/pkg/manager" 22 | ) 23 | 24 | const ( 25 | webhookName = "podmutator.k3k.io" 26 | webhookTimeout = int32(10) 27 | webhookPort = "9443" 28 | webhookPath = "/mutate--v1-pod" 29 | FieldpathField = "k3k.io/fieldpath" 30 | ) 31 | 32 | type webhookHandler struct { 33 | client ctrlruntimeclient.Client 34 | scheme *runtime.Scheme 35 | serviceName string 36 | clusterName string 37 | clusterNamespace string 38 | logger *log.Logger 39 | } 40 | 41 | // AddPodMutatorWebhook will add a mutator webhook to the virtual cluster to 42 | // modify the nodeName of the created pods with the name of the virtual kubelet node name 43 | // as well as remove any status fields of the downward apis env fields 44 | func AddPodMutatorWebhook(ctx context.Context, mgr manager.Manager, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace, serviceName string, logger *log.Logger) error { 45 | handler := webhookHandler{ 46 | client: mgr.GetClient(), 47 | scheme: mgr.GetScheme(), 48 | logger: logger, 49 | serviceName: serviceName, 50 | clusterName: clusterName, 51 | clusterNamespace: clusterNamespace, 52 | } 53 | 54 | // create mutator webhook configuration to the cluster 55 | config, err := handler.configuration(ctx, hostClient) 56 | if err != nil { 57 | return err 58 | } 59 | 60 | if err := handler.client.Create(ctx, config); err != nil { 61 | if !apierrors.IsAlreadyExists(err) { 62 | return err 63 | } 64 | } 65 | // register webhook with the manager 66 | return ctrl.NewWebhookManagedBy(mgr).For(&v1.Pod{}).WithDefaulter(&handler).Complete() 67 | } 68 | 69 | func (w *webhookHandler) Default(ctx context.Context, obj runtime.Object) error { 70 | pod, ok := obj.(*v1.Pod) 71 | if !ok { 72 | return fmt.Errorf("invalid request: object was type %t not cluster", obj) 73 | } 74 | 75 | w.logger.Infow("mutator webhook request", "Pod", pod.Name, "Namespace", pod.Namespace) 76 | // look for status.* fields in the env 77 | if pod.Annotations == nil { 78 | pod.Annotations = make(map[string]string) 79 | } 80 | 81 | for i, container := range pod.Spec.Containers { 82 | for j, env := range container.Env { 83 | if env.ValueFrom == nil || env.ValueFrom.FieldRef == nil { 84 | continue 85 | } 86 | 87 | fieldPath := env.ValueFrom.FieldRef.FieldPath 88 | if strings.Contains(fieldPath, "status.") { 89 | annotationKey := fmt.Sprintf("%s_%d_%s", FieldpathField, i, env.Name) 90 | pod.Annotations[annotationKey] = fieldPath 91 | pod.Spec.Containers[i].Env = removeEnv(pod.Spec.Containers[i].Env, j) 92 | } 93 | } 94 | } 95 | 96 | return nil 97 | } 98 | 99 | func (w *webhookHandler) configuration(ctx context.Context, hostClient ctrlruntimeclient.Client) (*admissionregistrationv1.MutatingWebhookConfiguration, error) { 100 | w.logger.Infow("extracting webhook tls from host cluster") 101 | 102 | var ( 103 | webhookTLSSecret v1.Secret 104 | ) 105 | 106 | if err := hostClient.Get(ctx, types.NamespacedName{Name: agent.WebhookSecretName(w.clusterName), Namespace: w.clusterNamespace}, &webhookTLSSecret); err != nil { 107 | return nil, err 108 | } 109 | 110 | caBundle, ok := webhookTLSSecret.Data["ca.crt"] 111 | if !ok { 112 | return nil, errors.New("webhook CABundle does not exist in secret") 113 | } 114 | 115 | webhookURL := "https://" + w.serviceName + ":" + webhookPort + webhookPath 116 | 117 | return 
&admissionregistrationv1.MutatingWebhookConfiguration{ 118 | TypeMeta: metav1.TypeMeta{ 119 | APIVersion: "admissionregistration.k8s.io/v1", 120 | Kind: "MutatingWebhookConfiguration", 121 | }, 122 | ObjectMeta: metav1.ObjectMeta{ 123 | Name: webhookName + "-configuration", 124 | }, 125 | Webhooks: []admissionregistrationv1.MutatingWebhook{ 126 | { 127 | Name: webhookName, 128 | AdmissionReviewVersions: []string{"v1"}, 129 | SideEffects: ptr.To(admissionregistrationv1.SideEffectClassNone), 130 | TimeoutSeconds: ptr.To(webhookTimeout), 131 | ClientConfig: admissionregistrationv1.WebhookClientConfig{ 132 | URL: ptr.To(webhookURL), 133 | CABundle: caBundle, 134 | }, 135 | Rules: []admissionregistrationv1.RuleWithOperations{ 136 | { 137 | Operations: []admissionregistrationv1.OperationType{ 138 | "CREATE", 139 | }, 140 | Rule: admissionregistrationv1.Rule{ 141 | APIGroups: []string{""}, 142 | APIVersions: []string{"v1"}, 143 | Resources: []string{"pods"}, 144 | Scope: ptr.To(admissionregistrationv1.NamespacedScope), 145 | }, 146 | }, 147 | }, 148 | }, 149 | }, 150 | }, nil 151 | } 152 | 153 | func removeEnv(envs []v1.EnvVar, i int) []v1.EnvVar { 154 | envs[i] = envs[len(envs)-1] 155 | return envs[:len(envs)-1] 156 | } 157 | 158 | func ParseFieldPathAnnotationKey(annotationKey string) (int, string, error) { 159 | s := strings.SplitN(annotationKey, "_", 3) 160 | if len(s) != 3 { 161 | return -1, "", errors.New("fieldpath annotation is not set correctly") 162 | } 163 | 164 | containerIndex, err := strconv.Atoi(s[1]) 165 | if err != nil { 166 | return -1, "", err 167 | } 168 | 169 | envName := s[2] 170 | 171 | return containerIndex, envName, nil 172 | } 173 | -------------------------------------------------------------------------------- /k3k-kubelet/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "os" 6 | 7 | "github.com/go-logr/zapr" 8 | "github.com/rancher/k3k/pkg/log" 9 | "github.com/sirupsen/logrus" 10 | "github.com/urfave/cli/v2" 11 | "go.uber.org/zap" 12 | ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log" 13 | ) 14 | 15 | var ( 16 | configFile string 17 | cfg config 18 | logger *log.Logger 19 | debug bool 20 | ) 21 | 22 | func main() { 23 | app := cli.NewApp() 24 | app.Name = "k3k-kubelet" 25 | app.Usage = "virtual kubelet implementation k3k" 26 | app.Flags = []cli.Flag{ 27 | &cli.StringFlag{ 28 | Name: "cluster-name", 29 | Usage: "Name of the k3k cluster", 30 | Destination: &cfg.ClusterName, 31 | EnvVars: []string{"CLUSTER_NAME"}, 32 | }, 33 | &cli.StringFlag{ 34 | Name: "cluster-namespace", 35 | Usage: "Namespace of the k3k cluster", 36 | Destination: &cfg.ClusterNamespace, 37 | EnvVars: []string{"CLUSTER_NAMESPACE"}, 38 | }, 39 | &cli.StringFlag{ 40 | Name: "cluster-token", 41 | Usage: "K3S token of the k3k cluster", 42 | Destination: &cfg.Token, 43 | EnvVars: []string{"CLUSTER_TOKEN"}, 44 | }, 45 | &cli.StringFlag{ 46 | Name: "host-config-path", 47 | Usage: "Path to the host kubeconfig, if empty then virtual-kubelet will use incluster config", 48 | Destination: &cfg.HostConfigPath, 49 | EnvVars: []string{"HOST_KUBECONFIG"}, 50 | }, 51 | &cli.StringFlag{ 52 | Name: "virtual-config-path", 53 | Usage: "Path to the k3k cluster kubeconfig, if empty then virtual-kubelet will create its own config from k3k cluster", 54 | Destination: &cfg.VirtualConfigPath, 55 | EnvVars: []string{"CLUSTER_NAME"}, 56 | }, 57 | &cli.StringFlag{ 58 | Name: "kubelet-port", 59 | Usage: "kubelet API port number", 60 
| Destination: &cfg.KubeletPort, 61 | EnvVars: []string{"SERVER_PORT"}, 62 | Value: "10250", 63 | }, 64 | &cli.StringFlag{ 65 | Name: "service-name", 66 | Usage: "The service name deployed by the k3k controller", 67 | Destination: &cfg.ServiceName, 68 | EnvVars: []string{"SERVICE_NAME"}, 69 | }, 70 | &cli.StringFlag{ 71 | Name: "agent-hostname", 72 | Usage: "Agent Hostname used for TLS SAN for the kubelet server", 73 | Destination: &cfg.AgentHostname, 74 | EnvVars: []string{"AGENT_HOSTNAME"}, 75 | }, 76 | &cli.StringFlag{ 77 | Name: "server-ip", 78 | Usage: "Server IP used for registering the virtual kubelet to the cluster", 79 | Destination: &cfg.ServerIP, 80 | EnvVars: []string{"SERVER_IP"}, 81 | }, 82 | &cli.StringFlag{ 83 | Name: "version", 84 | Usage: "Version of kubernetes server", 85 | Destination: &cfg.Version, 86 | EnvVars: []string{"VERSION"}, 87 | }, 88 | &cli.StringFlag{ 89 | Name: "config", 90 | Usage: "Path to k3k-kubelet config file", 91 | Destination: &configFile, 92 | EnvVars: []string{"CONFIG_FILE"}, 93 | Value: "/etc/rancher/k3k/config.yaml", 94 | }, 95 | &cli.BoolFlag{ 96 | Name: "debug", 97 | Usage: "Enable debug logging", 98 | Destination: &debug, 99 | EnvVars: []string{"DEBUG"}, 100 | }, 101 | } 102 | app.Before = func(clx *cli.Context) error { 103 | logger = log.New(debug) 104 | ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1)))) 105 | 106 | return nil 107 | } 108 | app.Action = run 109 | 110 | if err := app.Run(os.Args); err != nil { 111 | logrus.Fatal(err) 112 | } 113 | } 114 | 115 | func run(clx *cli.Context) error { 116 | ctx := context.Background() 117 | 118 | if err := cfg.parse(configFile); err != nil { 119 | logger.Fatalw("failed to parse config file", "path", configFile, zap.Error(err)) 120 | } 121 | 122 | if err := cfg.validate(); err != nil { 123 | logger.Fatalw("failed to validate config", zap.Error(err)) 124 | } 125 | 126 | k, err := newKubelet(ctx, &cfg, logger) 127 | if err != nil { 128 | logger.Fatalw("failed to create new virtual kubelet instance", zap.Error(err)) 129 | } 130 | 131 | if err := k.registerNode(ctx, k.agentIP, cfg.KubeletPort, cfg.ClusterNamespace, cfg.ClusterName, cfg.AgentHostname, cfg.ServerIP, k.dnsIP, cfg.Version); err != nil { 132 | logger.Fatalw("failed to register new node", zap.Error(err)) 133 | } 134 | 135 | k.start(ctx) 136 | 137 | return nil 138 | } 139 | -------------------------------------------------------------------------------- /k3k-kubelet/provider/configure.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 8 | k3klog "github.com/rancher/k3k/pkg/log" 9 | corev1 "k8s.io/api/core/v1" 10 | v1 "k8s.io/api/core/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/labels" 13 | "k8s.io/apimachinery/pkg/types" 14 | typedv1 "k8s.io/client-go/kubernetes/typed/core/v1" 15 | "sigs.k8s.io/controller-runtime/pkg/client" 16 | ) 17 | 18 | func ConfigureNode(logger *k3klog.Logger, node *v1.Node, hostname string, servicePort int, ip string, coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualCluster v1alpha1.Cluster, version string) { 19 | node.Status.Conditions = nodeConditions() 20 | node.Status.DaemonEndpoints.KubeletEndpoint.Port = int32(servicePort) 21 | node.Status.Addresses = []v1.NodeAddress{ 22 | { 23 | Type: v1.NodeHostName, 24 | Address: hostname, 25 | }, 26 | { 27 | Type: 
v1.NodeInternalIP, 28 | Address: ip, 29 | }, 30 | } 31 | 32 | node.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true" 33 | node.Labels["kubernetes.io/os"] = "linux" 34 | 35 | // configure versions 36 | node.Status.NodeInfo.KubeletVersion = version 37 | node.Status.NodeInfo.KubeProxyVersion = version 38 | 39 | updateNodeCapacityInterval := 10 * time.Second 40 | ticker := time.NewTicker(updateNodeCapacityInterval) 41 | 42 | go func() { 43 | for range ticker.C { 44 | if err := updateNodeCapacity(coreClient, virtualClient, node.Name, virtualCluster.Spec.NodeSelector); err != nil { 45 | logger.Error("error updating node capacity", err) 46 | } 47 | } 48 | }() 49 | } 50 | 51 | // nodeConditions returns the basic conditions which mark the node as ready 52 | func nodeConditions() []v1.NodeCondition { 53 | return []v1.NodeCondition{ 54 | { 55 | Type: "Ready", 56 | Status: v1.ConditionTrue, 57 | LastHeartbeatTime: metav1.Now(), 58 | LastTransitionTime: metav1.Now(), 59 | Reason: "KubeletReady", 60 | Message: "kubelet is ready.", 61 | }, 62 | { 63 | Type: "OutOfDisk", 64 | Status: v1.ConditionFalse, 65 | LastHeartbeatTime: metav1.Now(), 66 | LastTransitionTime: metav1.Now(), 67 | Reason: "KubeletHasSufficientDisk", 68 | Message: "kubelet has sufficient disk space available", 69 | }, 70 | { 71 | Type: "MemoryPressure", 72 | Status: v1.ConditionFalse, 73 | LastHeartbeatTime: metav1.Now(), 74 | LastTransitionTime: metav1.Now(), 75 | Reason: "KubeletHasSufficientMemory", 76 | Message: "kubelet has sufficient memory available", 77 | }, 78 | { 79 | Type: "DiskPressure", 80 | Status: v1.ConditionFalse, 81 | LastHeartbeatTime: metav1.Now(), 82 | LastTransitionTime: metav1.Now(), 83 | Reason: "KubeletHasNoDiskPressure", 84 | Message: "kubelet has no disk pressure", 85 | }, 86 | { 87 | Type: "NetworkUnavailable", 88 | Status: v1.ConditionFalse, 89 | LastHeartbeatTime: metav1.Now(), 90 | LastTransitionTime: metav1.Now(), 91 | Reason: "RouteCreated", 92 | Message: "RouteController created a route", 93 | }, 94 | } 95 | } 96 | 97 | // updateNodeCapacity will update the virtual node capacity (and the allocatable field) with the sum of all the resource in the host nodes. 98 | // If the nodeLabels are specified only the matching nodes will be considered. 99 | func updateNodeCapacity(coreClient typedv1.CoreV1Interface, virtualClient client.Client, virtualNodeName string, nodeLabels map[string]string) error { 100 | ctx := context.Background() 101 | 102 | capacity, allocatable, err := getResourcesFromNodes(ctx, coreClient, nodeLabels) 103 | if err != nil { 104 | return err 105 | } 106 | 107 | var virtualNode corev1.Node 108 | if err := virtualClient.Get(ctx, types.NamespacedName{Name: virtualNodeName}, &virtualNode); err != nil { 109 | return err 110 | } 111 | 112 | virtualNode.Status.Capacity = capacity 113 | virtualNode.Status.Allocatable = allocatable 114 | 115 | return virtualClient.Status().Update(ctx, &virtualNode) 116 | } 117 | 118 | // getResourcesFromNodes will return a sum of all the resource capacity of the host nodes, and the allocatable resources. 119 | // If some node labels are specified only the matching nodes will be considered. 
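As a purely illustrative example (the figures are hypothetical, not taken from a real cluster): with two matching host nodes of 4 CPU / 8Gi each, the virtual node would end up advertising roughly 8 CPU / 16Gi, since the loop below simply adds the capacity and allocatable quantities per resource name.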
120 | func getResourcesFromNodes(ctx context.Context, coreClient typedv1.CoreV1Interface, nodeLabels map[string]string) (v1.ResourceList, v1.ResourceList, error) { 121 | listOpts := metav1.ListOptions{} 122 | 123 | if nodeLabels != nil { 124 | labelSelector := metav1.LabelSelector{MatchLabels: nodeLabels} 125 | listOpts.LabelSelector = labels.Set(labelSelector.MatchLabels).String() 126 | } 127 | 128 | nodeList, err := coreClient.Nodes().List(ctx, listOpts) 129 | if err != nil { 130 | return nil, nil, err 131 | } 132 | 133 | // sum all 134 | virtualCapacityResources := corev1.ResourceList{} 135 | virtualAvailableResources := corev1.ResourceList{} 136 | 137 | for _, node := range nodeList.Items { 138 | // check if the node is Ready 139 | for _, condition := range node.Status.Conditions { 140 | if condition.Type != corev1.NodeReady { 141 | continue 142 | } 143 | 144 | // if the node is not Ready then we can skip it 145 | if condition.Status != corev1.ConditionTrue { 146 | break 147 | } 148 | } 149 | 150 | // add all the available metrics to the virtual node 151 | for resourceName, resourceQuantity := range node.Status.Capacity { 152 | virtualResource := virtualCapacityResources[resourceName] 153 | 154 | (&virtualResource).Add(resourceQuantity) 155 | virtualCapacityResources[resourceName] = virtualResource 156 | } 157 | 158 | for resourceName, resourceQuantity := range node.Status.Allocatable { 159 | virtualResource := virtualAvailableResources[resourceName] 160 | 161 | (&virtualResource).Add(resourceQuantity) 162 | virtualAvailableResources[resourceName] = virtualResource 163 | } 164 | } 165 | 166 | return virtualCapacityResources, virtualAvailableResources, nil 167 | } 168 | -------------------------------------------------------------------------------- /k3k-kubelet/provider/node.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | 6 | corev1 "k8s.io/api/core/v1" 7 | ) 8 | 9 | // Node implements the node.Provider interface from Virtual Kubelet 10 | type Node struct { 11 | notifyCallback func(*corev1.Node) 12 | } 13 | 14 | // Ping is called to check if the node is healthy - in the current format it always is 15 | func (n *Node) Ping(context.Context) error { 16 | return nil 17 | } 18 | 19 | // NotifyNodeStatus sets the callback function for a node being changed. 
As of now, no changes are made 20 | func (n *Node) NotifyNodeStatus(ctx context.Context, cb func(*corev1.Node)) { 21 | n.notifyCallback = cb 22 | } 23 | -------------------------------------------------------------------------------- /k3k-kubelet/provider/provider_test.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | corev1 "k8s.io/api/core/v1" 8 | v1 "k8s.io/api/core/v1" 9 | ) 10 | 11 | func Test_overrideEnvVars(t *testing.T) { 12 | type args struct { 13 | orig []corev1.EnvVar 14 | new []corev1.EnvVar 15 | } 16 | 17 | tests := []struct { 18 | name string 19 | args args 20 | want []corev1.EnvVar 21 | }{ 22 | { 23 | name: "orig and new are empty", 24 | args: args{ 25 | orig: []v1.EnvVar{}, 26 | new: []v1.EnvVar{}, 27 | }, 28 | want: []v1.EnvVar{}, 29 | }, 30 | { 31 | name: "only orig is empty", 32 | args: args{ 33 | orig: []v1.EnvVar{}, 34 | new: []v1.EnvVar{{Name: "FOO", Value: "new_val"}}, 35 | }, 36 | want: []v1.EnvVar{}, 37 | }, 38 | { 39 | name: "orig has a matching element", 40 | args: args{ 41 | orig: []v1.EnvVar{{Name: "FOO", Value: "old_val"}}, 42 | new: []v1.EnvVar{{Name: "FOO", Value: "new_val"}}, 43 | }, 44 | want: []v1.EnvVar{{Name: "FOO", Value: "new_val"}}, 45 | }, 46 | { 47 | name: "orig have multiple elements", 48 | args: args{ 49 | orig: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}}, 50 | new: []v1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}}, 51 | }, 52 | want: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}}, 53 | }, 54 | { 55 | name: "orig and new have multiple elements and some not matching", 56 | args: args{ 57 | orig: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "old_val_1"}}, 58 | new: []v1.EnvVar{{Name: "FOO_1", Value: "new_val_1"}, {Name: "FOO_2", Value: "val_1"}}, 59 | }, 60 | want: []v1.EnvVar{{Name: "FOO_0", Value: "old_val_0"}, {Name: "FOO_1", Value: "new_val_1"}}, 61 | }, 62 | } 63 | 64 | for _, tt := range tests { 65 | t.Run(tt.name, func(t *testing.T) { 66 | if got := overrideEnvVars(tt.args.orig, tt.args.new); !reflect.DeepEqual(got, tt.want) { 67 | t.Errorf("overrideEnvVars() = %v, want %v", got, tt.want) 68 | } 69 | }) 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /k3k-kubelet/provider/token.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | 8 | k3kcontroller "github.com/rancher/k3k/pkg/controller" 9 | corev1 "k8s.io/api/core/v1" 10 | apierrors "k8s.io/apimachinery/pkg/api/errors" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/types" 13 | "k8s.io/utils/ptr" 14 | ) 15 | 16 | const ( 17 | kubeAPIAccessPrefix = "kube-api-access" 18 | serviceAccountTokenMountPath = "/var/run/secrets/kubernetes.io/serviceaccount" 19 | ) 20 | 21 | // transformTokens copies the serviceaccount tokens used by pod's serviceaccount to a secret on the host cluster and mount it 22 | // to look like the serviceaccount token 23 | func (p *Provider) transformTokens(ctx context.Context, pod, tPod *corev1.Pod) error { 24 | p.logger.Infow("transforming token", "Pod", pod.Name, "Namespace", pod.Namespace, "serviceAccountName", pod.Spec.ServiceAccountName) 25 | 26 | // skip this process if the kube-api-access is already removed from the pod 27 | // this is needed in case users already 
adds their own custom tokens like in rancher imported clusters 28 | if !isKubeAccessVolumeFound(pod) { 29 | return nil 30 | } 31 | 32 | virtualSecretName := k3kcontroller.SafeConcatNameWithPrefix(pod.Spec.ServiceAccountName, "token") 33 | 34 | virtualSecret := virtualSecret(virtualSecretName, pod.Namespace, pod.Spec.ServiceAccountName) 35 | if err := p.VirtualClient.Create(ctx, virtualSecret); err != nil { 36 | if !apierrors.IsAlreadyExists(err) { 37 | return err 38 | } 39 | } 40 | 41 | // extracting the tokens data from the secret we just created 42 | virtualSecretKey := types.NamespacedName{ 43 | Name: virtualSecret.Name, 44 | Namespace: virtualSecret.Namespace, 45 | } 46 | if err := p.VirtualClient.Get(ctx, virtualSecretKey, virtualSecret); err != nil { 47 | return err 48 | } 49 | // To avoid race conditions we need to check if the secret's data has been populated 50 | // including the token, ca.crt and namespace 51 | if len(virtualSecret.Data) < 3 { 52 | return fmt.Errorf("token secret %s/%s data is empty", virtualSecret.Namespace, virtualSecret.Name) 53 | } 54 | 55 | hostSecret := virtualSecret.DeepCopy() 56 | hostSecret.Type = "" 57 | hostSecret.Annotations = make(map[string]string) 58 | 59 | p.Translator.TranslateTo(hostSecret) 60 | 61 | if err := p.HostClient.Create(ctx, hostSecret); err != nil { 62 | if !apierrors.IsAlreadyExists(err) { 63 | return err 64 | } 65 | } 66 | 67 | p.translateToken(tPod, hostSecret.Name) 68 | 69 | return nil 70 | } 71 | 72 | func virtualSecret(name, namespace, serviceAccountName string) *corev1.Secret { 73 | return &corev1.Secret{ 74 | TypeMeta: metav1.TypeMeta{ 75 | Kind: "Secret", 76 | APIVersion: "v1", 77 | }, 78 | ObjectMeta: metav1.ObjectMeta{ 79 | Name: name, 80 | Namespace: namespace, 81 | Annotations: map[string]string{ 82 | corev1.ServiceAccountNameKey: serviceAccountName, 83 | }, 84 | }, 85 | Type: corev1.SecretTypeServiceAccountToken, 86 | } 87 | } 88 | 89 | // translateToken will remove the serviceaccount from the pod and replace the kube-api-access volume 90 | // with a custom token volume and mount it to all containers within the pod 91 | func (p *Provider) translateToken(pod *corev1.Pod, hostSecretName string) { 92 | pod.Spec.ServiceAccountName = "" 93 | pod.Spec.DeprecatedServiceAccount = "" 94 | pod.Spec.AutomountServiceAccountToken = ptr.To(false) 95 | removeKubeAccessVolume(pod) 96 | addKubeAccessVolume(pod, hostSecretName) 97 | } 98 | 99 | func isKubeAccessVolumeFound(pod *corev1.Pod) bool { 100 | for _, volume := range pod.Spec.Volumes { 101 | if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) { 102 | return true 103 | } 104 | } 105 | 106 | return false 107 | } 108 | 109 | func removeKubeAccessVolume(pod *corev1.Pod) { 110 | for i, volume := range pod.Spec.Volumes { 111 | if strings.HasPrefix(volume.Name, kubeAPIAccessPrefix) { 112 | pod.Spec.Volumes = append(pod.Spec.Volumes[:i], pod.Spec.Volumes[i+1:]...) 113 | } 114 | } 115 | // init containers 116 | for i, container := range pod.Spec.InitContainers { 117 | for j, mountPath := range container.VolumeMounts { 118 | if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) { 119 | pod.Spec.InitContainers[i].VolumeMounts = append(pod.Spec.InitContainers[i].VolumeMounts[:j], pod.Spec.InitContainers[i].VolumeMounts[j+1:]...) 
120 | } 121 | } 122 | } 123 | 124 | for i, container := range pod.Spec.Containers { 125 | for j, mountPath := range container.VolumeMounts { 126 | if strings.HasPrefix(mountPath.Name, kubeAPIAccessPrefix) { 127 | pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts[:j], pod.Spec.Containers[i].VolumeMounts[j+1:]...) 128 | } 129 | } 130 | } 131 | } 132 | 133 | func addKubeAccessVolume(pod *corev1.Pod, hostSecretName string) { 134 | var tokenVolumeName = k3kcontroller.SafeConcatNameWithPrefix(kubeAPIAccessPrefix) 135 | pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{ 136 | Name: tokenVolumeName, 137 | VolumeSource: corev1.VolumeSource{ 138 | Secret: &corev1.SecretVolumeSource{ 139 | SecretName: hostSecretName, 140 | }, 141 | }, 142 | }) 143 | 144 | for i := range pod.Spec.InitContainers { 145 | pod.Spec.InitContainers[i].VolumeMounts = append(pod.Spec.InitContainers[i].VolumeMounts, corev1.VolumeMount{ 146 | Name: tokenVolumeName, 147 | MountPath: serviceAccountTokenMountPath, 148 | }) 149 | } 150 | 151 | for i := range pod.Spec.Containers { 152 | pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts, corev1.VolumeMount{ 153 | Name: tokenVolumeName, 154 | MountPath: serviceAccountTokenMountPath, 155 | }) 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /k3k-kubelet/provider/util.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "github.com/virtual-kubelet/virtual-kubelet/node/api" 5 | "k8s.io/client-go/tools/remotecommand" 6 | ) 7 | 8 | // translatorSizeQueue feeds the size events from the WebSocket 9 | // resizeChan into the SPDY client input. Implements TerminalSizeQueue 10 | // interface. 
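Next blocks on resizeChan, converting each api.TermSize into a remotecommand.TerminalSize; once the channel is closed it returns nil, which tells the remotecommand client that no further resize events will arrive.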
11 | type translatorSizeQueue struct { 12 | resizeChan <-chan api.TermSize 13 | } 14 | 15 | func (t *translatorSizeQueue) Next() *remotecommand.TerminalSize { 16 | size, ok := <-t.resizeChan 17 | if !ok { 18 | return nil 19 | } 20 | 21 | return &remotecommand.TerminalSize{ 22 | Width: size.Width, 23 | Height: size.Height, 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /k3k-kubelet/translate/host.go: -------------------------------------------------------------------------------- 1 | package translate 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | 7 | "github.com/rancher/k3k/pkg/controller" 8 | "sigs.k8s.io/controller-runtime/pkg/client" 9 | ) 10 | 11 | const ( 12 | // ClusterNameLabel is the key for the label that contains the name of the virtual cluster 13 | // this resource was made in 14 | ClusterNameLabel = "k3k.io/clusterName" 15 | // ResourceNameAnnotation is the key for the annotation that contains the original name of this 16 | // resource in the virtual cluster 17 | ResourceNameAnnotation = "k3k.io/name" 18 | // ResourceNamespaceAnnotation is the key for the annotation that contains the original namespace of this 19 | // resource in the virtual cluster 20 | ResourceNamespaceAnnotation = "k3k.io/namespace" 21 | // MetadataNameField is the downwardapi field for object's name 22 | MetadataNameField = "metadata.name" 23 | // MetadataNamespaceField is the downward field for the object's namespace 24 | MetadataNamespaceField = "metadata.namespace" 25 | ) 26 | 27 | type ToHostTranslator struct { 28 | // ClusterName is the name of the virtual cluster whose resources we are 29 | // translating to a host cluster 30 | ClusterName string 31 | // ClusterNamespace is the namespace of the virtual cluster whose resources 32 | // we are translating to a host cluster 33 | ClusterNamespace string 34 | } 35 | 36 | // Translate translates a virtual cluster object to a host cluster object. This should only be used for 37 | // static resources such as configmaps/secrets, and not for things like pods (which can reference other 38 | // objects). Note that this won't set host-cluster values (like resource version) so when updating you 39 | // may need to fetch the existing value and do some combination before using this. 40 | func (t *ToHostTranslator) TranslateTo(obj client.Object) { 41 | // owning objects may be in the virtual cluster, but may not be in the host cluster 42 | obj.SetOwnerReferences(nil) 43 | // add some annotations to make it easier to track source object 44 | annotations := obj.GetAnnotations() 45 | if annotations == nil { 46 | annotations = map[string]string{} 47 | } 48 | 49 | annotations[ResourceNameAnnotation] = obj.GetName() 50 | annotations[ResourceNamespaceAnnotation] = obj.GetNamespace() 51 | obj.SetAnnotations(annotations) 52 | 53 | // add a label to quickly identify objects owned by a given virtual cluster 54 | labels := obj.GetLabels() 55 | if labels == nil { 56 | labels = map[string]string{} 57 | } 58 | 59 | labels[ClusterNameLabel] = t.ClusterName 60 | obj.SetLabels(labels) 61 | 62 | // resource version/UID won't match what's in the host cluster. 
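Clearing them lets the host API server assign fresh values on create; as the doc comment above notes, callers that update an existing host object may need to fetch it first and copy the current values back.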
63 | obj.SetResourceVersion("") 64 | obj.SetUID("") 65 | 66 | // set the name and the namespace so that this goes in the proper host namespace 67 | // and doesn't collide with other resources 68 | obj.SetName(t.TranslateName(obj.GetNamespace(), obj.GetName())) 69 | obj.SetNamespace(t.ClusterNamespace) 70 | obj.SetFinalizers(nil) 71 | } 72 | 73 | func (t *ToHostTranslator) TranslateFrom(obj client.Object) { 74 | // owning objects may be in the virtual cluster, but may not be in the host cluster 75 | obj.SetOwnerReferences(nil) 76 | 77 | // remove the annotations added to track original name 78 | annotations := obj.GetAnnotations() 79 | // TODO: It's possible that this was erased by a change on the host cluster 80 | // In this case, we need to have some sort of fallback or error return 81 | name := annotations[ResourceNameAnnotation] 82 | namespace := annotations[ResourceNamespaceAnnotation] 83 | 84 | obj.SetName(name) 85 | obj.SetNamespace(namespace) 86 | delete(annotations, ResourceNameAnnotation) 87 | delete(annotations, ResourceNamespaceAnnotation) 88 | obj.SetAnnotations(annotations) 89 | 90 | // remove the clusteName tracking label 91 | labels := obj.GetLabels() 92 | delete(labels, ClusterNameLabel) 93 | obj.SetLabels(labels) 94 | 95 | // resource version/UID won't match what's in the virtual cluster. 96 | obj.SetResourceVersion("") 97 | obj.SetUID("") 98 | } 99 | 100 | // TranslateName returns the name of the resource in the host cluster. Will not update the object with this name. 101 | func (t *ToHostTranslator) TranslateName(namespace string, name string) string { 102 | // we need to come up with a name which is: 103 | // - somewhat connectable to the original resource 104 | // - a valid k8s name 105 | // - idempotently calculatable 106 | // - unique for this combination of name/namespace/cluster 107 | namePrefix := fmt.Sprintf("%s-%s-%s", name, namespace, t.ClusterName) 108 | // use + as a separator since it can't be in an object name 109 | nameKey := fmt.Sprintf("%s+%s+%s", name, namespace, t.ClusterName) 110 | // it's possible that the suffix will be in the name, so we use hex to make it valid for k8s 111 | nameSuffix := hex.EncodeToString([]byte(nameKey)) 112 | 113 | return controller.SafeConcatName(namePrefix, nameSuffix) 114 | } 115 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | //go:generate ./scripts/generate 2 | package main 3 | 4 | import ( 5 | "context" 6 | "errors" 7 | "fmt" 8 | "os" 9 | 10 | "github.com/go-logr/zapr" 11 | "github.com/rancher/k3k/cli/cmds" 12 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 13 | "github.com/rancher/k3k/pkg/buildinfo" 14 | "github.com/rancher/k3k/pkg/controller/cluster" 15 | "github.com/rancher/k3k/pkg/controller/policy" 16 | "github.com/rancher/k3k/pkg/log" 17 | "github.com/urfave/cli/v2" 18 | "go.uber.org/zap" 19 | v1 "k8s.io/api/core/v1" 20 | "k8s.io/apimachinery/pkg/runtime" 21 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 22 | "k8s.io/client-go/tools/clientcmd" 23 | ctrl "sigs.k8s.io/controller-runtime" 24 | ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log" 25 | "sigs.k8s.io/controller-runtime/pkg/manager" 26 | ) 27 | 28 | var ( 29 | scheme = runtime.NewScheme() 30 | clusterCIDR string 31 | sharedAgentImage string 32 | sharedAgentImagePullPolicy string 33 | kubeconfig string 34 | k3SImage string 35 | k3SImagePullPolicy string 36 | debug bool 37 | logger *log.Logger 38 | flags = 
[]cli.Flag{ 39 | &cli.StringFlag{ 40 | Name: "kubeconfig", 41 | EnvVars: []string{"KUBECONFIG"}, 42 | Usage: "Kubeconfig path", 43 | Destination: &kubeconfig, 44 | }, 45 | &cli.StringFlag{ 46 | Name: "cluster-cidr", 47 | EnvVars: []string{"CLUSTER_CIDR"}, 48 | Usage: "Cluster CIDR to be added to the networkpolicy", 49 | Destination: &clusterCIDR, 50 | }, 51 | &cli.StringFlag{ 52 | Name: "shared-agent-image", 53 | EnvVars: []string{"SHARED_AGENT_IMAGE"}, 54 | Usage: "K3K Virtual Kubelet image", 55 | Value: "rancher/k3k:latest", 56 | Destination: &sharedAgentImage, 57 | }, 58 | &cli.StringFlag{ 59 | Name: "shared-agent-pull-policy", 60 | EnvVars: []string{"SHARED_AGENT_PULL_POLICY"}, 61 | Usage: "K3K Virtual Kubelet image pull policy must be one of Always, IfNotPresent or Never", 62 | Destination: &sharedAgentImagePullPolicy, 63 | }, 64 | &cli.BoolFlag{ 65 | Name: "debug", 66 | EnvVars: []string{"DEBUG"}, 67 | Usage: "Debug level logging", 68 | Destination: &debug, 69 | }, 70 | &cli.StringFlag{ 71 | Name: "k3s-image", 72 | EnvVars: []string{"K3S_IMAGE"}, 73 | Usage: "K3K server image", 74 | Value: "rancher/k3k", 75 | Destination: &k3SImage, 76 | }, 77 | &cli.StringFlag{ 78 | Name: "k3s-image-pull-policy", 79 | EnvVars: []string{"K3S_IMAGE_PULL_POLICY"}, 80 | Usage: "K3K server image pull policy", 81 | Destination: &k3SImagePullPolicy, 82 | }, 83 | } 84 | ) 85 | 86 | func init() { 87 | _ = clientgoscheme.AddToScheme(scheme) 88 | _ = v1alpha1.AddToScheme(scheme) 89 | } 90 | 91 | func main() { 92 | app := cmds.NewApp() 93 | app.Flags = flags 94 | app.Action = run 95 | app.Version = buildinfo.Version 96 | app.Before = func(clx *cli.Context) error { 97 | if err := validate(); err != nil { 98 | return err 99 | } 100 | 101 | logger = log.New(debug) 102 | 103 | return nil 104 | } 105 | 106 | if err := app.Run(os.Args); err != nil { 107 | logger.Fatalw("failed to run k3k controller", zap.Error(err)) 108 | } 109 | } 110 | 111 | func run(clx *cli.Context) error { 112 | ctx := context.Background() 113 | 114 | logger.Info("Starting k3k - Version: " + buildinfo.Version) 115 | 116 | restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig) 117 | if err != nil { 118 | return fmt.Errorf("failed to create config from kubeconfig file: %v", err) 119 | } 120 | 121 | mgr, err := ctrl.NewManager(restConfig, manager.Options{ 122 | Scheme: scheme, 123 | }) 124 | 125 | if err != nil { 126 | return fmt.Errorf("failed to create new controller runtime manager: %v", err) 127 | } 128 | 129 | ctrlruntimelog.SetLogger(zapr.NewLogger(logger.Desugar().WithOptions(zap.AddCallerSkip(1)))) 130 | 131 | logger.Info("adding cluster controller") 132 | 133 | if err := cluster.Add(ctx, mgr, sharedAgentImage, sharedAgentImagePullPolicy, k3SImage, k3SImagePullPolicy); err != nil { 134 | return fmt.Errorf("failed to add the new cluster controller: %v", err) 135 | } 136 | 137 | logger.Info("adding etcd pod controller") 138 | 139 | if err := cluster.AddPodController(ctx, mgr); err != nil { 140 | return fmt.Errorf("failed to add the new cluster controller: %v", err) 141 | } 142 | 143 | logger.Info("adding clusterpolicy controller") 144 | 145 | if err := policy.Add(mgr, clusterCIDR); err != nil { 146 | return fmt.Errorf("failed to add the clusterpolicy controller: %v", err) 147 | } 148 | 149 | if err := mgr.Start(ctx); err != nil { 150 | return fmt.Errorf("failed to start the manager: %v", err) 151 | } 152 | 153 | return nil 154 | } 155 | 156 | func validate() error { 157 | if sharedAgentImagePullPolicy != "" { 158 | if 
sharedAgentImagePullPolicy != string(v1.PullAlways) && 159 | sharedAgentImagePullPolicy != string(v1.PullIfNotPresent) && 160 | sharedAgentImagePullPolicy != string(v1.PullNever) { 161 | return errors.New("invalid value for shared agent image policy") 162 | } 163 | } 164 | 165 | return nil 166 | } 167 | -------------------------------------------------------------------------------- /package/Dockerfile.k3k: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | 3 | ARG BIN_K3K=bin/k3k 4 | ARG BIN_K3KCLI=bin/k3kcli 5 | 6 | COPY ${BIN_K3K} /usr/bin/ 7 | COPY ${BIN_K3KCLI} /usr/bin/ 8 | 9 | CMD ["k3k"] 10 | -------------------------------------------------------------------------------- /package/Dockerfile.k3k-kubelet: -------------------------------------------------------------------------------- 1 | # TODO: swicth this to BCI-micro or scratch. Left as base right now so that debug can be done a bit easier 2 | FROM registry.suse.com/bci/bci-base:15.6 3 | 4 | ARG BIN_K3K_KUBELET=bin/k3k-kubelet 5 | 6 | COPY ${BIN_K3K_KUBELET} /usr/bin/ 7 | 8 | ENTRYPOINT ["/usr/bin/k3k-kubelet"] 9 | -------------------------------------------------------------------------------- /pkg/apis/k3k.io/register.go: -------------------------------------------------------------------------------- 1 | package k3k 2 | 3 | var ( 4 | GroupName = "k3k.io" 5 | ) 6 | -------------------------------------------------------------------------------- /pkg/apis/k3k.io/v1alpha1/doc.go: -------------------------------------------------------------------------------- 1 | // +k8s:deepcopy-gen=package 2 | // +groupName=k3k.io 3 | package v1alpha1 4 | -------------------------------------------------------------------------------- /pkg/apis/k3k.io/v1alpha1/register.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | k3k "github.com/rancher/k3k/pkg/apis/k3k.io" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | "k8s.io/apimachinery/pkg/runtime" 7 | "k8s.io/apimachinery/pkg/runtime/schema" 8 | ) 9 | 10 | var ( 11 | SchemeGroupVersion = schema.GroupVersion{Group: k3k.GroupName, Version: "v1alpha1"} 12 | SchemBuilder = runtime.NewSchemeBuilder(addKnownTypes) 13 | AddToScheme = SchemBuilder.AddToScheme 14 | ) 15 | 16 | func Resource(resource string) schema.GroupResource { 17 | return SchemeGroupVersion.WithResource(resource).GroupResource() 18 | } 19 | 20 | func addKnownTypes(s *runtime.Scheme) error { 21 | s.AddKnownTypes(SchemeGroupVersion, 22 | &Cluster{}, 23 | &ClusterList{}, 24 | &VirtualClusterPolicy{}, 25 | &VirtualClusterPolicyList{}, 26 | ) 27 | metav1.AddToGroupVersion(s, SchemeGroupVersion) 28 | 29 | return nil 30 | } 31 | -------------------------------------------------------------------------------- /pkg/buildinfo/buildinfo.go: -------------------------------------------------------------------------------- 1 | package buildinfo 2 | 3 | var Version = "dev" 4 | -------------------------------------------------------------------------------- /pkg/controller/certs/certs.go: -------------------------------------------------------------------------------- 1 | package certs 2 | 3 | import ( 4 | "crypto" 5 | "crypto/x509" 6 | "fmt" 7 | "net" 8 | "time" 9 | 10 | certutil "github.com/rancher/dynamiclistener/cert" 11 | ) 12 | 13 | func CreateClientCertKey(commonName string, organization []string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, expiresAt time.Duration, caCert, caKey string) ([]byte, 
[]byte, error) { 14 | caKeyPEM, err := certutil.ParsePrivateKeyPEM([]byte(caKey)) 15 | if err != nil { 16 | return nil, nil, err 17 | } 18 | 19 | caCertPEM, err := certutil.ParseCertsPEM([]byte(caCert)) 20 | if err != nil { 21 | return nil, nil, err 22 | } 23 | 24 | b, err := generateKey() 25 | if err != nil { 26 | return nil, nil, err 27 | } 28 | 29 | key, err := certutil.ParsePrivateKeyPEM(b) 30 | if err != nil { 31 | return nil, nil, err 32 | } 33 | 34 | cfg := certutil.Config{ 35 | CommonName: commonName, 36 | Organization: organization, 37 | Usages: extKeyUsage, 38 | ExpiresAt: expiresAt, 39 | } 40 | if altNames != nil { 41 | cfg.AltNames = *altNames 42 | } 43 | 44 | cert, err := certutil.NewSignedCert(cfg, key.(crypto.Signer), caCertPEM[0], caKeyPEM.(crypto.Signer)) 45 | if err != nil { 46 | return nil, nil, err 47 | } 48 | 49 | return append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCertPEM[0])...), b, nil 50 | } 51 | 52 | func generateKey() (data []byte, err error) { 53 | generatedData, err := certutil.MakeEllipticPrivateKeyPEM() 54 | if err != nil { 55 | return nil, fmt.Errorf("error generating key: %v", err) 56 | } 57 | 58 | return generatedData, nil 59 | } 60 | 61 | func AddSANs(sans []string) certutil.AltNames { 62 | var altNames certutil.AltNames 63 | 64 | for _, san := range sans { 65 | ip := net.ParseIP(san) 66 | if ip == nil { 67 | altNames.DNSNames = append(altNames.DNSNames, san) 68 | } else { 69 | altNames.IPs = append(altNames.IPs, ip) 70 | } 71 | } 72 | 73 | return altNames 74 | } 75 | -------------------------------------------------------------------------------- /pkg/controller/cluster/agent/agent.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 8 | "github.com/rancher/k3k/pkg/controller" 9 | apierrors "k8s.io/apimachinery/pkg/api/errors" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | ctrl "sigs.k8s.io/controller-runtime" 12 | ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" 13 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 14 | ) 15 | 16 | const ( 17 | configName = "agent-config" 18 | ) 19 | 20 | type ResourceEnsurer interface { 21 | EnsureResources(context.Context) error 22 | } 23 | 24 | type Config struct { 25 | cluster *v1alpha1.Cluster 26 | client ctrlruntimeclient.Client 27 | scheme *runtime.Scheme 28 | } 29 | 30 | func NewConfig(cluster *v1alpha1.Cluster, client ctrlruntimeclient.Client, scheme *runtime.Scheme) *Config { 31 | return &Config{ 32 | cluster: cluster, 33 | client: client, 34 | scheme: scheme, 35 | } 36 | } 37 | 38 | func configSecretName(clusterName string) string { 39 | return controller.SafeConcatNameWithPrefix(clusterName, configName) 40 | } 41 | 42 | func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object) error { 43 | log := ctrl.LoggerFrom(ctx) 44 | 45 | key := ctrlruntimeclient.ObjectKeyFromObject(obj) 46 | 47 | log.Info(fmt.Sprintf("ensuring %T", obj), "key", key) 48 | 49 | if err := controllerutil.SetControllerReference(cfg.cluster, obj, cfg.scheme); err != nil { 50 | return err 51 | } 52 | 53 | if err := cfg.client.Create(ctx, obj); err != nil { 54 | if apierrors.IsAlreadyExists(err) { 55 | return cfg.client.Update(ctx, obj) 56 | } 57 | 58 | return err 59 | } 60 | 61 | return nil 62 | } 63 | -------------------------------------------------------------------------------- /pkg/controller/cluster/agent/shared_test.go: 
-------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 7 | "github.com/stretchr/testify/assert" 8 | "gopkg.in/yaml.v2" 9 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | ) 11 | 12 | func Test_sharedAgentData(t *testing.T) { 13 | type args struct { 14 | cluster *v1alpha1.Cluster 15 | serviceName string 16 | ip string 17 | token string 18 | } 19 | 20 | tests := []struct { 21 | name string 22 | args args 23 | expectedData map[string]string 24 | }{ 25 | { 26 | name: "simple config", 27 | args: args{ 28 | cluster: &v1alpha1.Cluster{ 29 | ObjectMeta: v1.ObjectMeta{ 30 | Name: "mycluster", 31 | Namespace: "ns-1", 32 | }, 33 | Spec: v1alpha1.ClusterSpec{ 34 | Version: "v1.2.3", 35 | }, 36 | }, 37 | ip: "10.0.0.21", 38 | serviceName: "service-name", 39 | token: "dnjklsdjnksd892389238", 40 | }, 41 | expectedData: map[string]string{ 42 | "clusterName": "mycluster", 43 | "clusterNamespace": "ns-1", 44 | "serverIP": "10.0.0.21", 45 | "serviceName": "service-name", 46 | "token": "dnjklsdjnksd892389238", 47 | "version": "v1.2.3", 48 | }, 49 | }, 50 | { 51 | name: "version in status", 52 | args: args{ 53 | cluster: &v1alpha1.Cluster{ 54 | ObjectMeta: v1.ObjectMeta{ 55 | Name: "mycluster", 56 | Namespace: "ns-1", 57 | }, 58 | Spec: v1alpha1.ClusterSpec{ 59 | Version: "v1.2.3", 60 | }, 61 | Status: v1alpha1.ClusterStatus{ 62 | HostVersion: "v1.3.3", 63 | }, 64 | }, 65 | ip: "10.0.0.21", 66 | serviceName: "service-name", 67 | token: "dnjklsdjnksd892389238", 68 | }, 69 | expectedData: map[string]string{ 70 | "clusterName": "mycluster", 71 | "clusterNamespace": "ns-1", 72 | "serverIP": "10.0.0.21", 73 | "serviceName": "service-name", 74 | "token": "dnjklsdjnksd892389238", 75 | "version": "v1.2.3", 76 | }, 77 | }, 78 | { 79 | name: "missing version in spec", 80 | args: args{ 81 | cluster: &v1alpha1.Cluster{ 82 | ObjectMeta: v1.ObjectMeta{ 83 | Name: "mycluster", 84 | Namespace: "ns-1", 85 | }, 86 | Status: v1alpha1.ClusterStatus{ 87 | HostVersion: "v1.3.3", 88 | }, 89 | }, 90 | ip: "10.0.0.21", 91 | serviceName: "service-name", 92 | token: "dnjklsdjnksd892389238", 93 | }, 94 | expectedData: map[string]string{ 95 | "clusterName": "mycluster", 96 | "clusterNamespace": "ns-1", 97 | "serverIP": "10.0.0.21", 98 | "serviceName": "service-name", 99 | "token": "dnjklsdjnksd892389238", 100 | "version": "v1.3.3", 101 | }, 102 | }, 103 | } 104 | 105 | for _, tt := range tests { 106 | t.Run(tt.name, func(t *testing.T) { 107 | config := sharedAgentData(tt.args.cluster, tt.args.serviceName, tt.args.token, tt.args.ip) 108 | 109 | data := make(map[string]string) 110 | err := yaml.Unmarshal([]byte(config), data) 111 | 112 | assert.NoError(t, err) 113 | assert.Equal(t, tt.expectedData, data) 114 | }) 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /pkg/controller/cluster/agent/virtual_test.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "gopkg.in/yaml.v2" 8 | ) 9 | 10 | func Test_virtualAgentData(t *testing.T) { 11 | type args struct { 12 | serviceIP string 13 | token string 14 | } 15 | 16 | tests := []struct { 17 | name string 18 | args args 19 | expectedData map[string]string 20 | }{ 21 | { 22 | name: "simple config", 23 | args: args{ 24 | serviceIP: "10.0.0.21", 25 | token: "dnjklsdjnksd892389238", 26 | }, 27 
| expectedData: map[string]string{ 28 | "server": "https://10.0.0.21", 29 | "token": "dnjklsdjnksd892389238", 30 | "with-node-id": "true", 31 | }, 32 | }, 33 | } 34 | 35 | for _, tt := range tests { 36 | t.Run(tt.name, func(t *testing.T) { 37 | config := virtualAgentData(tt.args.serviceIP, tt.args.token) 38 | 39 | data := make(map[string]string) 40 | err := yaml.Unmarshal([]byte(config), data) 41 | 42 | assert.NoError(t, err) 43 | assert.Equal(t, tt.expectedData, data) 44 | }) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /pkg/controller/cluster/cluster_finalize.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "reflect" 7 | 8 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 9 | "github.com/rancher/k3k/pkg/controller" 10 | "github.com/rancher/k3k/pkg/controller/cluster/agent" 11 | v1 "k8s.io/api/core/v1" 12 | rbacv1 "k8s.io/api/rbac/v1" 13 | "k8s.io/apimachinery/pkg/types" 14 | ctrl "sigs.k8s.io/controller-runtime" 15 | ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" 16 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 17 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 18 | ) 19 | 20 | func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster v1alpha1.Cluster) (reconcile.Result, error) { 21 | log := ctrl.LoggerFrom(ctx) 22 | log.Info("finalizing Cluster") 23 | 24 | // remove finalizer from the server pods and update them. 25 | matchingLabels := ctrlruntimeclient.MatchingLabels(map[string]string{"role": "server"}) 26 | listOpts := &ctrlruntimeclient.ListOptions{Namespace: cluster.Namespace} 27 | matchingLabels.ApplyToList(listOpts) 28 | 29 | var podList v1.PodList 30 | if err := c.Client.List(ctx, &podList, listOpts); err != nil { 31 | return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err) 32 | } 33 | 34 | for _, pod := range podList.Items { 35 | if controllerutil.ContainsFinalizer(&pod, etcdPodFinalizerName) { 36 | controllerutil.RemoveFinalizer(&pod, etcdPodFinalizerName) 37 | 38 | if err := c.Client.Update(ctx, &pod); err != nil { 39 | return reconcile.Result{}, err 40 | } 41 | } 42 | } 43 | 44 | if err := c.unbindNodeProxyClusterRole(ctx, &cluster); err != nil { 45 | return reconcile.Result{}, err 46 | } 47 | 48 | if controllerutil.ContainsFinalizer(&cluster, clusterFinalizerName) { 49 | // remove finalizer from the cluster and update it. 
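Dropping this finalizer lets the API server finish deleting the Cluster object, so it is done only after the server pod finalizers and the ClusterRoleBinding subjects have been cleaned up above.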
50 | controllerutil.RemoveFinalizer(&cluster, clusterFinalizerName) 51 | 52 | if err := c.Client.Update(ctx, &cluster); err != nil { 53 | return reconcile.Result{}, err 54 | } 55 | } 56 | 57 | return reconcile.Result{}, nil 58 | } 59 | 60 | func (c *ClusterReconciler) unbindNodeProxyClusterRole(ctx context.Context, cluster *v1alpha1.Cluster) error { 61 | clusterRoleBinding := &rbacv1.ClusterRoleBinding{} 62 | if err := c.Client.Get(ctx, types.NamespacedName{Name: "k3k-node-proxy"}, clusterRoleBinding); err != nil { 63 | return fmt.Errorf("failed to get or find k3k-node-proxy ClusterRoleBinding: %w", err) 64 | } 65 | 66 | subjectName := controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName) 67 | 68 | var cleanedSubjects []rbacv1.Subject 69 | 70 | for _, subject := range clusterRoleBinding.Subjects { 71 | if subject.Name != subjectName || subject.Namespace != cluster.Namespace { 72 | cleanedSubjects = append(cleanedSubjects, subject) 73 | } 74 | } 75 | 76 | // if no subject was removed, all good 77 | if reflect.DeepEqual(clusterRoleBinding.Subjects, cleanedSubjects) { 78 | return nil 79 | } 80 | 81 | clusterRoleBinding.Subjects = cleanedSubjects 82 | 83 | return c.Client.Update(ctx, clusterRoleBinding) 84 | } 85 | -------------------------------------------------------------------------------- /pkg/controller/cluster/cluster_suite_test.go: -------------------------------------------------------------------------------- 1 | package cluster_test 2 | 3 | import ( 4 | "context" 5 | "path/filepath" 6 | "testing" 7 | 8 | "github.com/go-logr/zapr" 9 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 10 | "github.com/rancher/k3k/pkg/controller/cluster" 11 | 12 | "go.uber.org/zap" 13 | appsv1 "k8s.io/api/apps/v1" 14 | corev1 "k8s.io/api/core/v1" 15 | networkingv1 "k8s.io/api/networking/v1" 16 | rbacv1 "k8s.io/api/rbac/v1" 17 | "k8s.io/apimachinery/pkg/runtime" 18 | "k8s.io/client-go/kubernetes" 19 | ctrl "sigs.k8s.io/controller-runtime" 20 | "sigs.k8s.io/controller-runtime/pkg/client" 21 | "sigs.k8s.io/controller-runtime/pkg/envtest" 22 | 23 | . "github.com/onsi/ginkgo/v2" 24 | . 
"github.com/onsi/gomega" 25 | ) 26 | 27 | func TestController(t *testing.T) { 28 | RegisterFailHandler(Fail) 29 | RunSpecs(t, "Cluster Controller Suite") 30 | } 31 | 32 | var ( 33 | testEnv *envtest.Environment 34 | k8s *kubernetes.Clientset 35 | k8sClient client.Client 36 | ctx context.Context 37 | cancel context.CancelFunc 38 | ) 39 | 40 | var _ = BeforeSuite(func() { 41 | 42 | By("bootstrapping test environment") 43 | testEnv = &envtest.Environment{ 44 | CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")}, 45 | ErrorIfCRDPathMissing: true, 46 | } 47 | cfg, err := testEnv.Start() 48 | Expect(err).NotTo(HaveOccurred()) 49 | 50 | k8s, err = kubernetes.NewForConfig(cfg) 51 | Expect(err).NotTo(HaveOccurred()) 52 | 53 | scheme := buildScheme() 54 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) 55 | Expect(err).NotTo(HaveOccurred()) 56 | 57 | ctrl.SetLogger(zapr.NewLogger(zap.NewNop())) 58 | 59 | mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme}) 60 | Expect(err).NotTo(HaveOccurred()) 61 | 62 | ctx, cancel = context.WithCancel(context.Background()) 63 | err = cluster.Add(ctx, mgr, "rancher/k3k-kubelet:latest", "", "rancher/k3s", "") 64 | Expect(err).NotTo(HaveOccurred()) 65 | 66 | go func() { 67 | defer GinkgoRecover() 68 | err = mgr.Start(ctx) 69 | Expect(err).NotTo(HaveOccurred(), "failed to run manager") 70 | }() 71 | }) 72 | 73 | var _ = AfterSuite(func() { 74 | cancel() 75 | 76 | By("tearing down the test environment") 77 | err := testEnv.Stop() 78 | Expect(err).NotTo(HaveOccurred()) 79 | }) 80 | 81 | func buildScheme() *runtime.Scheme { 82 | scheme := runtime.NewScheme() 83 | 84 | err := corev1.AddToScheme(scheme) 85 | Expect(err).NotTo(HaveOccurred()) 86 | err = rbacv1.AddToScheme(scheme) 87 | Expect(err).NotTo(HaveOccurred()) 88 | err = appsv1.AddToScheme(scheme) 89 | Expect(err).NotTo(HaveOccurred()) 90 | err = networkingv1.AddToScheme(scheme) 91 | Expect(err).NotTo(HaveOccurred()) 92 | err = v1alpha1.AddToScheme(scheme) 93 | Expect(err).NotTo(HaveOccurred()) 94 | 95 | return scheme 96 | } 97 | -------------------------------------------------------------------------------- /pkg/controller/cluster/server/bootstrap/bootstrap.go: -------------------------------------------------------------------------------- 1 | package bootstrap 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "encoding/base64" 7 | "encoding/json" 8 | "errors" 9 | "fmt" 10 | "net/http" 11 | "syscall" 12 | "time" 13 | 14 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 15 | "github.com/rancher/k3k/pkg/controller" 16 | v1 "k8s.io/api/core/v1" 17 | "k8s.io/apimachinery/pkg/types" 18 | "sigs.k8s.io/controller-runtime/pkg/client" 19 | ) 20 | 21 | var ErrServerNotReady = errors.New("server not ready") 22 | 23 | type ControlRuntimeBootstrap struct { 24 | ServerCA content `json:"serverCA"` 25 | ServerCAKey content `json:"serverCAKey"` 26 | ClientCA content `json:"clientCA"` 27 | ClientCAKey content `json:"clientCAKey"` 28 | ETCDServerCA content `json:"etcdServerCA"` 29 | ETCDServerCAKey content `json:"etcdServerCAKey"` 30 | } 31 | 32 | type content struct { 33 | Timestamp string 34 | Content string 35 | } 36 | 37 | // Generate generates the bootstrap for the cluster: 38 | // 1- use the server token to get the bootstrap data from k3s 39 | // 2- save the bootstrap data as a secret 40 | func GenerateBootstrapData(ctx context.Context, cluster *v1alpha1.Cluster, ip, token string) ([]byte, error) { 41 | bootstrap, err := requestBootstrap(token, ip) 42 | if err 
!= nil { 43 | return nil, fmt.Errorf("failed to request bootstrap secret: %w", err) 44 | } 45 | 46 | if err := decodeBootstrap(bootstrap); err != nil { 47 | return nil, fmt.Errorf("failed to decode bootstrap secret: %w", err) 48 | } 49 | 50 | return json.Marshal(bootstrap) 51 | } 52 | 53 | func requestBootstrap(token, serverIP string) (*ControlRuntimeBootstrap, error) { 54 | url := "https://" + serverIP + "/v1-k3s/server-bootstrap" 55 | 56 | client := http.Client{ 57 | Transport: &http.Transport{ 58 | TLSClientConfig: &tls.Config{ 59 | InsecureSkipVerify: true, 60 | }, 61 | }, 62 | Timeout: 5 * time.Second, 63 | } 64 | 65 | req, err := http.NewRequest(http.MethodGet, url, nil) 66 | if err != nil { 67 | return nil, err 68 | } 69 | 70 | req.Header.Add("Authorization", "Basic "+basicAuth("server", token)) 71 | 72 | resp, err := client.Do(req) 73 | if err != nil { 74 | if errors.Is(err, syscall.ECONNREFUSED) { 75 | return nil, ErrServerNotReady 76 | } 77 | 78 | return nil, err 79 | } 80 | defer resp.Body.Close() 81 | 82 | var runtimeBootstrap ControlRuntimeBootstrap 83 | if err := json.NewDecoder(resp.Body).Decode(&runtimeBootstrap); err != nil { 84 | return nil, err 85 | } 86 | 87 | return &runtimeBootstrap, nil 88 | } 89 | 90 | func basicAuth(username, password string) string { 91 | auth := username + ":" + password 92 | return base64.StdEncoding.EncodeToString([]byte(auth)) 93 | } 94 | 95 | func decodeBootstrap(bootstrap *ControlRuntimeBootstrap) error { 96 | //client-ca 97 | decoded, err := base64.StdEncoding.DecodeString(bootstrap.ClientCA.Content) 98 | if err != nil { 99 | return err 100 | } 101 | 102 | bootstrap.ClientCA.Content = string(decoded) 103 | 104 | //client-ca-key 105 | decoded, err = base64.StdEncoding.DecodeString(bootstrap.ClientCAKey.Content) 106 | if err != nil { 107 | return err 108 | } 109 | 110 | bootstrap.ClientCAKey.Content = string(decoded) 111 | 112 | //server-ca 113 | decoded, err = base64.StdEncoding.DecodeString(bootstrap.ServerCA.Content) 114 | if err != nil { 115 | return err 116 | } 117 | 118 | bootstrap.ServerCA.Content = string(decoded) 119 | 120 | //server-ca-key 121 | decoded, err = base64.StdEncoding.DecodeString(bootstrap.ServerCAKey.Content) 122 | if err != nil { 123 | return err 124 | } 125 | 126 | bootstrap.ServerCAKey.Content = string(decoded) 127 | 128 | //etcd-ca 129 | decoded, err = base64.StdEncoding.DecodeString(bootstrap.ETCDServerCA.Content) 130 | if err != nil { 131 | return err 132 | } 133 | 134 | bootstrap.ETCDServerCA.Content = string(decoded) 135 | 136 | //etcd-ca-key 137 | decoded, err = base64.StdEncoding.DecodeString(bootstrap.ETCDServerCAKey.Content) 138 | if err != nil { 139 | return err 140 | } 141 | 142 | bootstrap.ETCDServerCAKey.Content = string(decoded) 143 | 144 | return nil 145 | } 146 | 147 | func DecodedBootstrap(token, ip string) (*ControlRuntimeBootstrap, error) { 148 | bootstrap, err := requestBootstrap(token, ip) 149 | if err != nil { 150 | return nil, err 151 | } 152 | 153 | if err := decodeBootstrap(bootstrap); err != nil { 154 | return nil, err 155 | } 156 | 157 | return bootstrap, nil 158 | } 159 | 160 | func GetFromSecret(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster) (*ControlRuntimeBootstrap, error) { 161 | key := types.NamespacedName{ 162 | Name: controller.SafeConcatNameWithPrefix(cluster.Name, "bootstrap"), 163 | Namespace: cluster.Namespace, 164 | } 165 | 166 | var bootstrapSecret v1.Secret 167 | if err := client.Get(ctx, key, &bootstrapSecret); err != nil { 168 | return nil, err 169 | 
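// The "bootstrap" data key read just below is expected to hold the ControlRuntimeBootstrap
// JSON produced by GenerateBootstrapData above; the json.Unmarshal further down is its counterpart.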
} 170 | 171 | bootstrapData := bootstrapSecret.Data["bootstrap"] 172 | if bootstrapData == nil { 173 | return nil, errors.New("empty bootstrap") 174 | } 175 | 176 | var bootstrap ControlRuntimeBootstrap 177 | err := json.Unmarshal(bootstrapData, &bootstrap) 178 | 179 | return &bootstrap, err 180 | } 181 | -------------------------------------------------------------------------------- /pkg/controller/cluster/server/config.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 7 | "github.com/rancher/k3k/pkg/controller" 8 | "github.com/rancher/k3k/pkg/controller/cluster/agent" 9 | v1 "k8s.io/api/core/v1" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | ) 12 | 13 | func (s *Server) Config(init bool, serviceIP string) (*v1.Secret, error) { 14 | name := configSecretName(s.cluster.Name, init) 15 | s.cluster.Status.TLSSANs = append(s.cluster.Spec.TLSSANs, 16 | serviceIP, 17 | ServiceName(s.cluster.Name), 18 | fmt.Sprintf("%s.%s", ServiceName(s.cluster.Name), s.cluster.Namespace), 19 | ) 20 | 21 | config := serverConfigData(serviceIP, s.cluster, s.token) 22 | if init { 23 | config = initConfigData(s.cluster, s.token) 24 | } 25 | 26 | return &v1.Secret{ 27 | TypeMeta: metav1.TypeMeta{ 28 | Kind: "Secret", 29 | APIVersion: "v1", 30 | }, 31 | ObjectMeta: metav1.ObjectMeta{ 32 | Name: name, 33 | Namespace: s.cluster.Namespace, 34 | }, 35 | Data: map[string][]byte{ 36 | "config.yaml": []byte(config), 37 | }, 38 | }, nil 39 | } 40 | 41 | func serverConfigData(serviceIP string, cluster *v1alpha1.Cluster, token string) string { 42 | return "cluster-init: true\nserver: https://" + serviceIP + "\n" + serverOptions(cluster, token) 43 | } 44 | 45 | func initConfigData(cluster *v1alpha1.Cluster, token string) string { 46 | return "cluster-init: true\n" + serverOptions(cluster, token) 47 | } 48 | 49 | func serverOptions(cluster *v1alpha1.Cluster, token string) string { 50 | var opts string 51 | 52 | // TODO: generate token if not found 53 | if token != "" { 54 | opts = "token: " + token + "\n" 55 | } 56 | 57 | if cluster.Status.ClusterCIDR != "" { 58 | opts = opts + "cluster-cidr: " + cluster.Status.ClusterCIDR + "\n" 59 | } 60 | 61 | if cluster.Status.ServiceCIDR != "" { 62 | opts = opts + "service-cidr: " + cluster.Status.ServiceCIDR + "\n" 63 | } 64 | 65 | if cluster.Spec.ClusterDNS != "" { 66 | opts = opts + "cluster-dns: " + cluster.Spec.ClusterDNS + "\n" 67 | } 68 | 69 | if len(cluster.Status.TLSSANs) > 0 { 70 | opts = opts + "tls-san:\n" 71 | for _, addr := range cluster.Status.TLSSANs { 72 | opts = opts + "- " + addr + "\n" 73 | } 74 | } 75 | 76 | if cluster.Spec.Mode != agent.VirtualNodeMode { 77 | opts = opts + "disable-agent: true\negress-selector-mode: disabled\ndisable:\n- servicelb\n- traefik\n- metrics-server\n- local-storage" 78 | } 79 | // TODO: Add extra args to the options 80 | 81 | return opts 82 | } 83 | 84 | func configSecretName(clusterName string, init bool) string { 85 | if !init { 86 | return controller.SafeConcatNameWithPrefix(clusterName, configName) 87 | } 88 | 89 | return controller.SafeConcatNameWithPrefix(clusterName, initConfigName) 90 | } 91 | -------------------------------------------------------------------------------- /pkg/controller/cluster/server/ingress.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | 6 | 
"github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 7 | "github.com/rancher/k3k/pkg/controller" 8 | networkingv1 "k8s.io/api/networking/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/utils/ptr" 11 | ) 12 | 13 | const ( 14 | httpsPort = 443 15 | k3sServerPort = 6443 16 | etcdPort = 2379 17 | ) 18 | 19 | func IngressName(clusterName string) string { 20 | return controller.SafeConcatNameWithPrefix(clusterName, "ingress") 21 | } 22 | 23 | func Ingress(ctx context.Context, cluster *v1alpha1.Cluster) networkingv1.Ingress { 24 | ingress := networkingv1.Ingress{ 25 | TypeMeta: metav1.TypeMeta{ 26 | Kind: "Ingress", 27 | APIVersion: "networking.k8s.io/v1", 28 | }, 29 | ObjectMeta: metav1.ObjectMeta{ 30 | Name: IngressName(cluster.Name), 31 | Namespace: cluster.Namespace, 32 | }, 33 | Spec: networkingv1.IngressSpec{ 34 | Rules: ingressRules(cluster), 35 | }, 36 | } 37 | 38 | if cluster.Spec.Expose != nil && cluster.Spec.Expose.Ingress != nil { 39 | ingressConfig := cluster.Spec.Expose.Ingress 40 | 41 | if ingressConfig.IngressClassName != "" { 42 | ingress.Spec.IngressClassName = ptr.To(ingressConfig.IngressClassName) 43 | } 44 | 45 | if ingressConfig.Annotations != nil { 46 | ingress.Annotations = ingressConfig.Annotations 47 | } 48 | } 49 | 50 | return ingress 51 | } 52 | 53 | func ingressRules(cluster *v1alpha1.Cluster) []networkingv1.IngressRule { 54 | var ingressRules []networkingv1.IngressRule 55 | 56 | if cluster.Spec.Expose == nil || cluster.Spec.Expose.Ingress == nil { 57 | return ingressRules 58 | } 59 | 60 | path := networkingv1.HTTPIngressPath{ 61 | Path: "/", 62 | PathType: ptr.To(networkingv1.PathTypePrefix), 63 | Backend: networkingv1.IngressBackend{ 64 | Service: &networkingv1.IngressServiceBackend{ 65 | Name: ServiceName(cluster.Name), 66 | Port: networkingv1.ServiceBackendPort{ 67 | Number: httpsPort, 68 | }, 69 | }, 70 | }, 71 | } 72 | 73 | hosts := cluster.Spec.TLSSANs 74 | for _, host := range hosts { 75 | ingressRules = append(ingressRules, networkingv1.IngressRule{ 76 | Host: host, 77 | IngressRuleValue: networkingv1.IngressRuleValue{ 78 | HTTP: &networkingv1.HTTPIngressRuleValue{ 79 | Paths: []networkingv1.HTTPIngressPath{path}, 80 | }, 81 | }, 82 | }) 83 | } 84 | 85 | return ingressRules 86 | } 87 | -------------------------------------------------------------------------------- /pkg/controller/cluster/server/service.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 5 | "github.com/rancher/k3k/pkg/controller" 6 | v1 "k8s.io/api/core/v1" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | "k8s.io/apimachinery/pkg/util/intstr" 9 | ) 10 | 11 | func Service(cluster *v1alpha1.Cluster) *v1.Service { 12 | service := &v1.Service{ 13 | TypeMeta: metav1.TypeMeta{ 14 | Kind: "Service", 15 | APIVersion: "v1", 16 | }, 17 | ObjectMeta: metav1.ObjectMeta{ 18 | Name: ServiceName(cluster.Name), 19 | Namespace: cluster.Namespace, 20 | }, 21 | Spec: v1.ServiceSpec{ 22 | Selector: map[string]string{ 23 | "cluster": cluster.Name, 24 | "role": "server", 25 | }, 26 | }, 27 | } 28 | 29 | k3sServerPort := v1.ServicePort{ 30 | Name: "k3s-server-port", 31 | Protocol: v1.ProtocolTCP, 32 | Port: httpsPort, 33 | TargetPort: intstr.FromInt(k3sServerPort), 34 | } 35 | 36 | etcdPort := v1.ServicePort{ 37 | Name: "k3s-etcd-port", 38 | Protocol: v1.ProtocolTCP, 39 | Port: etcdPort, 40 | } 41 | 42 | // If no expose is specified, default to ClusterIP 43 | if 
cluster.Spec.Expose == nil { 44 | service.Spec.Type = v1.ServiceTypeClusterIP 45 | service.Spec.Ports = append(service.Spec.Ports, k3sServerPort, etcdPort) 46 | } 47 | 48 | // If expose is specified, set the type to the appropriate type 49 | if cluster.Spec.Expose != nil { 50 | expose := cluster.Spec.Expose 51 | 52 | // ingress 53 | if expose.Ingress != nil { 54 | service.Spec.Type = v1.ServiceTypeClusterIP 55 | service.Spec.Ports = append(service.Spec.Ports, k3sServerPort, etcdPort) 56 | } 57 | 58 | // loadbalancer 59 | if expose.LoadBalancer != nil { 60 | service.Spec.Type = v1.ServiceTypeLoadBalancer 61 | addLoadBalancerPorts(service, *expose.LoadBalancer, k3sServerPort, etcdPort) 62 | } 63 | 64 | // nodeport 65 | if expose.NodePort != nil { 66 | service.Spec.Type = v1.ServiceTypeNodePort 67 | addNodePortPorts(service, *expose.NodePort, k3sServerPort, etcdPort) 68 | } 69 | } 70 | 71 | return service 72 | } 73 | 74 | // addLoadBalancerPorts adds the load balancer ports to the service 75 | func addLoadBalancerPorts(service *v1.Service, loadbalancerConfig v1alpha1.LoadBalancerConfig, k3sServerPort, etcdPort v1.ServicePort) { 76 | // If the server port is not specified, use the default port 77 | if loadbalancerConfig.ServerPort == nil { 78 | service.Spec.Ports = append(service.Spec.Ports, k3sServerPort) 79 | } else if *loadbalancerConfig.ServerPort > 0 { 80 | // If the server port is specified, set the port, otherwise the service will not be exposed 81 | k3sServerPort.Port = *loadbalancerConfig.ServerPort 82 | service.Spec.Ports = append(service.Spec.Ports, k3sServerPort) 83 | } 84 | 85 | // If the etcd port is not specified, use the default port 86 | if loadbalancerConfig.ETCDPort == nil { 87 | service.Spec.Ports = append(service.Spec.Ports, etcdPort) 88 | } else if *loadbalancerConfig.ETCDPort > 0 { 89 | // If the etcd port is specified, set the port, otherwise the service will not be exposed 90 | etcdPort.Port = *loadbalancerConfig.ETCDPort 91 | service.Spec.Ports = append(service.Spec.Ports, etcdPort) 92 | } 93 | } 94 | 95 | // addNodePortPorts adds the node port ports to the service 96 | func addNodePortPorts(service *v1.Service, nodePortConfig v1alpha1.NodePortConfig, k3sServerPort, etcdPort v1.ServicePort) { 97 | // If the server port is not specified Kubernetes will set the node port to a random port between 30000-32767 98 | if nodePortConfig.ServerPort == nil { 99 | service.Spec.Ports = append(service.Spec.Ports, k3sServerPort) 100 | } else { 101 | serverNodePort := *nodePortConfig.ServerPort 102 | 103 | // If the server port is in the range of 30000-32767, set the node port 104 | // otherwise the service will not be exposed 105 | if serverNodePort >= 30000 && serverNodePort <= 32767 { 106 | k3sServerPort.NodePort = serverNodePort 107 | service.Spec.Ports = append(service.Spec.Ports, k3sServerPort) 108 | } 109 | } 110 | 111 | // If the etcd port is not specified Kubernetes will set the node port to a random port between 30000-32767 112 | if nodePortConfig.ETCDPort == nil { 113 | service.Spec.Ports = append(service.Spec.Ports, etcdPort) 114 | } else { 115 | etcdNodePort := *nodePortConfig.ETCDPort 116 | 117 | // If the etcd port is in the range of 30000-32767, set the node port 118 | // otherwise the service will not be exposed 119 | if etcdNodePort >= 30000 && etcdNodePort <= 32767 { 120 | etcdPort.NodePort = etcdNodePort 121 | service.Spec.Ports = append(service.Spec.Ports, etcdPort) 122 | } 123 | } 124 | } 125 | 126 | func (s *Server) StatefulServerService() *v1.Service { 127 | 
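// Illustrative note: the Service returned below is headless (ClusterIP: None), the usual
// Kubernetes pattern for giving StatefulSet members stable per-pod DNS records. For a cluster
// named, say, "demo" (an arbitrary example name), headlessServiceName() resolves to
// "k3k-demo-service-headless", so a server pod would be reachable at
// "<pod-name>.k3k-demo-service-headless.<namespace>.svc".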
return &v1.Service{ 128 | TypeMeta: metav1.TypeMeta{ 129 | Kind: "Service", 130 | APIVersion: "v1", 131 | }, 132 | ObjectMeta: metav1.ObjectMeta{ 133 | Name: headlessServiceName(s.cluster.Name), 134 | Namespace: s.cluster.Namespace, 135 | }, 136 | Spec: v1.ServiceSpec{ 137 | Type: v1.ServiceTypeClusterIP, 138 | ClusterIP: v1.ClusterIPNone, 139 | Selector: map[string]string{ 140 | "cluster": s.cluster.Name, 141 | "role": "server", 142 | }, 143 | Ports: []v1.ServicePort{ 144 | { 145 | Name: "k3s-server-port", 146 | Protocol: v1.ProtocolTCP, 147 | Port: httpsPort, 148 | TargetPort: intstr.FromInt(k3sServerPort), 149 | }, 150 | { 151 | Name: "k3s-etcd-port", 152 | Protocol: v1.ProtocolTCP, 153 | Port: etcdPort, 154 | }, 155 | }, 156 | }, 157 | } 158 | } 159 | 160 | func ServiceName(clusterName string) string { 161 | return controller.SafeConcatNameWithPrefix(clusterName, "service") 162 | } 163 | 164 | func headlessServiceName(clusterName string) string { 165 | return controller.SafeConcatNameWithPrefix(clusterName, "service", "headless") 166 | } 167 | -------------------------------------------------------------------------------- /pkg/controller/cluster/server/template.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | var singleServerTemplate string = ` 4 | if [ -d "{{.ETCD_DIR}}" ]; then 5 | # if directory exists then it means its not an initial run 6 | /bin/k3s server --cluster-reset --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log 7 | fi 8 | rm -f /var/lib/rancher/k3s/server/db/reset-flag 9 | /bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log` 10 | 11 | var HAServerTemplate string = ` 12 | if [ ${POD_NAME: -1} == 0 ] && [ ! -d "{{.ETCD_DIR}}" ]; then 13 | /bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log 14 | else 15 | /bin/k3s server --config {{.SERVER_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log 16 | fi` 17 | -------------------------------------------------------------------------------- /pkg/controller/cluster/token.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "context" 5 | "crypto/rand" 6 | "encoding/hex" 7 | "fmt" 8 | 9 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 10 | "github.com/rancher/k3k/pkg/controller" 11 | v1 "k8s.io/api/core/v1" 12 | apierrors "k8s.io/apimachinery/pkg/api/errors" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/types" 15 | ctrl "sigs.k8s.io/controller-runtime" 16 | "sigs.k8s.io/controller-runtime/pkg/client" 17 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 18 | ) 19 | 20 | func (c *ClusterReconciler) token(ctx context.Context, cluster *v1alpha1.Cluster) (string, error) { 21 | if cluster.Spec.TokenSecretRef == nil { 22 | return c.ensureTokenSecret(ctx, cluster) 23 | } 24 | // get token data from secretRef 25 | nn := types.NamespacedName{ 26 | Name: cluster.Spec.TokenSecretRef.Name, 27 | Namespace: cluster.Spec.TokenSecretRef.Namespace, 28 | } 29 | 30 | var tokenSecret v1.Secret 31 | 32 | if err := c.Client.Get(ctx, nn, &tokenSecret); err != nil { 33 | return "", err 34 | } 35 | 36 | if _, ok := tokenSecret.Data["token"]; !ok { 37 | return "", fmt.Errorf("no token field in secret %s/%s", nn.Namespace, nn.Name) 38 | } 39 | 40 | return string(tokenSecret.Data["token"]), nil 41 | } 42 | 43 | func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster 
*v1alpha1.Cluster) (string, error) { 44 | log := ctrl.LoggerFrom(ctx) 45 | 46 | // check if the secret is already created 47 | key := types.NamespacedName{ 48 | Name: TokenSecretName(cluster.Name), 49 | Namespace: cluster.Namespace, 50 | } 51 | 52 | var tokenSecret v1.Secret 53 | if err := c.Client.Get(ctx, key, &tokenSecret); err != nil { 54 | if !apierrors.IsNotFound(err) { 55 | return "", err 56 | } 57 | } 58 | 59 | if tokenSecret.Data != nil { 60 | return string(tokenSecret.Data["token"]), nil 61 | } 62 | 63 | log.Info("Token secret is not specified, creating a random token") 64 | 65 | token, err := random(16) 66 | if err != nil { 67 | return "", err 68 | } 69 | 70 | tokenSecret = TokenSecretObj(token, cluster.Name, cluster.Namespace) 71 | key = client.ObjectKeyFromObject(&tokenSecret) 72 | 73 | result, err := controllerutil.CreateOrUpdate(ctx, c.Client, &tokenSecret, func() error { 74 | return controllerutil.SetControllerReference(cluster, &tokenSecret, c.Scheme) 75 | }) 76 | 77 | if result != controllerutil.OperationResultNone { 78 | log.Info("ensuring tokenSecret", "key", key, "result", result) 79 | } 80 | 81 | return token, err 82 | } 83 | 84 | func random(size int) (string, error) { 85 | token := make([]byte, size) 86 | 87 | _, err := rand.Read(token) 88 | if err != nil { 89 | return "", err 90 | } 91 | 92 | return hex.EncodeToString(token), err 93 | } 94 | 95 | func TokenSecretObj(token, name, namespace string) v1.Secret { 96 | return v1.Secret{ 97 | TypeMeta: metav1.TypeMeta{ 98 | APIVersion: "v1", 99 | Kind: "Secret", 100 | }, 101 | ObjectMeta: metav1.ObjectMeta{ 102 | Name: TokenSecretName(name), 103 | Namespace: namespace, 104 | }, 105 | Data: map[string][]byte{ 106 | "token": []byte(token), 107 | }, 108 | } 109 | } 110 | 111 | func TokenSecretName(clusterName string) string { 112 | return controller.SafeConcatNameWithPrefix(clusterName, "token") 113 | } 114 | -------------------------------------------------------------------------------- /pkg/controller/controller.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/hex" 6 | "strings" 7 | "time" 8 | 9 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 10 | "k8s.io/apimachinery/pkg/util/wait" 11 | ) 12 | 13 | const ( 14 | namePrefix = "k3k" 15 | AdminCommonName = "system:admin" 16 | ) 17 | 18 | // Backoff is the cluster creation duration backoff 19 | var Backoff = wait.Backoff{ 20 | Steps: 5, 21 | Duration: 5 * time.Second, 22 | Factor: 2, 23 | Jitter: 0.1, 24 | } 25 | 26 | // K3SImage returns the rancher/k3s image tagged with the specified Version. 27 | // If Version is empty it will use with the same k8s version of the host cluster, 28 | // stored in the Status object. It will return the untagged version as last fallback. 29 | func K3SImage(cluster *v1alpha1.Cluster, k3SImage string) string { 30 | if cluster.Spec.Version != "" { 31 | return k3SImage + ":" + cluster.Spec.Version 32 | } 33 | 34 | if cluster.Status.HostVersion != "" { 35 | return k3SImage + ":" + cluster.Status.HostVersion 36 | } 37 | 38 | return k3SImage 39 | } 40 | 41 | // SafeConcatNameWithPrefix runs the SafeConcatName with extra prefix. 42 | func SafeConcatNameWithPrefix(name ...string) string { 43 | return SafeConcatName(append([]string{namePrefix}, name...)...) 
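// For example, SafeConcatNameWithPrefix("demo", "token") returns "k3k-demo-token"
// ("demo" being an arbitrary cluster name); joined results of 64 characters or more
// are shortened by SafeConcatName (below) with a sha256-derived suffix.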
44 | } 45 | 46 | // SafeConcatName concatenates the given strings and ensures the returned name is under 64 characters 47 | // by cutting the string off at 57 characters and setting the last 6 with an encoded version of the concatenated string. 48 | func SafeConcatName(name ...string) string { 49 | fullPath := strings.Join(name, "-") 50 | if len(fullPath) < 64 { 51 | return fullPath 52 | } 53 | 54 | digest := sha256.Sum256([]byte(fullPath)) 55 | 56 | // since we cut the string in the middle, the last char may not be compatible with what is expected in k8s 57 | // we are checking and if necessary removing the last char 58 | c := fullPath[56] 59 | if 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { 60 | return fullPath[0:57] + "-" + hex.EncodeToString(digest[0:])[0:5] 61 | } 62 | 63 | return fullPath[0:56] + "-" + hex.EncodeToString(digest[0:])[0:6] 64 | } 65 | -------------------------------------------------------------------------------- /pkg/controller/kubeconfig/kubeconfig.go: -------------------------------------------------------------------------------- 1 | package kubeconfig 2 | 3 | import ( 4 | "context" 5 | "crypto/x509" 6 | "fmt" 7 | "slices" 8 | "time" 9 | 10 | certutil "github.com/rancher/dynamiclistener/cert" 11 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 12 | "github.com/rancher/k3k/pkg/controller" 13 | "github.com/rancher/k3k/pkg/controller/certs" 14 | "github.com/rancher/k3k/pkg/controller/cluster/server" 15 | "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap" 16 | "github.com/sirupsen/logrus" 17 | v1 "k8s.io/api/core/v1" 18 | networkingv1 "k8s.io/api/networking/v1" 19 | "k8s.io/apimachinery/pkg/types" 20 | "k8s.io/apiserver/pkg/authentication/user" 21 | clientcmdapi "k8s.io/client-go/tools/clientcmd/api" 22 | "sigs.k8s.io/controller-runtime/pkg/client" 23 | ) 24 | 25 | type KubeConfig struct { 26 | AltNames certutil.AltNames 27 | CN string 28 | ORG []string 29 | ExpiryDate time.Duration 30 | } 31 | 32 | func New() *KubeConfig { 33 | return &KubeConfig{ 34 | CN: controller.AdminCommonName, 35 | ORG: []string{user.SystemPrivilegedGroup}, 36 | ExpiryDate: 0, 37 | } 38 | } 39 | 40 | func (k *KubeConfig) Extract(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string) (*clientcmdapi.Config, error) { 41 | bootstrapData, err := bootstrap.GetFromSecret(ctx, client, cluster) 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | serverCACert := []byte(bootstrapData.ServerCA.Content) 47 | 48 | adminCert, adminKey, err := certs.CreateClientCertKey( 49 | k.CN, 50 | k.ORG, 51 | &k.AltNames, 52 | []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, 53 | k.ExpiryDate, 54 | bootstrapData.ClientCA.Content, 55 | bootstrapData.ClientCAKey.Content, 56 | ) 57 | if err != nil { 58 | return nil, err 59 | } 60 | 61 | url, err := getURLFromService(ctx, client, cluster, hostServerIP) 62 | if err != nil { 63 | return nil, err 64 | } 65 | 66 | config := NewConfig(url, serverCACert, adminCert, adminKey) 67 | 68 | return config, nil 69 | } 70 | 71 | func NewConfig(url string, serverCA, clientCert, clientKey []byte) *clientcmdapi.Config { 72 | config := clientcmdapi.NewConfig() 73 | 74 | cluster := clientcmdapi.NewCluster() 75 | cluster.CertificateAuthorityData = serverCA 76 | cluster.Server = url 77 | 78 | authInfo := clientcmdapi.NewAuthInfo() 79 | authInfo.ClientCertificateData = clientCert 80 | authInfo.ClientKeyData = clientKey 81 | 82 | context := clientcmdapi.NewContext() 83 | context.AuthInfo = "default" 84 | context.Cluster = "default" 85 | 
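// All three entries below are registered under the key "default", so the resulting
// kubeconfig serializes roughly as (illustrative only, using the standard clientcmd field names):
//
//	clusters:
//	- name: default
//	  cluster: {server: <url>, certificate-authority-data: <serverCA>}
//	users:
//	- name: default
//	  user: {client-certificate-data: <adminCert>, client-key-data: <adminKey>}
//	contexts:
//	- name: default
//	  context: {cluster: default, user: default}
//	current-context: default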
86 | config.Clusters["default"] = cluster 87 | config.AuthInfos["default"] = authInfo 88 | config.Contexts["default"] = context 89 | config.CurrentContext = "default" 90 | 91 | return config 92 | } 93 | 94 | func getURLFromService(ctx context.Context, client client.Client, cluster *v1alpha1.Cluster, hostServerIP string) (string, error) { 95 | // get the server service to extract the right IP 96 | key := types.NamespacedName{ 97 | Name: server.ServiceName(cluster.Name), 98 | Namespace: cluster.Namespace, 99 | } 100 | 101 | var k3kService v1.Service 102 | if err := client.Get(ctx, key, &k3kService); err != nil { 103 | return "", err 104 | } 105 | 106 | ip := k3kService.Spec.ClusterIP 107 | port := int32(443) 108 | 109 | switch k3kService.Spec.Type { 110 | case v1.ServiceTypeNodePort: 111 | ip = hostServerIP 112 | port = k3kService.Spec.Ports[0].NodePort 113 | case v1.ServiceTypeLoadBalancer: 114 | ip = k3kService.Status.LoadBalancer.Ingress[0].IP 115 | port = k3kService.Spec.Ports[0].Port 116 | } 117 | 118 | if !slices.Contains(cluster.Status.TLSSANs, ip) { 119 | logrus.Warnf("ip %s not in tlsSANs", ip) 120 | 121 | if len(cluster.Spec.TLSSANs) > 0 { 122 | logrus.Warnf("Using the first TLS SAN in the spec as a fallback: %s", cluster.Spec.TLSSANs[0]) 123 | 124 | ip = cluster.Spec.TLSSANs[0] 125 | } else if len(cluster.Status.TLSSANs) > 0 { 126 | logrus.Warnf("No explicit tlsSANs specified. Trying to use the first TLS SAN in the status: %s", cluster.Status.TLSSANs[0]) 127 | 128 | ip = cluster.Status.TLSSANs[0] 129 | } else { 130 | logrus.Warn("ip not found in tlsSANs. This could cause issue with the certificate validation.") 131 | } 132 | } 133 | 134 | url := "https://" + ip 135 | if port != 443 { 136 | url = fmt.Sprintf("%s:%d", url, port) 137 | } 138 | 139 | // if ingress is specified, use the ingress host 140 | if cluster.Spec.Expose != nil && cluster.Spec.Expose.Ingress != nil { 141 | var k3kIngress networkingv1.Ingress 142 | 143 | ingressKey := types.NamespacedName{ 144 | Name: server.IngressName(cluster.Name), 145 | Namespace: cluster.Namespace, 146 | } 147 | 148 | if err := client.Get(ctx, ingressKey, &k3kIngress); err != nil { 149 | return "", err 150 | } 151 | 152 | url = fmt.Sprintf("https://%s", k3kIngress.Spec.Rules[0].Host) 153 | } 154 | 155 | return url, nil 156 | } 157 | -------------------------------------------------------------------------------- /pkg/controller/policy/namespace.go: -------------------------------------------------------------------------------- 1 | package policy 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 7 | v1 "k8s.io/api/core/v1" 8 | networkingv1 "k8s.io/api/networking/v1" 9 | "k8s.io/apimachinery/pkg/labels" 10 | "k8s.io/apimachinery/pkg/selection" 11 | ctrl "sigs.k8s.io/controller-runtime" 12 | "sigs.k8s.io/controller-runtime/pkg/client" 13 | ) 14 | 15 | // reconcileNamespacePodSecurityLabels will update the labels of the namespace to reconcile the PSA level specified in the VirtualClusterPolicy 16 | func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, namespace *v1.Namespace, policy *v1alpha1.VirtualClusterPolicy) { 17 | log := ctrl.LoggerFrom(ctx) 18 | log.Info("reconciling PSA labels") 19 | 20 | // cleanup of old labels 21 | delete(namespace.Labels, "pod-security.kubernetes.io/enforce") 22 | delete(namespace.Labels, "pod-security.kubernetes.io/enforce-version") 23 | delete(namespace.Labels, "pod-security.kubernetes.io/warn") 24 | delete(namespace.Labels, 
"pod-security.kubernetes.io/warn-version") 25 | 26 | // if a PSA level is specified add the proper labels 27 | if policy.Spec.PodSecurityAdmissionLevel != nil { 28 | psaLevel := *policy.Spec.PodSecurityAdmissionLevel 29 | 30 | namespace.Labels["pod-security.kubernetes.io/enforce"] = string(psaLevel) 31 | namespace.Labels["pod-security.kubernetes.io/enforce-version"] = "latest" 32 | 33 | // skip the 'warn' only for the privileged PSA level 34 | if psaLevel != v1alpha1.PrivilegedPodSecurityAdmissionLevel { 35 | namespace.Labels["pod-security.kubernetes.io/warn"] = string(psaLevel) 36 | namespace.Labels["pod-security.kubernetes.io/warn-version"] = "latest" 37 | } 38 | } 39 | } 40 | 41 | // cleanupNamespaces will cleanup the Namespaces without the "policy.k3k.io/policy-name" label 42 | // deleting the resources in them with the "app.kubernetes.io/managed-by=k3k-policy-controller" label 43 | func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context) error { 44 | log := ctrl.LoggerFrom(ctx) 45 | log.Info("deleting resources") 46 | 47 | var namespaces v1.NamespaceList 48 | if err := c.Client.List(ctx, &namespaces); err != nil { 49 | return err 50 | } 51 | 52 | for _, ns := range namespaces.Items { 53 | deleteOpts := []client.DeleteAllOfOption{ 54 | client.InNamespace(ns.Name), 55 | client.MatchingLabels{ManagedByLabelKey: VirtualPolicyControllerName}, 56 | } 57 | 58 | // if the namespace is bound to a policy -> cleanup resources of other policies 59 | if ns.Labels[PolicyNameLabelKey] != "" { 60 | requirement, err := labels.NewRequirement(PolicyNameLabelKey, selection.NotEquals, []string{ns.Labels[PolicyNameLabelKey]}) 61 | 62 | // log the error but continue cleaning up the other namespaces 63 | if err != nil { 64 | log.Error(err, "error creating requirement", "policy", ns.Labels[PolicyNameLabelKey]) 65 | } else { 66 | sel := labels.NewSelector().Add(*requirement) 67 | deleteOpts = append(deleteOpts, client.MatchingLabelsSelector{Selector: sel}) 68 | } 69 | } 70 | 71 | if err := c.Client.DeleteAllOf(ctx, &networkingv1.NetworkPolicy{}, deleteOpts...); err != nil { 72 | return err 73 | } 74 | 75 | if err := c.Client.DeleteAllOf(ctx, &v1.ResourceQuota{}, deleteOpts...); err != nil { 76 | return err 77 | } 78 | 79 | if err := c.Client.DeleteAllOf(ctx, &v1.LimitRange{}, deleteOpts...); err != nil { 80 | return err 81 | } 82 | } 83 | 84 | return nil 85 | } 86 | -------------------------------------------------------------------------------- /pkg/controller/policy/networkpolicy.go: -------------------------------------------------------------------------------- 1 | package policy 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 7 | k3kcontroller "github.com/rancher/k3k/pkg/controller" 8 | v1 "k8s.io/api/core/v1" 9 | networkingv1 "k8s.io/api/networking/v1" 10 | apierrors "k8s.io/apimachinery/pkg/api/errors" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | ctrl "sigs.k8s.io/controller-runtime" 13 | "sigs.k8s.io/controller-runtime/pkg/client" 14 | ) 15 | 16 | func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, namespace string, policy *v1alpha1.VirtualClusterPolicy) error { 17 | log := ctrl.LoggerFrom(ctx) 18 | log.Info("reconciling NetworkPolicy") 19 | 20 | var cidrList []string 21 | 22 | if c.ClusterCIDR != "" { 23 | cidrList = []string{c.ClusterCIDR} 24 | } else { 25 | var nodeList v1.NodeList 26 | if err := c.Client.List(ctx, &nodeList); err != nil { 27 | return err 28 | } 29 | 30 | for _, node := range 
nodeList.Items { 31 | if len(node.Spec.PodCIDRs) > 0 { 32 | cidrList = append(cidrList, node.Spec.PodCIDRs...) 33 | } else { 34 | cidrList = append(cidrList, node.Spec.PodCIDR) 35 | } 36 | } 37 | } 38 | 39 | networkPolicy := networkPolicy(namespace, policy, cidrList) 40 | 41 | if err := ctrl.SetControllerReference(policy, networkPolicy, c.Scheme); err != nil { 42 | return err 43 | } 44 | 45 | // if disabled then delete the existing network policy 46 | if policy.Spec.DisableNetworkPolicy { 47 | err := c.Client.Delete(ctx, networkPolicy) 48 | return client.IgnoreNotFound(err) 49 | } 50 | 51 | // otherwise try to create/update 52 | err := c.Client.Create(ctx, networkPolicy) 53 | if apierrors.IsAlreadyExists(err) { 54 | return c.Client.Update(ctx, networkPolicy) 55 | } 56 | 57 | return err 58 | } 59 | 60 | func networkPolicy(namespaceName string, policy *v1alpha1.VirtualClusterPolicy, cidrList []string) *networkingv1.NetworkPolicy { 61 | return &networkingv1.NetworkPolicy{ 62 | TypeMeta: metav1.TypeMeta{ 63 | Kind: "NetworkPolicy", 64 | APIVersion: "networking.k8s.io/v1", 65 | }, 66 | ObjectMeta: metav1.ObjectMeta{ 67 | Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name), 68 | Namespace: namespaceName, 69 | Labels: map[string]string{ 70 | ManagedByLabelKey: VirtualPolicyControllerName, 71 | PolicyNameLabelKey: policy.Name, 72 | }, 73 | }, 74 | Spec: networkingv1.NetworkPolicySpec{ 75 | PolicyTypes: []networkingv1.PolicyType{ 76 | networkingv1.PolicyTypeIngress, 77 | networkingv1.PolicyTypeEgress, 78 | }, 79 | Ingress: []networkingv1.NetworkPolicyIngressRule{ 80 | {}, 81 | }, 82 | Egress: []networkingv1.NetworkPolicyEgressRule{ 83 | { 84 | To: []networkingv1.NetworkPolicyPeer{ 85 | { 86 | IPBlock: &networkingv1.IPBlock{ 87 | CIDR: "0.0.0.0/0", 88 | Except: cidrList, 89 | }, 90 | }, 91 | { 92 | NamespaceSelector: &metav1.LabelSelector{ 93 | MatchLabels: map[string]string{ 94 | "kubernetes.io/metadata.name": namespaceName, 95 | }, 96 | }, 97 | }, 98 | { 99 | NamespaceSelector: &metav1.LabelSelector{ 100 | MatchLabels: map[string]string{ 101 | "kubernetes.io/metadata.name": metav1.NamespaceSystem, 102 | }, 103 | }, 104 | PodSelector: &metav1.LabelSelector{ 105 | MatchLabels: map[string]string{ 106 | "k8s-app": "kube-dns", 107 | }, 108 | }, 109 | }, 110 | }, 111 | }, 112 | }, 113 | }, 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /pkg/controller/policy/policy_suite_test.go: -------------------------------------------------------------------------------- 1 | package policy_test 2 | 3 | import ( 4 | "context" 5 | "path/filepath" 6 | "testing" 7 | 8 | "github.com/go-logr/zapr" 9 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 10 | "github.com/rancher/k3k/pkg/controller/policy" 11 | 12 | "go.uber.org/zap" 13 | appsv1 "k8s.io/api/apps/v1" 14 | corev1 "k8s.io/api/core/v1" 15 | networkingv1 "k8s.io/api/networking/v1" 16 | "k8s.io/apimachinery/pkg/runtime" 17 | ctrl "sigs.k8s.io/controller-runtime" 18 | "sigs.k8s.io/controller-runtime/pkg/client" 19 | "sigs.k8s.io/controller-runtime/pkg/envtest" 20 | 21 | . "github.com/onsi/ginkgo/v2" 22 | . 
"github.com/onsi/gomega" 23 | ) 24 | 25 | func TestController(t *testing.T) { 26 | RegisterFailHandler(Fail) 27 | RunSpecs(t, "VirtualClusterPolicy Controller Suite") 28 | } 29 | 30 | var ( 31 | testEnv *envtest.Environment 32 | k8sClient client.Client 33 | ctx context.Context 34 | cancel context.CancelFunc 35 | ) 36 | 37 | var _ = BeforeSuite(func() { 38 | 39 | By("bootstrapping test environment") 40 | testEnv = &envtest.Environment{ 41 | CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "crds")}, 42 | ErrorIfCRDPathMissing: true, 43 | } 44 | cfg, err := testEnv.Start() 45 | Expect(err).NotTo(HaveOccurred()) 46 | 47 | scheme := buildScheme() 48 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) 49 | Expect(err).NotTo(HaveOccurred()) 50 | 51 | mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme}) 52 | Expect(err).NotTo(HaveOccurred()) 53 | 54 | ctrl.SetLogger(zapr.NewLogger(zap.NewNop())) 55 | 56 | ctx, cancel = context.WithCancel(context.Background()) 57 | err = policy.Add(mgr, "") 58 | Expect(err).NotTo(HaveOccurred()) 59 | 60 | go func() { 61 | defer GinkgoRecover() 62 | err = mgr.Start(ctx) 63 | Expect(err).NotTo(HaveOccurred(), "failed to run manager") 64 | }() 65 | }) 66 | 67 | var _ = AfterSuite(func() { 68 | cancel() 69 | 70 | By("tearing down the test environment") 71 | err := testEnv.Stop() 72 | Expect(err).NotTo(HaveOccurred()) 73 | }) 74 | 75 | func buildScheme() *runtime.Scheme { 76 | scheme := runtime.NewScheme() 77 | 78 | err := corev1.AddToScheme(scheme) 79 | Expect(err).NotTo(HaveOccurred()) 80 | err = appsv1.AddToScheme(scheme) 81 | Expect(err).NotTo(HaveOccurred()) 82 | err = networkingv1.AddToScheme(scheme) 83 | Expect(err).NotTo(HaveOccurred()) 84 | err = v1alpha1.AddToScheme(scheme) 85 | Expect(err).NotTo(HaveOccurred()) 86 | 87 | return scheme 88 | } 89 | -------------------------------------------------------------------------------- /pkg/log/zap.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/virtual-kubelet/virtual-kubelet/log" 7 | "go.uber.org/zap" 8 | "go.uber.org/zap/zapcore" 9 | ctrlruntimezap "sigs.k8s.io/controller-runtime/pkg/log/zap" 10 | ) 11 | 12 | type Logger struct { 13 | *zap.SugaredLogger 14 | } 15 | 16 | func New(debug bool) *Logger { 17 | return &Logger{newZappLogger(debug).Sugar()} 18 | } 19 | 20 | func (l *Logger) WithError(err error) log.Logger { 21 | return l 22 | } 23 | 24 | func (l *Logger) WithField(string, interface{}) log.Logger { 25 | return l 26 | } 27 | 28 | func (l *Logger) WithFields(field log.Fields) log.Logger { 29 | return l 30 | } 31 | 32 | func (l *Logger) Named(name string) *Logger { 33 | l.SugaredLogger = l.SugaredLogger.Named(name) 34 | return l 35 | } 36 | 37 | func newZappLogger(debug bool) *zap.Logger { 38 | encCfg := zap.NewProductionEncoderConfig() 39 | encCfg.TimeKey = "timestamp" 40 | encCfg.EncodeTime = zapcore.ISO8601TimeEncoder 41 | 42 | lvl := zap.NewAtomicLevelAt(zap.InfoLevel) 43 | if debug { 44 | lvl = zap.NewAtomicLevelAt(zap.DebugLevel) 45 | } 46 | 47 | encoder := zapcore.NewJSONEncoder(encCfg) 48 | core := zapcore.NewCore(&ctrlruntimezap.KubeAwareEncoder{Encoder: encoder}, zapcore.AddSync(os.Stderr), lvl) 49 | 50 | return zap.New(core) 51 | } 52 | -------------------------------------------------------------------------------- /scripts/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eou 
pipefail 4 | 5 | LDFLAGS="-X \"github.com/rancher/k3k/pkg/buildinfo.Version=${VERSION}\"" 6 | 7 | echo "Building k3k... [cli os/arch: $(go env GOOS)/$(go env GOARCH)]" 8 | echo "Current TAG: ${VERSION} " 9 | 10 | export CGO_ENABLED=0 11 | GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" -o bin/k3k 12 | GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" -o bin/k3k-kubelet ./k3k-kubelet 13 | 14 | # build the cli for the local OS and ARCH 15 | go build -ldflags="${LDFLAGS}" -o bin/k3kcli ./cli 16 | -------------------------------------------------------------------------------- /scripts/generate: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eou pipefail 4 | 5 | 6 | CONTROLLER_TOOLS_VERSION=v0.14.0 7 | 8 | # This will return non-zero until all of our objects in ./pkg/apis can generate valid crds. 9 | # allowDangerousTypes is needed for struct that use floats 10 | go run sigs.k8s.io/controller-tools/cmd/controller-gen@${CONTROLLER_TOOLS_VERSION} \ 11 | crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=false \ 12 | object paths=./pkg/apis/... \ 13 | output:crd:dir=./charts/k3k/crds 14 | -------------------------------------------------------------------------------- /tests/cluster_network_test.go: -------------------------------------------------------------------------------- 1 | package k3k_test 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo/v2" 5 | . "github.com/onsi/gomega" 6 | ) 7 | 8 | var _ = When("two virtual clusters are installed", Label("e2e"), func() { 9 | 10 | var ( 11 | cluster1 *VirtualCluster 12 | cluster2 *VirtualCluster 13 | ) 14 | 15 | BeforeEach(func() { 16 | clusters := NewVirtualClusters(2) 17 | cluster1 = clusters[0] 18 | cluster2 = clusters[1] 19 | }) 20 | 21 | AfterEach(func() { 22 | DeleteNamespaces(cluster1.Cluster.Namespace, cluster2.Cluster.Namespace) 23 | }) 24 | 25 | It("can create pods in each of them that are isolated", func() { 26 | 27 | pod1Cluster1, pod1Cluster1IP := cluster1.NewNginxPod("") 28 | pod2Cluster1, pod2Cluster1IP := cluster1.NewNginxPod("") 29 | pod1Cluster2, pod1Cluster2IP := cluster2.NewNginxPod("") 30 | 31 | var ( 32 | stdout string 33 | stderr string 34 | curlCmd string 35 | err error 36 | ) 37 | 38 | By("Checking that Pods can reach themselves") 39 | 40 | curlCmd = "curl --no-progress-meter " + pod1Cluster1IP 41 | stdout, _, err = cluster1.ExecCmd(pod1Cluster1, curlCmd) 42 | Expect(err).To(Not(HaveOccurred())) 43 | Expect(stdout).To(ContainSubstring("Welcome to nginx!")) 44 | 45 | curlCmd = "curl --no-progress-meter " + pod2Cluster1IP 46 | stdout, _, err = cluster1.ExecCmd(pod2Cluster1, curlCmd) 47 | Expect(err).To(Not(HaveOccurred())) 48 | Expect(stdout).To(ContainSubstring("Welcome to nginx!")) 49 | 50 | curlCmd = "curl --no-progress-meter " + pod1Cluster2IP 51 | stdout, _, err = cluster2.ExecCmd(pod1Cluster2, curlCmd) 52 | Expect(err).To(Not(HaveOccurred())) 53 | Expect(stdout).To(ContainSubstring("Welcome to nginx!")) 54 | 55 | // Pods in the same Virtual Cluster should be able to reach each other 56 | // Pod1 should be able to call Pod2, and viceversa 57 | 58 | By("Checking that Pods in the same virtual clusters can reach each other") 59 | 60 | curlCmd = "curl --no-progress-meter " + pod2Cluster1IP 61 | stdout, _, err = cluster1.ExecCmd(pod1Cluster1, curlCmd) 62 | Expect(err).To(Not(HaveOccurred())) 63 | Expect(stdout).To(ContainSubstring("Welcome to nginx!")) 64 | 65 | curlCmd = "curl --no-progress-meter " + pod1Cluster1IP 66 | stdout, _, err = 
cluster1.ExecCmd(pod2Cluster1, curlCmd) 67 | Expect(err).To(Not(HaveOccurred())) 68 | Expect(stdout).To(ContainSubstring("Welcome to nginx!")) 69 | 70 | By("Checking that Pods in the different virtual clusters cannot reach each other") 71 | 72 | // Pods in Cluster 1 should not be able to reach the Pod in Cluster 2 73 | 74 | curlCmd = "curl --no-progress-meter " + pod1Cluster2IP 75 | _, stderr, err = cluster1.ExecCmd(pod1Cluster1, curlCmd) 76 | Expect(err).Should(HaveOccurred()) 77 | Expect(stderr).To(ContainSubstring("Failed to connect")) 78 | 79 | curlCmd = "curl --no-progress-meter " + pod1Cluster2IP 80 | _, stderr, err = cluster1.ExecCmd(pod2Cluster1, curlCmd) 81 | Expect(err).To(HaveOccurred()) 82 | Expect(stderr).To(ContainSubstring("Failed to connect")) 83 | 84 | // Pod in Cluster 2 should not be able to reach Pods in Cluster 1 85 | 86 | curlCmd = "curl --no-progress-meter " + pod1Cluster1IP 87 | _, stderr, err = cluster2.ExecCmd(pod1Cluster2, curlCmd) 88 | Expect(err).To(HaveOccurred()) 89 | Expect(stderr).To(ContainSubstring("Failed to connect")) 90 | 91 | curlCmd = "curl --no-progress-meter " + pod2Cluster1IP 92 | _, stderr, err = cluster2.ExecCmd(pod1Cluster2, curlCmd) 93 | Expect(err).To(HaveOccurred()) 94 | Expect(stderr).To(ContainSubstring("Failed to connect")) 95 | }) 96 | }) 97 | -------------------------------------------------------------------------------- /tests/cluster_test.go: -------------------------------------------------------------------------------- 1 | package k3k_test 2 | 3 | import ( 4 | "context" 5 | "crypto/x509" 6 | "errors" 7 | "fmt" 8 | "time" 9 | 10 | . "github.com/onsi/ginkgo/v2" 11 | . "github.com/onsi/gomega" 12 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 13 | corev1 "k8s.io/api/core/v1" 14 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | ) 16 | 17 | var _ = When("k3k is installed", Label("e2e"), func() { 18 | It("is in Running status", func() { 19 | 20 | // check that the controller is running 21 | Eventually(func() bool { 22 | opts := v1.ListOptions{LabelSelector: "app.kubernetes.io/name=k3k"} 23 | podList, err := k8s.CoreV1().Pods("k3k-system").List(context.Background(), opts) 24 | 25 | Expect(err).To(Not(HaveOccurred())) 26 | Expect(podList.Items).To(Not(BeEmpty())) 27 | 28 | var isRunning bool 29 | for _, pod := range podList.Items { 30 | if pod.Status.Phase == corev1.PodRunning { 31 | isRunning = true 32 | break 33 | } 34 | } 35 | 36 | return isRunning 37 | }). 38 | WithTimeout(time.Second * 10). 39 | WithPolling(time.Second). 
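// gomega's Eventually retries the polled function until it returns true or the
// 10 second timeout above elapses, checking once per second per WithPolling.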
40 | Should(BeTrue()) 41 | }) 42 | }) 43 | 44 | var _ = When("a ephemeral cluster is installed", Label("e2e"), func() { 45 | 46 | var virtualCluster *VirtualCluster 47 | 48 | BeforeEach(func() { 49 | virtualCluster = NewVirtualCluster() 50 | }) 51 | 52 | AfterEach(func() { 53 | DeleteNamespaces(virtualCluster.Cluster.Namespace) 54 | }) 55 | 56 | It("can create a nginx pod", func() { 57 | _, _ = virtualCluster.NewNginxPod("") 58 | }) 59 | 60 | It("regenerates the bootstrap secret after a restart", func() { 61 | ctx := context.Background() 62 | 63 | _, err := virtualCluster.Client.DiscoveryClient.ServerVersion() 64 | Expect(err).To(Not(HaveOccurred())) 65 | 66 | labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server" 67 | serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector}) 68 | Expect(err).To(Not(HaveOccurred())) 69 | 70 | Expect(len(serverPods.Items)).To(Equal(1)) 71 | serverPod := serverPods.Items[0] 72 | 73 | fmt.Fprintf(GinkgoWriter, "deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name) 74 | err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{}) 75 | Expect(err).To(Not(HaveOccurred())) 76 | 77 | By("Deleting server pod") 78 | 79 | // check that the server pods restarted 80 | Eventually(func() any { 81 | serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector}) 82 | Expect(err).To(Not(HaveOccurred())) 83 | Expect(len(serverPods.Items)).To(Equal(1)) 84 | return serverPods.Items[0].DeletionTimestamp 85 | }). 86 | WithTimeout(time.Minute). 87 | WithPolling(time.Second * 5). 88 | Should(BeNil()) 89 | 90 | By("Server pod up and running again") 91 | 92 | By("Using old k8s client configuration should fail") 93 | 94 | Eventually(func() bool { 95 | _, err = virtualCluster.Client.DiscoveryClient.ServerVersion() 96 | var unknownAuthorityErr x509.UnknownAuthorityError 97 | return errors.As(err, &unknownAuthorityErr) 98 | }). 99 | WithTimeout(time.Minute * 2). 100 | WithPolling(time.Second * 5). 101 | Should(BeTrue()) 102 | 103 | By("Recover new config should succeed") 104 | 105 | Eventually(func() error { 106 | virtualCluster.Client, virtualCluster.RestConfig = NewVirtualK8sClientAndConfig(virtualCluster.Cluster) 107 | _, err = virtualCluster.Client.DiscoveryClient.ServerVersion() 108 | return err 109 | }). 110 | WithTimeout(time.Minute * 2). 111 | WithPolling(time.Second * 5). 
112 | Should(BeNil()) 113 | }) 114 | }) 115 | 116 | var _ = When("a dynamic cluster is installed", func() { 117 | 118 | var virtualCluster *VirtualCluster 119 | 120 | BeforeEach(func() { 121 | namespace := NewNamespace() 122 | cluster := NewCluster(namespace.Name) 123 | cluster.Spec.Persistence.Type = v1alpha1.DynamicPersistenceMode 124 | CreateCluster(cluster) 125 | client, restConfig := NewVirtualK8sClientAndConfig(cluster) 126 | 127 | virtualCluster = &VirtualCluster{ 128 | Cluster: cluster, 129 | RestConfig: restConfig, 130 | Client: client, 131 | } 132 | }) 133 | 134 | AfterEach(func() { 135 | DeleteNamespaces(virtualCluster.Cluster.Namespace) 136 | }) 137 | 138 | It("can create a nginx pod", func() { 139 | _, _ = virtualCluster.NewNginxPod("") 140 | }) 141 | 142 | It("use the same bootstrap secret after a restart", func() { 143 | ctx := context.Background() 144 | 145 | _, err := virtualCluster.Client.DiscoveryClient.ServerVersion() 146 | Expect(err).To(Not(HaveOccurred())) 147 | 148 | labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server" 149 | serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector}) 150 | Expect(err).To(Not(HaveOccurred())) 151 | 152 | Expect(len(serverPods.Items)).To(Equal(1)) 153 | serverPod := serverPods.Items[0] 154 | 155 | fmt.Fprintf(GinkgoWriter, "deleting pod %s/%s\n", serverPod.Namespace, serverPod.Name) 156 | err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).Delete(ctx, serverPod.Name, v1.DeleteOptions{}) 157 | Expect(err).To(Not(HaveOccurred())) 158 | 159 | By("Deleting server pod") 160 | 161 | // check that the server pods restarted 162 | Eventually(func() any { 163 | serverPods, err = k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector}) 164 | Expect(err).To(Not(HaveOccurred())) 165 | Expect(len(serverPods.Items)).To(Equal(1)) 166 | return serverPods.Items[0].DeletionTimestamp 167 | }). 168 | WithTimeout(60 * time.Second). 169 | WithPolling(time.Second * 5). 170 | Should(BeNil()) 171 | 172 | By("Server pod up and running again") 173 | 174 | By("Using old k8s client configuration should succeed") 175 | 176 | Eventually(func() error { 177 | _, err = virtualCluster.Client.DiscoveryClient.ServerVersion() 178 | return err 179 | }). 180 | WithTimeout(2 * time.Minute). 181 | WithPolling(time.Second * 5). 
182 | Should(BeNil()) 183 | }) 184 | }) 185 | -------------------------------------------------------------------------------- /tests/k8s_restclientgetter_test.go: -------------------------------------------------------------------------------- 1 | package k3k_test 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/api/meta" 5 | "k8s.io/client-go/discovery" 6 | memory "k8s.io/client-go/discovery/cached" 7 | "k8s.io/client-go/rest" 8 | "k8s.io/client-go/restmapper" 9 | "k8s.io/client-go/tools/clientcmd" 10 | ) 11 | 12 | type RESTClientGetter struct { 13 | clientconfig clientcmd.ClientConfig 14 | restConfig *rest.Config 15 | discoveryClient discovery.CachedDiscoveryInterface 16 | } 17 | 18 | func NewRESTClientGetter(kubeconfig []byte) (*RESTClientGetter, error) { 19 | clientconfig, err := clientcmd.NewClientConfigFromBytes([]byte(kubeconfig)) 20 | if err != nil { 21 | return nil, err 22 | } 23 | 24 | restConfig, err := clientconfig.ClientConfig() 25 | if err != nil { 26 | return nil, err 27 | } 28 | 29 | dc, err := discovery.NewDiscoveryClientForConfig(restConfig) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | return &RESTClientGetter{ 35 | clientconfig: clientconfig, 36 | restConfig: restConfig, 37 | discoveryClient: memory.NewMemCacheClient(dc), 38 | }, nil 39 | } 40 | 41 | func (r *RESTClientGetter) ToRESTConfig() (*rest.Config, error) { 42 | return r.restConfig, nil 43 | } 44 | 45 | func (r *RESTClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { 46 | return r.discoveryClient, nil 47 | } 48 | 49 | func (r *RESTClientGetter) ToRESTMapper() (meta.RESTMapper, error) { 50 | return restmapper.NewDeferredDiscoveryRESTMapper(r.discoveryClient), nil 51 | } 52 | 53 | func (r *RESTClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig { 54 | return r.clientconfig 55 | } 56 | -------------------------------------------------------------------------------- /tests/tests_suite_test.go: -------------------------------------------------------------------------------- 1 | package k3k_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "maps" 8 | "os" 9 | "path" 10 | "testing" 11 | "time" 12 | 13 | "github.com/go-logr/zapr" 14 | . "github.com/onsi/ginkgo/v2" 15 | . 
"github.com/onsi/gomega" 16 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1alpha1" 17 | "github.com/testcontainers/testcontainers-go" 18 | "github.com/testcontainers/testcontainers-go/modules/k3s" 19 | "go.uber.org/zap" 20 | "helm.sh/helm/v3/pkg/action" 21 | "helm.sh/helm/v3/pkg/chart/loader" 22 | corev1 "k8s.io/api/core/v1" 23 | v1 "k8s.io/api/core/v1" 24 | "k8s.io/apimachinery/pkg/runtime" 25 | "k8s.io/client-go/kubernetes" 26 | "k8s.io/client-go/tools/clientcmd" 27 | "sigs.k8s.io/controller-runtime/pkg/client" 28 | "sigs.k8s.io/controller-runtime/pkg/log" 29 | ) 30 | 31 | func TestTests(t *testing.T) { 32 | RegisterFailHandler(Fail) 33 | RunSpecs(t, "Tests Suite") 34 | } 35 | 36 | var ( 37 | k3sContainer *k3s.K3sContainer 38 | hostIP string 39 | k8s *kubernetes.Clientset 40 | k8sClient client.Client 41 | ) 42 | 43 | var _ = BeforeSuite(func() { 44 | var err error 45 | ctx := context.Background() 46 | 47 | k3sContainer, err = k3s.Run(ctx, "rancher/k3s:v1.32.1-k3s1") 48 | Expect(err).To(Not(HaveOccurred())) 49 | 50 | hostIP, err = k3sContainer.ContainerIP(ctx) 51 | Expect(err).To(Not(HaveOccurred())) 52 | fmt.Fprintln(GinkgoWriter, "K3s containerIP: "+hostIP) 53 | 54 | kubeconfig, err := k3sContainer.GetKubeConfig(context.Background()) 55 | Expect(err).To(Not(HaveOccurred())) 56 | 57 | initKubernetesClient(kubeconfig) 58 | installK3kChart(kubeconfig) 59 | }) 60 | 61 | func initKubernetesClient(kubeconfig []byte) { 62 | restcfg, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig) 63 | Expect(err).To(Not(HaveOccurred())) 64 | 65 | k8s, err = kubernetes.NewForConfig(restcfg) 66 | Expect(err).To(Not(HaveOccurred())) 67 | 68 | scheme := buildScheme() 69 | k8sClient, err = client.New(restcfg, client.Options{Scheme: scheme}) 70 | Expect(err).NotTo(HaveOccurred()) 71 | 72 | logger, err := zap.NewDevelopment() 73 | Expect(err).NotTo(HaveOccurred()) 74 | log.SetLogger(zapr.NewLogger(logger)) 75 | } 76 | 77 | func installK3kChart(kubeconfig []byte) { 78 | pwd, err := os.Getwd() 79 | Expect(err).To(Not(HaveOccurred())) 80 | 81 | k3kChart, err := loader.Load(path.Join(pwd, "../charts/k3k")) 82 | Expect(err).To(Not(HaveOccurred())) 83 | 84 | actionConfig := new(action.Configuration) 85 | 86 | restClientGetter, err := NewRESTClientGetter(kubeconfig) 87 | Expect(err).To(Not(HaveOccurred())) 88 | 89 | releaseName := "k3k" 90 | releaseNamespace := "k3k-system" 91 | 92 | err = actionConfig.Init(restClientGetter, releaseNamespace, os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) { 93 | fmt.Fprintf(GinkgoWriter, "helm debug: "+format+"\n", v...) 
94 | }) 95 | Expect(err).To(Not(HaveOccurred())) 96 | 97 | iCli := action.NewInstall(actionConfig) 98 | iCli.ReleaseName = releaseName 99 | iCli.Namespace = releaseNamespace 100 | iCli.CreateNamespace = true 101 | iCli.Timeout = time.Minute 102 | iCli.Wait = true 103 | 104 | imageMap, _ := k3kChart.Values["image"].(map[string]any) 105 | maps.Copy(imageMap, map[string]any{ 106 | "repository": "rancher/k3k", 107 | "tag": "dev", 108 | "pullPolicy": "IfNotPresent", 109 | }) 110 | 111 | sharedAgentMap, _ := k3kChart.Values["sharedAgent"].(map[string]any) 112 | sharedAgentImageMap, _ := sharedAgentMap["image"].(map[string]any) 113 | maps.Copy(sharedAgentImageMap, map[string]any{ 114 | "repository": "rancher/k3k-kubelet", 115 | "tag": "dev", 116 | }) 117 | 118 | err = k3sContainer.LoadImages(context.Background(), "rancher/k3k:dev", "rancher/k3k-kubelet:dev") 119 | Expect(err).To(Not(HaveOccurred())) 120 | 121 | release, err := iCli.Run(k3kChart, k3kChart.Values) 122 | Expect(err).To(Not(HaveOccurred())) 123 | 124 | fmt.Fprintf(GinkgoWriter, "Release %s installed in %s namespace\n", release.Name, release.Namespace) 125 | } 126 | 127 | var _ = AfterSuite(func() { 128 | // dump k3s logs 129 | readCloser, err := k3sContainer.Logs(context.Background()) 130 | Expect(err).To(Not(HaveOccurred())) 131 | 132 | logs, err := io.ReadAll(readCloser) 133 | Expect(err).To(Not(HaveOccurred())) 134 | 135 | logfile := path.Join(os.TempDir(), "k3s.log") 136 | err = os.WriteFile(logfile, logs, 0644) 137 | Expect(err).To(Not(HaveOccurred())) 138 | 139 | fmt.Fprintln(GinkgoWriter, "k3s logs written to: "+logfile) 140 | 141 | // dump k3k controller logs 142 | readCloser, err = k3sContainer.Logs(context.Background()) 143 | Expect(err).To(Not(HaveOccurred())) 144 | writeLogs("k3s.log", readCloser) 145 | 146 | // dump k3k logs 147 | writeK3kLogs() 148 | 149 | testcontainers.CleanupContainer(GinkgoTB(), k3sContainer) 150 | }) 151 | 152 | func buildScheme() *runtime.Scheme { 153 | scheme := runtime.NewScheme() 154 | 155 | err := corev1.AddToScheme(scheme) 156 | Expect(err).NotTo(HaveOccurred()) 157 | err = v1alpha1.AddToScheme(scheme) 158 | Expect(err).NotTo(HaveOccurred()) 159 | 160 | return scheme 161 | } 162 | 163 | func writeK3kLogs() { 164 | var ( 165 | err error 166 | podList v1.PodList 167 | ) 168 | 169 | ctx := context.Background() 170 | err = k8sClient.List(ctx, &podList, &client.ListOptions{Namespace: "k3k-system"}) 171 | Expect(err).To(Not(HaveOccurred())) 172 | 173 | k3kPod := podList.Items[0] 174 | req := k8s.CoreV1().Pods(k3kPod.Namespace).GetLogs(k3kPod.Name, &corev1.PodLogOptions{}) 175 | podLogs, err := req.Stream(ctx) 176 | Expect(err).To(Not(HaveOccurred())) 177 | writeLogs("k3k.log", podLogs) 178 | } 179 | 180 | func writeLogs(filename string, logs io.ReadCloser) { 181 | defer logs.Close() 182 | 183 | logsStr, err := io.ReadAll(logs) 184 | Expect(err).To(Not(HaveOccurred())) 185 | 186 | tempfile := path.Join(os.TempDir(), filename) 187 | err = os.WriteFile(tempfile, []byte(logsStr), 0644) 188 | Expect(err).To(Not(HaveOccurred())) 189 | 190 | fmt.Fprintln(GinkgoWriter, "logs written to: "+filename) 191 | } 192 | --------------------------------------------------------------------------------