├── pkg ├── apis │ └── k3k.io │ │ ├── register.go │ │ └── v1beta1 │ │ ├── doc.go │ │ └── register.go ├── buildinfo │ └── buildinfo.go ├── controller │ ├── cluster │ │ ├── server │ │ │ ├── template.go │ │ │ ├── ingress.go │ │ │ └── config.go │ │ ├── agent │ │ │ ├── virtual_test.go │ │ │ ├── agent.go │ │ │ └── shared_test.go │ │ ├── filter.go │ │ ├── client.go │ │ ├── pod.go │ │ ├── cluster_suite_test.go │ │ ├── service.go │ │ ├── token.go │ │ ├── status.go │ │ └── cluster_finalize.go │ ├── controller_test.go │ ├── certs │ │ └── certs.go │ ├── controller.go │ └── policy │ │ ├── policy_suite_test.go │ │ ├── namespace.go │ │ └── networkpolicy.go └── log │ └── zap.go ├── .cr.yaml ├── docs ├── images │ └── architecture │ │ ├── shared-mode.png │ │ └── virtual-mode.png ├── crds │ └── config.yaml ├── cli │ ├── k3kcli.md │ ├── k3kcli_kubeconfig.md │ ├── k3kcli_policy_list.md │ ├── k3kcli_policy_delete.md │ ├── genclidoc.go │ ├── k3kcli_policy.md │ ├── k3kcli_cluster.md │ ├── k3kcli_cluster_list.md │ ├── k3kcli_cluster_delete.md │ ├── k3kcli_policy_create.md │ ├── k3kcli_kubeconfig_generate.md │ └── k3kcli_cluster_create.md ├── howtos │ ├── expose-workloads.md │ ├── airgap.md │ └── choose-mode.md └── development.md ├── charts └── k3k │ ├── Chart.yaml │ ├── templates │ ├── serviceaccount.yaml │ ├── service.yaml │ ├── rbac.yaml │ ├── deployment.yaml │ └── _helpers.tpl │ ├── .helmignore │ └── values.yaml ├── .gitignore ├── package ├── Dockerfile.k3k └── Dockerfile.k3k-kubelet ├── cli ├── main.go └── cmds │ ├── policy.go │ ├── cluster.go │ ├── policy_delete.go │ ├── policy_list.go │ ├── cluster_list.go │ ├── cluster_create_test.go │ ├── cluster_create_flags.go │ ├── table_printer.go │ ├── root.go │ ├── cluster_delete.go │ └── kubeconfig.go ├── tests ├── testdata │ └── customcerts │ │ ├── client-ca.key │ │ ├── server-ca.key │ │ ├── etcd │ │ ├── peer-ca.key │ │ ├── server-ca.key │ │ ├── peer-ca.pem │ │ ├── server-ca.pem │ │ └── peer-ca.crt │ │ ├── request-header-ca.key │ │ ├── client-ca.pem │ │ ├── server-ca.pem │ │ ├── request-header-ca.pem │ │ ├── service.key │ │ ├── root-ca.crt │ │ ├── root-ca.pem │ │ ├── intermediate-ca.pem │ │ ├── root-ca.key │ │ ├── intermediate-ca.key │ │ ├── intermediate-ca.crt │ │ ├── client-ca.crt │ │ └── server-ca.crt ├── k8s_restclientgetter_test.go ├── cluster_network_test.go ├── cluster_certs_test.go ├── cluster_status_test.go └── cluster_sync_test.go ├── examples ├── virtual-server.yaml ├── virtualclusterpolicy.yaml ├── shared-single-server.yaml └── shared-multiple-servers.yaml ├── k3k-kubelet ├── controller │ └── syncer │ │ ├── syncer.go │ │ └── persistentvolumeclaims_test.go ├── provider │ ├── node.go │ └── util.go ├── config.go └── README.md ├── .golangci.yml ├── scripts ├── generate └── build ├── .github ├── workflows │ ├── chart.yml │ ├── release-delete.yml │ ├── build.yml │ ├── release.yml │ ├── test.yaml │ └── test-conformance-virtual.yaml └── ISSUE_TEMPLATE │ └── bug_report.md ├── Makefile └── .goreleaser.yaml /pkg/apis/k3k.io/register.go: -------------------------------------------------------------------------------- 1 | package k3k 2 | 3 | var GroupName = "k3k.io" 4 | -------------------------------------------------------------------------------- /pkg/buildinfo/buildinfo.go: -------------------------------------------------------------------------------- 1 | package buildinfo 2 | 3 | var Version = "dev" 4 | -------------------------------------------------------------------------------- /pkg/apis/k3k.io/v1beta1/doc.go: 
-------------------------------------------------------------------------------- 1 | // +k8s:deepcopy-gen=package 2 | // +groupName=k3k.io 3 | package v1beta1 4 | -------------------------------------------------------------------------------- /.cr.yaml: -------------------------------------------------------------------------------- 1 | release-name-template: chart-{{ .Version }} 2 | make-release-latest: false 3 | skip-existing: true 4 | -------------------------------------------------------------------------------- /docs/images/architecture/shared-mode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/k3k/HEAD/docs/images/architecture/shared-mode.png -------------------------------------------------------------------------------- /docs/images/architecture/virtual-mode.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/k3k/HEAD/docs/images/architecture/virtual-mode.png -------------------------------------------------------------------------------- /charts/k3k/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: k3k 3 | description: A Helm chart for K3K 4 | type: application 5 | version: 1.0.1 6 | appVersion: v1.0.1 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.dapper 2 | /.cache 3 | /bin 4 | /dist 5 | *.swp 6 | .idea 7 | .vscode/ 8 | __debug* 9 | *-kubeconfig.yaml 10 | .envtest 11 | cover.out 12 | covcounters.** 13 | covmeta.** 14 | -------------------------------------------------------------------------------- /package/Dockerfile.k3k: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | 3 | ARG BIN_K3K=bin/k3k 4 | ARG BIN_K3KCLI=bin/k3kcli 5 | 6 | COPY ${BIN_K3K} /usr/bin/ 7 | COPY ${BIN_K3KCLI} /usr/bin/ 8 | 9 | CMD ["k3k"] 10 | -------------------------------------------------------------------------------- /cli/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/sirupsen/logrus" 5 | 6 | "github.com/rancher/k3k/cli/cmds" 7 | ) 8 | 9 | func main() { 10 | app := cmds.NewRootCmd() 11 | if err := app.Execute(); err != nil { 12 | logrus.Fatal(err) 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/client-ca.key: -------------------------------------------------------------------------------- 1 | -----BEGIN EC PRIVATE KEY----- 2 | MHcCAQEEIPcLNko4+/EQ5/23zlmItihyMP1E7TidAN0bIoPArlHToAoGCCqGSM49 3 | AwEHoUQDQgAE90bIIKEOMnIQ926Jf5rC7/I1GYjOpbFZ+6jVCuBpG06RIhlrlGFx 4 | /4Xmz5X+Wwm+JlZOwueW+Z5oNWTSN/pSvA== 5 | -----END EC PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/server-ca.key: -------------------------------------------------------------------------------- 1 | -----BEGIN EC PRIVATE KEY----- 2 | MHcCAQEEIAQ2EsMn+Dy0Eh88MFflIeXd7Bde3z4Z1bm2KhiXu5p4oAoGCCqGSM49 3 | AwEHoUQDQgAEdG/lRafgvaXjr1L3zpvw+EmcN23jypiMODfge9QVU9ST6wPOMcj2 4 | cyOluWAKSNw30lVhCSvi+nS2Hn/Za1U4aA== 5 | -----END EC PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/etcd/peer-ca.key: 
-------------------------------------------------------------------------------- 1 | -----BEGIN EC PRIVATE KEY----- 2 | MHcCAQEEIOUlrNSewqNM4x31I4DeuP5GksD7NUfMZ3Lui+eFCD33oAoGCCqGSM49 3 | AwEHoUQDQgAEH9/YWeczKMngMCXY3woc+fdJ0Pgbgkwm+ad8wGb2DXA7puDbfiIi 4 | Ez7z8qZDehxnckl5KdcWIK8hNcKBCRGILQ== 5 | -----END EC PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/etcd/server-ca.key: -------------------------------------------------------------------------------- 1 | -----BEGIN EC PRIVATE KEY----- 2 | MHcCAQEEICaMBfRVf3YbfM9fgseSQ9gdYGEMmumos1Ddy3Vtz6l3oAoGCCqGSM49 3 | AwEHoUQDQgAEZnohi2w6JQEhOu8QOutTjc7yeFcH5Q2f2+G3UvuUZTI6FWD2V0Df 4 | j6+RsA8fgJMaFycZvTuioKaf3bcVqyZeZg== 5 | -----END EC PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /examples/virtual-server.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k3k.io/v1beta1 2 | kind: Cluster 3 | metadata: 4 | name: virtual-server 5 | spec: 6 | mode: virtual 7 | servers: 3 8 | agents: 3 9 | version: v1.33.1-k3s1 10 | tlsSANs: 11 | - myserver.app 12 | expose: 13 | nodePort: {} 14 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/request-header-ca.key: -------------------------------------------------------------------------------- 1 | -----BEGIN EC PRIVATE KEY----- 2 | MHcCAQEEIC9U7kptnqrP7Yhr2npCmv3iR+5G2wysuzs2E1HMtiMQoAoGCCqGSM49 3 | AwEHoUQDQgAEiTFCxaLbPfpF/tcRArXw55+xAgHXzl7Pq1g+ksFSCPYgmLNht3x9 4 | COF7QQoSZIEy5Xb7hKfUlpfgLAIWoPNaDA== 5 | -----END EC PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /examples/virtualclusterpolicy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k3k.io/v1beta1 2 | kind: VirtualClusterPolicy 3 | metadata: 4 | name: policy-example 5 | spec: 6 | allowedMode: shared 7 | disableNetworkPolicy: true 8 | # podSecurityAdmissionLevel: "baseline" 9 | # defaultPriorityClass: "lowpriority" 10 | -------------------------------------------------------------------------------- /package/Dockerfile.k3k-kubelet: -------------------------------------------------------------------------------- 1 | # TODO: switch this to BCI-micro or scratch. Left as base for now so that debugging is a bit easier. 2 | FROM registry.suse.com/bci/bci-base:15.6 3 | 4 | ARG BIN_K3K_KUBELET=bin/k3k-kubelet 5 | 6 | COPY ${BIN_K3K_KUBELET} /usr/bin/ 7 | 8 | ENTRYPOINT ["/usr/bin/k3k-kubelet"] 9 | -------------------------------------------------------------------------------- /charts/k3k/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "k3k.serviceAccountName" . }} 6 | labels: 7 | {{- include "k3k.labels" . 
| nindent 4 }} 8 | namespace: {{ .Release.Namespace }} 9 | {{- end }} 10 | -------------------------------------------------------------------------------- /examples/shared-single-server.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k3k.io/v1beta1 2 | kind: Cluster 3 | metadata: 4 | name: shared-single-server 5 | spec: 6 | mode: shared 7 | servers: 1 8 | version: v1.33.1-k3s1 9 | serverArgs: 10 | - "--write-kubeconfig-mode=777" 11 | tlsSANs: 12 | - myserver.app 13 | expose: 14 | nodePort: {} 15 | -------------------------------------------------------------------------------- /docs/crds/config.yaml: -------------------------------------------------------------------------------- 1 | processor: 2 | # RE2 regular expressions describing type fields that should be excluded from the generated documentation. 3 | ignoreFields: 4 | - "status$" 5 | - "TypeMeta$" 6 | 7 | render: 8 | # Version of Kubernetes to use when generating links to Kubernetes API documentation. 9 | kubernetesVersion: "1.31" 10 | -------------------------------------------------------------------------------- /examples/shared-multiple-servers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k3k.io/v1beta1 2 | kind: Cluster 3 | metadata: 4 | name: shared-multiple-servers 5 | spec: 6 | mode: shared 7 | servers: 3 8 | agents: 3 9 | version: v1.33.1-k3s1 10 | serverArgs: 11 | - "--write-kubeconfig-mode=777" 12 | tlsSANs: 13 | - myserver.app 14 | expose: 15 | nodePort: {} 16 | -------------------------------------------------------------------------------- /cli/cmds/policy.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | func NewPolicyCmd(appCtx *AppContext) *cobra.Command { 8 | cmd := &cobra.Command{ 9 | Use: "policy", 10 | Short: "policy command", 11 | } 12 | 13 | cmd.AddCommand( 14 | NewPolicyCreateCmd(appCtx), 15 | NewPolicyDeleteCmd(appCtx), 16 | NewPolicyListCmd(appCtx), 17 | ) 18 | 19 | return cmd 20 | } 21 | -------------------------------------------------------------------------------- /k3k-kubelet/controller/syncer/syncer.go: -------------------------------------------------------------------------------- 1 | package syncer 2 | 3 | import ( 4 | "sigs.k8s.io/controller-runtime/pkg/client" 5 | 6 | "github.com/rancher/k3k/k3k-kubelet/translate" 7 | ) 8 | 9 | type SyncerContext struct { 10 | ClusterName string 11 | ClusterNamespace string 12 | VirtualClient client.Client 13 | HostClient client.Client 14 | Translator translate.ToHostTranslator 15 | } 16 | -------------------------------------------------------------------------------- /charts/k3k/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: k3k-webhook 5 | labels: 6 | {{- include "k3k.labels" . | nindent 4 }} 7 | namespace: {{ .Release.Namespace }} 8 | spec: 9 | ports: 10 | - port: 443 11 | protocol: TCP 12 | name: https-webhook 13 | targetPort: 9443 14 | selector: 15 | {{- include "k3k.selectorLabels" . 
| nindent 6 }} 16 | -------------------------------------------------------------------------------- /cli/cmds/cluster.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | func NewClusterCmd(appCtx *AppContext) *cobra.Command { 8 | cmd := &cobra.Command{ 9 | Use: "cluster", 10 | Short: "cluster command", 11 | } 12 | 13 | cmd.AddCommand( 14 | NewClusterCreateCmd(appCtx), 15 | NewClusterDeleteCmd(appCtx), 16 | NewClusterListCmd(appCtx), 17 | ) 18 | 19 | return cmd 20 | } 21 | -------------------------------------------------------------------------------- /charts/k3k/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /docs/cli/k3kcli.md: -------------------------------------------------------------------------------- 1 | ## k3kcli 2 | 3 | CLI for K3K 4 | 5 | ### Options 6 | 7 | ``` 8 | --debug Turn on debug logs 9 | -h, --help help for k3kcli 10 | --kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set) 11 | ``` 12 | 13 | ### SEE ALSO 14 | 15 | * [k3kcli cluster](k3kcli_cluster.md) - cluster command 16 | * [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters 17 | * [k3kcli policy](k3kcli_policy.md) - policy command 18 | 19 | -------------------------------------------------------------------------------- /docs/cli/k3kcli_kubeconfig.md: -------------------------------------------------------------------------------- 1 | ## k3kcli kubeconfig 2 | 3 | Manage kubeconfig for clusters 4 | 5 | ### Options 6 | 7 | ``` 8 | -h, --help help for kubeconfig 9 | ``` 10 | 11 | ### Options inherited from parent commands 12 | 13 | ``` 14 | --debug Turn on debug logs 15 | --kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set) 16 | ``` 17 | 18 | ### SEE ALSO 19 | 20 | * [k3kcli](k3kcli.md) - CLI for K3K 21 | * [k3kcli kubeconfig generate](k3kcli_kubeconfig_generate.md) - Generate kubeconfig for clusters 22 | 23 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | 3 | linters: 4 | enable: 5 | - misspell 6 | - wsl_v5 7 | 8 | formatters: 9 | enable: 10 | - gci 11 | - gofmt 12 | - gofumpt 13 | settings: 14 | gci: 15 | # The default order is `standard > default > custom > blank > dot > alias > localmodule`. 
16 | custom-order: true 17 | sections: 18 | - standard 19 | - default 20 | - alias 21 | - localmodule 22 | - dot 23 | - blank 24 | gofmt: 25 | rewrite-rules: 26 | - pattern: 'interface{}' 27 | replacement: 'any' 28 | -------------------------------------------------------------------------------- /docs/cli/k3kcli_policy_list.md: -------------------------------------------------------------------------------- 1 | ## k3kcli policy list 2 | 3 | List all the existing policies 4 | 5 | ``` 6 | k3kcli policy list [flags] 7 | ``` 8 | 9 | ### Examples 10 | 11 | ``` 12 | k3kcli policy list [command options] 13 | ``` 14 | 15 | ### Options 16 | 17 | ``` 18 | -h, --help help for list 19 | ``` 20 | 21 | ### Options inherited from parent commands 22 | 23 | ``` 24 | --debug Turn on debug logs 25 | --kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set) 26 | ``` 27 | 28 | ### SEE ALSO 29 | 30 | * [k3kcli policy](k3kcli_policy.md) - policy command 31 | 32 | -------------------------------------------------------------------------------- /docs/cli/k3kcli_policy_delete.md: -------------------------------------------------------------------------------- 1 | ## k3kcli policy delete 2 | 3 | Delete an existing policy 4 | 5 | ``` 6 | k3kcli policy delete [flags] 7 | ``` 8 | 9 | ### Examples 10 | 11 | ``` 12 | k3kcli policy delete [command options] NAME 13 | ``` 14 | 15 | ### Options 16 | 17 | ``` 18 | -h, --help help for delete 19 | ``` 20 | 21 | ### Options inherited from parent commands 22 | 23 | ``` 24 | --debug Turn on debug logs 25 | --kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set) 26 | ``` 27 | 28 | ### SEE ALSO 29 | 30 | * [k3kcli policy](k3kcli_policy.md) - policy command 31 | 32 | -------------------------------------------------------------------------------- /docs/cli/genclidoc.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path" 7 | 8 | "github.com/spf13/cobra/doc" 9 | 10 | "github.com/rancher/k3k/cli/cmds" 11 | ) 12 | 13 | func main() { 14 | // Instantiate the CLI application 15 | k3kcli := cmds.NewRootCmd() 16 | 17 | wd, err := os.Getwd() 18 | if err != nil { 19 | fmt.Println(err) 20 | os.Exit(1) 21 | } 22 | 23 | outputDir := path.Join(wd, "docs/cli") 24 | 25 | if err := doc.GenMarkdownTree(k3kcli, outputDir); err != nil { 26 | fmt.Println("Error generating documentation:", err) 27 | os.Exit(1) 28 | } 29 | 30 | fmt.Println("Documentation generated at " + outputDir) 31 | } 32 | -------------------------------------------------------------------------------- /k3k-kubelet/provider/node.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "context" 5 | 6 | corev1 "k8s.io/api/core/v1" 7 | ) 8 | 9 | // Node implements the node.Provider interface from Virtual Kubelet 10 | type Node struct { 11 | notifyCallback func(*corev1.Node) 12 | } 13 | 14 | // Ping is called to check if the node is healthy - in the current format it always is 15 | func (n *Node) Ping(context.Context) error { 16 | return nil 17 | } 18 | 19 | // NotifyNodeStatus sets the callback function for a node being changed. 
As of now, no changes are made 20 | func (n *Node) NotifyNodeStatus(ctx context.Context, cb func(*corev1.Node)) { 21 | n.notifyCallback = cb 22 | } 23 | -------------------------------------------------------------------------------- /docs/cli/k3kcli_policy.md: -------------------------------------------------------------------------------- 1 | ## k3kcli policy 2 | 3 | policy command 4 | 5 | ### Options 6 | 7 | ``` 8 | -h, --help help for policy 9 | ``` 10 | 11 | ### Options inherited from parent commands 12 | 13 | ``` 14 | --debug Turn on debug logs 15 | --kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set) 16 | ``` 17 | 18 | ### SEE ALSO 19 | 20 | * [k3kcli](k3kcli.md) - CLI for K3K 21 | * [k3kcli policy create](k3kcli_policy_create.md) - Create new policy 22 | * [k3kcli policy delete](k3kcli_policy_delete.md) - Delete an existing policy 23 | * [k3kcli policy list](k3kcli_policy_list.md) - List all the existing policies 24 | 25 | -------------------------------------------------------------------------------- /k3k-kubelet/provider/util.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "github.com/virtual-kubelet/virtual-kubelet/node/api" 5 | "k8s.io/client-go/tools/remotecommand" 6 | ) 7 | 8 | // translatorSizeQueue feeds the size events from the WebSocket 9 | // resizeChan into the SPDY client input. Implements TerminalSizeQueue 10 | // interface. 11 | type translatorSizeQueue struct { 12 | resizeChan <-chan api.TermSize 13 | } 14 | 15 | func (t *translatorSizeQueue) Next() *remotecommand.TerminalSize { 16 | size, ok := <-t.resizeChan 17 | if !ok { 18 | return nil 19 | } 20 | 21 | return &remotecommand.TerminalSize{ 22 | Width: size.Width, 23 | Height: size.Height, 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /docs/cli/k3kcli_cluster.md: -------------------------------------------------------------------------------- 1 | ## k3kcli cluster 2 | 3 | cluster command 4 | 5 | ### Options 6 | 7 | ``` 8 | -h, --help help for cluster 9 | ``` 10 | 11 | ### Options inherited from parent commands 12 | 13 | ``` 14 | --debug Turn on debug logs 15 | --kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set) 16 | ``` 17 | 18 | ### SEE ALSO 19 | 20 | * [k3kcli](k3kcli.md) - CLI for K3K 21 | * [k3kcli cluster create](k3kcli_cluster_create.md) - Create new cluster 22 | * [k3kcli cluster delete](k3kcli_cluster_delete.md) - Delete an existing cluster 23 | * [k3kcli cluster list](k3kcli_cluster_list.md) - List all the existing clusters 24 | 25 | -------------------------------------------------------------------------------- /docs/cli/k3kcli_cluster_list.md: -------------------------------------------------------------------------------- 1 | ## k3kcli cluster list 2 | 3 | List all the existing clusters 4 | 5 | ``` 6 | k3kcli cluster list [flags] 7 | ``` 8 | 9 | ### Examples 10 | 11 | ``` 12 | k3kcli cluster list [command options] 13 | ``` 14 | 15 | ### Options 16 | 17 | ``` 18 | -h, --help help for list 19 | -n, --namespace string namespace of the k3k cluster 20 | ``` 21 | 22 | ### Options inherited from parent commands 23 | 24 | ``` 25 | --debug Turn on debug logs 26 | --kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set) 27 | ``` 28 | 29 | ### SEE ALSO 30 | 31 | * [k3kcli cluster](k3kcli_cluster.md) - cluster command 32 | 33 | -------------------------------------------------------------------------------- 
/pkg/controller/cluster/server/template.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | var singleServerTemplate string = ` 4 | if [ -d "{{.ETCD_DIR}}" ]; then 5 | # if the directory exists then it means it's not an initial run 6 | /bin/k3s server --cluster-reset --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log 7 | fi 8 | rm -f /var/lib/rancher/k3s/server/db/reset-flag 9 | /bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log` 10 | 11 | var HAServerTemplate string = ` 12 | if [ ${POD_NAME: -1} == 0 ] && [ ! -d "{{.ETCD_DIR}}" ]; then 13 | /bin/k3s server --config {{.INIT_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log 14 | else 15 | /bin/k3s server --config {{.SERVER_CONFIG}} {{.EXTRA_ARGS}} 2>&1 | tee /var/log/k3s.log 16 | fi` 17 | -------------------------------------------------------------------------------- /docs/cli/k3kcli_cluster_delete.md: -------------------------------------------------------------------------------- 1 | ## k3kcli cluster delete 2 | 3 | Delete an existing cluster 4 | 5 | ``` 6 | k3kcli cluster delete [flags] 7 | ``` 8 | 9 | ### Examples 10 | 11 | ``` 12 | k3kcli cluster delete [command options] NAME 13 | ``` 14 | 15 | ### Options 16 | 17 | ``` 18 | -h, --help help for delete 19 | --keep-data keeps persistent volumes created for the cluster after deletion 20 | -n, --namespace string namespace of the k3k cluster 21 | ``` 22 | 23 | ### Options inherited from parent commands 24 | 25 | ``` 26 | --debug Turn on debug logs 27 | --kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set) 28 | ``` 29 | 30 | ### SEE ALSO 31 | 32 | * [k3kcli cluster](k3kcli_cluster.md) - cluster command 33 | 34 | -------------------------------------------------------------------------------- /scripts/generate: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eou pipefail 4 | 5 | 6 | CONTROLLER_TOOLS_VERSION=v0.16.0 7 | 8 | # This will return non-zero until all of our objects in ./pkg/apis can generate valid CRDs. 9 | # allowDangerousTypes is needed for structs that use floats 10 | go run sigs.k8s.io/controller-tools/cmd/controller-gen@${CONTROLLER_TOOLS_VERSION} \ 11 | crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=false \ 12 | object paths=./pkg/apis/... \ 13 | output:crd:dir=./charts/k3k/templates/crds 14 | 15 | # add the 'helm.sh/resource-policy: keep' annotation to the CRDs 16 | for f in ./charts/k3k/templates/crds/*.yaml; do 17 | sed -i '0,/^[[:space:]]*annotations:/s/^[[:space:]]*annotations:/&\n helm.sh\/resource-policy: keep/' "$f" 18 | echo "Validating $f" 19 | yq . 
"$f" > /dev/null 20 | done 21 | -------------------------------------------------------------------------------- /.github/workflows/chart.yml: -------------------------------------------------------------------------------- 1 | name: Chart 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | permissions: 7 | contents: write 8 | 9 | jobs: 10 | chart-release: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout code 14 | uses: actions/checkout@v4 15 | with: 16 | fetch-depth: 0 17 | 18 | - name: Configure Git 19 | run: | 20 | git config user.name "$GITHUB_ACTOR" 21 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com" 22 | 23 | - name: Install Helm 24 | uses: azure/setup-helm@v4 25 | env: 26 | GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 27 | 28 | - name: Run chart-releaser 29 | uses: helm/chart-releaser-action@v1.6.0 30 | with: 31 | config: .cr.yaml 32 | env: 33 | CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 34 | -------------------------------------------------------------------------------- /scripts/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eou pipefail 4 | 5 | LDFLAGS="-X \"github.com/rancher/k3k/pkg/buildinfo.Version=${VERSION}\"" 6 | 7 | build_args=() 8 | 9 | # Check if coverage is enabled, e.g., in CI or when manually set 10 | if [[ "${COVERAGE:-false}" == "true" ]]; then 11 | echo "Coverage build enabled." 12 | build_args+=("-cover" "-coverpkg=./..." "-covermode=atomic") 13 | fi 14 | 15 | echo "Building k3k... [cli os/arch: $(go env GOOS)/$(go env GOARCH)]" 16 | echo "Current TAG: ${VERSION} " 17 | 18 | export CGO_ENABLED=0 19 | GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3k 20 | GOOS=linux GOARCH=amd64 go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3k-kubelet ./k3k-kubelet 21 | 22 | # build the cli for the local OS and ARCH 23 | go build -ldflags="${LDFLAGS}" "${build_args[@]}" -o bin/k3kcli ./cli 24 | -------------------------------------------------------------------------------- /pkg/apis/k3k.io/v1beta1/register.go: -------------------------------------------------------------------------------- 1 | package v1beta1 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/runtime" 5 | "k8s.io/apimachinery/pkg/runtime/schema" 6 | 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | 9 | k3k "github.com/rancher/k3k/pkg/apis/k3k.io" 10 | ) 11 | 12 | var ( 13 | SchemeGroupVersion = schema.GroupVersion{Group: k3k.GroupName, Version: "v1beta1"} 14 | SchemBuilder = runtime.NewSchemeBuilder(addKnownTypes) 15 | AddToScheme = SchemBuilder.AddToScheme 16 | ) 17 | 18 | func Resource(resource string) schema.GroupResource { 19 | return SchemeGroupVersion.WithResource(resource).GroupResource() 20 | } 21 | 22 | func addKnownTypes(s *runtime.Scheme) error { 23 | s.AddKnownTypes(SchemeGroupVersion, 24 | &Cluster{}, 25 | &ClusterList{}, 26 | &VirtualClusterPolicy{}, 27 | &VirtualClusterPolicyList{}, 28 | ) 29 | metav1.AddToGroupVersion(s, SchemeGroupVersion) 30 | 31 | return nil 32 | } 33 | -------------------------------------------------------------------------------- /pkg/log/zap.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "os" 5 | 6 | "go.uber.org/zap" 7 | "go.uber.org/zap/zapcore" 8 | 9 | ctrlruntimezap "sigs.k8s.io/controller-runtime/pkg/log/zap" 10 | ) 11 | 12 | func New(debug bool, format string) *zap.Logger { 13 | lvl := zap.NewAtomicLevelAt(zap.InfoLevel) 14 | if debug { 15 | lvl = 
zap.NewAtomicLevelAt(zap.DebugLevel) 16 | } 17 | 18 | encoder := newEncoder(format) 19 | core := zapcore.NewCore(encoder, zapcore.AddSync(os.Stderr), lvl) 20 | 21 | return zap.New(core) 22 | } 23 | 24 | func newEncoder(format string) zapcore.Encoder { 25 | encCfg := zap.NewProductionEncoderConfig() 26 | encCfg.TimeKey = "timestamp" 27 | encCfg.EncodeTime = zapcore.ISO8601TimeEncoder 28 | 29 | var encoder zapcore.Encoder 30 | if format == "text" { 31 | encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder 32 | encoder = zapcore.NewConsoleEncoder(encCfg) 33 | } else { 34 | encoder = zapcore.NewJSONEncoder(encCfg) 35 | } 36 | 37 | return &ctrlruntimezap.KubeAwareEncoder{Encoder: encoder} 38 | } 39 | -------------------------------------------------------------------------------- /docs/cli/k3kcli_policy_create.md: -------------------------------------------------------------------------------- 1 | ## k3kcli policy create 2 | 3 | Create new policy 4 | 5 | ``` 6 | k3kcli policy create [flags] 7 | ``` 8 | 9 | ### Examples 10 | 11 | ``` 12 | k3kcli policy create [command options] NAME 13 | ``` 14 | 15 | ### Options 16 | 17 | ``` 18 | --annotations stringArray Annotations to add to the policy object (e.g. key=value) 19 | -h, --help help for create 20 | --labels stringArray Labels to add to the policy object (e.g. key=value) 21 | --mode string The allowed mode type of the policy (default "shared") 22 | --namespace strings The namespaces where to bind the policy 23 | --overwrite Overwrite namespace binding of existing policy 24 | ``` 25 | 26 | ### Options inherited from parent commands 27 | 28 | ``` 29 | --debug Turn on debug logs 30 | --kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set) 31 | ``` 32 | 33 | ### SEE ALSO 34 | 35 | * [k3kcli policy](k3kcli_policy.md) - policy command 36 | 37 | -------------------------------------------------------------------------------- /pkg/controller/cluster/agent/virtual_test.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "gopkg.in/yaml.v2" 8 | ) 9 | 10 | func Test_virtualAgentData(t *testing.T) { 11 | type args struct { 12 | serviceIP string 13 | token string 14 | } 15 | 16 | tests := []struct { 17 | name string 18 | args args 19 | expectedData map[string]string 20 | }{ 21 | { 22 | name: "simple config", 23 | args: args{ 24 | serviceIP: "10.0.0.21", 25 | token: "dnjklsdjnksd892389238", 26 | }, 27 | expectedData: map[string]string{ 28 | "server": "https://10.0.0.21", 29 | "token": "dnjklsdjnksd892389238", 30 | "with-node-id": "true", 31 | }, 32 | }, 33 | } 34 | 35 | for _, tt := range tests { 36 | t.Run(tt.name, func(t *testing.T) { 37 | config := virtualAgentData(tt.args.serviceIP, tt.args.token) 38 | 39 | data := make(map[string]string) 40 | err := yaml.Unmarshal([]byte(config), data) 41 | 42 | assert.NoError(t, err) 43 | assert.Equal(t, tt.expectedData, data) 44 | }) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /pkg/controller/cluster/filter.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/types" 5 | "sigs.k8s.io/controller-runtime/pkg/client" 6 | "sigs.k8s.io/controller-runtime/pkg/predicate" 7 | 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | 10 | "github.com/rancher/k3k/k3k-kubelet/translate" 11 | 
"github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 12 | ) 13 | 14 | func newClusterPredicate() predicate.Predicate { 15 | return predicate.NewPredicateFuncs(func(object client.Object) bool { 16 | owner := metav1.GetControllerOf(object) 17 | 18 | return owner != nil && 19 | owner.Kind == "Cluster" && 20 | owner.APIVersion == v1beta1.SchemeGroupVersion.String() 21 | }) 22 | } 23 | 24 | func clusterNamespacedName(object client.Object) types.NamespacedName { 25 | var clusterName string 26 | 27 | owner := metav1.GetControllerOf(object) 28 | if owner != nil && owner.Kind == "Cluster" && owner.APIVersion == v1beta1.SchemeGroupVersion.String() { 29 | clusterName = owner.Name 30 | } else { 31 | clusterName = object.GetLabels()[translate.ClusterNameLabel] 32 | } 33 | 34 | return types.NamespacedName{ 35 | Name: clusterName, 36 | Namespace: object.GetNamespace(), 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /cli/cmds/policy_delete.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/sirupsen/logrus" 7 | "github.com/spf13/cobra" 8 | 9 | apierrors "k8s.io/apimachinery/pkg/api/errors" 10 | 11 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 12 | ) 13 | 14 | func NewPolicyDeleteCmd(appCtx *AppContext) *cobra.Command { 15 | return &cobra.Command{ 16 | Use: "delete", 17 | Short: "Delete an existing policy", 18 | Example: "k3kcli policy delete [command options] NAME", 19 | RunE: policyDeleteAction(appCtx), 20 | Args: cobra.ExactArgs(1), 21 | } 22 | } 23 | 24 | func policyDeleteAction(appCtx *AppContext) func(cmd *cobra.Command, args []string) error { 25 | return func(cmd *cobra.Command, args []string) error { 26 | ctx := context.Background() 27 | client := appCtx.Client 28 | name := args[0] 29 | 30 | policy := &v1beta1.VirtualClusterPolicy{} 31 | policy.Name = name 32 | 33 | if err := client.Delete(ctx, policy); err != nil { 34 | if !apierrors.IsNotFound(err) { 35 | return err 36 | } 37 | 38 | logrus.Warnf("Policy '%s' not found", name) 39 | 40 | return nil 41 | } 42 | 43 | logrus.Infof("Policy '%s' deleted", name) 44 | 45 | return nil 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /k3k-kubelet/config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | ) 6 | 7 | // config has all virtual-kubelet startup options 8 | type config struct { 9 | ClusterName string `mapstructure:"clusterName"` 10 | ClusterNamespace string `mapstructure:"clusterNamespace"` 11 | ServiceName string `mapstructure:"serviceName"` 12 | Token string `mapstructure:"token"` 13 | AgentHostname string `mapstructure:"agentHostname"` 14 | HostKubeconfig string `mapstructure:"hostKubeconfig"` 15 | VirtKubeconfig string `mapstructure:"virtKubeconfig"` 16 | KubeletPort int `mapstructure:"kubeletPort"` 17 | WebhookPort int `mapstructure:"webhookPort"` 18 | ServerIP string `mapstructure:"serverIP"` 19 | Version string `mapstructure:"version"` 20 | MirrorHostNodes bool `mapstructure:"mirrorHostNodes"` 21 | } 22 | 23 | func (c *config) validate() error { 24 | if c.ClusterName == "" { 25 | return errors.New("cluster name is not provided") 26 | } 27 | 28 | if c.ClusterNamespace == "" { 29 | return errors.New("cluster namespace is not provided") 30 | } 31 | 32 | if c.AgentHostname == "" { 33 | return errors.New("agent Hostname is not provided") 34 | } 35 | 36 | return 
nil 37 | } 38 | -------------------------------------------------------------------------------- /pkg/controller/cluster/client.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "k8s.io/apimachinery/pkg/types" 8 | "k8s.io/client-go/tools/clientcmd" 9 | 10 | v1 "k8s.io/api/core/v1" 11 | ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" 12 | 13 | "github.com/rancher/k3k/pkg/controller" 14 | ) 15 | 16 | // newVirtualClient creates a new Client that can be used to interact with the virtual cluster 17 | func newVirtualClient(ctx context.Context, hostClient ctrlruntimeclient.Client, clusterName, clusterNamespace string) (ctrlruntimeclient.Client, error) { 18 | var clusterKubeConfig v1.Secret 19 | 20 | kubeconfigSecretName := types.NamespacedName{ 21 | Name: controller.SafeConcatNameWithPrefix(clusterName, "kubeconfig"), 22 | Namespace: clusterNamespace, 23 | } 24 | 25 | if err := hostClient.Get(ctx, kubeconfigSecretName, &clusterKubeConfig); err != nil { 26 | return nil, fmt.Errorf("failed to get kubeconfig secret: %w", err) 27 | } 28 | 29 | restConfig, err := clientcmd.RESTConfigFromKubeConfig(clusterKubeConfig.Data["kubeconfig.yaml"]) 30 | if err != nil { 31 | return nil, fmt.Errorf("failed to create config from kubeconfig file: %w", err) 32 | } 33 | 34 | return ctrlruntimeclient.New(restConfig, ctrlruntimeclient.Options{}) 35 | } 36 | -------------------------------------------------------------------------------- /docs/cli/k3kcli_kubeconfig_generate.md: -------------------------------------------------------------------------------- 1 | ## k3kcli kubeconfig generate 2 | 3 | Generate kubeconfig for clusters 4 | 5 | ``` 6 | k3kcli kubeconfig generate [flags] 7 | ``` 8 | 9 | ### Options 10 | 11 | ``` 12 | --altNames strings altNames of the generated certificates for the kubeconfig 13 | --cn string Common name (CN) of the generated certificates for the kubeconfig (default "system:admin") 14 | --config-name string the name of the generated kubeconfig file 15 | --expiration-days int Expiration date of the certificates used for the kubeconfig (default 365) 16 | -h, --help help for generate 17 | --kubeconfig-server string override the kubeconfig server host 18 | --name string cluster name 19 | -n, --namespace string namespace of the k3k cluster 20 | --org strings Organization name (ORG) of the generated certificates for the kubeconfig 21 | ``` 22 | 23 | ### Options inherited from parent commands 24 | 25 | ``` 26 | --debug Turn on debug logs 27 | --kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set) 28 | ``` 29 | 30 | ### SEE ALSO 31 | 32 | * [k3kcli kubeconfig](k3kcli_kubeconfig.md) - Manage kubeconfig for clusters 33 | 34 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/client-ca.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDZjCCAU6gAwIBAgIIXrx+TJOdkuMwDQYJKoZIhvcNAQELBQAwKTEnMCUGA1UE 3 | AwweazNzLWludGVybWVkaWF0ZS1jYUAxNzUxOTY1MTAwMB4XDTI1MDcwODA4NTgy 4 | M1oXDTM1MDgyNTA4NTgyM1owIzEhMB8GA1UEAwwYazNzLWNsaWVudC1jYUAxNzUx 5 | OTY1MTAwMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE90bIIKEOMnIQ926Jf5rC 6 | 7/I1GYjOpbFZ+6jVCuBpG06RIhlrlGFx/4Xmz5X+Wwm+JlZOwueW+Z5oNWTSN/pS 7 | vKNjMGEwHQYDVR0OBBYEFAYyQJEmofE3Socg5OBg/vBQwu44MB8GA1UdIwQYMBaA 8 | FJlWOM0MjJgK2OweAyu1eVn/fUi6MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ 9 | 
BAQDAgKkMA0GCSqGSIb3DQEBCwUAA4ICAQAxi9riQZ2/3RCrbXtuvHCXKWEJXNID 10 | z0Ubd1T61m6bXPrDUiRMwyB11pwVMQhSpOTuwVuP0tDcz95Gj4KOy4cWYBgN56hs 11 | wAVPAKwsfbinxXBcZ16B6qnTNnPRhDbIqFHuezuzs40+RmR9wYAoyv34Cuw11oCe 12 | h55w+0pnB9zGgpYGGm7uIwq5DVBOcMC36IGj/un5w+jniguGgxrgx2nxcZqab5aB 13 | TVnrqA7vLpZPFrJ0m4QWDez+D9fYi53qI7JBZu7ixyN5Bo8mRxHjRvVMrZCotrzB 14 | eBEbJNFEC5B49/R5GFLWvpyBpvAwE3ArxI/pvt2p1aoxwMl7hKcyDgWSRZQfpqZ/ 15 | Kk+9BUE2lka4xIx3WMXFsf8Ts/7h7bH91CAGmILHc2iDtF/whBNJuAT80MDQ2gfV 16 | 1482KHV6nRsX/l73bJRmqUvoM2DEZDWgdHvNNKi3+SLkMsuAIwdNE7RkuKYLxiC1 17 | go39t5qLfwahGzOkkj7703MsbFObsB9zwY9bnYLjhei0uPk6I5m6sMJiN5Uit++Q 18 | lgAFjbBqN7qdPHY4z5cXazqDEcZdOGIxYJ/ZyA/XcKrXcO6/uO2uZH0QLoxK0yI4 19 | uSyJfyHIEmmcYKbwXhruwkvToY7O1DMGO2RuBY/JiCEsseR5LNyZsgVYgOuVkQTX 20 | 0nfWkI7eQYRUzQ== 21 | -----END CERTIFICATE----- 22 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/server-ca.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDZjCCAU6gAwIBAgIIXrx+TJOdkuQwDQYJKoZIhvcNAQELBQAwKTEnMCUGA1UE 3 | AwweazNzLWludGVybWVkaWF0ZS1jYUAxNzUxOTY1MTAwMB4XDTI1MDcwODA4NTgy 4 | NFoXDTM1MDgyNTA4NTgyNFowIzEhMB8GA1UEAwwYazNzLXNlcnZlci1jYUAxNzUx 5 | OTY1MTAwMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEdG/lRafgvaXjr1L3zpvw 6 | +EmcN23jypiMODfge9QVU9ST6wPOMcj2cyOluWAKSNw30lVhCSvi+nS2Hn/Za1U4 7 | aKNjMGEwHQYDVR0OBBYEFGpvp9k1WNA5fse5+UFUg+F0idhlMB8GA1UdIwQYMBaA 8 | FJlWOM0MjJgK2OweAyu1eVn/fUi6MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ 9 | BAQDAgKkMA0GCSqGSIb3DQEBCwUAA4ICAQCuUOrRoYovbN95EERDmEqgXfUpVSTm 10 | GGhRXAPbN7O6aVRj226+sFPuPwLcpid7/G0N5+wS2+8+DT2uO8qGF5fUKwi8URit 11 | TyCGnesp9mrUouFmqwZoufiPJhi6ZN4SF/pT3pAzFrjRbV7+/itgQMSoFXChmJ0F 12 | +4IzXXABAhDb7b7SCxNfO8PZDdw1b+4xqby6K4y13VYj5fy4Z0Mv4gDE5PRMa4gL 13 | 4QhBUe0YYZV8CGd9rVB8X29faxma0gjkcMdYB4eb7YNFZhDXuf0RXcmBVZxCwZLw 14 | 10i6Yfx6WOLBMq8nOmvvo/PFw+gx4yOiHmlkb06K04GwhB84w9By4VUihNxDdCmq 15 | XzahtO7UrvXT7AskySMr2/+PFOTIzl9w6fh/BRACkYGJakDURywCYsSu/XtFfG/1 16 | pMV6PTH0ThvIVIh2K7WEOPCPi0c9CmXTb+VRoHl1sX5USmK04pep0/l4LjE455is 17 | KlMdVPv1e1CgShIERNqBINcjWyyHY7PaeNGesXE1oB5+uNYcwX/Lpn/HV5L0sjhg 18 | 9a6UsrTw5wLMxO1+eUB43MFtHg6nXvJ2bQIjMUZEasErMw3NrBdmEWapSLEzYHli 19 | v57H5KwV1kIdyz3KbI5kps98PZHEMg1LUNTkwUMeYduPJRftPHm8bEJRfVZxHkQl 20 | vHba/vaD+ni4uw== 21 | -----END CERTIFICATE----- 22 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/etcd/peer-ca.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDaTCCAVGgAwIBAgIIXrx+TJOdkuYwDQYJKoZIhvcNAQELBQAwKTEnMCUGA1UE 3 | AwweazNzLWludGVybWVkaWF0ZS1jYUAxNzUxOTY1MTAwMB4XDTI1MDcwODA4NTgy 4 | NFoXDTM1MDgyNTA4NTgyNFowJjEkMCIGA1UEAwwbazNzLWV0Y2QtcGVlci1jYUAx 5 | NzUxOTY1MTAwMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEH9/YWeczKMngMCXY 6 | 3woc+fdJ0Pgbgkwm+ad8wGb2DXA7puDbfiIiEz7z8qZDehxnckl5KdcWIK8hNcKB 7 | CRGILaNjMGEwHQYDVR0OBBYEFI0GOYl4luW+/mBH7BCKsBZbnC0vMB8GA1UdIwQY 8 | MBaAFJlWOM0MjJgK2OweAyu1eVn/fUi6MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0P 9 | AQH/BAQDAgKkMA0GCSqGSIb3DQEBCwUAA4ICAQClsvi6TIEfYkRJ3923gMEmhAaF 10 | WIXdLssY/FQSdhYeNBaJksTHPCmWDKpKJzL0x5MdePNy2oYPCtrAVzbyQe/oatp/ 11 | gg2krE1Wnq8YG95d/MrELZ5KGkQcwA8MGs2sMHYe/0XNDQ/DKOOJBhGvxs3OfySQ 12 | BUks+UiY7UIpolTbxPIGI56IBfKk16fAPA57O9nQv1ReioEWnrOsJY7B/Dxly8tR 13 | qlLwSJ5mJFs1ozFOrBG/CZ+zn/fRthXH7M2tYtmv8lEnOGE0R0A8iaW/vc9kWwq0 14 | N6jSoC/w7njMl1rTMWrInsqa5ippK1EwV1kERF+qer08eJ/giZoA16C0zyG6ajPD 15 | 
z/qSzn2TiknZR1DX7zZsvwfactpwUDK+taTL/3kbUzQYayjjG1PI/dNmELqWLXqZ 16 | AoG5/s6+TKdSxjLGbchw56AC6E1+I0dNLMbxzjrNlIpa9V9BjI1ZbHIlEzjO7a63 17 | 1eZH9Ni5kYiaFUOtzqZQiM34W6q0LDAowyELik8FEkoe5fNsknWAhiSEDfbil8u+ 18 | NpNlQqpMxLydqZvepEyx34tsu4gvunPMFkxh+5kpSDmWm9e3oK7g1Mt7+qaxsy/N 19 | jkoLbiP2ZVCp4LSwQombPNZFupo33kwIi1wN5F4RLlfesDHs007UrhV403xP7g3d 20 | V9Q1NXFPCuFOCThm/w== 21 | -----END CERTIFICATE----- 22 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/etcd/server-ca.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDazCCAVOgAwIBAgIIXrx+TJOdkucwDQYJKoZIhvcNAQELBQAwKTEnMCUGA1UE 3 | AwweazNzLWludGVybWVkaWF0ZS1jYUAxNzUxOTY1MTAwMB4XDTI1MDcwODA4NTgy 4 | NFoXDTM1MDgyNTA4NTgyNFowKDEmMCQGA1UEAwwdazNzLWV0Y2Qtc2VydmVyLWNh 5 | QDE3NTE5NjUxMDAwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARmeiGLbDolASE6 6 | 7xA661ONzvJ4VwflDZ/b4bdS+5RlMjoVYPZXQN+Pr5GwDx+AkxoXJxm9O6Kgpp/d 7 | txWrJl5mo2MwYTAdBgNVHQ4EFgQUlkh6vQIk77W9P5jJbv6u1XWh2gwwHwYDVR0j 8 | BBgwFoAUmVY4zQyMmArY7B4DK7V5Wf99SLowDwYDVR0TAQH/BAUwAwEB/zAOBgNV 9 | HQ8BAf8EBAMCAqQwDQYJKoZIhvcNAQELBQADggIBAMjWYIUg4NNLFcCEQNYKLk92 10 | e4t89kr9iVSz81Kr38xD1GAdR2rMitCe2MFgjtimOf01APOzJh/yCDHLIJNKprfu 11 | NP0EaXM/0NkGnsx+8jWwc0GETPgEdkfg1ZBd9gJirQRs7yfReYI3AlXrvNW4+8CL 12 | Cjji3BfJ+GHPs23HX8IKrNJeyULdPGSqwIvJpqjkvI5XkVgK0i64jf6wzZOgkX+a 13 | gvwC4/xTu4jhCtSpoLE5M1Zf+sFufuVw1N/9y4FEr/aB5lfdFNYA14GyGrxrJ+56 14 | DVfCoXBJfUCeognhXcolX6/QakBEdv2DQOBR8P/1AYBrwSCJDYJi70Z9RRH2sbM+ 15 | dmtB+dTcm6QF713tswT71+ZbZNkkaazpHfVkRlIkCTXzPFuNLAEoiorBjfQAZRNg 16 | tMbXet6OECO/eS3vlTm4cj5D+aux3qjhnF5cn/cNyZ4lsZBm0lCQ72zF6ZL5RuIN 17 | JWKh2++wUlAX7VZXc5ku0yIMNJVv7C8FUZK0YcDH9tRncYNkHAJpVQNE5Cr2VukG 18 | InJCbl7cuXQK8cqa+36pgcV6/bqZas7/G6dVKPprSpF3G0vMRCx6QMcJIOrfjpB0 19 | pXuXsotFKLN8p9TCkcAYYWu3pvhLDqn/l2ESQgXbiUAyYonrE1ndrLQWaMUq4glu 20 | hatNYA2lxZ9c2Yb9bLoW 21 | -----END CERTIFICATE----- 22 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/request-header-ca.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDbjCCAVagAwIBAgIIXrx+TJOdkuUwDQYJKoZIhvcNAQELBQAwKTEnMCUGA1UE 3 | AwweazNzLWludGVybWVkaWF0ZS1jYUAxNzUxOTY1MTAwMB4XDTI1MDcwODA4NTgy 4 | NFoXDTM1MDgyNTA4NTgyNFowKzEpMCcGA1UEAwwgazNzLXJlcXVlc3QtaGVhZGVy 5 | LWNhQDE3NTE5NjUxMDAwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASJMULFots9 6 | +kX+1xECtfDnn7ECAdfOXs+rWD6SwVII9iCYs2G3fH0I4XtBChJkgTLldvuEp9SW 7 | l+AsAhag81oMo2MwYTAdBgNVHQ4EFgQUorDfU1fyEsXL5LLhvRnhb2ohF5QwHwYD 8 | VR0jBBgwFoAUmVY4zQyMmArY7B4DK7V5Wf99SLowDwYDVR0TAQH/BAUwAwEB/zAO 9 | BgNVHQ8BAf8EBAMCAqQwDQYJKoZIhvcNAQELBQADggIBAEa3NyNf5roUG8MlpsMa 10 | TcyIQJ2KyEvNhQxvyY9kBLHmWQpIcXdg1yGFS4t2fWs3tQItw97MTAptb1V/TqAW 11 | h2UQeU6nsHyl2/iYXDknz1lBDYelQv9V+drvnW1ENFS6+M3973oeQ+G3APOsDUOq 12 | xKIlRkwwLVJ/vJTRtQk3ZnhcqvcPAheALV3d7pLAOk3akoi7pA5IfWpe65Oee4dy 13 | SM+qo0/wPytMfpn++mU3FxXXkS+Q9AUZJAzTB4rHI5yDXAP3x95FuuZCxD3Rd9j5 14 | OEWGmVZHCRm0mq7QAmQNbWk0mYO5eANoQrBffooRPATIQyH6LxcaNfMfNhwmm1Px 15 | SwlaQMzMfQ74geExXAhOsgt3S3pvuy4Fj6EMtcPJm69SuPBHIejWE7EE3H09sDlR 16 | CKp+3OkWCslk5+wxhBOBgL4xMrMZSPbP5OYqfn4v1KbwO7rHu1ws/sPLJalz/bgT 17 | dhRVkhBsLqfREI1dOthts4eE2NYjHXdNQ3cIV0rB+ZGHembivJmUROQANehOhUhF 18 | /GIYn+MJXpbCFqeT4+/XB0EkiFnxjZtgTSk7rVQ9mVSEWhVKlCr+XB2J793nJ1/C 19 | iosqQ9khSrqF8P01UoFLgZxIqaIjWxOgYvjVssPpr9X+I8p+dFqwNJ7ptJ+BfVMZ 20 | OuWWE3YvsiKz1FsmYp8mQMs7 21 | -----END CERTIFICATE----- 22 | 
-------------------------------------------------------------------------------- /cli/cmds/policy_list.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/spf13/cobra" 7 | "k8s.io/apimachinery/pkg/types" 8 | "k8s.io/cli-runtime/pkg/printers" 9 | 10 | apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 11 | 12 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 13 | ) 14 | 15 | func NewPolicyListCmd(appCtx *AppContext) *cobra.Command { 16 | return &cobra.Command{ 17 | Use: "list", 18 | Short: "List all the existing policies", 19 | Example: "k3kcli policy list [command options]", 20 | RunE: policyList(appCtx), 21 | Args: cobra.NoArgs, 22 | } 23 | } 24 | 25 | func policyList(appCtx *AppContext) func(cmd *cobra.Command, args []string) error { 26 | return func(cmd *cobra.Command, args []string) error { 27 | ctx := context.Background() 28 | client := appCtx.Client 29 | 30 | var policies v1beta1.VirtualClusterPolicyList 31 | if err := client.List(ctx, &policies); err != nil { 32 | return err 33 | } 34 | 35 | crd := &apiextensionsv1.CustomResourceDefinition{} 36 | if err := client.Get(ctx, types.NamespacedName{Name: "virtualclusterpolicies.k3k.io"}, crd); err != nil { 37 | return err 38 | } 39 | 40 | items := toPointerSlice(policies.Items) 41 | table := createTable(crd, items) 42 | 43 | printer := printers.NewTablePrinter(printers.PrintOptions{}) 44 | 45 | return printer.PrintObj(table, cmd.OutOrStdout()) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /charts/k3k/templates/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: {{ include "k3k.fullname" . }} 5 | labels: 6 | {{- include "k3k.labels" . | nindent 4 }} 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: cluster-admin 11 | subjects: 12 | - kind: ServiceAccount 13 | name: {{ include "k3k.serviceAccountName" . 
}} 14 | namespace: {{ .Release.Namespace }} 15 | --- 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | kind: ClusterRole 18 | metadata: 19 | name: k3k-kubelet-node 20 | rules: 21 | - apiGroups: 22 | - "" 23 | resources: 24 | - "nodes" 25 | - "nodes/proxy" 26 | verbs: 27 | - "get" 28 | - "list" 29 | --- 30 | kind: ClusterRoleBinding 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | metadata: 33 | name: k3k-kubelet-node 34 | roleRef: 35 | kind: ClusterRole 36 | name: k3k-kubelet-node 37 | apiGroup: rbac.authorization.k8s.io 38 | --- 39 | apiVersion: rbac.authorization.k8s.io/v1 40 | kind: ClusterRole 41 | metadata: 42 | name: k3k-priorityclass 43 | rules: 44 | - apiGroups: 45 | - "scheduling.k8s.io" 46 | resources: 47 | - "priorityclasses" 48 | verbs: 49 | - "*" 50 | --- 51 | kind: ClusterRoleBinding 52 | apiVersion: rbac.authorization.k8s.io/v1 53 | metadata: 54 | name: k3k-priorityclass 55 | roleRef: 56 | kind: ClusterRole 57 | name: k3k-priorityclass 58 | apiGroup: rbac.authorization.k8s.io 59 | -------------------------------------------------------------------------------- /cli/cmds/cluster_list.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/spf13/cobra" 7 | "k8s.io/apimachinery/pkg/types" 8 | "k8s.io/cli-runtime/pkg/printers" 9 | 10 | apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 11 | ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" 12 | 13 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 14 | ) 15 | 16 | func NewClusterListCmd(appCtx *AppContext) *cobra.Command { 17 | cmd := &cobra.Command{ 18 | Use: "list", 19 | Short: "List all the existing clusters", 20 | Example: "k3kcli cluster list [command options]", 21 | RunE: list(appCtx), 22 | Args: cobra.NoArgs, 23 | } 24 | 25 | CobraFlagNamespace(appCtx, cmd.Flags()) 26 | 27 | return cmd 28 | } 29 | 30 | func list(appCtx *AppContext) func(cmd *cobra.Command, args []string) error { 31 | return func(cmd *cobra.Command, args []string) error { 32 | ctx := context.Background() 33 | client := appCtx.Client 34 | 35 | var clusters v1beta1.ClusterList 36 | if err := client.List(ctx, &clusters, ctrlclient.InNamespace(appCtx.namespace)); err != nil { 37 | return err 38 | } 39 | 40 | crd := &apiextensionsv1.CustomResourceDefinition{} 41 | if err := client.Get(ctx, types.NamespacedName{Name: "clusters.k3k.io"}, crd); err != nil { 42 | return err 43 | } 44 | 45 | items := toPointerSlice(clusters.Items) 46 | table := createTable(crd, items) 47 | 48 | printer := printers.NewTablePrinter(printers.PrintOptions{WithNamespace: true}) 49 | 50 | return printer.PrintObj(table, cmd.OutOrStdout()) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /tests/k8s_restclientgetter_test.go: -------------------------------------------------------------------------------- 1 | package k3k_test 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/api/meta" 5 | "k8s.io/client-go/discovery" 6 | "k8s.io/client-go/rest" 7 | "k8s.io/client-go/restmapper" 8 | "k8s.io/client-go/tools/clientcmd" 9 | 10 | memory "k8s.io/client-go/discovery/cached" 11 | ) 12 | 13 | type RESTClientGetter struct { 14 | clientconfig clientcmd.ClientConfig 15 | restConfig *rest.Config 16 | discoveryClient discovery.CachedDiscoveryInterface 17 | } 18 | 19 | func NewRESTClientGetter(kubeconfig []byte) (*RESTClientGetter, error) { 20 | clientconfig, err := clientcmd.NewClientConfigFromBytes([]byte(kubeconfig)) 21 | if err != 
nil { 22 | return nil, err 23 | } 24 | 25 | restConfig, err := clientconfig.ClientConfig() 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | dc, err := discovery.NewDiscoveryClientForConfig(restConfig) 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | return &RESTClientGetter{ 36 | clientconfig: clientconfig, 37 | restConfig: restConfig, 38 | discoveryClient: memory.NewMemCacheClient(dc), 39 | }, nil 40 | } 41 | 42 | func (r *RESTClientGetter) ToRESTConfig() (*rest.Config, error) { 43 | return r.restConfig, nil 44 | } 45 | 46 | func (r *RESTClientGetter) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { 47 | return r.discoveryClient, nil 48 | } 49 | 50 | func (r *RESTClientGetter) ToRESTMapper() (meta.RESTMapper, error) { 51 | return restmapper.NewDeferredDiscoveryRESTMapper(r.discoveryClient), nil 52 | } 53 | 54 | func (r *RESTClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig { 55 | return r.clientconfig 56 | } 57 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | 12 | **Environmental Info:** 13 | Host Cluster Version: 14 | 15 | 16 | Node(s) CPU architecture, OS, and Version: 17 | 18 | 19 | Host Cluster Configuration: 20 | 21 | 22 | K3K Cluster Configuration: 23 | 24 | 25 | **Describe the bug:** 26 | 27 | 28 | **Steps To Reproduce:** 29 | - Created a cluster with `k3k create`: 30 | 31 | **Expected behavior:** 32 | 33 | 34 | **Actual behavior:** 35 | 36 | 37 | **Additional context / logs:** 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/service.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpAIBAAKCAQEAneZv1/R43ZliQuH36SsvYsE8e1fIRhpov/wRji+QKJl3BWsL 3 | OSZ+0dt+XxiJ+fVxYXKK2xAzx4gXLQqFVwd7kxB1TDueQk3JuAhJqACRaz4UNdvy 4 | xh+h4yTRcDTdrXRP63IbaUEW7/i3Gnyq8KpZi45DmtYxop8m5XiQhped8TTF+0pt 5 | rGYsvdpesYYfQVg0mnBpbsen2tDytHgwAxWVXBbvAPb24Q8ZyG3APTb4NvtgrIl5 6 | OH00SbGtL/dXE/lafQcqf90TskO9bDe/IOdJxdzrzFUwZX5ycehGB83c08luCJdP 7 | BCumdXspfEOHk3eNbf0CcmPJm9aVC27BCLcjpwIDAQABAoIBABUDoArrnFJRoYQp 8 | MqczeiD4eqYnrp210g8K6wMzTUo58l8kOeAnQWWIgq8BQwujIK3JYrV42ItLj1oN 9 | NmW4tzeBTzsQDCXi2F/HqpXTTYcqQeJRHWREvXTPZ5g5UO9OtXwuOXuuj/Dr9uJt 10 | iQpygWxTSKgIrZ5o8/JCM2nWL8zz9XtG1ikFCF2r9gBdj+/1qXj9gdY/82odkKcl 11 | WJPoUEh6eVA8juQVTWaoXMAWtQwyrKgpw7etH2kdVFYKR11HpLNo/qLU68r+SyJP 12 | 88luXIWoBxCbf39qCJ5VUeRo7WUYYeNpa9fnFDve6kaIyBBrBoNbSBJDJ8vltpO5 13 | y94tPqECgYEA0mekJrVl6lXX+XDhE9vIX9TGOF5uFxzgFYt70Yd5oOCumNNT23aT 14 | WL+jOKmVa0Z0sDRNydvJL/sl6cLjWFuoZarqiLyAHKbdROnrbrZ6m0BxKVQvYcpD 15 | kcFj8pY4EsF0M/RpdwYe73pm/SfHAPIY3gct9h9TR5LYQCzlUF4b/D8CgYEAwB4P 16 | T9VFXq4XEXmZPItgtcIdKjPt+Wfp2xWsP6+m3jXw00Bs81y9zx/AKB0gm3ibF7O+ 17 | pykFSZGA3GSbYS0poGBMHtzLToNiiLFnbn1DeWUtZZV7c0w7DYb1JROyQ/7KqTU/ 18 | pv72OlQJmjyQSnvx0vbAn8wwjr4anK2vjLoSHpkCgYEAub+Xkji4dY09ctAtVDvG 19 | hJuyNtdet3bdeQe/0rWYMefJG9ANCwV+hW4BaaeE6sSzhU9XkSpATeSZjupnjHy2 20 | iLLABODs9N53HzynhQEB8oeMn2Dbx6gpyMaDNwlZDW3N6YQPi21S6DYWL83rCLGy 21 | NGkAMXVsLxa7ZMR92VqLuD8CgYEAjHBs+QsKnt/cdSXS4vNwSu4Pq94yRHO1/DWn 22 | qRaagS4lUghynKRbDKJFMg243G4Z0gXPhRrzhogbDwFspGkDWP2MJ7N323kn+ozU 23 | 82wWexN/UBcqG2rKcGULp4Lyeco0E+WdFlKjZJgyPNGxGQHqETHYNfhqNq65fXdq 24 | 
MRGEVWECgYANGkM0NzMQW6yLCzOoxcXOslIjDgdtUqHKqrpMXyDSPbZt34vXnth4 25 | n+Oa8hvozl1C0/EAg486vsbVHZSUlKfoC9KFmDqH933bM7vO6F00mBzepn+LDTwr 26 | rouXecdTdV5Bia55V/Iecpe3pg6xCKkgocep2iRIOLX/T3aL8Elcgg== 27 | -----END RSA PRIVATE KEY----- 28 | 29 | -------------------------------------------------------------------------------- /pkg/controller/cluster/agent/agent.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "k8s.io/apimachinery/pkg/runtime" 8 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 9 | 10 | apierrors "k8s.io/apimachinery/pkg/api/errors" 11 | ctrl "sigs.k8s.io/controller-runtime" 12 | ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" 13 | 14 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 15 | "github.com/rancher/k3k/pkg/controller" 16 | ) 17 | 18 | const ( 19 | configName = "agent-config" 20 | ) 21 | 22 | type ResourceEnsurer interface { 23 | EnsureResources(context.Context) error 24 | } 25 | 26 | type Config struct { 27 | cluster *v1beta1.Cluster 28 | client ctrlruntimeclient.Client 29 | scheme *runtime.Scheme 30 | } 31 | 32 | func NewConfig(cluster *v1beta1.Cluster, client ctrlruntimeclient.Client, scheme *runtime.Scheme) *Config { 33 | return &Config{ 34 | cluster: cluster, 35 | client: client, 36 | scheme: scheme, 37 | } 38 | } 39 | 40 | func configSecretName(clusterName string) string { 41 | return controller.SafeConcatNameWithPrefix(clusterName, configName) 42 | } 43 | 44 | func ensureObject(ctx context.Context, cfg *Config, obj ctrlruntimeclient.Object) error { 45 | key := ctrlruntimeclient.ObjectKeyFromObject(obj) 46 | log := ctrl.LoggerFrom(ctx).WithValues("key", key) 47 | 48 | if err := controllerutil.SetControllerReference(cfg.cluster, obj, cfg.scheme); err != nil { 49 | return err 50 | } 51 | 52 | if err := cfg.client.Create(ctx, obj); err != nil { 53 | if apierrors.IsAlreadyExists(err) { 54 | log.V(1).Info(fmt.Sprintf("Resource %T already exists, updating.", obj)) 55 | 56 | return cfg.client.Update(ctx, obj) 57 | } 58 | 59 | return err 60 | } 61 | 62 | log.V(1).Info(fmt.Sprintf("Creating %T.", obj)) 63 | 64 | return nil 65 | } 66 | -------------------------------------------------------------------------------- /k3k-kubelet/README.md: -------------------------------------------------------------------------------- 1 | ## Virtual Kubelet 2 | 3 | This package provides an implementation of a virtual cluster node using [virtual-kubelet](https://github.com/virtual-kubelet/virtual-kubelet). 4 | 5 | The implementation is based on several projects, including: 6 | - [Virtual Kubelet](https://github.com/virtual-kubelet/virtual-kubelet) 7 | - [Kubectl](https://github.com/kubernetes/kubectl) 8 | - [Client-go](https://github.com/kubernetes/client-go) 9 | - [Azure-Aci](https://github.com/virtual-kubelet/azure-aci) 10 | 11 | ## Overview 12 | 13 | This project creates a node that registers itself in the virtual cluster. When workloads are scheduled to this node, it simply creates/updates the workload on the host cluster. 14 | 15 | ## Usage 16 | 17 | Build/push the image (from the root of rancher/k3k) using: 18 | 19 | ``` 20 | make build 21 | docker buildx build -f package/Dockerfile . -t $REPO/$IMAGE:$TAG 22 | ``` 23 | 24 | When running, it is recommended to deploy a k3k cluster with 1 server (with `--disable-agent` as a server arg) and no agents (so that the workloads can only be scheduled on the virtual node/host cluster). 
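A minimal `Cluster` manifest matching that recommendation might look like the sketch below; the name and namespace are illustrative placeholders, and the fields used (`servers`, `agents`, `serverArgs`) follow the examples in `examples/`:

```
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: vk-example        # placeholder name
  namespace: vk-example   # placeholder namespace
spec:
  servers: 1              # a single server, per the recommendation above
  agents: 0               # no agents, so workloads land on the virtual node
  serverArgs:
    - "--disable-agent"
```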
25 | 26 | After the image is built, it should be deployed with the following ENV vars set: 27 | - `CLUSTER_NAME` should be the name of the cluster. 28 | - `CLUSTER_NAMESPACE` should be the namespace the cluster is running in. 29 | - `HOST_KUBECONFIG` should be the path on the local filesystem (in container) to a kubeconfig for the host cluster (likely stored in a secret/mounted as a volume). 30 | - `VIRT_KUBECONFIG` should be the path on the local filesystem (in container) to a kubeconfig for the virtual cluster (likely stored in a secret/mounted as a volume). 31 | - `VIRT_POD_IP` should be the IP that the container is accessible from. 32 | 33 | This project is still under development and there are many features yet to be implemented, but it can run a basic nginx pod. 34 | 35 | -------------------------------------------------------------------------------- /pkg/controller/controller_test.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | 8 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | 10 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 11 | ) 12 | 13 | func Test_K3S_Image(t *testing.T) { 14 | type args struct { 15 | cluster *v1beta1.Cluster 16 | k3sImage string 17 | } 18 | 19 | tests := []struct { 20 | name string 21 | args args 22 | expectedData string 23 | }{ 24 | { 25 | name: "cluster with assigned version spec", 26 | args: args{ 27 | k3sImage: "rancher/k3s", 28 | cluster: &v1beta1.Cluster{ 29 | ObjectMeta: v1.ObjectMeta{ 30 | Name: "mycluster", 31 | Namespace: "ns-1", 32 | }, 33 | Spec: v1beta1.ClusterSpec{ 34 | Version: "v1.2.3", 35 | }, 36 | }, 37 | }, 38 | expectedData: "rancher/k3s:v1.2.3", 39 | }, 40 | { 41 | name: "cluster with empty version spec and assigned hostVersion status", 42 | args: args{ 43 | k3sImage: "rancher/k3s", 44 | cluster: &v1beta1.Cluster{ 45 | ObjectMeta: v1.ObjectMeta{ 46 | Name: "mycluster", 47 | Namespace: "ns-1", 48 | }, 49 | Status: v1beta1.ClusterStatus{ 50 | HostVersion: "v4.5.6", 51 | }, 52 | }, 53 | }, 54 | expectedData: "rancher/k3s:v4.5.6-k3s1", 55 | }, 56 | { 57 | name: "cluster with empty version spec and empty hostVersion status", 58 | args: args{ 59 | k3sImage: "rancher/k3s", 60 | cluster: &v1beta1.Cluster{ 61 | ObjectMeta: v1.ObjectMeta{ 62 | Name: "mycluster", 63 | Namespace: "ns-1", 64 | }, 65 | }, 66 | }, 67 | expectedData: "rancher/k3s:latest", 68 | }, 69 | } 70 | 71 | for _, tt := range tests { 72 | t.Run(tt.name, func(t *testing.T) { 73 | fullImage := K3SImage(tt.args.cluster, tt.args.k3sImage) 74 | assert.Equal(t, tt.expectedData, fullImage) 75 | }) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /.github/workflows/release-delete.yml: -------------------------------------------------------------------------------- 1 | name: Release - Delete Draft 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | tag: 7 | type: string 8 | description: The tag of the release 9 | 10 | permissions: 11 | contents: write 12 | packages: write 13 | 14 | env: 15 | GH_TOKEN: ${{ github.token }} 16 | 17 | jobs: 18 | release-delete: 19 | runs-on: ubuntu-latest 20 | 21 | steps: 22 | - name: Check tag 23 | if: inputs.tag == '' 24 | run: echo "::error::Missing tag from input" && exit 1 25 | 26 | - name: Checkout code 27 | uses: actions/checkout@v4 28 | 29 | - name: Check if release is draft 30 | run: | 31 | CURRENT_TAG=${{ inputs.tag }} 32 | isDraft=$(gh release view
${CURRENT_TAG} --json isDraft --jq ".isDraft") 33 | if [ "$isDraft" = true ]; then 34 | echo "Release ${CURRENT_TAG} is draft" 35 | else 36 | echo "::error::Cannot delete non-draft release" && exit 1 37 | fi 38 | 39 | - name: Delete packages from Github Container Registry 40 | run: | 41 | CURRENT_TAG=${{ inputs.tag }} 42 | echo "Deleting packages with tag ${CURRENT_TAG}" 43 | 44 | JQ_QUERY=".[] | select(.metadata.container.tags[] == \"${CURRENT_TAG}\")" 45 | 46 | for package in k3k k3k-kubelet 47 | do 48 | echo "Deleting ${package} image" 49 | PACKAGE_TO_DELETE=$(gh api /user/packages/container/${package}/versions --jq "${JQ_QUERY}") 50 | echo $PACKAGE_TO_DELETE | jq 51 | 52 | PACKAGE_ID=$(echo $PACKAGE_TO_DELETE | jq .id) 53 | echo "Deleting ${PACKAGE_ID}" 54 | gh api --method DELETE /user/packages/container/${package}/versions/${PACKAGE_ID} 55 | done 56 | 57 | - name: Delete Github release 58 | run: | 59 | CURRENT_TAG=${{ inputs.tag }} 60 | echo "Deleting release ${CURRENT_TAG}" 61 | gh release delete ${CURRENT_TAG} 62 | -------------------------------------------------------------------------------- /pkg/controller/certs/certs.go: -------------------------------------------------------------------------------- 1 | package certs 2 | 3 | import ( 4 | "crypto" 5 | "crypto/x509" 6 | "fmt" 7 | "net" 8 | "time" 9 | 10 | certutil "github.com/rancher/dynamiclistener/cert" 11 | ) 12 | 13 | func CreateClientCertKey(commonName string, organization []string, altNames *certutil.AltNames, extKeyUsage []x509.ExtKeyUsage, expiresAt time.Duration, caCert, caKey string) ([]byte, []byte, error) { 14 | caKeyPEM, err := certutil.ParsePrivateKeyPEM([]byte(caKey)) 15 | if err != nil { 16 | return nil, nil, err 17 | } 18 | 19 | caCertPEM, err := certutil.ParseCertsPEM([]byte(caCert)) 20 | if err != nil { 21 | return nil, nil, err 22 | } 23 | 24 | b, err := generateKey() 25 | if err != nil { 26 | return nil, nil, err 27 | } 28 | 29 | key, err := certutil.ParsePrivateKeyPEM(b) 30 | if err != nil { 31 | return nil, nil, err 32 | } 33 | 34 | cfg := certutil.Config{ 35 | CommonName: commonName, 36 | Organization: organization, 37 | Usages: extKeyUsage, 38 | ExpiresAt: expiresAt, 39 | } 40 | if altNames != nil { 41 | cfg.AltNames = *altNames 42 | } 43 | 44 | cert, err := certutil.NewSignedCert(cfg, key.(crypto.Signer), caCertPEM[0], caKeyPEM.(crypto.Signer)) 45 | if err != nil { 46 | return nil, nil, err 47 | } 48 | 49 | return append(certutil.EncodeCertPEM(cert), certutil.EncodeCertPEM(caCertPEM[0])...), b, nil 50 | } 51 | 52 | func generateKey() (data []byte, err error) { 53 | generatedData, err := certutil.MakeEllipticPrivateKeyPEM() 54 | if err != nil { 55 | return nil, fmt.Errorf("error generating key: %v", err) 56 | } 57 | 58 | return generatedData, nil 59 | } 60 | 61 | func AddSANs(sans []string) certutil.AltNames { 62 | var altNames certutil.AltNames 63 | 64 | for _, san := range sans { 65 | ip := net.ParseIP(san) 66 | if ip == nil { 67 | altNames.DNSNames = append(altNames.DNSNames, san) 68 | } else { 69 | altNames.IPs = append(altNames.IPs, ip) 70 | } 71 | } 72 | 73 | return altNames 74 | } 75 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/root-ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFMzCCAxugAwIBAgIUALEl344JEZxaOvwyrtO8QwLHAacwDQYJKoZIhvcNAQEL 3 | BQAwITEfMB0GA1UEAwwWazNzLXJvb3QtY2FAMTc1MTk2NTEwMDAeFw0yNTA3MDgw 4 | 
ODU4MjJaFw00NTA3MDMwODU4MjJaMCExHzAdBgNVBAMMFmszcy1yb290LWNhQDE3 5 | NTE5NjUxMDAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDtESS+0V7T 6 | 0b3rN0ga9/zWThgaGIfOoKiCr2CSR4az0hjpmISAlS5pplhWrutGhydvaR/7vV7g 7 | Grtn0ptmqidEc0XSeQTosqUv29SZ+hbCITZG05TFbMGGXmFo/1LHhJ8ZpgzUBqc7 8 | inOXmEBbCyhOWX+fhddoJJiUIX7l2R3cbyNk7I7KFiZ1JDDnoyDWFMOJLvXwOqeT 9 | CJQesS+2qpeXpdazgtkRQi7aOhZtXnCsJHnFbpL9VFTmWpq7vMIA9xkDva3/80S4 10 | T2USiyN4/quUagjP8si5PIaGBVx9bJhnKHUdTrPPZWHFfxm74GUG7en4OtR9lOTM 11 | TV49XuofNpJaJ2K5Dz+rk6gIYp2R9W+r8CN1HBXeLF5WJCvw54Lpcp0V2BA+MARq 12 | Ij8UTUymu++rIo+WjKcHDT0qn39rhOhqJ3dXynW4UF/95KwB7e5UI+OxZ9sJ4oNP 13 | FuP5PHrDINXsYG0DDM3KTDxA8memVSFGMdhm31DPD2GUvUPlupnd6uCepdcBc9ed 14 | +9gTeSxF5cA6Q3gVOQa8ok9Ts0E5DwGaCIxjasyD+MIORf3Bkv0Z3lBNXicWDngP 15 | FU5ya30GeTZQgzqhlGoXehjYfRxV28pgiR5ejl0/rmsYGfiWPdfa4Qy0LnxSKHj1 16 | f2R1jEB55ORATo3f+k4Ez5bdjhk4KlA95QIDAQABo2MwYTAdBgNVHQ4EFgQUjC+D 17 | R6x4XaKOOmUXnTrrBoSQbEwwHwYDVR0jBBgwFoAUjC+DR6x4XaKOOmUXnTrrBoSQ 18 | bEwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAqQwDQYJKoZIhvcNAQEL 19 | BQADggIBACpzO1nV6/hS1xLIkkGf9RdypATYCH6RqeBJqMf/R5YEUQssLF/QBEvQ 20 | NwockkxD1l8VwjH3Pp36QxkgthAl0D3ewK9LLkxEq1pp9VuVGxeD9siN/fxx6htG 21 | KP1jT49pBDIdbzhJ7eR/O8xuI5dyZNLZLJkaumQkd7sEVHvDTFw55PhwUEJ3Wcxl 22 | jAxXM1FFCKftXjWFmvVmzYZYkPj/AhB+PcVIIkFnNQYTXdUCUtsnSgj3pF1z/+g5 23 | PttBGhsttrm93lJgddRFTEWV1lzfw1csrHkLYDYLDKDzsNQaVo71wKPmorK+xnbM 24 | h1PQAVJeXypLTAfE636+n+Md/wSvnQuo0RzPBE24S9c9TWM2d96dvtU9kgJPbqoA 25 | RX6jHw2ACnKp4RFJILqDCqFCOrytYPk3J/L8myW44dGpCCdSrFREqNCsyMrqu4v7 26 | U+W9ENHT0qe7Nm0T4XNFlQstt6uGvk6ddEdbgcTfTvSv5jx2++Jfl2ynF+G67l0U 27 | UASFHsrwThnulGQtpK2+heHkU8xQFjQOGZoQMlLiWzWg+bqo07aghAndKhKnW8s8 28 | iRvMvdcsLjjDaPFCgeopGeQauiTd2od5aXGWCn+djzLq0fjIvezs4K70XOsStbGA 29 | cJFFAnsnM40SbnrWyfe1EBlzuVJu0csuF77fpEU7CFz8uzd268Ho 30 | -----END CERTIFICATE----- 31 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/root-ca.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFMzCCAxugAwIBAgIUALEl344JEZxaOvwyrtO8QwLHAacwDQYJKoZIhvcNAQEL 3 | BQAwITEfMB0GA1UEAwwWazNzLXJvb3QtY2FAMTc1MTk2NTEwMDAeFw0yNTA3MDgw 4 | ODU4MjJaFw00NTA3MDMwODU4MjJaMCExHzAdBgNVBAMMFmszcy1yb290LWNhQDE3 5 | NTE5NjUxMDAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDtESS+0V7T 6 | 0b3rN0ga9/zWThgaGIfOoKiCr2CSR4az0hjpmISAlS5pplhWrutGhydvaR/7vV7g 7 | Grtn0ptmqidEc0XSeQTosqUv29SZ+hbCITZG05TFbMGGXmFo/1LHhJ8ZpgzUBqc7 8 | inOXmEBbCyhOWX+fhddoJJiUIX7l2R3cbyNk7I7KFiZ1JDDnoyDWFMOJLvXwOqeT 9 | CJQesS+2qpeXpdazgtkRQi7aOhZtXnCsJHnFbpL9VFTmWpq7vMIA9xkDva3/80S4 10 | T2USiyN4/quUagjP8si5PIaGBVx9bJhnKHUdTrPPZWHFfxm74GUG7en4OtR9lOTM 11 | TV49XuofNpJaJ2K5Dz+rk6gIYp2R9W+r8CN1HBXeLF5WJCvw54Lpcp0V2BA+MARq 12 | Ij8UTUymu++rIo+WjKcHDT0qn39rhOhqJ3dXynW4UF/95KwB7e5UI+OxZ9sJ4oNP 13 | FuP5PHrDINXsYG0DDM3KTDxA8memVSFGMdhm31DPD2GUvUPlupnd6uCepdcBc9ed 14 | +9gTeSxF5cA6Q3gVOQa8ok9Ts0E5DwGaCIxjasyD+MIORf3Bkv0Z3lBNXicWDngP 15 | FU5ya30GeTZQgzqhlGoXehjYfRxV28pgiR5ejl0/rmsYGfiWPdfa4Qy0LnxSKHj1 16 | f2R1jEB55ORATo3f+k4Ez5bdjhk4KlA95QIDAQABo2MwYTAdBgNVHQ4EFgQUjC+D 17 | R6x4XaKOOmUXnTrrBoSQbEwwHwYDVR0jBBgwFoAUjC+DR6x4XaKOOmUXnTrrBoSQ 18 | bEwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAqQwDQYJKoZIhvcNAQEL 19 | BQADggIBACpzO1nV6/hS1xLIkkGf9RdypATYCH6RqeBJqMf/R5YEUQssLF/QBEvQ 20 | NwockkxD1l8VwjH3Pp36QxkgthAl0D3ewK9LLkxEq1pp9VuVGxeD9siN/fxx6htG 21 | KP1jT49pBDIdbzhJ7eR/O8xuI5dyZNLZLJkaumQkd7sEVHvDTFw55PhwUEJ3Wcxl 22 | jAxXM1FFCKftXjWFmvVmzYZYkPj/AhB+PcVIIkFnNQYTXdUCUtsnSgj3pF1z/+g5 23 | 
PttBGhsttrm93lJgddRFTEWV1lzfw1csrHkLYDYLDKDzsNQaVo71wKPmorK+xnbM 24 | h1PQAVJeXypLTAfE636+n+Md/wSvnQuo0RzPBE24S9c9TWM2d96dvtU9kgJPbqoA 25 | RX6jHw2ACnKp4RFJILqDCqFCOrytYPk3J/L8myW44dGpCCdSrFREqNCsyMrqu4v7 26 | U+W9ENHT0qe7Nm0T4XNFlQstt6uGvk6ddEdbgcTfTvSv5jx2++Jfl2ynF+G67l0U 27 | UASFHsrwThnulGQtpK2+heHkU8xQFjQOGZoQMlLiWzWg+bqo07aghAndKhKnW8s8 28 | iRvMvdcsLjjDaPFCgeopGeQauiTd2od5aXGWCn+djzLq0fjIvezs4K70XOsStbGA 29 | cJFFAnsnM40SbnrWyfe1EBlzuVJu0csuF77fpEU7CFz8uzd268Ho 30 | -----END CERTIFICATE----- 31 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/intermediate-ca.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFLzCCAxegAwIBAgIIXrx+TJOdkuIwDQYJKoZIhvcNAQELBQAwITEfMB0GA1UE 3 | AwwWazNzLXJvb3QtY2FAMTc1MTk2NTEwMDAeFw0yNTA3MDgwODU4MjNaFw0zNTA4 4 | MjUwODU4MjNaMCkxJzAlBgNVBAMMHmszcy1pbnRlcm1lZGlhdGUtY2FAMTc1MTk2 5 | NTEwMDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfxJHxlMq/Ul+7X 6 | avcxrg6x8joXUn0T4TbaYiQJVRK0z8j42yDh1uJSQ82SkCB1ltkF2xHzQ65iqPx9 7 | n6uluTP/fEoGOiGxfZfIqk+H+mPbumJPd7n3hmtqPwPmjhYCPZpLadT60rjNalkM 8 | 4hUX2O0PwXtvejODUaLgF9YLUJbIdoZvkzKNcv2bTg9gruW0AoAzEqAKLYRDh58N 9 | XQMync3OQnnAMzUyPBBukhM7P2tysufV0HJg/ZcsgLek8hpuBQ2T1QVUrZFAgOXh 10 | ZCcrK5OGKcjQh5bo1vcbIRr4DtfHT/Jl5RhA6q5IqTAiS/dQVAhNYs0lDHgF69kU 11 | gI2iHqsv2awuW3k9Q+i5q9vMlo/h9wxpj87cbfxLpBkS2o/h+6zB1DdI6U1WRo/Q 12 | pFp3vzsdtr/G8WEFxwL1OMDewjA+EsjhSjI0BBrm7CC2uJkNRM3v97r9RGNzBCYL 13 | Qn3IxqqzVjOOEhJzQc3G8FKRJj1zPAkURvTus4s9c7vdMOp1sVd78b9ObXFa0TDF 14 | DPe9ZQu5YGEvGGibKjp5cFwJ2M2pwjv53ZUDS4sxqqdXRBmVxcCkUtrwRL35PIpb 15 | Tje1UZP8RJ66iXnXUPco2BKyd9NYyiaUAW4gZz5WROzfgPdnI/ArGc5RThk+O5sg 16 | cb62eU/nADPGsG8t+VEUXfJjP19pAgMBAAGjYzBhMB0GA1UdDgQWBBSZVjjNDIyY 17 | CtjsHgMrtXlZ/31IujAfBgNVHSMEGDAWgBSML4NHrHhdoo46ZRedOusGhJBsTDAP 18 | BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwICpDANBgkqhkiG9w0BAQsFAAOC 19 | AgEAxgdjETEkUB7G2g3cFz6Jb8TznrZXprVk9G1p1UNuMAAEe2NaiGI0BvivJNoQ 20 | 6DNpngB3x6l7u5hMOJY/72wzQRDXg05m5T8/vBfTcz/HcgoKU82hgYDxQiGA0I5e 21 | 5czfEhQu95uPT7T26m5pvWA38Tf6WbXCB7c0/FT8II8w1ptJlZrSKRW33+j1tb2r 22 | t0/8RoMaAUS1MP6+12juG7qcuEx0rDVeKx33pa20NjsAtZeTAsXjugJkmUkkd/PC 23 | cKbawQB9meqtWPfGzwmUu1qz6SQeWtOWFrOBSeTzx0HTeimSAiHVSaXGd7ms3orx 24 | KsKUoPUbXi9rVIDVxCe5XwAKmcHMz8DGHfxJ6sodol25pYbHxKy/swgAzQdwtGF9 25 | HWJAm6/3YSjtmD6+t89/yYzvxv+aMNDXVMLpDFb+7/ESsSl4757WjvvFoz0HxcwD 26 | 4qfmV2z+EaLM44P3QaJDD599/Qwt+TFHSQRfD/MqMH6A+9vZhZGWeGgFFmu82kzH 27 | xKJas/jI+t+V+2TbfagUYlsinZ6UmLcju99myl6wq6nJu8X5b8Uhpv5/8kVniqXE 28 | lWtFpMBmnE0oq0U+KR3OfnovSYdYTc7uWpaJlqMamsE5UVVBrlSyUe9J9EQzYYea 29 | Ufoq67KnJInMobbQ4aonz7EQZW6WIZpqASuVGfT5heSKHDc= 30 | -----END CERTIFICATE----- 31 | -------------------------------------------------------------------------------- /docs/cli/k3kcli_cluster_create.md: -------------------------------------------------------------------------------- 1 | ## k3kcli cluster create 2 | 3 | Create new cluster 4 | 5 | ``` 6 | k3kcli cluster create [flags] 7 | ``` 8 | 9 | ### Examples 10 | 11 | ``` 12 | k3kcli cluster create [command options] NAME 13 | ``` 14 | 15 | ### Options 16 | 17 | ``` 18 | --agent-args strings agents extra arguments 19 | --agent-envs strings agents extra Envs 20 | --agents int number of agents 21 | --annotations stringArray Annotations to add to the cluster object (e.g. 
key=value) 22 | --cluster-cidr string cluster CIDR 23 | --custom-certs string The path for custom certificate directory 24 | -h, --help help for create 25 | --kubeconfig-server string override the kubeconfig server host 26 | --labels stringArray Labels to add to the cluster object (e.g. key=value) 27 | --mirror-host-nodes Mirror Host Cluster Nodes 28 | --mode string k3k mode type (shared, virtual) (default "shared") 29 | -n, --namespace string namespace of the k3k cluster 30 | --persistence-type string persistence mode for the nodes (dynamic, ephemeral) (default "dynamic") 31 | --policy string The policy to create the cluster in 32 | --server-args strings servers extra arguments 33 | --server-envs strings servers extra Envs 34 | --servers int number of servers (default 1) 35 | --service-cidr string service CIDR 36 | --storage-class-name string storage class name for dynamic persistence type 37 | --storage-request-size string storage size for dynamic persistence type 38 | --timeout duration The timeout for waiting for the cluster to become ready (e.g., 10s, 5m, 1h). (default 3m0s) 39 | --token string token of the cluster 40 | --version string k3s version 41 | ``` 42 | 43 | ### Options inherited from parent commands 44 | 45 | ``` 46 | --debug Turn on debug logs 47 | --kubeconfig string kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set) 48 | ``` 49 | 50 | ### SEE ALSO 51 | 52 | * [k3kcli cluster](k3kcli_cluster.md) - cluster command 53 | 54 | -------------------------------------------------------------------------------- /docs/howtos/expose-workloads.md: -------------------------------------------------------------------------------- 1 | # How-to: Expose Workloads Outside the Virtual Cluster 2 | 3 | This guide explains how to expose workloads running in k3k-managed virtual clusters to external networks. Behavior varies depending on the operating mode of the virtual cluster. 4 | 5 | ## Virtual Mode 6 | 7 | > [!CAUTION] 8 | > **Not Supported** 9 | > In *virtual mode*, direct external exposure of workloads is **not available**. 10 | > This mode is designed for strong isolation and does not expose the virtual cluster's network directly. 11 | 12 | ## Shared Mode 13 | 14 | In *shared mode*, workloads can be exposed to the external network using standard Kubernetes service types or an ingress controller, depending on your requirements. 15 | 16 | > [!NOTE] 17 | > *`Services`* are always synced from the virtual cluster to the host cluster following the same principle described [here](../architecture.md#shared-mode) for pods. 18 | 19 | ### Option 1: Use `NodePort` or `LoadBalancer` 20 | 21 | To expose a service such as a web application outside the host cluster: 22 | 23 | - **`NodePort`**: 24 | Exposes the service on a static port on each node’s IP. 25 | Access the service at `http://<node-ip>:<node-port>`. 26 | 27 | - **`LoadBalancer`**: 28 | Provisions an external load balancer (if supported by the environment) and exposes the service via the load balancer’s IP. 29 | 30 | > **Note** 31 | > The `LoadBalancer` IP is currently not reflected back to the virtual cluster service. 32 | > [k3k issue #365](https://github.com/rancher/k3k/issues/365) 33 | 34 | ### Option 2: Use `ClusterIP` for Internal Communication 35 | 36 | If the workload should only be accessible to other services or pods *within* the host cluster: 37 | 38 | - Use the `ClusterIP` service type. 39 | This exposes the service on an internal IP, only reachable inside the host cluster.
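To make Options 1 and 2 concrete, the following is a minimal Service sketch to apply *inside* the virtual cluster (the `app: web` selector and the port numbers are placeholders, not part of k3k):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: web
spec:
  type: NodePort       # switch to ClusterIP for internal-only access (Option 2)
  selector:
    app: web           # must match the labels on your workload's pods
  ports:
    - port: 80         # port exposed by the service
      targetPort: 8080 # port the container listens on
```

Once synced to the host cluster, a `NodePort` service is reachable at `http://<node-ip>:<node-port>` using the allocated node port.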
40 | 41 | ### Option 3: Use Ingress for HTTP/HTTPS Routing 42 | 43 | For more advanced routing (e.g., hostname- or path-based routing), deploy an **Ingress controller** in the virtual cluster, and expose it via `NodePort` or `LoadBalancer`. 44 | 45 | This allows you to: 46 | 47 | - Define Ingress resources in the virtual cluster. 48 | - Route external traffic to services within the virtual cluster. 49 | 50 | >**Note** 51 | > Support for using the host cluster's Ingress controller from a virtual cluster is being tracked in 52 | > [k3k issue #356](https://github.com/rancher/k3k/issues/356) 53 | -------------------------------------------------------------------------------- /cli/cmds/cluster_create_test.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "k8s.io/utils/ptr" 8 | 9 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 10 | ) 11 | 12 | func Test_printClusterDetails(t *testing.T) { 13 | tests := []struct { 14 | name string 15 | cluster *v1beta1.Cluster 16 | want string 17 | wantErr bool 18 | }{ 19 | { 20 | name: "simple cluster", 21 | cluster: &v1beta1.Cluster{ 22 | Spec: v1beta1.ClusterSpec{ 23 | Mode: v1beta1.SharedClusterMode, 24 | Version: "123", 25 | Persistence: v1beta1.PersistenceConfig{ 26 | Type: v1beta1.DynamicPersistenceMode, 27 | }, 28 | }, 29 | Status: v1beta1.ClusterStatus{ 30 | HostVersion: "456", 31 | }, 32 | }, 33 | want: `Cluster details: 34 | Mode: shared 35 | Servers: 0 36 | Version: 123 (Host: 456) 37 | Persistence: 38 | Type: dynamic`, 39 | }, 40 | { 41 | name: "simple cluster with no version", 42 | cluster: &v1beta1.Cluster{ 43 | Spec: v1beta1.ClusterSpec{ 44 | Mode: v1beta1.SharedClusterMode, 45 | Persistence: v1beta1.PersistenceConfig{ 46 | Type: v1beta1.DynamicPersistenceMode, 47 | }, 48 | }, 49 | Status: v1beta1.ClusterStatus{ 50 | HostVersion: "456", 51 | }, 52 | }, 53 | want: `Cluster details: 54 | Mode: shared 55 | Servers: 0 56 | Version: 456 (Host: 456) 57 | Persistence: 58 | Type: dynamic`, 59 | }, 60 | { 61 | name: "cluster with agents", 62 | cluster: &v1beta1.Cluster{ 63 | Spec: v1beta1.ClusterSpec{ 64 | Mode: v1beta1.SharedClusterMode, 65 | Agents: ptr.To[int32](3), 66 | Persistence: v1beta1.PersistenceConfig{ 67 | Type: v1beta1.DynamicPersistenceMode, 68 | StorageClassName: ptr.To("local-path"), 69 | StorageRequestSize: "3gb", 70 | }, 71 | }, 72 | Status: v1beta1.ClusterStatus{ 73 | HostVersion: "456", 74 | }, 75 | }, 76 | want: `Cluster details: 77 | Mode: shared 78 | Servers: 0 79 | Agents: 3 80 | Version: 456 (Host: 456) 81 | Persistence: 82 | Type: dynamic 83 | StorageClass: local-path 84 | Size: 3gb`, 85 | }, 86 | } 87 | 88 | for _, tt := range tests { 89 | t.Run(tt.name, func(t *testing.T) { 90 | clusterDetails, err := printClusterDetails(tt.cluster) 91 | assert.NoError(t, err) 92 | assert.Equal(t, tt.want, clusterDetails) 93 | }) 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /pkg/controller/cluster/server/ingress.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | 6 | "k8s.io/utils/ptr" 7 | 8 | networkingv1 "k8s.io/api/networking/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | 11 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 12 | "github.com/rancher/k3k/pkg/controller" 13 | ) 14 | 15 | const ( 16 | httpsPort = 443 17 | k3sServerPort = 6443 18 | etcdPort = 
2379 19 | ) 20 | 21 | func IngressName(clusterName string) string { 22 | return controller.SafeConcatNameWithPrefix(clusterName, "ingress") 23 | } 24 | 25 | func Ingress(ctx context.Context, cluster *v1beta1.Cluster) networkingv1.Ingress { 26 | ingress := networkingv1.Ingress{ 27 | TypeMeta: metav1.TypeMeta{ 28 | Kind: "Ingress", 29 | APIVersion: "networking.k8s.io/v1", 30 | }, 31 | ObjectMeta: metav1.ObjectMeta{ 32 | Name: IngressName(cluster.Name), 33 | Namespace: cluster.Namespace, 34 | }, 35 | Spec: networkingv1.IngressSpec{ 36 | Rules: ingressRules(cluster), 37 | }, 38 | } 39 | 40 | if cluster.Spec.Expose != nil && cluster.Spec.Expose.Ingress != nil { 41 | ingressConfig := cluster.Spec.Expose.Ingress 42 | 43 | if ingressConfig.IngressClassName != "" { 44 | ingress.Spec.IngressClassName = ptr.To(ingressConfig.IngressClassName) 45 | } 46 | 47 | if ingressConfig.Annotations != nil { 48 | ingress.Annotations = ingressConfig.Annotations 49 | } 50 | } 51 | 52 | return ingress 53 | } 54 | 55 | func ingressRules(cluster *v1beta1.Cluster) []networkingv1.IngressRule { 56 | var ingressRules []networkingv1.IngressRule 57 | 58 | if cluster.Spec.Expose == nil || cluster.Spec.Expose.Ingress == nil { 59 | return ingressRules 60 | } 61 | 62 | path := networkingv1.HTTPIngressPath{ 63 | Path: "/", 64 | PathType: ptr.To(networkingv1.PathTypePrefix), 65 | Backend: networkingv1.IngressBackend{ 66 | Service: &networkingv1.IngressServiceBackend{ 67 | Name: ServiceName(cluster.Name), 68 | Port: networkingv1.ServiceBackendPort{ 69 | Number: httpsPort, 70 | }, 71 | }, 72 | }, 73 | } 74 | 75 | hosts := cluster.Spec.TLSSANs 76 | for _, host := range hosts { 77 | ingressRules = append(ingressRules, networkingv1.IngressRule{ 78 | Host: host, 79 | IngressRuleValue: networkingv1.IngressRuleValue{ 80 | HTTP: &networkingv1.HTTPIngressRuleValue{ 81 | Paths: []networkingv1.HTTPIngressPath{path}, 82 | }, 83 | }, 84 | }) 85 | } 86 | 87 | return ingressRules 88 | } 89 | -------------------------------------------------------------------------------- /pkg/controller/controller.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/hex" 6 | "slices" 7 | "strings" 8 | "time" 9 | 10 | "k8s.io/apimachinery/pkg/util/wait" 11 | 12 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 13 | ) 14 | 15 | const ( 16 | namePrefix = "k3k" 17 | AdminCommonName = "system:admin" 18 | ) 19 | 20 | // Backoff is the cluster creation duration backoff 21 | var Backoff = wait.Backoff{ 22 | Steps: 5, 23 | Duration: 5 * time.Second, 24 | Factor: 2, 25 | Jitter: 0.1, 26 | } 27 | 28 | // K3SImage returns the rancher/k3s image tagged with the found K3SVersion. 29 | func K3SImage(cluster *v1beta1.Cluster, k3SImage string) string { 30 | return k3SImage + ":" + K3SVersion(cluster) 31 | } 32 | 33 | // K3SVersion returns the rancher/k3s specified version. 34 | // If empty it will return the k3s version of the Kubernetes version of the host cluster, stored in the Status object. 35 | // Returns the latest version as fallback. 36 | func K3SVersion(cluster *v1beta1.Cluster) string { 37 | if cluster.Spec.Version != "" { 38 | return cluster.Spec.Version 39 | } 40 | 41 | if cluster.Status.HostVersion != "" { 42 | return cluster.Status.HostVersion + "-k3s1" 43 | } 44 | 45 | return "latest" 46 | } 47 | 48 | // SafeConcatNameWithPrefix runs the SafeConcatName with extra prefix. 
49 | func SafeConcatNameWithPrefix(name ...string) string { 50 | return SafeConcatName(append([]string{namePrefix}, name...)...) 51 | } 52 | 53 | // SafeConcatName concatenates the given strings and ensures the returned name is under 64 characters 54 | // by cutting the string off at 57 characters and setting the last 6 with an encoded version of the concatenated string. 55 | // Empty strings in the array will be ignored. 56 | func SafeConcatName(name ...string) string { 57 | name = slices.DeleteFunc(name, func(s string) bool { 58 | return s == "" 59 | }) 60 | 61 | fullPath := strings.Join(name, "-") 62 | if len(fullPath) < 64 { 63 | return fullPath 64 | } 65 | 66 | digest := sha256.Sum256([]byte(fullPath)) 67 | 68 | // since we cut the string in the middle, the last char may not be compatible with what is expected in k8s 69 | // we are checking and if necessary removing the last char 70 | c := fullPath[56] 71 | if 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { 72 | return fullPath[0:57] + "-" + hex.EncodeToString(digest[0:])[0:5] 73 | } 74 | 75 | return fullPath[0:56] + "-" + hex.EncodeToString(digest[0:])[0:6] 76 | } 77 | -------------------------------------------------------------------------------- /pkg/controller/policy/policy_suite_test.go: -------------------------------------------------------------------------------- 1 | package policy_test 2 | 3 | import ( 4 | "context" 5 | "path/filepath" 6 | "testing" 7 | 8 | "github.com/go-logr/zapr" 9 | "go.uber.org/zap" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | "sigs.k8s.io/controller-runtime/pkg/envtest" 13 | 14 | appsv1 "k8s.io/api/apps/v1" 15 | corev1 "k8s.io/api/core/v1" 16 | networkingv1 "k8s.io/api/networking/v1" 17 | ctrl "sigs.k8s.io/controller-runtime" 18 | 19 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 20 | "github.com/rancher/k3k/pkg/controller/policy" 21 | 22 | . "github.com/onsi/ginkgo/v2" 23 | . 
"github.com/onsi/gomega" 24 | ) 25 | 26 | func TestController(t *testing.T) { 27 | RegisterFailHandler(Fail) 28 | RunSpecs(t, "VirtualClusterPolicy Controller Suite") 29 | } 30 | 31 | var ( 32 | testEnv *envtest.Environment 33 | k8sClient client.Client 34 | ctx context.Context 35 | cancel context.CancelFunc 36 | ) 37 | 38 | var _ = BeforeSuite(func() { 39 | By("bootstrapping test environment") 40 | testEnv = &envtest.Environment{ 41 | CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "templates", "crds")}, 42 | ErrorIfCRDPathMissing: true, 43 | } 44 | cfg, err := testEnv.Start() 45 | Expect(err).NotTo(HaveOccurred()) 46 | 47 | scheme := buildScheme() 48 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) 49 | Expect(err).NotTo(HaveOccurred()) 50 | 51 | mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme}) 52 | Expect(err).NotTo(HaveOccurred()) 53 | 54 | ctrl.SetLogger(zapr.NewLogger(zap.NewNop())) 55 | 56 | ctx, cancel = context.WithCancel(context.Background()) 57 | err = policy.Add(mgr, "", 50) 58 | Expect(err).NotTo(HaveOccurred()) 59 | 60 | go func() { 61 | defer GinkgoRecover() 62 | err = mgr.Start(ctx) 63 | Expect(err).NotTo(HaveOccurred(), "failed to run manager") 64 | }() 65 | }) 66 | 67 | var _ = AfterSuite(func() { 68 | cancel() 69 | 70 | By("tearing down the test environment") 71 | err := testEnv.Stop() 72 | Expect(err).NotTo(HaveOccurred()) 73 | }) 74 | 75 | func buildScheme() *runtime.Scheme { 76 | scheme := runtime.NewScheme() 77 | 78 | err := corev1.AddToScheme(scheme) 79 | Expect(err).NotTo(HaveOccurred()) 80 | err = appsv1.AddToScheme(scheme) 81 | Expect(err).NotTo(HaveOccurred()) 82 | err = networkingv1.AddToScheme(scheme) 83 | Expect(err).NotTo(HaveOccurred()) 84 | err = v1beta1.AddToScheme(scheme) 85 | Expect(err).NotTo(HaveOccurred()) 86 | 87 | return scheme 88 | } 89 | -------------------------------------------------------------------------------- /pkg/controller/cluster/pod.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "context" 5 | 6 | "k8s.io/apimachinery/pkg/runtime" 7 | "sigs.k8s.io/controller-runtime/pkg/controller" 8 | "sigs.k8s.io/controller-runtime/pkg/manager" 9 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 10 | 11 | v1 "k8s.io/api/core/v1" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | ctrl "sigs.k8s.io/controller-runtime" 14 | ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" 15 | 16 | "github.com/rancher/k3k/k3k-kubelet/translate" 17 | ) 18 | 19 | const ( 20 | podController = "k3k-pod-controller" 21 | ) 22 | 23 | type PodReconciler struct { 24 | Client ctrlruntimeclient.Client 25 | Scheme *runtime.Scheme 26 | } 27 | 28 | // AddPodController adds a new controller for Pods to the manager. 29 | // It will reconcile the Pods of the Host Cluster with the one of the Virtual Cluster. 30 | func AddPodController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error { 31 | reconciler := PodReconciler{ 32 | Client: mgr.GetClient(), 33 | Scheme: mgr.GetScheme(), 34 | } 35 | 36 | return ctrl.NewControllerManagedBy(mgr). 37 | For(&v1.Pod{}). 38 | Named(podController). 39 | WithEventFilter(newClusterPredicate()). 40 | WithOptions(controller.Options{MaxConcurrentReconciles: maxConcurrentReconciles}). 
41 | Complete(&reconciler) 42 | } 43 | 44 | func (r *PodReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { 45 | log := ctrl.LoggerFrom(ctx) 46 | log.V(1).Info("Reconciling Pod") 47 | 48 | var pod v1.Pod 49 | if err := r.Client.Get(ctx, req.NamespacedName, &pod); err != nil { 50 | return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err) 51 | } 52 | 53 | // get cluster from the object 54 | cluster := clusterNamespacedName(&pod) 55 | 56 | virtualClient, err := newVirtualClient(ctx, r.Client, cluster.Name, cluster.Namespace) 57 | if err != nil { 58 | return reconcile.Result{}, err 59 | } 60 | 61 | if !pod.DeletionTimestamp.IsZero() { 62 | virtName := pod.GetAnnotations()[translate.ResourceNameAnnotation] 63 | virtNamespace := pod.GetAnnotations()[translate.ResourceNamespaceAnnotation] 64 | 65 | virtPod := v1.Pod{ 66 | ObjectMeta: metav1.ObjectMeta{ 67 | Name: virtName, 68 | Namespace: virtNamespace, 69 | }, 70 | } 71 | 72 | log.V(1).Info("Deleting Virtual Pod", "name", virtName, "namespace", virtNamespace) 73 | 74 | return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(virtualClient.Delete(ctx, &virtPod)) 75 | } 76 | 77 | return reconcile.Result{}, nil 78 | } 79 | -------------------------------------------------------------------------------- /charts/k3k/values.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: "" 2 | fullnameOverride: "" 3 | 4 | global: 5 | # -- Global override for container image registry 6 | imageRegistry: "" 7 | # -- Global override for container image registry pull secrets 8 | imagePullSecrets: [] 9 | 10 | serviceAccount: 11 | # Specifies whether a service account should be created 12 | create: true 13 | # The name of the service account to use. 14 | # If not set and create is true, a name is generated using the fullname template 15 | name: "" 16 | 17 | host: 18 | # clusterCIDR specifies the clusterCIDR that will be added to the default networkpolicy, if not set 19 | # the controller will collect the PodCIDRs of all the nodes on the system. 20 | clusterCIDR: "" 21 | 22 | controller: 23 | replicas: 1 24 | image: 25 | registry: "" 26 | repository: rancher/k3k 27 | tag: "" 28 | pullPolicy: "" 29 | 30 | imagePullSecrets: [] 31 | 32 | # extraEnv allows you to specify additional environment variables for the k3k controller deployment. 33 | # This is useful for passing custom configuration or secrets to the controller. 
34 | # For example: 35 | # extraEnv: 36 | # - name: MY_CUSTOM_VAR 37 | # value: "my_custom_value" 38 | # - name: ANOTHER_VAR 39 | # valueFrom: 40 | # secretKeyRef: 41 | # name: my-secret 42 | # key: my-key 43 | extraEnv: [] 44 | 45 | # resources allows you to set resources limits and requests for CPU and Memory 46 | # resources: 47 | # limits: 48 | # cpu: "200m" 49 | # memory: "200Mi" 50 | # requests: 51 | # cpu: "100m" 52 | # memory: "100Mi" 53 | resources: {} 54 | 55 | # configuration related to k3s server component in k3k 56 | server: 57 | imagePullSecrets: [] 58 | image: 59 | registry: 60 | repository: "rancher/k3s" 61 | pullPolicy: "" 62 | 63 | # configuration related to the agent component in k3k 64 | agent: 65 | imagePullSecrets: [] 66 | 67 | # configuration related to agent in shared mode 68 | shared: 69 | image: 70 | registry: "" 71 | repository: "rancher/k3k-kubelet" 72 | tag: "" 73 | pullPolicy: "" 74 | 75 | # Specifies the port range that will be used for k3k-kubelet api if mirrorHostNodes is enabled 76 | kubeletPortRange: "50000-51000" 77 | # Specifies the port range that will be used for webhook if mirrorHostNodes is enabled 78 | webhookPortRange: "51001-52000" 79 | 80 | # configuration related to agent in virtual mode 81 | virtual: 82 | image: 83 | registry: "" 84 | repository: "rancher/k3s" 85 | pullPolicy: "" 86 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | 16 | permissions: 17 | contents: read 18 | security-events: write # for github/codeql-action/upload-sarif to upload SARIF results 19 | 20 | steps: 21 | - name: Checkout code 22 | uses: actions/checkout@v4 23 | 24 | - name: Set up Go 25 | uses: actions/setup-go@v5 26 | with: 27 | go-version-file: go.mod 28 | 29 | - name: Set up QEMU 30 | uses: docker/setup-qemu-action@v3 31 | 32 | - name: Run GoReleaser 33 | uses: goreleaser/goreleaser-action@v6 34 | with: 35 | distribution: goreleaser 36 | version: v2 37 | args: --clean --snapshot 38 | env: 39 | REPO: ${{ github.repository }} 40 | REGISTRY: "" 41 | 42 | - name: Run Trivy vulnerability scanner (k3kcli) 43 | uses: aquasecurity/trivy-action@0.28.0 44 | with: 45 | ignore-unfixed: true 46 | severity: 'MEDIUM,HIGH,CRITICAL' 47 | scan-type: 'fs' 48 | scan-ref: 'dist/k3kcli_linux_amd64_v1/k3kcli' 49 | format: 'sarif' 50 | output: 'trivy-results-k3kcli.sarif' 51 | 52 | - name: Upload Trivy scan results to GitHub Security tab (k3kcli) 53 | uses: github/codeql-action/upload-sarif@v3 54 | with: 55 | sarif_file: trivy-results-k3kcli.sarif 56 | category: k3kcli 57 | 58 | - name: Run Trivy vulnerability scanner (k3k) 59 | uses: aquasecurity/trivy-action@0.28.0 60 | with: 61 | ignore-unfixed: true 62 | severity: 'MEDIUM,HIGH,CRITICAL' 63 | scan-type: 'image' 64 | scan-ref: '${{ github.repository }}:v0.0.0-amd64' 65 | format: 'sarif' 66 | output: 'trivy-results-k3k.sarif' 67 | 68 | - name: Upload Trivy scan results to GitHub Security tab (k3k) 69 | uses: github/codeql-action/upload-sarif@v3 70 | with: 71 | sarif_file: trivy-results-k3k.sarif 72 | category: k3k 73 | 74 | - name: Run Trivy vulnerability scanner (k3k-kubelet) 75 | uses: aquasecurity/trivy-action@0.28.0 76 | with: 77 | ignore-unfixed: true 78 | severity: 'MEDIUM,HIGH,CRITICAL' 79 | scan-type: 
'image' 80 | scan-ref: '${{ github.repository }}-kubelet:v0.0.0-amd64' 81 | format: 'sarif' 82 | output: 'trivy-results-k3k-kubelet.sarif' 83 | 84 | - name: Upload Trivy scan results to GitHub Security tab (k3k-kubelet) 85 | uses: github/codeql-action/upload-sarif@v3 86 | with: 87 | sarif_file: trivy-results-k3k-kubelet.sarif 88 | category: k3k-kubelet 89 | -------------------------------------------------------------------------------- /pkg/controller/cluster/server/config.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "fmt" 5 | 6 | "k8s.io/apimachinery/pkg/util/sets" 7 | 8 | v1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | 11 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 12 | "github.com/rancher/k3k/pkg/controller" 13 | "github.com/rancher/k3k/pkg/controller/cluster/agent" 14 | ) 15 | 16 | func (s *Server) Config(init bool, serviceIP string) (*v1.Secret, error) { 17 | name := configSecretName(s.cluster.Name, init) 18 | 19 | sans := sets.NewString(s.cluster.Spec.TLSSANs...) 20 | sans.Insert( 21 | serviceIP, 22 | ServiceName(s.cluster.Name), 23 | fmt.Sprintf("%s.%s", ServiceName(s.cluster.Name), s.cluster.Namespace), 24 | ) 25 | 26 | s.cluster.Status.TLSSANs = sans.List() 27 | 28 | config := serverConfigData(serviceIP, s.cluster, s.token) 29 | if init { 30 | config = initConfigData(s.cluster, s.token) 31 | } 32 | 33 | return &v1.Secret{ 34 | TypeMeta: metav1.TypeMeta{ 35 | Kind: "Secret", 36 | APIVersion: "v1", 37 | }, 38 | ObjectMeta: metav1.ObjectMeta{ 39 | Name: name, 40 | Namespace: s.cluster.Namespace, 41 | }, 42 | Data: map[string][]byte{ 43 | "config.yaml": []byte(config), 44 | }, 45 | }, nil 46 | } 47 | 48 | func serverConfigData(serviceIP string, cluster *v1beta1.Cluster, token string) string { 49 | return "cluster-init: true\nserver: https://" + serviceIP + "\n" + serverOptions(cluster, token) 50 | } 51 | 52 | func initConfigData(cluster *v1beta1.Cluster, token string) string { 53 | return "cluster-init: true\n" + serverOptions(cluster, token) 54 | } 55 | 56 | func serverOptions(cluster *v1beta1.Cluster, token string) string { 57 | var opts string 58 | 59 | // TODO: generate token if not found 60 | if token != "" { 61 | opts = "token: " + token + "\n" 62 | } 63 | 64 | if cluster.Status.ClusterCIDR != "" { 65 | opts = opts + "cluster-cidr: " + cluster.Status.ClusterCIDR + "\n" 66 | } 67 | 68 | if cluster.Status.ServiceCIDR != "" { 69 | opts = opts + "service-cidr: " + cluster.Status.ServiceCIDR + "\n" 70 | } 71 | 72 | if cluster.Spec.ClusterDNS != "" { 73 | opts = opts + "cluster-dns: " + cluster.Spec.ClusterDNS + "\n" 74 | } 75 | 76 | if len(cluster.Status.TLSSANs) > 0 { 77 | opts = opts + "tls-san:\n" 78 | for _, addr := range cluster.Status.TLSSANs { 79 | opts = opts + "- " + addr + "\n" 80 | } 81 | } 82 | 83 | if cluster.Spec.Mode != agent.VirtualNodeMode { 84 | opts = opts + "disable-agent: true\negress-selector-mode: disabled\ndisable:\n- servicelb\n- traefik\n- metrics-server\n- local-storage" 85 | } 86 | // TODO: Add extra args to the options 87 | 88 | return opts 89 | } 90 | 91 | func configSecretName(clusterName string, init bool) string { 92 | if !init { 93 | return controller.SafeConcatNameWithPrefix(clusterName, configName) 94 | } 95 | 96 | return controller.SafeConcatNameWithPrefix(clusterName, initConfigName) 97 | } 98 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: 
-------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*" 7 | workflow_dispatch: 8 | inputs: 9 | commit: 10 | type: string 11 | description: Checkout a specific commit 12 | 13 | permissions: 14 | contents: write 15 | packages: write 16 | id-token: write 17 | 18 | jobs: 19 | release: 20 | runs-on: ubuntu-latest 21 | 22 | steps: 23 | - name: Checkout code 24 | uses: actions/checkout@v4 25 | with: 26 | fetch-depth: 0 27 | fetch-tags: true 28 | 29 | - name: Checkout code at the specific commit 30 | if: inputs.commit != '' 31 | run: git checkout ${{ inputs.commit }} 32 | 33 | - name: Set up Go 34 | uses: actions/setup-go@v5 35 | with: 36 | go-version-file: go.mod 37 | 38 | - name: Set up QEMU 39 | uses: docker/setup-qemu-action@v3 40 | 41 | - name: "Read secrets" 42 | uses: rancher-eio/read-vault-secrets@main 43 | if: github.repository_owner == 'rancher' 44 | with: 45 | secrets: | 46 | secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials username | DOCKER_USERNAME ; 47 | secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials password | DOCKER_PASSWORD ; 48 | 49 | # Manually dispatched workflows (or forks) will use ghcr.io 50 | - name: Setup ghcr.io 51 | if: github.event_name == 'workflow_dispatch' || github.repository_owner != 'rancher' 52 | run: | 53 | echo "REGISTRY=ghcr.io" >> $GITHUB_ENV 54 | echo "DOCKER_USERNAME=${{ github.actor }}" >> $GITHUB_ENV 55 | echo "DOCKER_PASSWORD=${{ github.token }}" >> $GITHUB_ENV 56 | 57 | - name: Login to container registry 58 | uses: docker/login-action@v3 59 | with: 60 | registry: ${{ env.REGISTRY }} 61 | username: ${{ env.DOCKER_USERNAME }} 62 | password: ${{ env.DOCKER_PASSWORD }} 63 | 64 | # If the tag does not exists the workflow was manually triggered. 65 | # That means we are creating temporary nightly builds, with a "fake" local tag 66 | - name: Check release tag 67 | id: release-tag 68 | run: | 69 | CURRENT_TAG=$(git describe --tag --always --match="v[0-9]*") 70 | 71 | if git show-ref --tags ${CURRENT_TAG} --quiet; then 72 | echo "tag ${CURRENT_TAG} already exists"; 73 | else 74 | echo "tag ${CURRENT_TAG} does not exist" 75 | git tag ${CURRENT_TAG} 76 | fi 77 | 78 | echo "CURRENT_TAG=${CURRENT_TAG}" >> "$GITHUB_OUTPUT" 79 | 80 | - name: Run GoReleaser 81 | uses: goreleaser/goreleaser-action@v6 82 | with: 83 | distribution: goreleaser 84 | version: v2 85 | args: --clean 86 | env: 87 | GITHUB_TOKEN: ${{ github.token }} 88 | GORELEASER_CURRENT_TAG: ${{ steps.release-tag.outputs.CURRENT_TAG }} 89 | REGISTRY: ${{ env.REGISTRY }} 90 | REPO: ${{ github.repository }} 91 | -------------------------------------------------------------------------------- /charts/k3k/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "k3k.fullname" . }} 5 | labels: 6 | {{- include "k3k.labels" . | nindent 4 }} 7 | namespace: {{ .Release.Namespace }} 8 | spec: 9 | replicas: {{ .Values.controller.replicas }} 10 | selector: 11 | matchLabels: 12 | {{- include "k3k.selectorLabels" . | nindent 6 }} 13 | template: 14 | metadata: 15 | labels: 16 | {{- include "k3k.selectorLabels" . 
| nindent 8 }} 17 | spec: 18 | imagePullSecrets: {{- include "image.pullSecrets" (concat .Values.controller.imagePullSecrets .Values.global.imagePullSecrets) | nindent 8 }} 19 | containers: 20 | - image: "{{- include "controller.registry" .}}{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}" 21 | imagePullPolicy: {{ .Values.controller.image.pullPolicy }} 22 | name: {{ .Chart.Name }} 23 | {{- with .Values.controller.resources }} 24 | resources: 25 | {{- toYaml . | nindent 12 }} 26 | {{- end }} 27 | args: 28 | - k3k 29 | - --cluster-cidr={{ .Values.host.clusterCIDR }} 30 | - --k3s-server-image={{- include "server.registry" .}}{{ .Values.server.image.repository }} 31 | - --k3s-server-image-pull-policy={{ .Values.server.image.pullPolicy }} 32 | - --agent-shared-image={{- include "agent.shared.registry" .}}{{ .Values.agent.shared.image.repository }}:{{ default .Chart.AppVersion .Values.agent.shared.image.tag }} 33 | - --agent-shared-image-pull-policy={{ .Values.agent.shared.image.pullPolicy }} 34 | - --agent-virtual-image={{- include "agent.virtual.registry" .}}{{ .Values.agent.virtual.image.repository }} 35 | - --agent-virtual-image-pull-policy={{ .Values.agent.virtual.image.pullPolicy }} 36 | - --kubelet-port-range={{ .Values.agent.shared.kubeletPortRange }} 37 | - --webhook-port-range={{ .Values.agent.shared.webhookPortRange }} 38 | {{- range $key, $value := include "image.pullSecrets" (concat .Values.agent.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }} 39 | - --agent-image-pull-secret 40 | - {{ .name }} 41 | {{- end }} 42 | {{- range $key, $value := include "image.pullSecrets" (concat .Values.server.imagePullSecrets .Values.global.imagePullSecrets) | fromYamlArray }} 43 | - --server-image-pull-secret 44 | - {{ .name }} 45 | {{- end }} 46 | env: 47 | - name: CONTROLLER_NAMESPACE 48 | valueFrom: 49 | fieldRef: 50 | fieldPath: metadata.namespace 51 | {{- with .Values.controller.extraEnv }} 52 | {{- toYaml . | nindent 10 }} 53 | {{- end }} 54 | ports: 55 | - containerPort: 8080 56 | name: https 57 | protocol: TCP 58 | - containerPort: 9443 59 | name: https-webhook 60 | protocol: TCP 61 | serviceAccountName: {{ include "k3k.serviceAccountName" . }} 62 | -------------------------------------------------------------------------------- /docs/howtos/airgap.md: -------------------------------------------------------------------------------- 1 | # K3k Air Gap Installation Guide 2 | 3 | Applicable K3k modes: `virtual`, `shared` 4 | 5 | This guide describes how to deploy **K3k** in an **air-gapped environment**, including the packaging of required images, Helm chart configurations, and cluster creation using a private container registry. 6 | 7 | --- 8 | 9 | ## 1. 
Package Required Container Images 10 | 11 | ### 1.1: Follow K3s Air Gap Preparation 12 | 13 | Begin with the official K3s air gap packaging instructions: 14 | [K3s Air Gap Installation Docs](https://docs.k3s.io/installation/airgap) 15 | 16 | ### 1.2: Include K3k-Specific Images 17 | 18 | In addition to the K3s images, make sure to include the following in your image bundle: 19 | 20 | | Image Name | Description | 21 | | --------------------------- | --------------------------------------------------------------- | 22 | | `rancher/k3k:<tag>` | K3k controller image (replace `<tag>` with the desired version) | 23 | | `rancher/k3k-kubelet:<tag>` | K3k agent image for shared mode | 24 | | `rancher/k3s:<tag>` | K3s server/agent image for virtual clusters | 25 | 26 | Load these images into your internal (air-gapped) registry. 27 | 28 | --- 29 | 30 | ## 2. Configure Helm Chart for Air Gap Installation 31 | 32 | Update the `values.yaml` file in the K3k Helm chart with air gap settings: 33 | 34 | ```yaml 35 | controller: 36 | imagePullSecrets: [] # Optional 37 | image: 38 | repository: rancher/k3k 39 | tag: "" # Specify the version tag 40 | pullPolicy: "" # Optional: "IfNotPresent", "Always", etc. 41 | 42 | agent: 43 | imagePullSecrets: [] 44 | virtual: 45 | image: 46 | repository: rancher/k3s 47 | pullPolicy: "" # Optional 48 | shared: 49 | image: 50 | repository: rancher/k3k-kubelet 51 | tag: "" # Specify the version tag 52 | pullPolicy: "" # Optional 53 | 54 | server: 55 | imagePullSecrets: [] # Optional 56 | image: 57 | repository: rancher/k3s 58 | pullPolicy: "" # Optional 59 | ``` 60 | 61 | These values enforce the use of internal image repositories for the K3k controller, the agent, and the server. 62 | 63 | **Note**: All virtual clusters will automatically use these settings. 64 | 65 | --- 66 | 67 | ## 3. Enforce Registry in Virtual Clusters 68 | 69 | When creating a virtual cluster, use the `--system-default-registry` flag to ensure all system components (e.g., CoreDNS) pull from your internal registry: 70 | 71 | ```bash 72 | k3kcli cluster create \ 73 | --server-args "--system-default-registry=registry.internal.domain" \ 74 | my-cluster 75 | ``` 76 | 77 | This flag is passed directly to the K3s server in the virtual cluster, influencing all system workload image pulls. 78 | [K3s Server CLI Reference](https://docs.k3s.io/cli/server#k3s-server-cli-help) 79 | 80 | --- 81 | 82 | ## 4. Specify K3s Version for Virtual Clusters 83 | 84 | K3k allows specifying the K3s version used in each virtual cluster via the `--version` flag: 85 | 86 | ```bash 87 | k3kcli cluster create \ 88 | --version v1.29.4+k3s1 \ 89 | my-cluster 90 | ``` 91 | 92 | - If omitted, the **host cluster’s K3s version** is used by default, and the matching image might not exist in your registry if it was not included in the air gap bundle.
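For declarative setups, the same pinning can be expressed in a Cluster manifest. The sketch below assumes the `version` and `serverArgs` spec fields that back the CLI flags above; replace the version and registry with the ones from your air gap bundle:

```yaml
apiVersion: k3k.io/v1beta1
kind: Cluster
metadata:
  name: my-cluster
  namespace: default
spec:
  version: v1.29.4+k3s1   # must be present in your internal registry
  serverArgs:
    - --system-default-registry=registry.internal.domain
```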
93 | -------------------------------------------------------------------------------- /pkg/controller/cluster/cluster_suite_test.go: -------------------------------------------------------------------------------- 1 | package cluster_test 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "path/filepath" 7 | "testing" 8 | 9 | "github.com/go-logr/zapr" 10 | "go.uber.org/zap" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/client-go/kubernetes" 13 | "k8s.io/client-go/tools/record" 14 | "sigs.k8s.io/controller-runtime/pkg/client" 15 | "sigs.k8s.io/controller-runtime/pkg/envtest" 16 | 17 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 18 | ctrl "sigs.k8s.io/controller-runtime" 19 | 20 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 21 | "github.com/rancher/k3k/pkg/controller/cluster" 22 | "github.com/rancher/k3k/pkg/controller/cluster/agent" 23 | 24 | . "github.com/onsi/ginkgo/v2" 25 | . "github.com/onsi/gomega" 26 | ) 27 | 28 | func TestController(t *testing.T) { 29 | RegisterFailHandler(Fail) 30 | RunSpecs(t, "Cluster Controller Suite") 31 | } 32 | 33 | var ( 34 | testEnv *envtest.Environment 35 | k8s *kubernetes.Clientset 36 | k8sClient client.Client 37 | ctx context.Context 38 | cancel context.CancelFunc 39 | ) 40 | 41 | var _ = BeforeSuite(func() { 42 | By("bootstrapping test environment") 43 | testEnv = &envtest.Environment{ 44 | CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "k3k", "templates", "crds")}, 45 | ErrorIfCRDPathMissing: true, 46 | } 47 | 48 | // setting controller namespace env to activate port range allocator 49 | _ = os.Setenv("CONTROLLER_NAMESPACE", "default") 50 | 51 | cfg, err := testEnv.Start() 52 | Expect(err).NotTo(HaveOccurred()) 53 | 54 | k8s, err = kubernetes.NewForConfig(cfg) 55 | Expect(err).NotTo(HaveOccurred()) 56 | 57 | scheme := buildScheme() 58 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) 59 | Expect(err).NotTo(HaveOccurred()) 60 | 61 | ctrl.SetLogger(zapr.NewLogger(zap.NewNop())) 62 | 63 | mgr, err := ctrl.NewManager(cfg, ctrl.Options{Scheme: scheme}) 64 | Expect(err).NotTo(HaveOccurred()) 65 | 66 | // create the suite context before it is handed to the port allocator below 67 | ctx, cancel = context.WithCancel(context.Background()) 68 | 69 | portAllocator, err := agent.NewPortAllocator(ctx, mgr.GetClient()) 70 | Expect(err).NotTo(HaveOccurred()) 71 | 72 | err = mgr.Add(portAllocator.InitPortAllocatorConfig(ctx, mgr.GetClient(), "50000-51000", "51001-52000")) 73 | Expect(err).NotTo(HaveOccurred()) 74 | 75 | clusterConfig := &cluster.Config{ 76 | SharedAgentImage: "rancher/k3k-kubelet:latest", 77 | K3SServerImage: "rancher/k3s", 78 | VirtualAgentImage: "rancher/k3s", 79 | } 80 | err = cluster.Add(ctx, mgr, clusterConfig, 50, portAllocator, &record.FakeRecorder{}) 81 | Expect(err).NotTo(HaveOccurred()) 82 | 83 | go func() { 84 | defer GinkgoRecover() 85 | err = mgr.Start(ctx) 86 | Expect(err).NotTo(HaveOccurred(), "failed to run manager") 87 | }() 88 | }) 89 | 90 | var _ = AfterSuite(func() { 91 | cancel() 92 | 93 | By("tearing down the test environment") 94 | err := testEnv.Stop() 95 | Expect(err).NotTo(HaveOccurred()) 96 | }) 97 | 98 | func buildScheme() *runtime.Scheme { 99 | scheme := runtime.NewScheme() 100 | 101 | err := clientgoscheme.AddToScheme(scheme) 102 | Expect(err).NotTo(HaveOccurred()) 103 | err = v1beta1.AddToScheme(scheme) 104 | Expect(err).NotTo(HaveOccurred()) 105 | 106 | return scheme 107 | } 108 | -------------------------------------------------------------------------------- /k3k-kubelet/controller/syncer/persistentvolumeclaims_test.go:
-------------------------------------------------------------------------------- 1 | package syncer_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "k8s.io/apimachinery/pkg/api/resource" 9 | "k8s.io/utils/ptr" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | 12 | v1 "k8s.io/api/core/v1" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | 15 | "github.com/rancher/k3k/k3k-kubelet/controller/syncer" 16 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 17 | 18 | . "github.com/onsi/ginkgo/v2" 19 | . "github.com/onsi/gomega" 20 | ) 21 | 22 | var PVCTests = func() { 23 | var ( 24 | namespace string 25 | cluster v1beta1.Cluster 26 | ) 27 | 28 | BeforeEach(func() { 29 | ctx := context.Background() 30 | 31 | ns := v1.Namespace{ 32 | ObjectMeta: metav1.ObjectMeta{GenerateName: "ns-"}, 33 | } 34 | err := hostTestEnv.k8sClient.Create(ctx, &ns) 35 | Expect(err).NotTo(HaveOccurred()) 36 | 37 | namespace = ns.Name 38 | 39 | cluster = v1beta1.Cluster{ 40 | ObjectMeta: metav1.ObjectMeta{ 41 | GenerateName: "cluster-", 42 | Namespace: namespace, 43 | }, 44 | } 45 | err = hostTestEnv.k8sClient.Create(ctx, &cluster) 46 | Expect(err).NotTo(HaveOccurred()) 47 | 48 | err = syncer.AddPVCSyncer(ctx, virtManager, hostManager, cluster.Name, cluster.Namespace) 49 | Expect(err).NotTo(HaveOccurred()) 50 | }) 51 | 52 | AfterEach(func() { 53 | ns := v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} 54 | err := hostTestEnv.k8sClient.Delete(context.Background(), &ns) 55 | Expect(err).NotTo(HaveOccurred()) 56 | }) 57 | 58 | It("creates a pvc on the host cluster and virtual pv in virtual cluster", func() { 59 | ctx := context.Background() 60 | 61 | pvc := &v1.PersistentVolumeClaim{ 62 | ObjectMeta: metav1.ObjectMeta{ 63 | GenerateName: "pvc-", 64 | Namespace: "default", 65 | Labels: map[string]string{ 66 | "foo": "bar", 67 | }, 68 | }, 69 | Spec: v1.PersistentVolumeClaimSpec{ 70 | StorageClassName: ptr.To("test-sc"), 71 | AccessModes: []v1.PersistentVolumeAccessMode{ 72 | v1.ReadOnlyMany, 73 | }, 74 | Resources: v1.VolumeResourceRequirements{ 75 | Requests: v1.ResourceList{ 76 | "storage": resource.MustParse("1G"), 77 | }, 78 | }, 79 | }, 80 | } 81 | 82 | err := virtTestEnv.k8sClient.Create(ctx, pvc) 83 | Expect(err).NotTo(HaveOccurred()) 84 | 85 | By(fmt.Sprintf("Created PVC %s in virtual cluster", pvc.Name)) 86 | 87 | var hostPVC v1.PersistentVolumeClaim 88 | hostPVCName := translateName(cluster, pvc.Namespace, pvc.Name) 89 | 90 | Eventually(func() error { 91 | key := client.ObjectKey{Name: hostPVCName, Namespace: namespace} 92 | return hostTestEnv.k8sClient.Get(ctx, key, &hostPVC) 93 | }). 94 | WithPolling(time.Millisecond * 300). 95 | WithTimeout(time.Second * 10). 
96 | Should(BeNil()) 97 | 98 | By(fmt.Sprintf("Created PVC %s in host cluster", hostPVCName)) 99 | 100 | Expect(*hostPVC.Spec.StorageClassName).To(Equal("test-sc")) 101 | 102 | GinkgoWriter.Printf("labels: %v\n", hostPVC.Labels) 103 | 104 | var virtualPV v1.PersistentVolume 105 | key := client.ObjectKey{Name: pvc.Name} 106 | 107 | err = virtTestEnv.k8sClient.Get(ctx, key, &virtualPV) 108 | Expect(err).NotTo(HaveOccurred()) 109 | }) 110 | } 111 | -------------------------------------------------------------------------------- /cli/cmds/cluster_create_flags.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "errors" 5 | "time" 6 | 7 | "github.com/spf13/cobra" 8 | "k8s.io/apimachinery/pkg/api/resource" 9 | 10 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 11 | ) 12 | 13 | func createFlags(cmd *cobra.Command, cfg *CreateConfig) { 14 | cmd.Flags().IntVar(&cfg.servers, "servers", 1, "number of servers") 15 | cmd.Flags().IntVar(&cfg.agents, "agents", 0, "number of agents") 16 | cmd.Flags().StringVar(&cfg.token, "token", "", "token of the cluster") 17 | cmd.Flags().StringVar(&cfg.clusterCIDR, "cluster-cidr", "", "cluster CIDR") 18 | cmd.Flags().StringVar(&cfg.serviceCIDR, "service-cidr", "", "service CIDR") 19 | cmd.Flags().BoolVar(&cfg.mirrorHostNodes, "mirror-host-nodes", false, "Mirror Host Cluster Nodes") 20 | cmd.Flags().StringVar(&cfg.persistenceType, "persistence-type", string(v1beta1.DynamicPersistenceMode), "persistence mode for the nodes (dynamic, ephemeral)") 21 | cmd.Flags().StringVar(&cfg.storageClassName, "storage-class-name", "", "storage class name for dynamic persistence type") 22 | cmd.Flags().StringVar(&cfg.storageRequestSize, "storage-request-size", "", "storage size for dynamic persistence type") 23 | cmd.Flags().StringSliceVar(&cfg.serverArgs, "server-args", []string{}, "servers extra arguments") 24 | cmd.Flags().StringSliceVar(&cfg.agentArgs, "agent-args", []string{}, "agents extra arguments") 25 | cmd.Flags().StringSliceVar(&cfg.serverEnvs, "server-envs", []string{}, "servers extra Envs") 26 | cmd.Flags().StringSliceVar(&cfg.agentEnvs, "agent-envs", []string{}, "agents extra Envs") 27 | cmd.Flags().StringArrayVar(&cfg.labels, "labels", []string{}, "Labels to add to the cluster object (e.g. key=value)") 28 | cmd.Flags().StringArrayVar(&cfg.annotations, "annotations", []string{}, "Annotations to add to the cluster object (e.g. 
key=value)") 29 | cmd.Flags().StringVar(&cfg.version, "version", "", "k3s version") 30 | cmd.Flags().StringVar(&cfg.mode, "mode", "shared", "k3k mode type (shared, virtual)") 31 | cmd.Flags().StringVar(&cfg.kubeconfigServerHost, "kubeconfig-server", "", "override the kubeconfig server host") 32 | cmd.Flags().StringVar(&cfg.policy, "policy", "", "The policy to create the cluster in") 33 | cmd.Flags().StringVar(&cfg.customCertsPath, "custom-certs", "", "The path for custom certificate directory") 34 | cmd.Flags().DurationVar(&cfg.timeout, "timeout", 3*time.Minute, "The timeout for waiting for the cluster to become ready (e.g., 10s, 5m, 1h).") 35 | } 36 | 37 | func validateCreateConfig(cfg *CreateConfig) error { 38 | if cfg.servers <= 0 { 39 | return errors.New("invalid number of servers") 40 | } 41 | 42 | if cfg.persistenceType != "" { 43 | switch v1beta1.PersistenceMode(cfg.persistenceType) { 44 | case v1beta1.EphemeralPersistenceMode, v1beta1.DynamicPersistenceMode: 45 | return nil 46 | default: 47 | return errors.New(`persistence-type should be one of "dynamic" or "ephemeral"`) 48 | } 49 | } 50 | 51 | if _, err := resource.ParseQuantity(cfg.storageRequestSize); err != nil { 52 | return errors.New(`invalid storage size, should be a valid resource quantity e.g "10Gi"`) 53 | } 54 | 55 | if cfg.mode != "" { 56 | switch cfg.mode { 57 | case string(v1beta1.VirtualClusterMode), string(v1beta1.SharedClusterMode): 58 | return nil 59 | default: 60 | return errors.New(`mode should be one of "shared" or "virtual"`) 61 | } 62 | } 63 | 64 | return nil 65 | } 66 | -------------------------------------------------------------------------------- /pkg/controller/cluster/service.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "k8s.io/apimachinery/pkg/api/equality" 8 | "k8s.io/apimachinery/pkg/types" 9 | "sigs.k8s.io/controller-runtime/pkg/manager" 10 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 11 | 12 | v1 "k8s.io/api/core/v1" 13 | ctrl "sigs.k8s.io/controller-runtime" 14 | ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" 15 | 16 | "github.com/rancher/k3k/k3k-kubelet/translate" 17 | ) 18 | 19 | const ( 20 | serviceController = "k3k-service-controller" 21 | ) 22 | 23 | type ServiceReconciler struct { 24 | HostClient ctrlruntimeclient.Client 25 | } 26 | 27 | // Add adds a new controller to the manager 28 | func AddServiceController(ctx context.Context, mgr manager.Manager, maxConcurrentReconciles int) error { 29 | reconciler := ServiceReconciler{ 30 | HostClient: mgr.GetClient(), 31 | } 32 | 33 | return ctrl.NewControllerManagedBy(mgr). 34 | Named(serviceController). 35 | For(&v1.Service{}). 36 | WithEventFilter(newClusterPredicate()). 37 | Complete(&reconciler) 38 | } 39 | 40 | func (r *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { 41 | log := ctrl.LoggerFrom(ctx) 42 | log.V(1).Info("Reconciling Service") 43 | 44 | var hostService v1.Service 45 | if err := r.HostClient.Get(ctx, req.NamespacedName, &hostService); err != nil { 46 | return reconcile.Result{}, ctrlruntimeclient.IgnoreNotFound(err) 47 | } 48 | 49 | // Some services are owned by the cluster but don't have the annotations set (i.e. 
50 | // They don't exist in the virtual cluster, so we can skip them
51 | 
52 | virtualServiceName, virtualServiceNameFound := hostService.Annotations[translate.ResourceNameAnnotation]
53 | virtualServiceNamespace, virtualServiceNamespaceFound := hostService.Annotations[translate.ResourceNamespaceAnnotation]
54 | 
55 | if !virtualServiceNameFound || !virtualServiceNamespaceFound {
56 | log.V(1).Info(fmt.Sprintf("Service %s/%s does not have virtual service annotations, skipping", hostService.Namespace, hostService.Name))
57 | return reconcile.Result{}, nil
58 | }
59 | 
60 | // get cluster from the object
61 | cluster := clusterNamespacedName(&hostService)
62 | 
63 | virtualClient, err := newVirtualClient(ctx, r.HostClient, cluster.Name, cluster.Namespace)
64 | if err != nil {
65 | return reconcile.Result{}, fmt.Errorf("failed to get cluster info: %v", err)
66 | }
67 | 
68 | if !hostService.DeletionTimestamp.IsZero() {
69 | return reconcile.Result{}, nil
70 | }
71 | 
72 | virtualServiceKey := types.NamespacedName{
73 | Name: virtualServiceName,
74 | Namespace: virtualServiceNamespace,
75 | }
76 | 
77 | var virtualService v1.Service
78 | if err := virtualClient.Get(ctx, virtualServiceKey, &virtualService); err != nil {
79 | return reconcile.Result{}, fmt.Errorf("failed to get virtual service: %v", err)
80 | }
81 | 
82 | if !equality.Semantic.DeepEqual(virtualService.Status.LoadBalancer, hostService.Status.LoadBalancer) {
83 | log.V(1).Info("Updating Virtual Service Status", "name", virtualServiceName, "namespace", virtualServiceNamespace)
84 | 
85 | virtualService.Status.LoadBalancer = hostService.Status.LoadBalancer
86 | 
87 | if err := virtualClient.Status().Update(ctx, &virtualService); err != nil {
88 | return reconcile.Result{}, err
89 | }
90 | }
91 | 
92 | return reconcile.Result{}, nil
93 | }
94 | 
--------------------------------------------------------------------------------
/tests/testdata/customcerts/root-ca.key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIJKQIBAAKCAgEA7REkvtFe09G96zdIGvf81k4YGhiHzqCogq9gkkeGs9IY6ZiE
3 | gJUuaaZYVq7rRocnb2kf+71e4Bq7Z9KbZqonRHNF0nkE6LKlL9vUmfoWwiE2RtOU
4 | xWzBhl5haP9Sx4SfGaYM1AanO4pzl5hAWwsoTll/n4XXaCSYlCF+5dkd3G8jZOyO
5 | yhYmdSQw56Mg1hTDiS718DqnkwiUHrEvtqqXl6XWs4LZEUIu2joWbV5wrCR5xW6S
6 | /VRU5lqau7zCAPcZA72t//NEuE9lEosjeP6rlGoIz/LIuTyGhgVcfWyYZyh1HU6z
7 | z2VhxX8Zu+BlBu3p+DrUfZTkzE1ePV7qHzaSWidiuQ8/q5OoCGKdkfVvq/AjdRwV
8 | 3ixeViQr8OeC6XKdFdgQPjAEaiI/FE1MprvvqyKPloynBw09Kp9/a4Toaid3V8p1
9 | uFBf/eSsAe3uVCPjsWfbCeKDTxbj+Tx6wyDV7GBtAwzNykw8QPJnplUhRjHYZt9Q
10 | zw9hlL1D5bqZ3ergnqXXAXPXnfvYE3ksReXAOkN4FTkGvKJPU7NBOQ8BmgiMY2rM
11 | g/jCDkX9wZL9Gd5QTV4nFg54DxVOcmt9Bnk2UIM6oZRqF3oY2H0cVdvKYIkeXo5d
12 | P65rGBn4lj3X2uEMtC58Uih49X9kdYxAeeTkQE6N3/pOBM+W3Y4ZOCpQPeUCAwEA
13 | AQKCAgA46iK+RRnVFMfZzr3a66qh8MHMkhMYwm1yYpR2ygFG7qvYeStmi2pHJw6S
14 | URBfMFeBYeWx1HcQqppPhLqWXUdsIZijvTY2f5007jwOc4I/PSYAvw86jq+viL0u
15 | Lg47pFVmHP17cdV4b/bscDsTIIyestH3BHUApbiT567Fk+idYXlH45ssXUECYpvz
16 | ILDjdLy1FLcq44oTvL2C2NsxqacXW1M+aa2ffRoufj+gJko6qc8qXS+g7jwryZjY
17 | darF/Ize3w0FI+xdq4ICf7EWfV3IFeTjt9AE2MkbJ/JaklXMfmQPkzOMGTTt62PM
18 | PUVD8p53X/hf5f1AJ1r2tPDUaY+c6wYOD6WwYgXmGgeLXh2D+/csPSzDkml6IGKM
19 | ZN1qSI5PLzrxdGGrLOasYYl4R/TQD5KVN01mIBDOilWH+KdfYBF/TlpP/k6xb/Zy
20 | 1rSN2Giq8L7yDK8UaR9IvBM3xyclQ3VAuSUmwj24ZJJ2YnOa1rAg2WxRhjUVuS9b
21 | 3Qvw+Ifs2AHQyhl3f4F4QAVe7+KhlSbeP5pSLfCIGFPv7CZYz+ijaGcHL2P76aK5
22 | EYioP4aijr8ZUw4pvy3IY2FEjRU4Jry6RNpWQDsbpuZar1JbSnhumeXVP9ph2Q1v
23 |
iNVpo7HEWxiroP+7dPoGmTpGhJ369nWAKgI5am/9uBC317h8gQKCAQEA+jskxC1b 24 | ckZX5czPacWsj1AXcSkr+ZP/NNBMiqPRaXaS/K2qTgWFx54t12Sii4QcBl8P6PRP 25 | zJgKm4Q5VOVo+4pOWHuPHncAUTBQj9ShVTBk2PCqB/QR7dxEsLsWyturciXFtzL+ 26 | 7gN/dA0n8rO74nvqjXRiaJkMcBLnP+YG6hl5BCVFsOPWAA5W+XnvYhKvGmnVQznw 27 | +4dAZN743ZUXMnkIBPerCrfyz/1sv0EsSKysno4y5jUNjIAIdYm41vwnRi2cT1Xn 28 | uGyPDkypQd+3d9BVScf4P2eD12y6S1htcSa2UeFLqM2HD5QsWnOohDxFdz66WEsU 29 | IiIAbvZ4YhLicQKCAQEA8ohOVb/Bilqw6WkYGfOSNi2ZRvE9W2rCB5cMmeOQ3GWc 30 | xGBi4xVzO3Tqn1VbaIkbILPIkcINTLPbNJBTklQpTcEd4ib22A2ixqwJJPfhIVBJ 31 | kvPUtuikzNBa+JELWXoZtYS04i8NgFEeZLKAYxyGVdI5+dnnQeV2ZkugyYUcVxg3 32 | f2r58uGIQKEIETnjXSgJW5316/adPmk3xUm+SbpEthjFG0XedHrXMjVDm9VH8qjV 33 | R5d0UfSBCmy7nqYE/xpZNGFytkd0BK9x+pMfR+3OkuuMumgw656Tpp8dJ6/XPg4l 34 | e403WR501ffc2Fz6hwOOmkE1lf9euG62AaWAF6JktQKCAQEA8wimePMksiSoEkWN 35 | 3clkA/1iB0JZt5mKcR0ueikJp0jHEisKEaVDfdGf7GeNh7vUDEwgA73mE2xIQSt3 36 | E4GNKWH3HfFD2+7wm+o1FL0LxNWv3RRB0F+5WjBpdsz/Ih+gsMkG8xvQhhNXort0 37 | ZUEz5pE8Cg9T1QtxDRkPCPy9EnmTE/evbFKc8oj66GsJmVNURm8r9pM7/tAqNs5p 38 | H61CTn9GzqxNr6dhaalWCZufCybKsWSjAvvcIO3pSV9t60AUVRDPlC53VKP7fYPv 39 | kE9cvj3V2EckUVCUuJKdjbhg81kKExSii1yzJOpg+akDrwtq3JpMGp0w/MXRbfRs 40 | j7SPwQKCAQAuvG+D/Ki6FZHj2LmpPpOdVxojXpd5R1BOkCAAg6bFodscyIolwltr 41 | SLNxsswjj9AndB2hYOiZMEt8jJdeKlOvRRiSHPoSVkZYzIwSkKXUeplC9TO3b4ta 42 | YIg3QBQU0P+lSAZnU7PhV0BpHTC6aKPGY/WCHSiAPUycl9RLIRh9/A+twRqbYDSW 43 | Z7GbSDF1ISL0gbMDHoncnf/+R6CgqoFVKd7Jy6P7hDR122fE3su3iitXWWsz487+ 44 | CEf7YXizBAvOmTy2vXww7vIi3Dj57myRSUzcGvnaXbuMLzs6C5uJvMYiUesphEH1 45 | fYcUNo6cd+YB+bDuz8AcAFGYbaEKbOPFAoIBAQDoP7ah0fc5kEojuyhl/lQATA6P 46 | B1lqd0GK7WNBGHkWZuFI5CPcyljxqF5Ki79mjknhDW2Tr8teUVhiwuRWfKWJaoJo 47 | 5E4tbJbbVluIzqj1ym8jSv2jaPp6VqTGJndmddB87rOTdZ1v3tVYIu9rc63+gyUg 48 | 96DT3YGxNc8KVU9XGdkoD9aCIlTg8IPBflzTiXRqv7YdSUYf6/2jhYQ/A87s+aVe 49 | SOJ6XxOkCEwQcJAjuBRZV96gHmOBeQW5mxt60LhsQNBISvz89p03AJ7KRk80KEN+ 50 | cd+ZSly24lXSjAwV2WQuuZBbMzffVOJvFixnK4EK3T2XWZ6mCfTPJr4TCkaI 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/intermediate-ca.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJJwIBAAKCAgEA1/EkfGUyr9SX7tdq9zGuDrHyOhdSfRPhNtpiJAlVErTPyPjb 3 | IOHW4lJDzZKQIHWW2QXbEfNDrmKo/H2fq6W5M/98SgY6IbF9l8iqT4f6Y9u6Yk93 4 | ufeGa2o/A+aOFgI9mktp1PrSuM1qWQziFRfY7Q/Be296M4NRouAX1gtQlsh2hm+T 5 | Mo1y/ZtOD2Cu5bQCgDMSoAothEOHnw1dAzKdzc5CecAzNTI8EG6SEzs/a3Ky59XQ 6 | cmD9lyyAt6TyGm4FDZPVBVStkUCA5eFkJysrk4YpyNCHlujW9xshGvgO18dP8mXl 7 | GEDqrkipMCJL91BUCE1izSUMeAXr2RSAjaIeqy/ZrC5beT1D6Lmr28yWj+H3DGmP 8 | ztxt/EukGRLaj+H7rMHUN0jpTVZGj9CkWne/Ox22v8bxYQXHAvU4wN7CMD4SyOFK 9 | MjQEGubsILa4mQ1Eze/3uv1EY3MEJgtCfcjGqrNWM44SEnNBzcbwUpEmPXM8CRRG 10 | 9O6ziz1zu90w6nWxV3vxv05tcVrRMMUM971lC7lgYS8YaJsqOnlwXAnYzanCO/nd 11 | lQNLizGqp1dEGZXFwKRS2vBEvfk8iltON7VRk/xEnrqJeddQ9yjYErJ301jKJpQB 12 | biBnPlZE7N+A92cj8CsZzlFOGT47myBxvrZ5T+cAM8awby35URRd8mM/X2kCAwEA 13 | AQKCAgAcJAAxt8xhro44IVl+qjo5DwZ2fIiS5S7Rw6bLdG3iOK7lTUzdHaEvsDHG 14 | zeU7XaeRU5qHXdDBnnjQIpzWtQuME6zCRsp3jpZD2/IZ2CoQrlc9LYb4NKIPSHK6 15 | 0uZMRvF5NH+vshoY3CgSP8QMpKVvy0BXEiF2KhRO6e4hRisz4x7TCSJBEb7c/squ 16 | 5VYVeB5lT4KLWZgx5sz7NLYczxFSeyEdlab2tTuvloExwWRT38ghvzLhXPNfKn0B 17 | sZydRh4hdVTq02ylA6dojbVMB2uv12mFkmtBBsnQvPfU+GOSyhYIjC/NN9R9btmh 18 | fK6ypYS16kPIYR4dXAQur2XLT0e4oPyloIJCLsM5e1kOToQaHXRz5HzAhSdue7Cx 19 | 18jYe3IdZb1a7T+JoDbCp/9fqTSpuIecSwzfQK67/NSBr8oRZr/dUDzlmFA9zo7+ 20 | 
BYzY+7l+6hfpYlaEzCtF1wrZkgjk/aoRjjKTO1HdbKs1vd5bFcnlf82ZDP2eDZhL 21 | QTURY8hc+DD8+92iqiLJfd+MlOznMt9xo2JmwEsb0q3aGoQpm5PZNx7VdnNl4C/B 22 | +JcatbGQX19r1JiZ+BbxdvssLUbWq+qqnYqijyk7XFV4TFmq3lKd6CCoYE16KupW 23 | n94LBWHkTMRjbSMDxL5hLTz2SbwAwZMuCmjd68vZ4TcNaxzM2wKCAQEA8x5oJMxG 24 | uy9h2sbJ6M91Cr7Q2qfbGNSOg2G5H+x2J15rdtUNB+IaJGXwwuu62nhQQ7QZ8Oi/ 25 | GL4Gro7THs0+StKOyhj3YEU5bsCLTisBjW0FVlFadW/f2XdP573hxpGIRj08EA3e 26 | 1RA9qoFA+9uzjB1wbBEdXVzQJoPmKAGP9yzK4KKRZ41LNo2ElCc2tZQBHDWrjcaG 27 | 6xAPGsBF5/iWu738hxMJZXb17nz5klmoYBQ0j+yBEi4xoNPGNf7SsvCGBwTdkMZv 28 | v97u0oZK1RJrJxJvR/Wj7xDs4ff1U/OLAaxRW2cVrF3QIdUovblEVwnklHEC5UsC 29 | O0JwsE5SOcYzbwKCAQEA42IeAnVxdGJskMtU9M5igkEFFK3rtCI+rFUX1IPU7ZGN 30 | +RFmVeBuauSYy84+3kSJXiEjM3H//DuT7oXiK7GWAgU/4z1YOePGsWIa/W1O0sD/ 31 | oZ0KY/QDd38jQShMbc2hcVZRkthSE1UTZlkGXsduIHY/wsAHx5K9NiFXvQ8F0d7E 32 | Es4h5ryz7e+5a4rAdjbcuqw6+nLlEbgtn9U3f43euLzWys+QTI7Ojof5JTzVgvXf 33 | cQsOfszWkoDKFUEm26az3Gt09fzjR8GR8xucvIed4SF5rS1zkRDxPQMMQGaI0JHn 34 | Hu5SBlQenScRQ39nuPnu5QiCtYQMyLJM0qPkYThOpwKCAQA9PhKj+mVy78uprdvc 35 | 7q2gKFM6UYBqr9i6ldpphUp6Plm51I90xesp8hgFMhaexCIL/Alw22CQHgZW4Jmk 36 | L7WaaZIYrNNcB/QgxxYQedrpQmZOyS2NWcI86MZTLUz7lVuLvg8sSCIy7+Vo1yiE 37 | iWKgUCYqwuDvzNqOaTmIKGSYskrk7W7NdBVXR6z3GS257e+dqJNvomwIOMJlTbwO 38 | ZFusLX64k/4Q9jebfRXtXPKCSXS4MK6O4t4TkmVi4q827koE6J/bwXETF1h35eZh 39 | 6ELf08/+g41pQo05mxnMrRP+NudDrCMUiYlNjIG30Ty65D4VeqZtFkkYnnL+pqwl 40 | 65y3AoIBAD6lYa87vC5cj5y06Isp8WoBj+zKng3bAXlpWE9sotVxLLRaXt96HfHF 41 | WXONNzT1nQMaDiC2X9iWcYNdz5pKKxITcC6jUBNi9fMZHGaGHxlhowxbv+kZ6Xqa 42 | xJPHDoeSB9C5/299ud8pqVahYGfseiLncVmunnYVr5uiRBIKeYgA3/RuZliz1L7R 43 | NTyz1aK8KsQjf3xQ+1uOasOGcuvpolsza9okpZTyI2aRf8sKn6idJRp3+V5mARgL 44 | 86E3egU6QIOR939uVRAH/LYF/YDTvGOyXVuhEh39lPlCRbXYigksqYiUEHU959FU 45 | WiVGjMUh1vezCJAJ+ZuxxAikrt86LDECggEANzLem5zdf3PIl3wKwcKau1IhRO11 46 | c3hlpoTf0u/A1+tBXMXeSw7Shl1vGgVnV9IQ1eL4Lyxpj9c8724LVVNLcB7vU0DF 47 | 5WQw0bjV8PS0wg4YpJ8wSToE3FNSn1wAqn8zrmC5/y49vvOQsYjtyGMjr4Vn5P6h 48 | ZKwSsKX1YMyaz/28kxxvOlwrWegaqXDWjwfOvYLIsLcJK33cXoKSYfe45HP55YK6 49 | Rbc3ZdfZXLohCSxwcnAHefIjeTlcSVI3JZafF+Mn1U50f2CxK0Thr70waSSf/x4C 50 | pP9KxgoP2URf7xLYK7tM40Lj+Mhwp+SAK2K00xLyxBySHD6FQE8ifFeZ4g== 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /pkg/controller/cluster/token.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "context" 5 | "crypto/rand" 6 | "encoding/hex" 7 | "fmt" 8 | 9 | "k8s.io/apimachinery/pkg/types" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 12 | 13 | v1 "k8s.io/api/core/v1" 14 | apierrors "k8s.io/apimachinery/pkg/api/errors" 15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 16 | ctrl "sigs.k8s.io/controller-runtime" 17 | 18 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 19 | "github.com/rancher/k3k/pkg/controller" 20 | ) 21 | 22 | func (c *ClusterReconciler) token(ctx context.Context, cluster *v1beta1.Cluster) (string, error) { 23 | if cluster.Spec.TokenSecretRef == nil { 24 | return c.ensureTokenSecret(ctx, cluster) 25 | } 26 | // get token data from secretRef 27 | nn := types.NamespacedName{ 28 | Name: cluster.Spec.TokenSecretRef.Name, 29 | Namespace: cluster.Spec.TokenSecretRef.Namespace, 30 | } 31 | 32 | var tokenSecret v1.Secret 33 | 34 | if err := c.Client.Get(ctx, nn, &tokenSecret); err != nil { 35 | return "", err 36 | } 37 | 38 | if _, ok := tokenSecret.Data["token"]; !ok { 39 | return "", fmt.Errorf("no token 
field in secret %s/%s", nn.Namespace, nn.Name) 40 | } 41 | 42 | return string(tokenSecret.Data["token"]), nil 43 | } 44 | 45 | func (c *ClusterReconciler) ensureTokenSecret(ctx context.Context, cluster *v1beta1.Cluster) (string, error) { 46 | log := ctrl.LoggerFrom(ctx) 47 | 48 | // check if the secret is already created 49 | key := types.NamespacedName{ 50 | Name: TokenSecretName(cluster.Name), 51 | Namespace: cluster.Namespace, 52 | } 53 | 54 | var tokenSecret v1.Secret 55 | if err := c.Client.Get(ctx, key, &tokenSecret); err != nil { 56 | if !apierrors.IsNotFound(err) { 57 | return "", err 58 | } 59 | } 60 | 61 | if tokenSecret.Data != nil { 62 | return string(tokenSecret.Data["token"]), nil 63 | } 64 | 65 | log.V(1).Info("Token secret is not specified, creating a random token") 66 | 67 | token, err := random(16) 68 | if err != nil { 69 | return "", err 70 | } 71 | 72 | tokenSecret = TokenSecretObj(token, cluster.Name, cluster.Namespace) 73 | key = client.ObjectKeyFromObject(&tokenSecret) 74 | 75 | result, err := controllerutil.CreateOrUpdate(ctx, c.Client, &tokenSecret, func() error { 76 | return controllerutil.SetControllerReference(cluster, &tokenSecret, c.Scheme) 77 | }) 78 | 79 | if result != controllerutil.OperationResultNone { 80 | log.V(1).Info("Ensuring tokenSecret", "key", key, "result", result) 81 | } 82 | 83 | return token, err 84 | } 85 | 86 | func random(size int) (string, error) { 87 | token := make([]byte, size) 88 | 89 | _, err := rand.Read(token) 90 | if err != nil { 91 | return "", err 92 | } 93 | 94 | return hex.EncodeToString(token), err 95 | } 96 | 97 | func TokenSecretObj(token, name, namespace string) v1.Secret { 98 | return v1.Secret{ 99 | TypeMeta: metav1.TypeMeta{ 100 | APIVersion: "v1", 101 | Kind: "Secret", 102 | }, 103 | ObjectMeta: metav1.ObjectMeta{ 104 | Name: TokenSecretName(name), 105 | Namespace: namespace, 106 | }, 107 | Data: map[string][]byte{ 108 | "token": []byte(token), 109 | }, 110 | } 111 | } 112 | 113 | func TokenSecretName(clusterName string) string { 114 | return controller.SafeConcatNameWithPrefix(clusterName, "token") 115 | } 116 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | pull_request: 6 | workflow_dispatch: 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | lint: 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - name: Checkout code 17 | uses: actions/checkout@v4 18 | 19 | - uses: actions/setup-go@v5 20 | with: 21 | go-version-file: go.mod 22 | 23 | - name: golangci-lint 24 | uses: golangci/golangci-lint-action@v8 25 | with: 26 | args: --timeout=5m 27 | version: v2.3.0 28 | 29 | validate: 30 | runs-on: ubuntu-latest 31 | 32 | steps: 33 | - name: Checkout code 34 | uses: actions/checkout@v4 35 | 36 | - uses: actions/setup-go@v5 37 | with: 38 | go-version-file: go.mod 39 | 40 | - name: Validate 41 | run: make validate 42 | 43 | tests: 44 | runs-on: ubuntu-latest 45 | needs: validate 46 | 47 | steps: 48 | - name: Checkout code 49 | uses: actions/checkout@v4 50 | 51 | - uses: actions/setup-go@v5 52 | with: 53 | go-version-file: go.mod 54 | 55 | - name: Run unit tests 56 | run: make test-unit 57 | 58 | - name: Upload coverage reports to Codecov 59 | uses: codecov/codecov-action@v5 60 | with: 61 | token: ${{ secrets.CODECOV_TOKEN }} 62 | files: ./cover.out 63 | flags: unit 64 | 65 | tests-cli: 66 | runs-on: ubuntu-latest 67 | needs: validate 
68 | 
69 | steps:
70 | - name: Checkout code
71 | uses: actions/checkout@v4
72 | with:
73 | fetch-depth: 0
74 | fetch-tags: true
75 | 
76 | - uses: actions/setup-go@v5
77 | with:
78 | go-version-file: go.mod
79 | 
80 | - name: Install Ginkgo
81 | run: go install github.com/onsi/ginkgo/v2/ginkgo
82 | 
83 | - name: Setup environment
84 | run: |
85 | mkdir ${{ github.workspace }}/covdata
86 | 
87 | echo "COVERAGE=true" >> $GITHUB_ENV
88 | echo "GOCOVERDIR=${{ github.workspace }}/covdata" >> $GITHUB_ENV
89 | echo "K3S_HOST_VERSION=v1.32.1+k3s1" >> $GITHUB_ENV
90 | 
91 | - name: Build and package
92 | run: |
93 | make build
94 | make package
95 | 
96 | # add k3kcli to $PATH
97 | echo "${{ github.workspace }}/bin" >> $GITHUB_PATH
98 | 
99 | - name: Check k3kcli
100 | run: k3kcli -v
101 | 
102 | - name: Run cli tests
103 | env:
104 | K3K_DOCKER_INSTALL: "true"
105 | K3S_HOST_VERSION: "${{ env.K3S_HOST_VERSION }}"
106 | run: make test-cli
107 | 
108 | - name: Convert coverage data
109 | run: go tool covdata textfmt -i=${{ github.workspace }}/covdata -o ${{ github.workspace }}/covdata/cover.out
110 | 
111 | - name: Upload coverage reports to Codecov
112 | uses: codecov/codecov-action@v5
113 | with:
114 | token: ${{ secrets.CODECOV_TOKEN }}
115 | files: ${{ github.workspace }}/covdata/cover.out
116 | flags: cli
117 | 
118 | - name: Archive k3s logs
119 | uses: actions/upload-artifact@v4
120 | if: always()
121 | with:
122 | name: cli-k3s-logs
123 | path: /tmp/k3s.log
124 | 
125 | - name: Archive k3k logs
126 | uses: actions/upload-artifact@v4
127 | if: always()
128 | with:
129 | name: cli-k3k-logs
130 | path: /tmp/k3k.log
131 | 
--------------------------------------------------------------------------------
/cli/cmds/table_printer.go:
--------------------------------------------------------------------------------
1 | package cmds
2 | 
3 | import (
4 | "k8s.io/apimachinery/pkg/runtime"
5 | "k8s.io/client-go/util/jsonpath"
6 | 
7 | apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | )
10 | 
11 | // createTable creates a table to print from the printer columns defined in the CRD spec, plus the name at the beginning
12 | func createTable[T runtime.Object](crd *apiextensionsv1.CustomResourceDefinition, objs []T) *metav1.Table {
13 | printerColumns := getPrinterColumnsFromCRD(crd)
14 | 
15 | return &metav1.Table{
16 | TypeMeta: metav1.TypeMeta{APIVersion: "meta.k8s.io/v1", Kind: "Table"},
17 | ColumnDefinitions: convertToTableColumns(printerColumns),
18 | Rows: createTableRows(objs, printerColumns),
19 | }
20 | }
21 | 
22 | func getPrinterColumnsFromCRD(crd *apiextensionsv1.CustomResourceDefinition) []apiextensionsv1.CustomResourceColumnDefinition {
23 | printerColumns := []apiextensionsv1.CustomResourceColumnDefinition{
24 | {Name: "Name", Type: "string", Format: "name", Description: "Name of the Resource", JSONPath: ".metadata.name"},
25 | }
26 | 
27 | for _, version := range crd.Spec.Versions {
28 | if version.Name == "v1beta1" {
29 | printerColumns = append(printerColumns, version.AdditionalPrinterColumns...)
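// only the v1beta1 columns are rendered; once a match is found, the remaining served versions are skipped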
30 | break 31 | } 32 | } 33 | 34 | return printerColumns 35 | } 36 | 37 | func convertToTableColumns(printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []metav1.TableColumnDefinition { 38 | var columnDefinitions []metav1.TableColumnDefinition 39 | 40 | for _, col := range printerColumns { 41 | columnDefinitions = append(columnDefinitions, metav1.TableColumnDefinition{ 42 | Name: col.Name, 43 | Type: col.Type, 44 | Format: col.Format, 45 | Description: col.Description, 46 | Priority: col.Priority, 47 | }) 48 | } 49 | 50 | return columnDefinitions 51 | } 52 | 53 | func createTableRows[T runtime.Object](objs []T, printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []metav1.TableRow { 54 | var rows []metav1.TableRow 55 | 56 | for _, obj := range objs { 57 | objMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&obj) 58 | if err != nil { 59 | rows = append(rows, metav1.TableRow{Cells: []any{""}}) 60 | continue 61 | } 62 | 63 | rows = append(rows, metav1.TableRow{ 64 | Cells: buildRowCells(objMap, printerColumns), 65 | Object: runtime.RawExtension{Object: obj}, 66 | }) 67 | } 68 | 69 | return rows 70 | } 71 | 72 | func buildRowCells(objMap map[string]any, printerColumns []apiextensionsv1.CustomResourceColumnDefinition) []any { 73 | var cells []any 74 | 75 | for _, printCol := range printerColumns { 76 | j := jsonpath.New(printCol.Name) 77 | 78 | err := j.Parse("{" + printCol.JSONPath + "}") 79 | if err != nil { 80 | cells = append(cells, "") 81 | continue 82 | } 83 | 84 | results, err := j.FindResults(objMap) 85 | if err != nil || len(results) == 0 || len(results[0]) == 0 { 86 | cells = append(cells, "") 87 | continue 88 | } 89 | 90 | cells = append(cells, results[0][0].Interface()) 91 | } 92 | 93 | return cells 94 | } 95 | 96 | func toPointerSlice[T any](v []T) []*T { 97 | vPtr := make([]*T, len(v)) 98 | 99 | for i := range v { 100 | vPtr[i] = &v[i] 101 | } 102 | 103 | return vPtr 104 | } 105 | -------------------------------------------------------------------------------- /pkg/controller/cluster/status.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | 7 | "k8s.io/apimachinery/pkg/api/meta" 8 | 9 | v1 "k8s.io/api/core/v1" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | ctrl "sigs.k8s.io/controller-runtime" 12 | 13 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 14 | "github.com/rancher/k3k/pkg/controller/cluster/server/bootstrap" 15 | ) 16 | 17 | const ( 18 | // Condition Types 19 | ConditionReady = "Ready" 20 | 21 | // Condition Reasons 22 | ReasonValidationFailed = "ValidationFailed" 23 | ReasonProvisioning = "Provisioning" 24 | ReasonProvisioned = "Provisioned" 25 | ReasonProvisioningFailed = "ProvisioningFailed" 26 | ReasonTerminating = "Terminating" 27 | ) 28 | 29 | func (c *ClusterReconciler) updateStatus(ctx context.Context, cluster *v1beta1.Cluster, reconcileErr error) { 30 | log := ctrl.LoggerFrom(ctx) 31 | log.V(1).Info("Updating Cluster Conditions") 32 | 33 | if !cluster.DeletionTimestamp.IsZero() { 34 | cluster.Status.Phase = v1beta1.ClusterTerminating 35 | meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ 36 | Type: ConditionReady, 37 | Status: metav1.ConditionFalse, 38 | Reason: ReasonTerminating, 39 | Message: "Cluster is being terminated", 40 | }) 41 | 42 | return 43 | } 44 | 45 | // Handle validation errors specifically to set the Pending phase. 
46 | if errors.Is(reconcileErr, ErrClusterValidation) { 47 | cluster.Status.Phase = v1beta1.ClusterPending 48 | meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ 49 | Type: ConditionReady, 50 | Status: metav1.ConditionFalse, 51 | Reason: ReasonValidationFailed, 52 | Message: reconcileErr.Error(), 53 | }) 54 | 55 | c.Eventf(cluster, v1.EventTypeWarning, ReasonValidationFailed, reconcileErr.Error()) 56 | 57 | return 58 | } 59 | 60 | if errors.Is(reconcileErr, bootstrap.ErrServerNotReady) { 61 | cluster.Status.Phase = v1beta1.ClusterProvisioning 62 | meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ 63 | Type: ConditionReady, 64 | Status: metav1.ConditionFalse, 65 | Reason: ReasonProvisioning, 66 | Message: reconcileErr.Error(), 67 | }) 68 | 69 | return 70 | } 71 | 72 | // If there's an error, but it's not a validation error, the cluster is in a failed state. 73 | if reconcileErr != nil { 74 | cluster.Status.Phase = v1beta1.ClusterFailed 75 | meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ 76 | Type: ConditionReady, 77 | Status: metav1.ConditionFalse, 78 | Reason: ReasonProvisioningFailed, 79 | Message: reconcileErr.Error(), 80 | }) 81 | 82 | c.Eventf(cluster, v1.EventTypeWarning, ReasonProvisioningFailed, reconcileErr.Error()) 83 | 84 | return 85 | } 86 | 87 | // If we reach here, everything is successful. 88 | cluster.Status.Phase = v1beta1.ClusterReady 89 | newCondition := metav1.Condition{ 90 | Type: ConditionReady, 91 | Status: metav1.ConditionTrue, 92 | Reason: ReasonProvisioned, 93 | Message: "Cluster successfully provisioned", 94 | } 95 | 96 | // Only emit event on transition to Ready 97 | if !meta.IsStatusConditionPresentAndEqual(cluster.Status.Conditions, ConditionReady, metav1.ConditionTrue) { 98 | c.Eventf(cluster, v1.EventTypeNormal, ReasonProvisioned, newCondition.Message) 99 | } 100 | 101 | meta.SetStatusCondition(&cluster.Status.Conditions, newCondition) 102 | } 103 | -------------------------------------------------------------------------------- /tests/cluster_network_test.go: -------------------------------------------------------------------------------- 1 | package k3k_test 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo/v2" 5 | . 
"github.com/onsi/gomega" 6 | ) 7 | 8 | var _ = When("two virtual clusters are installed", Label(e2eTestLabel), Label(networkingTestsLabel), func() { 9 | var ( 10 | cluster1 *VirtualCluster 11 | cluster2 *VirtualCluster 12 | ) 13 | 14 | BeforeEach(func() { 15 | clusters := NewVirtualClusters(2) 16 | cluster1 = clusters[0] 17 | cluster2 = clusters[1] 18 | }) 19 | 20 | AfterEach(func() { 21 | DeleteNamespaces(cluster1.Cluster.Namespace, cluster2.Cluster.Namespace) 22 | }) 23 | 24 | It("can create pods in each of them that are isolated", func() { 25 | pod1Cluster1, pod1Cluster1IP := cluster1.NewNginxPod("") 26 | pod2Cluster1, pod2Cluster1IP := cluster1.NewNginxPod("") 27 | pod1Cluster2, pod1Cluster2IP := cluster2.NewNginxPod("") 28 | 29 | var ( 30 | stdout string 31 | curlCmd string 32 | err error 33 | ) 34 | 35 | By("Checking that Pods can reach themselves") 36 | 37 | curlCmd = "curl --no-progress-meter " + pod1Cluster1IP 38 | stdout, _, err = cluster1.ExecCmd(pod1Cluster1, curlCmd) 39 | Expect(err).To(Not(HaveOccurred())) 40 | Expect(stdout).To(ContainSubstring("Welcome to nginx!")) 41 | 42 | curlCmd = "curl --no-progress-meter " + pod2Cluster1IP 43 | stdout, _, err = cluster1.ExecCmd(pod2Cluster1, curlCmd) 44 | Expect(err).To(Not(HaveOccurred())) 45 | Expect(stdout).To(ContainSubstring("Welcome to nginx!")) 46 | 47 | curlCmd = "curl --no-progress-meter " + pod1Cluster2IP 48 | stdout, _, err = cluster2.ExecCmd(pod1Cluster2, curlCmd) 49 | Expect(err).To(Not(HaveOccurred())) 50 | Expect(stdout).To(ContainSubstring("Welcome to nginx!")) 51 | 52 | // Pods in the same Virtual Cluster should be able to reach each other 53 | // Pod1 should be able to call Pod2, and viceversa 54 | 55 | By("Checking that Pods in the same virtual clusters can reach each other") 56 | 57 | curlCmd = "curl --no-progress-meter " + pod2Cluster1IP 58 | stdout, _, err = cluster1.ExecCmd(pod1Cluster1, curlCmd) 59 | Expect(err).To(Not(HaveOccurred())) 60 | Expect(stdout).To(ContainSubstring("Welcome to nginx!")) 61 | 62 | curlCmd = "curl --no-progress-meter " + pod1Cluster1IP 63 | stdout, _, err = cluster1.ExecCmd(pod2Cluster1, curlCmd) 64 | Expect(err).To(Not(HaveOccurred())) 65 | Expect(stdout).To(ContainSubstring("Welcome to nginx!")) 66 | 67 | By("Checking that Pods in the different virtual clusters cannot reach each other") 68 | 69 | // Pods in Cluster 1 should not be able to reach the Pod in Cluster 2 70 | 71 | curlCmd = "curl --no-progress-meter " + pod1Cluster2IP 72 | stdout, _, err = cluster1.ExecCmd(pod1Cluster1, curlCmd) 73 | Expect(err).Should(HaveOccurred()) 74 | Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!"))) 75 | 76 | curlCmd = "curl --no-progress-meter " + pod1Cluster2IP 77 | stdout, _, err = cluster1.ExecCmd(pod2Cluster1, curlCmd) 78 | Expect(err).To(HaveOccurred()) 79 | Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!"))) 80 | 81 | // Pod in Cluster 2 should not be able to reach Pods in Cluster 1 82 | 83 | curlCmd = "curl --no-progress-meter " + pod1Cluster1IP 84 | stdout, _, err = cluster2.ExecCmd(pod1Cluster2, curlCmd) 85 | Expect(err).To(HaveOccurred()) 86 | Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!"))) 87 | 88 | curlCmd = "curl --no-progress-meter " + pod2Cluster1IP 89 | stdout, _, err = cluster2.ExecCmd(pod1Cluster2, curlCmd) 90 | Expect(err).To(HaveOccurred()) 91 | Expect(stdout).To(Not(ContainSubstring("Welcome to nginx!"))) 92 | }) 93 | }) 94 | -------------------------------------------------------------------------------- /cli/cmds/root.go: 
-------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/sirupsen/logrus" 8 | "github.com/spf13/cobra" 9 | "github.com/spf13/pflag" 10 | "github.com/spf13/viper" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/client-go/rest" 13 | "k8s.io/client-go/tools/clientcmd" 14 | "sigs.k8s.io/controller-runtime/pkg/client" 15 | 16 | apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 17 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 18 | 19 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 20 | "github.com/rancher/k3k/pkg/buildinfo" 21 | ) 22 | 23 | type AppContext struct { 24 | RestConfig *rest.Config 25 | Client client.Client 26 | 27 | // Global flags 28 | Debug bool 29 | Kubeconfig string 30 | namespace string 31 | } 32 | 33 | func NewRootCmd() *cobra.Command { 34 | appCtx := &AppContext{} 35 | 36 | rootCmd := &cobra.Command{ 37 | SilenceUsage: true, 38 | Use: "k3kcli", 39 | Short: "CLI for K3K", 40 | Version: buildinfo.Version, 41 | PersistentPreRunE: func(cmd *cobra.Command, args []string) error { 42 | InitializeConfig(cmd) 43 | 44 | if appCtx.Debug { 45 | logrus.SetLevel(logrus.DebugLevel) 46 | } 47 | 48 | restConfig, err := loadRESTConfig(appCtx.Kubeconfig) 49 | if err != nil { 50 | return err 51 | } 52 | 53 | scheme := runtime.NewScheme() 54 | _ = clientgoscheme.AddToScheme(scheme) 55 | _ = v1beta1.AddToScheme(scheme) 56 | _ = apiextensionsv1.AddToScheme(scheme) 57 | 58 | ctrlClient, err := client.New(restConfig, client.Options{Scheme: scheme}) 59 | if err != nil { 60 | return err 61 | } 62 | 63 | appCtx.RestConfig = restConfig 64 | appCtx.Client = ctrlClient 65 | 66 | return nil 67 | }, 68 | DisableAutoGenTag: true, 69 | } 70 | 71 | rootCmd.PersistentFlags().StringVar(&appCtx.Kubeconfig, "kubeconfig", "", "kubeconfig path ($HOME/.kube/config or $KUBECONFIG if set)") 72 | rootCmd.PersistentFlags().BoolVar(&appCtx.Debug, "debug", false, "Turn on debug logs") 73 | 74 | rootCmd.AddCommand( 75 | NewClusterCmd(appCtx), 76 | NewPolicyCmd(appCtx), 77 | NewKubeconfigCmd(appCtx), 78 | ) 79 | 80 | return rootCmd 81 | } 82 | 83 | func (ctx *AppContext) Namespace(name string) string { 84 | if ctx.namespace != "" { 85 | return ctx.namespace 86 | } 87 | 88 | return "k3k-" + name 89 | } 90 | 91 | func loadRESTConfig(kubeconfig string) (*rest.Config, error) { 92 | loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() 93 | configOverrides := &clientcmd.ConfigOverrides{} 94 | 95 | if kubeconfig != "" { 96 | loadingRules.ExplicitPath = kubeconfig 97 | } 98 | 99 | kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) 100 | 101 | return kubeConfig.ClientConfig() 102 | } 103 | 104 | func CobraFlagNamespace(appCtx *AppContext, flag *pflag.FlagSet) { 105 | flag.StringVarP(&appCtx.namespace, "namespace", "n", "", "namespace of the k3k cluster") 106 | } 107 | 108 | func InitializeConfig(cmd *cobra.Command) { 109 | viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) 110 | viper.AutomaticEnv() 111 | 112 | // Bind the current command's flags to viper 113 | cmd.Flags().VisitAll(func(f *pflag.Flag) { 114 | // Apply the viper config value to the flag when the flag is not set and viper has a value 115 | if !f.Changed && viper.IsSet(f.Name) { 116 | val := viper.Get(f.Name) 117 | _ = cmd.Flags().Set(f.Name, fmt.Sprintf("%v", val)) 118 | } 119 | }) 120 | } 121 | 
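// Usage sketch: InitializeConfig wires viper's AutomaticEnv together with a
// "-" to "_" key replacer, so any flag left unset on the command line can also
// be supplied as an environment variable (flag name upper-cased, dashes
// replaced by underscores). The invocation below is illustrative:
//
//	DEBUG=true KUBECONFIG=$HOME/.kube/config k3kcli cluster list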
-------------------------------------------------------------------------------- /pkg/controller/policy/namespace.go: -------------------------------------------------------------------------------- 1 | package policy 2 | 3 | import ( 4 | "context" 5 | 6 | "k8s.io/apimachinery/pkg/labels" 7 | "k8s.io/apimachinery/pkg/selection" 8 | "sigs.k8s.io/controller-runtime/pkg/client" 9 | 10 | v1 "k8s.io/api/core/v1" 11 | networkingv1 "k8s.io/api/networking/v1" 12 | ctrl "sigs.k8s.io/controller-runtime" 13 | 14 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 15 | ) 16 | 17 | // reconcileNamespacePodSecurityLabels will update the labels of the namespace to reconcile the PSA level specified in the VirtualClusterPolicy 18 | func (c *VirtualClusterPolicyReconciler) reconcileNamespacePodSecurityLabels(ctx context.Context, namespace *v1.Namespace, policy *v1beta1.VirtualClusterPolicy) { 19 | log := ctrl.LoggerFrom(ctx) 20 | log.V(1).Info("Reconciling PSA labels") 21 | 22 | // cleanup of old labels 23 | delete(namespace.Labels, "pod-security.kubernetes.io/enforce") 24 | delete(namespace.Labels, "pod-security.kubernetes.io/enforce-version") 25 | delete(namespace.Labels, "pod-security.kubernetes.io/warn") 26 | delete(namespace.Labels, "pod-security.kubernetes.io/warn-version") 27 | 28 | // if a PSA level is specified add the proper labels 29 | if policy.Spec.PodSecurityAdmissionLevel != nil { 30 | psaLevel := *policy.Spec.PodSecurityAdmissionLevel 31 | 32 | namespace.Labels["pod-security.kubernetes.io/enforce"] = string(psaLevel) 33 | namespace.Labels["pod-security.kubernetes.io/enforce-version"] = "latest" 34 | 35 | // skip the 'warn' only for the privileged PSA level 36 | if psaLevel != v1beta1.PrivilegedPodSecurityAdmissionLevel { 37 | namespace.Labels["pod-security.kubernetes.io/warn"] = string(psaLevel) 38 | namespace.Labels["pod-security.kubernetes.io/warn-version"] = "latest" 39 | } 40 | } 41 | } 42 | 43 | // cleanupNamespaces will cleanup the Namespaces without the "policy.k3k.io/policy-name" label 44 | // deleting the resources in them with the "app.kubernetes.io/managed-by=k3k-policy-controller" label 45 | func (c *VirtualClusterPolicyReconciler) cleanupNamespaces(ctx context.Context) error { 46 | log := ctrl.LoggerFrom(ctx) 47 | log.V(1).Info("Cleanup Namespace resources") 48 | 49 | var namespaces v1.NamespaceList 50 | if err := c.Client.List(ctx, &namespaces); err != nil { 51 | return err 52 | } 53 | 54 | for _, ns := range namespaces.Items { 55 | selector := labels.NewSelector() 56 | 57 | if req, err := labels.NewRequirement(ManagedByLabelKey, selection.Equals, []string{VirtualPolicyControllerName}); err == nil { 58 | selector = selector.Add(*req) 59 | } 60 | 61 | // if the namespace is bound to a policy -> cleanup resources of other policies 62 | if ns.Labels[PolicyNameLabelKey] != "" { 63 | requirement, err := labels.NewRequirement(PolicyNameLabelKey, selection.NotEquals, []string{ns.Labels[PolicyNameLabelKey]}) 64 | 65 | // log the error but continue cleaning up the other namespaces 66 | if err != nil { 67 | log.Error(err, "error creating requirement", "policy", ns.Labels[PolicyNameLabelKey]) 68 | } else { 69 | selector = selector.Add(*requirement) 70 | } 71 | } 72 | 73 | deleteOpts := []client.DeleteAllOfOption{ 74 | client.InNamespace(ns.Name), 75 | client.MatchingLabelsSelector{Selector: selector}, 76 | } 77 | 78 | if err := c.Client.DeleteAllOf(ctx, &networkingv1.NetworkPolicy{}, deleteOpts...); err != nil { 79 | return err 80 | } 81 | 82 | if err := c.Client.DeleteAllOf(ctx, 
&v1.ResourceQuota{}, deleteOpts...); err != nil { 83 | return err 84 | } 85 | 86 | if err := c.Client.DeleteAllOf(ctx, &v1.LimitRange{}, deleteOpts...); err != nil { 87 | return err 88 | } 89 | } 90 | 91 | return nil 92 | } 93 | -------------------------------------------------------------------------------- /charts/k3k/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "k3k.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "k3k.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "k3k.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "k3k.labels" -}} 37 | helm.sh/chart: {{ include "k3k.chart" . }} 38 | {{ include "k3k.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "k3k.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "k3k.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "k3k.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "k3k.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | 64 | {{/* 65 | Print the image pull secrets in the expected format (an array of objects with one possible field, "name"). 66 | */}} 67 | {{- define "image.pullSecrets" }} 68 | {{- $imagePullSecrets := list }} 69 | {{- range . }} 70 | {{- if kindIs "string" . }} 71 | {{- $imagePullSecrets = append $imagePullSecrets (dict "name" .) }} 72 | {{- else }} 73 | {{- $imagePullSecrets = append $imagePullSecrets . 
}} 74 | {{- end }} 75 | {{- end }} 76 | {{- toYaml $imagePullSecrets }} 77 | {{- end }} 78 | 79 | {{- define "controller.registry" }} 80 | {{- $registry := .Values.global.imageRegistry | default .Values.controller.image.registry -}} 81 | {{- if $registry }} 82 | {{- $registry }}/ 83 | {{- else }} 84 | {{- $registry }} 85 | {{- end }} 86 | {{- end }} 87 | 88 | {{- define "server.registry" }} 89 | {{- $registry := .Values.global.imageRegistry | default .Values.server.image.registry -}} 90 | {{- if $registry }} 91 | {{- $registry }}/ 92 | {{- else }} 93 | {{- $registry }} 94 | {{- end }} 95 | {{- end }} 96 | 97 | {{- define "agent.virtual.registry" }} 98 | {{- $registry := .Values.global.imageRegistry | default .Values.agent.virtual.image.registry -}} 99 | {{- if $registry }} 100 | {{- $registry }}/ 101 | {{- else }} 102 | {{- $registry }} 103 | {{- end }} 104 | {{- end }} 105 | 106 | {{- define "agent.shared.registry" }} 107 | {{- $registry := .Values.global.imageRegistry | default .Values.agent.shared.image.registry -}} 108 | {{- if $registry }} 109 | {{- $registry }}/ 110 | {{- else }} 111 | {{- $registry }} 112 | {{- end }} 113 | {{- end }} 114 | -------------------------------------------------------------------------------- /cli/cmds/cluster_delete.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | 7 | "github.com/sirupsen/logrus" 8 | "github.com/spf13/cobra" 9 | "k8s.io/apimachinery/pkg/types" 10 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 11 | 12 | v1 "k8s.io/api/core/v1" 13 | apierrors "k8s.io/apimachinery/pkg/api/errors" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" 16 | 17 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 18 | k3kcluster "github.com/rancher/k3k/pkg/controller/cluster" 19 | "github.com/rancher/k3k/pkg/controller/cluster/agent" 20 | ) 21 | 22 | var keepData bool 23 | 24 | func NewClusterDeleteCmd(appCtx *AppContext) *cobra.Command { 25 | cmd := &cobra.Command{ 26 | Use: "delete", 27 | Short: "Delete an existing cluster", 28 | Example: "k3kcli cluster delete [command options] NAME", 29 | RunE: delete(appCtx), 30 | Args: cobra.ExactArgs(1), 31 | } 32 | 33 | CobraFlagNamespace(appCtx, cmd.Flags()) 34 | cmd.Flags().BoolVar(&keepData, "keep-data", false, "keeps persistence volumes created for the cluster after deletion") 35 | 36 | return cmd 37 | } 38 | 39 | func delete(appCtx *AppContext) func(cmd *cobra.Command, args []string) error { 40 | return func(cmd *cobra.Command, args []string) error { 41 | ctx := context.Background() 42 | client := appCtx.Client 43 | name := args[0] 44 | 45 | if name == k3kcluster.ClusterInvalidName { 46 | return errors.New("invalid cluster name") 47 | } 48 | 49 | namespace := appCtx.Namespace(name) 50 | 51 | logrus.Infof("Deleting '%s' cluster in namespace '%s'", name, namespace) 52 | 53 | cluster := v1beta1.Cluster{ 54 | ObjectMeta: metav1.ObjectMeta{ 55 | Name: name, 56 | Namespace: namespace, 57 | }, 58 | } 59 | // keep bootstrap secrets and tokens if --keep-data flag is passed 60 | if keepData { 61 | // skip removing tokenSecret 62 | if err := RemoveOwnerReferenceFromSecret(ctx, k3kcluster.TokenSecretName(cluster.Name), client, cluster); err != nil { 63 | return err 64 | } 65 | 66 | // skip removing webhook secret 67 | if err := RemoveOwnerReferenceFromSecret(ctx, agent.WebhookSecretName(cluster.Name), client, cluster); err != nil { 68 | return err 69 | } 
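// at this point the token and webhook secrets outlive the Cluster object,
// since their controller owner references were removed above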
70 | } else { 71 | matchingLabels := ctrlclient.MatchingLabels(map[string]string{"cluster": cluster.Name, "role": "server"}) 72 | listOpts := ctrlclient.ListOptions{Namespace: cluster.Namespace} 73 | matchingLabels.ApplyToList(&listOpts) 74 | deleteOpts := &ctrlclient.DeleteAllOfOptions{ListOptions: listOpts} 75 | 76 | if err := client.DeleteAllOf(ctx, &v1.PersistentVolumeClaim{}, deleteOpts); err != nil { 77 | return ctrlclient.IgnoreNotFound(err) 78 | } 79 | } 80 | 81 | if err := client.Delete(ctx, &cluster); err != nil { 82 | return ctrlclient.IgnoreNotFound(err) 83 | } 84 | 85 | return nil 86 | } 87 | } 88 | 89 | func RemoveOwnerReferenceFromSecret(ctx context.Context, name string, cl ctrlclient.Client, cluster v1beta1.Cluster) error { 90 | var secret v1.Secret 91 | 92 | key := types.NamespacedName{ 93 | Name: name, 94 | Namespace: cluster.Namespace, 95 | } 96 | 97 | if err := cl.Get(ctx, key, &secret); err != nil { 98 | if apierrors.IsNotFound(err) { 99 | logrus.Warnf("%s secret is not found", name) 100 | return nil 101 | } 102 | 103 | return err 104 | } 105 | 106 | if controllerutil.HasControllerReference(&secret) { 107 | if err := controllerutil.RemoveOwnerReference(&cluster, &secret, cl.Scheme()); err != nil { 108 | return err 109 | } 110 | 111 | return cl.Update(ctx, &secret) 112 | } 113 | 114 | return nil 115 | } 116 | -------------------------------------------------------------------------------- /tests/cluster_certs_test.go: -------------------------------------------------------------------------------- 1 | package k3k_test 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "strings" 7 | 8 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | 10 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 11 | 12 | . "github.com/onsi/ginkgo/v2" 13 | . "github.com/onsi/gomega" 14 | ) 15 | 16 | var _ = When("a cluster with custom certificates is installed with individual cert secrets", Label(e2eTestLabel), Label(certificatesTestsLabel), func() { 17 | var virtualCluster *VirtualCluster 18 | 19 | BeforeEach(func() { 20 | ctx := context.Background() 21 | 22 | namespace := NewNamespace() 23 | 24 | DeferCleanup(func() { 25 | DeleteNamespaces(namespace.Name) 26 | }) 27 | 28 | // create custom cert secret 29 | customCertDir := "testdata/customcerts/" 30 | 31 | certList := []string{ 32 | "server-ca", 33 | "client-ca", 34 | "request-header-ca", 35 | "service", 36 | "etcd-peer-ca", 37 | "etcd-server-ca", 38 | } 39 | 40 | for _, certName := range certList { 41 | var cert, key []byte 42 | var err error 43 | filePathPrefix := "" 44 | certfile := certName 45 | if strings.HasPrefix(certName, "etcd") { 46 | filePathPrefix = "etcd/" 47 | certfile = strings.TrimPrefix(certName, "etcd-") 48 | } 49 | if !strings.Contains(certName, "service") { 50 | cert, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".crt") 51 | Expect(err).To(Not(HaveOccurred())) 52 | } 53 | key, err = os.ReadFile(customCertDir + filePathPrefix + certfile + ".key") 54 | Expect(err).To(Not(HaveOccurred())) 55 | 56 | certSecret := caCertSecret(certName, namespace.Name, cert, key) 57 | err = k8sClient.Create(ctx, certSecret) 58 | Expect(err).To(Not(HaveOccurred())) 59 | } 60 | 61 | cluster := NewCluster(namespace.Name) 62 | 63 | cluster.Spec.CustomCAs = &v1beta1.CustomCAs{ 64 | Enabled: true, 65 | Sources: v1beta1.CredentialSources{ 66 | ServerCA: v1beta1.CredentialSource{ 67 | SecretName: "server-ca", 68 | }, 69 | ClientCA: v1beta1.CredentialSource{ 70 | SecretName: "client-ca", 71 | }, 72 | ETCDServerCA: v1beta1.CredentialSource{ 73 | 
SecretName: "etcd-server-ca", 74 | }, 75 | ETCDPeerCA: v1beta1.CredentialSource{ 76 | SecretName: "etcd-peer-ca", 77 | }, 78 | RequestHeaderCA: v1beta1.CredentialSource{ 79 | SecretName: "request-header-ca", 80 | }, 81 | ServiceAccountToken: v1beta1.CredentialSource{ 82 | SecretName: "service", 83 | }, 84 | }, 85 | } 86 | 87 | CreateCluster(cluster) 88 | 89 | client, restConfig := NewVirtualK8sClientAndConfig(cluster) 90 | 91 | virtualCluster = &VirtualCluster{ 92 | Cluster: cluster, 93 | RestConfig: restConfig, 94 | Client: client, 95 | } 96 | }) 97 | 98 | It("will load the custom certs in the server pod", func() { 99 | ctx := context.Background() 100 | 101 | labelSelector := "cluster=" + virtualCluster.Cluster.Name + ",role=server" 102 | serverPods, err := k8s.CoreV1().Pods(virtualCluster.Cluster.Namespace).List(ctx, v1.ListOptions{LabelSelector: labelSelector}) 103 | Expect(err).To(Not(HaveOccurred())) 104 | 105 | Expect(len(serverPods.Items)).To(Equal(1)) 106 | serverPod := serverPods.Items[0] 107 | 108 | // check server-ca.crt 109 | serverCACrtPath := "/var/lib/rancher/k3s/server/tls/server-ca.crt" 110 | serverCACrt, err := readFileWithinPod(ctx, k8s, restcfg, serverPod.Name, serverPod.Namespace, serverCACrtPath) 111 | Expect(err).To(Not(HaveOccurred())) 112 | 113 | serverCACrtTestFile, err := os.ReadFile("testdata/customcerts/server-ca.crt") 114 | Expect(err).To(Not(HaveOccurred())) 115 | Expect(serverCACrt).To(Equal(serverCACrtTestFile)) 116 | }) 117 | }) 118 | -------------------------------------------------------------------------------- /pkg/controller/policy/networkpolicy.go: -------------------------------------------------------------------------------- 1 | package policy 2 | 3 | import ( 4 | "context" 5 | 6 | "sigs.k8s.io/controller-runtime/pkg/client" 7 | 8 | v1 "k8s.io/api/core/v1" 9 | networkingv1 "k8s.io/api/networking/v1" 10 | apierrors "k8s.io/apimachinery/pkg/api/errors" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | ctrl "sigs.k8s.io/controller-runtime" 13 | 14 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 15 | k3kcontroller "github.com/rancher/k3k/pkg/controller" 16 | ) 17 | 18 | func (c *VirtualClusterPolicyReconciler) reconcileNetworkPolicy(ctx context.Context, namespace string, policy *v1beta1.VirtualClusterPolicy) error { 19 | log := ctrl.LoggerFrom(ctx) 20 | log.V(1).Info("Reconciling NetworkPolicy") 21 | 22 | var cidrList []string 23 | 24 | if c.ClusterCIDR != "" { 25 | cidrList = []string{c.ClusterCIDR} 26 | } else { 27 | var nodeList v1.NodeList 28 | if err := c.Client.List(ctx, &nodeList); err != nil { 29 | return err 30 | } 31 | 32 | for _, node := range nodeList.Items { 33 | if len(node.Spec.PodCIDRs) > 0 { 34 | cidrList = append(cidrList, node.Spec.PodCIDRs...) 
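// dual-stack nodes report all of their pod ranges in PodCIDRs; the single
// PodCIDR field below is only a fallback for nodes that do not set it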
35 | } else { 36 | cidrList = append(cidrList, node.Spec.PodCIDR) 37 | } 38 | } 39 | } 40 | 41 | networkPolicy := networkPolicy(namespace, policy, cidrList) 42 | 43 | if err := ctrl.SetControllerReference(policy, networkPolicy, c.Scheme); err != nil { 44 | return err 45 | } 46 | 47 | // if disabled then delete the existing network policy 48 | if policy.Spec.DisableNetworkPolicy { 49 | log.V(1).Info("Deleting NetworkPolicy") 50 | 51 | return client.IgnoreNotFound(c.Client.Delete(ctx, networkPolicy)) 52 | } 53 | 54 | log.V(1).Info("Creating NetworkPolicy") 55 | 56 | // otherwise try to create/update 57 | err := c.Client.Create(ctx, networkPolicy) 58 | if apierrors.IsAlreadyExists(err) { 59 | log.V(1).Info("NetworkPolicy already exists, updating.") 60 | 61 | return c.Client.Update(ctx, networkPolicy) 62 | } 63 | 64 | return err 65 | } 66 | 67 | func networkPolicy(namespaceName string, policy *v1beta1.VirtualClusterPolicy, cidrList []string) *networkingv1.NetworkPolicy { 68 | return &networkingv1.NetworkPolicy{ 69 | TypeMeta: metav1.TypeMeta{ 70 | Kind: "NetworkPolicy", 71 | APIVersion: "networking.k8s.io/v1", 72 | }, 73 | ObjectMeta: metav1.ObjectMeta{ 74 | Name: k3kcontroller.SafeConcatNameWithPrefix(policy.Name), 75 | Namespace: namespaceName, 76 | Labels: map[string]string{ 77 | ManagedByLabelKey: VirtualPolicyControllerName, 78 | PolicyNameLabelKey: policy.Name, 79 | }, 80 | }, 81 | Spec: networkingv1.NetworkPolicySpec{ 82 | PolicyTypes: []networkingv1.PolicyType{ 83 | networkingv1.PolicyTypeIngress, 84 | networkingv1.PolicyTypeEgress, 85 | }, 86 | Ingress: []networkingv1.NetworkPolicyIngressRule{ 87 | {}, 88 | }, 89 | Egress: []networkingv1.NetworkPolicyEgressRule{ 90 | { 91 | To: []networkingv1.NetworkPolicyPeer{ 92 | { 93 | IPBlock: &networkingv1.IPBlock{ 94 | CIDR: "0.0.0.0/0", 95 | Except: cidrList, 96 | }, 97 | }, 98 | { 99 | NamespaceSelector: &metav1.LabelSelector{ 100 | MatchLabels: map[string]string{ 101 | "kubernetes.io/metadata.name": namespaceName, 102 | }, 103 | }, 104 | }, 105 | { 106 | NamespaceSelector: &metav1.LabelSelector{ 107 | MatchLabels: map[string]string{ 108 | "kubernetes.io/metadata.name": metav1.NamespaceSystem, 109 | }, 110 | }, 111 | PodSelector: &metav1.LabelSelector{ 112 | MatchLabels: map[string]string{ 113 | "k8s-app": "kube-dns", 114 | }, 115 | }, 116 | }, 117 | }, 118 | }, 119 | }, 120 | }, 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /tests/testdata/customcerts/intermediate-ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFLzCCAxegAwIBAgIIXrx+TJOdkuIwDQYJKoZIhvcNAQELBQAwITEfMB0GA1UE 3 | AwwWazNzLXJvb3QtY2FAMTc1MTk2NTEwMDAeFw0yNTA3MDgwODU4MjNaFw0zNTA4 4 | MjUwODU4MjNaMCkxJzAlBgNVBAMMHmszcy1pbnRlcm1lZGlhdGUtY2FAMTc1MTk2 5 | NTEwMDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfxJHxlMq/Ul+7X 6 | avcxrg6x8joXUn0T4TbaYiQJVRK0z8j42yDh1uJSQ82SkCB1ltkF2xHzQ65iqPx9 7 | n6uluTP/fEoGOiGxfZfIqk+H+mPbumJPd7n3hmtqPwPmjhYCPZpLadT60rjNalkM 8 | 4hUX2O0PwXtvejODUaLgF9YLUJbIdoZvkzKNcv2bTg9gruW0AoAzEqAKLYRDh58N 9 | XQMync3OQnnAMzUyPBBukhM7P2tysufV0HJg/ZcsgLek8hpuBQ2T1QVUrZFAgOXh 10 | ZCcrK5OGKcjQh5bo1vcbIRr4DtfHT/Jl5RhA6q5IqTAiS/dQVAhNYs0lDHgF69kU 11 | gI2iHqsv2awuW3k9Q+i5q9vMlo/h9wxpj87cbfxLpBkS2o/h+6zB1DdI6U1WRo/Q 12 | pFp3vzsdtr/G8WEFxwL1OMDewjA+EsjhSjI0BBrm7CC2uJkNRM3v97r9RGNzBCYL 13 | Qn3IxqqzVjOOEhJzQc3G8FKRJj1zPAkURvTus4s9c7vdMOp1sVd78b9ObXFa0TDF 14 | DPe9ZQu5YGEvGGibKjp5cFwJ2M2pwjv53ZUDS4sxqqdXRBmVxcCkUtrwRL35PIpb 15 | 
Tje1UZP8RJ66iXnXUPco2BKyd9NYyiaUAW4gZz5WROzfgPdnI/ArGc5RThk+O5sg 16 | cb62eU/nADPGsG8t+VEUXfJjP19pAgMBAAGjYzBhMB0GA1UdDgQWBBSZVjjNDIyY 17 | CtjsHgMrtXlZ/31IujAfBgNVHSMEGDAWgBSML4NHrHhdoo46ZRedOusGhJBsTDAP 18 | BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwICpDANBgkqhkiG9w0BAQsFAAOC 19 | AgEAxgdjETEkUB7G2g3cFz6Jb8TznrZXprVk9G1p1UNuMAAEe2NaiGI0BvivJNoQ 20 | 6DNpngB3x6l7u5hMOJY/72wzQRDXg05m5T8/vBfTcz/HcgoKU82hgYDxQiGA0I5e 21 | 5czfEhQu95uPT7T26m5pvWA38Tf6WbXCB7c0/FT8II8w1ptJlZrSKRW33+j1tb2r 22 | t0/8RoMaAUS1MP6+12juG7qcuEx0rDVeKx33pa20NjsAtZeTAsXjugJkmUkkd/PC 23 | cKbawQB9meqtWPfGzwmUu1qz6SQeWtOWFrOBSeTzx0HTeimSAiHVSaXGd7ms3orx 24 | KsKUoPUbXi9rVIDVxCe5XwAKmcHMz8DGHfxJ6sodol25pYbHxKy/swgAzQdwtGF9 25 | HWJAm6/3YSjtmD6+t89/yYzvxv+aMNDXVMLpDFb+7/ESsSl4757WjvvFoz0HxcwD 26 | 4qfmV2z+EaLM44P3QaJDD599/Qwt+TFHSQRfD/MqMH6A+9vZhZGWeGgFFmu82kzH 27 | xKJas/jI+t+V+2TbfagUYlsinZ6UmLcju99myl6wq6nJu8X5b8Uhpv5/8kVniqXE 28 | lWtFpMBmnE0oq0U+KR3OfnovSYdYTc7uWpaJlqMamsE5UVVBrlSyUe9J9EQzYYea 29 | Ufoq67KnJInMobbQ4aonz7EQZW6WIZpqASuVGfT5heSKHDc= 30 | -----END CERTIFICATE----- 31 | -----BEGIN CERTIFICATE----- 32 | MIIFMzCCAxugAwIBAgIUALEl344JEZxaOvwyrtO8QwLHAacwDQYJKoZIhvcNAQEL 33 | BQAwITEfMB0GA1UEAwwWazNzLXJvb3QtY2FAMTc1MTk2NTEwMDAeFw0yNTA3MDgw 34 | ODU4MjJaFw00NTA3MDMwODU4MjJaMCExHzAdBgNVBAMMFmszcy1yb290LWNhQDE3 35 | NTE5NjUxMDAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDtESS+0V7T 36 | 0b3rN0ga9/zWThgaGIfOoKiCr2CSR4az0hjpmISAlS5pplhWrutGhydvaR/7vV7g 37 | Grtn0ptmqidEc0XSeQTosqUv29SZ+hbCITZG05TFbMGGXmFo/1LHhJ8ZpgzUBqc7 38 | inOXmEBbCyhOWX+fhddoJJiUIX7l2R3cbyNk7I7KFiZ1JDDnoyDWFMOJLvXwOqeT 39 | CJQesS+2qpeXpdazgtkRQi7aOhZtXnCsJHnFbpL9VFTmWpq7vMIA9xkDva3/80S4 40 | T2USiyN4/quUagjP8si5PIaGBVx9bJhnKHUdTrPPZWHFfxm74GUG7en4OtR9lOTM 41 | TV49XuofNpJaJ2K5Dz+rk6gIYp2R9W+r8CN1HBXeLF5WJCvw54Lpcp0V2BA+MARq 42 | Ij8UTUymu++rIo+WjKcHDT0qn39rhOhqJ3dXynW4UF/95KwB7e5UI+OxZ9sJ4oNP 43 | FuP5PHrDINXsYG0DDM3KTDxA8memVSFGMdhm31DPD2GUvUPlupnd6uCepdcBc9ed 44 | +9gTeSxF5cA6Q3gVOQa8ok9Ts0E5DwGaCIxjasyD+MIORf3Bkv0Z3lBNXicWDngP 45 | FU5ya30GeTZQgzqhlGoXehjYfRxV28pgiR5ejl0/rmsYGfiWPdfa4Qy0LnxSKHj1 46 | f2R1jEB55ORATo3f+k4Ez5bdjhk4KlA95QIDAQABo2MwYTAdBgNVHQ4EFgQUjC+D 47 | R6x4XaKOOmUXnTrrBoSQbEwwHwYDVR0jBBgwFoAUjC+DR6x4XaKOOmUXnTrrBoSQ 48 | bEwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAqQwDQYJKoZIhvcNAQEL 49 | BQADggIBACpzO1nV6/hS1xLIkkGf9RdypATYCH6RqeBJqMf/R5YEUQssLF/QBEvQ 50 | NwockkxD1l8VwjH3Pp36QxkgthAl0D3ewK9LLkxEq1pp9VuVGxeD9siN/fxx6htG 51 | KP1jT49pBDIdbzhJ7eR/O8xuI5dyZNLZLJkaumQkd7sEVHvDTFw55PhwUEJ3Wcxl 52 | jAxXM1FFCKftXjWFmvVmzYZYkPj/AhB+PcVIIkFnNQYTXdUCUtsnSgj3pF1z/+g5 53 | PttBGhsttrm93lJgddRFTEWV1lzfw1csrHkLYDYLDKDzsNQaVo71wKPmorK+xnbM 54 | h1PQAVJeXypLTAfE636+n+Md/wSvnQuo0RzPBE24S9c9TWM2d96dvtU9kgJPbqoA 55 | RX6jHw2ACnKp4RFJILqDCqFCOrytYPk3J/L8myW44dGpCCdSrFREqNCsyMrqu4v7 56 | U+W9ENHT0qe7Nm0T4XNFlQstt6uGvk6ddEdbgcTfTvSv5jx2++Jfl2ynF+G67l0U 57 | UASFHsrwThnulGQtpK2+heHkU8xQFjQOGZoQMlLiWzWg+bqo07aghAndKhKnW8s8 58 | iRvMvdcsLjjDaPFCgeopGeQauiTd2od5aXGWCn+djzLq0fjIvezs4K70XOsStbGA 59 | cJFFAnsnM40SbnrWyfe1EBlzuVJu0csuF77fpEU7CFz8uzd268Ho 60 | -----END CERTIFICATE----- 61 | -------------------------------------------------------------------------------- /docs/howtos/choose-mode.md: -------------------------------------------------------------------------------- 1 | # How to Choose Between Shared and Virtual Mode 2 | 3 | This guide helps you choose the right mode for your virtual cluster: **Shared** or **Virtual**. 4 | If you're unsure, start with **Shared mode** — it's the default and fits most common scenarios. 
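The mode is selected when a cluster is created. With the CLI it is chosen via the `--mode` flag; a minimal sketch (the invocation shape and cluster names are illustrative, see `k3kcli cluster create --help` for the full flag list):

```sh
# "shared" is the default, so the flag can be omitted for shared mode
k3kcli cluster create my-shared-cluster
k3kcli cluster create --mode virtual my-lab-cluster
```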
5 | 
6 | ---
7 | 
8 | ## Shared Mode (default)
9 | 
10 | **Best for:**
11 | - Developers who want to run workloads quickly without managing Kubernetes internals
12 | - Platform teams that require visibility and control over all workloads
13 | - Users who need access to host-level resources (e.g., GPUs)
14 | 
15 | In **Shared mode**, the virtual cluster runs its own K3s server but relies on the host to execute workloads. The virtual kubelet syncs resources, enabling lightweight, fast provisioning with support for cluster resource isolation. More details on the [architecture](./../architecture.md#shared-mode).
16 | 
17 | ---
18 | 
19 | ### Use Cases by Persona
20 | 
21 | #### 👩‍💻 Developer
22 | *"I’m building a web app that should be exposed outside the virtual cluster."*
23 | → Use **Shared mode**. It allows you to [expose](./expose-workloads.md) your application.
24 | 
25 | #### 👩‍🔬 Data Scientist
26 | *“I need to run Jupyter notebooks that leverage the cluster's GPU.”*
27 | → Use **Shared mode**. It gives access to physical devices while keeping overhead low.
28 | 
29 | #### 🧑‍💼 Platform Admin
30 | *"I want to monitor and secure all tenant workloads from a central location."*
31 | → Use **Shared mode**. Host-level agents (e.g., observability, policy enforcement) work across all virtual clusters.
32 | 
33 | #### 🔒 Security Engineer
34 | *"I need to enforce security policies like network policies or runtime scanning across all workloads."*
35 | → Use **Shared mode**. The platform can enforce policies globally without tenant bypass.
36 | 
37 | *"I need to test a new admission controller or policy engine."*
38 | → Use **Shared mode**, if it's scoped to your virtual cluster. You can run tools like Kubewarden without affecting the host.
39 | 
40 | #### 🔁 CI/CD Engineer
41 | *"I want to spin up disposable virtual clusters per pipeline run, fast and with low resource cost."*
42 | → Use **Shared mode**. It's quick to provision and ideal for short-lived, namespace-scoped environments.
43 | 
44 | ---
45 | 
46 | ## Virtual Mode
47 | 
48 | **Best for:**
49 | - Advanced users who need full Kubernetes isolation
50 | - Developers testing experimental or cluster-wide features
51 | - Use cases requiring control over the entire Kubernetes control plane
52 | 
53 | In **Virtual mode**, the virtual cluster runs its own isolated Kubernetes control plane. It supports different CNIs and API configurations — ideal for deep experimentation or advanced workloads. More details on the [architecture](./../architecture.md#virtual-mode).
54 | 
55 | ---
56 | 
57 | ### Use Cases by Persona
58 | 
59 | #### 👩‍💻 Developer
60 | *"I need to test a new Kubernetes feature gate that’s disabled in the host cluster."*
61 | → Use **Virtual mode**. You can configure your own control plane flags and API features.
62 | 
63 | #### 🧑‍💼 Platform Admin
64 | *"We’re testing upgrades across Kubernetes versions, including new API behaviors."*
65 | → Use **Virtual mode**. You can run different Kubernetes versions and safely validate upgrade paths.
66 | 
67 | #### 🌐 Network Engineer
68 | *"I’m evaluating a new CNI that needs full control of the cluster’s networking."*
69 | → Use **Virtual mode**. You can run a separate CNI stack without affecting the host or other tenants.
70 | 
71 | #### 🔒 Security Engineer
72 | *"I’m testing a new admission controller and policy engine before rolling it out cluster-wide."*
73 | → Use **Virtual mode**, if you need to test cluster-wide policies, custom admission flow, or advanced extensions with full control.
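To try Virtual mode directly, the repository ships a ready-made manifest; a minimal sketch of applying it (adjust names and namespaces to your environment):

```sh
# examples/virtual-server.yaml is included in this repository
kubectl apply -f examples/virtual-server.yaml
```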
74 | 75 | --- 76 | 77 | ## Still Not Sure? 78 | 79 | If you're evaluating more advanced use cases or want a deeper comparison, see the full trade-off breakdown in the [Architecture documentation](../architecture.md). -------------------------------------------------------------------------------- /pkg/controller/cluster/agent/shared_test.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "gopkg.in/yaml.v2" 8 | 9 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | 11 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 12 | ) 13 | 14 | func Test_sharedAgentData(t *testing.T) { 15 | type args struct { 16 | cluster *v1beta1.Cluster 17 | serviceName string 18 | ip string 19 | kubeletPort int 20 | webhookPort int 21 | token string 22 | } 23 | 24 | tests := []struct { 25 | name string 26 | args args 27 | expectedData map[string]string 28 | }{ 29 | { 30 | name: "simple config", 31 | args: args{ 32 | cluster: &v1beta1.Cluster{ 33 | ObjectMeta: v1.ObjectMeta{ 34 | Name: "mycluster", 35 | Namespace: "ns-1", 36 | }, 37 | Spec: v1beta1.ClusterSpec{ 38 | Version: "v1.2.3", 39 | }, 40 | }, 41 | kubeletPort: 10250, 42 | webhookPort: 9443, 43 | ip: "10.0.0.21", 44 | serviceName: "service-name", 45 | token: "dnjklsdjnksd892389238", 46 | }, 47 | expectedData: map[string]string{ 48 | "clusterName": "mycluster", 49 | "clusterNamespace": "ns-1", 50 | "serverIP": "10.0.0.21", 51 | "serviceName": "service-name", 52 | "token": "dnjklsdjnksd892389238", 53 | "version": "v1.2.3", 54 | "mirrorHostNodes": "false", 55 | "kubeletPort": "10250", 56 | "webhookPort": "9443", 57 | }, 58 | }, 59 | { 60 | name: "version in status", 61 | args: args{ 62 | cluster: &v1beta1.Cluster{ 63 | ObjectMeta: v1.ObjectMeta{ 64 | Name: "mycluster", 65 | Namespace: "ns-1", 66 | }, 67 | Spec: v1beta1.ClusterSpec{ 68 | Version: "v1.2.3", 69 | }, 70 | Status: v1beta1.ClusterStatus{ 71 | HostVersion: "v1.3.3", 72 | }, 73 | }, 74 | ip: "10.0.0.21", 75 | kubeletPort: 10250, 76 | webhookPort: 9443, 77 | serviceName: "service-name", 78 | token: "dnjklsdjnksd892389238", 79 | }, 80 | expectedData: map[string]string{ 81 | "clusterName": "mycluster", 82 | "clusterNamespace": "ns-1", 83 | "serverIP": "10.0.0.21", 84 | "serviceName": "service-name", 85 | "token": "dnjklsdjnksd892389238", 86 | "version": "v1.2.3", 87 | "mirrorHostNodes": "false", 88 | "kubeletPort": "10250", 89 | "webhookPort": "9443", 90 | }, 91 | }, 92 | { 93 | name: "missing version in spec", 94 | args: args{ 95 | cluster: &v1beta1.Cluster{ 96 | ObjectMeta: v1.ObjectMeta{ 97 | Name: "mycluster", 98 | Namespace: "ns-1", 99 | }, 100 | Status: v1beta1.ClusterStatus{ 101 | HostVersion: "v1.3.3", 102 | }, 103 | }, 104 | kubeletPort: 10250, 105 | webhookPort: 9443, 106 | ip: "10.0.0.21", 107 | serviceName: "service-name", 108 | token: "dnjklsdjnksd892389238", 109 | }, 110 | expectedData: map[string]string{ 111 | "clusterName": "mycluster", 112 | "clusterNamespace": "ns-1", 113 | "serverIP": "10.0.0.21", 114 | "serviceName": "service-name", 115 | "token": "dnjklsdjnksd892389238", 116 | "version": "v1.3.3", 117 | "mirrorHostNodes": "false", 118 | "kubeletPort": "10250", 119 | "webhookPort": "9443", 120 | }, 121 | }, 122 | } 123 | 124 | for _, tt := range tests { 125 | t.Run(tt.name, func(t *testing.T) { 126 | config := sharedAgentData(tt.args.cluster, tt.args.serviceName, tt.args.token, tt.args.ip, tt.args.kubeletPort, tt.args.webhookPort) 127 | 128 | data := 
make(map[string]string) 129 | err := yaml.Unmarshal([]byte(config), data) 130 | 131 | assert.NoError(t, err) 132 | assert.Equal(t, tt.expectedData, data) 133 | }) 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | REPO ?= rancher 3 | COVERAGE ?= false 4 | VERSION ?= $(shell git describe --tags --always --dirty --match="v[0-9]*") 5 | 6 | ## Dependencies 7 | 8 | GOLANGCI_LINT_VERSION := v2.3.0 9 | GINKGO_VERSION ?= v2.21.0 10 | GINKGO_FLAGS ?= -v -r --coverprofile=cover.out --coverpkg=./... 11 | ENVTEST_VERSION ?= v0.0.0-20250505003155-b6c5897febe5 12 | ENVTEST_K8S_VERSION := 1.31.0 13 | CRD_REF_DOCS_VER ?= v0.1.0 14 | 15 | GOLANGCI_LINT ?= go run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION) 16 | GINKGO ?= go run github.com/onsi/ginkgo/v2/ginkgo@$(GINKGO_VERSION) 17 | CRD_REF_DOCS := go run github.com/elastic/crd-ref-docs@$(CRD_REF_DOCS_VER) 18 | 19 | ENVTEST ?= go run sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION) 20 | ENVTEST_DIR ?= $(shell pwd)/.envtest 21 | 22 | E2E_LABEL_FILTER ?= e2e 23 | 24 | export KUBEBUILDER_ASSETS ?= $(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(ENVTEST_DIR) -p path) 25 | 26 | 27 | .PHONY: all 28 | all: version generate build package ## Run 'make' or 'make all' to run 'version', 'generate', 'build' and 'package' 29 | 30 | .PHONY: version 31 | version: ## Print the current version 32 | @echo $(VERSION) 33 | 34 | .PHONY: build 35 | build: ## Build the K3k binaries (k3k, k3k-kubelet and k3kcli) 36 | @VERSION=$(VERSION) COVERAGE=$(COVERAGE) ./scripts/build 37 | 38 | .PHONY: package 39 | package: package-k3k package-k3k-kubelet ## Package the k3k and k3k-kubelet Docker images 40 | 41 | .PHONY: package-% 42 | package-%: 43 | docker build -f package/Dockerfile.$* \ 44 | -t $(REPO)/$*:$(VERSION) \ 45 | -t $(REPO)/$*:latest \ 46 | -t $(REPO)/$*:dev . 47 | 48 | .PHONY: push 49 | push: push-k3k push-k3k-kubelet ## Push the K3k images to the registry 50 | 51 | .PHONY: push-% 52 | push-%: 53 | docker push $(REPO)/$*:$(VERSION) 54 | docker push $(REPO)/$*:latest 55 | docker push $(REPO)/$*:dev 56 | 57 | .PHONY: test 58 | test: ## Run all the tests 59 | $(GINKGO) $(GINKGO_FLAGS) --label-filter=$(label-filter) 60 | 61 | .PHONY: test-unit 62 | test-unit: ## Run the unit tests (skips the e2e) 63 | $(GINKGO) $(GINKGO_FLAGS) --skip-file=tests/* 64 | 65 | .PHONY: test-controller 66 | test-controller: ## Run the controller tests (pkg/controller) 67 | $(GINKGO) $(GINKGO_FLAGS) pkg/controller 68 | 69 | .PHONY: test-kubelet-controller 70 | test-kubelet-controller: ## Run the kubelet controller tests (k3k-kubelet/controller) 71 | $(GINKGO) $(GINKGO_FLAGS) k3k-kubelet/controller 72 | 73 | .PHONY: test-e2e 74 | test-e2e: ## Run the e2e tests 75 | $(GINKGO) $(GINKGO_FLAGS) --label-filter="$(E2E_LABEL_FILTER)" tests 76 | 77 | .PHONY: test-cli 78 | test-cli: ## Run the cli tests 79 | $(GINKGO) $(GINKGO_FLAGS) --label-filter=cli --flake-attempts=3 tests 80 | 81 | .PHONY: generate 82 | generate: ## Generate the CRDs specs 83 | go generate ./...
84 | 85 | .PHONY: docs 86 | docs: ## Build the CRDs and CLI docs 87 | $(CRD_REF_DOCS) --config=./docs/crds/config.yaml \ 88 | --renderer=markdown \ 89 | --source-path=./pkg/apis/k3k.io/v1beta1 \ 90 | --output-path=./docs/crds/crd-docs.md 91 | @go run ./docs/cli/genclidoc.go 92 | 93 | .PHONY: lint 94 | lint: ## Find any linting issues in the project 95 | $(GOLANGCI_LINT) run --timeout=5m 96 | 97 | .PHONY: fmt 98 | fmt: ## Format the code in the project 99 | $(GOLANGCI_LINT) fmt ./... 100 | 101 | .PHONY: validate 102 | validate: generate docs fmt ## Validate the project checking for any dependency or doc mismatch 103 | $(GINKGO) unfocus 104 | go mod tidy 105 | git status --porcelain 106 | git --no-pager diff --exit-code 107 | 108 | .PHONY: install 109 | install: ## Install K3k with Helm on the targeted Kubernetes cluster 110 | helm upgrade --install --namespace k3k-system --create-namespace \ 111 | --set controller.extraEnv[0].name=DEBUG \ 112 | --set-string controller.extraEnv[0].value=true \ 113 | --set controller.image.repository=$(REPO)/k3k \ 114 | --set controller.image.tag=$(VERSION) \ 115 | --set agent.shared.image.repository=$(REPO)/k3k-kubelet \ 116 | --set agent.shared.image.tag=$(VERSION) \ 117 | k3k ./charts/k3k/ 118 | 119 | .PHONY: help 120 | help: ## Show this help. 121 | @egrep -h '\s##\s' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m %-30s\033[0m %s\n", $$1, $$2}' 122 | -------------------------------------------------------------------------------- /pkg/controller/cluster/cluster_finalize.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "reflect" 8 | "slices" 9 | 10 | "k8s.io/apimachinery/pkg/api/meta" 11 | "k8s.io/apimachinery/pkg/types" 12 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 13 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 14 | 15 | coordinationv1 "k8s.io/api/coordination/v1" 16 | rbacv1 "k8s.io/api/rbac/v1" 17 | apierrors "k8s.io/apimachinery/pkg/api/errors" 18 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 19 | ctrl "sigs.k8s.io/controller-runtime" 20 | 21 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 22 | "github.com/rancher/k3k/pkg/controller" 23 | "github.com/rancher/k3k/pkg/controller/cluster/agent" 24 | ) 25 | 26 | func (c *ClusterReconciler) finalizeCluster(ctx context.Context, cluster *v1beta1.Cluster) (reconcile.Result, error) { 27 | log := ctrl.LoggerFrom(ctx) 28 | log.V(1).Info("Deleting Cluster") 29 | 30 | // Set the Terminating phase and condition 31 | cluster.Status.Phase = v1beta1.ClusterTerminating 32 | meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ 33 | Type: ConditionReady, 34 | Status: metav1.ConditionFalse, 35 | Reason: ReasonTerminating, 36 | Message: "Cluster is being terminated", 37 | }) 38 | 39 | if err := c.unbindClusterRoles(ctx, cluster); err != nil { 40 | return reconcile.Result{}, err 41 | } 42 | 43 | // Deallocate ports for kubelet and webhook if used 44 | if cluster.Spec.Mode == v1beta1.SharedClusterMode && cluster.Spec.MirrorHostNodes { 45 | log.V(1).Info("deallocating ports for kubelet and webhook") 46 | 47 | if err := c.PortAllocator.DeallocateKubeletPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.KubeletPort); err != nil { 48 | return reconcile.Result{}, err 49 | } 50 | 51 | if err := c.PortAllocator.DeallocateWebhookPort(ctx, cluster.Name, cluster.Namespace, cluster.Status.WebhookPort); err != nil { 52 | return
reconcile.Result{}, err 53 | } 54 | } 55 | 56 | // delete API server lease 57 | lease := &coordinationv1.Lease{ 58 | TypeMeta: metav1.TypeMeta{ 59 | Kind: "Lease", 60 | APIVersion: "coordination.k8s.io/v1", 61 | }, 62 | ObjectMeta: metav1.ObjectMeta{ 63 | Name: cluster.Name, 64 | Namespace: cluster.Namespace, 65 | }, 66 | } 67 | if err := c.Client.Delete(ctx, lease); err != nil && !apierrors.IsNotFound(err) { 68 | return reconcile.Result{}, err 69 | } 70 | 71 | // Remove finalizer from the cluster and update it only when all resources are cleaned up 72 | if controllerutil.RemoveFinalizer(cluster, clusterFinalizerName) { 73 | log.Info("Deleting Cluster: removing finalizer") 74 | 75 | if err := c.Client.Update(ctx, cluster); err != nil { 76 | return reconcile.Result{}, err 77 | } 78 | } 79 | 80 | return reconcile.Result{}, nil 81 | } 82 | 83 | func (c *ClusterReconciler) unbindClusterRoles(ctx context.Context, cluster *v1beta1.Cluster) error { 84 | log := ctrl.LoggerFrom(ctx) 85 | log.V(1).Info("Unbinding ClusterRoles") 86 | 87 | clusterRoles := []string{"k3k-kubelet-node", "k3k-priorityclass"} 88 | 89 | var err error 90 | 91 | for _, clusterRole := range clusterRoles { 92 | var clusterRoleBinding rbacv1.ClusterRoleBinding 93 | if getErr := c.Client.Get(ctx, types.NamespacedName{Name: clusterRole}, &clusterRoleBinding); getErr != nil { 94 | err = errors.Join(err, fmt.Errorf("failed to get or find %s ClusterRoleBinding: %w", clusterRole, getErr)) 95 | continue 96 | } 97 | 98 | clusterSubject := rbacv1.Subject{ 99 | Kind: rbacv1.ServiceAccountKind, 100 | Name: controller.SafeConcatNameWithPrefix(cluster.Name, agent.SharedNodeAgentName), 101 | Namespace: cluster.Namespace, 102 | } 103 | 104 | // remove the clusterSubject from the ClusterRoleBinding 105 | cleanedSubjects := slices.DeleteFunc(clusterRoleBinding.Subjects, func(subject rbacv1.Subject) bool { 106 | return reflect.DeepEqual(subject, clusterSubject) 107 | }) 108 | 109 | if !reflect.DeepEqual(clusterRoleBinding.Subjects, cleanedSubjects) { 110 | clusterRoleBinding.Subjects = cleanedSubjects 111 | 112 | if updateErr := c.Client.Update(ctx, &clusterRoleBinding); updateErr != nil { 113 | err = errors.Join(err, fmt.Errorf("failed to update %s ClusterRoleBinding: %w", clusterRole, updateErr)) 114 | } 115 | } 116 | } 117 | 118 | return err 119 | } 120 | -------------------------------------------------------------------------------- /.goreleaser.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | release: 4 | draft: true 5 | replace_existing_draft: true 6 | prerelease: auto 7 | 8 | before: 9 | hooks: 10 | - go mod tidy 11 | - go generate ./...
12 | 13 | builds: 14 | - id: k3k 15 | env: 16 | - CGO_ENABLED=0 17 | goos: 18 | - linux 19 | goarch: 20 | - "amd64" 21 | - "arm64" 22 | - "s390x" 23 | ldflags: 24 | - -w -s # strip debug info and symbol table 25 | - -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}" 26 | 27 | - id: k3k-kubelet 28 | main: ./k3k-kubelet 29 | binary: k3k-kubelet 30 | env: 31 | - CGO_ENABLED=0 32 | goos: 33 | - linux 34 | goarch: 35 | - "amd64" 36 | - "arm64" 37 | - "s390x" 38 | ldflags: 39 | - -w -s # strip debug info and symbol table 40 | - -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}" 41 | 42 | - id: k3kcli 43 | main: ./cli 44 | binary: k3kcli 45 | env: 46 | - CGO_ENABLED=0 47 | goarch: 48 | - "amd64" 49 | - "arm64" 50 | ldflags: 51 | - -w -s # strip debug info and symbol table 52 | - -X "github.com/rancher/k3k/pkg/buildinfo.Version={{ .Tag }}" 53 | 54 | archives: 55 | - format: binary 56 | name_template: >- 57 | {{ .Binary }}-{{- .Os }}-{{ .Arch }} 58 | {{- if .Arm }}v{{ .Arm }}{{ end }} 59 | format_overrides: 60 | - goos: windows 61 | format: zip 62 | 63 | # For the image_templates we are using the following expression to build images for the correct registry 64 | # {{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }} 65 | # 66 | # REGISTRY= -> rancher/k3k:vX.Y.Z 67 | # REGISTRY=ghcr.io -> ghcr.io/rancher/k3k:vX.Y.Z 68 | # 69 | dockers: 70 | # k3k amd64 71 | - use: buildx 72 | goarch: amd64 73 | ids: 74 | - k3k 75 | - k3kcli 76 | dockerfile: "package/Dockerfile.k3k" 77 | skip_push: false 78 | image_templates: 79 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-amd64" 80 | build_flag_templates: 81 | - "--build-arg=BIN_K3K=k3k" 82 | - "--build-arg=BIN_K3KCLI=k3kcli" 83 | - "--pull" 84 | - "--platform=linux/amd64" 85 | 86 | # k3k arm64 87 | - use: buildx 88 | goarch: arm64 89 | ids: 90 | - k3k 91 | - k3kcli 92 | dockerfile: "package/Dockerfile.k3k" 93 | skip_push: false 94 | image_templates: 95 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-arm64" 96 | build_flag_templates: 97 | - "--build-arg=BIN_K3K=k3k" 98 | - "--build-arg=BIN_K3KCLI=k3kcli" 99 | - "--pull" 100 | - "--platform=linux/arm64" 101 | 102 | # k3k-kubelet amd64 103 | - use: buildx 104 | goarch: amd64 105 | ids: 106 | - k3k-kubelet 107 | dockerfile: "package/Dockerfile.k3k-kubelet" 108 | skip_push: false 109 | image_templates: 110 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-amd64" 111 | build_flag_templates: 112 | - "--build-arg=BIN_K3K_KUBELET=k3k-kubelet" 113 | - "--pull" 114 | - "--platform=linux/amd64" 115 | 116 | # k3k-kubelet arm64 117 | - use: buildx 118 | goarch: arm64 119 | ids: 120 | - k3k-kubelet 121 | dockerfile: "package/Dockerfile.k3k-kubelet" 122 | skip_push: false 123 | image_templates: 124 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-arm64" 125 | build_flag_templates: 126 | - "--build-arg=BIN_K3K_KUBELET=k3k-kubelet" 127 | - "--pull" 128 | - "--platform=linux/arm64" 129 | 130 | docker_manifests: 131 | # k3k 132 | - name_template: "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}" 133 | image_templates: 134 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-amd64" 135 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}:{{ .Tag }}-arm64" 136 | 137 | # k3k-kubelet 138 | - name_template: "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{
.Env.REPO }}-kubelet:{{ .Tag }}" 139 | image_templates: 140 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-amd64" 141 | - "{{- if .Env.REGISTRY }}{{ .Env.REGISTRY }}/{{ end }}{{ .Env.REPO }}-kubelet:{{ .Tag }}-arm64" 142 | 143 | changelog: 144 | sort: asc 145 | filters: 146 | exclude: 147 | - "^docs:" 148 | - "^test:" 149 | -------------------------------------------------------------------------------- /tests/cluster_status_test.go: -------------------------------------------------------------------------------- 1 | package k3k_test 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "k8s.io/apimachinery/pkg/api/meta" 8 | "sigs.k8s.io/controller-runtime/pkg/client" 9 | 10 | corev1 "k8s.io/api/core/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | 13 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 14 | "github.com/rancher/k3k/pkg/controller/cluster" 15 | "github.com/rancher/k3k/pkg/controller/policy" 16 | 17 | . "github.com/onsi/ginkgo/v2" 18 | . "github.com/onsi/gomega" 19 | ) 20 | 21 | var _ = When("a cluster's status is tracked", Label(e2eTestLabel), Label(statusTestsLabel), func() { 22 | var ( 23 | namespace *corev1.Namespace 24 | vcp *v1beta1.VirtualClusterPolicy 25 | ) 26 | 27 | // This BeforeEach/AfterEach will create a new namespace and a default policy for each test. 28 | BeforeEach(func() { 29 | ctx := context.Background() 30 | 31 | vcp = &v1beta1.VirtualClusterPolicy{ 32 | ObjectMeta: metav1.ObjectMeta{ 33 | GenerateName: "policy-", 34 | }, 35 | } 36 | Expect(k8sClient.Create(ctx, vcp)).To(Succeed()) 37 | 38 | namespace = NewNamespace() 39 | 40 | err := k8sClient.Get(ctx, client.ObjectKeyFromObject(namespace), namespace) 41 | Expect(err).To(Not(HaveOccurred())) 42 | 43 | namespace.Labels = map[string]string{ 44 | policy.PolicyNameLabelKey: vcp.Name, 45 | } 46 | Expect(k8sClient.Update(ctx, namespace)).To(Succeed()) 47 | }) 48 | 49 | AfterEach(func() { 50 | err := k8sClient.Delete(context.Background(), vcp) 51 | Expect(err).To(Not(HaveOccurred())) 52 | 53 | DeleteNamespaces(namespace.Name) 54 | }) 55 | 56 | Context("and the cluster is created with a valid configuration", func() { 57 | It("should start with Provisioning status and transition to Ready", func() { 58 | ctx := context.Background() 59 | 60 | clusterObj := &v1beta1.Cluster{ 61 | ObjectMeta: metav1.ObjectMeta{ 62 | GenerateName: "status-cluster-", 63 | Namespace: namespace.Name, 64 | }, 65 | } 66 | Expect(k8sClient.Create(ctx, clusterObj)).To(Succeed()) 67 | 68 | clusterKey := client.ObjectKeyFromObject(clusterObj) 69 | 70 | // Check for the initial status to be set 71 | Eventually(func(g Gomega) { 72 | err := k8sClient.Get(ctx, clusterKey, clusterObj) 73 | g.Expect(err).NotTo(HaveOccurred()) 74 | 75 | g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterProvisioning)) 76 | 77 | cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady) 78 | g.Expect(cond).NotTo(BeNil()) 79 | g.Expect(cond.Status).To(Equal(metav1.ConditionFalse)) 80 | g.Expect(cond.Reason).To(Equal(cluster.ReasonProvisioning)) 81 | }). 82 | WithPolling(time.Second * 2). 83 | WithTimeout(time.Second * 20). 
84 | Should(Succeed()) 85 | 86 | // Check for the status to be updated to Ready 87 | Eventually(func(g Gomega) { 88 | err := k8sClient.Get(ctx, clusterKey, clusterObj) 89 | g.Expect(err).NotTo(HaveOccurred()) 90 | 91 | g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterReady)) 92 | 93 | cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady) 94 | g.Expect(cond).NotTo(BeNil()) 95 | g.Expect(cond.Status).To(Equal(metav1.ConditionTrue)) 96 | g.Expect(cond.Reason).To(Equal(cluster.ReasonProvisioned)) 97 | }). 98 | WithTimeout(time.Minute * 3). 99 | WithPolling(time.Second * 5). 100 | Should(Succeed()) 101 | }) 102 | }) 103 | 104 | Context("and the cluster has validation errors", func() { 105 | It("should be in Pending status with ValidationFailed reason", func() { 106 | ctx := context.Background() 107 | 108 | clusterObj := &v1beta1.Cluster{ 109 | ObjectMeta: metav1.ObjectMeta{ 110 | GenerateName: "cluster-", 111 | Namespace: namespace.Name, 112 | }, 113 | Spec: v1beta1.ClusterSpec{ 114 | Mode: v1beta1.VirtualClusterMode, 115 | }, 116 | } 117 | Expect(k8sClient.Create(ctx, clusterObj)).To(Succeed()) 118 | 119 | clusterKey := client.ObjectKeyFromObject(clusterObj) 120 | 121 | // Check for the status to be updated 122 | Eventually(func(g Gomega) { 123 | err := k8sClient.Get(ctx, clusterKey, clusterObj) 124 | g.Expect(err).NotTo(HaveOccurred()) 125 | 126 | g.Expect(clusterObj.Status.Phase).To(Equal(v1beta1.ClusterPending)) 127 | 128 | cond := meta.FindStatusCondition(clusterObj.Status.Conditions, cluster.ConditionReady) 129 | g.Expect(cond).NotTo(BeNil()) 130 | g.Expect(cond.Status).To(Equal(metav1.ConditionFalse)) 131 | g.Expect(cond.Reason).To(Equal(cluster.ReasonValidationFailed)) 132 | g.Expect(cond.Message).To(ContainSubstring(`mode "virtual" is not allowed by the policy`)) 133 | }). 134 | WithPolling(time.Second * 2). 135 | WithTimeout(time.Second * 20). 136 | Should(Succeed()) 137 | }) 138 | }) 139 | }) 140 | -------------------------------------------------------------------------------- /tests/cluster_sync_test.go: -------------------------------------------------------------------------------- 1 | package k3k_test 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | corev1 "k8s.io/api/core/v1" 8 | apierrors "k8s.io/apimachinery/pkg/api/errors" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | 11 | "github.com/rancher/k3k/k3k-kubelet/translate" 12 | 13 | . "github.com/onsi/ginkgo/v2" 14 | . 
"github.com/onsi/gomega" 15 | ) 16 | 17 | var _ = When("a shared mode cluster is created", Ordered, Label(e2eTestLabel), func() { 18 | var ( 19 | virtualCluster *VirtualCluster 20 | virtualConfigMap *corev1.ConfigMap 21 | virtualService *corev1.Service 22 | ) 23 | 24 | BeforeAll(func() { 25 | virtualCluster = NewVirtualCluster() 26 | 27 | DeferCleanup(func() { 28 | DeleteNamespaces(virtualCluster.Cluster.Namespace) 29 | }) 30 | }) 31 | 32 | When("a ConfigMap is created in the virtual cluster", func() { 33 | BeforeAll(func() { 34 | ctx := context.Background() 35 | 36 | virtualConfigMap = &corev1.ConfigMap{ 37 | ObjectMeta: metav1.ObjectMeta{ 38 | Name: "test-cm", 39 | Namespace: "default", 40 | }, 41 | } 42 | 43 | var err error 44 | 45 | virtualConfigMap, err = virtualCluster.Client.CoreV1().ConfigMaps("default").Create(ctx, virtualConfigMap, metav1.CreateOptions{}) 46 | Expect(err).To(Not(HaveOccurred())) 47 | }) 48 | 49 | It("is replicated in the host cluster", func() { 50 | ctx := context.Background() 51 | 52 | hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster) 53 | namespacedName := hostTranslator.NamespacedName(virtualConfigMap) 54 | 55 | // check that the ConfigMap is synced in the host cluster 56 | Eventually(func(g Gomega) { 57 | _, err := k8s.CoreV1().ConfigMaps(namespacedName.Namespace).Get(ctx, namespacedName.Name, metav1.GetOptions{}) 58 | g.Expect(err).To(Not(HaveOccurred())) 59 | }). 60 | WithTimeout(time.Minute). 61 | WithPolling(time.Second). 62 | Should(Succeed()) 63 | }) 64 | }) 65 | 66 | When("a Service is created in the virtual cluster", func() { 67 | BeforeAll(func() { 68 | ctx := context.Background() 69 | 70 | virtualService = &corev1.Service{ 71 | ObjectMeta: metav1.ObjectMeta{ 72 | Name: "test-svc", 73 | Namespace: "default", 74 | }, 75 | Spec: corev1.ServiceSpec{ 76 | Type: corev1.ServiceTypeClusterIP, 77 | Ports: []corev1.ServicePort{{Port: 8888}}, 78 | }, 79 | } 80 | 81 | var err error 82 | virtualService, err = virtualCluster.Client.CoreV1().Services("default").Create(ctx, virtualService, metav1.CreateOptions{}) 83 | Expect(err).To(Not(HaveOccurred())) 84 | }) 85 | 86 | It("is replicated in the host cluster", func() { 87 | ctx := context.Background() 88 | 89 | hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster) 90 | namespacedName := hostTranslator.NamespacedName(virtualService) 91 | 92 | // check that the ConfigMap is synced in the host cluster 93 | Eventually(func(g Gomega) { 94 | _, err := k8s.CoreV1().Services(namespacedName.Namespace).Get(ctx, namespacedName.Name, metav1.GetOptions{}) 95 | g.Expect(err).To(Not(HaveOccurred())) 96 | }). 97 | WithTimeout(time.Minute). 98 | WithPolling(time.Second). 
99 | Should(Succeed()) 100 | }) 101 | }) 102 | 103 | When("the cluster is deleted", func() { 104 | BeforeAll(func() { 105 | ctx := context.Background() 106 | 107 | By("Deleting cluster") 108 | 109 | err := k8sClient.Delete(ctx, virtualCluster.Cluster) 110 | Expect(err).To(Not(HaveOccurred())) 111 | }) 112 | 113 | It("will delete the ConfigMap from the host cluster", func() { 114 | ctx := context.Background() 115 | 116 | hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster) 117 | namespacedName := hostTranslator.NamespacedName(virtualConfigMap) 118 | 119 | // check that the ConfigMap is deleted from the host cluster 120 | Eventually(func(g Gomega) { 121 | _, err := k8s.CoreV1().ConfigMaps(namespacedName.Namespace).Get(ctx, namespacedName.Name, metav1.GetOptions{}) 122 | g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) 123 | }). 124 | WithTimeout(time.Minute). 125 | WithPolling(time.Second). 126 | Should(Succeed()) 127 | }) 128 | 129 | It("will delete the Service from the host cluster", func() { 130 | ctx := context.Background() 131 | 132 | hostTranslator := translate.NewHostTranslator(virtualCluster.Cluster) 133 | namespacedName := hostTranslator.NamespacedName(virtualService) 134 | 135 | // check that the Service is deleted from the host cluster 136 | Eventually(func(g Gomega) { 137 | _, err := k8s.CoreV1().Services(namespacedName.Namespace).Get(ctx, namespacedName.Name, metav1.GetOptions{}) 138 | g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) 139 | }). 140 | WithTimeout(time.Minute). 141 | WithPolling(time.Second). 142 | Should(Succeed()) 143 | }) 144 | }) 145 | }) 146 | -------------------------------------------------------------------------------- /cli/cmds/kubeconfig.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "context" 5 | "net/url" 6 | "os" 7 | "path/filepath" 8 | "strings" 9 | "time" 10 | 11 | "github.com/sirupsen/logrus" 12 | "github.com/spf13/cobra" 13 | "k8s.io/apimachinery/pkg/types" 14 | "k8s.io/apiserver/pkg/authentication/user" 15 | "k8s.io/client-go/tools/clientcmd" 16 | "k8s.io/client-go/util/retry" 17 | 18 | apierrors "k8s.io/apimachinery/pkg/api/errors" 19 | clientcmdapi "k8s.io/client-go/tools/clientcmd/api" 20 | 21 | "github.com/rancher/k3k/pkg/apis/k3k.io/v1beta1" 22 | "github.com/rancher/k3k/pkg/controller" 23 | "github.com/rancher/k3k/pkg/controller/certs" 24 | "github.com/rancher/k3k/pkg/controller/kubeconfig" 25 | ) 26 | 27 | type GenerateKubeconfigConfig struct { 28 | name string 29 | configName string 30 | cn string 31 | org []string 32 | altNames []string 33 | expirationDays int64 34 | kubeconfigServerHost string 35 | } 36 | 37 | func NewKubeconfigCmd(appCtx *AppContext) *cobra.Command { 38 | cmd := &cobra.Command{ 39 | Use: "kubeconfig", 40 | Short: "Manage kubeconfig for clusters", 41 | } 42 | 43 | cmd.AddCommand( 44 | NewKubeconfigGenerateCmd(appCtx), 45 | ) 46 | 47 | return cmd 48 | } 49 | 50 | func NewKubeconfigGenerateCmd(appCtx *AppContext) *cobra.Command { 51 | cfg := &GenerateKubeconfigConfig{} 52 | 53 | cmd := &cobra.Command{ 54 | Use: "generate", 55 | Short: "Generate kubeconfig for clusters", 56 | RunE: generate(appCtx, cfg), 57 | Args: cobra.NoArgs, 58 | } 59 | 60 | CobraFlagNamespace(appCtx, cmd.Flags()) 61 | generateKubeconfigFlags(cmd, cfg) 62 | 63 | return cmd 64 | } 65 | 66 | func generateKubeconfigFlags(cmd *cobra.Command, cfg *GenerateKubeconfigConfig) { 67 | cmd.Flags().StringVar(&cfg.name, "name", "", "cluster name") 68 | 
cmd.Flags().StringVar(&cfg.configName, "config-name", "", "the name of the generated kubeconfig file") 69 | cmd.Flags().StringVar(&cfg.cn, "cn", controller.AdminCommonName, "Common name (CN) of the generated certificates for the kubeconfig") 70 | cmd.Flags().StringSliceVar(&cfg.org, "org", nil, "Organization name (ORG) of the generated certificates for the kubeconfig") 71 | cmd.Flags().StringSliceVar(&cfg.altNames, "altNames", nil, "Subject Alternative Names (SANs) of the generated certificates for the kubeconfig") 72 | cmd.Flags().Int64Var(&cfg.expirationDays, "expiration-days", 365, "Number of days until the certificates used for the kubeconfig expire") 73 | cmd.Flags().StringVar(&cfg.kubeconfigServerHost, "kubeconfig-server", "", "override the kubeconfig server host") 74 | } 75 | 76 | func generate(appCtx *AppContext, cfg *GenerateKubeconfigConfig) func(cmd *cobra.Command, args []string) error { 77 | return func(cmd *cobra.Command, args []string) error { 78 | ctx := context.Background() 79 | client := appCtx.Client 80 | 81 | clusterKey := types.NamespacedName{ 82 | Name: cfg.name, 83 | Namespace: appCtx.Namespace(cfg.name), 84 | } 85 | 86 | var cluster v1beta1.Cluster 87 | 88 | if err := client.Get(ctx, clusterKey, &cluster); err != nil { 89 | return err 90 | } 91 | 92 | url, err := url.Parse(appCtx.RestConfig.Host) 93 | if err != nil { 94 | return err 95 | } 96 | 97 | host := strings.Split(url.Host, ":") 98 | if cfg.kubeconfigServerHost != "" { 99 | host = []string{cfg.kubeconfigServerHost} 100 | cfg.altNames = append(cfg.altNames, cfg.kubeconfigServerHost) 101 | } 102 | 103 | certAltNames := certs.AddSANs(cfg.altNames) 104 | 105 | if len(cfg.org) == 0 { 106 | cfg.org = []string{user.SystemPrivilegedGroup} 107 | } 108 | 109 | kubeCfg := kubeconfig.KubeConfig{ 110 | CN: cfg.cn, 111 | ORG: cfg.org, 112 | ExpiryDate: time.Hour * 24 * time.Duration(cfg.expirationDays), 113 | AltNames: certAltNames, 114 | } 115 | 116 | logrus.Info("waiting for cluster to be available...") 117 | 118 | var kubeconfig *clientcmdapi.Config 119 | 120 | if err := retry.OnError(controller.Backoff, apierrors.IsNotFound, func() error { 121 | kubeconfig, err = kubeCfg.Generate(ctx, client, &cluster, host[0], 0) 122 | return err 123 | }); err != nil { 124 | return err 125 | } 126 | 127 | return writeKubeconfigFile(&cluster, kubeconfig, cfg.configName) 128 | } 129 | } 130 | 131 | func writeKubeconfigFile(cluster *v1beta1.Cluster, kubeconfig *clientcmdapi.Config, configName string) error { 132 | if configName == "" { 133 | configName = cluster.Namespace + "-" + cluster.Name + "-kubeconfig.yaml" 134 | } 135 | 136 | pwd, err := os.Getwd() 137 | if err != nil { 138 | return err 139 | } 140 | 141 | logrus.Infof(`You can start using the cluster with: 142 | 143 | export KUBECONFIG=%s 144 | kubectl cluster-info 145 | `, filepath.Join(pwd, configName)) 146 | 147 | kubeconfigData, err := clientcmd.Write(*kubeconfig) 148 | if err != nil { 149 | return err 150 | } 151 | 152 | return os.WriteFile(configName, kubeconfigData, 0o644) 153 | } 154 | -------------------------------------------------------------------------------- /.github/workflows/test-conformance-virtual.yaml: -------------------------------------------------------------------------------- 1 | name: Conformance Tests - Virtual Mode 2 | 3 | on: 4 | schedule: 5 | - cron: "0 1 * * *" 6 | workflow_dispatch: 7 | 8 | permissions: 9 | contents: read 10 | 11 | jobs: 12 | conformance: 13 | runs-on: ubuntu-latest 14 | 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | type: 19 | - parallel 20 | - serial 21
| 22 | steps: 23 | - name: Checkout code 24 | uses: actions/checkout@v4 25 | with: 26 | fetch-depth: 0 27 | fetch-tags: true 28 | 29 | - uses: actions/setup-go@v5 30 | with: 31 | go-version-file: go.mod 32 | 33 | - name: Install Helm 34 | uses: azure/setup-helm@v4.3.0 35 | 36 | - name: Install hydrophone 37 | run: go install sigs.k8s.io/hydrophone@latest 38 | 39 | - name: Install K3s 40 | env: 41 | KUBECONFIG: /etc/rancher/k3s/k3s.yaml 42 | K3S_HOST_VERSION: v1.32.1+k3s1 43 | run: | 44 | curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${K3S_HOST_VERSION} INSTALL_K3S_EXEC="--write-kubeconfig-mode=777" sh -s - 45 | 46 | kubectl cluster-info 47 | kubectl get nodes 48 | 49 | - name: Build, package and set up K3k 50 | env: 51 | KUBECONFIG: /etc/rancher/k3s/k3s.yaml 52 | run: | 53 | export REPO=ttl.sh/$(uuidgen) 54 | export VERSION=1h 55 | 56 | make build 57 | make package 58 | make push 59 | make install 60 | 61 | # add k3kcli to $PATH 62 | echo "${{ github.workspace }}/bin" >> $GITHUB_PATH 63 | 64 | echo "Wait for K3k controller to be available" 65 | kubectl wait -n k3k-system pod --for condition=Ready -l "app.kubernetes.io/name=k3k" --timeout=5m 66 | 67 | - name: Check k3kcli 68 | run: k3kcli -v 69 | 70 | - name: Create virtual cluster 71 | env: 72 | KUBECONFIG: /etc/rancher/k3s/k3s.yaml 73 | run: | 74 | k3kcli cluster create --mode=virtual --servers=2 mycluster 75 | 76 | export KUBECONFIG=${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml 77 | 78 | kubectl cluster-info 79 | kubectl get nodes 80 | kubectl get pods -A 81 | 82 | - name: Run conformance tests (parallel) 83 | if: matrix.type == 'parallel' 84 | run: | 85 | # Run conformance tests in parallel mode (skipping serial) 86 | hydrophone --conformance --parallel 4 --skip='\[Serial\]' \ 87 | --kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \ 88 | --output-dir /tmp 89 | 90 | - name: Run conformance tests (serial) 91 | if: matrix.type == 'serial' 92 | run: | 93 | # Run serial conformance tests 94 | hydrophone --focus='\[Serial\].*\[Conformance\]' \ 95 | --kubeconfig ${{ github.workspace }}/k3k-mycluster-mycluster-kubeconfig.yaml \ 96 | --output-dir /tmp 97 | 98 | - name: Export logs 99 | if: always() 100 | env: 101 | KUBECONFIG: /etc/rancher/k3s/k3s.yaml 102 | run: | 103 | journalctl -u k3s -o cat --no-pager > /tmp/k3s.log 104 | kubectl logs -n k3k-system -l "app.kubernetes.io/name=k3k" --tail=-1 > /tmp/k3k.log 105 | 106 | - name: Archive K3s logs 107 | uses: actions/upload-artifact@v4 108 | if: always() 109 | with: 110 | name: k3s-${{ matrix.type }}-logs 111 | path: /tmp/k3s.log 112 | 113 | - name: Archive K3k logs 114 | uses: actions/upload-artifact@v4 115 | if: always() 116 | with: 117 | name: k3k-${{ matrix.type }}-logs 118 | path: /tmp/k3k.log 119 | 120 | - name: Archive conformance logs 121 | uses: actions/upload-artifact@v4 122 | if: always() 123 | with: 124 | name: conformance-${{ matrix.type }}-logs 125 | path: /tmp/e2e.log 126 | 127 | - name: Job Summary 128 | if: always() 129 | run: | 130 | echo '## 📊 Conformance Tests Results (${{ matrix.type }})' >> $GITHUB_STEP_SUMMARY 131 | echo '| Passed | Failed | Pending | Skipped |' >> $GITHUB_STEP_SUMMARY 132 | echo '|---|---|---|---|' >> $GITHUB_STEP_SUMMARY 133 | 134 | RESULTS=$(tail -10 /tmp/e2e.log | grep -E "Passed .* Failed .* Pending .* Skipped" | cut -d '-' -f 3) 135 | RESULTS=$(echo $RESULTS | grep -oE '[0-9]+' | xargs | sed 's/ / | /g') 136 | echo "| $RESULTS |" >> $GITHUB_STEP_SUMMARY 137 | 138 | # only include failed tests section if
there are any 139 | if grep -q '\[FAIL\]' /tmp/e2e.log; then 140 | echo '' >> $GITHUB_STEP_SUMMARY 141 | echo '### Failed Tests' >> $GITHUB_STEP_SUMMARY 142 | echo '```' >> $GITHUB_STEP_SUMMARY 143 | grep '\[FAIL\]' /tmp/e2e.log >> $GITHUB_STEP_SUMMARY 144 | echo '```' >> $GITHUB_STEP_SUMMARY 145 | fi 146 | -------------------------------------------------------------------------------- /docs/development.md: -------------------------------------------------------------------------------- 1 | # Development 2 | 3 | 4 | ## Prerequisites 5 | 6 | To start developing K3k you will need: 7 | 8 | - Go 9 | - Docker 10 | - Helm 11 | - A running Kubernetes cluster 12 | 13 | 14 | ### TLDR 15 | 16 | ```shell 17 | #!/bin/bash 18 | 19 | set -euo pipefail 20 | 21 | # These environment variables configure the image repository and tag. 22 | export REPO=ghcr.io/myuser 23 | export VERSION=dev-$(date -u '+%Y%m%d%H%M') 24 | 25 | make 26 | make push 27 | make install 28 | ``` 29 | 30 | ### Makefile 31 | 32 | To see all the available Make commands you can run `make help`, e.g.: 33 | 34 | ``` 35 | -> % make help 36 | all Run 'make' or 'make all' to run 'version', 'generate', 'build' and 'package' 37 | version Print the current version 38 | build Build the K3k binaries (k3k, k3k-kubelet and k3kcli) 39 | package Package the k3k and k3k-kubelet Docker images 40 | push Push the K3k images to the registry 41 | test Run all the tests 42 | test-unit Run the unit tests (skips the e2e) 43 | test-controller Run the controller tests (pkg/controller) 44 | test-kubelet-controller Run the kubelet controller tests (k3k-kubelet/controller) 45 | test-e2e Run the e2e tests 46 | generate Generate the CRDs specs 47 | docs Build the CRDs and CLI docs 48 | lint Find any linting issues in the project 49 | validate Validate the project checking for any dependency or doc mismatch 50 | install Install K3k with Helm on the targeted Kubernetes cluster 51 | help Show this help. 52 | ``` 53 | 54 | ### Build 55 | 56 | To build the needed binaries (`k3k`, `k3k-kubelet` and the `k3kcli`) and package the images you can simply run `make`. 57 | 58 | By default the `rancher` repository will be used, but you can customize this to your registry with the `REPO` env var: 59 | 60 | ``` 61 | REPO=ghcr.io/userorg make 62 | ``` 63 | 64 | To customize the tag you can also explicitly set the `VERSION`: 65 | 66 | ``` 67 | VERSION=dev-$(date -u '+%Y%m%d%H%M') make 68 | ``` 69 | 70 | 71 | ### Push 72 | 73 | You will need to push the built images to your registry, and you can use the `make push` command to do this. 74 | 75 | 76 | ### Install 77 | 78 | Once you have your images available you can install K3k with the `make install` command. This will use `helm` to install the release. 79 | 80 | 81 | ## Tests 82 | 83 | To run the tests you can just run `make test`, or one of the other available "sub-tests" targets (`test-unit`, `test-controller`, `test-e2e`). 84 | 85 | We use [Ginkgo](https://onsi.github.io/ginkgo/) and [`envtest`](https://book.kubebuilder.io/reference/envtest) for testing the controllers. 86 | 87 | The required binaries for `envtest` are installed with [`setup-envtest`](https://pkg.go.dev/sigs.k8s.io/controller-runtime/tools/setup-envtest), in the `.envtest` folder. 88 | 89 | 90 | ## CRDs and Docs 91 | 92 | We are using Kubebuilder and `controller-gen` to build the needed CRDs. To generate the specs you can run `make generate`. 93 | 94 | Remember also to update the CRDs documentation by running the `make docs` command.
95 | 96 | ## How to install k3k on k3d 97 | 98 | This section provides a guide on how to install k3k on [k3d](https://k3d.io). 99 | 100 | ### Installing k3d 101 | 102 | Since k3d uses Docker under the hood, we need to expose on the host the ports that we'll later use as NodePorts when creating virtual clusters. 103 | 104 | Create the k3d cluster in the following way: 105 | 106 | ```bash 107 | k3d cluster create k3k -p "30000-30010:30000-30010@server:0" 108 | ``` 109 | 110 | With this syntax, ports 30000 through 30010 will be exposed on the host. 111 | 112 | ### Install k3k 113 | 114 | Now install k3k as usual: 115 | 116 | ```bash 117 | helm repo update 118 | helm install --namespace k3k-system --create-namespace k3k k3k/k3k 119 | ``` 120 | 121 | ### Create a virtual cluster 122 | 123 | Once the k3k controller is up and running, create a namespace for our first virtual cluster. 124 | 125 | ```bash 126 | kubectl create ns k3k-mycluster 127 | ``` 128 | 129 | Then create the virtual cluster, exposing it through a NodePort on one of the ports that we set up in the previous step: 130 | 131 | ```bash 132 | cat <