├── .dockerignore ├── .drone.yml ├── .droneignore ├── .github ├── .codecov.yml ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── cut_release.md │ ├── feature_request.md │ └── os_validation.md ├── PULL_REQUEST_TEMPLATE.md ├── SECURITY.md ├── actions │ ├── setup-go │ │ └── action.yaml │ └── vagrant-setup │ │ └── action.yaml ├── dco.yml ├── dependabot.yml └── workflows │ ├── build-k3s.yaml │ ├── e2e.yaml │ ├── epic.yaml │ ├── govulncheck.yml │ ├── install.yaml │ ├── integration.yaml │ ├── nightly-install.yaml │ ├── release.yml │ ├── scorecard.yml │ ├── stale.yml │ ├── trivy.yaml │ ├── unitcoverage.yaml │ └── updatecli.yaml ├── .gitignore ├── .golangci.json ├── ADOPTERS.md ├── BUILDING.md ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── DCO ├── Dockerfile.dapper ├── Dockerfile.local ├── Dockerfile.manifest ├── Dockerfile.test ├── LICENSE ├── MAINTAINERS ├── Makefile ├── README.md ├── ROADMAP.md ├── channel.yaml ├── cmd ├── agent │ └── main.go ├── cert │ └── main.go ├── completion │ └── main.go ├── containerd │ └── main.go ├── ctr │ └── main.go ├── encrypt │ └── main.go ├── etcdsnapshot │ └── main.go ├── k3s │ ├── main.go │ ├── main_linux.go │ ├── main_test.go │ └── main_windows.go ├── kubectl │ └── main.go ├── server │ └── main.go └── token │ └── main.go ├── conformance ├── Dockerfile └── run-test.sh ├── contrib ├── ansible │ └── README.md ├── gotests_templates │ ├── call.tmpl │ ├── function.tmpl │ ├── header.tmpl │ ├── inline.tmpl │ ├── inputs.tmpl │ ├── message.tmpl │ └── results.tmpl └── util │ ├── DIAGNOSTICS.md │ ├── check-config.sh │ ├── diagnostics.sh │ ├── fetch-diags.sh │ ├── generate-custom-ca-certs.sh │ └── rotate-default-ca-certs.sh ├── docker-compose.yml ├── docs ├── adrs │ ├── add-auto-import-containerd.md │ ├── add-dual-stack-support-to-netpol-agent.md │ ├── agent-join-token.md │ ├── ca-cert-rotation.md │ ├── cert-expiry-checks.md │ ├── core-controller-user.md │ ├── cri-dockerd.md │ ├── deprecating-and-removing-flags.md │ ├── embedded-registry.md 
│ ├── etcd-s3-secret.md │ ├── etcd-snapshot-cr.md │ ├── flannel-options.md │ ├── gh-branch-strategy.md │ ├── integrate-vpns.md │ ├── k3s-charts.md │ ├── record-architecture-decisions.md │ ├── remove-svclb-daemonset.md │ ├── secrets-encryption-v3.md │ ├── security-updates-automation.md │ ├── server-token-rotation.md │ ├── servicelb-ccm.md │ ├── standalone-containerd.md │ ├── status-for-etcd-node.md │ └── testing-2024.md ├── contrib │ ├── code_conventions.md │ ├── continuous_integration.md │ ├── development.md │ └── git_workflow.md └── release │ ├── expanded │ ├── build_container.md │ ├── channel_server.md │ ├── cut_release.md │ ├── milestones.md │ ├── pr.md │ ├── rebase.md │ ├── release_images.md │ ├── release_notes.md │ ├── setup_env.md │ ├── setup_k3s_repos.md │ ├── setup_k8s_repos.md │ ├── setup_rc.md │ ├── tagging.md │ └── update_kdm.md │ ├── kubernetes-upgrade.md │ └── release.md ├── go.mod ├── go.sum ├── hack └── crdgen.go ├── install.sh ├── install.sh.sha256sum ├── k3s-rootless.service ├── k3s.service ├── main.go ├── manifests ├── ccm.yaml ├── coredns.yaml ├── local-storage.yaml ├── metrics-server │ ├── aggregated-metrics-reader.yaml │ ├── auth-delegator.yaml │ ├── auth-reader.yaml │ ├── metrics-apiservice.yaml │ ├── metrics-server-deployment.yaml │ ├── metrics-server-service.yaml │ └── resource-reader.yaml ├── rolebindings.yaml ├── runtimes.yaml └── traefik.yaml ├── package ├── Dockerfile └── rpm │ ├── install.sh │ ├── k3s.spec │ └── repo-setup.sh ├── pkg ├── agent │ ├── config │ │ ├── config.go │ │ ├── config_internal_test.go │ │ ├── config_linux.go │ │ ├── config_linux_test.go │ │ └── config_windows.go │ ├── containerd │ │ ├── command.go │ │ ├── command_windows.go │ │ ├── config.go │ │ ├── config_linux.go │ │ ├── config_test.go │ │ ├── config_windows.go │ │ ├── containerd.go │ │ ├── runtimes.go │ │ ├── runtimes_test.go │ │ ├── selinux.go │ │ └── watcher.go │ ├── cri │ │ ├── cri.go │ │ ├── cri_linux.go │ │ └── cri_windows.go │ ├── cridockerd │ │ ├── 
config_linux.go │ │ ├── config_windows.go │ │ ├── cridockerd.go │ │ └── nocridockerd.go │ ├── flannel │ │ ├── flannel.go │ │ ├── setup.go │ │ ├── setup_linux.go │ │ ├── setup_test.go │ │ └── setup_windows.go │ ├── https │ │ └── https.go │ ├── loadbalancer │ │ ├── config.go │ │ ├── httpproxy.go │ │ ├── httpproxy_test.go │ │ ├── loadbalancer.go │ │ ├── loadbalancer_test.go │ │ ├── metrics.go │ │ ├── servers.go │ │ ├── utility.go │ │ ├── utility_windows.go │ │ └── utlity_linux.go │ ├── netpol │ │ ├── netpol.go │ │ └── netpol_windows.go │ ├── proxy │ │ └── apiproxy.go │ ├── run.go │ ├── run_linux.go │ ├── run_test.go │ ├── run_windows.go │ ├── syssetup │ │ ├── setup.go │ │ └── setup_windows.go │ ├── templates │ │ ├── templates.go │ │ ├── templates_linux.go │ │ └── templates_windows.go │ ├── tunnel │ │ └── tunnel.go │ └── util │ │ ├── file.go │ │ └── strings.go ├── authenticator │ ├── authenticator.go │ ├── basicauth │ │ ├── basicauth.go │ │ ├── basicauth_test.go │ │ └── interfaces.go │ ├── hash │ │ ├── hash.go │ │ ├── scrypt.go │ │ └── scrypt_test.go │ └── passwordfile │ │ ├── passwordfile.go │ │ └── passwordfile_test.go ├── bootstrap │ ├── bootstrap.go │ └── bootstrap_test.go ├── certmonitor │ └── certmonitor.go ├── cgroups │ ├── cgroups_linux.go │ └── cgroups_windows.go ├── cli │ ├── agent │ │ └── agent.go │ ├── cert │ │ └── cert.go │ ├── cmds │ │ ├── agent.go │ │ ├── certs.go │ │ ├── check-config.go │ │ ├── completion.go │ │ ├── config.go │ │ ├── const_linux.go │ │ ├── const_windows.go │ │ ├── cover_default.go │ │ ├── cover_linux.go │ │ ├── crictl.go │ │ ├── ctr.go │ │ ├── etcd_snapshot.go │ │ ├── golang.go │ │ ├── init_default.go │ │ ├── init_linux.go │ │ ├── kubectl.go │ │ ├── log.go │ │ ├── log_default.go │ │ ├── log_linux.go │ │ ├── nostage.go │ │ ├── root.go │ │ ├── secrets_encrypt.go │ │ ├── server.go │ │ ├── stage.go │ │ └── token.go │ ├── completion │ │ └── completion.go │ ├── crictl │ │ └── crictl.go │ ├── ctr │ │ └── ctr.go │ ├── etcdsnapshot │ │ └── 
etcd_snapshot.go │ ├── kubectl │ │ └── kubectl.go │ ├── secretsencrypt │ │ └── secrets_encrypt.go │ ├── server │ │ └── server.go │ └── token │ │ └── token.go ├── clientaccess │ ├── kubeconfig.go │ ├── token.go │ ├── token_linux_test.go │ └── token_test.go ├── cloudprovider │ ├── cloudprovider.go │ ├── instances.go │ ├── instances_test.go │ ├── loadbalancer.go │ ├── servicelb.go │ └── servicelb_test.go ├── cluster │ ├── address_controller.go │ ├── bootstrap.go │ ├── bootstrap_test.go │ ├── cluster.go │ ├── encrypt.go │ ├── etcd.go │ ├── https.go │ ├── managed.go │ ├── managed │ │ └── drivers.go │ └── storage.go ├── codegen │ └── main.go ├── configfilearg │ ├── defaultparser.go │ ├── defaultparser_test.go │ ├── parser.go │ ├── parser_test.go │ └── testdata │ │ ├── data.yaml │ │ ├── data.yaml.d │ │ ├── 01-data.yml │ │ ├── 02-data-ignore-this.txt │ │ └── 02-data.yaml │ │ ├── defaultdata.yaml │ │ ├── dropin-only.yaml.d │ │ ├── 01-data.yml │ │ ├── 02-data-ignore-this.txt │ │ └── 02-data.yaml │ │ ├── invalid-dropin.yaml.d │ │ └── 01-data.yml │ │ └── invalid.yaml ├── containerd │ ├── builtins.go │ ├── builtins_cri.go │ ├── builtins_linux.go │ ├── builtins_windows.go │ ├── main.go │ ├── none.go │ ├── utility_linux.go │ └── utility_windows.go ├── crd │ └── crds.go ├── ctr │ └── main.go ├── daemons │ ├── agent │ │ ├── agent.go │ │ ├── agent_linux.go │ │ └── agent_windows.go │ ├── config │ │ └── types.go │ ├── control │ │ ├── deps │ │ │ ├── deps.go │ │ │ └── deps_test.go │ │ ├── proxy │ │ │ └── proxy.go │ │ ├── server.go │ │ ├── server_test.go │ │ └── tunnel.go │ └── executor │ │ ├── embed.go │ │ ├── embed_linux.go │ │ ├── embed_windows.go │ │ ├── etcd.go │ │ └── executor.go ├── data │ └── data.go ├── datadir │ └── datadir.go ├── dataverify │ └── dataverify.go ├── deploy │ ├── controller.go │ ├── nostage.go │ ├── stage.go │ └── zz_generated_bindata.go ├── etcd │ ├── apiaddresses_controller.go │ ├── etcd.go │ ├── etcd_linux_test.go │ ├── etcdproxy.go │ ├── member_controller.go 
│ ├── metadata_controller.go │ ├── resolver.go │ ├── s3 │ │ ├── config_secret.go │ │ ├── s3.go │ │ └── s3_test.go │ ├── snapshot.go │ ├── snapshot │ │ └── types.go │ ├── snapshot_controller.go │ ├── snapshot_handler.go │ └── snapshot_metrics.go ├── flock │ ├── flock_other.go │ ├── flock_unix.go │ └── flock_unix_test.go ├── kubeadm │ ├── token.go │ ├── types.go │ └── utils.go ├── kubectl │ └── main.go ├── metrics │ └── metrics.go ├── node │ └── controller.go ├── nodeconfig │ ├── nodeconfig.go │ └── nodeconfig_test.go ├── nodepassword │ ├── nodepassword.go │ ├── nodepassword_test.go │ └── validate.go ├── passwd │ └── passwd.go ├── proctitle │ ├── proctile.go │ └── proctile_windows.go ├── profile │ └── profile.go ├── rootless │ ├── mounts.go │ ├── portdriver.go │ ├── rootless.go │ └── rootless_windows.go ├── rootlessports │ ├── controller.go │ └── controller_windows.go ├── secretsencrypt │ └── config.go ├── server │ ├── auth │ │ └── auth.go │ ├── context.go │ ├── handlers │ │ ├── cert.go │ │ ├── handlers.go │ │ ├── handlers_test.go │ │ ├── router.go │ │ ├── secrets-encrypt.go │ │ └── token.go │ ├── server.go │ └── types.go ├── spegel │ ├── bootstrap.go │ ├── registry.go │ └── spegel.go ├── static │ ├── nostage.go │ ├── stage.go │ └── zz_generated_bindata.go ├── untar │ └── untar.go ├── util │ ├── api.go │ ├── apierrors.go │ ├── args.go │ ├── args_test.go │ ├── cert.go │ ├── client.go │ ├── client_test.go │ ├── command.go │ ├── errors.go │ ├── file.go │ ├── labels.go │ ├── metrics │ │ └── metrics.go │ ├── net.go │ ├── net_test.go │ ├── net_unix.go │ ├── net_windows.go │ ├── permissions │ │ ├── permissions_others.go │ │ └── permissions_windows.go │ ├── reflect.go │ ├── services │ │ ├── services.go │ │ └── services_test.go │ └── token.go ├── version │ └── version.go └── vpn │ └── vpn.go ├── scripts ├── airgap │ ├── generate-list.sh │ ├── image-list.txt │ └── volume-test.yaml ├── binary_size_check.sh ├── build ├── build-tests-sonobuoy ├── build-upload ├── ci ├── clean ├── 
dispatch ├── download ├── entry.sh ├── generate ├── git_version.sh ├── image_scan.sh ├── manifest ├── package ├── package-airgap ├── package-cli ├── package-image ├── sonobuoy-config.json ├── tag-image-latest ├── test ├── test-mods ├── validate └── version.sh ├── tests ├── TESTING.md ├── client.go ├── docker │ ├── autoimport │ │ └── autoimport_test.go │ ├── basics │ │ └── basics_test.go │ ├── bootstraptoken │ │ └── bootstraptoken_test.go │ ├── cacerts │ │ └── cacerts_test.go │ ├── conformance │ │ └── conformance_test.go │ ├── etcd │ │ └── etcd_test.go │ ├── flaky-tests │ ├── hardened │ │ ├── cluster-level-pss.yaml │ │ └── hardened_test.go │ ├── lazypull │ │ └── lazypull_test.go │ ├── resources │ │ ├── clusterip.yaml │ │ ├── hardened-ingress.yaml │ │ ├── hardened-netpol.yaml │ │ ├── loadbalancer-allTraffic.yaml │ │ ├── loadbalancer-extTrafficPol.yaml │ │ ├── loadbalancer-intTrafficPol.yaml │ │ ├── nodeport.yaml │ │ ├── pod_client.yaml │ │ ├── secrets.yaml │ │ ├── snapshot-test.yaml │ │ └── volume-test.yaml │ ├── secretsencryption │ │ └── secretsencryption_test.go │ ├── skew │ │ └── skew_test.go │ ├── snapshotrestore │ │ └── snapshotrestore_test.go │ ├── svcpoliciesandfirewall │ │ └── svcpoliciesandfirewall_test.go │ ├── test-helpers │ ├── test-helpers.go │ ├── test-runner │ ├── test-setup-sonobuoy │ ├── test-setup-sonobuoy-etcd │ ├── test-setup-sonobuoy-mysql │ ├── test-setup-sonobuoy-postgres │ ├── token │ │ └── token_test.go │ └── upgrade │ │ └── upgrade_test.go ├── e2e │ ├── README.md │ ├── amd64_resource_files │ │ ├── cluster-cidr-ipv6.yaml │ │ ├── cluster-cidr.yaml │ │ ├── clusterip.yaml │ │ ├── daemonset.yaml │ │ ├── dnsutils.yaml │ │ ├── dualstack_clusterip.yaml │ │ ├── dualstack_ingress.yaml │ │ ├── dualstack_nodeport.yaml │ │ ├── hardened_psp.yaml │ │ ├── ingress.yaml │ │ ├── loadbalancer.yaml │ │ ├── local-path-provisioner.yaml │ │ ├── netpol-fail.yaml │ │ ├── netpol-work.yaml │ │ ├── nodeport.yaml │ │ ├── pod_client.yaml │ │ ├── secrets.yaml │ │ └── 
wasm-workloads.yaml │ ├── btrfs │ │ ├── Vagrantfile │ │ └── btrfs_test.go │ ├── cis_amd64_resource_files │ │ ├── clusterip.yaml │ │ ├── daemonset.yaml │ │ ├── dnsutils.yaml │ │ ├── ingress.yaml │ │ ├── loadbalancer.yaml │ │ ├── local-path-provisioner.yaml │ │ ├── nodeport.yaml │ │ └── secrets.yaml │ ├── dualstack │ │ ├── Vagrantfile │ │ └── dualstack_test.go │ ├── e2e_test_playbook.yaml │ ├── embeddedmirror │ │ ├── Vagrantfile │ │ └── embeddedmirror_test.go │ ├── externalip │ │ ├── Vagrantfile │ │ └── externalip_test.go │ ├── privateregistry │ │ ├── Vagrantfile │ │ └── privateregistry_test.go │ ├── rootless │ │ ├── Vagrantfile │ │ └── rootless_test.go │ ├── rotateca │ │ ├── Vagrantfile │ │ └── rotateca_test.go │ ├── s3 │ │ ├── Vagrantfile │ │ └── s3_test.go │ ├── scripts │ │ ├── Dockerfile │ │ ├── Jenkinsfile │ │ ├── cleanup_vms.sh │ │ ├── drone_registries.sh │ │ ├── harden.sh │ │ ├── hosts │ │ ├── init.sh │ │ ├── ipv6.sh │ │ ├── latest_commit.sh │ │ ├── rancher.sh │ │ ├── registry.sh │ │ ├── run_tests.sh │ │ └── setup_rootless.sh │ ├── secretsencryption │ │ ├── Vagrantfile │ │ └── secretsencryption_test.go │ ├── secretsencryption_old │ │ ├── Vagrantfile │ │ └── secretsencryption_test.go │ ├── splitserver │ │ ├── Vagrantfile │ │ └── splitserver_test.go │ ├── startup │ │ ├── Vagrantfile │ │ └── startup_test.go │ ├── tailscale │ │ ├── README.md │ │ ├── Vagrantfile │ │ └── tailscale_test.go │ ├── testutils.go │ ├── upgradecluster │ │ ├── Vagrantfile │ │ └── upgradecluster_test.go │ ├── vagrantdefaults.rb │ ├── validatecluster │ │ ├── Vagrantfile │ │ └── validatecluster_test.go │ └── wasm │ │ ├── Vagrantfile │ │ └── wasm_test.go ├── install │ ├── centos-9 │ │ └── Vagrantfile │ ├── fedora │ │ └── Vagrantfile │ ├── install_util.rb │ ├── opensuse-leap │ │ └── Vagrantfile │ ├── opensuse-microos │ │ └── Vagrantfile │ ├── rocky-8 │ │ └── Vagrantfile │ ├── rocky-9 │ │ └── Vagrantfile │ └── ubuntu-2404 │ │ └── Vagrantfile ├── integration │ ├── Dockerfile.test │ ├── README.md │ 
├── cacertrotation │ │ └── cacertrotation_int_test.go │ ├── certrotation │ │ └── certrotation_int_test.go │ ├── custometcdargs │ │ └── custometcdargs_int_test.go │ ├── dualstack │ │ └── dualstack_int_test.go │ ├── etcdrestore │ │ ├── etcd_restore_int_test.go │ │ └── testdata │ │ │ ├── temp_depl.yaml │ │ │ └── temp_depl2.yaml │ ├── etcdsnapshot │ │ └── etcdsnapshot_int_test.go │ ├── flannelipv6masq │ │ └── flannelipv6masq_int_test.go │ ├── flannelnone │ │ └── flannelnone_int_test.go │ ├── integration.go │ ├── kubeflags │ │ └── kubeflags_test.go │ ├── localstorage │ │ ├── localstorage_int_test.go │ │ └── testdata │ │ │ ├── localstorage_pod.yaml │ │ │ └── localstorage_pvc.yaml │ ├── longhorn │ │ ├── longhorn_int_test.go │ │ └── testdata │ │ │ ├── longhorn.yaml │ │ │ ├── pod.yaml │ │ │ └── pvc.yaml │ ├── secretsencryption │ │ └── secretsencryption_int_test.go │ ├── startup │ │ ├── startup_int_test.go │ │ └── testdata │ │ │ ├── agnhost.yaml │ │ │ └── dummy.yaml │ └── test-runner.sh ├── mock │ ├── core.go │ ├── executor.go │ ├── executor_helpers.go │ └── matchers.go ├── perf │ ├── .gitignore │ ├── Makefile │ ├── README.md │ ├── agents │ │ ├── data.tf │ │ ├── files │ │ │ └── pool_worker_userdata.tmpl │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── variables.tf │ │ └── versions.tf │ ├── scripts │ │ ├── config │ │ ├── perf │ │ └── test │ ├── server │ │ ├── data.tf │ │ ├── files │ │ │ ├── etcd.tmpl │ │ │ ├── etcd_build.sh │ │ │ ├── server_userdata.tmpl │ │ │ └── worker_userdata.tmpl │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── variables.tf │ │ └── versions.tf │ └── tests │ │ ├── density │ │ ├── 2000_nodes │ │ │ └── override.yaml │ │ ├── 5000_nodes │ │ │ └── override.yaml │ │ ├── 600_nodes │ │ │ └── high_density_override.yaml │ │ ├── config.yaml │ │ └── deployment.yaml │ │ └── load │ │ ├── config.yaml │ │ ├── configmap.yaml │ │ ├── daemonset-priorityclass.yaml │ │ ├── daemonset.yaml │ │ ├── deployment.yaml │ │ ├── job.yaml │ │ ├── networkpolicy.yaml │ │ ├── pvc.yaml │ │ ├── 
secret.yaml │ │ ├── service.yaml │ │ ├── statefulset.yaml │ │ └── statefulset_service.yaml └── unit.go └── updatecli ├── README.md ├── scripts ├── run-go-generate.sh └── run-go-mod-update.sh ├── updatecli.d ├── coredns.yaml ├── golang-alpine.yaml ├── klipper-helm-and-controller.yaml ├── klipper-lb.yaml ├── local-path-provisioner.yaml ├── sonobuoy.yaml ├── trivy.yaml └── updatek3sroot.yaml └── values.yaml /.dockerignore: -------------------------------------------------------------------------------- 1 | ./bin 2 | ./etc 3 | ./pkg/data/zz_generated_bindata.go 4 | ./.vagrant 5 | ./.cache 6 | ./.dapper 7 | ./.trash-cache 8 | ./.git/objects/pack -------------------------------------------------------------------------------- /.droneignore: -------------------------------------------------------------------------------- 1 | ^.*\.md$ 2 | ^\.droneignore$ 3 | ^\.github\/.*$ 4 | ^MAINTAINERS$ 5 | ^CODEOWNERS$ 6 | ^LICENSE$ 7 | ^DCO$ 8 | ^channel\.yaml$ 9 | -------------------------------------------------------------------------------- /.github/.codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | project: 4 | default: false # disable the default status that measures entire project 5 | pkg: # declare a new status context "pkg" 6 | paths: 7 | - pkg/* # only include coverage in "pkg/" folder 8 | informational: true # Always pass check 9 | patch: off # disable the commit only checks 10 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | 12 | **Environmental Info:** 13 | K3s Version: 14 | 15 | 16 | Node(s) CPU architecture, OS, and Version: 17 | 18 | 19 | Cluster Configuration: 20 | 21 | 22 | **Describe the bug:** 
23 | 24 | 25 | **Steps To Reproduce:** 26 | 27 | - Installed K3s: 28 | 29 | **Expected behavior:** 30 | 31 | 32 | **Actual behavior:** 33 | 34 | 35 | **Additional context / logs:** 36 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | 12 | **Is your feature request related to a problem? Please describe.** 13 | 14 | 15 | **Describe the solution you'd like** 16 | 17 | 18 | **Describe alternatives you've considered** 19 | 20 | 21 | **Additional context** 22 | 23 | 24 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/os_validation.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Validate Operating System 3 | about: Request validation of an operating system 4 | title: 'Validate OS VERSION' 5 | labels: ["kind/os-validation"] 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | 12 | **K3s Versions to be Validated** 13 | 14 | 15 | 16 | **Testing Considerations** 17 | 18 | 1. Install and run sonobuoy conformance tests on a hardened cluster 19 | 2. Validate SUC upgrade 20 | 3. Install Rancher Manager 21 | 4. Validate snapshot restore via `cluster-reset-restore-path` 22 | 23 | 24 | **Additional Information** 25 | 26 | 27 | -------------------------------------------------------------------------------- /.github/SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Reporting a Vulnerability 4 | 5 | K3s supports responsible disclosure and endeavors to resolve security issues in a reasonable timeframe. To report a security vulnerability, email security@k3s.io . 
6 | -------------------------------------------------------------------------------- /.github/actions/setup-go/action.yaml: -------------------------------------------------------------------------------- 1 | name: 'Setup golang with master only caching' 2 | description: 'A composite action that installs golang, but with a caching strategy that only updates the cache on master branch.' 3 | runs: 4 | using: 'composite' 5 | steps: 6 | - uses: actions/setup-go@v5 7 | with: 8 | go-version-file: 'go.mod' # Just use whatever version is in the go.mod file 9 | cache: ${{ github.ref == 'refs/heads/master' || github.ref == 'refs/heads/release-1.32' }} 10 | 11 | - name: Prepare for go cache 12 | if: github.ref != 'refs/heads/master' && github.ref != 'refs/heads/release-1.32' 13 | shell: bash 14 | run: | 15 | echo "GO_CACHE=$(go env GOCACHE)" | tee -a "$GITHUB_ENV" 16 | echo "GO_MODCACHE=$(go env GOMODCACHE)" | tee -a "$GITHUB_ENV" 17 | echo "GO_VERSION=$(go env GOVERSION | tr -d 'go')" | tee -a "$GITHUB_ENV" 18 | 19 | - name: Setup read-only cache 20 | if: github.ref != 'refs/heads/master' && github.ref != 'refs/heads/release-1.32' 21 | uses: actions/cache/restore@v4 22 | with: 23 | path: | 24 | ${{ env.GO_MODCACHE }} 25 | ${{ env.GO_CACHE }} 26 | # Match the cache key to the setup-go action https://github.com/actions/setup-go/blob/main/src/cache-restore.ts#L34 27 | key: setup-go-${{ runner.os }}-${{ env.ImageOS }}-go-${{ env.GO_VERSION }}-${{ hashFiles('go.sum') }} 28 | restore-keys: | 29 | setup-go-${{ runner.os }}- -------------------------------------------------------------------------------- /.github/actions/vagrant-setup/action.yaml: -------------------------------------------------------------------------------- 1 | name: 'Setup Vagrant and Libvirt' 2 | description: 'A composite action that installs latest versions of vagrant and libvirt for use on ubuntu based runners' 3 | runs: 4 | using: 'composite' 5 | steps: 6 | - name: Add vagrant to apt-get sources 7 | shell: 
bash 8 | run: | 9 | curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg 10 | echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list 11 | - name: Install vagrant and libvirt 12 | shell: bash 13 | run: | 14 | sudo apt-get update 15 | sudo apt-get install -y libvirt-daemon libvirt-daemon-system vagrant=2.4.1-1 ruby-libvirt 16 | sudo systemctl enable --now libvirtd 17 | - name: Install vagrant dependencies 18 | shell: bash 19 | run: | 20 | sudo apt-get install -y --no-install-recommends libxslt-dev libxml2-dev libvirt-dev ruby-bundler ruby-dev zlib1g-dev 21 | # This is a workaround for the libvirt group not being available in the current shell 22 | # https://github.com/actions/runner-images/issues/7670#issuecomment-1900711711 23 | - name: Make the libvirt socket rw accessible to everyone 24 | shell: bash 25 | run: | 26 | sudo chmod a+rw /var/run/libvirt/libvirt-sock 27 | - name: Install vagrant-libvirt plugin 28 | shell: bash 29 | run: vagrant plugin install vagrant-libvirt -------------------------------------------------------------------------------- /.github/dco.yml: -------------------------------------------------------------------------------- 1 | require: 2 | members: false 3 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | labels: 7 | - "kind/dependabot" 8 | reviewers: 9 | - "k3s-io/k3s-dev" 10 | schedule: 11 | interval: "weekly" 12 | 13 | - package-ecosystem: "docker" 14 | directory: "/" 15 | labels: 16 | - "kind/dependabot" 17 | reviewers: 18 | - "k3s-io/k3s-dev" 19 | schedule: 20 | interval: "weekly" 21 | 22 | - 
package-ecosystem: "docker" 23 | directory: "/conformance" 24 | labels: 25 | - "kind/dependabot" 26 | reviewers: 27 | - "k3s-io/k3s-dev" 28 | schedule: 29 | interval: "weekly" 30 | 31 | - package-ecosystem: "docker" 32 | directory: "/tests/e2e/scripts" 33 | labels: 34 | - "kind/dependabot" 35 | reviewers: 36 | - "k3s-io/k3s-dev" 37 | schedule: 38 | interval: "weekly" 39 | 40 | - package-ecosystem: "docker" 41 | directory: "/package" 42 | labels: 43 | - "kind/dependabot" 44 | reviewers: 45 | - "k3s-io/k3s-dev" 46 | schedule: 47 | interval: "weekly" 48 | 49 | - package-ecosystem: "docker" 50 | directory: "/tests/integration" 51 | labels: 52 | - "kind/dependabot" 53 | reviewers: 54 | - "k3s-io/k3s-dev" 55 | schedule: 56 | interval: "weekly" 57 | 58 | - package-ecosystem: "docker" 59 | directory: "/tests/terraform" 60 | labels: 61 | - "kind/dependabot" 62 | reviewers: 63 | - "k3s-io/k3s-dev" 64 | schedule: 65 | interval: "weekly" 66 | -------------------------------------------------------------------------------- /.github/workflows/epic.yaml: -------------------------------------------------------------------------------- 1 | name: Update epics 2 | on: 3 | issues: 4 | types: [opened, closed, reopened] 5 | 6 | permissions: 7 | contents: read 8 | 9 | jobs: 10 | epics: 11 | runs-on: ubuntu-latest 12 | name: Update epic issues 13 | permissions: 14 | issues: write 15 | steps: 16 | - name: Run epics action 17 | uses: cloudaper/epics-action@v1 18 | with: 19 | github-token: ${{ secrets.GITHUB_TOKEN }} 20 | epic-label-name: epic 21 | auto-close-epic: false -------------------------------------------------------------------------------- /.github/workflows/govulncheck.yml: -------------------------------------------------------------------------------- 1 | name: govulncheck 2 | on: 3 | push: 4 | paths: 5 | - go.sum 6 | schedule: 7 | - cron: "0 0 * * *" 8 | workflow_dispatch: {} 9 | 10 | permissions: read-all 11 | 12 | jobs: 13 | govulncheck: 14 | name: govulncheck 15 | runs-on: 
ubuntu-latest 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v4 19 | - name: Install Go 20 | uses: ./.github/actions/setup-go 21 | - name: Go Generate 22 | run: | 23 | ./scripts/download 24 | ./scripts/generate 25 | - name: Install govulncheck 26 | run: go install golang.org/x/vuln/cmd/govulncheck@latest 27 | - name: Run govulncheck 28 | run: govulncheck -format=text ./... 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | /.dapper 3 | /.tags 4 | /.idea 5 | /.trash-cache 6 | .vagrant/ 7 | /.kube 8 | /.cache 9 | /.docker 10 | /.*_history 11 | /.viminfo 12 | /.lesshst 13 | /*.log 14 | /bin 15 | /etc 16 | /build 17 | /data-dir 18 | /dist 19 | /image/root 20 | /image/agent 21 | /image/go_build_agent 22 | /image/main.squashfs 23 | /package/k3s 24 | /package/data-* 25 | /pkg/data/zz_generated_bindata.go 26 | __pycache__ 27 | /tests/.pytest_cache/ 28 | /tests/.tox/ 29 | /tests/.vscode 30 | /sonobuoy-output 31 | *.tmp 32 | config/local.tfvars 33 | *.terraform 34 | *.tfstate 35 | .terraform.lock.hcl 36 | .DS_Store 37 | -------------------------------------------------------------------------------- /.golangci.json: -------------------------------------------------------------------------------- 1 | { 2 | "linters": { 3 | "disable-all": true, 4 | "enable": [ 5 | "govet", 6 | "revive", 7 | "goimports", 8 | "misspell", 9 | "gofmt" 10 | ] 11 | }, 12 | "run": { 13 | "deadline": "5m" 14 | }, 15 | "issues": { 16 | "exclude-dirs": [ 17 | "build", 18 | "contrib", 19 | "manifests", 20 | "package", 21 | "scripts", 22 | "vendor" 23 | ], 24 | "exclude-files": [ 25 | "/zz_generated_" 26 | ], 27 | "exclude-rules": [ 28 | { 29 | "linters": "typecheck", 30 | "text": "imported but not used" 31 | }, 32 | { 33 | "linters": "typecheck", 34 | "text": "build constraints exclude all Go files" 35 | }, 36 | { 37 | "linters": "revive", 38 | 
"text": "should have comment" 39 | }, 40 | { 41 | "linters": "revive", 42 | "text": "exported" 43 | } 44 | ] 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /ADOPTERS.md: -------------------------------------------------------------------------------- 1 | ## k3s Adopters 2 | 3 | A non-exhaustive list of k3s adopters is provided below. To add your company to this list, please open a [PR](https://github.com/k3s-io/k3s/pulls). 4 | 5 | - [Rocket.Chat](https://rocket.chat) 6 | - [Ayedo.de](https://ayedo.de/) 7 | - [PITS Global Data Recovery Services](https://www.pitsdatarecovery.net/) 8 | - [External Secrets Inc.](https://externalsecrets.com) 9 | - [Uffizzi](https://www.uffizzi.com/) 10 | 11 | **_Other Projects_** - While the above list provides a number of official adopters, k3s' compact and simple nature provides a clean base for other projects to build off of, or to embed. Some such projects are listed below: 12 | - SUSE's RKE2 (or RKE Government) [RKE2](github.com/rancher/rke2/) 13 | - [k3ai](https://k3ai.github.io/) 14 | - SUSE's [Rancher Desktop](https://rancherdesktop.io/) 15 | - [Kairos](https://kairos.io) 16 | - [Getdeck Beiboot](https://github.com/Getdeck/beiboot) 17 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @k3s-io/k3s-dev 2 | 3 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Community Code of Conduct 2 | k3s observes the [CNCF Community Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). 
3 | -------------------------------------------------------------------------------- /DCO: -------------------------------------------------------------------------------- 1 | Developer Certificate of Origin 2 | Version 1.1 3 | 4 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 5 | 1 Letterman Drive 6 | Suite D4700 7 | San Francisco, CA, 94129 8 | 9 | Everyone is permitted to copy and distribute verbatim copies of this 10 | license document, but changing it is not allowed. 11 | 12 | 13 | Developer's Certificate of Origin 1.1 14 | 15 | By making a contribution to this project, I certify that: 16 | 17 | (a) The contribution was created in whole or in part by me and I 18 | have the right to submit it under the open source license 19 | indicated in the file; or 20 | 21 | (b) The contribution is based upon previous work that, to the best 22 | of my knowledge, is covered under an appropriate open source 23 | license and I have the right under that license to submit that 24 | work with modifications, whether created in whole or in part 25 | by me, under the same open source license (unless I am 26 | permitted to submit under a different license), as indicated 27 | in the file; or 28 | 29 | (c) The contribution was provided directly to me by some other 30 | person who certified (a), (b) or (c) and I have not modified 31 | it. 32 | 33 | (d) I understand and agree that this project and the contribution 34 | are public and that a record of the contribution (including all 35 | personal information I submit with it, including my sign-off) is 36 | maintained indefinitely and may be redistributed consistent with 37 | this project or the open source license(s) involved. 
38 | 39 | -------------------------------------------------------------------------------- /Dockerfile.manifest: -------------------------------------------------------------------------------- 1 | ARG GOLANG=golang:1.24.2-alpine3.21 2 | FROM ${GOLANG} 3 | 4 | COPY --from=plugins/manifest:1.2.3 /bin/* /bin/ 5 | 6 | RUN apk -U --no-cache add bash 7 | 8 | ARG DOCKER_USERNAME 9 | ENV DOCKER_USERNAME $DOCKER_USERNAME 10 | 11 | ARG DOCKER_PASSWORD 12 | ENV DOCKER_PASSWORD $DOCKER_PASSWORD 13 | 14 | ARG DRONE_TAG 15 | ENV DRONE_TAG $DRONE_TAG 16 | 17 | COPY ./scripts/manifest /bin/ 18 | 19 | RUN manifest 20 | -------------------------------------------------------------------------------- /MAINTAINERS: -------------------------------------------------------------------------------- 1 | # The following is the list of current K3s maintainers 2 | # Github ID, Name, Email Address 3 | 4 | brandond, Brad Davidson, brad.davidson@suse.com 5 | briandowns, Brian Downs, brian.downs@suse.com 6 | brooksn, Brooks Newberry, brooks.newberry@suse.com 7 | caroline-suse-rancher, Caroline Davis, caroline.davis@suse.com 8 | cwayne18, Chris Wayne, chris.wayne@suse.com 9 | dereknola, Derek Nola, derek.nola@suse.com 10 | galal-hussein, Hussein Galal, hussein.galalabdelazizahmed@suse.com 11 | manuelbuil, Manuel Buil, mbuil@suse.com 12 | matttrach, Matt Trachier, matt.trachier@suse.com 13 | mdrahman-suse, MD Rahman, md.rahman@suse.com 14 | Oats87, Chris Kim, chris.kim@suse.com 15 | rancher-max, Max Ross, max.ross@suse.com 16 | rbrtbnfgl, Roberto Bonafiglia, roberto.bonafiglia@suse.com 17 | ShylajaDevadiga, Shylaja Devadiga, shylaja.devadiga@suse.com 18 | thomasferrandiz, Thomas Ferrandiz, thomas.ferrandiz@suse.com 19 | VestigeJ, Justin Janes, justin.janes@suse.com 20 | 21 | # Community Management 22 | OrlinVasilev, Orlin Vasilev, orlin.vasilev@suse.com 23 | robertsirc, Robert Sirchia, robert.sirchia@suse.com 24 | -------------------------------------------------------------------------------- 
/ROADMAP.md: -------------------------------------------------------------------------------- 1 | Roadmap 2 | --- 3 | This represents the larger, bigger impact features and enhancements we have planned for K3s. Features are planned, but do not represent a commitment to develop and can change at any time. There are many more tactical enhancements and fixes that can be found by reviewing our [GitHub milestones](https://github.com/k3s-io/k3s/milestones). 4 | 5 | Next 6 | --- 7 | - Embedded registry support 8 | - Windows OS Support 9 | 10 | Later 11 | --- 12 | - Multus CNI support 13 | - Align K3s with upstream Kubernetes by removing patches or upstreaming them 14 | - FIPS-140-2 Compliant binaries 15 | - Graduate encrypted networking support from experimental to GA 16 | - Graduate network policy support from experimental to GA 17 | 18 | -------------------------------------------------------------------------------- /cmd/agent/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "os" 7 | 8 | "github.com/k3s-io/k3s/pkg/cli/agent" 9 | "github.com/k3s-io/k3s/pkg/cli/cmds" 10 | "github.com/k3s-io/k3s/pkg/configfilearg" 11 | "github.com/sirupsen/logrus" 12 | "github.com/urfave/cli/v2" 13 | ) 14 | 15 | func main() { 16 | app := cmds.NewApp() 17 | app.DisableSliceFlagSeparator = true 18 | app.Commands = []*cli.Command{ 19 | cmds.NewAgentCommand(agent.Run), 20 | } 21 | 22 | if err := app.Run(configfilearg.MustParse(os.Args)); err != nil && !errors.Is(err, context.Canceled) { 23 | logrus.Fatalf("Error: %v", err) 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /cmd/cert/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "os" 7 | 8 | "github.com/k3s-io/k3s/pkg/cli/cert" 9 | "github.com/k3s-io/k3s/pkg/cli/cmds" 10 | 
"github.com/k3s-io/k3s/pkg/configfilearg" 11 | "github.com/sirupsen/logrus" 12 | "github.com/urfave/cli/v2" 13 | ) 14 | 15 | func main() { 16 | app := cmds.NewApp() 17 | app.Commands = []*cli.Command{ 18 | cmds.NewCertCommands( 19 | cert.Check, 20 | cert.Rotate, 21 | cert.RotateCA, 22 | ), 23 | } 24 | 25 | if err := app.Run(configfilearg.MustParse(os.Args)); err != nil && !errors.Is(err, context.Canceled) { 26 | logrus.Fatal(err) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /cmd/completion/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "os" 7 | 8 | "github.com/k3s-io/k3s/pkg/cli/cmds" 9 | "github.com/k3s-io/k3s/pkg/cli/completion" 10 | "github.com/sirupsen/logrus" 11 | "github.com/urfave/cli/v2" 12 | ) 13 | 14 | func main() { 15 | app := cmds.NewApp() 16 | app.Commands = []*cli.Command{ 17 | cmds.NewCompletionCommand(completion.Run), 18 | } 19 | 20 | if err := app.Run(os.Args); err != nil && !errors.Is(err, context.Canceled) { 21 | logrus.Fatal(err) 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /cmd/containerd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/k3s-io/k3s/pkg/containerd" 5 | "k8s.io/klog/v2" 6 | ) 7 | 8 | func main() { 9 | klog.InitFlags(nil) 10 | containerd.Main() 11 | } 12 | -------------------------------------------------------------------------------- /cmd/ctr/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "github.com/k3s-io/k3s/pkg/ctr" 4 | 5 | func main() { 6 | ctr.Main() 7 | } 8 | -------------------------------------------------------------------------------- /cmd/encrypt/main.go: -------------------------------------------------------------------------------- 1 | 
package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "os" 7 | 8 | "github.com/k3s-io/k3s/pkg/cli/cmds" 9 | "github.com/k3s-io/k3s/pkg/cli/secretsencrypt" 10 | "github.com/k3s-io/k3s/pkg/configfilearg" 11 | "github.com/sirupsen/logrus" 12 | "github.com/urfave/cli/v2" 13 | ) 14 | 15 | func main() { 16 | app := cmds.NewApp() 17 | app.Commands = []*cli.Command{ 18 | cmds.NewSecretsEncryptCommands( 19 | secretsencrypt.Status, 20 | secretsencrypt.Enable, 21 | secretsencrypt.Disable, 22 | secretsencrypt.Prepare, 23 | secretsencrypt.Rotate, 24 | secretsencrypt.Reencrypt, 25 | secretsencrypt.RotateKeys, 26 | ), 27 | } 28 | 29 | if err := app.Run(configfilearg.MustParse(os.Args)); err != nil && !errors.Is(err, context.Canceled) { 30 | logrus.Fatal(err) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /cmd/etcdsnapshot/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "os" 7 | 8 | "github.com/k3s-io/k3s/pkg/cli/cmds" 9 | "github.com/k3s-io/k3s/pkg/cli/etcdsnapshot" 10 | "github.com/k3s-io/k3s/pkg/configfilearg" 11 | "github.com/sirupsen/logrus" 12 | "github.com/urfave/cli/v2" 13 | ) 14 | 15 | func main() { 16 | app := cmds.NewApp() 17 | app.Commands = []*cli.Command{ 18 | cmds.NewEtcdSnapshotCommands( 19 | etcdsnapshot.Delete, 20 | etcdsnapshot.List, 21 | etcdsnapshot.Prune, 22 | etcdsnapshot.Save, 23 | ), 24 | } 25 | 26 | if err := app.Run(configfilearg.MustParse(os.Args)); err != nil && !errors.Is(err, context.Canceled) { 27 | logrus.Fatal(err) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /cmd/k3s/main_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | // +build linux 3 | 4 | package main 5 | 6 | import ( 7 | "os" 8 | "syscall" 9 | 10 | pkgerrors "github.com/pkg/errors" 11 | ) 12 | 13 | const 
programPostfix = "" 14 | 15 | func runExec(cmd string, args []string, calledAsInternal bool) (err error) { 16 | if err := syscall.Exec(cmd, args, os.Environ()); err != nil { 17 | return pkgerrors.WithMessagef(err, "exec %s failed", cmd) 18 | } 19 | return nil 20 | } 21 | -------------------------------------------------------------------------------- /cmd/k3s/main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "testing" 4 | 5 | func Test_UnitFindPreferBundledBin(t *testing.T) { 6 | tests := []struct { 7 | name string 8 | args []string 9 | want bool 10 | }{ 11 | { 12 | name: "Single argument", 13 | args: []string{"--prefer-bundled-bin"}, 14 | want: true, 15 | }, 16 | { 17 | name: "no argument", 18 | args: []string{""}, 19 | want: false, 20 | }, 21 | { 22 | name: "Argument with equal true", 23 | args: []string{"--prefer-bundled-bin=true"}, 24 | want: true, 25 | }, 26 | { 27 | name: "Argument with equal false", 28 | args: []string{"--prefer-bundled-bin=false"}, 29 | want: false, 30 | }, 31 | { 32 | name: "Argument with equal 1", 33 | args: []string{"--prefer-bundled-bin=1"}, 34 | want: true, 35 | }, 36 | { 37 | name: "Argument with equal 0", 38 | args: []string{"--prefer-bundled-bin=0"}, 39 | want: false, 40 | }, 41 | { 42 | name: "Multiple arguments", 43 | args: []string{"--abcd", "--prefer-bundled-bin", "--efgh"}, 44 | want: true, 45 | }, 46 | { 47 | name: "Repeated arguments", 48 | args: []string{"--abcd", "--prefer-bundled-bin=false", "--prefer-bundled-bin"}, 49 | want: true, 50 | }, 51 | } 52 | for _, tt := range tests { 53 | t.Run(tt.name, func(t *testing.T) { 54 | if got := findPreferBundledBin(tt.args); got != tt.want { 55 | t.Errorf("findPreferBundledBin() = %+v\nWant = %+v", got, tt.want) 56 | } 57 | }) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /cmd/k3s/main_windows.go: 
-------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package main 5 | 6 | import ( 7 | "os" 8 | "os/exec" 9 | ) 10 | 11 | const programPostfix = ".exe" 12 | 13 | func runExec(cmd string, args []string, calledAsInternal bool) (err error) { 14 | // syscall.Exec: not supported by windows 15 | if calledAsInternal { 16 | args = args[1:] 17 | } 18 | cmdObj := exec.Command(cmd, args...) 19 | cmdObj.Stdout = os.Stdout 20 | cmdObj.Stderr = os.Stderr 21 | cmdObj.Stdin = os.Stdin 22 | cmdObj.Env = os.Environ() 23 | return cmdObj.Run() 24 | } 25 | -------------------------------------------------------------------------------- /cmd/kubectl/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "github.com/k3s-io/k3s/pkg/kubectl" 4 | 5 | func main() { 6 | kubectl.Main() 7 | } 8 | -------------------------------------------------------------------------------- /cmd/token/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "os" 7 | 8 | "github.com/k3s-io/k3s/pkg/cli/cmds" 9 | "github.com/k3s-io/k3s/pkg/cli/token" 10 | "github.com/k3s-io/k3s/pkg/configfilearg" 11 | "github.com/sirupsen/logrus" 12 | "github.com/urfave/cli/v2" 13 | ) 14 | 15 | func main() { 16 | app := cmds.NewApp() 17 | app.Commands = []*cli.Command{ 18 | cmds.NewTokenCommands( 19 | token.Create, 20 | token.Delete, 21 | token.Generate, 22 | token.List, 23 | token.Rotate, 24 | ), 25 | } 26 | 27 | if err := app.Run(configfilearg.MustParse(os.Args)); err != nil && !errors.Is(err, context.Canceled) { 28 | logrus.Fatal(err) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /conformance/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.21 2 | ENV SONOBUOY_VERSION 
0.57.2 3 | RUN apk add curl tar gzip 4 | RUN curl -sfL https://github.com/vmware-tanzu/sonobuoy/releases/download/v${SONOBUOY_VERSION}/sonobuoy_${SONOBUOY_VERSION}_linux_amd64.tar.gz | tar xvzf - -C /usr/bin 5 | COPY run-test.sh /usr/bin 6 | CMD ["/usr/bin/run-test.sh"] 7 | -------------------------------------------------------------------------------- /conformance/run-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -x 3 | 4 | while [ ! -e /etc/rancher/k3s/k3s.yaml ]; do 5 | echo waiting for config 6 | sleep 1 7 | done 8 | 9 | mkdir -p /root/.kube 10 | sed 's/localhost/server/g' /etc/rancher/k3s/k3s.yaml > /root/.kube/config 11 | export KUBECONFIG=/root/.kube/config 12 | cat /etc/rancher/k3s/k3s.yaml 13 | cat $KUBECONFIG 14 | sonobuoy run --sonobuoy-image=rancher/mirrored-sonobuoy-sonobuoy:v0.56.14 15 | sleep 15 16 | sonobuoy logs -f 17 | -------------------------------------------------------------------------------- /contrib/ansible/README.md: -------------------------------------------------------------------------------- 1 | # Build a Kubernetes cluster using k3s via Ansible 2 | 3 | The ansible playbook was moved to https://github.com/k3s-io/k3s-ansible 4 | -------------------------------------------------------------------------------- /contrib/gotests_templates/call.tmpl: -------------------------------------------------------------------------------- 1 | {{define "call"}}{{with .Receiver}}{{if not .IsStruct}}tt.{{end}}{{Receiver .}}.{{end}}{{.Name}}({{range $i, $el := .Parameters}}{{if $i}}, {{end}}{{if not .IsWriter}}tt.args.{{end}}{{Param .}}{{if .Type.IsVariadic}}...{{end}}{{end}}){{end}} 2 | -------------------------------------------------------------------------------- /contrib/gotests_templates/header.tmpl: -------------------------------------------------------------------------------- 1 | {{define "header"}} 2 | {{range .Comments}}{{.}} 3 | {{end -}} 4 | package {{.Package}} 5 | 6 
| import ( 7 | {{range .Imports}}{{.Name}} {{.Path}} 8 | {{end}} 9 | ) 10 | {{end}} 11 | -------------------------------------------------------------------------------- /contrib/gotests_templates/inline.tmpl: -------------------------------------------------------------------------------- 1 | {{define "inline"}} {{template "call" .}} {{end}} 2 | -------------------------------------------------------------------------------- /contrib/gotests_templates/inputs.tmpl: -------------------------------------------------------------------------------- 1 | {{define "inputs"}}{{$f := .}}{{if not .Subtests}}tt.name, {{end}}{{if $f.PrintInputs}}{{range $f.Parameters}}tt.args.{{Param .}}, {{end}}{{end}}{{end}} 2 | -------------------------------------------------------------------------------- /contrib/gotests_templates/message.tmpl: -------------------------------------------------------------------------------- 1 | {{define "message" -}} 2 | {{if not .Subtests}}%q. {{end}}{{with .Receiver}}{{.Type.Value}}.{{end}}{{.Name}}({{if .PrintInputs}}{{range $i, $el := .Parameters}}{{if $i}}, {{end}}%v{{end}}{{end}}) 3 | {{- end}} 4 | -------------------------------------------------------------------------------- /contrib/gotests_templates/results.tmpl: -------------------------------------------------------------------------------- 1 | {{define "results"}} {{range $i, $el := .Results}}{{if $i}}, {{end}}{{Got .}}{{end}}{{if .ReturnsError}}, err{{end}} {{if or .Results .ReturnsError}} := {{end}} {{end}} 2 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | # to run define K3S_TOKEN, K3S_VERSION is optional, eg: 2 | # K3S_TOKEN=${RANDOM}${RANDOM}${RANDOM} docker-compose up 3 | 4 | services: 5 | 6 | server: 7 | image: "rancher/k3s:${K3S_VERSION:-latest}" 8 | command: server 9 | tmpfs: 10 | - /run 11 | - /var/run 12 | ulimits: 13 | nproc: 65535 14 | 
nofile: 15 | soft: 65535 16 | hard: 65535 17 | privileged: true 18 | restart: always 19 | environment: 20 | - K3S_TOKEN=${K3S_TOKEN:?err} 21 | - K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml 22 | - K3S_KUBECONFIG_MODE=666 23 | volumes: 24 | - k3s-server:/var/lib/rancher/k3s 25 | # This is just so that we get the kubeconfig file out 26 | - .:/output 27 | ports: 28 | - 6443:6443 # Kubernetes API Server 29 | - 80:80 # Ingress controller port 80 30 | - 443:443 # Ingress controller port 443 31 | 32 | agent: 33 | image: "rancher/k3s:${K3S_VERSION:-latest}" 34 | tmpfs: 35 | - /run 36 | - /var/run 37 | ulimits: 38 | nproc: 65535 39 | nofile: 40 | soft: 65535 41 | hard: 65535 42 | privileged: true 43 | restart: always 44 | environment: 45 | - K3S_URL=https://server:6443 46 | - K3S_TOKEN=${K3S_TOKEN:?err} 47 | volumes: 48 | - k3s-agent:/var/lib/rancher/k3s 49 | 50 | volumes: 51 | k3s-server: {} 52 | k3s-agent: {} 53 | -------------------------------------------------------------------------------- /docs/adrs/core-controller-user.md: -------------------------------------------------------------------------------- 1 | # Use a dedicated user for K3s core controllers 2 | 3 | Date: 2023-05-26 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | 11 | Users who collect audit logs from K3s currently have a hard time determining if an action was performed by an administrator, or by the K3s supervisor. 12 | This is due to the K3s supervisor using the same `system:admin` user for both the admin kubeconfig, and the kubeconfig used by core Wrangler controllers that drive core functionality and the deploy/helm controllers. 13 | 14 | Users may have policies in place that prohibit use of the `system:admin` account, or that require service accounts to be distinct from user accounts. 15 | 16 | ## Decision 17 | 18 | * We will add a new kubeconfig for the K3s supervisor controllers: core functionality, deploy (AddOns; aka the manifests directory), and helm (HelmChart/HelmChartConfig). 
19 | * Each of the three controllers will use a dedicated user-agent to further assist in discriminating between events, via both audit logs and resource ManageFields tracking. 20 | * The new user account will use existing core Kubernetes group RBAC. 21 | 22 | ## Consequences 23 | 24 | * K3s servers will create and manage an additional kubeconfig, client cert, and key that is intended only for use by the supervisor controllers. 25 | * K3s supervisor controllers will use distinct user-agents to further discriminate between which component initiated the request. 26 | -------------------------------------------------------------------------------- /docs/adrs/gh-branch-strategy.md: -------------------------------------------------------------------------------- 1 | # Branching Strategy in Github 2 | 3 | Proposal Date: 2024-05-23 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | 11 | K3s is released at the same cadence as upstream Kubernetes. This requires management of multiple versions at any given point in time. The current branching strategy uses `release-v[MAJOR].[MINOR]`, with the `master` branch corresponding to the highest version released based on [semver](https://semver.org/). Github's Tags are then used to cut releases, which are just point-in-time snapshots of the specified branch at a given point. As there is the potential for bugs and regressions to be on present on any given branch, this branching and release strategy requires a code freeze to QA the branch without new potentially breaking changes going in. 12 | 13 | ## Decision 14 | All code changes go into the `master` branch. We maintain branches for all current release versions in the format `release-v[MAJOR].[MINOR]`. When changes made in master are necessary in a release, they should be backported directly into the release branches. 
If ever there are changes required only in the release branches and not in master, such as when bumping the kubernetes version from upstream, those can be made directly into the release branches themselves. 15 | 16 | ## Consequences 17 | 18 | - Allows for constant development, with code freeze only relevant for the release branches. 19 | - This requires maintaining one additional branch than the current workflow, which also means one additional issue. 20 | - Testing would be more constant from the master branch. 21 | - Minor release captain will have to cut the new branch as soon as they bring in that new minor version. 22 | -------------------------------------------------------------------------------- /docs/adrs/k3s-charts.md: -------------------------------------------------------------------------------- 1 | # Stage Helm charts through k3s-charts 2 | 3 | Date: 2022-11-17 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | 11 | The upstream Traefik chart repo has seen significant changes over the last month. Upstream has changed their repo structure, and actively removed content from deprecated locations, 12 | at least twice. In both cases, this immediately broke K3s CI, requiring changes to our build scripts in order to restore the ability to build, test, and package K3s. 13 | 14 | The K3s chart build process also makes several changes to the upstream chart to add values and break out the CRDs, using an ad-hoc set of scripts that are difficult to maintain. 15 | There are better tools available to perform this same task, if we did so in a dedicated repo. 16 | 17 | ## Decision 18 | 19 | We will make use of the [charts-build-scripts](https://github.com/rancher/charts-build-scripts) tool to customize the upstream chart and stage it through a stable intermediate repo. 
20 | 21 | ## Consequences 22 | 23 | When updating Helm charts distributed with K3s, additional pull requests will be necessary to stage new versions into the k3s-io/k3s-charts repo, before updating the chart version in K3s. 24 | -------------------------------------------------------------------------------- /docs/adrs/record-architecture-decisions.md: -------------------------------------------------------------------------------- 1 | # Record architecture decisions 2 | 3 | Date: 2021-12-09 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | 11 | We need to record the architectural decisions made on this project. 12 | 13 | ## Decision 14 | 15 | We will use Architecture Decision Records, as [described by Michael Nygard](http://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions). 16 | 17 | ## Consequences 18 | 19 | See Michael Nygard's article, linked above. For a lightweight ADR toolset, see Nat Pryce's [adr-tools](https://github.com/npryce/adr-tools). -------------------------------------------------------------------------------- /docs/adrs/status-for-etcd-node.md: -------------------------------------------------------------------------------- 1 | # A way for seeing the status of the etcd node 2 | 3 | Date: 2023-09-14 4 | 5 | ## Status 6 | 7 | Decided 8 | 9 | ## Context 10 | 11 | It is difficult for a user to see if the etcd status has changed from learner to voter. 12 | As a result, there is a need for a controller or condition to make it easier for the user to view the status of their etcd node and how it is running. 13 | 14 | One issue with not having this controller or condition is that when a cluster is provisioned with scaling, 15 | it's quite possible for the cluster to break due to quickly adding or removing a node for any reason. 16 | 17 | With this feature, the user will be able to have a better understanding of the etcd status for each node, thus avoiding problems when provisioning clusters. 
18 | 19 | ## Decision 20 | 21 | We decided to add a status flag on our etcd controller. 22 | 23 | ## Consequences 24 | 25 | Good: 26 | - Better view of the etcd status -------------------------------------------------------------------------------- /docs/release/expanded/build_container.md: -------------------------------------------------------------------------------- 1 | # Generate Build Container 2 | 3 | 1. set env variable PATH_TO_KUBERNETES_REPO to the path to your local kubernetes/kubernetes copy: 4 | ``` 5 | export GHUSER="mtrachier" 6 | export PATH_TO_KUBERNETES_REPO="/Users/$GHUSER/go/src/github.com/kubernetes/kubernetes" 7 | ``` 8 | 2. set env variable GOVERSION to the expected version of go for the kubernetes/kubernetes version checked out: 9 | ``` 10 | export GOVERSION=$(yq -e '.dependencies[] | select(.name == "golang: upstream version").version' $PATH_TO_KUBERNETES_REPO/build/dependencies.yaml) 11 | ``` 12 | 3. set env variable GOIMAGE to the expected container image to base our custom build image on: 13 | ``` 14 | export GOIMAGE="golang:${GOVERSION}-alpine" 15 | ``` 16 | 4. set env variable BUILD_CONTAINER to the contents of a dockerfile for the build container: 17 | ``` 18 | export BUILD_CONTAINER="FROM ${GOIMAGE}\nRUN apk add --no-cache bash gnupg git make tar gzip curl git coreutils rsync alpine-sdk" 19 | ``` 20 | 5. use Docker to create the build container: 21 | ``` 22 | echo -e $BUILD_CONTAINER | docker build -t ${GOIMAGE}-dev - 23 | ``` 24 | -------------------------------------------------------------------------------- /docs/release/expanded/channel_server.md: -------------------------------------------------------------------------------- 1 | # Update Channel Server 2 | 3 | Once the release is verified, the channel server config needs to be updated to reflect the new version for “stable”.   4 | 5 | 1. Channel.yaml can be found at the [root of the K3s repo.](https://github.com/k3s-io/k3s/blob/master/channel.yaml) 6 | 1. 
When updating the channel server a single-line change will need to be performed.   7 | 1. Release Captains responsible for this change will need to update the following stanza to reflect the new stable version of kubernetes relative to the release in progress.   8 | 1. Example: 9 | ``` 10 | channels: 11 | name: stable 12 | latest: v1.22.12+k3s1 13 | ``` 14 | -------------------------------------------------------------------------------- /docs/release/expanded/milestones.md: -------------------------------------------------------------------------------- 1 | # Generate Milestones 2 | 3 | If no milestones exist in the k3s repo for the releases, generate them. 4 | No due date or description necessary, we can update them as necessary afterwards. 5 | Make sure to post the new milestones in the SLACK channel if generated. 6 | -------------------------------------------------------------------------------- /docs/release/expanded/rebase.md: -------------------------------------------------------------------------------- 1 | # Rebase 2 | 3 | 1. clear out any cached or old files: `git add -A; git reset --hard HEAD` 4 | 1. clear out any cached or older outputs: `rm -rf _output` 5 | 1. rebase your local copy to move the old k3s tag from the old k8s tag to the new k8s tag 6 | 1. so there are three copies of the code involved in this process: 7 | 1. the upstream kubernetes/kubernets copy on GitHub 8 | 1. the k3s-io/kubernetes copy on GitHub 9 | 1. and the local copy on your laptop which is a merge of those 10 | 1. the local copy has every branch and every tag from the remotes you have added 11 | 1. there are custom/proprietary commits in the k3s-io copy that are not in the kubernetes copy 12 | 1. there are commits in the kubernetes copy do not exist in the k3s-io copy 13 | 1. we want the new commits added to the kubernetes copy to be in the k3s-io copy 14 | 1. we want the custom/proprietary commits from the k3s-io copy on top of the new kubernetes commits 15 | 1. 
before rebase our local copy has all of the commits, but the custom/proprietary k3s-io commits are between the old kubernetes version and the new kubernetes version 16 | 1. after the rebase our local copy will have the k3s-io custom/proprietary commits after the latest kubernetes commits 17 | 1. `git rebase --onto $NEW_K8S $OLD_K8S $OLD_K3S_VER~1` 18 | 1. After rebase you will be in a detached head state, this is normal -------------------------------------------------------------------------------- /docs/release/expanded/setup_env.md: -------------------------------------------------------------------------------- 1 | # Setup Go Environment 2 | 3 | These steps are expected for using the scripts and ecm_distro tools for release. 4 | Some of these steps are for properly setting up Go on your machine, some for Docker, and Git. 5 | 6 | ## Git 7 | 8 | 1. install Git (using any method that makes sense 9 | 1. Configure Git for working with GitHub (add your ssh key, etc) 10 | 11 | ## Go 12 | 13 | 1. install Go from binary 14 | 1. set up default Go file structure 15 | 1. create $HOME/go/src/github.com/ 16 | 1. create $HOME/go/src/github.com/k3s-io 17 | 1. create $HOME/go/src/github.com/rancher 18 | 1. create $HOME/go/src/github.com/rancherlabs 19 | 1. create $HOME/go/src/github.com/kubernetes 20 | 1. set GOPATH=$HOME/go 21 | 22 | ## Docker 23 | 24 | 1. install Docker (or Docker desktop) using whatever method makes sense 25 | -------------------------------------------------------------------------------- /docs/release/expanded/setup_k3s_repos.md: -------------------------------------------------------------------------------- 1 | # Set Up K3S Repos 2 | 3 | 1. make sure the $HOME/go/src/github.com/k3s-io directory exists 4 | 1. clear out (remove) k3s repo if is already there (just makes things smoother with a new clone) 5 | 1. clone k3s-io/k3s repo into that directory as "upstream" 6 | 1. fork that repo so that you have a private fork of it 7 | 1. 
if you already have a fork, sync it 8 | 1. add your fork repo as "origin" 9 | 1. fetch all objects from both repos into your local copy 10 | 1. it is important to follow these steps because Go is very particular about the file structure (it uses the file structure to infer the urls it will pull dependencies from) 11 | 1. this is why it is important that the repo is in the github.com/k3s-io directory, and that the repo's directory is "k3s" matching the upstream copy's name 12 | `$HOME/go/src/github.com/k3s-io/k3s` -------------------------------------------------------------------------------- /docs/release/expanded/setup_k8s_repos.md: -------------------------------------------------------------------------------- 1 | # Set Up Kubernetes Repos 2 | 3 | 1. make sure the $HOME/go/src/github.com/kubernetes directory exists 4 | 1. clear out (remove) kubernetes repo if is already there (just makes things smoother with a new clone) 5 | 1. clone kubernetes/kubernetes repo into that directory as "upstream" 6 | 1. add k3s-io/kubernetes repo as "k3s-io" 7 | 1. fetch all objects from both repos into your local copy 8 | 1. it is important to follow these steps because Go is very particular about the file structure (it uses the file structure to infer the urls it will pull dependencies from) 9 | 1. 
this is why it is important that the repo is in the github.com/kubernetes directory, and that the repo's directory is "kubernetes" matching the upstream copy's name `$HOME/go/src/github.com/kubernetes/kubernetes` 10 | -------------------------------------------------------------------------------- /hack/crdgen.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | k3scrd "github.com/k3s-io/k3s/pkg/crd" 7 | _ "github.com/k3s-io/api/pkg/generated/controllers/k3s.cattle.io/v1" 8 | "github.com/rancher/wrangler/v3/pkg/crd" 9 | ) 10 | 11 | func main() { 12 | crd.Print(os.Stdout, k3scrd.List()) 13 | } 14 | -------------------------------------------------------------------------------- /install.sh.sha256sum: -------------------------------------------------------------------------------- 1 | 9ca7930c31179d83bc13de20078fd8ad3e1ee00875b31f39a7e524ca4ef7d9de install.sh 2 | -------------------------------------------------------------------------------- /k3s.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Lightweight Kubernetes 3 | Documentation=https://k3s.io 4 | After=network-online.target 5 | Wants=network-online.target 6 | 7 | [Service] 8 | Type=notify 9 | EnvironmentFile=-/etc/default/%N 10 | EnvironmentFile=-/etc/sysconfig/%N 11 | EnvironmentFile=-/etc/systemd/system/k3s.service.env 12 | ExecStartPre=/bin/sh -xc '! /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service 2>/dev/null' 13 | ExecStart=/usr/local/bin/k3s server 14 | KillMode=process 15 | Delegate=yes 16 | # Having non-zero Limit*s causes performance problems due to accounting overhead 17 | # in the kernel. We recommend using cgroups to do container-local accounting. 
18 | LimitNOFILE=1048576 19 | LimitNPROC=infinity 20 | LimitCORE=infinity 21 | TasksMax=infinity 22 | TimeoutStartSec=0 23 | Restart=always 24 | RestartSec=5s 25 | 26 | [Install] 27 | WantedBy=multi-user.target 28 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | //go:generate go run pkg/codegen/main.go 2 | //go:generate go fmt pkg/deploy/zz_generated_bindata.go 3 | //go:generate go fmt pkg/static/zz_generated_bindata.go 4 | 5 | package main 6 | 7 | import ( 8 | "context" 9 | "errors" 10 | "os" 11 | 12 | "github.com/k3s-io/k3s/pkg/cli/agent" 13 | "github.com/k3s-io/k3s/pkg/cli/cert" 14 | "github.com/k3s-io/k3s/pkg/cli/cmds" 15 | "github.com/k3s-io/k3s/pkg/cli/completion" 16 | "github.com/k3s-io/k3s/pkg/cli/crictl" 17 | "github.com/k3s-io/k3s/pkg/cli/etcdsnapshot" 18 | "github.com/k3s-io/k3s/pkg/cli/kubectl" 19 | "github.com/k3s-io/k3s/pkg/cli/secretsencrypt" 20 | "github.com/k3s-io/k3s/pkg/cli/server" 21 | "github.com/k3s-io/k3s/pkg/configfilearg" 22 | "github.com/sirupsen/logrus" 23 | "github.com/urfave/cli/v2" 24 | ) 25 | 26 | func main() { 27 | app := cmds.NewApp() 28 | app.DisableSliceFlagSeparator = true 29 | app.Commands = []*cli.Command{ 30 | cmds.NewServerCommand(server.Run), 31 | cmds.NewAgentCommand(agent.Run), 32 | cmds.NewKubectlCommand(kubectl.Run), 33 | cmds.NewCRICTL(crictl.Run), 34 | cmds.NewEtcdSnapshotCommands( 35 | etcdsnapshot.Delete, 36 | etcdsnapshot.List, 37 | etcdsnapshot.Prune, 38 | etcdsnapshot.Save, 39 | ), 40 | cmds.NewSecretsEncryptCommands( 41 | secretsencrypt.Status, 42 | secretsencrypt.Enable, 43 | secretsencrypt.Disable, 44 | secretsencrypt.Prepare, 45 | secretsencrypt.Rotate, 46 | secretsencrypt.Reencrypt, 47 | secretsencrypt.RotateKeys, 48 | ), 49 | cmds.NewCertCommands( 50 | cert.Check, 51 | cert.Rotate, 52 | cert.RotateCA, 53 | ), 54 | cmds.NewCompletionCommand(completion.Run), 55 | } 56 | 57 | if err 
:= app.Run(configfilearg.MustParse(os.Args)); err != nil && !errors.Is(err, context.Canceled) { 58 | logrus.Fatal(err) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /manifests/metrics-server/aggregated-metrics-reader.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: system:aggregated-metrics-reader 5 | labels: 6 | rbac.authorization.k8s.io/aggregate-to-view: "true" 7 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 8 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 9 | rules: 10 | - apiGroups: ["metrics.k8s.io"] 11 | resources: ["pods", "nodes"] 12 | verbs: ["get", "list", "watch"] 13 | -------------------------------------------------------------------------------- /manifests/metrics-server/auth-delegator.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: metrics-server:system:auth-delegator 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: system:auth-delegator 10 | subjects: 11 | - kind: ServiceAccount 12 | name: metrics-server 13 | namespace: kube-system 14 | -------------------------------------------------------------------------------- /manifests/metrics-server/auth-reader.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: metrics-server-auth-reader 6 | namespace: kube-system 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: Role 10 | name: extension-apiserver-authentication-reader 11 | subjects: 12 | - kind: ServiceAccount 13 | name: metrics-server 14 | namespace: kube-system 15 | 
-------------------------------------------------------------------------------- /manifests/metrics-server/metrics-apiservice.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiregistration.k8s.io/v1 3 | kind: APIService 4 | metadata: 5 | name: v1beta1.metrics.k8s.io 6 | spec: 7 | service: 8 | name: metrics-server 9 | namespace: kube-system 10 | group: metrics.k8s.io 11 | version: v1beta1 12 | insecureSkipTLSVerify: true 13 | groupPriorityMinimum: 100 14 | versionPriority: 100 15 | -------------------------------------------------------------------------------- /manifests/metrics-server/metrics-server-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: metrics-server 6 | namespace: kube-system 7 | labels: 8 | kubernetes.io/name: "Metrics-server" 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | selector: 12 | k8s-app: metrics-server 13 | ports: 14 | - port: 443 15 | name: https 16 | protocol: TCP 17 | targetPort: https 18 | ipFamilyPolicy: PreferDualStack 19 | -------------------------------------------------------------------------------- /manifests/metrics-server/resource-reader.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: system:metrics-server 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - nodes/metrics 11 | verbs: 12 | - get 13 | - apiGroups: 14 | - "" 15 | resources: 16 | - pods 17 | - nodes 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | --- 23 | apiVersion: rbac.authorization.k8s.io/v1 24 | kind: ClusterRoleBinding 25 | metadata: 26 | name: system:metrics-server 27 | roleRef: 28 | apiGroup: rbac.authorization.k8s.io 29 | kind: ClusterRole 30 | name: system:metrics-server 31 | subjects: 32 | - kind: ServiceAccount 33 | name: 
metrics-server 34 | namespace: kube-system 35 | -------------------------------------------------------------------------------- /manifests/runtimes.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: node.k8s.io/v1 2 | kind: RuntimeClass 3 | metadata: 4 | name: nvidia 5 | handler: nvidia 6 | --- 7 | apiVersion: node.k8s.io/v1 8 | kind: RuntimeClass 9 | metadata: 10 | name: nvidia-experimental 11 | handler: nvidia-experimental 12 | --- 13 | apiVersion: node.k8s.io/v1 14 | kind: RuntimeClass 15 | metadata: 16 | name: crun 17 | handler: crun 18 | --- 19 | apiVersion: node.k8s.io/v1 20 | kind: RuntimeClass 21 | metadata: 22 | name: lunatic 23 | handler: lunatic 24 | --- 25 | apiVersion: node.k8s.io/v1 26 | kind: RuntimeClass 27 | metadata: 28 | name: slight 29 | handler: slight 30 | --- 31 | apiVersion: node.k8s.io/v1 32 | kind: RuntimeClass 33 | metadata: 34 | name: spin 35 | handler: spin 36 | --- 37 | apiVersion: node.k8s.io/v1 38 | kind: RuntimeClass 39 | metadata: 40 | name: wws 41 | handler: wws 42 | --- 43 | apiVersion: node.k8s.io/v1 44 | kind: RuntimeClass 45 | metadata: 46 | name: wasmedge 47 | handler: wasmedge 48 | --- 49 | apiVersion: node.k8s.io/v1 50 | kind: RuntimeClass 51 | metadata: 52 | name: wasmer 53 | handler: wasmer 54 | --- 55 | apiVersion: node.k8s.io/v1 56 | kind: RuntimeClass 57 | metadata: 58 | name: wasmtime 59 | handler: wasmtime -------------------------------------------------------------------------------- /manifests/traefik.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.cattle.io/v1 3 | kind: HelmChart 4 | metadata: 5 | name: traefik-crd 6 | namespace: kube-system 7 | spec: 8 | chart: https://%{KUBERNETES_API}%/static/charts/traefik-crd-34.2.1+up34.2.0.tgz 9 | --- 10 | apiVersion: helm.cattle.io/v1 11 | kind: HelmChart 12 | metadata: 13 | name: traefik 14 | namespace: kube-system 15 | spec: 16 | chart: 
https://%{KUBERNETES_API}%/static/charts/traefik-34.2.1+up34.2.0.tgz 17 | set: 18 | global.systemDefaultRegistry: "%{SYSTEM_DEFAULT_REGISTRY_RAW}%" 19 | valuesContent: |- 20 | deployment: 21 | podAnnotations: 22 | prometheus.io/port: "8082" 23 | prometheus.io/scrape: "true" 24 | providers: 25 | kubernetesIngress: 26 | publishedService: 27 | enabled: true 28 | priorityClassName: "system-cluster-critical" 29 | image: 30 | repository: "rancher/mirrored-library-traefik" 31 | tag: "3.3.6" 32 | tolerations: 33 | - key: "CriticalAddonsOnly" 34 | operator: "Exists" 35 | - key: "node-role.kubernetes.io/control-plane" 36 | operator: "Exists" 37 | effect: "NoSchedule" 38 | - key: "node-role.kubernetes.io/master" 39 | operator: "Exists" 40 | effect: "NoSchedule" 41 | service: 42 | ipFamilyPolicy: "PreferDualStack" 43 | -------------------------------------------------------------------------------- /package/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.21 AS base 2 | RUN apk add -U ca-certificates zstd tzdata 3 | ARG TARGETARCH 4 | COPY build/out/data-linux*.tar.zst / 5 | RUN SOURCE_TAR_ZST="/data-linux-${TARGETARCH}.tar.zst" && \ 6 | # If the arch-specific file doesn't exist, try the default one (used with Dapper or single-arch) 7 | if [ ! 
-f "${SOURCE_TAR_ZST}" ]; then \ 8 | SOURCE_TAR_ZST="/data-linux.tar.zst" ; \ 9 | fi && \ 10 | \ 11 | mkdir -p /image/etc/ssl/certs /image/run /image/var/run /image/tmp /image/lib/modules /image/lib/firmware /image/var/lib/rancher/k3s/data/cni && \ 12 | zstdcat -d "${SOURCE_TAR_ZST}" | tar -xa -C /image && \ 13 | for FILE in cni $(/image/bin/find /image/bin -lname cni -printf "%f\n"); do ln -s /bin/cni /image/var/lib/rancher/k3s/data/cni/$FILE; done && \ 14 | echo "root:x:0:0:root:/:/bin/sh" > /image/etc/passwd && \ 15 | echo "root:x:0:" > /image/etc/group && \ 16 | cp /etc/ssl/certs/ca-certificates.crt /image/etc/ssl/certs/ca-certificates.crt 17 | 18 | FROM scratch AS collect 19 | ARG DRONE_TAG="dev" 20 | COPY --from=base /image / 21 | COPY --from=base /usr/share/zoneinfo /usr/share/zoneinfo 22 | RUN mkdir -p /etc && \ 23 | echo 'hosts: files dns' > /etc/nsswitch.conf && \ 24 | echo "PRETTY_NAME=\"K3s ${DRONE_TAG}\"" > /etc/os-release && \ 25 | chmod 1777 /tmp 26 | 27 | FROM scratch 28 | VOLUME /var/lib/kubelet 29 | VOLUME /var/lib/rancher/k3s 30 | VOLUME /var/lib/cni 31 | VOLUME /var/log 32 | COPY --from=collect / / 33 | ENV PATH="/var/lib/rancher/k3s/data/cni:$PATH:/bin/aux" 34 | ENV CRI_CONFIG_FILE="/var/lib/rancher/k3s/agent/etc/crictl.yaml" 35 | ENTRYPOINT ["/bin/k3s"] 36 | CMD ["agent"] 37 | -------------------------------------------------------------------------------- /package/rpm/repo-setup.sh: -------------------------------------------------------------------------------- 1 | cat </etc/yum.repos.d/rancher.repo 2 | [rancher] 3 | name=Rancher Repository 4 | baseurl=https://rpm.rancher.io/ 5 | enabled=1 6 | gpgcheck=1 7 | gpgkey=https://rpm.rancher.io/public.key 8 | EOF 9 | -------------------------------------------------------------------------------- /pkg/agent/config/config_internal_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | ) 7 | 8 | func 
Test_isValidResolvConf(t *testing.T) { 9 | tests := []struct { 10 | name string 11 | fileContent string 12 | expectedResult bool 13 | }{ 14 | {name: "Valid ResolvConf", fileContent: "nameserver 8.8.8.8\nnameserver 2001:4860:4860::8888\n", expectedResult: true}, 15 | {name: "Invalid ResolvConf", fileContent: "nameserver 999.999.999.999\nnameserver not.an.ip\n", expectedResult: false}, 16 | {name: "Wrong Nameserver", fileContent: "search example.com\n", expectedResult: false}, 17 | {name: "One valid nameserver", fileContent: "test test.com\nnameserver 8.8.8.8", expectedResult: true}, 18 | {name: "Non GlobalUnicast", fileContent: "nameserver ::1\nnameserver 169.254.0.1\nnameserver fe80::1\n", expectedResult: false}, 19 | {name: "Empty File", fileContent: "", expectedResult: false}, 20 | } 21 | 22 | for _, tt := range tests { 23 | t.Run(tt.name, func(t *testing.T) { 24 | tmpfile, err := os.CreateTemp("", "resolv.conf") 25 | if err != nil { 26 | t.Fatal(err) 27 | } 28 | defer os.Remove(tmpfile.Name()) 29 | 30 | if _, err := tmpfile.WriteString(tt.fileContent); err != nil { 31 | t.Errorf("error writing to file: %v with content: %v", tmpfile.Name(), tt.fileContent) 32 | } 33 | 34 | res := isValidResolvConf(tmpfile.Name()) 35 | if res != tt.expectedResult { 36 | t.Errorf("isValidResolvConf(%s) = %v; want %v", tt.name, res, tt.expectedResult) 37 | } 38 | }) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /pkg/agent/containerd/command.go: -------------------------------------------------------------------------------- 1 | //go:build !windows 2 | 3 | package containerd 4 | 5 | import ( 6 | "os/exec" 7 | "syscall" 8 | ) 9 | 10 | func addDeathSig(cmd *exec.Cmd) { 11 | // not supported in this OS 12 | cmd.SysProcAttr = &syscall.SysProcAttr{ 13 | Pdeathsig: syscall.SIGKILL, 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /pkg/agent/containerd/command_windows.go: 
-------------------------------------------------------------------------------- 1 | package containerd 2 | 3 | import "os/exec" 4 | 5 | func addDeathSig(_ *exec.Cmd) { 6 | // not supported in this OS 7 | } 8 | -------------------------------------------------------------------------------- /pkg/agent/containerd/selinux.go: -------------------------------------------------------------------------------- 1 | package containerd 2 | 3 | import ( 4 | "github.com/opencontainers/selinux/go-selinux" 5 | ) 6 | 7 | const ( 8 | SELinuxContextType = "container_runtime_t" 9 | ) 10 | 11 | func selinuxStatus() (bool, bool, error) { 12 | if !selinux.GetEnabled() { 13 | return false, false, nil 14 | } 15 | 16 | label, err := selinux.CurrentLabel() 17 | if err != nil { 18 | return true, false, err 19 | } 20 | 21 | ctx, err := selinux.NewContext(label) 22 | if err != nil { 23 | return true, false, err 24 | } 25 | 26 | return true, ctx["type"] == SELinuxContextType, nil 27 | } 28 | -------------------------------------------------------------------------------- /pkg/agent/cri/cri_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | // +build linux 3 | 4 | package cri 5 | 6 | const socketPrefix = "unix://" 7 | -------------------------------------------------------------------------------- /pkg/agent/cri/cri_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package cri 5 | 6 | const socketPrefix = "npipe://" 7 | -------------------------------------------------------------------------------- /pkg/agent/cridockerd/config_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux && !no_cri_dockerd 2 | // +build linux,!no_cri_dockerd 3 | 4 | package cridockerd 5 | 6 | import ( 7 | "context" 8 | "strings" 9 | 10 | "github.com/docker/docker/client" 11 | 
"github.com/k3s-io/k3s/pkg/daemons/config" 12 | pkgerrors "github.com/pkg/errors" 13 | ) 14 | 15 | const socketPrefix = "unix://" 16 | 17 | func setupDockerCRIConfig(ctx context.Context, cfg *config.Node) error { 18 | clientOpts := []client.Opt{client.FromEnv, client.WithAPIVersionNegotiation()} 19 | if cfg.ContainerRuntimeEndpoint != "" { 20 | host := cfg.ContainerRuntimeEndpoint 21 | if !strings.HasPrefix(host, socketPrefix) { 22 | host = socketPrefix + host 23 | } 24 | clientOpts = append(clientOpts, client.WithHost(host)) 25 | } 26 | c, err := client.NewClientWithOpts(clientOpts...) 27 | if err != nil { 28 | return pkgerrors.WithMessage(err, "failed to create docker client") 29 | } 30 | i, err := c.Info(ctx) 31 | if err != nil { 32 | return pkgerrors.WithMessage(err, "failed to get docker runtime info") 33 | } 34 | // note: this mutatation of the passed agent.Config is later used to set the 35 | // kubelet's cgroup-driver flag. This may merit moving to somewhere else in order 36 | // to avoid mutating the configuration while setting up the docker CRI. 
37 | cfg.AgentConfig.Systemd = i.CgroupDriver == "systemd" 38 | return nil 39 | } 40 | -------------------------------------------------------------------------------- /pkg/agent/cridockerd/config_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows && !no_cri_dockerd 2 | // +build windows,!no_cri_dockerd 3 | 4 | package cridockerd 5 | 6 | import ( 7 | "context" 8 | 9 | "github.com/k3s-io/k3s/pkg/daemons/config" 10 | ) 11 | 12 | const socketPrefix = "npipe://" 13 | 14 | func setupDockerCRIConfig(ctx context.Context, cfg *config.Node) error { 15 | return nil 16 | } 17 | -------------------------------------------------------------------------------- /pkg/agent/cridockerd/nocridockerd.go: -------------------------------------------------------------------------------- 1 | //go:build no_cri_dockerd 2 | // +build no_cri_dockerd 3 | 4 | package cridockerd 5 | 6 | import ( 7 | "context" 8 | "errors" 9 | 10 | "github.com/k3s-io/k3s/pkg/daemons/config" 11 | ) 12 | 13 | func Run(ctx context.Context, cfg *config.Node) error { 14 | return errors.New("cri-dockerd disabled at build time") 15 | } 16 | -------------------------------------------------------------------------------- /pkg/agent/flannel/setup_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | // +build linux 3 | 4 | package flannel 5 | 6 | const ( 7 | cniConf = `{ 8 | "name":"cbr0", 9 | "cniVersion":"1.0.0", 10 | "plugins":[ 11 | { 12 | "type":"flannel", 13 | "delegate":{ 14 | "hairpinMode":true, 15 | "forceAddress":true, 16 | "isDefaultGateway":true 17 | } 18 | }, 19 | { 20 | "type":"portmap", 21 | "capabilities":{ 22 | "portMappings":true 23 | } 24 | }, 25 | { 26 | "type":"bandwidth", 27 | "capabilities":{ 28 | "bandwidth":true 29 | } 30 | } 31 | ] 32 | } 33 | ` 34 | 35 | vxlanBackend = `{ 36 | "Type": "vxlan" 37 | }` 38 | ) 39 | 
-------------------------------------------------------------------------------- /pkg/agent/flannel/setup_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package flannel 5 | 6 | const ( 7 | cniConf = `{ 8 | "name":"flannel.4096", 9 | "cniVersion":"1.0.0", 10 | "plugins":[ 11 | { 12 | "type":"flannel", 13 | "capabilities": { 14 | "portMappings": true, 15 | "dns": true 16 | }, 17 | "delegate": { 18 | "type": "win-overlay", 19 | "apiVersion": 2, 20 | "Policies": [{ 21 | "Name": "EndpointPolicy", 22 | "Value": { 23 | "Type": "OutBoundNAT", 24 | "Settings": { 25 | "Exceptions": [ 26 | "%CLUSTER_CIDR%", "%SERVICE_CIDR%" 27 | ] 28 | } 29 | } 30 | }, { 31 | "Name": "EndpointPolicy", 32 | "Value": { 33 | "Type": "SDNRoute", 34 | "Settings": { 35 | "DestinationPrefix": "%SERVICE_CIDR%", 36 | "NeedEncap": true 37 | } 38 | } 39 | }, { 40 | "name": "EndpointPolicy", 41 | "value": { 42 | "Type": "ProviderAddress", 43 | "Settings": { 44 | "ProviderAddress": "%IPV4_ADDRESS%" 45 | } 46 | } 47 | }] 48 | } 49 | } 50 | ] 51 | } 52 | ` 53 | 54 | vxlanBackend = `{ 55 | "Type": "vxlan", 56 | "VNI": 4096, 57 | "Port": 4789 58 | }` 59 | ) 60 | -------------------------------------------------------------------------------- /pkg/agent/loadbalancer/config.go: -------------------------------------------------------------------------------- 1 | package loadbalancer 2 | 3 | import ( 4 | "encoding/json" 5 | "os" 6 | 7 | "github.com/k3s-io/k3s/pkg/agent/util" 8 | ) 9 | 10 | // lbConfig stores loadbalancer state that should be persisted across restarts. 
11 | type lbConfig struct { 12 | ServerURL string `json:"ServerURL"` 13 | ServerAddresses []string `json:"ServerAddresses"` 14 | } 15 | 16 | func (lb *LoadBalancer) writeConfig() error { 17 | config := &lbConfig{ 18 | ServerURL: lb.scheme + "://" + lb.servers.getDefaultAddress(), 19 | ServerAddresses: lb.servers.getAddresses(), 20 | } 21 | configOut, err := json.MarshalIndent(config, "", " ") 22 | if err != nil { 23 | return err 24 | } 25 | return util.WriteFile(lb.configFile, string(configOut)) 26 | } 27 | 28 | func (lb *LoadBalancer) updateConfig() error { 29 | if configBytes, err := os.ReadFile(lb.configFile); err == nil { 30 | config := &lbConfig{} 31 | if err := json.Unmarshal(configBytes, config); err == nil { 32 | // if the default server from the config matches our current default, 33 | // load the rest of the addresses as well. 34 | if config.ServerURL == lb.scheme+"://"+lb.servers.getDefaultAddress() { 35 | lb.Update(config.ServerAddresses) 36 | return nil 37 | } 38 | } 39 | } 40 | // config didn't exist or used a different default server, write the current config to disk. 
41 | return lb.writeConfig() 42 | } 43 | -------------------------------------------------------------------------------- /pkg/agent/loadbalancer/metrics.go: -------------------------------------------------------------------------------- 1 | package loadbalancer 2 | 3 | import ( 4 | "github.com/k3s-io/k3s/pkg/version" 5 | "github.com/prometheus/client_golang/prometheus" 6 | "k8s.io/component-base/metrics" 7 | ) 8 | 9 | var ( 10 | loadbalancerConnections = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 11 | Name: version.Program + "_loadbalancer_server_connections", 12 | Help: "Count of current connections to loadbalancer server", 13 | }, []string{"name", "server"}) 14 | 15 | loadbalancerState = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 16 | Name: version.Program + "_loadbalancer_server_health", 17 | Help: "Current health value of loadbalancer server", 18 | }, []string{"name", "server"}) 19 | 20 | loadbalancerDials = prometheus.NewHistogramVec(prometheus.HistogramOpts{ 21 | Name: version.Program + "_loadbalancer_dial_duration_seconds", 22 | Help: "Time taken to dial a connection to a backend server", 23 | Buckets: metrics.ExponentialBuckets(0.001, 2, 15), 24 | }, []string{"name", "status"}) 25 | ) 26 | 27 | // MustRegister registers loadbalancer metrics 28 | func MustRegister(registerer prometheus.Registerer) { 29 | registerer.MustRegister(loadbalancerConnections, loadbalancerState, loadbalancerDials) 30 | } 31 | -------------------------------------------------------------------------------- /pkg/agent/loadbalancer/utility.go: -------------------------------------------------------------------------------- 1 | package loadbalancer 2 | 3 | import ( 4 | "errors" 5 | "net/url" 6 | "sort" 7 | "strings" 8 | ) 9 | 10 | func parseURL(serverURL, newHost string) (string, string, error) { 11 | parsedURL, err := url.Parse(serverURL) 12 | if err != nil { 13 | return "", "", err 14 | } 15 | if parsedURL.Host == "" { 16 | return "", "", errors.New("Initial server URL host is 
not defined for load balancer") 17 | } 18 | address := parsedURL.Host 19 | if parsedURL.Port() == "" { 20 | if strings.ToLower(parsedURL.Scheme) == "http" { 21 | address += ":80" 22 | } 23 | if strings.ToLower(parsedURL.Scheme) == "https" { 24 | address += ":443" 25 | } 26 | } 27 | parsedURL.Host = newHost 28 | return address, parsedURL.String(), nil 29 | } 30 | 31 | // sortServers returns a sorted, unique list of strings, with any 32 | // empty values removed. The returned bool is true if the list 33 | // contains the search string. 34 | func sortServers(input []string, search string) ([]string, bool) { 35 | result := []string{} 36 | found := false 37 | skip := map[string]bool{"": true} 38 | 39 | for _, entry := range input { 40 | if skip[entry] { 41 | continue 42 | } 43 | if search == entry { 44 | found = true 45 | } 46 | skip[entry] = true 47 | result = append(result, entry) 48 | } 49 | 50 | sort.Strings(result) 51 | return result, found 52 | } 53 | -------------------------------------------------------------------------------- /pkg/agent/loadbalancer/utility_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package loadbalancer 5 | 6 | import "syscall" 7 | 8 | func reusePort(network, address string, conn syscall.RawConn) error { 9 | return nil 10 | } 11 | -------------------------------------------------------------------------------- /pkg/agent/loadbalancer/utlity_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | 3 | package loadbalancer 4 | 5 | import ( 6 | "syscall" 7 | 8 | "golang.org/x/sys/unix" 9 | ) 10 | 11 | func reusePort(network, address string, conn syscall.RawConn) error { 12 | return conn.Control(func(descriptor uintptr) { 13 | syscall.SetsockoptInt(int(descriptor), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1) 14 | }) 15 | } 16 | 
-------------------------------------------------------------------------------- /pkg/agent/netpol/netpol_windows.go: -------------------------------------------------------------------------------- 1 | package netpol 2 | 3 | import ( 4 | "context" 5 | 6 | daemonconfig "github.com/k3s-io/k3s/pkg/daemons/config" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | func Run(ctx context.Context, nodeConfig *daemonconfig.Node) error { 11 | logrus.Warnf("Skipping network policy controller start, netpol is not supported on windows") 12 | return nil 13 | } 14 | -------------------------------------------------------------------------------- /pkg/agent/run_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | // +build linux 3 | 4 | package agent 5 | 6 | import ( 7 | "os" 8 | "path/filepath" 9 | 10 | "github.com/k3s-io/k3s/pkg/cli/cmds" 11 | "github.com/k3s-io/k3s/pkg/daemons/config" 12 | ) 13 | 14 | const ( 15 | criDockerdSock = "unix:///run/k3s/cri-dockerd/cri-dockerd.sock" 16 | containerdSock = "unix:///run/k3s/containerd/containerd.sock" 17 | ) 18 | 19 | // setupCriCtlConfig creates the crictl config file and populates it 20 | // with the given data from config. 
21 | func setupCriCtlConfig(cfg cmds.Agent, nodeConfig *config.Node) error { 22 | cre := nodeConfig.ContainerRuntimeEndpoint 23 | if cre == "" { 24 | switch { 25 | case cfg.Docker: 26 | cre = criDockerdSock 27 | default: 28 | cre = containerdSock 29 | } 30 | } 31 | 32 | agentConfDir := filepath.Join(cfg.DataDir, "agent", "etc") 33 | if _, err := os.Stat(agentConfDir); os.IsNotExist(err) { 34 | if err := os.MkdirAll(agentConfDir, 0700); err != nil { 35 | return err 36 | } 37 | } 38 | 39 | // Send to node struct the value from cli/config default runtime 40 | if cfg.DefaultRuntime != "" { 41 | nodeConfig.DefaultRuntime = cfg.DefaultRuntime 42 | } 43 | 44 | crp := "runtime-endpoint: " + cre + "\n" 45 | ise := nodeConfig.ImageServiceEndpoint 46 | if ise != "" && ise != cre { 47 | crp += "image-endpoint: " + cre + "\n" 48 | } 49 | return os.WriteFile(agentConfDir+"/crictl.yaml", []byte(crp), 0600) 50 | } 51 | -------------------------------------------------------------------------------- /pkg/agent/run_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package agent 5 | 6 | import ( 7 | "os" 8 | "path/filepath" 9 | "strings" 10 | 11 | "github.com/k3s-io/k3s/pkg/cli/cmds" 12 | "github.com/k3s-io/k3s/pkg/daemons/config" 13 | ) 14 | 15 | const ( 16 | dockershimSock = "npipe:////./pipe/docker_engine" 17 | containerdSock = "npipe:////./pipe/containerd-containerd" 18 | ) 19 | 20 | // setupCriCtlConfig creates the crictl config file and populates it 21 | // with the given data from config. 
22 | func setupCriCtlConfig(cfg cmds.Agent, nodeConfig *config.Node) error { 23 | cre := nodeConfig.ContainerRuntimeEndpoint 24 | if cre == "" || strings.HasPrefix(cre, "npipe:") { 25 | switch { 26 | case cfg.Docker: 27 | cre = dockershimSock 28 | default: 29 | cre = containerdSock 30 | } 31 | } else { 32 | cre = containerdSock 33 | } 34 | agentConfDir := filepath.Join(cfg.DataDir, "agent", "etc") 35 | if _, err := os.Stat(agentConfDir); os.IsNotExist(err) { 36 | if err := os.MkdirAll(agentConfDir, 0700); err != nil { 37 | return err 38 | } 39 | } 40 | 41 | crp := "runtime-endpoint: " + cre + "\n" 42 | ise := nodeConfig.ImageServiceEndpoint 43 | if ise != "" && ise != cre { 44 | crp += "image-endpoint: " + cre + "\n" 45 | } 46 | return os.WriteFile(filepath.Join(agentConfDir, "crictl.yaml"), []byte(crp), 0600) 47 | } 48 | -------------------------------------------------------------------------------- /pkg/agent/syssetup/setup_windows.go: -------------------------------------------------------------------------------- 1 | package syssetup 2 | 3 | import kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" 4 | 5 | func Configure(enableIPv6 bool, config *kubeproxyconfig.KubeProxyConntrackConfiguration) { 6 | 7 | } 8 | -------------------------------------------------------------------------------- /pkg/agent/templates/templates_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | 3 | package templates 4 | 5 | import ( 6 | "encoding/json" 7 | "text/template" 8 | ) 9 | 10 | // Linux config templates do not need fixups 11 | var templateFuncs = template.FuncMap{ 12 | "deschemify": func(s string) string { 13 | return s 14 | }, 15 | "toJson": func(v interface{}) string { 16 | output, _ := json.Marshal(v) 17 | return string(output) 18 | }, 19 | } 20 | -------------------------------------------------------------------------------- /pkg/agent/templates/templates_windows.go: 
-------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package templates 5 | 6 | import ( 7 | "encoding/json" 8 | "net/url" 9 | "strings" 10 | "text/template" 11 | ) 12 | 13 | // Windows config templates need named pipe addresses fixed up 14 | var templateFuncs = template.FuncMap{ 15 | "deschemify": func(s string) string { 16 | if strings.HasPrefix(s, "npipe:") { 17 | u, err := url.Parse(s) 18 | if err != nil { 19 | return "" 20 | } 21 | return u.Path 22 | } 23 | return s 24 | }, 25 | "toJson": func(v interface{}) string { 26 | output, _ := json.Marshal(v) 27 | return string(output) 28 | }, 29 | } 30 | -------------------------------------------------------------------------------- /pkg/agent/util/file.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "errors" 5 | "os" 6 | "path/filepath" 7 | 8 | pkgerrors "github.com/pkg/errors" 9 | ) 10 | 11 | func WriteFile(name string, content string) error { 12 | os.MkdirAll(filepath.Dir(name), 0755) 13 | err := os.WriteFile(name, []byte(content), 0644) 14 | if err != nil { 15 | return pkgerrors.WithMessagef(err, "writing %s", name) 16 | } 17 | return nil 18 | } 19 | 20 | func CopyFile(sourceFile string, destinationFile string, ignoreNotExist bool) error { 21 | os.MkdirAll(filepath.Dir(destinationFile), 0755) 22 | input, err := os.ReadFile(sourceFile) 23 | if errors.Is(err, os.ErrNotExist) && ignoreNotExist { 24 | return nil 25 | } else if err != nil { 26 | return pkgerrors.WithMessagef(err, "copying %s to %s", sourceFile, destinationFile) 27 | } 28 | err = os.WriteFile(destinationFile, input, 0644) 29 | if err != nil { 30 | return pkgerrors.WithMessagef(err, "copying %s to %s", sourceFile, destinationFile) 31 | } 32 | return nil 33 | } 34 | -------------------------------------------------------------------------------- /pkg/agent/util/strings.go: 
-------------------------------------------------------------------------------- 1 | package util 2 | 3 | import "strings" 4 | 5 | // HasSuffixI returns true if string s has any of the given suffixes, ignoring case. 6 | func HasSuffixI(s string, suffixes ...string) bool { 7 | s = strings.ToLower(s) 8 | for _, suffix := range suffixes { 9 | if strings.HasSuffix(s, strings.ToLower(suffix)) { 10 | return true 11 | } 12 | } 13 | return false 14 | } 15 | -------------------------------------------------------------------------------- /pkg/authenticator/basicauth/interfaces.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2014 The Kubernetes Authors. 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | http://www.apache.org/licenses/LICENSE-2.0 7 | Unless required by applicable law or agreed to in writing, software 8 | distributed under the License is distributed on an "AS IS" BASIS, 9 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | See the License for the specific language governing permissions and 11 | limitations under the License. 12 | */ 13 | 14 | package basicauth 15 | 16 | import ( 17 | "context" 18 | 19 | "k8s.io/apiserver/pkg/authentication/authenticator" 20 | ) 21 | 22 | // Password checks a username and password against a backing authentication 23 | // store and returns a Response or an error if the password could not be 24 | // checked. 
25 | type Password interface { 26 | AuthenticatePassword(ctx context.Context, user, password string) (*authenticator.Response, bool, error) 27 | } 28 | -------------------------------------------------------------------------------- /pkg/authenticator/hash/hash.go: -------------------------------------------------------------------------------- 1 | package hash 2 | 3 | // Hasher is a generic interface for hashing algorithms 4 | type Hasher interface { 5 | // CreateHash will return a hashed version of the secretKey, or an error 6 | CreateHash(secretKey string) (string, error) 7 | // VerifyHash will compare a secretKey and a hash, and return nil if they match 8 | VerifyHash(hash, secretKey string) error 9 | } 10 | -------------------------------------------------------------------------------- /pkg/authenticator/hash/scrypt_test.go: -------------------------------------------------------------------------------- 1 | package hash 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | ) 7 | 8 | func Test_UnitSCrypt_VerifyHash(t *testing.T) { 9 | type args struct { 10 | secretKey string 11 | } 12 | tests := []struct { 13 | name string 14 | args args 15 | wantErr bool 16 | }{ 17 | { 18 | name: "Basic Hash Test", 19 | args: args{ 20 | secretKey: "hello world", 21 | }, 22 | }, 23 | { 24 | name: "Long Hash Test", 25 | args: args{ 26 | secretKey: strings.Repeat("A", 720), 27 | }, 28 | }, 29 | } 30 | for _, tt := range tests { 31 | t.Run(tt.name, func(t *testing.T) { 32 | hasher := NewSCrypt() 33 | hash, _ := hasher.CreateHash(tt.args.secretKey) 34 | if err := hasher.VerifyHash(hash, tt.args.secretKey); (err != nil) != tt.wantErr { 35 | t.Errorf("SCrypt.VerifyHash() error = %v, wantErr %v", err, tt.wantErr) 36 | } 37 | }) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /pkg/bootstrap/bootstrap_test.go: -------------------------------------------------------------------------------- 1 | package bootstrap 2 | 3 | import ( 4 | 
"testing" 5 | 6 | "github.com/k3s-io/k3s/pkg/daemons/config" 7 | ) 8 | 9 | func TestObjToMap(t *testing.T) { 10 | type args struct { 11 | obj interface{} 12 | } 13 | tests := []struct { 14 | name string 15 | args args 16 | want map[string]string 17 | wantErr bool 18 | }{ 19 | { 20 | name: "Minimal Valid", 21 | args: args{ 22 | obj: &config.ControlRuntimeBootstrap{ 23 | ServerCA: "/var/lib/rancher/k3s/server/tls/server-ca.crt", 24 | ServerCAKey: "/var/lib/rancher/k3s/server/tls/server-ca.key", 25 | }, 26 | }, 27 | wantErr: false, 28 | }, 29 | { 30 | name: "Minimal Invalid", 31 | args: args{ 32 | obj: 1, 33 | }, 34 | wantErr: true, 35 | }, 36 | } 37 | for _, tt := range tests { 38 | t.Run(tt.name, func(t *testing.T) { 39 | _, err := ObjToMap(tt.args.obj) 40 | if (err != nil) != tt.wantErr { 41 | t.Errorf("ObjToMap() error = %v, wantErr %v", err, tt.wantErr) 42 | return 43 | } 44 | }) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /pkg/cgroups/cgroups_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package cgroups 5 | 6 | func Validate() error { 7 | return nil 8 | } 9 | 10 | func CheckCgroups() (kubeletRoot, runtimeRoot string, controllers map[string]bool) { 11 | return 12 | } 13 | -------------------------------------------------------------------------------- /pkg/cli/cmds/check-config.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "github.com/urfave/cli/v2" 5 | ) 6 | 7 | func NewCheckConfigCommand(action func(*cli.Context) error) *cli.Command { 8 | return &cli.Command{ 9 | Name: "check-config", 10 | Usage: "Run config check", 11 | SkipFlagParsing: true, 12 | Action: action, 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /pkg/cli/cmds/completion.go: 
-------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "github.com/urfave/cli/v2" 5 | ) 6 | 7 | func NewCompletionCommand(action func(*cli.Context) error) *cli.Command { 8 | return &cli.Command{ 9 | Name: "completion", 10 | Usage: "Install shell completion script", 11 | UsageText: appName + " completion [SHELL] (valid shells: bash, zsh)", 12 | Action: action, 13 | Flags: []cli.Flag{ 14 | &cli.BoolFlag{ 15 | Name: "i", 16 | Usage: "Install source line to rc file", 17 | }, 18 | }, 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /pkg/cli/cmds/config.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "github.com/k3s-io/k3s/pkg/version" 5 | "github.com/urfave/cli/v2" 6 | ) 7 | 8 | var ( 9 | // ConfigFlag is here to show to the user, but the actually processing is done by configfileargs before 10 | // call urfave 11 | ConfigFlag = &cli.StringFlag{ 12 | Name: "config", 13 | Aliases: []string{"c"}, 14 | Usage: "(config) Load configuration from `FILE`", 15 | EnvVars: []string{version.ProgramUpper + "_CONFIG_FILE"}, 16 | Value: "/etc/rancher/" + version.Program + "/config.yaml", 17 | } 18 | ) 19 | -------------------------------------------------------------------------------- /pkg/cli/cmds/const_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | 3 | package cmds 4 | 5 | const ( 6 | DefaultSnapshotter = "overlayfs" 7 | ) 8 | -------------------------------------------------------------------------------- /pkg/cli/cmds/const_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package cmds 5 | 6 | const ( 7 | DefaultSnapshotter = "windows" 8 | ) 9 | -------------------------------------------------------------------------------- 
/pkg/cli/cmds/cover_default.go: -------------------------------------------------------------------------------- 1 | //go:build !linux || !cover 2 | 3 | package cmds 4 | 5 | import "context" 6 | 7 | func WriteCoverage(ctx context.Context) {} 8 | -------------------------------------------------------------------------------- /pkg/cli/cmds/cover_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux && cover 2 | 3 | package cmds 4 | 5 | import ( 6 | "context" 7 | "os" 8 | "runtime/coverage" 9 | "time" 10 | 11 | "github.com/sirupsen/logrus" 12 | ) 13 | 14 | // writeCoverage checks if GOCOVERDIR is set on startup and writes coverage files to that directory 15 | // every 20 seconds. This is done to ensure that the coverage files are written even if the process is killed. 16 | func WriteCoverage(ctx context.Context) { 17 | if k, ok := os.LookupEnv("GOCOVERDIR"); ok { 18 | for { 19 | select { 20 | case <-ctx.Done(): 21 | if err := coverage.WriteCountersDir(k); err != nil { 22 | logrus.Warn(err) 23 | } 24 | return 25 | case <-time.After(20 * time.Second): 26 | if err := coverage.WriteCountersDir(k); err != nil { 27 | logrus.Warn(err) 28 | } 29 | } 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /pkg/cli/cmds/crictl.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "github.com/urfave/cli/v2" 5 | ) 6 | 7 | func NewCRICTL(action func(*cli.Context) error) *cli.Command { 8 | return &cli.Command{ 9 | Name: "crictl", 10 | Usage: "Run crictl", 11 | SkipFlagParsing: true, 12 | Action: action, 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /pkg/cli/cmds/ctr.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "github.com/urfave/cli/v2" 5 | ) 6 | 7 | func NewCtrCommand(action 
func(*cli.Context) error) *cli.Command { 8 | return &cli.Command{ 9 | Name: "ctr", 10 | Usage: "Run ctr", 11 | SkipFlagParsing: true, 12 | Action: action, 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /pkg/cli/cmds/golang.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "fmt" 5 | "runtime" 6 | "strings" 7 | 8 | "github.com/k3s-io/k3s/pkg/version" 9 | "github.com/sirupsen/logrus" 10 | ) 11 | 12 | func ValidateGolang() error { 13 | k8sVersion, _, _ := strings.Cut(version.Version, "+") 14 | if version.UpstreamGolang == "" { 15 | return fmt.Errorf("kubernetes golang build version not set - see 'golang: upstream version' in https://github.com/kubernetes/kubernetes/blob/%s/build/dependencies.yaml", k8sVersion) 16 | } 17 | if v, _, _ := strings.Cut(runtime.Version(), " "); version.UpstreamGolang != v { 18 | return fmt.Errorf("incorrect golang build version - kubernetes %s should be built with %s, runtime version is %s", k8sVersion, version.UpstreamGolang, v) 19 | } 20 | return nil 21 | } 22 | 23 | func MustValidateGolang() { 24 | if err := ValidateGolang(); err != nil { 25 | logrus.Fatalf("Failed to validate golang version: %v", err) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /pkg/cli/cmds/init_default.go: -------------------------------------------------------------------------------- 1 | //go:build !linux || !cgo 2 | // +build !linux !cgo 3 | 4 | package cmds 5 | 6 | func EvacuateCgroup2() error { 7 | return nil 8 | } 9 | -------------------------------------------------------------------------------- /pkg/cli/cmds/init_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux && cgo 2 | 3 | package cmds 4 | 5 | import ( 6 | "os" 7 | 8 | "github.com/moby/sys/userns" 9 | pkgerrors "github.com/pkg/errors" 10 | 
"github.com/rootless-containers/rootlesskit/pkg/parent/cgrouputil" 11 | ) 12 | 13 | // EvacuateCgroup2 will handle evacuating the root cgroup in order to enable subtree_control, 14 | // if running as pid 1 without rootless support. 15 | func EvacuateCgroup2() error { 16 | if os.Getpid() == 1 && !userns.RunningInUserNS() { 17 | // The root cgroup has to be empty to enable subtree_control, so evacuate it by placing 18 | // ourselves in the init cgroup. 19 | if err := cgrouputil.EvacuateCgroup2("init"); err != nil { 20 | return pkgerrors.WithMessage(err, "failed to evacuate root cgroup") 21 | } 22 | } 23 | return nil 24 | } 25 | -------------------------------------------------------------------------------- /pkg/cli/cmds/kubectl.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "github.com/urfave/cli/v2" 5 | ) 6 | 7 | func NewKubectlCommand(action func(*cli.Context) error) *cli.Command { 8 | return &cli.Command{ 9 | Name: "kubectl", 10 | Usage: "Run kubectl", 11 | SkipFlagParsing: true, 12 | Action: action, 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /pkg/cli/cmds/log_default.go: -------------------------------------------------------------------------------- 1 | //go:build !linux || !cgo 2 | // +build !linux !cgo 3 | 4 | package cmds 5 | 6 | func forkIfLoggingOrReaping() error { 7 | return nil 8 | } 9 | -------------------------------------------------------------------------------- /pkg/cli/cmds/nostage.go: -------------------------------------------------------------------------------- 1 | //go:build no_stage 2 | // +build no_stage 3 | 4 | package cmds 5 | 6 | const ( 7 | // The coredns and servicelb controllers can still be disabled, even if their manifests 8 | // are missing. Same with CloudController/ccm. 
9 | DisableItems = "coredns, servicelb" 10 | ) 11 | -------------------------------------------------------------------------------- /pkg/cli/cmds/root.go: -------------------------------------------------------------------------------- 1 | package cmds 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "runtime" 7 | 8 | "github.com/k3s-io/k3s/pkg/version" 9 | "github.com/urfave/cli/v2" 10 | ) 11 | 12 | var ( 13 | Debug bool 14 | DebugFlag = &cli.BoolFlag{ 15 | Name: "debug", 16 | Usage: "(logging) Turn on debug logs", 17 | Destination: &Debug, 18 | EnvVars: []string{version.ProgramUpper + "_DEBUG"}, 19 | } 20 | PreferBundledBin = &cli.BoolFlag{ 21 | Name: "prefer-bundled-bin", 22 | Usage: "(experimental) Prefer bundled userspace binaries over host binaries", 23 | } 24 | ) 25 | 26 | func init() { 27 | // hack - force "file,dns" lookup order if go dns is used 28 | if os.Getenv("RES_OPTIONS") == "" { 29 | os.Setenv("RES_OPTIONS", " ") 30 | } 31 | } 32 | 33 | func NewApp() *cli.App { 34 | app := cli.NewApp() 35 | app.Name = appName 36 | app.Usage = "Kubernetes, but small and simple" 37 | app.Version = fmt.Sprintf("%s (%s)", version.Version, version.GitCommit) 38 | cli.VersionPrinter = func(c *cli.Context) { 39 | fmt.Printf("%s version %s\n", app.Name, app.Version) 40 | fmt.Printf("go version %s\n", runtime.Version()) 41 | } 42 | app.Flags = []cli.Flag{ 43 | DebugFlag, 44 | DataDirFlag, 45 | } 46 | 47 | return app 48 | } 49 | -------------------------------------------------------------------------------- /pkg/cli/cmds/stage.go: -------------------------------------------------------------------------------- 1 | //go:build !no_stage 2 | 3 | package cmds 4 | 5 | const ( 6 | // coredns and servicelb run controllers that are turned off when their manifests are disabled. 7 | // The k3s CloudController also has a bundled manifest and can be disabled via the 8 | // --disable-cloud-controller flag or --disable=ccm, but the latter method is not documented. 
9 | DisableItems = "coredns, servicelb, traefik, local-storage, metrics-server, runtimes" 10 | ) 11 | -------------------------------------------------------------------------------- /pkg/cli/crictl/crictl.go: -------------------------------------------------------------------------------- 1 | package crictl 2 | 3 | import ( 4 | "os" 5 | "runtime" 6 | 7 | "github.com/urfave/cli/v2" 8 | "sigs.k8s.io/cri-tools/cmd/crictl" 9 | ) 10 | 11 | func Run(ctx *cli.Context) error { 12 | if runtime.GOOS == "windows" { 13 | os.Args = os.Args[1:] 14 | } 15 | crictl.Main() 16 | return nil 17 | } 18 | -------------------------------------------------------------------------------- /pkg/cli/ctr/ctr.go: -------------------------------------------------------------------------------- 1 | package ctr 2 | 3 | import ( 4 | "github.com/k3s-io/k3s/pkg/ctr" 5 | "github.com/urfave/cli/v2" 6 | ) 7 | 8 | func Run(ctx *cli.Context) error { 9 | ctr.Main() 10 | return nil 11 | } 12 | -------------------------------------------------------------------------------- /pkg/cli/kubectl/kubectl.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "github.com/k3s-io/k3s/pkg/kubectl" 5 | "github.com/urfave/cli/v2" 6 | ) 7 | 8 | func Run(ctx *cli.Context) error { 9 | kubectl.Main() 10 | return nil 11 | } 12 | -------------------------------------------------------------------------------- /pkg/clientaccess/kubeconfig.go: -------------------------------------------------------------------------------- 1 | package clientaccess 2 | 3 | import ( 4 | "os" 5 | 6 | pkgerrors "github.com/pkg/errors" 7 | "k8s.io/client-go/tools/clientcmd" 8 | clientcmdapi "k8s.io/client-go/tools/clientcmd/api" 9 | ) 10 | 11 | // WriteClientKubeConfig generates a kubeconfig at destFile that can be used to connect to a server at url with the given certs and keys 12 | func WriteClientKubeConfig(destFile, url, serverCAFile, clientCertFile, clientKeyFile string) 
error { 13 | serverCA, err := os.ReadFile(serverCAFile) 14 | if err != nil { 15 | return pkgerrors.WithMessagef(err, "failed to read %s", serverCAFile) 16 | } 17 | 18 | clientCert, err := os.ReadFile(clientCertFile) 19 | if err != nil { 20 | return pkgerrors.WithMessagef(err, "failed to read %s", clientCertFile) 21 | } 22 | 23 | clientKey, err := os.ReadFile(clientKeyFile) 24 | if err != nil { 25 | return pkgerrors.WithMessagef(err, "failed to read %s", clientKeyFile) 26 | } 27 | 28 | config := clientcmdapi.NewConfig() 29 | 30 | cluster := clientcmdapi.NewCluster() 31 | cluster.CertificateAuthorityData = serverCA 32 | cluster.Server = url 33 | 34 | authInfo := clientcmdapi.NewAuthInfo() 35 | authInfo.ClientCertificateData = clientCert 36 | authInfo.ClientKeyData = clientKey 37 | 38 | context := clientcmdapi.NewContext() 39 | context.AuthInfo = "default" 40 | context.Cluster = "default" 41 | 42 | config.Clusters["default"] = cluster 43 | config.AuthInfos["default"] = authInfo 44 | config.Contexts["default"] = context 45 | config.CurrentContext = "default" 46 | 47 | return clientcmd.WriteToFile(*config, destFile) 48 | } 49 | -------------------------------------------------------------------------------- /pkg/cluster/etcd.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "github.com/k3s-io/k3s/pkg/cluster/managed" 5 | "github.com/k3s-io/k3s/pkg/etcd" 6 | ) 7 | 8 | func init() { 9 | managed.RegisterDriver(etcd.NewETCD()) 10 | } 11 | -------------------------------------------------------------------------------- /pkg/cluster/managed/drivers.go: -------------------------------------------------------------------------------- 1 | package managed 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | 7 | "github.com/k3s-io/k3s/pkg/clientaccess" 8 | "github.com/k3s-io/k3s/pkg/daemons/config" 9 | ) 10 | 11 | var ( 12 | drivers []Driver 13 | ) 14 | 15 | type Driver interface { 16 | 
// RegisterDriver appends a managed datastore driver to the package-level
// driver list. Driver packages call this from init() (see pkg/cluster/etcd.go
// registering the embedded etcd driver).
func RegisterDriver(d Driver) {
	drivers = append(drivers, d)
}

// Registered returns all drivers registered so far, in registration order.
func Registered() []Driver {
	return drivers
}

// Default returns the first registered driver.
// NOTE(review): this indexes drivers[0] unconditionally and will panic if no
// driver has been registered — presumably callers only invoke it after a
// driver package's init() has run; confirm.
func Default() Driver {
	return drivers[0]
}

// Clear discards all registered drivers.
func Clear() {
	drivers = []Driver{}
}
49 | type SnapshotResult struct { 50 | Created []string `json:"created,omitempty"` 51 | Deleted []string `json:"deleted,omitempty"` 52 | } 53 | -------------------------------------------------------------------------------- /pkg/codegen/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | bindata "github.com/go-bindata/go-bindata" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | func main() { 11 | os.Unsetenv("GOPATH") 12 | bc := &bindata.Config{ 13 | Input: []bindata.InputConfig{ 14 | { 15 | Path: "build/data", 16 | Recursive: true, 17 | }, 18 | }, 19 | Package: "data", 20 | NoCompress: true, 21 | NoMemCopy: true, 22 | NoMetadata: true, 23 | Output: "pkg/data/zz_generated_bindata.go", 24 | } 25 | if err := bindata.Translate(bc); err != nil { 26 | logrus.Fatal(err) 27 | } 28 | 29 | bc = &bindata.Config{ 30 | Input: []bindata.InputConfig{ 31 | { 32 | Path: "manifests", 33 | Recursive: true, 34 | }, 35 | }, 36 | Package: "deploy", 37 | NoMetadata: true, 38 | Prefix: "manifests/", 39 | Output: "pkg/deploy/zz_generated_bindata.go", 40 | Tags: "!no_stage", 41 | } 42 | if err := bindata.Translate(bc); err != nil { 43 | logrus.Fatal(err) 44 | } 45 | 46 | bc = &bindata.Config{ 47 | Input: []bindata.InputConfig{ 48 | { 49 | Path: "build/static", 50 | Recursive: true, 51 | }, 52 | }, 53 | Package: "static", 54 | NoMetadata: true, 55 | Prefix: "build/static/", 56 | Output: "pkg/static/zz_generated_bindata.go", 57 | Tags: "!no_stage", 58 | } 59 | if err := bindata.Translate(bc); err != nil { 60 | logrus.Fatal(err) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /pkg/configfilearg/testdata/data.yaml: -------------------------------------------------------------------------------- 1 | foo-bar: baz 2 | alice: bob 3 | a-slice: 4 | - 1 5 | - "2" 6 | - "" 7 | - three 8 | isempty: 9 | c: b 10 | isfalse: false 11 | islast: true 
-------------------------------------------------------------------------------- /pkg/configfilearg/testdata/data.yaml.d/01-data.yml: -------------------------------------------------------------------------------- 1 | foo-bar: get-overriden 2 | a-slice: 3 | - 1 4 | - "1.5" 5 | - "2" 6 | - "" 7 | - three 8 | b-string: one 9 | c-slice: 10 | - one 11 | - two 12 | d-slice: 13 | - one 14 | - two 15 | f-string: beta -------------------------------------------------------------------------------- /pkg/configfilearg/testdata/data.yaml.d/02-data-ignore-this.txt: -------------------------------------------------------------------------------- 1 | foo-bar: ignored 2 | -------------------------------------------------------------------------------- /pkg/configfilearg/testdata/data.yaml.d/02-data.yaml: -------------------------------------------------------------------------------- 1 | foo-bar: bar-foo 2 | b-string+: two 3 | c-slice+: 4 | - three 5 | d-slice: 6 | - three 7 | - four 8 | e-slice+: 9 | - one 10 | - two -------------------------------------------------------------------------------- /pkg/configfilearg/testdata/defaultdata.yaml: -------------------------------------------------------------------------------- 1 | token: 12345 2 | node-label: DEAFBEEF 3 | etcd-s3: true 4 | etcd-s3-bucket: my-backup 5 | notaflag : true 6 | kubelet-arg: "max-pods=999" -------------------------------------------------------------------------------- /pkg/configfilearg/testdata/dropin-only.yaml.d/01-data.yml: -------------------------------------------------------------------------------- 1 | foo-bar: get-overriden 2 | a-slice: 3 | - 1 4 | - "1.5" 5 | - "2" 6 | - "" 7 | - three 8 | b-string: one 9 | c-slice: 10 | - one 11 | - two 12 | d-slice: 13 | - one 14 | - two 15 | f-string: beta -------------------------------------------------------------------------------- /pkg/configfilearg/testdata/dropin-only.yaml.d/02-data-ignore-this.txt: 
-------------------------------------------------------------------------------- 1 | foo-bar: ignored 2 | -------------------------------------------------------------------------------- /pkg/configfilearg/testdata/dropin-only.yaml.d/02-data.yaml: -------------------------------------------------------------------------------- 1 | foo-bar: bar-foo 2 | b-string+: two 3 | c-slice+: 4 | - three 5 | d-slice: 6 | - three 7 | - four 8 | e-slice+: 9 | - one 10 | - two -------------------------------------------------------------------------------- /pkg/configfilearg/testdata/invalid-dropin.yaml.d/01-data.yml: -------------------------------------------------------------------------------- 1 | !invalid 2 | -------------------------------------------------------------------------------- /pkg/configfilearg/testdata/invalid.yaml: -------------------------------------------------------------------------------- 1 | !invalid 2 | -------------------------------------------------------------------------------- /pkg/containerd/builtins_cri.go: -------------------------------------------------------------------------------- 1 | //go:build ctrd 2 | // +build ctrd 3 | 4 | /* 5 | Copyright The containerd Authors. 6 | 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 
18 | */ 19 | 20 | package containerd 21 | 22 | import ( 23 | _ "github.com/containerd/containerd/v2/plugins/cri" 24 | _ "github.com/containerd/containerd/v2/plugins/cri/images" 25 | _ "github.com/containerd/containerd/v2/plugins/cri/runtime" 26 | ) 27 | -------------------------------------------------------------------------------- /pkg/containerd/builtins_linux.go: -------------------------------------------------------------------------------- 1 | //go:build ctrd 2 | // +build ctrd 3 | 4 | /* 5 | Copyright The containerd Authors. 6 | 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 
18 | */ 19 | 20 | package containerd 21 | 22 | import ( 23 | _ "github.com/containerd/containerd/api/types/runc/options" 24 | _ "github.com/containerd/containerd/v2/core/metrics/cgroups" 25 | _ "github.com/containerd/containerd/v2/core/metrics/cgroups/v2" 26 | _ "github.com/containerd/containerd/v2/plugins/diff/walking/plugin" 27 | _ "github.com/containerd/containerd/v2/plugins/snapshots/blockfile/plugin" 28 | _ "github.com/containerd/containerd/v2/plugins/snapshots/btrfs/plugin" 29 | _ "github.com/containerd/containerd/v2/plugins/snapshots/devmapper/plugin" 30 | _ "github.com/containerd/containerd/v2/plugins/snapshots/native/plugin" 31 | _ "github.com/containerd/containerd/v2/plugins/snapshots/overlay/plugin" 32 | _ "github.com/containerd/fuse-overlayfs-snapshotter/v2/plugin" 33 | _ "github.com/containerd/stargz-snapshotter/service/plugin" 34 | _ "github.com/containerd/zfs/v2/plugin" 35 | ) 36 | -------------------------------------------------------------------------------- /pkg/containerd/builtins_windows.go: -------------------------------------------------------------------------------- 1 | //go:build ctrd 2 | // +build ctrd 3 | 4 | /* 5 | Copyright The containerd Authors. 6 | 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 
18 | */ 19 | 20 | package containerd 21 | 22 | import ( 23 | _ "github.com/containerd/containerd/v2/plugins/diff/lcow" 24 | _ "github.com/containerd/containerd/v2/plugins/diff/windows" 25 | _ "github.com/containerd/containerd/v2/plugins/snapshots/lcow" 26 | _ "github.com/containerd/containerd/v2/plugins/snapshots/windows" 27 | ) 28 | -------------------------------------------------------------------------------- /pkg/containerd/main.go: -------------------------------------------------------------------------------- 1 | //go:build ctrd 2 | // +build ctrd 3 | 4 | package containerd 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | 10 | "github.com/containerd/containerd/v2/cmd/containerd/command" 11 | ) 12 | 13 | func Main() { 14 | app := command.App() 15 | if err := app.Run(os.Args); err != nil { 16 | fmt.Fprintf(os.Stderr, "containerd: %s\n", err) 17 | os.Exit(1) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /pkg/containerd/none.go: -------------------------------------------------------------------------------- 1 | //go:build !ctrd 2 | 3 | package containerd 4 | 5 | func Main() { 6 | } 7 | -------------------------------------------------------------------------------- /pkg/containerd/utility_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | 3 | package containerd 4 | 5 | import ( 6 | "github.com/containerd/containerd/v2/plugins/snapshots/overlay/overlayutils" 7 | fuseoverlayfs "github.com/containerd/fuse-overlayfs-snapshotter/v2" 8 | stargz "github.com/containerd/stargz-snapshotter/service" 9 | ) 10 | 11 | func OverlaySupported(root string) error { 12 | return overlayutils.Supported(root) 13 | } 14 | 15 | func FuseoverlayfsSupported(root string) error { 16 | return fuseoverlayfs.Supported(root) 17 | } 18 | 19 | func StargzSupported(root string) error { 20 | return stargz.Supported(root) 21 | } 22 | 
// OverlaySupported always fails on Windows; the overlayfs snapshotter is not
// available here. The root argument exists for signature parity with the
// Linux implementation and is unused.
func OverlaySupported(root string) error {
	return pkgerrors.WithMessagef(util2.ErrUnsupportedPlatform, "overlayfs is not supported")
}

// FuseoverlayfsSupported always fails on Windows; fuse-overlayfs is not
// available here. root is unused.
func FuseoverlayfsSupported(root string) error {
	return pkgerrors.WithMessagef(util2.ErrUnsupportedPlatform, "fuse-overlayfs is not supported")
}

// StargzSupported always fails on Windows; the stargz snapshotter is not
// available here. root is unused.
func StargzSupported(root string) error {
	return pkgerrors.WithMessagef(util2.ErrUnsupportedPlatform, "stargz is not supported")
}
22 | WithColumn("CreationTime", ".status.creationTime"), 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /pkg/ctr/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright The containerd Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package ctr 18 | 19 | import ( 20 | "fmt" 21 | "os" 22 | 23 | "github.com/containerd/containerd/v2/cmd/ctr/app" 24 | "github.com/urfave/cli/v2" 25 | ) 26 | 27 | func Main() { 28 | main() 29 | } 30 | 31 | func main() { 32 | app := app.New() 33 | for i, flag := range app.Flags { 34 | if sFlag, ok := flag.(*cli.StringFlag); ok { 35 | if sFlag.Name == "address" { 36 | sFlag.Value = "/run/k3s/containerd/containerd.sock" 37 | app.Flags[i] = sFlag 38 | } else if sFlag.Name == "namespace" { 39 | sFlag.Value = "k8s.io" 40 | app.Flags[i] = sFlag 41 | } 42 | } 43 | } 44 | 45 | if err := app.Run(os.Args); err != nil { 46 | fmt.Fprintf(os.Stderr, "ctr: %s\n", err) 47 | os.Exit(1) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /pkg/daemons/control/deps/deps_test.go: -------------------------------------------------------------------------------- 1 | package deps 2 | 3 | import ( 4 | "net" 5 | "reflect" 6 | "testing" 7 | 8 | certutil "github.com/rancher/dynamiclistener/cert" 9 | ) 10 | 11 | func Test_UnitAddSANs(t *testing.T) { 12 | type args struct { 13 
// proxy shuttles bytes between a local and a remote connection until either
// direction fails, reporting the first error on errc.
type proxy struct {
	lconn, rconn io.ReadWriteCloser
	// done marks that an error has already been delivered so later failures
	// are dropped instead of being sent.
	// NOTE(review): done is read and written from both pipe goroutines with
	// no synchronization; if both race past the check, the second send on the
	// unbuffered errc blocks after Proxy has returned — confirm this is the
	// intended best-effort behavior.
	done bool
	errc chan error
}

// Proxy copies data bidirectionally between lconn and rconn, blocking until
// one direction reports an error (including EOF), then closes both
// connections and returns the first error observed.
func Proxy(lconn, rconn io.ReadWriteCloser) error {
	p := &proxy{
		lconn: lconn,
		rconn: rconn,
		errc:  make(chan error),
	}

	defer p.rconn.Close()
	defer p.lconn.Close()
	go p.pipe(p.lconn, p.rconn)
	go p.pipe(p.rconn, p.lconn)
	// Wait for the first error from either direction.
	return <-p.errc
}

// err records a pipe failure: the first caller logs it (unless it is EOF,
// which is a normal close) and delivers it to the channel Proxy is receiving
// on; once done is set, further calls return without sending.
func (p *proxy) err(err error) {
	if p.done {
		return
	}
	if !errors.Is(err, io.EOF) {
		logrus.Warnf("Proxy error: %v", err)
	}
	p.done = true
	p.errc <- err
}
(p *proxy) pipe(src, dst io.ReadWriter) { 43 | buff := make([]byte, 1<<15) 44 | for { 45 | n, err := src.Read(buff) 46 | if err != nil { 47 | p.err(pkgerrors.WithMessage(err, "read failed")) 48 | return 49 | } 50 | _, err = dst.Write(buff[:n]) 51 | if err != nil { 52 | p.err(pkgerrors.WithMessage(err, "write failed")) 53 | return 54 | } 55 | } 56 | 57 | } 58 | -------------------------------------------------------------------------------- /pkg/daemons/executor/embed_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux && !no_embedded_executor 2 | // +build linux,!no_embedded_executor 3 | 4 | package executor 5 | 6 | import ( 7 | daemonconfig "github.com/k3s-io/k3s/pkg/daemons/config" 8 | 9 | // registering k3s cloud provider 10 | _ "github.com/k3s-io/k3s/pkg/cloudprovider" 11 | ) 12 | 13 | func platformKubeProxyArgs(nodeConfig *daemonconfig.Node) map[string]string { 14 | argsMap := map[string]string{} 15 | return argsMap 16 | } 17 | -------------------------------------------------------------------------------- /pkg/data/data.go: -------------------------------------------------------------------------------- 1 | package data 2 | -------------------------------------------------------------------------------- /pkg/datadir/datadir.go: -------------------------------------------------------------------------------- 1 | package datadir 2 | 3 | import ( 4 | "path/filepath" 5 | 6 | "github.com/k3s-io/k3s/pkg/util/permissions" 7 | "github.com/k3s-io/k3s/pkg/version" 8 | pkgerrors "github.com/pkg/errors" 9 | "github.com/rancher/wrangler/v3/pkg/resolvehome" 10 | ) 11 | 12 | var ( 13 | DefaultDataDir = "/var/lib/rancher/" + version.Program 14 | DefaultHomeDataDir = "${HOME}/.rancher/" + version.Program 15 | HomeConfig = "${HOME}/.kube/" + version.Program + ".yaml" 16 | GlobalConfig = "/etc/rancher/" + version.Program + "/" + version.Program + ".yaml" 17 | ) 18 | 19 | func Resolve(dataDir string) (string, error) { 
20 | return LocalHome(dataDir, false) 21 | } 22 | 23 | func LocalHome(dataDir string, forceLocal bool) (string, error) { 24 | if dataDir == "" { 25 | if permissions.IsPrivileged() == nil && !forceLocal { 26 | dataDir = DefaultDataDir 27 | } else { 28 | dataDir = DefaultHomeDataDir 29 | } 30 | } 31 | 32 | dataDir, err := resolvehome.Resolve(dataDir) 33 | if err != nil { 34 | return "", pkgerrors.WithMessagef(err, "resolving %s", dataDir) 35 | } 36 | 37 | return filepath.Abs(dataDir) 38 | } 39 | -------------------------------------------------------------------------------- /pkg/deploy/nostage.go: -------------------------------------------------------------------------------- 1 | //go:build no_stage 2 | // +build no_stage 3 | 4 | package deploy 5 | 6 | func Stage(dataDir string, templateVars map[string]string, skips map[string]bool) error { 7 | return nil 8 | } 9 | -------------------------------------------------------------------------------- /pkg/deploy/stage.go: -------------------------------------------------------------------------------- 1 | //go:build !no_stage 2 | 3 | package deploy 4 | 5 | import ( 6 | "bytes" 7 | "os" 8 | "path/filepath" 9 | "strings" 10 | 11 | pkgerrors "github.com/pkg/errors" 12 | "github.com/sirupsen/logrus" 13 | ) 14 | 15 | func Stage(dataDir string, templateVars map[string]string, skips map[string]bool) error { 16 | staging: 17 | for _, name := range AssetNames() { 18 | nameNoExtension := strings.TrimSuffix(name, filepath.Ext(name)) 19 | if skips[name] || skips[nameNoExtension] { 20 | continue staging 21 | } 22 | namePath := strings.Split(name, string(os.PathSeparator)) 23 | for i := 1; i < len(namePath); i++ { 24 | subPath := filepath.Join(namePath[0:i]...) 
25 | if skips[subPath] { 26 | continue staging 27 | } 28 | } 29 | 30 | content, err := Asset(name) 31 | if err != nil { 32 | return err 33 | } 34 | for k, v := range templateVars { 35 | content = bytes.Replace(content, []byte(k), []byte(v), -1) 36 | } 37 | p := filepath.Join(dataDir, name) 38 | os.MkdirAll(filepath.Dir(p), 0700) 39 | logrus.Info("Writing manifest: ", p) 40 | if err := os.WriteFile(p, content, 0600); err != nil { 41 | return pkgerrors.WithMessagef(err, "failed to write to %s", name) 42 | } 43 | } 44 | 45 | return nil 46 | } 47 | -------------------------------------------------------------------------------- /pkg/flock/flock_other.go: -------------------------------------------------------------------------------- 1 | //go:build !linux && !darwin && !freebsd && !openbsd && !netbsd && !dragonfly 2 | // +build !linux,!darwin,!freebsd,!openbsd,!netbsd,!dragonfly 3 | 4 | /* 5 | Copyright 2016 The Kubernetes Authors. 6 | 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 18 | */ 19 | 20 | package flock 21 | 22 | // Acquire is not implemented on non-unix systems. 23 | func Acquire(path string) (int, error) { 24 | return -1, nil 25 | } 26 | 27 | // AcquireShared creates a shared lock on a file for the duration of the process, or until Release(d). 28 | // This method is reentrant. 29 | func AcquireShared(path string) (int, error) { 30 | return 0, nil 31 | } 32 | 33 | // Release is not implemented on non-unix systems. 
34 | func Release(lock int) error { 35 | return nil 36 | } 37 | 38 | // CheckLock checks whether any process is using the lock 39 | func CheckLock(path string) bool { 40 | return false 41 | } 42 | -------------------------------------------------------------------------------- /pkg/flock/flock_unix.go: -------------------------------------------------------------------------------- 1 | //go:build linux || darwin || freebsd || openbsd || netbsd || dragonfly 2 | 3 | /* 4 | Copyright 2016 The Kubernetes Authors. 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | 12 | Unless required by applicable law or agreed to in writing, software 13 | distributed under the License is distributed on an "AS IS" BASIS, 14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | See the License for the specific language governing permissions and 16 | limitations under the License. 17 | */ 18 | 19 | package flock 20 | 21 | import ( 22 | "golang.org/x/sys/unix" 23 | ) 24 | 25 | // Acquire creates an exclusive lock on a file for the duration of the process, or until Release(d). 26 | // This method is reentrant. 27 | func Acquire(path string) (int, error) { 28 | lock, err := unix.Open(path, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0600) 29 | if err != nil { 30 | return -1, err 31 | } 32 | return lock, unix.Flock(lock, unix.LOCK_EX) 33 | } 34 | 35 | // AcquireShared creates a shared lock on a file for the duration of the process, or until Release(d). 36 | // This method is reentrant. 37 | func AcquireShared(path string) (int, error) { 38 | lock, err := unix.Open(path, unix.O_CREAT|unix.O_RDWR, 0600) 39 | if err != nil { 40 | return -1, err 41 | } 42 | return lock, unix.Flock(lock, unix.LOCK_SH) 43 | } 44 | 45 | // Release removes an existing lock held by this process. 
46 | func Release(lock int) error { 47 | return unix.Flock(lock, unix.LOCK_UN) 48 | } 49 | -------------------------------------------------------------------------------- /pkg/kubeadm/token.go: -------------------------------------------------------------------------------- 1 | package kubeadm 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/k3s-io/k3s/pkg/cli/cmds" 7 | "github.com/k3s-io/k3s/pkg/version" 8 | "github.com/urfave/cli/v2" 9 | bootstrapapi "k8s.io/cluster-bootstrap/token/api" 10 | bootstraputil "k8s.io/cluster-bootstrap/token/util" 11 | ) 12 | 13 | var ( 14 | NodeBootstrapTokenAuthGroup = "system:bootstrappers:" + version.Program + ":default-node-token" 15 | ) 16 | 17 | // SetDefaults ensures that the default values are set on the token configuration. 18 | // These are set here, rather than in the default Token struct, to avoid 19 | // importing the cluster-bootstrap packages into the CLI. 20 | func SetDefaults(clx *cli.Context, cfg *cmds.Token) error { 21 | if !clx.IsSet("groups") { 22 | cfg.Groups = *cli.NewStringSlice(NodeBootstrapTokenAuthGroup) 23 | } 24 | 25 | if !clx.IsSet("usages") { 26 | cfg.Usages = *cli.NewStringSlice(bootstrapapi.KnownTokenUsages...) 
27 | } 28 | 29 | if cfg.Output == "" { 30 | cfg.Output = "text" 31 | } else { 32 | switch cfg.Output { 33 | case "text", "json", "yaml": 34 | default: 35 | return errors.New("invalid output format: " + cfg.Output) 36 | } 37 | } 38 | 39 | if clx.Args().Len() > 0 { 40 | cfg.Token = clx.Args().Get(0) 41 | } 42 | 43 | if cfg.Token == "" { 44 | var err error 45 | cfg.Token, err = bootstraputil.GenerateBootstrapToken() 46 | if err != nil { 47 | return err 48 | } 49 | } 50 | 51 | return nil 52 | } 53 | -------------------------------------------------------------------------------- /pkg/kubectl/main.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "os" 7 | "runtime" 8 | "strings" 9 | "time" 10 | 11 | "github.com/k3s-io/k3s/pkg/server" 12 | "github.com/sirupsen/logrus" 13 | "k8s.io/component-base/cli" 14 | "k8s.io/kubectl/pkg/cmd" 15 | "k8s.io/kubectl/pkg/cmd/util" 16 | 17 | // Import to initialize client auth plugins. 
18 | _ "k8s.io/client-go/plugin/pkg/client/auth" 19 | ) 20 | 21 | func Main() { 22 | if runtime.GOOS == "windows" { 23 | os.Args = os.Args[1:] 24 | } 25 | kubenv := os.Getenv("KUBECONFIG") 26 | for i, arg := range os.Args { 27 | if strings.HasPrefix(arg, "--kubeconfig=") { 28 | kubenv = strings.Split(arg, "=")[1] 29 | } else if strings.HasPrefix(arg, "--kubeconfig") && i+1 < len(os.Args) { 30 | kubenv = os.Args[i+1] 31 | } 32 | } 33 | if kubenv == "" { 34 | config, err := server.HomeKubeConfig(false, false) 35 | if _, serr := os.Stat(config); err == nil && serr == nil { 36 | os.Setenv("KUBECONFIG", config) 37 | } 38 | if err := checkReadConfigPermissions(config); err != nil { 39 | logrus.Warn(err) 40 | } 41 | } 42 | 43 | main() 44 | } 45 | 46 | func main() { 47 | rand.Seed(time.Now().UnixNano()) 48 | 49 | command := cmd.NewDefaultKubectlCommand() 50 | if err := cli.RunNoErrOutput(command); err != nil { 51 | util.CheckErr(err) 52 | } 53 | } 54 | 55 | func checkReadConfigPermissions(configFile string) error { 56 | file, err := os.OpenFile(configFile, os.O_RDONLY, 0600) 57 | if err != nil { 58 | if os.IsPermission(err) { 59 | return fmt.Errorf("Unable to read %s, please start server "+ 60 | "with --write-kubeconfig-mode or --write-kubeconfig-group "+ 61 | "to modify kube config permissions", configFile) 62 | } 63 | } 64 | file.Close() 65 | return nil 66 | } 67 | -------------------------------------------------------------------------------- /pkg/proctitle/proctile.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | // +build linux 3 | 4 | package proctitle 5 | 6 | import ( 7 | "github.com/erikdubbelboer/gspt" 8 | ) 9 | 10 | func SetProcTitle(cmd string) { 11 | gspt.SetProcTitle(cmd) 12 | } 13 | -------------------------------------------------------------------------------- /pkg/proctitle/proctile_windows.go: -------------------------------------------------------------------------------- 1 | //go:build 
windows 2 | // +build windows 3 | 4 | package proctitle 5 | 6 | func SetProcTitle(cmd string) {} 7 | -------------------------------------------------------------------------------- /pkg/profile/profile.go: -------------------------------------------------------------------------------- 1 | package profile 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "net/http/pprof" 7 | 8 | "github.com/gorilla/mux" 9 | "github.com/k3s-io/k3s/pkg/agent/https" 10 | "github.com/k3s-io/k3s/pkg/daemons/config" 11 | ) 12 | 13 | // DefaultProfiler the default instance of a performance profiling server 14 | var DefaultProfiler = &Config{ 15 | Router: func(context.Context, *config.Node) (*mux.Router, error) { 16 | return nil, errors.New("not implemented") 17 | }, 18 | } 19 | 20 | // Config holds fields for the pprof listener 21 | type Config struct { 22 | // Router will be called to add the pprof API handler to an existing router. 23 | Router https.RouterFunc 24 | } 25 | 26 | // Start starts binds the pprof API to an existing HTTP router. 
27 | func (c *Config) Start(ctx context.Context, nodeConfig *config.Node) error { 28 | mRouter, err := c.Router(ctx, nodeConfig) 29 | if err != nil { 30 | return err 31 | } 32 | mRouter.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) 33 | mRouter.HandleFunc("/debug/pprof/profile", pprof.Profile) 34 | mRouter.HandleFunc("/debug/pprof/symbol", pprof.Symbol) 35 | mRouter.HandleFunc("/debug/pprof/trace", pprof.Trace) 36 | mRouter.PathPrefix("/debug/pprof/").HandlerFunc(pprof.Index) 37 | return nil 38 | } 39 | -------------------------------------------------------------------------------- /pkg/rootless/rootless_windows.go: -------------------------------------------------------------------------------- 1 | package rootless 2 | 3 | func Rootless(stateDir string, enableIPv6 bool) error { 4 | panic("Rootless is not supported on windows") 5 | } 6 | -------------------------------------------------------------------------------- /pkg/rootlessports/controller_windows.go: -------------------------------------------------------------------------------- 1 | package rootlessports 2 | 3 | import ( 4 | "context" 5 | 6 | coreClients "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" 7 | ) 8 | 9 | func Register(ctx context.Context, serviceController coreClients.ServiceController, enabled bool, httpsPort int) error { 10 | panic("Rootless is not supported on windows") 11 | } 12 | -------------------------------------------------------------------------------- /pkg/server/types.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/k3s-io/k3s/pkg/cli/cmds" 7 | "github.com/k3s-io/k3s/pkg/daemons/config" 8 | ) 9 | 10 | type Config struct { 11 | DisableAgent bool 12 | ControlConfig config.Control 13 | SupervisorPort int 14 | StartupHooks []cmds.StartupHook 15 | LeaderControllers CustomControllers 16 | Controllers CustomControllers 17 | } 18 | 19 | type CustomControllers 
[]func(ctx context.Context, sc *Context) error 20 | -------------------------------------------------------------------------------- /pkg/spegel/registry.go: -------------------------------------------------------------------------------- 1 | package spegel 2 | 3 | import ( 4 | "net" 5 | 6 | "github.com/containerd/containerd/v2/core/remotes/docker" 7 | "github.com/k3s-io/k3s/pkg/daemons/config" 8 | "github.com/rancher/wharfie/pkg/registries" 9 | ) 10 | 11 | // InjectMirror configures TLS for the registry mirror client, and adds the mirror address as an endpoint 12 | // to all configured registries. 13 | func (c *Config) InjectMirror(nodeConfig *config.Node) error { 14 | mirrorAddr := net.JoinHostPort(c.InternalAddress, c.RegistryPort) 15 | mirrorURL := "https://" + mirrorAddr + "/v2" 16 | registry := nodeConfig.AgentConfig.Registry 17 | 18 | if registry.Configs == nil { 19 | registry.Configs = map[string]registries.RegistryConfig{} 20 | } 21 | registry.Configs[mirrorAddr] = registries.RegistryConfig{ 22 | TLS: ®istries.TLSConfig{ 23 | CAFile: c.ServerCAFile, 24 | CertFile: c.ClientCertFile, 25 | KeyFile: c.ClientKeyFile, 26 | }, 27 | } 28 | 29 | if registry.Mirrors == nil { 30 | registry.Mirrors = map[string]registries.Mirror{} 31 | } 32 | for host, mirror := range registry.Mirrors { 33 | // Don't handle local registry entries 34 | if !docker.IsLocalhost(host) { 35 | mirror.Endpoints = append([]string{mirrorURL}, mirror.Endpoints...) 
36 | registry.Mirrors[host] = mirror 37 | } 38 | } 39 | registry.Mirrors[mirrorAddr] = registries.Mirror{ 40 | Endpoints: []string{mirrorURL}, 41 | } 42 | 43 | return nil 44 | } 45 | -------------------------------------------------------------------------------- /pkg/static/nostage.go: -------------------------------------------------------------------------------- 1 | //go:build no_stage 2 | // +build no_stage 3 | 4 | package static 5 | 6 | func Stage(dataDir string) error { 7 | return nil 8 | } 9 | -------------------------------------------------------------------------------- /pkg/static/stage.go: -------------------------------------------------------------------------------- 1 | //go:build !no_stage 2 | 3 | package static 4 | 5 | import ( 6 | "os" 7 | "path/filepath" 8 | 9 | pkgerrors "github.com/pkg/errors" 10 | "github.com/sirupsen/logrus" 11 | ) 12 | 13 | func Stage(dataDir string) error { 14 | for _, name := range AssetNames() { 15 | content, err := Asset(name) 16 | if err != nil { 17 | return err 18 | } 19 | p := filepath.Join(dataDir, name) 20 | logrus.Info("Writing static file: ", p) 21 | os.MkdirAll(filepath.Dir(p), 0700) 22 | if err := os.WriteFile(p, content, 0600); err != nil { 23 | return pkgerrors.WithMessagef(err, "failed to write to %s", name) 24 | } 25 | } 26 | 27 | return nil 28 | } 29 | -------------------------------------------------------------------------------- /pkg/util/client_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | "github.com/urfave/cli/v2" 8 | ) 9 | 10 | func Test_UnitSplitSliceString(t *testing.T) { 11 | tests := []struct { 12 | name string 13 | arg *cli.StringSlice 14 | want []string 15 | }{ 16 | { 17 | name: "Single Argument", 18 | arg: cli.NewStringSlice("foo"), 19 | want: []string{"foo"}, 20 | }, 21 | { 22 | name: "Repeated Arguments", 23 | arg: cli.NewStringSlice("foo", "bar", "baz"), 24 | want: 
// ExecCommand executes a command using the VPN binary
// In case of error != nil, the string returned var will have more information
func ExecCommand(command string, args []string) (string, error) {
	var stdout, stderr bytes.Buffer

	cmd := exec.Command(command, args...)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		// Surface whatever the command wrote to stderr alongside the error.
		return stderr.String(), err
	}
	return stdout.String(), nil
}
conn.Control(func(fd uintptr) { 15 | syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1) 16 | syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1) 17 | }) 18 | } 19 | -------------------------------------------------------------------------------- /pkg/util/net_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package util 5 | 6 | import "syscall" 7 | 8 | // permitReuse is a no-op; port and address reuse is not supported on Windows 9 | func permitReuse(network, addr string, conn syscall.RawConn) error { 10 | return nil 11 | } 12 | -------------------------------------------------------------------------------- /pkg/util/permissions/permissions_others.go: -------------------------------------------------------------------------------- 1 | //go:build !windows 2 | // +build !windows 3 | 4 | package permissions 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | ) 10 | 11 | // IsPrivileged returns an error if the process is not running as root. 12 | // Ref: https://github.com/kubernetes/kubernetes/pull/96616 13 | func IsPrivileged() error { 14 | if os.Getuid() != 0 { 15 | return fmt.Errorf("not running as root") 16 | } 17 | return nil 18 | } 19 | -------------------------------------------------------------------------------- /pkg/util/permissions/permissions_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package permissions 5 | 6 | import ( 7 | "fmt" 8 | 9 | pkgerrors "github.com/pkg/errors" 10 | "golang.org/x/sys/windows" 11 | ) 12 | 13 | // IsPrivileged returns an error if the the process is not running as a member of the BUILTIN\Administrators group. 
14 | // Ref: https://github.com/kubernetes/kubernetes/pull/96616 15 | func IsPrivileged() error { 16 | var sid *windows.SID 17 | 18 | // Although this looks scary, it is directly copied from the 19 | // official windows documentation. The Go API for this is a 20 | // direct wrap around the official C++ API. 21 | // Ref: https://docs.microsoft.com/en-us/windows/desktop/api/securitybaseapi/nf-securitybaseapi-checktokenmembership 22 | err := windows.AllocateAndInitializeSid( 23 | &windows.SECURITY_NT_AUTHORITY, 24 | 2, 25 | windows.SECURITY_BUILTIN_DOMAIN_RID, 26 | windows.DOMAIN_ALIAS_RID_ADMINS, 27 | 0, 0, 0, 0, 0, 0, 28 | &sid) 29 | if err != nil { 30 | return pkgerrors.WithMessage(err, "failed to create Windows SID") 31 | } 32 | defer windows.FreeSid(sid) 33 | 34 | // Ref: https://github.com/golang/go/issues/28804#issuecomment-438838144 35 | token := windows.Token(0) 36 | 37 | member, err := token.IsMember(sid) 38 | if err != nil { 39 | return pkgerrors.WithMessage(err, "failed to check group membership") 40 | } 41 | 42 | if !member { 43 | return fmt.Errorf("not running as member of BUILTIN\\Administrators group") 44 | } 45 | 46 | return nil 47 | } 48 | -------------------------------------------------------------------------------- /pkg/util/reflect.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "reflect" 5 | "runtime" 6 | ) 7 | 8 | func GetFunctionName(i interface{}) string { 9 | return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() 10 | } 11 | -------------------------------------------------------------------------------- /pkg/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import "strings" 4 | 5 | var ( 6 | Program = "k3s" 7 | ProgramUpper = strings.ToUpper(Program) 8 | Version = "dev" 9 | GitCommit = "HEAD" 10 | 11 | UpstreamGolang = "" 12 | ) 13 | 
-------------------------------------------------------------------------------- /scripts/airgap/generate-list.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -x 3 | 4 | cd $(dirname $0) 5 | 6 | k3s crictl images -o json \ 7 | | jq -r '.images[].repoTags[0] | select(. != null)' \ 8 | | tee image-list.txt 9 | -------------------------------------------------------------------------------- /scripts/airgap/image-list.txt: -------------------------------------------------------------------------------- 1 | docker.io/rancher/klipper-helm:v0.9.5-build20250306 2 | docker.io/rancher/klipper-lb:v0.4.13 3 | docker.io/rancher/local-path-provisioner:v0.0.31 4 | docker.io/rancher/mirrored-coredns-coredns:1.12.1 5 | docker.io/rancher/mirrored-library-busybox:1.36.1 6 | docker.io/rancher/mirrored-library-traefik:3.3.6 7 | docker.io/rancher/mirrored-metrics-server:v0.7.2 8 | docker.io/rancher/mirrored-pause:3.6 9 | -------------------------------------------------------------------------------- /scripts/airgap/volume-test.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: local-path-pvc 5 | namespace: kube-system 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: local-path 10 | resources: 11 | requests: 12 | storage: 2Gi 13 | --- 14 | apiVersion: v1 15 | kind: Pod 16 | metadata: 17 | name: volume-test 18 | namespace: kube-system 19 | spec: 20 | containers: 21 | - name: volume-test 22 | image: rancher/mirrored-pause:3.6 23 | imagePullPolicy: IfNotPresent 24 | volumeMounts: 25 | - name: volv 26 | mountPath: /data 27 | volumes: 28 | - name: volv 29 | persistentVolumeClaim: 30 | claimName: local-path-pvc 31 | -------------------------------------------------------------------------------- /scripts/binary_size_check.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | . ./scripts/version.sh 6 | 7 | GO=${GO-go} 8 | ARCH=${ARCH:-$("${GO}" env GOARCH)} 9 | 10 | if [ "${DEBUG}" = 1 ]; then 11 | set -x 12 | fi 13 | 14 | # Try to keep the K3s binary under 75 megabytes. 15 | # "64M ought to be enough for anybody" 16 | MAX_BINARY_MB=75 17 | MAX_BINARY_SIZE=$((MAX_BINARY_MB * 1024 * 1024)) 18 | BIN_SUFFIX="-${ARCH}" 19 | if [ ${ARCH} = amd64 ]; then 20 | BIN_SUFFIX="" 21 | elif [ ${ARCH} = arm ]; then 22 | BIN_SUFFIX="-armhf" 23 | elif [ ${ARCH} = s390x ]; then 24 | BIN_SUFFIX="-s390x" 25 | fi 26 | 27 | CMD_NAME="dist/artifacts/k3s${BIN_SUFFIX}${BINARY_POSTFIX}" 28 | SIZE=$(stat -c '%s' ${CMD_NAME}) 29 | 30 | if [ -n "${DEBUG}" ]; then 31 | echo "DEBUG is set, ignoring binary size" 32 | exit 0 33 | fi 34 | 35 | if [ ${SIZE} -gt ${MAX_BINARY_SIZE} ]; then 36 | echo "k3s binary ${CMD_NAME} size ${SIZE} exceeds max acceptable size of ${MAX_BINARY_SIZE} bytes (${MAX_BINARY_MB} MiB)" 37 | exit 1 38 | fi 39 | 40 | echo "k3s binary ${CMD_NAME} size ${SIZE} is less than max acceptable size of ${MAX_BINARY_SIZE} bytes (${MAX_BINARY_MB} MiB)" 41 | exit 0 42 | -------------------------------------------------------------------------------- /scripts/build-upload: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check for AWS Credentials 4 | [ -n "$AWS_SECRET_ACCESS_KEY" ] || { 5 | echo "AWS_SECRET_ACCESS_KEY is not set" 6 | exit 0 7 | } 8 | [ -n "$AWS_ACCESS_KEY_ID" ] || { 9 | echo "AWS_ACCESS_KEY_ID is not set" 10 | exit 0 11 | } 12 | 13 | [ -x "$1" ] || { 14 | echo "First argument should be an executable" >&2 15 | exit 1 16 | } 17 | [ -n "$2" ] || { 18 | echo "Second argument should be a commit hash" >&2 19 | exit 1 20 | } 21 | 22 | umask 077 23 | 24 | TMPDIR=$(mktemp -d) 25 | cleanup() { 26 | exit_code=$? 
27 | trap - EXIT INT 28 | rm -rf ${TMPDIR} 29 | exit ${exit_code} 30 | } 31 | trap cleanup EXIT INT 32 | 33 | BUILD_NAME=$(basename $1)-$2 34 | (cd $(dirname $1) && sha256sum $(basename $1)) >${TMPDIR}/${BUILD_NAME}.sha256sum 35 | cp $1 ${TMPDIR}/${BUILD_NAME} 36 | 37 | for FILE in ${TMPDIR}/${BUILD_NAME}*; do 38 | aws s3 cp ${FILE} s3://k3s-ci-builds || exit 1 39 | done 40 | 41 | echo "Build uploaded" >&2 42 | echo "https://k3s-ci-builds.s3.amazonaws.com/${BUILD_NAME}" 43 | -------------------------------------------------------------------------------- /scripts/ci: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | SCRIPT_DIR=$(dirname $0) 5 | pushd $SCRIPT_DIR 6 | 7 | ./download 8 | ./validate 9 | ./build 10 | ./package 11 | 12 | popd 13 | 14 | $SCRIPT_DIR/binary_size_check.sh 15 | -------------------------------------------------------------------------------- /scripts/clean: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd $(dirname $0)/.. 
4 | 5 | rm -rf dist bin build k3s hyperkube kubectl 6 | -------------------------------------------------------------------------------- /scripts/dispatch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | set -x 4 | 5 | REPO="https://api.github.com/repos/k3s-io/k3s-upgrade/dispatches" 6 | 7 | # send dispatch event to REPO 8 | curl -XPOST -u "${PAT_USERNAME}:${PAT_TOKEN}" \ 9 | -H "Accept: application/vnd.github.everest-preview+json" \ 10 | -H "Content-Type: application/json" $REPO \ 11 | --data '{"event_type": "create_tag", "client_payload": {"tag":"'"$DRONE_TAG"'"}}' 12 | 13 | SYSTEM_AGENT_INSTALLER_K3S_REPO="https://api.github.com/repos/rancher/system-agent-installer-k3s/dispatches" 14 | 15 | # send dispatch event to SYSTEM_AGENT_INSTALLER_K3S_REPO 16 | curl -XPOST -H "Authorization: Bearer ${K3S_RELEASE_TOKEN}" \ 17 | -H "Accept: application/vnd.github.everest-preview+json" \ 18 | -H "Content-Type: application/vnd.github+json" $SYSTEM_AGENT_INSTALLER_K3S_REPO \ 19 | --data '{"event_type": "create_tag", "client_payload": {"tag":"'"$DRONE_TAG"'"}}' 20 | -------------------------------------------------------------------------------- /scripts/download: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | cd $(dirname $0)/.. 6 | 7 | . 
./scripts/version.sh 8 | 9 | CHARTS_URL=https://k3s.io/k3s-charts/assets 10 | CHARTS_DIR=build/static/charts 11 | RUNC_DIR=build/src/github.com/opencontainers/runc 12 | CONTAINERD_DIR=build/src/github.com/containerd/containerd 13 | HCSSHIM_DIR=build/src/github.com/microsoft/hcsshim 14 | DATA_DIR=build/data 15 | export TZ=UTC 16 | 17 | umask 022 18 | rm -rf ${CHARTS_DIR} 19 | rm -rf ${RUNC_DIR} 20 | rm -rf ${CONTAINERD_DIR} 21 | rm -rf ${HCSSHIM_DIR} 22 | mkdir -p ${CHARTS_DIR} 23 | mkdir -p ${DATA_DIR} 24 | 25 | case ${OS} in 26 | linux) 27 | git clone --single-branch --branch=${VERSION_RUNC} --depth=1 https://github.com/opencontainers/runc ${RUNC_DIR} 28 | curl --compressed -sfL https://github.com/k3s-io/k3s-root/releases/download/${VERSION_ROOT}/k3s-root-${ARCH}.tar | tar xf - 29 | ;; 30 | windows) 31 | git clone --single-branch --branch=${VERSION_HCSSHIM} --depth=1 https://github.com/microsoft/hcsshim ${HCSSHIM_DIR} 32 | ;; 33 | *) 34 | echo "[ERROR] unrecognized operating system: ${OS}" 35 | exit 1 36 | ;; 37 | esac 38 | 39 | git clone --single-branch --branch=${VERSION_CONTAINERD} --depth=1 https://${PKG_CONTAINERD_K3S/\/v*/} ${CONTAINERD_DIR} 40 | 41 | for CHART_FILE in $(grep -rlF HelmChart manifests/ | xargs yq eval --no-doc .spec.chart | xargs -n1 basename); do 42 | CHART_NAME=$(echo $CHART_FILE | grep -oE '^(-*[a-z])+') 43 | curl -sfL ${CHARTS_URL}/${CHART_NAME}/${CHART_FILE} -o ${CHARTS_DIR}/${CHART_FILE} 44 | done 45 | -------------------------------------------------------------------------------- /scripts/entry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | mkdir -p bin dist 5 | if [ -e ./scripts/$1 ]; then 6 | ./scripts/"$@" 7 | else 8 | exec "$@" 9 | fi 10 | 11 | chown -R $DAPPER_UID:$DAPPER_GID . 
12 | -------------------------------------------------------------------------------- /scripts/generate: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd $(dirname $0)/.. 4 | 5 | rm -rf build/data 6 | mkdir -p build/data 7 | 8 | GO=${GO-go} 9 | 10 | echo Running: "${GO}" generate 11 | "${GO}" generate 12 | -------------------------------------------------------------------------------- /scripts/git_version.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | GIT_TAG=$DRONE_TAG 4 | TREE_STATE=clean 5 | COMMIT=$DRONE_COMMIT 6 | 7 | if [ -d .git ]; then 8 | if [ -z "$GIT_TAG" ]; then 9 | GIT_TAG=$(git tag -l --contains HEAD | head -n 1) 10 | fi 11 | if [ -n "$(git status --porcelain --untracked-files=no)" ]; then 12 | DIRTY="-dirty" 13 | TREE_STATE=dirty 14 | fi 15 | 16 | COMMIT=$(git log -n3 --pretty=format:"%H %ae" | grep -v ' drone@localhost$' | cut -f1 -d\ | head -1) 17 | if [ -z "${COMMIT}" ]; then 18 | COMMIT=$(git rev-parse HEAD || true) 19 | fi 20 | fi 21 | 22 | export GIT_TAG 23 | export TREE_STATE 24 | export COMMIT 25 | export DIRTY -------------------------------------------------------------------------------- /scripts/image_scan.sh: -------------------------------------------------------------------------------- 1 | #/bin/sh 2 | 3 | set -e 4 | 5 | if [ -z $1 ] && [ -z $2 ]; then 6 | echo "error: image name and arch name are required as arguments. exiting..." 
7 |     exit 1
8 | fi
9 | 
10 | ARCH=$2
11 | 
12 | # skipping image scan for 32 bits image since trivy dropped support for those https://github.com/aquasecurity/trivy/discussions/4789
13 | if [[ "${ARCH}" = "arm" ]] || [[ "${ARCH}" = "386" ]]; then
14 |     exit 0
15 | fi
16 | 
17 | if [ -n "${DEBUG}" ]; then  # quoted: unquoted empty DEBUG made "[ -n ]" always true, forcing set -x on
18 |     set -x
19 | fi
20 | 
21 | IMAGE=$1
22 | SEVERITIES="HIGH,CRITICAL"
23 | TRIVY_TEMPLATE='{{- $critical := 0 }}{{- $high := 0 }}
24 | {{- println "Target - Severity - ID - Package - Vulnerable Version - Fixed Version" -}}{{ print }}
25 | {{ range . }}
26 | {{- $target := .Target -}}
27 | {{ range .Vulnerabilities }}
28 | {{- if eq .Severity "CRITICAL" }}{{- $critical = add $critical 1 }}{{- end }}
29 | {{- if eq .Severity "HIGH" }}{{- $high = add $high 1 }}{{- end }}
30 | {{- list $target .Severity .VulnerabilityID .PkgName .InstalledVersion .FixedVersion | join " - " | println -}}
31 | {{- end -}}
32 | {{ end }}
33 | Vulnerabilities - Critical: {{ $critical }}, High: {{ $high }}{{ println }}'
34 | VEX_REPORT="rancher.openvex.json"
35 | 
36 | # Download Rancher's VEX Hub standalone report
37 | curl -fsS -o ${VEX_REPORT} https://raw.githubusercontent.com/rancher/vexhub/refs/heads/main/reports/rancher.openvex.json
38 | 
39 | trivy --quiet image --severity ${SEVERITIES} --vex ${VEX_REPORT} --no-progress --ignore-unfixed --format template --template "${TRIVY_TEMPLATE}" ${IMAGE}
40 | 
41 | exit 0
42 | 
--------------------------------------------------------------------------------
/scripts/manifest:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | if [ -z "${DRONE_TAG}" ]; then
4 |     echo "DRONE_TAG not defined" >&2
5 |     exit 1
6 | fi
7 | 
8 | set -e -x
9 | 
10 | REPO="rancher/k3s"
11 | 
12 | # docker can not contain '+' in the tag, so transform '+' to '-'
13 | DOCKER_TAG=$(echo "${DRONE_TAG}" | sed -e 's/+/-/g')
14 | 
15 | # export variables for drone-manifest
16 | export PLUGIN_TEMPLATE="${REPO}:${DOCKER_TAG}-ARCH"
17 | export
PLUGIN_PLATFORMS="linux/amd64,linux/arm64,linux/arm" 18 | 19 | # push current version manifest tag to docker hub 20 | PLUGIN_TARGET="${REPO}:${DOCKER_TAG}" drone-manifest 21 | 22 | # do not tag in docker as latest if the github tag contains a '-' 23 | if echo "${DRONE_TAG}" | grep -q '-'; then 24 | exit 0 25 | fi 26 | 27 | # get latest released version from github 28 | GITHUB_URL=https://github.com/k3s-io/k3s/releases 29 | VERSION_K3S=$(curl -w '%{url_effective}' -I -L -s -S ${GITHUB_URL}/latest -o /dev/null | sed -e 's|.*/||') 30 | 31 | # function for comparing versions 32 | version_ge() { 33 | [ "$1" = "$2" ] || [ "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1" ] 34 | } 35 | 36 | # do not tag in docker as latest if we are not greater than or equal to the latest github tag 37 | if ! version_ge "${DRONE_TAG}" "${VERSION_K3S}"; then 38 | exit 0 39 | fi 40 | 41 | # push latest manifest tag to docker hub 42 | PLUGIN_TARGET="${REPO}:latest" drone-manifest 43 | -------------------------------------------------------------------------------- /scripts/package: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | cd $(dirname $0) 5 | 6 | if [ ! -e ../bin/containerd ]; then 7 | ./build 8 | fi 9 | 10 | ./package-cli 11 | 12 | if [ -z "$SKIP_IMAGE" ]; then 13 | ./package-image 14 | fi 15 | if [ -z "$SKIP_AIRGAP" ]; then 16 | ./package-airgap 17 | fi 18 | -------------------------------------------------------------------------------- /scripts/package-airgap: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -x 3 | 4 | cd $(dirname $0)/.. 5 | 6 | . 
./scripts/version.sh 7 | 8 | if [ "${OS}" != "linux" ]; then 9 | exit 10 | fi 11 | 12 | # Used by GHA to inject --platform=linux/(NON_x86_ARCH) on amd64 runners 13 | OPT_ARCH=${1:-""} 14 | if [ -n "${OPT_ARCH}" ]; then 15 | ARCH=${OPT_ARCH} 16 | if [ "${ARCH}" = "arm" ]; then 17 | OPT_PLATFORM="--platform=linux/arm/v7" 18 | else 19 | OPT_PLATFORM="--platform=linux/${ARCH}" 20 | fi 21 | fi 22 | 23 | airgap_image_file='scripts/airgap/image-list.txt' 24 | images=$(cat "${airgap_image_file}") 25 | xargs -n1 docker pull ${OPT_PLATFORM} <<< "${images}" 26 | docker save ${images} -o dist/artifacts/k3s-airgap-images-${ARCH}.tar 27 | zstd --no-progress -T0 -16 -f --long=25 dist/artifacts/k3s-airgap-images-${ARCH}.tar -o dist/artifacts/k3s-airgap-images-${ARCH}.tar.zst 28 | pigz -v -c dist/artifacts/k3s-airgap-images-${ARCH}.tar > dist/artifacts/k3s-airgap-images-${ARCH}.tar.gz 29 | if [ ${ARCH} = amd64 ]; then 30 | cp "${airgap_image_file}" dist/artifacts/k3s-images.txt 31 | fi 32 | -------------------------------------------------------------------------------- /scripts/package-image: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | cd $(dirname $0)/.. 5 | 6 | . ./scripts/version.sh 7 | 8 | if [ "${OS}" != "linux" ]; then 9 | exit 10 | fi 11 | 12 | TAG=${TAG:-${VERSION_TAG}${SUFFIX}} 13 | REPO=${REPO:-rancher} 14 | IMAGE_NAME=${IMAGE_NAME:-k3s} 15 | 16 | IMAGE=${REPO}/${IMAGE_NAME}:${TAG} 17 | docker build --build-arg DRONE_TAG=${VERSION_TAG} -t ${IMAGE} -f package/Dockerfile . 
18 | ./scripts/image_scan.sh ${IMAGE} ${ARCH} 19 | echo Built ${IMAGE} 20 | -------------------------------------------------------------------------------- /scripts/sonobuoy-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "Plugins": [ 3 | { 4 | "name": "e2e" 5 | } 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /scripts/tag-image-latest: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | cd $(dirname $0)/.. 5 | 6 | . ./scripts/version.sh 7 | 8 | TAG=${TAG:-${VERSION_TAG}${SUFFIX}} 9 | REPO=${REPO:-rancher} 10 | IMAGE_NAME=${IMAGE_NAME:-k3s} 11 | 12 | IMAGE=${REPO}/${IMAGE_NAME}:${TAG} 13 | LATEST=${REPO}/${IMAGE_NAME}:latest 14 | docker image tag ${IMAGE} ${LATEST} 15 | echo Tagged ${IMAGE} as ${LATEST} 16 | -------------------------------------------------------------------------------- /scripts/test-mods: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -x 3 | 4 | res=$(go mod edit --json | jq -r '.Replace[] | select(.Old.Path | contains("k8s.io/")) | .New.Path' | grep -vE '^(k8s.io/|github.com/k3s-io/)' | wc -l) 5 | if [ $res -gt 0 ];then 6 | echo "Incorrect kubernetes replacement fork in go.mod" 7 | exit 1 8 | else 9 | exit 0 10 | fi 11 | -------------------------------------------------------------------------------- /scripts/validate: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [ -n "$SKIP_VALIDATE" ]; then 5 | echo Skipping validation 6 | exit 7 | fi 8 | 9 | cd $(dirname $0)/.. 10 | 11 | echo Running: install script signature checks 12 | sha256sum -c install.sh.sha256sum 13 | 14 | echo Running: go mod tidy 15 | go mod tidy 16 | 17 | echo Running: go generate 18 | GOOS=linux CC=gcc CXX=g++ go generate 19 | 20 | echo Running validation 21 | 22 | . 
./scripts/version.sh 23 | 24 | if [ -n "$DIRTY" ]; then 25 | echo Source dir is dirty 26 | git status --porcelain --untracked-files=no 27 | git diff 28 | exit 1 29 | fi 30 | 31 | echo Running: go version 32 | if ! go version | grep -s "go version ${VERSION_GOLANG} "; then 33 | echo "Unexpected $(go version) - Kubernetes ${VERSION_K8S} should be built with go version ${VERSION_GOLANG}" 34 | exit 1 35 | fi 36 | 37 | echo Running: go mod verify 38 | go mod verify 39 | 40 | if [ ! -e build/data ];then 41 | mkdir -p build/data 42 | fi 43 | 44 | if ! command -v golangci-lint; then 45 | echo Skipping validation: no golangci-lint available 46 | exit 47 | fi 48 | 49 | #echo Running: golangci-lint 50 | ## https://github.com/golangci/golangci-lint/issues/2788 51 | #CGO_ENABLED=0 golangci-lint run -v 52 | -------------------------------------------------------------------------------- /tests/docker/flaky-tests: -------------------------------------------------------------------------------- 1 | [Fail] [sig-node] Pods [It] should run through the lifecycle of Pods and PodStatus [Conformance] 2 | [Fail] [sig-node] KubeletManagedEtcHosts [It] should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] 3 | [Fail] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] [It] should be able to deny pod and configmap creation [Conformance] 4 | [Fail] [sig-apps] Deployment [It] should run the lifecycle of a Deployment [Conformance] 5 | [Fail] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] [It] listing validating webhooks should work [Conformance] -------------------------------------------------------------------------------- /tests/docker/hardened/cluster-level-pss.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiserver.config.k8s.io/v1 2 | kind: AdmissionConfiguration 3 | plugins: 4 | - name: PodSecurity 5 | configuration: 6 | apiVersion: 
pod-security.admission.config.k8s.io/v1beta1 7 | kind: PodSecurityConfiguration 8 | defaults: 9 | enforce: "privileged" 10 | enforce-version: "latest" 11 | audit: "baseline" 12 | audit-version: "latest" 13 | warn: "baseline" 14 | warn-version: "latest" 15 | exemptions: 16 | usernames: [] 17 | runtimeClasses: [] 18 | namespaces: [kube-system] 19 | -------------------------------------------------------------------------------- /tests/docker/resources/clusterip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-clusterip 5 | spec: 6 | selector: 7 | matchLabels: 8 | k8s-app: nginx-app-clusterip 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | k8s-app: nginx-app-clusterip 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: ranchertest/mytestcontainer 18 | ports: 19 | - containerPort: 80 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | labels: 25 | k8s-app: nginx-app-clusterip 26 | name: nginx-clusterip-svc 27 | namespace: default 28 | spec: 29 | type: ClusterIP 30 | ports: 31 | - port: 80 32 | selector: 33 | k8s-app: nginx-app-clusterip 34 | -------------------------------------------------------------------------------- /tests/docker/resources/loadbalancer-allTraffic.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: nginx-config 6 | data: 7 | default.conf: | 8 | server { 9 | listen 80; 10 | location /ip { 11 | return 200 "$remote_addr\n"; 12 | } 13 | # Default location block to serve the default "Welcome to nginx" page 14 | location / { 15 | root /usr/share/nginx/html; 16 | index index.html; 17 | } 18 | } 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | name: test-loadbalancer 24 | spec: 25 | selector: 26 | matchLabels: 27 | k8s-app: nginx-app-loadbalancer 28 | replicas: 2 29 | template: 
30 | metadata: 31 | labels: 32 | k8s-app: nginx-app-loadbalancer 33 | spec: 34 | containers: 35 | - name: nginx 36 | image: ranchertest/mytestcontainer 37 | ports: 38 | - containerPort: 80 39 | volumeMounts: 40 | - name: nginx-config-volume 41 | mountPath: /etc/nginx/conf.d 42 | volumes: 43 | - name: nginx-config-volume 44 | configMap: 45 | name: nginx-config 46 | --- 47 | apiVersion: v1 48 | kind: Service 49 | metadata: 50 | name: nginx-loadbalancer-svc 51 | labels: 52 | k8s-app: nginx-app-loadbalancer 53 | spec: 54 | type: LoadBalancer 55 | ports: 56 | - port: 81 57 | targetPort: 80 58 | protocol: TCP 59 | name: http 60 | selector: 61 | k8s-app: nginx-app-loadbalancer 62 | -------------------------------------------------------------------------------- /tests/docker/resources/loadbalancer-extTrafficPol.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: nginx-config 6 | data: 7 | default.conf: | 8 | server { 9 | listen 80; 10 | location /ip { 11 | return 200 "$remote_addr\n"; 12 | } 13 | # Default location block to serve the default "Welcome to nginx" page 14 | location / { 15 | root /usr/share/nginx/html; 16 | index index.html; 17 | } 18 | } 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | name: test-loadbalancer-ext 24 | spec: 25 | selector: 26 | matchLabels: 27 | k8s-app: nginx-app-loadbalancer-ext 28 | replicas: 1 29 | template: 30 | metadata: 31 | labels: 32 | k8s-app: nginx-app-loadbalancer-ext 33 | spec: 34 | containers: 35 | - name: nginx 36 | image: ranchertest/mytestcontainer 37 | ports: 38 | - containerPort: 80 39 | volumeMounts: 40 | - name: nginx-config-volume 41 | mountPath: /etc/nginx/conf.d 42 | volumes: 43 | - name: nginx-config-volume 44 | configMap: 45 | name: nginx-config 46 | --- 47 | apiVersion: v1 48 | kind: Service 49 | metadata: 50 | name: nginx-loadbalancer-svc-ext 51 | labels: 52 | k8s-app: 
nginx-app-loadbalancer-ext 53 | spec: 54 | type: LoadBalancer 55 | externalTrafficPolicy: Local 56 | ports: 57 | - port: 82 58 | targetPort: 80 59 | protocol: TCP 60 | name: http 61 | selector: 62 | k8s-app: nginx-app-loadbalancer-ext 63 | -------------------------------------------------------------------------------- /tests/docker/resources/loadbalancer-intTrafficPol.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: nginx-config 6 | data: 7 | default.conf: | 8 | server { 9 | listen 80; 10 | location /ip { 11 | return 200 "$remote_addr\n"; 12 | } 13 | # Default location block to serve the default "Welcome to nginx" page 14 | location / { 15 | root /usr/share/nginx/html; 16 | index index.html; 17 | } 18 | } 19 | 20 | --- 21 | apiVersion: apps/v1 22 | kind: Deployment 23 | metadata: 24 | name: test-loadbalancer-int 25 | spec: 26 | selector: 27 | matchLabels: 28 | k8s-app: nginx-app-loadbalancer-int 29 | replicas: 1 30 | template: 31 | metadata: 32 | labels: 33 | k8s-app: nginx-app-loadbalancer-int 34 | spec: 35 | containers: 36 | - name: nginx 37 | image: ranchertest/mytestcontainer 38 | ports: 39 | - containerPort: 80 40 | volumeMounts: 41 | - name: nginx-config-volume 42 | mountPath: /etc/nginx/conf.d 43 | volumes: 44 | - name: nginx-config-volume 45 | configMap: 46 | name: nginx-config 47 | --- 48 | apiVersion: v1 49 | kind: Service 50 | metadata: 51 | name: nginx-loadbalancer-svc-int 52 | labels: 53 | k8s-app: nginx-app-loadbalancer-int 54 | spec: 55 | type: LoadBalancer 56 | internalTrafficPolicy: Local 57 | ports: 58 | - port: 83 59 | targetPort: 80 60 | protocol: TCP 61 | name: http 62 | selector: 63 | k8s-app: nginx-app-loadbalancer-int 64 | -------------------------------------------------------------------------------- /tests/docker/resources/nodeport.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-nodeport 5 | spec: 6 | selector: 7 | matchLabels: 8 | k8s-app: nginx-app-nodeport 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | k8s-app: nginx-app-nodeport 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: ranchertest/mytestcontainer 18 | ports: 19 | - containerPort: 80 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | labels: 25 | k8s-app: nginx-app-nodeport 26 | name: nginx-nodeport-svc 27 | namespace: default 28 | spec: 29 | type: NodePort 30 | ports: 31 | - port: 80 32 | nodePort: 30096 33 | name: http 34 | selector: 35 | k8s-app: nginx-app-nodeport 36 | -------------------------------------------------------------------------------- /tests/docker/resources/pod_client.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: client 6 | name: client-deployment 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: client 12 | template: 13 | metadata: 14 | labels: 15 | app: client 16 | spec: 17 | containers: 18 | - image: ranchertest/mytestcontainer 19 | imagePullPolicy: Always 20 | name: client-curl 21 | affinity: 22 | podAntiAffinity: 23 | requiredDuringSchedulingIgnoredDuringExecution: 24 | - labelSelector: 25 | matchExpressions: 26 | - key: app 27 | operator: In 28 | values: 29 | - client 30 | topologyKey: kubernetes.io/hostname 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: client-curl 36 | labels: 37 | app: client 38 | service: client-curl 39 | spec: 40 | type: ClusterIP 41 | selector: 42 | app: client 43 | ports: 44 | - port: 8080 45 | -------------------------------------------------------------------------------- /tests/docker/resources/secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: 
docker-secret1 5 | type: Opaque 6 | stringData: 7 | config.yaml: | 8 | key: "hello" 9 | val: "world" 10 | --- 11 | apiVersion: v1 12 | kind: Secret 13 | metadata: 14 | name: docker-secret2 15 | type: Opaque 16 | stringData: 17 | config.yaml: | 18 | key: "good" 19 | val: "day" 20 | --- 21 | apiVersion: v1 22 | kind: Secret 23 | metadata: 24 | name: docker-secret3 25 | type: Opaque 26 | stringData: 27 | config.yaml: | 28 | key: "top-secret" 29 | val: "information" 30 | --- 31 | apiVersion: v1 32 | kind: Secret 33 | metadata: 34 | name: docker-secret4 35 | type: Opaque 36 | stringData: 37 | config.yaml: | 38 | key: "lock" 39 | val: "key" 40 | --- 41 | apiVersion: v1 42 | kind: Secret 43 | metadata: 44 | name: docker-secret5 45 | type: Opaque 46 | stringData: 47 | config.yaml: | 48 | key: "last" 49 | val: "call" -------------------------------------------------------------------------------- /tests/docker/resources/snapshot-test.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: stargz-snapshot-test 5 | spec: 6 | containers: 7 | - name: stargz-snapshot-test 8 | image: "ghcr.io/stargz-containers/k3s-test-ubuntu:20.04-esgz" 9 | command: ["sleep"] 10 | args: ["infinity"] -------------------------------------------------------------------------------- /tests/docker/resources/volume-test.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: local-path-pvc 5 | namespace: kube-system 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: local-path 10 | resources: 11 | requests: 12 | storage: 2Gi 13 | --- 14 | apiVersion: v1 15 | kind: Pod 16 | metadata: 17 | name: volume-test 18 | namespace: kube-system 19 | spec: 20 | containers: 21 | - name: volume-test 22 | image: rancher/mirrored-pause:3.6 23 | imagePullPolicy: IfNotPresent 24 | volumeMounts: 25 | - name: volv 
26 | mountPath: /data 27 | volumes: 28 | - name: volv 29 | persistentVolumeClaim: 30 | claimName: local-path-pvc 31 | -------------------------------------------------------------------------------- /tests/docker/test-runner: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x -e 3 | cd $(dirname $0)/../.. 4 | 5 | # --- 6 | 7 | for include in $TEST_INCLUDES; do 8 | . $include 9 | done 10 | 11 | test-setup 12 | provision-cluster 13 | start-test $@ 14 | -------------------------------------------------------------------------------- /tests/docker/test-setup-sonobuoy: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export NUM_SERVERS=1 4 | export NUM_AGENTS=1 5 | export SERVER_ARGS='--disable=traefik' 6 | export WAIT_SERVICES='coredns local-path-provisioner metrics-server' 7 | 8 | export sonobuoyParallelArgs=(--e2e-focus='\[Conformance\]' --e2e-skip='\[Serial\]' --e2e-parallel=y) 9 | export sonobuoySerialArgs=(--e2e-focus='\[Serial\].*\[Conformance\]') 10 | 11 | start-test() { 12 | sonobuoy-test $@ 13 | } 14 | export -f start-test 15 | 16 | test-post-hook() { 17 | if [[ $1 -eq 0 ]] || [[ ! -f "$TEST_DIR/sonobuoy/plugins/e2e/results/global/e2e.log" ]]; then 18 | return $1 19 | fi 20 | local failures=$(awk '/^Summarizing .* Failures?:$/,0' "$TEST_DIR/sonobuoy/plugins/e2e/results/global/e2e.log") 21 | # Ignore sonobuoy failures if only these flaky tests have failed 22 | flakyFails=$( grep -scF -f ./tests/docker/flaky-tests <<< "$failures" ) 23 | totalFails=$( grep -scF -e "[Fail]" <<< "$failures" ) 24 | [ "$totalFails" -le "$flakyFails" ] 25 | } 26 | export -f test-post-hook 27 | -------------------------------------------------------------------------------- /tests/docker/test-setup-sonobuoy-etcd: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . 
./tests/docker/test-setup-sonobuoy 4 | 5 | export NUM_SERVERS=1 6 | export NUM_AGENTS=1 7 | export SERVER_1_ARGS="--cluster-init" 8 | 9 | server-post-hook() { 10 | if [ $1 -eq 1 ]; then 11 | local url=$(cat $TEST_DIR/servers/1/metadata/url) 12 | export SERVER_ARGS="--server $url" 13 | fi 14 | } 15 | export -f server-post-hook 16 | 17 | test-post-hook() { 18 | if [[ $1 -eq 0 ]] || [[ ! -f "$TEST_DIR/sonobuoy/plugins/e2e/results/global/e2e.log" ]]; then 19 | return $1 20 | fi 21 | local failures=$(awk '/^Summarizing .* Failures?:$/,0' "$TEST_DIR/sonobuoy/plugins/e2e/results/global/e2e.log") 22 | # Ignore sonobuoy failures if only these flaky tests have failed 23 | flakyFails=$( grep -scF -f ./tests/docker/flaky-tests <<< "$failures" ) 24 | totalFails=$( grep -scF -e "[Fail]" <<< "$failures" ) 25 | [ "$totalFails" -le "$flakyFails" ] 26 | } 27 | export -f test-post-hook 28 | 29 | -------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/cluster-cidr-ipv6.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1alpha1 2 | kind: ClusterCIDR 3 | metadata: 4 | name: new-cidr 5 | spec: 6 | nodeSelector: 7 | nodeSelectorTerms: 8 | - matchExpressions: 9 | - key: kubernetes.io/hostname 10 | operator: In 11 | values: 12 | - "agent-0" 13 | perNodeHostBits: 64 14 | ipv6: 2001:cafe:248::/56 15 | -------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/cluster-cidr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1alpha1 2 | kind: ClusterCIDR 3 | metadata: 4 | name: new-cidr 5 | spec: 6 | nodeSelector: 7 | nodeSelectorTerms: 8 | - matchExpressions: 9 | - key: kubernetes.io/hostname 10 | operator: In 11 | values: 12 | - "agent-0" 13 | perNodeHostBits: 8 14 | ipv4: 10.248.0.0/16 15 | 
-------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/clusterip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-clusterip 5 | spec: 6 | selector: 7 | matchLabels: 8 | k8s-app: nginx-app-clusterip 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | k8s-app: nginx-app-clusterip 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: ranchertest/mytestcontainer 18 | ports: 19 | - containerPort: 80 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | labels: 25 | k8s-app: nginx-app-clusterip 26 | name: nginx-clusterip-svc 27 | namespace: default 28 | spec: 29 | type: ClusterIP 30 | ports: 31 | - port: 80 32 | selector: 33 | k8s-app: nginx-app-clusterip 34 | -------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: test-daemonset 5 | spec: 6 | selector: 7 | matchLabels: 8 | k8s-app: test-daemonset 9 | template: 10 | metadata: 11 | labels: 12 | k8s-app: test-daemonset 13 | spec: 14 | containers: 15 | - name: webserver 16 | image: nginx 17 | ports: 18 | - containerPort: 80 19 | -------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/dnsutils.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: dnsutils 5 | namespace: default 6 | spec: 7 | containers: 8 | - name: dnsutils 9 | image: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3 10 | command: 11 | - sleep 12 | - "3600" 13 | imagePullPolicy: IfNotPresent 14 | restartPolicy: Always 15 | -------------------------------------------------------------------------------- 
/tests/e2e/amd64_resource_files/dualstack_clusterip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: ds-clusterip-pod 5 | spec: 6 | selector: 7 | matchLabels: 8 | k8s-app: nginx-app-clusterip 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | k8s-app: nginx-app-clusterip 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: ranchertest/mytestcontainer 18 | ports: 19 | - containerPort: 80 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | labels: 25 | k8s-app: nginx-app-clusterip 26 | name: ds-clusterip-svc 27 | namespace: default 28 | spec: 29 | type: ClusterIP 30 | ipFamilyPolicy: PreferDualStack 31 | ports: 32 | - protocol: TCP 33 | port: 80 34 | targetPort: 80 35 | selector: 36 | k8s-app: nginx-app-clusterip 37 | -------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/dualstack_ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: ds-ingress 5 | spec: 6 | rules: 7 | - host: testds.com 8 | http: 9 | paths: 10 | - backend: 11 | service: 12 | # Reliant on dualstack_clusterip.yaml 13 | name: ds-clusterip-svc 14 | port: 15 | number: 80 16 | pathType: ImplementationSpecific -------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/dualstack_nodeport.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: ds-nodeport-pod 5 | spec: 6 | selector: 7 | matchLabels: 8 | k8s-app: nginx-app-nodeport 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | k8s-app: nginx-app-nodeport 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: ranchertest/mytestcontainer 18 | ports: 19 | - containerPort: 80 
20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | labels: 25 | k8s-app: nginx-app-nodeport 26 | name: ds-nodeport-svc 27 | namespace: default 28 | spec: 29 | type: NodePort 30 | ipFamilyPolicy: PreferDualStack 31 | ports: 32 | - port: 80 33 | nodePort: 30096 34 | name: http 35 | selector: 36 | k8s-app: nginx-app-nodeport 37 | -------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: test-ingress 5 | spec: 6 | rules: 7 | - host: foo1.bar.com 8 | http: 9 | paths: 10 | - backend: 11 | service: 12 | name: nginx-ingress-svc 13 | port: 14 | number: 80 15 | path: / 16 | pathType: ImplementationSpecific 17 | --- 18 | apiVersion: v1 19 | kind: Service 20 | metadata: 21 | name: nginx-ingress-svc 22 | labels: 23 | k8s-app: nginx-app-ingress 24 | spec: 25 | ports: 26 | - port: 80 27 | targetPort: 80 28 | protocol: TCP 29 | name: http 30 | selector: 31 | k8s-app: nginx-app-ingress 32 | --- 33 | apiVersion: v1 34 | kind: ReplicationController 35 | metadata: 36 | name: test-ingress 37 | spec: 38 | replicas: 2 39 | selector: 40 | k8s-app: nginx-app-ingress 41 | template: 42 | metadata: 43 | labels: 44 | k8s-app: nginx-app-ingress 45 | spec: 46 | terminationGracePeriodSeconds: 60 47 | containers: 48 | - name: testcontainer 49 | image: ranchertest/mytestcontainer 50 | ports: 51 | - containerPort: 80 52 | -------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/loadbalancer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: nginx-config 6 | data: 7 | default.conf: | 8 | server { 9 | listen 80; 10 | location /ip { 11 | return 200 "$remote_addr\n"; 12 | } 13 | # Default location 
block to serve the default "Welcome to nginx" page 14 | location / { 15 | root /usr/share/nginx/html; 16 | index index.html; 17 | } 18 | } 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | name: test-loadbalancer 24 | spec: 25 | selector: 26 | matchLabels: 27 | k8s-app: nginx-app-loadbalancer 28 | replicas: 2 29 | template: 30 | metadata: 31 | labels: 32 | k8s-app: nginx-app-loadbalancer 33 | spec: 34 | containers: 35 | - name: nginx 36 | image: ranchertest/mytestcontainer 37 | ports: 38 | - containerPort: 80 39 | volumeMounts: 40 | - name: nginx-config-volume 41 | mountPath: /etc/nginx/conf.d 42 | volumes: 43 | - name: nginx-config-volume 44 | configMap: 45 | name: nginx-config 46 | --- 47 | apiVersion: v1 48 | kind: Service 49 | metadata: 50 | name: nginx-loadbalancer-svc 51 | labels: 52 | k8s-app: nginx-app-loadbalancer 53 | spec: 54 | type: LoadBalancer 55 | ports: 56 | - port: 81 57 | targetPort: 80 58 | protocol: TCP 59 | name: http 60 | selector: 61 | k8s-app: nginx-app-loadbalancer 62 | -------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/local-path-provisioner.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: local-path-pvc 5 | namespace: default 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: local-path 10 | resources: 11 | requests: 12 | storage: 500Mi 13 | --- 14 | apiVersion: v1 15 | kind: Pod 16 | metadata: 17 | name: volume-test 18 | namespace: default 19 | spec: 20 | containers: 21 | - name: volume-test 22 | image: nginx:stable-alpine 23 | imagePullPolicy: IfNotPresent 24 | volumeMounts: 25 | - name: volv 26 | mountPath: /data 27 | ports: 28 | - containerPort: 80 29 | volumes: 30 | - name: volv 31 | persistentVolumeClaim: 32 | claimName: local-path-pvc 33 | 
-------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/netpol-fail.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: test-network-policy 5 | namespace: default 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | k8s-app: nginx-app-clusterip 10 | policyTypes: 11 | - Ingress 12 | ingress: 13 | - from: 14 | - podSelector: 15 | matchLabels: 16 | app: whatever 17 | ports: 18 | - protocol: TCP 19 | port: 80 20 | -------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/netpol-work.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: test-network-policy 5 | namespace: default 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | k8s-app: nginx-app-clusterip 10 | policyTypes: 11 | - Ingress 12 | ingress: 13 | - from: 14 | - podSelector: 15 | matchLabels: 16 | app: client 17 | ports: 18 | - protocol: TCP 19 | port: 80 20 | -------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/nodeport.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-nodeport 5 | spec: 6 | selector: 7 | matchLabels: 8 | k8s-app: nginx-app-nodeport 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | k8s-app: nginx-app-nodeport 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: ranchertest/mytestcontainer 18 | ports: 19 | - containerPort: 80 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | labels: 25 | k8s-app: nginx-app-nodeport 26 | name: nginx-nodeport-svc 27 | namespace: default 28 | spec: 29 | type: NodePort 30 | ports: 31 | - port: 80 32 | nodePort: 30096 33 | 
name: http 34 | selector: 35 | k8s-app: nginx-app-nodeport 36 | -------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/pod_client.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: client 6 | name: client-deployment 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: client 12 | template: 13 | metadata: 14 | labels: 15 | app: client 16 | spec: 17 | containers: 18 | - image: ranchertest/mytestcontainer 19 | imagePullPolicy: Always 20 | name: client-curl 21 | affinity: 22 | podAntiAffinity: 23 | requiredDuringSchedulingIgnoredDuringExecution: 24 | - labelSelector: 25 | matchExpressions: 26 | - key: app 27 | operator: In 28 | values: 29 | - client 30 | topologyKey: kubernetes.io/hostname 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: client-curl 36 | labels: 37 | app: client 38 | service: client-curl 39 | spec: 40 | type: ClusterIP 41 | selector: 42 | app: client 43 | ports: 44 | - port: 8080 45 | -------------------------------------------------------------------------------- /tests/e2e/amd64_resource_files/secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: e2e-secret1 5 | type: Opaque 6 | stringData: 7 | config.yaml: | 8 | key: "hello" 9 | val: "world" 10 | --- 11 | apiVersion: v1 12 | kind: Secret 13 | metadata: 14 | name: e2e-secret2 15 | type: Opaque 16 | stringData: 17 | config.yaml: | 18 | key: "good" 19 | val: "day" 20 | --- 21 | apiVersion: v1 22 | kind: Secret 23 | metadata: 24 | name: e2e-secret3 25 | type: Opaque 26 | stringData: 27 | config.yaml: | 28 | key: "top-secret" 29 | val: "information" 30 | --- 31 | apiVersion: v1 32 | kind: Secret 33 | metadata: 34 | name: e2e-secret4 35 | type: Opaque 36 | stringData: 37 | config.yaml: | 38 | 
key: "lock" 39 | val: "key" 40 | --- 41 | apiVersion: v1 42 | kind: Secret 43 | metadata: 44 | name: e2e-secret5 45 | type: Opaque 46 | stringData: 47 | config.yaml: | 48 | key: "last" 49 | val: "call" -------------------------------------------------------------------------------- /tests/e2e/cis_amd64_resource_files/clusterip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-clusterip 5 | spec: 6 | selector: 7 | matchLabels: 8 | k8s-app: nginx-app-clusterip 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | k8s-app: nginx-app-clusterip 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: ranchertest/mytestcontainer:unprivileged 18 | ports: 19 | - containerPort: 8080 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | labels: 25 | k8s-app: nginx-app-clusterip 26 | name: nginx-clusterip-svc 27 | namespace: default 28 | spec: 29 | type: ClusterIP 30 | ports: 31 | - port: 8080 32 | selector: 33 | k8s-app: nginx-app-clusterip 34 | -------------------------------------------------------------------------------- /tests/e2e/cis_amd64_resource_files/daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: test-daemonset 5 | spec: 6 | selector: 7 | matchLabels: 8 | k8s-app: test-daemonset 9 | template: 10 | metadata: 11 | labels: 12 | k8s-app: test-daemonset 13 | spec: 14 | containers: 15 | - name: webserver 16 | image: nginx 17 | ports: 18 | - containerPort: 80 19 | -------------------------------------------------------------------------------- /tests/e2e/cis_amd64_resource_files/dnsutils.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: dnsutils 5 | namespace: default 6 | spec: 7 | containers: 8 | - name: dnsutils 9 | image: 
gcr.io/kubernetes-e2e-test-images/dnsutils:1.3 10 | command: 11 | - sleep 12 | - "3600" 13 | imagePullPolicy: IfNotPresent 14 | restartPolicy: Always 15 | -------------------------------------------------------------------------------- /tests/e2e/cis_amd64_resource_files/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-ingress-svc 5 | labels: 6 | k8s-app: nginx-app-ingress 7 | spec: 8 | ports: 9 | - port: 8080 10 | targetPort: 8080 11 | protocol: TCP 12 | name: http 13 | selector: 14 | k8s-app: nginx-app-ingress 15 | --- 16 | apiVersion: extensions/v1beta1 17 | kind: Ingress 18 | metadata: 19 | name: test-ingress 20 | spec: 21 | rules: 22 | - host: foo1.bar.com 23 | http: 24 | paths: 25 | - backend: 26 | serviceName: nginx-ingress-svc 27 | servicePort: 8080 28 | pathType: ImplementationSpecific 29 | -------------------------------------------------------------------------------- /tests/e2e/cis_amd64_resource_files/loadbalancer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-loadbalancer 5 | spec: 6 | selector: 7 | matchLabels: 8 | k8s-app: nginx-app-loadbalancer 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | k8s-app: nginx-app-loadbalancer 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: ranchertest/mytestcontainer:unprivileged 18 | ports: 19 | - containerPort: 8080 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | labels: 25 | k8s-app: nginx-app-loadbalancer 26 | name: nginx-loadbalancer-svc 27 | namespace: default 28 | spec: 29 | type: LoadBalancer 30 | ports: 31 | - port: 81 32 | targetPort: 8080 33 | protocol: TCP 34 | name: http 35 | selector: 36 | k8s-app: nginx-app-loadbalancer 37 | -------------------------------------------------------------------------------- 
/tests/e2e/cis_amd64_resource_files/local-path-provisioner.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: local-path-pvc 5 | namespace: default 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: local-path 10 | resources: 11 | requests: 12 | storage: 500Mi 13 | --- 14 | apiVersion: v1 15 | kind: Pod 16 | metadata: 17 | name: volume-test 18 | namespace: default 19 | spec: 20 | containers: 21 | - name: volume-test 22 | image: nginx:stable-alpine 23 | imagePullPolicy: IfNotPresent 24 | volumeMounts: 25 | - name: volv 26 | mountPath: /data 27 | ports: 28 | - containerPort: 80 29 | volumes: 30 | - name: volv 31 | persistentVolumeClaim: 32 | claimName: local-path-pvc 33 | -------------------------------------------------------------------------------- /tests/e2e/cis_amd64_resource_files/nodeport.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-nodeport 5 | spec: 6 | selector: 7 | matchLabels: 8 | k8s-app: nginx-app-nodeport 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | k8s-app: nginx-app-nodeport 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: ranchertest/mytestcontainer:unprivileged 18 | ports: 19 | - containerPort: 8080 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | labels: 25 | k8s-app: nginx-app-nodeport 26 | name: nginx-nodeport-svc 27 | namespace: default 28 | spec: 29 | type: NodePort 30 | ports: 31 | - port: 8080 32 | nodePort: 30096 33 | name: http 34 | selector: 35 | k8s-app: nginx-app-nodeport 36 | -------------------------------------------------------------------------------- /tests/e2e/cis_amd64_resource_files/secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: 
e2e-secret1 5 | type: Opaque 6 | stringData: 7 | config.yaml: | 8 | key: "hello" 9 | val: "world" 10 | --- 11 | apiVersion: v1 12 | kind: Secret 13 | metadata: 14 | name: e2e-secret2 15 | type: Opaque 16 | stringData: 17 | config.yaml: | 18 | key: "good" 19 | val: "day" 20 | --- 21 | apiVersion: v1 22 | kind: Secret 23 | metadata: 24 | name: e2e-secret3 25 | type: Opaque 26 | stringData: 27 | config.yaml: | 28 | key: "top-secret" 29 | val: "information" 30 | --- 31 | apiVersion: v1 32 | kind: Secret 33 | metadata: 34 | name: e2e-secret4 35 | type: Opaque 36 | stringData: 37 | config.yaml: | 38 | key: "lock" 39 | val: "key" 40 | --- 41 | apiVersion: v1 42 | kind: Secret 43 | metadata: 44 | name: e2e-secret5 45 | type: Opaque 46 | stringData: 47 | config.yaml: | 48 | key: "last" 49 | val: "call" -------------------------------------------------------------------------------- /tests/e2e/e2e_test_playbook.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Run Cluster validation tests 3 | gather_facts: false 4 | hosts: node 5 | vars: 6 | user: "{{ user }}" 7 | key: "{{ key }}" 8 | ip: "{{ ip }}" 9 | dir: scripts 10 | tasks: 11 | - name : Print var 12 | debug: 13 | msg: | 14 | user: {{ user }} 15 | key: {{ key }} 16 | ip: {{ ip }} 17 | db: {{ db }} 18 | nodeOS: {{ nodeOS }} 19 | serverCount: {{ serverCount }} 20 | agentCount: {{ agentCount }} 21 | 22 | - name: build container image 23 | command: chdir=k3s/tests/e2e /usr/local/bin/docker build -t k3s_nightly_build -f {{ dir }}/Dockerfile . 
24 | 25 | - name: Delete docker container 26 | command: "/usr/local/bin/docker rm createcluster -f" 27 | ignore_errors: True 28 | 29 | - name: create docker container 30 | command: "/usr/local/bin/docker run -d -it -v ~/config:/config --cap-add=NET_ADMIN --device /dev/net/tun --sysctl net.ipv6.conf.all.disable_ipv6=0 --name createcluster --privileged k3s_nightly_build" 31 | 32 | - name: execute command in docker 33 | shell: | 34 | /usr/local/bin/docker exec -it createcluster /bin/bash ./run_tests.sh {{ key }} {{ user }} {{ ip }} {{ db }} {{ nodeOS }} {{ serverCount }} {{ agentCount }} 35 | -------------------------------------------------------------------------------- /tests/e2e/scripts/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:24.04 2 | ARG EXTERNAL_ENCODED_VPN 3 | ARG VPN_ENCODED_LOGIN 4 | 5 | RUN apt-get update && \ 6 | apt-get install -y curl bridge-utils iputils-ping openvpn openssh-client && \ 7 | mkdir -p /dev/net && \ 8 | mknod /dev/net/tun c 10 200 && \ 9 | chmod 600 /dev/net/tun 10 | 11 | RUN if [[ -z "$EXTERNAL_ENCODED_VPN" ]] ; then echo "no vpn provided" ; \ 12 | else echo -n $EXTERNAL_ENCODED_VPN | base64 -di > external.ovpn && \ 13 | if [[ -z "$VPN_ENCODED_LOGIN" ]]; then echo "no passcode provided" ; \ 14 | else echo -n $VPN_ENCODED_LOGIN | base64 -di > authfile && \ 15 | sed -i 's/auth-user-pass/auth-user-pass authfile/g' external.ovpn; fi ; fi 16 | 17 | WORKDIR . 18 | COPY scripts/run_tests.sh . 19 | COPY scripts/init.sh . 20 | -------------------------------------------------------------------------------- /tests/e2e/scripts/cleanup_vms.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Usage: ./cleanup_vms.sh [regex_pattern] 4 | # Default pattern matches timestamped VMs older than 2 hours. 5 | # We embed the time in the VM name, so we can easily filter them out. 
6 | 7 | # Get the current time in seconds since the epoch 8 | current_time=$(date +%s) 9 | 10 | 11 | def_pattern="_([0-9]+)_(server|agent)" 12 | if [ -n "$1" ]; then 13 | pattern="$1" 14 | else 15 | pattern="$def_pattern" 16 | fi 17 | 18 | # Get the list of VMs 19 | vms=$(virsh list --name --all) 20 | # Cleanup running VMs, happens if a previous test panics 21 | for vm in $vms; do 22 | if [[ $vm =~ $pattern ]]; then 23 | vm_time="${BASH_REMATCH[1]}" 24 | age=$((current_time - vm_time)) 25 | if [ $age -gt 7200 ] || [ "$pattern" != "$def_pattern" ]; then 26 | virsh destroy $vm 27 | virsh undefine $vm --remove-all-storage 28 | fi 29 | fi 30 | done 31 | 32 | # Cleanup inactive domains, happens if previous test is canceled 33 | vms=$(virsh list --name --inactive) 34 | for vm in $vms; do 35 | if [[ $vm =~ $pattern ]]; then 36 | vm_time="${BASH_REMATCH[1]}" 37 | age=$((current_time - vm_time)) 38 | if [ $age -gt 7200 ] || [ "$pattern" != "$def_pattern" ]; then 39 | virsh undefine $vm --remove-all-storage 40 | fi 41 | fi 42 | done -------------------------------------------------------------------------------- /tests/e2e/scripts/harden.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "vm.panic_on_oom=0 4 | vm.overcommit_memory=1 5 | kernel.panic=10 6 | kernel.panic_on_oops=1 7 | kernel.keys.root_maxbytes=25000000 8 | " >> /etc/sysctl.d/90-kubelet.conf 9 | sysctl -p /etc/sysctl.d/90-kubelet.conf 10 | 11 | mkdir -p /var/lib/rancher/k3s/server 12 | mkdir -m 700 /var/lib/rancher/k3s/server/logs 13 | echo "apiVersion: audit.k8s.io/v1 14 | kind: Policy 15 | rules: 16 | - level: Metadata" >> /var/lib/rancher/k3s/server/audit.yaml 17 | 18 | if [ "$1" = "psa" ]; then 19 | echo "apiVersion: apiserver.config.k8s.io/v1 20 | kind: AdmissionConfiguration 21 | plugins: 22 | - name: PodSecurity 23 | configuration: 24 | apiVersion: pod-security.admission.config.k8s.io/v1beta1 25 | kind: PodSecurityConfiguration 26 | defaults: 
27 | enforce: \"restricted\" 28 | enforce-version: \"latest\" 29 | audit: \"restricted\" 30 | audit-version: \"latest\" 31 | warn: \"restricted\" 32 | warn-version: \"latest\" 33 | exemptions: 34 | usernames: [] 35 | runtimeClasses: [] 36 | namespaces: [kube-system, cis-operator-system]" >> /var/lib/rancher/k3s/server/psa.yaml 37 | fi -------------------------------------------------------------------------------- /tests/e2e/scripts/hosts: -------------------------------------------------------------------------------- 1 | [node] 2 | localhost ansible_python_interpreter=/usr/local/opt/python@3.9/bin/python3.9 -------------------------------------------------------------------------------- /tests/e2e/scripts/ipv6.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ip4_addr=$1 3 | ip6_addr=$2 4 | ip6_addr_gw=$3 5 | os=$4 6 | 7 | sysctl -w net.ipv6.conf.all.disable_ipv6=0 8 | sysctl -w net.ipv6.conf.eth1.accept_dad=0 9 | sysctl -w net.ipv6.conf.eth1.accept_ra=0 10 | sysctl -w net.ipv6.conf.eth1.forwarding=0 11 | 12 | if [ -z "${os##*ubuntu*}" ]; then 13 | netplan set ethernets.eth1.accept-ra=false 14 | netplan set ethernets.eth1.addresses=["$ip4_addr"/24,"$ip6_addr"/64] 15 | netplan set ethernets.eth1.gateway6="$ip6_addr_gw" 16 | netplan apply 17 | elif [ -z "${os##*alpine*}" ]; then 18 | iplink set eth1 down 19 | iplink set eth1 up 20 | ip -6 addr add "$ip6_addr"/64 dev eth1 21 | ip -6 r add default via "$ip6_addr_gw" 22 | else 23 | ip -6 addr add "$ip6_addr"/64 dev eth1 24 | ip -6 r add default via "$ip6_addr_gw" 25 | fi 26 | ip addr show dev eth1 27 | ip -6 r 28 | -------------------------------------------------------------------------------- /tests/e2e/scripts/registry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Script to to point k3s to the docker registry running on the host 4 | # This is used to avoid hitting dockerhub rate limits on E2E 
runners 5 | ip_addr=$1 6 | 7 | mkdir -p /etc/rancher/k3s/ 8 | echo "mirrors: 9 | docker.io: 10 | endpoint: 11 | - \"http://$ip_addr:15000\" 12 | registry.k8s.io: 13 | endpoint: 14 | - \"http://$ip_addr:15001\" 15 | gcr.io: 16 | endpoint: 17 | - \"http://$ip_addr:15002\" 18 | quay.io: 19 | endpoint: 20 | - \"http://$ip_addr:15003\" 21 | ghcr.io: 22 | endpoint: 23 | - \"http://$ip_addr:15004\"" >> /etc/rancher/k3s/registries.yaml -------------------------------------------------------------------------------- /tests/e2e/tailscale/README.md: -------------------------------------------------------------------------------- 1 | # How to run taliscale (E2E) Tests 2 | 3 | Tailscale requires three steps before running the test: 4 | 5 | 1 - Log into tailscale or create an account "https://login.tailscale.com/" 6 | 7 | 2 - In the `Access controls` section, add the cluster routes in the autoApprovers section. For example: 8 | 9 | ``` 10 | "autoApprovers": { 11 | "routes": { 12 | "10.42.0.0/16": ["testing@xyz.com"], 13 | "2001:cafe:42:0::/56": ["testing@xyz.com"], 14 | }, 15 | }, 16 | ``` 17 | 18 | 3 - In `Settings` > `Keys`, generate an auth key which is Reusable and Ephemeral. That key should be the value of a new env variable `E2E_TAILSCALE_KEY` 19 | 20 | # Typical problems 21 | 22 | ### The cluster does not start correctly 23 | 24 | Please verify that the tailscale key was correctly passed to the config. To verify this, check the config in the server/agent in the file /etc/rancher/k3s/config.yaml 25 | 26 | 27 | ### The verification on the routing fails 28 | 29 | Please verify that you filled the autoApprovers section and that the config applies to your key. 
If you access the tailscale UI and see that the machine has "Subnets" that require manual approval, the test will not work 30 | -------------------------------------------------------------------------------- /tests/integration/Dockerfile.test: -------------------------------------------------------------------------------- 1 | FROM golang:buster 2 | 3 | # Enables integration tests to run on existing cluster via Sonobuoy plugin 4 | 5 | RUN apt update && \ 6 | apt install -y curl git lsof bash openssh-server gcc g++ make ca-certificates && \ 7 | curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh 8 | 9 | WORKDIR $GOPATH/src/github.com/k3s-io/k3s-io/k3s/ 10 | 11 | COPY ./tests/testdata ./testdata 12 | COPY ./tests/integration/test-runner.sh . 13 | COPY ./dist/artifacts/k3s /usr/local/bin 14 | COPY ./dist/artifacts/k3s-integration-* ./tests/ 15 | 16 | RUN go install -u github.com/onsi/gomega 17 | RUN go install -u github.com/onsi/ginkgo 18 | -------------------------------------------------------------------------------- /tests/integration/etcdrestore/testdata/temp_depl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 3 9 | revisionHistoryLimit: 0 10 | strategy: 11 | type: Recreate 12 | selector: 13 | matchLabels: 14 | app: nginx 15 | template: 16 | metadata: 17 | labels: 18 | app: nginx 19 | spec: 20 | containers: 21 | - name: nginx 22 | image: nginx:1.14.2 23 | ports: 24 | - containerPort: 80 25 | -------------------------------------------------------------------------------- /tests/integration/etcdrestore/testdata/temp_depl2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment-post-snapshot 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 3 9 | 
revisionHistoryLimit: 0 10 | strategy: 11 | type: Recreate 12 | selector: 13 | matchLabels: 14 | app: nginx 15 | template: 16 | metadata: 17 | labels: 18 | app: nginx 19 | spec: 20 | containers: 21 | - name: nginx 22 | image: nginx:1.14.2 23 | ports: 24 | - containerPort: 80 25 | -------------------------------------------------------------------------------- /tests/integration/localstorage/testdata/localstorage_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: volume-test 5 | namespace: default 6 | spec: 7 | containers: 8 | - name: volume-test 9 | image: busybox:stable 10 | imagePullPolicy: IfNotPresent 11 | command: 12 | - sh 13 | - "-c" 14 | - "touch /data/file1 && sleep infinity" 15 | volumeMounts: 16 | - name: volv 17 | mountPath: /data 18 | securityContext: 19 | runAsUser: 1000 20 | runAsGroup: 1000 21 | volumes: 22 | - name: volv 23 | persistentVolumeClaim: 24 | claimName: local-path-pvc 25 | -------------------------------------------------------------------------------- /tests/integration/localstorage/testdata/localstorage_pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: local-path-pvc 5 | namespace: default 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: local-path 10 | resources: 11 | requests: 12 | storage: 1Gi 13 | -------------------------------------------------------------------------------- /tests/integration/longhorn/testdata/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: volume-test 5 | namespace: default 6 | spec: 7 | containers: 8 | - name: volume-test 9 | image: nginx:stable-alpine 10 | imagePullPolicy: IfNotPresent 11 | volumeMounts: 12 | - name: volv 13 | mountPath: /data 14 | ports: 15 | - containerPort: 80 16 
| volumes: 17 | - name: volv 18 | persistentVolumeClaim: 19 | claimName: longhorn-volv-pvc 20 | -------------------------------------------------------------------------------- /tests/integration/longhorn/testdata/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: longhorn-volv-pvc 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | storageClassName: longhorn 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | -------------------------------------------------------------------------------- /tests/integration/startup/testdata/agnhost.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: agnhost 5 | spec: 6 | containers: 7 | - name: agnhost 8 | image: registry.k8s.io/e2e-test-images/agnhost:2.53 9 | args: 10 | - inclusterclient 11 | - -v=9 12 | - --poll-interval=5 13 | dnsConfig: 14 | nameservers: 15 | - 8.8.8.8 16 | dnsPolicy: None 17 | -------------------------------------------------------------------------------- /tests/integration/startup/testdata/dummy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: dummy 6 | namespace: kube-system 7 | spec: 8 | containers: 9 | - name: dummy 10 | image: ranchertest/mytestcontainer 11 | imagePullPolicy: IfNotPresent 12 | -------------------------------------------------------------------------------- /tests/integration/test-runner.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -x 4 | 5 | results_dir="${RESULTS_DIR:-/tmp/results}" 6 | 7 | # saveResults prepares the results for handoff to the Sonobuoy worker. 
8 | # See: https://github.com/vmware-tanzu/sonobuoy/blob/master/site/content/docs/master/plugins.md 9 | saveResults() { 10 | cd ${results_dir} 11 | 12 | # Sonobuoy worker expects a tar file. 13 | tar czf results.tar.gz * 14 | 15 | # Signal to the worker that we are done and where to find the results. 16 | printf ${results_dir}/results.tar.gz > ${results_dir}/done 17 | } 18 | 19 | # Ensure that we tell the Sonobuoy worker we are done regardless of results. 20 | trap saveResults EXIT 21 | 22 | runTests() { 23 | cd ./tests 24 | for t in *.test; do 25 | # Run each test (automatically saves the output in the results directory). 26 | ./$t 27 | done 28 | } 29 | runTests 30 | -------------------------------------------------------------------------------- /tests/mock/executor_helpers.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "testing" 5 | 6 | executor "github.com/k3s-io/k3s/pkg/daemons/executor" 7 | "go.uber.org/mock/gomock" 8 | ) 9 | 10 | // NewExecutorWithEmbeddedETCD creates a new mock executor, and sets it as the current executor. 11 | // The executor exepects calls to ETCD(), and wraps the embedded executor method of the same name. 12 | // The various ready channels are also mocked with immediate channel closure. 
13 | func NewExecutorWithEmbeddedETCD(t *testing.T) *Executor { 14 | mockController := gomock.NewController(t) 15 | mockExecutor := NewExecutor(mockController) 16 | 17 | embed := &executor.Embedded{} 18 | mockExecutor.EXPECT().Bootstrap(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(embed.Bootstrap) 19 | mockExecutor.EXPECT().CurrentETCDOptions().AnyTimes().DoAndReturn(embed.CurrentETCDOptions) 20 | mockExecutor.EXPECT().ETCD(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(embed.ETCD) 21 | mockExecutor.EXPECT().ETCDReadyChan().AnyTimes().DoAndReturn(embed.ETCDReadyChan) 22 | 23 | closedChannel := func() <-chan struct{} { 24 | c := make(chan struct{}) 25 | close(c) 26 | return c 27 | } 28 | mockExecutor.EXPECT().APIServerReadyChan().AnyTimes().DoAndReturn(closedChannel) 29 | mockExecutor.EXPECT().CRIReadyChan().AnyTimes().DoAndReturn(closedChannel) 30 | 31 | executor.Set(mockExecutor) 32 | 33 | return mockExecutor 34 | } 35 | -------------------------------------------------------------------------------- /tests/mock/matchers.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/onsi/gomega/types" 7 | ) 8 | 9 | type gomockGomegaMatcher struct { 10 | gm types.GomegaMatcher 11 | x any 12 | } 13 | 14 | // GM wraps a gomega matcher for use as a gomock matcher 15 | func GM(gm types.GomegaMatcher) *gomockGomegaMatcher { 16 | return &gomockGomegaMatcher{gm: gm} 17 | } 18 | 19 | func (g *gomockGomegaMatcher) Matches(x any) bool { 20 | g.x = x 21 | ok, _ := g.gm.Match(x) 22 | return ok 23 | } 24 | 25 | func (g *gomockGomegaMatcher) String() string { 26 | if g.x != nil { 27 | ok, err := g.gm.Match(g.x) 28 | if err != nil { 29 | return err.Error() 30 | } 31 | if !ok { 32 | return g.gm.FailureMessage(g.x) 33 | } 34 | } 35 | return fmt.Sprintf("%T", g.gm) 36 | } 37 | 
-------------------------------------------------------------------------------- /tests/perf/.gitignore: -------------------------------------------------------------------------------- 1 | .terraform* 2 | *.tfstate* 3 | *.tfvars* 4 | *.plan* 5 | *tests_results* 6 | *junit.xml 7 | *kubeconfig.yaml 8 | -------------------------------------------------------------------------------- /tests/perf/Makefile: -------------------------------------------------------------------------------- 1 | MODULE := $(shell basename $$PWD) 2 | 3 | .PHONY: init config apply destroy clean test info 4 | 5 | init: 6 | @scripts/perf init 7 | 8 | config: 9 | @scripts/perf config 10 | 11 | plan: 12 | @scripts/perf plan 13 | 14 | apply: 15 | @scripts/perf apply 16 | 17 | destroy: 18 | @scripts/perf destroy 19 | 20 | clean: 21 | @scripts/perf clean 22 | 23 | test: 24 | @scripts/test test_load 25 | 26 | info: 27 | @scripts/perf info 28 | -------------------------------------------------------------------------------- /tests/perf/agents/data.tf: -------------------------------------------------------------------------------- 1 | data "terraform_remote_state" "server" { 2 | backend = "local" 3 | 4 | config = { 5 | path = "${path.module}/../server/server.tfstate" 6 | } 7 | } 8 | 9 | data "aws_vpc" "default" { 10 | default = true 11 | } 12 | 13 | data "aws_subnet_ids" "available" { 14 | vpc_id = data.aws_vpc.default.id 15 | } 16 | 17 | data "aws_subnet" "selected" { 18 | id = "${tolist(data.aws_subnet_ids.available.ids)[1]}" 19 | } 20 | 21 | data "aws_ami" "ubuntu" { 22 | most_recent = true 23 | owners = ["099720109477"] 24 | 25 | filter { 26 | name = "name" 27 | values = ["ubuntu-minimal/images/*/ubuntu-bionic-18.04-*"] 28 | } 29 | 30 | filter { 31 | name = "virtualization-type" 32 | values = ["hvm"] 33 | } 34 | 35 | filter { 36 | name = "root-device-type" 37 | values = ["ebs"] 38 | } 39 | 40 | filter { 41 | name = "architecture" 42 | values = ["x86_64"] 43 | } 44 | } 45 | 
-------------------------------------------------------------------------------- /tests/perf/agents/outputs.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k3s-io/k3s/07171fd7e8f5d2830308a8ee5ea73da853b50b17/tests/perf/agents/outputs.tf -------------------------------------------------------------------------------- /tests/perf/agents/variables.tf: -------------------------------------------------------------------------------- 1 | variable "agent_node_count" { 2 | description = "Number of nodes to run k3s agents on." 3 | type = number 4 | # default = 10 5 | } 6 | 7 | variable "agent_instance_type" { 8 | type = string 9 | default = "t3.2xlarge" 10 | } 11 | 12 | variable "extra_ssh_keys" { 13 | type = list 14 | default = [] 15 | description = "Extra ssh keys to inject into Rancher instances" 16 | } 17 | 18 | variable "k3s_version" { 19 | default = "v0.9.1" 20 | type = string 21 | description = "Version of K3S to install" 22 | } 23 | 24 | variable "name" { 25 | default = "k3s-loadtest" 26 | type = string 27 | description = "Name to identify this cluster" 28 | } 29 | 30 | variable "k3s_cluster_secret" { 31 | type = string 32 | description = "Cluster secret for k3s cluster registration" 33 | } -------------------------------------------------------------------------------- /tests/perf/agents/versions.tf: -------------------------------------------------------------------------------- 1 | 2 | terraform { 3 | required_version = ">= 0.12" 4 | } 5 | -------------------------------------------------------------------------------- /tests/perf/scripts/config: -------------------------------------------------------------------------------- 1 | ## MAIN VARIABLES ## 2 | #################### 3 | CLUSTER_NAME="loadtest-k3s" 4 | CLUSTER_SECRET="" 5 | DOMAIN_NAME="loadtest.eng.rancher.space" 6 | ZONE_ID="" 7 | K3S_VERSION="v1.0.0" 8 | EXTRA_SSH_KEYS="" # comma separated public keys 9 | 
PRIVATE_KEY_PATH="~/.ssh/id_rsa" 10 | DEBUG=1 11 | 12 | ## K3S DB VARIABLES ## 13 | ########################## 14 | DB_ENGINE="embedded-etcd" 15 | DB_INSTANCE_TYPE="db.m4.4xlarge" 16 | DB_NAME="k3s" 17 | DB_USERNAME="k3suser" 18 | DB_PASSWORD="" 19 | DB_VERSION=5.7 20 | 21 | ## K3S SERVER VARIABLES ## 22 | ########################## 23 | SERVER_HA=1 24 | SERVER_COUNT=3 25 | SERVER_INSTANCE_TYPE="m5.2xlarge" 26 | 27 | ## PROMETHEUS SERVER VARIABLES ## 28 | ################################# 29 | PROM_WORKER_NODE_COUNT=1 30 | PROM_WORKER_INSTANCE_TYPE="m5.large" 31 | 32 | ## K3S AGENTS VARIABLES ## 33 | ########################## 34 | AGENT_NODE_COUNT=10 35 | AGENT_INSTANCE_TYPE="m5.large" 36 | -------------------------------------------------------------------------------- /tests/perf/server/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_vpc" "default" { 2 | default = true 3 | } 4 | 5 | data "aws_subnet_ids" "available" { 6 | vpc_id = data.aws_vpc.default.id 7 | } 8 | 9 | data "aws_subnet" "selected" { 10 | id = "${tolist(data.aws_subnet_ids.available.ids)[1]}" 11 | } 12 | 13 | data "aws_ami" "ubuntu" { 14 | most_recent = true 15 | owners = ["099720109477"] 16 | 17 | filter { 18 | name = "name" 19 | values = ["ubuntu-minimal/images/*/ubuntu-bionic-18.04-*"] 20 | } 21 | 22 | filter { 23 | name = "virtualization-type" 24 | values = ["hvm"] 25 | } 26 | 27 | filter { 28 | name = "root-device-type" 29 | values = ["ebs"] 30 | } 31 | 32 | filter { 33 | name = "architecture" 34 | values = ["x86_64"] 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /tests/perf/server/files/etcd.tmpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | %{ if length(extra_ssh_keys) > 0 } 3 | ssh_authorized_keys: 4 | %{ for ssh_key in extra_ssh_keys } 5 | - ${ssh_key} 6 | %{ endfor } 7 | %{ endif } 8 | runcmd: 9 | - echo 
"net.ipv4.neigh.default.gc_interval = 3600" >> /etc/sysctl.conf 10 | - echo "net.ipv4.neigh.default.gc_stale_time = 3600" >> /etc/sysctl.conf 11 | - echo "net.ipv4.neigh.default.gc_thresh3 = 16384" >> /etc/sysctl.conf 12 | - echo "net.ipv4.neigh.default.gc_thresh2 = 8192" >> /etc/sysctl.conf 13 | - echo "net.ipv4.neigh.default.gc_thresh1 = 4096" >> /etc/sysctl.conf 14 | - echo "fs.file-max = 12000500" >> /etc/sysctl.conf 15 | - echo "fs.nr_open = 20000500" >> /etc/sysctl.conf 16 | - echo "net.ipv4.tcp_mem = '10000000 10000000 10000000'" >> /etc/sysctl.conf 17 | - echo "net.ipv4.tcp_rmem = '1024 4096 16384'" >> /etc/sysctl.conf 18 | - echo "net.ipv4.tcp_wmem = '1024 4096 16384'" >> /etc/sysctl.conf 19 | - echo "net.core.rmem_max = 16384" >> /etc/sysctl.conf 20 | - echo "net.core.wmem_max = 16384" >> /etc/sysctl.conf 21 | - ulimit -n 20000000 22 | - echo "# " >> /etc/security/limits.d/limits.conf 23 | - echo " * soft nofile 20000" >> /etc/security/limits.d/limits.conf 24 | - echo " * hard nofile 20000" >> /etc/security/limits.d/limits.conf 25 | - sysctl -p 26 | - apt-get update 27 | - apt-get install -y git vim software-properties-common resolvconf linux-headers-$(uname -r) 28 | - echo "nameserver 1.1.1.1" > /etc/resolvconf/resolv.conf.d/tail 29 | - echo "RateLimitIntervalSec=0" >> /etc/systemd/journald.conf 30 | - echo "RateLimitBurst=0" >> /etc/systemd/journald.conf 31 | - curl -sSL https://releases.rancher.com/install-docker/19.03.sh | sh 32 | -------------------------------------------------------------------------------- /tests/perf/server/files/etcd_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | IFS=',' read -r -a public_ips <<< "$PUBLIC_IPS" 5 | IFS=',' read -r -a private_ips <<< "$PRIVATE_IPS" 6 | 7 | conn_string="" 8 | for i in "${!private_ips[@]}"; do 9 | conn_string=$conn_string"etcd-$i=http://${private_ips[i]}:2380," 10 | done 11 | conn_string=${conn_string%?} 12 | for i in 
"${!public_ips[@]}"; do 13 | while true; do 14 | ssh -i $SSH_KEY_PATH -l ubuntu ${public_ips[i]} "sudo docker run -v /etcd-data:/etcd-data -d -p ${private_ips[i]}:2379:2379 -p ${private_ips[i]}:2380:2380 quay.io/coreos/etcd:$DB_VERSION etcd --initial-advertise-peer-urls http://${private_ips[i]}:2380 --name=etcd-$i --data-dir=/etcd-data --advertise-client-urls=http://0.0.0.0:2379 --listen-peer-urls=http://0.0.0.0:2380 --listen-client-urls=http://0.0.0.0:2379 --initial-cluster-token=etcd-cluster-1 --initial-cluster-state new --initial-cluster $conn_string" 15 | if [ $? == 0 ]; then 16 | break 17 | fi 18 | sleep 10 19 | done 20 | done 21 | 22 | # 23 | -------------------------------------------------------------------------------- /tests/perf/server/files/worker_userdata.tmpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | %{ if length(extra_ssh_keys) > 0 } 3 | ssh_authorized_keys: 4 | %{ for ssh_key in extra_ssh_keys } 5 | - ${ssh_key} 6 | %{ endfor } 7 | %{ endif } 8 | runcmd: 9 | - echo "net.ipv4.neigh.default.gc_interval = 3600" >> /etc/sysctl.conf 10 | - echo "net.ipv4.neigh.default.gc_stale_time = 3600" >> /etc/sysctl.conf 11 | - echo "net.ipv4.neigh.default.gc_thresh3 = 16384" >> /etc/sysctl.conf 12 | - echo "net.ipv4.neigh.default.gc_thresh2 = 8192" >> /etc/sysctl.conf 13 | - echo "net.ipv4.neigh.default.gc_thresh1 = 4096" >> /etc/sysctl.conf 14 | - echo "fs.file-max = 12000500" >> /etc/sysctl.conf 15 | - echo "fs.nr_open = 20000500" >> /etc/sysctl.conf 16 | - echo "net.ipv4.tcp_mem = '10000000 10000000 10000000'" >> /etc/sysctl.conf 17 | - echo "net.ipv4.tcp_rmem = '1024 4096 16384'" >> /etc/sysctl.conf 18 | - echo "net.ipv4.tcp_wmem = '1024 4096 16384'" >> /etc/sysctl.conf 19 | - echo "net.core.rmem_max = 16384" >> /etc/sysctl.conf 20 | - echo "net.core.wmem_max = 16384" >> /etc/sysctl.conf 21 | - ulimit -n 20000 22 | - echo "# " >> /etc/security/limits.d/limits.conf 23 | - echo " * soft nofile 20000" 
>> /etc/security/limits.d/limits.conf 24 | - echo " * hard nofile 20000" >> /etc/security/limits.d/limits.conf 25 | - sysctl -p 26 | - until (curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${install_k3s_version} INSTALL_K3S_EXEC="${k3s_exec}" K3S_URL=https://${k3s_url}:6443 K3S_CLUSTER_SECRET="${k3s_cluster_secret}" sh -); do echo 'k3s did not install correctly'; sleep 1; done 27 | -------------------------------------------------------------------------------- /tests/perf/server/outputs.tf: -------------------------------------------------------------------------------- 1 | output "public_ip" { 2 | value = var.domain_name 3 | } 4 | 5 | output "install_k3s_version" { 6 | value = local.install_k3s_version 7 | } 8 | 9 | output "k3s_cluster_secret" { 10 | value = local.k3s_cluster_secret 11 | } 12 | 13 | output "k3s_server_ips" { 14 | value = join(",", aws_instance.k3s-server.*.public_ip) 15 | } 16 | -------------------------------------------------------------------------------- /tests/perf/server/versions.tf: -------------------------------------------------------------------------------- 1 | 2 | terraform { 3 | required_version = ">= 0.12" 4 | } 5 | -------------------------------------------------------------------------------- /tests/perf/tests/density/2000_nodes/override.yaml: -------------------------------------------------------------------------------- 1 | NODE_MODE: masteranddns 2 | -------------------------------------------------------------------------------- /tests/perf/tests/density/5000_nodes/override.yaml: -------------------------------------------------------------------------------- 1 | NODE_MODE: masteranddns 2 | -------------------------------------------------------------------------------- /tests/perf/tests/density/600_nodes/high_density_override.yaml: -------------------------------------------------------------------------------- 1 | PODS_PER_NODE: 95 2 | -------------------------------------------------------------------------------- 
/tests/perf/tests/density/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{.Name}} 5 | labels: 6 | group: {{.Group}} 7 | spec: 8 | replicas: {{.Replicas}} 9 | selector: 10 | matchLabels: 11 | name: {{.Name}} 12 | template: 13 | metadata: 14 | labels: 15 | name: {{.Name}} 16 | group: {{.Group}} 17 | spec: 18 | containers: 19 | - image: registry.k8s.io/pause:3.1 20 | imagePullPolicy: IfNotPresent 21 | name: {{.Name}} 22 | ports: 23 | resources: 24 | requests: 25 | cpu: {{.CpuRequest}} 26 | memory: {{.MemoryRequest}} 27 | # Add not-ready/unreachable tolerations for 15 minutes so that node 28 | # failure doesn't trigger pod deletion. 29 | tolerations: 30 | - key: "node.kubernetes.io/not-ready" 31 | operator: "Exists" 32 | effect: "NoExecute" 33 | tolerationSeconds: 900 34 | - key: "node.kubernetes.io/unreachable" 35 | operator: "Exists" 36 | effect: "NoExecute" 37 | tolerationSeconds: 900 38 | -------------------------------------------------------------------------------- /tests/perf/tests/load/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{.Name}} 5 | data: 6 | data.yaml: |- 7 | a: 1 8 | b: 2 9 | c: 3 10 | -------------------------------------------------------------------------------- /tests/perf/tests/load/daemonset-priorityclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scheduling.k8s.io/v1 2 | kind: PriorityClass 3 | metadata: 4 | name: {{.Name}} 5 | value: 1000000 6 | globalDefault: false 7 | description: "Designated priority class to be used for DaemonSet pods. This is 8 | to make sure they have higher priority than other test pods and there is always 9 | place for them on each node, see kubernetes/kubernetes#82818." 
10 | -------------------------------------------------------------------------------- /tests/perf/tests/load/daemonset.yaml: -------------------------------------------------------------------------------- 1 | {{$Image := DefaultParam .Image "registry.k8s.io/pause:3.1"}} 2 | 3 | apiVersion: apps/v1 4 | kind: DaemonSet 5 | metadata: 6 | name: {{.Name}} 7 | labels: 8 | group: load 9 | spec: 10 | updateStrategy: 11 | rollingUpdate: 12 | maxUnavailable: {{MaxInt 10 (DivideInt .Nodes 20)}} # 5% of nodes, but not less than 10 13 | selector: 14 | matchLabels: 15 | name: {{.Name}} 16 | template: 17 | metadata: 18 | labels: 19 | group: load 20 | name: {{.Name}} 21 | spec: 22 | containers: 23 | - name: {{.Name}} 24 | image: {{$Image}} 25 | resources: 26 | requests: 27 | cpu: 10m 28 | memory: "10M" 29 | priorityClassName: daemonset-priorityclass-0 # Name is autogenerated, hence the -0 prefix. 30 | terminationGracePeriodSeconds: 1 31 | # Add not-ready/unreachable tolerations for 15 minutes so that node 32 | # failure doesn't trigger pod deletion. 33 | tolerations: 34 | - key: "node.kubernetes.io/not-ready" 35 | operator: "Exists" 36 | effect: "NoExecute" 37 | tolerationSeconds: 900 38 | - key: "node.kubernetes.io/unreachable" 39 | operator: "Exists" 40 | effect: "NoExecute" 41 | tolerationSeconds: 900 42 | -------------------------------------------------------------------------------- /tests/perf/tests/load/job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: {{.Name}} 5 | labels: 6 | group: load 7 | spec: 8 | manualSelector: true 9 | parallelism: {{RandIntRange .ReplicasMin .ReplicasMax}} 10 | selector: 11 | matchLabels: 12 | name: {{.Name}} 13 | template: 14 | metadata: 15 | labels: 16 | group: load 17 | name: {{.Name}} 18 | spec: 19 | containers: 20 | - name: {{.Name}} 21 | # TODO(#799): We should test the "run-to-completion" workflow and hence don't use pause pods. 
22 | image: registry.k8s.io/pause:3.1 23 | resources: 24 | requests: 25 | cpu: 10m 26 | memory: "10M" 27 | restartPolicy: Never 28 | terminationGracePeriodSeconds: 1 29 | # Add not-ready/unreachable tolerations for 15 minutes so that node 30 | # failure doesn't trigger pod deletion. 31 | tolerations: 32 | - key: "node.kubernetes.io/not-ready" 33 | operator: "Exists" 34 | effect: "NoExecute" 35 | tolerationSeconds: 900 36 | - key: "node.kubernetes.io/unreachable" 37 | operator: "Exists" 38 | effect: "NoExecute" 39 | tolerationSeconds: 900 40 | -------------------------------------------------------------------------------- /tests/perf/tests/load/networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | {{if eq (Mod .Index 10) 0}} # Create for only 10% of deployments 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: {{.Name}} 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | name: {{.BaseName}}-{{.Index}} 10 | policyTypes: 11 | - Egress 12 | egress: 13 | - to: 14 | - ipBlock: 15 | cidr: 10.0.0.0/24 16 | ports: 17 | - protocol: TCP 18 | port: 8080 19 | {{end}} 20 | -------------------------------------------------------------------------------- /tests/perf/tests/load/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: {{.Name}} 5 | -------------------------------------------------------------------------------- /tests/perf/tests/load/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: {{.Name}} 5 | type: Opaque 6 | data: 7 | password: c2NhbGFiaWxpdHkK 8 | -------------------------------------------------------------------------------- /tests/perf/tests/load/service.yaml: -------------------------------------------------------------------------------- 1 | {{$SetServiceProxyLabel 
:= DefaultParam .SetServiceProxyLabel false}} 2 | 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: {{.Name}} 7 | {{if and $SetServiceProxyLabel (eq (Mod .Index 2) 0)}} 8 | labels: 9 | service.kubernetes.io/service-proxy-name: foo 10 | {{end}} 11 | spec: 12 | selector: 13 | svc: {{.Name}} 14 | ports: 15 | - port: 80 16 | targetPort: 80 17 | -------------------------------------------------------------------------------- /tests/perf/tests/load/statefulset_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{.Name}} 5 | labels: 6 | name: {{.Name}} 7 | spec: 8 | clusterIP: None 9 | selector: 10 | name: {{.Name}} 11 | -------------------------------------------------------------------------------- /updatecli/scripts/run-go-generate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eux 4 | 5 | ./scripts/download >&2 6 | go generate >&2 7 | git diff 8 | 9 | exit 0 10 | 11 | -------------------------------------------------------------------------------- /updatecli/scripts/run-go-mod-update.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eux 4 | 5 | go get "${1}" >&2 6 | go mod tidy >&2 7 | git diff 8 | 9 | exit 0 10 | 11 | -------------------------------------------------------------------------------- /updatecli/updatecli.d/updatek3sroot.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Update k3sroot version" 3 | 4 | sources: 5 | k3sroot: 6 | name: Get k3s-root version 7 | kind: githubrelease 8 | spec: 9 | owner: k3s-io 10 | repository: k3s-root 11 | token: '{{ requiredEnv .github.token }}' 12 | typefilter: 13 | release: true 14 | draft: false 15 | prerelease: false 16 | versionfilter: 17 | kind: semver 18 | # pattern accepts any semver constraint 19 | pattern: "*" 20 | 21 
| targets: 22 | versionfile: 23 | name: "Bump to latest k3s-root version in scripts/version.sh" 24 | kind: file 25 | scmid: default 26 | sourceid: k3sroot 27 | spec: 28 | file: "scripts/version.sh" 29 | matchpattern: '(?m)^VERSION_ROOT=(.*)' 30 | replacepattern: 'VERSION_ROOT="{{ source "k3sroot" }}"' 31 | 32 | 33 | scms: 34 | default: 35 | kind: github 36 | spec: 37 | token: '{{ requiredEnv .github.token }}' 38 | username: '{{ requiredEnv .github.username }}' 39 | user: '{{ .github.user }}' 40 | email: '{{ .github.email }}' 41 | owner: '{{ .k3s.org }}' 42 | repository: '{{ .k3s.repo }}' 43 | branch: '{{ .k3s.branch }}' 44 | 45 | actions: 46 | default: 47 | title: 'Bump K3s-root version to {{ source "k3sroot" }}' 48 | kind: github/pullrequest 49 | spec: 50 | automerge: false 51 | labels: 52 | - dependencies 53 | scmid: default 54 | 55 | -------------------------------------------------------------------------------- /updatecli/values.yaml: -------------------------------------------------------------------------------- 1 | github: 2 | user: "github-actions[bot]" 3 | email: "41898282+github-actions[bot]@users.noreply.github.com" 4 | username: "UPDATECLI_GITHUB_ACTOR" 5 | token: "UPDATECLI_GITHUB_TOKEN" 6 | k3s: 7 | org: "k3s-io" 8 | repo: "k3s" 9 | branch: "master" 10 | klipper_helm: 11 | org: "k3s-io" 12 | repo: "klipper-helm" 13 | branch: "master" 14 | klipper_lb: 15 | org: "k3s-io" 16 | repo: "klipper-lb" 17 | branch: "master" 18 | local_path_provisioner: 19 | org: "rancher" 20 | repo: "local-path-provisioner" 21 | branch: "master" 22 | helm_controller: 23 | org: "k3s-io" 24 | repo: "helm-controller" 25 | branch: "master" 26 | --------------------------------------------------------------------------------