├── .codespellignore
├── .dockerignore
├── .github
├── .codecov.yml
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ ├── cut_release.md
│ ├── feature_request.md
│ └── os_validation.md
├── PULL_REQUEST_TEMPLATE.md
├── actions
│ ├── install-crane
│ │ └── action.yml
│ ├── setup-go
│ │ └── action.yaml
│ └── vagrant-setup
│ │ └── action.yaml
├── dco.yml
├── dependabot.yml
└── workflows
│ ├── build.yml
│ ├── fossa.yml
│ ├── nightly-install.yaml
│ ├── pr.yml
│ ├── release.yml
│ ├── spellcheck.yaml
│ ├── stale.yml
│ ├── test-suite.yaml
│ ├── trivy.yaml
│ ├── unittest.yaml
│ └── updatecli.yml
├── .gitignore
├── .golangci.json
├── BUILDING.md
├── CODEOWNERS
├── Dockerfile
├── Dockerfile.docs
├── Dockerfile.windows
├── LICENSE
├── MAINTAINERS
├── Makefile
├── README.md
├── Vagrantfile
├── bundle
├── bin
│ ├── rke2-killall.sh
│ ├── rke2-uninstall.ps1
│ └── rke2-uninstall.sh
├── lib
│ └── systemd
│ │ └── system
│ │ ├── rke2-agent.env
│ │ ├── rke2-agent.service
│ │ ├── rke2-server.env
│ │ └── rke2-server.service
└── share
│ └── rke2
│ └── rke2-cis-sysctl.conf
├── channels.yaml
├── charts
├── README.md
├── build-chart.sh
├── build-charts.sh
└── chart_versions.yaml
├── contrib
├── custom-image-kubelet
│ ├── .flake8
│ ├── README.md
│ ├── genconfig.py
│ └── requirements.txt
└── gotests_templates
│ ├── call.tmpl
│ ├── function.tmpl
│ ├── header.tmpl
│ ├── inline.tmpl
│ ├── inputs.tmpl
│ ├── message.tmpl
│ └── results.tmpl
├── developer-docs
├── about_rke2_docs.md
├── docker_multiarch.md
├── image_sources.md
├── k3s.md
├── testing.md
├── updating_rke2_charts.md
├── upgrading_dependencies.md
├── upgrading_go.md
├── upgrading_kubernetes.md
└── upgrading_rke2.md
├── docs
└── adrs
│ ├── 001-record-architecture-decisions.md
│ ├── 002-rke2-rpm-support.md
│ ├── 003-rke2-rpm-sle-support.md
│ ├── 004-rke2-rc-versioning.md
│ ├── 004-servicelb-support.md
│ ├── 005-security-updates-automation.md
│ ├── 006-add-flannel-support.md
│ ├── 007-add-kine-support.md
│ ├── 008-gh-branch-strategy.md
│ ├── 008-traefik-ingress.md
│ ├── README.md
│ └── adr-template.md
├── go.mod
├── go.sum
├── install.ps1
├── install.sh
├── main.go
├── mkdocs.yml
├── pkg
├── auth
│ └── auth.go
├── bootstrap
│ └── bootstrap.go
├── cli
│ ├── cmds
│ │ ├── agent.go
│ │ ├── agent_service_linux.go
│ │ ├── agent_service_windows.go
│ │ ├── cert.go
│ │ ├── cmds_test.go
│ │ ├── completion.go
│ │ ├── etcd_snapshot.go
│ │ ├── k3sopts.go
│ │ ├── profile_linux.go
│ │ ├── profile_windows.go
│ │ ├── root.go
│ │ ├── secrets_encrypt.go
│ │ ├── server.go
│ │ └── token.go
│ └── defaults
│ │ ├── defaults.go
│ │ ├── defaults_linux.go
│ │ └── defaults_windows.go
├── controllers
│ └── cisnetworkpolicy
│ │ ├── cleanup.go
│ │ └── controller.go
├── images
│ ├── images.go
│ └── images_test.go
├── logging
│ ├── logging.go
│ └── logging_test.go
├── pebinaryexecutor
│ └── pebinary.go
├── podexecutor
│ ├── command_linux.go
│ ├── command_other.go
│ ├── spw.go
│ └── staticpod.go
├── rke2
│ ├── clusterrole.go
│ ├── clusterrole_bootstrap.go
│ ├── kp.go
│ ├── np.go
│ ├── psa.go
│ ├── rc.go
│ ├── rke2.go
│ ├── rke2_linux.go
│ ├── rke2_linux_test.go
│ ├── rke2_windows.go
│ ├── serviceaccount.go
│ ├── serviceaccount_test.go
│ └── spc.go
├── staticpod
│ └── staticpod.go
└── windows
│ ├── calico.go
│ ├── flannel.go
│ ├── service_linux.go
│ ├── service_windows.go
│ ├── types.go
│ └── utils.go
├── scripts
├── airgap
│ ├── dnsNodeCache-test.yaml
│ └── loadbalancer-test.yaml
├── build
├── build-binary
├── build-image-runtime
├── build-image-test
├── build-images
├── build-upload
├── build-windows-binary
├── build-windows-images
├── checksum
├── clean
├── clean-cache
├── copy-images.sh
├── dev-peer
├── dev-peer-enter
├── dev-rpm
├── dev-runtime-image
├── dev-shell
├── dev-shell-build
├── dev-shell-enter
├── package
├── package-binary
├── package-bundle
├── package-dev-rpm
├── package-image-runtime
├── package-images
├── package-windows-binary
├── package-windows-bundle
├── package-windows-images
├── publish-binary
├── publish-dev-rpm
├── publish-image-runtime
├── publish-image-runtime-windows
├── publish-manifest-kubernetes
├── publish-manifest-runtime
├── remote-debug
├── remote-debug-exit
├── run
├── scan-images
├── semver-parse.sh
├── sonobuoy-config.json
├── test
├── test-unit
├── validate
├── validate-charts
├── validate-release
└── version.sh
├── tests
├── client.go
├── docker
│ ├── basics
│ │ └── basics_test.go
│ ├── dualstack
│ │ └── dualstack_test.go
│ ├── log-upload
│ ├── resources
│ │ ├── clusterip.yaml
│ │ ├── dns-node-cache.yaml
│ │ ├── dnsutils.yaml
│ │ ├── dualstack_clusterip.yaml
│ │ ├── dualstack_ingress.yaml
│ │ ├── dualstack_nodeport.yaml
│ │ ├── loadbalancer.yaml
│ │ ├── nodecache.yaml
│ │ ├── nodeport.yaml
│ │ ├── secrets.yaml
│ │ └── volume-test.yaml
│ ├── secretsencryption
│ │ └── secretsencryption_test.go
│ ├── splitserver
│ │ └── splitserver_test.go
│ ├── test-helpers
│ ├── test-runner
│ ├── test-setup-sonobuoy
│ └── testutils.go
├── e2e
│ ├── ciliumnokp
│ │ ├── Vagrantfile
│ │ └── ciliumnokp_test.go
│ ├── kine
│ │ ├── Vagrantfile
│ │ └── kine_test.go
│ ├── mixedos
│ │ ├── README.md
│ │ ├── Vagrantfile
│ │ └── mixedos_test.go
│ ├── mixedosbgp
│ │ ├── Vagrantfile
│ │ └── mixedosbgp_test.go
│ ├── multus
│ │ ├── Vagrantfile
│ │ └── multus_test.go
│ ├── report
│ │ └── s3upload.go
│ ├── resource_files
│ │ ├── clusterip.yaml
│ │ ├── csi-driver-host-path.txt
│ │ ├── daemonset.yaml
│ │ ├── dnsutils.yaml
│ │ ├── dualstack_clusterip.yaml
│ │ ├── dualstack_ingress.yaml
│ │ ├── dualstack_nodeport.yaml
│ │ ├── ingress.yaml
│ │ ├── loadbalancer.yaml
│ │ ├── local-path-provisioner.yaml
│ │ ├── multus-pods.yaml
│ │ ├── netpol-fail.yaml
│ │ ├── netpol-work.yaml
│ │ ├── nodeport.yaml
│ │ ├── pod_client.yaml
│ │ ├── secrets.yaml
│ │ └── windows_app_deployment.yaml
│ ├── scripts
│ │ ├── calico_manifest.sh
│ │ ├── calico_manifestbgp.sh
│ │ ├── cilium_nokubeproxy.sh
│ │ ├── install-bgp.ps1
│ │ ├── install_sonobuoy.sh
│ │ ├── ipv6.sh
│ │ ├── latest_commit.ps1
│ │ ├── latest_commit.sh
│ │ ├── registry.sh
│ │ └── run_tests.sh
│ ├── secretsencryption
│ │ ├── Vagrantfile
│ │ └── secretsencryption_test.go
│ ├── secretsencryption_old
│ │ ├── Vagrantfile
│ │ └── secretsencryption_test.go
│ ├── splitserver
│ │ ├── Vagrantfile
│ │ └── splitserver_test.go
│ ├── testutils.go
│ ├── upgradecluster
│ │ ├── Vagrantfile
│ │ └── upgradecluster_test.go
│ ├── vagrantdefaults.rb
│ └── validatecluster
│ │ ├── Vagrantfile
│ │ └── validatecluster_test.go
├── install
│ ├── README.md
│ ├── centos-9
│ │ └── Vagrantfile
│ ├── install_util.rb
│ ├── opensuse-leap
│ │ └── Vagrantfile
│ ├── oracle-9
│ │ └── Vagrantfile
│ ├── rocky-8
│ │ └── Vagrantfile
│ ├── ubuntu-2404
│ │ └── Vagrantfile
│ ├── windows-2019
│ │ └── Vagrantfile
│ └── windows-2022
│ │ └── Vagrantfile
└── integration
│ ├── etcdsnapshot
│ └── etcd_int_test.go
│ ├── integration.go
│ └── startup
│ └── startup_test.go
├── updatecli
├── scripts
│ ├── retrieve_chart_version.sh
│ ├── update_chart_and_images.sh
│ └── validate_version.sh
├── updatecli.d
│ ├── vsphere-cpi.yml
│ └── vsphere-csi.yml
└── values.yaml
└── windows
├── rke2-install.ps1
└── rke2-quickstart.ps1
/.codespellignore:
--------------------------------------------------------------------------------
1 | aks
2 | ec2
3 | eks
4 | gce
5 | gcp
6 | ro
7 | ser
8 | shouldnot
9 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | ./.dapper
2 | ./.cache
--------------------------------------------------------------------------------
/.github/.codecov.yml:
--------------------------------------------------------------------------------
1 | coverage:
2 | status:
3 | project:
4 | default: false # disable the default status that measures entire project
5 | pkg: # declare a new status context "pkg"
6 | paths:
7 | - pkg/* # only include coverage in "pkg/" folder
8 | informational: true # Always pass check
9 | patch: off # disable the commit only checks
10 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
12 | **Environmental Info:**
13 | RKE2 Version:
14 |
15 |
16 | Node(s) CPU architecture, OS, and Version:
17 |
18 |
19 | Cluster Configuration:
20 |
21 |
22 | **Describe the bug:**
23 |
24 |
25 | **Steps To Reproduce:**
26 |
27 | - Installed RKE2:
28 |
29 | **Expected behavior:**
30 |
31 |
32 | **Actual behavior:**
33 |
34 |
35 | **Additional context / logs:**
36 |
37 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
12 | **Is your feature request related to a problem? Please describe.**
13 |
14 |
15 | **Describe the solution you'd like**
16 |
17 |
18 | **Describe alternatives you've considered**
19 |
20 |
21 | **Additional context**
22 |
23 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/os_validation.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Validate Operating System
3 | about: Request validation of an operating system
4 | title: 'Validate OS VERSION'
5 | labels: ["kind/os-validation"]
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
12 | **RKE2 Versions to be Validated**
13 |
14 |
15 |
16 | **Testing Considerations**
17 |
18 | 1. Install and run sonobuoy conformance tests on a hardened cluster
19 | 2. Validate SUC upgrade
20 | 3. Install Rancher Manager
21 | 4. Validate snapshot restore via `cluster-reset-restore-path`
22 |
23 |
24 | **Additional Information**
25 |
26 |
27 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | #### Proposed Changes ####
4 |
5 |
6 |
7 |
8 |
9 | #### Types of Changes ####
10 |
11 |
12 |
13 | #### Verification ####
14 |
15 |
16 |
17 | #### Testing ####
18 |
19 |
20 |
21 | #### Linked Issues ####
22 |
23 |
24 |
25 | #### User-Facing Change ####
26 |
30 | ```release-note
31 |
32 | ```
33 |
34 | #### Further Comments ####
35 |
36 |
37 |
--------------------------------------------------------------------------------
/.github/actions/install-crane/action.yml:
--------------------------------------------------------------------------------
1 | name: Install crane
2 | inputs:
3 | version:
4 | default: v0.20.3
5 | checksum:
6 | default: 36c67a932f489b3f2724b64af90b599a8ef2aa7b004872597373c0ad694dc059
7 |
8 | runs:
9 | using: 'composite'
10 | steps:
11 | - shell: bash
12 | run: |
13 | curl -sL "https://github.com/google/go-containerregistry/releases/download/${{ inputs.version }}/go-containerregistry_Linux_x86_64.tar.gz" -o crane.tar.gz
14 | echo "${{ inputs.checksum }} crane.tar.gz" | sha256sum -c
15 | tar -xzvf crane.tar.gz crane
16 | chmod +x crane
17 | mv crane /usr/local/bin/
18 |
--------------------------------------------------------------------------------
/.github/actions/setup-go/action.yaml:
--------------------------------------------------------------------------------
1 | name: 'Setup golang with master only caching'
2 | description: 'A composite action that installs golang, but with a caching strategy that only updates the cache on master branch.'
3 | inputs:
4 | go-version:
5 | description: 'Override the version of Go to install.'
6 | required: false
7 |
8 | runs:
9 | using: 'composite'
10 | steps:
11 | - uses: actions/setup-go@v5
12 | if: inputs.go-version == ''
13 | with:
14 | go-version-file: 'go.mod' # Just use whatever version is in the go.mod file
15 | cache: ${{ github.ref == 'refs/heads/master' }}
16 |
17 | - uses: actions/setup-go@v5
18 | if: inputs.go-version != ''
19 | with:
20 | go-version: ${{ inputs.go-version }}
21 | cache: ${{ github.ref == 'refs/heads/master' }}
22 |
23 | - name: Prepare for go cache
24 | if: ${{ github.ref != 'refs/heads/master' }}
25 | shell: bash
26 | run: |
27 | echo "GO_CACHE=$(go env GOCACHE)" | tee -a "$GITHUB_ENV"
28 | echo "GO_MODCACHE=$(go env GOMODCACHE)" | tee -a "$GITHUB_ENV"
29 | echo "GO_VERSION=$(go env GOVERSION | tr -d 'go')" | tee -a "$GITHUB_ENV"
30 |
31 | - name: Setup read-only cache
32 | if: ${{ github.ref != 'refs/heads/master' }}
33 | uses: actions/cache/restore@v4
34 | with:
35 | path: |
36 | ${{ env.GO_MODCACHE }}
37 | ${{ env.GO_CACHE }}
38 | # Match the cache key to the setup-go action https://github.com/actions/setup-go/blob/main/src/cache-restore.ts#L34
39 | key: setup-go-${{ runner.os }}-${{ env.ImageOS }}-go-${{ env.GO_VERSION }}-${{ hashFiles('go.sum') }}
--------------------------------------------------------------------------------
/.github/actions/vagrant-setup/action.yaml:
--------------------------------------------------------------------------------
1 | name: 'Setup Vagrant and Libvirt'
2 | description: 'A composite action that installs latest versions of vagrant and libvirt for use on ubuntu based runners'
3 | runs:
4 | using: 'composite'
5 | steps:
6 | - name: Add vagrant to apt-get sources
7 | shell: bash
8 | run: |
9 | curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
10 | echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
11 | sudo sed -i 's/^# deb-src/deb-src/' /etc/apt/sources.list
12 | - name: Install vagrant and libvirt
13 | shell: bash
14 | run: |
15 | sudo apt-get update
16 | sudo apt-get install -y libvirt-daemon libvirt-daemon-system vagrant=2.4.1-1 ruby-libvirt
17 | sudo systemctl enable --now libvirtd
18 | - name: Install vagrant dependencies
19 | shell: bash
20 | run: |
21 | sudo apt-get install -y --no-install-recommends libxslt-dev libxml2-dev libvirt-dev ruby-bundler ruby-dev zlib1g-dev
22 | # This is a workaround for the libvirt group not being available in the current shell
23 | # https://github.com/actions/runner-images/issues/7670#issuecomment-1900711711
24 | - name: Make the libvirt socket rw accessible to everyone
25 | shell: bash
26 | run: |
27 | sudo chmod a+rw /var/run/libvirt/libvirt-sock
28 | - name: Install vagrant-libvirt plugin
29 | shell: bash
30 | run: vagrant plugin install vagrant-libvirt
--------------------------------------------------------------------------------
/.github/dco.yml:
--------------------------------------------------------------------------------
1 | require:
2 | members: false
3 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 |
4 | # Maintain dependencies for Docker Images
5 | - package-ecosystem: "docker"
6 | directory: "/"
7 | ignore:
8 | - dependency-name: "rancher/hardened-build-base"
9 | - dependency-name: "rancher/hardened-kubernetes"
10 | schedule:
11 | interval: "weekly"
12 | labels:
13 | - "kind/dependabot"
14 | reviewers:
15 | - "rancher/k3s"
16 |
17 | # Maintain dependencies for GitHub Actions
18 | - package-ecosystem: "github-actions"
19 | directory: "/"
20 | schedule:
21 | interval: "weekly"
22 | labels:
23 | - "kind/dependabot"
24 | reviewers:
25 | - "rancher/k3s"
26 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | on:
2 | push:
3 | paths-ignore:
4 | - "**.md"
5 | - "channels.yaml"
6 | - "install.sh"
7 | - "!.github/workflows/test-suite.yaml"
8 | branches:
9 | - master
10 | - release-**
11 |
12 | name: Branch Merge Build
13 | permissions:
14 | contents: write
15 | id-token: write
16 | jobs:
17 | build-amd64:
18 | runs-on: runs-on,runner=8cpu-linux-x64,run-id=${{ github.run_id }},image=ubuntu22-full-x64,hdd=64
19 | steps:
20 | - name: Checkout code
21 | uses: actions/checkout@v4
22 | - name: Install Dapper
23 | run: |
24 | curl -sL https://releases.rancher.com/dapper/latest/dapper-$(uname -s)-$(uname -m) > /usr/local/bin/dapper
25 | chmod +x /usr/local/bin/dapper
26 | - name: "Read secrets"
27 | uses: rancher-eio/read-vault-secrets@main
28 | with:
29 | secrets: |
30 | secret/data/github/repo/${{ github.repository }}/aws/rke2-ci-uploader/credentials AWS_ACCESS_KEY_ID ;
31 | secret/data/github/repo/${{ github.repository }}/aws/rke2-ci-uploader/credentials AWS_SECRET_ACCESS_KEY ;
32 | - name: Build
33 | run: dapper -f Dockerfile --target dapper make dapper-ci
34 | env:
35 | AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }}
36 | AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }}
37 | - name: Load kernel modules
38 | run: |
39 | sudo modprobe br_netfilter overlay
40 | - name: Test Conformance
41 | run: dapper -f Dockerfile --target dapper make test
42 | - name: Prepare Test Logs on Failure
43 | if: ${{ failure() }}
44 | run: |
45 | sudo cp -r /tmp/rke2-logs ~/rke2-logs
46 | sudo chown -R $USER:$USER ~/rke2-logs
47 | - name: Upload Test Logs on Failure
48 | if: ${{ failure() }}
49 | uses: actions/upload-artifact@v4
50 | with:
51 | name: rke2-test-logs
52 | path: ~/rke2-logs/
53 | build-arm64:
54 | runs-on: runs-on,runner=8cpu-linux-arm64,run-id=${{ github.run_id }},image=ubuntu22-full-arm64,hdd=64
55 | steps:
56 | - name: Checkout code
57 | uses: actions/checkout@v4
58 | - name: Install Dapper
59 | run: |
60 | curl -sL https://releases.rancher.com/dapper/latest/dapper-$(uname -s)-$(uname -m) > /usr/local/bin/dapper
61 | chmod +x /usr/local/bin/dapper
62 | - name: "Read secrets"
63 | uses: rancher-eio/read-vault-secrets@main
64 | with:
65 | secrets: |
66 | secret/data/github/repo/${{ github.repository }}/aws/rke2-ci-uploader/credentials AWS_ACCESS_KEY_ID ;
67 | secret/data/github/repo/${{ github.repository }}/aws/rke2-ci-uploader/credentials AWS_SECRET_ACCESS_KEY ;
68 | - name: Build
69 | run: |
70 | dapper -f Dockerfile --target dapper make dapper-ci
71 | env:
72 | AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }}
73 | AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }}
74 |
--------------------------------------------------------------------------------
/.github/workflows/fossa.yml:
--------------------------------------------------------------------------------
1 | name: FOSSA Scanning
2 |
3 | on:
4 | push:
5 | branches: ["main", "master", "release-**"]
6 | workflow_dispatch:
7 |
8 | permissions:
9 | contents: read
10 | id-token: write
11 |
12 | jobs:
13 | fossa-scanning:
14 | runs-on: ubuntu-latest
15 | steps:
16 | - name: Checkout
17 | uses: actions/checkout@v4
18 |
19 | - name: Read FOSSA token
20 | uses: rancher-eio/read-vault-secrets@main
21 | with:
22 | secrets: |
23 | secret/data/github/org/rancher/fossa/push token | FOSSA_API_KEY_PUSH_ONLY
24 |
25 | - name: FOSSA scan
26 | uses: fossas/fossa-action@main
27 | with:
28 | api-key: ${{ env.FOSSA_API_KEY_PUSH_ONLY }}
29 | run-tests: false
--------------------------------------------------------------------------------
/.github/workflows/pr.yml:
--------------------------------------------------------------------------------
1 | on:
2 | pull_request:
3 | paths-ignore:
4 | - "**.md"
5 | - "channels.yaml"
6 | - "install.sh"
7 | - "!.github/workflows/test-suite.yaml"
8 |
9 | name: PR Build
10 | permissions:
11 | contents: read
12 | jobs:
13 | build-amd64:
14 | runs-on: runs-on,runner=8cpu-linux-x64,run-id=${{ github.run_id }},image=ubuntu22-full-x64,hdd=64
15 | steps:
16 | - name: Checkout code
17 | uses: actions/checkout@v4
18 | - name: Install Dapper
19 | run: |
20 | curl -sL https://releases.rancher.com/dapper/latest/dapper-$(uname -s)-$(uname -m) > /usr/local/bin/dapper
21 | chmod +x /usr/local/bin/dapper
22 | - name: Build
23 | run: dapper -f Dockerfile --target dapper make dapper-ci
24 | - name: Load kernel modules
25 | run: |
26 | sudo modprobe br_netfilter overlay
27 | - name: Test
28 | run: dapper -f Dockerfile --target dapper make test
29 | - name: Prepare Test Logs on Failure
30 | if: ${{ failure() }}
31 | run: |
32 | sudo cp -r /tmp/rke2-logs ~/rke2-logs
33 | sudo chown -R $USER:$USER ~/rke2-logs
34 | ls -la ~/rke2-logs
35 | - name: Upload Logs on Failure
36 | if: ${{ failure() }}
37 | uses: actions/upload-artifact@v4
38 | with:
39 | name: rke2-test-logs
40 | path: ~/rke2-logs/
41 | build-arm64:
42 | runs-on: runs-on,runner=8cpu-linux-arm64,run-id=${{ github.run_id }},image=ubuntu22-full-arm64,hdd=64
43 | steps:
44 | - name: Checkout code
45 | uses: actions/checkout@v4
46 | - name: Install Dapper
47 | run: |
48 | curl -sL https://releases.rancher.com/dapper/latest/dapper-$(uname -s)-$(uname -m) > /usr/local/bin/dapper
49 | chmod +x /usr/local/bin/dapper
50 | - name: Build
51 | run: |
52 | dapper -f Dockerfile --target dapper make dapper-ci
53 |
--------------------------------------------------------------------------------
/.github/workflows/spellcheck.yaml:
--------------------------------------------------------------------------------
1 | name: Codespell
2 | on:
3 | push:
4 | pull_request:
5 | branches:
6 | - master
7 | workflow_dispatch: {}
8 | jobs:
9 | spellcheck:
10 | name: Spell Check
11 | runs-on: ubuntu-24.04
12 | steps:
13 | - name: Checkout
14 | uses: actions/checkout@v4
15 | with:
16 | fetch-depth: 1
17 | - name: Set up Python 🐍
18 | uses: actions/setup-python@v5
19 | - name: Install codespell
20 | run: |
21 | python -m pip install --upgrade pip
22 | pip install codespell
23 | - name: Check spelling
24 | run: codespell --skip=.git,./vendor,./MAINTAINERS,go.mod,go.sum --check-filenames --ignore-regex=.te# --ignore-words=.codespellignore
25 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | name: Stalebot
2 | on:
3 | schedule:
4 | - cron: '0 20 * * *'
5 | workflow_dispatch:
6 | permissions:
7 | contents: write
8 | issues: write
9 | jobs:
10 | stalebot:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Close Stale Issues
14 | uses: actions/stale@v9.1.0
15 | with:
16 | # ensure PRs are exempt
17 | days-before-pr-stale: -1
18 |
19 | days-before-issue-stale: 45
20 | days-before-issue-close: 14
21 | stale-issue-label: status/stale
22 | exempt-all-milestones: true
23 | exempt-all-assignees: true
24 | exempt-issue-labels:
25 | internal,
26 | kind/bug,
27 | kind/bug-qa,
28 | kind/task,
29 | kind/feature,
30 | kind/enhancement,
31 | kind/design,
32 | kind/ci-improvements,
33 | kind/performance,
34 | kind/flaky-test,
35 | kind/documentation,
36 | kind/epic,
37 | kind/upstream-issue,
38 | priority/backlog,
39 | priority/critical-urgent,
40 | priority/important-longterm,
41 | priority/important-soon,
42 | priority/low,
43 | priority/medium,
44 | priority/high,
45 | priority/urgent,
46 | stale-issue-message: >
47 | This repository uses a bot to automatically label issues which have not had any activity (commit/comment/label)
48 | for 45 days. This helps us manage the community issues better. If the issue is still relevant, please add a comment to the
49 | issue so the bot can remove the label and we know it is still valid. If it is no longer relevant (or possibly fixed in the
50 | latest release), the bot will automatically close the issue in 14 days. Thank you for your contributions.
51 |
--------------------------------------------------------------------------------
/.github/workflows/unittest.yaml:
--------------------------------------------------------------------------------
1 | name: Unit Test Coverage
2 | on:
3 | push:
4 | paths-ignore:
5 | - "**.md"
6 | - "channels.yaml"
7 | - "install.sh"
8 | - "tests/**"
9 | - ".github/**"
10 | - "!.github/workflows/unittest.yaml"
11 | pull_request:
12 | paths-ignore:
13 | - "**.md"
14 | - "channels.yaml"
15 | - "install.sh"
16 | - "tests/**"
17 | - ".github/**"
18 | - "!.github/workflows/unittest.yaml"
19 |
20 | permissions:
21 | contents: read
22 |
23 | jobs:
24 | test:
25 | name: Unit Tests
26 | runs-on: ubuntu-latest
27 | timeout-minutes: 20
28 | steps:
29 | - name: Checkout
30 | uses: actions/checkout@v4
31 | with:
32 | fetch-depth: 1
33 | - name: Install Go
34 | uses: ./.github/actions/setup-go
35 | - name: Run Unit Tests
36 | run: |
37 | go test -coverpkg=./... -coverprofile=coverage.out ./pkg/... -run Unit
38 | go tool cover -func coverage.out
39 | - name: Upload Results To Codecov
40 | uses: codecov/codecov-action@v5
41 | with:
42 | token: ${{ secrets.CODECOV_TOKEN }}
43 | files: ./coverage.out
44 | flags: unittests # optional
45 | verbose: true # optional (default = false)
46 |
--------------------------------------------------------------------------------
/.github/workflows/updatecli.yml:
--------------------------------------------------------------------------------
1 | name: "Updatecli: Dependency Management"
2 |
3 | on:
4 | schedule:
5 | # Runs at 06 PM UTC
6 | - cron: '0 18 * * *'
7 | # Allows you to run this workflow manually from the Actions tab
8 | workflow_dispatch:
9 |
10 | permissions:
11 | contents: write
12 | issues: write
13 | pull-requests: write
14 |
15 | jobs:
16 | updatecli:
17 | runs-on: ubuntu-latest
18 | if: github.ref == 'refs/heads/master'
19 | steps:
20 | - name: Checkout
21 | uses: actions/checkout@v4
22 |
23 | - name: Install Go
24 | uses: actions/setup-go@v5
25 | with:
26 | go-version: 'stable'
27 | cache: false
28 |
29 | - name: Install Updatecli
30 | uses: updatecli/updatecli-action@v2
31 |
32 | - name: Delete leftover UpdateCLI branches
33 | run: |
34 | gh pr list \
35 | --search "is:closed is:pr head:updatecli_" \
36 | --json headRefName \
37 | --jq ".[].headRefName" | sort -u > closed_prs_branches.txt
38 | gh pr list \
39 | --search "is:open is:pr head:updatecli_" \
40 | --json headRefName \
41 | --jq ".[].headRefName" | sort -u > open_prs_branches.txt
42 | for branch in $(comm -23 closed_prs_branches.txt open_prs_branches.txt); do
43 | if (git ls-remote --exit-code --heads origin "$branch"); then
44 | echo "Deleting leftover UpdateCLI branch - $branch";
45 | git push origin --delete "$branch";
46 | fi
47 | done
48 | env:
49 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
50 |
51 | - name: Apply Updatecli
52 | # Never use '--debug' option, because it might leak the access tokens.
53 | run: "updatecli apply --clean --config ./updatecli/updatecli.d/ --values ./updatecli/values.yaml"
54 | env:
55 | UPDATECLI_GITHUB_ACTOR: ${{ github.actor }}
56 | UPDATECLI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
57 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /.dapper
2 | /.cache
3 | /bin
4 | /dist
5 | *.swp
6 | .idea
7 | .vscode
8 | /data
9 | /rke2
10 | /build
11 | .vagrant/
12 | .DS_Store
13 | go.work*
14 |
--------------------------------------------------------------------------------
/.golangci.json:
--------------------------------------------------------------------------------
1 | {
2 | "linters": {
3 | "disable-all": true,
4 | "enable": [
5 | "govet",
6 | "revive",
7 | "goimports",
8 | "misspell",
9 | "ineffassign",
10 | "gofmt"
11 | ]
12 | },
13 | "linters-settings": {
14 | "govet": {
15 | "check-shadowing": false
16 | }
17 | },
18 | "run": {
19 | "skip-files": [
20 | "/zz_generated_"
21 | ],
22 | "skip-dirs": [
23 | "bundle",
24 | "charts",
25 | "contrib",
26 | "developer-docs",
27 | "scripts",
28 | "docs"
29 | ],
30 | "timeout": "10m"
31 | },
32 | "issues": {
33 | "exclude-rules": [
34 | {
35 | "linters": "typecheck",
36 | "text": "imported but not used"
37 | },
38 | {
39 | "linters": "revive",
40 | "text": "should have comment"
41 | }
42 | ]
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/BUILDING.md:
--------------------------------------------------------------------------------
1 | # Building RKE2
2 |
3 | ## Prerequisites
4 |
5 | By default, RKE2 is built with Dapper which uses Docker. To build RKE2 you will need to install these packages:
6 | - bash
7 | - docker
8 | - gcc (CGO, don't ya know, if using `scripts/build` directly)
9 | - go (check the `go.mod` for which series of go, e.g. 1.14.x, 1.15.x, etc)
10 | - make
11 |
12 | ### Required for Running
13 | When running RKE2 you will also need to install these packages:
14 | - ca-certificates
15 |
16 | ## Building
17 |
18 | ```shell script
19 | # this will build inside of a container via dapper.
20 | # use `make build` to leverage host-local tooling
21 | make
22 | ```
23 |
24 | ## Running
25 |
26 | ### rke2 (dev-shell)
27 | To run locally in a container, there is a handy `make` target:
28 | ```shell script
29 | make dev-shell
30 | ```
31 |
32 | This will spin up a privileged container and setup the environment ready for you to invoke `./bin/rke2` at your leisure.
33 | Since the `rancher/rke2-runtime` image was built locally and likely not yet pushed, this, along with the airgap images,
34 | has been bind-mounted into the container ready to be imported into containerd on start-up.
35 |
36 | ### rke2 (generic)
37 |
38 | To run the built artifact(s) locally or on a remote host:
39 | - install prerequisites mentioned above
40 | - copy `./bin/rke2` to the path on your host
41 | - if not testing air-gapped, copy these (local) image tarballs to `/var/lib/rancher/rke2/agent/images/`:
42 | - `./build/images/rke2-runtime.tar`
43 | - `./build/images/rke2-kubernetes.tar`
44 | - if testing air-gapped, copy this (local + remote) image tarball to `/var/lib/rancher/rke2/agent/images/`:
45 | - `./build/images/rke2-airgap.tar`
46 | - run rke2 server: `rke2 server --token=test`
47 |
48 | ### kubectl
49 |
50 | It isn't obvious but `kubectl` will be installed and ready to use after starting up `rke2`. To use it you will need to:
51 | - `export KUBECONFIG=/etc/rancher/rke2/rke2.yaml`
52 | - `export PATH=/var/lib/rancher/rke2/bin:$PATH`
53 |
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @rancher/k3s
2 |
--------------------------------------------------------------------------------
/Dockerfile.docs:
--------------------------------------------------------------------------------
1 | FROM squidfunk/mkdocs-material
2 | RUN pip install mkdocs-markdownextradata-plugin
3 | RUN apk add -U git openssh
4 |
5 |
--------------------------------------------------------------------------------
/MAINTAINERS:
--------------------------------------------------------------------------------
1 | # The following is the list of current RKE2 maintainers
2 | # Github ID, Name, Email Address
3 |
4 | brandond, Brad Davidson, brad.davidson@suse.com
5 | briandowns, Brian Downs, brian.downs@suse.com
6 | brooksn, Brooks Newberry, brooks.newberry@suse.com
7 | caroline-suse-rancher, Caroline Davis, caroline.davis@suse.com
8 | cwayne18, Chris Wayne, chris.wayne@suse.com
9 | dereknola, Derek Nola, derek.nola@suse.com
10 | galal-hussein, Hussein Galal, hussein.galalabdelazizahmed@suse.com
11 | manuelbuil, Manuel Buil, mbuil@suse.com
12 | matttrach, Matt Trachier, matt.trachier@suse.com
13 | mdrahman-suse, MD Rahman, md.rahman@suse.com
14 | Oats87, Chris Kim, chris.kim@suse.com
15 | rancher-max, Max Ross, max.ross@suse.com
16 | rbrtbnfgl, Roberto Bonafiglia, roberto.bonafiglia@suse.com
17 | ShylajaDevadiga, Shylaja Devadiga, shylaja.devadiga@suse.com
18 | thomasferrandiz, Thomas Ferrandiz, thomas.ferrandiz@suse.com
19 | VestigeJ, Justin Janes, justin.janes@suse.com
20 |
--------------------------------------------------------------------------------
/bundle/lib/systemd/system/rke2-agent.env:
--------------------------------------------------------------------------------
1 | HOME=/root
2 |
--------------------------------------------------------------------------------
/bundle/lib/systemd/system/rke2-agent.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Rancher Kubernetes Engine v2 (agent)
3 | Documentation=https://github.com/rancher/rke2#readme
4 | Wants=network-online.target
5 | After=network-online.target
6 | Conflicts=rke2-server.service
7 |
8 | [Install]
9 | WantedBy=multi-user.target
10 |
11 | [Service]
12 | Type=notify
13 | EnvironmentFile=-/etc/default/%N
14 | EnvironmentFile=-/etc/sysconfig/%N
15 | EnvironmentFile=-/usr/local/lib/systemd/system/%N.env
16 | KillMode=process
17 | Delegate=yes
18 | LimitNOFILE=1048576
19 | LimitNPROC=infinity
20 | LimitCORE=infinity
21 | TasksMax=infinity
22 | TimeoutStartSec=0
23 | Restart=always
24 | RestartSec=5s
25 | ExecStartPre=/bin/sh -xc '! /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service'
26 | ExecStartPre=-/sbin/modprobe br_netfilter
27 | ExecStartPre=-/sbin/modprobe overlay
28 | ExecStart=/usr/local/bin/rke2 agent
29 | ExecStopPost=-/bin/sh -c "systemd-cgls /system.slice/%n | grep -Eo '[0-9]+ (containerd|kubelet)' | awk '{print $1}' | xargs -r kill"
30 |
--------------------------------------------------------------------------------
/bundle/lib/systemd/system/rke2-server.env:
--------------------------------------------------------------------------------
1 | HOME=/root
2 |
--------------------------------------------------------------------------------
/bundle/lib/systemd/system/rke2-server.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Rancher Kubernetes Engine v2 (server)
3 | Documentation=https://github.com/rancher/rke2#readme
4 | Wants=network-online.target
5 | After=network-online.target
6 | Conflicts=rke2-agent.service
7 |
8 | [Install]
9 | WantedBy=multi-user.target
10 |
11 | [Service]
12 | Type=notify
13 | EnvironmentFile=-/etc/default/%N
14 | EnvironmentFile=-/etc/sysconfig/%N
15 | EnvironmentFile=-/usr/local/lib/systemd/system/%N.env
16 | KillMode=process
17 | Delegate=yes
18 | LimitNOFILE=1048576
19 | LimitNPROC=infinity
20 | LimitCORE=infinity
21 | TasksMax=infinity
22 | TimeoutStartSec=0
23 | Restart=always
24 | RestartSec=5s
25 | ExecStartPre=/bin/sh -xc '! /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service'
26 | ExecStartPre=-/sbin/modprobe br_netfilter
27 | ExecStartPre=-/sbin/modprobe overlay
28 | ExecStart=/usr/local/bin/rke2 server
29 | ExecStopPost=-/bin/sh -c "systemd-cgls /system.slice/%n | grep -Eo '[0-9]+ (containerd|kubelet)' | awk '{print $1}' | xargs -r kill"
30 |
--------------------------------------------------------------------------------
/bundle/share/rke2/rke2-cis-sysctl.conf:
--------------------------------------------------------------------------------
1 | vm.panic_on_oom=0
2 | vm.overcommit_memory=1
3 | kernel.panic=10
4 | kernel.panic_on_oops=1
5 |
--------------------------------------------------------------------------------
/channels.yaml:
--------------------------------------------------------------------------------
1 | channels:
2 | - name: stable
3 | latest: v1.32.5+rke2r1
4 | - name: latest
5 | latestRegexp: .*
6 | excludeRegexp: (^[^+]+-|v1\.25\.5\+rke2r1|v1\.26\.0\+rke2r1)
7 | - name: v1.18
8 | latestRegexp: v1\.18\..*
9 | excludeRegexp: ^[^+]+-
10 | - name: v1.19
11 | latestRegexp: v1\.19\..*
12 | excludeRegexp: (^[^+]+-|v1\.19\.13\+rke2r1)
13 | - name: testing
14 | latestRegexp: -(alpha|beta|rc)
15 | - name: v1.20
16 | latestRegexp: v1\.20\..*
17 | excludeRegexp: (^[^+]+-|v1\.20\.9\+rke2r1)
18 | - name: v1.21
19 | latestRegexp: v1\.21\..*
20 | excludeRegexp: (^[^+]+-|v1\.21\.3\+rke2r2)
21 | - name: v1.22
22 | latestRegexp: v1\.22\..*
23 | excludeRegexp: ^[^+]+-
24 | - name: v1.23
25 | latestRegexp: v1\.23\..*
26 | excludeRegexp: ^[^+]+-
27 | - name: v1.24
28 | latestRegexp: v1\.24\..*
29 | excludeRegexp: (^[^+]+-|v1\.24\.9\+rke2r1)
30 | - name: v1.25
31 | latestRegexp: v1\.25\..*
32 | excludeRegexp: (^[^+]+-|v1\.25\.5\+rke2r1)
33 | - name: v1.26
34 | latestRegexp: v1\.26\..*
35 | excludeRegexp: (^[^+]+-|v1\.26\.0\+rke2r1)
36 | - name: v1.27
37 | latestRegexp: v1\.27\..*
38 | excludeRegexp: ^[^+]+-
39 | - name: v1.28
40 | latestRegexp: v1\.28\..*
41 | excludeRegexp: ^[^+]+-
42 | - name: v1.29
43 | latestRegexp: v1\.29\..*
44 | excludeRegexp: ^[^+]+-
45 | - name: v1.30
46 | latestRegexp: v1\.30\..*
47 | excludeRegexp: ^[^+]+-
48 | - name: v1.31
49 | latestRegexp: v1\.31\..*
50 | excludeRegexp: ^[^+]+-
51 | - name: v1.32
52 | latestRegexp: v1\.32\..*
53 | excludeRegexp: ^[^+]+-
54 | - name: v1.33
55 | latestRegexp: v1\.33\..*
56 | excludeRegexp: ^[^+]+-
57 | github:
58 | owner: rancher
59 | repo: rke2
60 | redirectBase: https://github.com/rancher/rke2/releases/tag/
61 |
--------------------------------------------------------------------------------
/charts/README.md:
--------------------------------------------------------------------------------
1 | # charts
2 |
3 | Place manifests or charts in this directory so that they will end up in the /charts directory in the rke2-runtime image.
4 |
5 | ---
6 |
7 | See the `charts` target in the `Dockerfile` at the root of this repository for an example of how the `./build-chart.sh` script works.
8 |
--------------------------------------------------------------------------------
/charts/build-chart.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -eux -o pipefail
4 |
5 | : "${KUBERNETES_VERSION:=v0.0.0-0}"
6 | : "${CHART_FILE?required}"
7 | : "${CHART_NAME:="$(basename "${CHART_FILE%%.yaml}")"}"
8 | : "${CHART_PACKAGE:="${CHART_NAME%%-crd}"}"
9 | : "${TAR_OPTS:=--owner=0 --group=0 --mode=gou-s+r --numeric-owner --no-acls --no-selinux --no-xattrs}"
10 | : "${CHART_URL:="${CHART_REPO:="https://rke2-charts.rancher.io"}/assets/${CHART_PACKAGE}/${CHART_NAME}-${CHART_VERSION:="0.0.0"}.tgz"}"
11 | : "${CHART_TMP:=$(mktemp --suffix .tar.gz)}"
12 | : "${YAML_TMP:=$(mktemp --suffix .yaml)}"
13 |
14 | cleanup() {
15 | exit_code=$?
16 | trap - EXIT INT
17 | rm -rf ${CHART_TMP} ${CHART_TMP/tar.gz/tar} ${YAML_TMP}
18 | exit ${exit_code}
19 | }
20 | trap cleanup EXIT INT
21 |
22 | if [ "$CHART_VERSION" == "0.0.0" ]; then
23 | echo "# ${CHART_NAME} has been removed" > "${CHART_FILE}"
24 | exit
25 | fi
26 |
27 | curl -fsSL "${CHART_URL}" -o "${CHART_TMP}"
28 | gunzip ${CHART_TMP}
29 |
30 | # Extract out Chart.yaml, inject a version requirement and bundle-id annotation, and delete/replace the one in the original tarball
31 | tar -xOf ${CHART_TMP/.gz/} ${CHART_NAME}/Chart.yaml > ${YAML_TMP}
32 | yq -i e ".kubeVersion = \">= ${KUBERNETES_VERSION}\" | .annotations.\"fleet.cattle.io/bundle-id\" = \"rke2\"" ${YAML_TMP}
33 | tar --delete -b 8192 -f ${CHART_TMP/.gz/} ${CHART_NAME}/Chart.yaml
34 | tar --transform="s|.*|${CHART_NAME}/Chart.yaml|" ${TAR_OPTS} -vrf ${CHART_TMP/.gz/} ${YAML_TMP}
35 |
36 | pigz -11 ${CHART_TMP/.gz/}
37 |
38 | cat <<-EOF > "${CHART_FILE}"
39 | apiVersion: helm.cattle.io/v1
40 | kind: HelmChart
41 | metadata:
42 | name: "${CHART_NAME}"
43 | namespace: "${CHART_NAMESPACE:="kube-system"}"
44 | annotations:
45 | helm.cattle.io/chart-url: "${CHART_URL}"
46 | rke2.cattle.io/inject-cluster-config: "true"
47 | spec:
48 | bootstrap: ${CHART_BOOTSTRAP:=false}
49 | chartContent: $(base64 -w0 < "${CHART_TMP}")
50 | EOF
51 |
--------------------------------------------------------------------------------
/charts/build-charts.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -eux -o pipefail
4 | CHARTS_DIR=$(dirname $0)
5 |
6 | while read version filename bootstrap; do
7 | CHART_VERSION=$version CHART_FILE=$CHARTS_DIR/$(basename $filename) CHART_BOOTSTRAP=$bootstrap $CHARTS_DIR/build-chart.sh
8 | done <<< $(yq e '.charts[] | [.version, .filename, .bootstrap] | join(" ")' $CHARTS_DIR/chart_versions.yaml)
9 |
--------------------------------------------------------------------------------
/charts/chart_versions.yaml:
--------------------------------------------------------------------------------
1 | charts:
2 | - version: 1.17.301
3 | filename: /charts/rke2-cilium.yaml
4 | bootstrap: true
5 | - version: v3.30.0-build2025051500
6 | filename: /charts/rke2-canal.yaml
7 | bootstrap: true
8 | - version: v3.30.001
9 | filename: /charts/rke2-calico.yaml
10 | bootstrap: true
11 | - version: v3.30.001
12 | filename: /charts/rke2-calico-crd.yaml
13 | bootstrap: true
14 | - version: 1.42.000
15 | filename: /charts/rke2-coredns.yaml
16 | bootstrap: true
17 | - version: 4.12.103
18 | filename: /charts/rke2-ingress-nginx.yaml
19 | bootstrap: false
20 | - version: 34.2.002
21 | filename: /charts/rke2-traefik.yaml
22 | bootstrap: false
23 | - version: 34.2.002
24 | filename: /charts/rke2-traefik-crd.yaml
25 | bootstrap: false
26 | - version: 3.12.201
27 | filename: /charts/rke2-metrics-server.yaml
28 | bootstrap: false
29 | - version: v4.2.002
30 | filename: /charts/rke2-multus.yaml
31 | bootstrap: true
32 | - version: v0.26.701
33 | filename: /charts/rke2-flannel.yaml
34 | bootstrap: true
35 | - version: 1.11.000
36 | filename: /charts/rancher-vsphere-cpi.yaml
37 | bootstrap: true
38 | - version: 3.3.1-rancher1000
39 | filename: /charts/rancher-vsphere-csi.yaml
40 | bootstrap: true
41 | - version: 0.2.1000
42 | filename: /charts/harvester-cloud-provider.yaml
43 | bootstrap: true
44 | - version: 0.1.2300
45 | filename: /charts/harvester-csi-driver.yaml
46 | bootstrap: true
47 | - version: 4.0.002
48 | filename: /charts/rke2-snapshot-controller.yaml
49 | bootstrap: false
50 | - version: 4.0.002
51 | filename: /charts/rke2-snapshot-controller-crd.yaml
52 | bootstrap: false
53 | - version: 0.0.0 # this empty chart addon can be removed in v1.34, after we have shipped two minor versions that have never included it.
54 | filename: /charts/rke2-snapshot-validation-webhook.yaml
55 | bootstrap: false
56 | - version: 0.1.000
57 | filename: /charts/rke2-runtimeclasses.yaml
58 | bootstrap: false
59 |
--------------------------------------------------------------------------------
/contrib/custom-image-kubelet/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 160
3 |
--------------------------------------------------------------------------------
/contrib/custom-image-kubelet/README.md:
--------------------------------------------------------------------------------
1 | RKE2 Image / Kubelet Override
2 | =====
3 |
4 | This repo contains a python script that will generate configuration files and extract binaries from a Kubernetes release manifest YAML.
5 | Releases should be in the format described by [releases.distro.eks.amazonaws.com/v1alpha1 Release](https://github.com/aws/eks-distro-build-tooling/blob/main/release/config/crds/distro.eks.amazonaws.com_releases.yaml).
6 | One example of a vendor providing releases in this format is [EKS Distro](https://github.com/aws/eks-distro#releases).
7 |
8 | The resulting configuration will override binaries and images for the following components:
9 | * coredns
10 | * etcd
11 | * kube-apiserver
12 | * kube-controller-manager
13 | * kube-proxy
14 | * kube-scheduler
15 | * kubelet
16 | * metrics-server
17 | * pause
18 |
19 | The remaining RKE2 components include:
20 | * Helm Controller
21 | * Calico+Flannel CNI
22 | * Nginx Ingress
23 |
24 | Requirements
25 | ----
26 |
27 | 1. RKE2 v1.18.13+rke2r1 or newer (with image/kubelet override support)
28 | 1. Python 3
29 | 1. Access to AWS ECR (either via AWS CLI IAM keys, or EC2 instance role)
30 | *This is only necessary if the replacement RKE2 images are stored in ECR*
31 |
32 | Installing
33 | -----
34 |
35 | On an Ubuntu host:
36 |
37 | ```bash
38 | curl -sfL https://get.rke2.io | INSTALL_RKE2_VERSION=v1.18.13+rke2r1 sh -
39 |
40 | sudo apt update
41 | sudo apt install -y python3-venv python3-wheel python3-pip
42 | python3 -m venv ~/python3
43 | . ~/python3/bin/activate
44 |
45 | git clone --depth 1 git@github.com:rancher/rke2.git
46 | cd rke2/contrib/custom-image-kubelet
47 |
48 | pip install -r requirements.txt
49 | ```
50 |
51 | Running
52 | -----
53 |
54 | ```bash
55 | sudo ~/python3/bin/python genconfig.py --release-url https://X/kubernetes-1-18/kubernetes-1-18.yaml
56 |
57 | systemctl start rke2-server
58 | ```
59 |
60 | You may also generate the files on an administrative host that can then be embedded into deployment pipelines or copied to multiple hosts:
61 |
62 | ```bash
63 | ./genconfig.py --prefix ./kubernetes-1-18/ --release-url https://X/kubernetes-1-18/kubernetes-1-18.yaml
64 | ```
65 |
66 | Example Output
67 | -----
68 |
69 | ```
70 | I Got Release: kubernetes-1-18-9
71 | I Writing HelmChartConfig to /var/lib/rancher/rke2/server/manifests/rke2-kube-proxy-config.yaml
72 | I Writing HelmChartConfig to /var/lib/rancher/rke2/server/manifests/rke2-coredns-config.yaml
73 | I Writing HelmChartConfig to /var/lib/rancher/rke2/server/manifests/rke2-metrics-server-config.yaml
74 | I Extracting files from https://X/kubernetes-1-18/releases/1/artifacts/kubernetes/v1.18.9/kubernetes-node-linux-amd64.tar.gz
75 | I Extracting /var/lib/rancher/rke2/opt/bin/kube-proxy
76 | I Extracting /var/lib/rancher/rke2/opt/bin/kubelet
77 | I Extracting /var/lib/rancher/rke2/opt/bin/kubeadm
78 | I Getting auth tokens for ['X'] in us-east-1
79 | I Writing credentials to /etc/rancher/rke2/registries.yaml
80 | I Writing config to /etc/rancher/rke2/config.yaml
81 | ```
82 |
--------------------------------------------------------------------------------
/contrib/custom-image-kubelet/requirements.txt:
--------------------------------------------------------------------------------
1 | boto3
2 | click
3 | pyyaml
4 | requests
5 |
--------------------------------------------------------------------------------
/contrib/gotests_templates/call.tmpl:
--------------------------------------------------------------------------------
1 | {{define "call"}}{{with .Receiver}}{{if not .IsStruct}}tt.{{end}}{{Receiver .}}.{{end}}{{.Name}}({{range $i, $el := .Parameters}}{{if $i}}, {{end}}{{if not .IsWriter}}tt.args.{{end}}{{Param .}}{{if .Type.IsVariadic}}...{{end}}{{end}}){{end}}
2 |
--------------------------------------------------------------------------------
/contrib/gotests_templates/header.tmpl:
--------------------------------------------------------------------------------
1 | {{define "header"}}
2 | {{range .Comments}}{{.}}
3 | {{end -}}
4 | package {{.Package}}
5 |
6 | import (
7 | {{range .Imports}}{{.Name}} {{.Path}}
8 | {{end}}
9 | )
10 | {{end}}
11 |
--------------------------------------------------------------------------------
/contrib/gotests_templates/inline.tmpl:
--------------------------------------------------------------------------------
1 | {{define "inline"}} {{template "call" .}} {{end}}
2 |
--------------------------------------------------------------------------------
/contrib/gotests_templates/inputs.tmpl:
--------------------------------------------------------------------------------
1 | {{define "inputs"}}{{$f := .}}{{if not .Subtests}}tt.name, {{end}}{{if $f.PrintInputs}}{{range $f.Parameters}}tt.args.{{Param .}}, {{end}}{{end}}{{end}}
2 |
--------------------------------------------------------------------------------
/contrib/gotests_templates/message.tmpl:
--------------------------------------------------------------------------------
1 | {{define "message" -}}
2 | {{if not .Subtests}}%q. {{end}}{{with .Receiver}}{{.Type.Value}}.{{end}}{{.Name}}({{if .PrintInputs}}{{range $i, $el := .Parameters}}{{if $i}}, {{end}}%v{{end}}{{end}})
3 | {{- end}}
4 |
--------------------------------------------------------------------------------
/contrib/gotests_templates/results.tmpl:
--------------------------------------------------------------------------------
1 | {{define "results"}} {{range $i, $el := .Results}}{{if $i}}, {{end}}{{Got .}}{{end}}{{if .ReturnsError}}, err{{end}} {{if or .Results .ReturnsError}} := {{end}} {{end}}
2 |
--------------------------------------------------------------------------------
/developer-docs/about_rke2_docs.md:
--------------------------------------------------------------------------------
1 | # About RKE2 docs
2 |
3 | The RKE2 git repository currently holds two kinds of documentation:
4 |
5 | - RKE2 User Docs
6 | - RKE2 Developer Docs
7 |
8 | There are more differences than just their audiences, and this document elaborates on them.
9 |
10 | ## User docs
11 |
12 | As its name suggests, the main target for this documentation is Rancher RKE2 users. Covering RKE2 topics such as architecture, installation and upgrade process, and security from an end-user perspective.
13 |
14 | The markdown files in the `docs/` directory within this repository are the documentation's source. These documents are processed using [mkdocs](https://www.mkdocs.org/) and served as a documentation website.
15 |
16 | Since the documents use a specific markdown superset, [pymdown](https://facelessuser.github.io/pymdown-extensions/), it is preferred to process them using `mkdocs` beforehand to have a better understanding. However, any markdown preview tool should be able to render them.
17 |
18 | To serve the RKE2 user docs website locally, one can either:
19 |
20 | 1. Run `make serve-docs` [Makefile](../Makefile) target. This will start a docker container locally with all the required configurations and serve them on port `8000`.
21 | 2. Run `mkdocs serve` in the local environment. This will start the mkdocs server locally, exposing port `8000`. Since the said tool is written in [python3](https://www.python.org/), it requires installing its interpreter and the following packages beforehand `mkdocs`, `mkdocs-material`, `mkdocs-markdownextradata-plugin`, and `pymdown-extensions`.
22 |
23 | Worth noting that the second option should only be used whenever running a docker container locally does not work correctly (i.e., working on non-Linux OS's or needing to deal with shared mounts).
24 |
25 | ## Developer docs
26 |
27 | Like this file, the target audience for these documents is the RKE2 developers and contributors. The topics may not hold a specific order, technicalities may come along, and are focused on transmitting internal processes around RKE2.
28 |
29 | The developer docs are the markdown files in this repository's `developer-docs/` directory. These files are intended to be read using any markdown preview tool, with GitHub's web view being the default one, so no enhanced versions of this markup language are allowed. The only exception to this rule is usage of [embedded mermaid diagrams supported by Github](https://github.blog/2022-02-14-include-diagrams-markdown-files-mermaid/).
30 |
31 | As hinted in the last section, the diagrams within the developer docs are written in the markdown-like [Mermaid](https://mermaidjs.github.io/) syntax and held in code blocks with the ```mermaid
language specifier. These diagrams can be created and edited with the help of the [Mermaid live editor](https://mermaid.live/); then, it is a matter of copying and pasting the result in a markdown file.
32 |
--------------------------------------------------------------------------------
/developer-docs/k3s.md:
--------------------------------------------------------------------------------
1 | At the heart of RKE2 is the embedded K3s engine which functions as a
2 | supervisor for the kubelet and containerd processes. The K3s engine also
3 | provides AddOn and Helm controllers that RKE2 leverages. So, RKE2 depends on K3s,
4 | but what does that look like from version to version? It is not yet as simple
5 | as 1.20.7+rke2r1 → 1.20.7+k3s1, but starting with the release-1.22 branch
6 | it should be.
7 |
8 | Until then, here is a handy table:
9 |
10 | | RKE2 Branch | K3s Branch | Comments |
11 | |-------------------------------------------------------------|-----------------------------------------------------------|---|
12 | | [release-1.18](https://github.com/rancher/rke2/tree/release-1.18) | [release-1.19](https://github.com/k3s-io/k3s/tree/release-1.19) | Making k3s an embeddable engine required changes developed after release-1.18 was branched. |
13 | | [release-1.19](https://github.com/rancher/rke2/tree/release-1.19) | [release-1.19](https://github.com/k3s-io/k3s/tree/release-1.19) | RKE2 development stayed on 1.18 for a long time, essentially jumping from 1.18 to 1.20 with both release-1.18 and release-1.19 forked off master close to each other. |
14 | | [release-1.20](https://github.com/rancher/rke2/tree/release-1.20) | [engine-1.21](https://github.com/k3s-io/k3s/tree/engine-1.21) | The K3s engine-1.21 branch was forked from K3s master just before master was updated to Kubernetes 1.22, and contains critical changes necessary to support RKE2 on Windows. |
15 | | [release-1.21](https://github.com/rancher/rke2/tree/release-1.21) | [engine-1.21](https://github.com/k3s-io/k3s/tree/engine-1.21) | Same K3s upstream as the RKE2 release-1.20 branch. |
16 | | [release-1.22](https://github.com/rancher/rke2/tree/release-1.22) | [release-1.22](https://github.com/k3s-io/k3s/tree/release-1.22) | We plan to better align the K3s and RKE2 release-1.22 branches, when they are forked. |
17 | | [master](https://github.com/rancher/rke2/tree/master) | [master](https://github.com/k3s-io/k3s/tree/master) | Rolling commit from K3s master |
18 |
--------------------------------------------------------------------------------
/developer-docs/upgrading_dependencies.md:
--------------------------------------------------------------------------------
1 | # Upgrade Image Process
2 |
3 | From time to time we need to update the images that RKE2 depends on. This document serves as a how-to for that process. The following steps are laid out in order.
4 |
5 | ## Update Image
6 |
7 | Create a new release in the image repository (e.g. [image-build-etcd](https://github.com/rancher/image-build-etcd)). This is done by specifying the tag version you want built from whatever upstream this repo is using. An image will be built and pushed to Docker Hub.
8 |
9 | ## Update RKE2
10 |
11 | The following example files have references that will need to be updated in the respective locations for etcd. Replace the found version with the desired version.
12 |
13 | * build-images: `${REGISTRY}/rancher/hardened-etcd:${ETCD_VERSION}-build20220413`
14 | * scripts/version.sh: `ETCD_VERSION=${ETCD_VERSION:-v3.4.13-k3s1}`
15 |
16 | Some images may include a build date as part of the tag in format `-buildYYYYmmdd`. Trivy image scans may periodically fail as vulnerabilities are found in the base operating system. Re-tagging an image with the current build date should force an update of the base operating system and may help to resolve vulnerabilities found in image scans.
17 |
--------------------------------------------------------------------------------
/developer-docs/upgrading_go.md:
--------------------------------------------------------------------------------
1 | # Upgrade Go Image
2 |
3 |
--------------------------------------------------------------------------------
/developer-docs/upgrading_rke2.md:
--------------------------------------------------------------------------------
1 | # Upgrade RKE2 Process
2 |
3 | The steps which the user should follow to automatically upgrade rke2 are very well described in the [docs](https://docs.rke2.io/upgrade/automated_upgrade/). This document goes a bit deeper into what happens behind the scenes.
4 |
5 | ## System upgrade controller
6 |
7 | As described in the upper link, a system-upgrade-controller is used to upgrade the cluster. The upgrade procedure can be adjusted based on the configuration plan, which is fed to the system using a CR. The controller will read that CR and create a job that will execute the upgrade. Normally, the job is using the image `rancher/rke2-upgrade`, which is generated by a project with the same name.
8 |
9 | ## rke2-upgrade project
10 |
11 | The [rke2-upgrade project](https://github.com/rancher/rke2-upgrade) basically generates an image with three important pieces:
12 | 1 - The new rke2 binary based on the version set in the plan
13 | 2 - The new kubectl binary based on the version set in the plan
14 | 3 - The upgrade.sh script
15 |
16 | That script replaces the rke2 binary and kills the current rke2 process. That way, systemd (or others) will restart the service now using the correct upgraded rke2 binary.
17 |
18 | ## rke2 binary restarting
19 |
20 | The rke2 binary will do as it does on every startup of rke2. It extracts the manifests of the runtime image into the /var/lib/rancher/rke2/server/manifests dir, replacing the existing ones coming from an old rke2 version, with the new ones. The [k3s deploy controller](https://github.com/k3s-io/k3s/blob/master/pkg/deploy/controller.go) will loop over them and apply them to the cluster creating an Addon resource. These will happen to all manifests except for:
21 |
22 | * Manifests that refer to an explicitly disabled component (e.g. via the --disable flag in the cli). In this case, the controller tries to uninstall it and removes the manifest
23 | * Manifests that refer to a component that should be skipped. For a component to be skipped, we just need to add the suffix `.skip` to the file name of the manifest (e.g. rke2-canal.yaml.skip). In this case, the controller will ignore this component and will not try to upgrade it. If you are running several control-plane nodes, remember to add the `.skip` suffix to the file in all control-plane nodes.
24 |
25 | The skips procedure can be very useful for cases such as a user switching to the enterprise version of a cni plugin. In this case, the cni plugin vendor will take care of upgrades.
26 |
--------------------------------------------------------------------------------
/docs/adrs/001-record-architecture-decisions.md:
--------------------------------------------------------------------------------
1 | # 1. Record architecture decisions
2 |
3 | Date: 2022-01-26
4 |
5 | ## Status
6 |
7 | Accepted
8 |
9 | ## Context
10 |
11 | We need to record the architectural decisions made on this project.
12 |
13 | ## Decision
14 |
15 | We will use Architecture Decision Records, as [described by Michael Nygard](http://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions).
16 |
17 | ## Consequences
18 |
19 | See Michael Nygard's article, linked above. For a lightweight ADR toolset, see Nat Pryce's [adr-tools](https://github.com/npryce/adr-tools).
20 |
21 |
--------------------------------------------------------------------------------
/docs/adrs/002-rke2-rpm-support.md:
--------------------------------------------------------------------------------
1 | # 2. RPM support for RKE2
2 |
3 | Date: 2022-01-20
4 |
5 | ## Status
6 |
7 | Accepted
8 |
9 | ## Context
10 |
11 | RKE2 publishes RPMs for distribution of RKE2 through the https://github.com/rancher/rke2-packaging repository. These RPMs are built using automated calls to `rpmbuild` and corresponding GPG signing/publishing plugins, and publish RPMs to the `rpm.rancher.io`/`rpm-testing.rancher.io` S3-backed buckets.
12 |
13 | ## Decision
14 |
15 | Until a more robust RPM building/mechanism is established for RKE2, we will not add any new platforms for RPM publishing beyond the existing CentOS/RHEL 7 and 8 RPMs that are published. We will publish selinux policy RPMs for new platforms as needed, and ensure the selinux RPMs are compatible with the tarball installation method for the platform in question.
16 |
17 | This decision can be re-evaluated in the future if a more robust RPM publishing technique/platform is developed/made available.
18 |
19 | ## Consequences
20 |
21 | The only supported installation method for all platforms except CentOS 7/8 with selinux support will be a combination of the use of a tarball install in conjunction with an selinux policy RPM.
22 |
23 |
--------------------------------------------------------------------------------
/docs/adrs/003-rke2-rpm-sle-support.md:
--------------------------------------------------------------------------------
1 | # 3. RPM SLE support for RKE2
2 |
3 | Date: 2022-01-27
4 |
5 | ## Status
6 |
7 | Accepted
8 |
9 | ## Context
10 |
11 | RKE2 publishes RPMs for SUSE OS distributions, the rpms will be installed via transactional updates if exists, this will enable two things, the installation of rke2-selinux and the extraction of the binaries in the right `/usr` paths instead of the alternative tarball installation which will extract the binaries in `/opt`.
12 |
13 | ## Decision
14 |
15 | We will add support for RPM publishing for SUSE OS distributions in rke2-packaging repo, the `rke2-server` and `rke2-agent` packages will require installing `rke2-common` which will in turn install the `rke2-selinux` RPM package which is already supported for microos.
16 |
17 | The decision will involve defaulting to the tarball installation for SUSE OS distribution in the installation script to prevent breaking current compatibility with users who currently installed via tarball installation, the RPM installation will be allowed via passing the environment variable `RKE2_INSTALL_METHOD=rpm` to the install script.
18 |
19 | The installation script will also have measures to prevent installation switching from RPM to tarball installation and vice versa, and finally the installation via the tarball method will not allow SELinux to be enabled unless it is enabled manually.
20 |
21 | ## Consequences
22 |
23 | The decision will result in some drawbacks:
24 |
25 | - The decision will not enable RPM installation by default.
26 | - The tarball installation will not enable SELINUX by default.
--------------------------------------------------------------------------------
/docs/adrs/004-rke2-rc-versioning.md:
--------------------------------------------------------------------------------
1 | # 4. Release Candidate 'RC' Version Format
2 |
3 | Date: 2022-07-14
4 |
5 | ## Status
6 |
7 | Rejected
8 |
9 | ## Question
10 |
11 | Should we remove the `-rc` text from the prerelease section of RKE2 releases so that we can reduce the overall steps necessary in releasing?
12 |
13 | ## Context
14 |
15 | Our workflow for generating Release Candidates for RKE2 is the same as our workflow for generating GA RKE2 releases,
16 | with the exception of the "-rc" text in the prerelease section of the git tag.
17 |
18 | ### Strengths
19 |
20 | - reduce CI time by producing one less release
21 | - reduce manual effort by producing one less release (no need to update KDM)
22 | - reduce the time from a release being approved to it being published
23 | - improve reliability of the artifacts by promoting the artifacts tested rather than rebuilding them
24 |
25 | ### Weaknesses
26 |
27 | - if we don't rebuild hardened images, we wouldn't have a way to know the version number of the release candidate
28 | - testing would be more difficult because we wouldn't know the difference between an "rc-1" and an "rc-2"
29 | - GitHub won't let you generate duplicate releases/tags
30 | - we would either have to delete the release and move the tag (essentially removing the rc version)
31 | - or figure out some other way to version the release candidates
32 |
33 | ### Opportunities
34 |
35 | - normalizing the process would make it easier to automate
36 | - SLSA compliance states that certification "is not transitive" predicating artifact orientation
37 |
38 | ### Threats
39 |
40 | - a customer might mistake a RC artifact for a GA artifact
41 |
42 | ## Decision
43 |
44 | We need to be able to quickly reference the differences between a release candidate and a general availability (GA) release,
45 | and the risk that a user might mistake an RC artifact for a GA artifact is too high for the benefits provided.
46 |
47 | ## Consequences
48 |
49 | We will (continue to) place `-rc` in the prerelease section of the version number for RKE2 tags and releases.
50 | For example : `v1.24.3-rc1+rke2r1`
51 |
--------------------------------------------------------------------------------
/docs/adrs/004-servicelb-support.md:
--------------------------------------------------------------------------------
1 | # Support for ServiceLB Load-Balancer Controller in RKE2
2 |
3 | Date: 2022-09-30
4 |
5 | ## Status
6 |
7 | Accepted
8 |
9 | ## Context
10 |
11 | RKE2 does not currently bundle a load-balancer controller. Users that want to deploy Services of type
12 | LoadBalancer must deploy a real cloud-provider chart, or use an alternative such as MetalLB or Kube-VIP.
13 |
14 | ## Decision
15 |
16 | Taking advantage of recent changes to [move ServiceLB into the K3s stub
17 | cloud-provider](https://github.com/k3s-io/k3s/blob/master/docs/adrs/servicelb-ccm.md), we will allow RKE2 to run ServiceLB as
 18 | part of a proper cloud controller integration. This will require adding CLI flags to enable servicelb, as well as exposing
19 | existing K3s flags to configure its namespace. Running servicelb will be opt-in, behind a new flag, to avoid changing
20 | behavior on existing clusters.
21 |
22 | ## Consequences
23 |
24 | * RKE2 uses less resources when ServiceLB is disabled, as several core controllers are no longer started unconditionally.
25 | * The `--disable-cloud-controller` flag now disables the CCM's `cloud-node` and `cloud-node-lifecycle` controllers that were
26 | historically the only supported controllers.
27 | * The `--enable-servicelb` flag now prevents `--disable=servicelb` from being passed in to K3s, which in turn enables the CCM's
28 | `service` controller.
29 | * If the cloud-controller and servicelb are both disabled, the cloud-controller-manager is not run at all.
30 | * The K3s `--servicelb-namespace` flag is now passed through instead of dropped.
31 |
--------------------------------------------------------------------------------
/docs/adrs/006-add-flannel-support.md:
--------------------------------------------------------------------------------
1 | # Flannel support in rke2
2 |
3 | ## Established
4 | ## Revisit by
5 | ## Status
6 |
7 | Accepted
8 |
9 | ## Context
10 |
11 | Currently RKE2-windows users can only deploy with one supported CNI plugin: Calico. In the last weeks, we have had several users complaining about a TCP Reset issue that breaks their applications. For example, applications that rely on a stable connectivity towards external nodes, like Jenkins Server, are impossible to operate if TCP Resets appears.
12 |
13 | The TCP Reset issue is known by Tigera (the company behind Calico) and documented [here](https://docs.tigera.io/calico/latest/getting-started/kubernetes/windows-calico/limitations#pod-to-pod-connections-are-dropped-with-tcp-reset-packets)
14 | As described in that doc, the issue should only appear when using network policies. Customers state that when using network policies the problem is constant but even without network policies, the TCP Resets appear from time to time with no clear trigger for it.
15 |
 16 | It is hard to understand where those TCP Resets are coming from because the code creating the virtual network infrastructure in Windows is closed source and the documentation is very poor, not really explaining the details, so we can only speculate. I think the problem is that Windows creates a TCP Proxy in their VMSwitches when using VFP (Virtual Filtering Platform) and whenever a VFP rule affecting the pod changes, the TCP Proxy is recreated and thus all TCP connections are reset. It is probably part of the design of VFP and thus complicated to really work around it. Calico uses VFP to do the network policies.
17 |
18 | We have been communicating all this to Tigera and Microsoft but so far, the resolution to the problem does not seem to be close at hand. It feels like a complicated problem created by the VFP design.
19 |
 20 | Moreover, some of these users were previous users of RKE1-windows with flannel and they were happy with it. They are asking to have the possibility to continue with flannel in RKE2.
21 |
22 | ## Proposal
23 |
24 | Include flannel as a CNI plugin alternative for RKE2. We know that it has limitations and it is really simple but it seems it could be enough for Windows users that are feeling the pain of the Calico TCP Resets
25 |
26 | ### Strength
27 |
28 | * We can offer a plan B for customers that can't afford getting TCP Resets
29 | * We have an alternative while Microsoft and Tigera fix the problem
30 | * We are maintainers of flannel and could support it easily
31 |
32 | ### Weakness
33 | * Yet another cni plugin to support
34 | * Flannel is very limited. We should document it very well to avoid disappointment on non-knowledgeable customers
35 |
--------------------------------------------------------------------------------
/docs/adrs/007-add-kine-support.md:
--------------------------------------------------------------------------------
1 | # Add kine support to RKE2
2 |
3 | ## Established
4 |
5 | ## Revisit by
6 |
7 | ## Status
8 |
9 | Accepted
10 |
11 | ## Context
12 |
 13 | This ADR is an introduction of kine support for RKE2. However, for this support to be implemented, it was necessary to add kine with TLS in K3s.
 14 | This was done in this [PR](https://github.com/k3s-io/k3s/pull/9572). It was needed since rke2 cannot connect to kine without TLS via the API server.
15 |
16 | When rke2 is started with the `--datastore-endpoint` flag, it will disable the etcd pod and set the `cluster-init` flag to be `false`, to avoid the etcd part of k3s to be started.
17 | Kine will use the etcd client certificate to authenticate the connection to the kine server that will be a `unixs` socket type.
18 |
19 | ### Pros
20 |
21 | - With the integration of kine, it is now possible to use the `--datastore-endpoint` flag among others related to kine. This allows for a more versatile configuration of the datastore,
22 | providing users with the flexibility to choose their preferred storage backend.
23 |
24 | ### Cons
25 |
26 | - Kine can only be utilized with TLS due to the requirements of the API server.
27 |
28 | ## Other changes needed in k3s to better support kine in rke2
29 |
 30 | When testing rke2 with kine, there were some changes needed to avoid panics (especially when we are talking about `etcd`) and to make it work with TLS. The changes are that when the user
 31 | uses `--datastore-endpoint` and other flags related to `etcd only` nodes, we have to ignore these flags or simply end the process with an error message.
32 |
 33 | We decided to set an error message and end the process, since otherwise it is not clear to the user that the flags are being ignored.
34 |
35 | ### Pros of Ignoring the flags
36 |
37 | - It is possible to avoid panics and rke2 will run as expected.
38 |
39 | ### Cons of Ignoring the flags
40 |
 41 | - It will not be very clear to the user that the flags are being ignored.
42 |
 43 | ### Pros of Ending the process with an error message
44 |
45 | - Rke2 will run as expected with transparency to the user.
46 |
 47 | ### Cons of Ending the process with an error message
48 |
49 | - The user will have to change the flags to make rke2 run.
50 |
--------------------------------------------------------------------------------
/docs/adrs/008-gh-branch-strategy.md:
--------------------------------------------------------------------------------
1 | # 8. Branching Strategy in Github
2 |
3 | Proposal Date: 2024-05-23
4 |
5 | ## Status
6 |
7 | Accepted
8 |
9 | ## Context
10 |
 11 | RKE2 is released at the same cadence as upstream Kubernetes. This requires management of multiple versions at any given point in time. The current branching strategy uses `release-v[MAJOR].[MINOR]`, with the `master` branch corresponding to the highest version released based on [semver](https://semver.org/). GitHub's tags are then used to cut releases, which are just point-in-time snapshots of the specified branch at a given point. As there is the potential for bugs and regressions to be present on any given branch, this branching and release strategy requires a code freeze to QA the branch without new potentially breaking changes going in.
12 |
13 | ## Decision
14 | All code changes go into the `master` branch. We maintain branches for all current release versions in the format `release-v[MAJOR].[MINOR]`. When changes made in master are necessary in a release, they should be backported directly into the release branches. If ever there are changes required only in the release branches and not in master, such as when bumping the kubernetes version from upstream, those can be made directly into the release branches themselves.
15 |
16 | ## Consequences
17 |
18 | - Allows for constant development, with code freeze only relevant for the release branches.
19 | - This requires maintaining one additional branch than the current workflow, which also means one additional issue.
20 | - Testing would be more constant from the master branch.
21 | - When a new minor release is available, the creation of the new release branch will be the responsibility of the engineer that merges the PR bumping Kubernetes to the new minor version. It will happen as soon as that PR is merged.
22 |
--------------------------------------------------------------------------------
/docs/adrs/008-traefik-ingress.md:
--------------------------------------------------------------------------------
1 | # Support for Alternative Ingress Controllers
2 |
3 | Date: 2024-05-21
4 |
5 | ## Status
6 |
7 | Accepted
8 |
9 | ## Context
10 |
11 | RKE2 currently supports only a single ingress controller, ingress-nginx.
 12 | It has been requested that RKE2 support alternative ingress controllers, similar to how RKE2 supports multiple CNIs.
13 |
14 | ## Decision
15 |
16 | * A new --ingress-controller flag will be added; the default will be only `ingress-nginx` to preserve current behavior.
17 | * All selected ingress controllers will be deployed to the cluster.
18 | * The first selected ingress controller will be set as the default, via the `ingressclass.kubernetes.io/is-default-class` annotation
19 | on the IngressClass resource.
20 | * Any packaged ingress controllers not listed in the flag value will be disabled, similar to how inactive packaged CNIs are handled.
21 | * RKE2 will package Traefik's HelmChart as a supported ingress controller, deploying as a Daemonset + ClusterIP Service
22 | for parity with the `ingress-nginx` default configuration due to RKE2's lack of a default LoadBalancer controller.
23 | * RKE2 will use mirrored upstream Traefik images; custom-rebuilt hardened-traefik images will not be provided or supported.
24 |
25 | ## Consequences
26 |
27 | * We will add an additional packaged component and CLI flag for ingress controller selection.
28 | * We will need to track updates to Traefik and the Traefik chart.
29 | * QA will need additional resources to test the new ingress controllers.
30 |
--------------------------------------------------------------------------------
/docs/adrs/adr-template.md:
--------------------------------------------------------------------------------
1 | # ADR Format Template
2 |
3 | This template provides a canvas for generating ADRs and a standard format so that we can build tools to parse them.
4 | - notes are added to this template to help elaborate on the points without a separate document
5 | - notes will be prefixed with a dash
6 |
7 | ## Established
8 |
9 | 2022-07-20
10 | - this section should contain only the YYYY-MM-DD date of when the decision is considered final
11 | - this can be added after context is given, in the PR which will wait for 1 week before merge
12 |
13 | ## Revisit by
14 |
15 | 2023-07-15
16 | - this section should contain only the YYYY-MM-DD date of when the decision is considered stale
17 | - at the next design discussion we should validate and renew this date
18 |
19 | ## Subject
20 |
21 | Given `data`, when `triggering event`, then we `do something`.
22 |
23 | - the person should be [first person plural](https://en.wikipedia.org/wiki/Grammatical_person)
24 | - "we" do something
25 | - not "I", "you", or "they"
26 | - the tense should be [simple present](https://courses.dcs.wisc.edu/wp/grammar/category/tense-and-mood/),
27 | - we "do" something
28 | - not "does", "doing", "did", or "done"
29 | - the mood should be [indicative](https://osuwritingcenter.okstate.edu/blog/2020/11/6/the-five-grammatical-moods)
30 | - we "do" something
31 | - not "go do"
32 | - Given when then statements should be used as often as possible to get as much context into the subject as possible.
33 | - Don't force 'given, when, then'; if there is no triggering event or no data given, then leave those parts out.
34 |
35 | ## Status
36 |
37 | Accepted / Rejected / Superseded by #other-issue
38 | - accepted is the decision that the subject is appropriate and we will do it.
39 | - rejected is the decision that the subject isn't appropriate and we won't do it.
40 | - superseded relates that a different decision forces this decision (for instance a decision made at a higher level of abstraction)
41 |
42 | ## Context
43 |
44 | - the following is a simple framework for judging a decision, these items are not required, but may be useful to the writer.
45 | ### Strength of doing process
46 | ### Weakness of doing process
47 | ### Threats involved in not doing process
48 | ### Threats involved in doing process
49 | ### Opportunities involved in doing process
50 |
51 | - a different approach to context framework
52 | ### Pros
53 | ### Cons
54 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "os"
5 |
6 | "github.com/k3s-io/k3s/pkg/configfilearg"
7 | "github.com/rancher/rke2/pkg/cli/cmds"
8 | "github.com/sirupsen/logrus"
9 | "github.com/urfave/cli/v2"
10 | )
11 |
12 | func main() {
13 | app := cmds.NewApp()
14 | app.Commands = []*cli.Command{
15 | cmds.NewServerCommand(),
16 | cmds.NewAgentCommand(),
17 | cmds.NewEtcdSnapshotCommand(),
18 | cmds.NewCertCommand(),
19 | cmds.NewSecretsEncryptCommand(),
20 | cmds.NewTokenCommand(),
21 | cmds.NewCompletionCommand(),
22 | }
23 |
24 | if err := app.Run(configfilearg.MustParse(os.Args)); err != nil {
25 | logrus.Fatal(err)
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
# MkDocs configuration for the RKE2 documentation site (https://docs.rke2.io).
site_name: RKE2 - Rancher's Next Generation Kubernetes Distribution
repo_url: https://github.com/rancher/rke2
site_url: https://docs.rke2.io
strict: false
use_directory_urls: true
# Material theme with RKE2 branding assets.
theme:
  name: material
  palette:
    primary: blue
  font:
    text: 'Work Sans'
  logo: 'assets/logo-stacked-white-rke2.svg'
  favicon: 'assets/favicon-32x32.png'
# google_analytics: (disabled)
plugins:
  - search
  - markdownextradata: {}
markdown_extensions:
  - codehilite
  - admonition
  - pymdownx.tabbed:
      alternate_style: true
  - pymdownx.superfences
  - toc:
      permalink: true
# Site navigation. Entries given as bare paths take their title from the
# page's own H1 heading.
nav:
  - index.md
  - Installation:
      - install/requirements.md
      - install/quickstart.md
      - install/ha.md
      - Install Options:
          - install/install_options/install_options.md
          - install/install_options/server_config.md
          - install/install_options/linux_agent_config.md
          - install/install_options/windows_agent_config.md
      - install/methods.md
      - install/network_options.md
      - install/containerd_registry_configuration.md
      - install/airgap.md
      - install/windows_airgap.md
      - install/linux_uninstall.md
      - install/windows_uninstall.md
  - Upgrades:
      - Overview: upgrade/upgrade.md
      - upgrade/basic_upgrade.md
      - upgrade/automated_upgrade.md
  - Security:
      - security/about_hardened_images.md
      - security/hardening_guide.md
      - security/cis_self_assessment16.md
      - security/cis_self_assessment123.md
      - security/fips_support.md
      - security/policies.md
      - security/pod_security_standards.md
      - security/selinux.md
      - security/secrets_encryption.md
  - Architecture:
      - architecture/architecture.md
  - cluster_access.md
  - backup_restore.md
  - networking.md
  - helm.md
  - advanced.md
  - subcommands.md
  - known_issues.md
--------------------------------------------------------------------------------
/pkg/auth/auth.go:
--------------------------------------------------------------------------------
1 | package auth
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/k3s-io/k3s/pkg/util"
7 | "github.com/k3s-io/k3s/pkg/version"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | "k8s.io/apiserver/pkg/authentication/authenticator"
10 | "k8s.io/apiserver/pkg/authentication/group"
11 | "k8s.io/apiserver/pkg/authentication/request/bearertoken"
12 | "k8s.io/client-go/informers"
13 | "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap"
14 | )
15 |
16 | // BootstrapTokenAuthenticator returns an authenticator to handle bootstrap tokens.
17 | // This requires a secret lister, which will be created from the provided kubeconfig.
18 | func BootstrapTokenAuthenticator(ctx context.Context, file string) (authenticator.Request, error) {
19 | k8s, err := util.GetClientSet(file)
20 | if err != nil {
21 | return nil, err
22 | }
23 |
24 | factory := informers.NewSharedInformerFactory(k8s, 0)
25 | lister := factory.Core().V1().Secrets().Lister().Secrets(metav1.NamespaceSystem)
26 | audiences := authenticator.Audiences{version.Program}
27 | tokenAuth := authenticator.WrapAudienceAgnosticToken(audiences, bootstrap.NewTokenAuthenticator(lister))
28 | auth := bearertoken.New(tokenAuth)
29 |
30 | go factory.Core().V1().Secrets().Informer().Run(ctx.Done())
31 | return group.NewAuthenticatedGroupAdder(auth), nil
32 | }
33 |
--------------------------------------------------------------------------------
/pkg/cli/cmds/agent_service_linux.go:
--------------------------------------------------------------------------------
1 | //go:build !windows
2 | // +build !windows
3 |
4 | package cmds
5 |
6 | import "github.com/urfave/cli/v2"
7 |
// serviceSubcommand is an empty placeholder on non-Windows builds.
// NOTE(review): presumably the windows-tagged sibling file provides the real
// service-management subcommand — confirm against the windows build.
var serviceSubcommand = &cli.Command{}
9 |
--------------------------------------------------------------------------------
/pkg/cli/cmds/cert.go:
--------------------------------------------------------------------------------
1 | package cmds
2 |
3 | import (
4 | "io/ioutil"
5 |
6 | "github.com/k3s-io/k3s/pkg/cli/cert"
7 | "github.com/k3s-io/k3s/pkg/cli/cmds"
8 | "github.com/k3s-io/k3s/pkg/configfilearg"
9 | "github.com/rancher/rke2/pkg/rke2"
10 | "github.com/urfave/cli/v2"
11 | )
12 |
13 | func NewCertCommand() *cli.Command {
14 | k3sOpts := K3SFlagSet{}
15 | subCommandOpts := map[string]K3SFlagSet{
16 | "rotate": {
17 | "alsologtostderr": copyFlag,
18 | "config": copyFlag,
19 | "debug": copyFlag,
20 | "log": copyFlag,
21 | "service": copyFlag,
22 | "data-dir": {
23 | Usage: "(data) Folder to hold state",
24 | Default: rke2Path,
25 | },
26 | },
27 | "rotate-ca": {
28 | "server": {
29 | Default: "https://127.0.0.1:9345",
30 | },
31 | "path": copyFlag,
32 | "force": copyFlag,
33 | "data-dir": {
34 | Usage: "(data) Folder to hold state",
35 | Default: rke2Path,
36 | },
37 | },
38 | "check": {
39 | "alsologtostderr": copyFlag,
40 | "config": copyFlag,
41 | "debug": copyFlag,
42 | "log": copyFlag,
43 | "service": copyFlag,
44 | "output": copyFlag,
45 | "data-dir": {
46 | Usage: "(data) Folder to hold state",
47 | Default: rke2Path,
48 | },
49 | },
50 | }
51 |
52 | command := cmds.NewCertCommands(cert.Check, Rotate, cert.RotateCA)
53 | command.Usage = "Manage RKE2 certificates"
54 | configfilearg.DefaultParser.ValidFlags[command.Name] = command.Flags
55 | for i, subcommand := range command.Subcommands {
56 | if s, ok := subCommandOpts[subcommand.Name]; ok {
57 | k3sOpts.CopyInto(s)
58 | command.Subcommands[i] = mustCmdFromK3S(subcommand, s)
59 | } else {
60 | command.Subcommands[i] = mustCmdFromK3S(subcommand, k3sOpts)
61 | }
62 | }
63 | return mustCmdFromK3S(command, nil)
64 | }
65 |
66 | func Rotate(clx *cli.Context) error {
67 | dataDir := clx.String("data-dir")
68 | if dataDir == "" {
69 | dataDir = rke2Path
70 | }
71 | if err := ioutil.WriteFile(rke2.ForceRestartFile(dataDir), []byte{}, 0600); err != nil {
72 | return err
73 | }
74 | return cert.Rotate(clx)
75 | }
76 |
--------------------------------------------------------------------------------
/pkg/cli/cmds/cmds_test.go:
--------------------------------------------------------------------------------
1 | package cmds
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/urfave/cli/v2"
7 | )
8 |
9 | // Test_NewCommands confirms that all the top-level commands can be created
10 | // successfully without causing any panics in mustCmdFromK3S. Covering this
11 | // with a test allows us to catch K3s flag option mismatches in testing,
12 | // instead of not noticing until the main command crashes in functional tests.
13 | func Test_NewCommands(t *testing.T) {
14 | app := cli.NewApp()
15 | app.Name = "rke2"
16 | app.Commands = []*cli.Command{
17 | NewServerCommand(),
18 | NewAgentCommand(),
19 | NewEtcdSnapshotCommand(),
20 | NewCertCommand(),
21 | NewSecretsEncryptCommand(),
22 | NewTokenCommand(),
23 | NewCompletionCommand(),
24 | }
25 |
26 | for _, command := range app.Commands {
27 | t.Logf("Testing command: %s", command.Name)
28 | app.Run([]string{app.Name, command.Name, "--help"})
29 |
30 | for _, subcommand := range command.Subcommands {
31 | t.Logf("Testing subcommand: %s %s", command.Name, subcommand.Name)
32 | app.Run([]string{app.Name, command.Name, subcommand.Name, "--help"})
33 | }
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/pkg/cli/cmds/etcd_snapshot.go:
--------------------------------------------------------------------------------
1 | package cmds
2 |
3 | import (
4 | "github.com/k3s-io/k3s/pkg/cli/cmds"
5 | "github.com/k3s-io/k3s/pkg/cli/etcdsnapshot"
6 | "github.com/k3s-io/k3s/pkg/configfilearg"
7 | "github.com/urfave/cli/v2"
8 | )
9 |
// defaultSnapshotRentention is presumably the default number of etcd
// snapshots to retain — not referenced within this file's visible code.
// NOTE(review): identifier misspells "Retention"; confirm no external
// references before renaming.
const defaultSnapshotRentention = 5
11 |
12 | func NewEtcdSnapshotCommand() *cli.Command {
13 | cmds.ServerConfig.ClusterInit = true
14 | k3sOpts := K3SFlagSet{
15 | "config": copyFlag,
16 | "debug": copyFlag,
17 | "log": copyFlag,
18 | "alsologtostderr": copyFlag,
19 | "node-name": copyFlag,
20 | "data-dir": {
21 | Usage: "(data) Folder to hold state",
22 | Default: rke2Path,
23 | },
24 | "etcd-server": {
25 | Default: "https://127.0.0.1:9345",
26 | },
27 | "etcd-token": copyFlag,
28 | "name": copyFlag,
29 | "dir": copyFlag,
30 | "snapshot-compress": copyFlag,
31 | "snapshot-retention": copyFlag,
32 | "s3": copyFlag,
33 | "s3-access-key": copyFlag,
34 | "s3-bucket": copyFlag,
35 | "s3-bucket-lookup-type": copyFlag,
36 | "s3-config-secret": copyFlag,
37 | "s3-endpoint": copyFlag,
38 | "s3-endpoint-ca": copyFlag,
39 | "s3-folder": copyFlag,
40 | "s3-insecure": copyFlag,
41 | "s3-proxy": copyFlag,
42 | "s3-region": copyFlag,
43 | "s3-secret-key": copyFlag,
44 | "s3-session-token": copyFlag,
45 | "s3-skip-ssl-verify": copyFlag,
46 | "s3-timeout": copyFlag,
47 | }
48 | subcommandOpts := map[string]K3SFlagSet{
49 | "ls": {
50 | "output": copyFlag,
51 | },
52 | }
53 |
54 | command := cmds.NewEtcdSnapshotCommands(
55 | etcdsnapshot.Delete,
56 | etcdsnapshot.List,
57 | etcdsnapshot.Prune,
58 | etcdsnapshot.Save)
59 | for i, subcommand := range command.Subcommands {
60 | if s, ok := subcommandOpts[subcommand.Name]; ok {
61 | k3sOpts.CopyInto(s)
62 | command.Subcommands[i] = mustCmdFromK3S(subcommand, s)
63 | } else {
64 | command.Subcommands[i] = mustCmdFromK3S(subcommand, k3sOpts)
65 | }
66 | }
67 | cmd := mustCmdFromK3S(command, k3sOpts)
68 | configfilearg.DefaultParser.ValidFlags[cmd.Name] = cmd.Flags
69 | return cmd
70 | }
71 |
--------------------------------------------------------------------------------
/pkg/cli/cmds/profile_windows.go:
--------------------------------------------------------------------------------
1 | //go:build windows
2 | // +build windows
3 |
4 | package cmds
5 |
6 | import (
7 | "github.com/urfave/cli/v2"
8 | )
9 |
// validateProfile is a no-op on Windows: no CIS profile validation is
// performed here.
// NOTE(review): presumably the non-windows build performs the real
// validation — confirm against the linux implementation.
func validateProfile(clx *cli.Context, role CLIRole) {
}
12 |
--------------------------------------------------------------------------------
/pkg/cli/cmds/secrets_encrypt.go:
--------------------------------------------------------------------------------
1 | package cmds
2 |
3 | import (
4 | "github.com/k3s-io/k3s/pkg/cli/cmds"
5 | "github.com/k3s-io/k3s/pkg/cli/secretsencrypt"
6 | "github.com/urfave/cli/v2"
7 | )
8 |
9 | func NewSecretsEncryptCommand() *cli.Command {
10 | k3sOpts := K3SFlagSet{
11 | "data-dir": copyFlag,
12 | "token": copyFlag,
13 | "server": {
14 | Default: "https://127.0.0.1:9345",
15 | },
16 | }
17 | subcommandOpts := map[string]K3SFlagSet{
18 | "status": {
19 | "output": copyFlag,
20 | },
21 | "prepare": {
22 | "force": copyFlag,
23 | },
24 | "rotate": {
25 | "force": copyFlag,
26 | },
27 | "reencrypt": {
28 | "force": copyFlag,
29 | "skip": copyFlag,
30 | },
31 | }
32 |
33 | command := cmds.NewSecretsEncryptCommands(
34 | secretsencrypt.Status,
35 | secretsencrypt.Enable,
36 | secretsencrypt.Disable,
37 | secretsencrypt.Prepare,
38 | secretsencrypt.Rotate,
39 | secretsencrypt.Reencrypt,
40 | secretsencrypt.RotateKeys)
41 |
42 | for i, subcommand := range command.Subcommands {
43 | if s, ok := subcommandOpts[subcommand.Name]; ok {
44 | k3sOpts.CopyInto(s)
45 | command.Subcommands[i] = mustCmdFromK3S(subcommand, s)
46 | } else {
47 | command.Subcommands[i] = mustCmdFromK3S(subcommand, k3sOpts)
48 | }
49 | }
50 | return mustCmdFromK3S(command, nil)
51 | }
52 |
--------------------------------------------------------------------------------
/pkg/cli/cmds/token.go:
--------------------------------------------------------------------------------
1 | package cmds
2 |
3 | import (
4 | "github.com/k3s-io/k3s/pkg/cli/cmds"
5 | "github.com/k3s-io/k3s/pkg/cli/token"
6 | "github.com/k3s-io/k3s/pkg/configfilearg"
7 | "github.com/urfave/cli/v2"
8 | )
9 |
10 | func NewTokenCommand() *cli.Command {
11 | k3sOpts := K3SFlagSet{
12 | "kubeconfig": copyFlag,
13 | "data-dir": {
14 | Usage: "(data) Folder to hold state",
15 | Default: rke2Path,
16 | },
17 | }
18 | subCommandOpts := map[string]K3SFlagSet{
19 | "create": {
20 | "description": copyFlag,
21 | "groups": copyFlag,
22 | "ttl": copyFlag,
23 | "usages": copyFlag,
24 | },
25 | "list": {
26 | "output": copyFlag,
27 | },
28 | "rotate": {
29 | "token": copyFlag,
30 | "new-token": copyFlag,
31 | "server": {
32 | Default: "https://127.0.0.1:9345",
33 | },
34 | },
35 | }
36 |
37 | command := cmds.NewTokenCommands(token.Create, token.Delete, token.Generate, token.List, token.Rotate)
38 | configfilearg.DefaultParser.ValidFlags[command.Name] = command.Flags
39 | for i, subcommand := range command.Subcommands {
40 | if s, ok := subCommandOpts[subcommand.Name]; ok {
41 | k3sOpts.CopyInto(s)
42 | command.Subcommands[i] = mustCmdFromK3S(subcommand, s)
43 | } else {
44 | command.Subcommands[i] = mustCmdFromK3S(subcommand, k3sOpts)
45 | }
46 | }
47 | return mustCmdFromK3S(command, nil)
48 | }
49 |
--------------------------------------------------------------------------------
/pkg/cli/defaults/defaults.go:
--------------------------------------------------------------------------------
1 | package defaults
2 |
3 | import (
4 | "io/ioutil"
5 | "os"
6 | "path/filepath"
7 |
8 | "github.com/k3s-io/k3s/pkg/cli/cmds"
9 | pkgerrors "github.com/pkg/errors"
10 | "github.com/urfave/cli/v2"
11 | "google.golang.org/grpc/grpclog"
12 | )
13 |
14 | func Set(_ *cli.Context, dataDir string) error {
15 | if err := createDataDir(dataDir, 0755); err != nil {
16 | return pkgerrors.WithMessagef(err, "failed to create directory %s", dataDir)
17 | }
18 |
19 | logsDir := filepath.Join(dataDir, "agent", "logs")
20 | if err := os.MkdirAll(logsDir, 0750); err != nil {
21 | return pkgerrors.WithMessagef(err, "failed to create directory %s", logsDir)
22 | }
23 |
24 | cmds.ServerConfig.ClusterInit = true
25 | cmds.ServerConfig.DisableNPC = true
26 | cmds.ServerConfig.FlannelBackend = "none"
27 | cmds.ServerConfig.AdvertisePort = 6443
28 | cmds.ServerConfig.SupervisorPort = 9345
29 | cmds.ServerConfig.HTTPSPort = 6443
30 | cmds.ServerConfig.APIServerPort = 6443
31 | cmds.ServerConfig.APIServerBindAddress = "0.0.0.0"
32 | if err := AppendToStringSlice(&cmds.ServerConfig.ExtraAPIArgs, []string{
33 | "enable-admission-plugins=NodeRestriction",
34 | }); err != nil {
35 | return err
36 | }
37 | if err := AppendToStringSlice(&cmds.AgentConfig.ExtraKubeletArgs, []string{
38 | "stderrthreshold=FATAL",
39 | "log-file-max-size=50",
40 | "alsologtostderr=false",
41 | "logtostderr=false",
42 | "log-file=" + filepath.Join(logsDir, "kubelet.log"),
43 | }); err != nil {
44 | return err
45 | }
46 | if !cmds.Debug {
47 | l := grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, os.Stderr)
48 | grpclog.SetLoggerV2(l)
49 | }
50 |
51 | return nil
52 | }
53 |
54 | // With urfaveCLI/v2, we cannot directly access the []string value of a cli.StringSlice
55 | // so we need to individually append each value to the slice using the Set method
56 | func AppendToStringSlice(ss *cli.StringSlice, values []string) error {
57 | for _, v := range values {
58 | if err := ss.Set(v); err != nil {
59 | return err
60 | }
61 | }
62 | return nil
63 | }
64 |
--------------------------------------------------------------------------------
/pkg/cli/defaults/defaults_linux.go:
--------------------------------------------------------------------------------
1 | //go:build linux
2 | // +build linux
3 |
4 | package defaults
5 |
6 | import (
7 | "os"
8 |
9 | pkgerrors "github.com/pkg/errors"
10 | )
11 |
12 | func createDataDir(dataDir string, perm os.FileMode) error {
13 | if dataDir == "" {
14 | return nil
15 | }
16 |
17 | if err := os.MkdirAll(dataDir, perm); err != nil {
18 | return pkgerrors.WithMessagef(err, "failed to create directory %s", dataDir)
19 | }
20 | return nil
21 | }
22 |
--------------------------------------------------------------------------------
/pkg/cli/defaults/defaults_windows.go:
--------------------------------------------------------------------------------
1 | //go:build windows
2 | // +build windows
3 |
4 | package defaults
5 |
6 | import (
7 | "errors"
8 | "fmt"
9 | "os"
10 | "path/filepath"
11 |
12 | "github.com/rancher/permissions/pkg/access"
13 | "github.com/rancher/permissions/pkg/acl"
14 | "github.com/rancher/permissions/pkg/sid"
15 | "golang.org/x/sys/windows"
16 | )
17 |
18 | func createDataDir(dataDir string, perm os.FileMode) error {
19 | _, err := os.Stat(dataDir)
20 | doesNotExist := errors.Is(err, os.ErrNotExist)
21 | if err != nil && !doesNotExist {
22 | return fmt.Errorf("failed to create data directory %s: %v", dataDir, err)
23 | }
24 |
25 | if !doesNotExist {
26 | return nil
27 | }
28 |
29 | // only set restrictive ACLs the dataDir, not the full path
30 | path, _ := filepath.Split(dataDir)
31 | if os.MkdirAll(path, perm) != nil {
32 | return fmt.Errorf("failed to create data directory %s: %v", dataDir, err)
33 | }
34 |
35 | if err = acl.Mkdir(dataDir, []windows.EXPLICIT_ACCESS{
36 | access.GrantSid(windows.GENERIC_ALL, sid.LocalSystem()),
37 | access.GrantSid(windows.GENERIC_ALL, sid.BuiltinAdministrators()),
38 | }...); err != nil {
39 | return fmt.Errorf("failed to create data directory %s: %v", dataDir, err)
40 | }
41 |
42 | return nil
43 | }
44 |
--------------------------------------------------------------------------------
/pkg/controllers/cisnetworkpolicy/cleanup.go:
--------------------------------------------------------------------------------
1 | package cisnetworkpolicy
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "time"
7 |
8 | "github.com/k3s-io/k3s/pkg/server"
9 | coreclient "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
10 | "github.com/sirupsen/logrus"
11 | core "k8s.io/api/core/v1"
12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
13 | "k8s.io/apimachinery/pkg/types"
14 | "k8s.io/apimachinery/pkg/util/wait"
15 | )
16 |
17 | var (
18 | finalizerKey = "wrangler.cattle.io/cisnetworkpolicy-node"
19 | )
20 |
// Cleanup removes the OnRemove finalizer from any nodes.
// This must be done to clean up from any previously registered OnRemove handlers that are currently disabled.
func Cleanup(ctx context.Context, sc *server.Context) error {
	// Delegate to unregister using the node controller from the server context.
	return unregister(ctx, sc.Core.Core().V1().Node())
}
26 |
// unregister asynchronously strips the cisnetworkpolicy-node finalizer from
// every node that still carries it. A background poll retries every 30
// seconds until a full pass over the node list succeeds or ctx is cancelled;
// the function itself always returns nil immediately.
func unregister(ctx context.Context, nodes coreclient.NodeController) error {
	logrus.Debugf("CISNetworkPolicyController: Removing controller hooks for NetworkPolicy %s", flannelHostNetworkPolicyName)
	go wait.PollImmediateUntilWithContext(ctx, time.Second*30, func(_ context.Context) (bool, error) {
		nodesList, err := nodes.List(metav1.ListOptions{})
		if err != nil {
			// Transient list failure: log and retry on the next tick.
			logrus.Warnf("CISNetworkPolicyController: failed to list nodes: %v", err)
			return false, nil
		}
		for _, node := range nodesList.Items {
			for _, finalizer := range node.ObjectMeta.Finalizers {
				if finalizer == finalizerKey {
					if err := removeFinalizer(nodes, node); err != nil {
						// Patch failed; the entire pass is retried later.
						logrus.Warnf("CISNetworkPolicyController: failed to remove finalizer from node %s: %v", node.Name, err)
						return false, nil
					}
					// Finalizer handled for this node; move to the next one.
					break
				}
			}
		}
		// Clean pass over all nodes: stop polling.
		return true, nil
	})
	return nil
}
50 |
51 | func removeFinalizer(nodes coreclient.NodeController, node core.Node) error {
52 | newFinalizers := []string{}
53 | finalizers := node.ObjectMeta.Finalizers
54 | for k, v := range finalizers {
55 | if v != finalizerKey {
56 | continue
57 | }
58 | newFinalizers = append(finalizers[:k], finalizers[k+1:]...)
59 | }
60 | patch := []map[string]interface{}{
61 | {
62 | "op": "replace",
63 | "value": newFinalizers,
64 | "path": "/metadata/finalizers",
65 | },
66 | }
67 | b, err := json.Marshal(patch)
68 | if err != nil {
69 | return err
70 | }
71 | _, err = nodes.Patch(node.Name, types.JSONPatchType, b)
72 | return err
73 | }
74 |
--------------------------------------------------------------------------------
/pkg/images/images_test.go:
--------------------------------------------------------------------------------
1 | package images
2 |
3 | import (
4 | "io/ioutil"
5 | "os"
6 | "testing"
7 |
8 | "github.com/google/go-containerregistry/pkg/name"
9 | )
10 |
11 | func Test_UnitPull(t *testing.T) {
12 | type args struct {
13 | dir string
14 | name string
15 | image name.Reference
16 | }
17 | tests := []struct {
18 | name string
19 | args args
20 | setup func(a *args) error
21 | teardown func(a *args) error
22 | wantTxtFile bool
23 | wantErr bool
24 | }{
25 | {
26 | name: "Pull with no directory",
27 | args: args{
28 | name: KubeScheduler,
29 | },
30 | setup: func(a *args) error { return nil },
31 | teardown: func(a *args) error { return nil },
32 | },
33 | {
34 | name: "Pull with nonexistent directory",
35 | args: args{
36 | dir: "/tmp/DEADBEEF",
37 | name: KubeScheduler,
38 | },
39 | setup: func(a *args) error {
40 | var err error
41 | a.image, err = getDefaultImage(KubeScheduler)
42 | return err
43 | },
44 | teardown: func(a *args) error {
45 | return os.RemoveAll(a.dir)
46 | },
47 |
48 | wantTxtFile: true,
49 | },
50 | {
51 | name: "Pull with no image in directory",
52 | args: args{
53 | name: KubeScheduler,
54 | },
55 | setup: func(a *args) error {
56 | var err error
57 | a.image, err = getDefaultImage(KubeScheduler)
58 | if err != nil {
59 | return err
60 | }
61 | a.dir, err = os.MkdirTemp("", "*")
62 | return err
63 | },
64 | teardown: func(a *args) error {
65 | return os.RemoveAll(a.dir)
66 | },
67 |
68 | wantTxtFile: true,
69 | },
70 | {
71 | name: "Pull with fake image in directory",
72 | args: args{
73 | name: "kube-scheduler",
74 | },
75 | setup: func(a *args) error {
76 | var err error
77 | a.image, err = getDefaultImage(KubeScheduler)
78 | if err != nil {
79 | return err
80 | }
81 | a.dir, err = os.MkdirTemp("", "*")
82 | tempImage := a.dir + "/" + a.name + ".image"
83 | ioutil.WriteFile(tempImage, []byte(a.image.Name()+"\n"), 0644)
84 | return err
85 | },
86 | teardown: func(a *args) error {
87 | return os.RemoveAll(a.dir)
88 | },
89 | },
90 | }
91 | for _, tt := range tests {
92 | t.Run(tt.name, func(t *testing.T) {
93 |
94 | if err := tt.setup(&tt.args); err != nil {
95 | t.Errorf("Setup for Pull() failed = %v", err)
96 | }
97 | if err := Pull(tt.args.dir, tt.args.name, tt.args.image); (err != nil) != tt.wantErr {
98 | t.Errorf("Pull() error = %v, wantErr %v", err, tt.wantErr)
99 | }
100 | if tt.wantTxtFile {
101 | fileName := tt.args.name + ".txt"
102 | if _, err := os.Stat(tt.args.dir + "/" + fileName); os.IsNotExist(err) {
103 | t.Errorf("File generate by Pull() %s, does not exists, wantFile %v", fileName, tt.wantTxtFile)
104 | }
105 | }
106 | if err := tt.teardown(&tt.args); err != nil {
107 | t.Errorf("Teardown for Pull() failed = %v", err)
108 | }
109 | })
110 | }
111 | }
112 |
--------------------------------------------------------------------------------
/pkg/logging/logging.go:
--------------------------------------------------------------------------------
1 | package logging
2 |
3 | import (
4 | "flag"
5 | "io"
6 | "os"
7 | "strings"
8 |
9 | "github.com/spf13/pflag"
10 | "gopkg.in/natefinch/lumberjack.v2"
11 | cliflag "k8s.io/component-base/cli/flag"
12 | "k8s.io/klog/v2"
13 | )
14 |
15 | var (
16 | packageFlags = pflag.NewFlagSet("logging", pflag.ContinueOnError)
17 | defaultValues = map[string]string{}
18 | )
19 |
20 | // init binds klog flags (except v/vmodule) into the pflag flagset and applies normalization from upstream, and
21 | // memoize default values so that they can be reset when reusing the flag parser.
22 | // Refs:
23 | // * https://github.com/kubernetes/kubernetes/blob/release-1.25/staging/src/k8s.io/component-base/logs/logs.go#L49
24 | // * https://github.com/kubernetes/kubernetes/blob/release-1.25/staging/src/k8s.io/component-base/logs/logs.go#L83
func init() {
	// Normalize flag names (word separators) the same way upstream k8s does.
	packageFlags.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
	fs := flag.NewFlagSet("logging", flag.ContinueOnError)
	klog.InitFlags(fs)
	fs.VisitAll(func(f *flag.Flag) {
		// Skip the verbosity flags (v, vmodule); note this prefix check would
		// also skip any other klog flag whose name begins with "v".
		if !strings.HasPrefix(f.Name, "v") {
			pf := pflag.PFlagFromGoFlag(f)
			// Remember the default so ExtractFromArgs can reset between calls.
			defaultValues[pf.Name] = pf.DefValue
			packageFlags.AddFlag(pf)
		}
	})
}
37 |
38 | // ExtractFromArgs extracts the legacy klog flags from an args list, and returns both the remaining args,
39 | // and a similarly configured log writer configured using the klog flag values.
40 | func ExtractFromArgs(args []string) ([]string, io.Writer) {
41 | // reset values to default
42 | for name, value := range defaultValues {
43 | packageFlags.Set(name, value)
44 | }
45 |
46 | // filter out and set klog flags
47 | extraArgs := []string{}
48 | for _, arg := range args {
49 | name := strings.TrimPrefix(arg, "--")
50 | split := strings.SplitN(name, "=", 2)
51 | if flag := packageFlags.Lookup(split[0]); flag != nil {
52 | var val string
53 | if len(split) > 1 {
54 | val = split[1]
55 | } else {
56 | val = flag.NoOptDefVal
57 | }
58 | flag.Value.Set(val)
59 | continue
60 | }
61 | extraArgs = append(extraArgs, arg)
62 | }
63 |
64 | // Ignore errors on retrieving flag values; accepting the default is fine
65 | alsoToStderr, _ := packageFlags.GetBool("alsologtostderr")
66 | filename, _ := packageFlags.GetString("log-file")
67 | maxSize, _ := packageFlags.GetUint64("log-file-max-size")
68 | toStderr, _ := packageFlags.GetBool("logtostderr")
69 |
70 | if filename == "" {
71 | if toStderr || alsoToStderr {
72 | return extraArgs, os.Stderr
73 | }
74 | return extraArgs, io.Discard
75 | }
76 |
77 | logger := GetLogger(filename, int(maxSize))
78 |
79 | if alsoToStderr {
80 | return extraArgs, io.MultiWriter(os.Stderr, logger)
81 | }
82 |
83 | return extraArgs, logger
84 | }
85 |
86 | // GetLogger returns a new io.Writer that writes to the specified file
87 | func GetLogger(filename string, maxSize int) io.Writer {
88 | return &lumberjack.Logger{
89 | Filename: filename,
90 | MaxSize: int(maxSize),
91 | MaxBackups: 3,
92 | MaxAge: 28,
93 | Compress: true,
94 | }
95 | }
96 |
--------------------------------------------------------------------------------
/pkg/logging/logging_test.go:
--------------------------------------------------------------------------------
1 | package logging
2 |
3 | import (
4 | "fmt"
5 | "reflect"
6 | "testing"
7 | )
8 |
9 | func Test_UnitExtractFromArgs(t *testing.T) {
10 | tests := []struct {
11 | name string
12 | args []string
13 | wantArgs []string
14 | wantLoggerType string
15 | }{
16 | {
17 | name: "Test 1",
18 | args: []string{},
19 | wantArgs: []string{},
20 | wantLoggerType: "*os.File",
21 | },
22 | {
23 | name: "Test 2",
24 | args: []string{"log-file=/dev/null"},
25 | wantArgs: []string{},
26 | wantLoggerType: "*lumberjack.Logger",
27 | },
28 | {
29 | name: "Test 3",
30 | args: []string{"logtostderr=false"},
31 | wantArgs: []string{},
32 | wantLoggerType: "io.discard",
33 | },
34 | {
35 | name: "Test 4",
36 | args: []string{"logtostderr"},
37 | wantArgs: []string{},
38 | wantLoggerType: "*os.File",
39 | },
40 | {
41 | name: "Test 5",
42 | args: []string{"log-file=/dev/null", "alsologtostderr"},
43 | wantArgs: []string{},
44 | wantLoggerType: "*io.multiWriter",
45 | },
46 | {
47 | name: "Test 6",
48 | args: []string{"v=6", "logtostderr=false", "one-output=true", "address=0.0.0.0", "anonymous-auth"},
49 | wantArgs: []string{"v=6", "address=0.0.0.0", "anonymous-auth"},
50 | wantLoggerType: "io.discard",
51 | },
52 | }
53 | for _, tt := range tests {
54 | t.Run(tt.name, func(t *testing.T) {
55 | gotArgs, gotLogger := ExtractFromArgs(tt.args)
56 | if !reflect.DeepEqual(gotArgs, tt.wantArgs) {
57 | t.Errorf("ExtractFromArgs() gotArgs = %+v\nWant = %+v", gotArgs, tt.wantArgs)
58 | }
59 | if gotLoggerType := fmt.Sprintf("%T", gotLogger); gotLoggerType != tt.wantLoggerType {
60 | t.Errorf("ExtractFromArgs() gotLogger = %+v\nWant = %+v", gotLoggerType, tt.wantLoggerType)
61 | }
62 | })
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/pkg/podexecutor/command_linux.go:
--------------------------------------------------------------------------------
1 | //go:build linux
2 | // +build linux
3 |
4 | package podexecutor
5 |
6 | import (
7 | "os/exec"
8 | "syscall"
9 | )
10 |
// addDeathSig configures the command so the kernel delivers SIGKILL to the
// child process if its parent thread dies. (The previous "not supported in
// this OS" comment was a copy-paste error from the !linux stub -- Pdeathsig
// IS supported here; this is the Linux build.)
func addDeathSig(cmd *exec.Cmd) {
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Pdeathsig: syscall.SIGKILL,
	}
}
17 |
--------------------------------------------------------------------------------
/pkg/podexecutor/command_other.go:
--------------------------------------------------------------------------------
1 | //go:build !linux
2 | // +build !linux
3 |
4 | package podexecutor
5 |
6 | import "os/exec"
7 |
// addDeathSig is a no-op stub: parent-death signals (Pdeathsig) are not
// supported on non-Linux platforms.
func addDeathSig(_ *exec.Cmd) {
	// not supported in this OS
}
11 |
--------------------------------------------------------------------------------
/pkg/rke2/kp.go:
--------------------------------------------------------------------------------
1 | package rke2
2 |
3 | import (
4 | "context"
5 | "sync"
6 |
7 | "github.com/k3s-io/k3s/pkg/cli/cmds"
8 | )
9 |
10 | const kubeProxyChart = "rke2-kube-proxy"
11 |
// setKubeProxyDisabled returns a startup hook that, once the apiserver is
// ready, marks the rke2-kube-proxy chart as both skipped and disabled in the
// hook args so it will not be deployed.
func setKubeProxyDisabled() cmds.StartupHook {
	return func(ctx context.Context, wg *sync.WaitGroup, args cmds.StartupHookArgs) error {
		go func() {
			defer wg.Done()
			// Block until the apiserver is up before mutating the maps.
			<-args.APIServerReady
			args.Skips[kubeProxyChart] = true
			args.Disables[kubeProxyChart] = true
		}()
		return nil
	}
}
23 |
--------------------------------------------------------------------------------
/pkg/rke2/psa.go:
--------------------------------------------------------------------------------
1 | package rke2
2 |
3 | import (
4 | "io/ioutil"
5 | "os"
6 | "path/filepath"
7 |
8 | pkgerrors "github.com/pkg/errors"
9 | "github.com/sirupsen/logrus"
10 | )
11 |
12 | const (
13 | defaultPSAConfigFile = "/etc/rancher/rke2/rke2-pss.yaml"
14 | )
15 |
16 | // setPSAs sets the default PSA's based on the mode that RKE2 is running in. There is either CIS or non
17 | // CIS mode. For CIS mode, a default PSA configuration with enforcement for restricted will be applied
18 | // for non CIS mode, a default PSA configuration will be applied that has privileged restriction
19 | func setPSAs(cisMode bool) error {
20 | logrus.Info("Applying Pod Security Admission Configuration")
21 | configDir := filepath.Dir(defaultPSAConfigFile)
22 | if err := os.MkdirAll(configDir, 0755); err != nil {
23 | return err
24 | }
25 | if !cisMode { // non-CIS mode
26 | psaConfig := unrestrictedPSAConfig()
27 | if err := ioutil.WriteFile(defaultPSAConfigFile, []byte(psaConfig), 0600); err != nil {
28 | return pkgerrors.WithMessagef(err, "psa: failed to write psa unrestricted config")
29 | }
30 |
31 | } else { // CIS mode
32 | psaConfig := restrictedPSAConfig()
33 | if err := ioutil.WriteFile(defaultPSAConfigFile, []byte(psaConfig), 0600); err != nil {
34 | return pkgerrors.WithMessagef(err, "psa: failed to write psa restricted config")
35 | }
36 | }
37 | return nil
38 | }
39 |
40 | func restrictedPSAConfig() string {
41 | psRestrictedConfig := `apiVersion: apiserver.config.k8s.io/v1
42 | kind: AdmissionConfiguration
43 | plugins:
44 | - name: PodSecurity
45 | configuration:
46 | apiVersion: pod-security.admission.config.k8s.io/v1beta1
47 | kind: PodSecurityConfiguration
48 | defaults:
49 | enforce: "restricted"
50 | enforce-version: "latest"
51 | audit: "restricted"
52 | audit-version: "latest"
53 | warn: "restricted"
54 | warn-version: "latest"
55 | exemptions:
56 | usernames: []
57 | runtimeClasses: []
58 | namespaces: [kube-system, cis-operator-system, tigera-operator]`
59 | return psRestrictedConfig
60 | }
61 |
62 | func unrestrictedPSAConfig() string {
63 | psUnrestrictedConfig := `apiVersion: apiserver.config.k8s.io/v1
64 | kind: AdmissionConfiguration
65 | plugins:
66 | - name: PodSecurity
67 | configuration:
68 | apiVersion: pod-security.admission.config.k8s.io/v1beta1
69 | kind: PodSecurityConfiguration
70 | defaults:
71 | enforce: "privileged"
72 | enforce-version: "latest"
73 | exemptions:
74 | usernames: []
75 | runtimeClasses: []
76 | namespaces: []`
77 | return psUnrestrictedConfig
78 | }
79 |
--------------------------------------------------------------------------------
/pkg/rke2/rc.go:
--------------------------------------------------------------------------------
1 | package rke2
2 |
3 | import (
4 | "context"
5 | "sync"
6 |
7 | "github.com/k3s-io/k3s/pkg/cli/cmds"
8 | "github.com/k3s-io/k3s/pkg/util"
9 | "github.com/sirupsen/logrus"
10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
11 | )
12 |
13 | const (
14 | runtimeClassesChart = "rke2-runtimeclasses"
15 |
16 | // Values from upstream, see reference at -> https://github.com/helm/helm/blob/v3.16.3/pkg/action/validate.go#L34-L37
17 | appManagedByLabel = "app.kubernetes.io/managed-by"
18 | appManagedByHelm = "Helm"
19 | helmReleaseNameAnnotation = "meta.helm.sh/release-name"
20 | helmReleaseNamespaceAnnotation = "meta.helm.sh/release-namespace"
21 | )
22 |
23 | var runtimes = map[string]bool{
24 | "nvidia": true,
25 | "nvidia-experimental": true,
26 | "crun": true,
27 | }
28 |
29 | func setRuntimes() cmds.StartupHook {
30 | return func(ctx context.Context, wg *sync.WaitGroup, args cmds.StartupHookArgs) error {
31 | go func() {
32 | defer wg.Done()
33 | <-args.APIServerReady
34 | logrus.Info("Setting runtimes")
35 |
36 | client, err := util.GetClientSet(args.KubeConfigSupervisor)
37 | if err != nil {
38 | logrus.Fatalf("runtimes: new k8s client: %v", err)
39 | }
40 |
41 | rcClient := client.NodeV1().RuntimeClasses()
42 |
43 | classes, err := rcClient.List(context.Background(), metav1.ListOptions{})
44 | if err != nil {
45 | logrus.Fatalf("runtimes: failed to get runtime classes")
46 | }
47 |
48 | for _, c := range classes.Items {
49 |
50 | // verify if the runtime class is the runtime class supported by rke2
51 | if _, ok := runtimes[c.Name]; !ok {
52 | continue
53 | }
54 |
55 | if c.Labels == nil {
56 | c.Labels = map[string]string{}
57 | }
58 |
59 | if managedBy, ok := c.Labels[appManagedByLabel]; !ok || managedBy != appManagedByHelm {
60 | c.Labels[appManagedByLabel] = appManagedByHelm
61 | }
62 |
63 | if c.Annotations == nil {
64 | c.Annotations = map[string]string{}
65 | }
66 |
67 | if releaseName, ok := c.Annotations[helmReleaseNameAnnotation]; !ok || releaseName != runtimeClassesChart {
68 | c.Annotations[helmReleaseNameAnnotation] = runtimeClassesChart
69 | }
70 |
71 | if namespace, ok := c.Annotations[helmReleaseNamespaceAnnotation]; !ok || namespace != metav1.NamespaceSystem {
72 | c.Annotations[helmReleaseNamespaceAnnotation] = metav1.NamespaceSystem
73 | }
74 |
75 | _, err = rcClient.Update(context.Background(), &c, metav1.UpdateOptions{})
76 | if err != nil {
77 | logrus.Fatalf("runtimes: failed to update runtime classes")
78 | }
79 |
80 | }
81 | }()
82 |
83 | return nil
84 | }
85 | }
86 |
--------------------------------------------------------------------------------
/pkg/windows/service_linux.go:
--------------------------------------------------------------------------------
1 | //go:build !windows
2 | // +build !windows
3 |
4 | package windows
5 |
// StartService is a stub on non-Windows platforms: there is no Windows
// service to start, so it always reports (false, nil).
func StartService() (bool, error) {
	return false, nil
}
9 |
// MonitorProcessExit is a no-op on non-Windows platforms.
func MonitorProcessExit() {}
11 |
--------------------------------------------------------------------------------
/pkg/windows/service_windows.go:
--------------------------------------------------------------------------------
1 | //go:build windows
2 | // +build windows
3 |
4 | package windows
5 |
6 | import (
7 | "os"
8 |
9 | "github.com/k3s-io/k3s/pkg/version"
10 | pkgerrors "github.com/pkg/errors"
11 | "github.com/rancher/wins/pkg/logs"
12 | "github.com/rancher/wins/pkg/profilings"
13 | "github.com/rancher/wrangler/v3/pkg/signals"
14 | "github.com/sirupsen/logrus"
15 | "golang.org/x/sys/windows"
16 | "golang.org/x/sys/windows/svc"
17 | "k8s.io/apimachinery/pkg/util/wait"
18 | )
19 |
// service implements the svc.Handler interface so rke2 can run as a
// Windows service.
type service struct{}

var (
	// Service is the singleton handler passed to svc.Run in StartService.
	Service = &service{}
	// ProcessWaitGroup is waited on by MonitorProcessExit before exit.
	ProcessWaitGroup wait.Group
)
26 |
// Execute is the svc.Handler control loop: it reports the service as running,
// then responds to Windows service control requests until asked to stop or
// shut down.
func (h *service) Execute(_ []string, requests <-chan svc.ChangeRequest, statuses chan<- svc.Status) (bool, uint32) {
	statuses <- svc.Status{State: svc.StartPending}
	statuses <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown}
	for c := range requests {
		switch c.Cmd {
		case svc.Cmd(windows.SERVICE_CONTROL_PARAMCHANGE):
			// Parameter changes are acknowledged without a state change.
			statuses <- c.CurrentStatus
		case svc.Interrogate:
			statuses <- c.CurrentStatus
		case svc.Stop, svc.Shutdown:
			statuses <- svc.Status{State: svc.StopPending}
			if !signals.RequestShutdown() {
				// NOTE(review): RequestShutdown returned false -- presumably no
				// graceful shutdown handler is registered, so exit immediately;
				// confirm against wrangler/signals semantics.
				logrus.Infof("Windows Service is shutting down")
				statuses <- svc.Status{State: svc.Stopped}
				os.Exit(0)
			}

			logrus.Infof("Windows Service is shutting down gracefully")
			statuses <- svc.Status{State: svc.StopPending}
			statuses <- svc.Status{State: svc.Stopped}
			return false, 0
		}
	}
	return false, 0
}
52 |
// StartService registers logging hooks and starts the Windows service control
// handler in the background. It returns (true, nil) when running as a Windows
// service, or the result of svc.IsWindowsService when not (or on error).
func StartService() (bool, error) {
	if ok, err := svc.IsWindowsService(); err != nil || !ok {
		// Not running under the service control manager; nothing to do.
		return ok, err
	}

	// ETW tracing
	etw, err := logs.NewEtwProviderHook(version.Program)
	if err != nil {
		return false, pkgerrors.WithMessage(err, "could not create ETW provider logrus hook")
	}
	logrus.AddHook(etw)

	// Forward logrus output to the Windows event log as well.
	el, err := logs.NewEventLogHook(version.Program)
	if err != nil {
		return false, pkgerrors.WithMessage(err, "could not create eventlog logrus hook")
	}
	logrus.AddHook(el)

	// Creates a Win32 event defined on a Global scope at stackdump-{pid} that can be signaled by
	// built-in administrators of the Windows machine or by the local system.
	// If this Win32 event (Global//stackdump-{pid}) is signaled, a goroutine launched by this call
	// will dump the current stack trace into {windowsTemporaryDirectory}/{default.WindowsServiceName}.{pid}.stack.logs
	profilings.SetupDumpStacks(version.Program, os.Getpid(), os.TempDir())

	// Run the SCM control loop in the background; a failure here is fatal.
	go func() {
		if err := svc.Run(version.Program, Service); err != nil {
			logrus.Fatalf("Windows Service error, exiting: %s", err)
		}
	}()

	return true, nil
}
85 |
// MonitorProcessExit blocks until every process tracked by ProcessWaitGroup
// has finished.
func MonitorProcessExit() {
	logrus.Info("Waiting for all processes to exit...")
	ProcessWaitGroup.Wait()
}
90 |
--------------------------------------------------------------------------------
/pkg/windows/types.go:
--------------------------------------------------------------------------------
1 | //go:build windows
2 | // +build windows
3 |
4 | package windows
5 |
6 | import (
7 | "context"
8 |
9 | daemonconfig "github.com/k3s-io/k3s/pkg/daemons/config"
10 | opv1 "github.com/tigera/operator/api/v1"
11 | "k8s.io/client-go/rest"
12 | )
13 |
// CNIPlugin is the interface implemented by Windows CNI backends: configure
// the plugin for a node, start it, expose its common configuration, and
// reserve a source VIP for the node.
type CNIPlugin interface {
	Setup(ctx context.Context, nodeConfig *daemonconfig.Node, restConfig *rest.Config, dataDir string) error
	Start(ctx context.Context) error
	GetConfig() *CNICommonConfig
	ReserveSourceVip(ctx context.Context) (string, error)
}
20 |
// KubeConfig holds the values used to build a kubeconfig for the CNI plugin,
// plus the path the file lives at.
type KubeConfig struct {
	CertificateAuthority string
	Server string
	Token string
	Path string
}
27 |
// CNICommonConfig carries the network settings shared by all Windows CNI
// plugins (overlay naming/encapsulation, CIDRs, VXLAN parameters, CNI
// directories, and the kubeconfig used by the plugin).
type CNICommonConfig struct {
	Name string
	OverlayNetName string
	OverlayEncap string
	Hostname string
	ConfigPath string
	CNIConfDir string
	CNIBinDir string
	ClusterCIDR string
	ServiceCIDR string
	NodeIP string
	VxlanVNI string
	VxlanPort string
	Interface string
	IpamType string
	CNIVersion string
	KubeConfig *KubeConfig
}
46 |
// CalicoConfig extends CNICommonConfig with Calico-specific settings
// (datastore, DNS, and etcd TLS material).
type CalicoConfig struct {
	CNICommonConfig // embedded struct
	KubeNetwork string
	DNSServers string
	DNSSearch string
	DatastoreType string
	NodeNameFile string
	Platform string
	IPAutoDetectionMethod string
	ETCDEndpoints string
	ETCDKeyFile string
	ETCDCertFile string
	ETCDCaCertFile string
}
61 |
// FlannelConfig is the Flannel CNI configuration; it adds nothing beyond the
// common settings.
type FlannelConfig struct {
	CNICommonConfig // embedded struct
}
65 |
// Stub of Calico configuration used to extract user-provided overrides
// Based off of https://github.com/tigera/operator/blob/master/api/v1/installation_types.go
type CalicoInstallation struct {
	Installation CalicoInstallationSpec `json:"installation,omitempty"`
}
71 |
// CalicoInstallationSpec mirrors the subset of the tigera operator
// Installation spec that users may override.
type CalicoInstallationSpec struct {
	CalicoNetwork            opv1.CalicoNetworkSpec `json:"calicoNetwork,omitempty"`
	FlexVolumePath           string                 `json:"flexVolumePath,omitempty"`
	ControlPlaneNodeSelector map[string]string      `json:"controlPlaneNodeSelector,omitempty"`
}
77 |
--------------------------------------------------------------------------------
/scripts/airgap/dnsNodeCache-test.yaml:
--------------------------------------------------------------------------------
# HelmChartConfig override used by the airgap test suite to enable the
# NodeLocal DNSCache feature of the rke2-coredns chart.
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
  name: rke2-coredns
  namespace: kube-system
spec:
  valuesContent: |-
    nodelocal:
      enabled: true
10 |
--------------------------------------------------------------------------------
/scripts/airgap/loadbalancer-test.yaml:
--------------------------------------------------------------------------------
# LoadBalancer Service used by the airgap test suite: exposes the
# rke2-ingress-nginx pods on ports 8080 (http) and 8443 (https) to verify
# that LoadBalancer provisioning works.
apiVersion: v1
kind: Service
metadata:
  name: lb-test
  namespace: kube-system
spec:
  type: LoadBalancer
  selector:
    app.kubernetes.io/name: rke2-ingress-nginx
  ports:
    - name: http
      protocol: TCP
      port: 8080
      targetPort: http
    - name: https
      protocol: TCP
      port: 8443
      targetPort: https
19 |
--------------------------------------------------------------------------------
/scripts/build:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Top-level build driver: builds the linux and windows binaries and images,
# packages the bundles, and produces the dev runtime image, RPMs, and the
# test image.
set -ex

# Quote dirname/$0 so the script also works from paths containing spaces.
cd "$(dirname "$0")/.."

source ./scripts/version.sh
mkdir -p build/images

./scripts/build-binary
./scripts/build-windows-binary
./scripts/build-images
./scripts/build-windows-images
./scripts/package-bundle
./scripts/package-windows-bundle
./scripts/dev-runtime-image
./scripts/dev-rpm
./scripts/build-image-test
18 |
--------------------------------------------------------------------------------
/scripts/build-image-runtime:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Builds the rke2 runtime image for the host platform and, where supported,
# the windows runtime image, then saves both as tarballs under build/images.
set -ex

# Quote dirname/$0 so the script also works from paths containing spaces.
cd "$(dirname "$0")/.."

source ./scripts/version.sh

DOCKER_BUILDKIT=${DOCKER_BUILDKIT:-1} docker image build \
    --build-arg TAG=${VERSION} \
    --build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} \
    --build-arg MAJOR=${VERSION_MAJOR} \
    --build-arg MINOR=${VERSION_MINOR} \
    --build-arg DAPPER_HOST_ARCH=${GOARCH} \
    --build-arg CACHEBUST="$(date +%s%N)" \
    --tag ${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION} \
    --tag ${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION}-${GOOS}-${GOARCH} \
    --target runtime \
    --file Dockerfile \
    .

# Windows runtime images are amd64-only and can be skipped via SKIP_WINDOWS.
if [ "${GOARCH}" != "s390x" ] && [ "${GOARCH}" != "arm64" ] && [ -z "$SKIP_WINDOWS" ]; then
    DOCKER_BUILDKIT=${DOCKER_BUILDKIT:-1} docker image build \
        --build-arg TAG=${VERSION} \
        --build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} \
        --build-arg MAJOR=${VERSION_MAJOR} \
        --build-arg MINOR=${VERSION_MINOR} \
        --build-arg CACHEBUST="$(date +%s%N)" \
        --tag ${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION}-windows-amd64 \
        --target windows-runtime \
        --file Dockerfile.windows \
        .
    # Only ever used in its compressed form for e2e tests
    mkdir -p build/images
    # Save under the same windows-amd64 tag applied above; the previous
    # windows-${GOARCH} reference only matched when GOARCH happened to be amd64.
    docker image save \
        ${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION}-windows-amd64 | \
        zstd -T0 -16 -f --long=25 --no-progress - -o build/images/${PROG}-images.windows-amd64.tar.zst
fi
mkdir -p build/images
docker image save \
    --output build/images/${PROG}-runtime.tar \
    ${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION}-${GOOS}-${GOARCH}
42 |
--------------------------------------------------------------------------------
/scripts/build-image-test:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Builds the rke2 test image (Dockerfile "test" target). Skipped on
# architectures that have no test image (s390x, arm64).
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

if [ "${GOARCH}" == "s390x" ] || [ "${GOARCH}" == "arm64" ]; then
    exit 0
fi

# CACHEBUST forces a rebuild of cache-sensitive layers on every invocation.
DOCKER_BUILDKIT=${DOCKER_BUILDKIT:-1} docker image build \
    --build-arg TAG=${VERSION} \
    --build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} \
    --build-arg CACHEBUST="$(date +%s%N)" \
    --tag ${REPO}/${PROG}-test:${DOCKERIZED_VERSION} \
    --tag ${REPO}/${PROG}-test:${DOCKERIZED_VERSION}-${GOARCH} \
    --target test \
    .
20 |
--------------------------------------------------------------------------------
/scripts/build-upload:
--------------------------------------------------------------------------------
#!/bin/bash
# Uploads a dist bundle tarball ($1) and a compressed airgap image tarball
# ($2) to the rke2-ci-builds S3 bucket, renaming both with the commit hash
# ($3) and publishing sha256 checksum files alongside them.
set -ex

# Missing AWS credentials are treated as "nothing to do", not an error.
[ -n "$AWS_ACCESS_KEY_ID" ] || {
    echo "AWS_ACCESS_KEY_ID is not set"
    exit 0
}

[ -n "$AWS_SECRET_ACCESS_KEY" ] || {
    echo "AWS_SECRET_ACCESS_KEY is not set"
    exit 0
}

# $1 must be a dist bundle tarball (rke2.linux-* or rke2.windows-*).
[[ $1 =~ rke2\.(linux|windows)-.+\.tar\.gz ]] || {
    echo "First argument should be a dist bundle tarball" >&2
    exit 1
}

# $2 must be a compressed airgap runtime image tarball.
[[ $2 =~ rke2-images\..+\.tar\.zst ]] || {
    echo "Second argument should be a compressed airgap runtime image tarball" >&2
    exit 1
}

# $3 is appended to the uploaded artifact names.
[ -n "$3" ] || {
    echo "Third argument should be a commit hash" >&2
    exit 1
}

# Restrict permissions on the staged files.
umask 077

TMPDIR=$(mktemp -d)
# Always remove the staging directory, preserving the original exit code.
cleanup() {
    exit_code=$?
    trap - EXIT INT
    rm -rf ${TMPDIR}
    exit ${exit_code}
}
trap cleanup EXIT INT

# Stage the bundle plus its checksum under the commit-suffixed name.
BUNDLE_NAME=$(basename $1 .tar.gz)-$3.tar.gz
(cd $(dirname $1) && sha256sum $(basename $1)) >${TMPDIR}/${BUNDLE_NAME}.sha256sum
cp $1 ${TMPDIR}/${BUNDLE_NAME}

# Stage the image tarball plus its checksum under the commit-suffixed name.
TARBALL_NAME=$(basename $2 .tar.zst)-$3.tar.zst
(cd $(dirname $2) && sha256sum $(basename $2)) >${TMPDIR}/${TARBALL_NAME}.sha256sum
cp $2 ${TMPDIR}/${TARBALL_NAME}

for FILE in ${TMPDIR}/${BUNDLE_NAME}*; do
    aws s3 cp ${FILE} s3://rke2-ci-builds || exit 1
done

for FILE in ${TMPDIR}/${TARBALL_NAME}*; do
    aws s3 cp ${FILE} s3://rke2-ci-builds || exit 1
done
echo "Build uploaded" >&2
echo "https://rke2-ci-builds.s3.amazonaws.com/${BUNDLE_NAME}"
--------------------------------------------------------------------------------
/scripts/build-windows-binary:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Cross-compiles the rke2 Windows binary (bin/rke2.exe) using mingw.
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

# Windows binaries are only produced on amd64 hosts.
if [ "${GOARCH}" == "s390x" ] || [ "${GOARCH}" == "arm64" ]; then
    exit 0
fi

if [ -z "${GODEBUG}" ]; then
    # Release build: strip DWARF debug info (-w), no debug gcflags or tags.
    EXTRA_LDFLAGS="${EXTRA_LDFLAGS} -w"
    DEBUG_GO_GCFLAGS=""
    DEBUG_TAGS=""
else
    # Debug build: disable optimizations and inlining.
    # NOTE(review): DEBUG_GO_GCFLAGS is only echoed below; the go build
    # invocation passes ${GO_GCFLAGS}. Confirm which variable is intended.
    DEBUG_GO_GCFLAGS='-gcflags=all=-N -l'
fi

# Mark the build dirty if the working tree has uncommitted changes.
REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .dirty; fi)
GOOS=windows
RELEASE=${PROG}.${GOOS}-${GOARCH}


BUILDTAGS="netgo osusergo no_stage static_build sqlite_omit_load_extension no_cri_dockerd"
GO_BUILDTAGS="${GO_BUILDTAGS} ${BUILDTAGS} ${DEBUG_TAGS}"

# Embed version and default-image metadata via -X linker flags.
VERSION_FLAGS="
    -X ${K3S_PKG}/pkg/version.GitCommit=${REVISION}
    -X ${K3S_PKG}/pkg/version.Program=${PROG}
    -X ${K3S_PKG}/pkg/version.Version=${VERSION}
    -X ${K3S_PKG}/pkg/version.UpstreamGolang=${VERSION_GOLANG}
    -X ${RKE2_PKG}/pkg/images.DefaultRegistry=${REGISTRY}
    -X ${RKE2_PKG}/pkg/images.DefaultEtcdImage=rancher/hardened-etcd:${ETCD_VERSION}-build20250411
    -X ${RKE2_PKG}/pkg/images.DefaultKubernetesImage=${REPO}/hardened-kubernetes:${KUBERNETES_IMAGE_TAG}
    -X ${RKE2_PKG}/pkg/images.DefaultPauseImage=rancher/mirrored-pause:${PAUSE_VERSION}
    -X ${RKE2_PKG}/pkg/images.DefaultRuntimeImage=${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION}-${GOOS}-${GOARCH}
"

GO_LDFLAGS="${STATIC_FLAGS} ${EXTRA_LDFLAGS}"
echo ${DEBUG_GO_GCFLAGS}
# CGO with the mingw toolchain is required for the sqlite build tags above.
GOOS=windows \
    CGO_ENABLED=1 CXX=x86_64-w64-mingw32-g++ CC=x86_64-w64-mingw32-gcc \
    go build \
        -tags "${GO_BUILDTAGS}" \
        ${GO_GCFLAGS} ${GO_BUILD_FLAGS} \
        -o bin/${PROG}.exe \
        -ldflags "${GO_LDFLAGS} ${VERSION_FLAGS}" \
        ${GO_TAGS}
50 |
--------------------------------------------------------------------------------
/scripts/build-windows-images:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Writes the list of Windows images to build/windows-images.txt.
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

# Windows images are amd64-only.
if [ "${GOARCH}" == "s390x" ] || [ "${GOARCH}" == "arm64" ]; then
    exit 0
fi

mkdir -p build

# Write the heredoc to the file. The previous "cat <build/windows-images.txt"
# tried to READ from a (nonexistent) file, and the image-name lines plus the
# stray EOF below it would have been executed as shell commands.
cat <<EOF >build/windows-images.txt
${REGISTRY}/${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION}-windows-amd64
${REGISTRY}/${REPO}/mirrored-pause:${PAUSE_VERSION}
EOF
18 |
--------------------------------------------------------------------------------
/scripts/checksum:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Generates sha256sum-${ARCH}.txt covering every artifact for this
# architecture in the checksum directory.
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

CHECKSUM_DIR=${CHECKSUM_DIR:-dist/artifacts}

mkdir -p ${CHECKSUM_DIR}
sumfile="${CHECKSUM_DIR}/sha256sum-${ARCH}.txt"
# Truncate any existing checksum file before appending.
echo -n "" > "${sumfile}"

# Every artifact matching this arch, excluding the checksum file itself.
files=$(ls ${CHECKSUM_DIR} | grep "${ARCH}" | grep -v "sha256sum-${ARCH}.txt")
for file in ${files}; do
    # Strip the directory prefix so the sums file contains bare filenames.
    sha256sum "${CHECKSUM_DIR}/${file}" | sed "s;$(dirname ${CHECKSUM_DIR}/${file})/;;g" >> "${sumfile}"
done

cat "${sumfile}"
--------------------------------------------------------------------------------
/scripts/clean:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

cd $(dirname $0)/..

# Remove all build outputs: compiled binaries, release artifacts, image staging.
for dir in bin dist build; do
    rm -rf "${dir}"
done
--------------------------------------------------------------------------------
/scripts/clean-cache:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

cd $(dirname $0)/..

source ./scripts/version.sh

# Tear down the dev shell container and its Go build/module cache volumes.
docker rm -fv "${PROG}-dev-shell"
docker volume rm "${PROG}-cache" "${PROG}-pkg"
--------------------------------------------------------------------------------
/scripts/copy-images.sh:
--------------------------------------------------------------------------------
#!/bin/sh
set -e

TARGET_REGISTRY=''
IMAGE_LIST=''
DRY_RUN=''

# Abort unless the crane CLI is available on PATH.
has_crane() {
    CRANE="$(command -v crane || true)"
    if [ -z "${CRANE}" ]; then
        echo "crane is not installed"
        exit 1
    fi
}

usage() {
    echo "Syncs images to a registry.
usage: $0 [options]
  -t target registry
  -i image list file path
  -d dry run
  -h show help

list format:
  [REGISTRY]/[REPOSITORY]:[TAG]

examples:
  $0 -t registry.example.com -i build/images-all.txt
  $0 -d -t registry.example.com -i build/images-all.txt"
}

while getopts 't:i:dh' c; do
    case $c in
    t)
        TARGET_REGISTRY=$OPTARG
        ;;
    i)
        IMAGE_LIST=$OPTARG
        ;;
    d)
        DRY_RUN=true
        ;;
    h)
        usage
        exit 0
        ;;
    *)
        usage
        exit 1
        ;;
    esac
done

if [ -z "${TARGET_REGISTRY}" ]; then
    echo "target registry is required"
    usage
    exit 1
fi

if [ -z "${IMAGE_LIST}" ]; then
    echo "image list file is required"
    usage
    exit 1
fi

if [ ! -f "${IMAGE_LIST}" ]; then
    # FIX: message previously read "image listfile" (missing space).
    echo "image list file ${IMAGE_LIST} not found"
    exit 1
fi

has_crane

if [ -n "${DRY_RUN}" ]; then
    echo "Dry run, no images will be copied"
fi

# Copy each listed image to the target registry, keeping repository and tag
# but swapping the registry component. Failures are logged, not fatal.
while read -r source_image; do
    if [ -z "${source_image}" ]; then
        continue
    fi

    image_without_registry=$(echo "${source_image}" | cut -d'/' -f2-)
    target_image="${TARGET_REGISTRY}/${image_without_registry}"

    if [ -n "${DRY_RUN}" ]; then
        echo "crane copy \"${source_image}\" \"${target_image}\" --no-clobber"
    else
        if ! crane copy "${source_image}" "${target_image}" --no-clobber; then
            echo "failed to copy ${source_image}"
            continue
        fi
    fi
done < "${IMAGE_LIST}"
--------------------------------------------------------------------------------
/scripts/dev-peer:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

# Peer index; allows multiple peers side by side (debug ports 2341, 2342, ...).
PEER=${PEER:-1}

cd $(dirname $0)/..

. ./scripts/version.sh

# Launch an interactive dev container linked to the main dev-shell container
# (reachable as rke2-server), sharing the Go caches and preloaded images.
# NOTE(review): the bare `-v ${PROG}` mount looks suspect -- a -v spec with no
# colon and a non-absolute path is normally rejected by docker; confirm what
# mount was actually intended here.
docker run --rm --link \
    ${PROG}-dev-shell:${PROG}-server \
    --name ${PROG}-peer${PEER} \
    --hostname ${PROG}-peer${PEER} \
    -p 127.0.0.1:234${PEER}:2345 \
    -ti -e WORKSPACE=$(pwd) \
    -v ${HOME}:${HOME} \
    -v ${PROG} -w $(pwd) \
    -v ${PWD}/build/images:/var/lib/rancher/rke2/agent/images \
    --privileged -v ${PROG}-pkg:/go/pkg \
    -v ${PROG}-cache:/root/.cache/go-build ${PROG}-dev bash
--------------------------------------------------------------------------------
/scripts/dev-peer-enter:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

PEER=${PEER:-1}

cd $(dirname $0)/..

. ./scripts/version.sh

# Attach an interactive shell to the already-running peer container.
docker exec -it "${PROG}-peer${PEER}" bash
--------------------------------------------------------------------------------
/scripts/dev-rpm:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

# Dev RPMs are only produced for amd64.
if [ "${GOARCH}" != "amd64" ]; then
    exit 0
fi

# Skip when explicitly disabled, or when building a tagged (release) commit.
if [ -z "${SKIP_DEV_RPM}" ] && [ -z "${DRONE_TAG}" ]; then
    scripts/package-dev-rpm
    scripts/publish-dev-rpm
fi
--------------------------------------------------------------------------------
/scripts/dev-runtime-image:
--------------------------------------------------------------------------------
#!/bin/bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

# Runtime images only exist for amd64 (linux + windows); skip elsewhere.
if [ "${GOARCH}" = "s390x" ] || [ "${GOARCH}" = "arm64" ]; then
    exit 0
fi

# Save and zstd-compress the linux runtime image, then stage it for upload.
docker image save ${REGISTRY}/${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION} |
    zstd -T0 -16 -f --long=25 --no-progress - -o build/images/${PROG}-images.${PLATFORM}.tar.zst
./scripts/build-upload dist/artifacts/${RELEASE}.tar.gz build/images/${PROG}-images.${PLATFORM}.tar.zst ${COMMIT}

# Same for the windows runtime image.
docker image save ${REGISTRY}/${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION}-windows-${ARCH} |
    zstd -T0 -16 -f --long=25 --no-progress - -o build/images/${PROG}-images.windows-${ARCH}.tar.zst
./scripts/build-upload dist/artifacts/${PROG}.windows-${ARCH}.tar.gz build/images/${PROG}-images.windows-${ARCH}.tar.zst ${COMMIT}
--------------------------------------------------------------------------------
/scripts/dev-shell:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

# Assemble the docker invocation for the interactive, privileged dev shell.
run_args=(
    docker container run
    --env "WORKSPACE=${PWD}"
    --hostname "${PROG}-server"
    --interactive
    --name "${PROG}-dev-shell"
    --privileged
    --publish ":2345:2345"
    --rm
    --tty
    --volume "${HOME}:${HOME}:ro"
    --volume "${PROG}-pkg:/go/pkg"
    --volume "${PROG}-cache:/root/.cache/go-build"
    --volume "${PWD}:${PWD}"
    --volume "/run/k3s"
    --volume "/var/lib/rancher/rke2"
    --workdir "${PWD}"
)

# Preload airgap images into the agent image dir unless told not to.
if [ -z "${SKIP_PRELOAD_IMAGE}" ]; then
    run_args+=(--volume "${PWD}/build/images:/var/lib/rancher/rke2/agent/images")
fi

exec "${run_args[@]}" "${PROG}-dev" "bash"
--------------------------------------------------------------------------------
/scripts/dev-shell-build:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

# Make sure the airgap images exist before baking the shell image.
if [ ! -d build/images ]; then
    ./scripts/build-images
fi

# Build the dev shell image (BuildKit on by default).
DOCKER_BUILDKIT=${DOCKER_BUILDKIT:-1} docker image build -t "${PROG}-dev" --target shell .
--------------------------------------------------------------------------------
/scripts/dev-shell-enter:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

# Open an interactive shell in the running dev-shell container.
docker exec -it "${PROG}-dev-shell" bash
--------------------------------------------------------------------------------
/scripts/package:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

mkdir -p dist/{artifacts,bundle}

# Package the linux and windows binaries, then the tarball bundles.
for step in package-binary package-windows-binary package-bundle package-windows-bundle; do
    ./scripts/${step}
done
--------------------------------------------------------------------------------
/scripts/package-binary:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

mkdir -p dist/artifacts

# Install the stripped (-s) binary under its release name, e.g. rke2.linux-amd64.
install -s "bin/${PROG}" "dist/artifacts/${RELEASE}"
--------------------------------------------------------------------------------
/scripts/package-bundle:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

mkdir -p dist/bundle/share/${PROG}

### bundle underlay -- static files (systemd units, killall/uninstall scripts)
rsync -a bundle/ dist/bundle/

### bin/rke2 (stripped)
install -s bin/${PROG} dist/bundle/bin/

### share/rke2/LICENSE
cp -vf LICENSE dist/bundle/share/${PROG}/LICENSE.txt

### (setup directory)
mkdir -p dist/artifacts

### (make the tarball)
# Windows payloads (share/rke2-windows, *.exe, *.ps*) are excluded here; they
# ship in the separate windows bundle produced by package-windows-bundle.
if [ -z "${PACKAGE_SKIP_TARBALL}" ]; then
    tar -czf dist/artifacts/${RELEASE}.tar.gz -C dist/bundle --exclude share/rke2-windows --exclude '*.exe' --exclude '*.ps*' $(find dist/bundle -mindepth 1 -maxdepth 1 -type d -exec basename {} \;)
fi
26 |
--------------------------------------------------------------------------------
/scripts/package-dev-rpm:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

export COMMIT
export DAPPER_SOURCE=${DAPPER_SOURCE:-$(realpath -q .)}
export COMBARCH='x86_64-amd64'

# Dev builds get a ".testing.0" suffix so they sort below real releases.
RPM_VERSION="${VERSION}.testing.0"

TMPDIR=$(mktemp -d -t)
SCRIPT_LIST=$(mktemp "${TMPDIR}/XXXXXX")
# Always remove the scratch dir and the temporary git tag, preserving the
# original exit code.
cleanup() {
    exit_code=$?
    trap - EXIT INT
    rm -rf "${TMPDIR}"
    git tag -d "${RPM_VERSION}" || true
    exit ${exit_code}
}
trap cleanup EXIT INT

# Fetch the rke2-packaging build scripts / rpm specs that drive rpmbuild.
curl -L https://github.com/rancher/rke2-packaging/archive/master.tar.gz | tar --strip-components=1 -xzC "${TMPDIR}"

export SRC_PATH="${TMPDIR}/source"
DIST_PATH=$(realpath -mq ./dist/rpms)
USER=$(whoami)

[ -d "${DIST_PATH}" ] || mkdir "${DIST_PATH}"
[ -d "${SRC_PATH}" ] || mkdir "${SRC_PATH}"
cp ./dist/artifacts/* "${SRC_PATH}"

# Mock spectool, not needed for local builds
mkdir "${TMPDIR}/bin"
echo 'exit 0' > "${TMPDIR}/bin/spectool"
chmod +x "${TMPDIR}/bin/spectool"
cp "${TMPDIR}/bin/spectool" "${TMPDIR}/bin/rpmdev-spectool"
export PATH="${TMPDIR}/bin:${PATH}"

# Set rpmmacros that differ in Alpine from RHEL distributions
echo "%_topdir ${HOME}/rpmbuild" > ~/.rpmmacros
echo "%_sharedstatedir /var/lib" >> ~/.rpmmacros
echo "%_localstatedir /var" >> ~/.rpmmacros

# Set rpm version as lightweight tag
git tag "${RPM_VERSION}"

# Run every downloaded build-* script, patched in place for a local
# (non-dapper, possibly non-root) environment.
find -L "${TMPDIR}" -name 'build-*' -print >"${SCRIPT_LIST}"
while IFS= read -r script; do
    if [ "${USER}" != 'root' ]; then
        # Use /home/$USER instead of /root when running outside of dapper
        sed -i -e "s%/root%${HOME}%g" "${script}"
    fi

    # Modify rpmbuild flags
    # --nodeps do not check for build dependencies, systemd-rpm-macros should suffice
    # -bb do not build src packages, not needed for commit rpms
    sed -i -e 's/^rpmbuild/rpmbuild --nodeps/' \
        -e '/^rpmbuild/,/.spec$/{s/-ba/-bb/}' -e '/rpmbuild\/SRPMS\/\*/d' \
        "${script}"

    # Replace hardcoded paths and remove inline rpm macros
    sed -i -e "s%/source%${TMPDIR}%g" -e "s%${TMPDIR}/dist%$DIST_PATH%g" -e '/SRC_PATH=/d' \
        -e '/rpmmacros/d' \
        "${script}"

    # Build rpm
    TAG=${RPM_VERSION} bash "${script}"
done <"${SCRIPT_LIST}"

# When run under dapper, hand ownership of outputs back to the host user.
if [ "${DAPPER_UID:--1}" -ne "-1" ]; then
    chown -R "$DAPPER_UID:$DAPPER_GID" "${DIST_PATH}"
fi
76 |
--------------------------------------------------------------------------------
/scripts/package-image-runtime:
--------------------------------------------------------------------------------
#!/bin/bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

# The runtime image is only built for amd64; skip other architectures.
case "${GOARCH}" in
s390x | arm64)
    exit 0
    ;;
esac

# Export the runtime image and zstd-compress it for airgap packaging.
docker image save ${REGISTRY}/${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION} |
    zstd -T0 -16 -f --long=25 --no-progress - -o build/images/${PROG}-images.${PLATFORM}.tar.zst
--------------------------------------------------------------------------------
/scripts/package-images:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

mkdir -p dist/artifacts

# We reorder the tar file so that the metadata files are at the start of the archive, which should make loading
# the runtime image faster. By default `docker image save` puts these at the end of the archive, which means the entire
# tarball needs to be read even if you're just loading a single image.
for FILE in build/images*.txt; do
    BASE=$(basename ${FILE} .txt)
    DEST=build/images/${PROG}-${BASE}.tar
    docker image save --output ${DEST}.tmp $(<${FILE})
    # Front-load manifest.json/repositories, then append the layer data.
    bsdtar -c -f ${DEST} --include=manifest.json --include=repositories @${DEST}.tmp
    bsdtar -r -f ${DEST} --exclude=manifest.json --exclude=repositories @${DEST}.tmp
    rm -f ${DEST}.tmp

    # Stage the image list plus zstd- and gzip-compressed archives.
    # FIX: removed the redundant recomputation of BASE and the duplicate
    # TARFILE variable (it was always identical to DEST).
    cp -f ${FILE} dist/artifacts/${PROG}-${BASE}.${PLATFORM}.txt
    zstd -T0 -16 -f --long=25 --no-progress ${DEST} -o dist/artifacts/${PROG}-${BASE}.${PLATFORM}.tar.zst
    pigz -v -c ${DEST} > dist/artifacts/${PROG}-${BASE}.${PLATFORM}.tar.gz
done

# Combined, de-duplicated list of every image referenced by any archive.
cat build/images*.txt | sort -V | uniq > dist/artifacts/${PROG}-images-all.${PLATFORM}.txt
29 |
--------------------------------------------------------------------------------
/scripts/package-windows-binary:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

# Windows binaries are only produced for amd64.
case "${GOARCH}" in
s390x | arm64)
    exit 0
    ;;
esac

mkdir -p dist/artifacts

# Install the stripped .exe under its artifact name.
install -s "bin/${PROG}.exe" "dist/artifacts/${PROG}-windows-amd64.exe"
15 |
--------------------------------------------------------------------------------
/scripts/package-windows-bundle:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

if [ "${GOARCH}" == "s390x" ] || [ "${GOARCH}" == "arm64" ]; then
    exit 0
fi

mkdir -p dist/bundle/share/${PROG}-windows

### bundle underlay
rsync -a bundle/ dist/bundle/

### bin/rke2.exe
install -s bin/${PROG}.exe dist/bundle/bin/

### share/rke2-windows/LICENSE
# FIX: the license was copied into share/${PROG}/ (the linux bundle's share
# dir) while this script creates share/${PROG}-windows; copy it into the
# windows share dir created above.
cp -vf LICENSE dist/bundle/share/${PROG}-windows/LICENSE.txt

### (setup directory)
mkdir -p dist/artifacts

### (make the tarball)
if [ -z "${PACKAGE_SKIP_TARBALL}" ]; then
    tar -czf dist/artifacts/${PROG}.windows-${ARCH}.tar.gz -C dist/bundle $(find dist/bundle -mindepth 1 -maxdepth 1 -type d -exec basename {} \;)
fi
30 |
--------------------------------------------------------------------------------
/scripts/package-windows-images:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

mkdir -p dist/artifacts

runtime_image=${REGISTRY}/${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION}-windows-amd64

# ltsc1809 / Server 2019 1809
crane pull --platform windows/amd64 \
    ${runtime_image} \
    ${REGISTRY}/${REPO}/mirrored-pause:${PAUSE_VERSION}-amd64-windows-10.0.17763.2114 \
    rke2-windows-1809-amd64-images.tar

# ltsc2022 / Server 2022 21H2
crane pull --platform windows/amd64 \
    ${runtime_image} \
    ${REGISTRY}/${REPO}/mirrored-pause:${PAUSE_VERSION}-amd64-windows-10.0.20348.169 \
    rke2-windows-ltsc2022-amd64-images.tar

# Compress each tarball (zstd + gzip) into dist/artifacts.
for TARFILE in rke2-windows-1809-amd64-images.tar rke2-windows-ltsc2022-amd64-images.tar; do
    zstd -T0 -16 -f --long=25 --no-progress ${TARFILE} -o dist/artifacts/${TARFILE}.zst
    pigz -v -c ${TARFILE} > dist/artifacts/${TARFILE}.gz
done

# De-duplicated image list for the windows artifact set.
cat build/windows-images.txt | sort -V | uniq > dist/artifacts/${PROG}-images.windows-amd64.txt
28 |
29 |
--------------------------------------------------------------------------------
/scripts/publish-binary:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

mkdir -p dist/artifacts

# Upload every staged artifact to the GitHub release for this version tag.
gh release upload "${VERSION}" dist/artifacts/*
11 |
--------------------------------------------------------------------------------
/scripts/publish-dev-rpm:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

# Soft-fail (exit 0) when AWS credentials are absent so non-CI builds don't
# break on the publish step.
[ -n "$AWS_ACCESS_KEY_ID" ] || {
    echo "AWS_ACCESS_KEY_ID is not set"
    exit 0
}

[ -n "$AWS_SECRET_ACCESS_KEY" ] || {
    echo "AWS_SECRET_ACCESS_KEY is not set"
    exit 0
}

cd $(dirname $0)/..

source ./scripts/version.sh

RPM_LIST=$(mktemp -t)
TMPDIR=$(mktemp -d)
# Remove scratch files on exit, preserving the original exit code.
cleanup() {
    exit_code=$?
    trap - EXIT INT
    rm -rf "${TMPDIR}" "${RPM_LIST}"
    exit ${exit_code}
}
trap cleanup EXIT INT

RPM_DIR='./dist/rpms'

# Collect built binary rpms (skipping src rpms) and copy each under a
# commit-qualified name: <kind>-<commit>.<distro>.rpm
find "${RPM_DIR}" -mindepth 2 -type f \( -name '*.rpm' -not -name '*.src.rpm' \) -print >"${RPM_LIST}"
while IFS= read -r FILE; do
    # e.g. "rke2-server" / "rke2-agent", taken from the file path
    KIND=$(echo "${FILE}" | grep -oE 'rke2-\w+')
    # distro directory name, derived by stripping from "x86_64" onward
    DISTRO=$(basename "${FILE%%x86_64*}")
    cp "${FILE}" "${RPM_DIR}/${KIND}-${COMMIT}.${DISTRO}.rpm"
done <"${RPM_LIST}"

umask 077

aws s3 cp ${RPM_DIR}/ s3://rke2-ci-builds --exclude "*" --include "*.rpm" --recursive || exit 1

echo "Build uploaded" >&2
# NOTE(review): BUNDLE_NAME is not set anywhere in this script or in
# version.sh -- confirm where this URL suffix is supposed to come from.
echo "https://rke2-ci-builds.s3.amazonaws.com/${BUNDLE_NAME}"
43 |
--------------------------------------------------------------------------------
/scripts/publish-image-runtime:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

# Build and push the per-platform runtime image, with SBOM and provenance
# attestations attached. CACHEBUST forces a fresh build each run.
build_args=(
    ${IID_FILE_FLAG}
    --sbom=true
    --attest type=provenance,mode=max
    --build-arg TAG=${VERSION}
    --build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION}
    --build-arg MAJOR=${VERSION_MAJOR}
    --build-arg MINOR=${VERSION_MINOR}
    --build-arg DAPPER_HOST_ARCH=${GOARCH}
    --build-arg CACHEBUST="$(date +%s%N)"
    --tag ${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION}-${GOOS}-${GOARCH}
    --target runtime
    --file Dockerfile
    --push
)

docker buildx build "${build_args[@]}" .
22 |
--------------------------------------------------------------------------------
/scripts/publish-image-runtime-windows:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

# Build and push the windows/amd64 runtime image, with SBOM and provenance
# attestations attached. CACHEBUST forces a fresh build each run.
build_args=(
    ${IID_FILE_FLAG}
    --sbom=true
    --attest type=provenance,mode=max
    --build-arg TAG=${VERSION}
    --build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION}
    --build-arg MAJOR=${VERSION_MAJOR}
    --build-arg MINOR=${VERSION_MINOR}
    --build-arg CACHEBUST="$(date +%s%N)"
    --tag ${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION}-windows-amd64
    --target windows-runtime
    --file Dockerfile.windows
    --platform windows/amd64
    --push
)

docker buildx build "${build_args[@]}" .
22 |
--------------------------------------------------------------------------------
/scripts/publish-manifest-kubernetes:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh
export DOCKER_CLI_EXPERIMENTAL=enabled

# Fold the per-arch kubernetes image into the multi-arch manifest and push it.
manifest="${REPO}/hardened-kubernetes:${DOCKERIZED_VERSION}"

docker manifest create --amend "${manifest}" "${manifest}-${GOARCH}"

docker manifest push "${manifest}"
14 |
--------------------------------------------------------------------------------
/scripts/publish-manifest-runtime:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh
export DOCKER_CLI_EXPERIMENTAL=enabled

# Disable xtrace around the login so credentials never hit the build log.
set +x
docker login -u $DOCKER_USERNAME -p $DOCKER_PASSWORD
set -x

# Stitch the per-platform runtime images into one multi-arch manifest list.
docker buildx imagetools create \
    --tag ${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION} \
    ${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION}-linux-amd64 \
    ${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION}-linux-arm64 \
    ${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION}-windows-amd64

# Optionally record the manifest digest for downstream jobs.
if [ -n "${IID_FILE}" ]; then
    docker buildx imagetools inspect --format "{{json .Manifest}}" ${REPO}/${PROG}-runtime:${DOCKERIZED_VERSION} | jq -r '.digest' > ${IID_FILE}
fi
22 |
--------------------------------------------------------------------------------
/scripts/remote-debug:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

source ./scripts/version.sh

# Rebuild with debug symbols, then run the binary under a headless delve
# server that remote clients can attach to on :2345.
GODEBUG=y ./scripts/build-binary

CATTLE_DEV_MODE=true dlv exec \
    --listen=:2345 \
    --headless=true \
    --api-version=2 \
    --check-go-version=false \
    --accept-multiclient \
    -- ./bin/${PROG} ${COMMAND} ${ARGS}
15 |
--------------------------------------------------------------------------------
/scripts/remote-debug-exit:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

# Feed "q" (quit) then "y" (confirm killing the debugged process) to the
# delve server listening on :2345.
dlv_commands='
q
y
'
dlv connect :2345 <<< "${dlv_commands}"
10 |
--------------------------------------------------------------------------------
/scripts/run:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

cd $(dirname $0)/..

. ./scripts/version.sh

# Run a local server; extra flags may be passed via the ARGS env var.
# NOTE: GODEBUG is assigned but not exported here, matching prior behavior.
COMMAND="server"
GODEBUG="y"

./bin/${PROG} ${COMMAND} ${ARGS}
12 |
--------------------------------------------------------------------------------
/scripts/scan-images:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

cd $(dirname $0)/..

SCAN_OUTPUT="trivy_scan_report.txt"
# FIX: use -f so the first run (no prior report) doesn't print an error.
rm -f "$SCAN_OUTPUT"

# Download the Rancher OpenVEX Trivy report
curl -fsSO https://raw.githubusercontent.com/rancher/vexhub/refs/heads/main/reports/rancher.openvex.json

for IMAGE in $(cat build/images*.txt); do
    echo "Scanning image: $IMAGE"

    # Run Trivy scan and append the report to the output file
    trivy image "${IMAGE}" -q --no-progress \
        --severity ${SEVERITIES:-CRITICAL,HIGH} \
        --ignore-unfixed --show-suppressed \
        --vex rancher.openvex.json >> "$SCAN_OUTPUT"

    if [ "$1" = "dump-report" ]; then
        # --exit-code 1 makes trivy fail when vulnerabilities are found, so
        # RC below tells us whether to print the summary table.
        trivy image "${IMAGE}" -q --no-progress \
            --severity ${SEVERITIES:-CRITICAL,HIGH} \
            --ignore-unfixed \
            -f json \
            --exit-code 1 \
            --vex rancher.openvex.json > "temp.json"
        RC=$?
        if [ ${RC} -gt 0 ]; then
            echo -e "\nSev\tPackage\tVulnID\tInstalled\tFixed"
            jq -rc '.Results[].Vulnerabilities | select( . != null ) | .[] | "\(.Severity)\t\(.PkgName)\t\(.VulnerabilityID)\t\(.InstalledVersion)\t\(.FixedVersion)"' "temp.json" | sort
            echo
        fi
    fi
done

rm -f rancher.openvex.json
[ "$1" = "dump-report" ] && rm -f temp.json
echo "Trivy scan completed. Reports are saved in $SCAN_OUTPUT."
--------------------------------------------------------------------------------
/scripts/semver-parse.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

# NOTE(review): this script is currently an empty stub -- it contains no
# parsing logic. Version parsing lives in scripts/version.sh and
# scripts/validate-release; confirm whether this file is still needed.
--------------------------------------------------------------------------------
/scripts/sonobuoy-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "Plugins": [
3 | {
4 | "name": "e2e"
5 | }
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/scripts/test:
--------------------------------------------------------------------------------
#!/bin/bash

set -e -x
cd $(dirname $0)/..

# Sanity check that the docker daemon is reachable before doing anything.
docker ps

. ./tests/docker/test-helpers

artifacts=$(pwd)/dist/artifacts
mkdir -p $artifacts

# ---

# Conformance images are only published for amd64.
if [ "$ARCH" != 'amd64' ]; then
    early-exit "Skipping remaining tests, images not available for $ARCH."
fi

# NOTE(review): under `set -e` a sonobuoy failure aborts the script before the
# echo below, so the echoed $? is always 0 when it runs.
E2E_OUTPUT=$artifacts test-run-sonobuoy
echo "Did test-run-sonobuoy $?"
# ---

#test-run-sonobuoy mysql
#test-run-sonobuoy postgres

exit 0
27 |
--------------------------------------------------------------------------------
/scripts/test-unit:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -ex

# Build tags must match the main build so the unit tests compile the same
# code paths as the shipped binary.
UNIT_BUILDTAGS="selinux netgo osusergo no_stage static_build sqlite_omit_load_extension no_embedded_executor no_cri_dockerd"
GO_BUILDTAGS="${GO_BUILDTAGS} ${UNIT_BUILDTAGS} ${DEBUG_TAGS}"

# Only run tests whose names match "Unit".
go test -tags "${GO_BUILDTAGS}" -v -cover ./pkg/... -run Unit
9 |
--------------------------------------------------------------------------------
/scripts/validate:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -ex

if [ -n "${SKIP_VALIDATE}" ]; then
    echo "skipping validation. continuing..."
    exit 0
fi

fatal() {
    echo '[ERROR] ' "$@" >&2
    exit 1
}

# Ensure the windows component versions baked into Dockerfile.windows stay in
# lock-step with their linux counterparts (helm charts / Dockerfile).
function check_win_binaries() {
    # Upstream hasn't released a new version of crictl for 1.26 yet; skip the check entirely until they do.
    # I'm not honestly even sure that there is any guarantee of a cri-tools release for every Kubernetes minor.
    #if [ -z "${RC}" ]; then
    #    CRICTL_WINDOWS_VERSION=$(grep 'CRICTL_VERSION=' Dockerfile.windows | cut -d '=' -f 2- | grep -oE "v([0-9]+)\.([0-9]+)")
    #    if [ ! "$CRICTL_WINDOWS_VERSION" = "v$VERSION_MAJOR.$VERSION_MINOR" ]; then
    #        fatal "crictl windows binary version [$CRICTL_WINDOWS_VERSION] does not match kubernetes version"
    #    fi
    #fi
    #

    # Calico: compare the Dockerfile.windows binary version against the chart
    # version from chart_versions.yaml (trailing chart-revision suffix stripped).
    CALICO_WINDOWS_VERSION=$(grep 'CALICO_VERSION=' Dockerfile.windows | cut -d '=' -f 2- | grep -oE "v([0-9]+)\.([0-9]+)\.([0-9]+)")
    CALICO_LINUX_VERSION=$(yq '.charts[] | select(.filename == "/charts/rke2-calico.yaml").version' charts/chart_versions.yaml | cut -d ',' -f 1 | sed 's/..$//')
    if [ ! "$CALICO_WINDOWS_VERSION" = "$CALICO_LINUX_VERSION" ]; then
        fatal "Calico windows binary version [$CALICO_WINDOWS_VERSION] does not match Calico chart version [$CALICO_LINUX_VERSION]"
    fi

    # Containerd: both Dockerfiles must reference the same hardened-containerd version.
    CONTAINERD_WINDOWS_VERSION=$(grep "rancher/hardened-containerd" Dockerfile.windows | grep ':v' | cut -d '=' -f 2- | grep -oE "([0-9]+)\.([0-9]+)\.([0-9]+)")
    CONTAINERD_LINUX_VERSION=$(grep "rancher/hardened-containerd" Dockerfile | grep ':v' | cut -d '=' -f 2- | grep -oE "([0-9]+)\.([0-9]+)\.([0-9]+)")
    if [ "$CONTAINERD_LINUX_VERSION" != "$CONTAINERD_WINDOWS_VERSION" ]; then
        fatal "Containerd windows binary version [$CONTAINERD_WINDOWS_VERSION] does not match Containerd linux version [$CONTAINERD_LINUX_VERSION]"
    fi

    # Flannel: windows binary version vs. rke2-flannel chart version.
    FLANNEL_WINDOWS_VERSION=$(grep 'ENV FLANNEL_VERSION=' Dockerfile.windows | cut -d '=' -f 2- | grep -oE "v([0-9]+)\.([0-9]+)\.([0-9]+)")
    FLANNEL_LINUX_VERSION=$(yq '.charts[] | select(.filename == "/charts/rke2-flannel.yaml").version' charts/chart_versions.yaml | cut -d ',' -f 1- | sed 's/..$//')
    if [ ! "$FLANNEL_WINDOWS_VERSION" = "$FLANNEL_LINUX_VERSION" ]; then
        fatal "Flanneld windows binary version [$FLANNEL_WINDOWS_VERSION] does not match Flanneld chart version [$FLANNEL_LINUX_VERSION]"
    fi
}

if ! command -v golangci-lint; then
    echo Skipping validation: no golangci-lint available
    exit
fi

cd $(dirname $0)/..
GO=${GO-go}

echo Running validation

echo Running: golangci-lint
#CGO_ENABLED=0 golangci-lint run -v

echo Running: go mod tidy
go mod tidy

. ./scripts/version.sh

# go mod tidy (or anything else) must not have modified tracked files.
if [ -n "$DIRTY" ]; then
    echo Source dir is dirty
    git status --porcelain --untracked-files=no
    exit 1
fi

check_win_binaries
69 |
--------------------------------------------------------------------------------
/scripts/validate-release:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -e

BUILD_REGEX="build[0-9]+"

info() {
    echo '[INFO] ' "$@"
}

fatal() {
    echo '[ERROR] ' "$@" >&2
    exit 1
}

# Split a release tag (e.g. v1.33.1-rc1+rke2r1) into MAJOR/MINOR/PATCH,
# the optional RC component, and the rke2 revision (rke2rN).
function parse_tag() {
    if [ -z $1 ]; then
        fatal "tag required as argument"
        exit 1
    fi
    tag=$1
    if [[ "${tag}" =~ ^v([0-9]+)\.([0-9]+)\.([0-9]+)([-+][a-zA-Z0-9]+)?[-+](rke2r[0-9]+)$ ]]; then
        MAJOR=${BASH_REMATCH[1]}
        MINOR=${BASH_REMATCH[2]}
        PATCH=${BASH_REMATCH[3]}
        RC=${BASH_REMATCH[4]}
        RKE2_PATCH=${BASH_REMATCH[5]}
    fi
}

# Resolve a go module's effective version, honoring any replace directive.
function get-module-version() {
    go list -m -f '{{if .Replace}}{{.Replace.Version}}{{else}}{{.Version}}{{end}}' $1
}

# The tag must come from master (only if the k8s minor in go.mod matches)
# or from its matching release-X.Y branch.
function check_release_branch() {
    TAG_BRANCH=$(git branch --all -q --contains $GIT_TAG | grep origin | grep -vE 'dependabot|updatecli|origin$|HEAD' | sed -e 's/^[[:space:]]*//' | tail -1)
    if [ "$TAG_BRANCH" == "remotes/origin/master" ]; then
        K8S_VERSION_GO_MOD=$(get-module-version k8s.io/kubernetes | cut -d. -f1-2)
        if [ "v$MAJOR.$MINOR" == "$K8S_VERSION_GO_MOD" ]; then
            info "Tag $GIT_TAG is cut from master"
            return
        fi
    fi
    if [ ! "$TAG_BRANCH" = "remotes/origin/release-$MAJOR.$MINOR" ]; then
        fatal "Tag is cut from the wrong branch $TAG_BRANCH"
    fi
}

# The hardcoded kubernetes version/image tag in version.sh must agree with
# the version encoded in the release tag being validated.
function check_kubernetes_version() {
    if [[ ! "$KUBERNETES_IMAGE_TAG" =~ v$MAJOR.$MINOR.$PATCH-$RKE2_PATCH-$BUILD_REGEX ]]; then
        fatal "Kubernetes image tag [$KUBERNETES_IMAGE_TAG] is incorrect for this tag"
    fi

    if [[ ! "$KUBERNETES_VERSION" =~ v$MAJOR.$MINOR.$PATCH ]]; then
        fatal "Kubernetes version variable [$KUBERNETES_VERSION] is incorrect, please correct the version to v$MAJOR.$MINOR.$PATCH"
    fi

}

. ./scripts/version.sh

git fetch origin -f --tags
parse_tag $GITHUB_ACTION_TAG
check_release_branch
check_kubernetes_version
65 |
--------------------------------------------------------------------------------
/scripts/version.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -x

PROG=rke2
REGISTRY=${REGISTRY:-docker.io}
REPO=${REPO:-rancher}
K3S_PKG=github.com/k3s-io/k3s
RKE2_PKG=github.com/rancher/rke2
GO=${GO-go}
GOARCH=${GOARCH:-$("${GO}" env GOARCH)}
ARCH=${ARCH:-"${GOARCH}"}
GOOS=${GOOS:-$("${GO}" env GOOS)}
if [ -z "$GOOS" ]; then
    if [ "${OS}" == "Windows_NT" ]; then
        GOOS="windows"
    else
        # FIX: `$(shell uname -s)` is Makefile syntax; in bash it tried to run
        # a nonexistent `shell` command, leaving UNAME_S empty so GOOS was
        # never detected here.
        UNAME_S=$(uname -s)
        if [ "${UNAME_S}" == "Linux" ]; then
            GOOS="linux"
        elif [ "${UNAME_S}" == "Darwin" ]; then
            GOOS="darwin"
        elif [ "${UNAME_S}" == "FreeBSD" ]; then
            GOOS="freebsd"
        fi
    fi
fi

GIT_TAG=$GITHUB_ACTION_TAG
TREE_STATE=clean
COMMIT=$DRONE_COMMIT
REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .dirty; fi)
PLATFORM=${GOOS}-${GOARCH}
RELEASE=${PROG}.${PLATFORM}
# hardcode versions unless set specifically
KUBERNETES_VERSION=${KUBERNETES_VERSION:-v1.33.1}
KUBERNETES_IMAGE_TAG=${KUBERNETES_IMAGE_TAG:-v1.33.1-rke2r1-build20250515}
ETCD_VERSION=${ETCD_VERSION:-v3.5.21-k3s1}
PAUSE_VERSION=${PAUSE_VERSION:-3.6}
CCM_VERSION=${CCM_VERSION:-v1.33.0-rc1.0.20250430074337-dc03cb4b3faa-build20250430}

if [ -d .git ]; then
    if [ -z "$GIT_TAG" ]; then
        GIT_TAG=$(git tag -l --contains HEAD | head -n 1)
    fi
    if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
        DIRTY="-dirty"
        TREE_STATE=dirty
    fi

    # Prefer the most recent non-drone commit so CI merge commits don't leak
    # into the version string; fall back to HEAD.
    COMMIT=$(git log -n3 --pretty=format:"%H %ae" | grep -v ' drone@localhost$' | cut -f1 -d' ' | head -1)
    if [ -z "${COMMIT}" ]; then
        COMMIT=$(git rev-parse HEAD || true)
    fi
fi

if [[ -n "$GIT_TAG" ]]; then
    VERSION=$GIT_TAG
else
    VERSION="${KUBERNETES_VERSION}+dev.${COMMIT:0:8}$DIRTY"
fi

if [[ "${VERSION}" =~ ^v([0-9]+)\.([0-9]+)\.([0-9]+)([-+][a-zA-Z0-9.]+)?[-+]((rke2r[0-9]+|dev.*))$ ]]; then
    VERSION_MAJOR=${BASH_REMATCH[1]}
    VERSION_MINOR=${BASH_REMATCH[2]}
    PATCH=${BASH_REMATCH[3]}
    RC=${BASH_REMATCH[4]}
    RKE2_PATCH=${BASH_REMATCH[5]}
    # FIX: this message previously interpolated the unset ${MAJOR}/${MINOR};
    # the parsed values are stored in VERSION_MAJOR/VERSION_MINOR.
    echo "VERSION=${VERSION} parsed as MAJOR=${VERSION_MAJOR} MINOR=${VERSION_MINOR} PATCH=${PATCH} RC=${RC} RKE2_PATCH=${RKE2_PATCH}"
fi

# Pin the Go toolchain version to whatever upstream kubernetes declares for
# this KUBERNETES_VERSION.
DEPENDENCIES_URL="https://raw.githubusercontent.com/kubernetes/kubernetes/${KUBERNETES_VERSION}/build/dependencies.yaml"
VERSION_GOLANG="go"$(curl -sL "${DEPENDENCIES_URL}" | yq e '.dependencies[] | select(.name == "golang: upstream version").version' -)

DOCKERIZED_VERSION="${VERSION/+/-}" # this mimics what kubernetes builds do
75 |
--------------------------------------------------------------------------------
/tests/docker/log-upload:
--------------------------------------------------------------------------------
#!/bin/bash
# Upload a directory of test logs ($1) as a tarball to the rke2-ci-logs
# S3 bucket. Exits 0 silently when AWS credentials are absent so CI runs
# without credentials are not failed by log upload.

[ -n "$AWS_ACCESS_KEY_ID" ] || {
    echo "AWS_ACCESS_KEY_ID is not set"
    exit 0
}

[ -n "$AWS_SECRET_ACCESS_KEY" ] || {
    echo "AWS_SECRET_ACCESS_KEY is not set"
    exit 0
}

[ -d "$1" ] || {
    echo "First argument should be a directory" >&2
    exit 1
}

# Keep the staged tarball private to this user.
umask 077

GO=${GO-go}

TMPDIR=$(mktemp -d)
# Remove the staging directory on exit/interrupt, preserving the original
# exit code. FIX: expansions are now quoted so paths containing spaces or
# glob characters cannot word-split (especially in rm -rf).
cleanup() {
    exit_code=$?
    trap - EXIT INT
    rm -rf "${TMPDIR}"
    exit ${exit_code}
}
trap cleanup EXIT INT


# Unique object name: timestamp + arch + short commit + log dir name.
LOG_TGZ=rke2-log-$(date +%s)-$("${GO}" env GOARCH)-$(git rev-parse --short HEAD)-$(basename "$1").tgz

tar -cz -f "${TMPDIR}/${LOG_TGZ}" -C "$(dirname "$1")" "$(basename "$1")"
aws s3 cp "${TMPDIR}/${LOG_TGZ}" s3://rke2-ci-logs || exit 1
echo "Logs uploaded" >&2
echo "https://rke2-ci-logs.s3.amazonaws.com/${LOG_TGZ}"
--------------------------------------------------------------------------------
/tests/docker/resources/clusterip.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: test-clusterip
5 | spec:
6 | selector:
7 | matchLabels:
8 | k8s-app: nginx-app-clusterip
9 | replicas: 2
10 | template:
11 | metadata:
12 | labels:
13 | k8s-app: nginx-app-clusterip
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: ranchertest/mytestcontainer
18 | ports:
19 | - containerPort: 80
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | labels:
25 | k8s-app: nginx-app-clusterip
26 | name: nginx-clusterip-svc
27 | namespace: default
28 | spec:
29 | type: ClusterIP
30 | ports:
31 | - port: 80
32 | selector:
33 | k8s-app: nginx-app-clusterip
34 |
--------------------------------------------------------------------------------
/tests/docker/resources/dns-node-cache.yaml:
--------------------------------------------------------------------------------
# HelmChartConfig enabling the NodeLocal DNS cache in the rke2-coredns chart.
# NOTE(review): content duplicates tests/docker/resources/nodecache.yaml —
# confirm both files are still needed.
apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
  name: rke2-coredns
  namespace: kube-system
spec:
  valuesContent: |-
    nodelocal:
      enabled: true

--------------------------------------------------------------------------------
/tests/docker/resources/dnsutils.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: dnsutils
5 | namespace: default
6 | spec:
7 | containers:
8 | - name: dnsutils
9 | image: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3
10 | command:
11 | - sleep
12 | - "3600"
13 | imagePullPolicy: IfNotPresent
14 | restartPolicy: Always
15 |
--------------------------------------------------------------------------------
/tests/docker/resources/dualstack_clusterip.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: ds-clusterip-pod
5 | spec:
6 | selector:
7 | matchLabels:
8 | k8s-app: nginx-app-clusterip
9 | replicas: 2
10 | template:
11 | metadata:
12 | labels:
13 | k8s-app: nginx-app-clusterip
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: ranchertest/mytestcontainer
18 | ports:
19 | - containerPort: 80
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | labels:
25 | k8s-app: nginx-app-clusterip
26 | name: ds-clusterip-svc
27 | namespace: default
28 | spec:
29 | type: ClusterIP
30 | ipFamilyPolicy: PreferDualStack
31 | ports:
32 | - protocol: TCP
33 | port: 80
34 | targetPort: 80
35 | selector:
36 | k8s-app: nginx-app-clusterip
37 |
--------------------------------------------------------------------------------
/tests/docker/resources/dualstack_ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: ds-ingress
5 | spec:
6 | rules:
7 | - host: testds.com
8 | http:
9 | paths:
10 | - backend:
11 | service:
12 | # Reliant on dualstack_clusterip.yaml
13 | name: ds-clusterip-svc
14 | port:
15 | number: 80
16 | pathType: ImplementationSpecific
--------------------------------------------------------------------------------
/tests/docker/resources/dualstack_nodeport.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: ds-nodeport-pod
5 | spec:
6 | selector:
7 | matchLabels:
8 | k8s-app: nginx-app-nodeport
9 | replicas: 2
10 | template:
11 | metadata:
12 | labels:
13 | k8s-app: nginx-app-nodeport
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: ranchertest/mytestcontainer
18 | ports:
19 | - containerPort: 80
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | labels:
25 | k8s-app: nginx-app-nodeport
26 | name: ds-nodeport-svc
27 | namespace: default
28 | spec:
29 | type: NodePort
30 | ipFamilyPolicy: PreferDualStack
31 | ports:
32 | - port: 80
33 | nodePort: 30096
34 | name: http
35 | selector:
36 | k8s-app: nginx-app-nodeport
37 |
--------------------------------------------------------------------------------
/tests/docker/resources/loadbalancer.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: lb-test
5 | namespace: kube-system
6 | spec:
7 | type: LoadBalancer
8 | selector:
9 | app.kubernetes.io/name: rke2-ingress-nginx
10 | ports:
11 | - name: http
12 | protocol: TCP
13 | port: 8080
14 | targetPort: http
15 | - name: https
16 | protocol: TCP
17 | port: 8443
18 | targetPort: https
19 |
--------------------------------------------------------------------------------
/tests/docker/resources/nodecache.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: helm.cattle.io/v1
3 | kind: HelmChartConfig
4 | metadata:
5 | name: rke2-coredns
6 | namespace: kube-system
7 | spec:
8 | valuesContent: |-
9 | nodelocal:
10 | enabled: true
--------------------------------------------------------------------------------
/tests/docker/resources/nodeport.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: test-nodeport
5 | spec:
6 | selector:
7 | matchLabels:
8 | k8s-app: nginx-app-nodeport
9 | replicas: 2
10 | template:
11 | metadata:
12 | labels:
13 | k8s-app: nginx-app-nodeport
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: ranchertest/mytestcontainer
18 | ports:
19 | - containerPort: 80
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | labels:
25 | k8s-app: nginx-app-nodeport
26 | name: nginx-nodeport-svc
27 | namespace: default
28 | spec:
29 | type: NodePort
30 | ports:
31 | - port: 80
32 | nodePort: 30096
33 | name: http
34 | selector:
35 | k8s-app: nginx-app-nodeport
36 |
--------------------------------------------------------------------------------
/tests/docker/resources/secrets.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: docker-secret1
5 | type: Opaque
6 | stringData:
7 | config.yaml: |
8 | key: "hello"
9 | val: "world"
10 | ---
11 | apiVersion: v1
12 | kind: Secret
13 | metadata:
14 | name: docker-secret2
15 | type: Opaque
16 | stringData:
17 | config.yaml: |
18 | key: "good"
19 | val: "day"
20 | ---
21 | apiVersion: v1
22 | kind: Secret
23 | metadata:
24 | name: docker-secret3
25 | type: Opaque
26 | stringData:
27 | config.yaml: |
28 | key: "top-secret"
29 | val: "information"
30 | ---
31 | apiVersion: v1
32 | kind: Secret
33 | metadata:
34 | name: docker-secret4
35 | type: Opaque
36 | stringData:
37 | config.yaml: |
38 | key: "lock"
39 | val: "key"
40 | ---
41 | apiVersion: v1
42 | kind: Secret
43 | metadata:
44 | name: docker-secret5
45 | type: Opaque
46 | stringData:
47 | config.yaml: |
48 | key: "last"
49 | val: "call"
--------------------------------------------------------------------------------
/tests/docker/resources/volume-test.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: local-path-pvc
5 | namespace: kube-system
6 | spec:
7 | accessModes:
8 | - ReadWriteOnce
9 | storageClassName: local-path
10 | resources:
11 | requests:
12 | storage: 2Gi
13 | ---
14 | apiVersion: v1
15 | kind: Pod
16 | metadata:
17 | name: volume-test
18 | namespace: kube-system
19 | spec:
20 | containers:
21 | - name: volume-test
22 | image: rancher/mirrored-pause:3.6
23 | imagePullPolicy: IfNotPresent
24 | volumeMounts:
25 | - name: volv
26 | mountPath: /data
27 | volumes:
28 | - name: volv
29 | persistentVolumeClaim:
30 | claimName: local-path-pvc
31 |
--------------------------------------------------------------------------------
/tests/docker/test-runner:
--------------------------------------------------------------------------------
#!/bin/bash
set -x -e
# Run from the repository root regardless of where the script was invoked.
cd $(dirname $0)/../..

# ---

# Source each include to bring the test-setup / provision-cluster /
# start-test definitions into scope (TEST_INCLUDES is a space-separated
# list of script paths, e.g. tests/docker/test-setup-sonobuoy).
for include in $TEST_INCLUDES; do
    . $include
done

# Functions provided by the sourced includes; remaining CLI args are
# forwarded to the test entry point.
test-setup
provision-cluster
start-test $@
--------------------------------------------------------------------------------
/tests/docker/test-setup-sonobuoy:
--------------------------------------------------------------------------------
#!/bin/bash
# Sourced by tests/docker/test-runner: configures a 1-server/1-agent cluster
# and defines start-test to run the sonobuoy conformance suite.

# Services that must be up before testing begins (consumed via WAIT_SERVICES).
all_services=(
    calico-node
    coredns
    etcd
    kube-apiserver
    kube-controller-manager
    kube-flannel
    kube-proxy
    kube-scheduler
    metrics-server
)
# Disable the bundled ingress controller and have the apiserver reach
# kubelets by InternalIP.
export SERVER_ARGS='--disable=rke2-ingress-nginx --kube-apiserver-arg=kubelet-preferred-address-types=InternalIP'

export NUM_SERVERS=1
export NUM_AGENTS=1
export WAIT_SERVICES="${all_services[@]}"

# Conformance run is split: the parallel pass skips [Serial] specs, which the
# serial pass then covers.
export sonobuoyParallelArgs=(--e2e-focus='\[Conformance\]' --e2e-skip='\[Serial\]' --e2e-parallel=y)
export sonobuoySerialArgs=(--e2e-focus='\[Serial\].*\[Conformance\]')

# Entry point invoked by test-runner after provisioning; forwards args to the
# sonobuoy-test helper (defined elsewhere in the sourced test scripts).
start-test() {
    sonobuoy-test $@
}
export -f start-test
--------------------------------------------------------------------------------
/tests/e2e/mixedos/README.md:
--------------------------------------------------------------------------------
1 | # How to switch CNI
2 |
3 | Calico is the default CNI plugin for this E2E test. If you want to use Flannel instead, add "flannel" as the value for `E2E_CNI`
4 |
5 | Example:
6 |
7 | ```
8 | E2E_CNI=flannel go test -v -timeout=30m tests/e2e/mixedos/mixedos_test.go
9 | ```
10 |
--------------------------------------------------------------------------------
/tests/e2e/report/s3upload.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "log"
7 | "os"
8 |
9 | "github.com/aws/aws-sdk-go/aws"
10 | "github.com/aws/aws-sdk-go/aws/session"
11 | "github.com/aws/aws-sdk-go/service/s3"
12 | "github.com/sirupsen/logrus"
13 | )
14 |
15 | const bucketName = "e2e-results-log"
16 |
17 | var fileName string
18 |
19 | func main() {
20 | flag.StringVar(&fileName, "f", "", "path to the go test json logs file")
21 | flag.Parse()
22 |
23 | if fileName == "" {
24 | log.Fatal("--f flag is required")
25 | }
26 |
27 | logFile, err := readLogsFromFile(fileName)
28 | if err != nil {
29 | log.Fatalf("Error reading log file: %v", err)
30 | }
31 | defer logFile.Close()
32 |
33 | if err = uploadReport(logFile); err != nil {
34 | log.Fatalf("Error uploading report: %v", err)
35 | }
36 | }
37 |
38 | func uploadReport(file *os.File) error {
39 | sess, err := session.NewSession(&aws.Config{
40 | Region: aws.String("us-east-2"),
41 | })
42 | if err != nil {
43 | return fmt.Errorf("failed to create AWS session: %w", err)
44 | }
45 |
46 | s3Client := s3.New(sess)
47 | params := &s3.PutObjectInput{
48 | Bucket: aws.String(bucketName),
49 | Key: aws.String(file.Name()),
50 | ContentType: aws.String("text/plain"),
51 | Body: file,
52 | }
53 |
54 | _, err = s3Client.PutObject(params)
55 | if err != nil {
56 | return fmt.Errorf("failed to upload to S3: %w", err)
57 | }
58 |
59 | logrus.Infof("Successfully uploaded %s to S3\n", file.Name())
60 |
61 | return nil
62 | }
63 |
64 | func readLogsFromFile(fileName string) (*os.File, error) {
65 | file, err := os.Open(fileName)
66 | if err != nil {
67 | return nil, fmt.Errorf("error opening file: %w", err)
68 | }
69 |
70 | return file, nil
71 | }
72 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/clusterip.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: test-clusterip
5 | spec:
6 | selector:
7 | matchLabels:
8 | k8s-app: nginx-app-clusterip
9 | replicas: 2
10 | template:
11 | metadata:
12 | labels:
13 | k8s-app: nginx-app-clusterip
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: ranchertest/mytestcontainer
18 | ports:
19 | - containerPort: 80
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | labels:
25 | k8s-app: nginx-app-clusterip
26 | name: nginx-clusterip-svc
27 | namespace: default
28 | spec:
29 | type: ClusterIP
30 | ports:
31 | - port: 80
32 | selector:
33 | k8s-app: nginx-app-clusterip
34 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/csi-driver-host-path.txt:
--------------------------------------------------------------------------------
1 | https://github.com/kubernetes-csi/external-provisioner/raw/v3.4.0/deploy/kubernetes/rbac.yaml
2 | https://github.com/kubernetes-csi/external-attacher/raw/v4.2.0/deploy/kubernetes/rbac.yaml
3 | https://github.com/kubernetes-csi/external-snapshotter/raw/v6.2.1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml
4 | https://github.com/kubernetes-csi/external-resizer/raw/v1.7.0/deploy/kubernetes/rbac.yaml
5 | https://github.com/kubernetes-csi/external-health-monitor/raw/v0.8.0/deploy/kubernetes/external-health-monitor-controller/rbac.yaml
6 | https://github.com/kubernetes-csi/csi-driver-host-path/raw/v1.11.0/deploy/kubernetes-1.24/hostpath/csi-hostpath-driverinfo.yaml
7 | https://github.com/kubernetes-csi/csi-driver-host-path/raw/v1.11.0/deploy/kubernetes-1.24/hostpath/csi-hostpath-plugin.yaml
8 | https://github.com/kubernetes-csi/csi-driver-host-path/raw/v1.11.0/deploy/kubernetes-1.24/hostpath/csi-hostpath-snapshotclass.yaml
9 | https://github.com/kubernetes-csi/csi-driver-host-path/raw/v1.11.0/examples/csi-storageclass.yaml
10 | https://github.com/kubernetes-csi/csi-driver-host-path/raw/v1.11.0/examples/csi-pvc.yaml
11 | https://github.com/kubernetes-csi/csi-driver-host-path/raw/v1.11.0/examples/csi-app.yaml
12 | https://github.com/kubernetes-csi/csi-driver-host-path/raw/v1.11.0/examples/csi-snapshot-v1.yaml
13 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/daemonset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: test-daemonset
5 | spec:
6 | selector:
7 | matchLabels:
8 | k8s-app: test-daemonset
9 | template:
10 | metadata:
11 | labels:
12 | k8s-app: test-daemonset
13 | spec:
14 | containers:
15 | - name: webserver
16 | image: nginx
17 | ports:
18 | - containerPort: 80
19 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/dnsutils.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: dnsutils
5 | namespace: default
6 | spec:
7 | containers:
8 | - name: dnsutils
9 | image: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3
10 | command:
11 | - sleep
12 | - "3600"
13 | imagePullPolicy: IfNotPresent
14 | restartPolicy: Always
15 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/dualstack_clusterip.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: ds-clusterip-pod
5 | spec:
6 | selector:
7 | matchLabels:
8 | k8s-app: nginx-app-clusterip
9 | replicas: 2
10 | template:
11 | metadata:
12 | labels:
13 | k8s-app: nginx-app-clusterip
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: ranchertest/mytestcontainer
18 | ports:
19 | - containerPort: 80
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | labels:
25 | k8s-app: nginx-app-clusterip
26 | name: ds-clusterip-svc
27 | namespace: default
28 | spec:
29 | type: ClusterIP
30 | ipFamilyPolicy: PreferDualStack
31 | ports:
32 | - protocol: TCP
33 | port: 80
34 | targetPort: 80
35 | selector:
36 | k8s-app: nginx-app-clusterip
37 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/dualstack_ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: ds-ingress
5 | spec:
6 | rules:
7 | - host: testds.com
8 | http:
9 | paths:
10 | - backend:
11 | service:
12 | # Reliant on dualstack_clusterip.yaml
13 | name: ds-clusterip-svc
14 | port:
15 | number: 80
16 | pathType: ImplementationSpecific
--------------------------------------------------------------------------------
/tests/e2e/resource_files/dualstack_nodeport.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: ds-nodeport-pod
5 | spec:
6 | selector:
7 | matchLabels:
8 | k8s-app: nginx-app-nodeport
9 | replicas: 2
10 | template:
11 | metadata:
12 | labels:
13 | k8s-app: nginx-app-nodeport
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: ranchertest/mytestcontainer
18 | ports:
19 | - containerPort: 80
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | labels:
25 | k8s-app: nginx-app-nodeport
26 | name: ds-nodeport-svc
27 | namespace: default
28 | spec:
29 | type: NodePort
30 | ipFamilyPolicy: PreferDualStack
31 | ports:
32 | - port: 80
33 | nodePort: 30096
34 | name: http
35 | selector:
36 | k8s-app: nginx-app-nodeport
37 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: test-ingress
5 | spec:
6 | rules:
7 | - host: foo1.bar.com
8 | http:
9 | paths:
10 | - backend:
11 | service:
12 | name: nginx-ingress-svc
13 | port:
14 | number: 80
15 | path: /
16 | pathType: ImplementationSpecific
17 | ---
18 | apiVersion: v1
19 | kind: Service
20 | metadata:
21 | name: nginx-ingress-svc
22 | labels:
23 | k8s-app: nginx-app-ingress
24 | spec:
25 | ports:
26 | - port: 80
27 | targetPort: 80
28 | protocol: TCP
29 | name: http
30 | selector:
31 | k8s-app: nginx-app-ingress
32 | ---
33 | apiVersion: v1
34 | kind: ReplicationController
35 | metadata:
36 | name: test-ingress
37 | spec:
38 | replicas: 2
39 | selector:
40 | k8s-app: nginx-app-ingress
41 | template:
42 | metadata:
43 | labels:
44 | k8s-app: nginx-app-ingress
45 | spec:
46 | terminationGracePeriodSeconds: 60
47 | containers:
48 | - name: testcontainer
49 | image: ranchertest/mytestcontainer
50 | ports:
51 | - containerPort: 80
52 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/loadbalancer.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: test-loadbalancer
6 | spec:
7 | selector:
8 | matchLabels:
9 | k8s-app: nginx-app-loadbalancer
10 | replicas: 2
11 | template:
12 | metadata:
13 | labels:
14 | k8s-app: nginx-app-loadbalancer
15 | spec:
16 | containers:
17 | - name: nginx
18 | image: ranchertest/mytestcontainer
19 | ports:
20 | - containerPort: 80
21 | ---
22 | apiVersion: v1
23 | kind: Service
24 | metadata:
25 | name: nginx-loadbalancer-svc
26 | labels:
27 | k8s-app: nginx-app-loadbalancer
28 | spec:
29 | type: LoadBalancer
30 | externalIPs:
31 | - 10.10.10.100
32 | ports:
33 | - port: 81
34 | targetPort: 80
35 | protocol: TCP
36 | name: http
37 | selector:
38 | k8s-app: nginx-app-loadbalancer
39 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/local-path-provisioner.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: PersistentVolume
4 | metadata:
5 | name: local-pv-volume
6 | labels:
7 | type: local
8 | spec:
9 | storageClassName: manual
10 | capacity:
11 | storage: 500Mi
12 | accessModes:
13 | - ReadWriteOnce
14 | hostPath:
15 | path: "/data"
16 | claimRef:
17 | namespace: default
18 | name: local-path-pvc
19 | ---
20 | apiVersion: v1
21 | kind: PersistentVolumeClaim
22 | metadata:
23 | name: local-path-pvc
24 | namespace: default
25 | spec:
26 | accessModes:
27 | - ReadWriteOnce
28 | storageClassName: ""
29 | resources:
30 | requests:
31 | storage: 500Mi
32 | ---
33 | apiVersion: v1
34 | kind: Pod
35 | metadata:
36 | name: volume-test
37 | namespace: default
38 | spec:
39 | containers:
40 | - name: volume-test
41 | image: nginx:stable-alpine
42 | imagePullPolicy: IfNotPresent
43 | volumeMounts:
44 | - name: volv
45 | mountPath: /data
46 | ports:
47 | - containerPort: 80
48 | volumes:
49 | - name: volv
50 | persistentVolumeClaim:
51 | claimName: local-path-pvc
52 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/multus-pods.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: "k8s.cni.cncf.io/v1"
2 | kind: NetworkAttachmentDefinition
3 | metadata:
4 | name: macvlan-conf
5 | spec:
6 | config: '{
7 | "cniVersion": "0.3.1",
8 | "plugins": [
9 | {
10 | "type": "macvlan",
11 | "capabilities": { "ips": true },
12 | "master": "eth1",
13 | "mode": "bridge",
14 | "ipam": {
15 | "type": "static",
16 | "routes": [
17 | {
18 | "dst": "0.0.0.0/0",
19 | "gw": "10.1.1.1"
20 | }
21 | ]
22 | }
23 | }, {
24 | "capabilities": { "mac": true },
25 | "type": "tuning"
26 | }
27 | ]
28 | }'
29 | ---
30 |
31 | apiVersion: v1
32 | kind: Pod
33 | metadata:
34 | labels:
35 | app: pod-macvlan
36 | name: pod-macvlan
37 | annotations:
38 | k8s.v1.cni.cncf.io/networks: '[
39 | { "name": "macvlan-conf",
40 | "ips": [ "10.1.1.101/24" ],
41 | "mac": "c2:b0:57:49:47:f1",
42 | "gateway": [ "10.1.1.1" ]
43 | }]'
44 | spec:
45 | containers:
46 | - image: praqma/network-multitool
47 | imagePullPolicy: Always
48 | name: multitool
49 | securityContext:
50 | capabilities:
51 | add: ["NET_ADMIN","NET_RAW"]
52 | ---
53 |
54 | apiVersion: v1
55 | kind: Pod
56 | metadata:
57 | labels:
58 | app: pod2-macvlan
59 | name: pod2-macvlan
60 | annotations:
61 | k8s.v1.cni.cncf.io/networks: '[
62 | { "name": "macvlan-conf",
63 | "ips": [ "10.1.1.102/24" ],
64 | "mac": "c2:b0:57:45:47:f1",
65 | "gateway": [ "10.1.1.1" ]
66 | }]'
67 | spec:
68 | containers:
69 | - image: praqma/network-multitool
70 | imagePullPolicy: Always
71 | name: multitool
72 | securityContext:
73 | capabilities:
74 | add: ["NET_ADMIN","NET_RAW"]
75 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/netpol-fail.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: test-network-policy
5 | namespace: default
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | k8s-app: nginx-app-clusterip
10 | policyTypes:
11 | - Ingress
12 | ingress:
13 | - from:
14 | - podSelector:
15 | matchLabels:
16 | app: whatever
17 | ports:
18 | - protocol: TCP
19 | port: 80
20 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/netpol-work.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: test-network-policy
5 | namespace: default
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | k8s-app: nginx-app-clusterip
10 | policyTypes:
11 | - Ingress
12 | ingress:
13 | - from:
14 | - podSelector:
15 | matchLabels:
16 | app: client
17 | ports:
18 | - protocol: TCP
19 | port: 80
20 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/nodeport.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: test-nodeport
5 | spec:
6 | selector:
7 | matchLabels:
8 | k8s-app: nginx-app-nodeport
9 | replicas: 2
10 | template:
11 | metadata:
12 | labels:
13 | k8s-app: nginx-app-nodeport
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: ranchertest/mytestcontainer
18 | ports:
19 | - containerPort: 80
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | labels:
25 | k8s-app: nginx-app-nodeport
26 | name: nginx-nodeport-svc
27 | namespace: default
28 | spec:
29 | type: NodePort
30 | ports:
31 | - port: 80
32 | nodePort: 30096
33 | name: http
34 | selector:
35 | k8s-app: nginx-app-nodeport
36 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/pod_client.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: client
6 | name: client-deployment
7 | spec:
8 | replicas: 2
9 | selector:
10 | matchLabels:
11 | app: client
12 | template:
13 | metadata:
14 | labels:
15 | app: client
16 | spec:
17 | containers:
18 | - image: ranchertest/mytestcontainer
19 | imagePullPolicy: Always
20 | name: client-curl
21 | affinity:
22 | podAntiAffinity:
23 | requiredDuringSchedulingIgnoredDuringExecution:
24 | - labelSelector:
25 | matchExpressions:
26 | - key: app
27 | operator: In
28 | values:
29 | - client
30 | topologyKey: kubernetes.io/hostname
31 | nodeAffinity:
32 | requiredDuringSchedulingIgnoredDuringExecution:
33 | nodeSelectorTerms:
34 | - matchExpressions:
35 | - key: kubernetes.io/os
36 | operator: In
37 | values:
38 | - linux
39 | ---
40 | apiVersion: v1
41 | kind: Service
42 | metadata:
43 | name: client-curl
44 | labels:
45 | app: client
46 | service: client-curl
47 | spec:
48 | type: ClusterIP
49 | selector:
50 | app: client
51 | ports:
52 | - port: 8080
53 | protocol: TCP
54 | targetPort: 80
55 |
--------------------------------------------------------------------------------
/tests/e2e/resource_files/secrets.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: e2e-secret1
5 | type: Opaque
6 | stringData:
7 | config.yaml: |
8 | key: "hello"
9 | val: "world"
10 | ---
11 | apiVersion: v1
12 | kind: Secret
13 | metadata:
14 | name: e2e-secret2
15 | type: Opaque
16 | stringData:
17 | config.yaml: |
18 | key: "good"
19 | val: "day"
20 | ---
21 | apiVersion: v1
22 | kind: Secret
23 | metadata:
24 | name: e2e-secret3
25 | type: Opaque
26 | stringData:
27 | config.yaml: |
28 | key: "top-secret"
29 | val: "information"
30 | ---
31 | apiVersion: v1
32 | kind: Secret
33 | metadata:
34 | name: e2e-secret4
35 | type: Opaque
36 | stringData:
37 | config.yaml: |
38 | key: "lock"
39 | val: "key"
40 | ---
41 | apiVersion: v1
42 | kind: Secret
43 | metadata:
44 | name: e2e-secret5
45 | type: Opaque
46 | stringData:
47 | config.yaml: |
48 | key: "last"
49 | val: "call"
--------------------------------------------------------------------------------
/tests/e2e/resource_files/windows_app_deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: windows-app-deployment
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: windows-app
9 | replicas: 2
10 | template:
11 | metadata:
12 | labels:
13 | app: windows-app
14 | spec:
15 | containers:
16 | - name: windows-app
17 | image: mbuilsuse/pstools:v0.2.0
18 | ports:
19 | - containerPort: 3000
20 | affinity:
21 | nodeAffinity:
22 | requiredDuringSchedulingIgnoredDuringExecution:
23 | nodeSelectorTerms:
24 | - matchExpressions:
25 | - key: kubernetes.io/os
26 | operator: In
27 | values:
28 | - windows
29 | ---
30 | apiVersion: v1
31 | kind: Service
32 | metadata:
33 | labels:
34 | app: windows-app-svc
35 | name: windows-app-svc
36 | namespace: default
37 | spec:
38 | type: NodePort
39 | ports:
40 | - port: 3000
41 | nodePort: 30096
42 | name: http
43 | selector:
44 | app: windows-app
45 |
--------------------------------------------------------------------------------
/tests/e2e/scripts/calico_manifest.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Write a HelmChartConfig that pins calico's IPv4 node address autodetection
# to the interface that can reach $1.
ip4_addr=$1

# Override default calico and specify the interface for windows-agent
# by default, the windows-agent use a different interface name than the linux-agent
mkdir -p /var/lib/rancher/rke2/server/manifests

echo "Creating calico chart"
# FIX: use ">" (truncate) instead of ">>" (append) — appending on re-provision
# accumulated duplicate HelmChartConfig documents in the manifest file;
# matches cilium_nokubeproxy.sh.
echo "apiVersion: helm.cattle.io/v1
kind: HelmChartConfig
metadata:
  name: rke2-calico
  namespace: kube-system
spec:
  valuesContent: |-
    installation:
      calicoNetwork:
        nodeAddressAutodetectionV4:
          canReach: $ip4_addr" > /var/lib/rancher/rke2/server/manifests/e2e-calico.yaml
20 |
--------------------------------------------------------------------------------
/tests/e2e/scripts/calico_manifestbgp.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ip4_addr=$1
3 | 
4 | # Override default calico and specify the interface for windows-agent
5 | # by default, the windows-agent uses a different interface name than the linux-agent
6 | mkdir -p /var/lib/rancher/rke2/server/manifests
7 | 
8 | echo "Creating calico chart"
9 | echo "apiVersion: helm.cattle.io/v1
10 | kind: HelmChartConfig
11 | metadata:
12 | name: rke2-calico
13 | namespace: kube-system
14 | spec:
15 | valuesContent: |-
16 | installation:
17 | calicoNetwork:
18 | bgp: Enabled
19 | ipPools:
20 | - cidr: 10.42.0.0/16
21 | encapsulation: None
22 | nodeAddressAutodetectionV4:
23 | canReach: $ip4_addr" > /var/lib/rancher/rke2/server/manifests/e2e-calico.yaml
24 |
--------------------------------------------------------------------------------
/tests/e2e/scripts/cilium_nokubeproxy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ip4_addr=$1
3 |
4 | # Set Cilium parameters to get as much BPF as possible and as a consequence
5 | # as few iptables rules as possible
6 | mkdir -p /var/lib/rancher/rke2/server/manifests
7 |
8 | echo "Creating cilium chart"
9 | echo "apiVersion: helm.cattle.io/v1
10 | kind: HelmChartConfig
11 | metadata:
12 | name: rke2-cilium
13 | namespace: kube-system
14 | spec:
15 | valuesContent: |-
16 | ipv6:
17 | enabled: true
18 | devices: eth1
19 | kubeProxyReplacement: true
20 | k8sServiceHost: $ip4_addr
21 | k8sServicePort: 6443
22 | cni:
23 | chainingMode: none
24 | bpf:
25 | masquerade: true" > /var/lib/rancher/rke2/server/manifests/e2e-cilium.yaml
--------------------------------------------------------------------------------
/tests/e2e/scripts/install-bgp.ps1:
--------------------------------------------------------------------------------
1 | echo "Installing RemoteAccess, RSAT-RemoteAccess-PowerShell and Routing packages"
2 | Install-WindowsFeature RemoteAccess
3 | Install-WindowsFeature RSAT-RemoteAccess-PowerShell
4 | Install-WindowsFeature Routing
5 | echo "Installing remoteAccess vpntype: routingOnly"
6 | Install-RemoteAccess -VpnType RoutingOnly
7 |
--------------------------------------------------------------------------------
/tests/e2e/scripts/install_sonobuoy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Fail on the first error so a partial clone/download is not mistaken for a successful install
3 | set -e
4 | git clone https://github.com/phillipsj/my-sonobuoy-plugins.git
5 | wget -q https://github.com/vmware-tanzu/sonobuoy/releases/download/v0.56.0/sonobuoy_0.56.0_linux_amd64.tar.gz
6 | tar -xvf sonobuoy_0.56.0_linux_amd64.tar.gz
7 | chmod +x sonobuoy && mv sonobuoy /usr/local/bin/sonobuoy
--------------------------------------------------------------------------------
/tests/e2e/scripts/ipv6.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ip4_addr=$1
3 | ip6_addr=$2
4 | ip6_addr_gw=$3
5 | cni=$4
6 | os=$5
7 |
8 | sysctl -w net.ipv6.conf.all.disable_ipv6=0
9 | sysctl -w net.ipv6.conf.eth1.accept_dad=0
10 |
11 |
12 |
13 | if [ -z "${os##*ubuntu*}" ]; then
14 | netplan set ethernets.eth1.accept-ra=false
15 | netplan set ethernets.eth1.addresses=["$ip4_addr"/24,"$ip6_addr"/64]
16 | netplan set ethernets.eth1.gateway6="$ip6_addr_gw"
17 | netplan apply
18 | elif [ -z "${os##*alpine*}" ]; then
19 | # "ip link", not "iplink" — alpine ships iproute2/busybox "ip"
20 | ip link set eth1 down
21 | ip link set eth1 up
22 | ip -6 addr add "$ip6_addr"/64 dev eth1
23 | ip -6 r add default via "$ip6_addr_gw"
23 | else
24 | ip -6 addr add "$ip6_addr"/64 dev eth1
25 | ip -6 r add default via "$ip6_addr_gw"
26 | fi
27 | ip addr show dev eth1
28 | ip -6 r
29 |
30 | echo "net.ipv6.conf.all.disable_ipv6=0
31 | net.ipv6.conf.eth1.accept_dad=0" > /etc/sysctl.conf
32 |
33 | # Override default CNI and specify the interface since we don't have a default IPv6 route
34 | mkdir -p /var/lib/rancher/rke2/server/manifests
35 |
36 | case "$cni" in
37 | *canal*)
38 | echo "Creating canal chart"
39 | echo "apiVersion: helm.cattle.io/v1
40 | kind: HelmChartConfig
41 | metadata:
42 | name: rke2-canal
43 | namespace: kube-system
44 | spec:
45 | valuesContent: |-
46 | flannel:
47 | iface: \"eth1\"
48 | calico:
49 | ipAutoDetectionMethod: \"interface=eth1.*\"
50 | ip6AutoDetectionMethod: \"interface=eth1.*\"" >> /var/lib/rancher/rke2/server/manifests/e2e-canal.yaml
51 | ;;
52 |
53 | *cilium*)
54 | echo "Creating cilium chart"
55 | echo "apiVersion: helm.cattle.io/v1
56 | kind: HelmChartConfig
57 | metadata:
58 | name: rke2-cilium
59 | namespace: kube-system
60 | spec:
61 | valuesContent: |-
62 | devices: eth1
63 | ipv6:
64 | enabled: true">> /var/lib/rancher/rke2/server/manifests/e2e-cilium.yaml
65 | ;;
66 |
67 | *calico*)
68 | echo "Creating calico chart"
69 | echo "apiVersion: helm.cattle.io/v1
70 | kind: HelmChartConfig
71 | metadata:
72 | name: rke2-calico
73 | namespace: kube-system
74 | spec:
75 | valuesContent: |-
76 | installation:
77 | calicoNetwork:
78 | nodeAddressAutodetectionV4:
79 | interface: eth1.*
80 | nodeAddressAutodetectionV6:
81 | interface: eth1.* " >> /var/lib/rancher/rke2/server/manifests/e2e-calico.yaml
82 | ;;
83 | esac
84 |
--------------------------------------------------------------------------------
/tests/e2e/scripts/latest_commit.ps1:
--------------------------------------------------------------------------------
1 | # Grabs the last 5 commit SHA's from the given branch, then purges any commits that do not have a passing CI build
2 | param ($Branch, $CommitFile)
3 | $response = (Invoke-RestMethod "https://api.github.com/repos/rancher/rke2/commits?per_page=5&sha=$Branch")
4 | if ($response -is [System.Array]) {
5 | $response.sha | Out-File -FilePath $CommitFile
6 | } if ($response -is [PSCustomObject]) {
7 | if ($response.message -like "API rate limit exceeded for *") {
8 | Write-Host "Github API rate limit exceeded"
9 | Exit 1
10 | }
11 | Write-Host "Github API returned a non-expected response $($response.message)"
12 | Exit 1
13 | }
14 | 
15 | $StorageUrl = "https://rke2-ci-builds.s3.amazonaws.com/rke2-images.windows-amd64-"
16 | $TopCommit = (Get-Content -TotalCount 1 $CommitFile)
17 | $StatusCode = Invoke-WebRequest $StorageUrl$TopCommit".tar.zst.sha256sum" -DisableKeepAlive -UseBasicParsing -Method head | % {$_.StatusCode}
18 | $Iterations = 0
19 | while (($StatusCode -ne 200) -AND ($Iterations -lt 5)) { # only 5 commits are fetched above, so at most 5 can be discarded
20 | $Iterations++
21 | (Get-Content $CommitFile | Select-Object -Skip 1) | Set-Content $CommitFile
22 | $TopCommit = (Get-Content -TotalCount 1 $CommitFile)
23 | $StatusCode = Invoke-WebRequest $StorageUrl$TopCommit".tar.zst.sha256sum" -DisableKeepAlive -UseBasicParsing -Method head | % {$_.StatusCode}
24 | }
25 | 
26 | if ($Iterations -ge 5){
27 | Write-Host "No valid commits found"
28 | Exit 1
29 | }
30 |
--------------------------------------------------------------------------------
/tests/e2e/scripts/latest_commit.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | branch=$1
4 | output_file=$2
5 | # Grabs the last 10 commit SHA's from the given branch, then purges any commits that do not have a passing CI build
6 | iterations=0
7 |
8 | # The VMs take time on startup to hit aws, wait loop until we can
9 | while ! curl -s --fail https://rke2-ci-builds.s3.amazonaws.com?max-keys=0 > /dev/null; do
10 | ((iterations++))
11 | if [ "$iterations" -ge 30 ]; then
12 | echo "Unable to hit https://rke2-ci-builds.s3.amazonaws.com"
13 | exit 1
14 | fi
15 | sleep 1
16 | done
17 |
18 | if [ -n "$GH_TOKEN" ]; then
19 | response=$(curl -s -H "Authorization: token $GH_TOKEN" -H 'Accept: application/vnd.github.v3+json' "https://api.github.com/repos/rancher/rke2/commits?per_page=10&sha=$branch")
20 | else
21 | response=$(curl -s -H 'Accept: application/vnd.github.v3+json' "https://api.github.com/repos/rancher/rke2/commits?per_page=10&sha=$branch")
22 | fi
23 | type=$(echo "$response" | jq -r type)
24 |
25 | # Verify if the response is an array with the rke2 commits
26 | if [[ $type == "object" ]]; then
27 | message=$(echo "$response" | jq -r .message)
28 | if [[ $message == "API rate limit exceeded for "* ]]; then
29 | echo "Github API rate limit exceeded"
30 | exit 1
31 | fi
32 | echo "Github API returned a non-expected response ${message}"
33 | exit 1
34 | elif [[ $type == "array" ]]; then
35 | commits_str=$(echo "$response" | jq -j -r '.[] | .sha, " "')
36 | fi
37 |
38 | read -a commits <<< "$commits_str"
39 |
40 | for commit in "${commits[@]}"; do
41 | if curl -s --fail "https://rke2-ci-builds.s3.amazonaws.com/rke2-images.linux-amd64-${commit}.tar.zst.sha256sum" > /dev/null; then
42 | echo "$commit" > "$output_file"
43 | exit 0
44 | fi
45 | done
46 |
47 | echo "Failed to find a valid commit, checked: " "${commits[@]}"
48 | exit 1
49 |
--------------------------------------------------------------------------------
/tests/e2e/scripts/registry.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Script to point rke2 to the docker registry running on the host
4 | # This is used to avoid hitting dockerhub rate limits on E2E runners
5 | ip_addr=$1
6 |
7 | mkdir -p /etc/rancher/rke2/
8 | echo "mirrors:
9 | docker.io:
10 | endpoint:
11 | - \"http://$ip_addr:5000\"" >> /etc/rancher/rke2/registries.yaml
--------------------------------------------------------------------------------
/tests/e2e/scripts/run_tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Usage: ./run_tests.sh
3 | # This script runs all the rke2 e2e tests and generates a report with the log
4 | # The generated log is placed in report/rke2_${OS}.log
5 | #
6 | # This script must be run inside the rke2 directory where the tests exist
7 | #
8 | # Example:
9 | # To run the script with default settings:
10 | # ./run_tests.sh
11 | #
12 | set -x
13 |
14 | # tests to run
15 | tests=("ciliumnokp" "mixedos" "mixedosbgp" "multus" "secretsencryption" "splitserver" "upgradecluster" "validatecluster" "kine")
16 | nodeOS=${1:-"bento/ubuntu-24.04"}
17 | OS=$(echo "$nodeOS"|cut -d'/' -f2)
18 |
19 | E2E_REGISTRY=true && export E2E_REGISTRY
20 | cd rke2
21 | git pull --rebase origin master
22 | /usr/local/go/bin/go mod tidy
23 | cd tests/e2e
24 |
25 | # To reduce GH API requests, we grab the latest commit on the host and pass it to the tests
26 | ./scripts/latest_commit.sh master latest_commit.txt
27 | E2E_RELEASE_VERSION=$(cat latest_commit.txt) && export E2E_RELEASE_VERSION
28 |
29 | # create directory to store reports if it does not exist
30 | if [ ! -d report ]
31 | then
32 | mkdir report
33 | fi
34 |
35 | cleanup() {
36 | for net in $(virsh net-list --all | tail -n +2 | tr -s ' ' | cut -d ' ' -f2 | grep -v default); do
37 | virsh net-destroy "$net"
38 | virsh net-undefine "$net"
39 | done
40 |
41 | for domain in $(virsh list --all | tail -n +2 | tr -s ' ' | cut -d ' ' -f3); do
42 | virsh destroy "$domain"
43 | virsh undefine "$domain" --remove-all-storage
44 | done
45 |
46 | for vm in `vagrant global-status |tr -s ' '|tail +3 |grep "/" |cut -d ' ' -f5`; do
47 | cd $vm
48 | vagrant destroy -f
49 | cd ..
50 | done
51 | }
52 |
53 |
54 | # Remove VMs which are in invalid state
55 | vagrant global-status --prune
56 |
57 | count=0
58 | run_tests(){
59 | 
60 | count=$(( count + 1 ))
61 | rm report/rke2_${OS}.log 2>/dev/null # start each attempt with a clean aggregate log
62 | 
63 | for i in ${!tests[@]}; do
64 | pushd ${tests[$i]}
65 | vagrant destroy -f
66 | 
67 | echo "RUNNING ${tests[$i]} TEST"
68 | /usr/local/go/bin/go test -v ${tests[$i]}_test.go -timeout=2h -nodeOS="$nodeOS" -json -ci >> ../report/rke2_${OS}.log # cwd is the test dir after pushd; append so every suite lands in the one log the retry loop greps
69 | 
70 | popd
71 | done
72 | }
73 |
74 | ls report/rke2_${OS}.log 2>/dev/null && rm report/rke2_${OS}.log
75 | cleanup
76 | run_tests
77 |
78 | # re-run test if first run fails and keep record of repeatedly failed test to debug
79 | while [ -f report/rke2_${OS}.log ] && grep -w " FAIL:" report/rke2_${OS}.log && [ $count -le 2 ]
80 | do
81 | cp report/rke2_${OS}.log report/rke2_${OS}_${count}.log
82 | run_tests
83 | done
84 |
85 | # Upload to s3 bucket
86 | cd report && /usr/local/go/bin/go run -v s3upload.go -f rke2_${OS}.log
--------------------------------------------------------------------------------
/tests/install/README.md:
--------------------------------------------------------------------------------
1 | ## Install Tests
2 |
3 | These tests are used to validate the installation and operation of RKE2 on a variety of operating systems. The tests themselves are Vagrantfiles describing single-node installations that are easily spun up with Vagrant for the `libvirt` and `virtualbox` providers:
4 |
5 | - [Install Script](install) :arrow_right: scheduled nightly and on an install script change
6 | - [CentOS 9 Stream](install/centos-9)
7 | - [Rocky Linux 8](install/rocky-8) (stand-in for RHEL 8)
8 | - [Oracle 9](install/oracle-9)
9 | - [Leap 15.6](install/opensuse-leap) (stand-in for SLES)
10 | - [Ubuntu 24.04](install/ubuntu-2404)
11 | - [Windows Server 2019](install/windows-2019)
12 | - [Windows Server 2022](install/windows-2022)
13 |
14 | ## Format
15 | When adding new installer test(s), please copy the prevalent style for the `Vagrantfile`.
16 | Ideally, the boxes used for additional assertions will support the default `libvirt` provider which
17 | enables them to be used by our GitHub Actions [Nightly Install Test Workflow](../../.github/workflows/nightly-install.yaml).
18 |
19 | ### Framework
20 |
21 | If you are new to Vagrant, Hashicorp has written some pretty decent introductory tutorials and docs, see:
22 | - https://learn.hashicorp.com/collections/vagrant/getting-started
23 | - https://www.vagrantup.com/docs/installation
24 |
25 | #### Plugins and Providers
26 |
27 | The `libvirt` provider cannot be used without first [installing the `vagrant-libvirt` plugin](https://github.com/vagrant-libvirt/vagrant-libvirt). The libvirtd service must be installed and running on the host machine as well.
28 |
29 | This can be installed with:
30 | ```shell
31 | vagrant plugin install vagrant-libvirt
32 | ```
33 |
34 | #### Environment Variables
35 |
36 | These can be set on the CLI or exported before invoking Vagrant:
37 | - `TEST_VM_CPUS` (default :arrow_right: 2)
38 | The number of vCPU for the guest to use.
39 | - `TEST_VM_MEMORY` (default :arrow_right: 3072)
40 | The number of megabytes of memory for the guest to use.
41 | - `TEST_VM_BOOT_TIMEOUT` (default :arrow_right: 600)
42 | The time in seconds that Vagrant will wait for the machine to boot and be accessible.
43 |
44 | ### Running
45 |
46 | The **Install Script** tests can be run by changing to the fixture directory and invoking `vagrant up`, e.g.:
47 | ```shell
48 | cd tests/install/rocky-8
49 | vagrant up
50 | # The following provisioners are optional. In GitHub Actions CI they are invoked
51 | # explicitly to avoid certain timeout issues on slow runners
52 | vagrant provision --provision-with=rke2-wait-for-node
53 | vagrant provision --provision-with=rke2-wait-for-coredns
54 | vagrant provision --provision-with=rke2-wait-for-local-storage
55 | vagrant provision --provision-with=rke2-wait-for-metrics-server
56 | vagrant provision --provision-with=rke2-wait-for-traefik
57 | vagrant provision --provision-with=rke2-status
58 | vagrant provision --provision-with=rke2-procps
59 | ```
--------------------------------------------------------------------------------
/tests/install/centos-9/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | ENV['TEST_INSTALL_SH'] ||= '../../../install.sh'
5 |
6 | Vagrant.configure("2") do |config|
7 | config.vm.box = "eurolinux-vagrant/centos-stream-9"
8 | config.vm.boot_timeout = ENV['TEST_VM_BOOT_TIMEOUT'] || 600 # seconds
9 | config.vm.synced_folder '.', '/vagrant', type: 'rsync', disabled: false
10 | %w[libvirt virtualbox vmware_desktop].each do |p|
11 | config.vm.provider p do |v, o|
12 | v.cpus = ENV['TEST_VM_CPUS'] || 2
13 | v.memory = ENV['TEST_VM_MEMORY'] || 3072
14 | end
15 | end
16 |
17 | # Load in helper functions
18 | load "../install_util.rb"
19 |
20 | external_env = ""
21 | ENV.select{|k,v| k.start_with?('RKE2_') || k.start_with?('INSTALL_RKE2_')}.each {|key,value| external_env << "#{key.to_s}=#{value.to_s} "} # trailing space keeps each VAR=value a separate %W token
22 |
23 | config.vm.define "install-centos-9", primary: true do |test|
24 | test.vm.hostname = 'smoke'
25 | test.vm.provision 'rke2-upload-installer', type: 'file', run: 'always', source: ENV['TEST_INSTALL_SH'], destination: 'install.sh'
26 | test.vm.provision"rke2-install", type: 'rke2', run: "once" do |rke2|
27 | rke2.installer_url = 'file:///home/vagrant/install.sh'
28 | rke2.env = %W[ #{external_env} INSTALL_RKE2_TYPE=server]
29 | rke2.config = <<~YAML
30 | token: 'vagrant'
31 | YAML
32 | rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
33 | end
34 |
35 | waitForControlPlane(test.vm, config.vm.box.to_s)
36 | waitForCanal(test.vm)
37 | waitForCoreDNS(test.vm)
38 | waitForIngressNginx(test.vm)
39 | waitForMetricsServer(test.vm)
40 |
41 | kubectlStatus(test.vm)
42 | checkRKE2Processes(test.vm)
43 | end
44 |
45 | config.vm.provision "install-packages", type: "shell", run: "once" do |sh|
46 | sh.upload_path = "/tmp/vagrant-install-packages"
47 | sh.env = {
48 | 'INSTALL_PACKAGES': ENV['INSTALL_PACKAGES'],
49 | }
50 | sh.inline = <<~SHELL
51 | #!/usr/bin/env bash
52 | set -eux -o pipefail
53 | yum -y install \
54 | curl \
55 | iptables \
56 | less \
57 | lsof \
58 | nc \
59 | socat \
60 | ${INSTALL_PACKAGES}
61 | SHELL
62 | end
63 |
64 | end
65 |
--------------------------------------------------------------------------------
/tests/install/opensuse-leap/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | ENV['TEST_INSTALL_SH'] ||= '../../../install.sh'
5 |
6 | Vagrant.configure("2") do |config|
7 | config.vm.box = 'opensuse/Leap-15.6.x86_64'
8 | config.vm.boot_timeout = ENV['TEST_VM_BOOT_TIMEOUT'] || 600 # seconds
9 | config.vm.synced_folder '.', '/vagrant', type: 'rsync', disabled: false
10 | %w[libvirt virtualbox vmware_desktop].each do |p|
11 | config.vm.provider p do |v, o|
12 | v.cpus = ENV['TEST_VM_CPUS'] || 2
13 | v.memory = ENV['TEST_VM_MEMORY'] || 3072
14 | end
15 | end
16 |
17 | # Load in helper functions
18 | load "../install_util.rb"
19 |
20 | external_env = ""
21 | ENV.select{|k,v| k.start_with?('RKE2_') || k.start_with?('INSTALL_RKE2_')}.each {|key,value| external_env << "#{key.to_s}=#{value.to_s} "} # trailing space keeps each VAR=value a separate %W token
22 |
23 | config.vm.define "install-leap-15.6", primary: true do |test|
24 | test.vm.hostname = 'smoke'
25 | test.vm.provision 'rke2-upload-installer', type: 'file', run: 'always', source: ENV['TEST_INSTALL_SH'], destination: 'install.sh'
26 | # Leap 15.6+ VM images are missing procps for some reason.
27 | test.vm.provision 'rke2-prepare', type: 'shell', run: 'once', inline: 'zypper install -y apparmor-parser procps'
28 | test.vm.provision"rke2-install", type: 'rke2', run: "once" do |rke2|
29 | rke2.installer_url = 'file:///home/vagrant/install.sh'
30 | rke2.env = %W[ #{external_env} INSTALL_RKE2_TYPE=server]
31 | rke2.config = <<~YAML
32 | token: 'vagrant'
33 | YAML
34 | rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
35 | end
36 |
37 | waitForControlPlane(test.vm, config.vm.box.to_s)
38 | waitForCanal(test.vm)
39 | waitForCoreDNS(test.vm)
40 | waitForIngressNginx(test.vm)
41 | waitForMetricsServer(test.vm)
42 |
43 | kubectlStatus(test.vm)
44 | checkRKE2Processes(test.vm)
45 |
46 | mountDirs(test.vm)
47 | runKillAllScript(test.vm)
48 | checkMountPoint(test.vm)
49 | end
50 |
51 | config.vm.provision "install-packages", type: "shell", run: "once" do |sh|
52 | sh.upload_path = "/tmp/vagrant-install-packages"
53 | sh.env = {
54 | 'INSTALL_PACKAGES': ENV['INSTALL_PACKAGES'],
55 | }
56 | sh.inline = <<~SHELL
57 | #!/usr/bin/env bash
58 | set -eux -o pipefail
59 | zypper install -y \
60 | curl \
61 | iptables \
62 | less \
63 | lsof \
64 | socat \
65 | restorecond \
66 | policycoreutils
67 | ${INSTALL_PACKAGES}
68 | SHELL
69 | end
70 |
71 | config.vm.provision "selinux-status", type: "shell", run: "once", inline: "sestatus -v"
72 | config.vm.provision "rke2-profile-env", type: "shell", run: "once" do |sh|
73 | sh.inline = <<~SHELL
74 | #!/usr/bin/env bash
75 | cat <<-EOF > /etc/profile.d/rke2.sh
76 | export KUBECONFIG=/etc/rancher/rke2/rke2.yaml PATH=/usr/local/bin:$PATH:/var/lib/rancher/rke2/bin
77 | EOF
78 | SHELL
79 | end
80 |
81 | end
82 |
--------------------------------------------------------------------------------
/tests/install/oracle-9/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | ENV['TEST_INSTALL_SH'] ||= '../../../install.sh'
5 |
6 | Vagrant.configure("2") do |config|
7 | config.vm.box = "eurolinux-vagrant/oracle-linux-9"
8 | config.vm.boot_timeout = ENV['TEST_VM_BOOT_TIMEOUT'] || 600 # seconds
9 | config.vm.synced_folder '.', '/vagrant', type: 'rsync', disabled: false
10 | %w[libvirt virtualbox vmware_desktop].each do |p|
11 | config.vm.provider p do |v, o|
12 | v.cpus = ENV['TEST_VM_CPUS'] || 2
13 | v.memory = ENV['TEST_VM_MEMORY'] || 4096
14 | end
15 | end
16 |
17 | # Load in helper functions
18 | load "../install_util.rb"
19 | external_env = ""
20 | ENV.select{|k,v| k.start_with?('RKE2_') || k.start_with?('INSTALL_RKE2_')}.each {|key,value| external_env << "#{key.to_s}=#{value.to_s} "} # trailing space keeps each VAR=value a separate %W token
21 |
22 | config.vm.define "install-oracle-9", primary: true do |test|
23 | test.vm.hostname = 'smoke'
24 | test.vm.provision "disable-firewall", type: "shell", inline: "systemctl stop firewalld"
25 | test.vm.provision "enable-selinux", type: "shell", inline: "setenforce 1"
26 | test.vm.provision 'rke2-upload-installer', type: 'file', run: 'always', source: ENV['TEST_INSTALL_SH'], destination: 'install.sh'
27 | test.vm.provision"rke2-install", type: 'rke2', run: "once" do |rke2|
28 | rke2.installer_url = 'file:///home/vagrant/install.sh'
29 | rke2.env = %W[ #{external_env} INSTALL_RKE2_TYPE=server]
30 | rke2.config = <<~YAML
31 | token: 'vagrant'
32 | selinux: 'true'
33 | YAML
34 | rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
35 | end
36 |
37 | waitForControlPlane(test.vm, config.vm.box.to_s)
38 | waitForCanal(test.vm)
39 | waitForCoreDNS(test.vm)
40 | waitForIngressNginx(test.vm)
41 | waitForMetricsServer(test.vm)
42 |
43 | kubectlStatus(test.vm)
44 | checkRKE2Processes(test.vm)
45 |
46 | mountDirs(test.vm)
47 | runKillAllScript(test.vm)
48 | checkMountPoint(test.vm)
49 | end
50 |
51 | config.vm.provision "install-packages", type: "shell", run: "once" do |sh|
52 | sh.upload_path = "/tmp/vagrant-install-packages"
53 | sh.env = {
54 | 'INSTALL_PACKAGES': ENV['INSTALL_PACKAGES'],
55 | }
56 | sh.inline = <<~SHELL
57 | #!/usr/bin/env bash
58 | set -eux -o pipefail
59 | dnf -y install \
60 | curl \
61 | iptables \
62 | less \
63 | lsof \
64 | nc \
65 | socat \
66 | ${INSTALL_PACKAGES}
67 | SHELL
68 | end
69 |
70 | config.vm.provision "selinux-status", type: "shell", run: "once", inline: "sestatus -v"
71 | config.vm.provision "rke2-profile-env", type: "shell", run: "once" do |sh|
72 | sh.inline = <<~SHELL
73 | #!/usr/bin/env bash
74 | cat <<-EOF > /etc/profile.d/rke2.sh
75 | export KUBECONFIG=/etc/rancher/rke2/rke2.yaml PATH=/usr/local/bin:$PATH:/var/lib/rancher/rke2/bin
76 | EOF
77 | SHELL
78 | end
79 |
80 | end
81 |
--------------------------------------------------------------------------------
/tests/install/rocky-8/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | ENV['TEST_INSTALL_SH'] ||= '../../../install.sh'
5 |
6 | Vagrant.configure("2") do |config|
7 | config.vm.box = "eurolinux-vagrant/rocky-8"
8 | config.vm.boot_timeout = ENV['TEST_VM_BOOT_TIMEOUT'] || 600 # seconds
9 | config.vm.synced_folder '.', '/vagrant', type: 'rsync', disabled: false
10 | %w[libvirt virtualbox vmware_desktop].each do |p|
11 | config.vm.provider p do |v, o|
12 | v.cpus = ENV['TEST_VM_CPUS'] || 2
13 | v.memory = ENV['TEST_VM_MEMORY'] || 3072
14 | end
15 | end
16 |
17 | # Load in helper functions
18 | load "../install_util.rb"
19 |
20 | external_env = ""
21 | ENV.select{|k,v| k.start_with?('RKE2_') || k.start_with?('INSTALL_RKE2_')}.each {|key,value| external_env << "#{key.to_s}=#{value.to_s} "} # trailing space keeps each VAR=value a separate %W token
22 |
23 | config.vm.define "install-rocky-8", primary: true do |test|
24 | test.vm.hostname = 'smoke'
25 | test.vm.provision "disable-firewall", type: "shell", inline: "systemctl stop firewalld"
26 | test.vm.provision "add-bin-path", type: "shell", inline: "echo \"export PATH=/usr/local/bin:\$PATH\" >> ~/.bashrc"
27 | test.vm.provision 'rke2-upload-installer', type: 'file', run: 'always', source: ENV['TEST_INSTALL_SH'], destination: 'install.sh'
28 | test.vm.provision"rke2-install", type: 'rke2', run: "once" do |rke2|
29 | rke2.installer_url = 'file:///home/vagrant/install.sh'
30 | rke2.env = %W[ #{external_env} INSTALL_RKE2_TYPE=server]
31 | rke2.config = <<~YAML
32 | token: 'vagrant'
33 | YAML
34 | rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
35 | end
36 |
37 | waitForControlPlane(test.vm, config.vm.box.to_s)
38 | waitForCanal(test.vm)
39 | waitForCoreDNS(test.vm)
40 | waitForIngressNginx(test.vm)
41 | waitForMetricsServer(test.vm)
42 |
43 | kubectlStatus(test.vm)
44 | checkRKE2Processes(test.vm)
45 |
46 | mountDirs(test.vm)
47 | runKillAllScript(test.vm)
48 | checkMountPoint(test.vm)
49 | end
50 |
51 | config.vm.provision "install-packages", type: "shell", run: "once" do |sh|
52 | sh.upload_path = "/tmp/vagrant-install-packages"
53 | sh.env = {
54 | 'INSTALL_PACKAGES': ENV['INSTALL_PACKAGES'],
55 | }
56 | sh.inline = <<~SHELL
57 | #!/usr/bin/env bash
58 | set -eux -o pipefail
59 | dnf -y install \
60 | curl \
61 | iptables \
62 | less \
63 | lsof \
64 | nc \
65 | socat \
66 | ${INSTALL_PACKAGES}
67 | SHELL
68 | end
69 |
70 | end
71 |
--------------------------------------------------------------------------------
/tests/install/ubuntu-2404/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | ENV['TEST_INSTALL_SH'] ||= '../../../install.sh'
5 |
6 | Vagrant.configure("2") do |config|
7 | config.vm.box = "bento/ubuntu-24.04"
8 | config.vm.boot_timeout = ENV['TEST_VM_BOOT_TIMEOUT'] || 600 # seconds
9 | config.vm.synced_folder '.', '/vagrant', type: 'rsync', disabled: false
10 | %w[libvirt virtualbox vmware_desktop].each do |p|
11 | config.vm.provider p do |v, o|
12 | v.cpus = ENV['TEST_VM_CPUS'] || 2
13 | v.memory = ENV['TEST_VM_MEMORY'] || 3072
14 | end
15 | end
16 |
17 | # Load in helper functions
18 | load "../install_util.rb"
19 |
20 | external_env = ""
21 | ENV.select{|k,v| k.start_with?('RKE2_') || k.start_with?('INSTALL_RKE2_')}.each {|key,value| external_env << "#{key.to_s}=#{value.to_s} "} # trailing space keeps each VAR=value a separate %W token
22 |
23 | config.vm.define "install-ubuntu-2404", primary: true do |test|
24 | test.vm.hostname = 'smoke'
25 | test.vm.provision 'rke2-upload-installer', type: 'file', run: 'always', source: ENV['TEST_INSTALL_SH'], destination: 'install.sh'
26 | test.vm.provision"rke2-install", type: 'rke2', run: "once" do |rke2|
27 | rke2.installer_url = 'file:///home/vagrant/install.sh'
28 | rke2.env = %W[ #{external_env} INSTALL_RKE2_TYPE=server]
29 | rke2.config = <<~YAML
30 | token: 'vagrant'
31 | YAML
32 | rke2.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
33 | end
34 |
35 | waitForControlPlane(test.vm, config.vm.box.to_s)
36 | waitForCanal(test.vm)
37 | waitForCoreDNS(test.vm)
38 | waitForIngressNginx(test.vm)
39 | waitForMetricsServer(test.vm)
40 |
41 | kubectlStatus(test.vm)
42 | test.vm.provision "rke2-procps", type: "shell", run: ENV['CI'] == 'true' ? 'never' : 'once' do |sh|
43 | sh.inline = <<~SHELL
44 | #!/usr/bin/env bash
45 | set -eux -o pipefail
46 | ps auxZ | grep -E 'etcd|kube|rke2|container|confined' | grep -v grep
47 | SHELL
48 | end
49 |
50 | mountDirs(test.vm)
51 | runKillAllScript(test.vm)
52 | checkMountPoint(test.vm)
53 | end
54 |
55 | config.vm.provision "install-packages", type: "shell", run: "once" do |sh|
56 | sh.upload_path = "/tmp/vagrant-install-packages"
57 | sh.env = {
58 | 'INSTALL_PACKAGES': ENV['INSTALL_PACKAGES'],
59 | }
60 | sh.inline = <<~SHELL
61 | #!/usr/bin/env bash
62 | set -eux -o pipefail
63 | apt-get -y update
64 | apt-get -y install \
65 | curl \
66 | iptables \
67 | less \
68 | lsof \
69 | socat \
70 | ${INSTALL_PACKAGES}
71 | SHELL
72 | end
73 |
74 | config.vm.provision "rke2-profile-env", type: "shell", run: "once" do |sh|
75 | sh.inline = <<~SHELL
76 | #!/usr/bin/env bash
77 | cat <<-EOF > /etc/profile.d/rke2.sh
78 | export KUBECONFIG=/etc/rancher/rke2/rke2.yaml PATH=/usr/local/bin:$PATH:/var/lib/rancher/rke2/bin
79 | EOF
80 | SHELL
81 | end
82 |
83 | end
84 |
--------------------------------------------------------------------------------
/tests/install/windows-2019/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | ENV['TEST_INSTALL_PS1'] ||= '../../../install.ps1'
5 |
6 | Vagrant.configure("2") do |config|
7 | config.vagrant.plugins = ["vagrant-reload"]
8 | config.vm.box = "jborean93/WindowsServer2019"
9 | config.vm.boot_timeout = ENV['TEST_VM_BOOT_TIMEOUT'] || 600 # seconds
10 | config.vm.synced_folder '.', '/vagrant', disabled: true
11 | %w[libvirt virtualbox hyperv].each do |p|
12 | config.vm.provider p do |v, o|
13 | v.cpus = ENV['TEST_VM_CPUS'] || 2
14 | v.memory = ENV['TEST_VM_MEMORY'] || 3072
15 | end
16 | end
17 |
18 | config.vm.define "install-windows-2019", primary: true do |test|
19 | test.vm.hostname = 'smoke'
20 | test.vm.provision :shell, privileged: true, run: "once", inline: "Install-WindowsFeature -Name Containers"
21 | test.vm.provision :reload
22 | test.vm.provision 'rke2-upload-installer', type: 'file', run: 'always', source: ENV['TEST_INSTALL_PS1'], destination: 'install.ps1'
23 | test.vm.provision "rke2-install", type: "shell", privileged: true, run: "once" do |sh|
24 | sh.env = ENV.select{|k,v| k.start_with?('RKE2_') || k.start_with?('INSTALL_RKE2_')}.merge({
25 | :INSTALL_RKE2_TYPE => 'AGENT'
26 | })
27 | sh.inline = <<~SHELL
28 | Write-Host "Installing RKE2 as an agent..."
29 | Push-Location C:\\Users\\vagrant\\Documents
30 | ./install.ps1
31 |
32 | Write-Host "Adding RKE2 Windows Service..."
33 | $env:PATH+=";C:\\usr\\local\\bin;C:\\var\\lib\\rancher\\rke2\\bin"
34 | Push-Location c:\\usr\\local\\bin
35 | rke2.exe agent service --add
36 | Pop-Location
37 | Start-Sleep -s 5
38 |
39 | if(Get-Service rke2 -ErrorAction Ignore) {
40 | exit 0
41 | }
42 | else {
43 | exit 1
44 | }
45 | SHELL
46 | end
47 | end
48 | end
49 |
--------------------------------------------------------------------------------
/tests/install/windows-2022/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | 
# Smoke test: boot a Windows Server 2022 box, install RKE2 as an agent via
# install.ps1, register the rke2 Windows service, and verify it exists.
# Path to the installer script; callers may override via TEST_INSTALL_PS1.
4 | ENV['TEST_INSTALL_PS1'] ||= '../../../install.ps1'
5 | 
6 | Vagrant.configure("2") do |config|
# vagrant-reload is required to reboot the guest after enabling the
# Containers feature below.
7 | config.vagrant.plugins = ["vagrant-reload"]
8 | config.vm.box = "jborean93/WindowsServer2022"
9 | config.vm.boot_timeout = ENV['TEST_VM_BOOT_TIMEOUT'] || 900 # seconds
10 | config.vm.synced_folder '.', '/vagrant', disabled: true
# Apply the same CPU/memory sizing to every supported provider.
11 | %w[libvirt virtualbox hyperv].each do |p|
12 | config.vm.provider p do |v, o|
13 | v.cpus = ENV['TEST_VM_CPUS'] || 2
14 | v.memory = ENV['TEST_VM_MEMORY'] || 3072
15 | end
16 | end
17 | 
18 | config.vm.define "install-windows-2022", primary: true do |test|
19 | test.vm.hostname = 'smoke'
# The Containers Windows feature needs a reboot (the :reload provisioner)
# before container workloads can run.
20 | test.vm.provision :shell, privileged: true, run: "once", inline: "Install-WindowsFeature -Name Containers"
21 | test.vm.provision :reload
22 | test.vm.provision 'rke2-upload-installer', type: 'file', run: 'always', source: ENV['TEST_INSTALL_PS1'], destination: 'install.ps1'
# Run the installer, register the rke2 service, and succeed only if the
# service exists afterwards (the heredoc exits 0/1 accordingly).
23 | test.vm.provision "rke2-install", type: "shell", privileged: true, run: "once" do |sh|
# Forward every RKE2_* / INSTALL_RKE2_* host env var into the provisioner,
# forcing agent mode.
24 | sh.env = ENV.select{|k,v| k.start_with?('RKE2_') || k.start_with?('INSTALL_RKE2_')}.merge({
25 | :INSTALL_RKE2_TYPE => 'AGENT'
26 | })
27 | sh.inline = <<~SHELL
28 | Write-Host "Installing RKE2 as an agent..."
29 | Push-Location C:\\Users\\vagrant\\Documents
30 | ./install.ps1
31 | 
32 | Write-Host "Adding RKE2 Windows Service..."
33 | $env:PATH+=";C:\\usr\\local\\bin;C:\\var\\lib\\rancher\\rke2\\bin"
34 | Push-Location c:\\usr\\local\\bin
35 | rke2.exe agent service --add
36 | Pop-Location
37 | Start-Sleep -s 5
38 | 
39 | if(Get-Service rke2 -ErrorAction Ignore) {
40 | exit 0
41 | }
42 | else {
43 | exit 1
44 | }
45 | SHELL
46 | end
47 | end
48 | end
49 | 
--------------------------------------------------------------------------------
/tests/integration/startup/startup_test.go:
--------------------------------------------------------------------------------
1 | package startup
2 | 
3 | import (
4 | "os"
5 | "testing"
6 | 
7 | . "github.com/onsi/ginkgo/v2"
8 | . "github.com/onsi/gomega"
9 | testutil "github.com/rancher/rke2/tests/integration"
10 | "github.com/sirupsen/logrus"
11 | utilnet "k8s.io/apimachinery/pkg/util/net"
12 | )
13 | 
// serverLog receives the server process output returned by StartServer;
// serverArgs are the base CLI flags for every server under test; testLock
// is the handle returned by AcquireTestLock and released via Cleanup.
14 | var (
15 | serverLog *os.File
16 | serverArgs = []string{"--debug"}
17 | testLock int
18 | )
19 | 
// Serialize integration tests: take the shared test lock before any spec runs.
20 | var _ = BeforeSuite(func() {
21 | var err error
22 | testLock, err = testutil.AcquireTestLock()
23 | Expect(err).ToNot(HaveOccurred())
24 | })
25 | 
// Ordered startup scenarios: each "When" starts a server with a flag set,
// polls ServerReady (up to 240s, every 15s), then kills the server and
// releases the lock so the next scenario starts from a clean slate.
26 | var _ = Describe("startup tests", Ordered, func() {
27 | When("a default server is created", func() {
28 | It("starts successfully", func() {
29 | var err error
30 | serverLog, err = testutil.StartServer(serverArgs...)
31 | Expect(err).ToNot(HaveOccurred())
32 | })
33 | It("has the default components deployed", func() {
34 | Eventually(func() error {
35 | err := testutil.ServerReady()
36 | if err != nil {
// Log intermediate readiness errors so the polling progress is visible.
37 | logrus.Info(err)
38 | }
39 | return err
40 | }, "240s", "15s").Should(Succeed())
41 | })
42 | It("dies cleanly", func() {
43 | Expect(testutil.KillServer(serverLog)).To(Succeed())
44 | Expect(testutil.Cleanup(testLock)).To(Succeed())
45 | })
46 | })
47 | When("a server is created with bind-address", func() {
48 | It("starts successfully", func() {
// NOTE(review): the error from ChooseHostInterface is discarded; if it
// failed, hostIP would be nil and hostIP.String() would panic — confirm
// this is acceptable for test code.
49 | hostIP, _ := utilnet.ChooseHostInterface()
50 | var err error
51 | serverLog, err = testutil.StartServer(append(serverArgs, "--bind-address", hostIP.String())...)
52 | Expect(err).ToNot(HaveOccurred())
53 | })
54 | It("has the default components deployed", func() {
55 | Eventually(func() error {
56 | err := testutil.ServerReady()
57 | if err != nil {
58 | logrus.Info(err)
59 | }
60 | return err
61 | }, "240s", "15s").Should(Succeed())
62 | })
63 | It("dies cleanly", func() {
64 | Expect(testutil.KillServer(serverLog)).To(Succeed())
65 | Expect(testutil.Cleanup(testLock)).To(Succeed())
66 | })
67 | })
68 | })
69 | 
// failed latches whether any spec failed so AfterSuite can preserve the log.
70 | var failed bool
71 | var _ = AfterEach(func() {
72 | failed = failed || CurrentSpecReport().Failed()
73 | })
74 | 
// On failure, save the server log before tearing down. serverLog is set to
// nil after SaveLog, so KillServer below receives nil on the failure path —
// NOTE(review): assumes KillServer tolerates a nil log handle; confirm.
75 | var _ = AfterSuite(func() {
76 | if failed {
77 | testutil.SaveLog(serverLog, false)
78 | serverLog = nil
79 | }
80 | Expect(testutil.KillServer(serverLog)).To(Succeed())
81 | Expect(testutil.Cleanup(testLock)).To(Succeed())
82 | })
83 | 
// Test_IntegrationStartup is the `go test` entry point that runs the suite.
84 | func Test_IntegrationStartup(t *testing.T) {
85 | RegisterFailHandler(Fail)
86 | RunSpecs(t, "Startup Suite")
87 | }
88 | 
--------------------------------------------------------------------------------
/updatecli/scripts/retrieve_chart_version.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | set -eu
4 | 
# Print an error message to stderr and abort.
5 | fatal() {
6 | echo '[ERROR] ' "$@" >&2
7 | exit 1
8 | }
9 | 
10 | CHART_INDEX_FILE_URL="https://rke2-charts.rancher.io/index.yaml"
# The chart to look up is the single required argument (e.g. rancher-vsphere-cpi).
11 | CHART_NAME="${1}"
12 | # Versions are unordered inside the charts file, so we must sort by version in
13 | # reverse order and get the highest.
# NOTE(review): CHART_NAME is spliced unquoted into the yq path; a name
# containing dots or other yq specials would need quoting — current chart
# names are safe.
14 | CHART_VERSION=$(curl -sfL "${CHART_INDEX_FILE_URL}" | yq -r '.entries.'"${CHART_NAME}"'[].version' | sort -rV | head -n 1)
15 | 
# yq prints "null" for a missing entry; treat that the same as no output.
16 | if [[ "${CHART_VERSION}" = "null" ]] || [[ -z "${CHART_VERSION}" ]]; then
17 | fatal "failed to retrieve the charts' index file or to parse it"
18 | fi
19 | 
# The resolved version is the script's only stdout (consumed by updatecli).
20 | echo "${CHART_VERSION}"
21 | 
--------------------------------------------------------------------------------
/updatecli/scripts/validate_version.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | info()
4 | {
5 | echo '[INFO] ' "$@"
6 | }
7 | warn()
8 | {
9 | echo '[WARN] ' "$@" >&2
10 | }
11 | fatal()
12 | {
13 | echo '[ERROR] ' "$@" >&2
14 | exit 1
15 | }
16 |
17 |
18 | CHART_VERSIONS_FILE="charts/chart_versions.yaml"
19 |
20 |
21 | CHART_NAME=${1}
22 | CHART_VERSION=${2}
23 |
24 | CURRENT_VERSION=$(yq -r '.charts[] | select(.filename == "/charts/'"${1}"'.yaml") | .version' ${CHART_VERSIONS_FILE})
25 | if [ "${CURRENT_VERSION}" != "${CHART_VERSION}" ]; then
26 | info "chart ${CHART_NAME} should be updated from version ${CURRENT_VERSION} to ${CHART_VERSION}"
27 | exit 0
28 | fi
29 | fatal "chart ${CHART_NAME} has the latest version"
--------------------------------------------------------------------------------
/updatecli/updatecli.d/vsphere-cpi.yml:
--------------------------------------------------------------------------------
1 | # Updatecli pipeline: keep the rancher-vsphere-cpi chart and its airgap
2 | # images in sync with the latest version published to rke2-charts.rancher.io.
3 | # To test this locally:
4 | #   have "UPDATECLI_GITHUB_ACTOR" env set to your github username
5 | #   have "UPDATECLI_GITHUB_TOKEN" env set to your github token
6 | #   have the latest version of updatecli installed
7 | #   'updatecli diff -v updatecli/values.yaml -c updatecli/updatecli.d/vsphere-cpi.yml'
8 | ---
9 | # Chart-specific name so reports are distinguishable from the csi pipeline.
10 | name: "Update vsphere-cpi chart and images"
11 | # Make sure we can pull in github repos from multiple orgs
12 | scms:
13 |   rke2:
14 |     kind: "github"
15 |     spec:
16 |       user: "{{ .github.user }}"
17 |       email: "{{ .github.email }}"
18 |       username: "{{ requiredEnv .github.username }}"
19 |       token: "{{ requiredEnv .github.token }}"
20 |       owner: rancher
21 |       repository: rke2
22 |       branch: master
23 | 
24 | # Resolve the newest published chart version from the charts index.
25 | sources:
26 |   vsphere-cpi:
27 |     name: "Get vsphere-cpi chart version"
28 |     kind: "shell"
29 |     spec:
30 |       command: bash ./updatecli/scripts/retrieve_chart_version.sh rancher-vsphere-cpi
31 | 
32 | # Only proceed when the pinned version differs (script exits 0 in that case).
33 | conditions:
34 |   vsphereCPIVersionShouldBeUpdated:
35 |     name: "Check if vsphere-cpi chart should be updated or not"
36 |     kind: "shell"
37 |     sourceid: vsphere-cpi
38 |     spec:
39 |       command: bash ./updatecli/scripts/validate_version.sh rancher-vsphere-cpi
40 | 
41 | targets:
42 |   updateVsphereCPI:
43 |     name: "Update the vsphere-cpi airgap images"
44 |     kind: "shell"
45 |     scmid: "rke2"
46 |     sourceid: vsphere-cpi
47 |     spec:
48 |       command: bash ./updatecli/scripts/update_chart_and_images.sh rancher-vsphere-cpi
49 | 
50 | actions:
51 |   github:
52 |     kind: "github/pullrequest"
53 |     scmid: "rke2"
54 |     spec:
55 |       automerge: false
56 |       draft: false
57 |       mergemethod: squash
58 |       parent: false
59 |       title: "Update vsphere-cpi chart"
60 | 
--------------------------------------------------------------------------------
/updatecli/updatecli.d/vsphere-csi.yml:
--------------------------------------------------------------------------------
1 | # Updatecli pipeline: keep the rancher-vsphere-csi chart and its airgap
2 | # images in sync with the latest version published to rke2-charts.rancher.io.
3 | # To test this locally:
4 | #   have "UPDATECLI_GITHUB_ACTOR" env set to your github username
5 | #   have "UPDATECLI_GITHUB_TOKEN" env set to your github token
6 | #   have the latest version of updatecli installed
7 | #   'updatecli diff -v updatecli/values.yaml -c updatecli/updatecli.d/vsphere-csi.yml'
8 | ---
9 | # Chart-specific name so reports are distinguishable from the cpi pipeline.
10 | name: "Update vsphere-csi chart and images"
11 | # Make sure we can pull in github repos from multiple orgs
12 | scms:
13 |   rke2:
14 |     kind: "github"
15 |     spec:
16 |       user: "{{ .github.user }}"
17 |       email: "{{ .github.email }}"
18 |       username: "{{ requiredEnv .github.username }}"
19 |       token: "{{ requiredEnv .github.token }}"
20 |       owner: rancher
21 |       repository: rke2
22 |       branch: master
23 | 
24 | # Resolve the newest published chart version from the charts index.
25 | sources:
26 |   vsphere-csi:
27 |     name: "Get vsphere-csi chart version"
28 |     kind: "shell"
29 |     spec:
30 |       command: bash ./updatecli/scripts/retrieve_chart_version.sh rancher-vsphere-csi
31 | 
32 | # Only proceed when the pinned version differs (script exits 0 in that case).
33 | conditions:
34 |   vsphereCSIVersionShouldBeUpdated:
35 |     name: "Check if vsphere-csi chart should be updated or not"
36 |     kind: "shell"
37 |     sourceid: vsphere-csi
38 |     spec:
39 |       command: bash ./updatecli/scripts/validate_version.sh rancher-vsphere-csi
40 | 
41 | targets:
42 |   updateVsphereCSI:
43 |     name: "Update the vsphere-csi airgap images"
44 |     kind: "shell"
45 |     scmid: "rke2"
46 |     sourceid: vsphere-csi
47 |     spec:
48 |       command: bash ./updatecli/scripts/update_chart_and_images.sh rancher-vsphere-csi
49 | 
50 | actions:
51 |   github:
52 |     kind: "github/pullrequest"
53 |     scmid: "rke2"
54 |     spec:
55 |       automerge: false
56 |       draft: false
57 |       mergemethod: squash
58 |       parent: false
59 |       title: "Update vsphere-csi chart"
60 | 
--------------------------------------------------------------------------------
/updatecli/values.yaml:
--------------------------------------------------------------------------------
# Default values consumed by the manifests under updatecli/updatecli.d/.
1 | github:
# Author identity used for the commits that updatecli creates.
2 | user: "github-actions[bot]"
3 | email: "41898282+github-actions[bot]@users.noreply.github.com"
# Names of the environment variables (resolved via requiredEnv in the
# manifests) that hold the GitHub actor and token at runtime — these are
# not the credentials themselves.
4 | username: "UPDATECLI_GITHUB_ACTOR"
5 | token: "UPDATECLI_GITHUB_TOKEN"
6 | 
--------------------------------------------------------------------------------