├── .chainsaw.yaml ├── .gitattributes ├── .github ├── CONTRIBUTING.md ├── ISSUE_TEMPLATE.md ├── PULL_REQUEST_TEMPLATE.md ├── dependabot.yml ├── filters.yml ├── labels.yml ├── release-drafter.yml └── workflows │ ├── automerge.yml │ ├── build-test.yml │ ├── ci.yml │ ├── helm.yml │ ├── label-sync.yml │ ├── pr-labeler.yml │ ├── release-drafter.yml │ └── release.yml ├── .gitignore ├── .golangci.yml ├── CHANGELOG.md ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── _config.yaml ├── cloud ├── annotations │ └── annotations.go ├── api.go └── linode │ ├── cilium_loadbalancers.go │ ├── cilium_loadbalancers_test.go │ ├── client │ ├── client.go │ ├── client_with_metrics.go │ └── mocks │ │ └── mock_client.go │ ├── cloud.go │ ├── cloud_test.go │ ├── common.go │ ├── common_test.go │ ├── fake_linode_test.go │ ├── firewall │ ├── firewalls.go │ └── firewalls_test.go │ ├── health_check.go │ ├── health_check_test.go │ ├── instances.go │ ├── instances_test.go │ ├── loadbalancers.go │ ├── loadbalancers_deprecated.go │ ├── loadbalancers_helpers.go │ ├── loadbalancers_test.go │ ├── metrics.go │ ├── node_controller.go │ ├── node_controller_test.go │ ├── nodeipamcontroller.go │ ├── nodeipamcontroller_test.go │ ├── route_controller.go │ ├── route_controller_test.go │ ├── service_controller.go │ ├── service_controller_test.go │ ├── tools.go │ ├── vpc.go │ └── vpc_test.go ├── codecov.yml ├── deploy ├── .gitignore ├── ccm-linode-template.yaml ├── chart │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ │ ├── _helpers.tpl │ │ ├── ccm-linode.yaml │ │ ├── clusterrole-rbac.yaml │ │ ├── clusterrolebinding-rbac.yaml │ │ ├── daemonset.yaml │ │ └── serviceaccount.yaml │ └── values.yaml ├── generate-manifest.sh └── uninstall.sh ├── devbox.json ├── devbox.lock ├── docs ├── configuration │ ├── README.md │ ├── annotations.md │ ├── environment.md │ ├── firewall.md │ ├── loadbalancer.md │ ├── nodeipam.md │ ├── nodes.md │ ├── routes.md │ └── session-affinity.md ├── development │ └── README.md ├── examples │ ├── README.md │ ├── advanced.md │ └── basic.md └── getting-started │ ├── README.md │ ├── helm-installation.md │ ├── installation.md │ ├── manual-installation.md │ ├── overview.md │ ├── requirements.md │ ├── troubleshooting.md │ └── verification.md ├── e2e ├── bgp-test │ └── lb-cilium-bgp │ │ ├── chainsaw-test.yaml │ │ └── create-pod-service.yaml ├── setup │ ├── cilium-setup.sh │ └── ctlptl-config.yaml ├── subnet-test │ └── chainsaw-test.yaml └── test │ ├── assert-ccm-resources.yaml │ ├── ccm-resources │ └── chainsaw-test.yaml │ ├── certificates │ ├── ca.crt │ ├── ca.key │ ├── server.crt │ ├── server.csr │ └── server.key │ ├── fw-use-specified-nb │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-created-with-new-nb-id │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-created-with-specified-nb-id │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-delete-svc-no-nb │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-delete-svc-use-new-nbid │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-delete-svc-use-specified-nb │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-fw-delete-acl │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-fw-update-acl │ ├── chainsaw-test.yaml │ ├── create-pods-services.yaml │ └── update-service.yaml │ ├── lb-hostname-only-ingress │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-http-body-health-check │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── 
lb-http-status-health-check │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-passive-health-check │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-premium-nb │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-preserve-annotation-new-nb-specified │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-preserve-annotation-svc-delete │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-simple │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-single-tls │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-tcp-connection-health-check │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-update-port │ ├── chainsaw-test.yaml │ ├── create-pods-services.yaml │ └── update-port-service.yaml │ ├── lb-updated-with-nb-id │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-with-http-to-https │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-with-multiple-http-https-ports │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-with-node-addition │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-with-proxyprotocol-default-annotation │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-with-proxyprotocol-override │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-with-proxyprotocol-port-specific │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-with-proxyprotocol-set │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-with-udp-ports-algorithm │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-with-udp-ports-change-port │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-with-udp-ports-mode │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-with-udp-ports-stickiness │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-with-udp-ports │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── lb-with-vpc-backends │ ├── chainsaw-test.yaml │ └── create-pods-services.yaml │ ├── route-controller-test │ └── chainsaw-test.yaml │ └── scripts │ ├── get-nb-config.sh │ └── get-nb-id.sh ├── examples ├── .gitignore ├── http-nginx-firewalled.yaml ├── http-nginx.yaml ├── https-nginx.yaml ├── tcp-nginx.yaml ├── test.sh └── udp-example.yaml ├── go.mod ├── go.sum ├── hack ├── builddeps.sh ├── coverage.sh └── templates │ └── prometheus.go.gotpl ├── main.go └── sentry ├── sentry.go └── sentry_test.go /.chainsaw.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/configuration-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Configuration 4 | metadata: 5 | name: configuration 6 | spec: 7 | timeouts: 8 | assert: 5m0s 9 | cleanup: 5m0s 10 | delete: 5m0s 11 | error: 5m0s 12 | exec: 5m0s 13 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | *.sh text eol=lf -------------------------------------------------------------------------------- /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | :+1::tada: First off, we appreciate you taking the time to contribute! THANK YOU! :tada::+1: 4 | 5 | We put together the handy guide below to help you get support for your work. 
Read on! 6 | 7 | ## I Just Want to Ask the Maintainers a Question 8 | 9 | The [Linode Community](https://www.linode.com/community/questions/) is a great place to get additional support. 10 | 11 | ## How Do I Submit A (Good) Bug Report or Feature Request 12 | 13 | Please open a [GitHub issue](https://guides.github.com/features/issues/) to report bugs or suggest features. 14 | 15 | When filing an issue or feature request, help us avoid duplication and redundant effort -- check existing open or recently closed issues first. 16 | 17 | Detailed bug reports and requests are easier for us to work with. Please include the following in your issue: 18 | 19 | * A reproducible test case or series of steps 20 | * The version of our code being used 21 | * Any modifications you've made, relevant to the bug 22 | * Anything unusual about your environment or deployment 23 | * Screenshots and code samples where illustrative and helpful 24 | 25 | ## How to Open a Pull Request 26 | 27 | We follow the [fork and pull model](https://opensource.guide/how-to-contribute/#opening-a-pull-request) for open source contributions. 28 | 29 | Tips for a faster merge: 30 | * Address one feature or bug per pull request. 31 | * Avoid large formatting changes; they make it hard for us to focus on your work. 32 | * Follow the language's coding conventions. 33 | * Make sure that tests pass. 34 | * Make sure your commits are atomic, [addressing one change per commit](https://chris.beams.io/posts/git-commit/). 35 | * Add tests! 36 | 37 | ## Cutting Releases 38 | 39 | Every time a commit is merged into main, a new patch release is [automatically drafted](https://github.com/linode/linode-cloud-controller-manager/actions/workflows/release-drafter.yml) with a changelog. You can modify (tag name, changelog, title, etc.) and publish the release via the [releases page](https://github.com/linode/linode-cloud-controller-manager/releases). 40 | 41 | When a release is published, the [release workflow](https://github.com/linode/linode-cloud-controller-manager/actions/workflows/docker-hub.yml) builds and pushes the Docker image to Docker Hub. 42 | 43 | ## Code of Conduct 44 | 45 | This project follows the [Linode Community Code of Conduct](https://www.linode.com/community/questions/conduct). 46 | 47 | ## Vulnerability Reporting 48 | 49 | If you discover a potential security issue in this project, we ask that you notify Linode Security via our [vulnerability reporting process](https://hackerone.com/linode). Please do **not** create a public GitHub issue. 50 | 51 | ## Licensing 52 | 53 | See the [LICENSE file](/LICENSE) for our project's licensing. 54 | 55 | 56 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## General: 2 | 3 | * [ ] Have you removed all sensitive information, including but not limited to access keys and passwords? 4 | * [ ] Have you checked to ensure there aren't other open or closed [Pull Requests](../../pulls) for the same bug/feature/question? 5 | 6 | ---- 7 | 8 | ## Feature Requests: 9 | * [ ] Have you explained your rationale for why this feature is needed? 10 | * [ ] Have you offered a proposed implementation/solution? 11 | 12 | ---- 13 | 14 | ## Bug Reporting 15 | 16 | ### Expected Behavior 17 | 18 | ### Actual Behavior 19 | 20 | ### Steps to Reproduce the Problem 21 | 22 | 1. 23 | 1. 24 | 1.
25 | 26 | ### Environment Specifications 27 | 28 | #### Screenshots, Code Blocks, and Logs 29 | 30 | #### Additional Notes 31 | 32 | ---- 33 | 34 | For general help or discussion, join the [Kubernetes Slack team](https://kubernetes.slack.com/messages/CD4B15LUR/details/) channel `#linode`. To sign up, use the [Kubernetes Slack inviter](http://slack.kubernetes.io/). 35 | 36 | The [Linode Community](https://www.linode.com/community/questions/) is a great place to get additional support. 37 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | 14 | ### General: 15 | 16 | * [ ] Have you removed all sensitive information, including but not limited to access keys and passwords? 17 | * [ ] Have you checked to ensure there aren't other open or closed [Pull Requests](../../pulls) for the same bug/feature/question? 18 | 19 | ### Pull Request Guidelines: 20 | 21 | 1. [ ] Does your submission pass tests? 22 | 1. [ ] Have you added tests? 23 | 1. [ ] Are you addressing a single feature in this PR? 24 | 1. [ ] Are your commits atomic, addressing one change per commit? 25 | 1. [ ] Are you following the conventions of the language? 26 | 1. [ ] Have you saved your large formatting changes for a different PR, so we can focus on your work? 27 | 1. [ ] Have you explained your rationale for why this feature is needed? 28 | 1. [ ] Have you linked your PR to an [open issue](https://blog.github.com/2013-05-14-closing-issues-via-pull-requests/)? 29 | 30 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | 4 | # Go - root directory 5 | - package-ecosystem: "gomod" 6 | directory: "/" 7 | schedule: 8 | interval: "weekly" 9 | ## group all dependencies with a k8s.io prefix into a single PR. 10 | groups: 11 | kubernetes: 12 | patterns: [ "k8s.io/*", "sigs.k8s.io/*" ] 13 | otel: 14 | patterns: ["go.opentelemetry.io/*"] 15 | commit-message: 16 | prefix: ":seedling:" 17 | labels: 18 | - "dependencies" 19 | 20 | # Docker 21 | - package-ecosystem: "docker" 22 | directory: "/" 23 | schedule: 24 | interval: "weekly" 25 | commit-message: 26 | prefix: ":seedling:" 27 | labels: 28 | - "dependencies" 29 | 30 | # github-actions 31 | - package-ecosystem: "github-actions" 32 | directory: "/" 33 | schedule: 34 | interval: "weekly" 35 | commit-message: 36 | prefix: ":seedling:" 37 | labels: 38 | - "dependencies" 39 | -------------------------------------------------------------------------------- /.github/filters.yml: -------------------------------------------------------------------------------- 1 | # Any file that is not a doc *.md file 2 | src: 3 | - "!**/*.md" 4 | -------------------------------------------------------------------------------- /.github/labels.yml: -------------------------------------------------------------------------------- 1 | # PR Labels 2 | - name: new-feature 3 | description: for new features in the changelog. 4 | color: 225fee 5 | - name: improvement 6 | description: for improvements in existing functionality in the changelog. 7 | color: 22ee47 8 | - name: repo-ci-improvement 9 | description: for improvements in the repository or CI workflow in the changelog. 10 | color: c922ee 11 | - name: bugfix 12 | description: for any bug fixes in the changelog.
13 | color: ed8e21 14 | - name: documentation 15 | description: for updates to the documentation in the changelog. 16 | color: d3e1e6 17 | - name: dependencies 18 | description: dependency updates including security fixes 19 | color: 5c9dff 20 | - name: testing 21 | description: for updates to the testing suite in the changelog. 22 | color: 933ac9 23 | - name: breaking-change 24 | description: for breaking changes in the changelog. 25 | color: ff0000 26 | - name: ignore-for-release 27 | description: PRs you do not want to render in the changelog. 28 | color: 7b8eac 29 | # Issue Labels 30 | - name: enhancement 31 | description: issues that request an enhancement. 32 | color: 22ee47 33 | - name: bug 34 | description: issues that report a bug. 35 | color: ed8e21 36 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name-template: 'v$NEXT_PATCH_VERSION' 2 | tag-template: 'v$NEXT_PATCH_VERSION' 3 | exclude-labels: 4 | - ignore-for-release 5 | categories: 6 | - title: ⚠️ Breaking Change 7 | labels: 8 | - breaking-change 9 | - title: 🐛 Bug Fixes 10 | labels: 11 | - bugfix 12 | - title: 🚀 New Features 13 | labels: 14 | - new-feature 15 | - title: 💡 Improvements 16 | labels: 17 | - improvement 18 | - title: 🧪 Testing Improvements 19 | labels: 20 | - testing 21 | - title: ⚙️ Repo/CI Improvements 22 | labels: 23 | - repo-ci-improvement 24 | - title: 📖 Documentation 25 | labels: 26 | - documentation 27 | - title: 📦 Dependency Updates 28 | labels: 29 | - dependencies 30 | - title: Other Changes 31 | labels: 32 | - "*" 33 | autolabeler: 34 | - label: 'breaking-change' 35 | title: 36 | - '/.*\[breaking\].+/' 37 | - label: 'deprecation' 38 | title: 39 | - '/.*\[deprecation\].+/' 40 | - label: 'bugfix' 41 | title: 42 | - '/.*\[fix\].+/' 43 | - label: 'new-feature' 44 | title: 45 | - '/.*\[feat\].+/' 46 | - label: 'improvement' 47 | title: 48 | - '/.*\[improvement\].+/' 49 | - label: 'testing' 50 | title: 51 | - '/.*\[test\].+/' 52 | - label: 'repo-ci-improvement' 53 | title: 54 | - '/.*\[CI\].+/' 55 | - '/.*\[ci\].+/' 56 | - label: 'documentation' 57 | title: 58 | - '/.*\[docs\].+/' 59 | - label: 'dependencies' 60 | title: 61 | - '/.*\[deps\].+/' 62 | 63 | change-template: '- $TITLE by @$AUTHOR in #$NUMBER' 64 | no-changes-template: "- No changes" 65 | template: | 66 | ## What's Changed 67 | $CHANGES 68 | -------------------------------------------------------------------------------- /.github/workflows/automerge.yml: -------------------------------------------------------------------------------- 1 | name: 'Auto Merge GH-Pages' 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | merge-main-to-gh-pages: 10 | 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v4 16 | with: 17 | fetch-depth: 0 # Full clone necessary for proper merge 18 | 19 | - name: Set git config 20 | run: | 21 | git config --local user.email "actions@github.com" 22 | git config --local user.name "GitHub Actions" 23 | 24 | - name: Merge main into gh-pages 25 | run: | 26 | git checkout gh-pages 27 | git pull 28 | git merge --no-ff main -m "Automatically merge main into gh-pages" 29 | git push -------------------------------------------------------------------------------- /.github/workflows/build-test.yml: -------------------------------------------------------------------------------- 1 | name: Build Test 2 | on: 3 | push: 4 | branches: 5 |
- main 6 | pull_request: null 7 | 8 | permissions: 9 | contents: read 10 | pull-requests: read 11 | actions: read 12 | 13 | concurrency: 14 | group: build-test-${{ github.event.pull_request.number || github.ref_name }} 15 | cancel-in-progress: true 16 | 17 | jobs: 18 | changes: 19 | runs-on: ubuntu-latest 20 | outputs: 21 | paths: ${{ steps.filter.outputs.changes }} 22 | steps: 23 | - uses: actions/checkout@v4.2.2 24 | with: 25 | ref: ${{ github.event.pull_request.head.sha }} 26 | - name: Harden Runner 27 | uses: step-security/harden-runner@v2 28 | with: 29 | disable-sudo: true 30 | egress-policy: block 31 | allowed-endpoints: > 32 | api.github.com:443 33 | github.com:443 34 | - uses: dorny/paths-filter@v3 35 | id: filter 36 | with: 37 | base: ${{ github.ref }} 38 | filters: .github/filters.yml 39 | 40 | build-test: 41 | runs-on: ubuntu-latest 42 | needs: changes 43 | if: ${{ contains(fromJSON(needs.changes.outputs.paths), 'src') }} 44 | steps: 45 | - name: Harden Runner 46 | uses: step-security/harden-runner@v2 47 | with: 48 | disable-sudo: true 49 | egress-policy: block 50 | allowed-endpoints: > 51 | api.github.com:443 52 | github.com:443 53 | golang.org:443 54 | proxy.golang.org:443 55 | sum.golang.org:443 56 | objects.githubusercontent.com:443 57 | storage.googleapis.com:443 58 | cli.codecov.io:443 59 | api.codecov.io:443 60 | ingest.codecov.io:443 61 | raw.githubusercontent.com:443 62 | get.helm.sh:443 63 | golangci-lint.run:443 64 | 65 | - uses: actions/checkout@v4.2.2 66 | with: 67 | fetch-depth: 0 68 | - uses: actions/setup-go@v5 69 | with: 70 | go-version-file: go.mod 71 | check-latest: true 72 | 73 | - name: Vet 74 | run: make vet 75 | 76 | - name: lint 77 | uses: golangci/golangci-lint-action@v8 78 | with: 79 | version: v2.1.5 80 | 81 | - name: Helm Lint 82 | run: make helm-lint 83 | 84 | - name: Test 85 | run: make test 86 | 87 | - name: Build 88 | run: make build 89 | 90 | docker-build: 91 | runs-on: ubuntu-latest 92 | steps: 93 | - uses: actions/checkout@v4.2.2 94 | with: 95 | fetch-depth: 0 96 | - name: Docker Meta 97 | id: meta 98 | uses: docker/metadata-action@v5 99 | with: 100 | images: | 101 | linode/linode-cloud-controller-manager 102 | tags: | 103 | type=raw,value=pr-${{ github.event.pull_request.number }},enable=${{ github.event_name == 'pull_request' }} 104 | type=raw,value=latest,enable=${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} 105 | - name: Build Dockerfile 106 | uses: docker/build-push-action@v6 107 | with: 108 | context: . 
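# Note: push stays disabled in the step below; this job only verifies that the
# image builds. Publishing to Docker Hub is handled by release.yml on tag pushes.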
109 | push: false 110 | tags: ${{ steps.meta.outputs.tags }} 111 | labels: ${{ steps.meta.outputs.labels }} 112 | build-args: | 113 | REV=${{ github.ref_name }} 114 | -------------------------------------------------------------------------------- /.github/workflows/helm.yml: -------------------------------------------------------------------------------- 1 | name: Helm 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - 'deploy/chart/**' 7 | workflow_dispatch: {} 8 | release: {} 9 | 10 | permissions: 11 | contents: read 12 | 13 | jobs: 14 | helm-test: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v4.2.2 19 | with: 20 | fetch-depth: 0 21 | 22 | - name: Set Version 23 | run: | 24 | TAG=$(git describe --tags --abbrev=0) 25 | sed -ie "s/appVersion: \"latest\"/appVersion: ${TAG#helm-}/g" deploy/chart/Chart.yaml 26 | sed -ie "s/version: 0.0.0/version: ${TAG#helm-}/g" deploy/chart/Chart.yaml 27 | 28 | - name: Set up Helm 29 | uses: azure/setup-helm@v4 30 | 31 | - uses: actions/setup-python@v5 32 | with: 33 | python-version: '3.10' 34 | check-latest: true 35 | 36 | - name: Set up chart-testing 37 | uses: helm/chart-testing-action@v2.7.0 38 | 39 | - name: Run chart-testing (lint) 40 | run: ct lint --check-version-increment=false --chart-dirs deploy --target-branch ${{ github.event.repository.default_branch }} 41 | 42 | # we cannot test a helm install without a valid linode 43 | # - name: Create kind cluster 44 | # uses: helm/kind-action@v1.8.0 45 | 46 | #- name: Run chart-testing (install) 47 | # run: ct install --chart-dirs helm-chart --namespace kube-system --helm-extra-set-args "--set=apiToken=test --set=region=us-east" --target-branch ${{ github.event.repository.default_branch }} 48 | 49 | helm-release: 50 | if: ${{ startsWith(github.ref, 'refs/tags/') }} 51 | needs: helm-test 52 | permissions: 53 | contents: write # for helm/chart-releaser-action to push chart release and create a release 54 | runs-on: ubuntu-latest 55 | steps: 56 | - name: Checkout 57 | uses: actions/checkout@v4.2.2 58 | with: 59 | fetch-depth: 0 60 | 61 | - name: Set Version 62 | run: | 63 | TAG=$(git describe --tags --abbrev=0) 64 | sed -ie "s/appVersion: \"latest\"/appVersion: ${TAG#helm-}/g" deploy/chart/Chart.yaml 65 | sed -ie "s/version: 0.0.0/version: ${TAG#helm-}/g" deploy/chart/Chart.yaml 66 | 67 | - name: Configure Git 68 | run: | 69 | git config user.name "$GITHUB_ACTOR" 70 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com" 71 | 72 | - name: Set up Helm 73 | uses: azure/setup-helm@v4 74 | 75 | - name: Run chart-releaser 76 | uses: helm/chart-releaser-action@v1.7.0 77 | env: 78 | CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 79 | CR_RELEASE_NAME_TEMPLATE: "helm-{{ .Version }}" 80 | with: 81 | charts_dir: deploy 82 | skip_existing: true 83 | -------------------------------------------------------------------------------- /.github/workflows/label-sync.yml: -------------------------------------------------------------------------------- 1 | name: Sync labels 2 | on: 3 | push: 4 | branches: 5 | - main 6 | paths: 7 | - .github/labels.yml 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@cbb722410c2e876e24abbe8de2cc27693e501dcb # pin@v2 13 | - uses: micnncim/action-label-syncer@3abd5ab72fda571e69fffd97bd4e0033dd5f495c # pin@v1 14 | env: 15 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 16 | with: 17 | manifest: .github/labels.yml 18 | -------------------------------------------------------------------------------- 
/.github/workflows/pr-labeler.yml: -------------------------------------------------------------------------------- 1 | name: PR labeler 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request_target: 6 | types: [opened, reopened, synchronize] 7 | 8 | jobs: 9 | label-pr: 10 | name: Update PR labels 11 | permissions: 12 | contents: write 13 | pull-requests: write 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout repository 17 | uses: actions/checkout@v4.2.2 18 | with: 19 | fetch-depth: 0 20 | - name: Label PR 21 | uses: release-drafter/release-drafter@v6 22 | with: 23 | disable-releaser: github.ref != 'refs/heads/main' 24 | env: 25 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 26 | -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name: Release Drafter 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - main 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | update_release_draft: 14 | permissions: 15 | contents: write 16 | pull-requests: write 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: release-drafter/release-drafter@v6 20 | env: 21 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 22 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | push: 4 | tags: 5 | - "v*.*.*" 6 | 7 | jobs: 8 | release: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4.2.2 12 | with: 13 | fetch-depth: 0 14 | - name: Create Release Artifacts 15 | run: make release 16 | env: 17 | IMAGE_VERSION: ${{ github.ref_name }} 18 | - name: Upload Release Artifacts 19 | uses: softprops/action-gh-release@v2 20 | with: 21 | files: | 22 | ./release/helm-chart-${{ github.ref_name }}.tgz 23 | - name: Docker Meta 24 | id: meta 25 | uses: docker/metadata-action@v5 26 | with: 27 | images: | 28 | linode/linode-cloud-controller-manager 29 | tags: | 30 | type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'main') }} 31 | type=semver,pattern={{raw}},value=${{ github.ref_name }} 32 | - name: Login to Docker Hub 33 | uses: docker/login-action@v3 34 | with: 35 | username: ${{ secrets.DOCKER_USERNAME }} 36 | password: ${{ secrets.DOCKER_PASSWORD }} 37 | - name: Build and Push to Docker Hub 38 | uses: docker/build-push-action@v6 39 | with: 40 | context: . 
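# Here push is enabled: this workflow runs only for v*.*.* tag pushes, and the
# image tags/labels come from the docker/metadata-action step above.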
41 | push: true 42 | tags: ${{ steps.meta.outputs.tags }} 43 | labels: ${{ steps.meta.outputs.labels }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by .ignore support plugin (hsz.mobi) 2 | ### Go template 3 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 4 | *.o 5 | *.a 6 | *.so 7 | linode-cloud-controller-manager 8 | 9 | # Folders 10 | _obj 11 | _test 12 | bin 13 | 14 | # Architecture specific extensions/prefixes 15 | *.[568vq] 16 | [568vq].out 17 | 18 | *.cgo1.go 19 | *.cgo2.c 20 | _cgo_defun.c 21 | _cgo_gotypes.go 22 | _cgo_export.* 23 | 24 | _testmain.go 25 | 26 | *.exe 27 | *.test 28 | *.prof 29 | 30 | # Output of the go coverage tool, specifically when used with LiteIDE 31 | *.out 32 | 33 | .idea/ 34 | dist/ 35 | .vscode/ 36 | coverage.txt 37 | *.coverprofile 38 | 39 | junit.xml 40 | 41 | .DS_Store 42 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | Release notes for this project are kept here: https://github.com/linode/linode-cloud-controller-manager/releases 2 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.24-alpine AS builder 2 | RUN mkdir -p /linode 3 | WORKDIR /linode 4 | 5 | COPY go.mod . 6 | COPY go.sum . 7 | COPY main.go . 8 | COPY cloud ./cloud 9 | COPY sentry ./sentry 10 | 11 | RUN go mod download 12 | RUN go build -a -ldflags '-extldflags "-static"' -o /bin/linode-cloud-controller-manager-linux /linode 13 | 14 | FROM alpine:3.22.0 15 | RUN apk add --update --no-cache ca-certificates 16 | LABEL maintainers="Linode" 17 | LABEL description="Linode Cloud Controller Manager" 18 | COPY --from=builder /bin/linode-cloud-controller-manager-linux /linode-cloud-controller-manager-linux 19 | ENTRYPOINT ["/linode-cloud-controller-manager-linux"] 20 | -------------------------------------------------------------------------------- /_config.yaml: -------------------------------------------------------------------------------- 1 | markdown: GFM 2 | -------------------------------------------------------------------------------- /cloud/annotations/annotations.go: -------------------------------------------------------------------------------- 1 | package annotations 2 | 3 | const ( 4 | // AnnLinodeDefaultProtocol is the annotation used to specify the default protocol 5 | // for Linode load balancers. Options are tcp, http and https. Defaults to tcp. 
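// An illustrative Service annotation using this key:
//
//	service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "https"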
6 | AnnLinodeDefaultProtocol = "service.beta.kubernetes.io/linode-loadbalancer-default-protocol" 7 | AnnLinodePortConfigPrefix = "service.beta.kubernetes.io/linode-loadbalancer-port-" 8 | AnnLinodeDefaultProxyProtocol = "service.beta.kubernetes.io/linode-loadbalancer-default-proxy-protocol" 9 | AnnLinodeDefaultAlgorithm = "service.beta.kubernetes.io/linode-loadbalancer-default-algorithm" 10 | AnnLinodeDefaultStickiness = "service.beta.kubernetes.io/linode-loadbalancer-default-stickiness" 11 | 12 | AnnLinodeCheckPath = "service.beta.kubernetes.io/linode-loadbalancer-check-path" 13 | AnnLinodeCheckBody = "service.beta.kubernetes.io/linode-loadbalancer-check-body" 14 | AnnLinodeHealthCheckType = "service.beta.kubernetes.io/linode-loadbalancer-check-type" 15 | 16 | AnnLinodeHealthCheckInterval = "service.beta.kubernetes.io/linode-loadbalancer-check-interval" 17 | AnnLinodeHealthCheckTimeout = "service.beta.kubernetes.io/linode-loadbalancer-check-timeout" 18 | AnnLinodeHealthCheckAttempts = "service.beta.kubernetes.io/linode-loadbalancer-check-attempts" 19 | AnnLinodeHealthCheckPassive = "service.beta.kubernetes.io/linode-loadbalancer-check-passive" 20 | 21 | AnnLinodeUDPCheckPort = "service.beta.kubernetes.io/linode-loadbalancer-udp-check-port" 22 | 23 | // AnnLinodeThrottle is the annotation specifying the value of the Client Connection 24 | // Throttle, which limits the number of subsequent new connections per second from the 25 | // same client IP. Options are a number between 1-20, or 0 to disable. Defaults to 20. 26 | AnnLinodeThrottle = "service.beta.kubernetes.io/linode-loadbalancer-throttle" 27 | 28 | AnnLinodeLoadBalancerPreserve = "service.beta.kubernetes.io/linode-loadbalancer-preserve" 29 | AnnLinodeNodeBalancerID = "service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id" 30 | AnnLinodeNodeBalancerType = "service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-type" 31 | 32 | AnnLinodeHostnameOnlyIngress = "service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress" 33 | AnnLinodeLoadBalancerTags = "service.beta.kubernetes.io/linode-loadbalancer-tags" 34 | AnnLinodeCloudFirewallID = "service.beta.kubernetes.io/linode-loadbalancer-firewall-id" 35 | AnnLinodeCloudFirewallACL = "service.beta.kubernetes.io/linode-loadbalancer-firewall-acl" 36 | 37 | // AnnLinodeEnableIPv6Ingress is the annotation used to specify that a service should include both IPv4 and IPv6 38 | // addresses for its LoadBalancer ingress. When set to "true", both addresses will be included in the status. 
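// An illustrative Service annotation using this key:
//
//	service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-ingress: "true"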
AnnLinodeEnableIPv6Ingress = "service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-ingress" 40 | 41 | AnnLinodeNodePrivateIP = "node.k8s.linode.com/private-ip" 42 | AnnLinodeHostUUID = "node.k8s.linode.com/host-uuid" 43 | 44 | AnnLinodeNodeIPSharingUpdated = "node.k8s.linode.com/ip-sharing-updated" 45 | 46 | NodeBalancerBackendIPv4Range = "service.beta.kubernetes.io/linode-loadbalancer-backend-ipv4-range" 47 | 48 | NodeBalancerBackendVPCName = "service.beta.kubernetes.io/linode-loadbalancer-backend-vpc-name" 49 | NodeBalancerBackendSubnetName = "service.beta.kubernetes.io/linode-loadbalancer-backend-subnet-name" 50 | ) 51 | -------------------------------------------------------------------------------- /cloud/api.go: -------------------------------------------------------------------------------- 1 | package cloud 2 | 3 | import "errors" 4 | 5 | var ( 6 | ErrNotImplemented = errors.New("not implemented") 7 | ErrLBUnsupported = errors.New("loadbalancer unsupported") 8 | ) 9 | -------------------------------------------------------------------------------- /cloud/linode/common.go: -------------------------------------------------------------------------------- 1 | package linode 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "strconv" 7 | "strings" 8 | 9 | "github.com/linode/linodego" 10 | ) 11 | 12 | const ( 13 | providerIDPrefix = "linode://" 14 | DNS1123LabelMaxLength int = 63 15 | ) 16 | 17 | type invalidProviderIDError struct { 18 | value string 19 | } 20 | 21 | func (e invalidProviderIDError) Error() string { 22 | return fmt.Sprintf("invalid provider ID %q", e.value) 23 | } 24 | 25 | func isLinodeProviderID(providerID string) bool { 26 | return strings.HasPrefix(providerID, providerIDPrefix) 27 | } 28 | 29 | func parseProviderID(providerID string) (int, error) { 30 | if !isLinodeProviderID(providerID) { 31 | return 0, invalidProviderIDError{providerID} 32 | } 33 | id, err := strconv.Atoi(strings.TrimPrefix(providerID, providerIDPrefix)) 34 | if err != nil { 35 | return 0, invalidProviderIDError{providerID} 36 | } 37 | return id, nil 38 | } 39 | 40 | // IgnoreLinodeAPIError returns nil when the error matches the given Linode API status code; otherwise it returns the error unchanged. 41 | func IgnoreLinodeAPIError(err error, code int) error { 42 | apiErr := linodego.Error{Code: code} 43 | if apiErr.Is(err) { 44 | err = nil 45 | } 46 | 47 | return err 48 | } 49 | 50 | func isPrivate(ip *net.IP) bool { 51 | if Options.LinodeExternalNetwork == nil { 52 | return ip.IsPrivate() 53 | } 54 | 55 | return ip.IsPrivate() && !Options.LinodeExternalNetwork.Contains(*ip) 56 | } 57 | -------------------------------------------------------------------------------- /cloud/linode/common_test.go: -------------------------------------------------------------------------------- 1 | package linode 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/linode/linodego" 8 | ) 9 | 10 | func TestParseProviderID(t *testing.T) { 11 | for _, tc := range []struct { 12 | name string 13 | providerID string 14 | expectedID int 15 | errExpected bool 16 | }{ 17 | { 18 | name: "empty string is invalid", 19 | providerID: "", 20 | errExpected: true, 21 | }, 22 | { 23 | name: "malformed provider id", 24 | providerID: "invalidproviderid!", 25 | errExpected: true, 26 | }, 27 | { 28 | name: "wrong prefix", 29 | providerID: "notlinode://123", 30 | errExpected: true, 31 | }, 32 | { 33 | name: "valid", 34 | providerID: "linode://123", 35 | expectedID: 123, 36 | }, 37 | } { 38 | t.Run(tc.name, func(t *testing.T) { 39 | id, err := parseProviderID(tc.providerID) 40 | if err != nil
{ 41 | if !tc.errExpected { 42 | t.Errorf("unexpected error: %v", err) 43 | } 44 | } else if tc.errExpected { 45 | t.Error("expected an error; got nil") 46 | } 47 | 48 | if id != tc.expectedID { 49 | t.Errorf("expected id to be %d; got %d", tc.expectedID, id) 50 | } 51 | }) 52 | } 53 | } 54 | 55 | func TestIgnoreLinodeAPIError(t *testing.T) { 56 | t.Parallel() 57 | tests := []struct { 58 | name string 59 | err error 60 | code int 61 | shouldFilter bool 62 | }{{ 63 | name: "Not Linode API error", 64 | err: errors.New("foo"), 65 | code: 0, 66 | shouldFilter: false, 67 | }, { 68 | name: "Ignore not found Linode API error", 69 | err: linodego.Error{ 70 | Response: nil, 71 | Code: 400, 72 | Message: "not found", 73 | }, 74 | code: 400, 75 | shouldFilter: true, 76 | }, { 77 | name: "Don't ignore not found Linode API error", 78 | err: linodego.Error{ 79 | Response: nil, 80 | Code: 400, 81 | Message: "not found", 82 | }, 83 | code: 500, 84 | shouldFilter: false, 85 | }} 86 | for _, tt := range tests { 87 | testcase := tt 88 | t.Run(testcase.name, func(t *testing.T) { 89 | t.Parallel() 90 | err := IgnoreLinodeAPIError(testcase.err, testcase.code) 91 | if testcase.shouldFilter && err != nil { 92 | t.Errorf("expected error to be filtered, got %v", err) 93 | } 94 | if !testcase.shouldFilter && err == nil { 95 | t.Error("expected error to be returned, got nil") 96 | } 97 | }) 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /cloud/linode/firewall/firewalls_test.go: -------------------------------------------------------------------------------- 1 | package firewall 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | "github.com/linode/linodego" 8 | v1 "k8s.io/api/core/v1" 9 | ) 10 | 11 | // makeOldRuleSet constructs a FirewallRuleSet with the given IPs, ports string, and policy. 12 | func makeOldRuleSet(ipList []string, ports string, policy string) linodego.FirewallRuleSet { 13 | ips := linodego.NetworkAddresses{IPv4: &ipList} 14 | rule := linodego.FirewallRule{ 15 | Protocol: "TCP", 16 | Ports: ports, 17 | Addresses: ips, 18 | } 19 | return linodego.FirewallRuleSet{ 20 | InboundPolicy: policy, 21 | Inbound: []linodego.FirewallRule{rule}, 22 | } 23 | } 24 | 25 | func TestRuleChanged(t *testing.T) { 26 | tests := []struct { 27 | name string 28 | oldIPs []string 29 | oldPorts string 30 | policy string 31 | newACL aclConfig 32 | svcPorts []int32 33 | wantChange bool 34 | }{ 35 | { 36 | name: "NoChange", 37 | oldIPs: []string{"1.2.3.4/32"}, 38 | oldPorts: "80,8080", 39 | policy: drop, 40 | newACL: aclConfig{AllowList: &linodego.NetworkAddresses{IPv4: &[]string{"1.2.3.4/32"}}}, 41 | svcPorts: []int32{80, 8080}, 42 | wantChange: false, 43 | }, 44 | { 45 | name: "IPChange", 46 | oldIPs: []string{"1.2.3.4/32"}, 47 | oldPorts: "80", 48 | policy: drop, 49 | newACL: aclConfig{AllowList: &linodego.NetworkAddresses{IPv4: &[]string{"5.6.7.8/32"}}}, 50 | svcPorts: []int32{80}, 51 | wantChange: true, 52 | }, 53 | { 54 | name: "PortChange", 55 | oldIPs: []string{"1.2.3.4/32"}, 56 | oldPorts: "80", 57 | policy: drop, 58 | newACL: aclConfig{AllowList: &linodego.NetworkAddresses{IPv4: &[]string{"1.2.3.4/32"}}}, 59 | svcPorts: []int32{80, 8080}, 60 | wantChange: true, 61 | }, 62 | } 63 | 64 | for _, tc := range tests { 65 | t.Run(tc.name, func(t *testing.T) { 66 | old := makeOldRuleSet(tc.oldIPs, tc.oldPorts, tc.policy) 67 | svc := &v1.Service{Spec: v1.ServiceSpec{Ports: func() []v1.ServicePort { 68 | ps := make([]v1.ServicePort, len(tc.svcPorts)) 69 | for i, p := range tc.svcPorts { 70 | ps[i] = v1.ServicePort{Port: p} 71 | } 72 | return ps 73 | }()}} 74 | got := ruleChanged(old, tc.newACL, svc)
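// ruleChanged is expected to report a change whenever the ACL's address set
// or the service's port list differs from the existing rule set (see the
// table cases above).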
75 | if got != tc.wantChange { 76 | t.Errorf("ruleChanged() = %v, want %v", got, tc.wantChange) 77 | } 78 | }) 79 | } 80 | } 81 | 82 | func TestParsePorts(t *testing.T) { 83 | tests := []struct { 84 | name string 85 | input string 86 | want []int32 87 | wantErr bool 88 | }{ 89 | {"ValidSingle", "80", []int32{80}, false}, 90 | {"ValidMultiple", "80,443", []int32{80, 443}, false}, 91 | {"ValidRange", "100-102", []int32{100, 101, 102}, false}, 92 | {"Combined", "80,100-102,8080", []int32{80, 100, 101, 102, 8080}, false}, 93 | {"Spaces", " 80 , 443-445 ", []int32{80, 443, 444, 445}, false}, 94 | {"InvalidRangeFormat", "1-2-3", nil, true}, 95 | {"InvalidRangeFormat2", "100-", nil, true}, 96 | {"NonNumeric", "abc", nil, true}, 97 | {"NonNumeric2", "80,a", nil, true}, 98 | {"NonNumeric3", "a-100", nil, true}, 99 | {"StartGreaterThanEnd", "200-100", nil, true}, 100 | } 101 | 102 | for _, tc := range tests { 103 | t.Run(tc.name, func(t *testing.T) { 104 | got, err := parsePorts(tc.input) 105 | if (err != nil) != tc.wantErr { 106 | t.Fatalf("parsePorts(%q) error = %v, wantErr %v", tc.input, err, tc.wantErr) 107 | } 108 | if !tc.wantErr && !reflect.DeepEqual(got, tc.want) { 109 | t.Errorf("parsePorts(%q) = %v, want %v", tc.input, got, tc.want) 110 | } 111 | }) 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /cloud/linode/health_check.go: -------------------------------------------------------------------------------- 1 | package linode 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "k8s.io/apimachinery/pkg/util/wait" 8 | "k8s.io/klog/v2" 9 | 10 | "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" 11 | ) 12 | 13 | type healthChecker struct { 14 | period time.Duration 15 | linodeClient client.Client 16 | stopCh chan<- struct{} 17 | } 18 | 19 | func newHealthChecker(client client.Client, period time.Duration, stopCh chan<- struct{}) *healthChecker { 20 | return &healthChecker{ 21 | period: period, 22 | linodeClient: client, 23 | stopCh: stopCh, 24 | } 25 | } 26 | 27 | func (r *healthChecker) Run(stopCh <-chan struct{}) { 28 | ctx := wait.ContextForChannel(stopCh) 29 | wait.Until(r.worker(ctx), r.period, stopCh) 30 | } 31 | 32 | func (r *healthChecker) worker(ctx context.Context) func() { 33 | return func() { 34 | r.do(ctx) 35 | } 36 | } 37 | 38 | func (r *healthChecker) do(ctx context.Context) { 39 | if r.stopCh == nil { 40 | klog.Errorf("stop signal already fired. 
nothing to do") 41 | return 42 | } 43 | 44 | authenticated, err := client.CheckClientAuthenticated(ctx, r.linodeClient) 45 | if err != nil { 46 | klog.Warningf("unable to determine linode client authentication status: %s", err.Error()) 47 | return 48 | } 49 | 50 | if !authenticated { 51 | klog.Error("detected invalid linode api token: stopping controllers") 52 | 53 | close(r.stopCh) 54 | r.stopCh = nil 55 | return 56 | } 57 | 58 | klog.Info("linode api token is healthy") 59 | } 60 | -------------------------------------------------------------------------------- /cloud/linode/loadbalancers_deprecated.go: -------------------------------------------------------------------------------- 1 | package linode 2 | 3 | const ( 4 | annLinodeProxyProtocolDeprecated = "service.beta.kubernetes.io/linode-loadbalancer-proxy-protocol" 5 | ) 6 | -------------------------------------------------------------------------------- /cloud/linode/metrics.go: -------------------------------------------------------------------------------- 1 | package linode 2 | 3 | import ( 4 | "sync" 5 | 6 | "k8s.io/component-base/metrics/legacyregistry" 7 | 8 | "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" 9 | ) 10 | 11 | var registerOnce sync.Once 12 | 13 | func registerMetrics() { 14 | registerOnce.Do(func() { 15 | legacyregistry.RawMustRegister(client.ClientMethodCounterVec) 16 | }) 17 | } 18 | -------------------------------------------------------------------------------- /cloud/linode/tools.go: -------------------------------------------------------------------------------- 1 | //go:build tools 2 | // +build tools 3 | 4 | package tools 5 | 6 | import ( 7 | _ "github.com/golang/mock/mockgen" 8 | ) 9 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | ignore: 2 | - "cloud/linode/client/mocks" 3 | - "cloud/linode/client/client_with_metrics.go" 4 | -------------------------------------------------------------------------------- /deploy/.gitignore: -------------------------------------------------------------------------------- 1 | ccm-linode.yaml 2 | .DS_Store 3 | -------------------------------------------------------------------------------- /deploy/chart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /deploy/chart/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: ccm-linode 3 | description: The Linode Cloud Controller Manager (CCM) provides a way for Kubernetes clusters to access additional Linode services. Linode's CCM will automatically provision a Linode NodeBalancer for Kubernetes Services of type "LoadBalancer". 
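# version and appVersion below are placeholders; the helm.yml workflow stamps
# both from the release tag before linting and publishing the chart.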
4 | type: application 5 | version: 0.0.0 6 | appVersion: "latest" 7 | maintainers: 8 | - name: linode 9 | -------------------------------------------------------------------------------- /deploy/chart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "chart.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "chart.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "chart.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "chart.labels" -}} 37 | helm.sh/chart: {{ include "chart.chart" . }} 38 | {{ include "chart.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "chart.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "chart.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "chart.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "chart.fullname" .) 
.Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /deploy/chart/templates/ccm-linode.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.secretRef }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: ccm-linode 6 | namespace: {{ required ".Values.namespace required" .Values.namespace }} 7 | stringData: 8 | apiToken: {{ required ".Values.apiToken required" .Values.apiToken }} 9 | region: {{ required ".Values.region required" .Values.region }} 10 | type: Opaque 11 | {{- end }} 12 | -------------------------------------------------------------------------------- /deploy/chart/templates/clusterrole-rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- if ne .Values.rbacEnabled false }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: ccm-linode-clusterrole 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["endpoints"] 9 | verbs: ["get", "watch", "list", "update", "create"] 10 | - apiGroups: ["coordination.k8s.io"] 11 | resources: ["leases"] 12 | verbs: ["get", "watch", "list", "update", "create"] 13 | - apiGroups: [""] 14 | resources: ["nodes"] 15 | verbs: ["get", "watch", "list", "update", "delete", "patch"] 16 | - apiGroups: [""] 17 | resources: ["nodes/status"] 18 | verbs: ["get", "watch", "list", "update", "delete", "patch"] 19 | - apiGroups: [""] 20 | resources: ["events"] 21 | verbs: ["get", "watch", "list", "update", "create", "patch"] 22 | - apiGroups: [""] 23 | resources: ["persistentvolumes"] 24 | verbs: ["get", "watch", "list", "update"] 25 | - apiGroups: [""] 26 | resources: ["secrets"] 27 | verbs: ["get"] 28 | - apiGroups: [""] 29 | resources: ["services"] 30 | verbs: ["get", "watch", "list"] 31 | - apiGroups: [""] 32 | resources: ["services/status"] 33 | verbs: ["get", "watch", "list", "update", "patch"] 34 | {{- if .Values.sharedIPLoadBalancing }} 35 | - apiGroups: ["cilium.io"] 36 | resources: ["ciliumloadbalancerippools"] 37 | verbs: ["get", "watch", "list", "update", "create", "delete"] 38 | - apiGroups: ["cilium.io"] 39 | resources: ["ciliumbgppeeringpolicies"] 40 | verbs: ["get", "watch", "list", "create"] 41 | {{- end }} 42 | {{- end }} 43 | -------------------------------------------------------------------------------- /deploy/chart/templates/clusterrolebinding-rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- if ne .Values.rbacEnabled false }} 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: ccm-linode-clusterrolebinding 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: ccm-linode-clusterrole 10 | subjects: 11 | - kind: ServiceAccount 12 | name: ccm-linode 13 | namespace: {{ required ".Values.namespace required" .Values.namespace }} 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /deploy/chart/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if ne .Values.rbacEnabled false }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: ccm-linode 6 | namespace: {{ required ".Values.namespace required" .Values.namespace }} 7 | {{- end }} 8 | 
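The chart templates above are driven by a handful of values: `namespace`, `apiToken`, `region`, `secretRef`, `rbacEnabled`, and `sharedIPLoadBalancing`. As a rough, illustrative sketch (the authoritative schema lives in `deploy/chart/values.yaml`), a minimal install might set:

```yaml
# Hypothetical minimal values -- consult deploy/chart/values.yaml for the real schema.
namespace: kube-system         # required by the Secret, RBAC, and ServiceAccount templates
apiToken: "<linode-api-token>" # omitted when secretRef points at an existing Secret
region: us-east
rbacEnabled: true              # set to false to skip the ClusterRole/Binding/ServiceAccount
sharedIPLoadBalancing: true    # truthy value adds the cilium.io rules to the ClusterRole
```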
-------------------------------------------------------------------------------- /deploy/generate-manifest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o pipefail -o noclobber -o nounset 4 | 5 | die() { echo "$*" 1>&2; exit 1; } 6 | 7 | echo -e "\n********************************************************************" 8 | echo -e "WARNING: This script is deprecated and may be removed in future." 9 | echo -e "Please use helm for installs, or refer to the docs for alternatives." 10 | echo -e "********************************************************************\n" 11 | 12 | [ "$#" -eq 2 ] || die "First argument must be a Linode APIv4 Personal Access Token with all permissions. 13 | (https://cloud.linode.com/profile/tokens) 14 | 15 | Second argument must be a Linode region. 16 | (https://api.linode.com/v4/regions) 17 | 18 | Example: 19 | $ ./generate-manifest.sh \$LINODE_API_TOKEN us-east" 20 | 21 | BASE64FLAGS="" 22 | longstring="1234567890123456789012345678901234567890123456789012345678901234567890" 23 | if [ $(echo "$longstring" | base64 | wc -l) -gt 1 ]; then 24 | BASE64FLAGS="-w0" 25 | fi 26 | 27 | echo $BASE64FLAGS 28 | 29 | ENCODED_TOKEN=$(echo -n $1 | base64 $BASE64FLAGS) 30 | ENCODED_REGION=$(echo -n $2 | base64 $BASE64FLAGS) 31 | 32 | cat "$(dirname "$0")/ccm-linode-template.yaml" | 33 | sed -e "s|{{ .Values.apiTokenB64 }}|$ENCODED_TOKEN|" | 34 | sed -e "s|{{ .Values.linodeRegionB64 }}|$ENCODED_REGION|" > ccm-linode.yaml 35 | -------------------------------------------------------------------------------- /deploy/uninstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | 5 | kubectl delete secret ccm-linode -n kube-system 6 | kubectl delete serviceaccount ccm-linode -n kube-system 7 | kubectl delete clusterrolebinding system:ccm-linode -n kube-system 8 | kubectl delete daemonset ccm-linode -n kube-system 9 | -------------------------------------------------------------------------------- /devbox.json: -------------------------------------------------------------------------------- 1 | { 2 | "packages": [ 3 | "ctlptl@latest", 4 | "clusterctl@latest", 5 | "docker@latest", 6 | "envsubst@latest", 7 | "go@1.24.1", 8 | "golangci-lint@latest", 9 | "jq@latest", 10 | "kind@latest", 11 | "kubectl@latest", 12 | "kustomize@latest", 13 | "kyverno-chainsaw@latest", 14 | "mockgen@latest", 15 | "yq-go@latest" 16 | ], 17 | "shell": { 18 | "init_hook": [ 19 | "export \"GOROOT=$(go env GOROOT)\"" 20 | ], 21 | "scripts": { 22 | "mgmt-and-capl-cluster": "make mgmt-and-capl-cluster", 23 | "e2e-test": "make e2e-test", 24 | "e2e-test-bgp": "make e2e-test-bgp", 25 | "e2e-test-subnet": "make e2e-test-subnet", 26 | "cleanup-cluster": "make cleanup-cluster" 27 | } 28 | }, 29 | "env": { 30 | "EXP_CLUSTER_RESOURCE_SET": "true" 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /docs/configuration/README.md: -------------------------------------------------------------------------------- 1 | # Configuration Guide 2 | 3 | The Linode Cloud Controller Manager (CCM) offers extensive configuration options to customize its behavior. This section covers all available configuration methods and options. 4 | 5 | ## Configuration Areas 6 | 7 | 1. 
**[LoadBalancer Services](loadbalancer.md)** 8 | - NodeBalancer implementation 9 | - BGP-based IP sharing 10 | - Protocol configuration 11 | - Health checks 12 | - SSL/TLS setup 13 | - Connection throttling 14 | - [See examples](../examples/basic.md#loadbalancer-services) 15 | 16 | 2. **[Service Annotations](annotations.md)** 17 | - NodeBalancer configuration 18 | - Protocol settings 19 | - Health check options 20 | - Port configuration 21 | - Firewall settings 22 | - [See annotation reference](annotations.md#available-annotations) 23 | 24 | 3. **[Node Configuration](nodes.md)** 25 | - Node labels and topology 26 | - Private networking setup 27 | - VPC configuration 28 | - Node controller behavior 29 | - [See node management](nodes.md#node-controller-behavior) 30 | 31 | 4. **[Environment Variables and Flags](environment.md)** 32 | - Cache settings 33 | - API configuration 34 | - Network settings 35 | - BGP configuration 36 | - IPv6 configuration 37 | - [See configuration reference](environment.md#flags) 38 | 39 | 5. **[Firewall Setup](firewall.md)** 40 | - CCM-managed firewalls 41 | - User-managed firewalls 42 | - Allow/deny lists 43 | - [See firewall options](firewall.md#ccm-managed-firewalls) 44 | 45 | 6. **[Route Configuration](routes.md)** 46 | - VPC routing 47 | - Pod CIDR management 48 | - Route controller setup 49 | - [See route management](routes.md#route-management) 50 | 51 | 7. **[Session Affinity](session-affinity.md)** 52 | - Client IP affinity 53 | - Timeout configuration 54 | - Service configuration 55 | - [See affinity setup](session-affinity.md#configuration) 56 | 57 | For installation instructions, see the [Installation Guide](../getting-started/installation.md). 58 | For troubleshooting help, see the [Troubleshooting Guide](../getting-started/troubleshooting.md). 59 | -------------------------------------------------------------------------------- /docs/configuration/firewall.md: -------------------------------------------------------------------------------- 1 | # Firewall Setup 2 | 3 | ## Overview 4 | 5 | The CCM provides two methods for securing NodeBalancers with firewalls: 6 | 1. CCM-managed Cloud Firewalls (using `firewall-acl` annotation) 7 | 2. User-managed Cloud Firewalls (using `firewall-id` annotation) 8 | 9 | ## CCM-Managed Firewalls 10 | 11 | ### Configuration 12 | 13 | Use the `firewall-acl` annotation to specify firewall rules. The rules should be provided as a JSON object with either an `allowList` or `denyList` (but not both). 14 | 15 | #### Allow List Configuration 16 | ```yaml 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: restricted-service 21 | annotations: 22 | service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | 23 | { 24 | "allowList": { 25 | "ipv4": ["192.168.0.0/16", "10.0.0.0/8"], 26 | "ipv6": ["2001:db8::/32"] 27 | } 28 | } 29 | ``` 30 | 31 | #### Deny List Configuration 32 | ```yaml 33 | metadata: 34 | annotations: 35 | service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | 36 | { 37 | "denyList": { 38 | "ipv4": ["203.0.113.0/24"], 39 | "ipv6": ["2001:db8:1234::/48"] 40 | } 41 | } 42 | ``` 43 | 44 | ### Behavior 45 | - Only one type of list (allow or deny) can be used per service 46 | - Rules are automatically created and managed by the CCM 47 | - Rules are updated when the annotation changes 48 | - Firewall is deleted when the service is deleted (unless preserved) 49 | 50 | ## User-Managed Firewalls 51 | 52 | ### Configuration 53 | 54 | 1. Create a Cloud Firewall in Linode Cloud Manager 55 | 2. 
Attach it to the service using the `firewall-id` annotation: 56 | 57 | ```yaml 58 | metadata: 59 | annotations: 60 | service.beta.kubernetes.io/linode-loadbalancer-firewall-id: "12345" 61 | ``` 62 | 63 | ### Management 64 | - User maintains full control over firewall rules 65 | - Firewall persists after service deletion 66 | - Manual updates required for rule changes 67 | 68 | ## Best Practices 69 | 70 | 1. **Rule Management** 71 | - Use descriptive rule labels 72 | - Document rule changes 73 | - Perform regular security audits 74 | 75 | 2. **IP Range Planning** 76 | - Plan CIDR ranges carefully 77 | - Document allowed/denied ranges 78 | - Consider future expansion 79 | 80 | For more information: 81 | - [Service Annotations](annotations.md#firewall-configuration) 82 | - [LoadBalancer Configuration](loadbalancer.md) 83 | - [Linode Cloud Firewall Documentation](https://www.linode.com/docs/products/networking/cloud-firewall/) 84 |
-------------------------------------------------------------------------------- /docs/configuration/nodeipam.md: -------------------------------------------------------------------------------- 1 | # Node IPAM using CCM 2 | 3 | ## Overview 4 | 5 | Linode CCM supports configuring and managing pod CIDRs allocated to nodes. This includes both IPv4 and IPv6 CIDRs. It is disabled by default and can be enabled by starting the CCM with the `--allocate-node-cidrs` and `--cluster-cidr` flags. 6 | 7 | ```yaml 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: ccm-linode 13 | args: 14 | - --allocate-node-cidrs=true 15 | - --cluster-cidr=10.192.0.0/10 16 | ``` 17 | 18 | Once enabled, the CCM will manage and allocate pod CIDRs to nodes. 19 | 20 | Note: 21 | Make sure node IPAM allocation is disabled in kube-controller-manager to avoid both controllers competing to assign CIDRs to nodes. To confirm it is disabled, check that kube-controller-manager is not started with the `--allocate-node-cidrs` flag. 22 | 23 | ## Allocated subnet size 24 | By default, the CCM allocates a /24 subnet for IPv4 addresses and a /64 subnet for IPv6 addresses to each node. A different subnet size can be configured using the `--node-cidr-mask-size-ipv4` and `--node-cidr-mask-size-ipv6` flags. 25 | 26 | ```yaml 27 | spec: 28 | template: 29 | spec: 30 | containers: 31 | - name: ccm-linode 32 | args: 33 | - --allocate-node-cidrs=true 34 | - --cluster-cidr=10.192.0.0/10,fd00::/56 35 | - --node-cidr-mask-size-ipv4=25 36 | - --node-cidr-mask-size-ipv6=64 37 | ``` 38 |
-------------------------------------------------------------------------------- /docs/configuration/nodes.md: -------------------------------------------------------------------------------- 1 | # Node Configuration 2 | 3 | ## Overview 4 | 5 | The Node Controller in the CCM manages node-specific configurations and lifecycle operations for Kubernetes nodes running on Linode instances.
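As a quick sanity check (a sketch, assuming `kubectl` access; `my-node` is an illustrative node name), you can inspect what the node controller has populated, including the labels described below:

```bash
# Show the provider ID and addresses the CCM has set on the node
kubectl get node my-node -o jsonpath='{.spec.providerID}{"\n"}{.status.addresses}'

# List the topology labels applied by the CCM
kubectl get node my-node --show-labels | grep topology
```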
6 | 7 | ## Node Labels 8 | 9 | The CCM automatically adds the following labels to nodes: 10 | 11 | ### Topology Labels 12 | Current: 13 | - `topology.kubernetes.io/region`: Linode region (e.g., "us-east") 14 | - `topology.kubernetes.io/zone`: Linode availability zone 15 | 16 | Legacy (deprecated): 17 | - `failure-domain.beta.kubernetes.io/region`: Linode region 18 | - `failure-domain.beta.kubernetes.io/zone`: Linode availability zone 19 | 20 | ### Provider Labels 21 | - `node.kubernetes.io/instance-type`: Linode instance type (e.g., "g6-standard-4") 22 | 23 | ## Node Annotations 24 | 25 | All node annotations must be prefixed with: `node.k8s.linode.com/` 26 | 27 | ### Available Annotations 28 | 29 | | Annotation | Type | Default | Description | 30 | |------------|------|---------|-------------| 31 | | `private-ip` | IPv4 | none | Overrides default detection of Node InternalIP | 32 | 33 | ### Use Cases 34 | 35 | #### Private Network Configuration 36 | ```yaml 37 | apiVersion: v1 38 | kind: Node 39 | metadata: 40 | name: my-node 41 | annotations: 42 | node.k8s.linode.com/private-ip: "192.168.1.100" 43 | ``` 44 | 45 | #### VPC Configuration 46 | When using the CCM with [Linode VPC](https://www.linode.com/docs/products/networking/vpc/), the node's internal IP is set to its VPC IP. To use a different IP address as the internal IP, manually configure the node's InternalIP: 47 | ```yaml 48 | apiVersion: v1 49 | kind: Node 50 | metadata: 51 | name: vpc-node 52 | annotations: 53 | node.k8s.linode.com/private-ip: "10.0.0.5" 54 | ``` 55 | 56 | ## Node Networking 57 | 58 | ### Private Network Requirements 59 | - NodeBalancers require nodes to have Linode-specific [private IP addresses](https://techdocs.akamai.com/cloud-computing/docs/managing-ip-addresses-on-a-compute-instance#types-of-ip-addresses) 60 | - Private IPs must be configured in the Linode Cloud Manager or via the API 61 | - The CCM will use private IPs for inter-node communication 62 | 63 | ### VPC Configuration 64 | When using VPC: 65 | 1. Configure network interfaces in Linode Cloud Manager 66 | 2. Add appropriate node annotations for private IPs 67 | 3. Ensure proper routing configuration 68 | 4. Configure security groups if needed 69 | 70 | For VPC routing setup, see [Route Configuration](routes.md).
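The same annotation can also be applied to an already-registered node from the command line (a sketch; the node name and address are illustrative):

```bash
# Override the detected InternalIP on an existing node
kubectl annotate node vpc-node node.k8s.linode.com/private-ip=10.0.0.5 --overwrite
```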
71 | 72 | ## Node Controller Behavior 73 | 74 | ### Node Initialization 75 | - Configures node with Linode-specific information 76 | - Sets node addresses (public/private IPs) 77 | - Applies region/zone labels 78 | - Configures node hostnames 79 | 80 | ### Node Lifecycle Management 81 | - Monitors node health 82 | - Updates node status 83 | - Handles node termination 84 | - Manages node cleanup 85 | 86 | ### Node Updates 87 | - Updates node labels when region/zone changes 88 | - Updates node addresses when IP configuration changes 89 | - Maintains node conditions based on Linode instance status 90 | 91 | For more information: 92 | - [Linode Instance Types](https://www.linode.com/docs/products/compute/compute-instances/plans/) 93 | - [Private Networking](https://www.linode.com/docs/products/networking/private-networking/) 94 | - [VPC Documentation](https://www.linode.com/docs/products/networking/vpc/) 95 | - [Route Configuration](routes.md) 96 | - [Environment Variables](environment.md) 97 | -------------------------------------------------------------------------------- /docs/configuration/routes.md: -------------------------------------------------------------------------------- 1 | # Route Configuration 2 | 3 | ## Overview 4 | 5 | The Route Controller manages network routes for pod communication in VPC environments. It ensures proper connectivity between nodes and pods across the cluster by configuring routes in Linode VPC. 6 | 7 | ## Prerequisites 8 | 9 | - Kubernetes cluster running in Linode VPC 10 | - CCM with route controller enabled 11 | - Proper API permissions 12 | 13 | ## Configuration 14 | 15 | ### Enable Route Controller 16 | 17 | 1. Via Helm chart in `values.yaml`: 18 | ```yaml 19 | routeController: 20 | vpcNames: "vpc-prod,vpc-staging" # Comma separated names of VPCs managed by CCM 21 | clusterCIDR: "10.0.0.0/8" # Pod CIDR range 22 | configureCloudRoutes: true # Enable route controller 23 | ``` 24 | 25 | 2. Via command line flags in CCM deployment: 26 | ```yaml 27 | spec: 28 | template: 29 | spec: 30 | containers: 31 | - name: ccm-linode 32 | args: 33 | - --configure-cloud-routes=true 34 | - --vpc-names=vpc-prod,vpc-staging 35 | - --cluster-cidr=10.0.0.0/8 36 | ``` 37 | 38 | ### Environment Variables 39 | 40 | | Variable | Default | Description | 41 | |----------|---------|-------------| 42 | | `LINODE_ROUTES_CACHE_TTL_SECONDS` | `60` | Default timeout of route cache in seconds | 43 | 44 | ## Route Management 45 | 46 | ### Automatic Operations 47 | 48 | The Route Controller: 49 | - Creates routes for pod CIDR ranges assigned to nodes 50 | - Updates routes when nodes are added/removed 51 | - Manages route tables in specified VPCs 52 | - Handles route cleanup during node removal 53 | - Maintains route cache for performance 54 | 55 | ### Route Types 56 | 57 | 1. **Pod CIDR Routes** 58 | - Created for each node's pod CIDR allocation 59 | - Target is node's private IP address 60 | - Automatically managed based on node lifecycle 61 | 62 | 2. 
**VPC Routes** 63 | - Managed within specified VPCs 64 | - Enables cross-node pod communication 65 | - Automatically updated with topology changes 66 | 67 | ## Best Practices 68 | 69 | ### CIDR Planning 70 | - Ensure the pod CIDR range doesn't overlap with the nodes' VPC IP addresses 71 | - Plan for future cluster growth 72 | - Document CIDR allocations 73 | 74 | ### VPC Configuration 75 | - Use clear, descriptive VPC names 76 | - Configure proper VPC security settings 77 | - Ensure proper API permissions 78 | 79 | ## Troubleshooting 80 | 81 | ### Common Issues 82 | 83 | 1. **Route Creation Failures** 84 | - Verify API permissions 85 | - Check for CIDR conflicts 86 | - Validate VPC configuration 87 | - Ensure node private IPs are configured 88 | 89 | 2. **Pod Communication Issues** 90 | - Verify route table entries 91 | - Check VPC network ACLs 92 | - Validate node networking 93 | - Confirm pod CIDR assignments 94 | 95 | ## Related Documentation 96 | 97 | - [VPC Configuration](https://www.linode.com/docs/products/networking/vpc/) 98 | - [Node Configuration](nodes.md) 99 | - [Environment Variables](environment.md) 100 | - [Kubernetes Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) 101 |
-------------------------------------------------------------------------------- /docs/configuration/session-affinity.md: -------------------------------------------------------------------------------- 1 | # Session Affinity 2 | 3 | ## Overview 4 | 5 | Session affinity (also known as sticky sessions) ensures that requests from the same client are consistently routed to the same backend pod. In Kubernetes, this behavior is controlled through the `sessionAffinity` field on a Service. 6 | 7 | ## Configuration 8 | 9 | ### Basic Setup 10 | 11 | Enable session affinity by setting `service.spec.sessionAffinity` to `ClientIP`: 12 | ```yaml 13 | apiVersion: v1 14 | kind: Service 15 | metadata: 16 | name: wordpress-lsmnl-wordpress 17 | namespace: wordpress-lsmnl 18 | labels: 19 | app: wordpress-lsmnl-wordpress 20 | spec: 21 | type: LoadBalancer 22 | selector: 23 | app: wordpress-lsmnl-wordpress 24 | sessionAffinity: ClientIP 25 | ``` 26 | 27 | ### Setting Timeout 28 | 29 | Configure the maximum session sticky time using `sessionAffinityConfig`: 30 | ```yaml 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: my-service 35 | spec: 36 | type: LoadBalancer 37 | sessionAffinity: ClientIP 38 | sessionAffinityConfig: 39 | clientIP: 40 | timeoutSeconds: 10800 # 3 hours 41 | ``` 42 | 43 | ## Configuration Options 44 | 45 | ### Session Affinity Types 46 | - `None`: No session affinity (default) 47 | - `ClientIP`: Route based on the client's IP address. All requests from the same client IP will be directed to the same pod.
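An existing service can also be switched to `ClientIP` affinity in place (a sketch, assuming a service named `my-service`):

```bash
# Enable ClientIP session affinity on an existing service
kubectl patch service my-service -p '{"spec":{"sessionAffinity":"ClientIP"}}'
```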
48 | 49 | ### Timeout Configuration 50 | - `timeoutSeconds`: Duration to maintain affinity 51 | - Default: 10800 seconds (3 hours) 52 | - Valid range: 1 to 86400 seconds (24 hours) 53 | - After the timeout period, client requests may be routed to a different pod 54 | 55 | ## Related Documentation 56 | 57 | - [Service Configuration](annotations.md) 58 | - [LoadBalancer Configuration](loadbalancer.md) 59 | - [Kubernetes Services Documentation](https://kubernetes.io/docs/concepts/services-networking/service/#session-affinity) 60 | - [Service Selectors](https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service) 61 |
-------------------------------------------------------------------------------- /docs/examples/README.md: -------------------------------------------------------------------------------- 1 | # Examples 2 | 3 | This section provides working examples of common CCM configurations. Each example includes a complete service and deployment configuration. 4 | 5 | ## Available Examples 6 | 7 | 1. **[Basic Services](basic.md)** 8 | - HTTP LoadBalancer 9 | - HTTPS LoadBalancer with TLS termination 10 | - UDP LoadBalancer 11 | 12 | 2. **[Advanced Configuration](advanced.md)** 13 | - Custom Health Checks 14 | - Firewalled Services 15 | - Session Affinity 16 | - Shared IP Load-Balancing 17 | - Custom Node Selection 18 | 19 | Note: To test UDP-based NodeBalancers, you can use the [test-server](https://github.com/rahulait/test-server) repo to run a server over UDP, then use the client commands in that repo's README to connect to it. 20 | 21 | For testing these examples, see the [test script](https://github.com/linode/linode-cloud-controller-manager/blob/master/examples/test.sh). 22 | 23 | For more configuration options, see: 24 | - [Service Annotations](../configuration/annotations.md) 25 | - [LoadBalancer Configuration](../configuration/loadbalancer.md) 26 | - [Firewall Configuration](../configuration/firewall.md) 27 |
-------------------------------------------------------------------------------- /docs/examples/advanced.md: -------------------------------------------------------------------------------- 1 | # Advanced Configuration 2 | 3 | ## Custom Health Checks 4 | 5 | Service with custom health check configuration: 6 | 7 | ```yaml 8 | apiVersion: v1 9 | kind: Service 10 | metadata: 11 | name: web-healthcheck 12 | annotations: 13 | service.beta.kubernetes.io/linode-loadbalancer-check-type: "http" 14 | service.beta.kubernetes.io/linode-loadbalancer-check-path: "/healthz" 15 | service.beta.kubernetes.io/linode-loadbalancer-check-interval: "5" 16 | service.beta.kubernetes.io/linode-loadbalancer-check-timeout: "3" 17 | service.beta.kubernetes.io/linode-loadbalancer-check-attempts: "2" 18 | service.beta.kubernetes.io/linode-loadbalancer-check-passive: "true" 19 | spec: 20 | type: LoadBalancer 21 | ports: 22 | - port: 80 23 | selector: 24 | app: web 25 | ``` 26 | 27 | ## Firewalled Services 28 | 29 | Service with firewall rules: 30 | 31 | ```yaml 32 | kind: Service 33 | apiVersion: v1 34 | metadata: 35 | name: restricted-access 36 | annotations: 37 | service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | 38 | { 39 | "allowList": { 40 | "ipv4": ["192.166.0.0/16", "172.23.41.0/24"], 41 | "ipv6": ["2001:DB8::/128"] 42 | } 43 | } 44 | spec: 45 | type: LoadBalancer 46 | selector: 47 | app: restricted-app 48 | ports: 49 | - name: http 50 | port: 80 51 | targetPort: 8080 52 | ``` 53 | 54 | ## Session Affinity 55 | 56 | Service with sticky sessions: 57 | 58 | ```yaml 59 |
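# ClientIP affinity keeps requests from one client IP on the same pod
# for up to timeoutSeconds (100 seconds in this example).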
apiVersion: v1 60 | kind: Service 61 | metadata: 62 | name: sticky-service 63 | spec: 64 | type: LoadBalancer 65 | sessionAffinity: ClientIP 66 | sessionAffinityConfig: 67 | clientIP: 68 | timeoutSeconds: 100 69 | selector: 70 | app: sticky-app 71 | ports: 72 | - port: 80 73 | targetPort: 8080 74 | ``` 75 | 76 | ## Shared IP Load-Balancing 77 | 78 | ```yaml 79 | apiVersion: v1 80 | kind: Service 81 | metadata: 82 | name: shared-ip-service 83 | spec: 84 | type: LoadBalancer 85 | selector: 86 | app: web 87 | ports: 88 | - port: 80 89 | targetPort: 8080 90 | --- 91 | # Required DaemonSet configuration for shared IP 92 | apiVersion: apps/v1 93 | kind: DaemonSet 94 | metadata: 95 | name: ccm-linode 96 | namespace: kube-system 97 | spec: 98 | template: 99 | spec: 100 | containers: 101 | - image: linode/linode-cloud-controller-manager:latest 102 | name: ccm-linode 103 | env: 104 | - name: LINODE_URL 105 | value: https://api.linode.com/v4 106 | args: 107 | - --bgp-node-selector=cilium-bgp-peering=true 108 | - --load-balancer-type=cilium-bgp 109 | - --ip-holder-suffix=myclustername1 110 | ``` 111 | 112 | ## Custom Node Selection 113 | 114 | ```yaml 115 | apiVersion: v1 116 | kind: Service 117 | metadata: 118 | name: custom-nodes 119 | spec: 120 | type: LoadBalancer 121 | selector: 122 | app: custom-app 123 | ports: 124 | - port: 80 125 | # Only use nodes with specific labels 126 | externalTrafficPolicy: Local 127 | --- 128 | # Example node with custom annotation 129 | apiVersion: v1 130 | kind: Node 131 | metadata: 132 | name: custom-node 133 | annotations: 134 | node.k8s.linode.com/private-ip: "192.168.1.100" 135 | ``` 136 | 137 | For more examples, see: 138 | - [Service Annotations](../configuration/annotations.md) 139 | - [Firewall Configuration](../configuration/firewall.md) 140 | - [LoadBalancer Configuration](../configuration/loadbalancer.md) 141 | -------------------------------------------------------------------------------- /docs/examples/basic.md: -------------------------------------------------------------------------------- 1 | # Basic Services 2 | 3 | ## HTTP LoadBalancer 4 | 5 | Basic HTTP LoadBalancer service with nginx: 6 | 7 | ```yaml 8 | kind: Service 9 | apiVersion: v1 10 | metadata: 11 | name: http-lb 12 | annotations: 13 | service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" 14 | spec: 15 | type: LoadBalancer 16 | selector: 17 | app: nginx-http-example 18 | ports: 19 | - name: http 20 | protocol: TCP 21 | port: 80 22 | targetPort: 80 23 | 24 | --- 25 | apiVersion: apps/v1 26 | kind: Deployment 27 | metadata: 28 | name: nginx-http-deployment 29 | spec: 30 | replicas: 2 31 | selector: 32 | matchLabels: 33 | app: nginx-http-example 34 | template: 35 | metadata: 36 | labels: 37 | app: nginx-http-example 38 | spec: 39 | containers: 40 | - name: nginx 41 | image: nginx 42 | ports: 43 | - containerPort: 80 44 | protocol: TCP 45 | ``` 46 | 47 | ## HTTPS LoadBalancer 48 | 49 | HTTPS LoadBalancer with TLS termination: 50 | 51 | ```yaml 52 | kind: Service 53 | apiVersion: v1 54 | metadata: 55 | name: https-lb 56 | annotations: 57 | service.beta.kubernetes.io/linode-loadbalancer-throttle: "4" 58 | service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" 59 | service.beta.kubernetes.io/linode-loadbalancer-port-443: | 60 | { 61 | "tls-secret-name": "example-secret", 62 | "protocol": "https" 63 | } 64 | spec: 65 | type: LoadBalancer 66 | selector: 67 | app: nginx-https-example 68 | ports: 69 | - name: http 70 | protocol: TCP 71 | port: 80 72 | targetPort: 
http 73 | - name: https 74 | protocol: TCP 75 | port: 443 76 | targetPort: https 77 | 78 | --- 79 | apiVersion: apps/v1 80 | kind: Deployment 81 | metadata: 82 | name: nginx-https-deployment 83 | spec: 84 | replicas: 2 85 | selector: 86 | matchLabels: 87 | app: nginx-https-example 88 | template: 89 | metadata: 90 | labels: 91 | app: nginx-https-example 92 | spec: 93 | containers: 94 | - name: nginx 95 | image: nginx 96 | ports: 97 | - name: http 98 | containerPort: 80 99 | protocol: TCP 100 | - name: https 101 | containerPort: 443 102 | protocol: TCP 103 | ``` 104 | 105 | ## UDP LoadBalancer 106 | 107 | Basic UDP LoadBalancer service: 108 | 109 | ```yaml 110 | kind: Service 111 | apiVersion: v1 112 | metadata: 113 | name: udp-lb 114 | spec: 115 | type: LoadBalancer 116 | selector: 117 | app: udp-example 118 | ports: 119 | - name: udp 120 | protocol: UDP 121 | port: 7070 122 | targetPort: 7070 123 | --- 124 | apiVersion: apps/v1 125 | kind: Deployment 126 | metadata: 127 | name: udp-deployment 128 | spec: 129 | replicas: 2 130 | selector: 131 | matchLabels: 132 | app: udp-example 133 | template: 134 | metadata: 135 | labels: 136 | app: udp-example 137 | spec: 138 | containers: 139 | - name: test-server 140 | image: rahulait/test-server:0.1 141 | ports: 142 | - containerPort: 7070 143 | protocol: UDP 144 | ``` 145 | 146 | For more configuration options, see: 147 | - [Service Annotations](../configuration/annotations.md) 148 | - [LoadBalancer Configuration](../configuration/loadbalancer.md) 149 | -------------------------------------------------------------------------------- /docs/getting-started/README.md: -------------------------------------------------------------------------------- 1 | # Getting Started 2 | 3 | This section will guide you through: 4 | - Understanding the CCM's requirements 5 | - Installing the CCM using either Helm or manual installation 6 | - Verifying your installation 7 | - Troubleshooting common issues 8 | 9 | Choose the installation method that best suits your needs: 10 | - **Helm Installation**: Recommended for most users, provides easier upgrades and configuration 11 | - **Manual Installation**: Offers more control over the deployment process 12 | 13 | Before proceeding with installation, make sure to review the requirements section to ensure your environment is properly configured. 14 | -------------------------------------------------------------------------------- /docs/getting-started/helm-installation.md: -------------------------------------------------------------------------------- 1 | # Helm Installation 2 | 3 | ## Prerequisites 4 | - Helm 3.x installed 5 | - kubectl configured to access your cluster 6 | - Linode API token 7 | - Target region identified 8 | 9 | ## Installation Steps 10 | 11 | 1. Add the CCM Helm repository: 12 | ```bash 13 | helm repo add ccm-linode https://linode.github.io/linode-cloud-controller-manager/ 14 | helm repo update ccm-linode 15 | ``` 16 | 17 | 2. 
Create a values file (values.yaml): 18 | ```yaml 19 | apiToken: "your-api-token" 20 | region: "us-east" 21 | 22 | # Optional: Configure route controller 23 | routeController: 24 | vpcNames: "" # Comma-separated VPC names 25 | clusterCIDR: "10.0.0.0/8" 26 | configureCloudRoutes: true 27 | 28 | # Optional: Assign node internal IPs from VPCs without enabling route controller 29 | # Not required if specified in routeController 30 | vpcNames: "" # Comma-separated VPC names 31 | 32 | # Optional: Configure shared IP load balancing instead of NodeBalancers (requires Cilium CNI and BGP Control Plane enabled) 33 | sharedIPLoadBalancing: 34 | loadBalancerType: cilium-bgp 35 | bgpNodeSelector: cilium-bgp-peering=true 36 | ipHolderSuffix: "" 37 | 38 | # Optional: Allow /metrics scraping without authorization on the secure HTTPS port (10253 by default) 39 | allowUnauthorizedMetrics: true 40 | ``` 41 | 42 | 3. Install the CCM: 43 | ```bash 44 | helm install ccm-linode \ 45 | --namespace kube-system \ 46 | -f values.yaml \ 47 | ccm-linode/ccm-linode 48 | ``` 49 | 50 | ## Upgrading 51 | 52 | To upgrade an existing installation: 53 | ```bash 54 | helm upgrade ccm-linode \ 55 | --namespace kube-system \ 56 | -f values.yaml \ 57 | ccm-linode/ccm-linode 58 | ``` 59 | 60 | ## Uninstalling 61 | 62 | To remove the CCM: 63 | ```bash 64 | helm uninstall ccm-linode -n kube-system 65 | ``` 66 |
-------------------------------------------------------------------------------- /docs/getting-started/installation.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | The CCM can be installed using either Helm (recommended) or by manually applying manifests. Choose the method that best suits your needs: 4 | 5 | ## Installation Methods 6 | 7 | ### [Helm Installation](helm-installation.md) 8 | - Easier to manage and upgrade 9 | - Configurable through values.yaml 10 | - Supports templating for different environments 11 | 12 | ### [Manual Installation](manual-installation.md) 13 | - More control over the deployment 14 | - Better for customized setups 15 | - Useful for understanding the components 16 | 17 | ## Post-Installation 18 | After installing the CCM, proceed to the [Verification](verification.md) section to ensure everything is working correctly. 19 | 20 | If you encounter any issues, check the [Troubleshooting](troubleshooting.md) guide. 21 |
-------------------------------------------------------------------------------- /docs/getting-started/manual-installation.md: -------------------------------------------------------------------------------- 1 | # Manual Installation 2 | 3 | ## Prerequisites 4 | - kubectl configured to access your cluster 5 | - Linode API token 6 | - Target region identified 7 | 8 | ## Installation Steps 9 | 10 | 1. Generate the manifest: 11 | ```bash 12 | ./deploy/generate-manifest.sh $LINODE_API_TOKEN $REGION 13 | ``` 14 | 15 | 2. Review the generated manifest: 16 | The script creates `ccm-linode.yaml` containing: 17 | - ServiceAccount 18 | - ClusterRole and ClusterRoleBinding 19 | - Secret with API token 20 | - DaemonSet for the CCM 21 | 22 | 3.
Apply the manifest: 23 | ```bash 24 | kubectl apply -f ccm-linode.yaml 25 | ``` 26 | 27 | ## Customization 28 | 29 | ### Environment Variables 30 | You can modify the DaemonSet to include custom environment variables: 31 | ```yaml 32 | env: 33 | - name: LINODE_INSTANCE_CACHE_TTL 34 | value: "15" 35 | - name: LINODE_ROUTES_CACHE_TTL_SECONDS 36 | value: "60" 37 | ``` 38 | 39 | ### Resource Limits 40 | Adjust compute resources as needed: 41 | ```yaml 42 | resources: 43 | requests: 44 | cpu: 100m 45 | memory: 128Mi 46 | limits: 47 | cpu: 200m 48 | memory: 256Mi 49 | ``` 50 | 51 | ### Prometheus metrics 52 | 53 | The Cloud Controller Manager exposes metrics by default on the port given by 54 | the `--secure-port` flag. The endpoint is protected from unauthenticated access by 55 | default. To allow unauthenticated clients (`system:anonymous`) to access 56 | Prometheus metrics, use the `--authorization-always-allow-paths="/metrics"` 57 | command-line flag. 58 | 59 | Linode API calls can be monitored using the `ccm_linode_client_requests_total` metric. 60 | 61 | ## Uninstalling 62 | 63 | To remove the CCM: 64 | ```bash 65 | kubectl delete -f ccm-linode.yaml 66 | ``` 67 |
-------------------------------------------------------------------------------- /docs/getting-started/overview.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | The Linode Cloud Controller Manager provides several key features that enable a fully supported Kubernetes experience on Linode infrastructure. 4 | 5 | ## Features 6 | 7 | ### LoadBalancer Services 8 | - Automatic deployment and configuration of Linode NodeBalancers 9 | - Support for HTTP, HTTPS, and TCP traffic 10 | - SSL/TLS termination 11 | - Custom health checks and session affinity 12 | 13 | ### Node Management 14 | - Automatic configuration of node hostnames and network addresses 15 | - Proper node state management for Linode shutdowns 16 | - Region-based node annotation for failure domain scheduling 17 | 18 | ### Network Integration 19 | - Support for private networking 20 | - VPC and VLAN compatibility 21 | - BGP-based IP sharing capabilities 22 | 23 | ### Security 24 | - Integrated firewall management 25 | - Support for TLS termination 26 | - Custom security rules and ACLs 27 | 28 | ## When to Use CCM 29 | 30 | The Linode CCM is essential when: 31 | - Running Kubernetes clusters on Linode infrastructure 32 | - Requiring automated load balancer provisioning 33 | - Needing integrated cloud provider features 34 | - Managing multi-node clusters with complex networking requirements -------------------------------------------------------------------------------- /docs/getting-started/requirements.md: -------------------------------------------------------------------------------- 1 | # Requirements 2 | 3 | Before installing the Linode Cloud Controller Manager, ensure your environment meets the following requirements.
4 | 5 | ## Kubernetes Cluster Requirements 6 | 7 | ### Version Compatibility 8 | - Kubernetes version 1.22 or higher 9 | - Kubernetes cluster running on Linode infrastructure 10 | 11 | ### Kubernetes Components Configuration 12 | The following Kubernetes components must be started with the `--cloud-provider=external` flag: 13 | - Kubelet 14 | - Kube Controller Manager 15 | - Kube API Server 16 | 17 | ## Linode Requirements 18 | 19 | ### API Token 20 | You need a Linode APIv4 Personal Access Token with the following scopes: 21 | - Linodes - Read/Write 22 | - NodeBalancers - Read/Write 23 | - IPs - Read/Write 24 | - Volumes - Read/Write 25 | - Firewalls - Read/Write (if using firewall features) 26 | - VPCs - Read/Write (if using VPC features) 27 | - VLANs - Read/Write (if using VLAN features) 28 | 29 | To create a token: 30 | 1. Log into the [Linode Cloud Manager](https://cloud.linode.com) 31 | 2. Go to your profile 32 | 3. Select the "API Tokens" tab 33 | 4. Click "Create a Personal Access Token" 34 | 5. Select the required scopes 35 | 6. Set an expiry (optional) 36 | 37 | ### Region Support 38 | Your cluster must be in a [supported Linode region](https://api.linode.com/v4/regions). 39 | 40 | ## Network Requirements 41 | 42 | ### Private Networking 43 | - If using NodeBalancers, nodes must have private IP addresses 44 | - VPC or VLAN configurations require additional network configuration 45 | 46 | ### Firewall Considerations 47 | - Ensure required ports are open for Kubernetes components 48 | - If using Cloud Firewalls, ensure the API token has firewall management permissions 49 | 50 | ## Resource Quotas 51 | Ensure your Linode account has sufficient quota for: 52 | - NodeBalancers (if using LoadBalancer services) 53 | - Additional IP addresses (if using shared IP features) 54 | - Cloud Firewalls (if using firewall features) 55 |
-------------------------------------------------------------------------------- /docs/getting-started/troubleshooting.md: -------------------------------------------------------------------------------- 1 | # Troubleshooting 2 | 3 | ## Common Issues and Solutions 4 | 5 | ### CCM Pod Issues 6 | 7 | #### Pod Won't Start 8 | ```bash 9 | kubectl get pods -n kube-system -l app=ccm-linode 10 | kubectl describe pod -n kube-system -l app=ccm-linode 11 | ``` 12 | 13 | Common causes: 14 | - Invalid API token 15 | - Missing RBAC permissions 16 | - Resource constraints 17 | 18 | #### Pod Crashes 19 | Check the logs: 20 | ```bash 21 | kubectl logs -n kube-system -l app=ccm-linode 22 | ``` 23 | 24 | Common causes: 25 | - API rate limiting 26 | - Network connectivity issues 27 | - Configuration errors 28 | 29 | ### LoadBalancer Service Issues 30 | 31 | #### Service Stuck in Pending 32 | ```bash 33 | kubectl describe service <service-name> 34 | ``` 35 | 36 | Check for: 37 | - API token permissions 38 | - NodeBalancer quota limits 39 | - Network configuration 40 | 41 | #### Health Checks Failing 42 | Verify: 43 | - Backend pod health 44 | - Service port configuration 45 | - Health check path configuration 46 | 47 | ### Node Issues 48 | 49 | #### Missing Node Labels 50 | ```bash 51 | kubectl get nodes --show-labels 52 | ``` 53 | 54 | Verify: 55 | - CCM node controller logs 56 | - Node annotations 57 | - API permissions 58 | 59 | #### Network Problems 60 | Check: 61 | - Private IP configuration 62 | - VPC/VLAN setup 63 | - Firewall rules 64 | 65 | ## Gathering Information 66 | 67 | ### Useful Commands 68 | ```bash 69 | # Get CCM version 70 | kubectl get pods -n kube-system -l app=ccm-linode -o
jsonpath='{.items[0].spec.containers[0].image}' 71 | 72 | # Check events 73 | kubectl get events -n kube-system 74 | 75 | # Get CCM logs with timestamps 76 | kubectl logs -n kube-system -l app=ccm-linode --timestamps 77 | ``` 78 | 79 | ### Debug Mode 80 | Set the following environment variable in the CCM deployment: 81 | ```yaml 82 | env: 83 | - name: LINODE_DEBUG 84 | value: "1" 85 | ``` 86 | 87 | ## Getting Help 88 | 89 | If issues persist: 90 | 1. Join #linode on [Kubernetes Slack](https://kubernetes.slack.com) 91 | 2. Check [GitHub Issues](https://github.com/linode/linode-cloud-controller-manager/issues) 92 | 3. Submit a new issue with: 93 | - CCM version 94 | - Kubernetes version 95 | - Relevant logs 96 | - Steps to reproduce 97 | -------------------------------------------------------------------------------- /docs/getting-started/verification.md: -------------------------------------------------------------------------------- 1 | # Verification 2 | 3 | After installing the CCM, follow these steps to verify it's working correctly. 4 | 5 | ## Check CCM Pod Status 6 | 7 | 1. Verify the CCM pods are running: 8 | ```bash 9 | kubectl get pods -n kube-system -l app=ccm-linode 10 | ``` 11 | 12 | Expected output: 13 | ``` 14 | NAME READY STATUS RESTARTS AGE 15 | ccm-linode-xxxxx 1/1 Running 0 2m 16 | ``` 17 | 18 | 2. Check CCM logs: 19 | ```bash 20 | kubectl logs -n kube-system -l app=ccm-linode 21 | ``` 22 | 23 | Look for successful initialization messages and no errors. 24 | 25 | ## Verify Node Configuration 26 | 27 | 1. Check node annotations: 28 | ```bash 29 | kubectl get nodes -o yaml 30 | ``` 31 | 32 | Look for: 33 | - Proper region labels 34 | - Node addresses 35 | - Provider ID 36 | 37 | ## Test LoadBalancer Service 38 | 39 | 1. Create a test service: 40 | ```yaml 41 | apiVersion: v1 42 | kind: Service 43 | metadata: 44 | name: test-lb 45 | spec: 46 | type: LoadBalancer 47 | ports: 48 | - port: 80 49 | selector: 50 | app: test 51 | ``` 52 | 53 | 2. Verify NodeBalancer creation: 54 | ```bash 55 | kubectl get svc test-lb 56 | ``` 57 | 58 | The service should receive an external IP address. 59 | 60 | ## Common Issues 61 | - Pods in CrashLoopBackOff: Check logs for API token or permissions issues 62 | - Service stuck in 'Pending': Verify API token has NodeBalancer permissions 63 | - Missing node annotations: Check CCM logs for node controller issues 64 | -------------------------------------------------------------------------------- /e2e/bgp-test/lb-cilium-bgp/create-pod-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: test-pod-1 5 | labels: 6 | app: test-bgp 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:latest 11 | ports: 12 | - containerPort: 80 13 | --- 14 | apiVersion: v1 15 | kind: Service 16 | metadata: 17 | name: test-bgp-svc 18 | spec: 19 | type: LoadBalancer 20 | ports: 21 | - port: 80 22 | targetPort: 80 23 | selector: 24 | app: test-bgp 25 | -------------------------------------------------------------------------------- /e2e/setup/cilium-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | # Add bgp peering label to non control plane nodes. 
Needed to update the shared IP on the nodes 5 | kubectl get nodes --no-headers | grep -v control-plane |\ 6 | awk '{print $1}' | xargs -I {} kubectl label nodes {} cilium-bgp-peering=true --overwrite 7 | 8 | # Add RBAC permissions 9 | kubectl patch clusterrole ccm-linode-clusterrole --type='json' -p='[{ 10 | "op": "add", 11 | "path": "/rules/-", 12 | "value": { 13 | "apiGroups": ["cilium.io"], 14 | "resources": ["ciliumloadbalancerippools", "ciliumbgppeeringpolicies"], 15 | "verbs": ["get", "list", "watch", "create", "update", "patch", "delete"] 16 | } 17 | }]' 18 | 19 | # Patch DaemonSet 20 | kubectl patch daemonset ccm-linode -n kube-system --type='json' -p='[{ 21 | "op": "add", 22 | "path": "/spec/template/spec/containers/0/args/-", 23 | "value": "--bgp-node-selector=cilium-bgp-peering=true" 24 | }, { 25 | "op": "add", 26 | "path": "/spec/template/spec/containers/0/args/-", 27 | "value": "--load-balancer-type=cilium-bgp" 28 | }, { 29 | "op": "add", 30 | "path": "/spec/template/spec/containers/0/args/-", 31 | "value": "--ip-holder-suffix='"${CLUSTER_SUFFIX}"'" 32 | }]' 33 | -------------------------------------------------------------------------------- /e2e/setup/ctlptl-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: ctlptl.dev/v1alpha1 3 | kind: Cluster 4 | product: kind 5 | kindV1Alpha4Cluster: 6 | name: caplccm 7 | nodes: 8 | - role: control-plane 9 | image: kindest/node:v1.31.2 10 | -------------------------------------------------------------------------------- /e2e/test/assert-ccm-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: ccm-linode 5 | namespace: kube-system 6 | status: 7 | numberAvailable: 1 8 | numberReady: 1 9 | -------------------------------------------------------------------------------- /e2e/test/ccm-resources/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: check-ccm-resources 6 | labels: 7 | all: 8 | spec: 9 | steps: 10 | - name: Check if CCM is deployed 11 | try: 12 | - assert: 13 | file: ../assert-ccm-resources.yaml 14 | -------------------------------------------------------------------------------- /e2e/test/certificates/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFejCCA2KgAwIBAgIJAN7D2Ju254yUMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV 3 | BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX 4 | aWRnaXRzIFB0eSBMdGQxCzAJBgNVBAMMAmNhMB4XDTE5MDQwOTA5MzYxNFoXDTI5 5 | MDQwNjA5MzYxNFowUjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx 6 | ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDELMAkGA1UEAwwCY2Ew 7 | ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDoTwE1kijjrhCcGXSPyHlf 8 | 7NngxPCFuFqVdRvG4DrrdL7YW3iEovAXTbuoyiPpF/U9T5BfDVs2dCEHGlpiOADR 9 | tA/Z5mFbVcefOCBL+rL2sTN2o19U7eimcZjH1xN1L5j2RkYmRAoI+nwG/g5NehOu 10 | YM930oPqe3vOYevOHBCebHuKc7zaM31AtKcDG0IjIJ1ZdJy91+rx8Prb+IxTIKZl 11 | Ca/e0e6iZWCPp5kaJyNUGZkjjcRVzFM79xVf34DEuS+N1RZP7EevM0bfHehJfSpU 12 | M6gfsrL9WctD0nGJd2YsH9hLCub2G7emgiV7dvN1R0QW9ijguwZ9aBemiat5AnGs 13 | QHSR+WRijZNjHTWY4DEaTNWecDd2Tz37RNN9Ow8FThERwZVnpji1kcijEg4g7Ppy 14 | 
9P6tdavjkFVW0xOieInjS/m5Bxj2a44UT1JshNr1M4HGXvqUcCFS4vhytIc05lOv 15 | X20NR+C+RgNy7G14Hz/3+qRo9hlkonyTJAoU++2vgsaNmmhcU6fGgYpARHm1Y675 16 | pGrgZAcjFcsG84q0dSdr6AeY+6+1UyS6pktBobXIiciSPmseHJ24dRd06OYQMxQ3 17 | ccOZhZ3cNy8OMT9eUwcjnif36BVmZdCObJexqXq/cSVX3IhhaQhLLfN9ZyGDkxWl 18 | N5ehRMCabgv3mQCDd/9HMwIDAQABo1MwUTAdBgNVHQ4EFgQUC2AMOf90/zpuQ588 19 | rPLfe7EukIUwHwYDVR0jBBgwFoAUC2AMOf90/zpuQ588rPLfe7EukIUwDwYDVR0T 20 | AQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAHopjHkeIciVtlAyAPEfh/pnf 21 | r91H1aQMPmHisqlveM3Bz9MOIa9a26YO+ZzCPozALxkJIjdp7L3L8Q8CuLmkC4YV 22 | 6nHvSLaC/82UGoiRGyjdFh30puqekWMZ62ZrQLpCr0DzOJrarslLM0fONqpjDTWP 23 | 8OXyRcnVSbFB1n5XUoviMTTxYOQ3HQe8b3Tt7GO/9w6dWkkSX1Vy4RmzNt7fb9K5 24 | mxu/n+SVu+2iQX9oEWq2rpvsD3RGnhewCPlZU8NQYKb72K00kEcG/J+WU1IPtkq0 25 | JaU5TDMMzfp3PMYxCzYD9pdM8J0N0zJac2t9hkx7H83jy/TfLrmDvB6nCK8N3+6j 26 | 8In6RwYw4XJ41AWsJpGXBpvYCq5GJjdogEi9IaBXSmtVPYm0NURYbephk+Wg0oyk 27 | ESk4cyWUhYG8mcMyORc8lzOQ79YT6A5QnitTGCVQGTlnNRjevtfhAFEXr9e8UZFq 28 | oWtfEdltH6ElGDpivwuOERAN9v3GoPlifpo1UDElnPJft+C0cRv0YpPwvwJTy1MU 29 | q1op/4Z/7SHzFWTSyRZqvI41AsLImylzfZ0w9U8sogd4pHv30kGc9+LhqrsfLDvK 30 | 9XedVoWJx/x3i8BUhVDyd4FyVWHCf9N/6a9HzbFWT8QZTBk5pErTaFiTi5TQxoi7 31 | ER4ILjvRX7mLWUGhN58= 32 | -----END CERTIFICATE----- 33 | -------------------------------------------------------------------------------- /e2e/test/certificates/ca.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJKQIBAAKCAgEA6E8BNZIo464QnBl0j8h5X+zZ4MTwhbhalXUbxuA663S+2Ft4 3 | hKLwF027qMoj6Rf1PU+QXw1bNnQhBxpaYjgA0bQP2eZhW1XHnzggS/qy9rEzdqNf 4 | VO3opnGYx9cTdS+Y9kZGJkQKCPp8Bv4OTXoTrmDPd9KD6nt7zmHrzhwQnmx7inO8 5 | 2jN9QLSnAxtCIyCdWXScvdfq8fD62/iMUyCmZQmv3tHuomVgj6eZGicjVBmZI43E 6 | VcxTO/cVX9+AxLkvjdUWT+xHrzNG3x3oSX0qVDOoH7Ky/VnLQ9JxiXdmLB/YSwrm 7 | 9hu3poIle3bzdUdEFvYo4LsGfWgXpomreQJxrEB0kflkYo2TYx01mOAxGkzVnnA3 8 | dk89+0TTfTsPBU4REcGVZ6Y4tZHIoxIOIOz6cvT+rXWr45BVVtMToniJ40v5uQcY 9 | 9muOFE9SbITa9TOBxl76lHAhUuL4crSHNOZTr19tDUfgvkYDcuxteB8/9/qkaPYZ 10 | ZKJ8kyQKFPvtr4LGjZpoXFOnxoGKQER5tWOu+aRq4GQHIxXLBvOKtHUna+gHmPuv 11 | tVMkuqZLQaG1yInIkj5rHhyduHUXdOjmEDMUN3HDmYWd3DcvDjE/XlMHI54n9+gV 12 | ZmXQjmyXsal6v3ElV9yIYWkISy3zfWchg5MVpTeXoUTAmm4L95kAg3f/RzMCAwEA 13 | AQKCAgA7X6XheT+2IVDhdKgT75sF51UwW2VV5+5O8KVuyPSGJUp5pbdBvxs5+YYS 14 | 98cB1ju5tdIiWRmM61VdqWCcZc/fyPOcL4OGgoAIs6Gdia+f2V5b3hmJbVze3X28 15 | 5b11X/rqRr+7G/ysmsiESP6C0sag9OFRsJ5CmdjkwKYyAX5F31gb5MTJyAslNIbT 16 | Bm565+UPbfx5kOvddkfHXNkQjfG5NSab/m/NVbe+P4mj44jl/7ziF2OswhIr61St 17 | MkSJAwOibmnLd4pCvwpIEC7ESZc9+g3oPBX/JqoK5YhbKAcXE/SGCX1KPX9MsUq/ 18 | KYHD4bkHXVbJSUO5XZdd2yXWQcenUAiDWl5gRSp5eMdG2JDCCemChOen/m9KLvAk 19 | CkVG5PRaqxAJwQ9ZtabgOtlrFMM6W10WOG2I2FWL5W/D+kJAGmUmH2FkJSnXa8Ue 20 | rNBSYus1hHcch57nlXZhA5vt8hfiz/FIb4DHm1q3VLlK2yz6nUJnLylZoGTeoSL/ 21 | 2tawA+Ke8ODB0yOc/1jkx3oR1rH+WFmFFRztuvt9UwM4tdd3rc4CHel08AG8m6JW 22 | i86nbOYixc9zi5fM293KqojI/CLHgdttdLHy9wvDguEl99jrZeFDOXG6y7kFBSH1 23 | 4MONldROS3r4e54qL+xx/Rug+3+hAnkvox0zxQ6B5CUX9bwqIQKCAQEA+cw7DUCY 24 | Z/osHtwlFAvNTHS15LKqjpQ4b+JJZJBvN9JmoyWH1rbPIzWKOR7mi5QwlPFazRVD 25 | e2oZ7I+MVxBNxY621eJKBZ/w+orIK6p+tSWLhWb3m4hyrBfonOabd5eqFGrm/t80 26 | dUURhBQpmmmKHfXX0PQUK/jUamX5tYQqhianARjlaAX8mCAxphqngmnDMk9s8kjO 27 | xQUpbZ6wqYdTADqflbEhkbb6ZQ7qYvv/k+SVpiAe45JUaRjnL7vZEGwvZCFIaql2 28 | 7ZgD1k77srgEJoJxCae2JCXu7gOPiFFWwxzk/mDUJkG4Rz2dhp1U0iwS848lj9mb 29 | geOMgrSgyyUj6wKCAQEA7hOb7fpcgmtqW5kwy/AS+YnUytIWJtRcu+cZslgNyMyR 30 | JU3HhgHP26jsf6c2fuRcvuFAWzIxVsEsiDDca2nWIlo/9dwMjdIRhDY6KLmhRMcl 31 | 
q/3JqUrJ9LMj0JMoFtNr2TSUYEeU0Un4Yyxo88yHUiLOn1CGNDT1Mhyl9fN8knSD 32 | aoRGGzP9R86uqaM6vamEqW+UmZpWwyL33UZIrXPvYDWjK28scl8pO2naQPBUG+w+ 33 | rpKv0u36cUDgT5BRKh5NIBUTklvujKSqNi/r41wgIKxD6nzoQ5CB316vvHOe4oS7 34 | nvPspudgGOReVxebT5cvYSFNVOiaMyFvO/h/Isc/2QKCAQEA9Gxpj9MYDg3WJDAn 35 | O9E8hkZhQTGuACt6ecgDJprJqOaAtbzgiTGBx6L46GGbFxLe4q6zuZ7H1DgCu93O 36 | Zda3MsstjmWJJq3U/2LS8R84MopVVihlulOuWURfwT9I+qF1s9rSwqunSeRWelW8 37 | S7JfimdnULe0ET6+oom76Gg1r+ScX098EehhvH0r96t3Iv3YGR9srDqf0Fr3qaKw 38 | nIGkrsGuK6wR++H+aupIYKjR8WWnJNGjR+yEGf5bIPL/N7mUObbzhCLkEyI/BvFs 39 | CflTXlcewmLPr9lKTK1BQhLNAcgBLgOTMwf1iAf7XqAOQkUMGjSEfZhhOJxufeq+ 40 | t2hVFwKCAQEAj5CDlx5CauMbX+Eg69n5l0DmcOf/M6sbwLO7azBuH4g8fqJIjybI 41 | kU1sUA4lbp+GzhRHGrdJyWbz06yjPqKz3kNODpSDfYIPzgplR+motjDhJrYDxXK0 42 | FjtwfXY/1oPnpTNvxh9kPlC82BEPuPS9JWqX7rpAM3s74NwluAr8M9TO6oyZOg4/ 43 | ze10phcodPakaQhAM+A+Ma4Eigq4CODAmOO/YZHI269Gze0nD8Hc9nmf0uXh4PBg 44 | 9aqJmXENgLiFCbOr+bY2Q9mISuTa+l+zgABhdCWeah/aMTiIALw4M6CXA2NUcdFU 45 | wa30tdAmd/hTOd87ItihI/Qlf6R6Bh7hAQKCAQBaSscYKj1g2i0KIGVumtSW1lXX 46 | P91Fdu2c5KjvrYsQayjz6jHt7QvtL6mv/qxlux0xOApWo10UJrDap/2KzHVYPSe4 47 | t3vwOQCGD3vmheeOCI+WpCqMv9Si9MZG/CyVvVRB96Ohp4VYd965mBE/sPtMZ7MW 48 | T8zdYjwsuA8giRi9ljK8yDe5xCM1WTHPZKZckWH/G7h2fNvppytQRTYpctJlzPC+ 49 | AGFkBLByGAcTfUA8P+0CBj/EoVeUAoCpjzkIj7Osv9GhaCWt6TMpcbbf3wXs9Jn0 50 | 3kAIEga2Es1ywiTCB5Nf/7OPibG5O3Bx0mQ36w60kpPoj16ala+9yFTKDETr 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /e2e/test/certificates/server.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFvTCCA6WgAwIBAgIUBpS47ArkUC0MXYK3LvXU3eRh/CowDQYJKoZIhvcNAQEL 3 | BQAwUjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM 4 | GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDELMAkGA1UEAwwCY2EwHhcNMjQxMTE1 5 | MTcxNjI1WhcNMjcxMTE1MTcxNjI1WjByMQswCQYDVQQGEwJVUzELMAkGA1UECAwC 6 | UEExFTATBgNVBAcMDFBoaWxhZGVscGhpYTETMBEGA1UECgwKTGlub2RlIExMQzEU 7 | MBIGA1UECwwLTGlub2RlIFRlc3QxFDASBgNVBAMMC2xpbm9kZS50ZXN0MIICIjAN 8 | BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA27JxXbiT+0aptSvE2uFakJQf+qwT 9 | 5mGFCNaQFRBDdxxLTUF6UyglZflT9KdVtJ9kmsyZj9vhFnxszWnoUK1Y/runOYTf 10 | JlNBVp28fO43HrUtaHFCZncvu0C4Tdc09721p2pP5nhgXv8BtZeDAVY/hjSIGgP1 11 | 1WNLSWP2eZn4+q4hr7iUqVqLRYVz5e489b1sEXpCiSDWuq6GWRzvEBquHX0F82mW 12 | 84DMfa2TrcG4bw0i0r4nKWcgB3at7sR32DvEPFsFiEreFgNsx7b1KcG+ngzA3ZKL 13 | 9bviQKSLjjn48VPoV/w5lT3PYGIAjwu2tbNY8J6dUcni4aHnIwhwBFVb5299eIEC 14 | nccueVExw8LtXBYOUKT4A8doKy3ZBq4B+WY8N0QhE6H8tuLrAl6IUh8rduuvJc38 15 | +QIDD6IKr58zuest6q0/lNvjruOfUMa+EsBPX795wyDuqL4tUyfySyUyYNXcQ4ip 16 | 2nFTBYXoB75jLsXHULhOC+7AbxzWeM76mjeNgKzUJaz+1EUMLYOSsfiYFMlWfoiL 17 | ilf7WMdR3bLHccFAA/Qg3CZETU/B20amYDI/+0TvY1td01gzoUx3UjDPB6mpntgr 18 | DoTISDNAvZgPOt9ebs7AEM6/iHgIQtAnCQULTzQ48i3WZlpPYb2IeWOsNCXiOZPN 19 | +STXedL5M3IUwUcCAwEAAaNrMGkwJwYDVR0RBCAwHoILbGlub2RlLnRlc3SCD3d3 20 | dy5saW5vZGUudGVzdDAdBgNVHQ4EFgQUgNqzhL/JpxllvFu18qvlg/usDrEwHwYD 21 | VR0jBBgwFoAUC2AMOf90/zpuQ588rPLfe7EukIUwDQYJKoZIhvcNAQELBQADggIB 22 | AL38v8A0Yfi3Qcr7JtMJ+EOgiHo+W1PW05CAKrswqIZGb9pLwcc46N1ICX4/wItH 23 | DfOmiLHEJ+eEaf07XWy1G+orvqsz6FLh2lfr1cne2DH1udiBXw2VyHDeaighgqTX 24 | rHPcV9lLPcRgQgE8AC2WSn3Rmjd4eU+twlqYcJTLt3cy+TulwXxGBjn7CSmRamRA 25 | AaURnVpsMhw9baINrN6+3zbjw1LKpMO3JfPx9NPw0iUYYbUWFMli2RTEwdR0o9Fu 26 | Om6ogyYHHLTUDv2+cHYY4TKJ0LGz9PGB3iwdGbSSpLadjV7xkFERio5B4o/FedLB 27 | CuECSIoWqjScSrVWjpIpG6b7LVkuDI7ZrZ6Rvkwcv4Zezx5TkynQUw9EezEgGRQf 28 | RiBSKoPGKJfRGiYGNXDjqENX3kxqt5cuVe/Z0czrb+2zOMfaTZwJtp2rrJqckxBh 29 | 
CK4CXQz2nsfGRW/lyJ1Jyc+ul0obXXhynDBA9dE5woCIwgTCRL9M0ZOHjoQi1tDh 30 | 27i0j4YzIvlIDIi6iex/XVZi9mhuRvDR7f7c5RVpHsu38znCLyQetFnwOQOmIVZI 31 | lEUQvU1Jnk+e5+RqvOcZ0ZcLppBa71XjUdYm56mzY1ph04n1VUO4rmaI3wNBETGd 32 | jJ3K7XuBBL/YT+02AzsZR/0fiHLdA9DbLUdhtRs0mb5u 33 | -----END CERTIFICATE----- 34 | -------------------------------------------------------------------------------- /e2e/test/certificates/server.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIEtzCCAp8CAQAwcjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlBBMRUwEwYDVQQH 3 | DAxQaGlsYWRlbHBoaWExEzARBgNVBAoMCkxpbm9kZSBMTEMxFDASBgNVBAsMC0xp 4 | bm9kZSBUZXN0MRQwEgYDVQQDDAtsaW5vZGUudGVzdDCCAiIwDQYJKoZIhvcNAQEB 5 | BQADggIPADCCAgoCggIBANuycV24k/tGqbUrxNrhWpCUH/qsE+ZhhQjWkBUQQ3cc 6 | S01BelMoJWX5U/SnVbSfZJrMmY/b4RZ8bM1p6FCtWP67pzmE3yZTQVadvHzuNx61 7 | LWhxQmZ3L7tAuE3XNPe9tadqT+Z4YF7/AbWXgwFWP4Y0iBoD9dVjS0lj9nmZ+Pqu 8 | Ia+4lKlai0WFc+XuPPW9bBF6Qokg1rquhlkc7xAarh19BfNplvOAzH2tk63BuG8N 9 | ItK+JylnIAd2re7Ed9g7xDxbBYhK3hYDbMe29SnBvp4MwN2Si/W74kCki445+PFT 10 | 6Ff8OZU9z2BiAI8LtrWzWPCenVHJ4uGh5yMIcARVW+dvfXiBAp3HLnlRMcPC7VwW 11 | DlCk+APHaCst2QauAflmPDdEIROh/Lbi6wJeiFIfK3brryXN/PkCAw+iCq+fM7nr 12 | LeqtP5Tb467jn1DGvhLAT1+/ecMg7qi+LVMn8kslMmDV3EOIqdpxUwWF6Ae+Yy7F 13 | x1C4TgvuwG8c1njO+po3jYCs1CWs/tRFDC2DkrH4mBTJVn6Ii4pX+1jHUd2yx3HB 14 | QAP0INwmRE1PwdtGpmAyP/tE72NbXdNYM6FMd1IwzwepqZ7YKw6EyEgzQL2YDzrf 15 | Xm7OwBDOv4h4CELQJwkFC080OPIt1mZaT2G9iHljrDQl4jmTzfkk13nS+TNyFMFH 16 | AgMBAAGgADANBgkqhkiG9w0BAQsFAAOCAgEApsfWctJGSTDRVr/9aiYk8IGYlYLc 17 | xsUJ3FjsT2hmKiAnBrRi2JICTU3HXp0rBlclsWsO0Oc/XyKyfHHxH/t4efDWVo49 18 | qM1KaAilqFVG1rNI+E7jFfheosgmV19xOBhI5rEL0vWraumEr8DSj76+Em0dbvev 19 | f+dUV5cwbV9B9eAvymR0wvv2kr2zHl4ExdAr1KzIKH/juiVJID1SfQcAxSKLgVU+ 20 | 2z09R84EWI495+UZBUNlcQPANh+R7VsrKrSBYuP9ZFioZkWNwMDFsGCOlyqYu9kb 21 | QhikKpUIBPmOYmcDd7PsqtmJWD/jfn6tPoR3KdP70F4008boG5h4Jcu3syP32QiX 22 | TNb7Fk/EygTCaGrSsynoc0LmF40kJ5xt+hKNAjh8L52MOcMjYdthFKz/V4XJ/xJf 23 | up3MoMRl32II2hqO7t4vHvKFanfHB91M5mLq3H6ZI8amNORZyf5K1mNDj4eOnPxT 24 | d5v1KHFYGmpMaVdYGCN8IXJCShI4gC+BnCZUjHiGuhOosmNLjDVwg1gpUI2kDa0D 25 | rVZVCTwE1ugFGic34VTxD6OmuRGZmoeLSl6cfc/NC+tpWfT+1d4lTAWa/F0pL5yX 26 | PVu0CvIqV6PfAw+f2IAts1HZW3chEX7+TYB9oJplQk/HbJKHWb5j6No1Im0eSuSt 27 | xKxoKTlLRyxpRJQ= 28 | -----END CERTIFICATE REQUEST----- 29 | -------------------------------------------------------------------------------- /e2e/test/certificates/server.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJKAIBAAKCAgEA27JxXbiT+0aptSvE2uFakJQf+qwT5mGFCNaQFRBDdxxLTUF6 3 | UyglZflT9KdVtJ9kmsyZj9vhFnxszWnoUK1Y/runOYTfJlNBVp28fO43HrUtaHFC 4 | Zncvu0C4Tdc09721p2pP5nhgXv8BtZeDAVY/hjSIGgP11WNLSWP2eZn4+q4hr7iU 5 | qVqLRYVz5e489b1sEXpCiSDWuq6GWRzvEBquHX0F82mW84DMfa2TrcG4bw0i0r4n 6 | KWcgB3at7sR32DvEPFsFiEreFgNsx7b1KcG+ngzA3ZKL9bviQKSLjjn48VPoV/w5 7 | lT3PYGIAjwu2tbNY8J6dUcni4aHnIwhwBFVb5299eIECnccueVExw8LtXBYOUKT4 8 | A8doKy3ZBq4B+WY8N0QhE6H8tuLrAl6IUh8rduuvJc38+QIDD6IKr58zuest6q0/ 9 | lNvjruOfUMa+EsBPX795wyDuqL4tUyfySyUyYNXcQ4ip2nFTBYXoB75jLsXHULhO 10 | C+7AbxzWeM76mjeNgKzUJaz+1EUMLYOSsfiYFMlWfoiLilf7WMdR3bLHccFAA/Qg 11 | 3CZETU/B20amYDI/+0TvY1td01gzoUx3UjDPB6mpntgrDoTISDNAvZgPOt9ebs7A 12 | EM6/iHgIQtAnCQULTzQ48i3WZlpPYb2IeWOsNCXiOZPN+STXedL5M3IUwUcCAwEA 13 | AQKCAgBgau3p7cm0K4zrX+wjC2fNr9RhFQgewYm7GT9enyacraQ2oZfnyuSu3j+E 14 | TbQFczaZ4VU7l4ovbifp9qLoVUuLcBux2Kh+j2dLdip0wa8bIPRus9YqVgBys7Kv 15 | JtWuLGn+sV+jjAzvZAcCBR6PhaSXZ5KbqEVJgyxVZzOSpopoqedK0T0dHgmlVy5I 16 | 
KMhEKP+2o+tzdyAGCfYYQeSBMtRbSLVF4H9JGqukNHttdGlXA3LW/nD9cK7T17f5 17 | 4+uc0I4M1v2UlRbmnlYtSBRMYSUhBAPYuioGjJB9QjmlD7g7YVHE24MCBoBuklQg 18 | c0macL2FzHbKoEmcMIvaCifvHu8X0J5qjZghmi7Zozh/Skg9B4XINdHpX7vX7INZ 19 | A7z2nx5x4xaNPO3hJJJkbpCcpSIEQkuqe8a/GYcn0tTMTqoGXr/OFz+ut1ZzZThs 20 | YL8YWh2SqVOzR8xJE3cR9qd/ISTl1CPrxWyWm3eOZ0WGOKZTzUIN3p8gcDIDucs4 21 | kXGDCh7tj7EsYWpa0fnEp5n8kupLWPY050aal898xPP4RDNQFx/VdDBfa/PVKKMy 22 | OzXFq801UoOdF9d6FR3p3YS5O0Zd8UILJQui3s2dpY6/BzuWa2ch9PwvEFI8rsT6 23 | 8VxRCEG9gJxA/GSV/ZNU4hH3Tiv7fSG/aED/uUSvI/t7AWgQgQKCAQEA+Xrshwnt 24 | Cp0cDdkHde/0WnT3DUEvYM0tlJY6z1YR5Kx0GL4zR+yhBuTfmgCMsbkNLvHsc3Us 25 | UbwM4OSAD0oHMa6LCYer6fiYWfv4c19gCtLCZhjBPYHSwXGaQxdjiEE4N6J+mnPW 26 | n39DCjXhl//WlatbLkZRbGYnbORfcE2Kx72OAJt2ujp0Jr/Loi1px6KMbKnzhEhy 27 | mI6FPejx1h8KC5xlCq6faUnal1ZvdNc5WkxtZ1YOCzaKbVuGEok3bFK986aSYYlP 28 | AI4SMo0M/Sy/5tlb9CL5H8s4Dbz35CRyKmXYMQYeGtJ/7HTSdrU7qcp4EZTu5RVX 29 | 1xtq6S+w4/V3JwKCAQEA4XBDaxw2B5ica9xxTAzzq7H9QtGgtYaBIQmkBVqVvoDs 30 | ywGbe7ueJFY7id2rWdeDB7Nxt6feoTuoyXmA3YYAeUBQZGtLKc3MZfdIFJt6yM1D 31 | 6FZyITwo0Zl6ShPxIYsc94BRA7YzmQWaucByrRFLX+y463u2UGqD9s3aPZm921mb 32 | oweIkEQiD2lJNqhx0gRphN+Le+0z7Gh+1ZxI8XikSIkuQ+nvuh5zQA/lqmWr4E9m 33 | EICTP6D5lvJj3EpKZ1pUgHvPEy/fyUq+i7nu0hS394blI6amv2iwmrLhe2NafCHu 34 | +Nux305uO8jqHzEl+l1CvGf0BqNXCM3x5CgLMJW44QKCAQBpmRpc3lqzT2T8h4yc 35 | 4wBu+WtI9Pp04uQULLKf6DKStFw/zOIv430VSfNLYEgtQcLOyB/pjwM/ZXWeC5oY 36 | 3qDE6rh3RDIESvFRxVGYpBom+qbGSFwjCLyInOlK1K+QkOqWwfUMs1N5F4js3Xmr 37 | uOK/X1Ss9Z6pX2P4t4GeK3Q+r4FXyHYsxWk8rZon/0jy81608ArfRzsaT9keJ2eV 38 | 1nWODJjIOLnI+zXHMRLkReVEz2zPfKFdJazaNQ8+8U3AUBWO+EalelUySvBw7Ts+ 39 | Pp7Lu90sLVF9n6sORZo3uyWHxKwJtCkx+T+kep5LGNM0PzsrVfr4hFw19KkAIuug 40 | 0dmpAoIBAQCbbix9b+DskdLfJwjSV2e1bC1iYWe9YDQtlBkLO+5cf0VDniMWRz/8 41 | a5v3LOdUNRt5NsZjypDbd2ejKWuo0BgJgUcsRTF4bBTOBJUk6CHaynNUgC2GLpUy 42 | FfBTnLY221QobMbumTOwAEYyZbZrDq56P5sreIs1nIrJohojOJnG31xIJgyI8wDM 43 | wVmiHrcDBtm9q+belaekClPQcUV1fyk9fZ9xYZxQJWhutccyGZFMQVHsdMmRKCqN 44 | YSdqnan44jW6tCIMZ4iSnz8K1TIMlA5W0iGv19nFxKdmsYh26wRa64Z4+/gCL3Af 45 | NiH9SYSWvrAheEauQPXj8yIgnV9BqyjhAoIBAA0NGugiXqloQD4tKFYROZ2rm1kx 46 | IlbC5rVePSeMz59Qty79dODAvGuJxOb/vKOlQqcULfgidpctBdtZJ/oencwOf/49 47 | e0R5uYpvsxyvAro5OKxk0SD2YSgkdBf8gF5+opG6ZjcBcRk3jp8cdYDTIpViJco5 48 | IJwbMqoWpJxuilj0imxDNQPPoN6yf3mkD2tyYp2YL9X5bgSB58l1LCBJDdJDC4tR 49 | rrXq0Btn9jpwwW/AJ6mIFWWGQKDpkGhLRHxOOK4dC+XgbkEogDSOlZDOEALLvFI9 50 | OVIIxvytGW/Qy6AEzsMnsTPUJMyPsktCQ2YI628dytmqXOniZe1QQ2R7dzw= 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /e2e/test/fw-use-specified-nb/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: fw-use-specified-nb 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: fw-use-specified-nb 13 | template: 14 | metadata: 15 | labels: 16 | app: fw-use-specified-nb 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 8080 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | annotations: 36 | name: svc-test 37 | labels: 38 | app: fw-use-specified-nb 39 | spec: 40 | type: LoadBalancer 41 | selector: 42 | app: fw-use-specified-nb 43 | ports: 44 | - name: http-1 45 | protocol: TCP 46 | port: 80 47 | 
targetPort: 8080 48 | sessionAffinity: None 49 | -------------------------------------------------------------------------------- /e2e/test/lb-created-with-new-nb-id/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: created-with-new-nb-id 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: created-with-new-nb-id 13 | template: 14 | metadata: 15 | labels: 16 | app: created-with-new-nb-id 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 8080 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | annotations: 37 | service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) 38 | labels: 39 | app: created-with-new-nb-id 40 | spec: 41 | type: LoadBalancer 42 | selector: 43 | app: created-with-new-nb-id 44 | ports: 45 | - name: http-1 46 | protocol: TCP 47 | port: 80 48 | targetPort: 8080 49 | sessionAffinity: None 50 | -------------------------------------------------------------------------------- /e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-created-with-specified-nb-id 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-created-with-specified-nb-id" 11 | steps: 12 | - name: Create nodebalancer and create resources 13 | try: 14 | - script: 15 | outputs: 16 | - name: nbconf 17 | value: (json_parse($stdout)) 18 | content: | 19 | set -e 20 | 21 | re='^[0-9]+$' 22 | LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" 23 | 24 | nbid=$(curl -s --request POST \ 25 | -H "Authorization: Bearer $LINODE_TOKEN" \ 26 | -H "Content-Type: application/json" \ 27 | -H "accept: application/json" \ 28 | "https://api.linode.com/v4/nodebalancers" \ 29 | --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) 30 | 31 | if ! 
[[ $nbid =~ $re ]]; then 32 | echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" 33 | exit 1 34 | fi 35 | 36 | echo "{\"id\": \"$nbid\"}" 37 | check: 38 | ($error == null): true 39 | - apply: 40 | file: create-pods-services.yaml 41 | catch: 42 | - describe: 43 | apiVersion: v1 44 | kind: Pod 45 | - describe: 46 | apiVersion: v1 47 | kind: Service 48 | - name: Check that loadbalancer ip is assigned 49 | try: 50 | - assert: 51 | resource: 52 | apiVersion: v1 53 | kind: Service 54 | metadata: 55 | name: svc-test 56 | status: 57 | (loadBalancer.ingress[0].ip != null): true 58 | - name: Validate nodebalancer id 59 | try: 60 | - script: 61 | content: | 62 | set -e 63 | 64 | expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) 65 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 66 | 67 | if [[ $nbid == $expectedId ]]; then 68 | echo "Condition met" 69 | fi 70 | check: 71 | ($error == null): true 72 | (contains($stdout, 'Condition met')): true 73 | -------------------------------------------------------------------------------- /e2e/test/lb-created-with-specified-nb-id/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: created-with-specified-nb-id 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: created-with-specified-nb-id 13 | template: 14 | metadata: 15 | labels: 16 | app: created-with-specified-nb-id 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 8080 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | annotations: 37 | service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) 38 | labels: 39 | app: created-with-specified-nb-id 40 | spec: 41 | type: LoadBalancer 42 | selector: 43 | app: created-with-specified-nb-id 44 | ports: 45 | - name: http-1 46 | protocol: TCP 47 | port: 80 48 | targetPort: 8080 49 | sessionAffinity: None 50 | -------------------------------------------------------------------------------- /e2e/test/lb-delete-svc-no-nb/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: delete-svc-no-nb 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: delete-svc-no-nb 13 | template: 14 | metadata: 15 | labels: 16 | app: delete-svc-no-nb 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 8080 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | annotations: 37 | service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) 38 | labels: 39 | app: delete-svc-no-nb 40 | spec: 41 | type: LoadBalancer 42 | selector: 43 | app: delete-svc-no-nb 44 | ports: 45 | - name: http-1 46 | protocol: TCP 47 | port: 80 48 | targetPort: 8080 49 | sessionAffinity: None 50 | 
-------------------------------------------------------------------------------- /e2e/test/lb-delete-svc-use-new-nbid/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: delete-svc-use-new-nbid 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: delete-svc-use-new-nbid 13 | template: 14 | metadata: 15 | labels: 16 | app: delete-svc-use-new-nbid 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 8080 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | annotations: 37 | service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) 38 | labels: 39 | app: delete-svc-use-new-nbid 40 | spec: 41 | type: LoadBalancer 42 | selector: 43 | app: delete-svc-use-new-nbid 44 | ports: 45 | - name: http-1 46 | protocol: TCP 47 | port: 80 48 | targetPort: 8080 49 | sessionAffinity: None 50 | -------------------------------------------------------------------------------- /e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-delete-svc-use-specified-nb 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-delete-svc-use-specified-nb" 11 | steps: 12 | - name: Create nodebalancer and create resources 13 | try: 14 | - script: 15 | outputs: 16 | - name: nbconf 17 | value: (json_parse($stdout)) 18 | content: | 19 | set -e 20 | 21 | re='^[0-9]+$' 22 | LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" 23 | 24 | nbid=$(curl -s --request POST \ 25 | -H "Authorization: Bearer $LINODE_TOKEN" \ 26 | -H "Content-Type: application/json" \ 27 | -H "accept: application/json" \ 28 | "https://api.linode.com/v4/nodebalancers" \ 29 | --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) 30 | 31 | if ! 
[[ $nbid =~ $re ]]; then 32 | echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" 33 | exit 1 34 | fi 35 | 36 | echo "{\"id\": \"$nbid\"}" 37 | check: 38 | ($error == null): true 39 | - apply: 40 | file: create-pods-services.yaml 41 | catch: 42 | - describe: 43 | apiVersion: v1 44 | kind: Pod 45 | - describe: 46 | apiVersion: v1 47 | kind: Service 48 | - name: Check that loadbalancer ip is assigned 49 | try: 50 | - assert: 51 | resource: 52 | apiVersion: v1 53 | kind: Service 54 | metadata: 55 | name: svc-test 56 | status: 57 | (loadBalancer.ingress[0].ip != null): true 58 | - name: Validate nodebalancer id 59 | try: 60 | - script: 61 | content: | 62 | set -e 63 | 64 | expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) 65 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 66 | 67 | if [[ $nbid == $expectedId ]]; then 68 | echo "Condition met" 69 | fi 70 | check: 71 | ($error == null): true 72 | (contains($stdout, 'Condition met')): true 73 | -------------------------------------------------------------------------------- /e2e/test/lb-delete-svc-use-specified-nb/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: delete-svc-use-specified-nb 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: delete-svc-use-specified-nb 13 | template: 14 | metadata: 15 | labels: 16 | app: delete-svc-use-specified-nb 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 8080 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | annotations: 37 | service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) 38 | labels: 39 | app: delete-svc-use-specified-nb 40 | spec: 41 | type: LoadBalancer 42 | selector: 43 | app: delete-svc-use-specified-nb 44 | ports: 45 | - name: http-1 46 | protocol: TCP 47 | port: 80 48 | targetPort: 8080 49 | sessionAffinity: None 50 | -------------------------------------------------------------------------------- /e2e/test/lb-fw-delete-acl/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: lb-fw-delete-acl 7 | name: test 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: lb-fw-delete-acl 13 | template: 14 | metadata: 15 | labels: 16 | app: lb-fw-delete-acl 17 | spec: 18 | affinity: 19 | podAntiAffinity: 20 | preferredDuringSchedulingIgnoredDuringExecution: 21 | - podAffinityTerm: 22 | labelSelector: 23 | matchExpressions: 24 | - key: app 25 | operator: In 26 | values: 27 | - simple-lb 28 | topologyKey: kubernetes.io/hostname 29 | weight: 100 30 | containers: 31 | - image: appscode/test-server:2.3 32 | name: test 33 | ports: 34 | - name: http-1 35 | containerPort: 8080 36 | protocol: TCP 37 | env: 38 | - name: POD_NAME 39 | valueFrom: 40 | fieldRef: 41 | apiVersion: v1 42 | fieldPath: metadata.name 43 | --- 44 | apiVersion: v1 45 | kind: Service 46 | metadata: 47 | name: svc-test 48 | annotations: 49 | service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | 50 | { 51 | 
"denyList": { 52 | "ipv4": ["8.8.8.8/32", 53 | "9.9.9.9/32", 54 | "7.7.7.7/32"] 55 | } 56 | } 57 | labels: 58 | app: lb-fw-delete-acl 59 | spec: 60 | type: LoadBalancer 61 | selector: 62 | app: lb-fw-delete-acl 63 | ports: 64 | - name: http-1 65 | protocol: TCP 66 | port: 80 67 | targetPort: 8080 68 | sessionAffinity: None 69 | -------------------------------------------------------------------------------- /e2e/test/lb-fw-update-acl/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-fw-update-acl 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-fw-update-acl" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check that loadbalancer ip is assigned 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: svc-test 31 | status: 32 | (loadBalancer.ingress[0].ip != null): true 33 | - name: Fetch Nodebalancer ID, make sure it has firewall attached 34 | try: 35 | - script: 36 | content: | 37 | set -e 38 | 39 | for i in {1..10}; do 40 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 41 | 42 | fw=$(curl -s --request GET \ 43 | -H "Authorization: Bearer $LINODE_TOKEN" \ 44 | -H "Content-Type: application/json" \ 45 | -H "accept: application/json" \ 46 | "https://api.linode.com/v4/nodebalancers/${nbid}/firewalls" || true) 47 | 48 | fwCount=$(echo $fw | jq '.data | length') 49 | ips=$(echo $fw | jq '.data[].rules.inbound[].addresses.ipv4[]') 50 | if [[ $fwCount -eq 1 && -n $ips && $ips == *"7.7.7.7/32"* ]]; then 51 | echo "firewall attached and rule has specified ip" 52 | break 53 | fi 54 | sleep 10 55 | done 56 | check: 57 | ($error == null): true 58 | (contains($stdout, 'firewall attached and rule has specified ip')): true 59 | - name: Update service with new ACL 60 | try: 61 | - apply: 62 | file: update-service.yaml 63 | catch: 64 | - describe: 65 | apiVersion: v1 66 | kind: Service 67 | - name: Fetch firewall ID and check rules are updated 68 | try: 69 | - script: 70 | content: | 71 | set -e 72 | 73 | for i in {1..10}; do 74 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 75 | 76 | fw=$(curl -s --request GET \ 77 | -H "Authorization: Bearer $LINODE_TOKEN" \ 78 | -H "Content-Type: application/json" \ 79 | -H "accept: application/json" \ 80 | "https://api.linode.com/v4/nodebalancers/${nbid}/firewalls" || true) 81 | 82 | fwCount=$(echo $fw | jq -r '.data | length') 83 | ips=$(echo $fw | jq -r '.data[].rules.inbound[].addresses.ipv4[]') 84 | if [[ $fwCount -eq 1 && -n $ips && ! 
$ips == *"7.7.7.7/32"* ]]; then 85 | echo "firewall attached and rule updated" 86 | break 87 | fi 88 | sleep 10 89 | done 90 | check: 91 | ($error == null): true 92 | (contains($stdout, 'firewall attached and rule updated')): true 93 | -------------------------------------------------------------------------------- /e2e/test/lb-fw-update-acl/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: lb-fw-update-acl 7 | name: test 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: lb-fw-update-acl 13 | template: 14 | metadata: 15 | labels: 16 | app: lb-fw-update-acl 17 | spec: 18 | affinity: 19 | podAntiAffinity: 20 | preferredDuringSchedulingIgnoredDuringExecution: 21 | - podAffinityTerm: 22 | labelSelector: 23 | matchExpressions: 24 | - key: app 25 | operator: In 26 | values: 27 | - simple-lb 28 | topologyKey: kubernetes.io/hostname 29 | weight: 100 30 | containers: 31 | - image: appscode/test-server:2.3 32 | name: test 33 | ports: 34 | - name: http-1 35 | containerPort: 8080 36 | protocol: TCP 37 | env: 38 | - name: POD_NAME 39 | valueFrom: 40 | fieldRef: 41 | apiVersion: v1 42 | fieldPath: metadata.name 43 | --- 44 | apiVersion: v1 45 | kind: Service 46 | metadata: 47 | name: svc-test 48 | annotations: 49 | service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | 50 | { 51 | "denyList": { 52 | "ipv4": ["8.8.8.8/32", 53 | "9.9.9.9/32", 54 | "7.7.7.7/32"] 55 | } 56 | } 57 | labels: 58 | app: lb-fw-update-acl 59 | spec: 60 | type: LoadBalancer 61 | selector: 62 | app: lb-fw-update-acl 63 | ports: 64 | - name: http-1 65 | protocol: TCP 66 | port: 80 67 | targetPort: 8080 68 | sessionAffinity: None 69 | -------------------------------------------------------------------------------- /e2e/test/lb-fw-update-acl/update-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: svc-test 6 | annotations: 7 | service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | 8 | { 9 | "denyList": { 10 | "ipv4": ["8.8.8.8/32", 11 | "9.9.9.9/32"] 12 | } 13 | } 14 | labels: 15 | app: lb-fw-update-acl 16 | spec: 17 | type: LoadBalancer 18 | selector: 19 | app: lb-fw-update-acl 20 | ports: 21 | - name: http-1 22 | protocol: TCP 23 | port: 80 24 | targetPort: 8080 25 | sessionAffinity: None 26 | -------------------------------------------------------------------------------- /e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-hostname-only-ingress 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-hostname-only-ingress" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check that svc-test-1 loadbalancer ingress contains only hostname 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: svc-test-1 31 | status: 32 | (loadBalancer.ingress[0].ip != null): false 33 | (loadBalancer.ingress[0].hostname 
!= null): true 34 | - name: Check that svc-test-2 loadbalancer ingress contains ip 35 | try: 36 | - assert: 37 | resource: 38 | apiVersion: v1 39 | kind: Service 40 | metadata: 41 | name: svc-test-2 42 | status: 43 | (loadBalancer.ingress[0].ip != null): true 44 | (loadBalancer.ingress[0].hostname != null): true 45 | - name: Annotate service 46 | try: 47 | - script: 48 | content: | 49 | set -e 50 | kubectl annotate svc svc-test-2 -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress=true 51 | check: 52 | ($error == null): true 53 | - name: Check and make sure svc-test-2 ingress only contains hostname 54 | try: 55 | - assert: 56 | resource: 57 | apiVersion: v1 58 | kind: Service 59 | metadata: 60 | name: svc-test-2 61 | status: 62 | (loadBalancer.ingress[0].ip != null): false 63 | (loadBalancer.ingress[0].hostname != null): true 64 | -------------------------------------------------------------------------------- /e2e/test/lb-hostname-only-ingress/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: hostname-ingress 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: hostname-ingress 13 | template: 14 | metadata: 15 | labels: 16 | app: hostname-ingress 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 80 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test-1 36 | annotations: 37 | service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress: "true" 38 | labels: 39 | app: hostname-ingress 40 | spec: 41 | type: LoadBalancer 42 | selector: 43 | app: hostname-ingress 44 | ports: 45 | - name: http 46 | protocol: TCP 47 | port: 80 48 | targetPort: 80 49 | sessionAffinity: None 50 | --- 51 | apiVersion: v1 52 | kind: Service 53 | metadata: 54 | name: svc-test-2 55 | labels: 56 | app: hostname-ingress 57 | spec: 58 | type: LoadBalancer 59 | selector: 60 | app: hostname-ingress 61 | ports: 62 | - name: http 63 | protocol: TCP 64 | port: 80 65 | targetPort: 80 66 | sessionAffinity: None 67 | -------------------------------------------------------------------------------- /e2e/test/lb-http-body-health-check/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-http-body-health-check 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-http-body-health-check" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check that loadbalancer ip is assigned 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: svc-test 31 | status: 32 | (loadBalancer.ingress[0].ip != null): true 33 | - name: Fetch nodebalancer config for port 80 34 | try: 35 | - script: 36 | content: | 37 | set -e 38 | 39 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE 
LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 40 | 41 | echo "Nodebalancer ID: $nbid" 42 | 43 | for i in {1..20}; do 44 | nbconfig=$(curl -s \ 45 | -H "Authorization: Bearer $LINODE_TOKEN" \ 46 | -H "Content-Type: application/json" \ 47 | "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)' || true) 48 | 49 | if [[ -z $nbconfig ]]; then 50 | echo "Failed fetching nodebalancer config for port 80" 51 | fi 52 | 53 | port_80_check=$(echo $nbconfig | jq '.check == "http_body"') 54 | port_80_path=$(echo $nbconfig | jq '.check_path == "/"') 55 | port_80_body=$(echo $nbconfig | jq '.check_body == "nginx"') 56 | port_80_protocol=$(echo $nbconfig | jq '.protocol == "http"') 57 | port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') 58 | 59 | if [[ $port_80_check == "true" && $port_80_path == "true" && $port_80_body == "true" && $port_80_protocol == "true" && $port_80_up_nodes == "true" ]]; then 60 | echo "All conditions met" 61 | break 62 | fi 63 | echo "Conditions not met, retrying in 20 seconds..." 64 | echo "check: $port_80_check" 65 | echo "path: $port_80_path" 66 | echo "body: $port_80_body" 67 | echo "protocol: $port_80_protocol" 68 | echo "up_nodes: $port_80_up_nodes" 69 | sleep 20 70 | done 71 | check: 72 | ($error == null): true 73 | (contains($stdout, 'All conditions met')): true 74 | -------------------------------------------------------------------------------- /e2e/test/lb-http-body-health-check/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: http-body-health-check 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: http-body-health-check 13 | template: 14 | metadata: 15 | labels: 16 | app: http-body-health-check 17 | spec: 18 | containers: 19 | - image: nginx 20 | name: test 21 | ports: 22 | - name: http 23 | containerPort: 80 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | annotations: 36 | service.beta.kubernetes.io/linode-loadbalancer-check-body: nginx 37 | service.beta.kubernetes.io/linode-loadbalancer-check-path: / 38 | service.beta.kubernetes.io/linode-loadbalancer-check-type: http_body 39 | service.beta.kubernetes.io/linode-loadbalancer-default-protocol: http 40 | name: svc-test 41 | labels: 42 | app: http-body-health-check 43 | spec: 44 | type: LoadBalancer 45 | selector: 46 | app: http-body-health-check 47 | ports: 48 | - name: http 49 | protocol: TCP 50 | port: 80 51 | targetPort: 80 52 | sessionAffinity: None 53 | -------------------------------------------------------------------------------- /e2e/test/lb-http-status-health-check/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-http-status-health-check 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-http-status-health-check" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 
23 | - name: Check that loadbalancer ip is assigned 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: svc-test 31 | status: 32 | (loadBalancer.ingress[0].ip != null): true 33 | - name: Fetch nodebalancer config for port 80 34 | try: 35 | - script: 36 | content: | 37 | set -e 38 | 39 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 40 | echo "Nodebalancer id: $nbid" 41 | 42 | for i in {1..20}; do 43 | nbconfig=$(curl -s \ 44 | -H "Authorization: Bearer $LINODE_TOKEN" \ 45 | -H "Content-Type: application/json" \ 46 | "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)' || true) 47 | 48 | if [[ -z $nbconfig ]]; then 49 | echo "Failed fetching nodebalancer config for port 80" 50 | fi 51 | 52 | port_80_check=$(echo $nbconfig | jq '.check == "http"') 53 | port_80_path=$(echo $nbconfig | jq '.check_path == "/"') 54 | port_80_protocol=$(echo $nbconfig | jq '.protocol == "http"') 55 | port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') 56 | 57 | if [[ $port_80_check == "true" && $port_80_path == "true" && $port_80_protocol == "true" && $port_80_up_nodes == "true" ]]; then 58 | echo "All conditions met" 59 | break 60 | fi 61 | echo "Conditions not met, retrying in 20 seconds..." 62 | echo "check: $port_80_check" 63 | echo "path: $port_80_path" 64 | echo "protocol: $port_80_protocol" 65 | echo "up_nodes: $port_80_up_nodes" 66 | sleep 20 67 | done 68 | check: 69 | ($error == null): true 70 | (contains($stdout, 'All conditions met')): true 71 | -------------------------------------------------------------------------------- /e2e/test/lb-http-status-health-check/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: http-status-health-check 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: http-status-health-check 13 | template: 14 | metadata: 15 | labels: 16 | app: http-status-health-check 17 | spec: 18 | containers: 19 | - image: nginx 20 | name: test 21 | ports: 22 | - name: http 23 | containerPort: 80 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | annotations: 36 | service.beta.kubernetes.io/linode-loadbalancer-check-path: "/" 37 | service.beta.kubernetes.io/linode-loadbalancer-check-type: "http" 38 | service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" 39 | name: svc-test 40 | labels: 41 | app: http-status-health-check 42 | spec: 43 | type: LoadBalancer 44 | selector: 45 | app: http-status-health-check 46 | ports: 47 | - name: http 48 | protocol: TCP 49 | port: 80 50 | targetPort: 80 51 | sessionAffinity: None 52 | -------------------------------------------------------------------------------- /e2e/test/lb-passive-health-check/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-passive-health-check 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-passive-health-check" 11 | steps: 12 | - name: Create pods and 
services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check that loadbalancer ip is assigned 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: svc-test 31 | status: 32 | (loadBalancer.ingress[0].ip != null): true 33 | - name: Fetch nodebalancer config for port 80 34 | try: 35 | - script: 36 | content: | 37 | set -e 38 | 39 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 40 | 41 | echo "Nodebalancer ID: $nbid" 42 | 43 | for i in {1..20}; do 44 | nbconfig=$(curl -s \ 45 | -H "Authorization: Bearer $LINODE_TOKEN" \ 46 | -H "Content-Type: application/json" \ 47 | "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)' || true) 48 | 49 | if [[ -z $nbconfig ]]; then 50 | echo "Failed fetching nodebalancer config for port 80" 51 | fi 52 | 53 | port_80_check=$(echo $nbconfig | jq '.check == "none"') 54 | port_80_passive=$(echo $nbconfig | jq '.check_passive == true') 55 | port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') 56 | 57 | if [[ $port_80_check == "true" && $port_80_passive == "true" && $port_80_up_nodes == "true" ]]; then 58 | echo "All conditions met" 59 | break 60 | fi 61 | echo "Conditions not met, retrying in 20 seconds..." 62 | echo "port_80_check: $port_80_check" 63 | echo "port_80_passive: $port_80_passive" 64 | echo "port_80_up_nodes: $port_80_up_nodes" 65 | sleep 20 66 | done 67 | check: 68 | ($error == null): true 69 | (contains($stdout, 'All conditions met')): true 70 | -------------------------------------------------------------------------------- /e2e/test/lb-passive-health-check/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: passive-health-check 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: passive-health-check 13 | template: 14 | metadata: 15 | labels: 16 | app: passive-health-check 17 | spec: 18 | containers: 19 | - image: nginx 20 | name: test 21 | ports: 22 | - name: http 23 | containerPort: 80 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | annotations: 36 | service.beta.kubernetes.io/linode-loadbalancer-check-passive: "true" 37 | service.beta.kubernetes.io/linode-loadbalancer-check-type: none 38 | name: svc-test 39 | labels: 40 | app: passive-health-check 41 | spec: 42 | type: LoadBalancer 43 | selector: 44 | app: passive-health-check 45 | ports: 46 | - name: http 47 | protocol: TCP 48 | port: 80 49 | targetPort: 80 50 | sessionAffinity: None 51 | -------------------------------------------------------------------------------- /e2e/test/lb-premium-nb/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-premium-nb 6 | labels: 7 | all: 8 | spec: 9 | namespace: "lb-premium-nb" 10 | steps: 11 | - name: Create pods and services 12 | try: 13 | - apply: 14 | 
file: create-pods-services.yaml 15 | catch: 16 | - describe: 17 | apiVersion: v1 18 | kind: Pod 19 | - describe: 20 | apiVersion: v1 21 | kind: Service 22 | - name: Check that loadbalancer ip is assigned 23 | try: 24 | - assert: 25 | resource: 26 | apiVersion: v1 27 | kind: Service 28 | metadata: 29 | name: svc-test 30 | status: 31 | (loadBalancer.ingress[0].ip != null): true 32 | - name: Fetch loadbalancer ip and check both pods reachable 33 | try: 34 | - script: 35 | content: | 36 | set -e 37 | IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) 38 | 39 | podnames=() 40 | 41 | for i in {1..10}; do 42 | if [[ ${#podnames[@]} -lt 2 ]]; then 43 | output=$(curl -s $IP:80 | jq -e .podName || true) 44 | 45 | if [[ "$output" == *"test-"* ]]; then 46 | unique=true 47 | for name in "${podnames[@]}"; do 48 | if [[ "$name" == "$output" ]]; then 49 | unique=false 50 | break 51 | fi 52 | done 53 | if [[ "$unique" == true ]]; then 54 | podnames+=($output) 55 | fi 56 | fi 57 | else 58 | break 59 | fi 60 | sleep 10 61 | done 62 | 63 | if [[ ${#podnames[@]} -lt 2 ]]; then 64 | echo "all pods failed to respond" 65 | else 66 | echo "all pods responded" 67 | fi 68 | check: 69 | ($error == null): true 70 | (contains($stdout, 'all pods responded')): true 71 | - name: Check nodebalancer type 72 | try: 73 | - script: 74 | content: | 75 | set -e 76 | 77 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 78 | for i in {1..10}; do 79 | type=$(curl -s --request GET \ 80 | -H "Authorization: Bearer $LINODE_TOKEN" \ 81 | -H "Content-Type: application/json" \ 82 | -H "accept: application/json" \ 83 | "https://api.linode.com/v4/nodebalancers/${nbid}" | jq -r '.type') 84 | 85 | if [[ $type == "premium" ]]; then 86 | echo "nodebalancer type is premium" 87 | break 88 | fi 89 | sleep 5 90 | done 91 | check: 92 | ($error == null): true 93 | (contains($stdout, 'nodebalancer type is premium')): true 94 | -------------------------------------------------------------------------------- /e2e/test/lb-premium-nb/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: lb-premium-nb 7 | name: test 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: lb-premium-nb 13 | template: 14 | metadata: 15 | labels: 16 | app: lb-premium-nb 17 | spec: 18 | affinity: 19 | podAntiAffinity: 20 | preferredDuringSchedulingIgnoredDuringExecution: 21 | - podAffinityTerm: 22 | labelSelector: 23 | matchExpressions: 24 | - key: app 25 | operator: In 26 | values: 27 | - lb-premium-nb 28 | topologyKey: kubernetes.io/hostname 29 | weight: 100 30 | containers: 31 | - image: appscode/test-server:2.3 32 | name: test 33 | ports: 34 | - name: http-1 35 | containerPort: 8080 36 | protocol: TCP 37 | env: 38 | - name: POD_NAME 39 | valueFrom: 40 | fieldRef: 41 | apiVersion: v1 42 | fieldPath: metadata.name 43 | --- 44 | apiVersion: v1 45 | kind: Service 46 | metadata: 47 | name: svc-test 48 | annotations: 49 | service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-type: premium 50 | labels: 51 | app: lb-premium-nb 52 | spec: 53 | type: LoadBalancer 54 | selector: 55 | app: lb-premium-nb 56 | ports: 57 | - name: http-1 58 | protocol: TCP 59 | port: 80 60 | targetPort: 8080 61 | sessionAffinity: None 62 | --------------------------------------------------------------------------------
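Outside chainsaw, the premium-type check above reduces to two commands. A minimal sketch, assuming $NAMESPACE, $KUBECONFIG, and $LINODE_TOKEN are exported and using the helper script path these tests rely on:

# Sketch: confirm the nodebalancer-type annotation was honored ("premium" expected).
nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh)
curl -s -H "Authorization: Bearer $LINODE_TOKEN" \
  "https://api.linode.com/v4/nodebalancers/$nbid" | jq -r '.type'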
/e2e/test/lb-preserve-annotation-new-nb-specified/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: preserve-annotation-new-nb-specified 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: preserve-annotation-new-nb-specified 13 | template: 14 | metadata: 15 | labels: 16 | app: preserve-annotation-new-nb-specified 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 8080 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | annotations: 37 | service.beta.kubernetes.io/linode-loadbalancer-preserve: "true" 38 | labels: 39 | app: preserve-annotation-new-nb-specified 40 | spec: 41 | type: LoadBalancer 42 | selector: 43 | app: preserve-annotation-new-nb-specified 44 | ports: 45 | - name: http-1 46 | protocol: TCP 47 | port: 80 48 | targetPort: 8080 49 | sessionAffinity: None 50 | -------------------------------------------------------------------------------- /e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-preserve-annotation-svc-delete 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-preserve-annotation-svc-delete" 11 | steps: 12 | - name: Create resources 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check that loadbalancer ip is assigned 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: svc-test 31 | status: 32 | (loadBalancer.ingress[0].ip != null): true 33 | - name: Delete pods, delete service and validate nb still exists 34 | try: 35 | - script: 36 | content: | 37 | set -e 38 | 39 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 40 | 41 | kubectl --timeout=60s -n $NAMESPACE delete deploy test 42 | kubectl --timeout=60s -n $NAMESPACE delete svc svc-test 43 | sleep 20 44 | 45 | get_resp=$(curl --write-out "%{http_code}\n" \ 46 | --silent --output /dev/null \ 47 | -X GET \ 48 | -H "Authorization: Bearer $LINODE_TOKEN" \ 49 | "https://api.linode.com/v4/nodebalancers/$nbid") 50 | 51 | if [[ $get_resp == "200" ]]; then 52 | echo "nodebalancer exists" 53 | fi 54 | 55 | # cleanup remaining nodebalancer 56 | delete_resp=$(curl --write-out "%{http_code}\n" \ 57 | --silent --output /dev/null \ 58 | -X DELETE \ 59 | -H "Authorization: Bearer $LINODE_TOKEN" \ 60 | "https://api.linode.com/v4/nodebalancers/$nbid") 61 | 62 | if ! 
[[ $delete_resp == "200" ]]; then 63 | echo "failed deleting nodebalancer" 64 | fi 65 | check: 66 | ($error == null): true 67 | (contains($stdout, 'nodebalancer exists')): true 68 | -------------------------------------------------------------------------------- /e2e/test/lb-preserve-annotation-svc-delete/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: preserve-annotation-svc-delete 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: preserve-annotation-svc-delete 13 | template: 14 | metadata: 15 | labels: 16 | app: preserve-annotation-svc-delete 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 8080 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | annotations: 37 | service.beta.kubernetes.io/linode-loadbalancer-preserve: "true" 38 | labels: 39 | app: preserve-annotation-svc-delete 40 | spec: 41 | type: LoadBalancer 42 | selector: 43 | app: preserve-annotation-svc-delete 44 | ports: 45 | - name: http-1 46 | protocol: TCP 47 | port: 80 48 | targetPort: 8080 49 | sessionAffinity: None 50 | -------------------------------------------------------------------------------- /e2e/test/lb-simple/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-simple 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-simple" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check that loadbalancer ip is assigned 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: svc-test 31 | status: 32 | (loadBalancer.ingress[0].ip != null): true 33 | - name: Fetch loadbalancer ip and check both pods reachable 34 | try: 35 | - script: 36 | content: | 37 | set -e 38 | IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) 39 | 40 | podnames=() 41 | 42 | for i in {1..10}; do 43 | if [[ ${#podnames[@]} -lt 2 ]]; then 44 | output=$(curl -s $IP:80 | jq -e .podName || true) 45 | 46 | if [[ "$output" == *"test-"* ]]; then 47 | unique=true 48 | for name in "${podnames[@]}"; do 49 | if [[ "$name" == "$output" ]]; then 50 | unique=false 51 | break 52 | fi 53 | done 54 | if [[ "$unique" == true ]]; then 55 | podnames+=($output) 56 | fi 57 | fi 58 | else 59 | break 60 | fi 61 | sleep 10 62 | done 63 | 64 | if [[ ${#podnames[@]} -lt 2 ]]; then 65 | echo "all pods failed to respond" 66 | else 67 | echo "all pods responded" 68 | fi 69 | check: 70 | ($error == null): true 71 | (contains($stdout, 'all pods responded')): true 72 | - name: Delete Pods 73 | try: 74 | - delete: 75 | ref: 76 | apiVersion: v1 77 | kind: Pod 78 | - name: Delete Service 79 | try: 80 | - delete: 81 | ref: 82 | apiVersion: v1 83 | kind: Service 84 | 
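The pod-uniqueness loop used by lb-simple (and lb-premium-nb above) can be expressed more compactly. A sketch under the same assumptions, namely two replicas and a test server that returns a JSON podName:

# Sketch only: count distinct podName values observed through the load balancer.
ip=$(kubectl get svc svc-test -n "$NAMESPACE" -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
distinct=$(for _ in $(seq 1 10); do curl -s "http://$ip:80" | jq -r '.podName // empty'; sleep 5; done | sort -u | grep -c '^test-' || true)
[ "$distinct" -ge 2 ] && echo "all pods responded"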
-------------------------------------------------------------------------------- /e2e/test/lb-simple/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: lb-simple 7 | name: test 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: lb-simple 13 | template: 14 | metadata: 15 | labels: 16 | app: lb-simple 17 | spec: 18 | affinity: 19 | podAntiAffinity: 20 | preferredDuringSchedulingIgnoredDuringExecution: 21 | - podAffinityTerm: 22 | labelSelector: 23 | matchExpressions: 24 | - key: app 25 | operator: In 26 | values: 27 | - lb-simple 28 | topologyKey: kubernetes.io/hostname 29 | weight: 100 30 | containers: 31 | - image: appscode/test-server:2.3 32 | name: test 33 | ports: 34 | - name: http-1 35 | containerPort: 8080 36 | protocol: TCP 37 | env: 38 | - name: POD_NAME 39 | valueFrom: 40 | fieldRef: 41 | apiVersion: v1 42 | fieldPath: metadata.name 43 | --- 44 | apiVersion: v1 45 | kind: Service 46 | metadata: 47 | name: svc-test 48 | labels: 49 | app: lb-simple 50 | spec: 51 | type: LoadBalancer 52 | selector: 53 | app: lb-simple 54 | ports: 55 | - name: http-1 56 | protocol: TCP 57 | port: 80 58 | targetPort: 8080 59 | sessionAffinity: None 60 | -------------------------------------------------------------------------------- /e2e/test/lb-single-tls/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-single-tls 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-single-tls" 11 | steps: 12 | - name: Create secret 13 | try: 14 | - script: 15 | content: | 16 | set -e 17 | kubectl -n $NAMESPACE create secret tls tls-secret --cert=../certificates/server.crt --key=../certificates/server.key 18 | check: 19 | ($error == null): true 20 | - name: Create pods and services 21 | try: 22 | - apply: 23 | file: create-pods-services.yaml 24 | catch: 25 | - describe: 26 | apiVersion: v1 27 | kind: Pod 28 | - describe: 29 | apiVersion: v1 30 | kind: Service 31 | - name: Check that loadbalancer ip is assigned 32 | try: 33 | - assert: 34 | resource: 35 | apiVersion: v1 36 | kind: Service 37 | metadata: 38 | name: svc-test 39 | status: 40 | (loadBalancer.ingress[0].ip != null): true 41 | - name: Fetch loadbalancer ip and check if pod is reachable 42 | try: 43 | - script: 44 | content: | 45 | set -e 46 | IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) 47 | 48 | podnames=() 49 | 50 | for i in {1..10}; do 51 | if [[ ${#podnames[@]} -lt 1 ]]; then 52 | output=$(curl --resolve linode.test:80:$IP --cacert ../certificates/ca.crt -s https://linode.test:80 | jq -e .podName || true) 53 | 54 | if [[ "$output" == *"test-"* ]]; then 55 | unique=true 56 | for name in "${podnames[@]}"; do 57 | if [[ "$name" == "$output" ]]; then 58 | unique=false 59 | break 60 | fi 61 | done 62 | if [[ "$unique" == true ]]; then 63 | podnames+=($output) 64 | fi 65 | fi 66 | else 67 | break 68 | fi 69 | sleep 10 70 | done 71 | 72 | if [[ ${#podnames[@]} -lt 1 ]]; then 73 | echo "all pods failed to respond" 74 | else 75 | echo "all pods responded" 76 | fi 77 | check: 78 | ($error == null): true 79 | (contains($stdout, 'all pods responded')): true 80 | - name: Delete Pods 81 | try: 
82 | - delete: 83 | ref: 84 | apiVersion: v1 85 | kind: Pod 86 | - name: Delete Service 87 | try: 88 | - delete: 89 | ref: 90 | apiVersion: v1 91 | kind: Service 92 | -------------------------------------------------------------------------------- /e2e/test/lb-single-tls/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: lb-single-tls 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: lb-single-tls 13 | template: 14 | metadata: 15 | labels: 16 | app: lb-single-tls 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: https 23 | containerPort: 8080 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | annotations: 37 | service.beta.kubernetes.io/linode-loadbalancer-default-protocol: https 38 | service.beta.kubernetes.io/linode-loadbalancer-port-80: '{ "tls-secret-name": "tls-secret" }' 39 | labels: 40 | app: lb-single-tls 41 | spec: 42 | type: LoadBalancer 43 | selector: 44 | app: lb-single-tls 45 | ports: 46 | - name: https 47 | protocol: TCP 48 | port: 80 49 | targetPort: 8080 50 | sessionAffinity: None 51 | -------------------------------------------------------------------------------- /e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-tcp-connection-health-check 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-tcp-connection-health-check" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check that loadbalancer ip is assigned 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: svc-test 31 | status: 32 | (loadBalancer.ingress[0].ip != null): true 33 | - name: Fetch nodebalancer config for port 80 34 | try: 35 | - script: 36 | content: | 37 | set -e 38 | 39 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 40 | 41 | echo "Nodebalancer ID: $nbid" 42 | 43 | for i in {1..20}; do 44 | nbconfig=$(curl -s \ 45 | -H "Authorization: Bearer $LINODE_TOKEN" \ 46 | -H "Content-Type: application/json" \ 47 | "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)' || true) 48 | 49 | if [[ -z $nbconfig ]]; then 50 | echo "Failed fetching nodebalancer config for port 80" 51 | fi 52 | 53 | port_80_check=$(echo $nbconfig | jq '.check == "connection"') 54 | port_80_interval=$(echo $nbconfig | jq '.check_interval == 10') 55 | port_80_timeout=$(echo $nbconfig | jq '.check_timeout == 5') 56 | port_80_attempts=$(echo $nbconfig | jq '.check_attempts == 4') 57 | port_80_protocol=$(echo $nbconfig | jq '.protocol == "tcp"') 58 | port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') 59 | 60 | if [[ $port_80_check == "true" && $port_80_interval == "true" 
&& $port_80_timeout == "true" && $port_80_attempts == "true" && $port_80_protocol == "true" && $port_80_up_nodes == "true" ]]; then 61 | echo "All conditions met" 62 | break 63 | fi 64 | echo "Conditions not met, retrying in 20 seconds..." 65 | echo "check: $port_80_check" 66 | echo "interval: $port_80_interval" 67 | echo "timeout: $port_80_timeout" 68 | echo "attempts: $port_80_attempts" 69 | echo "protocol: $port_80_protocol" 70 | echo "up_nodes: $port_80_up_nodes" 71 | sleep 20 72 | done 73 | check: 74 | ($error == null): true 75 | (contains($stdout, 'All conditions met')): true 76 | -------------------------------------------------------------------------------- /e2e/test/lb-tcp-connection-health-check/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: tcp-connection-health-check 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: tcp-connection-health-check 13 | template: 14 | metadata: 15 | labels: 16 | app: tcp-connection-health-check 17 | spec: 18 | containers: 19 | - image: nginx 20 | name: test 21 | ports: 22 | - name: http 23 | containerPort: 80 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | annotations: 36 | service.beta.kubernetes.io/linode-loadbalancer-check-attempts: "4" 37 | service.beta.kubernetes.io/linode-loadbalancer-check-interval: "10" 38 | service.beta.kubernetes.io/linode-loadbalancer-check-timeout: "5" 39 | service.beta.kubernetes.io/linode-loadbalancer-check-type: connection 40 | service.beta.kubernetes.io/linode-loadbalancer-default-protocol: tcp 41 | name: svc-test 42 | labels: 43 | app: tcp-connection-health-check 44 | spec: 45 | type: LoadBalancer 46 | selector: 47 | app: tcp-connection-health-check 48 | ports: 49 | - name: http 50 | protocol: TCP 51 | port: 80 52 | targetPort: 80 53 | sessionAffinity: None 54 | -------------------------------------------------------------------------------- /e2e/test/lb-update-port/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: lb-update-port 7 | name: test 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: lb-update-port 13 | template: 14 | metadata: 15 | labels: 16 | app: lb-update-port 17 | spec: 18 | affinity: 19 | podAntiAffinity: 20 | preferredDuringSchedulingIgnoredDuringExecution: 21 | - podAffinityTerm: 22 | labelSelector: 23 | matchExpressions: 24 | - key: app 25 | operator: In 26 | values: 27 | - lb-update-port 28 | topologyKey: kubernetes.io/hostname 29 | weight: 100 30 | containers: 31 | - image: appscode/test-server:2.3 32 | name: test 33 | ports: 34 | - name: http-1 35 | containerPort: 8080 36 | protocol: TCP 37 | env: 38 | - name: POD_NAME 39 | valueFrom: 40 | fieldRef: 41 | apiVersion: v1 42 | fieldPath: metadata.name 43 | --- 44 | apiVersion: v1 45 | kind: Service 46 | metadata: 47 | name: svc-test 48 | labels: 49 | app: lb-update-port 50 | annotations: 51 | service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | 52 | { 53 | "denyList": { 54 | "ipv4": ["8.8.8.8/32", 55 | "9.9.9.9/32"] 56 | } 57 | } 58 | spec: 59 | type: LoadBalancer 60 | selector: 61 | app: lb-update-port 62 | ports: 63 | - name: http-1 64 | 
protocol: TCP 65 | port: 80 66 | targetPort: 8080 67 | sessionAffinity: None 68 | -------------------------------------------------------------------------------- /e2e/test/lb-update-port/update-port-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: svc-test 6 | labels: 7 | app: lb-update-port 8 | annotations: 9 | service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | 10 | { 11 | "denyList": { 12 | "ipv4": ["8.8.8.8/32", 13 | "9.9.9.9/32"] 14 | } 15 | } 16 | spec: 17 | type: LoadBalancer 18 | selector: 19 | app: lb-update-port 20 | ports: 21 | - name: http-1 22 | protocol: TCP 23 | port: 80 24 | targetPort: 8080 25 | - name: http-2 26 | protocol: TCP 27 | port: 8080 28 | targetPort: 8080 29 | sessionAffinity: None 30 | ... 31 | -------------------------------------------------------------------------------- /e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-updated-with-nb-id 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-updated-with-nb-id" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check that loadbalancer ip is assigned 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: svc-test 31 | status: 32 | (loadBalancer.ingress[0].ip != null): true 33 | - name: Create nodebalancer, annotate svc with nodebalancer id and validate 34 | try: 35 | - script: 36 | content: | 37 | set -e 38 | 39 | re='^[0-9]+$' 40 | LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" 41 | 42 | nbid=$(curl -s --request POST \ 43 | -H "Authorization: Bearer $LINODE_TOKEN" \ 44 | -H "Content-Type: application/json" \ 45 | -H "accept: application/json" \ 46 | "https://api.linode.com/v4/nodebalancers" \ 47 | --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) 48 | 49 | if ! [[ $nbid =~ $re ]]; then 50 | echo "Nodebalancer id [$nbid] for label [$LABEL] is incorrect, failed to create nodebalancer" 51 | exit 1 52 | fi 53 | 54 | kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id=$nbid 55 | sleep 5 56 | 57 | for i in {1..20}; do 58 | nbid2=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 59 | 60 | if [[ $nbid == $nbid2 ]]; then 61 | echo "Condition met" 62 | break 63 | fi 64 | echo "Condition not met, waiting for 20 seconds and retrying..." 
65 | echo "nbid: $nbid" 66 | echo "nbid2: $nbid2" 67 | sleep 20 68 | done 69 | check: 70 | ($error == null): true 71 | (contains($stdout, 'Condition met')): true 72 | -------------------------------------------------------------------------------- /e2e/test/lb-updated-with-nb-id/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: updated-with-nb-id 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: updated-with-nb-id 13 | template: 14 | metadata: 15 | labels: 16 | app: updated-with-nb-id 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 8080 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | labels: 37 | app: updated-with-nb-id 38 | spec: 39 | type: LoadBalancer 40 | selector: 41 | app: updated-with-nb-id 42 | ports: 43 | - name: http-1 44 | protocol: TCP 45 | port: 80 46 | targetPort: 8080 47 | sessionAffinity: None 48 | -------------------------------------------------------------------------------- /e2e/test/lb-with-http-to-https/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-with-http-to-https 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-with-http-to-https" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Create secrets 24 | try: 25 | - script: 26 | content: | 27 | set -e 28 | kubectl -n $NAMESPACE create secret tls tls-secret-1 --cert=../certificates/server.crt --key=../certificates/server.key 29 | check: 30 | ($error == null): true 31 | - name: Update service to have another annotation and port 32 | try: 33 | - script: 34 | content: | 35 | set -e 36 | kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-443='{"tls-secret-name": "tls-secret-1", "protocol": "https"}' 37 | kubectl patch svc svc-test -n $NAMESPACE --type='json' -p='[{"op": "add", "path": "/spec/ports/-", "value": {"name": "https", "port": 443, "targetPort": 8080, "protocol": "TCP"}}]' 38 | sleep 10 39 | check: 40 | ($error == null): true 41 | - name: Check endpoints 42 | try: 43 | - assert: 44 | resource: 45 | apiVersion: v1 46 | kind: Endpoints 47 | metadata: 48 | name: svc-test 49 | (subsets[0].addresses != null): true 50 | (subsets[0].ports != null): true 51 | (length(subsets[0].ports)): 2 52 | catch: 53 | - describe: 54 | apiVersion: v1 55 | kind: Pod 56 | - describe: 57 | apiVersion: v1 58 | kind: Service 59 | - name: Check that loadbalancer ip is assigned 60 | try: 61 | - assert: 62 | resource: 63 | apiVersion: v1 64 | kind: Service 65 | metadata: 66 | name: svc-test 67 | status: 68 | (loadBalancer.ingress[0].ip != null): true 69 | - name: Fetch loadbalancer ip and check if pod reachable on different ports with different protocols 70 | try: 71 | - script: 72 | content: | 73 
| set -e 74 | IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) 75 | echo "loadbalancer ip: $IP" 76 | 77 | for i in {1..20}; do 78 | port_80=$(curl -s $IP:80 | grep "test-" || true) 79 | port_443=$(curl --resolve linode.test:443:$IP --cacert ../certificates/ca.crt -s https://linode.test:443 | grep "test-" || true) 80 | 81 | if [[ -z $port_80 || -z $port_443 ]]; then 82 | echo "pod not reachable on port 80 or 443, retrying..." 83 | echo "port 80: $port_80" 84 | echo "port 443: $port_443" 85 | sleep 20 86 | else 87 | echo "all pods responded" 88 | break 89 | fi 90 | done 91 | check: 92 | ($error == null): true 93 | (contains($stdout, 'all pods responded')): true 94 | -------------------------------------------------------------------------------- /e2e/test/lb-with-http-to-https/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: http-to-https 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: http-to-https 13 | template: 14 | metadata: 15 | labels: 16 | app: http-to-https 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: alpha 23 | containerPort: 8080 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | annotations: 36 | service.beta.kubernetes.io/linode-loadbalancer-default-protocol: https 37 | service.beta.kubernetes.io/linode-loadbalancer-port-80: '{"protocol": "http"}' 38 | name: svc-test 39 | labels: 40 | app: http-to-https 41 | spec: 42 | type: LoadBalancer 43 | selector: 44 | app: http-to-https 45 | ports: 46 | - name: http 47 | protocol: TCP 48 | port: 80 49 | targetPort: 8080 50 | sessionAffinity: None 51 | -------------------------------------------------------------------------------- /e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-with-multiple-http-https-ports 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-with-multiple-http-https-ports" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Create secrets 24 | try: 25 | - script: 26 | content: | 27 | set -e 28 | kubectl -n $NAMESPACE create secret tls tls-secret-1 --cert=../certificates/server.crt --key=../certificates/server.key 29 | kubectl -n $NAMESPACE create secret tls tls-secret-2 --cert=../certificates/server.crt --key=../certificates/server.key 30 | sleep 2 31 | check: 32 | ($error == null): true 33 | - name: Check endpoints exist 34 | try: 35 | - assert: 36 | resource: 37 | apiVersion: v1 38 | kind: Endpoints 39 | metadata: 40 | name: svc-test 41 | (subsets[0].addresses != null): true 42 | (subsets[0].ports != null): true 43 | (length(subsets[0].ports)): 4 44 | catch: 45 | - describe: 46 | apiVersion: v1 47 | kind: Pod 48 | - describe: 49 | apiVersion: v1 50 | kind: Service 51 | - name: 
Check that loadbalancer ip is assigned 52 | try: 53 | - assert: 54 | resource: 55 | apiVersion: v1 56 | kind: Service 57 | metadata: 58 | name: svc-test 59 | status: 60 | (loadBalancer.ingress[0].ip != null): true 61 | - name: Fetch loadbalancer ip and check if pod reachable on different ports with different protocols 62 | try: 63 | - script: 64 | content: | 65 | set -e 66 | IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) 67 | echo "loadbalancer ip: $IP" 68 | 69 | for i in {1..20}; do 70 | port_80=$(curl -s $IP:80 | grep "test-" || true) 71 | port_8080=$(curl -s $IP:8080 | grep "test-" || true) 72 | port_443=$(curl --resolve linode.test:443:$IP --cacert ../certificates/ca.crt -s https://linode.test:443 | grep "test-" || true) 73 | port_8443=$(curl --resolve linode.test:8443:$IP --cacert ../certificates/ca.crt -s https://linode.test:8443 | grep "test-" || true) 74 | 75 | if [[ -z $port_80 || -z $port_8080 || -z $port_443 || -z $port_8443 ]]; then 76 | echo "pod not reachable on all ports, retrying in 20 seconds..." 77 | echo "port 80: $port_80" 78 | echo "port 8080: $port_8080" 79 | echo "port 443: $port_443" 80 | echo "port 8443: $port_8443" 81 | sleep 20 82 | else 83 | echo "all pods responded" 84 | break 85 | fi 86 | done 87 | check: 88 | ($error == null): true 89 | (contains($stdout, 'all pods responded')): true 90 | -------------------------------------------------------------------------------- /e2e/test/lb-with-multiple-http-https-ports/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: multiple-http-https-ports 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: multiple-http-https-ports 13 | template: 14 | metadata: 15 | labels: 16 | app: multiple-http-https-ports 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: alpha 23 | containerPort: 8080 24 | protocol: TCP 25 | - name: beta 26 | containerPort: 8989 27 | protocol: TCP 28 | env: 29 | - name: POD_NAME 30 | valueFrom: 31 | fieldRef: 32 | apiVersion: v1 33 | fieldPath: metadata.name 34 | --- 35 | apiVersion: v1 36 | kind: Service 37 | metadata: 38 | annotations: 39 | service.beta.kubernetes.io/linode-loadbalancer-default-protocol: https 40 | service.beta.kubernetes.io/linode-loadbalancer-port-80: '{"protocol": "http"}' 41 | service.beta.kubernetes.io/linode-loadbalancer-port-443: '{"tls-secret-name": "tls-secret-1"}' 42 | service.beta.kubernetes.io/linode-loadbalancer-port-8080: '{"protocol": "http"}' 43 | service.beta.kubernetes.io/linode-loadbalancer-port-8443: '{"tls-secret-name": "tls-secret-2", "protocol": "https"}' 44 | name: svc-test 45 | labels: 46 | app: multiple-http-https-ports 47 | spec: 48 | type: LoadBalancer 49 | selector: 50 | app: multiple-http-https-ports 51 | ports: 52 | - name: http-1 53 | protocol: TCP 54 | port: 80 55 | targetPort: 8989 56 | - name: http-2 57 | protocol: TCP 58 | port: 8080 59 | targetPort: 8080 60 | - name: https-1 61 | protocol: TCP 62 | port: 443 63 | targetPort: 8080 64 | - name: https-2 65 | protocol: TCP 66 | port: 8443 67 | targetPort: 8989 68 | sessionAffinity: None 69 | -------------------------------------------------------------------------------- /e2e/test/lb-with-node-addition/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 
apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: with-node-addition 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: with-node-addition 13 | template: 14 | metadata: 15 | labels: 16 | app: with-node-addition 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 8080 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | labels: 37 | app: with-node-addition 38 | spec: 39 | type: LoadBalancer 40 | selector: 41 | app: with-node-addition 42 | ports: 43 | - name: http-1 44 | protocol: TCP 45 | port: 80 46 | targetPort: 8080 47 | sessionAffinity: None 48 | -------------------------------------------------------------------------------- /e2e/test/lb-with-proxyprotocol-default-annotation/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: proxyprotocol-default-annotation 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: proxyprotocol-default-annotation 13 | template: 14 | metadata: 15 | labels: 16 | app: proxyprotocol-default-annotation 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 80 24 | protocol: TCP 25 | - name: http-2 26 | containerPort: 8080 27 | protocol: TCP 28 | env: 29 | - name: POD_NAME 30 | valueFrom: 31 | fieldRef: 32 | apiVersion: v1 33 | fieldPath: metadata.name 34 | --- 35 | apiVersion: v1 36 | kind: Service 37 | metadata: 38 | name: svc-test 39 | labels: 40 | app: proxyprotocol-default-annotation 41 | spec: 42 | type: LoadBalancer 43 | selector: 44 | app: proxyprotocol-default-annotation 45 | ports: 46 | - name: http-1 47 | protocol: TCP 48 | port: 80 49 | targetPort: 80 50 | - name: http-2 51 | protocol: TCP 52 | port: 8080 53 | targetPort: 8080 54 | sessionAffinity: None 55 | -------------------------------------------------------------------------------- /e2e/test/lb-with-proxyprotocol-override/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: proxyprotocol-override 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: proxyprotocol-override 13 | template: 14 | metadata: 15 | labels: 16 | app: proxyprotocol-override 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 80 24 | protocol: TCP 25 | - name: http-2 26 | containerPort: 8080 27 | protocol: TCP 28 | env: 29 | - name: POD_NAME 30 | valueFrom: 31 | fieldRef: 32 | apiVersion: v1 33 | fieldPath: metadata.name 34 | --- 35 | apiVersion: v1 36 | kind: Service 37 | metadata: 38 | name: svc-test 39 | labels: 40 | app: proxyprotocol-override 41 | spec: 42 | type: LoadBalancer 43 | selector: 44 | app: proxyprotocol-override 45 | ports: 46 | - name: http-1 47 | protocol: TCP 48 | port: 80 49 | targetPort: 80 50 | - name: http-2 51 | protocol: TCP 52 | port: 8080 53 | targetPort: 8080 54 | sessionAffinity: None 55 | -------------------------------------------------------------------------------- 
/e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-with-proxyprotocol-port-specific 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-with-proxyprotocol-port-specific" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check endpoints exist 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Endpoints 29 | metadata: 30 | name: svc-test 31 | (subsets[0].addresses != null): true 32 | (subsets[0].ports != null): true 33 | - name: Annotate service port 8080 with v2 34 | try: 35 | - script: 36 | content: | 37 | set -e 38 | kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-8080='{"proxy-protocol": "v2"}' 39 | sleep 10 40 | check: 41 | ($error == null): true 42 | - name: Check NodeBalancerConfig for port 80 to not have ProxyProtocol and port 8080 to have ProxyProtocol v2 43 | try: 44 | - script: 45 | content: | 46 | set -e 47 | 48 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 49 | 50 | nbconfig=$(curl -s \ 51 | -H "Authorization: Bearer $LINODE_TOKEN" \ 52 | -H "Content-Type: application/json" \ 53 | "https://api.linode.com/v4/nodebalancers/$nbid/configs") 54 | 55 | port_80_none=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "none"') 56 | port_8080_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v2"') 57 | 58 | if [[ $port_80_none == "true" && $port_8080_v2 == "true" ]]; then 59 | echo "Conditions met" 60 | else 61 | echo "Conditions not met" 62 | fi 63 | check: 64 | ($error): ~ 65 | (contains($stdout, 'Conditions met')): true 66 | -------------------------------------------------------------------------------- /e2e/test/lb-with-proxyprotocol-port-specific/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: proxyprotocol-port-specific 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: proxyprotocol-port-specific 13 | template: 14 | metadata: 15 | labels: 16 | app: proxyprotocol-port-specific 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 80 24 | protocol: TCP 25 | - name: http-2 26 | containerPort: 8080 27 | protocol: TCP 28 | env: 29 | - name: POD_NAME 30 | valueFrom: 31 | fieldRef: 32 | apiVersion: v1 33 | fieldPath: metadata.name 34 | --- 35 | apiVersion: v1 36 | kind: Service 37 | metadata: 38 | name: svc-test 39 | labels: 40 | app: proxyprotocol-port-specific 41 | spec: 42 | type: LoadBalancer 43 | selector: 44 | app: proxyprotocol-port-specific 45 | ports: 46 | - name: http-1 47 | protocol: TCP 48 | port: 80 49 | targetPort: 80 50 | - name: http-2 51 | protocol: TCP 52 | port: 8080 53 | targetPort: 8080 54 | sessionAffinity: None 55 | --------------------------------------------------------------------------------
/e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-with-proxyprotocol-set 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-with-proxyprotocol-set" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check endpoints exist 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Endpoints 29 | metadata: 30 | name: svc-test 31 | (subsets[0].addresses != null): true 32 | (subsets[0].ports != null): true 33 | - name: Check that loadbalancer ip is assigned 34 | try: 35 | - assert: 36 | resource: 37 | apiVersion: v1 38 | kind: Service 39 | metadata: 40 | name: svc-test 41 | status: 42 | (loadBalancer.ingress[0].ip != null): true 43 | - name: Annotate service port 80 with v1 and 8080 with v2 44 | try: 45 | - script: 46 | content: | 47 | set -e 48 | kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-80='{"proxy-protocol": "v1"}' 49 | kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-8080='{"proxy-protocol": "v2"}' 50 | sleep 10 51 | check: 52 | ($error == null): true 53 | - name: Check NodeBalancerConfig for port 80 to have ProxyProtocol v1 and port 8080 to have ProxyProtocol v2 54 | try: 55 | - script: 56 | content: | 57 | set -e 58 | 59 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 60 | 61 | nbconfig=$(curl -s \ 62 | -H "Authorization: Bearer $LINODE_TOKEN" \ 63 | -H "Content-Type: application/json" \ 64 | "https://api.linode.com/v4/nodebalancers/$nbid/configs") 65 | 66 | port_80_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v1"') 67 | port_8080_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v2"') 68 | 69 | if [[ $port_80_v1 == "true" && $port_8080_v2 == "true" ]]; then 70 | echo "Conditions met" 71 | else 72 | echo "Conditions not met" 73 | fi 74 | check: 75 | ($error): ~ 76 | (contains($stdout, 'Conditions met')): true 77 | -------------------------------------------------------------------------------- /e2e/test/lb-with-proxyprotocol-set/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: proxyprotocol-set 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: proxyprotocol-set 13 | template: 14 | metadata: 15 | labels: 16 | app: proxyprotocol-set 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 80 24 | protocol: TCP 25 | - name: http-2 26 | containerPort: 8080 27 | protocol: TCP 28 | env: 29 | - name: POD_NAME 30 | valueFrom: 31 | fieldRef: 32 | apiVersion: v1 33 | fieldPath: metadata.name 34 | --- 35 | apiVersion: v1 36 | kind: Service 37 | metadata: 38 | name: svc-test 39 | labels: 40 | app: proxyprotocol-set 41 | spec: 42 | type: LoadBalancer 43 | selector: 44 | app: proxyprotocol-set 45 | ports: 46 | - 
name: http-1 47 | protocol: TCP 48 | port: 80 49 | targetPort: 80 50 | - name: http-2 51 | protocol: TCP 52 | port: 8080 53 | targetPort: 8080 54 | sessionAffinity: None 55 | -------------------------------------------------------------------------------- /e2e/test/lb-with-udp-ports-algorithm/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-with-udp-ports-algorithm 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-with-udp-ports-algorithm" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check that loadbalancer ip is assigned 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: svc-test 31 | status: 32 | (loadBalancer.ingress[0].ip != null): true 33 | - name: Fetch nodebalancer config for port 7070 34 | try: 35 | - script: 36 | content: | 37 | set -euo pipefail 38 | 39 | echo "Starting e2e test" 40 | 41 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 42 | 43 | echo "Nodebalancer ID: $nbid" 44 | 45 | nbconfig=$(LINODE_TOKEN=$LINODE_TOKEN NBID=$nbid ../scripts/get-nb-config.sh) 46 | 47 | echo "Nodebalancer config found, updating config algorithm" 48 | 49 | kubectl annotate --overwrite svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-default-algorithm=ring_hash 50 | sleep 5s 51 | 52 | echo "Verifying that algorithm is set to ring hash" 53 | nbconfig=$(LINODE_TOKEN=$LINODE_TOKEN NBID=$nbid ../scripts/get-nb-config.sh) 54 | algorithm=$(echo $nbconfig | jq -r '.algorithm') 55 | echo "algorithm is $algorithm" 56 | check: 57 | ($error == null): true 58 | (contains($stdout, 'algorithm is ring_hash')): true 59 | -------------------------------------------------------------------------------- /e2e/test/lb-with-udp-ports-algorithm/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: lb-with-udp-ports-algorithm 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: lb-with-udp-ports-algorithm 13 | template: 14 | metadata: 15 | labels: 16 | app: lb-with-udp-ports-algorithm 17 | spec: 18 | containers: 19 | - image: rahulait/test-server:0.1 20 | name: test 21 | ports: 22 | - name: udp 23 | containerPort: 7070 24 | protocol: UDP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | labels: 37 | app: lb-with-udp-ports-algorithm 38 | spec: 39 | type: LoadBalancer 40 | selector: 41 | app: lb-with-udp-ports-algorithm 42 | ports: 43 | - name: udp 44 | protocol: UDP 45 | port: 7070 46 | targetPort: 7070 47 | sessionAffinity: None 48 | -------------------------------------------------------------------------------- /e2e/test/lb-with-udp-ports-change-port/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: 
$schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-with-udp-ports-change-port 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-with-udp-ports-change-port" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check that loadbalancer ip is assigned 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: svc-test 31 | status: 32 | (loadBalancer.ingress[0].ip != null): true 33 | - name: Fetch nodebalancer config for port 7070 34 | try: 35 | - script: 36 | content: | 37 | set -euo pipefail 38 | 39 | echo "Starting e2e test" 40 | 41 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 42 | 43 | echo "Nodebalancer ID: $nbid" 44 | 45 | nbconfig=$(LINODE_TOKEN=$LINODE_TOKEN NBID=$nbid ../scripts/get-nb-config.sh) 46 | 47 | echo "Nodebalancer config found, updating config udp_check_port" 48 | 49 | kubectl annotate --overwrite svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-udp-check-port=4500 50 | sleep 5s 51 | 52 | echo "Verifying that udp_check_port is set to 4500" 53 | nbconfig=$(LINODE_TOKEN=$LINODE_TOKEN NBID=$nbid ../scripts/get-nb-config.sh) 54 | udp_check_port=$(echo $nbconfig | jq -r '.udp_check_port') 55 | echo "udp_check_port is $udp_check_port" 56 | check: 57 | ($error == null): true 58 | (contains($stdout, 'udp_check_port is 4500')): true 59 | -------------------------------------------------------------------------------- /e2e/test/lb-with-udp-ports-change-port/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: lb-with-udp-ports-change-port 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: lb-with-udp-ports-change-port 13 | template: 14 | metadata: 15 | labels: 16 | app: lb-with-udp-ports-change-port 17 | spec: 18 | containers: 19 | - image: rahulait/test-server:0.1 20 | name: test 21 | ports: 22 | - name: udp 23 | containerPort: 7070 24 | protocol: UDP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | labels: 37 | app: lb-with-udp-ports-change-port 38 | spec: 39 | type: LoadBalancer 40 | selector: 41 | app: lb-with-udp-ports-change-port 42 | ports: 43 | - name: udp 44 | protocol: UDP 45 | port: 7070 46 | targetPort: 7070 47 | sessionAffinity: None 48 | -------------------------------------------------------------------------------- /e2e/test/lb-with-udp-ports-mode/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-with-udp-ports-mode 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-with-udp-ports-mode" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 
17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check that loadbalancer ip is assigned 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: svc-test 31 | status: 32 | (loadBalancer.ingress[0].ip != null): true 33 | - name: Fetch nodebalancer config for port 7070 34 | try: 35 | - script: 36 | content: | 37 | set -euo pipefail 38 | 39 | echo "Starting e2e test" 40 | 41 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 42 | 43 | echo "Nodebalancer ID: $nbid" 44 | 45 | nbconfig=$(LINODE_TOKEN=$LINODE_TOKEN NBID=$nbid ../scripts/get-nb-config.sh) 46 | config_id=$(echo $nbconfig | jq -r '.id') 47 | 48 | mode=$(curl -s \ 49 | --url $LINODE_URL/v4beta/nodebalancers/$nbid/configs/$config_id/nodes \ 50 | -H 'accept: application/json' \ 51 | -H "Authorization: Bearer $LINODE_TOKEN" | jq -r '.data[0].mode') 52 | echo "mode is $mode" 53 | check: 54 | ($error == null): true 55 | (contains($stdout, 'mode is none')): true 56 | -------------------------------------------------------------------------------- /e2e/test/lb-with-udp-ports-mode/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: lb-with-udp-ports-mode 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: lb-with-udp-ports-mode 13 | template: 14 | metadata: 15 | labels: 16 | app: lb-with-udp-ports-mode 17 | spec: 18 | containers: 19 | - image: rahulait/test-server:0.1 20 | name: test 21 | ports: 22 | - name: udp 23 | containerPort: 7070 24 | protocol: UDP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | labels: 37 | app: lb-with-udp-ports-mode 38 | spec: 39 | type: LoadBalancer 40 | selector: 41 | app: lb-with-udp-ports-mode 42 | ports: 43 | - name: udp 44 | protocol: UDP 45 | port: 7070 46 | targetPort: 7070 47 | sessionAffinity: None 48 | -------------------------------------------------------------------------------- /e2e/test/lb-with-udp-ports-stickiness/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-with-udp-ports-stickiness 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-with-udp-ports-stickiness" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check that loadbalancer ip is assigned 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: svc-test 31 | status: 32 | (loadBalancer.ingress[0].ip != null): true 33 | - name: Fetch nodebalancer config for port 7070 34 | try: 35 | - script: 36 | content: | 37 | set -euo pipefail 38 | 39 | echo "Starting e2e test" 40 | 41 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 42 | 43 | echo "Nodebalancer ID: $nbid" 44 | 45 | 
nbconfig=$(LINODE_TOKEN=$LINODE_TOKEN NBID=$nbid ../scripts/get-nb-config.sh) 46 | 47 | echo "Nodebalancer config found, updating config stickiness" 48 | 49 | kubectl annotate --overwrite svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-default-stickiness=source_ip 50 | sleep 5s 51 | 52 | echo "Verifying that stickiness is set to source_ip" 53 | nbconfig=$(LINODE_TOKEN=$LINODE_TOKEN NBID=$nbid ../scripts/get-nb-config.sh) 54 | stickiness=$(echo $nbconfig | jq -r '.stickiness') 55 | echo "stickiness is $stickiness" 56 | check: 57 | ($error == null): true 58 | (contains($stdout, 'stickiness is source_ip')): true 59 | -------------------------------------------------------------------------------- /e2e/test/lb-with-udp-ports-stickiness/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: lb-with-udp-ports-stickiness 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: lb-with-udp-ports-stickiness 13 | template: 14 | metadata: 15 | labels: 16 | app: lb-with-udp-ports-stickiness 17 | spec: 18 | containers: 19 | - image: rahulait/test-server:0.1 20 | name: test 21 | ports: 22 | - name: udp 23 | containerPort: 7070 24 | protocol: UDP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | labels: 37 | app: lb-with-udp-ports-stickiness 38 | spec: 39 | type: LoadBalancer 40 | selector: 41 | app: lb-with-udp-ports-stickiness 42 | ports: 43 | - name: udp 44 | protocol: UDP 45 | port: 7070 46 | targetPort: 7070 47 | sessionAffinity: None 48 | -------------------------------------------------------------------------------- /e2e/test/lb-with-udp-ports/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-with-udp-ports 6 | labels: 7 | all: 8 | lke: 9 | spec: 10 | namespace: "lb-with-udp-ports" 11 | steps: 12 | - name: Create pods and services 13 | try: 14 | - apply: 15 | file: create-pods-services.yaml 16 | catch: 17 | - describe: 18 | apiVersion: v1 19 | kind: Pod 20 | - describe: 21 | apiVersion: v1 22 | kind: Service 23 | - name: Check that loadbalancer ip is assigned 24 | try: 25 | - assert: 26 | resource: 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: svc-test 31 | status: 32 | (loadBalancer.ingress[0].ip != null): true 33 | - name: Fetch nodebalancer config for port 7070 34 | try: 35 | - script: 36 | content: | 37 | set -euo pipefail 38 | 39 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 40 | 41 | echo "Nodebalancer ID: $nbid" 42 | 43 | for i in {1..20}; do 44 | nbconfig=$(LINODE_TOKEN=$LINODE_TOKEN NBID=$nbid ../scripts/get-nb-config.sh) 45 | 46 | if [[ -z $nbconfig ]]; then 47 | echo "Failed fetching nodebalancer config for port 7070" 48 | fi 49 | 50 | port_7070_check=$(echo $nbconfig | jq '.check == "none"') 51 | port_7070_interval=$(echo $nbconfig | jq '.check_interval == 5') 52 | port_7070_timeout=$(echo $nbconfig | jq '.check_timeout == 3') 53 | port_7070_attempts=$(echo $nbconfig | jq '.check_attempts == 2') 54 | 
port_7070_protocol=$(echo $nbconfig | jq '.protocol == "udp"') 55 | # TODO: Implement the actual check for UDP node health when support is added 56 | # port_7070_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') 57 | port_7070_up_nodes="true" 58 | 59 | if [[ $port_7070_check == "true" && $port_7070_interval == "true" && $port_7070_timeout == "true" && $port_7070_attempts == "true" && $port_7070_protocol == "true" && $port_7070_up_nodes == "true" ]]; then 60 | echo "All conditions met" 61 | break 62 | fi 63 | echo "Conditions not met, retrying in 20 seconds..." 64 | echo "check: $port_7070_check" 65 | echo "interval: $port_7070_interval" 66 | echo "timeout: $port_7070_timeout" 67 | echo "attempts: $port_7070_attempts" 68 | echo "protocol: $port_7070_protocol" 69 | echo "up_nodes: $port_7070_up_nodes" 70 | sleep 20 71 | done 72 | check: 73 | ($error == null): true 74 | (contains($stdout, 'All conditions met')): true 75 | -------------------------------------------------------------------------------- /e2e/test/lb-with-udp-ports/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: lb-with-udp-ports 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: lb-with-udp-ports 13 | template: 14 | metadata: 15 | labels: 16 | app: lb-with-udp-ports 17 | spec: 18 | containers: 19 | - image: rahulait/test-server:0.1 20 | name: test 21 | ports: 22 | - name: udp 23 | containerPort: 7070 24 | protocol: UDP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | labels: 37 | app: lb-with-udp-ports 38 | spec: 39 | type: LoadBalancer 40 | selector: 41 | app: lb-with-udp-ports 42 | ports: 43 | - name: udp 44 | protocol: UDP 45 | port: 7070 46 | targetPort: 7070 47 | sessionAffinity: None 48 | -------------------------------------------------------------------------------- /e2e/test/lb-with-vpc-backends/chainsaw-test.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Test 4 | metadata: 5 | name: lb-with-vpc-backends 6 | labels: 7 | all: 8 | spec: 9 | namespace: "lb-with-vpc-backends" 10 | steps: 11 | - name: Create pods and services 12 | try: 13 | - apply: 14 | file: create-pods-services.yaml 15 | catch: 16 | - describe: 17 | apiVersion: v1 18 | kind: Pod 19 | - describe: 20 | apiVersion: v1 21 | kind: Service 22 | - name: Check endpoints exist 23 | try: 24 | - assert: 25 | resource: 26 | apiVersion: v1 27 | kind: Endpoints 28 | metadata: 29 | name: svc-test 30 | (subsets[0].addresses != null): true 31 | (subsets[0].ports != null): true 32 | - name: Check that loadbalancer ip is assigned 33 | try: 34 | - assert: 35 | resource: 36 | apiVersion: v1 37 | kind: Service 38 | metadata: 39 | name: svc-test 40 | status: 41 | (loadBalancer.ingress[0].ip != null): true 42 | - name: Check NodeBalancerConfig for backend ips 43 | try: 44 | - script: 45 | content: | 46 | set -e 47 | 48 | nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) 49 | 50 | nbconfig=$(curl -s \ 51 | -H "Authorization: Bearer $LINODE_TOKEN" \ 52 | -H 
"Content-Type: application/json" \ 53 | "https://api.linode.com/v4/nodebalancers/$nbid/configs") 54 | 55 | config_id=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .id') 56 | 57 | # Get nodes from the config 58 | nodes=$(curl -s \ 59 | -H "Authorization: Bearer $LINODE_TOKEN" \ 60 | -H "Content-Type: application/json" \ 61 | "https://api.linode.com/v4/nodebalancers/$nbid/configs/$config_id/nodes") 62 | 63 | # Extract all addresses and remove ports 64 | addresses=$(echo "$json_data" | jq -r '.data[].address' | sed 's/:[0-9]*$//') 65 | 66 | for ip in $addresses; do 67 | if [[ $ip =~ ^10\.0\.0\.[0-9]+$ ]]; then 68 | echo "$ip is in the 10.0.0.0/8 subnet" 69 | else 70 | echo "$ip is NOT in the 10.0.0.0/8 subnet" 71 | fi 72 | done 73 | check: 74 | ($error): ~ 75 | (contains($stdout, 'is NOT in the 10.0.0.0/8 subnet')): false 76 | -------------------------------------------------------------------------------- /e2e/test/lb-with-vpc-backends/create-pods-services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: vpc-backends 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: vpc-backends 13 | template: 14 | metadata: 15 | labels: 16 | app: vpc-backends 17 | spec: 18 | containers: 19 | - image: appscode/test-server:2.3 20 | name: test 21 | ports: 22 | - name: http-1 23 | containerPort: 80 24 | protocol: TCP 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.name 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: svc-test 36 | annotations: 37 | service.beta.kubernetes.io/linode-loadbalancer-backend-ipv4-range: "10.100.0.0/30" 38 | labels: 39 | app: vpc-backends 40 | spec: 41 | type: LoadBalancer 42 | selector: 43 | app: vpc-backends 44 | ports: 45 | - name: http-1 46 | protocol: TCP 47 | port: 80 48 | targetPort: 80 49 | sessionAffinity: None 50 | -------------------------------------------------------------------------------- /e2e/test/scripts/get-nb-config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | nbconfig=$(curl -s \ 6 | -H "Authorization: Bearer $LINODE_TOKEN" \ 7 | -H "Content-Type: application/json" \ 8 | "$LINODE_URL/v4/nodebalancers/$NBID/configs" | jq '.data[] | select(.port == 7070)' || true ) 9 | 10 | echo $nbconfig 11 | -------------------------------------------------------------------------------- /e2e/test/scripts/get-nb-id.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | re='^[0-9]+$' 6 | 7 | hostname=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].hostname) 8 | ip=$(echo $hostname | awk -F'.' '{gsub("-", ".", $1); print $1}') 9 | nbid=$(curl -s \ 10 | -H "Authorization: Bearer $LINODE_TOKEN" \ 11 | -H "Content-Type: application/json" \ 12 | -H "X-Filter: {\"ipv4\": \"$ip\"}" \ 13 | "$LINODE_URL/v4/nodebalancers" | jq .data[].id) 14 | 15 | if ! 
[[ $nbid =~ $re ]]; then 16 | echo "Nodebalancer id [$nbid] is incorrect" 17 | exit 1 18 | fi 19 | 20 | echo $nbid 21 | -------------------------------------------------------------------------------- /examples/.gitignore: -------------------------------------------------------------------------------- 1 | example.key 2 | example.crt 3 | -------------------------------------------------------------------------------- /examples/http-nginx-firewalled.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: http-lb 6 | namespace: kube-system 7 | annotations: 8 | service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" 9 | service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | 10 | { 11 | "allowList": { 12 | "ipv4": ["8.8.8.8/32"], 13 | "ipv6": ["dead:beef::/64"] 14 | } 15 | } 16 | spec: 17 | type: LoadBalancer 18 | selector: 19 | app: nginx-http-example 20 | ports: 21 | - name: http 22 | protocol: TCP 23 | port: 80 24 | targetPort: 80 25 | 26 | --- 27 | apiVersion: apps/v1 28 | kind: Deployment 29 | metadata: 30 | name: nginx-http-deployment 31 | namespace: kube-system 32 | spec: 33 | replicas: 2 34 | selector: 35 | matchLabels: 36 | app: nginx-http-example 37 | template: 38 | metadata: 39 | labels: 40 | app: nginx-http-example 41 | spec: 42 | containers: 43 | - name: nginx 44 | image: nginx 45 | ports: 46 | - containerPort: 80 47 | protocol: TCP 48 | -------------------------------------------------------------------------------- /examples/http-nginx.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: http-lb 6 | annotations: 7 | service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" 8 | spec: 9 | type: LoadBalancer 10 | selector: 11 | app: nginx-http-example 12 | ports: 13 | - name: http 14 | protocol: TCP 15 | port: 80 16 | targetPort: 80 17 | 18 | --- 19 | apiVersion: apps/v1 20 | kind: Deployment 21 | metadata: 22 | name: nginx-http-deployment 23 | spec: 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: nginx-http-example 28 | template: 29 | metadata: 30 | labels: 31 | app: nginx-http-example 32 | spec: 33 | containers: 34 | - name: nginx 35 | image: nginx 36 | ports: 37 | - containerPort: 80 38 | protocol: TCP 39 | -------------------------------------------------------------------------------- /examples/https-nginx.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: https-lb 6 | annotations: 7 | service.beta.kubernetes.io/linode-loadbalancer-throttle: "4" 8 | service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" 9 | service.beta.kubernetes.io/linode-loadbalancer-port-443: | 10 | { 11 | "tls-secret-name": "example-secret", 12 | "protocol": "https" 13 | } 14 | spec: 15 | type: LoadBalancer 16 | selector: 17 | app: nginx-https-example 18 | ports: 19 | - name: http 20 | protocol: TCP 21 | port: 80 22 | targetPort: http 23 | - name: https 24 | protocol: TCP 25 | port: 443 26 | targetPort: https 27 | 28 | --- 29 | apiVersion: apps/v1 30 | kind: Deployment 31 | metadata: 32 | name: nginx-https-deployment 33 | spec: 34 | replicas: 2 35 | selector: 36 | matchLabels: 37 | app: nginx-https-example 38 | template: 39 | metadata: 40 | labels: 41 | app: nginx-https-example 42 | spec: 43 | containers: 44 | - name: nginx 45 | image: 
nginx 46 | ports: 47 | - name: http 48 | containerPort: 80 49 | protocol: TCP 50 | - name: https 51 | containerPort: 80 52 | protocol: TCP 53 | -------------------------------------------------------------------------------- /examples/tcp-nginx.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: tcp-lb 6 | spec: 7 | type: LoadBalancer 8 | selector: 9 | app: nginx-tcp-example 10 | ports: 11 | - name: http 12 | protocol: TCP 13 | port: 80 14 | targetPort: 80 15 | 16 | --- 17 | apiVersion: apps/v1 18 | kind: Deployment 19 | metadata: 20 | name: nginx-tcp-deployment 21 | spec: 22 | replicas: 2 23 | selector: 24 | matchLabels: 25 | app: nginx-tcp-example 26 | template: 27 | metadata: 28 | labels: 29 | app: nginx-tcp-example 30 | spec: 31 | containers: 32 | - name: nginx 33 | image: nginx 34 | ports: 35 | - containerPort: 80 36 | protocol: TCP 37 | -------------------------------------------------------------------------------- /examples/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | kubectl apply -f ./http-nginx.yaml 6 | kubectl apply -f ./tcp-nginx.yaml 7 | kubectl apply -f ./udp-example.yaml 8 | 9 | openssl req -newkey rsa:4096 \ 10 | -x509 \ 11 | -sha256 \ 12 | -days 3650 \ 13 | -nodes \ 14 | -out example.crt \ 15 | -keyout example.key \ 16 | -subj "/C=na/ST=na/L=na/O=na/OU=na/CN=na" 17 | 18 | kubectl delete secret example-secret || true 19 | kubectl create secret tls example-secret --cert=example.crt --key=example.key 20 | kubectl apply -f ./https-nginx.yaml 21 | 22 | -------------------------------------------------------------------------------- /examples/udp-example.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: udp-lb 6 | spec: 7 | type: LoadBalancer 8 | selector: 9 | app: udp-example 10 | ports: 11 | - name: udp 12 | protocol: UDP 13 | port: 7070 14 | targetPort: 7070 15 | --- 16 | apiVersion: apps/v1 17 | kind: Deployment 18 | metadata: 19 | name: udp-deployment 20 | spec: 21 | replicas: 2 22 | selector: 23 | matchLabels: 24 | app: udp-example 25 | template: 26 | metadata: 27 | labels: 28 | app: udp-example 29 | spec: 30 | containers: 31 | - name: test-server 32 | image: rahulait/test-server:0.1 33 | ports: 34 | - containerPort: 7070 35 | protocol: UDP 36 | 37 | -------------------------------------------------------------------------------- /hack/builddeps.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | go get -u golang.org/x/tools/cmd/goimports 4 | go get github.com/onsi/ginkgo/ginkgo 5 | go install github.com/onsi/ginkgo/ginkgo 6 | go get -u github.com/jteeuwen/go-bindata/... 7 | -------------------------------------------------------------------------------- /hack/coverage.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eou pipefail 3 | 4 | GOPATH=$(go env GOPATH) 5 | REPO_ROOT="$GOPATH/src/github.com/linode/linode-cloud-controller-manager" 6 | 7 | pushd $REPO_ROOT 8 | 9 | echo "" >coverage.txt 10 | 11 | for d in $(go list ./... 
| grep -v -e vendor -e test); do 12 | go test -v -race -coverprofile=profile.out -covermode=atomic "$d" 13 | if [ -f profile.out ]; then 14 | cat profile.out >>coverage.txt 15 | rm profile.out 16 | fi 17 | done 18 | 19 | popd 20 | -------------------------------------------------------------------------------- /hack/templates/prometheus.go.gotpl: -------------------------------------------------------------------------------- 1 | import ( 2 | "github.com/prometheus/client_golang/prometheus" 3 | "github.com/prometheus/client_golang/prometheus/promauto" 4 | ) 5 | 6 | {{ $decorator := (or .Vars.DecoratorName (printf "%sWithPrometheus" .Interface.Name)) }} 7 | {{ $metric_name := (or .Vars.MetricName (printf "ccm_linode_%s_requests_total" (down .Interface.Name))) }} 8 | 9 | // {{$decorator}} implements {{.Interface.Type}} interface with all methods wrapped 10 | // with Prometheus counters 11 | type {{$decorator}} struct { 12 | base {{.Interface.Type}} 13 | } 14 | 15 | var {{upFirst .Interface.Name}}MethodCounterVec = promauto.NewCounterVec( 16 | prometheus.CounterOpts{ 17 | Name: "{{$metric_name}}", 18 | Help: "{{ down .Interface.Name }} counters for each operation and its result", 19 | }, 20 | []string{"method", "result"}) 21 | 22 | // New{{.Interface.Name}}WithPrometheus returns an instance of the {{.Interface.Type}} decorated with prometheus metrics 23 | func New{{$decorator}}(base {{.Interface.Type}}) {{$decorator}} { 24 | return {{$decorator}} { 25 | base: base, 26 | } 27 | } 28 | 29 | {{range $method := .Interface.Methods}} 30 | // {{$method.Name}} implements {{$.Interface.Type}} 31 | func (_d {{$decorator}}) {{$method.Declaration}} { 32 | defer func() { 33 | result := "ok" 34 | {{- if $method.ReturnsError}} 35 | if err != nil { 36 | result = "error" 37 | } 38 | {{end}} 39 | {{upFirst $.Interface.Name}}MethodCounterVec.WithLabelValues("{{$method.Name}}", result).Inc() 40 | }() 41 | {{$method.Pass "_d.base."}} 42 | } 43 | {{end}} 44 | -------------------------------------------------------------------------------- /sentry/sentry.go: -------------------------------------------------------------------------------- 1 | // Package sentry implements logic for using Sentry for error reporting. 2 | package sentry 3 | 4 | import ( 5 | "context" 6 | "fmt" 7 | 8 | "github.com/getsentry/sentry-go" 9 | "k8s.io/klog/v2" 10 | ) 11 | 12 | var initialized bool 13 | 14 | // Initialize initializes a Sentry connection with the given client option values. 15 | func Initialize(dsn, environment, release string) error { 16 | if initialized { 17 | return fmt.Errorf("sentry Initialize called after initialization") 18 | } 19 | 20 | var clientOptions sentry.ClientOptions 21 | 22 | clientOptions.Dsn = dsn 23 | clientOptions.Environment = environment 24 | clientOptions.Release = release 25 | 26 | if err := sentry.Init(clientOptions); err != nil { 27 | return err 28 | } 29 | 30 | initialized = true 31 | 32 | return nil 33 | } 34 | 35 | // SetHubOnContext clones the current hub and sets it on the given context. 36 | func SetHubOnContext(ctx context.Context) context.Context { 37 | hub := sentry.CurrentHub().Clone() 38 | 39 | return sentry.SetHubOnContext(ctx, hub) 40 | } 41 | 42 | // getHubFromContext gets the current Sentry hub from the given context. If the context is missing a 43 | // Sentry hub, this function logs an error and returns nil. If Sentry has not been initialized, this 44 | // function also returns nil. 
45 | func getHubFromContext(ctx context.Context) *sentry.Hub { 46 | if !initialized { 47 | klog.V(5).Info("getHubFromContext: Sentry not initialized") 48 | return nil 49 | } 50 | 51 | if !sentry.HasHubOnContext(ctx) { 52 | klog.Error("getHubFromContext: context is missing Sentry hub") 53 | return nil 54 | } 55 | 56 | return sentry.GetHubFromContext(ctx) 57 | } 58 | 59 | // SetTag sets a tag for the hub associated with the given context. If Sentry is not enabled or the 60 | // context has no associated hub, this function will have no effect. 61 | func SetTag(ctx context.Context, key, value string) { 62 | hub := getHubFromContext(ctx) 63 | 64 | if hub == nil { 65 | return 66 | } 67 | 68 | hub.Scope().SetTag(key, value) 69 | } 70 | 71 | // CaptureError captures the current error and sends it to Sentry using the hub from the current 72 | // context. This should only be used for actionable errors to avoid flooding Sentry with useless 73 | // reports. If Sentry is not enabled or the context has no associated hub, this function will 74 | // have no effect. 75 | func CaptureError(ctx context.Context, err error) { 76 | hub := getHubFromContext(ctx) 77 | 78 | if hub == nil { 79 | return 80 | } 81 | 82 | hub.CaptureException(err) 83 | } 84 | --------------------------------------------------------------------------------