├── .chainsaw.yaml ├── .gitattributes ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE.md ├── PULL_REQUEST_TEMPLATE.md ├── dependabot.yml ├── filters.yml ├── labels.yml ├── release-drafter.yml └── workflows │ ├── ci.yml │ ├── helm.yml │ ├── label-sync.yml │ ├── pr-labeler.yml │ ├── release-drafter.yml │ └── release.yml ├── .gitignore ├── .golangci.yml ├── CHANGELOG.md ├── Dockerfile ├── Dockerfile.dev ├── LICENSE ├── Makefile ├── README.md ├── codecov.yml ├── deploy └── kubernetes │ ├── base │ ├── accounts-roles-bindings.yaml │ ├── csi-driver-instance.yaml │ ├── csi-storageclass.yaml │ ├── ds-csi-linode-node.yaml │ ├── kustomization.yaml │ └── ss-csi-linode-controller.yaml │ ├── overlays │ ├── dev │ │ └── kustomization.yaml │ └── release │ │ ├── .gitignore │ │ └── kustomization.yaml.template │ └── sidecars │ ├── external-attacher │ ├── kustomization.yaml │ └── rbac.yaml │ ├── external-provisioner │ ├── kustomization.yaml │ └── rbac.yaml │ └── external-resizer │ ├── kustomization.yaml │ └── rbac.yaml ├── devbox.json ├── devbox.lock ├── docs ├── contributing.md ├── deployment.md ├── development-setup.md ├── encrypted-drives.md ├── example-images │ ├── controller-server │ │ ├── create-volume.jpg │ │ ├── delete-volume.jpg │ │ ├── publish-volume.jpg │ │ └── unpublish-volume.jpg │ ├── node-server │ │ ├── expand-volume.jpg │ │ ├── publish-volume.jpg │ │ ├── stage-volume.jpg │ │ ├── unpublish-volume.jpg │ │ └── unstage-volume.jpg │ ├── sidecars │ │ ├── create-volume.jpg │ │ ├── delete-volume.jpg │ │ ├── expand-volume.jpg │ │ ├── operations-sum.jpg │ │ ├── publish-volume.jpg │ │ ├── pvc.jpg │ │ ├── pvr.jpg │ │ ├── runtime-error.jpg │ │ ├── tt-create-volume.jpg │ │ ├── tt-delete-volume.jpg │ │ ├── tt-expand-volume.jpg │ │ ├── tt-publish-volume.jpg │ │ ├── tt-unpublish-volume.jpg │ │ └── unpublish-volume.jpg │ └── tracing │ │ ├── create-volume-continued.jpg │ │ ├── create-volume.jpg │ │ └── landing-page.jpg ├── metrics-documentation.md ├── observability.md ├── testing.md ├── topology-aware-provisioning.md ├── tracing-documentation.md ├── usage.md └── volume-tags.md ├── envrc.example ├── go.mod ├── go.sum ├── hack ├── fetch-manifests.sh ├── generate-yaml.sh ├── install-grafana.sh ├── install-prometheus.sh ├── release-yaml.sh ├── setup-dashboard.sh └── setup-tracing.sh ├── helm-chart └── csi-driver │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ ├── _helpers.tpl │ ├── csi-controller-attacher-binding-rbac.yaml │ ├── csi-controller-provisioner-binding-rbac.yaml │ ├── csi-controller-resizer-binding-rbac.yaml │ ├── csi-controller-serviceaccount.yaml │ ├── csi-linode-controller-metrics.yaml │ ├── csi-linode-controller.yaml │ ├── csi-node-serviceaccount.yaml │ ├── csi-secrets.yaml │ ├── daemonset.yaml │ ├── external-attacher-rbac.yaml │ ├── external-provisioner-rbac.yaml │ ├── external-resizer-rbac.yaml │ ├── linode-block-storage-retain.yaml │ ├── linode-block-storage.yaml │ ├── linode-csi-binding-rbac.yaml │ ├── linode-csi-rbac.yaml │ └── linodebs.csi.linode.com.yaml │ └── values.yaml ├── internal └── driver │ ├── capabilities.go │ ├── controllerserver.go │ ├── controllerserver_helper.go │ ├── controllerserver_helper_test.go │ ├── controllerserver_test.go │ ├── deploy │ └── releases │ │ └── linode-blockstorage-csi-driver.yaml │ ├── driver.go │ ├── driver_test.go │ ├── errors.go │ ├── examples │ └── kubernetes │ │ ├── csi-app-block.yaml │ │ ├── csi-app.yaml │ │ ├── csi-linode-blockstorage-encrypted.yaml │ │ ├── csi-pvc-block.yaml │ │ ├── csi-pvc.yaml │ │ ├── luks-enabled-vol.yaml │ │ └── 
topology-aware.yaml │ ├── identityserver.go │ ├── identityserver_test.go │ ├── lifecycle.go │ ├── limits.go │ ├── limits_test.go │ ├── luks.go │ ├── metadata.go │ ├── metadata_test.go │ ├── nodeserver.go │ ├── nodeserver_all.go │ ├── nodeserver_all_test.go │ ├── nodeserver_helpers.go │ ├── nodeserver_helpers_linux_test.go │ ├── nodeserver_helpers_test.go │ ├── nodeserver_luks_encryption_test.go │ ├── nodeserver_test.go │ ├── nodeserver_windows.go │ └── server.go ├── main.go ├── mocks ├── mock_cryptsetupclient.go ├── mock_device.go ├── mock_filesystem.go ├── mock_hwinfo.go ├── mock_linodeclient.go ├── mock_metadata.go └── mock_safe-mounter.go ├── observability ├── metrics │ ├── dashboard.json │ └── loadBalancer.yaml └── tracing │ ├── jager-deployment.yaml │ ├── jager-service.yaml │ ├── otel-configmap.yaml │ ├── otel-deployment.yaml │ └── otel-service.yaml ├── pkg ├── cryptsetup-client │ └── cryptsetup_client.go ├── device-manager │ ├── device.go │ └── device_test.go ├── filesystem │ └── filesystem.go ├── hwinfo │ └── hwinfo.go ├── linode-client │ ├── linode_client.go │ └── linode_client_test.go ├── linode-volumes │ ├── utils.go │ └── utils_test.go ├── logger │ ├── logger.go │ └── logger_test.go ├── mount-manager │ ├── safe_mounter.go │ └── safe_mounter_test.go └── observability │ ├── metrics.go │ └── tracker.go └── tests ├── csi-sanity ├── mkdir_in_pod.sh ├── rmdir_in_pod.sh ├── run-tests.sh └── socat.yaml ├── e2e ├── setup │ ├── ctlptl-config.yaml │ └── linode-secret.yaml └── test │ ├── check-volume-deleted.sh │ ├── check-volume-detached.sh │ ├── check-volume-size.sh │ ├── pod-pvc-basic-filesystem │ ├── assert-csi-driver-resources.yaml │ ├── assert-pvc-pod.yaml │ ├── chainsaw-test.yaml │ └── create-pvc-pod.yaml │ ├── pod-pvc-create-ext4-filesystem │ ├── assert-csi-driver-resources.yaml │ ├── assert-pvc-pod.yaml │ ├── chainsaw-test.yaml │ └── create-pvc-pod.yaml │ ├── pod-pvc-create-xfs-filesystem │ ├── assert-csi-driver-resources.yaml │ ├── assert-pvc-pod.yaml │ ├── chainsaw-test.yaml │ └── create-pvc-pod.yaml │ ├── pod-pvc-expand-raw-block-storage │ ├── assert-csi-driver-resources.yaml │ ├── assert-pvc-pod.yaml │ ├── chainsaw-test.yaml │ ├── create-pvc-pod.yaml │ └── update-pvc.yaml │ ├── pod-pvc-expand-storage-size │ ├── assert-csi-driver-resources.yaml │ ├── assert-pvc-pod.yaml │ ├── chainsaw-test.yaml │ ├── create-pvc-pod.yaml │ └── update-pvc.yaml │ ├── pod-pvc-linode-encryption │ ├── assert-csi-driver-resources.yaml │ ├── assert-pvc-pod.yaml │ ├── chainsaw-test.yaml │ └── create-pvc-pod.yaml │ ├── pod-pvc-luks-mov-volume │ ├── assert-csi-driver-resources.yaml │ ├── assert-pvc-pod.yaml │ ├── chainsaw-test.yaml │ └── create-storage-class-statefulset.yaml │ ├── pod-pvc-luks-remount │ ├── assert-csi-driver-resources.yaml │ ├── assert-pvc-pod.yaml │ ├── chainsaw-test.yaml │ ├── create-pod.yaml │ └── create-pvc.yaml │ ├── pod-pvc-luks │ ├── assert-csi-driver-resources.yaml │ ├── assert-pvc-pod.yaml │ ├── chainsaw-test.yaml │ └── create-pvc-pod.yaml │ ├── pod-pvc-readonly │ ├── assert-csi-driver-resources.yaml │ ├── assert-pvc-pod.yaml │ ├── chainsaw-test.yaml │ └── create-pvc-pod.yaml │ ├── pod-pvc-unexpected-reboot │ ├── assert-csi-driver-resources.yaml │ ├── assert-pod.yaml │ ├── assert-pvc-pod.yaml │ ├── chainsaw-test.yaml │ └── create-pvc-pod.yaml │ ├── statefulset-pvc │ ├── assert-csi-driver-resources.yaml │ ├── assert-statefulset-pvc.yaml │ ├── chainsaw-test.yaml │ └── create-redis-statefulset.yaml │ └── sts-pvc-unexpected-reboot │ ├── assert-csi-driver-resources.yaml │ ├── 
assert-sts.yaml │ ├── chainsaw-test.yaml │ └── create-sts.yaml └── upstream-e2e ├── run-tests.sh └── test-driver.yaml /.chainsaw.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/configuration-chainsaw-v1alpha1.json 2 | apiVersion: chainsaw.kyverno.io/v1alpha1 3 | kind: Configuration 4 | metadata: 5 | name: configuration 6 | spec: 7 | timeouts: 8 | assert: 5m0s 9 | cleanup: 5m0s 10 | delete: 5m0s 11 | error: 5m0s 12 | exec: 5m0s 13 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | *.sh text eol=lf -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @linode/cloud-native-services @linode/open-source 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## General: 2 | 3 | * [ ] Have you removed all sensitive information, including but not limited to access keys and passwords? 4 | * [ ] Have you checked to ensure there aren't other open or closed [Pull Requests](../../pulls) for the same bug/feature/question? 5 | 6 | ---- 7 | 8 | ## Feature Requests: 9 | * [ ] Have you explained your rationale for why this feature is needed? 10 | * [ ] Have you offered a proposed implementation/solution? 11 | 12 | ---- 13 | 14 | ## Bug Reporting 15 | 16 | ### Expected Behavior 17 | 18 | ### Actual Behavior 19 | 20 | ### Steps to Reproduce the Problem 21 | 22 | 1. 23 | 1. 24 | 1. 25 | 26 | ### Environment Specifications 27 | 28 | #### Screenshots, Code Blocks, and Logs 29 | 30 | #### Additional Notes 31 | 32 | ---- 33 | 34 | For general help or discussion, join the [Kubernetes Slack team](https://kubernetes.slack.com/messages/CD4B15LUR/details/) channel `#linode`. To sign up, use the [Kubernetes Slack inviter](http://slack.kubernetes.io/). 35 | 36 | The [Linode Community](https://www.linode.com/community/questions/) is a great place to get additional support. 37 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ### General: 2 | 3 | * [ ] Have you removed all sensitive information, including but not limited to access keys and passwords? 4 | * [ ] Have you checked to ensure there aren't other open or closed [Pull Requests](../../pulls) for the same bug/feature/question? 5 | 6 | ### Pull Request Guidelines: 7 | 8 | 1. [ ] Does your submission pass tests? 9 | 1. [ ] Have you added tests? 10 | 1. [ ] Are you addressing a single feature in this PR? 11 | 1. [ ] Are your commits atomic, addressing one change per commit? 12 | 1. [ ] Are you following the conventions of the language? 13 | 1. [ ] Have you saved your large formatting changes for a different PR, so we can focus on your work? 14 | 1. [ ] Have you explained your rationale for why this feature is needed? 15 | 1. 
[ ] Have you linked your PR to an [open issue](https://blog.github.com/2013-05-14-closing-issues-via-pull-requests/) 16 | 17 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | 4 | # Go - root directory 5 | - package-ecosystem: "gomod" 6 | directory: "/" 7 | schedule: 8 | interval: "weekly" 9 | ## group all dependencies with a k8s.io prefix into a single PR. 10 | groups: 11 | kubernetes: 12 | patterns: [ "k8s.io/*", "sigs.k8s.io/*" ] 13 | otel: 14 | patterns: ["go.opentelemetry.io/*"] 15 | commit-message: 16 | prefix: ":seedling:" 17 | labels: 18 | - "dependencies" 19 | 20 | # Docker 21 | - package-ecosystem: "docker" 22 | directory: "/" 23 | schedule: 24 | interval: "weekly" 25 | commit-message: 26 | prefix: ":seedling:" 27 | labels: 28 | - "dependencies" 29 | 30 | # github-actions 31 | - package-ecosystem: "github-actions" 32 | directory: "/" 33 | schedule: 34 | interval: "weekly" 35 | commit-message: 36 | prefix: ":seedling:" 37 | labels: 38 | - "dependencies" 39 | -------------------------------------------------------------------------------- /.github/filters.yml: -------------------------------------------------------------------------------- 1 | # Any file that is not a doc *.md file 2 | src: 3 | - "!**/**.md" 4 | -------------------------------------------------------------------------------- /.github/labels.yml: -------------------------------------------------------------------------------- 1 | # PR Labels 2 | - name: new-feature 3 | description: for new features in the changelog. 4 | color: 225fee 5 | - name: improvement 6 | description: for improvements in existing functionality in the changelog. 7 | color: 22ee47 8 | - name: repo-ci-improvement 9 | description: for improvements in the repository or CI workflow in the changelog. 10 | color: c922ee 11 | - name: bugfix 12 | description: for any bug fixes in the changelog. 13 | color: ed8e21 14 | - name: documentation 15 | description: for updates to the documentation in the changelog. 16 | color: d3e1e6 17 | - name: dependencies 18 | description: dependency updates including security fixes 19 | color: 5c9dff 20 | - name: testing 21 | description: for updates to the testing suite in the changelog. 22 | color: 933ac9 23 | - name: breaking-change 24 | description: for breaking changes in the changelog. 25 | color: ff0000 26 | - name: ignore-for-release 27 | description: PRs you do not want to render in the changelog. 28 | color: 7b8eac 29 | - name: deprecated 30 | description: for deprecated features in the changelog. 31 | color: e4e669 32 | - name: security 33 | description: for security fixes in the changelog. 34 | color: dd4739 35 | # Issue Labels 36 | - name: enhancement 37 | description: issues that request a enhancement. 38 | color: 22ee47 39 | - name: bug 40 | description: issues that report a bug. 
41 | color: ed8e21 42 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name-template: 'v$NEXT_PATCH_VERSION' 2 | tag-template: 'v$NEXT_PATCH_VERSION' 3 | exclude-labels: 4 | - ignore-for-release 5 | categories: 6 | - title: ⚠️ Breaking Change 7 | labels: 8 | - breaking-change 9 | - title: 🐛 Bug Fixes 10 | labels: 11 | - bugfix 12 | - title: 🚀 New Features 13 | labels: 14 | - new-feature 15 | - title: 💡 Improvements 16 | labels: 17 | - improvement 18 | - title: 🧪 Testing Improvements 19 | labels: 20 | - testing 21 | - title: ⚙️ Repo/CI Improvements 22 | labels: 23 | - repo-ci-improvement 24 | - title: 📖 Documentation 25 | labels: 26 | - documentation 27 | - title: 📦 Dependency Updates 28 | labels: 29 | - dependencies 30 | - title: Other Changes 31 | labels: 32 | - "*" 33 | autolabeler: 34 | - label: 'breaking-change' 35 | title: 36 | - '/.*\[breaking\].+/' 37 | - label: 'deprecation' 38 | title: 39 | - '/.*\[deprecation\].+/' 40 | - label: 'bugfix' 41 | title: 42 | - '/.*\[fix\].+/' 43 | - label: 'new-feature' 44 | title: 45 | - '/.*\[feat\].+/' 46 | - label: 'improvement' 47 | title: 48 | - '/.*\[improvement\].+/' 49 | - label: 'testing' 50 | title: 51 | - '/.*\[test\].+/' 52 | - label: 'repo-ci-improvement' 53 | title: 54 | - '/.*\[CI\].+/' 55 | - '/.*\[ci\].+/' 56 | - label: 'documentation' 57 | title: 58 | - '/.*\[docs\].+/' 59 | - label: 'dependencies' 60 | title: 61 | - '/.*\[deps\].+/' 62 | 63 | change-template: '- $TITLE by @$AUTHOR in #$NUMBER' 64 | no-changes-template: "- No changes" 65 | template: | 66 | ## What's Changed 67 | $CHANGES 68 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration 2 | on: 3 | workflow_dispatch: null 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - "*" 10 | 11 | jobs: 12 | changes: 13 | runs-on: ubuntu-latest 14 | outputs: 15 | paths: ${{ steps.filter.outputs.changes }} 16 | steps: 17 | - uses: actions/checkout@v4 18 | - name: Harden Runner 19 | uses: step-security/harden-runner@v2 20 | with: 21 | disable-sudo: true 22 | egress-policy: block 23 | allowed-endpoints: > 24 | api.github.com:443 25 | github.com:443 26 | - uses: dorny/paths-filter@v3 27 | id: filter 28 | with: 29 | base: ${{ github.ref }} 30 | filters: .github/filters.yml 31 | ci: 32 | runs-on: ubuntu-latest 33 | needs: changes 34 | if: ${{ contains(fromJSON(needs.changes.outputs.paths), 'src') }} 35 | steps: 36 | - name: Install cryptsetup 37 | run: | 38 | sudo apt-get update 39 | sudo apt install libcryptsetup12 libcryptsetup-dev 40 | - uses: actions/checkout@v4 41 | with: 42 | fetch-depth: 0 43 | - uses: actions/setup-go@v5 44 | with: 45 | go-version-file: go.mod 46 | - run: go vet ./... 47 | - name: Run golangci-lint 48 | uses: golangci/golangci-lint-action@v8 49 | with: 50 | version: latest 51 | verify: false 52 | - run: make build 53 | - run: go test -cover ./... 
-coverprofile ./coverage.out 54 | - name: Upload coverage reports to Codecov 55 | uses: codecov/codecov-action@v5 56 | with: 57 | files: ./coverage.out 58 | fail_ci_if_error: true 59 | verbose: true 60 | token: ${{ secrets.CODECOV_TOKEN }} 61 | slug: linode/linode-blockstorage-csi-driver 62 | e2e-tests: 63 | runs-on: ubuntu-latest 64 | needs: changes 65 | if: ${{ contains(fromJSON(needs.changes.outputs.paths), 'src') }} 66 | environment: ${{ github.event.pull_request.head.repo.fork == true && 'prod-external' || 'prod' }} 67 | env: 68 | GITHUB_TOKEN: ${{ secrets.github_token }} 69 | LINODE_TOKEN: ${{ secrets.LINODE_TOKEN }} 70 | IMAGE_VERSION: ${{ github.ref == 'refs/heads/main' && 'latest' || format('pr-{0}', github.event.number) || github.ref_name }} 71 | LINODE_REGION: us-lax 72 | LINODE_CONTROL_PLANE_MACHINE_TYPE: g6-standard-2 73 | LINODE_MACHINE_TYPE: g6-standard-2 74 | WORKER_NODES: ${{ github.ref == 'refs/heads/main' && '3' || '1' }} 75 | steps: 76 | - name: Install cryptsetup 77 | run: | 78 | sudo apt-get update 79 | sudo apt install libcryptsetup12 libcryptsetup-dev 80 | - uses: actions/checkout@v4 81 | with: 82 | fetch-depth: 0 83 | 84 | - name: Set up Go 85 | uses: actions/setup-go@v5 86 | with: 87 | go-version-file: "go.mod" 88 | check-latest: true 89 | 90 | - name: Login to Docker Hub 91 | uses: docker/login-action@v3 92 | with: 93 | username: ${{ secrets.DOCKER_USERNAME }} 94 | password: ${{ secrets.DOCKER_PASSWORD }} 95 | 96 | - name: Install devbox 97 | uses: jetify-com/devbox-install-action@v0.13.0 98 | 99 | - name: Setup CAPL Management Kind Cluster and CAPL Child Cluster For Testing 100 | run: devbox run mgmt-and-capl-cluster 101 | 102 | - name: Run E2E Tests 103 | run: devbox run e2e-test 104 | 105 | - name: Run CSI-Sanity Tests 106 | run: devbox run csi-sanity-test 107 | 108 | - name: run upstream E2E Tests 109 | if: github.ref == 'refs/heads/main' 110 | run: devbox run upstream-e2e-tests 111 | 112 | - name: Cleanup Resources 113 | if: always() 114 | run: devbox run cleanup-cluster 115 | -------------------------------------------------------------------------------- /.github/workflows/helm.yml: -------------------------------------------------------------------------------- 1 | name: Helm 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - 'helm-chart/**' 7 | workflow_dispatch: {} 8 | release: {} 9 | 10 | permissions: 11 | contents: read 12 | 13 | jobs: 14 | helm-test: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v4 19 | with: 20 | fetch-depth: 0 21 | 22 | - name: Set Version 23 | run: | 24 | TAG=$(git describe --tags --abbrev=0) 25 | sed -ie "s/appVersion: \"latest\"/appVersion: ${TAG#helm-}/g" ./helm-chart/csi-driver/Chart.yaml 26 | sed -ie "s/version: 0.0.0/version: ${TAG#helm-}/g" ./helm-chart/csi-driver/Chart.yaml 27 | 28 | - name: Set up Helm 29 | uses: azure/setup-helm@v4 30 | 31 | - uses: actions/setup-python@v5 32 | with: 33 | python-version: '3.10' 34 | check-latest: true 35 | 36 | - name: Set up chart-testing 37 | uses: helm/chart-testing-action@v2.7.0 38 | 39 | - name: Run chart-testing (lint) 40 | run: ct lint --check-version-increment=false --chart-dirs helm-chart --target-branch ${{ github.event.repository.default_branch }} 41 | 42 | # we cannot test a helm install without a valid linode 43 | # - name: Create kind cluster 44 | # uses: helm/kind-action@v1.8.0 45 | 46 | #- name: Run chart-testing (install) 47 | # run: ct install --chart-dirs helm-chart --namespace kube-system --helm-extra-set-args 
"--set=apiToken=test --set=region=us-east" --target-branch ${{ github.event.repository.default_branch }} 48 | 49 | helm-release: 50 | if: ${{ startsWith(github.ref, 'refs/tags/') }} 51 | needs: helm-test 52 | permissions: 53 | contents: write # for helm/chart-releaser-action to push chart release and create a release 54 | runs-on: ubuntu-latest 55 | steps: 56 | - name: Checkout 57 | uses: actions/checkout@v4 58 | with: 59 | fetch-depth: 0 60 | 61 | - name: Set Version 62 | run: | 63 | TAG=$(git describe --tags --abbrev=0) 64 | sed -ie "s/appVersion: \"latest\"/appVersion: ${TAG#helm-}/g" ./helm-chart/csi-driver/Chart.yaml 65 | sed -ie "s/version: 0.0.0/version: ${TAG#helm-}/g" ./helm-chart/csi-driver/Chart.yaml 66 | 67 | - name: Configure Git 68 | run: | 69 | git config user.name "$GITHUB_ACTOR" 70 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com" 71 | 72 | - name: Set up Helm 73 | uses: azure/setup-helm@v4 74 | 75 | - name: Run chart-releaser 76 | uses: helm/chart-releaser-action@v1.7.0 77 | env: 78 | CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 79 | CR_RELEASE_NAME_TEMPLATE: "helm-{{ .Version }}" 80 | with: 81 | charts_dir: helm-chart 82 | skip_existing: true 83 | -------------------------------------------------------------------------------- /.github/workflows/label-sync.yml: -------------------------------------------------------------------------------- 1 | name: Sync labels 2 | on: 3 | push: 4 | branches: 5 | - main 6 | paths: 7 | - .github/labels.yml 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | - uses: micnncim/action-label-syncer@3abd5ab72fda571e69fffd97bd4e0033dd5f495c # pin@v1 14 | env: 15 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 16 | with: 17 | manifest: .github/labels.yml 18 | -------------------------------------------------------------------------------- /.github/workflows/pr-labeler.yml: -------------------------------------------------------------------------------- 1 | name: PR labeler 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request_target: 6 | types: [opened, reopened, synchronize] 7 | 8 | jobs: 9 | label-pr: 10 | name: Update PR labels 11 | permissions: 12 | contents: write 13 | pull-requests: write 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout repository 17 | uses: actions/checkout@v4 18 | with: 19 | fetch-depth: 0 20 | - name: Label PR 21 | uses: release-drafter/release-drafter@v6 22 | with: 23 | disable-releaser: github.ref != 'refs/heads/main' 24 | env: 25 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 26 | -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name: Release Drafter 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - main 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | update_release_draft: 14 | permissions: 15 | contents: write 16 | pull-requests: write 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: release-drafter/release-drafter@v6 20 | env: 21 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 22 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | push: 4 | tags: 5 | - "v*.*.*" 6 | 7 | jobs: 8 | release: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | with: 13 | fetch-depth: 0 14 | - name: 
Create Release Artifacts 15 | run: make release 16 | env: 17 | IMAGE_VERSION: ${{ github.ref_name }} 18 | - name: Upload Release Artifacts 19 | uses: softprops/action-gh-release@v2.2.2 20 | with: 21 | files: | 22 | ./release/helm-chart-${{ github.ref_name }}.tgz 23 | ./release/linode-blockstorage-csi-driver-${{ github.ref_name }}.yaml 24 | - name: Docker Meta 25 | id: meta 26 | uses: docker/metadata-action@v5 27 | with: 28 | images: | 29 | linode/linode-blockstorage-csi-driver 30 | tags: | 31 | type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'main') }} 32 | type=semver,pattern={{raw}},value=${{ github.ref_name }} 33 | - name: Login to Docker Hub 34 | uses: docker/login-action@v3 35 | with: 36 | username: ${{ secrets.DOCKER_USERNAME }} 37 | password: ${{ secrets.DOCKER_PASSWORD }} 38 | - name: Login to GitHub Container Registry 39 | uses: docker/login-action@v3 40 | with: 41 | registry: ghcr.io 42 | username: ${{github.actor}} 43 | password: ${{secrets.GITHUB_TOKEN}} 44 | - name: Build and Push to Docker Hub & GHCR 45 | uses: docker/build-push-action@v6 46 | with: 47 | context: . 48 | push: true 49 | file: "./Dockerfile" 50 | tags: | 51 | ${{ steps.meta.outputs.tags }} 52 | ghcr.io/${{steps.meta.outputs.tags}} 53 | labels: ${{ steps.meta.outputs.labels }} 54 | build-args: | 55 | REV=${{ github.ref_name }} 56 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ### Go template 2 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 3 | *.o 4 | *.a 5 | *.so 6 | 7 | # Folders 8 | _obj 9 | _test 10 | bin 11 | 12 | # Architecture specific extensions/prefixes 13 | *.[568vq] 14 | [568vq].out 15 | 16 | # GO 17 | vendor 18 | *.cgo1.go 19 | *.cgo2.c 20 | _cgo_defun.c 21 | _cgo_gotypes.go 22 | _cgo_export.* 23 | 24 | _output 25 | _testmain.go 26 | linode-blockstorage-csi-driver 27 | test-cluster-kubeconfig.yaml 28 | capl-cluster-manifests.yaml 29 | csi-manifests.yaml 30 | 31 | *.exe 32 | *.test 33 | *.prof 34 | 35 | # Output of the go coverage tool, specifically when used with LiteIDE 36 | *.out 37 | 38 | release/ 39 | .idea/ 40 | dist/ 41 | .vscode/ 42 | coverage.txt 43 | *.coverprofile 44 | 45 | *junit.xml 46 | .devbox/* 47 | .envrc 48 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | run: 3 | issues-exit-code: 1 4 | output: 5 | formats: 6 | text: 7 | path: stdout 8 | linters: 9 | enable: 10 | - asasalint 11 | - asciicheck 12 | - bidichk 13 | - bodyclose 14 | - containedctx 15 | - contextcheck 16 | - copyloopvar 17 | - decorder 18 | - dogsled 19 | - dupl 20 | - dupword 21 | - durationcheck 22 | - errchkjson 23 | - errname 24 | - errorlint 25 | - exhaustive 26 | - forbidigo 27 | - forcetypeassert 28 | - gocheckcompilerdirectives 29 | - gochecksumtype 30 | - gocognit 31 | - goconst 32 | - gocritic 33 | - goprintffuncname 34 | - gosec 35 | - gosmopolitan 36 | - loggercheck 37 | - maintidx 38 | - makezero 39 | - misspell 40 | - musttag 41 | - nestif 42 | - nilerr 43 | - nilnesserr 44 | - nilnil 45 | - noctx 46 | - nolintlint 47 | - prealloc 48 | - predeclared 49 | - protogetter 50 | - reassign 51 | - recvcheck 52 | - rowserrcheck 53 | - sqlclosecheck 54 | - testifylint 55 | - thelper 56 | - unconvert 57 | - unparam 58 | - usestdlibvars 59 | - varnamelen 60 | - whitespace 61 | - zerologlint 62 
| - unused 63 | disable: 64 | - spancheck 65 | settings: 66 | dupl: 67 | threshold: 100 68 | errcheck: 69 | check-type-assertions: true 70 | check-blank: true 71 | goconst: 72 | min-len: 3 73 | min-occurrences: 3 74 | gocritic: 75 | enabled-tags: 76 | - diagnostic 77 | - experimental 78 | - opinionated 79 | - performance 80 | - style 81 | settings: 82 | captLocal: 83 | paramsOnly: true 84 | rangeValCopy: 85 | sizeThreshold: 32 86 | gosec: 87 | excludes: 88 | - G115 89 | confidence: medium 90 | govet: 91 | enable: 92 | - shadow 93 | nolintlint: 94 | require-explanation: true 95 | require-specific: true 96 | prealloc: 97 | simple: true 98 | range-loops: true 99 | for-loops: true 100 | varnamelen: 101 | min-name-length: 2 102 | exclusions: 103 | generated: lax 104 | rules: 105 | - linters: 106 | - copyloopvar 107 | - dupl 108 | - errcheck 109 | - gocyclo 110 | - gosec 111 | - maintidx 112 | - unparam 113 | - varnamelen 114 | path: _test(ing)?\.go 115 | - linters: 116 | - gocritic 117 | path: _test\.go 118 | text: (unnamedResult|exitAfterDefer) 119 | - linters: 120 | - gosec 121 | text: 'G101:' 122 | - linters: 123 | - gosec 124 | text: 'G104:' 125 | - linters: 126 | - govet 127 | text: 'shadow: declaration of "(err|ctx)" shadows declaration at' 128 | paths: 129 | - zz_generated\..+\.go$ 130 | - third_party$ 131 | - builtin$ 132 | - examples$ 133 | issues: 134 | max-same-issues: 0 135 | new: false 136 | formatters: 137 | enable: 138 | - gci 139 | - gofmt 140 | - goimports 141 | settings: 142 | gci: 143 | sections: 144 | - standard 145 | - default 146 | - blank 147 | - dot 148 | - prefix(github.com/linode/linode-blockstorage-csi-driver) 149 | gofmt: 150 | simplify: true 151 | goimports: 152 | local-prefixes: 153 | - github.com/linode/linode-blockstorage-csi-driver 154 | exclusions: 155 | generated: lax 156 | paths: 157 | - zz_generated\..+\.go$ 158 | - third_party$ 159 | - builtin$ 160 | - examples$ 161 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | Release notes for this project are kept here: https://github.com/linode/linode-blockstorage-csi-driver/releases 2 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.24.4-alpine AS builder 2 | # from makefile 3 | ARG REV 4 | 5 | RUN mkdir -p /linode 6 | WORKDIR /linode 7 | 8 | RUN apk add cryptsetup cryptsetup-libs cryptsetup-dev gcc musl-dev pkgconfig 9 | 10 | COPY go.mod go.sum ./ 11 | RUN go mod download 12 | 13 | COPY main.go . 
14 | COPY pkg ./pkg 15 | COPY internal ./internal 16 | 17 | RUN CGO_ENABLED=1 go build -a -ldflags "-w -s -X main.vendorVersion=${REV}" -o /bin/linode-blockstorage-csi-driver /linode 18 | 19 | FROM alpine:3.20.3 20 | LABEL maintainers="Linode" 21 | LABEL description="Linode CSI Driver" 22 | 23 | RUN apk add --no-cache e2fsprogs e2fsprogs-extra findmnt blkid cryptsetup 24 | RUN apk add --no-cache xfsprogs=6.2.0-r2 xfsprogs-extra=6.2.0-r2 --repository=http://dl-cdn.alpinelinux.org/alpine/v3.18/main 25 | 26 | COPY --from=builder /bin/linode-blockstorage-csi-driver /linode 27 | 28 | ENTRYPOINT ["/linode"] 29 | -------------------------------------------------------------------------------- /Dockerfile.dev: -------------------------------------------------------------------------------- 1 | FROM golang:1.24.4-alpine AS builder 2 | # from makefile 3 | ARG REV 4 | ARG GOLANGCI_LINT_VERSION 5 | 6 | RUN mkdir -p /linode 7 | WORKDIR /linode 8 | 9 | RUN apk add \ 10 | blkid \ 11 | cryptsetup \ 12 | cryptsetup-libs \ 13 | cryptsetup-dev \ 14 | curl \ 15 | e2fsprogs \ 16 | e2fsprogs-extra \ 17 | findmnt \ 18 | gcc \ 19 | lsblk \ 20 | make \ 21 | musl-dev \ 22 | pkgconfig \ 23 | xfsprogs \ 24 | xfsprogs-extra 25 | 26 | COPY go.mod go.sum ./ 27 | RUN go mod tidy 28 | 29 | COPY . . 30 | 31 | RUN CGO_ENABLED=1 go build -a -ldflags "-w -s -X main.vendorVersion=${REV}" -o /bin/linode-blockstorage-csi-driver /linode 32 | RUN CGO_ENABLED=1 go install go.uber.org/mock/mockgen@latest 33 | RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin ${GOLANGCI_LINT_VERSION} 34 | 35 | CMD ["sh"] 36 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Linode Block Storage CSI Driver 2 | 3 | [![Go Report Card](https://goreportcard.com/badge/github.com/linode/linode-blockstorage-csi-driver)](https://goreportcard.com/report/github.com/linode/linode-blockstorage-csi-driver) 4 | [![codecov](https://codecov.io/gh/linode/linode-blockstorage-csi-driver/graph/badge.svg?token=b5HeEgMdAd)](https://codecov.io/gh/linode/linode-blockstorage-csi-driver) 5 | [![Docker Pulls](https://img.shields.io/docker/pulls/linode/linode-blockstorage-csi-driver.svg)](https://hub.docker.com/r/linode/linode-blockstorage-csi-driver/) 6 | 7 | ## Table of Contents 8 | 9 | - [Overview](#overview) 10 | - [Deployment](docs/deployment.md) 11 | - [Requirements](docs/deployment.md#-requirements) 12 | - [Secure a Linode API Access Token](docs/deployment.md#-secure-a-linode-api-access-token) 13 | - [Deployment Methods](docs/deployment.md#️-deployment-methods) 14 | - [Using Helm (Recommended)](docs/deployment.md#1-using-helm) 15 | - [Using kubectl](docs/deployment.md#2-using-kubectl) 16 | - [Advanced Configuration and Operational Details](docs/deployment.md#-advanced-configuration-and-operational-details) 17 | - [Usage Examples](docs/usage.md) 18 | - [Creating a PersistentVolumeClaim](docs/usage.md#creating-a-persistentvolumeclaim) 19 | - [Encrypted Drives using LUKS](docs/encrypted-drives.md) 20 | - [Adding Tags to Created Volumes](docs/volume-tags.md) 21 | - [Topology-Aware Provisioning](docs/topology-aware-provisioning.md) 22 | - [Development Setup](docs/development-setup.md) 23 | - [Prerequisites](docs/development-setup.md#-prerequisites) 24 | - [Setting Up the Local Development Environment](docs/development-setup.md#-setting-up-the-local-development-environment) 
25 | - [Building the Project](docs/development-setup.md#️-building-the-project) 26 | - [Running Unit Tests](docs/development-setup.md#️-running-unit-tests) 27 | - [Creating a Development Cluster](docs/development-setup.md#️-creating-a-development-cluster) 28 | - [Running E2E Tests](docs/testing.md) 29 | - [Contributing](docs/contributing.md) 30 | - [Observability](docs/observability.md) 31 | - [Metrics](docs/metrics-documentation.md) 32 | - [How to opt-in for Metrics](docs/observability.md#steps-to-opt-in-for-the-csi-driver-metrics) 33 | - [Tracing](docs/tracing-documentation.md) 34 | - [How to opt-in for Tracing](docs/observability.md#steps-to-opt-in-for-tracing-in-the-csi-driver) 35 | - [License](#license) 36 | - [Disclaimers](#-disclaimers) 37 | - [Community](#-join-us-on-slack) 38 | 39 | ## 📚 Overview 40 | 41 | The Container Storage Interface ([CSI](https://github.com/container-storage-interface/spec)) Driver for Linode Block Storage enables container orchestrators such as Kubernetes to manage the lifecycle of persistent storage claims. 42 | 43 | For more information about Kubernetes CSI, refer to the [Kubernetes CSI](https://kubernetes-csi.github.io/docs/introduction.html) and [CSI Spec](https://github.com/container-storage-interface/spec/) repositories. 44 | 45 | ## ⚠️ Disclaimers 46 | 47 | - **Volume Size Constraints**: 48 | - Requests for Persistent Volumes with a require_size less than the Linode minimum Block Storage size will be fulfilled with a Linode Block Storage volume of the minimum size (currently 10Gi) in accordance with the CSI specification. 49 | - The upper-limit size constraint (`limit_bytes`) will also be honored, so the size of Linode Block Storage volumes provisioned will not exceed this parameter. 50 | - **Volume Attachment Persistence**: Block storage volume attachments are no longer persisted across reboots to support a higher number of attachments on larger instances. 51 | 52 | 53 | _For more details, refer to the [CSI specification](https://github.com/container-storage-interface/spec/blob/v1.0.0/spec.md#createvolume)._ 54 | 55 | ## 💬 Join Us on Slack 56 | 57 | - **General Help/Discussion**: [Kubernetes Slack - #linode](https://kubernetes.slack.com/messages/CD4B15LUR) 58 | - **Development/Debugging**: [Gopher's Slack - #linodego](https://gophers.slack.com/messages/CAG93EB2S) 59 | 60 | ## License 61 | 62 | This project is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text. 
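As a quick illustration of the size behavior described in the Disclaimers above, the sketch below shows a minimal PersistentVolumeClaim against the bundled `linode-block-storage-retain` StorageClass. This is an illustrative example only (the claim name is hypothetical); the canonical manifests live under `internal/driver/examples/kubernetes/` and are documented in [docs/usage.md](docs/usage.md):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-example-pvc            # hypothetical name, for illustration only
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: linode-block-storage-retain
  resources:
    requests:
      storage: 10Gi                # requests below 10Gi are still provisioned at the 10Gi minimum
```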
63 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | ignore: 2 | - "mocks/*" 3 | 4 | github_checks: 5 | annotations: false 6 | -------------------------------------------------------------------------------- /deploy/kubernetes/base/accounts-roles-bindings.yaml: -------------------------------------------------------------------------------- 1 | ##### Node Service Account, Roles, RoleBindings 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: csi-node-sa 6 | namespace: kube-system 7 | --- 8 | kind: ClusterRole 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | metadata: 11 | name: linode-csi-role 12 | namespace: kube-system 13 | rules: 14 | - apiGroups: [""] 15 | resources: ["events"] 16 | verbs: ["get", "list", "watch", "create", "update", "patch"] 17 | - apiGroups: [""] 18 | resources: ["nodes"] 19 | verbs: ["get", "list", "watch"] 20 | --- 21 | kind: ClusterRoleBinding 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | metadata: 24 | name: linode-csi-binding 25 | namespace: kube-system 26 | subjects: 27 | - kind: ServiceAccount 28 | name: csi-node-sa 29 | namespace: kube-system 30 | roleRef: 31 | kind: ClusterRole 32 | name: linode-csi-role 33 | apiGroup: rbac.authorization.k8s.io 34 | --- 35 | ##### Controller Service Account, Roles, Rolebindings 36 | apiVersion: v1 37 | kind: ServiceAccount 38 | metadata: 39 | name: csi-controller-sa 40 | namespace: kube-system 41 | -------------------------------------------------------------------------------- /deploy/kubernetes/base/csi-driver-instance.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: CSIDriver 3 | metadata: 4 | name: linodebs.csi.linode.com 5 | spec: 6 | attachRequired: true 7 | podInfoOnMount: true 8 | -------------------------------------------------------------------------------- /deploy/kubernetes/base/csi-storageclass.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: linode-block-storage 5 | namespace: kube-system 6 | provisioner: linodebs.csi.linode.com 7 | allowVolumeExpansion: true 8 | --- 9 | kind: StorageClass 10 | apiVersion: storage.k8s.io/v1 11 | metadata: 12 | name: linode-block-storage-retain 13 | namespace: kube-system 14 | annotations: 15 | storageclass.kubernetes.io/is-default-class: "true" 16 | provisioner: linodebs.csi.linode.com 17 | reclaimPolicy: Retain 18 | allowVolumeExpansion: true 19 | -------------------------------------------------------------------------------- /deploy/kubernetes/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - ../sidecars/external-provisioner 5 | - ../sidecars/external-attacher 6 | - ../sidecars/external-resizer 7 | - accounts-roles-bindings.yaml 8 | - csi-driver-instance.yaml 9 | - csi-storageclass.yaml 10 | - ss-csi-linode-controller.yaml 11 | - ds-csi-linode-node.yaml 12 | -------------------------------------------------------------------------------- /deploy/kubernetes/base/ss-csi-linode-controller.yaml: -------------------------------------------------------------------------------- 1 | kind: StatefulSet 2 | apiVersion: apps/v1 3 | metadata: 4 | name: csi-linode-controller 5 | namespace: 
kube-system 6 | labels: 7 | app: csi-linode-controller 8 | spec: 9 | serviceName: "csi-linode" 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: csi-linode-controller 14 | template: 15 | metadata: 16 | labels: 17 | app: csi-linode-controller 18 | role: csi-linode 19 | spec: 20 | securityContext: 21 | seccompProfile: 22 | type: RuntimeDefault 23 | serviceAccount: csi-controller-sa 24 | containers: 25 | - name: csi-provisioner 26 | image: registry.k8s.io/sig-storage/csi-provisioner:v5.1.0 27 | imagePullPolicy: IfNotPresent 28 | args: 29 | - "--default-fstype=ext4" 30 | - "--volume-name-prefix=pvc" 31 | - "--volume-name-uuid-length=16" 32 | - "--csi-address=$(ADDRESS)" 33 | - "--feature-gates=Topology=true" 34 | - "--v=2" 35 | env: 36 | - name: ADDRESS 37 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 38 | securityContext: 39 | allowPrivilegeEscalation: false 40 | capabilities: 41 | drop: 42 | - ALL 43 | volumeMounts: 44 | - name: socket-dir 45 | mountPath: /var/lib/csi/sockets/pluginproxy/ 46 | - name: csi-attacher 47 | image: registry.k8s.io/sig-storage/csi-attacher:v4.8.1 48 | imagePullPolicy: IfNotPresent 49 | args: 50 | - "--v=2" 51 | - "--csi-address=$(ADDRESS)" 52 | - "--timeout=30s" 53 | env: 54 | - name: ADDRESS 55 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 56 | securityContext: 57 | allowPrivilegeEscalation: false 58 | capabilities: 59 | drop: 60 | - ALL 61 | volumeMounts: 62 | - name: socket-dir 63 | mountPath: /var/lib/csi/sockets/pluginproxy/ 64 | - name: csi-resizer 65 | image: registry.k8s.io/sig-storage/csi-resizer:v1.12.0 66 | imagePullPolicy: IfNotPresent 67 | args: 68 | - "--v=2" 69 | - "--csi-address=$(ADDRESS)" 70 | env: 71 | - name: ADDRESS 72 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 73 | securityContext: 74 | allowPrivilegeEscalation: false 75 | capabilities: 76 | drop: 77 | - ALL 78 | volumeMounts: 79 | - name: socket-dir 80 | mountPath: /var/lib/csi/sockets/pluginproxy/ 81 | - name: csi-linode-plugin 82 | image: linode/linode-blockstorage-csi-driver:latest 83 | args: 84 | - "--v=2" 85 | env: 86 | - name: CSI_ENDPOINT 87 | value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock 88 | - name: LINODE_URL 89 | value: https://api.linode.com/v4 90 | - name: DRIVER_ROLE 91 | value: controller 92 | - name: LINODE_BS_PREFIX 93 | - name: NODE_NAME 94 | valueFrom: 95 | fieldRef: 96 | apiVersion: v1 97 | fieldPath: spec.nodeName 98 | - name: LINODE_TOKEN 99 | valueFrom: 100 | secretKeyRef: 101 | name: linode 102 | key: token 103 | securityContext: 104 | allowPrivilegeEscalation: false 105 | capabilities: 106 | drop: 107 | - ALL 108 | volumeMounts: 109 | - name: socket-dir 110 | mountPath: /var/lib/csi/sockets/pluginproxy/ 111 | tolerations: 112 | - effect: NoSchedule 113 | operator: Exists 114 | - key: CriticalAddonsOnly 115 | operator: Exists 116 | - effect: NoExecute 117 | operator: Exists 118 | volumes: 119 | - name: socket-dir 120 | emptyDir: {} 121 | -------------------------------------------------------------------------------- /deploy/kubernetes/overlays/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | images: 4 | - name: linode/linode-blockstorage-csi-driver 5 | newTag: canary 6 | resources: 7 | - ../../base 8 | -------------------------------------------------------------------------------- /deploy/kubernetes/overlays/release/.gitignore: 
-------------------------------------------------------------------------------- 1 | kustomization.yaml 2 | -------------------------------------------------------------------------------- /deploy/kubernetes/overlays/release/kustomization.yaml.template: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | images: 4 | - name: linode/linode-blockstorage-csi-driver 5 | newName: ${CSI_IMAGE_NAME} 6 | newTag: ${CSI_VERSION} 7 | resources: 8 | - ../../base 9 | -------------------------------------------------------------------------------- /deploy/kubernetes/sidecars/external-attacher/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - rbac.yaml 5 | namespace: kube-system 6 | patches: 7 | - patch: |- 8 | - op: replace 9 | path: /metadata/name 10 | value: external-attacher-role 11 | target: 12 | group: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: external-attacher-runner 15 | version: v1 16 | - patch: |- 17 | - op: replace 18 | path: /metadata/name 19 | value: csi-controller-attacher-binding 20 | - op: replace 21 | path: /subjects/0/name 22 | value: csi-controller-sa 23 | - op: replace 24 | path: /subjects/0/namespace 25 | value: kube-system 26 | target: 27 | group: rbac.authorization.k8s.io 28 | kind: ClusterRoleBinding 29 | name: csi-attacher-role 30 | version: v1 31 | - patch: |- 32 | kind: RoleBinding 33 | apiVersion: rbac.authorization.k8s.io/v1 34 | metadata: 35 | name: csi-attacher-role-cfg 36 | $patch: delete 37 | - patch: |- 38 | kind: Role 39 | apiVersion: rbac.authorization.k8s.io/v1 40 | metadata: 41 | name: external-attacher-cfg 42 | $patch: delete 43 | - patch: |- 44 | kind: ServiceAccount 45 | apiVersion: v1 46 | metadata: 47 | name: csi-attacher 48 | $patch: delete 49 | -------------------------------------------------------------------------------- /deploy/kubernetes/sidecars/external-attacher/rbac.yaml: -------------------------------------------------------------------------------- 1 | # xref: https://raw.githubusercontent.com/kubernetes-csi/external-attacher/release-2.2/deploy/kubernetes/rbac.yaml 2 | 3 | # This YAML file contains all RBAC objects that are necessary to run external 4 | # CSI attacher. 
5 | # 6 | # In production, each CSI driver deployment has to be customized: 7 | # - to avoid conflicts, use non-default namespace and different names 8 | # for non-namespaced entities like the ClusterRole 9 | # - decide whether the deployment replicates the external CSI 10 | # attacher, in which case leadership election must be enabled; 11 | # this influences the RBAC setup, see below 12 | 13 | apiVersion: v1 14 | kind: ServiceAccount 15 | metadata: 16 | name: csi-attacher 17 | # replace with non-default namespace name 18 | namespace: default 19 | 20 | --- 21 | # Attacher must be able to work with PVs, CSINodes and VolumeAttachments 22 | kind: ClusterRole 23 | apiVersion: rbac.authorization.k8s.io/v1 24 | metadata: 25 | name: external-attacher-runner 26 | rules: 27 | - apiGroups: [""] 28 | resources: ["persistentvolumes"] 29 | verbs: ["get", "list", "watch", "update", "patch"] 30 | - apiGroups: ["storage.k8s.io"] 31 | resources: ["csinodes"] 32 | verbs: ["get", "list", "watch"] 33 | - apiGroups: ["storage.k8s.io"] 34 | resources: ["volumeattachments", "volumeattachments/status"] 35 | verbs: ["get", "list", "watch", "update", "patch"] 36 | #Secret permission is optional. 37 | #Enable it if you need value from secret. 38 | #For example, you have key `csi.storage.k8s.io/controller-publish-secret-name` in StorageClass.parameters 39 | #see https://kubernetes-csi.github.io/docs/secrets-and-credentials.html 40 | # - apiGroups: [""] 41 | # resources: ["secrets"] 42 | # verbs: ["get", "list"] 43 | 44 | --- 45 | kind: ClusterRoleBinding 46 | apiVersion: rbac.authorization.k8s.io/v1 47 | metadata: 48 | name: csi-attacher-role 49 | subjects: 50 | - kind: ServiceAccount 51 | name: csi-attacher 52 | # replace with non-default namespace name 53 | namespace: default 54 | roleRef: 55 | kind: ClusterRole 56 | name: external-attacher-runner 57 | apiGroup: rbac.authorization.k8s.io 58 | 59 | --- 60 | # Attacher must be able to work with configmaps or leases in the current namespace 61 | # if (and only if) leadership election is enabled 62 | kind: Role 63 | apiVersion: rbac.authorization.k8s.io/v1 64 | metadata: 65 | # replace with non-default namespace name 66 | namespace: default 67 | name: external-attacher-cfg 68 | rules: 69 | - apiGroups: ["coordination.k8s.io"] 70 | resources: ["leases"] 71 | verbs: ["get", "watch", "list", "delete", "update", "create"] 72 | 73 | --- 74 | kind: RoleBinding 75 | apiVersion: rbac.authorization.k8s.io/v1 76 | metadata: 77 | name: csi-attacher-role-cfg 78 | # replace with non-default namespace name 79 | namespace: default 80 | subjects: 81 | - kind: ServiceAccount 82 | name: csi-attacher 83 | # replace with non-default namespace name 84 | namespace: default 85 | roleRef: 86 | kind: Role 87 | name: external-attacher-cfg 88 | apiGroup: rbac.authorization.k8s.io 89 | -------------------------------------------------------------------------------- /deploy/kubernetes/sidecars/external-provisioner/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - rbac.yaml 5 | namespace: kube-system 6 | patches: 7 | - patch: |- 8 | - op: replace 9 | path: /metadata/name 10 | value: external-provisioner-role 11 | target: 12 | group: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: external-provisioner-runner 15 | version: v1 16 | - patch: |- 17 | - op: replace 18 | path: /metadata/name 19 | value: csi-controller-provisioner-binding 20 | - op: 
replace 21 | path: /subjects/0/name 22 | value: csi-controller-sa 23 | - op: replace 24 | path: /subjects/0/namespace 25 | value: kube-system 26 | target: 27 | group: rbac.authorization.k8s.io 28 | kind: ClusterRoleBinding 29 | name: csi-provisioner-role 30 | version: v1 31 | - patch: |- 32 | kind: RoleBinding 33 | apiVersion: rbac.authorization.k8s.io/v1 34 | metadata: 35 | name: csi-provisioner-role-cfg 36 | $patch: delete 37 | - patch: |- 38 | kind: Role 39 | apiVersion: rbac.authorization.k8s.io/v1 40 | metadata: 41 | name: external-provisioner-cfg 42 | $patch: delete 43 | - patch: |- 44 | kind: ServiceAccount 45 | apiVersion: v1 46 | metadata: 47 | name: csi-provisioner 48 | $patch: delete 49 | -------------------------------------------------------------------------------- /deploy/kubernetes/sidecars/external-provisioner/rbac.yaml: -------------------------------------------------------------------------------- 1 | # xref: https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/release-1.6/deploy/kubernetes/rbac.yaml 2 | 3 | # This YAML file contains all RBAC objects that are necessary to run external 4 | # CSI provisioner. 5 | # 6 | # In production, each CSI driver deployment has to be customized: 7 | # - to avoid conflicts, use non-default namespace and different names 8 | # for non-namespaced entities like the ClusterRole 9 | # - decide whether the deployment replicates the external CSI 10 | # provisioner, in which case leadership election must be enabled; 11 | # this influences the RBAC setup, see below 12 | 13 | apiVersion: v1 14 | kind: ServiceAccount 15 | metadata: 16 | name: csi-provisioner 17 | # replace with non-default namespace name 18 | namespace: default 19 | 20 | --- 21 | kind: ClusterRole 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | metadata: 24 | name: external-provisioner-runner 25 | rules: 26 | # The following rule should be uncommented for plugins that require secrets 27 | # for provisioning. 
28 | # - apiGroups: [""] 29 | # resources: ["secrets"] 30 | # verbs: ["get", "list"] 31 | - apiGroups: [""] 32 | resources: ["persistentvolumes"] 33 | verbs: ["get", "list", "watch", "create", "delete"] 34 | - apiGroups: [""] 35 | resources: ["persistentvolumeclaims"] 36 | verbs: ["get", "list", "watch", "update"] 37 | - apiGroups: ["storage.k8s.io"] 38 | resources: ["storageclasses"] 39 | verbs: ["get", "list", "watch"] 40 | - apiGroups: [""] 41 | resources: ["events"] 42 | verbs: ["list", "watch", "create", "update", "patch"] 43 | - apiGroups: ["snapshot.storage.k8s.io"] 44 | resources: ["volumesnapshots"] 45 | verbs: ["get", "list"] 46 | - apiGroups: ["snapshot.storage.k8s.io"] 47 | resources: ["volumesnapshotcontents"] 48 | verbs: ["get", "list"] 49 | - apiGroups: ["storage.k8s.io"] 50 | resources: ["csinodes"] 51 | verbs: ["get", "list", "watch"] 52 | - apiGroups: [""] 53 | resources: ["nodes"] 54 | verbs: ["get", "list", "watch"] 55 | 56 | --- 57 | kind: ClusterRoleBinding 58 | apiVersion: rbac.authorization.k8s.io/v1 59 | metadata: 60 | name: csi-provisioner-role 61 | subjects: 62 | - kind: ServiceAccount 63 | name: csi-provisioner 64 | # replace with non-default namespace name 65 | namespace: default 66 | roleRef: 67 | kind: ClusterRole 68 | name: external-provisioner-runner 69 | apiGroup: rbac.authorization.k8s.io 70 | 71 | --- 72 | # Provisioner must be able to work with endpoints in current namespace 73 | # if (and only if) leadership election is enabled 74 | kind: Role 75 | apiVersion: rbac.authorization.k8s.io/v1 76 | metadata: 77 | # replace with non-default namespace name 78 | namespace: default 79 | name: external-provisioner-cfg 80 | rules: 81 | # Only one of the following rules for endpoints or leases is required based on 82 | # what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases. 
83 | - apiGroups: [""] 84 | resources: ["endpoints"] 85 | verbs: ["get", "watch", "list", "delete", "update", "create"] 86 | - apiGroups: ["coordination.k8s.io"] 87 | resources: ["leases"] 88 | verbs: ["get", "watch", "list", "delete", "update", "create"] 89 | 90 | --- 91 | kind: RoleBinding 92 | apiVersion: rbac.authorization.k8s.io/v1 93 | metadata: 94 | name: csi-provisioner-role-cfg 95 | # replace with non-default namespace name 96 | namespace: default 97 | subjects: 98 | - kind: ServiceAccount 99 | name: csi-provisioner 100 | # replace with non-default namespace name 101 | namespace: default 102 | roleRef: 103 | kind: Role 104 | name: external-provisioner-cfg 105 | apiGroup: rbac.authorization.k8s.io 106 | -------------------------------------------------------------------------------- /deploy/kubernetes/sidecars/external-resizer/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - rbac.yaml 5 | namespace: kube-system 6 | patches: 7 | - patch: |- 8 | - op: replace 9 | path: /metadata/name 10 | value: external-resizer-role 11 | target: 12 | group: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: external-resizer-runner 15 | version: v1 16 | - patch: |- 17 | - op: replace 18 | path: /metadata/name 19 | value: csi-controller-resizer-binding 20 | - op: replace 21 | path: /subjects/0/name 22 | value: csi-controller-sa 23 | - op: replace 24 | path: /subjects/0/namespace 25 | value: kube-system 26 | target: 27 | group: rbac.authorization.k8s.io 28 | kind: ClusterRoleBinding 29 | name: csi-resizer-role 30 | version: v1 31 | - patch: |- 32 | kind: RoleBinding 33 | apiVersion: rbac.authorization.k8s.io/v1 34 | metadata: 35 | name: csi-resizer-role-cfg 36 | $patch: delete 37 | - patch: |- 38 | kind: Role 39 | apiVersion: rbac.authorization.k8s.io/v1 40 | metadata: 41 | name: external-resizer-cfg 42 | $patch: delete 43 | - patch: |- 44 | apiVersion: v1 45 | kind: ServiceAccount 46 | metadata: 47 | name: csi-resizer 48 | $patch: delete 49 | -------------------------------------------------------------------------------- /deploy/kubernetes/sidecars/external-resizer/rbac.yaml: -------------------------------------------------------------------------------- 1 | # xref: https://raw.githubusercontent.com/kubernetes-csi/external-resizer/v1.0.1/deploy/kubernetes/rbac.yaml 2 | 3 | # This YAML file contains all RBAC objects that are necessary to run external 4 | # CSI resizer. 5 | # 6 | # In production, each CSI driver deployment has to be customized: 7 | # - to avoid conflicts, use non-default namespace and different names 8 | # for non-namespaced entities like the ClusterRole 9 | # - decide whether the deployment replicates the external CSI 10 | # resizer, in which case leadership election must be enabled; 11 | # this influences the RBAC setup, see below 12 | 13 | apiVersion: v1 14 | kind: ServiceAccount 15 | metadata: 16 | name: csi-resizer 17 | # replace with non-default namespace name 18 | namespace: default 19 | 20 | --- 21 | # Resizer must be able to work with PVCs, PVs, SCs. 22 | kind: ClusterRole 23 | apiVersion: rbac.authorization.k8s.io/v1 24 | metadata: 25 | name: external-resizer-runner 26 | rules: 27 | # The following rule should be uncommented for plugins that require secrets 28 | # for provisioning. 
29 | # - apiGroups: [""] 30 | # resources: ["secrets"] 31 | # verbs: ["get", "list", "watch"] 32 | - apiGroups: [""] 33 | resources: ["persistentvolumes"] 34 | verbs: ["get", "list", "watch", "patch"] 35 | - apiGroups: [""] 36 | resources: ["persistentvolumeclaims"] 37 | verbs: ["get", "list", "watch"] 38 | - apiGroups: [""] 39 | resources: ["pods"] 40 | verbs: ["get", "list", "watch"] 41 | - apiGroups: [""] 42 | resources: ["persistentvolumeclaims/status"] 43 | verbs: ["patch"] 44 | - apiGroups: [""] 45 | resources: ["events"] 46 | verbs: ["list", "watch", "create", "update", "patch"] 47 | 48 | --- 49 | kind: ClusterRoleBinding 50 | apiVersion: rbac.authorization.k8s.io/v1 51 | metadata: 52 | name: csi-resizer-role 53 | subjects: 54 | - kind: ServiceAccount 55 | name: csi-resizer 56 | # replace with non-default namespace name 57 | namespace: default 58 | roleRef: 59 | kind: ClusterRole 60 | name: external-resizer-runner 61 | apiGroup: rbac.authorization.k8s.io 62 | 63 | --- 64 | # Resizer must be able to work with end point in current namespace 65 | # if (and only if) leadership election is enabled 66 | kind: Role 67 | apiVersion: rbac.authorization.k8s.io/v1 68 | metadata: 69 | # replace with non-default namespace name 70 | namespace: default 71 | name: external-resizer-cfg 72 | rules: 73 | - apiGroups: ["coordination.k8s.io"] 74 | resources: ["leases"] 75 | verbs: ["get", "watch", "list", "delete", "update", "create"] 76 | 77 | --- 78 | kind: RoleBinding 79 | apiVersion: rbac.authorization.k8s.io/v1 80 | metadata: 81 | name: csi-resizer-role-cfg 82 | # replace with non-default namespace name 83 | namespace: default 84 | subjects: 85 | - kind: ServiceAccount 86 | name: csi-resizer 87 | # replace with non-default namespace name 88 | namespace: default 89 | roleRef: 90 | kind: Role 91 | name: external-resizer-cfg 92 | apiGroup: rbac.authorization.k8s.io 93 | -------------------------------------------------------------------------------- /devbox.json: -------------------------------------------------------------------------------- 1 | { 2 | "packages": [ 3 | "ginkgo@latest", 4 | "ctlptl@latest", 5 | "clusterctl@latest", 6 | "kustomize@latest", 7 | "kubectl@latest", 8 | "kind@latest", 9 | "kustomize@latest", 10 | "kyverno-chainsaw@latest", 11 | "yq-go@latest", 12 | "envsubst@latest", 13 | "mockgen@latest", 14 | "jq@latest", 15 | "openssl@latest", 16 | "go@1.24" 17 | ], 18 | "shell": { 19 | "init_hook": [ 20 | "export \"GOROOT=$(go env GOROOT)\"" 21 | ], 22 | "scripts": { 23 | "mgmt-and-capl-cluster": "make mgmt-and-capl-cluster", 24 | "mgmt-cluster": "make mgmt-cluster", 25 | "capl-cluster": "make capl-cluster", 26 | "e2e-test": "make e2e-test", 27 | "cleanup-cluster": "make cleanup-cluster", 28 | "csi-sanity-test": "make csi-sanity-test", 29 | "upstream-e2e-tests": "make upstream-e2e-tests" 30 | } 31 | }, 32 | "env": { 33 | "EXP_CLUSTER_RESOURCE_SET": "true" 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /docs/contributing.md: -------------------------------------------------------------------------------- 1 | # 🤝 Contributing Guidelines 2 | 3 | :+1::tada: First off, we appreciate you taking the time to contribute! THANK YOU! :tada::+1: 4 | 5 | We put together the handy guide below to help you get support for your work. Read on! 6 | 7 | ## I Just Want to Ask the Maintainers a Question 8 | 9 | The [Linode Community](https://www.linode.com/community/questions/) is a great place to get additional support. 
10 | 11 | ## How Do I Submit A (Good) Bug Report or Feature Request 12 | 13 | Please open a [github issue](https://guides.github.com/features/issues/) to report bugs or suggest features. 14 | 15 | When filing an issue or feature request, help us avoid duplication and redundant effort -- check existing open or recently closed issues first. 16 | 17 | Detailed bug reports and requests are easier for us to work with. Please include the following in your issue: 18 | 19 | * A reproducible test case or series of steps 20 | * The version of our code being used 21 | * Any modifications you've made, relevant to the bug 22 | * Anything unusual about your environment or deployment 23 | * Screenshots and code samples where illustrative and helpful 24 | 25 | ## How to Open a Pull Request 26 | 27 | We follow the [fork and pull model](https://opensource.guide/how-to-contribute/#opening-a-pull-request) for open source contributions. 28 | 29 | Tips for a faster merge: 30 | * address one feature or bug per pull request. 31 | * large formatting changes make it hard for us to focus on your work. 32 | * follow language coding conventions. 33 | * make sure that tests pass. 34 | * make sure your commits are atomic, [addressing one change per commit](https://chris.beams.io/posts/git-commit/). 35 | * add tests! 36 | 37 | ## Cutting Releases 38 | When a release is published, the [release workflow](https://github.com/linode/linode-blockstorage-csi-driver/actions/workflows/release.yml) builds and pushes the docker image to Dockerhub. 39 | 40 | ## Code of Conduct 41 | This project follows the [Linode Community Code of Conduct](https://www.linode.com/community/questions/conduct). 42 | 43 | ## Vulnerability Reporting 44 | If you discover a potential security issue in this project we ask that you notify Linode Security via our [vulnerability reporting process](https://hackerone.com/linode). Please do **not** create a public github issue. 45 | 46 | ## Licensing 47 | See the [LICENSE file](/LICENSE) for our project's licensing. 48 | -------------------------------------------------------------------------------- /docs/development-setup.md: -------------------------------------------------------------------------------- 1 | ## 🛠️ Developer Setup 2 | 3 | ### 📦 Prerequisites 4 | 5 | - **Go**: Ensure you have Go installed. You can download it from [here](https://golang.org/dl/). 6 | - **Docker**: Required for building and testing Docker images. Download from [here](https://www.docker.com/get-started). 7 | - **kubectl**: Kubernetes command-line tool. Install instructions [here](https://kubernetes.io/docs/tasks/tools/). 8 | - **Helm**: Package manager for Kubernetes. Install instructions [here](https://helm.sh/docs/intro/install/). 9 | - **Devbox**: For managing development environments. Install instructions [here](https://www.jetify.com/devbox/docs/installing_devbox/). 10 | 11 | ### 🚀 Setting Up the Local Development Environment 12 | 13 | 1. **Clone the Repository** 14 | 15 | ```sh 16 | git clone https://github.com/linode/linode-blockstorage-csi-driver.git 17 | cd linode-blockstorage-csi-driver 18 | ``` 19 | 20 | 2. **Install Devbox** 21 | 22 | Follow the [Devbox installation guide](https://www.jetify.com/devbox/docs/installing_devbox/) to set up Devbox on your machine. 23 | 24 | 3. 
**Setup Environment Variables** 25 | 26 | Create a `.env` file in the root directory or export them directly in your shell: 27 | 28 | ```sh 29 | export LINODE_API_TOKEN="your-linode-api-token" 30 | export LINODE_REGION="your-preferred-region" 31 | export KUBERNETES_VERSION=v1.21.0 32 | export LINODE_CONTROL_PLANE_MACHINE_TYPE=g6-standard-2 33 | export LINODE_MACHINE_TYPE=g6-standard-2 34 | ``` 35 | 36 | 4. **Start Devbox Environment** 37 | 38 | ```sh 39 | devbox shell 40 | ``` 41 | 42 | This command initializes the development environment with all necessary dependencies. 43 | 44 | ### 🛠️ Building the Project 45 | 46 | To build the project binaries in a container(builds are run in a docker container to allow consistent builds regardless of underlying unix/linux systems): 47 | 48 | ```sh 49 | make docker-build 50 | ``` 51 | 52 | ### 🧪 Running Unit Tests 53 | 54 | To run the unit tests, use the Dockerfile.dev that copies the directory into the container allowing us to run make targets: 55 | 56 | ```sh 57 | export DOCKERFILE=Dockerfile.dev 58 | make docker-build && make test 59 | ``` 60 | 61 | ### 🧪 Create a Development Cluster 62 | 63 | To set up a development cluster for running any e2e testing/workflows, follow these steps: 64 | 65 | 1. **Setup a CAPL Management Cluster** 66 | 67 | ```sh 68 | devbox run mgmt-cluster 69 | ``` 70 | 71 | 2. **Build and Push Test Image** 72 | 73 | Before building and pushing the test image, ensure you've made the necessary changes to the codebase for your testing purposes. 74 | 75 | ```sh 76 | # Build the Docker image with your changes 77 | make docker-build IMAGE_TAG=ghcr.io/yourusername/linode-blockstorage-csi-driver:test 78 | 79 | # Push the image to the container registry 80 | make docker-push IMAGE_TAG=ghcr.io/yourusername/linode-blockstorage-csi-driver:test 81 | ``` 82 | 83 | Note: Replace `yourusername` with your actual GitHub username or organization name. 84 | 85 | If you need to make changes to the Dockerfile or build process: 86 | 1. Modify the `Dockerfile` in the project root if needed. 87 | 2. Update the `Makefile` if you need to change build arguments or processes. 88 | 3. If you've added new dependencies, ensure they're properly included in the build. 89 | 90 | After pushing, verify that your image is available in the GitHub Container Registry before proceeding to create the test cluster. 91 | 92 | 3. **Create a CAPL Child Test Cluster** 93 | 94 | ```sh 95 | IMAGE_NAME=ghcr.io/yourusername/linode-blockstorage-csi-driver IMAGE_VERSION=test devbox run capl-cluster 96 | ``` 97 | 98 | This will create a testing cluster with the necessary components to run end-to-end testing or workflows for the Linode BlockStorage CSI Driver. 99 | 100 | For more detailed instructions on running the actual end-to-end tests, refer to the [e2e Tests README](./testing.md). 101 | 102 | ### 🔧 Linting and Formatting 103 | 104 | Ensure your code adheres to the project's coding standards by running: 105 | 106 | ```sh 107 | make lint 108 | ``` 109 | 110 | ### 📝 Documentation 111 | 112 | Update and maintain documentation as you develop new features or make changes. Ensure that all new functionalities are well-documented in the `README.md` or relevant documentation files. 
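
### ✅ Sanity-Checking the Test Cluster

Once the CAPL child cluster is up, a quick smoke test is to point `kubectl` at the exported kubeconfig and confirm the CSI objects exist. This is a minimal sketch only; the kubeconfig filename below is an assumption about a typical run of the `capl-cluster` target, so adjust it to whatever file was actually written to the repo root:

```sh
# Point kubectl at the child cluster's exported kubeconfig
# (filename is an assumption -- use the file produced by your run).
export KUBECONFIG=./test-cluster-kubeconfig

# Controller and node plugin pods should be Running.
kubectl -n kube-system get pods | grep csi-linode

# The CSIDriver object and the bundled storage classes should be registered.
kubectl get csidriver linodebs.csi.linode.com
kubectl get storageclass | grep linode-block-storage
```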
113 | 114 | -------------------------------------------------------------------------------- /docs/example-images/controller-server/create-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/controller-server/create-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/controller-server/delete-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/controller-server/delete-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/controller-server/publish-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/controller-server/publish-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/controller-server/unpublish-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/controller-server/unpublish-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/node-server/expand-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/node-server/expand-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/node-server/publish-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/node-server/publish-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/node-server/stage-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/node-server/stage-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/node-server/unpublish-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/node-server/unpublish-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/node-server/unstage-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/node-server/unstage-volume.jpg -------------------------------------------------------------------------------- 
/docs/example-images/sidecars/create-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/sidecars/create-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/sidecars/delete-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/sidecars/delete-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/sidecars/expand-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/sidecars/expand-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/sidecars/operations-sum.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/sidecars/operations-sum.jpg -------------------------------------------------------------------------------- /docs/example-images/sidecars/publish-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/sidecars/publish-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/sidecars/pvc.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/sidecars/pvc.jpg -------------------------------------------------------------------------------- /docs/example-images/sidecars/pvr.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/sidecars/pvr.jpg -------------------------------------------------------------------------------- /docs/example-images/sidecars/runtime-error.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/sidecars/runtime-error.jpg -------------------------------------------------------------------------------- /docs/example-images/sidecars/tt-create-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/sidecars/tt-create-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/sidecars/tt-delete-volume.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/sidecars/tt-delete-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/sidecars/tt-expand-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/sidecars/tt-expand-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/sidecars/tt-publish-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/sidecars/tt-publish-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/sidecars/tt-unpublish-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/sidecars/tt-unpublish-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/sidecars/unpublish-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/sidecars/unpublish-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/tracing/create-volume-continued.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/tracing/create-volume-continued.jpg -------------------------------------------------------------------------------- /docs/example-images/tracing/create-volume.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/tracing/create-volume.jpg -------------------------------------------------------------------------------- /docs/example-images/tracing/landing-page.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/82d8bfcf3a9f13dac814bc5d9fa4284b2a01fcd0/docs/example-images/tracing/landing-page.jpg -------------------------------------------------------------------------------- /docs/testing.md: -------------------------------------------------------------------------------- 1 | ## 🚀 How to Run End-to-End (e2e) Tests 2 | 3 | In order to run these e2e tests, you'll need the following: 4 | - CAPL Management Cluster 5 | - CAPL Child Test Cluster 6 | - Test Image 7 | 8 | ### 📋 Pre-requisites: Setup Development Environment 9 | 10 | Follow the steps outlined in the [development setup](./development-setup.md) to setup your development environment. 11 | 12 | ### 🏗️ Setup a CAPL Management Cluster 13 | 14 | We will be using a kind cluster and install CAPL plus various other providers. 
15 | 16 | Set up the env vars and run the following command to create a kind mgmt cluster: 17 | 18 | ```sh 19 | # Make sure to set the following env vars 20 | export LINODE_TOKEN="your linode api token" 21 | export LINODE_REGION="your preferred region" 22 | export KUBERNETES_VERSION=v1.29.1 23 | export LINODE_CONTROL_PLANE_MACHINE_TYPE=g6-standard-2 24 | export LINODE_MACHINE_TYPE=g6-standard-2 25 | 26 | devbox run mgmt-cluster 27 | ``` 28 | This will download all the necessary binaries to a local bin directory and create a local mgmt cluster. 29 | 30 | ### 📦 Build and Push Test Image 31 | 32 | If you have a PR open, GHA will build & push the image to Docker Hub and tag it with the current branch name. 33 | 34 | If you do not have a PR open, follow the steps below: 35 | - Build a docker image passing the `IMAGE_TAG` argument to the make target 36 | so a custom tag is applied. Then push the image to a public repository. 37 | 38 | > You can use any public repository that you have access to. The tags used below are just examples. 39 | 40 | ``` 41 | make docker-build IMAGE_TAG=ghcr.io/avestuk/linode-blockstorage-csi-driver:test-e2e 42 | make docker-push IMAGE_TAG=ghcr.io/avestuk/linode-blockstorage-csi-driver:test-e2e 43 | ``` 44 | 45 | ### 🔄 Setup a CAPL Child Test Cluster 46 | 47 | In order to create a test cluster, run the following command: 48 | 49 | ```sh 50 | IMAGE_NAME=ghcr.io/avestuk/linode-blockstorage-csi-driver IMAGE_VERSION=test-e2e devbox run capl-cluster 51 | ``` 52 | > You don't need to pass IMAGE_NAME and IMAGE_VERSION if you have a PR open. 53 | 54 | The above command will create a test cluster, install the CSI driver using the test image, and export the kubeconfig of the test cluster to the root directory. 55 | 56 | ### 🧪 Run E2E Tests 57 | 58 | Run the following command to run all e2e tests: 59 | 60 | ```sh 61 | devbox run e2e-test 62 | ``` 63 | This will run the chainsaw e2e tests under the `e2e/test` directory. 64 | 65 | We also label our e2e tests. The labels can be found in the `chainsaw-test.yaml` file under `metadata` in each of the individual chainsaw test directories. 66 | This allows users to select and run specific tests. 67 | For example: 68 | If you would like to run only the test that creates a LUKS volume and moves it between the control plane and worker nodes, you could run: 69 | ```sh 70 | export E2E_SELECTOR=luksmove 71 | devbox run e2e-test 72 | ``` 73 | 74 | ### 🧹 Cleanup 75 | 76 | Run the following command to clean up the test cluster: 77 | 78 | ```sh 79 | devbox run cleanup-cluster 80 | ``` 81 | *It will destroy the CAPL test cluster and the kind mgmt cluster.* 82 | -------------------------------------------------------------------------------- /docs/topology-aware-provisioning.md: -------------------------------------------------------------------------------- 1 | ## 🌐 Topology-Aware Provisioning 2 | 3 | This CSI driver supports topology-aware provisioning, optimizing volume placement based on the physical infrastructure layout. 4 | 5 | **Notes:** 6 | 7 | 1. **Volume Cloning**: Cloning only works within the same region, not across regions. 8 | 2. **Volume Migration**: We can't move volumes across regions. 9 | 3. **Remote Provisioning**: Volume provisioning is supported in remote regions (nodes or clusters outside of the region where the controller server is deployed). 10 | 11 | > [!IMPORTANT] 12 | > Make sure you are using the latest release v0.8.6+ to utilize the remote provisioning feature.
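
If you are unsure which driver version a cluster is actually running, you can inspect the controller pod's images. This is only a quick sketch; the `kube-system` namespace and the `app=csi-linode-controller` label assume a default deployment of this driver:

```sh
# Print every container image in the controller pod; one of them should be the
# linode-blockstorage-csi-driver image tagged v0.8.6 or newer.
kubectl -n kube-system get pods -l app=csi-linode-controller \
  -o jsonpath='{range .items[0].spec.containers[*]}{.image}{"\n"}{end}'
```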
13 | 14 | #### 📝 Example StorageClass and PVC 15 | 16 | ```yaml 17 | allowVolumeExpansion: true 18 | apiVersion: storage.k8s.io/v1 19 | kind: StorageClass 20 | metadata: 21 | name: linode-block-storage-wait-for-consumer 22 | provisioner: linodebs.csi.linode.com 23 | reclaimPolicy: Delete 24 | volumeBindingMode: WaitForFirstConsumer 25 | --- 26 | apiVersion: v1 27 | kind: PersistentVolumeClaim 28 | metadata: 29 | name: pvc-filesystem 30 | spec: 31 | accessModes: 32 | - ReadWriteOnce 33 | resources: 34 | requests: 35 | storage: 10Gi 36 | storageClassName: linode-block-storage-wait-for-consumer 37 | ``` 38 | 39 | > **Important**: The `volumeBindingMode: WaitForFirstConsumer` setting is crucial for topology-aware provisioning. It delays volume binding and creation until a pod using the PVC is scheduled. This allows the system to consider the pod's scheduling requirements and node assignment when selecting the most appropriate storage location, ensuring optimal data locality and performance. 40 | 41 | #### 🖥️ Example Pod 42 | 43 | ```yaml 44 | apiVersion: v1 45 | kind: Pod 46 | metadata: 47 | name: e2e-pod 48 | spec: 49 | nodeSelector: 50 | topology.linode.com/region: us-ord 51 | tolerations: 52 | - key: "node-role.kubernetes.io/control-plane" 53 | operator: "Exists" 54 | effect: "NoSchedule" 55 | containers: 56 | - name: e2e-pod 57 | image: ubuntu 58 | command: 59 | - sleep 60 | - "1000000" 61 | volumeMounts: 62 | - mountPath: /data 63 | name: csi-volume 64 | volumes: 65 | - name: csi-volume 66 | persistentVolumeClaim: 67 | claimName: pvc-filesystem 68 | ``` 69 | 70 | This example demonstrates how to set up topology-aware provisioning using the Linode Block Storage CSI Driver. The StorageClass defines the provisioner and reclaim policy, while the PersistentVolumeClaim requests storage from this class. The Pod specification shows how to use the PVC and includes a node selector for region-specific deployment. 71 | 72 | > [!IMPORTANT] 73 | > To enable topology-aware provisioning, make sure to pass the following argument to the csi-provisioner sidecar: 74 | > ``` 75 | > --feature-gates=CSINodeInfo=true 76 | > ``` 77 | > This enables the CSINodeInfo feature gate, which is required for topology-aware provisioning to function correctly. 78 | > 79 | > Note: This feature is enabled by default in release v0.8.6 and later versions. 80 | 81 | #### Provisioning Process 82 | 83 | 1. CO (Kubernetes) determines required topology based on application needs (pod scheduled region) and cluster layout. 84 | 2. external-provisioner gathers topology requirements from CO and includes `TopologyRequirement` in `CreateVolume` call. 85 | 3. CSI driver creates volume satisfying topology requirements. 86 | 4. Driver returns actual topology of created volume. 87 | 88 | By leveraging topology-aware provisioning, CSI drivers ensure optimal volume placement within the infrastructure, improving performance, availability, and data locality. 89 | -------------------------------------------------------------------------------- /docs/usage.md: -------------------------------------------------------------------------------- 1 | ## 💡 Example Usage 2 | 3 | This repository contains example manifests that demonstrate the usage of the Linode BlockStorage CSI Driver. These manifests create a PersistentVolumeClaim (PVC) using the `linode-block-storage-retain` storage class and then consume it in a minimal pod. 
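
For orientation, the PVC in those manifests looks roughly like the following sketch (illustrative only; the linked manifests are the canonical versions, and the requested size here is just an example):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-example-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: linode-block-storage-retain
```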
4 | 5 | You can find more example manifests [here](https://github.com/linode/linode-blockstorage-csi-driver/tree/main/internal/driver/examples/kubernetes). 6 | 7 | ### Creating a PersistentVolumeClaim 8 | 9 | ```sh 10 | kubectl create -f https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/master/internal/driver/examples/kubernetes/csi-pvc.yaml 11 | kubectl create -f https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/master/internal/driver/examples/kubernetes/csi-app.yaml 12 | ``` 13 | 14 | **Verify the Pod and PVC:** 15 | 16 | ```sh 17 | kubectl get pvc/csi-example-pvc pods/csi-example-pod 18 | kubectl describe pvc/csi-example-pvc pods/csi-example-pod 19 | ``` 20 | 21 | **Persist Data Example:** 22 | 23 | 1. **Write Data:** 24 | 25 | ```sh 26 | kubectl exec -it csi-example-pod -- /bin/sh -c "echo persistence > /data/example.txt; ls -l /data" 27 | ``` 28 | 29 | 2. **Delete and Recreate Pod:** 30 | 31 | ```sh 32 | kubectl delete pods/csi-example-pod 33 | kubectl create -f https://raw.githubusercontent.com/linode/linode-blockstorage-csi-driver/master/pkg/linode-bs/examples/kubernetes/csi-app.yaml 34 | ``` 35 | 36 | 3. **Verify Data Persistence:** 37 | 38 | ```sh 39 | sleep 30 40 | kubectl exec -it csi-example-pod -- /bin/sh -c "ls -l /data; cat /data/example.txt" 41 | ``` 42 | -------------------------------------------------------------------------------- /docs/volume-tags.md: -------------------------------------------------------------------------------- 1 | ## 🏷️ Adding Tags to Created Volumes 2 | 3 | Add tags to volumes for better tracking by specifying the `linodebs.csi.linode.com/volumeTags` parameter. 4 | 5 | #### 🔑 Example StorageClass with Tags 6 | 7 | ```yaml 8 | allowVolumeExpansion: true 9 | apiVersion: storage.k8s.io/v1 10 | kind: StorageClass 11 | metadata: 12 | annotations: 13 | storageclass.kubernetes.io/is-default-class: "true" 14 | name: linode-block-storage 15 | namespace: kube-system 16 | provisioner: linodebs.csi.linode.com 17 | parameters: 18 | linodebs.csi.linode.com/volumeTags: "foo, bar" 19 | ``` 20 | -------------------------------------------------------------------------------- /envrc.example: -------------------------------------------------------------------------------- 1 | # Automatically sets up your devbox environment whenever you cd into this 2 | # directory via our direnv integration: 3 | 4 | eval "$(devbox generate direnv --print-envrc)" 5 | 6 | # check out https://www.jetpack.io/devbox/docs/ide_configuration/direnv/ 7 | # for more details 8 | 9 | ################################################ 10 | # Set Linode API related env vars 11 | ################################################ 12 | export LINODE_TOKEN="" 13 | export LINODE_REGION="" # us-ord 14 | export LINODE_CONTROL_PLANE_MACHINE_TYPE="" # g6-standard-2 15 | export LINODE_MACHINE_TYPE="" # g6-standard-2 16 | 17 | ######################## 18 | # For the dev/testing 19 | ######################## 20 | 21 | # Sets the username/imagename on dockerhub for local development 22 | export IMAGE_NAME="" # user/imagename eg: linode/linode-blockstorage-csi-driver 23 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/linode/linode-blockstorage-csi-driver 2 | 3 | go 1.24.0 4 | 5 | toolchain go1.24.1 6 | 7 | require ( 8 | github.com/container-storage-interface/spec v1.11.0 9 | github.com/go-logr/logr v1.4.3 10 | 
github.com/golang/mock v1.6.0 11 | github.com/google/uuid v1.6.0 12 | github.com/ianschenck/envflag v0.0.0-20140720210342-9111d830d133 13 | github.com/jaypipes/ghw v0.16.0 14 | github.com/linode/go-metadata v0.2.2 15 | github.com/linode/linodego v1.52.1 16 | github.com/martinjungblut/go-cryptsetup v0.0.0-20220520180014-fd0874fd07a6 17 | github.com/prometheus/client_golang v1.22.0 18 | github.com/stretchr/testify v1.10.0 19 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 20 | go.opentelemetry.io/otel v1.36.0 21 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 22 | go.opentelemetry.io/otel/sdk v1.36.0 23 | go.opentelemetry.io/otel/trace v1.36.0 24 | go.uber.org/automaxprocs v1.6.0 25 | go.uber.org/mock v0.5.2 26 | golang.org/x/net v0.40.0 27 | golang.org/x/sys v0.33.0 28 | google.golang.org/grpc v1.72.2 29 | google.golang.org/protobuf v1.36.6 30 | k8s.io/api v0.33.1 31 | k8s.io/apimachinery v0.33.1 32 | k8s.io/client-go v0.33.1 33 | k8s.io/klog/v2 v2.130.1 34 | k8s.io/mount-utils v0.33.1 35 | k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 36 | ) 37 | 38 | require ( 39 | github.com/StackExchange/wmi v1.2.1 // indirect 40 | github.com/beorn7/perks v1.0.1 // indirect 41 | github.com/cenkalti/backoff/v5 v5.0.2 // indirect 42 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 43 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 44 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect 45 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect 46 | github.com/go-logr/stdr v1.2.2 // indirect 47 | github.com/go-ole/go-ole v1.2.6 // indirect 48 | github.com/go-openapi/jsonpointer v0.21.0 // indirect 49 | github.com/go-openapi/jsonreference v0.20.2 // indirect 50 | github.com/go-openapi/swag v0.23.0 // indirect 51 | github.com/go-resty/resty/v2 v2.16.5 // indirect 52 | github.com/gogo/protobuf v1.3.2 // indirect 53 | github.com/google/gnostic-models v0.6.9 // indirect 54 | github.com/google/go-cmp v0.7.0 // indirect 55 | github.com/google/go-querystring v1.1.0 // indirect 56 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect 57 | github.com/jaypipes/pcidb v1.0.1 // indirect 58 | github.com/josharian/intern v1.0.0 // indirect 59 | github.com/json-iterator/go v1.1.12 // indirect 60 | github.com/mailru/easyjson v0.7.7 // indirect 61 | github.com/mitchellh/go-homedir v1.1.0 // indirect 62 | github.com/moby/sys/mountinfo v0.7.2 // indirect 63 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 64 | github.com/modern-go/reflect2 v1.0.2 // indirect 65 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 66 | github.com/pkg/errors v0.9.1 // indirect 67 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 68 | github.com/prometheus/client_model v0.6.1 // indirect 69 | github.com/prometheus/common v0.62.0 // indirect 70 | github.com/prometheus/procfs v0.15.1 // indirect 71 | github.com/x448/float16 v0.8.4 // indirect 72 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 73 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect 74 | go.opentelemetry.io/otel/metric v1.36.0 // indirect 75 | go.opentelemetry.io/proto/otlp v1.6.0 // indirect 76 | golang.org/x/oauth2 v0.30.0 // indirect 77 | golang.org/x/term v0.32.0 // indirect 78 | golang.org/x/text v0.25.0 // indirect 79 | golang.org/x/time v0.9.0 // indirect 80 | google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect 81 | 
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect 82 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 83 | gopkg.in/inf.v0 v0.9.1 // indirect 84 | gopkg.in/ini.v1 v1.66.6 // indirect 85 | gopkg.in/yaml.v3 v3.0.1 // indirect 86 | howett.net/plist v1.0.0 // indirect 87 | k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect 88 | sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect 89 | sigs.k8s.io/randfill v1.0.0 // indirect 90 | sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect 91 | sigs.k8s.io/yaml v1.4.0 // indirect 92 | ) 93 | -------------------------------------------------------------------------------- /hack/fetch-manifests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euf -o pipefail 3 | 4 | manifest_directory=$(cd "${0%/*}/../deploy/kubernetes/sidecars"; pwd) 5 | 6 | function fetch_manifest { 7 | local source_file=$1 8 | local target_file=$2 9 | printf "# xref: %s\n\n" $source_file > $target_file 10 | wget "$source_file" -O - >> $target_file 11 | } 12 | 13 | function external_provisioner { 14 | local version=$1 15 | local source_directory="https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/release-$version/deploy/kubernetes" 16 | local target_directory="$manifest_directory/external-provisioner" 17 | fetch_manifest "$source_directory/rbac.yaml" "$target_directory/rbac.yaml" 18 | } 19 | function external_attacher { 20 | local version=$1 21 | local source_directory="https://raw.githubusercontent.com/kubernetes-csi/external-attacher/release-$version/deploy/kubernetes" 22 | local target_directory="$manifest_directory/external-attacher" 23 | fetch_manifest "$source_directory/rbac.yaml" "$target_directory/rbac.yaml" 24 | } 25 | 26 | function external_resizer { 27 | local version=$1 28 | local source_directory="https://raw.githubusercontent.com/kubernetes-csi/external-resizer/v$version/deploy/kubernetes" 29 | local target_directory="$manifest_directory/external-resizer" 30 | fetch_manifest "$source_directory/rbac.yaml" "$target_directory/rbac.yaml" 31 | } 32 | 33 | external_provisioner "1.6" 34 | external_attacher "2.2" 35 | external_resizer "1.0.1" 36 | -------------------------------------------------------------------------------- /hack/generate-yaml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | set -o pipefail 3 | # Generate manifests for deployment on Kubernetes 4 | 5 | # A tag name _must_ be supplied as the first argument 6 | TAG="${1}" 7 | if [[ -z "${TAG}" ]]; then 8 | echo "Tag name to release must be supplied as the first argument" 9 | echo "e.g. 
$ hack/release-yaml.sh v1.0.0" 10 | exit 1 11 | fi 12 | 13 | # An image name may be supplied as the second argument in order to pass a custom image name 14 | IMAGE_NAME=${2:-"linode/linode-blockstorage-csi-driver"} 15 | if [[ -z "${2}" ]]; then 16 | echo "Image name not supplied" >&2 17 | echo "default to $ hack/release-yaml.sh ${TAG} linode/linode-blockstorage-csi-driver" >&2 18 | fi 19 | 20 | cd $(dirname "$0")/../ 21 | file=./deploy/kubernetes/overlays/release/kustomization.yaml 22 | CSI_VERSION=$TAG CSI_IMAGE_NAME=$IMAGE_NAME envsubst <"$file.template" >$file 23 | 24 | kustomize build "$(dirname $file)" 25 | -------------------------------------------------------------------------------- /hack/install-grafana.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euf -o pipefail 4 | 5 | # Default Values 6 | DEFAULT_GRAFANA_PORT=3000 7 | DEFAULT_GRAFANA_USERNAME="admin" 8 | DEFAULT_GRAFANA_PASSWORD="admin" 9 | 10 | # Configuration Variables (Environment Variables) 11 | GRAFANA_PORT=${GRAFANA_PORT:-$DEFAULT_GRAFANA_PORT} 12 | GRAFANA_USERNAME=${GRAFANA_USERNAME:-$DEFAULT_GRAFANA_USERNAME} 13 | GRAFANA_PASSWORD=${GRAFANA_PASSWORD:-$DEFAULT_GRAFANA_PASSWORD} 14 | NAMESPACE="monitoring" 15 | 16 | # Validate Grafana Port 17 | if ! [[ "$GRAFANA_PORT" =~ ^[0-9]+$ ]]; then 18 | echo "Error: Grafana port must be a number." 19 | exit 1 20 | fi 21 | 22 | # Determine if worker nodes exist 23 | echo "Checking for worker nodes..." 24 | 25 | # Get all nodes 26 | ALL_NODES=$(kubectl get nodes --no-headers -o custom-columns=NAME:.metadata.name) 27 | 28 | # Get control-plane nodes (assuming 'control-plane' in the name) 29 | CONTROL_PLANE_NODES=$(echo "${ALL_NODES}" | grep "control-plane" || true) 30 | 31 | # Get worker nodes (nodes not containing 'control-plane' in the name) 32 | WORKER_NODES=$(echo "${ALL_NODES}" | grep -v "control-plane" || true) 33 | 34 | if [ -z "${WORKER_NODES}" ]; then 35 | echo "No worker nodes found. Untainting control-plane node(s) to allow scheduling of Grafana pods..." 36 | for NODE in ${CONTROL_PLANE_NODES}; do 37 | echo "Untainting node: ${NODE}" 38 | kubectl taint nodes "${NODE}" node-role.kubernetes.io/control-plane:NoSchedule- || true 39 | done 40 | else 41 | echo "Worker nodes detected. Grafana will be installed on them." 42 | fi 43 | 44 | # Add Helm repositories if not already added 45 | echo "Adding Helm repository for Grafana..." 46 | helm repo add grafana https://grafana.github.io/helm-charts || true 47 | 48 | # Update Helm repositories 49 | echo "Updating Helm repositories..." 50 | helm repo update 51 | 52 | # Install or Upgrade Grafana with Prometheus data source 53 | echo "Installing or upgrading Grafana with Prometheus data source..." 
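# Note on the flags below:
# - the sidecar.dashboards.* settings make Grafana's sidecar watch for ConfigMaps
#   labelled `grafana_dashboard` and load them as dashboards (hack/setup-dashboard.sh
#   applies that label to the dashboard ConfigMap it creates).
# - the datasources.* settings pre-provision a default Prometheus datasource pointing
#   at the prometheus-server Service installed by hack/install-prometheus.sh in the
#   same namespace.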
54 | helm upgrade --install grafana grafana/grafana \ 55 | --namespace ${NAMESPACE} \ 56 | --set adminUser="${GRAFANA_USERNAME}" \ 57 | --set adminPassword="${GRAFANA_PASSWORD}" \ 58 | --set service.type=NodePort \ 59 | --set service.port="${GRAFANA_PORT}" \ 60 | --set sidecar.dashboards.enabled=true \ 61 | --set sidecar.dashboards.label=grafana_dashboard \ 62 | --set datasources."datasources\.yaml".apiVersion=1 \ 63 | --set datasources."datasources\.yaml".datasources[0].name=Prometheus \ 64 | --set datasources."datasources\.yaml".datasources[0].type=prometheus \ 65 | --set datasources."datasources\.yaml".datasources[0].url=http://prometheus-server.${NAMESPACE}.svc.cluster.local:80 \ 66 | --set datasources."datasources\.yaml".datasources[0].access=proxy \ 67 | --set datasources."datasources\.yaml".datasources[0].isDefault=true \ 68 | --wait \ 69 | --timeout=10m0s \ 70 | --debug 71 | 72 | echo "Grafana installation and configuration completed successfully." 73 | 74 | exit 0 75 | -------------------------------------------------------------------------------- /hack/install-prometheus.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euf -o pipefail 4 | 5 | # Default Values 6 | DEFAULT_DATA_RETENTION_PERIOD="15d" 7 | 8 | # Configuration Variables (Environment Variables) 9 | DATA_RETENTION_PERIOD=${DATA_RETENTION_PERIOD:-$DEFAULT_DATA_RETENTION_PERIOD} 10 | NAMESPACE="monitoring" 11 | 12 | # Determine if worker nodes exist 13 | echo "Checking for worker nodes..." 14 | 15 | # Get all nodes 16 | ALL_NODES=$(kubectl get nodes --no-headers -o custom-columns=NAME:.metadata.name) 17 | 18 | # Get control-plane nodes (assuming 'control-plane' in the name) 19 | CONTROL_PLANE_NODES=$(echo "${ALL_NODES}" | grep "control-plane" || true) 20 | 21 | # Get worker nodes (nodes not containing 'control-plane' in the name) 22 | WORKER_NODES=$(echo "${ALL_NODES}" | grep -v "control-plane" || true) 23 | 24 | if [ -z "${WORKER_NODES}" ]; then 25 | echo "No worker nodes found. Untainting control-plane node(s) to allow scheduling of Prometheus pods..." 26 | for NODE in ${CONTROL_PLANE_NODES}; do 27 | echo "Untainting node: ${NODE}" 28 | kubectl taint nodes "${NODE}" node-role.kubernetes.io/control-plane:NoSchedule- || true 29 | done 30 | else 31 | echo "Worker nodes detected. Prometheus will be installed on them." 32 | fi 33 | 34 | # Add Helm repositories if not already added 35 | echo "Adding Helm repository for Prometheus..." 36 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts || true 37 | 38 | # Update Helm repositories 39 | echo "Updating Helm repositories..." 40 | helm repo update 41 | 42 | # Create a namespace for monitoring tools 43 | echo "Creating namespace '${NAMESPACE}'..." 44 | kubectl create namespace ${NAMESPACE} --dry-run=client -o yaml | kubectl apply -f - 45 | 46 | # Install or Upgrade Prometheus 47 | echo "Installing or upgrading Prometheus..." 48 | helm upgrade --install prometheus prometheus-community/prometheus \ 49 | --namespace ${NAMESPACE} \ 50 | --set server.persistentVolume.enabled=false \ 51 | --set server.retention="${DATA_RETENTION_PERIOD}" \ 52 | --wait \ 53 | --timeout=10m0s \ 54 | --debug 55 | 56 | echo "Prometheus installation and configuration completed successfully." 
57 | 58 | exit 0 59 | -------------------------------------------------------------------------------- /hack/release-yaml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | set -o pipefail 3 | # Generate manifests for deployment on Kubernetes 4 | 5 | # A tag name _must_ be supplied as the first argument 6 | TAG="${1}" 7 | if [[ -z "${TAG}" ]]; then 8 | echo "Tag name to release must be supplied as the first argument" 9 | echo "e.g. $ hack/release-yaml.sh v1.0.0" 10 | exit 1 11 | fi 12 | 13 | RELEASES="internal/driver/deploy/releases" 14 | TAGGED_RELEASE="linode-blockstorage-csi-driver-${TAG}.yaml" 15 | GENERIC_RELEASE="linode-blockstorage-csi-driver.yaml" 16 | 17 | $(dirname "$0")/generate-yaml.sh "$1" > "${RELEASES}/${TAGGED_RELEASE}" 18 | 19 | # Create generic manifest from tagged release manifest 20 | cp "${RELEASES}/${TAGGED_RELEASE}" "${RELEASES}/${GENERIC_RELEASE}" 21 | -------------------------------------------------------------------------------- /hack/setup-dashboard.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euf -o pipefail 4 | 5 | # Default Values 6 | DEFAULT_NAMESPACE="monitoring" 7 | DEFAULT_DASHBOARD_FILE="observability/metrics/dashboard.json" 8 | DEFAULT_LB_FILE="observability/metrics/loadBalancer.yaml" 9 | 10 | # Function to display usage 11 | usage() { 12 | echo "Usage: $0 --namespace= --dashboard-file= --lb-file=" 13 | exit 1 14 | } 15 | 16 | # Parse command-line arguments 17 | for arg in "$@" 18 | do 19 | case $arg in 20 | --namespace=*) 21 | NAMESPACE="${arg#*=}" 22 | shift 23 | ;; 24 | --dashboard-file=*) 25 | DASHBOARD_FILE="${arg#*=}" 26 | shift 27 | ;; 28 | --lb-file=*) 29 | LB_FILE="${arg#*=}" 30 | shift 31 | ;; 32 | *) 33 | usage 34 | ;; 35 | esac 36 | done 37 | 38 | # Set default values if not provided 39 | NAMESPACE=${NAMESPACE:-$DEFAULT_NAMESPACE} 40 | DASHBOARD_FILE=${DASHBOARD_FILE:-$DEFAULT_DASHBOARD_FILE} 41 | LB_FILE=${LB_FILE:-$DEFAULT_LB_FILE} 42 | 43 | # Function to retrieve Grafana LoadBalancer External IP 44 | get_grafana_lb_ip() { 45 | kubectl get svc grafana-lb -n ${NAMESPACE} -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 46 | } 47 | 48 | # Ensure the namespace exists 49 | if ! kubectl get namespace "${NAMESPACE}" > /dev/null 2>&1; then 50 | echo "Namespace '${NAMESPACE}' does not exist. Creating..." 51 | kubectl create namespace "${NAMESPACE}" 52 | else 53 | echo "Namespace '${NAMESPACE}' already exists." 54 | fi 55 | 56 | # Validate that the dashboard file exists 57 | if [[ ! -f "${DASHBOARD_FILE}" ]]; then 58 | echo "Error: Dashboard file '${DASHBOARD_FILE}' does not exist." 59 | exit 1 60 | fi 61 | 62 | # Validate that the LoadBalancer YAML file exists 63 | if [[ ! -f "${LB_FILE}" ]]; then 64 | echo "Error: LoadBalancer file '${LB_FILE}' does not exist." 65 | exit 1 66 | fi 67 | 68 | # Delete the existing ConfigMap if it exists 69 | echo "Deleting existing Grafana dashboard ConfigMap if it exists..." 70 | kubectl delete configmap grafana-dashboard \ 71 | --namespace ${NAMESPACE} \ 72 | --ignore-not-found 73 | 74 | # Create or update the ConfigMap containing the dashboard JSON from the local file 75 | echo "Creating or updating Grafana dashboard ConfigMap from local file..." 
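# `create --dry-run=client -o yaml | kubectl apply -f -` renders the ConfigMap manifest
# client-side and applies it: an idempotent create-or-update pattern that works whether
# or not the ConfigMap already exists.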
76 | kubectl create configmap grafana-dashboard \ 77 | --from-file=dashboard.json=${DASHBOARD_FILE} \ 78 | --namespace ${NAMESPACE} \ 79 | --dry-run=client -o yaml | kubectl apply -f - 80 | 81 | # Add the label to the ConfigMap 82 | kubectl label configmap grafana-dashboard \ 83 | --namespace ${NAMESPACE} \ 84 | grafana_dashboard=1 --overwrite 85 | 86 | # Apply the LoadBalancer YAML file to create the LoadBalancer service 87 | echo "Applying LoadBalancer service from file..." 88 | kubectl apply -f ${LB_FILE} --namespace ${NAMESPACE} 89 | 90 | # Wait for the LoadBalancer to get an external IP 91 | echo "Waiting for LoadBalancer to get an external IP..." 92 | EXTERNAL_IP="" 93 | while [[ -z "$EXTERNAL_IP" ]]; do 94 | EXTERNAL_IP=$(get_grafana_lb_ip) 95 | if [[ -z "$EXTERNAL_IP" ]]; then 96 | echo "Waiting for LoadBalancer external IP..." 97 | sleep 10 98 | fi 99 | done 100 | 101 | # Output the Grafana dashboard access URL 102 | echo "------------------------------------------------------------" 103 | echo "Grafana Dashboard Setup Complete!" 104 | echo "Access Grafana using the following URL:" 105 | echo " - http://${EXTERNAL_IP}" 106 | echo "" 107 | echo "Grafana Admin Credentials:" 108 | echo " - Username: ${GRAFANA_USERNAME:-admin}" 109 | echo " - Password: ${GRAFANA_PASSWORD:-admin}" 110 | echo "------------------------------------------------------------" 111 | -------------------------------------------------------------------------------- /hack/setup-tracing.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euf -o pipefail 4 | 5 | # Default Values 6 | NAMESPACE="kube-system" 7 | RETRIES=5 8 | TRACING_FILES=("observability/tracing/otel-configmap.yaml" 9 | "observability/tracing/otel-deployment.yaml" 10 | "observability/tracing/otel-service.yaml" 11 | "observability/tracing/jager-deployment.yaml" 12 | "observability/tracing/jager-service.yaml") 13 | 14 | # Ensure namespace exists 15 | if ! kubectl get namespace "${NAMESPACE}" > /dev/null 2>&1; then 16 | echo "Namespace '${NAMESPACE}' does not exist. Creating..." 17 | kubectl create namespace "${NAMESPACE}" 18 | else 19 | echo "Namespace '${NAMESPACE}' already exists." 20 | fi 21 | 22 | # Apply each file 23 | echo "Applying tracing YAML files..." 24 | for file in "${TRACING_FILES[@]}"; do 25 | if [[ -f "$file" ]]; then 26 | echo "Applying $file..." 27 | kubectl apply -f "$file" --namespace ${NAMESPACE} 28 | else 29 | echo "Error: File $file not found. Exiting..." 30 | exit 1 31 | fi 32 | done 33 | 34 | # Retrieve and print the Jaeger LoadBalancer IP 35 | get_jaeger_lb_ip() { 36 | kubectl get svc jaeger-collector -n ${NAMESPACE} -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "" 37 | } 38 | 39 | echo "Waiting for Jaeger LoadBalancer to get an external IP..." 40 | EXTERNAL_IP="" 41 | attempt=0 42 | while [[ -z "$EXTERNAL_IP" && $attempt -lt $RETRIES ]]; do 43 | EXTERNAL_IP=$(get_jaeger_lb_ip) 44 | if [[ -z "$EXTERNAL_IP" ]]; then 45 | echo "Attempt $((attempt + 1))/$RETRIES: Waiting for LoadBalancer external IP..." 46 | attempt=$((attempt + 1)) 47 | sleep 10 48 | fi 49 | done 50 | 51 | if [[ -z "$EXTERNAL_IP" ]]; then 52 | echo "Error: Failed to retrieve Jaeger LoadBalancer external IP after $RETRIES attempts. Exiting..." 53 | exit 1 54 | fi 55 | 56 | echo "------------------------------------------------------------" 57 | echo "Jaeger Dashboard Setup Complete!" 
58 | echo "Access Jaeger using the following URL:" 59 | echo " - http://${EXTERNAL_IP}:16686" 60 | echo "------------------------------------------------------------" 61 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: linode-blockstorage-csi-driver 3 | description: The Container Storage Interface (CSI) Driver for Linode Block Storage enables container orchestrators such as Kubernetes to manage the life-cycle of persistant storage claims. 4 | type: application 5 | version: 0.0.0 6 | home: https://github.com/linode/linode-blockstorage-csi-driver 7 | appVersion: "latest" 8 | maintainers: 9 | - name: linode 10 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "csi-driver.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "csi-driver.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "csi-driver.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "csi-driver.labels" -}} 37 | helm.sh/chart: {{ include "csi-driver.chart" . }} 38 | {{ include "csi-driver.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "csi-driver.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "csi-driver.name" . 
}} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "csi-driver.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "csi-driver.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/csi-controller-attacher-binding-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: csi-controller-attacher-binding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: external-attacher-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: csi-controller-sa 12 | namespace: {{ required ".Values.namespace required" .Values.namespace }} 13 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/csi-controller-provisioner-binding-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: csi-controller-provisioner-binding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: external-provisioner-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: csi-controller-sa 12 | namespace: {{ required ".Values.namespace required" .Values.namespace }} 13 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/csi-controller-resizer-binding-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: csi-controller-resizer-binding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: external-resizer-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: csi-controller-sa 12 | namespace: {{ required ".Values.namespace required" .Values.namespace }} 13 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/csi-controller-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: csi-controller-sa 5 | namespace: {{ required ".Values.namespace required" .Values.namespace }} 6 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/csi-linode-controller-metrics.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.enableMetrics}} 2 | --- 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: csi-provisioner-metrics 7 | namespace: kube-system 8 | labels: 9 | app: csi-linode-controller 10 | service: csi-provisioner-metrics 11 | annotations: 12 | prometheus.io/scrape: 'true' 13 | prometheus.io/port: '10248' 14 | prometheus.io/path: '/metrics' 15 | spec: 16 | selector: 17 | app: csi-linode-controller 18 | ports: 19 | - name: provisioner-metrics-port 20 | port: 10248 21 | targetPort: 10248 22 | protocol: TCP 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 
| name: csi-attacher-metrics 28 | namespace: kube-system 29 | labels: 30 | app: csi-linode-controller 31 | service: csi-attacher-metrics 32 | annotations: 33 | prometheus.io/scrape: 'true' 34 | prometheus.io/port: '10249' 35 | prometheus.io/path: '/metrics' 36 | spec: 37 | selector: 38 | app: csi-linode-controller 39 | ports: 40 | - name: attacher-metrics-port 41 | port: 10249 42 | targetPort: 10249 43 | protocol: TCP 44 | --- 45 | apiVersion: v1 46 | kind: Service 47 | metadata: 48 | name: csi-resizer-metrics 49 | namespace: kube-system 50 | labels: 51 | app: csi-linode-controller 52 | service: csi-resizer-metrics 53 | annotations: 54 | prometheus.io/scrape: 'true' 55 | prometheus.io/port: '10250' 56 | prometheus.io/path: '/metrics' 57 | spec: 58 | selector: 59 | app: csi-linode-controller 60 | ports: 61 | - name: resizer-metrics-port 62 | port: 10250 63 | targetPort: 10250 64 | protocol: TCP 65 | --- 66 | apiVersion: v1 67 | kind: Service 68 | metadata: 69 | name: csi-node-server-metrics 70 | namespace: kube-system 71 | labels: 72 | app: csi-linode-node 73 | service: csi-node-server-metrics 74 | annotations: 75 | prometheus.io/scrape: 'true' 76 | prometheus.io/port: {{ .Values.metricsPort | quote }} 77 | prometheus.io/path: '/metrics' 78 | spec: 79 | selector: 80 | app: csi-linode-node 81 | ports: 82 | - name: node-server-metrics-port 83 | port: {{ .Values.metricsPort }} 84 | targetPort: {{ .Values.metricsPort }} 85 | protocol: TCP 86 | {{- end }} 87 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/csi-node-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: csi-node-sa 5 | namespace: {{ required ".Values.namespace required" .Values.namespace }} 6 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/csi-secrets.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.secretRef }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: linode 6 | namespace: {{ required ".Values.namespace required" .Values.namespace }} 7 | stringData: 8 | token: {{ required ".Values.apiToken required" .Values.apiToken }} 9 | region: {{ required ".Values.region required" .Values.region }} 10 | type: Opaque 11 | {{- end }} 12 | 13 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/external-attacher-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: external-attacher-role 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - persistentvolumes 10 | verbs: 11 | - get 12 | - list 13 | - watch 14 | - update 15 | - patch 16 | - apiGroups: 17 | - storage.k8s.io 18 | resources: 19 | - csinodes 20 | verbs: 21 | - get 22 | - list 23 | - watch 24 | - apiGroups: 25 | - storage.k8s.io 26 | resources: 27 | - volumeattachments 28 | - volumeattachments/status 29 | verbs: 30 | - get 31 | - list 32 | - watch 33 | - update 34 | - patch 35 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/external-provisioner-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 
| metadata: 4 | name: external-provisioner-role 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - persistentvolumes 10 | verbs: 11 | - get 12 | - list 13 | - watch 14 | - create 15 | - delete 16 | - apiGroups: 17 | - "" 18 | resources: 19 | - persistentvolumeclaims 20 | verbs: 21 | - get 22 | - list 23 | - watch 24 | - update 25 | - apiGroups: 26 | - storage.k8s.io 27 | resources: 28 | - storageclasses 29 | verbs: 30 | - get 31 | - list 32 | - watch 33 | - apiGroups: 34 | - "" 35 | resources: 36 | - events 37 | verbs: 38 | - list 39 | - watch 40 | - create 41 | - update 42 | - patch 43 | - apiGroups: 44 | - snapshot.storage.k8s.io 45 | resources: 46 | - volumesnapshots 47 | verbs: 48 | - get 49 | - list 50 | - apiGroups: 51 | - snapshot.storage.k8s.io 52 | resources: 53 | - volumesnapshotcontents 54 | verbs: 55 | - get 56 | - list 57 | - apiGroups: 58 | - storage.k8s.io 59 | resources: 60 | - csinodes 61 | verbs: 62 | - get 63 | - list 64 | - watch 65 | - apiGroups: 66 | - "" 67 | resources: 68 | - nodes 69 | verbs: 70 | - get 71 | - list 72 | - watch 73 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/external-resizer-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: external-resizer-role 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - persistentvolumes 10 | verbs: 11 | - get 12 | - list 13 | - watch 14 | - patch 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - persistentvolumeclaims 19 | verbs: 20 | - get 21 | - list 22 | - watch 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - pods 27 | verbs: 28 | - get 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - persistentvolumeclaims/status 35 | verbs: 36 | - patch 37 | - apiGroups: 38 | - "" 39 | resources: 40 | - events 41 | verbs: 42 | - list 43 | - watch 44 | - create 45 | - update 46 | - patch 47 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/linode-block-storage-retain.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: linode-block-storage-retain 5 | namespace: {{ required ".Values.namespace required" .Values.namespace }} 6 | {{- if eq .Values.defaultStorageClass "linode-block-storage-retain" }} 7 | annotations: 8 | storageclass.kubernetes.io/is-default-class: "true" 9 | {{- end }} 10 | {{- if .Values.volumeTags }} 11 | parameters: 12 | linodebs.csi.linode.com/volumeTags: {{ join "," .Values.volumeTags }} 13 | {{- end}} 14 | allowVolumeExpansion: true 15 | provisioner: linodebs.csi.linode.com 16 | reclaimPolicy: Retain 17 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/linode-block-storage.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: linode-block-storage 5 | namespace: {{ required ".Values.namespace required" .Values.namespace }} 6 | {{- if eq .Values.defaultStorageClass "linode-block-storage" }} 7 | annotations: 8 | storageclass.kubernetes.io/is-default-class: "true" 9 | {{- end }} 10 | {{- if .Values.volumeTags }} 11 | parameters: 12 | linodebs.csi.linode.com/volumeTags: {{ join "," .Values.volumeTags }} 13 | {{- end}} 14 | 
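# For illustration (tag values are made up): with values such as
#   volumeTags:
#     - team-a
#     - prod
# the parameters block above renders as
#   parameters:
#     linodebs.csi.linode.com/volumeTags: team-a,prod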
allowVolumeExpansion: true 15 | provisioner: linodebs.csi.linode.com 16 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/linode-csi-binding-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: linode-csi-binding 5 | namespace: {{ required ".Values.namespace required" .Values.namespace }} 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: linode-csi-role 10 | subjects: 11 | - kind: ServiceAccount 12 | name: csi-node-sa 13 | namespace: {{ required ".Values.namespace required" .Values.namespace }} 14 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/linode-csi-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: linode-csi-role 5 | namespace: {{ required ".Values.namespace required" .Values.namespace }} 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - events 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - apiGroups: 19 | - "" 20 | resources: 21 | - nodes 22 | verbs: 23 | - get 24 | - list 25 | - watch 26 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/templates/linodebs.csi.linode.com.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: CSIDriver 3 | metadata: 4 | name: linodebs.csi.linode.com 5 | spec: 6 | attachRequired: true 7 | podInfoOnMount: true 8 | -------------------------------------------------------------------------------- /helm-chart/csi-driver/values.yaml: -------------------------------------------------------------------------------- 1 | # Secrets: 2 | # apiToken [Required if secretRef is not set] - Must be a Linode APIv4 Personal Access Token with all permissions. (https://cloud.linode.com/profile/tokens) 3 | apiToken: "" 4 | 5 | # region [Required if secretRef is not set] - Must be a Linode region. (https://api.linode.com/v4/regions) 6 | region: "" 7 | 8 | # enableMetrics: This variable must be set to true to get metrics 9 | enableMetrics: false 10 | 11 | # default metrics address port 12 | metricsPort: 8081 13 | 14 | # enableTracing: This variable must be set to true to get metrics 15 | enableTracing: false 16 | 17 | # default tracing address port 18 | tracingPort: 4318 19 | 20 | # (OPTIONAL) Label prefix for the Linode Block Storage volumes created by this driver. 21 | volumeLabelPrefix: "" 22 | 23 | # Default namespace is "kube-system" but it can be set to another namespace 24 | namespace: kube-system 25 | 26 | # Add a kubelet path variable to be used across containers for alternate k8s distros (e.g K0s, K3s) 27 | kubeletPath: "/var/lib/kubelet" 28 | 29 | # Set these values if your APIToken and region are already present in a k8s secret. 
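# For example, such a secret could be created ahead of time with placeholder
# values (the token and region below are not real):
#   kubectl create secret generic linode -n kube-system \
#     --from-literal=apiToken=<linode-api-token> \
#     --from-literal=region=us-ord
# The key names must match the apiTokenRef and regionRef configured below.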
30 | # secretRef: 31 | # name: "linode" 32 | # apiTokenRef: "apiToken" 33 | # regionRef: "region" 34 | 35 | # Default storageClass is "linode-block-storage-retain" but it can be set to 36 | # "linode-block-storage" or left as an empty string 37 | defaultStorageClass: linode-block-storage-retain 38 | 39 | # set these value to a comma separated string if you'd like to add tags to the created volumes 40 | # volumeTags: 41 | # - example 42 | # - test 43 | 44 | # Images - Default 45 | csiProvisioner: 46 | image: registry.k8s.io/sig-storage/csi-provisioner 47 | tag: v5.1.0 48 | pullPolicy: IfNotPresent 49 | metrics: 50 | address: "0.0.0.0:10248" 51 | port: 10248 # Metrics port for csi-provisioner 52 | 53 | csiAttacher: 54 | image: registry.k8s.io/sig-storage/csi-attacher 55 | tag: v4.8.1 56 | pullPolicy: IfNotPresent 57 | metrics: 58 | address: "0.0.0.0:10249" 59 | port: 10249 # Metrics port for csi-attacher 60 | 61 | csiResizer: 62 | image: registry.k8s.io/sig-storage/csi-resizer 63 | tag: v1.12.0 64 | pullPolicy: IfNotPresent 65 | metrics: 66 | address: "0.0.0.0:10250" 67 | port: 10250 # Metrics port for csi-resizer 68 | 69 | csiLinodePlugin: 70 | image: linode/linode-blockstorage-csi-driver 71 | tag: # only set if required, defaults to .Chart.AppVersion set during release or "latest" by default 72 | pullPolicy: IfNotPresent 73 | podsMountDir: /var/lib/kubelet 74 | # This section adds the ability to pass environment variables to adjust CSI defaults 75 | env: 76 | # - name: EXAMPLE_ENV_VAR 77 | # value: "true" 78 | # This section adds the ability to pass volumes to the DaemonSet 79 | volumes: 80 | # - name: test-volume 81 | # emptyDir: 82 | # sizeLimit: 500Mi 83 | # this section adds the ability to pass volumeMounts to the container 84 | volumeMounts: 85 | # - mountPath: /tmp/ 86 | # name: test-volume 87 | # This section adds the ability to pass resources to the container 88 | resources: 89 | 90 | kubectl: 91 | image: alpine/k8s # This needs to be alpine based and have both kubectl and curl installed. 92 | tag: 1.25.14 93 | 94 | csiNodeDriverRegistrar: 95 | image: registry.k8s.io/sig-storage/csi-node-driver-registrar 96 | tag: v2.12.0 97 | # Additional environment variables for the node driver registrar container 98 | env: [] 99 | # Additional volume mounts for the node driver registrar container 100 | volumeMounts: [] 101 | 102 | controller: 103 | nodeSelector: {} 104 | affinity: {} 105 | tolerations: [] 106 | 107 | # Add custom annotations to all pods 108 | podAnnotations: {} 109 | 110 | # Add custom labels to all pods 111 | podLabels: {} 112 | -------------------------------------------------------------------------------- /internal/driver/capabilities.go: -------------------------------------------------------------------------------- 1 | package driver 2 | 3 | import "github.com/container-storage-interface/spec/lib/go/csi" 4 | 5 | // ControllerServiceCapabilities returns the list of capabilities supported by 6 | // this driver's controller service. 
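//
// As a point of reference, a ControllerGetCapabilities gRPC handler simply
// returns this slice; a minimal sketch (not necessarily the exact shape of the
// driver's controllerserver.go) would be:
//
//	func (cs *ControllerServer) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
//		return &csi.ControllerGetCapabilitiesResponse{Capabilities: ControllerServiceCapabilities()}, nil
//	}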
7 | func ControllerServiceCapabilities() []*csi.ControllerServiceCapability { 8 | capabilities := []csi.ControllerServiceCapability_RPC_Type{ 9 | csi.ControllerServiceCapability_RPC_PUBLISH_READONLY, 10 | csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, 11 | csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, 12 | csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, 13 | csi.ControllerServiceCapability_RPC_CLONE_VOLUME, 14 | csi.ControllerServiceCapability_RPC_LIST_VOLUMES, 15 | csi.ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES, 16 | csi.ControllerServiceCapability_RPC_VOLUME_CONDITION, 17 | csi.ControllerServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER, 18 | csi.ControllerServiceCapability_RPC_GET_VOLUME, 19 | } 20 | 21 | cc := make([]*csi.ControllerServiceCapability, 0, len(capabilities)) 22 | for _, c := range capabilities { 23 | cc = append(cc, &csi.ControllerServiceCapability{ 24 | Type: &csi.ControllerServiceCapability_Rpc{ 25 | Rpc: &csi.ControllerServiceCapability_RPC{ 26 | Type: c, 27 | }, 28 | }, 29 | }) 30 | } 31 | return cc 32 | } 33 | 34 | // NodeServiceCapabilities returns the list of capabilities supported by this 35 | // driver's node service. 36 | func NodeServiceCapabilities() []*csi.NodeServiceCapability { 37 | capabilities := []csi.NodeServiceCapability_RPC_Type{ 38 | csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, 39 | csi.NodeServiceCapability_RPC_EXPAND_VOLUME, 40 | csi.NodeServiceCapability_RPC_GET_VOLUME_STATS, 41 | csi.NodeServiceCapability_RPC_VOLUME_CONDITION, 42 | csi.NodeServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER, 43 | } 44 | 45 | cc := make([]*csi.NodeServiceCapability, 0, len(capabilities)) 46 | for _, c := range capabilities { 47 | cc = append(cc, &csi.NodeServiceCapability{ 48 | Type: &csi.NodeServiceCapability_Rpc{ 49 | Rpc: &csi.NodeServiceCapability_RPC{ 50 | Type: c, 51 | }, 52 | }, 53 | }) 54 | } 55 | return cc 56 | } 57 | 58 | // VolumeCapabilityAccessModes returns the allowed access modes for a volume 59 | // created by the driver. 
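//
// For illustration, a requested access mode could be validated against this
// list with a small helper along these lines (the helper name is hypothetical):
//
//	func accessModeSupported(want csi.VolumeCapability_AccessMode_Mode) bool {
//		for _, m := range VolumeCapabilityAccessModes() {
//			if m.GetMode() == want {
//				return true
//			}
//		}
//		return false
//	}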
60 | func VolumeCapabilityAccessModes() []*csi.VolumeCapability_AccessMode { 61 | modes := []csi.VolumeCapability_AccessMode_Mode{ 62 | csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, 63 | csi.VolumeCapability_AccessMode_SINGLE_NODE_SINGLE_WRITER, 64 | csi.VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER, 65 | } 66 | 67 | mm := make([]*csi.VolumeCapability_AccessMode, 0, len(modes)) 68 | for _, m := range modes { 69 | mm = append(mm, &csi.VolumeCapability_AccessMode{ 70 | Mode: m, 71 | }) 72 | } 73 | return mm 74 | } 75 | -------------------------------------------------------------------------------- /internal/driver/driver_test.go: -------------------------------------------------------------------------------- 1 | package driver 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "testing" 8 | 9 | "go.uber.org/mock/gomock" 10 | "k8s.io/mount-utils" 11 | 12 | "github.com/linode/linode-blockstorage-csi-driver/mocks" 13 | linodeclient "github.com/linode/linode-blockstorage-csi-driver/pkg/linode-client" 14 | mountmanager "github.com/linode/linode-blockstorage-csi-driver/pkg/mount-manager" 15 | ) 16 | 17 | var ( 18 | driver = "linodebs.csi.linode.com" 19 | vendorVersion = "test-vendor" 20 | ) 21 | 22 | func TestDriverSuite(t *testing.T) { 23 | socket := "/tmp/csi.sock" 24 | endpoint := "unix://" + socket 25 | if err := os.Remove(socket); err != nil && !os.IsNotExist(err) { 26 | t.Fatalf("failed to remove unix domain socket file %s, error: %s", socket, err) 27 | } 28 | 29 | bsPrefix := "test-" 30 | 31 | mockCtrl := gomock.NewController(t) 32 | defer mockCtrl.Finish() 33 | 34 | mounter := &mountmanager.SafeFormatAndMount{ 35 | SafeFormatAndMount: &mount.SafeFormatAndMount{ 36 | Interface: mocks.NewMockMounter(mockCtrl), 37 | Exec: mocks.NewMockExecutor(mockCtrl), 38 | }, 39 | } 40 | deviceUtils := mocks.NewMockDeviceUtils(mockCtrl) 41 | fileSystem := mocks.NewMockFileSystem(mockCtrl) 42 | cryptSetup := mocks.NewMockCryptSetupClient(mockCtrl) 43 | encrypt := NewLuksEncryption(mounter.Exec, fileSystem, cryptSetup) 44 | resizeFs := mocks.NewMockResizeFSer(mockCtrl) 45 | 46 | fakeCloudProvider, err := linodeclient.NewLinodeClient("dummy", fmt.Sprintf("LinodeCSI/%s", vendorVersion), "") 47 | if err != nil { 48 | t.Fatalf("Failed to setup Linode client: %s", err) 49 | } 50 | 51 | // TODO fake metadata 52 | md := Metadata{ 53 | ID: 123, 54 | Label: "linode123", 55 | Region: "us-east", 56 | Memory: 4 << 30, // 4GiB 57 | } 58 | linodeDriver := GetLinodeDriver(context.Background()) 59 | // variables that are picked up from the environment 60 | enableMetrics := "true" 61 | metricsPort := "10251" 62 | enableTracing := "true" 63 | tracingPort := "4318" 64 | if err := linodeDriver.SetupLinodeDriver(context.Background(), fakeCloudProvider, mounter, deviceUtils, resizeFs, md, driver, vendorVersion, bsPrefix, encrypt, enableMetrics, metricsPort, enableTracing, tracingPort); err != nil { 65 | t.Fatalf("Failed to setup Linode Driver: %v", err) 66 | } 67 | 68 | go linodeDriver.Run(context.Background(), endpoint) 69 | 70 | // TODO: fix sanity checks for e2e, disable for ci 71 | // cfg := sanity.NewTestConfig() 72 | // cfg.Address = endpoint 73 | // sanity.Test(t, cfg) 74 | } 75 | -------------------------------------------------------------------------------- /internal/driver/examples/kubernetes/csi-app-block.yaml: -------------------------------------------------------------------------------- 1 | kind: Pod 2 | apiVersion: v1 3 | metadata: 4 | name: csi-block-example-pod 5 | spec: 6 | containers: 7 | - 
name: csi-block-example-container 8 | image: busybox 9 | volumeMounts: 10 | volumeDevices: 11 | - name: csi-block-example-volume 12 | devicePath: /dev/linode/csi-block-example-dev 13 | command: [ "/bin/sh", "-c", "stat /dev/linode/csi-block-example-dev && sleep 1000000" ] 14 | volumes: 15 | - name: csi-block-example-volume 16 | persistentVolumeClaim: 17 | claimName: csi-block-example-pvc 18 | -------------------------------------------------------------------------------- /internal/driver/examples/kubernetes/csi-app.yaml: -------------------------------------------------------------------------------- 1 | kind: Pod 2 | apiVersion: v1 3 | metadata: 4 | name: csi-example-pod 5 | spec: 6 | containers: 7 | - name: csi-example-container 8 | image: busybox 9 | volumeMounts: 10 | - mountPath: "/data" 11 | name: csi-example-volume 12 | command: ["sleep", "1000000"] 13 | tolerations: 14 | - effect: NoSchedule 15 | key: node-role.kubernetes.io/control-plane 16 | operator: Exists 17 | volumes: 18 | - name: csi-example-volume 19 | persistentVolumeClaim: 20 | claimName: csi-example-pvc 21 | -------------------------------------------------------------------------------- /internal/driver/examples/kubernetes/csi-linode-blockstorage-encrypted.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: csi-example-encryption-pod 5 | spec: 6 | containers: 7 | - name: csi-example-encryption-pod 8 | image: ubuntu 9 | command: 10 | - sleep 11 | - "1000000" 12 | volumeMounts: 13 | - mountPath: /data 14 | name: csi-volume 15 | tolerations: 16 | - key: "node-role.kubernetes.io/control-plane" 17 | operator: "Exists" 18 | effect: "NoSchedule" 19 | volumes: 20 | - name: csi-volume 21 | persistentVolumeClaim: 22 | claimName: pvc-encrypted-example 23 | --- 24 | apiVersion: storage.k8s.io/v1 25 | kind: StorageClass 26 | metadata: 27 | name: linode-block-storage-encrypted 28 | namespace: kube-system 29 | parameters: 30 | linodebs.csi.linode.com/encrypted: "true" 31 | allowVolumeExpansion: true 32 | provisioner: linodebs.csi.linode.com 33 | --- 34 | apiVersion: v1 35 | kind: PersistentVolumeClaim 36 | metadata: 37 | name: pvc-encrypted-example 38 | spec: 39 | accessModes: 40 | - ReadWriteOnce 41 | resources: 42 | requests: 43 | storage: 10Gi 44 | storageClassName: linode-block-storage-encrypted 45 | -------------------------------------------------------------------------------- /internal/driver/examples/kubernetes/csi-pvc-block.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: csi-block-example-pvc 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | volumeMode: Block 9 | storageClassName: linode-block-storage-retain 10 | resources: 11 | requests: 12 | storage: 10Gi 13 | -------------------------------------------------------------------------------- /internal/driver/examples/kubernetes/csi-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: csi-example-pvc 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 10Gi 11 | storageClassName: linode-block-storage-retain 12 | -------------------------------------------------------------------------------- /internal/driver/examples/kubernetes/luks-enabled-vol.yaml: 
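# The example below ties LUKS encryption together end to end: a StorageClass
# carrying the luks-* parameters, a Secret that stores the passphrase under the
# key "luksKey", and a PVC/Pod pair consuming the encrypted volume. A
# passphrase of the same shape as the sample value can be generated with, for
# instance, head -c 64 /dev/urandom | base64 (shown only as an illustration;
# any sufficiently strong passphrase works).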
-------------------------------------------------------------------------------- 1 | --- 2 | kind: Pod 3 | apiVersion: v1 4 | metadata: 5 | name: csi-example-pod-luks 6 | spec: 7 | containers: 8 | - name: csi-example-container 9 | image: busybox 10 | volumeMounts: 11 | - mountPath: "/data" 12 | name: csi-example-volume-luks 13 | command: ["sleep", "1000000"] 14 | tolerations: 15 | - effect: NoSchedule 16 | key: node-role.kubernetes.io/control-plane 17 | operator: Exists 18 | volumes: 19 | - name: csi-example-volume-luks 20 | persistentVolumeClaim: 21 | claimName: csi-example-pvc-luks 22 | --- 23 | allowVolumeExpansion: true 24 | apiVersion: storage.k8s.io/v1 25 | kind: StorageClass 26 | metadata: 27 | annotations: 28 | storageclass.kubernetes.io/is-default-class: "true" 29 | name: linode-block-storage-luks-luks 30 | namespace: kube-system 31 | provisioner: linodebs.csi.linode.com 32 | reclaimPolicy: Delete 33 | parameters: 34 | linodebs.csi.linode.com/luks-encrypted: "true" 35 | linodebs.csi.linode.com/luks-cipher: "aes-xts-plain64" 36 | linodebs.csi.linode.com/luks-key-size: "512" 37 | csi.storage.k8s.io/node-stage-secret-namespace: default 38 | csi.storage.k8s.io/node-stage-secret-name: csi-encrypt-example-luks-key 39 | linodebs.csi.linode.com/volumeTags: default 40 | --- 41 | apiVersion: v1 42 | kind: Secret 43 | metadata: 44 | name: csi-encrypt-example-luks-key 45 | stringData: 46 | luksKey: klOMgxvP7v2lJ1LXjQQ4oMQ0n/axuGAjT311C6qbJO/Z7D6dIYYlz/qFn7+AF0oZm5Y2qJos+R/kZaEk/MA9vw== 47 | --- 48 | apiVersion: v1 49 | kind: PersistentVolumeClaim 50 | metadata: 51 | name: csi-example-pvc-luks 52 | spec: 53 | accessModes: 54 | - ReadWriteOnce 55 | resources: 56 | requests: 57 | storage: 10Gi 58 | storageClassName: linode-block-storage-luks-luks 59 | -------------------------------------------------------------------------------- /internal/driver/examples/kubernetes/topology-aware.yaml: -------------------------------------------------------------------------------- 1 | # This StorageClass is for topology-aware provisioning based on the pod region 2 | allowVolumeExpansion: true 3 | apiVersion: storage.k8s.io/v1 4 | kind: StorageClass 5 | metadata: 6 | name: linode-block-storage-wait-for-consumer 7 | provisioner: linodebs.csi.linode.com 8 | reclaimPolicy: Delete 9 | volumeBindingMode: WaitForFirstConsumer 10 | --- 11 | apiVersion: v1 12 | kind: PersistentVolumeClaim 13 | metadata: 14 | name: pvc-filesystem 15 | spec: 16 | accessModes: 17 | - ReadWriteOnce 18 | resources: 19 | requests: 20 | storage: 10Gi 21 | storageClassName: linode-block-storage-wait-for-consumer 22 | --- 23 | apiVersion: v1 24 | kind: Pod 25 | metadata: 26 | name: pod-topology-aware 27 | spec: 28 | nodeSelector: 29 | topology.linode.com/region: us-ord 30 | tolerations: 31 | - key: "node-role.kubernetes.io/control-plane" 32 | operator: "Exists" 33 | effect: "NoSchedule" 34 | containers: 35 | - name: pod-topology-aware 36 | image: ubuntu 37 | command: 38 | - sleep 39 | - "1000000" 40 | volumeMounts: 41 | - mountPath: /data 42 | name: csi-volume 43 | volumes: 44 | - name: csi-volume 45 | persistentVolumeClaim: 46 | claimName: pvc-filesystem 47 | -------------------------------------------------------------------------------- /internal/driver/lifecycle.go: -------------------------------------------------------------------------------- 1 | package driver 2 | 3 | // VolumeLifecycle is a type used to indicate the phase a volume is at when 4 | // it is published and/or staged to a node. 
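//
// A typical use is as a structured logging field when a node-side RPC begins,
// e.g. (illustrative only):
//
//	log.V(4).Info("volume lifecycle", "phase", VolumeLifecycleNodeStageVolume)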
5 | type VolumeLifecycle string 6 | 7 | const ( 8 | VolumeLifecycleNodeStageVolume VolumeLifecycle = "NodeStageVolume" 9 | VolumeLifecycleNodePublishVolume VolumeLifecycle = "NodePublishVolume" 10 | VolumeLifecycleNodeUnstageVolume VolumeLifecycle = "NodeUnstageVolume" 11 | VolumeLifecycleNodeUnpublishVolume VolumeLifecycle = "NodeUnpublishVolume" 12 | ) 13 | -------------------------------------------------------------------------------- /internal/driver/limits.go: -------------------------------------------------------------------------------- 1 | package driver 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/linode/linode-blockstorage-csi-driver/pkg/hwinfo" 7 | ) 8 | 9 | // maxVolumeAttachments returns the maximum number of block storage volumes 10 | // that can be attached to a Linode instance, given the amount of memory the 11 | // instance has. 12 | // 13 | // TODO: This code should be cleaned up to use the built-in max and min 14 | // functions once the project is updated to Go 1.21. See 15 | // https://go.dev/ref/spec#Min_and_max. 16 | func maxVolumeAttachments(memoryBytes uint) int { 17 | attachments := memoryBytes >> 30 18 | if attachments > maxAttachments { 19 | return maxAttachments 20 | } 21 | if attachments < maxPersistentAttachments { 22 | return maxPersistentAttachments 23 | } 24 | return int(attachments) 25 | } 26 | 27 | const ( 28 | // maxPersistentAttachments is the default number of volume attachments 29 | // allowed when they are persisted to an instance/boot config. This is 30 | // also the maximum number of allowed volume attachments when the 31 | // instance type has < 16GiB of RAM. 32 | maxPersistentAttachments = 8 33 | 34 | // maxAttachments it the hard limit of volumes that can be attached to 35 | // a single Linode instance. 36 | maxAttachments = 64 37 | ) 38 | 39 | func attachedVolumeCount(hw hwinfo.HardwareInfo) (int, error) { 40 | bdev, err := hw.Block() 41 | if err != nil { 42 | return 0, err 43 | } 44 | count := 0 45 | for _, disk := range bdev.Disks { 46 | driveType := strings.ToLower(disk.DriveType.String()) 47 | controllerType := strings.ToLower(disk.StorageController.String()) 48 | if driveType == "virtual" || driveType == "cdrom" || controllerType == "loop" || controllerType == "unknown" { 49 | continue 50 | } 51 | count++ 52 | } 53 | return count, nil 54 | } 55 | -------------------------------------------------------------------------------- /internal/driver/nodeserver_all.go: -------------------------------------------------------------------------------- 1 | //go:build !windows 2 | 3 | package driver 4 | 5 | import ( 6 | "context" 7 | "errors" 8 | "fmt" 9 | 10 | "github.com/container-storage-interface/spec/lib/go/csi" 11 | "golang.org/x/sys/unix" 12 | "google.golang.org/grpc/codes" 13 | "google.golang.org/grpc/status" 14 | 15 | "github.com/linode/linode-blockstorage-csi-driver/pkg/logger" 16 | ) 17 | 18 | // unixStatfs is used to mock the unix.Statfs function. 19 | var unixStatfs = unix.Statfs 20 | 21 | func nodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { 22 | log, _ := logger.GetLogger(ctx) 23 | 24 | if req.GetVolumeId() == "" || req.GetVolumePath() == "" { 25 | return nil, status.Error(codes.InvalidArgument, "volume ID or path empty") 26 | } 27 | 28 | var statfs unix.Statfs_t 29 | // See http://man7.org/linux/man-pages/man2/statfs.2.html for details. 
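// Worked example of the arithmetic applied to the result below: with
// Bsize=4096, Blocks=1000, Bfree=200 and Bavail=150, total = 1000*4096 =
// 4096000 bytes, used = (1000-200)*4096 = 3276800 bytes and available =
// 150*4096 = 614400 bytes; these are the same figures exercised by
// TestNodeGetVolumeStats in nodeserver_all_test.go.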
30 | err := unixStatfs(req.GetVolumePath(), &statfs) 31 | switch { 32 | case errors.Is(err, unix.EIO): 33 | // EIO is returned when the filesystem is not mounted. 34 | return &csi.NodeGetVolumeStatsResponse{ 35 | VolumeCondition: &csi.VolumeCondition{ 36 | Abnormal: true, 37 | Message: fmt.Sprintf("failed to get stats: %v", err.Error()), 38 | }, 39 | }, nil 40 | case errors.Is(err, unix.ENOENT): 41 | // ENOENT is returned when the volume path does not exist. 42 | return nil, errNotFound("volume path not found: %v", err.Error()) 43 | case err != nil: 44 | // Any other error is considered an internal error. 45 | return nil, errInternal("failed to get stats: %v", err.Error()) 46 | } 47 | 48 | response := &csi.NodeGetVolumeStatsResponse{ 49 | Usage: []*csi.VolumeUsage{ 50 | { 51 | Available: int64(statfs.Bavail) * int64(statfs.Bsize), //nolint:unconvert // probably false positive because uint32 and int64 dont match 52 | Total: int64(statfs.Blocks) * int64(statfs.Bsize), //nolint:unconvert // probably false positive because uint32 and int64 dont match 53 | Used: int64(statfs.Blocks-statfs.Bfree) * int64(statfs.Bsize), //nolint:unconvert // probably false positive because uint32 and int64 dont match 54 | Unit: csi.VolumeUsage_BYTES, 55 | }, 56 | { 57 | Available: int64(statfs.Ffree), 58 | Total: int64(statfs.Files), 59 | Used: int64(statfs.Files) - int64(statfs.Ffree), 60 | Unit: csi.VolumeUsage_INODES, 61 | }, 62 | }, 63 | VolumeCondition: &csi.VolumeCondition{ 64 | Abnormal: false, 65 | Message: "healthy", 66 | }, 67 | } 68 | 69 | log.V(2).Info("Successfully retrieved volume stats", "volumeID", req.GetVolumeId(), "volumePath", req.GetVolumePath(), "response", response) 70 | return response, nil 71 | } 72 | -------------------------------------------------------------------------------- /internal/driver/nodeserver_all_test.go: -------------------------------------------------------------------------------- 1 | package driver 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "testing" 7 | 8 | "github.com/container-storage-interface/spec/lib/go/csi" 9 | "github.com/golang/mock/gomock" 10 | "github.com/stretchr/testify/assert" 11 | "github.com/stretchr/testify/require" 12 | "golang.org/x/sys/unix" 13 | "google.golang.org/grpc/codes" 14 | "google.golang.org/grpc/status" 15 | ) 16 | 17 | func TestNodeGetVolumeStats(t *testing.T) { 18 | ctrl := gomock.NewController(t) 19 | defer ctrl.Finish() 20 | 21 | mockStatfs := func(path string, stat *unix.Statfs_t) error { 22 | switch path { 23 | case "/valid/path": 24 | stat.Blocks = 1000 25 | stat.Bfree = 200 26 | stat.Bavail = 150 27 | stat.Files = 500 28 | stat.Ffree = 100 29 | stat.Bsize = 4096 30 | return nil 31 | case "/not/mounted": 32 | return unix.EIO 33 | case "/not/exist": 34 | return unix.ENOENT 35 | default: 36 | return errors.New("internal error") 37 | } 38 | } 39 | 40 | unixStatfs = mockStatfs 41 | 42 | testCases := []struct { 43 | name string 44 | volumeID string 45 | volumePath string 46 | expectedErr error 47 | expectedRes *csi.NodeGetVolumeStatsResponse 48 | }{ 49 | { 50 | name: "Valid request with healthy volume", 51 | volumeID: "valid-volume", 52 | volumePath: "/valid/path", 53 | expectedErr: nil, 54 | expectedRes: &csi.NodeGetVolumeStatsResponse{ 55 | Usage: []*csi.VolumeUsage{ 56 | { 57 | Available: 150 * 4096, 58 | Total: 1000 * 4096, 59 | Used: (1000 - 200) * 4096, 60 | Unit: csi.VolumeUsage_BYTES, 61 | }, 62 | { 63 | Available: 100, 64 | Total: 500, 65 | Used: 500 - 100, 66 | Unit: csi.VolumeUsage_INODES, 67 | }, 68 | }, 69 | 
VolumeCondition: &csi.VolumeCondition{ 70 | Abnormal: false, 71 | Message: "healthy", 72 | }, 73 | }, 74 | }, 75 | { 76 | name: "Request with empty volume ID or path", 77 | volumeID: "", 78 | volumePath: "", 79 | expectedErr: status.Error(codes.InvalidArgument, "volume ID or path empty"), 80 | expectedRes: nil, 81 | }, 82 | { 83 | name: "Filesystem not mounted", 84 | volumeID: "not-mounted-volume", 85 | volumePath: "/not/mounted", 86 | expectedErr: nil, 87 | expectedRes: &csi.NodeGetVolumeStatsResponse{ 88 | VolumeCondition: &csi.VolumeCondition{ 89 | Abnormal: true, 90 | Message: "failed to get stats: input/output error", 91 | }, 92 | }, 93 | }, 94 | { 95 | name: "Volume path does not exist", 96 | volumeID: "non-existent-volume", 97 | volumePath: "/not/exist", 98 | expectedErr: status.Errorf(codes.NotFound, "volume path not found: no such file or directory"), 99 | expectedRes: nil, 100 | }, 101 | { 102 | name: "Internal error during Statfs call", 103 | volumeID: "internal-error-volume", 104 | volumePath: "/internal/error", 105 | expectedErr: status.Errorf(codes.Internal, "failed to get stats: internal error"), 106 | expectedRes: nil, 107 | }, 108 | } 109 | 110 | for _, tc := range testCases { 111 | t.Run(tc.name, func(t *testing.T) { 112 | ctx := context.Background() 113 | req := &csi.NodeGetVolumeStatsRequest{ 114 | VolumeId: tc.volumeID, 115 | VolumePath: tc.volumePath, 116 | } 117 | 118 | resp, err := nodeGetVolumeStats(ctx, req) 119 | 120 | if tc.expectedErr != nil { 121 | require.EqualError(t, err, tc.expectedErr.Error()) 122 | } else { 123 | require.NoError(t, err) 124 | } 125 | 126 | assert.Equal(t, tc.expectedRes, resp) 127 | }) 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /internal/driver/nodeserver_windows.go: -------------------------------------------------------------------------------- 1 | package driver 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/container-storage-interface/spec/lib/go/csi" 8 | "google.golang.org/grpc/codes" 9 | "google.golang.org/grpc/status" 10 | ) 11 | 12 | func nodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { 13 | return nil, status.Error(codes.Unimplemented, fmt.Sprintf("NodeGetVolumeStats is not yet implemented on Windows")) 14 | } 15 | -------------------------------------------------------------------------------- /mocks/mock_device.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: pkg/device-manager/device.go 3 | // 4 | // Generated by this command: 5 | // 6 | // mockgen -source=pkg/device-manager/device.go -destination=mocks/mock_device.go -package=mocks 7 | // 8 | 9 | // Package mocks is a generated GoMock package. 10 | package mocks 11 | 12 | import ( 13 | reflect "reflect" 14 | 15 | gomock "go.uber.org/mock/gomock" 16 | ) 17 | 18 | // MockDeviceUtils is a mock of DeviceUtils interface. 19 | type MockDeviceUtils struct { 20 | ctrl *gomock.Controller 21 | recorder *MockDeviceUtilsMockRecorder 22 | } 23 | 24 | // MockDeviceUtilsMockRecorder is the mock recorder for MockDeviceUtils. 25 | type MockDeviceUtilsMockRecorder struct { 26 | mock *MockDeviceUtils 27 | } 28 | 29 | // NewMockDeviceUtils creates a new mock instance. 
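//
// Typical wiring in a unit test looks like the following sketch (the returned
// device path is a made-up example):
//
//	ctrl := gomock.NewController(t)
//	du := NewMockDeviceUtils(ctrl)
//	du.EXPECT().
//		VerifyDevicePath(gomock.Any()).
//		Return("/dev/disk/by-id/scsi-0Linode_Volume_example", nil)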
30 | func NewMockDeviceUtils(ctrl *gomock.Controller) *MockDeviceUtils { 31 | mock := &MockDeviceUtils{ctrl: ctrl} 32 | mock.recorder = &MockDeviceUtilsMockRecorder{mock} 33 | return mock 34 | } 35 | 36 | // EXPECT returns an object that allows the caller to indicate expected use. 37 | func (m *MockDeviceUtils) EXPECT() *MockDeviceUtilsMockRecorder { 38 | return m.recorder 39 | } 40 | 41 | // GetDiskByIdPaths mocks base method. 42 | func (m *MockDeviceUtils) GetDiskByIdPaths(deviceName, partition string) []string { 43 | m.ctrl.T.Helper() 44 | ret := m.ctrl.Call(m, "GetDiskByIdPaths", deviceName, partition) 45 | ret0, _ := ret[0].([]string) 46 | return ret0 47 | } 48 | 49 | // GetDiskByIdPaths indicates an expected call of GetDiskByIdPaths. 50 | func (mr *MockDeviceUtilsMockRecorder) GetDiskByIdPaths(deviceName, partition any) *gomock.Call { 51 | mr.mock.ctrl.T.Helper() 52 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDiskByIdPaths", reflect.TypeOf((*MockDeviceUtils)(nil).GetDiskByIdPaths), deviceName, partition) 53 | } 54 | 55 | // VerifyDevicePath mocks base method. 56 | func (m *MockDeviceUtils) VerifyDevicePath(devicePaths []string) (string, error) { 57 | m.ctrl.T.Helper() 58 | ret := m.ctrl.Call(m, "VerifyDevicePath", devicePaths) 59 | ret0, _ := ret[0].(string) 60 | ret1, _ := ret[1].(error) 61 | return ret0, ret1 62 | } 63 | 64 | // VerifyDevicePath indicates an expected call of VerifyDevicePath. 65 | func (mr *MockDeviceUtilsMockRecorder) VerifyDevicePath(devicePaths any) *gomock.Call { 66 | mr.mock.ctrl.T.Helper() 67 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyDevicePath", reflect.TypeOf((*MockDeviceUtils)(nil).VerifyDevicePath), devicePaths) 68 | } 69 | -------------------------------------------------------------------------------- /mocks/mock_hwinfo.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: pkg/hwinfo/hwinfo.go 3 | // 4 | // Generated by this command: 5 | // 6 | // mockgen -source=pkg/hwinfo/hwinfo.go -destination=mocks/mock_hwinfo.go -package=mocks 7 | // 8 | 9 | // Package mocks is a generated GoMock package. 10 | package mocks 11 | 12 | import ( 13 | reflect "reflect" 14 | 15 | ghw "github.com/jaypipes/ghw" 16 | gomock "go.uber.org/mock/gomock" 17 | ) 18 | 19 | // MockHardwareInfo is a mock of HardwareInfo interface. 20 | type MockHardwareInfo struct { 21 | ctrl *gomock.Controller 22 | recorder *MockHardwareInfoMockRecorder 23 | isgomock struct{} 24 | } 25 | 26 | // MockHardwareInfoMockRecorder is the mock recorder for MockHardwareInfo. 27 | type MockHardwareInfoMockRecorder struct { 28 | mock *MockHardwareInfo 29 | } 30 | 31 | // NewMockHardwareInfo creates a new mock instance. 32 | func NewMockHardwareInfo(ctrl *gomock.Controller) *MockHardwareInfo { 33 | mock := &MockHardwareInfo{ctrl: ctrl} 34 | mock.recorder = &MockHardwareInfoMockRecorder{mock} 35 | return mock 36 | } 37 | 38 | // EXPECT returns an object that allows the caller to indicate expected use. 39 | func (m *MockHardwareInfo) EXPECT() *MockHardwareInfoMockRecorder { 40 | return m.recorder 41 | } 42 | 43 | // Block mocks base method. 44 | func (m *MockHardwareInfo) Block() (*ghw.BlockInfo, error) { 45 | m.ctrl.T.Helper() 46 | ret := m.ctrl.Call(m, "Block") 47 | ret0, _ := ret[0].(*ghw.BlockInfo) 48 | ret1, _ := ret[1].(error) 49 | return ret0, ret1 50 | } 51 | 52 | // Block indicates an expected call of Block. 
53 | func (mr *MockHardwareInfoMockRecorder) Block() *gomock.Call { 54 | mr.mock.ctrl.T.Helper() 55 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Block", reflect.TypeOf((*MockHardwareInfo)(nil).Block)) 56 | } 57 | -------------------------------------------------------------------------------- /mocks/mock_metadata.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: internal/driver/metadata.go 3 | // 4 | // Generated by this command: 5 | // 6 | // mockgen -source=internal/driver/metadata.go -destination=mocks/mock_metadata.go -package=mocks 7 | // 8 | 9 | // Package mocks is a generated GoMock package. 10 | package mocks 11 | 12 | import ( 13 | context "context" 14 | reflect "reflect" 15 | 16 | metadata "github.com/linode/go-metadata" 17 | gomock "go.uber.org/mock/gomock" 18 | v1 "k8s.io/api/core/v1" 19 | ) 20 | 21 | // MockMetadataClient is a mock of MetadataClient interface. 22 | type MockMetadataClient struct { 23 | ctrl *gomock.Controller 24 | recorder *MockMetadataClientMockRecorder 25 | } 26 | 27 | // MockMetadataClientMockRecorder is the mock recorder for MockMetadataClient. 28 | type MockMetadataClientMockRecorder struct { 29 | mock *MockMetadataClient 30 | } 31 | 32 | // NewMockMetadataClient creates a new mock instance. 33 | func NewMockMetadataClient(ctrl *gomock.Controller) *MockMetadataClient { 34 | mock := &MockMetadataClient{ctrl: ctrl} 35 | mock.recorder = &MockMetadataClientMockRecorder{mock} 36 | return mock 37 | } 38 | 39 | // EXPECT returns an object that allows the caller to indicate expected use. 40 | func (m *MockMetadataClient) EXPECT() *MockMetadataClientMockRecorder { 41 | return m.recorder 42 | } 43 | 44 | // GetInstance mocks base method. 45 | func (m *MockMetadataClient) GetInstance(ctx context.Context) (*metadata.InstanceData, error) { 46 | m.ctrl.T.Helper() 47 | ret := m.ctrl.Call(m, "GetInstance", ctx) 48 | ret0, _ := ret[0].(*metadata.InstanceData) 49 | ret1, _ := ret[1].(error) 50 | return ret0, ret1 51 | } 52 | 53 | // GetInstance indicates an expected call of GetInstance. 54 | func (mr *MockMetadataClientMockRecorder) GetInstance(ctx any) *gomock.Call { 55 | mr.mock.ctrl.T.Helper() 56 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInstance", reflect.TypeOf((*MockMetadataClient)(nil).GetInstance), ctx) 57 | } 58 | 59 | // MockKubeClient is a mock of KubeClient interface. 60 | type MockKubeClient struct { 61 | ctrl *gomock.Controller 62 | recorder *MockKubeClientMockRecorder 63 | } 64 | 65 | // MockKubeClientMockRecorder is the mock recorder for MockKubeClient. 66 | type MockKubeClientMockRecorder struct { 67 | mock *MockKubeClient 68 | } 69 | 70 | // NewMockKubeClient creates a new mock instance. 71 | func NewMockKubeClient(ctrl *gomock.Controller) *MockKubeClient { 72 | mock := &MockKubeClient{ctrl: ctrl} 73 | mock.recorder = &MockKubeClientMockRecorder{mock} 74 | return mock 75 | } 76 | 77 | // EXPECT returns an object that allows the caller to indicate expected use. 78 | func (m *MockKubeClient) EXPECT() *MockKubeClientMockRecorder { 79 | return m.recorder 80 | } 81 | 82 | // GetNode mocks base method. 83 | func (m *MockKubeClient) GetNode(ctx context.Context, name string) (*v1.Node, error) { 84 | m.ctrl.T.Helper() 85 | ret := m.ctrl.Call(m, "GetNode", ctx, name) 86 | ret0, _ := ret[0].(*v1.Node) 87 | ret1, _ := ret[1].(error) 88 | return ret0, ret1 89 | } 90 | 91 | // GetNode indicates an expected call of GetNode. 
92 | func (mr *MockKubeClientMockRecorder) GetNode(ctx, name any) *gomock.Call { 93 | mr.mock.ctrl.T.Helper() 94 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNode", reflect.TypeOf((*MockKubeClient)(nil).GetNode), ctx, name) 95 | } 96 | -------------------------------------------------------------------------------- /observability/metrics/loadBalancer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: grafana-lb 5 | namespace: monitoring 6 | labels: 7 | app: grafana 8 | spec: 9 | type: LoadBalancer 10 | selector: 11 | app.kubernetes.io/instance: grafana 12 | app.kubernetes.io/name: grafana 13 | ports: 14 | - port: 80 # The port to expose externally on the NodeBalancer 15 | targetPort: 3000 # The Grafana target port 16 | protocol: TCP 17 | name: http 18 | -------------------------------------------------------------------------------- /observability/tracing/jager-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: jaeger 5 | namespace: kube-system 6 | labels: 7 | app: jaeger 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: jaeger 13 | template: 14 | metadata: 15 | labels: 16 | app: jaeger 17 | spec: 18 | containers: 19 | - name: jaeger 20 | image: jaegertracing/all-in-one:latest 21 | args: 22 | - "--collector.zipkin.host-port=:9411" # Enable Zipkin API on port 9411 23 | ports: 24 | - containerPort: 16686 # Jaeger UI 25 | - containerPort: 14268 # HTTP ingestion 26 | - containerPort: 14250 # gRPC ingestion 27 | - containerPort: 9411 # Zipkin API 28 | -------------------------------------------------------------------------------- /observability/tracing/jager-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: jaeger-collector 5 | namespace: kube-system 6 | labels: 7 | app: jaeger 8 | spec: 9 | type: LoadBalancer 10 | selector: 11 | app: jaeger 12 | ports: 13 | - name: jaeger-ui 14 | port: 16686 15 | targetPort: 16686 16 | - name: jaeger-collector-http 17 | port: 14268 18 | targetPort: 14268 19 | - name: jaeger-collector-grpc 20 | port: 14250 21 | targetPort: 14250 22 | - name: zipkin 23 | port: 9411 24 | targetPort: 9411 25 | -------------------------------------------------------------------------------- /observability/tracing/otel-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: otel-collector-config 5 | namespace: kube-system 6 | data: 7 | otel-collector-config.yaml: | 8 | receivers: 9 | otlp: 10 | protocols: 11 | http: 12 | endpoint: "0.0.0.0:4318" 13 | 14 | processors: 15 | batch: 16 | 17 | exporters: 18 | otlphttp: 19 | endpoint: "http://localhost:4318" 20 | 21 | debug: 22 | verbosity: detailed 23 | 24 | zipkin: 25 | endpoint: "http://jaeger-collector.kube-system.svc.cluster.local:9411/api/v2/spans" # Zipkin-compatible endpoint 26 | tls: 27 | insecure: true 28 | service: 29 | pipelines: 30 | traces: 31 | receivers: [otlp] 32 | processors: [batch] 33 | exporters: [otlphttp, debug, zipkin] 34 | -------------------------------------------------------------------------------- /observability/tracing/otel-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: 
Deployment 3 | metadata: 4 | name: otel-collector 5 | namespace: kube-system 6 | labels: 7 | app: otel-collector 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: otel-collector 13 | template: 14 | metadata: 15 | labels: 16 | app: otel-collector 17 | spec: 18 | containers: 19 | - name: otel-collector 20 | image: otel/opentelemetry-collector-contrib:latest 21 | args: ["--config=/etc/otel-collector-config.yaml"] 22 | ports: 23 | - containerPort: 4318 # OTLP HTTP 24 | volumeMounts: 25 | - name: otel-collector-config-volume 26 | mountPath: /etc/otel-collector-config.yaml 27 | subPath: otel-collector-config.yaml 28 | volumes: 29 | - name: otel-collector-config-volume 30 | configMap: 31 | name: otel-collector-config 32 | -------------------------------------------------------------------------------- /observability/tracing/otel-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: otel-collector 5 | namespace: kube-system 6 | labels: 7 | app: otel-collector 8 | spec: 9 | ports: 10 | - name: otlp-http 11 | port: 4318 12 | targetPort: 4318 13 | selector: 14 | app: otel-collector 15 | -------------------------------------------------------------------------------- /pkg/cryptsetup-client/cryptsetup_client.go: -------------------------------------------------------------------------------- 1 | package cryptsetupclient 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/martinjungblut/go-cryptsetup" 7 | ) 8 | 9 | type Device interface { 10 | Format(cryptsetup.DeviceType, cryptsetup.GenericParams) error 11 | KeyslotAddByVolumeKey(int, string, string) error 12 | ActivateByVolumeKey(deviceName string, volumeKey string, volumeKeySize int, flags int) error 13 | ActivateByPassphrase(deviceName string, keyslot int, passphrase string, flags int) error 14 | VolumeKeyGet(keyslot int, passphrase string) ([]byte, int, error) 15 | Load(cryptsetup.DeviceType) error 16 | Free() bool 17 | Dump() int 18 | Type() string 19 | Deactivate(string) error 20 | } 21 | 22 | type CryptSetupClient interface { 23 | Init(string) (Device, error) 24 | InitByName(string) (Device, error) 25 | } 26 | 27 | // CryptSetup manages encrypted devices. 28 | type CryptSetup struct { 29 | _ CryptSetupClient 30 | } 31 | 32 | // Init opens a crypt device by device path. 33 | func (c CryptSetup) Init(devicePath string) (Device, error) { 34 | device, err := cryptsetup.Init(devicePath) 35 | if err != nil { 36 | return nil, fmt.Errorf("init cryptsetup by device path %q: %w", devicePath, err) 37 | } 38 | return device, nil 39 | } 40 | 41 | // InitByName opens an active crypt device using its mapped name. 
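// For instance, a LUKS mapping previously activated under a name such as
// "luks-pvc-1234" (illustrative name) could be reacquired with:
//
//	dev, err := NewCryptSetup().InitByName("luks-pvc-1234")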
42 | func (c CryptSetup) InitByName(name string) (Device, error) { 43 | device, err := cryptsetup.InitByName(name) 44 | if err != nil { 45 | return nil, fmt.Errorf("init cryptsetup by name %q: %w", name, err) 46 | } 47 | return device, nil 48 | } 49 | 50 | type LuksDevice struct { 51 | Identifier string 52 | Device Device 53 | } 54 | 55 | func NewLuksDevice(crypt CryptSetupClient, path string) (LuksDevice, error) { 56 | dev, err := crypt.Init(path) 57 | if err != nil { 58 | return LuksDevice{}, err 59 | } 60 | return LuksDevice{Identifier: path, Device: dev}, nil 61 | } 62 | 63 | func NewLuksDeviceByName(crypt CryptSetupClient, name string) (LuksDevice, error) { 64 | dev, err := crypt.InitByName(name) 65 | if err != nil { 66 | return LuksDevice{}, err 67 | } 68 | return LuksDevice{Identifier: name, Device: dev}, nil 69 | } 70 | 71 | func NewCryptSetup() CryptSetup { 72 | return CryptSetup{} 73 | } 74 | -------------------------------------------------------------------------------- /pkg/filesystem/filesystem.go: -------------------------------------------------------------------------------- 1 | package filesystem 2 | 3 | import ( 4 | "io/fs" 5 | "os" 6 | "path/filepath" 7 | ) 8 | 9 | type FileInterface interface { 10 | Read([]byte) (int, error) 11 | Write([]byte) (int, error) 12 | Close() error 13 | } 14 | 15 | // FileSystem defines the methods for file system operations. 16 | type FileSystem interface { 17 | IsNotExist(err error) bool 18 | MkdirAll(path string, perm os.FileMode) error 19 | Stat(name string) (fs.FileInfo, error) 20 | Remove(path string) error 21 | OpenFile(name string, flag int, perm os.FileMode) (FileInterface, error) 22 | Open(name string) (FileInterface, error) 23 | Glob(pattern string) ([]string, error) 24 | EvalSymlinks(path string) (string, error) 25 | } 26 | 27 | // OSFileSystem implements FileSystemInterface using the os package. 
28 | type OSFileSystem struct{} 29 | 30 | func NewFileSystem() FileSystem { 31 | return OSFileSystem{} 32 | } 33 | 34 | func (OSFileSystem) Glob(pattern string) ([]string, error) { 35 | return filepath.Glob(pattern) 36 | } 37 | 38 | func (OSFileSystem) EvalSymlinks(path string) (string, error) { 39 | return filepath.EvalSymlinks(path) 40 | } 41 | 42 | func (OSFileSystem) IsNotExist(err error) bool { 43 | return os.IsNotExist(err) 44 | } 45 | 46 | func (OSFileSystem) MkdirAll(path string, perm os.FileMode) error { 47 | return os.MkdirAll(path, perm) 48 | } 49 | 50 | func (OSFileSystem) Stat(name string) (fs.FileInfo, error) { 51 | return os.Stat(name) 52 | } 53 | 54 | func (OSFileSystem) Remove(path string) error { 55 | return os.Remove(path) 56 | } 57 | 58 | //nolint:gosec // intentional variable to open file 59 | func (OSFileSystem) Open(name string) (FileInterface, error) { 60 | return os.Open(name) 61 | } 62 | 63 | //nolint:gosec // intentional variable to open file 64 | func (OSFileSystem) OpenFile(name string, flag int, perm os.FileMode) (FileInterface, error) { 65 | return os.OpenFile(name, flag, perm) 66 | } 67 | -------------------------------------------------------------------------------- /pkg/hwinfo/hwinfo.go: -------------------------------------------------------------------------------- 1 | package hwinfo 2 | 3 | import ( 4 | "github.com/jaypipes/ghw" 5 | ) 6 | 7 | type HardwareInfo interface { 8 | Block() (*ghw.BlockInfo, error) 9 | } 10 | 11 | func Block() (*ghw.BlockInfo, error) { 12 | return ghw.Block() 13 | } 14 | 15 | type hwInfo struct{} 16 | 17 | func (h *hwInfo) Block() (*ghw.BlockInfo, error) { 18 | return ghw.Block() 19 | } 20 | 21 | func NewHardwareInfo() HardwareInfo { 22 | return &hwInfo{} 23 | } 24 | -------------------------------------------------------------------------------- /pkg/linode-client/linode_client.go: -------------------------------------------------------------------------------- 1 | package linodeclient 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/linode/linodego" 7 | ) 8 | 9 | type LinodeClient interface { 10 | ListInstances(context.Context, *linodego.ListOptions) ([]linodego.Instance, error) // Needed for metadata 11 | ListVolumes(context.Context, *linodego.ListOptions) ([]linodego.Volume, error) 12 | ListInstanceVolumes(ctx context.Context, instanceID int, options *linodego.ListOptions) ([]linodego.Volume, error) 13 | ListInstanceDisks(ctx context.Context, instanceID int, options *linodego.ListOptions) ([]linodego.InstanceDisk, error) 14 | 15 | GetRegion(ctx context.Context, regionID string) (*linodego.Region, error) 16 | GetInstance(context.Context, int) (*linodego.Instance, error) 17 | GetVolume(context.Context, int) (*linodego.Volume, error) 18 | 19 | CreateVolume(context.Context, linodego.VolumeCreateOptions) (*linodego.Volume, error) 20 | CloneVolume(context.Context, int, string) (*linodego.Volume, error) 21 | 22 | AttachVolume(context.Context, int, *linodego.VolumeAttachOptions) (*linodego.Volume, error) 23 | DetachVolume(context.Context, int) error 24 | 25 | WaitForVolumeLinodeID(context.Context, int, *int, int) (*linodego.Volume, error) 26 | WaitForVolumeStatus(context.Context, int, linodego.VolumeStatus, int) (*linodego.Volume, error) 27 | DeleteVolume(context.Context, int) error 28 | 29 | ResizeVolume(context.Context, int, int) error 30 | 31 | NewEventPoller(context.Context, any, linodego.EntityType, linodego.EventAction) (*linodego.EventPoller, error) 32 | } 33 | 34 | func NewLinodeClient(token, ua, apiURL string) 
(*linodego.Client, error) { 35 | // Use linodego built-in http client which supports setting root CA cert 36 | linodeClient := linodego.NewClient(nil) 37 | client, err := linodeClient.UseURL(apiURL) 38 | if err != nil { 39 | return nil, err 40 | } 41 | client.SetUserAgent(ua) 42 | client.SetToken(token) 43 | 44 | return client, nil 45 | } 46 | -------------------------------------------------------------------------------- /pkg/linode-client/linode_client_test.go: -------------------------------------------------------------------------------- 1 | package linodeclient 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/linode/linodego" 7 | ) 8 | 9 | func TestNewLinodeClient(t *testing.T) { 10 | type args struct { 11 | token string 12 | ua string 13 | apiURL string 14 | } 15 | tests := []struct { 16 | name string 17 | args args 18 | want *linodego.Client 19 | wantErr bool 20 | }{ 21 | { 22 | name: "Valid input without custom API URL", 23 | args: args{ 24 | token: "test-token", 25 | ua: "test-user-agent", 26 | apiURL: "", 27 | }, 28 | want: &linodego.Client{}, 29 | wantErr: false, 30 | }, 31 | { 32 | name: "Valid input with custom API URL", 33 | args: args{ 34 | token: "test-token", 35 | ua: "test-user-agent", 36 | apiURL: "https://api.linode.com/v4", 37 | }, 38 | want: &linodego.Client{}, 39 | wantErr: false, 40 | }, 41 | { 42 | name: "Invalid API URL", 43 | args: args{ 44 | token: "test-token", 45 | ua: "test-user-agent", 46 | apiURL: "://invalid-url", 47 | }, 48 | want: nil, 49 | wantErr: true, 50 | }, 51 | } 52 | for _, tt := range tests { 53 | t.Run(tt.name, func(t *testing.T) { 54 | got, err := NewLinodeClient(tt.args.token, tt.args.ua, tt.args.apiURL) 55 | if (err != nil) != tt.wantErr { 56 | t.Errorf("NewLinodeClient() error = %v, wantErr %v", err, tt.wantErr) 57 | return 58 | } 59 | if tt.wantErr { 60 | return 61 | } 62 | if got == nil { 63 | t.Errorf("NewLinodeClient() returned nil, expected non-nil") 64 | return 65 | } 66 | }) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /pkg/linode-volumes/utils.go: -------------------------------------------------------------------------------- 1 | package linodevolumes 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "hash/fnv" 7 | "strconv" 8 | "strings" 9 | 10 | "google.golang.org/grpc/codes" 11 | "google.golang.org/grpc/status" 12 | ) 13 | 14 | type ( 15 | withVolume interface { 16 | GetVolumeId() string 17 | } 18 | 19 | withNode interface { 20 | GetNodeId() string 21 | } 22 | ) 23 | 24 | // TODO: Rename this variable 25 | const LinodeVolumeLabelLength = 32 26 | 27 | var ( 28 | ErrInvalidLinodeVolume = errors.New("invalid linode volume key") 29 | ) 30 | 31 | func hashStringToInt(b string) int { 32 | algorithm := fnv.New32a() 33 | _, _ = algorithm.Write([]byte(b)) 34 | i := algorithm.Sum32() 35 | return int(i) 36 | } 37 | 38 | func VolumeIdAsInt(caller string, w withVolume) (int, error) { 39 | strVolID := w.GetVolumeId() 40 | if caller != "" { 41 | caller += " " 42 | } 43 | 44 | if strVolID == "" { 45 | return 0, status.Errorf(codes.InvalidArgument, "%sVolume ID must be provided", caller) 46 | } 47 | 48 | volID := 0 49 | if key, err := ParseLinodeVolumeKey(strVolID); err == nil { 50 | volID = key.GetVolumeID() 51 | } else { 52 | // hack to permit csi-test to use ill-formatted volumeids 53 | volID = hashStringToInt(strVolID) 54 | } 55 | 56 | return volID, nil 57 | } 58 | 59 | func NodeIdAsInt(caller string, w withNode) (int, error) { 60 | strNodeID := w.GetNodeId() 61 | if caller != "" { 62 | 
caller += " " 63 | } 64 | 65 | if strNodeID == "" { 66 | return 0, status.Errorf(codes.InvalidArgument, "%sNode ID must be provided", caller) 67 | } 68 | 69 | nodeID, err := strconv.Atoi(strNodeID) 70 | if err != nil { 71 | nodeID = hashStringToInt(strNodeID) 72 | } 73 | 74 | return nodeID, nil 75 | } 76 | 77 | type LinodeVolumeKey struct { 78 | VolumeID int 79 | Label string 80 | } 81 | 82 | func CreateLinodeVolumeKey(id int, label string) LinodeVolumeKey { 83 | return LinodeVolumeKey{id, label} 84 | } 85 | 86 | func ParseLinodeVolumeKey(key string) (*LinodeVolumeKey, error) { 87 | keys := strings.SplitN(key, "-", 2) 88 | if len(keys) != 2 { 89 | return nil, ErrInvalidLinodeVolume 90 | } 91 | 92 | volumeID, err := strconv.Atoi(keys[0]) 93 | if err != nil { 94 | return nil, fmt.Errorf("invalid linode volume id: %q", keys[0]) 95 | } 96 | 97 | lvk := LinodeVolumeKey{volumeID, keys[1]} 98 | return &lvk, nil 99 | } 100 | 101 | func (key *LinodeVolumeKey) GetVolumeID() int { 102 | return key.VolumeID 103 | } 104 | 105 | func (key *LinodeVolumeKey) GetVolumeLabel() string { 106 | return key.Label 107 | } 108 | 109 | func (key *LinodeVolumeKey) GetNormalizedLabel() string { 110 | label := key.Label 111 | if len(label) > LinodeVolumeLabelLength { 112 | label = label[:LinodeVolumeLabelLength] 113 | } 114 | 115 | return label 116 | } 117 | 118 | func (key *LinodeVolumeKey) GetNormalizedLabelWithPrefix(prefix string) string { 119 | label := prefix + key.GetNormalizedLabel() 120 | if len(label) > LinodeVolumeLabelLength { 121 | label = label[:LinodeVolumeLabelLength] 122 | } 123 | return label 124 | } 125 | 126 | func (key *LinodeVolumeKey) GetVolumeKey() string { 127 | volumeName := key.GetNormalizedLabel() 128 | return fmt.Sprintf("%d-%s", key.VolumeID, volumeName) 129 | } 130 | -------------------------------------------------------------------------------- /pkg/logger/logger.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/go-logr/logr" 7 | "github.com/google/uuid" 8 | "google.golang.org/grpc" 9 | "k8s.io/klog/v2" 10 | ) 11 | 12 | // NewLogger creates a new Logger instance with a klogr logger. 13 | func NewLogger(ctx context.Context) (logr.Logger, context.Context) { 14 | log := klog.NewKlogr() 15 | return log, context.WithValue(ctx, logr.Logger{}, log) 16 | } 17 | 18 | // WithMethod returns a new Logger with method and traceID values, 19 | // a context containing the new Logger, and a function to log method completion. 20 | func WithMethod(log logr.Logger, method string) (logger logr.Logger, completionFunc func()) { 21 | traceID := uuid.New().String() 22 | 23 | logger = log.WithValues("method", method, "traceID", traceID) 24 | completionFunc = func() { 25 | logger.V(4).Info("Method completed") 26 | } 27 | return 28 | } 29 | 30 | // GetLogger retrieves the Logger from the context, or creates a new one if not present. 
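//
// From a caller's point of view the usual pattern is (the method name shown is
// just an example):
//
//	log, ctx := logger.GetLogger(ctx)
//	log, done := logger.WithMethod(log, "NodeStageVolume")
//	defer done()
//	log.V(2).Info("staging volume")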
31 | func GetLogger(ctx context.Context) (logr.Logger, context.Context) { 32 | if logger, ok := ctx.Value(logr.Logger{}).(logr.Logger); ok { 33 | return logger, ctx 34 | } 35 | return NewLogger(ctx) 36 | } 37 | 38 | func LogGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { 39 | logger, ctx := GetLogger(ctx) 40 | logger.V(3).Info("GRPC call", "method", info.FullMethod) 41 | logger.V(5).Info("GRPC request", "request", req) 42 | resp, err := handler(ctx, req) 43 | if err != nil { 44 | logger.Error(err, "GRPC error") 45 | } else { 46 | logger.V(5).Info("GRPC response", "response", resp) 47 | } 48 | return resp, err 49 | } 50 | -------------------------------------------------------------------------------- /pkg/logger/logger_test.go: -------------------------------------------------------------------------------- 1 | package logger_test 2 | 3 | import ( 4 | "context" 5 | "reflect" 6 | "testing" 7 | 8 | "github.com/go-logr/logr" 9 | "google.golang.org/grpc" 10 | "google.golang.org/grpc/codes" 11 | "google.golang.org/grpc/status" 12 | 13 | "github.com/linode/linode-blockstorage-csi-driver/pkg/logger" 14 | ) 15 | 16 | func TestLogGRPC(t *testing.T) { 17 | type args struct { 18 | req interface{} 19 | info *grpc.UnaryServerInfo 20 | handler grpc.UnaryHandler 21 | } 22 | tests := []struct { 23 | name string 24 | args args 25 | want interface{} 26 | wantErr bool 27 | }{ 28 | { 29 | name: "Successful GRPC call", 30 | args: args{ 31 | req: "test request", 32 | info: &grpc.UnaryServerInfo{ 33 | FullMethod: "/test.Service/TestMethod", 34 | }, 35 | handler: func(ctx context.Context, req interface{}) (interface{}, error) { 36 | return "test response", nil 37 | }, 38 | }, 39 | want: "test response", 40 | wantErr: false, 41 | }, 42 | { 43 | name: "GRPC call with error", 44 | args: args{ 45 | req: "test request", 46 | info: &grpc.UnaryServerInfo{ 47 | FullMethod: "/test.Service/TestMethod", 48 | }, 49 | handler: func(ctx context.Context, req interface{}) (interface{}, error) { 50 | return nil, status.Errorf(codes.Internal, "test error") 51 | }, 52 | }, 53 | want: nil, 54 | wantErr: true, 55 | }, 56 | } 57 | for _, tt := range tests { 58 | t.Run(tt.name, func(t *testing.T) { 59 | got, err := logger.LogGRPC(context.Background(), tt.args.req, tt.args.info, tt.args.handler) 60 | if (err != nil) != tt.wantErr { 61 | t.Errorf("LogGRPC() error = %v, wantErr %v", err, tt.wantErr) 62 | return 63 | } 64 | if !reflect.DeepEqual(got, tt.want) { 65 | t.Errorf("LogGRPC() = %v, want %v", got, tt.want) 66 | } 67 | }) 68 | } 69 | } 70 | 71 | func TestLogger_WithMethod(t *testing.T) { 72 | tests := []struct { 73 | name string 74 | method string 75 | }{ 76 | { 77 | name: "WithMethod with valid input", 78 | method: "TestMethod", 79 | }, 80 | { 81 | name: "WithMethod with empty method", 82 | method: "", 83 | }, 84 | } 85 | for _, tt := range tests { 86 | t.Run(tt.name, func(t *testing.T) { 87 | l, ctx := logger.NewLogger(context.Background()) 88 | _, done := logger.WithMethod(l, tt.method) 89 | 90 | if ctx == nil { 91 | t.Error("Logger.WithMethod() returned nil context") 92 | } 93 | if done == nil { 94 | t.Error("Logger.WithMethod() returned nil function") 95 | } 96 | 97 | // Check if the context contains the logger 98 | if ctx != nil { 99 | contextLogger, ok := ctx.Value(logr.Logger{}).(logr.Logger) 100 | if !ok || contextLogger != l { 101 | t.Error("Logger.WithMethod() context does not contain the correct logger") 102 | } 103 | } 104 | 105 | // Call the 
returned function and check if it doesn't panic 106 | if done != nil { 107 | func() { 108 | defer func() { 109 | if r := recover(); r != nil { 110 | t.Errorf("Logger.WithMethod() returned function panicked: %v", r) 111 | } 112 | }() 113 | done() 114 | }() 115 | } 116 | }) 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /pkg/mount-manager/safe_mounter.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | Unless required by applicable law or agreed to in writing, software 9 | distributed under the License is distributed on an "AS IS" BASIS, 10 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | See the License for the specific language governing permissions and 12 | limitations under the License. 13 | */ 14 | 15 | package mountmanager 16 | 17 | import ( 18 | "k8s.io/mount-utils" 19 | "k8s.io/utils/exec" 20 | ) 21 | 22 | type Mounter interface { 23 | mount.Interface 24 | } 25 | 26 | type Executor interface { 27 | exec.Interface 28 | } 29 | 30 | type Command interface { 31 | exec.Cmd 32 | } 33 | 34 | type Formater interface { 35 | FormatAndMount(source string, target string, fstype string, options []string) error 36 | } 37 | 38 | type ResizeFSer interface { 39 | Resize(devicePath string, deviceMountPath string) (bool, error) 40 | NeedResize(devicePath string, deviceMountPath string) (bool, error) 41 | } 42 | 43 | // alias mount.SafeFormatAndMount struct to add the Formater interface 44 | type SafeFormatAndMount struct { 45 | *mount.SafeFormatAndMount 46 | Formater 47 | } 48 | 49 | func NewSafeMounter() *SafeFormatAndMount { 50 | realMounter := mount.New("") 51 | realExec := exec.New() 52 | sfm := mount.SafeFormatAndMount{ 53 | Interface: realMounter, 54 | Exec: realExec, 55 | } 56 | return &SafeFormatAndMount{ 57 | SafeFormatAndMount: &sfm, 58 | Formater: &sfm, 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /pkg/mount-manager/safe_mounter_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | Unless required by applicable law or agreed to in writing, software 9 | distributed under the License is distributed on an "AS IS" BASIS, 10 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | See the License for the specific language governing permissions and 12 | limitations under the License. 
13 | */ 14 | 15 | package mountmanager 16 | 17 | import ( 18 | "testing" 19 | ) 20 | 21 | func TestNewSafeMounter(t *testing.T) { 22 | safeMounter := NewSafeMounter() 23 | 24 | if safeMounter == nil { 25 | t.Fatal("Expected non-nil SafeFormatAndMount, got nil") 26 | return 27 | } 28 | 29 | if safeMounter.Interface == nil { 30 | t.Fatal("Expected non-nil Interface, got nil") 31 | } 32 | 33 | if safeMounter.Exec == nil { 34 | t.Fatal("Expected non-nil Exec, got nil") 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /tests/csi-sanity/mkdir_in_pod.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | kubectl exec csi-linode-controller-0 -n kube-system -c csi-linode-plugin -- mktemp -d /tmp/csi-sanity.XXXXXX 3 | -------------------------------------------------------------------------------- /tests/csi-sanity/rmdir_in_pod.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | kubectl exec csi-linode-controller-0 -n kube-system -c csi-linode-plugin -- rmdir "$@" 3 | -------------------------------------------------------------------------------- /tests/csi-sanity/run-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euf -o pipefail 3 | 4 | # Define the CSI endpoint for the sanity tests 5 | CSI_ENDPOINT="dns:///127.0.0.1:10000" 6 | 7 | # Define the scripts for creating and deleting directories in the pod 8 | CREATE_DIRECTORY="./tests/csi-sanity/mkdir_in_pod.sh" 9 | DELETE_DIRECTORY="./tests/csi-sanity/rmdir_in_pod.sh" 10 | 11 | # Define the list of tests to skip as an array 12 | SKIP_TESTS=( 13 | "WithCapacity" 14 | # Need to skip it because we do not support volume snapshots 15 | "should fail when the volume source volume is not found" 16 | # This case fails because we currently do not support read-only volume creation on the Linode side, 17 | # but we support it in the CSI driver by mounting the volume as read-only 18 | "should fail when the volume is already published but is incompatible" 19 | ) 20 | 21 | # Join the array into a single string with '|' as the separator 22 | SKIP_TESTS_STRING=$( 23 | IFS='|' 24 | echo "${SKIP_TESTS[*]}" 25 | ) 26 | 27 | # Install the latest version of csi-sanity 28 | go install github.com/kubernetes-csi/csi-test/v5/cmd/csi-sanity@latest 29 | 30 | # Create socat statefulset 31 | kubectl apply -f tests/csi-sanity/socat.yaml 32 | 33 | # Wait for pod to be ready 34 | kubectl wait --for=condition=ready --timeout=60s pods/csi-socat-0 35 | 36 | # Patch the NodeServer daemonset and add the LINODE_TOKEN and LINODE_URL env vars. 37 | # The csi-sanity checks require CreateVolume and ListVolume calls to be working.
38 | 39 | # Warning: This patch expects the csi-linode-plugin container to be the 2nd container 40 | # in the manifest 41 | kubectl patch daemonset csi-linode-node \ 42 | -n kube-system \ 43 | --type='json' \ 44 | -p='[ 45 | { 46 | "op": "add", 47 | "path": "/spec/template/spec/containers/1/env/-", 48 | "value": { 49 | "name": "LINODE_URL", 50 | "value": "https://api.linode.com/v4" 51 | } 52 | }, 53 | { 54 | "op": "add", 55 | "path": "/spec/template/spec/containers/1/env/-", 56 | "value": { 57 | "name": "LINODE_TOKEN", 58 | "valueFrom": { 59 | "secretKeyRef": { 60 | "name": "linode", 61 | "key": "token" 62 | } 63 | } 64 | } 65 | } 66 | ]' 67 | 68 | echo "Waiting for daemonset csi-linode-node to be ready" 69 | kubectl rollout status daemonset/csi-linode-node -n kube-system 70 | kubectl wait --namespace kube-system --for=condition=Ready pods --selector=app=csi-linode-node --timeout=180s 71 | 72 | # Start the port forwarding in the background and log output to a file 73 | nohup kubectl port-forward pods/csi-socat-0 10000:10000 >port-forward.log 2>&1 & 74 | 75 | # Run the csi-sanity tests with the specified parameters 76 | csi-sanity --ginkgo.vv --ginkgo.trace --ginkgo.skip "$SKIP_TESTS_STRING" --csi.endpoint="$CSI_ENDPOINT" --csi.createstagingpathcmd="$CREATE_DIRECTORY" --csi.createmountpathcmd="$CREATE_DIRECTORY" --csi.removestagingpathcmd="$DELETE_DIRECTORY" --csi.removemountpathcmd="$DELETE_DIRECTORY" 77 | 78 | # Find the process ID (PID) of the kubectl port-forward command using the specified port 79 | PID=$(lsof -t -i :10000 -sTCP:LISTEN) 80 | 81 | # Check if a PID was found and kill the process if it exists 82 | if [ -z "$PID" ]; then 83 | echo "No process found on port 10000." 84 | else 85 | kill -9 "$PID" 86 | echo "Process on port 10000 with PID $PID has been killed." 
87 | fi 88 | 89 | # Remove the socat statefulset 90 | kubectl delete -f tests/csi-sanity/socat.yaml 91 | -------------------------------------------------------------------------------- /tests/csi-sanity/socat.yaml: -------------------------------------------------------------------------------- 1 | kind: StatefulSet 2 | apiVersion: apps/v1 3 | metadata: 4 | name: csi-socat 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: csi-socat 10 | template: 11 | metadata: 12 | labels: 13 | app: csi-socat 14 | spec: 15 | tolerations: 16 | - key: "node-role.kubernetes.io/control-plane" 17 | operator: "Exists" 18 | effect: "NoSchedule" 19 | affinity: 20 | podAffinity: 21 | requiredDuringSchedulingIgnoredDuringExecution: 22 | - labelSelector: 23 | matchLabels: 24 | app: csi-linode-controller 25 | topologyKey: kubernetes.io/hostname 26 | namespaces: 27 | - kube-system 28 | containers: 29 | - name: socat 30 | image: alpine/socat:1.0.3 31 | args: 32 | - tcp-listen:10000,fork,reuseaddr 33 | - unix-connect:/csi/csi.sock 34 | volumeMounts: 35 | - mountPath: /csi 36 | name: plugin-dir 37 | volumes: 38 | - hostPath: 39 | path: /var/lib/kubelet/plugins/linodebs.csi.linode.com 40 | type: DirectoryOrCreate 41 | name: plugin-dir 42 | -------------------------------------------------------------------------------- /tests/e2e/setup/ctlptl-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: ctlptl.dev/v1alpha1 3 | kind: Cluster 4 | product: kind 5 | kindV1Alpha4Cluster: 6 | name: capl 7 | nodes: 8 | - role: control-plane 9 | image: kindest/node:v1.29.2 10 | -------------------------------------------------------------------------------- /tests/e2e/setup/linode-secret.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: linode 6 | namespace: kube-system 7 | stringData: 8 | token: ${LINODE_TOKEN} 9 | region: us-lax 10 | type: Opaque 11 | -------------------------------------------------------------------------------- /tests/e2e/test/check-volume-deleted.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if namespace is provided 4 | if [ "$#" -ne 1 ]; then 5 | echo "Usage: $0 " >&2 6 | exit 1 7 | fi 8 | 9 | # Set environment variables (if not already set) 10 | TARGET_API=${TARGET_API:-https://api.linode.com} 11 | TARGET_API_VERSION=${TARGET_API_VERSION:-v4} 12 | URI=${URI:-volumes} 13 | FILTER=$1 14 | MAX_RETRIES=5 15 | RETRY_DELAY=5 16 | 17 | curl_command() { 18 | curl -s \ 19 | -H "Authorization: Bearer $LINODE_TOKEN" \ 20 | -H "X-Filter: $FILTER" \ 21 | -H "Content-Type: application/json" \ 22 | "$TARGET_API/$TARGET_API_VERSION/$URI" 23 | } 24 | 25 | echo "Checking Linode API for volume status..." 26 | 27 | for ((i=1; i<=$MAX_RETRIES; i++)); do 28 | response=$(curl_command) 29 | 30 | if [ $? -eq 0 ]; then 31 | # Check if the response is valid JSON 32 | if jq -e . >/dev/null 2>&1 <<< "$response"; then 33 | # Extract results and check if it's null 34 | results=$(echo "$response" | jq -r '.results') 35 | 36 | if [ "$results" = "0" ]; then 37 | echo "Volume deleted in Linode" 38 | exit 0 39 | else 40 | echo "Volume still available in Linode. Response:" 41 | echo "$response" 42 | if [ $i -lt $MAX_RETRIES ]; then 43 | echo "Retrying in $RETRY_DELAY seconds..." 44 | sleep $RETRY_DELAY 45 | else 46 | echo "Max retries reached. Volume is still attached in Linode." 
>&2 47 | exit 1 48 | fi 49 | fi 50 | else 51 | echo "Invalid JSON response. Retrying..." 52 | fi 53 | else 54 | echo "Curl command failed. Retrying..." 55 | fi 56 | 57 | if [ $i -lt $MAX_RETRIES ]; then 58 | sleep $RETRY_DELAY 59 | else 60 | echo "Max retries reached. Exiting." >&2 61 | exit 1 62 | fi 63 | done 64 | 65 | -------------------------------------------------------------------------------- /tests/e2e/test/check-volume-detached.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the correct number of arguments are provided 4 | if [ "$#" -ne 1 ]; then 5 | echo "Usage: $0 " >&2 6 | exit 1 7 | fi 8 | 9 | INTERVAL=3 # Time in seconds between checks 10 | 11 | # Set environment variables for the second part 12 | TARGET_API=${TARGET_API:-https://api.linode.com} 13 | TARGET_API_VERSION=${TARGET_API_VERSION:-v4} 14 | URI=${URI:-volumes} 15 | FILTER=$1 16 | MAX_RETRIES=5 17 | RETRY_DELAY=5 18 | 19 | curl_command() { 20 | curl -s \ 21 | -H "Authorization: Bearer $LINODE_TOKEN" \ 22 | -H "X-Filter: $FILTER" \ 23 | -H "Content-Type: application/json" \ 24 | "$TARGET_API/$TARGET_API_VERSION/$URI" 25 | } 26 | 27 | echo "Checking Linode API for volume status..." 28 | 29 | for ((i=1; i<=$MAX_RETRIES; i++)); do 30 | response=$(curl_command) 31 | if [ $? -eq 0 ]; then 32 | # Check if the response is valid JSON 33 | if jq -e . >/dev/null 2>&1 <<< "$response"; then 34 | # Extract linode_id & volume_name 35 | linode_id=$(echo "$response" | jq -r '.data[0].linode_id') 36 | volume_name=$(echo "$response" | jq -r '.data[0].label' | sed 's/^pvc//') 37 | 38 | if [ "$linode_id" = "null" ]; then 39 | echo "Volume detached in Linode" 40 | break 41 | else 42 | echo "Volume still attached in Linode. Response:" 43 | echo "$response" 44 | if [ $i -lt $MAX_RETRIES ]; then 45 | echo "Retrying in $RETRY_DELAY seconds..." 46 | sleep $RETRY_DELAY 47 | else 48 | echo "Max retries reached. Volume is still attached in Linode." >&2 49 | exit 1 50 | fi 51 | fi 52 | else 53 | echo "Invalid JSON response. Retrying..." 54 | fi 55 | else 56 | echo "Curl command failed. Retrying..." 57 | fi 58 | 59 | if [ $i -lt $MAX_RETRIES ]; then 60 | sleep $RETRY_DELAY 61 | else 62 | echo "Max retries reached. Exiting." >&2 63 | exit 1 64 | fi 65 | done 66 | 67 | 68 | echo "Checking for volume pvc-$volume_name in Kubernetes..." 69 | 70 | for ((i=1; i<=$MAX_RETRIES; i++)); do 71 | # Check if any node has both volumesInUse and volumesAttached fields 72 | NODE_HAS_FIELDS=$(kubectl get nodes -o json | jq ' 73 | .items | map( 74 | .status | 75 | (has("volumesInUse") and has("volumesAttached")) 76 | ) | any 77 | ') 78 | 79 | if [ "$NODE_HAS_FIELDS" = "false" ]; then 80 | echo "No nodes have both volumesInUse and volumesAttached fields." 81 | break 82 | else 83 | # Check if the volume is in volumesInUse or volumesAttached of any node 84 | VOLUME_PRESENT=$(kubectl get nodes -o json | jq --arg vol "$volume_name" ' 85 | .items[] | 86 | select(.status | (has("volumesInUse") and has("volumesAttached"))) | 87 | (.status.volumesAttached | map(.name) | any(contains($vol))) or 88 | (.status.volumesInUse | any(contains($vol))) 89 | ' | grep -q true && echo "true" || echo "false") 90 | 91 | if [ "$VOLUME_PRESENT" = "true" ]; then 92 | echo "Volume $volume_name is still attached or in use. Waiting..." 93 | if [ $i -lt $MAX_RETRIES ]; then 94 | echo "Retrying in $RETRY_DELAY seconds..." 95 | sleep $RETRY_DELAY 96 | else 97 | echo "Max retries reached. Volume is still attached to the Node." 
>&2 98 | exit 1 99 | fi 100 | else 101 | echo "Volume $volume_name is not attached or in use by any node." 102 | break 103 | fi 104 | fi 105 | 106 | if [ $i -lt $MAX_RETRIES ]; then 107 | sleep $RETRY_DELAY 108 | else 109 | echo "Max retries reached. Exiting." >&2 110 | exit 1 111 | fi 112 | done 113 | 114 | echo "Check completed successfully. Volume was successfully detached from all nodes!" 115 | -------------------------------------------------------------------------------- /tests/e2e/test/check-volume-size.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if namespace is provided 4 | if [ "$#" -ne 2 ]; then 5 | echo "Usage: $0 " >&2 6 | exit 1 7 | fi 8 | 9 | # Set environment variables (if not already set) 10 | TARGET_API=${TARGET_API:-https://api.linode.com} 11 | TARGET_API_VERSION=${TARGET_API_VERSION:-v4} 12 | URI=${URI:-volumes} 13 | FILTER=$1 14 | SIZE=$2 15 | MAX_RETRIES=5 16 | RETRY_DELAY=5 17 | 18 | curl_command() { 19 | curl -s \ 20 | -H "Authorization: Bearer $LINODE_TOKEN" \ 21 | -H "X-Filter: $FILTER" \ 22 | -H "Content-Type: application/json" \ 23 | "$TARGET_API/$TARGET_API_VERSION/$URI" 24 | } 25 | 26 | echo "Checking Linode API for volume status..." 27 | 28 | for ((i=1; i<=$MAX_RETRIES; i++)); do 29 | response=$(curl_command) 30 | 31 | if [ $? -eq 0 ]; then 32 | # Check if the response is valid JSON 33 | if jq -e . >/dev/null 2>&1 <<< "$response"; then 34 | # Extract results and check if it's null 35 | volume_size=$(echo "$response" | jq -r '.data[0].size') 36 | 37 | if [ "$volume_size" = $SIZE ]; then 38 | echo "Volume size was changed successfully. Current size: $volume_size" 39 | exit 0 40 | else 41 | echo "Volume size has not been changed. Current size: $volume_size" 42 | if [ $i -lt $MAX_RETRIES ]; then 43 | echo "Retrying in $RETRY_DELAY seconds..." 44 | sleep $RETRY_DELAY 45 | else 46 | echo "Max retries reached. Volume is still attached in Linode." >&2 47 | exit 1 48 | fi 49 | fi 50 | else 51 | echo "Invalid JSON response. Retrying..." 52 | fi 53 | else 54 | echo "Curl command failed. Retrying..." 55 | fi 56 | 57 | if [ $i -lt $MAX_RETRIES ]; then 58 | sleep $RETRY_DELAY 59 | else 60 | echo "Max retries reached. Exiting." 
>&2 61 | exit 1 62 | fi 63 | done 64 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-basic-filesystem/assert-csi-driver-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: csi-linode-node 5 | namespace: kube-system 6 | status: 7 | numberAvailable: ($nodes) 8 | numberReady: ($nodes) 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: csi-linode-controller 14 | namespace: kube-system 15 | status: 16 | availableReplicas: 1 17 | readyReplicas: 1 18 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-basic-filesystem/assert-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: e2e-pod 5 | status: 6 | containerStatuses: 7 | - name: e2e-pod 8 | ready: true 9 | started: true 10 | phase: Running 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-filesystem 16 | status: 17 | capacity: 18 | storage: 10Gi 19 | phase: Bound 20 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-basic-filesystem/create-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: (join('-', ['linode-block-storage', $namespace])) 6 | provisioner: linodebs.csi.linode.com 7 | reclaimPolicy: Delete 8 | volumeBindingMode: Immediate 9 | parameters: 10 | linodebs.csi.linode.com/volumeTags: (to_string($namespace)) 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-filesystem 16 | spec: 17 | accessModes: 18 | - ReadWriteOnce 19 | resources: 20 | requests: 21 | storage: 10Gi 22 | storageClassName: (join('-', ['linode-block-storage', $namespace])) 23 | --- 24 | apiVersion: v1 25 | kind: Pod 26 | metadata: 27 | name: e2e-pod 28 | spec: 29 | containers: 30 | - name: e2e-pod 31 | image: ubuntu 32 | command: 33 | - sleep 34 | - "1000000" 35 | volumeMounts: 36 | - mountPath: /data 37 | name: csi-volume 38 | tolerations: 39 | - key: "node-role.kubernetes.io/control-plane" 40 | operator: "Exists" 41 | effect: "NoSchedule" 42 | volumes: 43 | - name: csi-volume 44 | persistentVolumeClaim: 45 | claimName: pvc-filesystem 46 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-create-ext4-filesystem/assert-csi-driver-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: csi-linode-node 5 | namespace: kube-system 6 | status: 7 | numberAvailable: ($nodes) 8 | numberReady: ($nodes) 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: csi-linode-controller 14 | namespace: kube-system 15 | status: 16 | availableReplicas: 1 17 | readyReplicas: 1 18 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-create-ext4-filesystem/assert-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: e2e-pod 5 | status: 6 | containerStatuses: 7 | - name: e2e-pod 8 | ready: true 9 | started: true 10 | 
phase: Running 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-block 16 | status: 17 | capacity: 18 | storage: 10Gi 19 | phase: Bound 20 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-create-ext4-filesystem/create-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: (join('-', ['linode-block-storage', $namespace])) 6 | provisioner: linodebs.csi.linode.com 7 | reclaimPolicy: Delete 8 | volumeBindingMode: Immediate 9 | parameters: 10 | linodebs.csi.linode.com/volumeTags: (to_string($namespace)) 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-block 16 | spec: 17 | accessModes: 18 | - ReadWriteOnce 19 | resources: 20 | requests: 21 | storage: 10Gi 22 | storageClassName: (join('-', ['linode-block-storage', $namespace])) 23 | volumeMode: Block 24 | --- 25 | apiVersion: v1 26 | kind: Pod 27 | metadata: 28 | name: e2e-pod 29 | spec: 30 | containers: 31 | - name: e2e-pod 32 | image: ubuntu 33 | command: ["/bin/sh"] 34 | args: ["-xc", "/bin/dd if=/dev/block of=/dev/null bs=1K count=10; /bin/sleep 1000000"] 35 | volumeDevices: 36 | - name: csi-volume 37 | devicePath: /dev/block 38 | tolerations: 39 | - key: "node-role.kubernetes.io/control-plane" 40 | operator: "Exists" 41 | effect: "NoSchedule" 42 | volumes: 43 | - name: csi-volume 44 | persistentVolumeClaim: 45 | claimName: pvc-block 46 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-create-xfs-filesystem/assert-csi-driver-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: csi-linode-node 5 | namespace: kube-system 6 | status: 7 | numberAvailable: ($nodes) 8 | numberReady: ($nodes) 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: csi-linode-controller 14 | namespace: kube-system 15 | status: 16 | availableReplicas: 1 17 | readyReplicas: 1 18 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-create-xfs-filesystem/assert-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: e2e-pod-xfs 5 | status: 6 | containerStatuses: 7 | - name: e2e-pod-xfs 8 | ready: true 9 | started: true 10 | phase: Running 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-xfs 16 | status: 17 | capacity: 18 | storage: 10Gi 19 | phase: Bound 20 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-create-xfs-filesystem/create-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: (join('-', ['linode-block-storage', $namespace])) 6 | provisioner: linodebs.csi.linode.com 7 | reclaimPolicy: Delete 8 | volumeBindingMode: Immediate 9 | parameters: 10 | linodebs.csi.linode.com/volumeTags: (to_string($namespace)) 11 | fstype: xfs 12 | --- 13 | apiVersion: v1 14 | kind: PersistentVolumeClaim 15 | metadata: 16 | name: pvc-xfs 17 | spec: 18 | accessModes: 19 | - ReadWriteOnce 20 | resources: 21 | 
requests: 22 | storage: 10Gi 23 | storageClassName: (join('-', ['linode-block-storage', $namespace])) 24 | --- 25 | apiVersion: v1 26 | kind: Pod 27 | metadata: 28 | name: e2e-pod-xfs 29 | spec: 30 | containers: 31 | - name: e2e-pod-xfs 32 | image: ubuntu 33 | command: ["/bin/sh"] 34 | args: ["-xc", "/bin/dd if=/dev/block of=/dev/null bs=1K count=10; /bin/sleep 1000000"] 35 | volumeMounts: 36 | - name: csi-volume 37 | mountPath: "/data" 38 | tolerations: 39 | - key: "node-role.kubernetes.io/control-plane" 40 | operator: "Exists" 41 | effect: "NoSchedule" 42 | volumes: 43 | - name: csi-volume 44 | persistentVolumeClaim: 45 | claimName: pvc-xfs 46 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-expand-raw-block-storage/assert-csi-driver-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: csi-linode-node 5 | namespace: kube-system 6 | status: 7 | numberAvailable: ($nodes) 8 | numberReady: ($nodes) 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: csi-linode-controller 14 | namespace: kube-system 15 | status: 16 | availableReplicas: 1 17 | readyReplicas: 1 18 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-expand-raw-block-storage/assert-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: e2e-pod 5 | status: 6 | containerStatuses: 7 | - name: e2e-pod 8 | ready: true 9 | started: true 10 | phase: Running 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-block 16 | status: 17 | capacity: 18 | storage: 10Gi 19 | phase: Bound 20 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-expand-raw-block-storage/create-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: (join('-', ['linode-block-storage', $namespace])) 6 | provisioner: linodebs.csi.linode.com 7 | reclaimPolicy: Delete 8 | volumeBindingMode: Immediate 9 | parameters: 10 | linodebs.csi.linode.com/volumeTags: (to_string($namespace)) 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-block 16 | spec: 17 | accessModes: 18 | - ReadWriteOnce 19 | resources: 20 | requests: 21 | storage: 10Gi 22 | storageClassName: (join('-', ['linode-block-storage', $namespace])) 23 | volumeMode: Block 24 | --- 25 | apiVersion: v1 26 | kind: Pod 27 | metadata: 28 | name: e2e-pod 29 | spec: 30 | containers: 31 | - name: e2e-pod 32 | image: ubuntu 33 | command: ["/bin/sh"] 34 | args: ["-xc", "/bin/dd if=/dev/block of=/dev/null bs=1K count=10; /bin/sleep 1000000"] 35 | volumeDevices: 36 | - name: csi-volume 37 | devicePath: /dev/block 38 | tolerations: 39 | - key: "node-role.kubernetes.io/control-plane" 40 | operator: "Exists" 41 | effect: "NoSchedule" 42 | volumes: 43 | - name: csi-volume 44 | persistentVolumeClaim: 45 | claimName: pvc-block 46 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-expand-raw-block-storage/update-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 
| metadata: 4 | name: pvc-block 5 | spec: 6 | resources: 7 | requests: 8 | storage: 20Gi 9 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-expand-storage-size/assert-csi-driver-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: csi-linode-node 5 | namespace: kube-system 6 | status: 7 | numberAvailable: ($nodes) 8 | numberReady: ($nodes) 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: csi-linode-controller 14 | namespace: kube-system 15 | status: 16 | availableReplicas: 1 17 | readyReplicas: 1 18 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-expand-storage-size/assert-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: e2e-pod 5 | status: 6 | containerStatuses: 7 | - name: e2e-pod 8 | ready: true 9 | started: true 10 | phase: Running 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-filesystem 16 | status: 17 | capacity: 18 | storage: 10Gi 19 | phase: Bound 20 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-expand-storage-size/create-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: (join('-', ['linode-block-storage', $namespace])) 6 | provisioner: linodebs.csi.linode.com 7 | reclaimPolicy: Delete 8 | volumeBindingMode: Immediate 9 | parameters: 10 | linodebs.csi.linode.com/volumeTags: (to_string($namespace)) 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-filesystem 16 | spec: 17 | accessModes: 18 | - ReadWriteOnce 19 | resources: 20 | requests: 21 | storage: 10Gi 22 | storageClassName: (join('-', ['linode-block-storage', $namespace])) 23 | --- 24 | apiVersion: v1 25 | kind: Pod 26 | metadata: 27 | name: e2e-pod 28 | spec: 29 | containers: 30 | - name: e2e-pod 31 | image: ubuntu 32 | command: 33 | - sleep 34 | - "1000000" 35 | volumeMounts: 36 | - mountPath: /data 37 | name: csi-volume 38 | tolerations: 39 | - key: "node-role.kubernetes.io/control-plane" 40 | operator: "Exists" 41 | effect: "NoSchedule" 42 | volumes: 43 | - name: csi-volume 44 | persistentVolumeClaim: 45 | claimName: pvc-filesystem 46 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-expand-storage-size/update-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc-filesystem 5 | spec: 6 | resources: 7 | requests: 8 | storage: 20Gi 9 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-linode-encryption/assert-csi-driver-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: csi-linode-node 5 | namespace: kube-system 6 | status: 7 | numberAvailable: ($nodes) 8 | numberReady: ($nodes) 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: csi-linode-controller 14 | namespace: kube-system 15 | status: 16 | 
availableReplicas: 1 17 | readyReplicas: 1 18 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-linode-encryption/assert-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: e2e-pod 5 | status: 6 | containerStatuses: 7 | - name: e2e-pod 8 | ready: true 9 | started: true 10 | phase: Running 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-filesystem-encrypted 16 | status: 17 | capacity: 18 | storage: 10Gi 19 | phase: Bound 20 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-linode-encryption/create-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | annotations: 6 | storageclass.kubernetes.io/is-default-class: "true" 7 | name: (join('-', ['linode-blockstorage-encrypted', $namespace])) 8 | namespace: kube-system 9 | provisioner: linodebs.csi.linode.com 10 | reclaimPolicy: Delete 11 | parameters: 12 | linodebs.csi.linode.com/encrypted: "true" 13 | linodebs.csi.linode.com/volumeTags: (to_string($namespace)) 14 | --- 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | metadata: 18 | name: pvc-filesystem-encrypted 19 | spec: 20 | accessModes: 21 | - ReadWriteOnce 22 | resources: 23 | requests: 24 | storage: 10Gi 25 | storageClassName: (join('-', ['linode-blockstorage-encrypted', $namespace])) 26 | --- 27 | apiVersion: v1 28 | kind: Pod 29 | metadata: 30 | name: e2e-pod 31 | spec: 32 | containers: 33 | - name: e2e-pod 34 | image: ubuntu 35 | command: 36 | - sleep 37 | - "1000000" 38 | volumeMounts: 39 | - mountPath: /data 40 | name: csi-volume 41 | tolerations: 42 | - key: "node-role.kubernetes.io/control-plane" 43 | operator: "Exists" 44 | effect: "NoSchedule" 45 | volumes: 46 | - name: csi-volume 47 | persistentVolumeClaim: 48 | claimName: pvc-filesystem-encrypted 49 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-luks-mov-volume/assert-csi-driver-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: csi-linode-node 5 | namespace: kube-system 6 | status: 7 | numberAvailable: ($nodes) 8 | numberReady: ($nodes) 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: csi-linode-controller 14 | namespace: kube-system 15 | status: 16 | availableReplicas: 1 17 | readyReplicas: 1 18 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-luks-mov-volume/assert-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: test-statefulset-0 5 | status: 6 | containerStatuses: 7 | - name: example 8 | ready: true 9 | started: true 10 | phase: Running 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: data-test-statefulset-0 16 | status: 17 | capacity: 18 | storage: 10Gi 19 | phase: Bound 20 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-luks-mov-volume/create-storage-class-statefulset.yaml: -------------------------------------------------------------------------------- 1 | 
allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | annotations: 6 | storageclass.kubernetes.io/is-default-class: "true" 7 | name: (join('-', ['linode-block-storage-luks', $namespace])) 8 | namespace: kube-system 9 | provisioner: linodebs.csi.linode.com 10 | reclaimPolicy: Delete 11 | parameters: 12 | linodebs.csi.linode.com/luks-encrypted: "true" 13 | linodebs.csi.linode.com/luks-cipher: "aes-xts-plain64" 14 | linodebs.csi.linode.com/luks-key-size: "512" 15 | csi.storage.k8s.io/node-stage-secret-namespace: ($namespace) 16 | csi.storage.k8s.io/node-stage-secret-name: csi-encrypt-example-luks-key 17 | linodebs.csi.linode.com/volumeTags: (to_string($namespace)) 18 | --- 19 | apiVersion: v1 20 | kind: Secret 21 | metadata: 22 | name: csi-encrypt-example-luks-key 23 | stringData: 24 | luksKey: ($lukskey) 25 | --- 26 | apiVersion: apps/v1 27 | kind: StatefulSet 28 | metadata: 29 | name: test-statefulset 30 | spec: 31 | serviceName: "example" 32 | replicas: 1 33 | selector: 34 | matchLabels: 35 | app: example 36 | template: 37 | metadata: 38 | labels: 39 | app: example 40 | spec: 41 | affinity: 42 | nodeAffinity: 43 | requiredDuringSchedulingIgnoredDuringExecution: 44 | nodeSelectorTerms: 45 | - matchExpressions: 46 | - key: node-role.kubernetes.io/control-plane 47 | operator: Exists 48 | tolerations: 49 | - key: "node-role.kubernetes.io/control-plane" 50 | operator: "Exists" 51 | effect: "NoSchedule" 52 | containers: 53 | - name: example 54 | image: nginx 55 | securityContext: 56 | privileged: true 57 | capabilities: 58 | add: ["SYS_ADMIN"] 59 | allowPrivilegeEscalation: true 60 | volumeMounts: 61 | - name: data 62 | mountPath: /data 63 | volumeClaimTemplates: 64 | - metadata: 65 | name: data 66 | spec: 67 | accessModes: ["ReadWriteOnce"] 68 | storageClassName: (join('-', ['linode-block-storage-luks', $namespace])) 69 | resources: 70 | requests: 71 | storage: 10Gi 72 | 73 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-luks-remount/assert-csi-driver-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: csi-linode-node 5 | namespace: kube-system 6 | status: 7 | numberAvailable: ($nodes) 8 | numberReady: ($nodes) 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: csi-linode-controller 14 | namespace: kube-system 15 | status: 16 | availableReplicas: 1 17 | readyReplicas: 1 18 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-luks-remount/assert-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: e2e-pod 5 | status: 6 | containerStatuses: 7 | - name: e2e-pod 8 | ready: true 9 | started: true 10 | phase: Running 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-filesystem-luks 16 | status: 17 | capacity: 18 | storage: 10Gi 19 | phase: Bound 20 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-luks-remount/create-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: e2e-pod 5 | spec: 6 | containers: 7 | - name: e2e-pod 8 | image: ubuntu 9 | command: 10 | - sleep 11 | - "1000000" 12 | volumeMounts: 13 | - 
mountPath: /data 14 | name: csi-volume 15 | securityContext: 16 | privileged: true 17 | capabilities: 18 | add: ["SYS_ADMIN"] 19 | allowPrivilegeEscalation: true 20 | tolerations: 21 | - key: "node-role.kubernetes.io/control-plane" 22 | operator: "Exists" 23 | effect: "NoSchedule" 24 | volumes: 25 | - name: csi-volume 26 | persistentVolumeClaim: 27 | claimName: pvc-filesystem-luks 28 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-luks-remount/create-pvc.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | annotations: 6 | storageclass.kubernetes.io/is-default-class: "true" 7 | name: (join('-', ['linode-block-storage-luks', $namespace])) 8 | namespace: kube-system 9 | provisioner: linodebs.csi.linode.com 10 | reclaimPolicy: Retain 11 | parameters: 12 | linodebs.csi.linode.com/luks-encrypted: "true" 13 | linodebs.csi.linode.com/luks-cipher: "aes-xts-plain64" 14 | linodebs.csi.linode.com/luks-key-size: "512" 15 | csi.storage.k8s.io/node-stage-secret-namespace: ($namespace) 16 | csi.storage.k8s.io/node-stage-secret-name: csi-encrypt-example-luks-key 17 | linodebs.csi.linode.com/volumeTags: (to_string($namespace)) 18 | --- 19 | apiVersion: v1 20 | kind: Secret 21 | metadata: 22 | name: csi-encrypt-example-luks-key 23 | stringData: 24 | luksKey: ($lukskey) 25 | --- 26 | apiVersion: v1 27 | kind: PersistentVolumeClaim 28 | metadata: 29 | name: pvc-filesystem-luks 30 | spec: 31 | accessModes: 32 | - ReadWriteOnce 33 | resources: 34 | requests: 35 | storage: 10Gi 36 | storageClassName: (join('-', ['linode-block-storage-luks', $namespace])) 37 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-luks/assert-csi-driver-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: csi-linode-node 5 | namespace: kube-system 6 | status: 7 | numberAvailable: ($nodes) 8 | numberReady: ($nodes) 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: csi-linode-controller 14 | namespace: kube-system 15 | status: 16 | availableReplicas: 1 17 | readyReplicas: 1 18 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-luks/assert-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: e2e-pod 5 | status: 6 | containerStatuses: 7 | - name: e2e-pod 8 | ready: true 9 | started: true 10 | phase: Running 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-filesystem-luks 16 | status: 17 | capacity: 18 | storage: 10Gi 19 | phase: Bound 20 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-luks/create-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | annotations: 6 | storageclass.kubernetes.io/is-default-class: "true" 7 | name: (join('-', ['linode-block-storage-luks', $namespace])) 8 | namespace: kube-system 9 | provisioner: linodebs.csi.linode.com 10 | reclaimPolicy: Delete 11 | parameters: 12 | linodebs.csi.linode.com/luks-encrypted: "true" 13 | 
linodebs.csi.linode.com/luks-cipher: "aes-xts-plain64" 14 | linodebs.csi.linode.com/luks-key-size: "512" 15 | csi.storage.k8s.io/node-stage-secret-namespace: ($namespace) 16 | csi.storage.k8s.io/node-stage-secret-name: csi-encrypt-example-luks-key 17 | linodebs.csi.linode.com/volumeTags: (to_string($namespace)) 18 | --- 19 | apiVersion: v1 20 | kind: Secret 21 | metadata: 22 | name: csi-encrypt-example-luks-key 23 | stringData: 24 | luksKey: ($lukskey) 25 | --- 26 | apiVersion: v1 27 | kind: PersistentVolumeClaim 28 | metadata: 29 | name: pvc-filesystem-luks 30 | spec: 31 | accessModes: 32 | - ReadWriteOnce 33 | resources: 34 | requests: 35 | storage: 10Gi 36 | storageClassName: (join('-', ['linode-block-storage-luks', $namespace])) 37 | --- 38 | apiVersion: v1 39 | kind: Pod 40 | metadata: 41 | name: e2e-pod 42 | spec: 43 | containers: 44 | - name: e2e-pod 45 | image: ubuntu 46 | command: 47 | - sleep 48 | - "1000000" 49 | volumeMounts: 50 | - mountPath: /data 51 | name: csi-volume 52 | securityContext: 53 | privileged: true 54 | capabilities: 55 | add: ["SYS_ADMIN"] 56 | allowPrivilegeEscalation: true 57 | tolerations: 58 | - key: "node-role.kubernetes.io/control-plane" 59 | operator: "Exists" 60 | effect: "NoSchedule" 61 | volumes: 62 | - name: csi-volume 63 | persistentVolumeClaim: 64 | claimName: pvc-filesystem-luks 65 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-readonly/assert-csi-driver-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: csi-linode-node 5 | namespace: kube-system 6 | status: 7 | numberAvailable: ($nodes) 8 | numberReady: ($nodes) 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: csi-linode-controller 14 | namespace: kube-system 15 | status: 16 | availableReplicas: 1 17 | readyReplicas: 1 18 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-readonly/assert-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: e2e-pod 5 | status: 6 | containerStatuses: 7 | - name: e2e-pod 8 | ready: true 9 | started: true 10 | phase: Running 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-filesystem 16 | status: 17 | capacity: 18 | storage: 10Gi 19 | phase: Bound 20 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-readonly/create-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: (join('-', ['linode-block-storage', $namespace])) 6 | provisioner: linodebs.csi.linode.com 7 | reclaimPolicy: Delete 8 | volumeBindingMode: Immediate 9 | parameters: 10 | linodebs.csi.linode.com/volumeTags: (to_string($namespace)) 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-filesystem 16 | spec: 17 | accessModes: 18 | - ReadWriteOnce 19 | resources: 20 | requests: 21 | storage: 10Gi 22 | storageClassName: (join('-', ['linode-block-storage', $namespace])) 23 | --- 24 | apiVersion: v1 25 | kind: Pod 26 | metadata: 27 | name: e2e-pod 28 | spec: 29 | containers: 30 | - name: e2e-pod 31 | image: ubuntu 32 | command: 33 | - sleep 34 | - "1000000" 35 | 
volumeMounts: 36 | - mountPath: /data 37 | name: csi-volume 38 | readOnly: true 39 | securityContext: 40 | privileged: true 41 | capabilities: 42 | add: ["SYS_ADMIN"] 43 | allowPrivilegeEscalation: true 44 | tolerations: 45 | - key: "node-role.kubernetes.io/control-plane" 46 | operator: "Exists" 47 | effect: "NoSchedule" 48 | volumes: 49 | - name: csi-volume 50 | persistentVolumeClaim: 51 | claimName: pvc-filesystem 52 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-unexpected-reboot/assert-csi-driver-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: csi-linode-node 5 | namespace: kube-system 6 | status: 7 | numberAvailable: ($nodes) 8 | numberReady: ($nodes) 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: csi-linode-controller 14 | namespace: kube-system 15 | status: 16 | availableReplicas: 1 17 | readyReplicas: 1 18 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-unexpected-reboot/assert-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: e2e-pod 5 | status: 6 | containerStatuses: 7 | - name: e2e-pod 8 | ready: true 9 | started: true 10 | phase: Running 11 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-unexpected-reboot/assert-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: e2e-pod 5 | status: 6 | containerStatuses: 7 | - name: e2e-pod 8 | ready: true 9 | started: true 10 | phase: Running 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-filesystem 16 | status: 17 | capacity: 18 | storage: 10Gi 19 | phase: Bound 20 | -------------------------------------------------------------------------------- /tests/e2e/test/pod-pvc-unexpected-reboot/create-pvc-pod.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: (join('-', ['linode-block-storage', $namespace])) 6 | provisioner: linodebs.csi.linode.com 7 | reclaimPolicy: Delete 8 | volumeBindingMode: Immediate 9 | parameters: 10 | linodebs.csi.linode.com/volumeTags: (to_string($namespace)) 11 | --- 12 | apiVersion: v1 13 | kind: PersistentVolumeClaim 14 | metadata: 15 | name: pvc-filesystem 16 | spec: 17 | accessModes: 18 | - ReadWriteOnce 19 | resources: 20 | requests: 21 | storage: 10Gi 22 | storageClassName: (join('-', ['linode-block-storage', $namespace])) 23 | --- 24 | apiVersion: v1 25 | kind: Pod 26 | metadata: 27 | name: e2e-pod 28 | spec: 29 | containers: 30 | - name: e2e-pod 31 | image: ubuntu 32 | command: 33 | - sleep 34 | - "1000000" 35 | volumeMounts: 36 | - mountPath: /data 37 | name: csi-volume 38 | tolerations: 39 | - key: "node-role.kubernetes.io/control-plane" 40 | operator: "Exists" 41 | effect: "NoSchedule" 42 | volumes: 43 | - name: csi-volume 44 | persistentVolumeClaim: 45 | claimName: pvc-filesystem 46 | -------------------------------------------------------------------------------- /tests/e2e/test/statefulset-pvc/assert-csi-driver-resources.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: csi-linode-node 5 | namespace: kube-system 6 | status: 7 | numberAvailable: ($nodes) 8 | numberReady: ($nodes) 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: csi-linode-controller 14 | namespace: kube-system 15 | status: 16 | availableReplicas: 1 17 | readyReplicas: 1 18 | -------------------------------------------------------------------------------- /tests/e2e/test/statefulset-pvc/assert-statefulset-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: redis-test 5 | status: 6 | availableReplicas: 1 7 | readyReplicas: 1 8 | --- 9 | apiVersion: v1 10 | kind: PersistentVolumeClaim 11 | metadata: 12 | name: data-redis-test-0 13 | spec: 14 | accessModes: 15 | - ReadWriteOnce 16 | resources: 17 | requests: 18 | storage: 10Gi 19 | storageClassName: (join('-', ['linode-block-storage', $namespace])) 20 | status: 21 | accessModes: 22 | - ReadWriteOnce 23 | capacity: 24 | storage: 10Gi 25 | phase: Bound 26 | -------------------------------------------------------------------------------- /tests/e2e/test/statefulset-pvc/create-redis-statefulset.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: (join('-', ['linode-block-storage', $namespace])) 6 | provisioner: linodebs.csi.linode.com 7 | reclaimPolicy: Delete 8 | volumeBindingMode: Immediate 9 | parameters: 10 | linodebs.csi.linode.com/volumeTags: (to_string($namespace)) 11 | --- 12 | apiVersion: apps/v1 13 | kind: StatefulSet 14 | metadata: 15 | name: redis-test 16 | labels: 17 | app.kubernetes.io/name: redis-test 18 | app.kubernetes.io/instance: redis-test 19 | spec: 20 | serviceName: redis-test 21 | selector: 22 | matchLabels: 23 | app.kubernetes.io/name: redis-test 24 | template: 25 | metadata: 26 | name: redis-test 27 | labels: 28 | app.kubernetes.io/name: redis-test 29 | spec: 30 | securityContext: 31 | fsGroup: 1001 32 | automountServiceAccountToken: false 33 | containers: 34 | - name: redis-test 35 | image: bitnami/redis 36 | env: 37 | - name: ALLOW_EMPTY_PASSWORD 38 | value: "true" 39 | securityContext: 40 | runAsUser: 1001 41 | volumeMounts: 42 | - name: data 43 | mountPath: /data 44 | tolerations: 45 | - key: "node-role.kubernetes.io/control-plane" 46 | operator: "Exists" 47 | effect: "NoSchedule" 48 | volumeClaimTemplates: 49 | - metadata: 50 | name: data 51 | spec: 52 | storageClassName: (join('-', ['linode-block-storage', $namespace])) 53 | accessModes: 54 | - ReadWriteOnce 55 | resources: 56 | requests: 57 | storage: 10Gi 58 | -------------------------------------------------------------------------------- /tests/e2e/test/sts-pvc-unexpected-reboot/assert-csi-driver-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: csi-linode-node 5 | namespace: kube-system 6 | status: 7 | numberAvailable: ($nodes) 8 | numberReady: ($nodes) 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: csi-linode-controller 14 | namespace: kube-system 15 | status: 16 | availableReplicas: 1 17 | readyReplicas: 1 18 | 
-------------------------------------------------------------------------------- /tests/e2e/test/sts-pvc-unexpected-reboot/assert-sts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: redis-test 5 | status: 6 | availableReplicas: 1 7 | readyReplicas: 1 8 | -------------------------------------------------------------------------------- /tests/e2e/test/sts-pvc-unexpected-reboot/create-sts.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: (join('-', ['linode-block-storage', $namespace])) 6 | provisioner: linodebs.csi.linode.com 7 | reclaimPolicy: Delete 8 | volumeBindingMode: Immediate 9 | parameters: 10 | linodebs.csi.linode.com/volumeTags: (to_string($namespace)) 11 | --- 12 | apiVersion: apps/v1 13 | kind: StatefulSet 14 | metadata: 15 | name: redis-test 16 | labels: 17 | app.kubernetes.io/name: redis-test 18 | app.kubernetes.io/instance: redis-test 19 | spec: 20 | serviceName: redis-test 21 | selector: 22 | matchLabels: 23 | app.kubernetes.io/name: redis-test 24 | template: 25 | metadata: 26 | name: redis-test 27 | labels: 28 | app.kubernetes.io/name: redis-test 29 | spec: 30 | securityContext: 31 | fsGroup: 1001 32 | automountServiceAccountToken: false 33 | containers: 34 | - name: redis-test 35 | image: bitnami/redis 36 | env: 37 | - name: ALLOW_EMPTY_PASSWORD 38 | value: "true" 39 | securityContext: 40 | runAsUser: 1001 41 | volumeMounts: 42 | - name: data 43 | mountPath: /data 44 | tolerations: 45 | - key: "node-role.kubernetes.io/control-plane" 46 | operator: "Exists" 47 | effect: "NoSchedule" 48 | volumeClaimTemplates: 49 | - metadata: 50 | name: data 51 | spec: 52 | storageClassName: (join('-', ['linode-block-storage', $namespace])) 53 | accessModes: 54 | - ReadWriteOnce 55 | resources: 56 | requests: 57 | storage: 10Gi 58 | -------------------------------------------------------------------------------- /tests/upstream-e2e/run-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euf -o pipefail 3 | 4 | URL="https://dl.k8s.io/release/${K8S_VERSION}/kubernetes-test-${OS}-${ARCH}.tar.gz" 5 | 6 | # output dir where downloaded files will be stored 7 | OUT_DIR="./binaries" 8 | mkdir -p ${OUT_DIR} 9 | OUT_TAR="${OUT_DIR}/k8s.tar.gz" 10 | 11 | # Download k8s test tar archive 12 | curl -L ${URL} -o ${OUT_TAR} 13 | tar xzvf ${OUT_TAR} -C ${OUT_DIR} 14 | 15 | # Run k8s e2e tests for storage driver 16 | ./${OUT_DIR}/kubernetes/test/bin/e2e.test `# runs kubernetes e2e tests` \ 17 | --ginkgo.vv `# enables verbose output` \ 18 | --ginkgo.focus='External.Storage' `# only run external storage tests` \ 19 | --ginkgo.skip='\[Disruptive\]' `# skip disruptive tests as they need ssh access to nodes` \ 20 | --ginkgo.skip='volume-expand' `# skip volume-expand as its done manually for now` \ 21 | --ginkgo.skip='snapshottable' `# skip as we don't support snapshots` \ 22 | --ginkgo.skip='snapshottable-stress' `# skip as we don't support snapshots` \ 23 | --ginkgo.skip='\[Feature:VolumeSnapshotDataSource\]' `# skip as we don't support snapshots` \ 24 | --ginkgo.skip='\[Feature:Windows\]' `# skip as we don't support windows` \ 25 | --ginkgo.flake-attempts=3 `# retry 3 times for flaky tests` \ 26 | --ginkgo.timeout=2h `# tests can run for max 2 hours` \ 27 | 
-storage.testdriver=tests/upstream-e2e/test-driver.yaml `# configuration file for storage driver capabilities` 28 | 29 | # Remove downloaded files and binaries 30 | rm -rf ${OUT_DIR} 31 | -------------------------------------------------------------------------------- /tests/upstream-e2e/test-driver.yaml: -------------------------------------------------------------------------------- 1 | StorageClass: 2 | FromName: true 3 | DriverInfo: 4 | Name: linodebs.csi.linode.com 5 | SupportedFsType: {"ext2", "ext3", "ext4", "xfs"} 6 | Capabilities: 7 | block: true 8 | controllerExpansion: false 9 | exec: true 10 | multipods: true 11 | persistence: true 12 | pvcDataSource: true 13 | snapshotDataSource: false 14 | SupportedSizeRange: 15 | Min: 10G 16 | Max: 10T 17 | --------------------------------------------------------------------------------
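A note on the upstream e2e runner (tests/upstream-e2e/run-tests.sh above): the script reads K8S_VERSION, OS, and ARCH from the environment but never sets them, and it assumes the current kubeconfig already points at a cluster with this CSI driver installed. A minimal invocation sketch, using illustrative values only (v1.29.2 matches the kind node image in tests/e2e/setup/ctlptl-config.yaml; linux/amd64 is an assumed test host, not something the repo prescribes):

export K8S_VERSION=v1.29.2  # assumption: must be a published Kubernetes release so the dl.k8s.io URL resolves
export OS=linux             # assumption: test host OS
export ARCH=amd64           # assumption: test host architecture
./tests/upstream-e2e/run-tests.sh

The script itself removes the downloaded test archive and binaries once the run finishes.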