├── test ├── config.yml ├── volume.template.yml ├── sanity-crc │ ├── Dockerfile │ ├── sanity-crc-template.yaml │ ├── sanity-cli │ └── sanity-crc.sh ├── csc ├── .env-example ├── sanity-go ├── sanity_test.go ├── sanity-cli └── secrets.template.yml ├── .whitesource ├── docs ├── pics │ ├── new_pv.png │ ├── new_snapshot.png │ └── exos-x-csi-prometheus-query.png ├── iscsi │ ├── snapshot.yaml │ ├── pvc.yaml │ ├── secret-seagate.yaml │ ├── clonepvc.yaml │ ├── restoresnapshot.yaml │ ├── snapshot-class.yaml │ ├── app.yaml │ ├── storage-class.yaml │ └── multipath.conf ├── sas │ ├── pvc.yaml │ ├── secret-seagate.yaml │ ├── clonepvc.yaml │ ├── restoresnapshot.yaml │ ├── app.yaml │ ├── storage-class.yaml │ ├── README.md │ └── multipath.conf ├── volume-snapshots.md └── troubleshooting.md ├── testapp ├── Dockerfile.testapp ├── testapp.js └── Makefile ├── licenses ├── EULA_Red_Hat_Universal_Base_Image_English_20190422.pdf ├── seagate-license.txt └── enix-license.txt ├── pkg ├── common │ ├── version.go │ ├── identity.go │ ├── stringlock.go │ ├── system_test.go │ ├── system.go │ └── driver.go ├── node_service │ ├── node_servicepb │ │ ├── node_rpc.proto │ │ └── node_rpc_grpc.pb.go │ ├── node_service_server.go │ └── node_service_client.go ├── exporter │ ├── collector.go │ └── exporter.go ├── controller │ ├── expander.go │ ├── publisher.go │ ├── snapshotter.go │ └── provisioner.go └── storage │ ├── fcNode.go │ └── sasNode.go ├── CONTRIBUTING.md ├── helm └── csi-charts │ ├── templates │ ├── _helpers.tpl │ ├── NOTES.txt │ ├── podmonitor.yaml │ ├── psp.yaml │ ├── servicemonitor.yaml │ ├── deployment.yaml │ ├── daemonset.yaml │ └── rbac.yaml │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md.gotmpl │ └── values.yaml ├── example ├── secret-example1.yaml ├── pvc-hell.sh ├── common.sh ├── pod.yaml ├── testpod-example1.yaml ├── block-volume-pod.yaml ├── statefulset.yaml ├── testpod-stop.sh ├── pvc-hell.yaml ├── secret-example2-CHAP.yaml ├── 2-pods-1-pvc.yaml ├── fifo.yml ├── snapshot.yaml ├── 
storage-class.yaml ├── block-volume-storageclass.yaml ├── storageclass-example1.yaml ├── storageclass-example2-CHAP.yaml ├── jobs.yaml ├── serviceaccount-prom.yaml ├── monitor-pvc-hell.sh └── testpod-start.sh ├── cmd ├── controller │ └── controller.go └── node │ └── node.go ├── .gitignore ├── .github ├── workflows │ ├── release.yml │ └── publish.yml └── ISSUE_TEMPLATE │ └── bug_report.md ├── package.json ├── scc └── exos-x-csi-access-scc.yaml ├── LICENSE.old ├── go.mod ├── Dockerfile.redhat ├── README.md ├── Makefile └── LICENSE /test/config.yml: -------------------------------------------------------------------------------- 1 | pool: B 2 | fsType: ext4 3 | -------------------------------------------------------------------------------- /test/volume.template.yml: -------------------------------------------------------------------------------- 1 | pool: $TEST_POOL 2 | fsType: $TEST_FSTYPE 3 | -------------------------------------------------------------------------------- /.whitesource: -------------------------------------------------------------------------------- 1 | { 2 | "settingsInheritedFrom": "Seagate/whitesource-config@main" 3 | } -------------------------------------------------------------------------------- /docs/pics/new_pv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Seagate/seagate-exos-x-csi/HEAD/docs/pics/new_pv.png -------------------------------------------------------------------------------- /docs/pics/new_snapshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Seagate/seagate-exos-x-csi/HEAD/docs/pics/new_snapshot.png -------------------------------------------------------------------------------- /testapp/Dockerfile.testapp: -------------------------------------------------------------------------------- 1 | FROM node:15 2 | ADD testapp.js /testapp.js 3 | ENTRYPOINT ["node", "testapp.js"] 4 | 
-------------------------------------------------------------------------------- /docs/pics/exos-x-csi-prometheus-query.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Seagate/seagate-exos-x-csi/HEAD/docs/pics/exos-x-csi-prometheus-query.png -------------------------------------------------------------------------------- /test/sanity-crc/Dockerfile: -------------------------------------------------------------------------------- 1 | from registry.access.redhat.com/ubi8/ubi 2 | 3 | RUN yum update -y && yum install -y sudo gettext 4 | 5 | ADD . / 6 | CMD ["./sanity-cli", "all"] 7 | -------------------------------------------------------------------------------- /licenses/EULA_Red_Hat_Universal_Base_Image_English_20190422.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Seagate/seagate-exos-x-csi/HEAD/licenses/EULA_Red_Hat_Universal_Base_Image_English_20190422.pdf -------------------------------------------------------------------------------- /pkg/common/version.go: -------------------------------------------------------------------------------- 1 | // Do not edit this file or it will break automatic version control 2 | 3 | package common 4 | 5 | // Version : version of the packages 6 | var Version = "0.0.0-DIRTY" 7 | -------------------------------------------------------------------------------- /docs/iscsi/snapshot.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapshot.storage.k8s.io/v1 2 | kind: VolumeSnapshot 3 | metadata: 4 | name: snapshot-seagate 5 | namespace: demo 6 | spec: 7 | volumeSnapshotClassName: snapshotclass-seagate 8 | source: 9 | persistentVolumeClaimName: pvc-seagate 10 | -------------------------------------------------------------------------------- /docs/sas/pvc.yaml: -------------------------------------------------------------------------------- 
1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc-seagate 5 | namespace: demo 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: storageclass-seagate 10 | resources: 11 | requests: 12 | storage: 1Gi 13 | -------------------------------------------------------------------------------- /docs/iscsi/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc-seagate 5 | namespace: demo 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: storageclass-seagate 10 | resources: 11 | requests: 12 | storage: 1Gi 13 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to contribute on this project 2 | 3 | * Create a new branch from `main`. 4 | * Commit using [the angular commit messages](https://github.com/angular/angular.js/blob/master/DEVELOPERS.md#-git-commit-guidelines). This matters because it will be parsed by the CI. 5 | * Pull request your branch into `main`. 
6 | -------------------------------------------------------------------------------- /docs/sas/secret-seagate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: secret-seagate 5 | namespace: seagate 6 | type: Opaque 7 | data: 8 | apiAddress: TBD # base64 encoded api address http://192.168.0.1 9 | username: TBD # base64 encoded username 10 | password: TBD # base64 encoded password 11 | -------------------------------------------------------------------------------- /helm/csi-charts/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{- define "csidriver.labels" -}} 2 | app.kubernetes.io/name: {{ .Chart.Name | kebabcase }} 3 | app.kubernetes.io/instance: {{ .Release.Name }} 4 | {{- end -}} 5 | 6 | {{- define "csidriver.extraArgs" -}} 7 | {{- range .extraArgs }} 8 | - {{ toYaml . }} 9 | {{- end }} 10 | {{- end -}} -------------------------------------------------------------------------------- /docs/iscsi/secret-seagate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: seagate-enclosure001 5 | namespace: seagate 6 | type: Opaque 7 | data: 8 | apiAddress: aHR0cDovLzE5Mi4xNjguMC4x # base64 encoded api address http://192.168.0.1 9 | username: dXNlcm5hbWU= # base64 encoded username 10 | password: cGFzc3dvcmQ= # base64 encoded password 11 | -------------------------------------------------------------------------------- /docs/iscsi/clonepvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc-clone-seagate 5 | namespace: demo 6 | spec: 7 | dataSource: 8 | name: pvc-seagate 9 | kind: PersistentVolumeClaim 10 | accessModes: 11 | - ReadWriteOnce 12 | storageClassName: storageclass-seagate 13 | resources: 14 | requests: 
15 | storage: 1Gi 16 | -------------------------------------------------------------------------------- /docs/sas/clonepvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc-clone-seagate 5 | namespace: demo 6 | spec: 7 | dataSource: 8 | name: pvc-seagate 9 | kind: PersistentVolumeClaim 10 | accessModes: 11 | - ReadWriteOnce 12 | storageClassName: storageclass-seagate 13 | resources: 14 | requests: 15 | storage: 1Gi 16 | -------------------------------------------------------------------------------- /test/csc: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | csc=$(which csc) 4 | 5 | fsType="ext4" 6 | pool="B" 7 | 8 | function setup { 9 | cd $(dirname $0) 10 | export CSI_ENDPOINT="tcp://localhost:10000" 11 | export X_CSI_SECRETS="apiAddress=${STORAGE_API_ADDR}, username=${STORAGE_USERNAME}, password=${STORAGE_PASSWORD}" 12 | } 13 | 14 | setup 15 | ${csc} --params "fsType=${fsType},pool=${pool}" $@ 16 | -------------------------------------------------------------------------------- /helm/csi-charts/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Thank you for using Seagate Exos X provisioner. It will be up and running shortly. 2 | Run 'kubectl get pods' to verify that the new pods have a 'STATUS' of 'Running'. 3 | 4 | In order to dynamically provide a persistant volume, create a storage class first. 
5 | Please refer to this example to do so: https://github.com/Seagate/seagate-exos-x-csi/blob/main/example/storage-class.yaml 6 | -------------------------------------------------------------------------------- /docs/sas/restoresnapshot.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc-restore-seagate 5 | namespace: demo 6 | spec: 7 | dataSource: 8 | name: snapshot-seagate 9 | kind: VolumeSnapshot 10 | apiGroup: "snapshot.storage.k8s.io" 11 | accessModes: 12 | - ReadWriteOnce 13 | storageClassName: storageclass-seagate 14 | resources: 15 | requests: 16 | storage: 1Gi 17 | -------------------------------------------------------------------------------- /helm/csi-charts/templates/podmonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.podMonitor.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PodMonitor 4 | metadata: 5 | name: seagate-exos-x-csi-node-exporter 6 | labels: 7 | {{ include "csidriver.labels" . 
| indent 4 }} 8 | spec: 9 | selector: 10 | matchLabels: 11 | name: seagate-exos-x-csi-node-server 12 | podMetricsEndpoints: 13 | - port: metrics 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /docs/iscsi/restoresnapshot.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc-restore-seagate 5 | namespace: demo 6 | spec: 7 | dataSource: 8 | name: snapshot-seagate 9 | kind: VolumeSnapshot 10 | apiGroup: "snapshot.storage.k8s.io" 11 | accessModes: 12 | - ReadWriteOnce 13 | storageClassName: storageclass-seagate 14 | resources: 15 | requests: 16 | storage: 1Gi 17 | -------------------------------------------------------------------------------- /docs/iscsi/snapshot-class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapshot.storage.k8s.io/v1 2 | kind: VolumeSnapshotClass 3 | metadata: 4 | name: snapshotclass-seagate 5 | driver: csi-exos-x.seagate.com 6 | deletionPolicy: Delete 7 | parameters: 8 | csi.storage.k8s.io/snapshotter-secret-name: secret-seagate 9 | csi.storage.k8s.io/snapshotter-secret-namespace: seagate 10 | volPrefix: snp # Prefix for snapshot volumes, an underscore is appended 11 | -------------------------------------------------------------------------------- /test/.env-example: -------------------------------------------------------------------------------- 1 | # 2 | # Fill in all values below using the example as a template. 3 | # Passwords with special characters must be escaped and quoted. 
4 | # 5 | # Example: 6 | # TEST_USERNAME=username 7 | # TEST_PASSWORD=\"\!password\" 8 | # TEST_IP=http://1.2.3.4 9 | # TEST_POOL=A|B 10 | # TEST_FSTYPE=ext3|ext4|xfs 11 | # 12 | TEST_USERNAME= 13 | TEST_PASSWORD= 14 | TEST_IP= 15 | TEST_POOL= 16 | TEST_FSTYPE= 17 | -------------------------------------------------------------------------------- /docs/iscsi/app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod-seagate 5 | namespace: demo 6 | spec: 7 | containers: 8 | - image: gcr.io/google-containers/busybox:latest 9 | name: theapp 10 | volumeMounts: 11 | - mountPath: /vol 12 | name: volume-seagate 13 | command: [ "sleep", "1000" ] 14 | volumes: 15 | - name: volume-seagate 16 | persistentVolumeClaim: 17 | claimName: pvc-seagate 18 | -------------------------------------------------------------------------------- /docs/sas/app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod-seagate 5 | namespace: demo 6 | spec: 7 | containers: 8 | - image: gcr.io/google-containers/busybox:latest 9 | name: theapp 10 | volumeMounts: 11 | - mountPath: /vol 12 | name: volume-seagate 13 | command: [ "sleep", "1000" ] 14 | volumes: 15 | - name: volume-seagate 16 | persistentVolumeClaim: 17 | claimName: pvc-seagate 18 | -------------------------------------------------------------------------------- /helm/csi-charts/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /example/secret-example1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: seagate-exos-x-csi-secrets 5 | namespace: default 6 | type: Opaque 7 | data: 8 | apiAddress: aHR0cDovLzxpcGFkZHJlc3M+ # base64 encoded api address 'http://' 9 | apiAddressB: aHR0cDovLzxpcGFkZHJlc3M+ # Optional second base64 encoded api address 'http://' 10 | username: dXNlcm5hbWU= # base64 encoded 'username' 11 | password: cGFzc3dvcmQ= # base64 encoded 'password' 12 | -------------------------------------------------------------------------------- /testapp/testapp.js: -------------------------------------------------------------------------------- 1 | // 2 | // Simple NodeJS Test App 3 | // 4 | 5 | const http = require('http'); 6 | const os = require('os'); 7 | 8 | console.log("[] testapp server starting..."); 9 | 10 | var handler = function(request, response) { 11 | console.log(" Received request from (" + request.connection.remoteAddress + ")"); 12 | response.writeHead(200, {'Content-Type': 'text/plain'}); 13 | response.end(" Hostname (" + os.hostname + ")\n"); 14 | }; 15 | 16 | var www = http.createServer(handler); 17 | www.listen(8080); 18 | -------------------------------------------------------------------------------- /example/pvc-hell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | number_regex='^[0-9]+$' 4 | if [[ ("$1" != "apply" && "$1" != "delete") || ! 
"$2" =~ $number_regex ]]; then 5 | echo "usage: $0 [apply|delete] quantity" 6 | exit 1 7 | fi 8 | 9 | ACTION=$1 10 | QUANTITY=$2 11 | 12 | echo "" 13 | echo "pvc-hell ${ACTION} ${QUANTITY} pods and PVC" 14 | echo "" 15 | 16 | rm -f /tmp/pvc-hell.yaml 17 | for ((i=1; i <= QUANTITY; i++)); do 18 | sed -e "s/{TEST_ID}/$i/" pvc-hell.yaml >> /tmp/pvc-hell.yaml 19 | done 20 | 21 | kubectl $ACTION -f /tmp/pvc-hell.yaml 22 | rm /tmp/pvc-hell.yaml 23 | -------------------------------------------------------------------------------- /helm/csi-charts/templates/psp.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.pspAdmissionControllerEnabled -}} 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: seagate-exos-x-csi 6 | spec: 7 | privileged: true 8 | hostNetwork: true 9 | hostIPC: true 10 | hostPID: true 11 | seLinux: 12 | rule: RunAsAny 13 | supplementalGroups: 14 | rule: RunAsAny 15 | runAsUser: 16 | rule: RunAsAny 17 | fsGroup: 18 | rule: RunAsAny 19 | hostPorts: 20 | - min: 0 21 | max: 65535 22 | volumes: 23 | - '*' 24 | allowedCapabilities: 25 | - '*' 26 | {{ end }} -------------------------------------------------------------------------------- /licenses/seagate-license.txt: -------------------------------------------------------------------------------- 1 | Copyright 2021 Seagate Technology LLC 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | -------------------------------------------------------------------------------- /cmd/controller/controller.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | 7 | "github.com/Seagate/seagate-exos-x-csi/pkg/common" 8 | "github.com/Seagate/seagate-exos-x-csi/pkg/controller" 9 | "k8s.io/klog/v2" 10 | ) 11 | 12 | var bind = flag.String("bind", fmt.Sprintf("unix:///var/run/%s/csi-controller.sock", common.PluginName), "RPC bind URI (can be a UNIX socket path or any URI)") 13 | 14 | func main() { 15 | klog.InitFlags(nil) 16 | flag.Set("logtostderr", "true") 17 | flag.Parse() 18 | klog.Infof("starting storage controller (%s)", common.Version) 19 | c := controller.New() 20 | defer c.Stop() 21 | c.Start(*bind) 22 | } 23 | -------------------------------------------------------------------------------- /example/common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function setNamespace() 4 | { 5 | namespace=$CSI_NAMESPACE 6 | if [ -z "$namespace" ] 7 | then 8 | export CSI_NAMESPACE=csi 9 | namespace=$CSI_NAMESPACE 10 | fi 11 | echo "[] using namespace=$namespace" 12 | } 13 | 14 | function runCommand() 15 | { 16 | echo "" 17 | echo "RUN>> $1" 18 | eval $1 19 | } 20 | 21 | function pause() 22 | { 23 | echo "" 24 | read -s -n 1 -p "===== Press any key to contine =====" 25 | echo "" 26 | } 27 | 28 | function banner() 29 | { 30 | echo "" 31 | echo "==================== $1 ====================" 32 | echo "" 33 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # go dependencies 2 | /vendor 3 | 4 | # v1.x files 5 | _legacy 6 | 7 | # binaries 8 | seagate-exos-x-csi-node 9 | seagate-exos-x-csi-controller 10 | 11 | # generated documentation 12 | /helm/*/README.md 13 | 14 | # vs code 15 | .vscode/ 16 
| 17 | node_modules/ 18 | 19 | # test files 20 | test/secrets.yml 21 | test/volume.yml 22 | test/sanity.log 23 | test/.env 24 | test/sanity-crc/secrets.yml 25 | test/sanity-crc/volume.yml 26 | test/sanity-crc/csi-sanity 27 | test/sanity-crc/sanity-crc.yaml 28 | 29 | # certification-related files 30 | .redhat_* 31 | .preflight* 32 | preflight* 33 | *.tgz 34 | 35 | # Backup files 36 | *~ 37 | ~.bak 38 | artifacts* 39 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | 8 | release: 9 | name: Update Release 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v3 15 | 16 | - name: Release 17 | env: 18 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 19 | uses: ridedott/release-me-action@master 20 | with: 21 | release-branches: '["main"]' 22 | disable-changelog: false 23 | 24 | - name: Output 25 | run: echo "released version is ${{ steps.build_package.outputs.version }}, type is ${{ steps.build_package.outputs.level }}" 26 | 27 | -------------------------------------------------------------------------------- /example/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: claim 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | storageClassName: my-marvelous-storage 9 | resources: 10 | requests: 11 | storage: 5Gi 12 | --- 13 | apiVersion: v1 14 | kind: Pod 15 | metadata: 16 | name: pod 17 | spec: 18 | containers: 19 | - image: alpine 20 | command: ["/bin/sh", "-c", "while sleep 1; do echo hello > /vol/test && ls -l /vol && cat /vol/test; done"] 21 | name: container 22 | volumeMounts: 23 | - mountPath: /vol 24 | name: volume 25 | volumes: 26 | - name: volume 27 | persistentVolumeClaim: 28 | claimName: claim 29 | 
-------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "seagate-exos-x-csi", 3 | "version": "0.0.0-development", 4 | "description": "Seagate Exos X CSI Driver", 5 | "scripts": { 6 | "test": "echo \"Error: no test specified\" && exit 1", 7 | "semantic-release": "semantic-release" 8 | }, 9 | "repository": { 10 | "type": "git", 11 | "url": "git+https://github.com/Seagate/seagate-exos-x-csi.git" 12 | }, 13 | "author": "Seagate", 14 | "license": "Apache-2.0", 15 | "bugs": { 16 | "url": "https://github.com/Seagate/seagate-exos-x-csi/issues" 17 | }, 18 | "homepage": "https://github.com/Seagate/seagate-exos-x-csi#readme", 19 | "devDependencies": { 20 | "semantic-release": "^21.0.1" 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /pkg/node_service/node_servicepb/node_rpc.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package node_service; 3 | 4 | option go_package = "github.com/Seagate/seagate-exos-x-csi/pkg/node_service/protocolbuffers/node_servicepb"; 5 | 6 | service NodeService { 7 | rpc GetInitiators(InitiatorRequest) returns (Initiators){} 8 | rpc NotifyUnmap(UnmappedVolume) returns (Ack){} 9 | } 10 | 11 | enum InitiatorType{ 12 | UNSPECIFIED = 0; 13 | FC = 1; 14 | SAS = 2; 15 | ISCSI = 3; 16 | } 17 | 18 | message InitiatorRequest { 19 | InitiatorType type = 1; 20 | } 21 | 22 | message Initiators { 23 | repeated string initiators = 1; 24 | } 25 | 26 | message UnmappedVolume { 27 | string volumeName = 1; 28 | } 29 | 30 | message Ack { 31 | int32 ack = 1; 32 | } 33 | -------------------------------------------------------------------------------- /example/testpod-example1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | 
metadata: 4 | name: systems-pvc 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | storageClassName: systems-storageclass 9 | resources: 10 | requests: 11 | storage: 5Gi 12 | --- 13 | apiVersion: v1 14 | kind: Pod 15 | metadata: 16 | name: test-pod 17 | spec: 18 | containers: 19 | - image: ghcr.io/seagate/seagate-exos-x-testapp 20 | command: ["/bin/sh", "-c", "while sleep 60; do echo hello > /vol/test && ls -l /vol && cat /vol/test && rm /vol/test; done"] 21 | name: test-pod-container 22 | volumeMounts: 23 | - mountPath: /vol 24 | name: volume 25 | ports: 26 | - containerPort: 8080 27 | volumes: 28 | - name: volume 29 | persistentVolumeClaim: 30 | claimName: systems-pvc 31 | -------------------------------------------------------------------------------- /helm/csi-charts/templates/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceMonitor.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: systems-controller-metrics 6 | labels: 7 | name: systems-controller-metrics 8 | {{ include "csidriver.labels" . | indent 4 }} 9 | spec: 10 | ports: 11 | - name: metrics 12 | port: 9842 13 | targetPort: metrics 14 | protocol: TCP 15 | selector: 16 | app: seagate-exos-x-csi-controller-server 17 | --- 18 | apiVersion: monitoring.coreos.com/v1 19 | kind: ServiceMonitor 20 | metadata: 21 | name: seagate-exos-x-csi-controller-exporter 22 | labels: 23 | {{ include "csidriver.labels" . 
| indent 4 }} 24 | spec: 25 | selector: 26 | matchLabels: 27 | name: systems-controller-metrics 28 | endpoints: 29 | - port: metrics 30 | interval: 1s 31 | {{- end }} 32 | -------------------------------------------------------------------------------- /helm/csi-charts/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: seagate-exos-x-csi 3 | version: 1.0.0 4 | appVersion: v2 5 | description: A dynamic persistent volume (PV) provisioner for Seagate Exos X storage systems. 6 | type: application 7 | home: https://github.com/Seagate/seagate-exos-x-csi 8 | sources: 9 | - https://github.com/Seagate/seagate-exos-x-csi/tree/main/helm/csi-charts 10 | keywords: 11 | - storage 12 | - iscsi 13 | - fc 14 | - sas 15 | - plugin 16 | - csi 17 | maintainers: 18 | - name: Seagate 19 | url: https://github.com/Seagate 20 | email: css-host-software@seagate.com 21 | - name: Joe Skazinski 22 | email: joseph.skazinski@seagate.com 23 | annotations: 24 | artifacthub.io/images: | 25 | - name: csi-driver 26 | image: {{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }} 27 | -------------------------------------------------------------------------------- /test/sanity-go: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -e 4 | 5 | function teardown { 6 | rm -vf ${secretsFile} /tmp/{controller,node}.sock 7 | } 8 | 9 | trap teardown EXIT 10 | 11 | secretsFileTmpl="secrets.template.yml" 12 | secretsFile="secrets.yml" 13 | inititatorNameFile="/etc/iscsi/initiatorname.iscsi" 14 | 15 | function setup { 16 | cd $(dirname $0) 17 | set -a; . .env; set +a 18 | envsubst < ${secretsFileTmpl} > ${secretsFile} 19 | if [ ! 
-f ${inititatorNameFile} ]; then 20 | >&2 echo "/etc/iscsi/initiatorname.iscsi is missing, please run the following commands" 21 | >&2 echo -e " sudo mkdir -p /etc/iscsi" 22 | >&2 echo -e " sudo sh -c 'echo \"InitiatorName=iqn.2021-06.io.seagate:sanity-test-cluster\" > ${inititatorNameFile}'" 23 | exit 1 24 | fi 25 | } 26 | 27 | setup 28 | go test . $@ 29 | out=$? 30 | 31 | exit ${out} 32 | -------------------------------------------------------------------------------- /example/block-volume-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: systems-pvc 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | volumeMode: Block 9 | storageClassName: block-vol-storageclass 10 | resources: 11 | requests: 12 | storage: 5Gi 13 | --- 14 | apiVersion: v1 15 | kind: Pod 16 | metadata: 17 | name: test-pod 18 | spec: 19 | containers: 20 | - image: ghcr.io/seagate/seagate-exos-x-testapp 21 | command: ["/bin/sh", "-c", "while sleep 60; do echo hello > /vol/test && ls -l /vol && cat /vol/test && rm /vol/test; done"] 22 | name: test-pod-container 23 | volumeDevices: 24 | - devicePath: /block-vol 25 | name: volume 26 | ports: 27 | - containerPort: 8080 28 | volumes: 29 | - name: volume 30 | persistentVolumeClaim: 31 | claimName: systems-pvc -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 16 | **Expected behavior** 17 | A clear and concise description of what you expected to happen. 
18 | 19 | **Screenshots** 20 | If applicable, add screenshots to help explain your problem. 21 | 22 | **Storage System (please complete the following information):** 23 | - Vendor: [e.g. Seagate] 24 | - Model [e.g. Exos X 5U84] 25 | - Firmware Version [e.g. G280] 26 | 27 | **Environment:** 28 | - Kubernetes version: [e.g. OpenShift 4.x] 29 | - Host OS: [e.g. CentOS 7.9] 30 | 31 | **Additional context** 32 | Add any other context about the problem here. 33 | -------------------------------------------------------------------------------- /cmd/node/node.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "syscall" 7 | 8 | "github.com/Seagate/seagate-exos-x-csi/pkg/common" 9 | "github.com/Seagate/seagate-exos-x-csi/pkg/node" 10 | "k8s.io/klog/v2" 11 | ) 12 | 13 | var bind = flag.String("bind", fmt.Sprintf("unix:///var/run/%s/csi-node.sock", common.PluginName), "RPC bind URI (can be a UNIX socket path or any URI)") 14 | var chroot = flag.String("chroot", "", "Chroot into a directory at startup (used when running in a container)") 15 | 16 | func main() { 17 | klog.InitFlags(nil) 18 | flag.Set("logtostderr", "true") 19 | flag.Parse() 20 | 21 | if *chroot != "" { 22 | if err := syscall.Chroot(*chroot); err != nil { 23 | panic(err) 24 | } 25 | } 26 | 27 | klog.Infof("starting storage node plugin (%s)", common.Version) 28 | n := node.New() 29 | defer n.Stop() 30 | n.Start(*bind) 31 | } 32 | -------------------------------------------------------------------------------- /docs/sas/storage-class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | provisioner: csi-exos-x.seagate.com 4 | allowVolumeExpansion: true 5 | metadata: 6 | name: storageclass-seagate 7 | parameters: 8 | csi.storage.k8s.io/provisioner-secret-name: secret-seagate 9 | csi.storage.k8s.io/provisioner-secret-namespace: 
seagate 10 | csi.storage.k8s.io/controller-publish-secret-name: secret-seagate 11 | csi.storage.k8s.io/controller-publish-secret-namespace: seagate 12 | csi.storage.k8s.io/controller-expand-secret-name: secret-seagate 13 | csi.storage.k8s.io/controller-expand-secret-namespace: seagate 14 | fsType: ext4 # Desired filesystem 15 | pool: A # Pool for volumes provisioning 16 | volPrefix: stx # Desired prefix for volume naming, an underscore is appended 17 | storageProtocol: sas # The storage interface (iscsi, fc, sas) being used for storage i/o 18 | -------------------------------------------------------------------------------- /docs/iscsi/storage-class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | provisioner: csi-exos-x.seagate.com 4 | allowVolumeExpansion: true 5 | metadata: 6 | name: storageclass-seagate 7 | parameters: 8 | csi.storage.k8s.io/provisioner-secret-name: secret-seagate 9 | csi.storage.k8s.io/provisioner-secret-namespace: seagate 10 | csi.storage.k8s.io/controller-publish-secret-name: secret-seagate 11 | csi.storage.k8s.io/controller-publish-secret-namespace: seagate 12 | csi.storage.k8s.io/controller-expand-secret-name: secret-seagate 13 | csi.storage.k8s.io/controller-expand-secret-namespace: seagate 14 | fsType: ext4 # Desired filesystem 15 | pool: A # Pool for volumes provisioning 16 | volPrefix: stx # Desired prefix for volume naming, an underscore is appended 17 | storageProtocol: iscsi # The storage interface (iscsi, fc, sas) being used for storage i/o 18 | -------------------------------------------------------------------------------- /test/sanity_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/Seagate/seagate-exos-x-csi/pkg/controller" 7 | "github.com/Seagate/seagate-exos-x-csi/pkg/node" 8 | 
"github.com/kubernetes-csi/csi-test/pkg/sanity" 9 | ) 10 | 11 | // Test starts the drivers in background and runs k8s sanity checks 12 | func Test(t *testing.T) { 13 | controllerSocketPath := "unix:///tmp/controller.sock" 14 | nodeSocketPath := "unix:///tmp/node.sock" 15 | 16 | ctrl := controller.New() 17 | node := node.New() 18 | 19 | go ctrl.Start(controllerSocketPath) 20 | defer ctrl.Stop() 21 | 22 | go node.Start(nodeSocketPath) 23 | defer node.Stop() 24 | 25 | sanity.Test(t, &sanity.Config{ 26 | Address: nodeSocketPath, 27 | ControllerAddress: controllerSocketPath, 28 | SecretsFile: "./secrets.yml", 29 | TestVolumeParametersFile: "./config.yml", 30 | }) 31 | } 32 | -------------------------------------------------------------------------------- /example/statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: date-log 5 | spec: 6 | serviceName: date-log 7 | selector: 8 | matchLabels: 9 | app: date-log 10 | replicas: 3 11 | podManagementPolicy: Parallel 12 | template: 13 | metadata: 14 | labels: 15 | app: date-log 16 | spec: 17 | containers: 18 | - name: logger 19 | image: alpine 20 | imagePullPolicy: IfNotPresent 21 | command: ["/bin/sh", "-c", "(echo '=== START ===' && while true; do sleep 1 && date; done) | tee -a /vol/logs"] 22 | volumeMounts: 23 | - mountPath: /vol 24 | name: data 25 | volumeClaimTemplates: 26 | - metadata: 27 | name: data 28 | spec: 29 | accessModes: 30 | - ReadWriteOnce 31 | storageClassName: my-marvelous-storage 32 | resources: 33 | requests: 34 | storage: 100Mi 35 | -------------------------------------------------------------------------------- /example/testpod-stop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source common.sh 4 | 5 | echo "[] testpod-stop ($1)" 6 | 7 | setNamespace 8 | 9 | if [ -z ${1+x} ]; then 10 | echo "" 11 | echo "Usage: testpod-stop 
[id]"; 12 | echo "Where:" 13 | echo " [id] - specifies a string used to clean up a test pod configuration." 14 | echo "" 15 | echo "Example: 'testpod-stop system1'" 16 | echo "" 17 | exit 18 | else 19 | system=$1 20 | fi 21 | 22 | banner "Delete Resources" 23 | runCommand "kubectl delete -f testpod-$system.yaml --namespace $namespace" 24 | 25 | runCommand "sleep 20" 26 | 27 | runCommand "kubectl delete -f storageclass-$system.yaml --namespace $namespace" 28 | runCommand "kubectl delete -f secret-$system.yaml --namespace $namespace" 29 | runCommand "helm uninstall --namespace $namespace test-release" 30 | 31 | banner "Check Resources" 32 | 33 | runCommand "kubectl get all --namespace $namespace" 34 | -------------------------------------------------------------------------------- /scc/exos-x-csi-access-scc.yaml: -------------------------------------------------------------------------------- 1 | kind: SecurityContextConstraints 2 | apiVersion: security.openshift.io/v1 3 | metadata: 4 | annotations: 5 | kubernetes.io/description: allow hostpath and host network to be accessible. 
6 | generation: 1 7 | name: exos-x-csi-access 8 | selfLink: /apis/security.openshift.io/v1/securitycontextconstraints/exos-x-csi-access 9 | allowHostDirVolumePlugin: true 10 | allowHostIPC: true 11 | allowHostNetwork: true 12 | allowHostPID: false 13 | allowHostPorts: true 14 | allowPrivilegeEscalation: true 15 | allowPrivilegedContainer: true 16 | readOnlyRootFilesystem: false 17 | requiredDropCapabilities: 18 | - KILL 19 | - MKNOD 20 | - SETUID 21 | - SETGID 22 | allowedCapabilities: 23 | - SYS_ADMIN 24 | defaultAddCapabilities: null 25 | fsGroup: 26 | type: RunAsAny 27 | runAsUser: 28 | type: RunAsAny 29 | seLinuxContext: 30 | type: RunAsAny 31 | supplementalGroups: 32 | type: RunAsAny 33 | volumes: 34 | - '*' 35 | -------------------------------------------------------------------------------- /testapp/Makefile: -------------------------------------------------------------------------------- 1 | ifndef DOCKER_HUB_REPOSITORY 2 | DOCKER_HUB_REPOSITORY = ghcr.io/seagate 3 | endif 4 | 5 | ifndef VERSION 6 | VERSION = latest 7 | endif 8 | 9 | ifndef BIN 10 | BIN = seagate-exos-x-testapp 11 | endif 12 | 13 | IMAGE = $(DOCKER_HUB_REPOSITORY)/$(BIN):$(VERSION) 14 | 15 | help: 16 | @echo "" 17 | @echo "Build Targets:" 18 | @echo "-----------------------------------------------------------------------------------" 19 | @echo "make clean - remove $(BIN)" 20 | @echo "make image - create a repo docker image ($(IMAGE))" 21 | @echo "make push - push the docker image to '$(DOCKER_HUB_REPOSITORY)'" 22 | @echo "make all - clean, image, push" 23 | @echo "" 24 | 25 | all: clean image push 26 | .PHONY: all 27 | 28 | image: 29 | docker build -f Dockerfile.testapp -t $(IMAGE) --build-arg version="$(VERSION)" . 
30 | .PHONY: image 31 | 32 | push: 33 | docker push $(IMAGE) 34 | .PHONY: push 35 | 36 | clean: 37 | rm -vf $(BIN) 38 | .PHONY: clean 39 | -------------------------------------------------------------------------------- /test/sanity-crc/sanity-crc-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: csi-sanity-crc 5 | spec: 6 | restartPolicy: Never 7 | containers: 8 | - image: default-route-openshift-image-registry.apps-crc.testing/seagate/csi-sanity 9 | command: ["./sanity-cli", "all"] 10 | name: csi-sanity 11 | securityContext: 12 | privileged: true 13 | allowPrivilegeEscalation: true 14 | volumeMounts: 15 | - name: controller-socket 16 | mountPath: /csi/controller.sock 17 | - name: node-socket 18 | mountPath: /csi/node.sock 19 | - name: target-directory 20 | mountPath: /tmp 21 | volumes: 22 | - name: controller-socket 23 | hostPath: 24 | path: /var/lib/kubelet/pods/{{CONTROLLER_POD_UID}}/volumes/kubernetes.io~empty-dir/socket-dir/csi.sock 25 | - name: node-socket 26 | hostPath: 27 | path: /var/lib/kubelet/plugins/csi-exos-x.seagate.com/csi.sock 28 | - name: target-directory 29 | hostPath: 30 | path: /tmp 31 | -------------------------------------------------------------------------------- /docs/sas/README.md: -------------------------------------------------------------------------------- 1 | The CSI driver SAS support requires that all nodes have access to the SAS array. 2 | 3 | # Specifying SAS Initiators 4 | 5 | ## SAS Initiator Discovery 6 | 7 | The node driver will attempt to discover the address of any available SAS initiators. This may not work for all brands/models of SAS HBA, so if you need or prefer to specify these values manually, you can do so in the file `/etc/kubernetes/sas-addresses`. You must create or update this file on each Kubernetes node where you have SAS connections to a storage array. 
8 | 9 | Example of finding the SAS host address on your node: 10 | ``` 11 | # lsscsi -t -H 12 | [0] ata_piix ata: 13 | [1] ata_piix ata: 14 | [2] mpt3sas sas:0x500605b00b4ec831 15 | ``` 16 | Note: The `lsscsi` command may only find the 1st SAS host address. In this case we needed to add one to that value to get the 2nd SAS host address. 17 | 18 | Example contents of the `sas-addresses` file, **note that the '0x' prefix is omitted**: 19 | ``` 20 | 500605b00b4ec831 21 | 500605b00b4ec832 22 | ``` 23 | -------------------------------------------------------------------------------- /example/pvc-hell.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: claim-{TEST_ID} 5 | labels: 6 | testName: pvc-hell 7 | 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | storageClassName: my-marvelous-storage 12 | resources: 13 | requests: 14 | storage: 1Gi 15 | --- 16 | apiVersion: v1 17 | kind: Pod 18 | metadata: 19 | name: pod-{TEST_ID} 20 | labels: 21 | testName: pvc-hell 22 | 23 | spec: 24 | restartPolicy: Never 25 | initContainers: 26 | - image: alpine 27 | imagePullPolicy: IfNotPresent 28 | command: ["/bin/sh", "-c", "echo hello > /vol/test"] 29 | name: write 30 | volumeMounts: 31 | - mountPath: /vol 32 | name: volume 33 | containers: 34 | - image: alpine 35 | imagePullPolicy: IfNotPresent 36 | command: ["/bin/sh", "-c", "echo hello > /vol/test2 && diff /vol/test /vol/test2 -q && echo 'SUCCESS'"] 37 | name: read 38 | volumeMounts: 39 | - mountPath: /vol 40 | name: volume 41 | volumes: 42 | - name: volume 43 | persistentVolumeClaim: 44 | claimName: claim-{TEST_ID} 45 | --- 46 | -------------------------------------------------------------------------------- /LICENSE.old: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Enix SAS 4 | 5 | Permission is hereby granted, free of charge, to any person 
obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /example/secret-example2-CHAP.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: seagate-exos-x-csi-secrets 5 | namespace: default 6 | type: Opaque 7 | data: 8 | apiAddress: aHR0cDovLzxpcGFkZHJlc3M+ # base64 encoded api address 'http://' 9 | username: dXNlcm5hbWU= # base64 encoded 'username' 10 | password: cGFzc3dvcmQ= # base64 encoded 'password' 11 | CHAPusername: ZXhhbXBsZVVzZXJuYW1l # base64 'exampleUsername'. The CHAP username 12 | CHAPpassword: ZXhhbXBsZXNlY3JldA== # base64 'examplesecret'. The secret that the recipient uses to authenticate the originator. The secret is case sensitive and can include from 12 to 16 bytes. 
The value can include spaces and printable UTF-8 characters except: " < 13 | CHAPusernameIn: aXFuLjE5OTItMDkuY29tLmV4YW1wbGU6MDEuYXJyYXkuMDAxMjM0YQ== # base64 'iqn.1992-09.com.example:01.array.001234a'. The target name, typically in IQN format. This value is optional, used for Mutual CHAP. 14 | CHAPpasswordIn: bXV0dWFsc2VjcmV0 # base64 encoded 'mutualsecret'. The secret is case sensitive, can include from 12 to 16 bytes, and must differ from the originator secret. This value is optional, used for Mutual CHAP -------------------------------------------------------------------------------- /licenses/enix-license.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Enix SAS 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /example/2-pods-1-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: 2-pods-1-pvc 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | storageClassName: my-marvelous-storage 9 | resources: 10 | requests: 11 | storage: 10Gi 12 | --- 13 | apiVersion: v1 14 | kind: Pod 15 | metadata: 16 | name: pod-1 17 | spec: 18 | restartPolicy: Never 19 | nodeSelector: 20 | id: "1" 21 | containers: 22 | - image: alpine 23 | command: ["/bin/sh", "-c", "echo one > /vol/pod-1 && ls -l /vol && cat /vol/pod-1"] 24 | name: container 25 | volumeMounts: 26 | - mountPath: /vol 27 | name: volume 28 | volumes: 29 | - name: volume 30 | persistentVolumeClaim: 31 | claimName: 2-pods-1-pvc 32 | --- 33 | apiVersion: v1 34 | kind: Pod 35 | metadata: 36 | name: pod-2 37 | spec: 38 | restartPolicy: Never 39 | nodeSelector: 40 | id: "2" 41 | containers: 42 | - image: alpine 43 | command: ["/bin/sh", "-c", "echo two > /vol/pod-2 && ls -l /vol && cat /vol/pod-2"] 44 | name: container 45 | volumeMounts: 46 | - mountPath: /vol 47 | name: volume 48 | volumes: 49 | - name: volume 50 | persistentVolumeClaim: 51 | claimName: 2-pods-1-pvc 52 | -------------------------------------------------------------------------------- /pkg/common/identity.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/container-storage-interface/spec/lib/go/csi" 7 | ) 8 | 9 | // GetPluginInfo returns metadata of the plugin 10 | func (driver *Driver) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { 11 | return &csi.GetPluginInfoResponse{ 12 | Name: PluginName, 13 | VendorVersion: Version, 14 | }, nil 15 | } 16 | 17 | // GetPluginCapabilities returns available capabilities of the 
plugin 18 | func (driver *Driver) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { 19 | return &csi.GetPluginCapabilitiesResponse{ 20 | Capabilities: []*csi.PluginCapability{ 21 | { 22 | Type: &csi.PluginCapability_Service_{ 23 | Service: &csi.PluginCapability_Service{ 24 | Type: csi.PluginCapability_Service_CONTROLLER_SERVICE, 25 | }, 26 | }, 27 | }, 28 | { 29 | Type: &csi.PluginCapability_VolumeExpansion_{ 30 | VolumeExpansion: &csi.PluginCapability_VolumeExpansion{ 31 | Type: csi.PluginCapability_VolumeExpansion_ONLINE, 32 | }, 33 | }, 34 | }, 35 | }, 36 | }, nil 37 | } 38 | -------------------------------------------------------------------------------- /example/fifo.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: fifo-claim 5 | 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: my-marvelous-storage 10 | resources: 11 | requests: 12 | storage: 10Mi 13 | --- 14 | apiVersion: v1 15 | kind: Pod 16 | metadata: 17 | name: fifo-pod 18 | 19 | spec: 20 | initContainers: 21 | - image: alpine 22 | imagePullPolicy: IfNotPresent 23 | command: ["/bin/sh", "-c", "rm -f /vol/fifo && mkfifo /vol/fifo"] 24 | name: mkfifo 25 | volumeMounts: 26 | - mountPath: /vol 27 | name: volume 28 | containers: 29 | - image: alpine 30 | imagePullPolicy: IfNotPresent 31 | command: ["/bin/sh", "-c", "while read line /vol/fifo"] 39 | name: write 40 | volumeMounts: 41 | - mountPath: /vol 42 | name: volume 43 | volumes: 44 | - name: volume 45 | persistentVolumeClaim: 46 | claimName: fifo-claim 47 | -------------------------------------------------------------------------------- /example/snapshot.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: claim 5 | spec: 6 | accessModes: 7 | 
- ReadWriteOnce 8 | storageClassName: my-marvelous-storage 9 | resources: 10 | requests: 11 | storage: 5Gi 12 | --- 13 | apiVersion: v1 14 | kind: Pod 15 | metadata: 16 | name: pod 17 | spec: 18 | containers: 19 | - image: alpine 20 | command: ["/bin/sh", "-c", "while sleep 1; do echo hello > /vol/test && ls -l /vol && cat /vol/test; done"] 21 | name: container 22 | volumeMounts: 23 | - mountPath: /vol 24 | name: volume 25 | volumes: 26 | - name: volume 27 | persistentVolumeClaim: 28 | claimName: claim 29 | --- 30 | apiVersion: snapshot.storage.k8s.io/v1beta1 31 | kind: VolumeSnapshotClass 32 | metadata: 33 | name: snapshot-class-seagate-exos-x-csi 34 | driver: csi-exos-x.seagate.com 35 | deletionPolicy: Delete 36 | parameters: 37 | csi.storage.k8s.io/snapshotter-secret-name: snapshotter-secrets 38 | csi.storage.k8s.io/snapshotter-secret-namespace: seagate-exos-x-csi-system 39 | --- 40 | apiVersion: snapshot.storage.k8s.io/v1 41 | kind: VolumeSnapshot 42 | metadata: 43 | name: test-snapshot 44 | spec: 45 | volumeSnapshotClassName: snapshot-class-seagate-exos-x-csi 46 | source: 47 | persistentVolumeClaimName: claim 48 | -------------------------------------------------------------------------------- /example/storage-class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | provisioner: csi-exos-x.seagate.com # Check pkg/driver.go, Required for the plugin to recognize this storage class as handled by itself. 4 | volumeBindingMode: WaitForFirstConsumer # Prefer this value to avoid unschedulable pods (https://kubernetes.io/docs/concepts/storage/storage-classes/#volume-binding-mode) 5 | allowVolumeExpansion: true 6 | metadata: 7 | name: gallium-storage # Choose the name that fits the best with your StorageClass. 8 | parameters: 9 | # Secrets name and namespace, they can be the same for provisioner, controller-publish and controller-expand sections. 
10 | csi.storage.k8s.io/provisioner-secret-name: seagate-exos-x-csi-secrets 11 | csi.storage.k8s.io/provisioner-secret-namespace: default 12 | csi.storage.k8s.io/controller-publish-secret-name: seagate-exos-x-csi-secrets 13 | csi.storage.k8s.io/controller-publish-secret-namespace: default 14 | csi.storage.k8s.io/controller-expand-secret-name: seagate-exos-x-csi-secrets 15 | csi.storage.k8s.io/controller-expand-secret-namespace: default 16 | csi.storage.k8s.io/fstype: ext4 # Desired filesystem 17 | pool: A # Pool to use on the IQN to provision volumes 18 | storageProtocol: iscsi # The storage interface (iscsi, fc, sas) being used for storage i/o 19 | -------------------------------------------------------------------------------- /example/block-volume-storageclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | provisioner: csi-exos-x.seagate.com # Check pkg/driver.go, Required for the plugin to recognize this storage class as handled by itself. 4 | volumeBindingMode: WaitForFirstConsumer # Prefer this value to avoid unschedulable pods (https://kubernetes.io/docs/concepts/storage/storage-classes/#volume-binding-mode) 5 | allowVolumeExpansion: true 6 | metadata: 7 | name: block-vol-storageclass 8 | parameters: 9 | # Secrets name and namespace, they can be the same for provisioner, controller-publish and controller-expand sections. 
10 | csi.storage.k8s.io/provisioner-secret-name: seagate-exos-x-csi-secrets 11 | csi.storage.k8s.io/provisioner-secret-namespace: default 12 | csi.storage.k8s.io/controller-publish-secret-name: seagate-exos-x-csi-secrets 13 | csi.storage.k8s.io/controller-publish-secret-namespace: default 14 | csi.storage.k8s.io/controller-expand-secret-name: seagate-exos-x-csi-secrets 15 | csi.storage.k8s.io/controller-expand-secret-namespace: default 16 | csi.storage.k8s.io/node-publish-secret-name: seagate-exos-x-csi-secrets 17 | csi.storage.k8s.io/node-publish-secret-namespace: default 18 | pool: A # Pool to use on the IQN to provision volumes 19 | volPrefix: stx # Desired prefix for volume naming, an underscore is appended 20 | storageProtocol: iscsi # iscsi, fc or sas 21 | AccessType: block -------------------------------------------------------------------------------- /example/storageclass-example1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | provisioner: csi-exos-x.seagate.com # Check pkg/driver.go, Required for the plugin to recognize this storage class as handled by itself. 4 | volumeBindingMode: WaitForFirstConsumer # Prefer this value to avoid unschedulable pods (https://kubernetes.io/docs/concepts/storage/storage-classes/#volume-binding-mode) 5 | allowVolumeExpansion: true 6 | metadata: 7 | name: systems-storageclass # Choose the name that fits the best with your StorageClass. 8 | parameters: 9 | # Secrets name and namespace, they can be the same for provisioner, controller-publish and controller-expand sections. 
10 | csi.storage.k8s.io/provisioner-secret-name: seagate-exos-x-csi-secrets 11 | csi.storage.k8s.io/provisioner-secret-namespace: default 12 | csi.storage.k8s.io/controller-publish-secret-name: seagate-exos-x-csi-secrets 13 | csi.storage.k8s.io/controller-publish-secret-namespace: default 14 | csi.storage.k8s.io/controller-expand-secret-name: seagate-exos-x-csi-secrets 15 | csi.storage.k8s.io/controller-expand-secret-namespace: default 16 | csi.storage.k8s.io/fstype: ext4 # Desired filesystem 17 | pool: A # Pool to use on the IQN to provision volumes 18 | volPrefix: csi # Desired prefix for volume naming. 3 chars max; an underscore will be appended. 19 | storageProtocol: iscsi # The storage interface (iscsi, fc, sas) being used for storage i/o 20 | -------------------------------------------------------------------------------- /example/storageclass-example2-CHAP.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | provisioner: csi-exos-x.seagate.com # Check pkg/driver.go, Required for the plugin to recognize this storage class as handled by itself. 4 | volumeBindingMode: WaitForFirstConsumer # Prefer this value to avoid unschedulable pods (https://kubernetes.io/docs/concepts/storage/storage-classes/#volume-binding-mode) 5 | allowVolumeExpansion: true 6 | metadata: 7 | name: systems-storageclass # Choose the name that fits the best with your StorageClass. 8 | parameters: 9 | # Secrets name and namespace, they can be the same for provisioner, controller-publish and controller-expand sections. 
node-publish secrets are for CHAP authentication 10 | csi.storage.k8s.io/provisioner-secret-name: seagate-exos-x-csi-secrets 11 | csi.storage.k8s.io/provisioner-secret-namespace: default 12 | csi.storage.k8s.io/controller-publish-secret-name: seagate-exos-x-csi-secrets 13 | csi.storage.k8s.io/controller-publish-secret-namespace: default 14 | csi.storage.k8s.io/controller-expand-secret-name: seagate-exos-x-csi-secrets 15 | csi.storage.k8s.io/controller-expand-secret-namespace: default 16 | csi.storage.k8s.io/node-publish-secret-name: seagate-exos-x-csi-secrets # Secrets for CHAP authentication 17 | csi.storage.k8s.io/node-publish-secret-namespace: default # If you are not using CHAP authentication, these lines may be omitted. 18 | csi.storage.k8s.io/fstype: ext4 # Desired filesystem 19 | pool: A # Pool to use on the IQN to provision volumes 20 | volPrefix: csi # Desired prefix for volume naming. 3 chars max; an underscore will be appended. 21 | storageProtocol: iscsi # The storage interface (iscsi, fc, sas) being used for storage i/o 22 | -------------------------------------------------------------------------------- /pkg/exporter/collector.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/prometheus/client_golang/prometheus" 8 | ) 9 | 10 | type Collector struct { 11 | csiRPCCall *prometheus.CounterVec 12 | csiRPCCallDuration *prometheus.CounterVec 13 | } 14 | 15 | const ( 16 | csiRPCCallMetric = "seagate_csi_rpc_call" 17 | csiRPCCallHelp = "How many CSI RPC calls have been executed" 18 | 19 | csiRPCCallDurationMetric = "seagate_csi_rpc_call_duration" 20 | csiRPCCallDurationHelp = "The total duration of CSI RPC calls" 21 | ) 22 | 23 | func NewCollector() *Collector { 24 | return &Collector{ 25 | csiRPCCall: prometheus.NewCounterVec( 26 | prometheus.CounterOpts{ 27 | Name: csiRPCCallMetric, 28 | Help: csiRPCCallHelp, 29 | }, 30 | []string{"endpoint", 
"success"}, 31 | ), 32 | csiRPCCallDuration: prometheus.NewCounterVec( 33 | prometheus.CounterOpts{ 34 | Name: csiRPCCallDurationMetric, 35 | Help: csiRPCCallDurationHelp, 36 | }, 37 | []string{"endpoint"}, 38 | ), 39 | } 40 | } 41 | 42 | func (collector *Collector) Describe(ch chan<- *prometheus.Desc) { 43 | collector.csiRPCCall.Describe(ch) 44 | collector.csiRPCCallDuration.Describe(ch) 45 | } 46 | 47 | func (collector *Collector) Collect(ch chan<- prometheus.Metric) { 48 | collector.csiRPCCall.Collect(ch) 49 | collector.csiRPCCallDuration.Collect(ch) 50 | } 51 | 52 | func (collector *Collector) IncCSIRPCCall(method string, success bool) { 53 | collector.csiRPCCall.WithLabelValues(method, fmt.Sprintf("%t", success)).Inc() 54 | } 55 | 56 | func (collector *Collector) AddCSIRPCCallDuration(method string, duration time.Duration) { 57 | collector.csiRPCCallDuration.WithLabelValues(method).Add(float64(duration.Nanoseconds()) / 1000 / 1000 / 1000) 58 | } 59 | -------------------------------------------------------------------------------- /test/sanity-crc/sanity-cli: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | # Adapted version of the sanity-cli script for use in the sanity pod 4 | # 5 | # Usage: sanity-cli [all] 6 | # 7 | # Running ./sanity-cli 8 | # Will fail fast (-ginkgo.failFast) and use -ginkgo.focus based on TEST_FOCUS 9 | # Use `export TEST_FOCUS=`, such as = CreateVolume to limit test cases 10 | # 11 | # Running ./sanity-cli all 12 | # Will run all test cases and continue past failures 13 | # 14 | 15 | opt=$1 16 | 17 | echo "" 18 | echo "[] sanity-cli $opt" 19 | 20 | 21 | secretsTemplate="secrets.template.yml" 22 | secrets="secrets.yml" 23 | volumeTemplate="volume.template.yml" 24 | volume="volume.yml" 25 | 26 | set -e 27 | 28 | function setVariables() 29 | { 30 | echo "" 31 | echo "env variables:" 32 | 33 | test_focus=$TEST_FOCUS 34 | echo "-- TEST_FOCUS = $test_focus" 35 | } 36 | 37 | setVariables 38 | 39 | controller=unix:///csi/controller.sock 40 | node=unix:///csi/node.sock 41 | sanity=/csi-sanity 42 | 43 | focus=${test_focus} 44 | 45 | echo "" 46 | echo "[] csi-sanity" 47 | 48 | if [ "$opt" == "all" ]; then 49 | echo "sudo ${sanity} -csi.controllerendpoint ${controller} -csi.endpoint ${node} -csi.secrets ${secrets} -csi.testvolumeparameters ${volume}" 50 | sudo ${sanity} -csi.controllerendpoint ${controller} -csi.endpoint ${node} -csi.secrets ${secrets} -csi.testvolumeparameters ${volume} 51 | 52 | else 53 | echo "sudo ${sanity} -csi.controllerendpoint ${controller} -csi.endpoint ${node} -csi.secrets ${secrets} -ginkgo.focus \"${focus}\" -csi.testvolumeparameters ${volume} -ginkgo.failFast" 54 | sudo ${sanity} -csi.controllerendpoint ${controller} -csi.endpoint ${node} -csi.secrets ${secrets} -ginkgo.focus "${focus}" -csi.testvolumeparameters ${volume} -ginkgo.failFast 55 | 56 | fi 57 | 58 | out=$? 
59 | 60 | exit ${out} 61 | -------------------------------------------------------------------------------- /pkg/node_service/node_service_server.go: -------------------------------------------------------------------------------- 1 | package node_service 2 | 3 | import ( 4 | "context" 5 | "net" 6 | 7 | pb "github.com/Seagate/seagate-exos-x-csi/pkg/node_service/node_servicepb" 8 | "github.com/Seagate/seagate-exos-x-csi/pkg/storage" 9 | "google.golang.org/grpc" 10 | "k8s.io/klog/v2" 11 | ) 12 | 13 | type server struct { 14 | pb.UnimplementedNodeServiceServer 15 | } 16 | 17 | // Retrieve initiator addresses from the node 18 | func (s *server) GetInitiators(ctx context.Context, in *pb.InitiatorRequest) (*pb.Initiators, error) { 19 | initiators := []string{} 20 | var err error 21 | switch in.GetType() { 22 | case pb.InitiatorType_FC: 23 | initiators, err = storage.GetFCInitiators() 24 | case pb.InitiatorType_SAS: 25 | initiators, err = storage.GetSASInitiators() 26 | case pb.InitiatorType_ISCSI: 27 | initiators, err = storage.GetISCSIInitiators() 28 | case pb.InitiatorType_UNSPECIFIED: 29 | klog.InfoS("Unspecified initiator type in initiator request, defaulting to iSCSI") 30 | initiators, err = storage.GetISCSIInitiators() 31 | } 32 | if err != nil { 33 | return nil, err 34 | } 35 | return &pb.Initiators{Initiators: initiators}, nil 36 | } 37 | 38 | // Notify node that a volume has been unmapped from the controller 39 | func (s *server) NotifyUnmap(ctx context.Context, in *pb.UnmappedVolume) (*pb.Ack, error) { 40 | storage.CheckPreviouslyRemovedDevices(ctx) 41 | delete(storage.SASandFCRemovedDevicesMap, in.GetVolumeName()) 42 | klog.V(4).InfoS("Previously unmapped device - ControllerUnpublishComplete Notification", "deviceMap", storage.SASandFCRemovedDevicesMap, "volumeName", in.GetVolumeName()) 43 | return &pb.Ack{Ack: 1}, nil 44 | } 45 | 46 | func ListenAndServe(s *grpc.Server, port string) { 47 | lis, err := net.Listen("tcp", ":"+port) 48 | if err != nil { 49 | 
klog.ErrorS(err, "Node Service gRPC server failed to listen") 50 | } 51 | pb.RegisterNodeServiceServer(s, &server{}) 52 | klog.V(0).InfoS("Node Service gRPC server listening", "address", lis.Addr()) 53 | s.Serve(lis) 54 | } 55 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/Seagate/seagate-exos-x-csi 2 | 3 | go 1.22 4 | 5 | require ( 6 | github.com/Seagate/csi-lib-iscsi v1.1.0 7 | github.com/Seagate/csi-lib-sas v1.0.2 8 | github.com/Seagate/seagate-exos-x-api-go/v2 v2.4.1 9 | github.com/container-storage-interface/spec v1.8.0 10 | github.com/golang/protobuf v1.5.4 11 | github.com/google/uuid v1.6.0 12 | github.com/grpc-ecosystem/go-grpc-middleware v1.2.2 13 | github.com/kubernetes-csi/csi-test v0.0.0-20191016154743-6931aedb3df0 14 | github.com/onsi/gomega v1.28.1 15 | github.com/pkg/errors v0.9.1 16 | github.com/prometheus/client_golang v1.17.0 17 | golang.org/x/sync v0.8.0 18 | google.golang.org/grpc v1.69.2 19 | google.golang.org/protobuf v1.35.1 20 | k8s.io/klog/v2 v2.100.1 21 | ) 22 | 23 | require ( 24 | github.com/beorn7/perks v1.0.1 // indirect 25 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 26 | github.com/fsnotify/fsnotify v1.4.7 // indirect 27 | github.com/go-logr/logr v1.4.2 // indirect 28 | github.com/google/go-cmp v0.6.0 // indirect 29 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect 30 | github.com/namsral/flag v1.7.4-pre // indirect 31 | github.com/nxadm/tail v1.4.4 // indirect 32 | github.com/onsi/ginkgo v1.12.1 // indirect 33 | github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect 34 | github.com/prometheus/common v0.44.0 // indirect 35 | github.com/prometheus/procfs v0.11.1 // indirect 36 | golang.org/x/net v0.30.0 // indirect 37 | golang.org/x/sys v0.26.0 // indirect 38 | golang.org/x/text v0.19.0 // indirect 39 | 
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect 40 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect 41 | gopkg.in/yaml.v2 v2.4.0 // indirect 42 | gopkg.in/yaml.v3 v3.0.1 // indirect 43 | ) 44 | 45 | //replace github.com/Seagate/seagate-exos-x-api-go/v2 => ./seagate-exos-x-api-go 46 | // replace github.com/Seagate/csi-lib-iscsi => ../csi-lib-iscsi 47 | // replace github.com/Seagate/csi-lib-sas => ../csi-lib-sas 48 | -------------------------------------------------------------------------------- /pkg/node_service/node_service_client.go: -------------------------------------------------------------------------------- 1 | package node_service 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "time" 7 | 8 | "github.com/Seagate/seagate-exos-x-csi/pkg/common" 9 | pb "github.com/Seagate/seagate-exos-x-csi/pkg/node_service/node_servicepb" 10 | "google.golang.org/grpc" 11 | "google.golang.org/grpc/credentials/insecure" 12 | "k8s.io/klog/v2" 13 | ) 14 | 15 | func InitializeClient(nodeAddress string) (conn *grpc.ClientConn, err error) { 16 | port, envFound := os.LookupEnv(common.NodeServicePortEnvVar) 17 | if !envFound { 18 | port = "978" 19 | klog.InfoS("no node service port found in environment. 
using default", "port", port) 20 | } 21 | nodeServiceAddr := nodeAddress + ":" + port 22 | conn, err = grpc.Dial(nodeServiceAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) 23 | if err != nil { 24 | klog.ErrorS(err, "Error connecting to node service", "node ip", nodeAddress, "port", port) 25 | return 26 | } 27 | return 28 | } 29 | 30 | // Connect to the node_service gRPC server at the given address and retrieve initiators 31 | func GetNodeInitiators(ctx context.Context, conn *grpc.ClientConn, reqType pb.InitiatorType) ([]string, error) { 32 | client := pb.NewNodeServiceClient(conn) 33 | initiatorReq := pb.InitiatorRequest{Type: reqType} 34 | ctx, cancel := context.WithTimeout(ctx, 10*time.Second) 35 | defer cancel() 36 | initiators, err := client.GetInitiators(ctx, &initiatorReq) 37 | if err != nil { 38 | klog.ErrorS(err, "Error during GetInitiators", "reqType", initiatorReq.Type) 39 | return nil, err 40 | } 41 | return initiators.Initiators, nil 42 | } 43 | 44 | func NotifyUnmap(ctx context.Context, conn *grpc.ClientConn, volumeWWN string) (err error) { 45 | client := pb.NewNodeServiceClient(conn) 46 | unmappedVolumePb := pb.UnmappedVolume{VolumeName: volumeWWN} 47 | ctx, cancel := context.WithTimeout(ctx, 10*time.Second) 48 | defer cancel() 49 | _, err = client.NotifyUnmap(ctx, &unmappedVolumePb) 50 | if err != nil { 51 | klog.ErrorS(err, "Error during unmap notification", "unmappedVolumeName", volumeWWN) 52 | } 53 | return 54 | } 55 | -------------------------------------------------------------------------------- /pkg/controller/expander.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/Seagate/seagate-exos-x-csi/pkg/common" 8 | "github.com/container-storage-interface/spec/lib/go/csi" 9 | "google.golang.org/grpc/codes" 10 | "google.golang.org/grpc/status" 11 | "k8s.io/klog/v2" 12 | ) 13 | 14 | // ControllerExpandVolume expands a 
volume to the given new size
func (controller *Controller) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
	volumeName, _ := common.VolumeIdGetName(req.GetVolumeId())
	if volumeName == "" {
		return nil, status.Error(codes.InvalidArgument, "cannot expand a volume with an empty ID")
	}
	klog.Infof("expanding volume %q", volumeName)

	// Prefer the required (minimum) size; fall back to the limit when no
	// minimum was supplied in the capacity range.
	newSize := req.GetCapacityRange().GetRequiredBytes()
	if newSize == 0 {
		newSize = req.GetCapacityRange().GetLimitBytes()
	}
	klog.V(2).Infof("requested size: %d bytes", newSize)

	response, _, err := controller.client.ShowVolumes(volumeName)
	if err != nil {
		return nil, err
	}
	if len(response) == 0 {
		return nil, fmt.Errorf("volume %q not found", volumeName)
	}
	if response[0].SizeNumeric == 0 {
		return nil, fmt.Errorf("could not get current volume size, thus volume expansion is not possible")
	}
	if response[0].Blocks == 0 {
		// BUG FIX: the previous message formatted err with %v, but err is
		// always nil on this path (the err != nil case returned above), so
		// it printed "could not parse volume size: <nil>". Report the real
		// cause instead.
		return nil, fmt.Errorf("could not determine current size of volume %q: block count is zero", volumeName)
	}

	currentSize := response[0].Blocks * response[0].BlockSize
	klog.V(2).Infof("current size: %d bytes", currentSize)
	expansionSize := newSize - currentSize

	// If the volume already satisfies the requested capacity, succeed
	// idempotently instead of asking the array to expand by a zero or
	// negative amount (ControllerExpandVolume must be idempotent per the
	// CSI spec).
	if expansionSize <= 0 {
		klog.Infof("volume %q is already %d bytes; no expansion needed", volumeName, currentSize)
		return &csi.ControllerExpandVolumeResponse{
			CapacityBytes:         currentSize,
			NodeExpansionRequired: true,
		}, nil
	}
	klog.V(2).Infof("expanding volume by %d bytes", expansionSize)

	expansionSizeStr := getSizeStr(expansionSize)
	if _, err := controller.client.ExpandVolume(volumeName, expansionSizeStr); err != nil {
		return nil, err
	}

	klog.Infof("volume %q successfully expanded", volumeName)

	return &csi.ControllerExpandVolumeResponse{
		CapacityBytes:         newSize,
		NodeExpansionRequired: true,
	}, nil
}
-------------------------------------------------------------------------------- /pkg/exporter/exporter.go: -------------------------------------------------------------------------------- 1 | package exporter 2
| 3 | import ( 4 | "context" 5 | "fmt" 6 | "net" 7 | "net/http" 8 | 9 | "github.com/prometheus/client_golang/prometheus" 10 | "github.com/prometheus/client_golang/prometheus/promhttp" 11 | "k8s.io/klog/v2" 12 | ) 13 | 14 | // Exporter : Configuration (from command-line) 15 | type Exporter struct { 16 | Port int 17 | Collector *Collector 18 | 19 | listener net.Listener 20 | server *http.Server 21 | collectors []prometheus.Collector 22 | } 23 | 24 | func New(port int) *Exporter { 25 | exporter := &Exporter{ 26 | Port: port, 27 | Collector: NewCollector(), 28 | } 29 | exporter.RegisterCollector(exporter.Collector) 30 | return exporter 31 | } 32 | 33 | // ListenAndServe : Convenience function to start exporter 34 | func (exporter *Exporter) ListenAndServe() error { 35 | if err := exporter.Listen(); err != nil { 36 | return err 37 | } 38 | 39 | return exporter.Serve() 40 | } 41 | 42 | // Listen : Listen for requests 43 | func (exporter *Exporter) Listen() error { 44 | for _, collector := range exporter.collectors { 45 | err := prometheus.Register(collector) 46 | if err != nil { 47 | if registered, ok := err.(prometheus.AlreadyRegisteredError); ok { 48 | prometheus.Unregister(registered.ExistingCollector) 49 | prometheus.MustRegister(collector) 50 | } 51 | } 52 | } 53 | 54 | listen := fmt.Sprintf(":%d", exporter.Port) 55 | klog.Infof("listening on %s", listen) 56 | 57 | listener, err := net.Listen("tcp", listen) 58 | if err != nil { 59 | return err 60 | } 61 | 62 | exporter.listener = listener 63 | return nil 64 | } 65 | 66 | // Serve : Actually reply to requests 67 | func (exporter *Exporter) Serve() error { 68 | mux := http.NewServeMux() 69 | mux.Handle("/metrics", promhttp.Handler()) 70 | 71 | exporter.server = &http.Server{ 72 | Handler: mux, 73 | } 74 | 75 | return exporter.server.Serve(exporter.listener) 76 | } 77 | 78 | // Shutdown : Properly tear down server 79 | func (exporter *Exporter) Shutdown() error { 80 | return 
exporter.server.Shutdown(context.Background()) 81 | } 82 | 83 | func (exporter *Exporter) RegisterCollector(collector prometheus.Collector) { 84 | exporter.collectors = append(exporter.collectors, collector) 85 | } 86 | -------------------------------------------------------------------------------- /pkg/common/stringlock.go: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (c) 2021 Seagate Technology LLC and/or its Affiliates 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | // For any questions about this software or licensing, 17 | // please email opensource@seagate.com or cortx-questions@seagate.com. 
18 | 19 | package common 20 | 21 | import ( 22 | "sync" 23 | 24 | "k8s.io/klog/v2" 25 | ) 26 | 27 | type MyMutex struct { 28 | lock sync.Mutex 29 | refCount int 30 | } 31 | 32 | type MyMap struct { 33 | lock sync.Mutex 34 | things map[string]*MyMutex 35 | } 36 | 37 | // Lock: lock our reference counting mutex 38 | func (mm *MyMap) Lock(s string) { 39 | klog.V(5).Infof("getting ready to lock (%s)", s) 40 | mm.lock.Lock() 41 | 42 | m := mm.things[s] 43 | if m != nil { 44 | m.refCount += 1 45 | } else { 46 | klog.V(5).Infof("creating new reference counted mutex for (%s)", s) 47 | m = &MyMutex{refCount: 1} 48 | mm.things[s] = m 49 | } 50 | mm.lock.Unlock() 51 | klog.V(5).Infof("locking (%s) refCount (%d)", s, m.refCount) 52 | m.lock.Lock() 53 | } 54 | 55 | // Unlock: unlock our reference counting mutex 56 | func (mm *MyMap) Unlock(s string) { 57 | mm.lock.Lock() 58 | defer mm.lock.Unlock() 59 | 60 | m := mm.things[s] 61 | if m == nil { 62 | klog.V(5).Infof("cannot unlock (%s) because it's not there anymore", s) 63 | return 64 | } 65 | 66 | m.refCount -= 1 67 | 68 | if m.refCount > 0 { 69 | klog.V(5).Infof("unlocking (%s)", s) 70 | m.lock.Unlock() 71 | } else { 72 | klog.V(5).Infof("unlocking & deleting (%s)", s) 73 | delete(mm.things, s) 74 | } 75 | } 76 | 77 | // New: Create a new reference counting mutex 78 | func NewStringLock() *MyMap { 79 | mm := &MyMap{} 80 | x := make(map[string]*MyMutex) 81 | mm.things = x 82 | return mm 83 | } 84 | -------------------------------------------------------------------------------- /helm/csi-charts/README.md.gotmpl: -------------------------------------------------------------------------------- 1 | {{ template "chart.header" . }} 2 | {{ template "chart.deprecationWarning" . }} 3 | {{ template "chart.description" . }} 4 | 5 | {{ template "chart.badgesSection" . 
}} 6 | [![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/Seagate)](https://artifacthub.io/packages/search?repo=Seagate) 7 | 8 | # Introduction 9 | As of version `1.0.0`, this `csi` driver and the associated helm charts are released as open-source projects under the Apache 2.0 license. 10 | 11 | Your contribution is most welcome! 12 | 13 | {{ template "chart.homepageLine" . }} 14 | 15 | ## This helm chart 16 | Is part of the project and is published on [Seagate](https://seagate.io)'s charts repository. 17 | 18 | {{ template "chart.sourcesSection" . }} 19 | 20 | # Installing the Chart 21 | 22 | Create a file named `{{ template "chart.name" . }}.values.yaml` with your values, with the help of [Chart Values](#values). 23 | 24 | Add our Charts repository: 25 | ``` 26 | $ helm repo add seagate https://charts.seagate.io 27 | ``` 28 | 29 | Install the {{ template "chart.name" . }} with release name `{{ template "chart.name" . }}` in the `seagate-exos-x-csi-system` namespace: 30 | ``` 31 | $ helm install -n seagate-exos-x-csi-system {{ template "chart.name" . }} seagate/{{ template "chart.name" . }} --values {{ template "chart.name" . }}.values.yaml 32 | ``` 33 | 34 | The `upgrade` command is used to change configuration when values are modified: 35 | ``` 36 | $ helm upgrade -n seagate-exos-x-csi-system {{ template "chart.name" . }} seagate/{{ template "chart.name" . }} --values {{ template "chart.name" . }}.values.yaml 37 | ``` 38 | 39 | # Upgrading the Chart 40 | 41 | Update Helm repositories: 42 | ``` 43 | $ helm repo update 44 | ``` 45 | 46 | Upgrade release names `{{ template "chart.name" . }}` to the latest version: 47 | ``` 48 | $ helm upgrade {{ template "chart.name" . }} seagate/{{ template "chart.name" . }} 49 | ``` 50 | 51 | # Creating a storage class 52 | 53 | In order to dynamically provision persistants volumes, you first need to create a storage class. 
To do so, please refer to the project [documentation](https://github.com/Seagate/seagate-exos-x-csi). 54 | 55 | {{ template "chart.maintainersSection" . }} 56 | 57 | {{ template "chart.requirementsSection" . }} 58 | 59 | {{ template "chart.valuesSection" . }} 60 | -------------------------------------------------------------------------------- /docs/volume-snapshots.md: -------------------------------------------------------------------------------- 1 | # Volume snapshots 2 | 3 | ## Installation 4 | 5 | In order to enable the volume snapshotting feature on your cluster, you first need to install the snapshot-controller as well as the snapshot CRDs. You can do so by following these [instructions](https://github.com/kubernetes-csi/external-snapshotter#usage). 6 | 7 | You will also need to install the snapshot validation webhook, by following these [instructions](https://github.com/kubernetes-csi/external-snapshotter/tree/master/deploy/kubernetes/webhook-example). 8 | 9 | ## Create a snapshot 10 | 11 | To create a snapshot of a volume, you first have to create a `VolumeSnapshotClass`, which is the equivalent of a `StorageClass` but for snapshots. Then you can create a `VolumeSnapshot` which uses the newly created `VolumeSnapshotClass`. You can follow this [snapshot example](../example/snapshot.yaml). For more information, please refer to the kubernetes [documentation](https://kubernetes.io/docs/concepts/storage/volume-snapshots/). 12 | 13 | ## Restore a snapshot 14 | 15 | To restore a snapshot, you have to create a new `PersistentVolumeClaim` and specify the desired snapshot as a dataSource. You can find an example [here](https://github.com/kubernetes-csi/external-snapshotter/blob/release-4.0/examples/kubernetes/restore.yaml). You can also refer to the kubernetes [documentation](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#volume-snapshot-and-restore-volume-from-snapshot-support).
16 | 17 | ## Clone a volume 18 | 19 | To clone a volume, you can follow the same procedure than to restore a snapshot, but configure another volume instead of a snapshot. An example can be found [here](https://github.com/kubernetes-csi/csi-driver-host-path/blob/master/examples/csi-clone.yaml) and the kubernetes documentation [here](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#volume-cloning). 20 | 21 | --- 22 | 23 | References: 24 | - https://kubernetes.io/docs/concepts/storage/volume-snapshots 25 | - https://github.com/kubernetes-csi/external-snapshotter 26 | - https://kubernetes-csi.github.io/docs/snapshot-controller 27 | - https://kubernetes-csi.github.io/docs/snapshot-validation-webhook 28 | - https://kubernetes-csi.github.io/docs/snapshot-restore-feature 29 | - https://kubernetes-csi.github.io/docs/volume-cloning 30 | - https://github.com/kubernetes-csi/external-snapshotter/tree/release-4.0/examples/kubernetes 31 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish a Docker Image 2 | 3 | on: 4 | workflow_run: 5 | workflows: ["Release"] 6 | types: [completed] 7 | 8 | env: 9 | REGISTRY: ghcr.io 10 | IMAGE_NAME: ${{ github.repository }} 11 | 12 | jobs: 13 | build-and-push-image: 14 | runs-on: ubuntu-20.04 15 | permissions: 16 | contents: read 17 | packages: write 18 | 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v3 22 | 23 | - name: Fetch latest release version 24 | id: fetch-latest-release 25 | run: | 26 | latest_release=$(curl -Ls \ 27 | -H "Accept: application/vnd.github+json" \ 28 | -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ 29 | -H "X-GitHub-Api-Version: 2022-11-28" \ 30 | $GITHUB_API_URL/repos/$GITHUB_REPOSITORY/releases/latest | jq --raw-output '.tag_name') 31 | echo "latest_release is $latest_release" 32 | echo "latest-release=$latest_release" >> 
$GITHUB_OUTPUT 33 | 34 | - name: Lowercase 35 | id: imagelc 36 | uses: ASzc/change-string-case-action@v5 37 | with: 38 | string: ${{ github.repository }} 39 | 40 | - name: Output 41 | run: | 42 | echo "released version is ${{ steps.fetch-latest-release.outputs.latest-release }}" 43 | echo "image name is ${{ steps.imagelc.outputs.lowercase }}" 44 | 45 | - name: Log in to the Container registry 46 | uses: docker/login-action@v2 47 | with: 48 | registry: ${{ env.REGISTRY }} 49 | username: ${{ github.actor }} 50 | password: ${{ secrets.GITHUB_TOKEN }} 51 | 52 | - name: Extract metadata (tags, labels) for Docker 53 | id: meta 54 | uses: docker/metadata-action@v4 55 | with: 56 | images: ${{ env.REGISTRY }}/${{ steps.imagelc.outputs.lowercase }} 57 | 58 | - name: Build and Push Docker Image 59 | id: docker_build 60 | uses: docker/build-push-action@v4 61 | with: 62 | context: . 63 | push: true 64 | tags: ${{ env.REGISTRY }}/${{ steps.imagelc.outputs.lowercase }}:${{ steps.fetch-latest-release.outputs.latest-release }} 65 | labels: ${{ steps.meta.outputs.labels }} 66 | file: Dockerfile.redhat 67 | 68 | - name: Inspect 69 | run: | 70 | docker image inspect ${{ env.REGISTRY }}/${{ steps.imagelc.outputs.lowercase }}:${{ steps.fetch-latest-release.outputs.latest-release }} 71 | -------------------------------------------------------------------------------- /example/jobs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: jobclaim 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | storageClassName: my-marvelous-storage 9 | resources: 10 | requests: 11 | storage: 100Gi 12 | --- 13 | apiVersion: batch/v1beta1 14 | kind: CronJob 15 | metadata: 16 | name: read-write-a 17 | spec: 18 | schedule: "*/1 * * * *" 19 | jobTemplate: 20 | spec: 21 | template: 22 | spec: 23 | restartPolicy: Never 24 | nodeSelector: 25 | id: "1" 26 | containers: 27 | - image: alpine 28 | name: 
read-write-a 29 | args: 30 | - sh 31 | - -c 32 | - 'ls -la /vol > /vol/ls-$(date +%s) && cat /vol/* | head -n 1000 > /vol/cat-$(date +%s)' 33 | volumeMounts: 34 | - mountPath: /vol 35 | name: volume 36 | volumes: 37 | - name: volume 38 | persistentVolumeClaim: 39 | claimName: jobclaim 40 | --- 41 | apiVersion: batch/v1beta1 42 | kind: CronJob 43 | metadata: 44 | name: read-write-b 45 | spec: 46 | schedule: "*/3 * * * *" 47 | jobTemplate: 48 | spec: 49 | template: 50 | spec: 51 | restartPolicy: Never 52 | nodeSelector: 53 | id: "2" 54 | containers: 55 | - image: alpine 56 | name: read-write-b 57 | args: 58 | - sh 59 | - -c 60 | - 'ls -la /vol > /vol/ls-$(date +%s) && cat /vol/* | head -n 1000 > /vol/cat-$(date +%s)' 61 | volumeMounts: 62 | - mountPath: /vol 63 | name: volume 64 | volumes: 65 | - name: volume 66 | persistentVolumeClaim: 67 | claimName: jobclaim 68 | --- 69 | apiVersion: batch/v1beta1 70 | kind: CronJob 71 | metadata: 72 | name: read-write-c 73 | spec: 74 | schedule: "*/5 * * * *" 75 | jobTemplate: 76 | spec: 77 | template: 78 | spec: 79 | restartPolicy: Never 80 | nodeSelector: 81 | id: "3" 82 | containers: 83 | - image: alpine 84 | name: read-write-c 85 | args: 86 | - sh 87 | - -c 88 | - 'ls -la /vol > /vol/ls-$(date +%s) && cat /vol/* | head -n 1000 > /vol/cat-$(date +%s)' 89 | volumeMounts: 90 | - mountPath: /vol 91 | name: volume 92 | volumes: 93 | - name: volume 94 | persistentVolumeClaim: 95 | claimName: jobclaim 96 | -------------------------------------------------------------------------------- /test/sanity-cli: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | # 4 | # Usage: sanity-cli [all] 5 | # 6 | # Running ./sanity-cli 7 | # Will fail fast (-ginkgo.failFast) and use -ginkgo.focus based on TEST_FOCUS 8 | # Use `export TEST_FOCUS=`, such as = CreateVolume to limit test cases 9 | # 10 | # Running ./sanity-cli all 11 | # Will run all test cases and continue past failures 12 | # 13 | 14 | if [ -z ${1+x} ]; then 15 | opt= 16 | else 17 | opt=$1 18 | fi 19 | 20 | echo "" 21 | echo "[] sanity-cli $opt" 22 | 23 | 24 | secretsTemplate="secrets.template.yml" 25 | secrets="secrets.yml" 26 | volumeTemplate="volume.template.yml" 27 | volume="volume.yml" 28 | 29 | set -e 30 | 31 | function pause() 32 | { 33 | echo "" 34 | read -s -n 1 -p "===== Press any key to contine =====" 35 | echo "" 36 | } 37 | 38 | function setup { 39 | cd $(dirname $0) 40 | set -a; . .env; set +a 41 | 42 | echo "" 43 | 44 | envsubst < ${secretsTemplate} > ${secrets} 45 | echo "===== ${secrets} =====" 46 | cat ${secrets} 47 | echo "===== END =====" 48 | 49 | echo "" 50 | 51 | envsubst < ${volumeTemplate} > ${volume} 52 | echo "===== ${volume} =====" 53 | cat ${volume} 54 | echo "===== END =====" 55 | pause 56 | } 57 | 58 | function setVariables() 59 | { 60 | echo "" 61 | echo "env variables:" 62 | 63 | # export TEST_FOCUS=DeleteVolume 64 | # export TEST_FOCUS=CreateVolume 65 | 66 | test_focus=$TEST_FOCUS 67 | if [ -z "$test_focus" ] 68 | then 69 | test_focus= 70 | fi 71 | echo "-- TEST_FOCUS = $test_focus" 72 | pause 73 | } 74 | 75 | setVariables 76 | setup 77 | 78 | controller=unix:///var/run/csi-exos-x.seagate.com/csi-controller.sock 79 | node=unix:///var/run/csi-exos-x.seagate.com/csi-node.sock 80 | sanity=/home/seagate/github.com/kubernetes-csi/csi-test/cmd/csi-sanity/csi-sanity 81 | # sanity=/home/seagate/github.com/jskazinski/csi-test/cmd/csi-sanity/csi-sanity 82 | focus=${test_focus} 83 | 84 | echo "" 85 | echo "[] csi-sanity" 86 | 87 | if [ "$opt" == "all" ]; then 88 | echo "sudo ${sanity} -csi.controllerendpoint 
${controller} -csi.endpoint ${node} -csi.secrets ${secrets} -csi.testvolumeparameters ${volume} > sanity.log" 89 | sudo ${sanity} -csi.controllerendpoint ${controller} -csi.endpoint ${node} -csi.secrets ${secrets} -csi.testvolumeparameters ${volume} > sanity.log 90 | else 91 | echo "sudo ${sanity} -csi.controllerendpoint ${controller} -csi.endpoint ${node} -csi.secrets ${secrets} -ginkgo.focus \"${focus}\" -csi.testvolumeparameters ${volume} -ginkgo.failFast > sanity.log" 92 | sudo ${sanity} -csi.controllerendpoint ${controller} -csi.endpoint ${node} -csi.secrets ${secrets} -ginkgo.focus "${focus}" -csi.testvolumeparameters ${volume} -ginkgo.failFast > sanity.log 93 | fi 94 | 95 | out=$? 96 | 97 | exit ${out} 98 | -------------------------------------------------------------------------------- /example/serviceaccount-prom.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/name: prometheus 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 2.25.0 9 | name: prometheus-k8s 10 | namespace: default 11 | rules: 12 | - apiGroups: 13 | - "" 14 | resources: 15 | - services 16 | - endpoints 17 | - pods 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - extensions 24 | resources: 25 | - ingresses 26 | verbs: 27 | - get 28 | - list 29 | - watch 30 | --- 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | kind: Role 33 | metadata: 34 | labels: 35 | app.kubernetes.io/component: prometheus 36 | app.kubernetes.io/name: prometheus 37 | app.kubernetes.io/part-of: kube-prometheus 38 | app.kubernetes.io/version: 2.25.0 39 | name: prometheus-k8s-config 40 | namespace: monitoring 41 | rules: 42 | - apiGroups: 43 | - "" 44 | resources: 45 | - configmaps 46 | verbs: 47 | - get 48 | --- 49 | apiVersion: v1 50 | kind: ServiceAccount 51 | metadata: 52 | 
labels: 53 | app.kubernetes.io/component: prometheus 54 | app.kubernetes.io/name: prometheus 55 | app.kubernetes.io/part-of: kube-prometheus 56 | app.kubernetes.io/version: 2.25.0 57 | name: prometheus-k8s 58 | namespace: monitoring 59 | secrets: 60 | - name: prometheus-k8s-token-jldqz # Replace with the correct name 61 | --- 62 | apiVersion: rbac.authorization.k8s.io/v1 63 | kind: RoleBinding 64 | metadata: 65 | labels: 66 | app.kubernetes.io/component: prometheus 67 | app.kubernetes.io/name: prometheus 68 | app.kubernetes.io/part-of: kube-prometheus 69 | app.kubernetes.io/version: 2.25.0 70 | name: prometheus-k8s 71 | namespace: monitoring 72 | roleRef: 73 | apiGroup: rbac.authorization.k8s.io 74 | kind: Role 75 | name: prometheus-k8s 76 | subjects: 77 | - kind: ServiceAccount 78 | name: prometheus-k8s 79 | namespace: monitoring 80 | --- 81 | apiVersion: rbac.authorization.k8s.io/v1 82 | kind: RoleBinding 83 | metadata: 84 | labels: 85 | app.kubernetes.io/component: prometheus 86 | app.kubernetes.io/name: prometheus 87 | app.kubernetes.io/part-of: kube-prometheus 88 | app.kubernetes.io/version: 2.25.0 89 | name: prometheus-k8s-config 90 | namespace: monitoring 91 | roleRef: 92 | apiGroup: rbac.authorization.k8s.io 93 | kind: Role 94 | name: prometheus-k8s-config 95 | subjects: 96 | - kind: ServiceAccount 97 | name: prometheus-k8s 98 | namespace: monitoring 99 | --- 100 | apiVersion: rbac.authorization.k8s.io/v1 101 | kind: ClusterRoleBinding 102 | metadata: 103 | name: k8s-monitoring 104 | roleRef: 105 | apiGroup: rbac.authorization.k8s.io 106 | kind: ClusterRole 107 | name: cluster-admin 108 | subjects: 109 | - kind: ServiceAccount 110 | name: prometheus-k8s 111 | namespace: monitoring 112 | -------------------------------------------------------------------------------- /docs/iscsi/multipath.conf: -------------------------------------------------------------------------------- 1 | defaults { 2 | polling_interval 2 3 | #path_selector "round-robin 0" 4 | 
#path_grouping_policy failover 5 | #prio alua 6 | #path_checker tur 7 | #rr_min_io 3 8 | #flush_on_last_del no 9 | #max_fds max 10 | #rr_weight priorities 11 | #failback immediate 12 | #no_path_retry 18 13 | #queue_without_daemon no 14 | user_friendly_names "no" 15 | find_multipaths "greedy" 16 | retain_attached_hw_handler "no" 17 | disable_changed_wwids "yes" 18 | } 19 | 20 | blacklist { 21 | devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" 22 | devnode "^hd[a-z][[0-9]*]" 23 | wwid _USB_DISK_2.0_070C352825C2DA48-0:0 24 | #wwid iDRAC_OEMDRV_20120731-0:0 25 | } 26 | devices { 27 | device { 28 | vendor "VERITAS" 29 | product "STX_5005" 30 | uid_attribute ID_SERIAL 31 | path_grouping_policy "group_by_prio" 32 | path_checker "tur" 33 | features "0" 34 | hardware_handler "0" 35 | prio "alua" 36 | failback immediate 37 | rr_weight "uniform" 38 | no_path_retry 18 39 | } 40 | device { 41 | vendor "DellEMC" 42 | product "^ME[45].*" 43 | uid_attribute ID_SERIAL 44 | path_grouping_policy "group_by_prio" 45 | path_checker "tur" 46 | features "0" 47 | hardware_handler "0" 48 | prio "alua" 49 | failback immediate 50 | rr_weight "uniform" 51 | no_path_retry 18 52 | } 53 | device { 54 | vendor "SEAGATE" 55 | product "^[345][0-9][0-9][56]$" 56 | uid_attribute ID_SERIAL 57 | path_grouping_policy "group_by_prio" 58 | path_checker "tur" 59 | features "0" 60 | hardware_handler "0" 61 | prio "alua" 62 | failback immediate 63 | rr_weight "uniform" 64 | no_path_retry 18 65 | } 66 | device { 67 | vendor "HPE?" 
68 | product "^MSA [12]0[456]0" 69 | uid_attribute ID_SERIAL 70 | path_grouping_policy "group_by_prio" 71 | path_checker "tur" 72 | features "0" 73 | hardware_handler "0" 74 | prio "alua" 75 | failback immediate 76 | rr_weight "uniform" 77 | no_path_retry 18 78 | } 79 | device { 80 | vendor "Lenovo" 81 | product "DS[2346]200" 82 | uid_attribute ID_SERIAL 83 | path_grouping_policy "group_by_prio" 84 | path_checker "tur" 85 | features "0" 86 | hardware_handler "0" 87 | prio "alua" 88 | failback immediate 89 | rr_weight "uniform" 90 | no_path_retry 18 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /Dockerfile.redhat: -------------------------------------------------------------------------------- 1 | # -*- compile-command: "make openshift" -*- 2 | 3 | # This is a multi-stage build that expects build-args on the command 4 | # line, or for args to be updated in this file prior to building. See 5 | # the ARG section below. 6 | 7 | FROM registry.access.redhat.com/ubi8/go-toolset:1.22 AS build 8 | 9 | USER root 10 | ENV PATH="${PATH}:/opt/rh/go-toolset-1.22/root/usr/bin/" 11 | RUN printenv 12 | RUN echo "glibc build image info:" && ldd --version 13 | RUN go version 14 | 15 | #WORKDIR /app 16 | COPY ./go.* ./ 17 | COPY cmd cmd 18 | COPY pkg pkg 19 | COPY Makefile ./ 20 | 21 | RUN echo -e "package common\nconst Version = \"${version}\"" > ./pkg/common/version.go 22 | 23 | RUN make controller node; cp seagate-exos-x-csi-controller seagate-exos-x-csi-node / 24 | 25 | RUN ls -l / 26 | 27 | ######################################################################## 28 | 29 | FROM registry.access.redhat.com/ubi8 30 | 31 | ARG version=1.10.0 32 | ARG vcs_ref=71db5f6b9884e931092b35230b698562b6382f94 33 | ARG build_date=2025-01-13T00:04:13+00:00 34 | ARG vendor=Seagate 35 | ARG family="Exos X" 36 | ARG app="${family} CSI Driver" 37 | ARG email=frontline@seagate.com 38 | ARG source=https://github.com/seagate/seagate-exos-x-csi 39 | 
ARG summary="CSI Driver for Seagate Exos X storage arrays" 40 | ARG description="The Seagate Exos X CSI Driver enables Kubernetes to provision storage for containerized workloads from Seagate Exos X storage systems." 41 | 42 | # In addition to OCI labels, we add certain labels required for OpenShift certification, and others recommended for K8s apps, 43 | # and a few just to override labels from the base container. 44 | # 45 | # * https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/program-on-boarding/technical-prerequisites 46 | # * https://github.com/opencontainers/image-spec/blob/main/annotations.md 47 | # * http://label-schema.org/rc1/ (deprecated) 48 | # * https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/#labels 49 | 50 | LABEL name="${app}" \ 51 | maintainer="${vendor} <${email}>" \ 52 | vendor="${vendor}" \ 53 | version="${version}" \ 54 | release="${version}" \ 55 | summary="${summary}" \ 56 | vcs-ref="${vcs_ref}" \ 57 | vcs-type="git" \ 58 | description="${description}" \ 59 | org.opencontainers.image.authors="${email}" \ 60 | org.opencontainers.image.version="${version}" \ 61 | org.opencontainers.image.revision="${vcs_ref}" \ 62 | org.opencontainers.image.created="$build_date" \ 63 | org.opencontainers.image.vendor="${vendor}" \ 64 | org.opencontainers.image.source="${source}" \ 65 | io.k8s.display-name="${app}" \ 66 | io.k8s.description="${description}" 67 | 68 | USER root 69 | RUN yum update -y && \ 70 | yum -y install iscsi-initiator-utils && \ 71 | yum clean all && \ 72 | rm -rf /var/cache 73 | 74 | ENV PATH="${PATH}:/lib/udev" 75 | 76 | COPY --from=build /seagate-exos-x-csi-controller /seagate-exos-x-csi-node /usr/local/bin/ 77 | 78 | # Red Hat requires licenses to be in this folder 79 | COPY licenses /licenses 80 | 81 | CMD [ "/usr/local/bin/seagate-exos-x-csi-controller" ] 82 | -------------------------------------------------------------------------------- /docs/sas/multipath.conf: 
-------------------------------------------------------------------------------- 1 | defaults { 2 | polling_interval 2 3 | #path_selector "round-robin 0" 4 | #path_grouping_policy failover 5 | #prio alua 6 | #path_checker tur 7 | #rr_min_io 3 8 | #flush_on_last_del no 9 | #max_fds max 10 | #rr_weight priorities 11 | #failback immediate 12 | #no_path_retry 18 13 | #queue_without_daemon no 14 | user_friendly_names "no" 15 | find_multipaths "greedy" 16 | retain_attached_hw_handler "no" 17 | disable_changed_wwids "yes" 18 | } 19 | 20 | blacklist { 21 | devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" 22 | devnode "^hd[a-z][[0-9]*]" 23 | wwid _USB_DISK_2.0_070C352825C2DA48-0:0 24 | #wwid iDRAC_OEMDRV_20120731-0:0 25 | } 26 | devices { 27 | device { 28 | vendor "VERITAS" 29 | product "STX_5005" 30 | uid_attribute ID_SERIAL 31 | path_grouping_policy "group_by_prio" 32 | path_checker "tur" 33 | features "0" 34 | hardware_handler "0" 35 | prio "alua" 36 | failback immediate 37 | rr_weight "uniform" 38 | no_path_retry 18 39 | rr_min_io 3 40 | } 41 | device { 42 | vendor "DellEMC" 43 | product "ME[45].*" 44 | uid_attribute ID_SERIAL 45 | path_grouping_policy "group_by_prio" 46 | path_checker "tur" 47 | features "0" 48 | hardware_handler "0" 49 | prio "alua" 50 | failback immediate 51 | rr_weight "uniform" 52 | no_path_retry 18 53 | rr_min_io 3 54 | } 55 | device { 56 | vendor "SEAGATE" 57 | product "[456][0-9][0-9][456]" 58 | uid_attribute ID_SERIAL 59 | path_grouping_policy "group_by_prio" 60 | path_checker "tur" 61 | features "0" 62 | hardware_handler "0" 63 | prio "alua" 64 | failback immediate 65 | rr_weight "uniform" 66 | no_path_retry 18 67 | rr_min_io 3 68 | } 69 | device { 70 | vendor "HPE" 71 | product "MSA [12]0[456]0.*" 72 | uid_attribute ID_SERIAL 73 | path_grouping_policy "group_by_prio" 74 | path_checker "tur" 75 | features "0" 76 | hardware_handler "0" 77 | prio "alua" 78 | failback immediate 79 | rr_weight "uniform" 80 | no_path_retry 18 81 | rr_min_io 3 82 
| } 83 | device { 84 | vendor "Lenovo" 85 | product "[D]?S[2346]200" 86 | uid_attribute ID_SERIAL 87 | path_grouping_policy "group_by_prio" 88 | path_checker "tur" 89 | features "0" 90 | hardware_handler "0" 91 | prio "alua" 92 | failback immediate 93 | rr_weight "uniform" 94 | no_path_retry 18 95 | rr_min_io 100 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /helm/csi-charts/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for the CSI Driver. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | # -- Path to kubelet 6 | kubeletPath: /var/lib/kubelet 7 | # -- Whether the psp admission controller has been enabled in the cluster or not 8 | pspAdmissionControllerEnabled: false 9 | image: 10 | # -- Docker repository to use for nodes and controller 11 | repository: ghcr.io/seagate/seagate-exos-x-csi 12 | # -- Tag to use for nodes and controller 13 | # @default -- Uses Chart.appVersion value by default if tag does not specify a new version. 
14 | tag: "v1.10.0" 15 | # -- Default is set to IfNotPresent, to override use Always here to always pull the specified version 16 | pullPolicy: Always 17 | # -- Controller sidecar for provisioning 18 | # AKA external-provisioner 19 | csiProvisioner: 20 | image: 21 | repository: registry.k8s.io/sig-storage/csi-provisioner 22 | tag: v5.0.1 23 | # -- Timeout for gRPC calls from the csi-provisioner to the controller 24 | timeout: 60s 25 | # -- Extra arguments for csi-provisioner controller sidecar 26 | extraArgs: [] 27 | # -- Controller sidecar for attachment handling 28 | csiAttacher: 29 | image: 30 | repository: registry.k8s.io/sig-storage/csi-attacher 31 | tag: v4.6.1 32 | # -- Timeout for gRPC calls from the csi-attacher to the controller 33 | timeout: 60s 34 | # -- Extra arguments for csi-attacher controller sidecar 35 | extraArgs: [] 36 | # -- Controller sidecar for volume expansion 37 | csiResizer: 38 | image: 39 | repository: registry.k8s.io/sig-storage/csi-resizer 40 | tag: v1.11.1 41 | # -- Extra arguments for csi-resizer controller sidecar 42 | extraArgs: [] 43 | # -- Controller sidecar for snapshots handling 44 | csiSnapshotter: 45 | image: 46 | repository: registry.k8s.io/sig-storage/csi-snapshotter 47 | tag: v8.0.1 48 | # -- Extra arguments for csi-snapshotter controller sidecar 49 | extraArgs: [] 50 | # -- Node sidecar for plugin registration 51 | csiNodeRegistrar: 52 | image: 53 | repository: registry.k8s.io/sig-storage/csi-node-driver-registrar 54 | tag: v2.9.0 55 | # -- Extra arguments for csi-node-registrar node sidecar 56 | extraArgs: [] 57 | controller: 58 | # -- Extra arguments for seagate-exos-x-csi-controller container 59 | extraArgs: [-v=0] 60 | node: 61 | # -- Extra arguments for seagate-exos-x-csi-node containers 62 | extraArgs: [-v=0] 63 | multipathd: 64 | # -- Extra arguments for multipathd containers 65 | extraArgs: [] 66 | # -- Container that convert CSI liveness probe to kubernetes liveness/readiness probe 67 | nodeLivenessProbe: 68 | 
image: 69 | repository: registry.k8s.io/sig-storage/livenessprobe 70 | tag: v2.12.0 71 | # -- Extra arguments for the node's liveness probe containers 72 | extraArgs: [] 73 | nodeServer: 74 | # -- Kubernetes nodeSelector field for seagate-exos-x-csi-node-server Pod 75 | nodeSelector: 76 | # -- Kubernetes nodeAffinity field for seagate-exos-x-csi-node-server Pod 77 | nodeAffinity: 78 | podMonitor: 79 | # -- Set a Prometheus operator PodMonitor resource (true or false) 80 | enabled: false 81 | serviceMonitor: 82 | # -- Set a Prometheus operator ServiceMonitor resource (true or false) 83 | enabled: false 84 | -------------------------------------------------------------------------------- /test/sanity-crc/sanity-crc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Usage: crc-sanity.sh 4 | # 5 | # Launch cli sanity in a crc pod. Must be logged in with oc before running. 6 | # Runs all csi-sanity test cases 7 | 8 | secretsTemplate="../secrets.template.yml" 9 | secrets="secrets.yml" 10 | volumeTemplate="../volume.template.yml" 11 | volume="volume.yml" 12 | 13 | sanity=/home/dwhite/github.com/csi-test/cmd/csi-sanity/csi-sanity 14 | 15 | set -e 16 | 17 | #make sure oc command is setup 18 | oc > /dev/null 19 | 20 | function setup { 21 | cd $(dirname $0) 22 | set -a; . ../.env; set +a 23 | 24 | echo "" 25 | 26 | envsubst < ${secretsTemplate} > ${secrets} 27 | echo "===== ${secrets} =====" 28 | cat ${secrets} 29 | echo "===== END =====" 30 | 31 | echo "" 32 | 33 | envsubst < ${volumeTemplate} > ${volume} 34 | echo "===== ${volume} =====" 35 | cat ${volume} 36 | echo "===== END =====" 37 | 38 | cp $sanity . 39 | } 40 | 41 | setup 42 | 43 | #Build and push the sanity container 44 | echo "===== Building Container =====" 45 | buildah bud -t localhost/seagate-exos-x-csi/csi-sanity . 
46 | podman login -u kubeadmin -p $(oc whoami -t) default-route-openshift-image-registry.apps-crc.testing --tls-verify=false 47 | podman tag localhost/seagate-exos-x-csi/csi-sanity default-route-openshift-image-registry.apps-crc.testing/seagate/csi-sanity 48 | podman push default-route-openshift-image-registry.apps-crc.testing/seagate/csi-sanity --tls-verify=false 49 | 50 | #Retrieve the controller UID, needed to mount the CSI socket from the CRC VM into the container 51 | controller_pod_name=$(oc get pods -n seagate -o name | grep seagate-exos-x-csi-controller-server) 52 | controller_pod_uid=$(oc get $controller_pod_name -o=jsonpath='{.metadata.uid}' -n seagate) 53 | sed "s/{{CONTROLLER_POD_UID}}/$controller_pod_uid/g" sanity-crc-template.yaml > sanity-crc.yaml 54 | 55 | set +e 56 | 57 | # #Run sanity 58 | echo "===== Deleting Old Sanity Pod =====" 59 | oc delete pod csi-sanity-crc 60 | 61 | echo "===== Creating Sanity Pod =====" 62 | oc create -f sanity-crc.yaml 63 | 64 | echo "Test pod is starting! Once running, use 'oc logs csi-sanity-crc' to get sanity output" 65 | 66 | counter=0 67 | success=0 68 | continue=1 69 | maxattempts=6 70 | 71 | while [ $continue ] 72 | do 73 | echo "" 74 | echo "Waiting for test pod to come online" 75 | 76 | testpodstatus=$(oc get pod csi-sanity-crc -o=jsonpath='{.status.phase}' -n seagate) 77 | echo $testpodstatus 78 | if [ "$testpodstatus" == "Running" ]; then 79 | echo "SUCCESS: test pod running" 80 | success=1 81 | break 82 | fi 83 | 84 | if [[ "$counter" -eq $maxattempts ]]; then 85 | echo "" 86 | echo "ERROR: Max attempts ($maxattempts) reached and test pod is not running." 
87 | echo "" 88 | oc get pod csi-sanity-crc 89 | break 90 | else 91 | sleep 5 92 | fi 93 | 94 | ((counter++)) 95 | done 96 | 97 | log_output_file="csi-sanity.log" 98 | if [ "$success" -ne 0 ]; then 99 | echo "csi sanity in progress, logs will be tailed to $log_output_file" 100 | oc logs csi-sanity-crc -f > $log_output_file & 101 | fi 102 | -------------------------------------------------------------------------------- /test/secrets.template.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 Seagate Technology LLC and/or its Affiliates 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | # For any questions about this software or licensing, 17 | # please email opensource@seagate.com or cortx-questions@seagate.com. 18 | 19 | # // CSISecrets consists of secrets used in CSI credentials. 
20 | # type CSISecrets struct { 21 | # CreateVolumeSecret map[string]string `yaml:"CreateVolumeSecret"` 22 | # DeleteVolumeSecret map[string]string `yaml:"DeleteVolumeSecret"` 23 | # ControllerPublishVolumeSecret map[string]string `yaml:"ControllerPublishVolumeSecret"` 24 | # ControllerUnpublishVolumeSecret map[string]string `yaml:"ControllerUnpublishVolumeSecret"` 25 | # ControllerValidateVolumeCapabilitiesSecret map[string]string `yaml:"ControllerValidateVolumeCapabilitiesSecret"` 26 | # NodeStageVolumeSecret map[string]string `yaml:"NodeStageVolumeSecret"` 27 | # NodePublishVolumeSecret map[string]string `yaml:"NodePublishVolumeSecret"` 28 | # CreateSnapshotSecret map[string]string `yaml:"CreateSnapshotSecret"` 29 | # DeleteSnapshotSecret map[string]string `yaml:"DeleteSnapshotSecret"` 30 | # ControllerExpandVolumeSecret map[string]string `yaml:"ControllerExpandVolumeSecret"` 31 | # ListSnapshotsSecret map[string]string `yaml:"ListSnapshotsSecret"` 32 | # } 33 | 34 | CreateVolumeSecret: 35 | username: $TEST_USERNAME 36 | password: $TEST_PASSWORD 37 | apiAddress: $TEST_IP 38 | DeleteVolumeSecret: 39 | username: $TEST_USERNAME 40 | password: $TEST_PASSWORD 41 | apiAddress: $TEST_IP 42 | ControllerPublishVolumeSecret: 43 | username: $TEST_USERNAME 44 | password: $TEST_PASSWORD 45 | apiAddress: $TEST_IP 46 | ControllerUnpublishVolumeSecret: 47 | username: $TEST_USERNAME 48 | password: $TEST_PASSWORD 49 | apiAddress: $TEST_IP 50 | ControllerValidateVolumeCapabilitiesSecret: 51 | username: $TEST_USERNAME 52 | password: $TEST_PASSWORD 53 | apiAddress: $TEST_IP 54 | NodeStageVolumeSecret: 55 | username: $TEST_USERNAME 56 | password: $TEST_PASSWORD 57 | apiAddress: $TEST_IP 58 | NodePublishVolumeSecret: 59 | username: $TEST_USERNAME 60 | password: $TEST_PASSWORD 61 | apiAddress: $TEST_IP 62 | CreateSnapshotSecret: 63 | username: $TEST_USERNAME 64 | password: $TEST_PASSWORD 65 | apiAddress: $TEST_IP 66 | DeleteSnapshotSecret: 67 | username: $TEST_USERNAME 68 | password: 
$TEST_PASSWORD 69 | apiAddress: $TEST_IP 70 | ControllerExpandVolumeSecret: 71 | username: $TEST_USERNAME 72 | password: $TEST_PASSWORD 73 | apiAddress: $TEST_IP 74 | ListSnapshotsSecret: 75 | username: $TEST_USERNAME 76 | password: $TEST_PASSWORD 77 | apiAddress: $TEST_IP 78 | -------------------------------------------------------------------------------- /pkg/common/system_test.go: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (c) 2021 Seagate Technology LLC and/or its Affiliates 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | // For any questions about this software or licensing, 17 | // please email opensource@seagate.com or cortx-questions@seagate.com. 18 | 19 | package common 20 | 21 | import ( 22 | "fmt" 23 | "testing" 24 | 25 | "github.com/container-storage-interface/spec/lib/go/csi" 26 | . 
"github.com/onsi/gomega" 27 | ) 28 | 29 | func init() { 30 | fmt.Printf("Test Setup:\n") 31 | fmt.Printf(" VolumeNameMaxLength = %d\n", VolumeNameMaxLength) 32 | fmt.Printf(" VolumePrefixMaxLength = %d\n", VolumePrefixMaxLength) 33 | fmt.Printf("\n") 34 | } 35 | 36 | func createRequestVolume(name string, prefix string) (csi.CreateVolumeRequest, error) { 37 | // Create a CSI CreateVolumeRequest and Response 38 | 39 | req := csi.CreateVolumeRequest{ 40 | Name: name, 41 | Parameters: map[string]string{VolumePrefixKey: prefix}, 42 | } 43 | 44 | return req, nil 45 | } 46 | 47 | func runTest(t *testing.T, idin string, idout string, prefix string) { 48 | id, err := TranslateName(idin, prefix) 49 | g := NewWithT(t) 50 | g.Expect(err).To(BeNil()) 51 | g.Expect(id).To(Equal(idout)) 52 | } 53 | 54 | func TestTranslate(t *testing.T) { 55 | 56 | // Test empty name 57 | runTest(t, "", "csi_", "csi") 58 | 59 | // Test with no prefix 60 | runTest(t, "pvc-03c551d9-7e77-43ff-993e-c2308d2f09a1", "03c551d97e7743ff993ec2308d2f09a1", "") 61 | runTest(t, "03c551d97e7743ff993ec2308d2f09a1", "03c551d97e7743ff993ec2308d2f09a1", "") 62 | runTest(t, "8d2f09a1", "8d2f09a1", "") 63 | runTest(t, "51d9-7e77-43ff-993e-c2308d2f09a1", "51d9-7e77-43ff-993e-c2308d2f09a1", "") 64 | 65 | // Test with prefix 66 | runTest(t, "pvc-03c551d9-7e77-43ff-993e-c2308d2f09a1", "csi_51d97e7743ff993ec2308d2f09a1", "csi") 67 | runTest(t, "pvc-51d9-7e77-43ff-993e-c2308d2f09a1", "csi_51d97e7743ff993ec2308d2f09a1", "csi") 68 | runTest(t, "51d9-7e77-43ff-993e-c2308d2f09a1", "csi_51d97e7743ff993ec2308d2f09a1", "csi") 69 | runTest(t, "51d97e7743ff993ec2308d2f09a1", "csi_51d97e7743ff993ec2308d2f09a1", "csi") 70 | runTest(t, "51d97e7743ff993ec2308d2f09a1", "csi_51d97e7743ff993ec2308d2f09a1", "csi_123") 71 | runTest(t, "pvc-51d9-7e77-43ff-993e-c2308d2f09a1", "cd_51d97e7743ff993ec2308d2f09a1", "cd") 72 | runTest(t, "pvc-51d9-7e77-43ff-993e-c2308d2f09a1", "c_51d97e7743ff993ec2308d2f09a1", "c") 73 | 74 | // Test with prefix 75 | 
runTest(t, "snapshot-03c551d9-7e77-43ff-993e-c2308d2f09a1", "csi_51d97e7743ff993ec2308d2f09a1", "csi") 76 | } 77 | 78 | func TestValidate(t *testing.T) { 79 | g := NewWithT(t) 80 | g.Expect(ValidateName("abcdefghijklmnopqrstuvwxyz")).To(BeTrue()) 81 | g.Expect(ValidateName("ABCDEFGHIJKLMNOPQRSTUVWXYZ")).To(BeTrue()) 82 | g.Expect(ValidateName("a b _ . - c")).To(BeTrue()) 83 | 84 | // Test unaccepable characters: " , < \ 85 | g.Expect(ValidateName("\"abc")).To(BeFalse()) 86 | g.Expect(ValidateName("abc,")).To(BeFalse()) 87 | g.Expect(ValidateName("abc unicode.MaxASCII { 39 | return false 40 | } 41 | } 42 | return true 43 | } 44 | 45 | // TranslateName converts the passed in volume name to the translated volume name 46 | func TranslateName(name, prefix string) (string, error) { 47 | 48 | klog.V(2).Infof("TranslateName VolumeNameMaxLength=%d name=[%d]%q prefix=[%d]%q", VolumeNameMaxLength, len(name), name, len(prefix), prefix) 49 | volumeName := name 50 | 51 | if len(prefix) == 0 { 52 | // If string is greater than max, truncate it, otherwise return original string 53 | if len(volumeName) > VolumeNameMaxLength { 54 | // Skip over 'pvc-' 55 | if len(volumeName) >= 4 && volumeName[0:4] == "pvc-" { 56 | volumeName = volumeName[4:] 57 | } 58 | // Skip over 'snapshot-' 59 | if len(volumeName) >= 9 && volumeName[0:9] == "snapshot-" { 60 | volumeName = volumeName[9:] 61 | } 62 | volumeName = strings.ReplaceAll(volumeName, "-", "") 63 | klog.V(2).Infof("volumeName=[%d]%q", len(volumeName), volumeName) 64 | if len(volumeName) > VolumeNameMaxLength { 65 | volumeName = volumeName[:VolumeNameMaxLength] 66 | } 67 | } 68 | } else { 69 | // Skip over 'pvc-' and remove all dashes 70 | uuid := volumeName 71 | if len(volumeName) >= 4 && volumeName[0:4] == "pvc-" { 72 | uuid = volumeName[4:] 73 | klog.Infof("TranslateName(pvc): uuid=%q", uuid) 74 | } 75 | if len(volumeName) >= 9 && volumeName[0:9] == "snapshot-" { 76 | uuid = volumeName[9:] 77 | klog.Infof("TranslateName(snapshot): 
uuid=%q", uuid) 78 | } 79 | uuid = strings.ReplaceAll(uuid, "-", "") 80 | 81 | // Verify that the prefix is the required length, and truncate as needed, add an underscore 82 | if len(prefix) > VolumePrefixMaxLength { 83 | klog.Warningf("StorageClass volPrefix will be truncated from %q to %q", prefix, prefix[:VolumePrefixMaxLength]) 84 | prefix = prefix[:VolumePrefixMaxLength] 85 | } 86 | prefix = prefix + "_" 87 | 88 | if len(prefix)+len(uuid) > VolumeNameMaxLength { 89 | truncate := VolumeNameMaxLength - len(prefix) 90 | volumeName = prefix + uuid[len(uuid)-truncate:] 91 | } else { 92 | volumeName = prefix + uuid 93 | } 94 | } 95 | 96 | klog.Infof("TranslateName %q[%d], prefix %q[%d], result %q[%d]", name, len(name), prefix, len(prefix), volumeName, len(volumeName)) 97 | 98 | return volumeName, nil 99 | } 100 | 101 | // VolumeIdGetName: Decode the augmented volume identifier and return the name only 102 | func VolumeIdGetName(volumeId string) (string, error) { 103 | tokens := strings.Split(volumeId, AugmentKey) 104 | 105 | if len(tokens) > 0 { 106 | return tokens[0], nil 107 | } else { 108 | return "", fmt.Errorf("Unable to retrieve volume name from (%s)", volumeId) 109 | } 110 | } 111 | 112 | // VolumeIdGetStorageProtocol: Decode the augmented volume identifier and return the storage protocol only 113 | func VolumeIdGetStorageProtocol(volumeId string) (string, error) { 114 | tokens := strings.Split(volumeId, AugmentKey) 115 | 116 | if len(tokens) > 1 { 117 | return tokens[1], nil 118 | } else { 119 | return "", fmt.Errorf("Unable to retrieve storage protocol from (%s)", volumeId) 120 | } 121 | } 122 | 123 | // VolumeIdGetWwn: Decode the augmented volume identifier and return the WWN 124 | func VolumeIdGetWwn(volumeId string) (string, error) { 125 | tokens := strings.Split(volumeId, AugmentKey) 126 | 127 | if len(tokens) > 2 { 128 | return tokens[2], nil 129 | } else { 130 | return "", fmt.Errorf("Unable to retrieve wwn from (%s)", volumeId) 131 | } 132 | } 133 | 
134 | // VolumeIdAugment: Extend the volume name by augmenting it with storage protocol 135 | func VolumeIdAugment(volumename, storageprotocol, wwn string) string { 136 | 137 | volumeId := volumename + AugmentKey + storageprotocol + AugmentKey + wwn 138 | klog.V(2).Infof("VolumeIdAugment: %s", volumeId) 139 | return volumeId 140 | } 141 | 142 | // We use IQN for Node ID, but IQN can contain colons which are not allowed in the topology map 143 | func GetTopologyCompliantNodeID(nodeID string) string { 144 | return strings.ReplaceAll(nodeID, ":", ".") 145 | } 146 | -------------------------------------------------------------------------------- /helm/csi-charts/templates/rbac.yaml: -------------------------------------------------------------------------------- 1 | # This YAML file contains all RBAC objects that are necessary to run external 2 | # CSI provisioner. 3 | # 4 | # In production, each CSI driver deployment has to be customized: 5 | # - to avoid conflicts, use non-default namespace and different names 6 | # for non-namespaced entities like the ClusterRole 7 | # - decide whether the deployment replicates the external CSI 8 | # provisioner, in which case leadership election must be enabled; 9 | # this influences the RBAC setup, see below 10 | 11 | apiVersion: v1 12 | kind: ServiceAccount 13 | metadata: 14 | name: csi-provisioner 15 | labels: 16 | {{ include "csidriver.labels" . | indent 4 }} 17 | 18 | --- 19 | kind: ClusterRole 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | metadata: 22 | name: external-provisioner-runner-systems 23 | labels: 24 | {{ include "csidriver.labels" . 
| indent 4 }} 25 | rules: 26 | - apiGroups: [""] 27 | resources: ["secrets"] 28 | verbs: ["get", "list"] 29 | - apiGroups: [""] 30 | resources: ["persistentvolumes"] 31 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] 32 | - apiGroups: [""] 33 | resources: ["persistentvolumeclaims"] 34 | verbs: ["get", "list", "watch", "update"] 35 | - apiGroups: [""] 36 | resources: ["persistentvolumeclaims/status"] 37 | verbs: ["update", "patch"] 38 | - apiGroups: ["storage.k8s.io"] 39 | resources: ["storageclasses"] 40 | verbs: ["get", "list", "watch"] 41 | - apiGroups: [""] 42 | resources: ["events"] 43 | verbs: ["list", "watch", "create", "update", "patch"] 44 | - apiGroups: ["snapshot.storage.k8s.io"] 45 | resources: ["volumesnapshots"] 46 | verbs: ["get", "list"] 47 | - apiGroups: ["snapshot.storage.k8s.io"] 48 | resources: ["volumesnapshotclasses"] 49 | verbs: ["get", "list", "watch"] 50 | - apiGroups: ["snapshot.storage.k8s.io"] 51 | resources: ["volumesnapshotcontents"] 52 | verbs: ["create", "get", "list", "watch", "update", "delete"] 53 | - apiGroups: ["snapshot.storage.k8s.io"] 54 | resources: ["volumesnapshotcontents/status"] 55 | verbs: ["update"] 56 | - apiGroups: ["storage.k8s.io"] 57 | resources: ["csinodes"] 58 | verbs: ["get", "list", "watch"] 59 | - apiGroups: [""] 60 | resources: ["nodes"] 61 | verbs: ["get", "list", "watch"] 62 | - apiGroups: ["storage.k8s.io"] 63 | resources: ["volumeattachments"] 64 | verbs: ["get", "list", "watch", "update", "patch"] 65 | - apiGroups: ["storage.k8s.io"] 66 | resources: ["volumeattachments/status"] 67 | verbs: ["get", "list", "watch", "update", "patch"] 68 | - apiGroups: [""] 69 | resources: ["pods"] 70 | verbs: ["get", "list", "watch"] 71 | 72 | --- 73 | kind: ClusterRoleBinding 74 | apiVersion: rbac.authorization.k8s.io/v1 75 | metadata: 76 | name: csi-provisioner-role-systems 77 | labels: 78 | {{ include "csidriver.labels" . 
| indent 4 }} 79 | subjects: 80 | - kind: ServiceAccount 81 | name: csi-provisioner 82 | namespace: {{ .Release.Namespace }} 83 | roleRef: 84 | kind: ClusterRole 85 | name: external-provisioner-runner-systems 86 | apiGroup: rbac.authorization.k8s.io 87 | 88 | --- 89 | # Provisioner must be able to work with endpoints in current namespace 90 | # if (and only if) leadership election is enabled 91 | kind: Role 92 | apiVersion: rbac.authorization.k8s.io/v1 93 | metadata: 94 | name: external-provisioner-cfg-systems 95 | labels: 96 | {{ include "csidriver.labels" . | indent 4 }} 97 | rules: 98 | # Only one of the following rules for endpoints or leases is required based on 99 | # what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases. 100 | - apiGroups: [""] 101 | resources: ["endpoints"] 102 | verbs: ["get", "watch", "list", "delete", "update", "create"] 103 | - apiGroups: ["coordination.k8s.io"] 104 | resources: ["leases"] 105 | verbs: ["get", "watch", "list", "delete", "update", "create"] 106 | {{ if .Values.pspAdmissionControllerEnabled }} 107 | - apiGroups: ["policy"] 108 | resources: ["podsecuritypolicies"] 109 | verbs: ["use"] 110 | resourceNames: 111 | - seagate-exos-x-csi 112 | {{ end }} 113 | 114 | --- 115 | kind: RoleBinding 116 | apiVersion: rbac.authorization.k8s.io/v1 117 | metadata: 118 | name: csi-provisioner-role-cfg-systems 119 | labels: 120 | {{ include "csidriver.labels" . | indent 4 }} 121 | subjects: 122 | - kind: ServiceAccount 123 | name: csi-provisioner 124 | roleRef: 125 | kind: Role 126 | name: external-provisioner-cfg-systems 127 | apiGroup: rbac.authorization.k8s.io 128 | 129 | {{ if .Values.pspAdmissionControllerEnabled }} 130 | --- 131 | apiVersion: v1 132 | kind: ServiceAccount 133 | metadata: 134 | name: csi-node-registrar 135 | labels: 136 | {{ include "csidriver.labels" . 
| indent 4 }} 137 | 138 | --- 139 | kind: Role 140 | apiVersion: rbac.authorization.k8s.io/v1 141 | metadata: 142 | name: csi-node-registrar-cfg-systems 143 | labels: 144 | {{ include "csidriver.labels" . | indent 4 }} 145 | rules: 146 | - apiGroups: ["policy"] 147 | resources: ["podsecuritypolicies"] 148 | verbs: ["use"] 149 | resourceNames: 150 | - systems-role 151 | 152 | --- 153 | kind: RoleBinding 154 | apiVersion: rbac.authorization.k8s.io/v1 155 | metadata: 156 | name: csi-node-registrar-role-cfg-systems 157 | labels: 158 | {{ include "csidriver.labels" . | indent 4 }} 159 | subjects: 160 | - kind: ServiceAccount 161 | name: csi-node-registrar 162 | roleRef: 163 | kind: Role 164 | name: csi-node-registrar-cfg-systems 165 | apiGroup: rbac.authorization.k8s.io 166 | {{ end }} 167 | -------------------------------------------------------------------------------- /pkg/node_service/node_servicepb/node_rpc_grpc.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 2 | // versions: 3 | // - protoc-gen-go-grpc v1.2.0 4 | // - protoc v4.22.2 5 | // source: pkg/node_service/node_servicepb/node_rpc.proto 6 | 7 | package node_servicepb 8 | 9 | import ( 10 | context "context" 11 | grpc "google.golang.org/grpc" 12 | codes "google.golang.org/grpc/codes" 13 | status "google.golang.org/grpc/status" 14 | ) 15 | 16 | // This is a compile-time assertion to ensure that this generated file 17 | // is compatible with the grpc package it is being compiled against. 18 | // Requires gRPC-Go v1.32.0 or later. 19 | const _ = grpc.SupportPackageIsVersion7 20 | 21 | // NodeServiceClient is the client API for NodeService service. 22 | // 23 | // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
24 | type NodeServiceClient interface { 25 | GetInitiators(ctx context.Context, in *InitiatorRequest, opts ...grpc.CallOption) (*Initiators, error) 26 | NotifyUnmap(ctx context.Context, in *UnmappedVolume, opts ...grpc.CallOption) (*Ack, error) 27 | } 28 | 29 | type nodeServiceClient struct { 30 | cc grpc.ClientConnInterface 31 | } 32 | 33 | func NewNodeServiceClient(cc grpc.ClientConnInterface) NodeServiceClient { 34 | return &nodeServiceClient{cc} 35 | } 36 | 37 | func (c *nodeServiceClient) GetInitiators(ctx context.Context, in *InitiatorRequest, opts ...grpc.CallOption) (*Initiators, error) { 38 | out := new(Initiators) 39 | err := c.cc.Invoke(ctx, "/node_service.NodeService/GetInitiators", in, out, opts...) 40 | if err != nil { 41 | return nil, err 42 | } 43 | return out, nil 44 | } 45 | 46 | func (c *nodeServiceClient) NotifyUnmap(ctx context.Context, in *UnmappedVolume, opts ...grpc.CallOption) (*Ack, error) { 47 | out := new(Ack) 48 | err := c.cc.Invoke(ctx, "/node_service.NodeService/NotifyUnmap", in, out, opts...) 49 | if err != nil { 50 | return nil, err 51 | } 52 | return out, nil 53 | } 54 | 55 | // NodeServiceServer is the server API for NodeService service. 56 | // All implementations must embed UnimplementedNodeServiceServer 57 | // for forward compatibility 58 | type NodeServiceServer interface { 59 | GetInitiators(context.Context, *InitiatorRequest) (*Initiators, error) 60 | NotifyUnmap(context.Context, *UnmappedVolume) (*Ack, error) 61 | mustEmbedUnimplementedNodeServiceServer() 62 | } 63 | 64 | // UnimplementedNodeServiceServer must be embedded to have forward compatible implementations. 
65 | type UnimplementedNodeServiceServer struct { 66 | } 67 | 68 | func (UnimplementedNodeServiceServer) GetInitiators(context.Context, *InitiatorRequest) (*Initiators, error) { 69 | return nil, status.Errorf(codes.Unimplemented, "method GetInitiators not implemented") 70 | } 71 | func (UnimplementedNodeServiceServer) NotifyUnmap(context.Context, *UnmappedVolume) (*Ack, error) { 72 | return nil, status.Errorf(codes.Unimplemented, "method NotifyUnmap not implemented") 73 | } 74 | func (UnimplementedNodeServiceServer) mustEmbedUnimplementedNodeServiceServer() {} 75 | 76 | // UnsafeNodeServiceServer may be embedded to opt out of forward compatibility for this service. 77 | // Use of this interface is not recommended, as added methods to NodeServiceServer will 78 | // result in compilation errors. 79 | type UnsafeNodeServiceServer interface { 80 | mustEmbedUnimplementedNodeServiceServer() 81 | } 82 | 83 | func RegisterNodeServiceServer(s grpc.ServiceRegistrar, srv NodeServiceServer) { 84 | s.RegisterService(&NodeService_ServiceDesc, srv) 85 | } 86 | 87 | func _NodeService_GetInitiators_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 88 | in := new(InitiatorRequest) 89 | if err := dec(in); err != nil { 90 | return nil, err 91 | } 92 | if interceptor == nil { 93 | return srv.(NodeServiceServer).GetInitiators(ctx, in) 94 | } 95 | info := &grpc.UnaryServerInfo{ 96 | Server: srv, 97 | FullMethod: "/node_service.NodeService/GetInitiators", 98 | } 99 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 100 | return srv.(NodeServiceServer).GetInitiators(ctx, req.(*InitiatorRequest)) 101 | } 102 | return interceptor(ctx, in, info, handler) 103 | } 104 | 105 | func _NodeService_NotifyUnmap_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 106 | in := new(UnmappedVolume) 107 | 
if err := dec(in); err != nil { 108 | return nil, err 109 | } 110 | if interceptor == nil { 111 | return srv.(NodeServiceServer).NotifyUnmap(ctx, in) 112 | } 113 | info := &grpc.UnaryServerInfo{ 114 | Server: srv, 115 | FullMethod: "/node_service.NodeService/NotifyUnmap", 116 | } 117 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 118 | return srv.(NodeServiceServer).NotifyUnmap(ctx, req.(*UnmappedVolume)) 119 | } 120 | return interceptor(ctx, in, info, handler) 121 | } 122 | 123 | // NodeService_ServiceDesc is the grpc.ServiceDesc for NodeService service. 124 | // It's only intended for direct use with grpc.RegisterService, 125 | // and not to be introspected or modified (even as a copy) 126 | var NodeService_ServiceDesc = grpc.ServiceDesc{ 127 | ServiceName: "node_service.NodeService", 128 | HandlerType: (*NodeServiceServer)(nil), 129 | Methods: []grpc.MethodDesc{ 130 | { 131 | MethodName: "GetInitiators", 132 | Handler: _NodeService_GetInitiators_Handler, 133 | }, 134 | { 135 | MethodName: "NotifyUnmap", 136 | Handler: _NodeService_NotifyUnmap_Handler, 137 | }, 138 | }, 139 | Streams: []grpc.StreamDesc{}, 140 | Metadata: "pkg/node_service/node_servicepb/node_rpc.proto", 141 | } 142 | -------------------------------------------------------------------------------- /example/testpod-start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source common.sh 4 | 5 | helmpath=/home/seagate/github.com/Seagate/seagate-exos-x-csi/helm/csi-charts/ 6 | 7 | # Make sure the helm directory exists 8 | if [ ! -d "$helmpath" ] 9 | then 10 | echo "" 11 | echo "ERROR: Helm path DOES NOT exist, helmpath=$helmpath" 12 | echo "NOTE: Update this script with the correct helm path." 
13 | echo "" 14 | exit 1 15 | fi 16 | 17 | if [ -z ${1+x} ]; then 18 | echo "" 19 | echo "Usage: testpod-start [target] [image:version]"; 20 | echo "Where:" 21 | echo " [target] - specifies a string used to install and run a particular test pod configuration." 22 | echo " [image:version] - specify a registry image for the csi driver image, overrides values.yaml settings" 23 | echo "" 24 | echo "Helm Path: $helmpath" 25 | echo "" 26 | echo "Example: testpod-start system1" 27 | echo "Example: testpod-start system1 docker.io/seagatecsi/seagate-exos-x-csi:v0.5.1" 28 | echo "" 29 | echo "Steps:" 30 | echo " 1) helm install test-release $helmpath -f $helmpath/values.yaml" 31 | echo " 2) kubectl create -f secret-system1.yaml" 32 | echo " 3) kubectl create -f storageclass-system1.yaml" 33 | echo " 4) kubectl create -f testpod-system1.yaml" 34 | echo "" 35 | exit 36 | else 37 | system=$1 38 | fi 39 | 40 | if [ -z "$2" ]; then 41 | registry="default" 42 | else 43 | registry=$2 44 | arrIN=(${registry//:/ }) 45 | image=${arrIN[0]} 46 | version=${arrIN[1]} 47 | fi 48 | 49 | setNamespace 50 | 51 | echo "[] testpod-start ($system) [$registry] [namespace=$namespace]" 52 | 53 | # 54 | # 1) Run helm install using local charts 55 | # 56 | if [ "$registry" == "default" ]; then 57 | banner "1) Run helm install using ($helmpath)" 58 | runCommand "helm install test-release --namespace $namespace $helmpath -f $helmpath/values.yaml" 59 | else 60 | banner "1) Run helm install using ($helmpath) --set image.repository=$image --set image.tag=$version" 61 | runCommand "helm install test-release --namespace $namespace $helmpath -f $helmpath/values.yaml --set image.repository=$image --set image.tag=$version" 62 | fi 63 | 64 | # Verify that the controller and node pods are running 65 | controllerid=$(kubectl get pods --namespace $namespace | grep controller | awk '{print $1}') 66 | nodeid=$(kubectl get pods --namespace $namespace | grep node | awk '{print $1}') 67 | 68 | counter=1 69 | success=0 
70 | continue=1 71 | maxattempts=6 72 | 73 | while [ $continue ] 74 | do 75 | echo "" 76 | echo "[$counter] Verify ($controllerid) and ($nodeid) are Running" 77 | 78 | runCommand "kubectl get pods -o wide --namespace $namespace" 79 | controllerstatus=$(kubectl get pods $controllerid --namespace $namespace | grep $controllerid | awk '{print $3}') 80 | nodestatus=$(kubectl get pods $nodeid --namespace $namespace | grep $nodeid | awk '{print $3}') 81 | 82 | if [ "$controllerstatus" == "Running" ] && [ "$nodestatus" == "Running" ]; then 83 | echo "SUCCESS: ($controllerid) and ($nodeid) are Running" 84 | success=1 85 | break 86 | fi 87 | 88 | if [[ "$counter" -eq $maxattempts ]]; then 89 | echo "" 90 | echo "ERROR: Max attempts ($maxattempts) reached and pods are not running." 91 | echo "" 92 | break 93 | else 94 | sleep 5 95 | fi 96 | 97 | ((counter++)) 98 | done 99 | 100 | if [ "$success" -eq 0 ]; then 101 | exit 102 | fi 103 | 104 | # 105 | # 2) Create secrets for the CSI Driver 106 | # 107 | secret=seagate-exos-x-csi-secrets 108 | 109 | banner "2) kubectl create -f secret-$system.yaml --namespace $namespace" 110 | runCommand "kubectl create -f secret-$system.yaml --namespace $namespace" 111 | runCommand "kubectl describe secrets $secret --namespace $namespace" 112 | 113 | if [[ "$?" -ne 0 ]]; then 114 | echo "" 115 | echo "ERROR: Secret ($secret) was NOT created successfully." 116 | echo "" 117 | exit 118 | fi 119 | 120 | # 121 | # 3) Create the Storage Class 122 | # 123 | storageclass=systems-storageclass 124 | 125 | banner "3) kubectl create -f storageclass-$system.yaml --namespace $namespace" 126 | runCommand "kubectl create -f storageclass-$system.yaml --namespace $namespace" 127 | runCommand "kubectl describe sc $storageclass --namespace $namespace" 128 | 129 | if [[ "$?" -eq 1 ]]; then 130 | echo "" 131 | echo "ERROR: StorageClass ($storageclass) was NOT created successfully." 
132 | echo "" 133 | exit 134 | fi 135 | 136 | # 137 | # 4) Create the test pod 138 | # 139 | testpod=test-pod 140 | 141 | banner "4) kubectl create -f testpod-$system.yaml --namespace $namespace" 142 | runCommand "kubectl create -f testpod-$system.yaml --namespace $namespace" 143 | 144 | counter=1 145 | success=0 146 | continue=1 147 | maxattempts=8 148 | 149 | while [ $continue ] 150 | do 151 | testpodstatus=$(kubectl get pods --namespace $namespace | grep $testpod | awk '{print $3}') 152 | testpodname=$(kubectl get pods --namespace $namespace | grep $testpod | awk '{print $1}') 153 | 154 | echo "" 155 | echo "[$counter] Verify ($testpodname) is Running" 156 | 157 | runCommand "kubectl get pods -o wide --namespace $namespace" 158 | 159 | if [ "$testpodstatus" == "Running" ]; then 160 | echo "SUCCESS: ($testpodname) is Running" 161 | success=1 162 | break 163 | fi 164 | 165 | if [[ "$counter" -eq $maxattempts ]]; then 166 | echo "" 167 | echo "ERROR: Max attempts ($maxattempts) reached and ($testpodname) is NOT running." 
168 | echo "" 169 | break 170 | else 171 | sleep 20 172 | fi 173 | 174 | ((counter++)) 175 | done 176 | 177 | if [[ "$success" -eq 0 ]]; then 178 | exit 179 | fi 180 | 181 | banner "SUCCESS: All steps succeeded" 182 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Seagate CSI dynamic provisioner for Kubernetes 2 | 3 | The Seagate Exos X CSI Driver supports the following storage arrays 4 | 5 | - Seagate Exos X and AssuredSAN (4006/5005/4005/3005) 6 | - Dell PowerVault ME4 and ME5 Series 7 | 8 | iSCSI, SAS, and FC host interfaces are supported for both block and filesystem mount types 9 | 10 | [![Go Report Card](https://goreportcard.com/badge/github.com/Seagate/seagate-exos-x-csi)](https://goreportcard.com/report/github.com/Seagate/seagate-exos-x-csi) 11 | 12 | ## Introduction 13 | 14 | The Seagate Exos X CSI Driver helps storage admins efficiently manage 15 | their storage within container platforms that support the CSI 16 | standard. Dealing with persistent storage on Kubernetes can be 17 | particularly cumbersome, especially when dealing with on-premises 18 | installations, or when the cloud-provider persistent storage solutions 19 | are not applicable. The Seagate CSI Driver is a direct result of 20 | customer demand to bring the ease of use of Seagate Exos X to DevOps 21 | practices, and demonstrates Seagate’s continued commitment to the 22 | Kubernetes ecosystem 23 | 24 | More information about Seagate Data Storage Systems can be found 25 | [online](https://www.seagate.com/products/storage/data-storage-systems/) 26 | 27 | ## This project 28 | 29 | This project implements the **Container Storage Interface** in order to facilitate dynamic provisioning of persistent volumes on a Kubernetes cluster. 30 | 31 | This CSI driver is an open-source project under the Apache 2.0 [license](./LICENSE). 
32 | 33 | ## Key Features 34 | - Manage persistent volumes on Exos X enclosures 35 | - Control multiple Exos X systems within a single Kubernetes cluster 36 | - Manage Exos X snapshots and clones, including restoring from snapshots 37 | - Clone, extend and manage persistent volumes created outside of the Exos CSI Driver 38 | - Collect usage and performance metrics for CSI driver usage and expose them via an open-source systems monitoring and alerting toolkit, such as Prometheus 39 | 40 | ## Installation 41 | 42 | ### Install iSCSI tools and multipath driver on your nodes 43 | 44 | `iscsid` and `multipathd` must be installed on every node. Check the 45 | installation method appropriate for your Linux distribution. The 46 | example below shows steps for Ubuntu Server but the process will be 47 | very similar for other GNU/Linux distributions. 48 | 49 | #### Ubuntu Installation procedure 50 | - Remove any containers that were running an earlier version of the Seagate Exos X CSI Driver. 51 | - Install required packages: 52 | 53 | ``` 54 | sudo apt update && sudo apt install open-iscsi scsitools multipath-tools -y 55 | ``` 56 | - Determine if any packages are required for your filesystem (ext3/ext4/xfs) choice and view current support: 57 | 58 | ``` 59 | cat /proc/filesystems 60 | ``` 61 | - Update /etc/multipath.conf. Check docs/iscsi/multipath.conf as a reference. In particular ensure your configuration includes these settings: 62 | ``` 63 | find_multipaths "greedy" 64 | user_friendly_names "no" 65 | ``` 66 | 67 | - Restart `multipathd`: 68 | 69 | ``` 70 | service multipath-tools restart 71 | ``` 72 | 73 | ### Deploy the provisioner to your Kubernetes cluster 74 | 75 | These examples assume you have already installed the [helm](https://helm.sh/docs/intro/install/) command. 76 | 77 | The easiest method for installing the driver is to use Helm to install 78 | the helm package from 79 | [Github](https://github.com/seagate/seagate-exos-x-csi/releases).
On 80 | the Releases page, right-click on the Helm Package and select "Copy 81 | Link Address". Choose a namespace in which to run 82 | the driver (in this example, _seagate_), and a name for the 83 | application (_exos-x-csi_) and then paste the link onto the end of 84 | this command. For example: 85 | ``` 86 | helm install --create-namespace -n seagate exos-x-csi <paste-helm-package-link-here> 87 | ``` 88 | 89 | Alternatively, you can download and unpack the [helm 90 | package](https://github.com/Seagate/seagate-exos-x-csi/releases/download/v1.6.3/seagate-exos-x-csi-1.6.3.tgz) 91 | and extract it: 92 | ``` 93 | wget https://github.com/Seagate/seagate-exos-x-csi/releases/download/v1.6.3/seagate-exos-x-csi-1.6.3.tgz 94 | tar xpzf seagate-exos-x-csi-1.6.3.tgz 95 | helm install --create-namespace -n seagate exos-x-csi seagate-exos-x-csi 96 | ``` 97 | or clone the Github repository and install from the helm/csi-charts folder: 98 | 99 | ``` 100 | git clone https://github.com/Seagate/seagate-exos-x-csi 101 | cd seagate-exos-x-csi 102 | helm install exos-x-csi -n seagate --create-namespace \ 103 | helm/csi-charts -f helm/csi-charts/values.yaml 104 | ``` 105 | 106 | #### To deploy the provisioner to an OpenShift cluster, run the following commands prior to using Helm: 107 | ``` 108 | oc create -f scc/exos-x-csi-access-scc.yaml --as system:admin 109 | oc adm policy add-scc-to-user exos-x-csi-access -z default -n NAMESPACE 110 | oc adm policy add-scc-to-user exos-x-csi-access -z csi-provisioner -n NAMESPACE 111 | ``` 112 | 113 | #### Configure your release 114 | 115 | - Update `helm/csi-charts/values.yaml` to match your configuration settings. 116 | - Update `example/secret-example1.yaml` with your storage controller credentials. Use `example/secret-example2-CHAP.yaml` if you wish to specify CHAP credentials as well. 117 | - Update `example/storageclass-example1.yaml` with your storage controller values.
Use `example/storageclass-example2-CHAP.yaml` if you are using CHAP authentication 118 | - Update `example/testpod-example1.yaml` with any of you new values. 119 | 120 | ## Documentation 121 | 122 | You can find more documentation in the [docs](./docs) directory. 123 | Check docs/Seagate_Exos_X_CSI_driver_functionality.ipynb for usage examples and configuration files. 124 | 125 | ## Command-line arguments 126 | 127 | You can have a list of all available command line flags using the `-help` switch. 128 | 129 | ### Logging 130 | 131 | Logging can be modified using the `-v` flag : 132 | 133 | - `-v 0` : Standard logs to follow what's going on (default if not specified) 134 | - `-v 9` : Debug logs (quite awful to see) 135 | 136 | For advanced logging configuration, see [klog](https://github.com/kubernetes/klog). 137 | 138 | ### Development 139 | 140 | You can start the drivers over TCP so your remote dev cluster can connect to them. 141 | 142 | ``` 143 | go run ./cmd/ -bind=tcp://0.0.0.0:10000 144 | ``` 145 | 146 | ## Testing 147 | 148 | You can run sanity checks by using the `sanity` helper script in the `test/` directory: 149 | 150 | ``` 151 | ./test/sanity 152 | ``` 153 | -------------------------------------------------------------------------------- /pkg/controller/snapshotter.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "math" 8 | "strconv" 9 | 10 | storageapitypes "github.com/Seagate/seagate-exos-x-api-go/v2/pkg/common" 11 | "github.com/Seagate/seagate-exos-x-csi/pkg/common" 12 | "github.com/container-storage-interface/spec/lib/go/csi" 13 | "google.golang.org/grpc/codes" 14 | "google.golang.org/grpc/status" 15 | "k8s.io/klog/v2" 16 | ) 17 | 18 | // CreateSnapshot creates a snapshot of the given volume 19 | func (controller *Controller) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { 20 
| 21 | parameters := req.GetParameters() 22 | snapshotName, err := common.TranslateName(req.GetName(), parameters[common.VolumePrefixKey]) 23 | if err != nil { 24 | return nil, status.Error(codes.InvalidArgument, "translate snapshot name contains invalid characters") 25 | } 26 | 27 | if common.ValidateName(snapshotName) == false { 28 | return nil, status.Error(codes.InvalidArgument, "snapshot name contains invalid characters") 29 | } 30 | 31 | sourceVolumeId, err := common.VolumeIdGetName(req.GetSourceVolumeId()) 32 | if sourceVolumeId == "" || err != nil { 33 | return nil, status.Error(codes.InvalidArgument, "snapshot SourceVolumeId is not valid") 34 | } 35 | 36 | respStatus, err := controller.client.CreateSnapshot(sourceVolumeId, snapshotName) 37 | if err != nil && respStatus.ReturnCode != storageapitypes.SnapshotAlreadyExists { 38 | return nil, err 39 | } 40 | 41 | // The expectation is that show snapshots will return a single array item for the snapshot created 42 | snapshots, _, err := controller.client.ShowSnapshots(snapshotName, "") 43 | if err != nil { 44 | return nil, err 45 | } 46 | 47 | var snapshot *csi.Snapshot 48 | for _, ss := range snapshots { 49 | if ss.ObjectName != "snapshot" { 50 | continue 51 | } 52 | 53 | snapshot, err = newSnapshotFromResponse(&ss) 54 | if err != nil { 55 | return nil, err 56 | } 57 | } 58 | 59 | if snapshot == nil { 60 | return nil, errors.New("snapshot not found") 61 | } 62 | 63 | if snapshot.SourceVolumeId != sourceVolumeId { 64 | return nil, status.Error(codes.AlreadyExists, "cannot validate volume with empty ID") 65 | } 66 | 67 | return &csi.CreateSnapshotResponse{Snapshot: snapshot}, nil 68 | } 69 | 70 | // DeleteSnapshot deletes a snapshot of the given volume 71 | func (controller *Controller) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { 72 | 73 | if req.SnapshotId == "" { 74 | return nil, status.Error(codes.InvalidArgument, "DeleteSnapshot snapshot id is 
required") 75 | } 76 | 77 | status, err := controller.client.DeleteSnapshot(req.SnapshotId) 78 | if err != nil { 79 | if status != nil && status.ReturnCode == storageapitypes.SnapshotNotFoundErrorCode { 80 | klog.Infof("snapshot %s does not exist, assuming it has already been deleted", req.SnapshotId) 81 | return &csi.DeleteSnapshotResponse{}, nil 82 | } 83 | return nil, err 84 | } 85 | return &csi.DeleteSnapshotResponse{}, nil 86 | } 87 | 88 | // ListSnapshots: list existing snapshots up to MaxEntries 89 | func (controller *Controller) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { 90 | sourceVolumeId, err := common.VolumeIdGetName(req.GetSourceVolumeId()) 91 | 92 | response, respStatus, err := controller.client.ShowSnapshots(req.SnapshotId, sourceVolumeId) 93 | // BadInputParam is returned from the controller when an invalid volume is specified, 94 | // so return an empty response object in this case 95 | if err != nil { 96 | if respStatus.ReturnCode == storageapitypes.BadInputParam { 97 | return &csi.ListSnapshotsResponse{ 98 | Entries: []*csi.ListSnapshotsResponse_Entry{}, 99 | NextToken: "", 100 | }, nil 101 | } else { 102 | return nil, err 103 | } 104 | } 105 | 106 | // StartingToken is an index from 1 to maximum, "" returns 0 107 | startingToken, err := strconv.Atoi(req.StartingToken) 108 | klog.V(2).Infof("ListSnapshots: MaxEntries=%v, StartingToken=%q|%d", req.MaxEntries, req.StartingToken, startingToken) 109 | 110 | snapshots := []*csi.ListSnapshotsResponse_Entry{} 111 | var count, total, next int32 = 0, 0, math.MaxInt32 112 | 113 | for _, object := range response { 114 | 115 | // Convert raw object into csi.Snapshot object 116 | snapshot, err := newSnapshotFromResponse(&object) 117 | 118 | // Only store snapshot objects 119 | if err == nil { 120 | total++ 121 | klog.V(2).Infof("snapshot[%d]: SnapshotId=%v, SourceVolumeId=%v", total, snapshot.SnapshotId, snapshot.SourceVolumeId) 122 | 123 | // 
Filter entries if StartingToken is provided 124 | if (req.StartingToken == "") || (req.StartingToken != "" && total >= int32(startingToken)) { 125 | 126 | // Only add entries up to the maximum 127 | if (req.MaxEntries == 0) || (count < req.MaxEntries) { 128 | snapshots = append(snapshots, &csi.ListSnapshotsResponse_Entry{Snapshot: snapshot}) 129 | count++ 130 | klog.V(2).Infof(" added[%d]: SnapshotId=%v, SourceVolumeId=%v", count, snapshot.SnapshotId, snapshot.SourceVolumeId) 131 | } 132 | // When needed, store the next index which is returned to the caller 133 | if (req.MaxEntries != 0) && (count == req.MaxEntries) && (next == math.MaxInt32) { 134 | next = total + 1 135 | klog.V(2).Infof("next=%v", next) 136 | } 137 | } 138 | } 139 | } 140 | 141 | klog.V(2).Infof("ListSnapshots[%d]: %v", count, snapshots) 142 | 143 | // Mark the next token if there are snapshot entries remaining 144 | nextToken := "" 145 | if (req.MaxEntries != 0) && (next <= total) { 146 | nextToken = strconv.FormatInt(int64(next), 10) 147 | klog.V(2).Infof("next=%v, nextToken=%q", next, nextToken) 148 | } 149 | 150 | return &csi.ListSnapshotsResponse{ 151 | Entries: snapshots, 152 | NextToken: nextToken, 153 | }, nil 154 | } 155 | 156 | func newSnapshotFromResponse(snapshot *storageapitypes.SnapshotObject) (*csi.Snapshot, error) { 157 | if snapshot.ObjectName != "snapshot" { 158 | return nil, fmt.Errorf("not a snapshot object, type is %v", snapshot.ObjectName) 159 | } 160 | 161 | klog.InfoS("csi snapshot info", "snapshot", snapshot.Name, "volume", snapshot.MasterVolumeName, "creationTime", snapshot.CreationTime) 162 | 163 | return &csi.Snapshot{ 164 | SizeBytes: snapshot.TotalSizeNumeric, 165 | SnapshotId: snapshot.Name, 166 | SourceVolumeId: snapshot.MasterVolumeName, 167 | CreationTime: snapshot.CreationTime, 168 | ReadyToUse: true, 169 | }, nil 170 | } 171 | -------------------------------------------------------------------------------- /pkg/common/driver.go: 
-------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "context" 5 | "net" 6 | "runtime" 7 | "strings" 8 | "sync" 9 | "syscall" 10 | "time" 11 | 12 | "github.com/Seagate/seagate-exos-x-csi/pkg/exporter" 13 | "github.com/container-storage-interface/spec/lib/go/csi" 14 | "github.com/google/uuid" 15 | grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" 16 | "github.com/prometheus/client_golang/prometheus" 17 | "google.golang.org/grpc" 18 | "k8s.io/klog/v2" 19 | ) 20 | 21 | // PluginName is the public name to be used in storage class etc. 22 | const PluginName = "csi-exos-x.seagate.com" 23 | 24 | // Configuration constants 25 | const ( 26 | AugmentKey = "##" 27 | FsTypeConfigKey = "fsType" 28 | PoolConfigKey = "pool" 29 | APIAddressConfigKey = "apiAddress" 30 | APIAddressBConfigKey = "apiAddressB" 31 | UsernameSecretKey = "username" 32 | PasswordSecretKey = "password" 33 | CHAPUsernameKey = "CHAPusername" 34 | CHAPSecretKey = "CHAPpassword" 35 | CHAPUsernameInKey = "CHAPusernameIn" 36 | CHAPPasswordInKey = "CHAPpasswordIn" 37 | StorageClassAnnotationKey = "storageClass" 38 | VolumePrefixKey = "volPrefix" 39 | WWNs = "wwns" 40 | StorageProtocolKey = "storageProtocol" 41 | StorageProtocolISCSI = "iscsi" 42 | StorageProtocolFC = "fc" 43 | StorageProtocolSAS = "sas" 44 | TopologyInitiatorPrefix = "com.seagate-exos-x-csi" 45 | TopologySASInitiatorLabel = "sas-address" 46 | TopologyFCInitiatorLabel = "fc-address" 47 | TopologyNodeIdentifier = "node-id" 48 | TopologyNodeIDKey = TopologyInitiatorPrefix + "/" + TopologyNodeIdentifier 49 | 50 | MaximumLUN = 255 51 | VolumeNameMaxLength = 31 52 | VolumePrefixMaxLength = 3 53 | 54 | //If changed, must also be updated in helm charts 55 | NodeIPEnvVar = "CSI_NODE_IP" 56 | NodeNameEnvVar = "CSI_NODE_NAME" 57 | NodeServicePortEnvVar = "CSI_NODE_SERVICE_PORT" 58 | ) 59 | 60 | var SupportedAccessModes = [2]csi.VolumeCapability_AccessMode_Mode{ 61 | 
csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, 62 | csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY, 63 | } 64 | 65 | // Driver contains main resources needed by the driver and references the underlying specific driver 66 | type Driver struct { 67 | Server *grpc.Server 68 | 69 | socket net.Listener 70 | exporter *exporter.Exporter 71 | } 72 | 73 | // WithSecrets is an interface for structs with secrets 74 | type WithSecrets interface { 75 | GetSecrets() map[string]string 76 | } 77 | 78 | // WithParameters is an interface for structs with parameters 79 | type WithParameters interface { 80 | GetParameters() map[string]string 81 | } 82 | 83 | // WithVolumeCaps is an interface for structs with volume capabilities 84 | type WithVolumeCaps interface { 85 | GetVolumeCapabilities() *[]*csi.VolumeCapability 86 | } 87 | 88 | // NewDriver is a convenience function for creating an abstract driver 89 | func NewDriver(collectors ...prometheus.Collector) *Driver { 90 | exporter := exporter.New(9842) 91 | 92 | for _, collector := range collectors { 93 | exporter.RegisterCollector(collector) 94 | } 95 | 96 | return &Driver{exporter: exporter} 97 | } 98 | 99 | var routineTimers = map[string]time.Time{} 100 | 101 | func (driver *Driver) InitServer(unaryServerInterceptors ...grpc.UnaryServerInterceptor) { 102 | interceptors := append([]grpc.UnaryServerInterceptor{ 103 | func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { 104 | start := time.Now() 105 | resp, err := handler(ctx, req) 106 | driver.exporter.Collector.IncCSIRPCCall(info.FullMethod, err == nil) 107 | driver.exporter.Collector.AddCSIRPCCallDuration(info.FullMethod, time.Since(start)) 108 | return resp, err 109 | }, 110 | }, unaryServerInterceptors...) 
111 | 112 | driver.Server = grpc.NewServer( 113 | grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(interceptors...)), 114 | ) 115 | } 116 | 117 | var routineDepth = 0 118 | var mu sync.Mutex 119 | var useMutex = false 120 | 121 | func NewLogRoutineServerInterceptor(shouldLogRoutine func(string) bool) grpc.UnaryServerInterceptor { 122 | return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { 123 | if shouldLogRoutine(info.FullMethod) { 124 | uuid := uuid.New().String() 125 | shortuuid := uuid[strings.LastIndex(uuid, "-")+1:] 126 | klog.Infof("=== [ROUTINE REQUEST] [%d] %s (%s) <0s> ===", routineDepth, info.FullMethod, shortuuid) 127 | routineTimers[shortuuid] = time.Now() 128 | if useMutex { 129 | mu.Lock() 130 | } 131 | routineDepth++ 132 | duration := time.Since(routineTimers[shortuuid]) 133 | klog.Infof("=== [ROUTINE START] [%d] %s (%s) <%s> ===", routineDepth, info.FullMethod, shortuuid, duration) 134 | defer func() { 135 | routineDepth-- 136 | duration := time.Since(routineTimers[shortuuid]) 137 | klog.Infof("=== [ROUTINE END] [%d] %s (%s) <%s> ===", routineDepth, info.FullMethod, shortuuid, duration) 138 | delete(routineTimers, shortuuid) 139 | if useMutex { 140 | mu.Unlock() 141 | } 142 | }() 143 | } 144 | 145 | result, err := handler(ctx, req) 146 | if err != nil { 147 | klog.Error(err) 148 | } 149 | 150 | return result, err 151 | } 152 | } 153 | 154 | // Start does the boilerplate stuff for starting the driver 155 | // it loads its configuration from cli flags 156 | func (driver *Driver) Start(bind string) { 157 | 158 | var ll klog.Level = 0 159 | for i := 0; i < 10; i++ { 160 | if klog.V(klog.Level(i)).Enabled() { 161 | ll = klog.Level(i) 162 | } else { 163 | break 164 | } 165 | } 166 | 167 | klog.Infof("starting driver on %s (%s) [level %d]\n\n", runtime.GOOS, runtime.GOARCH, ll) 168 | 169 | parts := strings.Split(bind, "://") 170 | if len(parts) < 2 { 171 | klog.Fatal("please 
specify a protocol in your bind URI (e.g. \"tcp://\")") 172 | } 173 | 174 | if parts[0][:4] == "unix" { 175 | syscall.Unlink(parts[1]) 176 | } 177 | socket, err := net.Listen(parts[0], parts[1]) 178 | if err != nil { 179 | klog.Fatal(err) 180 | } 181 | driver.socket = socket 182 | 183 | go func() { 184 | driver.exporter.ListenAndServe() 185 | }() 186 | 187 | klog.Infof("driver listening on %s\n\n", bind) 188 | driver.Server.Serve(socket) 189 | } 190 | 191 | // Stop shuts down the driver 192 | func (driver *Driver) Stop() { 193 | klog.Info("gracefully stopping...") 194 | driver.Server.GracefulStop() 195 | driver.socket.Close() 196 | driver.exporter.Shutdown() 197 | } 198 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: help all bin controller node test openshift push clean 2 | 3 | VENDOR := seagate 4 | GITHUB_ORG := Seagate 5 | # Project name, without vendor 6 | NAME := exos-x-csi 7 | # Project name, including vendor 8 | PROJECT := $(VENDOR)-$(NAME) 9 | GITHUB_URL := github.com/$(GITHUB_ORG)/$(PROJECT) 10 | NAMESPACE := $(VENDOR) 11 | 12 | ifdef DOCKER_HUB_REPOSITORY 13 | DOCKER_HUB_REPOSITORY := $(DOCKER_HUB_REPOSITORY) 14 | else 15 | DOCKER_HUB_REPOSITORY := ghcr.io/seagate 16 | endif 17 | 18 | # Note: the version number takes the form "v1.2.3" when used as a repository tag, but 19 | # appears as "1.2.3" in other contexts such as the Helm chart. 
20 | ifdef VERSION 21 | VERSION := $(VERSION) 22 | else 23 | VERSION := v1.10.0 24 | endif 25 | HELM_VERSION := $(subst v,,$(VERSION)) 26 | VERSION_FLAG = -X $(GITHUB_URL)/pkg/common.Version=$(VERSION) 27 | 28 | ifndef BIN 29 | BIN = $(PROJECT) 30 | endif 31 | 32 | # $HELM_KEY must be the name of a secret key in the invoker's default keyring if package is to be signed 33 | HELM_KEY := css-host-software 34 | ifneq (,$(HELM_KEY)) 35 | HELM_KEYRING := ~/.gnupg/secring.gpg 36 | HELM_SIGN := --sign --key $(HELM_KEY) --keyring $(HELM_KEYRING) 37 | endif 38 | HELM_PACKAGE := $(BIN)-$(HELM_VERSION).tgz 39 | HELM_IMAGE_REPO := $(DOCKER_HUB_REPOSITORY)/$(BIN) 40 | IMAGE = $(DOCKER_HUB_REPOSITORY)/$(BIN):$(VERSION) 41 | 42 | help: 43 | @echo "" 44 | @echo "Build Targets:" 45 | @echo "-----------------------------------------------------------------------------------" 46 | @echo "make all - clean, build openshift docker image, push to registry" 47 | @echo "make bin - create controller and node driver binaries" 48 | @echo "make clean - remove '$(BIN)-controller' and '$(BIN)-node'" 49 | @echo "make controller - create controller driver image ($(BIN)-controller)" 50 | @echo "make helm-package - create signed helm package using HELM_VERSION, HELM_KEY environment variables" 51 | @echo "make node - create node driver image ($(BIN)-node)" 52 | @echo "make openshift - Create OpenShift-certification candidate image ($(IMAGE))" 53 | @echo "make push - push the docker image to '$(DOCKER_HUB_REPOSITORY)'" 54 | @echo "make test - build test/sanity" 55 | @echo "" 56 | 57 | all: clean openshift push 58 | 59 | bin: controller node 60 | 61 | 62 | protoc: 63 | @echo "" 64 | @echo "[] protocol buffers" 65 | protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative ./pkg/node_service/node_servicepb/node_rpc.proto 66 | 67 | controller: 68 | @echo "" 69 | @echo "[] controller" 70 | go build -v -ldflags "$(VERSION_FLAG)" -o $(BIN)-controller ./cmd/controller 71 | 72 | node: 73 | @echo "" 74 | @echo "[] node" 75 | go build -v -ldflags "$(VERSION_FLAG)" -o $(BIN)-node ./cmd/node 76 | 77 | test: 78 | @echo "" 79 | @echo "[] test" 80 | ./test/sanity 81 | 82 | push: 83 | @echo "" 84 | @echo "[] push" 85 | docker push $(IMAGE) 86 | 87 | pull: 88 | @echo "" 89 | @echo "[] pull" 90 | docker pull $(IMAGE) 91 | 92 | clean: 93 | @echo "" 94 | @echo "[] clean" 95 | rm -vf $(BIN)-controller $(BIN)-node *.zip *.tgz *.prov helm/$(BIN)-$(HELM_VERSION)* 96 | 97 | ######################## Openshift certification stuff ######################## 98 | 99 | openshift: 100 | @echo "" 101 | @echo "[] openshift" 102 | sed < Dockerfile.redhat > Dockerfile.tmp \ 103 | -e 's/^ARG version=.*/ARG version=$(VERSION)/' \ 104 | -e 's/^ARG vcs_ref=.*/ARG vcs_ref=$(strip $(shell git rev-parse HEAD))/' \ 105 | -e 's/^ARG build_date=.*/ARG build_date=$(strip $(shell date --utc -Iseconds))/' 106 | cmp Dockerfile.redhat Dockerfile.tmp && rm Dockerfile.tmp || mv Dockerfile.tmp Dockerfile.redhat 107 | docker build -f Dockerfile.redhat -t $(IMAGE) . 108 | docker inspect $(IMAGE) 109 | 110 | # Makefile.secrets should include the following lines: 111 | # 112 | # PYXIS_API_TOKEN= 113 | # REGISTRY_KEY= (see ) 114 | # 115 | # For more info, see the CSI Driver "Re-certifying" OneNote page, or 116 | # https://connect.redhat.com/account/api-keys?extIdCarryOver=true&sc_cid=701f2000001OH7JAAW 117 | # https://connect.redhat.com/projects/610494ea40182fa9651cdab0/setup-preflight 118 | # 119 | # Make sure this file does not get checked in to git! Note that for automation purposes you can 120 | # just pass these variables in the environment instead of a file. 
121 | -include Makefile.secrets 122 | 123 | PREFLIGHT=../openshift-preflight/preflight-linux-amd64 # Path to the preflight executable 124 | PREFLIGHT_REGISTRY=localhost:5000 125 | PREFLIGHT_IMAGE=$(PREFLIGHT_REGISTRY)/$(BIN):$(VERSION) 126 | REDHAT_PROJECT_ID=610494ea40182fa9651cdab0 127 | REDHAT_IMAGE_BASE=quay.io/redhat-isv-containers/$(REDHAT_PROJECT_ID) 128 | REDHAT_IMAGE=$(REDHAT_IMAGE_BASE):$(VERSION) 129 | REDHAT_IMAGE_LATEST=$(REDHAT_IMAGE_BASE):latest 130 | PREFLIGHT_OPTIONS= 131 | PREFLIGHT_SUBMIT= 132 | PFLT_DOCKERCONFIG=.preflight-auth.json 133 | 134 | preflight: $(PFLT_DOCKERCONFIG) 135 | -docker start registry || -docker run -d -p 5000:5000 --name registry registry:2 # make sure local registry is running 136 | docker tag $(IMAGE) $(PREFLIGHT_IMAGE) 137 | docker push $(PREFLIGHT_IMAGE) 138 | PFLT_DOCKERCONFIG=$(PFLT_DOCKERCONFIG) $(PREFLIGHT) check container $(PREFLIGHT_SUBMIT) $(PREFLIGHT_OPTIONS) $(PREFLIGHT_IMAGE) 139 | 140 | preflight-submit: .preflight-auth.json 141 | $(MAKE) preflight PREFLIGHT_SUBMIT="--submit" \ 142 | PREFLIGHT_OPTIONS="--pyxis-api-token=$(PYXIS_API_TOKEN) --certification-project-id=$(REDHAT_PROJECT_ID)" \ 143 | PREFLIGHT_IMAGE=$(REDHAT_IMAGE) PFLT_DOCKERCONFIG=$(PFLT_DOCKERCONFIG) 144 | 145 | tag-latest: 146 | podman tag $(REDHAT_IMAGE) $(REDHAT_IMAGE_LATEST) 147 | podman push $(REDHAT_IMAGE_LATEST) 148 | 149 | preflight-push: 150 | podman login -u redhat-isv-containers+$(REDHAT_PROJECT_ID)-robot -p $(REGISTRY_KEY) quay.io 151 | podman tag $(IMAGE) $(REDHAT_IMAGE) 152 | podman push $(REDHAT_IMAGE) 153 | 154 | .preflight-auth.json: 155 | podman login -u redhat-isv-containers+610494ea40182fa9651cdab0-robot -p $(REGISTRY_KEY) --authfile "$@" quay.io 156 | 157 | ######################## Helm package creation ######################## 158 | 159 | 160 | # Create a helm package that can be installed from a remote HTTPS URL with, e.g. 
161 | # helm install exos-x-csi https:////seagate-exos-x-csi-1.0.0.tgz 162 | helm-package: $(HELM_PACKAGE) 163 | 164 | # Update version numbers in the Helm chart. If yq is not installed, try "go install github.com/mikefarah/yq/v4@latest" 165 | update-chart: $(MAKEFILE) 166 | yq -i '.image.tag="$(VERSION)" | .image.repository="$(HELM_IMAGE_REPO)"' helm/csi-charts/values.yaml 167 | 168 | # Make a helm package. If yq is installed, the chart will be updated to reflect version $(VERSION) 169 | # To create a package without signing it, specify "make helm-package HELM_KEY=" 170 | # Note that helm doesn't support GPG v2.1 kbx files; if signing fails, try: 171 | # gpg --export-secret-keys > ~/.gnupg/secring.gpg 172 | $(HELM_PACKAGE): 173 | echo HELM_PACKAGE:=$@ 174 | ( which yq >/dev/null && $(MAKE) update-chart ) || true 175 | cd helm; helm package --app-version "$(HELM_VERSION)" --version "$(HELM_VERSION)" $(HELM_SIGN) $$PWD/csi-charts 176 | cp -p helm/$@* . 177 | 178 | # Verify a signed package create a zip file containing the package and its provenance file 179 | signed-helm-package: $(HELM_PACKAGE) 180 | helm verify --keyring $(HELM_KEYRING) $< 181 | zip -r $(subst .tgz,-signed-helm-package.zip,$<) $< $<.prov 182 | 183 | # This will allow the package to be installed directly from Github, with the command: 184 | # helm install -n $(NAMESPACE) exos-x-csi https://$(GITHUB_URL)/releases/download/$(VERSION)/$(PROJECT)-$(HELM_VERSION).tgz 185 | helm-upload: $(HELM_PACKAGE) 186 | gh release upload $(VERSION) '$^#Helm Package' -R $(GITHUB_ORG)/$(PROJECT) 187 | @echo Install package with: 188 | @echo ' ' helm install -n $(NAMESPACE) $(NAME) https://$(GITHUB_URL)/releases/download/$(VERSION)/$(PROJECT)-$(HELM_VERSION).tgz 189 | 190 | -------------------------------------------------------------------------------- /pkg/controller/provisioner.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 
| "fmt" 6 | "strings" 7 | 8 | storageapi "github.com/Seagate/seagate-exos-x-api-go/v2/pkg/api" 9 | storageapitypes "github.com/Seagate/seagate-exos-x-api-go/v2/pkg/common" 10 | 11 | "github.com/Seagate/seagate-exos-x-csi/pkg/common" 12 | "github.com/Seagate/seagate-exos-x-csi/pkg/storage" 13 | "github.com/container-storage-interface/spec/lib/go/csi" 14 | "google.golang.org/grpc/codes" 15 | "google.golang.org/grpc/status" 16 | "k8s.io/klog/v2" 17 | ) 18 | 19 | // Extract available SAS addresses for Nodes from topology segments 20 | // This will contain all SAS initiators for all nodes unless the storage class 21 | // has specified allowed or preferred topologies 22 | func parseTopology(topologies []*csi.Topology, storageProtocol string, parameters *map[string]string) ([]*csi.Topology, error) { 23 | klog.V(5).Infof("parseTopology: %v", topologies) 24 | 25 | accessibleTopology := []*csi.Topology{} 26 | hasInitiators := false 27 | for _, topo := range topologies { 28 | 29 | segments := topo.GetSegments() 30 | 31 | nodeID := segments[common.TopologyNodeIDKey] 32 | hasInitiators = false 33 | for key, val := range segments { 34 | if strings.Contains(key, common.TopologySASInitiatorLabel) || strings.Contains(key, common.TopologyFCInitiatorLabel) { 35 | hasInitiators = true 36 | newKey := strings.TrimPrefix(key, common.TopologyInitiatorPrefix) 37 | // insert the node ID into the key so we can retrieve the node specific addresses after scheduling by the CO 38 | newKey = nodeID + newKey 39 | (*parameters)[newKey] = val 40 | } 41 | } 42 | if hasInitiators { 43 | accessibleTopology = append(accessibleTopology, topo) 44 | } 45 | 46 | } 47 | if len(accessibleTopology) == 0 { 48 | accessibleTopology = nil 49 | } 50 | return accessibleTopology, nil 51 | } 52 | 53 | // CreateVolume creates a new volume from the given request. The function is idempotent. 
54 | func (controller *Controller) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { 55 | 56 | parameters := req.GetParameters() 57 | 58 | volumeName, err := common.TranslateName(req.GetName(), parameters[common.VolumePrefixKey]) 59 | if err != nil { 60 | return nil, status.Error(codes.InvalidArgument, "translate volume name contains invalid characters") 61 | } 62 | 63 | // Extract the storage interface protocol to be used for this volume (iscsi, fc, sas, etc) 64 | storageProtocol := storage.ValidateStorageProtocol(parameters[common.StorageProtocolKey]) 65 | 66 | if !common.ValidateName(volumeName) { 67 | return nil, status.Error(codes.InvalidArgument, "volume name contains invalid characters") 68 | } 69 | 70 | volumeCapabilities := req.GetVolumeCapabilities() 71 | if err := isValidVolumeCapabilities(volumeCapabilities); err != nil { 72 | return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("CreateVolume Volume capabilities not valid: %v", err)) 73 | } 74 | 75 | size := req.GetCapacityRange().GetRequiredBytes() 76 | sizeStr := getSizeStr(size) 77 | pool := parameters[common.PoolConfigKey] 78 | wwn := "" 79 | 80 | klog.Infof("creating volume %q (size %s) pool %q using protocol (%s)", volumeName, sizeStr, pool, storageProtocol) 81 | 82 | volumeExists, err := controller.client.CheckVolumeExists(volumeName, size) 83 | if err != nil { 84 | return nil, err 85 | } 86 | 87 | if !volumeExists { 88 | var sourceId string 89 | 90 | if volume := req.VolumeContentSource.GetVolume(); volume != nil { 91 | sourceId = volume.VolumeId 92 | klog.Infof("-- GetVolume sourceID %q", sourceId) 93 | } 94 | 95 | if snapshot := req.VolumeContentSource.GetSnapshot(); sourceId == "" && snapshot != nil { 96 | sourceId = snapshot.SnapshotId 97 | klog.Infof("-- GetSnapshot sourceID %q", sourceId) 98 | } 99 | 100 | if sourceId != "" { 101 | sourceName, err := common.VolumeIdGetName(sourceId) 102 | if err != nil { 103 | return nil, err 104 | 
} 105 | apiStatus, err2 := controller.client.CopyVolume(sourceName, volumeName, parameters[common.PoolConfigKey]) 106 | if err2 != nil { 107 | klog.Infof("-- CopyVolume apiStatus.ReturnCode %v", apiStatus.ReturnCode) 108 | if apiStatus != nil && apiStatus.ReturnCode == storageapitypes.SnapshotNotFoundErrorCode { 109 | return nil, status.Errorf(codes.NotFound, "Snapshot source (%s) not found", sourceId) 110 | } else { 111 | return nil, err2 112 | } 113 | } 114 | 115 | } else { 116 | volume, apiStatus, err2 := controller.client.CreateVolume(volumeName, sizeStr, parameters[common.PoolConfigKey]) 117 | if err2 != nil { 118 | return nil, err2 119 | } else if apiStatus.ResponseTypeNumeric != 0 { 120 | return nil, status.Errorf(codes.Unknown, "Error creating volume: %s", apiStatus.Response) 121 | } 122 | if volume != nil { 123 | wwn = volume.Wwn 124 | } 125 | } 126 | } 127 | if wwn == "" { 128 | wwn, err = controller.client.GetVolumeWwn(volumeName) 129 | } 130 | if err != nil { 131 | klog.ErrorS(err, "Error retrieving WWN of new volume", "volumeName", volumeName) 132 | return nil, err 133 | } 134 | 135 | if storageProtocol == common.StorageProtocolISCSI { 136 | // Fill iSCSI context parameters 137 | targetId, err1 := storageapi.GetTargetId(controller.client.Info, "iSCSI") 138 | if err1 != nil { 139 | klog.Errorf("++ GetTargetId error: %v", err1) 140 | } 141 | req.GetParameters()["iqn"] = targetId 142 | portals, err2 := controller.client.GetPortals() 143 | if err2 != nil { 144 | klog.Errorf("++ GetPortals error: %v", err2) 145 | } 146 | req.GetParameters()["portals"] = portals 147 | klog.V(2).Infof("Storing iSCSI iqn: %s, portals: %v", targetId, portals) 148 | } 149 | 150 | volumeId := common.VolumeIdAugment(volumeName, storageProtocol, wwn) 151 | 152 | volume := &csi.CreateVolumeResponse{ 153 | Volume: &csi.Volume{ 154 | VolumeId: volumeId, 155 | VolumeContext: parameters, 156 | CapacityBytes: req.GetCapacityRange().GetRequiredBytes(), 157 | ContentSource: 
req.GetVolumeContentSource(), 158 | }, 159 | } 160 | 161 | klog.Infof("created volume %s (%s)", volumeId, sizeStr) 162 | 163 | // Log struct with field names 164 | klog.V(8).Infof("created volume %+v", volume) 165 | return volume, nil 166 | } 167 | 168 | // DeleteVolume deletes the given volume. The function is idempotent. 169 | func (controller *Controller) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { 170 | if len(req.GetVolumeId()) == 0 { 171 | return nil, status.Error(codes.InvalidArgument, "cannot delete volume with empty ID") 172 | } 173 | volumeName, _ := common.VolumeIdGetName(req.GetVolumeId()) 174 | klog.Infof("deleting volume %s", volumeName) 175 | 176 | respStatus, err := controller.client.DeleteVolume(volumeName) 177 | if err != nil { 178 | if respStatus != nil { 179 | if respStatus.ReturnCode == storageapitypes.VolumeNotFoundErrorCode { 180 | klog.Infof("volume %s does not exist, assuming it has already been deleted", volumeName) 181 | return &csi.DeleteVolumeResponse{}, nil 182 | } else if respStatus.ReturnCode == storageapitypes.VolumeHasSnapshot { 183 | return nil, status.Error(codes.FailedPrecondition, fmt.Sprintf("volume %s cannot be deleted since it has snapshots", volumeName)) 184 | } 185 | } 186 | return nil, err 187 | } 188 | 189 | klog.Infof("successfully deleted volume %s", volumeName) 190 | return &csi.DeleteVolumeResponse{}, nil 191 | } 192 | 193 | func getSizeStr(size int64) string { 194 | if size == 0 { 195 | size = 4096 196 | } 197 | 198 | return fmt.Sprintf("%dB", size) 199 | } 200 | 201 | // isValidVolumeCapabilities validates the given VolumeCapability array is valid 202 | func isValidVolumeCapabilities(volCaps []*csi.VolumeCapability) error { 203 | if len(volCaps) == 0 { 204 | return fmt.Errorf("volume capabilities to validate not provided") 205 | } 206 | 207 | hasSupport := func(cap *csi.VolumeCapability) bool { 208 | for _, supportedMode := range common.SupportedAccessModes { 
209 | // we currently support block and mount volumes with both supported access modes, so don't check mount types 210 | if cap.GetAccessMode().Mode == supportedMode { 211 | return true 212 | } 213 | } 214 | return false 215 | } 216 | 217 | for _, c := range volCaps { 218 | if !hasSupport(c) { 219 | return fmt.Errorf("driver does not support access mode %v", c.GetAccessMode()) 220 | } 221 | } 222 | return nil 223 | } 224 | -------------------------------------------------------------------------------- /pkg/storage/fcNode.go: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (c) 2022 Seagate Technology LLC and/or its Affiliates 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | // For any questions about this software or licensing, 17 | // please email opensource@seagate.com or cortx-questions@seagate.com. 18 | 19 | package storage 20 | 21 | import ( 22 | "bufio" 23 | "context" 24 | "fmt" 25 | "io/fs" 26 | "os" 27 | "os/exec" 28 | "path/filepath" 29 | "strings" 30 | "time" 31 | 32 | fclib "github.com/Seagate/csi-lib-sas/sas" 33 | "github.com/Seagate/seagate-exos-x-csi/pkg/common" 34 | "github.com/container-storage-interface/spec/lib/go/csi" 35 | "github.com/pkg/errors" 36 | "google.golang.org/grpc/codes" 37 | "google.golang.org/grpc/status" 38 | "k8s.io/klog/v2" 39 | ) 40 | 41 | // NodeStageVolume mounts the volume to a staging path on the node. 
This is 42 | // called by the CO before NodePublishVolume and is used to temporary mount the 43 | // volume to a staging path. Once mounted, NodePublishVolume will make sure to 44 | // mount it to the appropriate path 45 | // Will not be called as the plugin does not have the STAGE_UNSTAGE_VOLUME capability 46 | func (fc *fcStorage) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) { 47 | return nil, status.Error(codes.Unimplemented, "NodeStageVolume is not implemented") 48 | } 49 | 50 | // NodeUnstageVolume unstages the volume from the staging path 51 | // Will not be called as the plugin does not have the STAGE_UNSTAGE_VOLUME capability 52 | func (fc *fcStorage) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { 53 | return nil, status.Error(codes.Unimplemented, "NodeUnstageVolume is not implemented") 54 | } 55 | 56 | func (fc *fcStorage) AttachStorage(ctx context.Context, req *csi.NodePublishVolumeRequest) (string, error) { 57 | CheckPreviouslyRemovedDevices(ctx) 58 | klog.InfoS("initiating FC connection...") 59 | wwn, _ := common.VolumeIdGetWwn(req.GetVolumeId()) 60 | connector := &fclib.Connector{VolumeWWN: wwn} 61 | path, err := fclib.Attach(ctx, connector, &fclib.OSioHandler{}) 62 | if err != nil { 63 | return path, err 64 | } 65 | klog.InfoS("attached device", "path", path) 66 | err = connector.Persist(ctx, fc.connectorInfoPath) 67 | return path, err 68 | } 69 | 70 | func (fc *fcStorage) DetachStorage(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) error { 71 | klog.InfoS("loading FC connection info from file", "connectorInfoPath", fc.connectorInfoPath) 72 | connector, err := fclib.GetConnectorFromFile(fc.connectorInfoPath) 73 | if err != nil { 74 | if errors.Is(err, fs.ErrNotExist) { 75 | klog.ErrorS(err, "assuming that FC connection was already closed") 76 | return nil 77 | } else { 78 | return err 79 | } 80 | } 81 | 
klog.InfoS("connector.OSPathName", "connector.OSPathName", connector.OSPathName) 82 | 83 | if IsVolumeInUse(connector.OSPathName) { 84 | klog.InfoS("volume is still in use on the node, thus it will not be detached") 85 | return nil 86 | } 87 | 88 | _, err = os.Stat(connector.OSPathName) 89 | if err != nil && errors.Is(err, fs.ErrNotExist) { 90 | klog.ErrorS(err, "assuming that volume is already disconnected") 91 | return nil 92 | } 93 | 94 | wwn, _ := common.VolumeIdGetWwn(req.GetVolumeId()) 95 | diskByIdPath := fmt.Sprintf("/dev/disk/by-id/dm-name-3%s", wwn) 96 | out, err := exec.Command("ls", "-l", diskByIdPath).CombinedOutput() 97 | klog.InfoS("check for dm-name", "command", fmt.Sprintf("ls -l %s, err = %v, out = \n%s", diskByIdPath, err, string(out))) 98 | 99 | if !connector.Multipath { 100 | // If we didn't discover the multipath device initially, double check that we didn't just miss it 101 | // Detach the discovered devices if they are found 102 | klog.V(3).InfoS("Device saved as non-multipath. 
Searching for additional devices before Detach") 103 | if connector.IoHandler == nil { 104 | connector.IoHandler = &fclib.OSioHandler{} 105 | } 106 | discoveredMpathName, devices := fclib.FindDiskById(klog.FromContext(ctx), wwn, connector.IoHandler) 107 | if (discoveredMpathName != connector.OSPathName) && (len(devices) > 0) { 108 | klog.V(0).InfoS("Found additional linked devices", "discoveredMpathName", discoveredMpathName, "devices", devices) 109 | klog.V(0).InfoS("Replacing original connector info prior to Detach", 110 | "originalDevice", connector.OSPathName, "newDevice", discoveredMpathName, 111 | "originalDevicePaths", connector.OSDevicePaths, "newDevicePaths", devices) 112 | connector.OSPathName = discoveredMpathName 113 | connector.OSDevicePaths = devices 114 | connector.Multipath = true 115 | } 116 | } 117 | 118 | klog.InfoS("DisconnectVolume, detaching device") 119 | err = fclib.Detach(ctx, connector.OSPathName, connector.IoHandler) 120 | if err != nil { 121 | klog.ErrorS(err, "error detaching FC connection") 122 | return err 123 | } 124 | 125 | klog.InfoS("deleting FC connection info file", "fc.connectorInfoPath", fc.connectorInfoPath) 126 | os.Remove(fc.connectorInfoPath) 127 | SASandFCRemovedDevicesMap[connector.VolumeWWN] = time.Now() 128 | return nil 129 | } 130 | 131 | // NodePublishVolume mounts the volume mounted to the staging path to the target path 132 | func (fc *fcStorage) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { 133 | return nil, status.Error(codes.Unimplemented, "FC specific NodePublishVolume not implemented") 134 | } 135 | 136 | // NodeUnpublishVolume unmounts the volume from the target path 137 | func (fc *fcStorage) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { 138 | return nil, status.Error(codes.Unimplemented, "FC specific NodeUnpublishVolume not implemented") 139 | } 140 | 141 | // 
NodeGetVolumeStats return info about a given volume 142 | // Will not be called as the plugin does not have the GET_VOLUME_STATS capability 143 | func (fc *fcStorage) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { 144 | return nil, status.Error(codes.Unimplemented, "NodeGetVolumeStats is not implemented") 145 | } 146 | 147 | // NodeExpandVolume finalizes volume expansion on the node 148 | func (fc *fcStorage) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) { 149 | 150 | volumeName, _ := common.VolumeIdGetName(req.GetVolumeId()) 151 | volumepath := req.GetVolumePath() 152 | klog.V(2).Infof("NodeExpandVolume: VolumeId=%v, VolumePath=%v", volumeName, volumepath) 153 | 154 | if len(volumeName) == 0 { 155 | return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("node expand volume requires volume id")) 156 | } 157 | 158 | if len(volumepath) == 0 { 159 | return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("node expand volume requires volume path")) 160 | } 161 | 162 | connector, err := fclib.GetConnectorFromFile(fc.connectorInfoPath) 163 | klog.V(3).Infof("GetConnectorFromFile(%s) connector: %v, err: %v", volumeName, connector, err) 164 | 165 | if err != nil { 166 | return nil, status.Error(codes.NotFound, fmt.Sprintf("node expand volume path not found for volume id (%s)", volumeName)) 167 | } 168 | 169 | if connector.Multipath { 170 | klog.V(2).Info("device is using multipath") 171 | if err := fclib.ResizeMultipathDevice(ctx, connector.OSPathName); err != nil { 172 | return nil, err 173 | } 174 | } else { 175 | klog.V(2).Info("device is NOT using multipath") 176 | } 177 | 178 | if req.GetVolumeCapability().GetMount() != nil { 179 | klog.Infof("expanding filesystem using resize2fs on device %s", connector.OSPathName) 180 | output, err := exec.Command("resize2fs", connector.OSPathName).CombinedOutput() 181 | if err != nil { 
182 | klog.V(2).InfoS("could not resize filesystem", "resize2fs output", output) 183 | return nil, fmt.Errorf("could not resize filesystem: %v", output) 184 | } 185 | } 186 | return &csi.NodeExpandVolumeResponse{}, nil 187 | } 188 | 189 | // NodeGetCapabilities returns the supported capabilities of the node server 190 | func (fc *fcStorage) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { 191 | return nil, status.Error(codes.Unimplemented, "NodeGetCapabilities is not implemented") 192 | } 193 | 194 | // NodeGetInfo returns info about the node 195 | func (fc *fcStorage) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { 196 | return nil, status.Error(codes.Unimplemented, "NodeGetInfo is not implemented") 197 | } 198 | 199 | // Retrieve FC initiators for use by controller code for publishing volumes 200 | func GetFCInitiators() ([]string, error) { 201 | specifiedFCAddrs, err := readFCAddrFile(FCAddressFilePath) 202 | if err != nil { 203 | klog.ErrorS(err, "Error reading fc address config file: %v", err) 204 | } 205 | if specifiedFCAddrs != nil { 206 | return specifiedFCAddrs, nil 207 | } 208 | 209 | klog.InfoS("begin FC address discovery") 210 | fcAddrFilename := "port_name" 211 | scsiHostBasePath := "/sys/class/fc_host/" 212 | 213 | dirList, err := os.ReadDir(scsiHostBasePath) 214 | if err != nil { 215 | return nil, err 216 | } 217 | 218 | discoveredFCAddresses := []string{} 219 | for _, hostDir := range dirList { 220 | fcAddrFile := filepath.Join(scsiHostBasePath, hostDir.Name(), fcAddrFilename) 221 | addrBytes, err := os.ReadFile(fcAddrFile) 222 | address := string(addrBytes) 223 | address = strings.TrimLeft(strings.TrimRight(address, "\n"), "0x") 224 | 225 | if err != nil { 226 | if errors.Is(err, os.ErrNotExist) { 227 | continue 228 | } else { 229 | klog.ErrorS(err, "error searching for FC HBA addresses", "path", fcAddrFile) 230 | return nil, 
err 231 | } 232 | } else { 233 | klog.InfoS("found FC initiator address", "address", address) 234 | discoveredFCAddresses = append(discoveredFCAddresses, address) 235 | } 236 | } 237 | return discoveredFCAddresses, nil 238 | } 239 | 240 | // Read the fc address configuration file and return addresses 241 | func readFCAddrFile(filename string) ([]string, error) { 242 | file, err := os.Open(filename) 243 | if err != nil { 244 | return nil, err 245 | } 246 | defer file.Close() 247 | 248 | foundAddresses := []string{} 249 | 250 | scanner := bufio.NewScanner(file) 251 | for scanner.Scan() { 252 | line := scanner.Text() 253 | line = strings.TrimSpace(line) 254 | foundAddresses = append(foundAddresses, line) 255 | } 256 | 257 | if err := scanner.Err(); err != nil { 258 | return nil, err 259 | } 260 | return foundAddresses, nil 261 | } 262 | -------------------------------------------------------------------------------- /pkg/storage/sasNode.go: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (c) 2022 Seagate Technology LLC and/or its Affiliates 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | // 16 | // For any questions about this software or licensing, 17 | // please email opensource@seagate.com or cortx-questions@seagate.com. 
18 | 19 | package storage 20 | 21 | import ( 22 | "bufio" 23 | "context" 24 | "fmt" 25 | "io/fs" 26 | "os" 27 | "os/exec" 28 | "path/filepath" 29 | "strconv" 30 | "strings" 31 | "time" 32 | 33 | saslib "github.com/Seagate/csi-lib-sas/sas" 34 | "github.com/Seagate/seagate-exos-x-csi/pkg/common" 35 | "github.com/container-storage-interface/spec/lib/go/csi" 36 | "github.com/pkg/errors" 37 | "google.golang.org/grpc/codes" 38 | "google.golang.org/grpc/status" 39 | "k8s.io/klog/v2" 40 | ) 41 | 42 | // NodeStageVolume mounts the volume to a staging path on the node. This is 43 | // called by the CO before NodePublishVolume and is used to temporary mount the 44 | // volume to a staging path. Once mounted, NodePublishVolume will make sure to 45 | // mount it to the appropriate path 46 | // Will not be called as the plugin does not have the STAGE_UNSTAGE_VOLUME capability 47 | func (sas *sasStorage) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) { 48 | return nil, status.Error(codes.Unimplemented, "NodeStageVolume is not implemented") 49 | } 50 | 51 | // NodeUnstageVolume unstages the volume from the staging path 52 | // Will not be called as the plugin does not have the STAGE_UNSTAGE_VOLUME capability 53 | func (sas *sasStorage) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { 54 | return nil, status.Error(codes.Unimplemented, "NodeUnstageVolume is not implemented") 55 | } 56 | 57 | // Read the sas address configuration file and return addresses 58 | func readSASAddrFile(filename string) ([]string, error) { 59 | file, err := os.Open(filename) 60 | if err != nil { 61 | return nil, err 62 | } 63 | defer file.Close() 64 | 65 | foundAddresses := []string{} 66 | 67 | scanner := bufio.NewScanner(file) 68 | for scanner.Scan() { 69 | line := scanner.Text() 70 | line = strings.TrimSpace(line) 71 | foundAddresses = append(foundAddresses, line) 72 | } 73 | 74 
| if err := scanner.Err(); err != nil { 75 | return nil, err 76 | } 77 | return foundAddresses, nil 78 | } 79 | 80 | func GetSASInitiators() ([]string, error) { 81 | // look for file specifying addresses. Skip discovery process if it exists 82 | specifiedSASAddrs, err := readSASAddrFile(SASAddressFilePath) 83 | if err != nil { 84 | klog.ErrorS(err, "Error reading sas address config file", "path", SASAddressFilePath) 85 | } 86 | if specifiedSASAddrs != nil { 87 | return specifiedSASAddrs, nil 88 | } 89 | klog.InfoS("begin SAS address discovery") 90 | sasAddrFilename := "host_sas_address" 91 | scsiHostBasePath := "/sys/class/scsi_host/" 92 | 93 | dirList, err := os.ReadDir(scsiHostBasePath) 94 | if err != nil { 95 | return nil, err 96 | } 97 | 98 | for _, hostDir := range dirList { 99 | sasAddrFile := filepath.Join(scsiHostBasePath, hostDir.Name(), sasAddrFilename) 100 | addrBytes, err := os.ReadFile(sasAddrFile) 101 | address := string(addrBytes) 102 | address = strings.TrimLeft(strings.TrimRight(address, "\n"), "0x") 103 | 104 | if err != nil { 105 | if errors.Is(err, os.ErrNotExist) { 106 | continue 107 | } else { 108 | klog.ErrorS(err, "error searching for SAS HBA addresses", "path", sasAddrFile) 109 | return nil, err 110 | } 111 | } else { 112 | klog.InfoS("found SAS HBA address", "address", address) 113 | if firstAddress, err := strconv.ParseInt(address, 16, 0); err != nil { 114 | return nil, err 115 | } else { 116 | secondAddress := strconv.FormatInt(firstAddress+1, 16) 117 | specifiedSASAddrs = append(specifiedSASAddrs, address, secondAddress) 118 | } 119 | } 120 | } 121 | return specifiedSASAddrs, nil 122 | } 123 | 124 | func (sas *sasStorage) AttachStorage(ctx context.Context, req *csi.NodePublishVolumeRequest) (string, error) { 125 | CheckPreviouslyRemovedDevices(ctx) 126 | 127 | klog.InfoS("initiating SAS connection...") 128 | wwn, _ := common.VolumeIdGetWwn(req.GetVolumeId()) 129 | connector := saslib.Connector{VolumeWWN: wwn} 130 | path, err := 
saslib.Attach(ctx, &connector, &saslib.OSioHandler{}) 131 | if err != nil { 132 | return path, status.Error(codes.Unavailable, err.Error()) 133 | } 134 | klog.InfoS("attached device", "path", path) 135 | err = connector.Persist(ctx, sas.connectorInfoPath) 136 | return path, err 137 | } 138 | 139 | func (sas *sasStorage) DetachStorage(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) error { 140 | klog.InfoS("loading SAS connection info from file", "connectorInfoPath", sas.connectorInfoPath) 141 | connector, err := saslib.GetConnectorFromFile(sas.connectorInfoPath) 142 | if err != nil { 143 | if errors.Is(err, fs.ErrNotExist) { 144 | klog.ErrorS(err, "assuming that SAS connection was already closed") 145 | return nil 146 | } 147 | return status.Error(codes.Internal, err.Error()) 148 | } 149 | klog.InfoS("connector.OSPathName", "connector.OSPathName", connector.OSPathName) 150 | 151 | if IsVolumeInUse(connector.OSPathName) { 152 | klog.InfoS("volume is still in use on the node, thus it will not be detached") 153 | return nil 154 | } 155 | 156 | _, err = os.Stat(connector.OSPathName) 157 | if err != nil && errors.Is(err, fs.ErrNotExist) { 158 | klog.ErrorS(err, "assuming that volume is already disconnected") 159 | return nil 160 | } 161 | 162 | wwn, _ := common.VolumeIdGetWwn(req.GetVolumeId()) 163 | diskByIdPath := fmt.Sprintf("/dev/disk/by-id/dm-name-3%s", wwn) 164 | out, err := exec.Command("ls", "-l", diskByIdPath).CombinedOutput() 165 | klog.InfoS("check for dm-name", "command", fmt.Sprintf("ls -l %s, err = %v, out = \n%s", diskByIdPath, err, string(out))) 166 | 167 | if !connector.Multipath { 168 | // If we didn't discover the multipath device initially, double check that we didn't just miss it 169 | // Detach the discovered devices if they are found 170 | klog.V(3).InfoS("Device saved as non-multipath. 
Searching for additional devices before Detach") 171 | if connector.IoHandler == nil { 172 | connector.IoHandler = &saslib.OSioHandler{} 173 | } 174 | discoveredMpathName, devices := saslib.FindDiskById(klog.FromContext(ctx), wwn, connector.IoHandler) 175 | if (discoveredMpathName != connector.OSPathName) && (len(devices) > 0) { 176 | klog.V(0).InfoS("Found additional linked devices", "discoveredMpathName", discoveredMpathName, "devices", devices) 177 | klog.V(0).InfoS("Replacing original connector info prior to Detach", 178 | "originalDevice", connector.OSPathName, "newDevice", discoveredMpathName, 179 | "originalDevicePaths", connector.OSDevicePaths, "newDevicePaths", devices) 180 | connector.OSPathName = discoveredMpathName 181 | connector.OSDevicePaths = devices 182 | connector.Multipath = true 183 | } 184 | } 185 | 186 | klog.Info("DisconnectVolume, detaching SAS device") 187 | err = saslib.Detach(ctx, connector.OSPathName, connector.IoHandler) 188 | if err != nil { 189 | klog.ErrorS(err, "error detaching FC connection") 190 | return err 191 | } 192 | 193 | klog.InfoS("deleting SAS connection info file", "sas.connectorInfoPath", sas.connectorInfoPath) 194 | os.Remove(sas.connectorInfoPath) 195 | SASandFCRemovedDevicesMap[connector.VolumeWWN] = time.Now() 196 | return nil 197 | } 198 | 199 | func (sas *sasStorage) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { 200 | return nil, status.Error(codes.Unimplemented, "SAS specific NodePublishVolume not implemented") 201 | } 202 | 203 | // NodeUnpublishVolume unmounts the volume from the target path 204 | func (sas *sasStorage) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { 205 | return nil, status.Error(codes.Unimplemented, "SAS specific NodeUnpublishVolume not implemented") 206 | } 207 | 208 | // NodeGetVolumeStats return info about a given volume 209 | // Will not be called 
as the plugin does not have the GET_VOLUME_STATS capability 210 | func (sas *sasStorage) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { 211 | return nil, status.Error(codes.Unimplemented, "NodeGetVolumeStats is not implemented") 212 | } 213 | 214 | // NodeExpandVolume finalizes volume expansion on the node 215 | func (sas *sasStorage) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) { 216 | 217 | volumeName, _ := common.VolumeIdGetName(req.GetVolumeId()) 218 | volumepath := req.GetVolumePath() 219 | klog.V(2).Infof("NodeExpandVolume: VolumeId=%v, VolumePath=%v", volumeName, volumepath) 220 | 221 | if len(volumeName) == 0 { 222 | return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("node expand volume requires volume id")) 223 | } 224 | 225 | if len(volumepath) == 0 { 226 | return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("node expand volume requires volume path")) 227 | } 228 | 229 | connector, err := saslib.GetConnectorFromFile(sas.connectorInfoPath) 230 | klog.V(3).Infof("GetConnectorFromFile(%s) connector: %v, err: %v", volumeName, connector, err) 231 | 232 | if err != nil { 233 | return nil, status.Error(codes.NotFound, fmt.Sprintf("node expand volume path not found for volume id (%s)", volumeName)) 234 | } 235 | 236 | if connector.Multipath { 237 | klog.V(2).Info("device is using multipath") 238 | if err := saslib.ResizeMultipathDevice(ctx, connector.OSPathName); err != nil { 239 | return nil, err 240 | } 241 | } else { 242 | klog.V(2).Info("device is NOT using multipath") 243 | } 244 | 245 | if req.GetVolumeCapability().GetMount() != nil { 246 | klog.Infof("expanding filesystem using resize2fs on device %s", connector.OSPathName) 247 | output, err := exec.Command("resize2fs", connector.OSPathName).CombinedOutput() 248 | if err != nil { 249 | klog.V(2).InfoS("could not resize filesystem", "resize2fs output", 
output) 250 | return nil, fmt.Errorf("could not resize filesystem: %v", output) 251 | } 252 | } 253 | 254 | return &csi.NodeExpandVolumeResponse{}, nil 255 | } 256 | 257 | // NodeGetCapabilities returns the supported capabilities of the node server 258 | func (sas *sasStorage) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { 259 | return nil, status.Error(codes.Unimplemented, "NodeGetCapabilities is not implemented") 260 | } 261 | 262 | // NodeGetInfo returns info about the node 263 | func (sas *sasStorage) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { 264 | return nil, status.Error(codes.Unimplemented, "NodeGetInfo is not implemented") 265 | } 266 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 
23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. 
You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. 
(Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | --------------------------------------------------------------------------------