├── changelogs
│   └── released
│       ├── v4.0.0
│       │   └── 172-Abhinandan-Purkait
│       ├── v2.5.0
│       │   ├── 14-prateekpandey14
│       │   └── 22-allenhaozi
│       ├── v3.5.0
│       │   ├── 166-niladrih
│       │   └── 161-MingZhang-YBPS
│       ├── v4.1.0
│       │   ├── 189-niladrih
│       │   └── 188-niladrih
│       ├── v4.1.1
│       │   └── 200-niladrih
│       ├── v4.2.0
│       │   ├── 190-nobiit
│       │   ├── 224-mctech
│       │   ├── 227-lkummer
│       │   ├── 238-niladrih
│       │   └── 237-tiagolobocastro
│       ├── v4.4.0
│       │   └── 286-speedfl
│       ├── v3.2.0
│       │   ├── 127-niladrih
│       │   └── 130-csschwe
│       ├── v3.3.0
│       │   └── 137-hickersonj
│       ├── v2.8.0
│       │   └── 39-niladrih
│       ├── v2.3.0-RC1
│       │   └── 2-akhilerm
│       ├── v3.0.0
│       │   └── 78-almas33
│       ├── v3.1.0
│       │   ├── 106-Ab-hishek
│       │   └── 102-Ab-hishek
│       ├── v4.1.4
│       │   └── 236-niladrih
│       ├── v4.1.3
│       │   ├── 182-bernardgut
│       │   └── 183-laverya
│       ├── v4.1.2
│       │   └── 211-emosbaugh
│       ├── v2.4.0-RC1
│       │   └── 15-kmova
│       ├── v3.4.0
│       │   └── 162-Abhinandan-Purkait
│       ├── v2.7.0-RC2
│       │   └── 32-akhilerm
│       └── v4.3.0
│           └── 264-sushiMix
├── .muse
│   └── config.toml
├── .bettercodehub.yml
├── design
│   └── images
│       └── hostpath_localpv_provisioner-deployment.jpg
├── e2e-tests
│   ├── experiments
│   │   ├── functional
│   │   │   ├── backup_and_restore
│   │   │   │   ├── credentials-velero
│   │   │   │   ├── patch.yml
│   │   │   │   ├── test_vars.yml
│   │   │   │   ├── backup-restore.yml
│   │   │   │   └── run_e2e_test.yml
│   │   │   ├── data-integrity
│   │   │   │   ├── pvc.yml
│   │   │   │   ├── test_vars.yml
│   │   │   │   ├── run_e2e_test.yml
│   │   │   │   ├── fio-write.yml
│   │   │   │   ├── fio-read.yml
│   │   │   │   └── commands.sh
│   │   │   └── localpv-provisioning-selected-device
│   │   │       ├── test_vars.yml
│   │   │       ├── storage_class.j2
│   │   │       ├── run_e2e_test.yml
│   │   │       ├── percona.j2
│   │   │       └── README.md
│   │   ├── chaos
│   │   │   ├── app_pod_failure
│   │   │   │   ├── data_persistence.j2
│   │   │   │   ├── test_vars.yml
│   │   │   │   ├── run_e2e_test.yml
│   │   │   │   └── README.md
│   │   │   └── local_pv_disk_reuse
│   │   │       ├── test_vars.yml
│   │   │       ├── run_e2e_test.yml
│   │   │       ├── busybox_statefulset.yml
│   │   │       └── test.yml
│   │   └── localpv-provisioner
│   │       ├── localpv-sc.yml
│   │       ├── charts_operator_provision.yml
│   │       ├── test_vars.yml
│   │       ├── master_operator_provision.yml
│   │       ├── README.md
│   │       ├── run_e2e_test.yml
│   │       └── release_tag_provision.yml
│   ├── utils
│   │   ├── k8s
│   │   │   ├── status_testns.yml
│   │   │   ├── fetch_app_pod.yml
│   │   │   ├── deploy_app_svc.yml
│   │   │   ├── deploy_application.yml
│   │   │   ├── create_ns.yml
│   │   │   ├── application_liveness_check.yml
│   │   │   ├── deploy_single_app.yml
│   │   │   ├── status_app_pod.yml
│   │   │   ├── check_statefulset_status.yml
│   │   │   ├── scale_statefulset_replicas.yml
│   │   │   ├── check_deployment_status.yml
│   │   │   ├── pre_create_app_deploy.yml
│   │   │   ├── deprovision_statefulset.yml
│   │   │   └── deprovision_deployment.yml
│   │   ├── fcm
│   │   │   ├── create_testname.yml
│   │   │   └── update_e2e_result_resource.yml
│   │   └── scm
│   │       ├── applications
│   │       │   ├── mysql
│   │       │   │   └── check_db_connection.yml
│   │       │   └── busybox
│   │       │       └── busybox_data_persistence.yml
│   │       └── openebs
│   │           ├── fetch_replica_values.yml
│   │           └── check_replica_count.yml
│   ├── apps
│   │   ├── percona
│   │   │   ├── workload
│   │   │   │   ├── tpcc.conf
│   │   │   │   ├── test_vars.yml
│   │   │   │   ├── tpcc_bench.yml
│   │   │   │   └── run_e2e_test.yml
│   │   │   └── deployers
│   │   │       ├── test_vars.yml
│   │   │       ├── run_e2e_test.yml
│   │   │       ├── percona.yml
│   │   │       └── test.yml
│   │   └── busybox
│   │       ├── liveness
│   │       │   ├── vars.yml
│   │       │   ├── run_e2e_test.yml
│   │       │   ├── busybox_liveness.yml
│   │       │   └── test.yml
│   │       └── deployers
│   │           ├── test_vars.yml
│   │           ├── busybox_statefulset.yml
│   │           ├── busybox_deployment.yml
│   │           ├── run_e2e_test.yml
│   │           └── test.yml
│   ├── chaoslib
│   │   ├── kubectl
│   │   │   ├── app_pod_failure.yaml
│   │   │   └── app_pod_random_delete.yaml
│   │   ├── containerd_chaos
│   │   │   └── containerd-chaos-ds.yml
│   │   ├── crio_chaos
│   │   │   └── crio-chaos-ds.yml
│   │   └── pumba
│   │       └── pumba_kube.yaml
│   ├── hack
│   │   ├── e2e-result.j2
│   │   ├── rbac.yaml
│   │   ├── push
│   │   └── crds.yaml
│   ├── funclib
│   │   └── scale_replicas.yml
│   └── Dockerfile
├── ct.yaml
├── .gitignore
├── SECURITY.md
├── SECURITY_CONTACTS.md
├── .github
│   ├── workflows
│   │   └── fossa.yml
│   ├── CODEOWNERS
│   ├── ISSUE_TEMPLATE
│   │   ├── feature-request.md
│   │   └── bug_report.md
│   └── pull_request_template.md
├── scripts
│   ├── yq_utils.sh
│   ├── log.sh
│   ├── update-reg-repo.sh
│   └── validate-chart-version.sh
├── GOVERNANCE.md
├── MAINTAINERS.md
├── deploy
│   ├── helm
│   │   └── charts
│   │       ├── .helmignore
│   │       ├── templates
│   │       │   ├── NOTES.txt
│   │       │   ├── psp.yaml
│   │       │   ├── hostpath-class.yaml
│   │       │   └── _helpers.tpl
│   │       └── Chart.yaml
│   └── kubectl
│       ├── fillup-localpv-hostpath.yaml
│       ├── busybox-localpv-path.yaml
│       └── openebs-hostpath-sc.yaml
├── CODE_OF_CONDUCT.md
├── nix
│   └── sources.json
├── docs
│   ├── installation
│   │   └── platforms
│   │       ├── minikube.md
│   │       ├── rancher.md
│   │       ├── talos.md
│   │       └── microk8s.md
│   └── tutorials
│       ├── hostpath
│       │   ├── allowedtopologies.md
│       │   ├── xfs_quota
│       │   │   └── use-xfs-fs-with-loop-device.md
│       │   ├── nodeaffinitylabels.md
│       │   └── filepermissions.md
│       └── backup_and_restore
│           ├── restore.md
│           ├── velero
│           │   └── minio.md
│           └── backup.md
├── pkg
│   ├── kubernetes
│   │   └── api
│   │       └── core
│   │           └── v1
│   │               ├── persistentvolume
│   │               │   ├── persistentvolume_test.go
│   │               │   └── buildlist.go
│   │               ├── persistentvolumeclaim
│   │               │   ├── persistentvolumeclaim_test.go
│   │               │   └── persistentvolumeclaim.go
│   │               ├── event
│   │               │   └── buildlist.go
│   │               ├── volume
│   │               │   └── volume.go
│   │               └── pod
│   │                   └── buildlist.go
│   └── logger
│       └── logger.go
├── shell.nix
├── tests
│   ├── bdd
│   │   ├── nodeAffinityLabels_cas_config.feature
│   │   ├── hostpath.feature
│   │   ├── xfs_quota.feature
│   │   ├── pvc_cas_config.feature
│   │   └── ext4_quota.feature
│   └── README.md
├── hack
│   └── update-k8s.sh
├── vm.nix
├── Makefile.buildx.mk
└── cmd
    └── provisioner-localpv
        ├── app
        │   ├── env.go
        │   ├── types.go
        │   ├── helper_test.go
        │   └── backward_compatability.go
        └── main.go
/changelogs/released/v4.0.0/172-Abhinandan-Purkait:
--------------------------------------------------------------------------------
1 | disable localpv device
--------------------------------------------------------------------------------
/changelogs/released/v2.5.0/14-prateekpandey14:
--------------------------------------------------------------------------------
1 | add openebs localpv helm charts
2 |
--------------------------------------------------------------------------------
/changelogs/released/v3.5.0/166-niladrih:
--------------------------------------------------------------------------------
1 | upgrade google analytics client to use GA4
--------------------------------------------------------------------------------
/changelogs/released/v4.1.0/189-niladrih:
--------------------------------------------------------------------------------
1 | update base alpine image version to 3.20.1
--------------------------------------------------------------------------------
/changelogs/released/v4.1.1/200-niladrih:
--------------------------------------------------------------------------------
1 | Update analytics dependency to v0.3.0
2 |
--------------------------------------------------------------------------------
/changelogs/released/v4.2.0/190-nobiit:
--------------------------------------------------------------------------------
1 | Add feature to merge cas-config from PVC
2 |
--------------------------------------------------------------------------------
/changelogs/released/v4.2.0/224-mctech:
--------------------------------------------------------------------------------
1 | support helper pod to use hostNetwork
2 |
--------------------------------------------------------------------------------
/changelogs/released/v4.4.0/286-speedfl:
--------------------------------------------------------------------------------
1 | Add support for empty PVC selectors
2 |
3 |
--------------------------------------------------------------------------------
/changelogs/released/v3.2.0/127-niladrih:
--------------------------------------------------------------------------------
1 | fix bug where klog logging flags are not parsed
--------------------------------------------------------------------------------
/changelogs/released/v3.2.0/130-csschwe:
--------------------------------------------------------------------------------
1 | fix bug where XFS-Quota does not work with LVM
--------------------------------------------------------------------------------
/changelogs/released/v3.3.0/137-hickersonj:
--------------------------------------------------------------------------------
1 | support quota enforcement for ext4 filesystem
--------------------------------------------------------------------------------
/changelogs/released/v4.2.0/227-lkummer:
--------------------------------------------------------------------------------
1 | Add global.imageRegistry Helm chart parameter
2 |
--------------------------------------------------------------------------------
/changelogs/released/v4.1.0/188-niladrih:
--------------------------------------------------------------------------------
1 | update github.com/openebs/google-analytics-4 to v0.2.1
--------------------------------------------------------------------------------
/changelogs/released/v4.2.0/238-niladrih:
--------------------------------------------------------------------------------
1 | add eviction tolerations to the provisioner pod
2 |
--------------------------------------------------------------------------------
/.muse/config.toml:
--------------------------------------------------------------------------------
1 | ignoreRules = [ "G101",
2 | "ST1005"
3 | ]
4 |
--------------------------------------------------------------------------------
/changelogs/released/v2.8.0/39-niladrih:
--------------------------------------------------------------------------------
1 | fix provisioner crashing when old PVs are not cleaned up.
--------------------------------------------------------------------------------
/changelogs/released/v4.2.0/237-tiagolobocastro:
--------------------------------------------------------------------------------
1 | don't wait forever if the launch pod failed
2 |
--------------------------------------------------------------------------------
/changelogs/released/v2.3.0-RC1/2-akhilerm:
--------------------------------------------------------------------------------
1 | add support for multiarch builds to localpv provisioner
2 |
--------------------------------------------------------------------------------
/changelogs/released/v3.0.0/78-almas33:
--------------------------------------------------------------------------------
1 | add support for enabling XFS project quota in hostpath volumes
--------------------------------------------------------------------------------
/changelogs/released/v3.1.0/106-Ab-hishek:
--------------------------------------------------------------------------------
1 | add support for BlockDevice label selectors with device volumes
--------------------------------------------------------------------------------
/changelogs/released/v3.5.0/161-MingZhang-YBPS:
--------------------------------------------------------------------------------
1 | fix quota calculation for filesystem quota enforcement
--------------------------------------------------------------------------------
/changelogs/released/v4.1.4/236-niladrih:
--------------------------------------------------------------------------------
1 | Let helm chart generate priorityClassName from go-template
2 |
--------------------------------------------------------------------------------
/.bettercodehub.yml:
--------------------------------------------------------------------------------
1 | component_depth: 2
2 | languages:
3 | - go
4 | exclude:
5 | - /pkg/client/generated/.*
6 |
--------------------------------------------------------------------------------
/changelogs/released/v4.1.3/182-bernardgut:
--------------------------------------------------------------------------------
1 | add pod priorityClassName to prevent race condition due to pod eviction
2 |
--------------------------------------------------------------------------------
/changelogs/released/v3.1.0/102-Ab-hishek:
--------------------------------------------------------------------------------
1 | add support for multiple Node Affinity Labels for both hostpath and device volumes
--------------------------------------------------------------------------------
/changelogs/released/v4.1.2/211-emosbaugh:
--------------------------------------------------------------------------------
1 | Fix bug where analytics toggle env configuration doesn't disable analytics
2 |
--------------------------------------------------------------------------------
/changelogs/released/v4.1.3/183-laverya:
--------------------------------------------------------------------------------
1 | allow specifying additional labels to be applied to all helm chart resources
2 |
--------------------------------------------------------------------------------
/changelogs/released/v2.4.0-RC1/15-kmova:
--------------------------------------------------------------------------------
1 | allow custom node affinity label in place of hostnames for localpv hostpath provisioner
--------------------------------------------------------------------------------
/changelogs/released/v2.5.0/22-allenhaozi:
--------------------------------------------------------------------------------
1 | support passing image pull secrets when creating helper pod by localpv provisioner
2 |
--------------------------------------------------------------------------------
/changelogs/released/v3.4.0/162-Abhinandan-Purkait:
--------------------------------------------------------------------------------
1 | add helm template resolution for default StorageClass name and hostpath BasePath
--------------------------------------------------------------------------------
/changelogs/released/v2.7.0-RC2/32-akhilerm:
--------------------------------------------------------------------------------
1 | add support to push multiarch images to multiple registries and remove travis from repository
--------------------------------------------------------------------------------
/changelogs/released/v4.3.0/264-sushiMix:
--------------------------------------------------------------------------------
1 | add support for configuring file system mode for the hostpath directory
2 | add support for adding cas-config on PVC annotation
3 |
--------------------------------------------------------------------------------
/design/images/hostpath_localpv_provisioner-deployment.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openebs/dynamic-localpv-provisioner/HEAD/design/images/hostpath_localpv_provisioner-deployment.jpg
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/backup_and_restore/credentials-velero:
--------------------------------------------------------------------------------
1 | [default]
2 | aws_access_key_id = minio
3 | aws_secret_access_key = minio123
4 | [add-profile]
5 | aws_access_key_id = minio
6 | aws_secret_access_key = minio123
--------------------------------------------------------------------------------
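The `credentials-velero` file above matches the secret-file format that `velero install` consumes. A minimal sketch of wiring it in, assuming an in-cluster MinIO service at `minio.velero.svc:9000` and a bucket named `velero` (both assumptions, not fixed by this repo):

```bash
# Sketch: install Velero against an in-cluster MinIO endpoint
# (service address and bucket name are assumptions, not repo facts).
velero install \
  --provider aws \
  --bucket velero \
  --secret-file ./credentials-velero \
  --use-volume-snapshots=false \
  --backup-location-config region=minio,s3ForcePathStyle="true",s3Url=http://minio.velero.svc:9000
```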
/ct.yaml:
--------------------------------------------------------------------------------
1 | # See https://github.com/helm/chart-testing#configuration
2 | remote: origin
3 | target-branch: develop
4 | chart-dirs:
5 | - deploy/helm
6 | # helm-extra-args: --timeout=500s
7 | validate-maintainers: false
8 | check-version-increment: false
9 |
--------------------------------------------------------------------------------
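chart-testing picks this file up via its `--config` flag; a typical local lint run (a sketch, not a documented repo target) would be:

```bash
# Lint the chart in deploy/helm against the settings above.
ct lint --config ct.yaml
```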
/e2e-tests/utils/k8s/status_testns.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Checking the status of test specific namespace.
3 | k8s_facts:
4 | kind: Namespace
5 | name: "{{ app_ns }}"
6 | register: npstatus
7 | until: "'Active' in npstatus.resources.0.status.phase"
8 | delay: 30
9 | retries: 10
10 |
11 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /bin/
2 | nohup.out
3 | coverage.txt
4 | .DS_Store
5 | cmd/provisioner-localpv/start.sh
6 | # IDEs
7 | **/.idea/
8 |
9 | # Ginkgo test
10 | tests/tests.test
11 | /tests/integration_coverage.txt
12 |
13 | # Nix-Shell Go
14 | /nix/.go/
15 | /nix/.tmp/
16 |
17 | /nixos.qcow2
18 |
19 | /nix/bins/sudo
20 |
--------------------------------------------------------------------------------
/e2e-tests/apps/percona/workload/tpcc.conf:
--------------------------------------------------------------------------------
1 | {
2 | "db_user": "test_user",
3 | "db_password": "test_password",
4 | "warehouses": "test_warehouse",
5 | "connections": "test_connections",
6 | "warmup_period": "test_warmup_period",
7 | "run_duration": "test_duration",
8 | "interval": "test_interval"
9 | }
10 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/data-integrity/pvc.yml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: PersistentVolumeClaim
3 | apiVersion: v1
4 | metadata:
5 | name: demo-vol1-claim
6 | labels:
7 | name: demo-vol1-claim
8 | spec:
9 | storageClassName: testclass
10 | accessModes:
11 | - ReadWriteOnce
12 | resources:
13 | requests:
14 | storage: "5Gi"
15 |
--------------------------------------------------------------------------------
/e2e-tests/utils/fcm/create_testname.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - block:
3 | - name: Record test instance/run ID
4 | set_fact:
5 | run_id: "{{ lookup('env','RUN_ID') }}"
6 |
7 | - name: Construct testname appended with runID
8 | set_fact:
9 | test_name: "{{ test_name }}-{{ run_id }}"
10 |
11 | when: lookup('env','RUN_ID')
12 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/data-integrity/test_vars.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## TEST-SPECIFIC PARAMS
3 | test_name: fio-data-integrity
4 | fio_write_yml: fio-write.yml
5 | fio_read_yml: fio-read.yml
6 | app_ns: "{{ lookup('env','FIO_NAMESPACE') }}"
7 | pvc_yml: pvc.yml
8 | operator_ns: "openebs"
9 | storage_class: "{{ lookup('env','PROVIDER_STORAGE_CLASS') }}"
10 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 |
4 |
5 | ## Umbrella Project
6 |
7 | OpenEBS is an "umbrella project". Every project, repository and file in the OpenEBS organization adopts and follows the policies found in the Community repo umbrella project files.
8 |
9 |
10 | This project follows the [OpenEBS Security Policy](https://github.com/openebs/community/blob/HEAD/SECURITY.md).
11 |
--------------------------------------------------------------------------------
/e2e-tests/utils/k8s/fetch_app_pod.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #Fetching the details of the application pod
3 | - name: Getting the {{ application_name }} POD name
4 | k8s_facts:
5 | kind: Pod
6 | namespace: "{{ app_ns }}"
7 | label_selectors:
8 | - "{{ app_label }}"
9 | register: pod_name
10 |
11 | - debug:
12 | msg: "{{ pod_name | json_query('resources[*].metadata.name') }}"
13 |
14 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/chaos/app_pod_failure/data_persistence.j2:
--------------------------------------------------------------------------------
1 | {% if data_persistence is defined and data_persistence == 'mysql' %}
2 | consistencyutil: /e2e-tests/utils/scm/applications/mysql/mysql_data_persistence.yml
3 | {% elif data_persistence is defined and data_persistence == 'busybox' %}
4 | consistencyutil: /e2e-tests/utils/scm/applications/busybox/busybox_data_persistence.yml
5 | {% endif %}
6 |
--------------------------------------------------------------------------------
/e2e-tests/utils/scm/applications/mysql/check_db_connection.yml:
--------------------------------------------------------------------------------
1 | #Check if the database is ready for connection, upper bound wait time: 900s
2 | - name: Check if db is ready for connections
3 | shell: kubectl logs {{ pod_name.resources.0.metadata.name }} -n {{ app_ns }} | grep 'ready for connections' | wc -l
4 | register: initcheck
5 | until: initcheck.stdout == "2"
6 | delay: 5
7 | retries: 180
8 |
9 |
--------------------------------------------------------------------------------
/SECURITY_CONTACTS.md:
--------------------------------------------------------------------------------
1 | # Security Contacts
2 |
3 |
4 |
5 | ## Umbrella Project
6 |
7 | OpenEBS is an "umbrella project". Every project, repository and file in the OpenEBS organization adopts and follows the policies found in the Community repo umbrella project files.
8 |
9 |
10 | Please refer to the [OpenEBS Security Contacts](https://github.com/openebs/community/blob/HEAD/SECURITY_CONTACTS.md).
11 |
--------------------------------------------------------------------------------
/e2e-tests/apps/busybox/liveness/vars.yml:
--------------------------------------------------------------------------------
1 | test_name: busybox-liveness
2 | namespace: "{{ lookup('env','NAMESPACE') }}"
3 | app_label: "{{ lookup('env','APPLICATION_LABEL') }}"
4 | busybox_liveness: busybox_liveness.yml
5 | liveness_retry: "{{ lookup('env','LIVENESS_RETRY_COUNT') }}"
6 | liveness_timeout: "{{ lookup('env','LIVENESS_TIMEOUT_SECONDS') }}"
7 | liveness_log: "liveness-running"
8 | action: "{{ lookup('env','ACTION') }}"
--------------------------------------------------------------------------------
/e2e-tests/utils/k8s/deploy_app_svc.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Replace the application label placeholder in service spec
3 | replace:
4 | path: "{{ application_svc }}"
5 | regexp: "lkey: lvalue"
6 | replace: "{{ app_lkey }}: {{ app_lvalue }}"
7 |
8 | - name: Deploying {{ application_name }} service
9 | shell: kubectl apply -f {{ application_svc }} -n {{ app_ns }}
10 | args:
11 | executable: /bin/bash
12 |
--------------------------------------------------------------------------------
/.github/workflows/fossa.yml:
--------------------------------------------------------------------------------
1 | name: Fossa CLI
2 | on:
3 | push:
4 | branches:
5 | - 'develop'
6 | - 'release/**'
7 |
8 | jobs:
9 | fossa-scan:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v4
13 | with:
14 | submodules: recursive
15 | - uses: fossas/fossa-action@v1.4.0
16 | with:
17 | api-key: ${{ secrets.FOSSA_API_KEY }}
18 |
--------------------------------------------------------------------------------
/e2e-tests/utils/k8s/deploy_application.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This util can deploy application in K8s cluster.
3 | # The parameters required are
4 | # - app_yml ( Application spec)
5 | # - app_ns ( namespace where application needs to be deployed)
6 |
7 | - name: Deploy Application
8 | shell: kubectl apply -f {{ item }} -n {{ app_ns }}
9 | args:
10 | executable: /bin/bash
11 | with_items: "{{ app_yml }}"
12 |
13 |
14 |
--------------------------------------------------------------------------------
/scripts/yq_utils.sh:
--------------------------------------------------------------------------------
1 | # yq-go eats up blank lines
2 | # this function gets around that using diff with --ignore-blank-lines
3 | yq_ibl()
4 | {
5 | error=0
6 | diff_out=$(diff -B <(cat "$2") <(yq "$1" "$2")) || error=$?
7 | if [ "$error" != "0" ] && [ "$error" != "1" ]; then
8 | exit "$error"
9 | fi
10 | if [ -n "$diff_out" ]; then
11 | echo "$diff_out" | patch --quiet --no-backup-if-mismatch "$2" -
12 | fi
13 | }
14 |
--------------------------------------------------------------------------------
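`yq_ibl` takes a yq expression as its first argument and the target file as its second, editing the file in place while keeping its blank lines intact. A hypothetical invocation (the expression and file are illustrative):

```bash
# Bump a field without yq-go collapsing the file's blank lines.
source scripts/yq_utils.sh
yq_ibl '.version = "4.5.0"' deploy/helm/charts/Chart.yaml
```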
/e2e-tests/chaoslib/kubectl/app_pod_failure.yaml:
--------------------------------------------------------------------------------
1 | - name: Get the application pod name
2 | shell: kubectl get pod -l {{ label }} -n {{ namespace }} -o=custom-columns=NAME:".metadata.name" --no-headers
3 | args:
4 | executable: /bin/bash
5 | register: app_pod
6 |
7 | - name: Kill the application pod
8 | shell: >
9 | kubectl delete pod {{ app_pod.stdout }} -n {{ app_ns }}
10 | args:
11 | executable: /bin/bash
12 |
13 |
--------------------------------------------------------------------------------
/GOVERNANCE.md:
--------------------------------------------------------------------------------
1 | # Governance
2 |
3 | ## Umbrella Project
4 |
5 | OpenEBS is an `Umbrella Project` whose governance and policies are defined in the [community](https://github.com/openebs/community/) repository.
6 | These policies are applicable to every sub-project, repository and file existing within the [OpenEBS GitHub organization](https://github.com/openebs/).
7 |
8 | This project follows the [OpenEBS Governance](https://github.com/openebs/community/blob/HEAD/GOVERNANCE.md)
9 |
--------------------------------------------------------------------------------
/MAINTAINERS.md:
--------------------------------------------------------------------------------
1 | # Maintainers
2 |
3 | ## Umbrella Project
4 |
5 | OpenEBS is an `Umbrella Project` whose governance and policies are defined in the [community](https://github.com/openebs/community/) repository.
6 | These policies are applicable to every sub-project, repository and file existing within the [OpenEBS GitHub organization](https://github.com/openebs/).
7 |
8 | Please refer to the [OpenEBS Maintainers](https://github.com/openebs/community/blob/HEAD/MAINTAINERS.md).
9 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/chaos/app_pod_failure/test_vars.yml:
--------------------------------------------------------------------------------
1 | test_name: application-pod-failure
2 |
3 | app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
4 |
5 | app_label: "{{ lookup('env','APP_LABEL') }}"
6 |
7 | liveness_label: "{{ lookup('env','LIVENESS_APP_LABEL') }}"
8 |
9 | liveness_namespace: "{{ lookup('env','LIVENESS_APP_NAMESPACE') }}"
10 |
11 | cri: "{{ lookup('env','CONTAINER_RUNTIME') }}"
12 |
13 | data_persistence: "{{ lookup('env','DATA_PERSISTENCE') }}"
14 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/chaos/local_pv_disk_reuse/test_vars.yml:
--------------------------------------------------------------------------------
1 | application_statefulset: busybox_statefulset.yml
2 | app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
3 | app_label: "{{ lookup('env','APP_LABEL') }}"
4 | test_name: "local-pv-disk-reusability"
5 | app_replica: "{{ lookup('env','APP_REPLICA') }}"
6 | application_name: "busybox"
7 | app_pvc: "{{ lookup('env','APP_PVC') }}"
8 | storage_class: "{{ lookup('env','PROVIDER_STORAGE_CLASS') }}"
9 | operator_ns: openebs
10 |
--------------------------------------------------------------------------------
/scripts/log.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Write output to error output stream.
4 | log_to_stderr() {
5 | echo -e "${1}" >&2
6 | }
7 |
8 | log_error() {
9 | log_to_stderr "ERROR: $1"
10 | }
11 |
12 | log_warn() {
13 | log_to_stderr "WARNING: $1"
14 | }
15 |
16 | # Exit with error status and print error.
17 | log_fatal() {
18 | local -r _return="${2:-1}"
19 | log_error "$1"
20 | exit "${_return}"
21 | }
22 |
23 | log() {
24 | echo -e "${1}"
25 | }
26 |
--------------------------------------------------------------------------------
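A short sketch of how sibling scripts would consume these helpers (the messages and exit code are illustrative):

```bash
source scripts/log.sh

log_warn "CHART_VERSION not set, falling back to develop"
# log_fatal prints to stderr and exits with the given status (default 1).
log_fatal "unsupported chart version" 2
```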
/deploy/helm/charts/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/localpv-provisioning-selected-device/test_vars.yml:
--------------------------------------------------------------------------------
1 | # Test-specific parameters
2 |
3 | operator_ns: "{{ lookup('env','OPERATOR_NS') }}"
4 |
5 | test_name: localpv-selected-device
6 |
7 | device_tag: "{{ lookup('env','BD_TAG') }}"
8 |
9 | pvc_name: "{{ lookup('env','PVC') }}"
10 |
11 | namespace: "{{ lookup('env','APP_NAMESPACE') }}"
12 |
13 | sc_name: localpv-selected-device-sc
14 |
15 | test_case_type: "{{ lookup('env','TEST_CASE_TYPE') }}"
16 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 |
3 | ## Umbrella Project
4 |
5 | OpenEBS is an `Umbrella Project` whose governance and policies are defined in the [community](https://github.com/openebs/community/) repository.
6 | These policies are applicable to every sub-project, repository and file existing within the [OpenEBS GitHub organization](https://github.com/openebs/).
7 |
8 | This project follows the [OpenEBS Code of Conduct](https://github.com/openebs/community/blob/HEAD/CODE_OF_CONDUCT.md)
9 |
--------------------------------------------------------------------------------
/deploy/helm/charts/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | The OpenEBS Dynamic LocalPV Provisioner has been installed.
2 | Check its status by running:
3 | $ kubectl get pods -n {{ .Release.Namespace }}
4 |
5 | Get started with the Dynamic LocalPV Provisioner Quickstart guide at:
6 | https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md
7 |
8 | For more information, visit our Slack at https://kubernetes.slack.com/messages/openebs or view
9 | the OpenEBS documentation online at https://openebs.io/docs
10 |
--------------------------------------------------------------------------------
/e2e-tests/apps/percona/deployers/test_vars.yml:
--------------------------------------------------------------------------------
 1 | # Test-specific parameters
2 |
3 | app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
4 | operator_ns: 'openebs'
5 | app_label: "{{ lookup('env','APP_LABEL') }}"
6 | test_name: percona-deployment
7 | application_name: "percona"
8 | action: "{{ lookup('env','ACTION') }}"
9 | app_pvc: "{{ lookup('env','APP_PVC') }}"
10 | storage_class: "{{ lookup('env','PROVIDER_STORAGE_CLASS') }}"
11 | capacity: "{{ lookup('env','CAPACITY') }}"
12 | application_deployment: percona.yml
13 |
14 |
--------------------------------------------------------------------------------
/e2e-tests/utils/k8s/create_ns.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Obtain list of existing namespaces
3 | shell: >
4 | kubectl get ns
5 | --no-headers
6 | -o custom-columns=:metadata.name
7 | args:
8 | executable: /bin/bash
9 | register: ns_list
10 |
11 | - name: Create test specific namespace.
12 | shell: kubectl create ns {{ app_ns }}
13 | args:
14 | executable: /bin/bash
15 | when: app_ns != 'e2e' and app_ns not in ns_list.stdout_lines
16 |
17 | - include_tasks: /e2e-tests/utils/k8s/status_testns.yml
--------------------------------------------------------------------------------
/e2e-tests/hack/e2e-result.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: e2e.io/v1alpha1
3 | kind: E2eResult
4 | metadata:
5 |
6 | # name of the e2e testcase
7 | name: {{ test }}
8 | spec:
9 |
10 | # holds information on the testcase
11 | testMetadata:
12 | app: {{ app }}
13 | chaostype: {{ chaostype }}
14 |
15 | # holds the state of testcase, manually updated by json merge patch
16 | # result is the useful value today, but anticipate phase use in future
17 | testStatus:
18 | phase: {{ phase }}
19 | result: {{ verdict }}
20 |
--------------------------------------------------------------------------------
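As the `testStatus` comment notes, the result is filled in after the run via a JSON merge patch; a hypothetical patch (the resource name and values are illustrative):

```bash
# Sketch: mark a rendered E2eResult as passed once the test finishes.
kubectl patch e2eresult app-pod-failure --type merge \
  -p '{"spec": {"testStatus": {"phase": "completed", "result": "Pass"}}}'
```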
/e2e-tests/experiments/functional/localpv-provisioning-selected-device/storage_class.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: storage.k8s.io/v1
3 | kind: StorageClass
4 | metadata:
5 | name: {{ sc_name }}
6 | annotations:
7 | openebs.io/cas-type: local
8 | cas.openebs.io/config: |
9 | - name: StorageType
10 | value: "device"
11 | - name: BlockDeviceSelectors
12 | data:
13 | openebs.io/block-device-tag: "{{ device_tag }}"
14 | provisioner: openebs.io/local
15 | volumeBindingMode: WaitForFirstConsumer
16 | reclaimPolicy: Delete
17 |
--------------------------------------------------------------------------------
/e2e-tests/utils/scm/openebs/fetch_replica_values.yml:
--------------------------------------------------------------------------------
1 | ---
 2 | #Reading the replica values from env and replacing them in the application label
 3 | #placeholder
4 | - name: Get the application replica values from env
5 | set_fact:
6 | app_rkey: "{{ app_replica.split('=')[0] }}"
7 | app_rvalue: "{{ app_replica.split('=')[1] }}"
8 |
9 | - name: Replace the application replica placeholder in statefulset spec.
10 | replace:
11 | path: "{{ application_deployment }}"
12 | regexp: "rkey: rvalue"
13 | replace: "{{ app_rkey }}: {{ app_rvalue }}"
14 |
15 |
16 |
--------------------------------------------------------------------------------
/e2e-tests/utils/k8s/application_liveness_check.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Get the liveness pods
3 | shell: kubectl get pod -n {{ liveness_namespace }} -l {{ liveness_label }} -o=custom-columns=NAME:".metadata.name" --no-headers
4 | register: liveness_pods
5 |
6 | - name: Checking status of liveness pods
7 | shell: kubectl get pods {{ item }} -n {{ liveness_namespace }} -o=custom-columns=NAME:".status.phase" --no-headers
8 | register: result
9 | with_items: "{{ liveness_pods.stdout_lines }}"
10 | until: "'Running' in result.stdout"
11 | delay: 10
12 | retries: 10
13 |
14 |
--------------------------------------------------------------------------------
/e2e-tests/utils/k8s/deploy_single_app.yml:
--------------------------------------------------------------------------------
1 | ---
 2 | #Deploying the application on the k8s cluster and cross-checking whether the
 3 | #application is deployed successfully.
4 | - name: Deploying {{ application_name }}
5 | k8s:
6 | state: present
7 | src: "{{ application_deployment }}"
8 | namespace: "{{ app_ns }}"
9 | merge_type: merge
10 | register: result
11 |
12 | - include_tasks: /e2e-tests/utils/k8s/status_app_pod.yml
13 | when: check_app_pod == 'yes'
14 |
15 | - include_tasks: /e2e-tests/utils/scm/openebs/check_replica_count.yml
16 | when: lookup('env','DEPLOY_TYPE') == 'statefulset'
17 |
18 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/backup_and_restore/patch.yml:
--------------------------------------------------------------------------------
1 | {
2 | "spec": {
3 | "template": {
4 | "spec": {
5 | "containers": [
6 | {
7 | "image": "gcr.io/heptio-images/velero:v1.1.0",
8 | "imagePullPolicy": "IfNotPresent",
9 | "name": "restic",
10 | "securityContext": {
11 | "privileged": true
12 | }
13 | }
14 | ]
15 | }
16 | }
17 | }
18 | }
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # Default code owners
2 | * @openebs/localpv-hostpath-approvers
3 |
4 | # CODEOWNERS file owners
5 | /.github/CODEOWNERS @openebs/org-maintainers
6 |
7 | # Policy docs owners
8 | /CODE_OF_CONDUCT.md @openebs/org-maintainers
9 | /CONTRIBUTING.md @openebs/org-maintainers
10 | /GOVERNANCE.md @openebs/org-maintainers
11 | /LICENSE @openebs/org-maintainers
12 | /MAINTAINERS.md @openebs/org-maintainers
13 | /SECURITY.md @openebs/org-maintainers
14 | /SECURITY_CONTACTS.md @openebs/org-maintainers
15 |
--------------------------------------------------------------------------------
/nix/sources.json:
--------------------------------------------------------------------------------
1 | {
2 | "nixpkgs": {
3 | "branch": "release-23.11",
4 | "description": "Nix Packages collection & NixOS",
5 | "homepage": "https://github.com/NixOS/nixpkgs",
6 | "owner": "NixOS",
7 | "repo": "nixpkgs",
8 | "rev": "205fd4226592cc83fd4c0885a3e4c9c400efabb5",
9 | "sha256": "1f5d2g1p6nfwycpmrnnmc2xmcszp804adp16knjvdkj8nz36y1fg",
10 | "type": "tarball",
11 | "url": "https://github.com/NixOS/nixpkgs/archive/205fd4226592cc83fd4c0885a3e4c9c400efabb5.tar.gz",
12 | "url_template": "https://github.com///archive/.tar.gz"
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/e2e-tests/apps/busybox/deployers/test_vars.yml:
--------------------------------------------------------------------------------
1 | application_statefulset: busybox_statefulset.yml
2 | application_deployment: busybox_deployment.yml
3 | app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
4 | app_label: "{{ lookup('env','APP_LABEL') }}"
5 | test_name: "busybox-{{ action }}-{{ app_ns }}"
6 | deploy_type: "{{ lookup('env','DEPLOY_TYPE') }}"
7 | app_replica: "{{ lookup('env','APP_REPLICA') }}"
8 | affinity_label: "{{ lookup('env','AFFINITY_LABEL') }}"
9 | application_name: "busybox"
10 | action: "{{ lookup('env','ACTION') }}"
11 | app_pvc: "{{ lookup('env','APP_PVC') }}"
12 | storage_class: "{{ lookup('env','PROVIDER_STORAGE_CLASS') }}"
13 | operator_ns: openebs
14 |
--------------------------------------------------------------------------------
/e2e-tests/apps/percona/workload/test_vars.yml:
--------------------------------------------------------------------------------
1 | percona_loadgen: tpcc_bench.yml
2 | namespace: "{{ lookup('env','APP_NAMESPACE') }}"
3 | test_name: percona-loadgen
4 | app_service_label: "{{ lookup('env','APP_SERVICE_LABEL') }}"
5 | loadgen_label: "{{ lookup('env','LOADGEN_LABEL') }}"
6 | db_user: "{{ lookup('env','DB_USER') }}"
7 | db_password: "{{ lookup('env','DB_PASSWORD') }}"
8 | app_label: "{{ lookup('env','APP_LABEL') }}"
9 | load_duration: "{{ lookup('env','LOAD_DURATION') }}"
10 | test_warehouse: "{{ lookup('env','TPCC_WAREHOUSES') }}"
11 | test_connections: "{{ lookup('env','TPCC_CONNECTIONS') }}"
12 | test_warmup_period: "{{ lookup('env','TPCC_WARMUP_PERIOD') }}"
13 | test_interval: "{{ lookup('env','LOAD_INTERVAL') }}"
14 | tpcc_conf: tpcc.conf
15 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/localpv-provisioner/localpv-sc.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: storage.k8s.io/v1
3 | kind: StorageClass
4 | metadata:
5 | name: openebs-device
6 | annotations:
7 | openebs.io/cas-type: local
8 | cas.openebs.io/config: |
9 | - name: StorageType
10 | value: device
11 | provisioner: openebs.io/local
12 | reclaimPolicy: Delete
13 | volumeBindingMode: WaitForFirstConsumer
14 |
15 | ---
16 | apiVersion: storage.k8s.io/v1
17 | kind: StorageClass
18 | metadata:
19 | name: openebs-hostpath
20 | annotations:
21 | openebs.io/cas-type: local
22 | cas.openebs.io/config: |
23 | - name: StorageType
24 | value: hostpath
25 | provisioner: openebs.io/local
26 | reclaimPolicy: Delete
27 | volumeBindingMode: WaitForFirstConsumer
28 |
--------------------------------------------------------------------------------
/e2e-tests/apps/percona/workload/tpcc_bench.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: batch/v1
3 | kind: Job
4 | metadata:
5 | name: tpcc-bench
6 | spec:
7 | template:
8 | metadata:
9 | name: tpcc-bench
10 | labels:
11 | loadgen_lkey: loadgen_lvalue
12 | spec:
13 | restartPolicy: Never
14 | containers:
15 | - name: tpcc-bench
16 | image: openebs/tests-tpcc-client
17 | command: ["/bin/bash"]
18 | args: ["-c", "./tpcc-runner.sh service_ip tpcc.conf; exit 0"]
19 | volumeMounts:
20 | - name: tpcc-configmap
21 | mountPath: /tpcc-mysql/tpcc.conf
22 | subPath: tpcc.conf
23 | tty: true
24 | volumes:
25 | - name: tpcc-configmap
26 | configMap:
27 | name: tpcc-config
28 |
--------------------------------------------------------------------------------
/e2e-tests/utils/scm/openebs/check_replica_count.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - block:
3 | - name: Obtain the number of replicas.
4 | shell: kubectl get statefulset -n {{ app_ns }} --no-headers -l "{{app_label}}" -o custom-columns=:spec.replicas
5 | args:
6 | executable: /bin/bash
7 | register: rep_count
8 | until: "rep_count.rc == 0"
9 | delay: 60
10 | retries: 15
11 |
12 | - name: Obtain the ready replica count and compare with the replica count.
13 | shell: kubectl get statefulset -n {{ app_ns }} -l "{{app_label}}" --no-headers -o custom-columns=:..readyReplicas
14 | args:
15 | executable: /bin/bash
16 | register: ready_rep
17 | until: "ready_rep.rc == 0 and ready_rep.stdout|int == rep_count.stdout|int"
18 | delay: 60
19 | retries: 30
--------------------------------------------------------------------------------
/e2e-tests/utils/k8s/status_app_pod.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Get the container status of application.
3 | shell: >
4 | kubectl get pod -n {{ app_ns }} -l {{app_lkey}}="{{app_lvalue}}"
5 | -o custom-columns=:..containerStatuses[].state --no-headers | grep -w "running"
6 | args:
7 | executable: /bin/bash
8 | register: containerStatus
9 | until: "'running' in containerStatus.stdout"
10 | delay: 2
11 | retries: 150
12 |
13 | - name: Checking {{ application_name }} pod is in running state
14 | shell: kubectl get pods -n {{ app_ns }} -o jsonpath='{.items[?(@.metadata.labels.{{app_lkey}}=="{{app_lvalue}}")].status.phase}'
15 | register: result
16 | until: "((result.stdout.split()|unique)|length) == 1 and 'Running' in result.stdout"
17 | delay: '{{ delay }}'
18 | retries: '{{ retries }}'
19 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/localpv-provisioner/charts_operator_provision.yml:
--------------------------------------------------------------------------------
1 | - name: Downloading the operator file from charts
2 | get_url:
3 | url: "{{ charts_hostpath_operator }}"
4 | dest: "{{ playbook_dir }}/{{ hostpath_operator }}"
5 | force: yes
6 | register: status
7 | until: "'OK' in status.msg"
8 | delay: 5
9 | retries: 3
10 |
11 | - name: Deploy openebs operator
12 | shell: >
13 | kubectl apply -f {{ hostpath_operator }}
14 | args:
15 | executable: /bin/bash
16 | register: deploy_status
17 | when: lookup('env','ACTION') == "provision"
18 |
19 | - name: Removing openebs operator
20 | shell: >
21 | kubectl delete -f {{ hostpath_operator }}
22 | args:
23 | executable: /bin/bash
24 | register: deploy_status
25 | when: lookup('env','ACTION') == "deprovision"
26 |
--------------------------------------------------------------------------------
/e2e-tests/hack/rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: e2e
5 | ---
6 | apiVersion: v1
7 | kind: ServiceAccount
8 | metadata:
9 | name: e2e
10 | namespace: e2e
11 | labels:
12 | name: e2e
13 | ---
14 | # Source: openebs/templates/clusterrole.yaml
15 | apiVersion: rbac.authorization.k8s.io/v1
16 | kind: ClusterRole
17 | metadata:
18 | name: e2e
19 | labels:
20 | name: e2e
21 | rules:
22 | - apiGroups: ["*"]
23 | resources: ["*"]
24 | verbs: ["*"]
25 | ---
26 | apiVersion: rbac.authorization.k8s.io/v1
27 | kind: ClusterRoleBinding
28 | metadata:
29 | name: e2e
30 | labels:
31 | name: e2e
32 | roleRef:
33 | apiGroup: rbac.authorization.k8s.io
34 | kind: ClusterRole
35 | name: e2e
36 | subjects:
37 | - kind: ServiceAccount
38 | name: e2e
39 | namespace: e2e
40 |
--------------------------------------------------------------------------------
/deploy/kubectl/fillup-localpv-hostpath.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: fillup
5 | namespace: default
6 | spec:
7 | containers:
8 | - command:
9 | - sh
10 | - -c
11 | - 'dd if=/dev/zero of=/mnt/store1/dump.dd bs=1M; sync; sleep 5; sync; tail -f /dev/null;'
12 | image: busybox
13 | imagePullPolicy: Always
14 | name: fillup-bb
15 | volumeMounts:
16 | - mountPath: /mnt/store1
17 | name: fillup
18 | volumes:
19 | - name: fillup
20 | persistentVolumeClaim:
21 | claimName: fillup-claim
22 | ---
23 | kind: PersistentVolumeClaim
24 | apiVersion: v1
25 | metadata:
26 | name: fillup-claim
27 | spec:
28 | storageClassName: openebs-hostpath
29 | accessModes:
30 | - ReadWriteOnce
31 | resources:
32 | requests:
33 | storage: 1G
34 |
--------------------------------------------------------------------------------
/deploy/kubectl/busybox-localpv-path.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: busybox
5 | namespace: default
6 | spec:
7 | containers:
8 | - command:
9 | - sh
10 | - -c
11 | - 'date >> /mnt/store1/date.txt; hostname >> /mnt/store1/hostname.txt; sync; sleep 5; sync; tail -f /dev/null;'
12 | image: busybox
13 | imagePullPolicy: Always
14 | name: busybox
15 | volumeMounts:
16 | - mountPath: /mnt/store1
17 | name: demo-vol1
18 | volumes:
19 | - name: demo-vol1
20 | persistentVolumeClaim:
21 | claimName: demo-vol1-claim
22 | ---
23 | kind: PersistentVolumeClaim
24 | apiVersion: v1
25 | metadata:
26 | name: demo-vol1-claim
27 | spec:
28 | storageClassName: openebs-hostpath
29 | accessModes:
30 | - ReadWriteOnce
31 | resources:
32 | requests:
33 | storage: 5G
34 |
--------------------------------------------------------------------------------
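To exercise the manifest above (a sketch; assumes the `openebs-hostpath` StorageClass from `deploy/kubectl/openebs-hostpath-sc.yaml` is installed):

```bash
kubectl apply -f deploy/kubectl/busybox-localpv-path.yaml
# The PVC stays Pending until the pod schedules (WaitForFirstConsumer).
kubectl get pvc demo-vol1-claim -n default
kubectl exec -n default busybox -- cat /mnt/store1/date.txt
```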
/.github/ISSUE_TEMPLATE/feature-request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea to improve openebs dynamic local pv provisioners
4 | labels: Enhancement
5 |
6 | ---
7 |
8 | **Describe the problem/challenge you have**
9 | [A description of the current limitation/problem/challenge that you are experiencing.]
10 |
11 |
12 | **Describe the solution you'd like**
13 | [A clear and concise description of what you want to happen.]
14 |
15 |
16 | **Anything else you would like to add:**
17 | [Miscellaneous information that will assist in solving the issue.]
18 |
19 |
20 | **Environment:**
21 | - OpenEBS version (use `kubectl get po -n openebs --show-labels`):
22 | - Kubernetes version (use `kubectl version`):
23 | - Cloud provider or hardware configuration:
24 | - OS (e.g: `cat /etc/os-release`):
25 | - kernel (e.g: `uname -a`):
26 | - others:
27 |
--------------------------------------------------------------------------------
/deploy/helm/charts/templates/psp.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.rbac.pspEnabled }}
2 | apiVersion: policy/v1beta1
3 | kind: PodSecurityPolicy
4 | metadata:
5 | name: {{ template "localpv.fullname" . }}-psp
6 | {{- with .Values.localpv.annotations }}
7 | annotations: {{ toYaml . | nindent 4 }}
8 | {{- end }}
9 | labels:
10 | {{- include "localpv.labels" . | nindent 4 }}
11 | {{- if .Values.extraLabels -}}
12 | {{- toYaml .Values.extraLabels | nindent 4 }}
13 | {{- end }}
14 | spec:
15 | privileged: {{ .Values.localpv.privileged }}
16 | allowPrivilegeEscalation: true
17 | allowedCapabilities: ['*']
18 | volumes: ['*']
19 | hostNetwork: true
20 | hostPorts:
21 | - min: 0
22 | max: 65535
23 | hostIPC: true
24 | hostPID: true
25 | runAsUser:
26 | rule: 'RunAsAny'
27 | seLinux:
28 | rule: 'RunAsAny'
29 | supplementalGroups:
30 | rule: 'RunAsAny'
31 | fsGroup:
32 | rule: 'RunAsAny'
33 | {{- end }}
34 |
--------------------------------------------------------------------------------
/deploy/helm/charts/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: localpv-provisioner
3 | description: Helm chart for OpenEBS Dynamic Local PV. For instructions to install OpenEBS Dynamic Local PV using helm chart, refer to https://openebs.github.io/dynamic-localpv-provisioner/.
4 | type: application
5 | # This is the chart version. This version number should be incremented each time you make changes
6 | # to the chart and its templates, including the app version.
7 | version: 4.5.0-develop
8 | # This is the version number of the application being deployed. This version number should be
9 | # incremented each time you make changes to the application.
10 | appVersion: 4.5.0-develop
11 | icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/openebs/icon/color/openebs-icon-color.png
12 | home: http://www.openebs.io/
13 | keywords:
14 | - storage
15 | - local
16 | - dynamic-localpv
17 | sources:
18 | - https://github.com/openebs/dynamic-localpv-provisioner
19 |
--------------------------------------------------------------------------------
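The chart description points to the hosted Helm repo; a minimal install sketch (the repo alias, release name, and namespace are illustrative):

```bash
helm repo add openebs-localpv https://openebs.github.io/dynamic-localpv-provisioner/
helm repo update
helm install localpv openebs-localpv/localpv-provisioner \
  --namespace openebs --create-namespace
```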
/e2e-tests/chaoslib/containerd_chaos/containerd-chaos-ds.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: containerd-chaos
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: crictl
9 | template:
10 | metadata:
11 | labels:
12 | app: crictl
13 | name: containerd-chaos
14 | spec:
15 | containers:
16 | - image: gprasath/crictl:ci
17 | imagePullPolicy: IfNotPresent
18 | name: containerd-chaos
19 | command: ['sh', '-c', 'echo Hello! && sleep 1800']
20 | volumeMounts:
21 | - name: cri-socket
22 | mountPath: /run/containerd/containerd.sock
23 | - name: cri-config
24 | mountPath: /etc/crictl.yaml
25 | volumes:
26 | - hostPath:
27 | path: /run/containerd/containerd.sock
28 | name: cri-socket
29 | - hostPath:
30 | path: /etc/crictl.yaml
31 | name: cri-config
32 |
33 |
--------------------------------------------------------------------------------
/e2e-tests/utils/k8s/check_statefulset_status.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This utility checks if all the replicas in a statefulset are running.
3 | # The parameters required are
4 | # - app_ns (Namespace on which the application is deployed)
 5 | # - app_label (Label of application in the form 'key=value')
 6 | #
7 | - name: Obtain the number of replicas.
8 | shell: kubectl get statefulset -n {{ app_ns }} -l {{ app_label }} -o custom-columns=:spec.replicas
9 | args:
10 | executable: /bin/bash
11 | register: rep_count
12 | until: "rep_count.rc == 0"
13 | delay: 60
14 | retries: 15
15 |
16 | - name: Obtain the ready replica count and compare with the replica count.
17 | shell: kubectl get statefulset -n {{ app_ns }} -l {{ app_label }} -o custom-columns=:..readyReplicas
18 | args:
19 | executable: /bin/bash
20 | register: ready_rep
21 | until: "ready_rep.rc == 0 and ready_rep.stdout|int == rep_count.stdout|int"
22 | delay: 60
23 | retries: 15
24 |
--------------------------------------------------------------------------------
/e2e-tests/chaoslib/crio_chaos/crio-chaos-ds.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: crio-chaos
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: crio
9 | template:
10 | metadata:
11 | labels:
12 | app: crio
13 | name: crio-chaos
14 | spec:
15 | containers:
16 | - image: quay.io/nsathyaseelan/crictl:latest
17 | imagePullPolicy: IfNotPresent
18 | name: crio-chaos
19 | command: ['sh', '-c', 'echo Hello! && sleep 1800']
20 | volumeMounts:
21 | - name: cri-socket
22 | mountPath: /run/crio/crio.sock
23 | - name: cri-config
24 | mountPath: /etc/crictl.yaml
25 | securityContext:
26 | privileged: true
27 | volumes:
28 | - hostPath:
29 | path: /run/crio/crio.sock
30 | name: cri-socket
31 | - hostPath:
32 | path: /etc/crictl.yaml
33 | name: cri-config
34 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/backup_and_restore/test_vars.yml:
--------------------------------------------------------------------------------
1 | test_name: "velero-backup-restore"
2 | app_ns: "{{ lookup('env','APP_NAMESPACE') }}"
3 | app_ns_new: "{{ lookup('env','APP_NAMESPACE') }}-local"
4 | app_label: "{{ lookup('env','APP_LABEL') }}"
5 | app_pvc: "{{ lookup('env','APP_PVC') }}"
6 | operator_ns: "{{ lookup('env','OPERATOR_NAMESPACE') }}"
7 | backup_name: "{{ lookup('env','BACKUP_NAME') }}"
8 | failure_type: "{{ lookup('env','COMPONENT_FAILURE') }}"
9 | velero_plugin_name: "{{ lookup('env','VELERO_PLUGIN_NAME') }}"
10 | velero_version: "{{ lookup('env','VELERO_VERSION') }}"
11 | storage_engine: "{{ lookup('env','STORAGE_ENGINE') }}"
12 | profile_name: "{{ lookup('env','PROFILE') }}"
13 | bucket_type: "{{ lookup('env','STORAGE_BUCKET') }}"
14 | gcp_project_id: "{{ lookup('env','GCP_PROJECT_ID') }}"
15 | velero_binary_url: "https://github.com/vmware-tanzu/velero/releases/download/{{ lookup('env','VELERO_VERSION') }}/velero-{{ lookup('env','VELERO_VERSION') }}-linux-amd64.tar.gz"
--------------------------------------------------------------------------------
/docs/installation/platforms/minikube.md:
--------------------------------------------------------------------------------
1 | # Install Dynamic-LocalPV-Provisioner on minikube
2 |
3 | Follow the instructions below when installing dynamic-localpv-provisioner on minikube.
4 |
 5 | ## Using no VM driver
6 |
 7 | The node-disk-manager DaemonSet Pods require Kubernetes hostpath mounts for the directories '/run/udev', '/dev', '/proc', '/var/openebs' and '/var/openebs/sparse' from the host node. Running minikube with the 'none' driver allows the kubelet to mount the directories into a mountpoint inside the NDM DaemonSet Pods.
8 |
9 | Run minikube with the flag `--driver=none` to run minikube with no VM driver.
10 |
11 | ```bash
12 | minikube start --driver=none
13 | ```
14 |
15 | For more information on using the 'none' driver flag argument, [read the official minikube docs](https://minikube.sigs.k8s.io/docs/drivers/none/).
16 |
17 | After minikube is started with no VM driver, proceed with installation as described in [the quickstart](https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md).
18 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/data-integrity/run_e2e_test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: batch/v1
3 | kind: Job
4 | metadata:
5 | generateName: e2e-di-fio-
6 | namespace: e2e
7 | spec:
8 | template:
9 | metadata:
10 | labels:
11 | name: e2e
12 | app: fio-di-e2e
13 | spec:
14 | serviceAccountName: e2e
15 | restartPolicy: Never
16 | containers:
17 | - name: ansibletest
18 | image: openebs/localpv-e2e:ci
19 | env:
20 | - name: ANSIBLE_STDOUT_CALLBACK
21 | value: default
22 |
23 | - name: PROVIDER_STORAGE_CLASS
24 | value: openebs-hostpath
25 |
26 | - name: FIO_NAMESPACE
27 | value: fio
28 |
29 | - name: FIO_SAMPLE_SIZE
30 | value: "128m"
31 |
32 | - name: FIO_TESTRUN_PERIOD
33 | value: "60"
34 |
35 | command: ["/bin/bash"]
36 | args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/data-integrity/test.yml -i /etc/ansible/hosts -v; exit 0"]
37 |
--------------------------------------------------------------------------------
/deploy/kubectl/openebs-hostpath-sc.yaml:
--------------------------------------------------------------------------------
1 | #Sample storage classes for OpenEBS Local PV
2 | apiVersion: storage.k8s.io/v1
3 | kind: StorageClass
4 | metadata:
5 | name: openebs-hostpath
6 | annotations:
7 | openebs.io/cas-type: local
8 | cas.openebs.io/config: |
9 | #hostpath type will create a PV by
10 | # creating a sub-directory under the
11 | # BASEPATH provided below.
12 | - name: StorageType
13 | value: "hostpath"
14 |     #Specify the location (directory)
15 |     # where PV(volume) data will be saved.
16 | # A sub-directory with pv-name will be
17 | # created. When the volume is deleted,
18 | # the PV sub-directory will be deleted.
19 | #Default value is /var/openebs/local
20 | - name: BasePath
21 | value: "/var/openebs/local/"
22 | #Specify the node affinity label
23 | # to be added to the PV
24 | #Default: kubernetes.io/hostname
25 | #- name: NodeAffinityLabels
26 | # list:
27 | # - "openebs.io/stg-node-name"
28 | provisioner: openebs.io/local
29 | volumeBindingMode: WaitForFirstConsumer
30 | reclaimPolicy: Delete
31 |
--------------------------------------------------------------------------------
/pkg/kubernetes/api/core/v1/persistentvolume/persistentvolume_test.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2018-2020 The OpenEBS Authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package persistentvolume
16 |
17 | import (
18 | corev1 "k8s.io/api/core/v1"
19 | )
20 |
21 | func fakeAPIPVList(pvNames []string) *corev1.PersistentVolumeList {
22 | if len(pvNames) == 0 {
23 | return nil
24 | }
25 | list := &corev1.PersistentVolumeList{}
26 | for _, name := range pvNames {
27 | pv := corev1.PersistentVolume{}
28 | pv.SetName(name)
29 | list.Items = append(list.Items, pv)
30 | }
31 | return list
32 | }
33 |
--------------------------------------------------------------------------------
/e2e-tests/apps/busybox/deployers/busybox_statefulset.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | lkey: lvalue
6 | name: busybox
7 | spec:
8 | clusterIP: None
9 | selector:
10 | lkey: lvalue
11 | ---
12 | apiVersion: apps/v1
13 | kind: StatefulSet
14 | metadata:
15 | name: busybox
16 | labels:
17 | lkey: lvalue
18 | spec:
19 | serviceName: busybox
20 | rkey: rvalue
21 | selector:
22 | matchLabels:
23 | lkey: lvalue
24 | template:
25 | metadata:
26 | labels:
27 | lkey: lvalue
28 | spec:
29 | containers:
30 | - name: busybox
31 | image: gcr.io/google-containers/busybox
32 | imagePullPolicy: IfNotPresent
33 | command:
34 | - sh
35 | - -c
36 | - 'date > /busybox/date.txt; sync; sleep 5; sync; tail -f /dev/null;'
37 | volumeMounts:
38 | - name: testclaim
39 | mountPath: /busybox
40 | volumeClaimTemplates:
41 | - metadata:
42 | name: testclaim
43 | spec:
44 | accessModes: [ "ReadWriteOnce" ]
45 | storageClassName: testclass
46 | resources:
47 | requests:
48 | storage: teststorage
49 |
--------------------------------------------------------------------------------
/e2e-tests/chaoslib/kubectl/app_pod_random_delete.yaml:
--------------------------------------------------------------------------------
1 | - name: Derive chaos iterations
2 | set_fact:
3 | chaos_iterations: "{{ (chaos_duration|int / chaos_interval|int)|int }}"
4 |
5 | - name: Set min chaos count to 1 if interval > duration
6 | set_fact:
7 | chaos_iterations: 1
8 | when: "chaos_iterations == '0'"
9 |
10 | - name: Kill application pods randomly for the specified duration
11 | shell: |
12 | kubectl get pods -l {{ label }} -n {{ namespace }} --no-headers -o custom-columns=:metadata.name | shuf -n 1 | xargs kubectl delete pod --force --grace-period=0 --wait=false
13 | sleep {{ chaos_interval }}
14 | args:
15 | executable: /bin/bash
16 | register: result
17 | with_sequence: start=1 end={{ chaos_iterations }}
18 | when: "c_force == 'true'"
19 |
20 | - name: Kill application pods randomly for the specified duration
21 | shell: |
22 | kubectl get pods -l {{ label }} -n {{ namespace }} --no-headers -o custom-columns=:metadata.name | shuf -n 1 | xargs kubectl delete pod
23 | sleep {{ chaos_interval }}
24 | args:
25 | executable: /bin/bash
26 | register: result
27 | with_sequence: start=1 end={{ chaos_iterations }}
28 | when: "c_force == 'false' or c_force == ''"
29 |
--------------------------------------------------------------------------------
/e2e-tests/apps/busybox/deployers/busybox_deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | lkey: lvalue
6 | name: busybox
7 | spec:
8 | clusterIP: None
9 | selector:
10 | lkey: lvalue
11 | ---
12 | apiVersion: apps/v1
13 | kind: Deployment
14 | metadata:
15 | name: app-busybox
16 | labels:
17 | lkey: lvalue
18 | spec:
19 | replicas: 1
20 | selector:
21 | matchLabels:
22 | lkey: lvalue
23 |
24 | template:
25 | metadata:
26 | labels:
27 | lkey: lvalue
28 | spec:
29 | containers:
30 | - name: app-busybox
31 | imagePullPolicy: IfNotPresent
32 | image: gcr.io/google-containers/busybox
33 | command: ["/bin/sh"]
34 | args: ["-c", "while true; do sleep 10;done"]
35 | env: []
36 | volumeMounts:
37 | - name: data-vol
38 | mountPath: /busybox
39 | volumes:
40 | - name: data-vol
41 | persistentVolumeClaim:
42 | claimName: testclaim
43 | ---
44 | kind: PersistentVolumeClaim
45 | apiVersion: v1
46 | metadata:
47 | name: testclaim
48 | spec:
49 | storageClassName: testclass
50 | accessModes:
51 | - ReadWriteOnce
52 | resources:
53 | requests:
54 | storage: teststorage
55 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/localpv-provisioner/test_vars.yml:
--------------------------------------------------------------------------------
1 | hostpath_operator_link: "https://raw.githubusercontent.com/openebs/dynamic-localpv-provisioner/develop/deploy/kubectl/hostpath-operator.yaml"
2 | ndm_operator_link: "https://raw.githubusercontent.com/openebs/node-disk-manager/master/deploy/ndm-operator.yaml"
3 | charts_hostpath_operator: "https://raw.githubusercontent.com/openebs/charts/gh-pages/versioned/{{ lookup('env','RELEASE_VERSION') }}/openebs-operator-lite.yaml"
4 | release_operator_link: "https://raw.githubusercontent.com/openebs/charts/{{ lookup('env','COMMIT_ID') }}/versioned/{{ lookup('env','RELEASE_VERSION') }}/openebs-operator-lite.yaml"
5 | localpv_version: "{{ lookup('env','LOCALPV_PROVISIONER_IMAGE') }}"
6 | release_version: "{{ lookup('env','RELEASE_VERSION') }}"
7 | image_type: "{{ lookup('env','IMAGE_TYPE') }}"
8 | ci_device_operator: hostpath-operator.yaml
9 | hostpath_operator: openebs-operator-lite.yaml
10 | ndm_operator: ndm-operator.yaml
11 | test_name: "device-localpv-{{ lookup('env','ACTION') }}"
12 | namespace: "{{ lookup('env','OPERATOR_NS') }}"
13 | ndm_version: "{{ lookup('env','NDM_VERSION') }}"
14 | commit_id: "{{ lookup('env','COMMIT_ID') }}"
15 | rc_tag: "{{ lookup('env','RC_TAG') }}"
16 | release_tag: "{{ lookup('env','RELEASE_TAG') }}"
17 |
--------------------------------------------------------------------------------
/docs/tutorials/hostpath/allowedtopologies.md:
--------------------------------------------------------------------------------
1 | # Scheduling based on Node label selector
2 |
3 | The ['Allowed Topologies'](https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies) feature allows you to select the Nodes where the application Pods may be scheduled, based on Node labels.
4 |
5 | The nodes preferred for scheduling may be labelled with a unique key and value. Multiple such keys, and multiple values per key, may be specified. The criteria across different keys are AND-ed, while the values listed for a single key are OR-ed.
6 |
7 | The following is a sample StorageClass which allows scheduling on nodes carrying any of the labels `kubernetes.io/hostname=worker-2`, `kubernetes.io/hostname=worker-3` or `kubernetes.io/hostname=worker-5`.
8 |
9 | ```yaml
10 | apiVersion: storage.k8s.io/v1
11 | kind: StorageClass
12 | metadata:
13 | name: custom-hostpath
14 | annotations:
15 | openebs.io/cas-type: local
16 | cas.openebs.io/config: |
17 | - name: StorageType
18 | value: "hostpath"
19 | - name: BasePath
20 | value: "/var/openebs/local"
21 | provisioner: openebs.io/local
22 | volumeBindingMode: WaitForFirstConsumer
23 | allowedTopologies:
24 | - matchLabelExpressions:
25 | - key: kubernetes.io/hostname
26 | values:
27 | - worker-2
28 | - worker-3
29 | - worker-5
30 | ```
31 |
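32 | A PVC can then reference this StorageClass as usual. Below is a minimal sketch (the PVC name and requested size are illustrative):
33 |
34 | ```yaml
35 | kind: PersistentVolumeClaim
36 | apiVersion: v1
37 | metadata:
38 |   name: custom-hostpath-pvc
39 | spec:
40 |   storageClassName: custom-hostpath
41 |   accessModes:
42 |     - ReadWriteOnce
43 |   resources:
44 |     requests:
45 |       storage: 5Gi
46 | ```
47 |
48 | Since the StorageClass uses `WaitForFirstConsumer`, the volume is provisioned only when a Pod consuming this PVC is scheduled onto one of the allowed nodes.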
--------------------------------------------------------------------------------
/docs/tutorials/hostpath/xfs_quota/use-xfs-fs-with-loop-device.md:
--------------------------------------------------------------------------------
1 | # Create XFS filesystem at the basepath as loop device (if filesystem is not XFS)
2 |
3 | If you don't have a device with XFS filesystem, you can use a loop device and create an XFS filesystem on it. This works even if your root filesystem is not XFS.
4 |
5 | The steps below create a 32MiB sparse file, format it as an XFS filesystem, mount it as a loop device and expose it at the directory `/var/openebs/local`.
6 |
7 | 1. Make sure the XFS filesystem utilities (xfsprogs) are installed.
8 |
9 | ```console
10 | sudo apt update
11 | sudo apt-get install -y xfsprogs
12 | # On RHEL/CentOS:
13 | #sudo yum install -y xfsprogs
14 | ```
15 |
16 | 2. Create the directory where the filesystem will be mounted
17 |
18 | ```console
19 | sudo mkdir -p /var/openebs/local
20 | cd /var/openebs
21 | ```
22 |
23 | 3. Create a 32MiB sparse file using `dd` with `seek` (no blocks are allocated up front)
24 |
25 | ```console
26 | sudo dd if=/dev/zero of=xfs.32M bs=1 count=0 seek=32M
27 | ```
28 |
29 | 4. Format the sparse file with an XFS filesystem
30 |
31 | ```console
32 | sudo mkfs -t xfs -q xfs.32M
33 | ```
34 |
35 | 5. Mount it as a loop device with project quota enabled
36 |
37 | ```console
38 | sudo mount -o loop,rw,pquota xfs.32M /var/openebs/local
39 | ```
40 |
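41 | 6. (Optional) Verify that project quota is enabled on the mount. This is a quick sanity check using `xfs_quota`'s expert mode; project quota accounting and enforcement should be reported as ON.
42 |
43 | ```console
44 | sudo xfs_quota -x -c state /var/openebs/local
45 | ```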
--------------------------------------------------------------------------------
/docs/tutorials/hostpath/nodeaffinitylabels.md:
--------------------------------------------------------------------------------
1 | # Use Node-Selector labels
2 |
3 | Hostpath LocalPV uses Kubernetes Node labels (for example, `kubernetes.io/hostname`) to uniquely identify a node.
4 |
5 | In some cases, this label (`hostname`) is not unique across all the nodes in the cluster. This was seen on clusters provisioned with [Bosh](https://bosh.io/docs/) across different fault domains.
6 |
7 | A unique Node label (or set of labels) may be used instead of the above-mentioned Kubernetes default label to uniquely identify a node. These labels are set by you, the administrator.
8 | The labels to be used are specified when defining a StorageClass. One such sample StorageClass is given below...
9 |
10 | ```yaml
11 | apiVersion: storage.k8s.io/v1
12 | kind: StorageClass
13 | metadata:
14 | name: local-hostpath
15 | annotations:
16 | openebs.io/cas-type: local
17 | cas.openebs.io/config: |
18 | - name: StorageType
19 | value: "hostpath"
20 | - name: NodeAffinityLabels
21 | list:
22 | - "openebs.io/custom-node-unique-id"
23 | provisioner: openebs.io/local
24 | volumeBindingMode: WaitForFirstConsumer
25 | ```
26 |
27 | **NOTE**: Using NodeAffinityLabels does not influence scheduling of the application Pod. You may use [allowedTopologies](./allowedtopologies.md) for that.
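28 |
29 | Before using the StorageClass above, make sure the custom label is present on the nodes, with a value that is unique per node. A minimal example of labelling a node (the node name and label value here are illustrative):
30 |
31 | ```console
32 | kubectl label node worker-1 openebs.io/custom-node-unique-id=node-uid-001
33 | ```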
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/data-integrity/fio-write.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | name: basic
6 | data:
7 |
8 | basic-rw : |-
9 |
10 | [global]
11 | directory=/datadir
12 | filename=basic-fio
13 |
14 | [basic-fio]
15 | rw=write
16 | bs=4k
17 | verify=crc32c
18 | verify=pattern
19 | verify_pattern=%o
20 | ---
21 | apiVersion: batch/v1
22 | kind: Job
23 | metadata:
24 | name: fio
25 | spec:
26 | template:
27 | metadata:
28 | name: fio
29 | labels:
30 | name: fio-write
31 | spec:
32 | restartPolicy: Never
33 | containers:
34 | - name: perfrunner
35 | image: openebs/tests-fio:latest
36 | imagePullPolicy: IfNotPresent
37 | command: ["/bin/bash"]
38 | args: ["-c", "./fio_runner.sh --size 256m; sync;exit 0"]
39 | volumeMounts:
40 | - mountPath: /datadir
41 | name: demo-vol1
42 | - mountPath: templates/file/basic-rw
43 | subPath: basic-rw
44 | name: basic-configmap
45 | tty: true
46 |
47 | volumes:
48 | - name: demo-vol1
49 | persistentVolumeClaim:
50 | claimName: demo-vol1-claim
51 | - name: basic-configmap
52 | configMap:
53 | name: basic
54 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/localpv-provisioner/master_operator_provision.yml:
--------------------------------------------------------------------------------
1 | - name: Downloading openebs hostpath operator yaml
2 | get_url:
3 | url: "{{ hostpath_operator_link }}"
4 | dest: "{{ playbook_dir }}/{{ ci_device_operator }}"
5 | force: yes
6 | register: ci_operator
7 |
8 | - name: Downloading NDM operator yaml
9 | get_url:
10 | url: "{{ ndm_operator_link }}"
11 | dest: "{{ playbook_dir }}/{{ ndm_operator }}"
12 | force: yes
13 | register: rc_operator
14 |
15 | - block:
16 | - name: Applying openebs hostpath operator
17 | shell: kubectl apply -f "{{ ci_device_operator }}"
18 | args:
19 | executable: /bin/bash
20 |
21 | - name: Applying NDM operator
22 | shell: kubectl apply -f "{{ ndm_operator }}"
23 | args:
24 | executable: /bin/bash
25 | when: lookup('env','ACTION') == "provision"
26 |
27 | - block:
28 |
29 | - name: Deleting NDM operator
30 | shell: kubectl delete -f "{{ ndm_operator }}"
31 | args:
32 | executable: /bin/bash
33 | ignore_errors: true
34 |
35 | - name: Deleting openebs hostpath operator
36 | shell: kubectl delete -f "{{ ci_device_operator }}"
37 | args:
38 | executable: /bin/bash
39 | ignore_errors: true
40 |
41 | when: lookup('env','ACTION') == "deprovision"
--------------------------------------------------------------------------------
/e2e-tests/hack/push:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | if [ -z "${REPONAME}" ]
5 | then
6 | REPONAME="openebs"
7 | fi
8 |
9 | if [ -z "${IMGNAME}" ] || [ -z "${IMGTAG}" ];
10 | then
11 | echo "Image details are missing. Nothing to push.";
12 | exit 1
13 | fi
14 |
15 | IMAGEID=$( sudo docker images -q ${REPONAME}/${IMGNAME}:${IMGTAG} )
16 |
17 | if [ ! -z "${DNAME}" ] && [ ! -z "${DPASS}" ];
18 | then
19 | sudo docker login -u "${DNAME}" -p "${DPASS}";
20 | # Push image to docker hub
21 | echo "Pushing ${REPONAME}/${IMGNAME}:${IMGTAG} ...";
22 | sudo docker push ${REPONAME}/${IMGNAME}:${IMGTAG} ;
23 | if [ ! -z "${TRAVIS_TAG}" ] ;
24 | then
25 | # Push with different tags if tagged as a release
26 | # When github is tagged with a release, then Travis will
27 | # set the release tag in env TRAVIS_TAG
28 | echo "Pushing ${REPONAME}/${IMGNAME}:${TRAVIS_TAG} ...";
29 | sudo docker tag ${IMAGEID} ${REPONAME}/${IMGNAME}:${TRAVIS_TAG}
30 | sudo docker push ${REPONAME}/${IMGNAME}:${TRAVIS_TAG};
31 | echo "Pushing ${REPONAME}/${IMGNAME}:latest ...";
32 | sudo docker tag ${IMAGEID} ${REPONAME}/${IMGNAME}:latest
33 | sudo docker push ${REPONAME}/${IMGNAME}:latest;
34 | fi;
35 | else
36 | echo "No docker credentials provided. Skip uploading ${REPONAME}/${IMGNAME}:${IMGTAG} to docker hub";
37 | fi;
38 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/data-integrity/fio-read.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | name: basic-read
6 | data:
7 | basic-rw : |-
8 |
9 | [global]
10 | directory=/datadir
11 | filename=basic-fio
12 |
13 | [basic-fio]
14 | rw=read
15 | bs=4k
16 | verify=crc32c
17 | verify=pattern
18 | verify_pattern=%o
19 | ---
20 | apiVersion: batch/v1
21 | kind: Job
22 | metadata:
23 | name: fio-read
24 | spec:
25 | template:
26 | metadata:
27 | name: fio-read
28 | labels:
29 | name: fio-read
30 | spec:
31 | restartPolicy: Never
32 | containers:
33 | - name: perfrunner
34 | image: openebs/tests-fio:latest
35 | imagePullPolicy: IfNotPresent
36 | command: ["/bin/bash"]
37 | args: ["-c", "./fio_runner.sh --read-only /datadir/basic-fio; exit 0"]
38 | volumeMounts:
39 | - mountPath: /datadir
40 | name: demo-vol1
41 | - mountPath: templates/file/basic-rw
42 | subPath: basic-rw
43 | name: basic-configmap-read
44 | tty: true
45 |
46 | volumes:
47 | - name: demo-vol1
48 | persistentVolumeClaim:
49 | claimName: demo-vol1-claim
50 | - name: basic-configmap-read
51 | configMap:
52 | name: basic-read
53 |
--------------------------------------------------------------------------------
/shell.nix:
--------------------------------------------------------------------------------
1 | let
2 | sources = import ./nix/sources.nix;
3 | pkgs = import sources.nixpkgs {};
4 | in
5 | pkgs.mkShell {
6 | name = "hostpath-shell";
7 |
8 | buildInputs = with pkgs; [
9 | git
10 | chart-testing
11 | go_1_19
12 | golint
13 | kubectl
14 | kubernetes-helm
15 | gnumake
16 | semver-tool
17 | yq-go
18 | which
19 | curl
20 | cacert
21 | crane
22 | util-linux
23 | jq
24 | nixos-shell
25 | ] ++ pkgs.lib.optionals (builtins.getEnv "IN_NIX_SHELL" == "pure") [ docker-client ];
26 |
27 | PRE_COMMIT_ALLOW_NO_CONFIG = 1;
28 |
29 | shellHook = ''
30 | unset GOROOT
31 | export GOPATH=$(pwd)/nix/.go
32 | export GOCACHE=$(pwd)/nix/.go/cache
33 | export TMPDIR=$(pwd)/nix/.tmp
34 | export PATH=$GOPATH/bin:$PATH
35 | mkdir -p "$TMPDIR"
36 |
37 | if [ "$IN_NIX_SHELL" = "pure" ]; then
38 | # working sudo within a pure nix-shell
39 | for sudo in /run/wrappers/bin/sudo /usr/bin/sudo /usr/local/bin/sudo /sbin/sudo /bin/sudo; do
40 | [ -x "$sudo" ] || continue
41 | mkdir -p $(pwd)/nix/bins && ln -sf $sudo $(pwd)/nix/bins/sudo
42 | export PATH=$(pwd)/nix/bins:$PATH
43 | break
44 | done
45 | else
46 | rm $(pwd)/nix/bins/sudo 2>/dev/null || :
47 | rmdir $(pwd)/nix/bins 2>/dev/null || :
48 | fi
49 |
50 | make bootstrap
51 | '';
52 | }
53 |
54 |
--------------------------------------------------------------------------------
/e2e-tests/utils/k8s/scale_statefulset_replicas.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This utility task file can scale up the statefulset application deployed in the K8s cluster.
3 | # The parameters required are
4 | # - app_ns ( namespace in which application is deployed)
5 | # - app_label (Application's label in the form key=value)
6 | # - app_replica_count( Required number of application replicas)
7 |
8 | - name: Identifying the statefulset deployed.
9 | shell: kubectl get statefulset -n {{ app_ns }} --no-headers -l {{ app_label }} -o custom-columns=:metadata.name
10 | args:
11 | executable: /bin/bash
12 | register: result
13 |
14 | - name: Recording the statefulset name.
15 | set_fact:
16 | sts_name: "{{ result.stdout }}"
17 |
18 | - name: Scaling up the statefulset application.
19 | shell: kubectl scale statefulsets {{ sts_name }} --replicas={{ app_replica_count }} -n {{ app_ns }}
20 | args:
21 | executable: /bin/bash
22 | register: result
23 | failed_when: "'scaled' not in result.stdout"
24 |
25 | - name: Check if all the application replicas are running.
26 | shell: kubectl get statefulsets -n {{ app_ns }} --no-headers -l {{ app_label }} -o custom-columns=:..readyReplicas
27 | args:
28 | executable: /bin/bash
29 | register: running_replicas
30 | until: "running_replicas.stdout|int == app_replica_count|int"
31 | delay: 60
32 | retries: 15
33 |
34 |
--------------------------------------------------------------------------------
/e2e-tests/utils/fcm/update_e2e_result_resource.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - block:
3 | - name: Generate the e2e result CR to reflect SOT (Start of Test)
4 | template:
5 | src: /e2e-tests/hack/e2e-result.j2
6 | dest: e2e-result.yaml
7 | vars:
8 | test: "{{ test_name }}"
9 | app: ""
10 | chaostype: ""
11 | phase: in-progress
12 | verdict: none
13 |
14 | - name: Display the e2e result CR yaml
15 | shell: cat e2e-result.yaml
16 |
17 | - name: Apply the e2e result CR
18 | shell: kubectl apply -f e2e-result.yaml
19 | args:
20 | executable: /bin/bash
21 | register: er_status
22 | failed_when: "er_status is failed"
23 |
24 | when: status == "SOT"
25 |
26 | - block:
27 | - name: Generate the e2e result CR to reflect EOT (End of Test)
28 | template:
29 | src: /e2e-tests/hack/e2e-result.j2
30 | dest: e2e-result.yaml
31 | vars:
32 | test: "{{ test_name }}"
33 | app: ""
34 | chaostype: ""
35 | phase: completed
36 | verdict: "{{ flag }}"
37 |
38 | - name: Display the e2e result CR yaml
39 | shell: cat e2e-result.yaml
40 |
41 | - name: Apply the e2e result CR
42 | shell: kubectl apply -f e2e-result.yaml
43 | args:
44 | executable: /bin/bash
45 | register: er_status
46 | failed_when: "er_status is failed"
47 |
48 | when: status == "EOT"
49 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/data-integrity/commands.sh:
--------------------------------------------------------------------------------
1 | # Copyright 2020-2021 The OpenEBS Authors. All rights reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | cd datadir
16 | error_check()
17 | {
18 | if [ $? != 0 ]
19 | then
20 | echo "error caught"
21 | exit 1
22 | fi
23 | }
24 |
25 | perform_data_write()
26 | {
27 | value=space_left # placeholder; expected to be replaced with the target iteration count before this script runs
28 | i=0
29 | while [ $i -le $value ]
30 | do
31 | ls -all
32 | error_check
33 | touch file$i
34 | error_check
35 | dd if=/dev/urandom of=file$i bs=4k count=5000
36 | error_check
37 | sync
38 | read_data
39 | i=$(( i + 1 ))
40 | error_check
41 | done
42 | }
43 | read_data()
44 | {
45 | touch testfile
46 | error_check
47 | echo "OpenEBS Released newer version" > testfile
48 | error_check
49 | cat testfile
50 | error_check
51 | rm testfile
52 | }
53 | perform_data_write
54 |
--------------------------------------------------------------------------------
/e2e-tests/utils/k8s/check_deployment_status.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This util checks the application status in k8s cluster
3 | # The parameters required are
4 | # - app_ns ( namespace where the application is deployed)
5 | # - app_label ( Label of application in the form 'key=value'))
6 |
7 | - block:
8 | - name: Check the pod status
9 | shell: >
10 | kubectl get pods -n {{ app_ns }} -l {{ app_label }} --no-headers
11 | -o custom-columns=:status.phase
12 | args:
13 | executable: /bin/bash
14 | register: result
15 | until: "'Running' in result.stdout"
16 | delay: 30
17 | retries: 15
18 |
19 | when: lookup('env','DEPLOY_TYPE') == 'deployment'
20 |
21 | - block:
22 | - name: Obtain the number of replicas.
23 | shell: kubectl get statefulset -n {{ app_ns }} -l {{ app_label }} -o custom-columns=:spec.replicas
24 | register: rep_count
25 | until: "rep_count.rc == 0"
26 | delay: 60
27 | retries: 15
28 |
29 | - name: Obtain the ready replica count and compare with the replica count.
30 | shell: kubectl get statefulset -n {{ app_ns }} -l {{ app_label }} -o custom-columns=:..readyReplicas
31 | register: ready_rep
32 | until: "ready_rep.rc == 0 and ready_rep.stdout|int == rep_count.stdout|int"
33 | delay: 60
34 | retries: 30
35 |
36 | when: lookup('env','DEPLOY_TYPE') == 'statefulset'
37 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug Report
3 | about: Report a bug encountered while using Local PVs
4 | labels: kind/bug
5 |
6 | ---
7 |
8 |
11 |
12 | **Describe the bug:** A clear and concise description of what the bug is.
13 |
14 | **Expected behaviour:** A concise description of what you expected to happen.
15 |
16 | **Steps to reproduce the bug:**
17 | Steps to reproduce the bug should be clear and easily reproducible to help people gain an understanding of the problem
18 |
19 | **The output of the following commands will help us better understand what's going on**:
20 |
21 |
22 | * `kubectl get pods -n <namespace> --show-labels`
23 | * `kubectl logs -n <namespace> <pod-name>`
24 |
25 | **Anything else we need to know?:**
26 | Add any other context about the problem here.
27 |
28 | **Environment details:**
29 | - OpenEBS version (use `kubectl get po -n openebs --show-labels`):
30 | - Kubernetes version (use `kubectl version`):
31 | - Cloud provider or hardware configuration:
32 | - OS (e.g: `cat /etc/os-release`):
33 | - kernel (e.g: `uname -a`):
34 | - others:
35 |
--------------------------------------------------------------------------------
/tests/bdd/nodeAffinityLabels_cas_config.feature:
--------------------------------------------------------------------------------
1 | Feature: Volume Provisioning/De-provisioning with NodeAffinityLabels CAS-config on StorageClass
2 |
3 | Scenario: Volume provisioning/de-provisioning with custom NodeAffinityLabels CAS-config on StorageClass
4 | When a StorageClass is created with the following attributes:
5 | | name | sc-nod-aff-lab |
6 | | BasePath | /path/to/hostpath |
7 | | NodeAffinityLabels | "kubernetes.io/hostname", "kubernetes.io/os", "kubernetes.io/arch" |
8 | | provisionerName | openebs.io/local |
9 | | volumeBindingMode | WaitForFirstConsumer |
10 | | reclaimPolicy | Delete |
11 | And a PVC "pvc-nod-aff-lab" is created with StorageClass "sc-nod-aff-lab"
12 | And a deployment with a busybox image is created with PVC "pvc-nod-aff-lab"
13 | Then a Pod should be up and running
14 | And a bound PV should be created
15 | And the SC NodeAffinityLabels CAS-config should be set correctly on the PV
16 |
17 | When the application Deployment is deleted
18 | Then The Pod should be deleted
19 |
20 | When the PVC is deleted
21 | Then the PV should be deleted
22 |
--------------------------------------------------------------------------------
/hack/update-k8s.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | #Copyright 2020 The OpenEBS Authors
4 | #
5 | #Licensed under the Apache License, Version 2.0 (the "License");
6 | #you may not use this file except in compliance with the License.
7 | #You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | #Unless required by applicable law or agreed to in writing, software
12 | #distributed under the License is distributed on an "AS IS" BASIS,
13 | #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | #See the License for the specific language governing permissions and
15 | #limitations under the License.
16 | #
17 |
18 | # This script was developed by https://github.com/abursavich and posted
19 | # at https://github.com/kubernetes/kubernetes/issues/79384#issuecomment-521493597
20 |
21 | set -euo pipefail
22 |
23 | VERSION=${1:-}; VERSION=${VERSION#"v"}
24 | if [ -z "$VERSION" ]; then
25 | echo "Must specify version!"
26 | exit 1
27 | fi
28 | MODS=($(
29 | curl -sS https://raw.githubusercontent.com/kubernetes/kubernetes/v${VERSION}/go.mod |
30 | sed -n 's|.*k8s.io/\(.*\) => ./staging/src/k8s.io/.*|k8s.io/\1|p'
31 | ))
32 | for MOD in "${MODS[@]}"; do
33 | V=$(
34 | go mod download -json "${MOD}@kubernetes-${VERSION}" |
35 | sed -n 's|.*"Version": "\(.*\)".*|\1|p'
36 | )
37 | go mod edit "-replace=${MOD}=${MOD}@${V}"
38 | done
39 | go get "k8s.io/kubernetes@v${VERSION}"
40 |
--------------------------------------------------------------------------------
/docs/installation/platforms/rancher.md:
--------------------------------------------------------------------------------
1 | # Install Dynamic-LocalPV-Provisioner on Rancher v2/RKE
2 |
3 | To use OpenEBS LocalPV Hostpath with an RKE/Rancher 2.x cluster, you will have to bind-mount the hostpath directories to the kubelet containers. You can do this by editing the kubelet configuration section of your RKE/Rancher 2.x cluster and adding in the `extra_binds` (see below).
4 |
5 | **Note:** If you want to use a custom hostpath directory, then you will have to bind-mount the custom directory's absolute path. See below for an example with the default hostpath directory.
6 |
7 | For an RKE cluster, you can add the `extra_binds` to your cluster.yml file and apply the changes using the `rke up` command.
8 |
9 | For a Rancher 2.x cluster, you can edit your cluster's configuration options and add the `extra_binds` there.
10 |
11 | ```yaml
12 | services:
13 | kubelet:
14 | extra_binds:
15 | #Default hostpath directory
16 | - /var/openebs/local:/var/openebs/local
17 | ```
18 |
19 | For more information, please go through the official Rancher documentation -- [RKE - Kubernetes Configuration Options](https://rancher.com/docs/rke/latest/en/config-options/services/services-extras/#extra-binds), [RKE - Installation](https://rancher.com/docs/rke/latest/en/installation/#deploying-kubernetes-with-rke).
20 |
21 | After the `extra_binds` are added, proceed with installation as described in [the quickstart](https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md).
22 |
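23 | If you use a custom hostpath directory (see the note above), bind-mount that directory's absolute path instead. A minimal sketch, assuming a custom directory `/mnt/openebs/local`:
24 |
25 | ```yaml
26 | services:
27 |   kubelet:
28 |     extra_binds:
29 |       #Custom hostpath directory (illustrative)
30 |       - /mnt/openebs/local:/mnt/openebs/local
31 | ```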
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/backup_and_restore/backup-restore.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Creating Backup
3 | shell: >
4 | velero backup create {{ velero_backup_name }} -l {{ app_label }} --include-namespaces={{ app_ns }}
5 | args:
6 | executable: /bin/bash
7 | when: action == "BACKUP"
8 |
9 | - name: Getting the state of Backup
10 | shell: kubectl get backup {{ velero_backup_name }} -n velero -o jsonpath='{.status.phase}'
11 | register: backup_state
12 | until: "'Completed' in backup_state.stdout"
13 | delay: 5
14 | retries: 100
15 | when: action == "BACKUP_STATE"
16 |
17 | - block:
18 |
19 | - name: Creating application namespace
20 | shell: kubectl create ns {{ app_ns }}
21 | register: app_ns_create_status
22 | failed_when: "'created' not in app_ns_create_status.stdout"
23 |
24 | - name: Restoring application
25 | shell: >
26 | velero restore create --from-backup {{ velero_backup_name }} --restore-volumes=true
27 | args:
28 | executable: /bin/bash
29 |
30 | - name: Getting restore name
31 | shell: velero get restore | grep {{ velero_backup_name }} | awk '{print $1}'
32 | register: restore_name
33 |
34 | - name: Checking the restore status
35 | shell: kubectl get restore {{ restore_name.stdout }} -n velero -o jsonpath='{.status.phase}'
36 | register: restore_state
37 | until: "'Completed' in restore_state.stdout"
38 | delay: 5
39 | retries: 60
40 |
41 | when: action == "RESTORE"
--------------------------------------------------------------------------------
/e2e-tests/funclib/scale_replicas.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This utility task file can scale up the statefulset or deployment replicas in the K8s cluster.
3 | # The parameters required are
4 | # - operator_namespace (namespace in which the application is deployed)
5 | # - app_label (Application's label in the form key=value)
6 | # - app_replica_count( Required number of application replicas)
7 | # - deploy_type (Either 'deployment' or 'statefulset')
8 | # The above parameters should be obtained as environment variables from the e2e-book.
9 |
10 | - name: Obtaining the application deployment/statefulset name.
11 | shell: kubectl get {{ deploy_type }} -n {{ operator_namespace }} --no-headers -l {{ app_label }} -o custom-columns=:metadata.name
12 | args:
13 | executable: /bin/bash
14 | register: result
15 |
16 | - name: Recording the application name.
17 | set_fact:
18 | app_name: "{{ result.stdout }}"
19 |
20 | - name: Scaling up the replicas.
21 | shell: kubectl scale {{ deploy_type}} {{ app_name }} --replicas={{ app_replica_count }} -n {{ operator_namespace }}
22 | args:
23 | executable: /bin/bash
24 | register: result
25 | failed_when: "'scaled' not in result.stdout"
26 |
27 | - name: Check if all the application replicas are running.
28 | shell: kubectl get {{ deploy_type }} -n {{ operator_namespace }} --no-headers -l {{ app_label }} -o custom-columns=:..readyReplicas
29 | args:
30 | executable: /bin/bash
31 | register: running_replicas
32 | until: "running_replicas.stdout|int == app_replica_count|int"
33 | delay: 10
34 | retries: 60
35 |
36 |
--------------------------------------------------------------------------------
/e2e-tests/apps/percona/deployers/run_e2e_test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: batch/v1
3 | kind: Job
4 | metadata:
5 | generateName: e2e-percona-
6 | namespace: e2e
7 | spec:
8 | template:
9 | metadata:
10 | name: e2e
11 | labels:
12 | app: percona-deployment
13 |
14 | spec:
15 | serviceAccountName: e2e
16 | restartPolicy: Never
17 | containers:
18 | - name: ansibletest
19 | image: openebs/localpv-e2e:ci
20 | imagePullPolicy: IfNotPresent
21 |
22 | env:
23 | - name: ANSIBLE_STDOUT_CALLBACK
24 | #value: log_plays, actionable, default
25 | value: default
26 |
27 | - name: PROVIDER_STORAGE_CLASS
28 | value: openebs-hostpath
29 |
30 | - name: APP_PVC
31 | value: percona-mysql-claim
32 |
33 | # Application label
34 | - name: APP_LABEL
35 | value: 'name=percona'
36 |
37 | # Application namespace
38 | - name: APP_NAMESPACE
39 | value: app-percona-ns
40 |
41 | # Use 'deprovision' for app-clean up
42 | - name: ACTION
43 | value: provision
44 |
45 | - name: CAPACITY
46 | value: 5Gi
47 |
48 | # Enable storage i/o based liveness probe
49 | - name: IO_PROBE
50 | value: enabled
51 |
52 | command: ["/bin/bash"]
53 | args: ["-c", "ansible-playbook ./e2e-tests/apps/percona/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"]
54 |
55 |
--------------------------------------------------------------------------------
/scripts/update-reg-repo.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -euo pipefail
4 |
5 | SCRIPT_DIR="$(dirname "$(realpath "${BASH_SOURCE[0]:-"$0"}")")"
6 | ROOT_DIR="$SCRIPT_DIR/.."
7 | CHART_DIR="$ROOT_DIR/deploy/helm/charts"
8 | VALUES_YAML="$CHART_DIR/values.yaml"
9 |
10 | NEW_REGISTRY="ghcr.io"
11 | NEW_REPOSITORY="openebs/dev"
12 |
13 | source "$SCRIPT_DIR/yq_utils.sh"
14 | source "$SCRIPT_DIR/log.sh"
15 |
16 | help() {
17 |   cat <<EOF
[... remainder of update-reg-repo.sh truncated during extraction ...]
--------------------------------------------------------------------------------
/e2e-tests/experiments/localpv-provisioner/README.md:
--------------------------------------------------------------------------------
[... beginning of README.md truncated during extraction ...]
11 | kubectl apply -f .../openebs-operator-lite.yaml
12 | ```
13 |
14 | ## Exit-Criteria
15 |
16 | - LocalPV and NDM components should be deployed successfully, and all the pods, including the LocalPV provisioner, openebs-ndm and the NDM operator, should be in the Running state.
17 |
18 | ## How to run
19 |
20 | - This experiment accepts its parameters in the form of Kubernetes Job environment variables.
21 | - To run this experiment of deploying the openebs operator, clone the [openebs/dynamic-localpv-provisioner](https://github.com/openebs/dynamic-localpv-provisioner) repo and first apply the RBAC and CRDs for the e2e-framework.
22 | ```
23 | kubectl apply -f dynamic-localpv-provisioner/e2e-tests/hack/rbac.yaml
24 | kubectl apply -f dynamic-localpv-provisioner/e2e-tests/hack/crds.yaml
25 | ```
26 | Then update the needed test-specific values in the run_e2e_test.yml file and create the Kubernetes Job.
27 | ```
28 | kubectl create -f run_e2e_test.yml
29 | ```
30 | Descriptions of all the env variables are provided as comments in the same file.
31 |
32 |
--------------------------------------------------------------------------------
/e2e-tests/apps/busybox/liveness/run_e2e_test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: batch/v1
3 | kind: Job
4 | metadata:
5 | generateName: e2e-busybox-liveness-
6 | namespace: e2e
7 | spec:
8 | activeDeadlineSeconds: 5400
9 | template:
10 | metadata:
11 | name: e2e-busybox-liveness
12 | namespace: e2e
13 | labels:
14 | liveness: e2e-busybox-liveness
15 |
16 | # label used for mass-liveness check upon infra-chaos
17 | infra-aid: liveness
18 |
19 | spec:
20 | serviceAccountName: e2e
21 | restartPolicy: Never
22 |
23 | containers:
24 | - name: ansibletest
25 | image: openebs/localpv-e2e:ci
26 | imagePullPolicy: IfNotPresent
27 | env:
28 | - name: ANSIBLE_STDOUT_CALLBACK
29 | value: default
30 |
31 | - name: MY_POD_NAME
32 | valueFrom:
33 | fieldRef:
34 | fieldPath: metadata.name
35 |
36 | - name: LIVENESS_TIMEOUT_SECONDS
37 | value: "10"
38 |
39 | # number of retries when the liveness check fails
40 | - name: LIVENESS_RETRY_COUNT
41 | value: "5"
42 |
43 | # Namespace in which busybox is running
44 | - name: NAMESPACE
45 | value: app-busybox-ns
46 |
47 | - name: APPLICATION_LABEL
48 | value: 'app=busybox-sts'
49 |
50 | - name: ACTION
51 | value: provision
52 |
53 | command: ["/bin/bash"]
54 | args: ["-c", "ansible-playbook ./e2e-tests/apps/busybox/liveness/test.yml -i /etc/ansible/hosts -v; exit 0"]
55 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/chaos/local_pv_disk_reuse/run_e2e_test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: batch/v1
3 | kind: Job
4 | metadata:
5 | generateName: e2e-disk-reuse-localpv-
6 | namespace: e2e
7 | spec:
8 | template:
9 | metadata:
10 | name: e2e
11 | labels:
12 | app: localpv-disk-reuse
13 | spec:
14 | serviceAccountName: e2e
15 | restartPolicy: Never
16 | containers:
17 | - name: ansibletest
18 | image: openebs/localpv-e2e:ci
19 | imagePullPolicy: IfNotPresent
20 | env:
21 | - name: ANSIBLE_STDOUT_CALLBACK
22 | #value: log_plays
23 | value: default
24 |
25 | - name: PROVIDER_STORAGE_CLASS
26 | value: openebs-device
27 |
28 | # Application pvc
29 | - name: APP_PVC
30 | value: openebs-busybox
31 |
32 | # Application label
33 | # Use different 'LABEL' for deployment and statefulset application
34 | - name: APP_LABEL
35 | value: 'app=busybox-sts'
36 |
37 | # Application namespace
38 | - name: APP_NAMESPACE
39 | value: disk-reuse
40 |
41 | #Persistent Volume storage capacity
42 | - name: PV_CAPACITY
43 | value: 5Gi
44 |
45 | #Application replicas for statefulset application
46 | - name: APP_REPLICA
47 | value: 'replicas=3'
48 |
49 | command: ["/bin/bash"]
50 | args: ["-c", "ansible-playbook ./e2e-tests/experiments/chaos/local_pv_disk_reuse/test.yml -i /etc/ansible/hosts -v; exit 0"]
51 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/localpv-provisioning-selected-device/run_e2e_test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: batch/v1
3 | kind: Job
4 | metadata:
5 | generateName: localpv-selected-device-
6 | namespace: e2e
7 | spec:
8 | template:
9 | metadata:
10 | name: e2e
11 | labels:
12 | app: localpv-selected-device
13 | spec:
14 | serviceAccountName: e2e
15 | restartPolicy: Never
16 | containers:
17 | - name: ansibletest
18 | image: openebs/localpv-e2e:ci
19 | imagePullPolicy: IfNotPresent
20 |
21 | env:
22 | - name: ANSIBLE_STDOUT_CALLBACK
23 | #value: log_plays, actionable, default
24 | value: default
25 |
26 | # Namespace where the OpenEBS components are deployed
27 | - name: OPERATOR_NS
28 | value: ''
29 |
30 | - name: BD_TAG
31 | value: 'e2e'
32 |
33 | - name: APP_NAMESPACE
34 | value: ''
35 |
36 | - name: PVC
37 | value: ''
38 |
39 | ## In the positive test case, the blockdevice is labeled first and then the PV is created.
40 | ## In the negative test case, PV creation is attempted first (the PVC remains in Pending state)
41 | ## and then the blockdevice is labeled. In this way, successful reconciliation is verified.
42 | - name: TEST_CASE_TYPE ## `positive` OR `negative`
43 | value: ''
44 |
45 | command: ["/bin/bash"]
46 | args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/localpv-provisioning-selected-device/test.yml -i /etc/ansible/hosts -v; exit 0"]
47 |
--------------------------------------------------------------------------------
/pkg/kubernetes/api/core/v1/persistentvolumeclaim/persistentvolumeclaim_test.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2018-2020 The OpenEBS Authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package persistentvolumeclaim
16 |
17 | import (
18 | corev1 "k8s.io/api/core/v1"
19 | )
20 |
21 | func fakeAPIPVCList(pvcNames []string) *corev1.PersistentVolumeClaimList {
22 | if len(pvcNames) == 0 {
23 | return nil
24 | }
25 | list := &corev1.PersistentVolumeClaimList{}
26 | for _, name := range pvcNames {
27 | pvc := corev1.PersistentVolumeClaim{}
28 | pvc.SetName(name)
29 | list.Items = append(list.Items, pvc)
30 | }
31 | return list
32 | }
33 |
34 | func fakeAPIPVCListFromNameStatusMap(pvcs map[string]corev1.PersistentVolumeClaimPhase) *corev1.PersistentVolumeClaimList {
35 | if len(pvcs) == 0 {
36 | return nil
37 | }
38 | list := &corev1.PersistentVolumeClaimList{}
39 | for k, v := range pvcs {
40 | pvc := corev1.PersistentVolumeClaim{}
41 | pvc.SetName(k)
42 | pvc.Status.Phase = v
43 | list.Items = append(list.Items, pvc)
44 | }
45 | return list
46 | }
47 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/chaos/local_pv_disk_reuse/busybox_statefulset.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | openebs.io/app: busybox
6 | lkey: lvalue
7 | name: busybox
8 | spec:
9 | clusterIP: None
10 | selector:
11 | openebs.io/app: busybox
12 | lkey: lvalue
13 | ---
14 | apiVersion: apps/v1
15 | kind: StatefulSet
16 | metadata:
17 | name: busybox
18 | labels:
19 | openebs.io/app: busybox
20 | lkey: lvalue
21 | spec:
22 | serviceName: busybox
23 | replicas: 3
24 | selector:
25 | matchLabels:
26 | openebs.io/app: busybox
27 | lkey: lvalue
28 | template:
29 | metadata:
30 | labels:
31 | openebs.io/app: busybox
32 | lkey: lvalue
33 | spec:
34 | affinity:
35 | podAntiAffinity:
36 | requiredDuringSchedulingIgnoredDuringExecution:
37 | - labelSelector:
38 | matchLabels:
39 | openebs.io/app: busybox
40 | topologyKey: kubernetes.io/hostname
41 | containers:
42 | - name: busybox
43 | image: gcr.io/google-containers/busybox
44 | imagePullPolicy: IfNotPresent
45 | command:
46 | - sh
47 | - -c
48 | - 'date > /busybox/date.txt; sync; sleep 5; sync; tail -f /dev/null;'
49 | volumeMounts:
50 | - name: testclaim
51 | mountPath: /busybox
52 | volumeClaimTemplates:
53 | - metadata:
54 | name: testclaim
55 | spec:
56 | accessModes: [ "ReadWriteOnce" ]
57 | storageClassName: testclass
58 | resources:
59 | requests:
60 | storage: teststorage
--------------------------------------------------------------------------------
/tests/bdd/hostpath.feature:
--------------------------------------------------------------------------------
1 | Feature: TEST HOSTPATH LOCAL PV
2 |
3 | Scenario: Creating and Deleting StorageClass, PVC, and Deployment with Busybox
4 | Given a hostpath provisioner is running
5 | When a StorageClass is created with the following attributes:
6 | | name | sc-hp |
7 | | BasePath | /path/to/hostpath |
8 | | provisionerName | openebs.io/local |
9 | | volumeBindingMode | WaitForFirstConsumer |
10 | | reclaimPolicy | Delete |
11 | And a PVC is created with the following attributes:
12 | | name | pvc-hp |
13 | | storageClass | sc-hp |
14 | | accessModes | ReadWriteOnce |
15 | | capacity | 2Gi |
16 | And a deployment with a busybox image is created with the following attributes:
17 | | name | busybox-hostpath |
18 | | image | busybox |
19 | | command | ["sleep", "3600"] |
20 | | volumeMounts | name: demo-vol1, mountPath: /mnt/store1 |
21 | | volumes | name: demo-vol1, pvcName: pvc-hp |
22 | Then the Pod should be in Running state
23 | And a bound PV should be created
24 |
25 | When the deployment is deleted
26 | Then the deployment should not have any deployment or pod remaining
27 |
28 | When the PVC is deleted
29 | Then the PVC should be deleted successfully
30 | Then the PV should be deleted
31 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/localpv-provisioning-selected-device/percona.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: percona
6 | labels:
7 | name: percona
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | name: percona
13 | template:
14 | metadata:
15 | labels:
16 | name: percona
17 | spec:
18 | tolerations:
19 | - key: "ak"
20 | value: "av"
21 | operator: "Equal"
22 | effect: "NoSchedule"
23 | containers:
24 | - resources:
25 | limits:
26 | cpu: 0.5
27 | name: percona
28 | image: openebs/tests-custom-percona:latest
29 | args:
30 | - "--ignore-db-dir"
31 | - "lost+found"
32 | env:
33 | - name: MYSQL_ROOT_PASSWORD
34 | value: k8sDem0
35 | ports:
36 | - containerPort: 3306
37 | name: percona
38 | volumeMounts:
39 | - mountPath: /var/lib/mysql
40 | name: demo-vol1
41 | volumes:
42 | - name: demo-vol1
43 | persistentVolumeClaim:
44 | claimName: {{ pvc_name }}
45 | ---
46 | kind: PersistentVolumeClaim
47 | apiVersion: v1
48 | metadata:
49 | name: {{ pvc_name }}
50 | spec:
51 | storageClassName: {{ sc_name }}
52 | accessModes:
53 | - ReadWriteOnce
54 | resources:
55 | requests:
56 | storage: 5Gi
57 | ---
58 | apiVersion: v1
59 | kind: Service
60 | metadata:
61 | name: percona-mysql
62 | labels:
63 | name: percona-mysql
64 | spec:
65 | ports:
66 | - port: 3306
67 | targetPort: 3306
68 | selector:
69 | name: percona
70 |
71 |
--------------------------------------------------------------------------------
/docs/tutorials/backup_and_restore/restore.md:
--------------------------------------------------------------------------------
1 | # Restore Velero backups
2 |
3 | ## Step 1: List backups
4 |
5 | We will 'exec' into the Velero container to list our backups.
6 |
7 | Get the Pod name for the Velero Pod running in 'velero' namespace.
8 |
9 | ```console
10 | $ kubectl -n velero get pods
11 |
12 | NAME READY STATUS RESTARTS AGE
13 | openebs-backup-minio-ss-0-0 1/1 Running 0 7h23m
14 | restic-2xwsf 1/1 Running 0 7h12m
15 | velero-7dd57b857-2gd25 1/1 Running 0 7h12m
16 | ```
17 |
18 | 'Exec' into the Pod's velero container.
19 |
20 | ```console
21 | kubectl -n velero exec -it velero-7dd57b857-2gd25 -c velero -- /bin/bash
22 | ```
23 |
24 | List the backups available.
25 |
26 | ```console
27 | $ ./velero backup get
28 |
29 | NAME STATUS ERRORS WARNINGS CREATED EXPIRES STORAGE LOCATION SELECTOR
30 | my-localpv-backup Completed 0 0 2021-09-04 01:13:36 +0000 UTC 29d default
31 | ```
32 |
33 | ## Step 2: Create a restore
34 |
35 | Restores don't overwrite already-existing components with the same name. To replace existing components with the contents of the backup, you will have to delete them first.
36 |
37 | Use the `--namespace-mappings [SOURCE_NAMESPACE]:[DESTINATION_NAMESPACE]` flag to restore to a different namespace.
38 |
39 | ```console
40 | ./velero restore create my-localpv-restore --from-backup my-localpv-backup --restore-volumes=true
41 | ```
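42 |
43 | To restore into a different namespace instead, the same command can take the mapping flag described above. For example (the source and destination namespaces here are illustrative):
44 |
45 | ```console
46 | ./velero restore create my-localpv-restore --from-backup my-localpv-backup --restore-volumes=true --namespace-mappings app-ns:app-ns-restored
47 | ```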
48 |
49 | Verify the status of the restore and also the components that were restored.
50 |
51 | ```console
52 | ./velero restore get
53 | ```
54 |
55 | Once you're done, exit the Velero container's shell.
56 |
57 | ```console
58 | exit
59 | ```
60 |
--------------------------------------------------------------------------------
/e2e-tests/utils/k8s/pre_create_app_deploy.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - block:
3 | - name: Check whether the provider storageclass is applied
4 | shell: kubectl get sc "{{ lookup('env','PROVIDER_STORAGE_CLASS') }}"
5 | args:
6 | executable: /bin/bash
7 | register: result
8 | failed_when: "result.rc != 0"
9 |
10 | - name: Replace the pvc placeholder with provider
11 | replace:
12 | path: "{{ application_deployment }}"
13 | regexp: "testclaim"
14 | replace: "{{ lookup('env','APP_PVC') }}"
15 |
16 | - name: Replace the storageclass placeholder with provider
17 | replace:
18 | path: "{{ application_deployment }}"
19 | regexp: "testclass"
20 | replace: "{{ lookup('env','PROVIDER_STORAGE_CLASS') }}"
21 |
22 | - include_tasks: /e2e-tests/utils/scm/openebs/fetch_replica_values.yml
23 | when: lookup('env', 'APP_REPLICA')
24 |
25 | - name: Get the application label values from env
26 | set_fact:
27 | app_lkey: "{{ app_label.split('=')[0] }}"
28 | app_lvalue: "{{ app_label.split('=')[1] }}"
29 |
30 | - name: Replace the application label placeholder in deployment spec
31 | replace:
32 | path: "{{ application_deployment }}"
33 | regexp: "lkey: lvalue"
34 | replace: "{{ app_lkey }}: {{ app_lvalue }}"
35 |
36 | - name: Enable/Disable I/O based liveness probe
37 | shell: >
38 | sed -i
39 | '/#/,/#/d'
40 | {{ application_deployment }}
41 | args:
42 | executable: /bin/bash
43 | when: lookup('env', 'IO_PROBE') is defined and lookup('env', 'IO_PROBE') == "disabled"
44 |
45 | - include_tasks: /e2e-tests/utils/k8s/create_ns.yml
46 |
--------------------------------------------------------------------------------
/pkg/kubernetes/api/core/v1/event/buildlist.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2021 The OpenEBS Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package event
18 |
19 | import (
20 | corev1 "k8s.io/api/core/v1"
21 | )
22 |
23 | type ListBuilder struct {
24 | list *EventList
25 | filters PredicateList
26 | }
27 |
28 | func ListBuilderFromAPIList(events *corev1.EventList) *ListBuilder {
29 | b := &ListBuilder{list: &EventList{}}
30 | if events == nil {
31 | return b
32 | }
33 | for _, event := range events.Items {
34 | event := event
35 | b.list.Items = append(b.list.Items, &Event{Object: &event})
36 | }
37 | return b
38 | }
39 |
40 | // List returns the list of Event
41 | // instances that was built by this
42 | // builder
43 | func (b *ListBuilder) List() *EventList {
44 | if len(b.filters) == 0 {
45 | return b.list
46 | }
47 | filtered := &EventList{}
48 | for _, event := range b.list.Items {
49 | if b.filters.all(event) {
50 | filtered.Items = append(filtered.Items, event)
51 | }
52 | }
53 | return filtered
54 | }
55 |
56 | // WithFilter add filters on which the Event
57 | // has to be filtered
58 | func (b *ListBuilder) WithFilter(pred ...Predicate) *ListBuilder {
59 | b.filters = append(b.filters, pred...)
60 | return b
61 | }
62 |
--------------------------------------------------------------------------------
/e2e-tests/chaoslib/pumba/pumba_kube.yaml:
--------------------------------------------------------------------------------
1 | # If you are running Kubernetes >= 1.1.0. You can take advantage of DaemonSets to automatically deploy the Pumba on all your nodes.
2 | # On 1.1.x you'll need to explicitly enable the DaemonSets extension, see http://kubernetes.io/v1.1/docs/admin/daemons.html#caveats.
3 |
4 | # You'll then be able to deploy the DaemonSet with the command
5 | # `kubectl create -f pumba_kube.yaml`
6 |
7 | # If you are not running Kubernetes >= 1.1.0 or do not want to use DaemonSets, you can also run the Pumba as a regular docker container on each node you want to make chaos.
8 | # `docker run -d -v /var/run/docker.sock:/var/run/docker.sock gaiaadm/pumba pumba --random --interval 3m kill --signal SIGKILL`
9 |
10 | apiVersion: apps/v1
11 | kind: DaemonSet
12 | metadata:
13 | name: pumba
14 | spec:
15 | selector:
16 | matchLabels:
17 | app: pumba
18 | template:
19 | metadata:
20 | labels:
21 | app: pumba
22 | com.gaiaadm.pumba: "true" # prevent pumba from killing itself
23 | name: pumba
24 | spec:
25 | containers:
26 | - image: gaiaadm/pumba:0.4.8
27 | imagePullPolicy: IfNotPresent
28 | name: pumba
29 | # Pumba command: modify it to suit your needs
30 | # Dry run: Randomly try to kill some container every 3 minutes
31 | command: ["pumba", "--dry", "--random", "--interval", "3m", "kill", "--signal", "SIGTERM"]
32 | resources:
33 | requests:
34 | cpu: 10m
35 | memory: 5M
36 | limits:
37 | cpu: 100m
38 | memory: 20M
39 | volumeMounts:
40 | - name: dockersocket
41 | mountPath: /var/run/docker.sock
42 | volumes:
43 | - hostPath:
44 | path: /var/run/docker.sock
45 | name: dockersocket
46 |
--------------------------------------------------------------------------------
/deploy/helm/charts/templates/hostpath-class.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.hostpathClass.enabled }}
2 | apiVersion: storage.k8s.io/v1
3 | kind: StorageClass
4 | metadata:
5 | name: {{ tpl (.Values.hostpathClass.name) .}}
6 | annotations:
7 | openebs.io/cas-type: local
8 | cas.openebs.io/config: |
9 | - name: StorageType
10 | value: "hostpath"
11 | {{- if or .Values.localpv.basePath .Values.hostpathClass.basePath }}
12 | - name: BasePath
13 | value: {{ tpl (.Values.hostpathClass.basePath | default .Values.localpv.basePath | quote) . }}
14 | {{- end }}
15 | {{- if .Values.hostpathClass.nodeAffinityLabels }}
16 | - name: NodeAffinityLabels
17 | list:
18 | {{ toYaml .Values.hostpathClass.nodeAffinityLabels | indent 10 }}
19 | {{- end }}
20 | {{- if .Values.hostpathClass.xfsQuota.enabled }}
21 | - name: XFSQuota
22 | enabled: "{{ .Values.hostpathClass.xfsQuota.enabled }}"
23 | data:
24 | softLimitGrace: "{{ .Values.hostpathClass.xfsQuota.softLimitGrace }}"
25 | hardLimitGrace: "{{ .Values.hostpathClass.xfsQuota.hardLimitGrace }}"
26 | {{- end }}
27 | {{- if .Values.hostpathClass.ext4Quota.enabled }}
28 | - name: EXT4Quota
29 | enabled: "{{ .Values.hostpathClass.ext4Quota.enabled }}"
30 | data:
31 | softLimitGrace: "{{ .Values.hostpathClass.ext4Quota.softLimitGrace }}"
32 | hardLimitGrace: "{{ .Values.hostpathClass.ext4Quota.hardLimitGrace }}"
33 | {{- end }}
34 | {{- if .Values.hostpathClass.isDefaultClass }}
35 | storageclass.kubernetes.io/is-default-class: "true"
36 | {{- end }}
37 | {{- if .Values.extraLabels }}
38 | labels: {{- toYaml .Values.extraLabels | nindent 4 -}}
39 | {{- end }}
40 | provisioner: openebs.io/local
41 | volumeBindingMode: WaitForFirstConsumer
42 | reclaimPolicy: {{ .Values.hostpathClass.reclaimPolicy }}
43 | {{- end }}
44 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/chaos/app_pod_failure/run_e2e_test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | name: app-failure
6 | namespace: e2e
7 | data:
8 | parameters.yml: |
9 |
10 | ---
11 | apiVersion: batch/v1
12 | kind: Job
13 | metadata:
14 | generateName: application-pod-failure-
15 | namespace: e2e
16 | spec:
17 | template:
18 | metadata:
19 | labels:
20 | name: application-pod-failure
21 | spec:
22 | serviceAccountName: e2e
23 | restartPolicy: Never
24 | containers:
25 | - name: ansibletest
26 | image: openebs/localpv-e2e:ci
27 | env:
28 | - name: ANSIBLE_STDOUT_CALLBACK
29 | #value: log_plays
30 | #value: actionable
31 | value: default
32 |
33 | - name: APP_NAMESPACE
34 | value: ""
35 |
36 | - name: APP_LABEL
37 | value: ""
38 |
39 | - name: LIVENESS_APP_LABEL
40 | value: ""
41 |
42 | - name: LIVENESS_APP_NAMESPACE
43 | value: ""
44 |
45 | - name: DEPLOY_TYPE
46 | value: deployment
47 |
48 | # Specify the container runtime used, to pick the relevant chaos util
49 | - name: CONTAINER_RUNTIME
50 | value: docker
51 |
52 | #Specify the application for which data consistency is checked. Currently supported values are 'mysql' and 'busybox'
53 | - name: DATA_PERSISTENCE
54 | value: ""
55 |
56 | command: ["/bin/bash"]
57 | args: ["-c", "ansible-playbook ./e2e-tests/experiments/chaos/app_pod_failure/test.yml -i /etc/ansible/hosts -vv; exit 0"]
58 | volumeMounts:
59 | - name: parameters
60 | mountPath: /mnt/
61 | volumes:
62 | - name: parameters
63 | configMap:
64 | name: app-failure
65 |
--------------------------------------------------------------------------------
/e2e-tests/apps/percona/deployers/percona.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: percona
6 | labels:
7 | lkey: lvalue
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | lkey: lvalue
13 | template:
14 | metadata:
15 | labels:
16 | lkey: lvalue
17 | spec:
18 | containers:
19 | - resources:
20 | limits:
21 | cpu: 1
22 | name: percona
23 | image: openebs/tests-custom-percona:latest
24 | imagePullPolicy: IfNotPresent
25 | args:
26 | - "--ignore-db-dir"
27 | - "lost+found"
28 | env:
29 | - name: MYSQL_ROOT_PASSWORD
30 | value: k8sDem0
31 | ports:
32 | - containerPort: 3306
33 | name: percona
34 | volumeMounts:
35 | - mountPath: /var/lib/mysql
36 | name: data-vol
37 | #
38 | livenessProbe:
39 | exec:
40 | command: ["bash", "sql-test.sh"]
41 | initialDelaySeconds: 60
42 | periodSeconds: 1
43 | timeoutSeconds: 10
44 | #
45 | volumes:
46 | - name: data-vol
47 | persistentVolumeClaim:
48 | claimName: testclaim
49 | ---
50 | kind: PersistentVolumeClaim
51 | apiVersion: v1
52 | metadata:
53 | name: testclaim
54 | spec:
55 | storageClassName: testclass
56 | accessModes:
57 | - ReadWriteOnce
58 | resources:
59 | requests:
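# "volume-capacity" is a placeholder; the e2e deployer playbook replaces it with the CAPACITY env var before applying this manifest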
60 | storage: volume-capacity
61 | ---
62 | apiVersion: v1
63 | kind: Service
64 | metadata:
65 | name: percona-mysql
66 | labels:
67 | lkey: lvalue
68 | spec:
69 | ports:
70 | - port: 3306
71 | targetPort: 3306
72 | selector:
73 | lkey: lvalue
74 |
75 |
--------------------------------------------------------------------------------
/e2e-tests/apps/percona/workload/run_e2e_test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: batch/v1
3 | kind: Job
4 | metadata:
5 | generateName: percona-loadgen-
6 | namespace: e2e
7 | spec:
8 | template:
9 | metadata:
10 | name: percona-loadgen
11 | namespace: e2e
12 | labels:
13 | loadgen: percona-loadjob
14 | spec:
15 | serviceAccountName: e2e
16 | restartPolicy: Never
17 | containers:
18 | - name: ansibletest
19 | image: openebs/localpv-e2e:ci
20 | imagePullPolicy: IfNotPresent
21 | env:
22 | - name: ANSIBLE_STDOUT_CALLBACK
23 | #value: log_plays
24 | value: default
25 |
26 | - name: APP_LABEL
27 | value: name=percona
28 |
29 | #Namespace in which loadgen pod will be deployed
30 | - name: APP_NAMESPACE
31 | value: app-percona-ns
32 |
33 | - name: APP_SERVICE_LABEL
34 | value: name=percona
35 |
36 | - name: LOADGEN_LABEL
37 | value: loadgen=percona-loadgen
38 |
39 | # Database user name
40 | - name: DB_USER
41 | value: root
42 |
43 | - name: DB_PASSWORD
44 | value: k8sDem0
45 |
46 | # Bench duration (in min)
47 | # TODO: Use a tpcc-template to define workload w/ more granularity
48 | - name: LOAD_DURATION
49 | value: "600"
50 |
51 | - name: TPCC_WAREHOUSES
52 | value: "1"
53 |
54 | - name: TPCC_CONNECTIONS
55 | value: "18"
56 |
57 | - name: TPCC_WARMUP_PERIOD
58 | value: "10"
59 |
60 | - name: LOAD_INTERVAL
61 | value: "10"
62 |
63 | command: ["/bin/bash"]
64 | args: ["-c", "ansible-playbook ./e2e-tests/apps/percona/workload/test.yml -i /etc/ansible/hosts -v; exit 0"]
65 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/localpv-provisioner/run_e2e_test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: batch/v1
3 | kind: Job
4 | metadata:
5 | generateName: e2e-dynamic-localpv-operator-
6 | namespace: e2e
7 | spec:
8 | template:
9 | metadata:
10 | name: e2e
11 | labels:
12 | provider: localpv-operator
13 | spec:
14 | serviceAccountName: e2e
15 | restartPolicy: Never
16 | containers:
17 | - name: ansibletest
18 | image: openebs/localpv-e2e:ci
19 | imagePullPolicy: IfNotPresent
20 | env:
21 | - name: ANSIBLE_STDOUT_CALLBACK
22 | value: default
23 |
24 | - name: LOCALPV_PROVISIONER_IMAGE
25 | value: ""
26 |
27 | - name: NDM_VERSION
28 | value: ""
29 |
30 | - name: RELEASE_TAG
31 | value: ""
32 |
33 | # To deploy the operator from the release charts, the value should be `charts`
34 | # To deploy the operator with CI images, the value should be `master`
35 | # To deploy the operator from a PR, the value should be `commit_id`
36 | - name: IMAGE_TYPE
37 | value: ""
38 |
39 | # Release version; in the case of CI the value is 'master'
40 | # For other releases it should be the version, e.g. 2.10.0
41 | - name: RELEASE_VERSION
42 | value: ""
43 |
44 | - name: ACTION
45 | value: provision
46 |
47 | # Namespace where the csi plugins are deployed
48 | - name: OPERATOR_NS
49 | value: "openebs"
50 |
51 | - name: COMMIT_ID
52 | value: ""
53 |
54 | - name: RC_TAG
55 | value: ""
56 |
57 | command: ["/bin/bash"]
58 | args: ["-c", "ansible-playbook ./e2e-tests/experiments/localpv-provisioner/test.yml -i /etc/ansible/hosts -vv; exit 0"]
59 |
--------------------------------------------------------------------------------
/docs/installation/platforms/talos.md:
--------------------------------------------------------------------------------
1 | # Install Dynamic-LocalPV-Provisioner on Talos
2 |
3 | To use OpenEBS LocalPV Hostpath with a Talos cluster, you will have to bind-mount the hostpath directories into the kubelet containers. You can do this by editing the KubeletConfig section of your cluster machineconfig and adding the `extraMounts` entry (see below).
4 |
5 | **Note:** If you want to use a custom hostpath directory, you will have to bind-mount that directory's absolute path. The example below uses the default hostpath directory; a custom-path variant follows it.
6 |
7 | Visit the [Talos official documentation](https://www.talos.dev/docs) for instructions on editing machineconfig or using config patches.
8 |
9 | ```yaml
10 | kubelet:
11 | extraMounts:
12 | #Default Hostpath directory
13 | - destination: /var/openebs/local
14 | type: bind
15 | source: /var/openebs/local
16 | options:
17 | - rbind
18 | - rshared
19 | - rw
20 | ```
21 |
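For a custom hostpath directory, the same pattern applies. A sketch with a hypothetical custom base path of `/data/openebs/local`:

```yaml
kubelet:
  extraMounts:
    #Custom hostpath directory (hypothetical path)
    - destination: /data/openebs/local
      type: bind
      source: /data/openebs/local
      options:
        - rbind
        - rshared
        - rw
```
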
22 | If you are using the default Talos security policy, you will also have to add privileged pod security labels to the `openebs` namespace to allow it to use `hostPath` volumes, e.g.:
23 |
24 | ```yaml
25 | apiVersion: v1
26 | kind: Namespace
27 | metadata:
28 | name: openebs
29 | labels:
30 | pod-security.kubernetes.io/audit: privileged
31 | pod-security.kubernetes.io/enforce: privileged
32 | pod-security.kubernetes.io/warn: privileged
33 | ```
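
You can apply these labels before installing the chart, e.g. with `kubectl apply -f namespace.yaml`, assuming the manifest above is saved as `namespace.yaml`.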
34 |
35 | Caution: When using local storage on Talos, you must remember to pass the `--preserve` argument when running `talosctl upgrade` to avoid host paths getting wiped out during the upgrade (as noted in [Talos Local Storage documentation](https://www.talos.dev/v1.2/kubernetes-guides/configuration/replicated-local-storage-with-openebs-jiva/)).
36 |
37 | After adding the required configuration, proceed with installation as described in [the quickstart](https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md).
38 |
--------------------------------------------------------------------------------
/e2e-tests/apps/busybox/deployers/run_e2e_test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: batch/v1
3 | kind: Job
4 | metadata:
5 | generateName: e2e-busybox-deploy-
6 | namespace: e2e
7 | spec:
8 | template:
9 | metadata:
10 | name: e2e
11 | labels:
12 | app: busybox-e2e
13 | spec:
14 | serviceAccountName: e2e
15 | restartPolicy: Never
16 | containers:
17 | - name: ansibletest
18 | image: openebs/localpv-e2e:ci
19 | imagePullPolicy: IfNotPresent
20 | env:
21 | - name: ANSIBLE_STDOUT_CALLBACK
22 | #value: log_plays
23 | value: default
24 |
25 | - name: PROVIDER_STORAGE_CLASS
26 | value: openebs-hostpath
27 |
28 | # Application pvc
29 | - name: APP_PVC
30 | value: openebs-busybox
31 |
32 | # Application label
33 | # Use different 'LABEL' for deployment and statefulset application
34 | - name: APP_LABEL
35 | value: 'app=busybox-sts'
36 |
37 | # Application namespace
38 | # Use different 'namespace' for deployment and statefulset application
39 | - name: APP_NAMESPACE
40 | value: app-busybox-ns
41 |
42 | # Use 'deployment' for Busybox deployment application
43 | # Use 'statefulset' for Busybox statefulset application
44 | - name: DEPLOY_TYPE
45 | value: statefulset
46 |
47 | #Persistent Volume storage capacity
48 | - name: PV_CAPACITY
49 | value: 5Gi
50 |
51 | #Application replicas for statefulset application
52 | - name: APP_REPLICA
53 | value: 'replicas=2'
54 |
55 | # Use 'deprovision' for app-clean up
56 | - name: ACTION
57 | value: provision
58 |
59 | command: ["/bin/bash"]
60 | args: ["-c", "ansible-playbook ./e2e-tests/apps/busybox/deployers/test.yml -i /etc/ansible/hosts -v; exit 0"]
61 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ## Pull Request template
2 |
3 | **Why is this PR required? What issue does it fix?**:
4 |
5 | **What this PR does?**:
6 |
7 | **Does this PR require any upgrade changes?**:
8 |
9 | **If the changes in this PR are manually verified, list down the scenarios covered:**:
10 |
11 | **Any additional information for your reviewer?** :
12 | _Mention if this PR is part of any design or a continuation of previous PRs_
13 |
14 |
15 | **Checklist:**
16 | - [ ] Fixes #
17 | - [ ] PR Title follows the convention of `<type>(<scope>): <subject>`
18 | - [ ] Has the change log section been updated?
19 | - [ ] Commit has unit tests
20 | - [ ] Commit has integration tests
21 | - [ ] (Optional) Are upgrade changes included in this PR? If not, mention the issue/PR to track:
22 | - [ ] (Optional) If documentation changes are required, which issue on https://github.com/openebs/openebs-docs is used to track them:
23 |
24 |
25 | **PLEASE REMOVE BELOW INFORMATION BEFORE SUBMITTING**
26 |
27 | The PR title message must follow convention:
28 | `<type>(<scope>): <subject>`.
29 |
30 | Where:
31 | Most common types are:
32 | * `feat` - for new features, not a new feature for build script
33 | * `fix` - for bug fixes or improvements, not a fix for build script
34 | * `chore` - changes not related to production code
35 | * `docs` - changes related to documentation
36 | * `style` - formatting, missing semicolons, linting fixes, etc.; no significant production code changes
37 | * `test` - adding missing tests, refactoring tests; no production code change
38 | * `refactor` - refactoring production code, e.g. renaming a variable or function; there should not be any significant production code changes
39 | * `cherry-pick` - if the PR is already merged into the master branch and is raised against a release branch (like v0.4.x)
40 |
41 | IMPORTANT: Please review the [CONTRIBUTING.md](../CONTRIBUTING.md) file for detailed contributing guidelines.
42 |
--------------------------------------------------------------------------------
/docs/tutorials/hostpath/filepermissions.md:
--------------------------------------------------------------------------------
1 | # File permission tuning
2 |
3 | By default, Hostpath LocalPV creates the volume directory with `0777` permissions. In some use cases these permissions are too permissive and should be restricted.
4 | Note that with hostpath, the underlying PV is a local path, which allows the kubelet to chown the directory based on the Pod's [fsGroup](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods).
5 |
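For reference, here is a minimal, hypothetical Pod spec that sets `fsGroup`; when the volume is mounted, the kubelet changes its group ownership to the given GID (all names below are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: fsgroup-example
spec:
  securityContext:
    #Volume files will be group-owned by GID 2000
    fsGroup: 2000
  containers:
  - name: app
    image: busybox
    command: ["sleep", "3600"]
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: localpv-vol
```
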
6 | File permissions can be set with the `FilePermissions` config option, as in this custom StorageClass:
7 |
8 | ```yaml
9 | #This is a custom StorageClass template
10 | apiVersion: storage.k8s.io/v1
11 | kind: StorageClass
12 | metadata:
13 | name: custom-hostpath
14 | annotations:
15 | openebs.io/cas-type: local
16 | cas.openebs.io/config: |
17 | - name: StorageType
18 | value: "hostpath"
19 | - name: BasePath
20 | value: "/var/openebs/local"
21 | - name: FilePermissions
22 | data:
23 | mode: "0770"
24 | provisioner: openebs.io/local
25 | reclaimPolicy: Delete
26 | #It is necessary to have volumeBindingMode as WaitForFirstConsumer
27 | volumeBindingMode: WaitForFirstConsumer
28 | ```
29 |
30 | With this configuration, the directory will be created with `0770` permissions for every PVC using this StorageClass.
31 |
32 | The same configuration is available at the PVC level for more fine-grained control (the StorageClass configuration always takes precedence over the PVC's):
33 |
34 | ```yaml
35 | kind: PersistentVolumeClaim
36 | apiVersion: v1
37 | metadata:
38 | name: localpv-vol
39 | annotations:
40 | cas.openebs.io/config: |
41 | - name: FilePermissions
42 | data:
43 | mode: "0770"
44 | spec:
45 | #Change this name if you are using a custom StorageClass
46 | storageClassName: openebs-hostpath
47 | accessModes: ["ReadWriteOnce"]
48 | resources:
49 | requests:
50 | #Set capacity here
51 | storage: 5Gi
52 | ```
53 |
--------------------------------------------------------------------------------
/e2e-tests/apps/busybox/liveness/busybox_liveness.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Source: openebs/templates/clusterrole.yaml
3 | apiVersion: rbac.authorization.k8s.io/v1
4 | kind: ClusterRole
5 | metadata:
6 | name: app-namespace
7 | labels:
8 | name: app-namespace
9 | rules:
10 | - apiGroups: ["*"]
11 | resources: ["*"]
12 | verbs: ["*"]
13 |
14 | ---
15 | apiVersion: v1
16 | kind: ServiceAccount
17 | metadata:
18 | name: app-namespace
19 | namespace: app-namespace
20 | labels:
21 | name: app-namespace
22 |
23 | ---
24 | apiVersion: rbac.authorization.k8s.io/v1
25 | kind: ClusterRoleBinding
26 | metadata:
27 | name: app-namespace
28 | labels:
29 | name: app-namespace
30 | roleRef:
31 | apiGroup: rbac.authorization.k8s.io
32 | kind: ClusterRole
33 | name: app-namespace
34 | subjects:
35 | - kind: ServiceAccount
36 | name: app-namespace
37 | namespace: app-namespace
38 |
39 | ---
40 | apiVersion: batch/v1
41 | kind: Job
42 | metadata:
43 | generateName: busybox-liveness-
44 | namespace: app-namespace
45 | spec:
46 | template:
47 | metadata:
48 | name: busybox-liveness
49 | namespace: app-namespace
50 | labels:
51 | liveness: busybox-liveness
52 |
53 | # label used for mass-liveness check upon infra-chaos
54 | infra-aid: liveness
55 |
56 | spec:
57 | serviceAccountName: app-namespace
58 | restartPolicy: Never
59 |
60 | containers:
61 | - name: busybox-liveness
62 | image: openebs/busybox-client
63 | imagePullPolicy: IfNotPresent
64 |
65 | env:
66 |
67 | - name: LIVENESS_TIMEOUT_SECONDS
68 | value: "liveness-timeout-seconds"
69 |
70 | # number of retries when liveness fails
71 | - name: LIVENESS_RETRY_COUNT
72 | value: "liveness-retry-count"
73 |
74 | # Namespace in which busybox is running
75 | - name: NAMESPACE
76 | value: app-namespace
77 |
78 | - name: POD_NAME
79 | value: pod-name
80 |
81 | command: ["/bin/bash"]
82 | args: ["-c", "./liveness.sh; exit 0"]
83 |
--------------------------------------------------------------------------------
/pkg/logger/logger.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2019 The OpenEBS Authors.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package logger
18 |
19 | import (
20 | "log"
21 | "time"
22 |
23 | "github.com/spf13/pflag"
24 | "k8s.io/apimachinery/pkg/util/wait"
25 | "k8s.io/klog/v2"
26 | )
27 |
28 | // This is the default log flush interval for klog/v2
29 | // Ref: k8s.io/klog/v2@v2.40.1
30 | const KLOG_FLUSH_INTERVAL = 5 * time.Second
31 |
32 | var (
33 | defaultFlushInterval = KLOG_FLUSH_INTERVAL
34 | logFlushFreq = pflag.Duration("log-flush-frequency", KLOG_FLUSH_INTERVAL, "Maximum number of seconds between log flushes")
35 | loggerKillSwitch = make(chan struct{})
36 | )
37 |
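// KlogWriter is an io.Writer that forwards everything written to it to klog.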
38 | type KlogWriter struct{}
39 |
40 | func (k KlogWriter) Write(data []byte) (n int, err error) {
41 | klog.Info(string(data))
42 | return len(data), nil
43 | }
44 |
45 | // SetDefaultFlushInterval sets the default log flush interval. It needs
46 | // to be called with the correct default flush duration in case that
47 | // duration is not equal to KLOG_FLUSH_INTERVAL.
48 | func SetDefaultFlushInterval(freq time.Duration) {
49 | defaultFlushInterval = freq
50 | }
51 |
52 | // InitLogging initializes logging via klog: it streams logs from the
53 | // 'log' package to 'klog' and sets the flush frequency.
54 | func InitLogging() {
55 | log.SetOutput(KlogWriter{})
56 | log.SetFlags(0)
57 |
58 | // Flushes logs at set flush interval
59 | if *logFlushFreq != defaultFlushInterval {
60 | go wait.Until(klog.Flush, *logFlushFreq, loggerKillSwitch)
61 | }
62 | }
63 |
64 | func FinishLogging() {
65 | close(loggerKillSwitch)
66 | klog.Flush()
67 | }
68 |
--------------------------------------------------------------------------------
/vm.nix:
--------------------------------------------------------------------------------
1 | { ... }:
2 | let
3 | sources = import ./nix/sources.nix;
4 | pkgs = import sources.nixpkgs { };
5 | in
6 | {
7 | nix.nixPath = [
8 | "nixpkgs=${pkgs.path}"
9 | ];
10 | nixos-shell.mounts = {
11 | mountHome = false;
12 | mountNixProfile = false;
13 | cache = "none"; # default is "loose"
14 |
15 | extraMounts = {
16 | "/localpv" = {
17 | target = ./.;
18 | cache = "none";
19 | };
20 | };
21 | };
22 |
23 | virtualisation = {
24 | cores = 4;
25 | memorySize = 2048;
26 | # Uncomment to be able to ssh into the vm, example:
# ssh -p 2222 -o StrictHostKeyChecking=no root@localhost
28 | # forwardPorts = [
29 | # { from = "host"; host.port = 2222; guest.port = 22; }
30 | # ];
31 | diskSize = 20 * 1024;
32 | docker = {
33 | enable = true;
34 | };
35 | };
36 | documentation.enable = false;
37 |
38 | networking = {
39 | firewall = {
40 | allowedTCPPorts = [
41 | 6443 # k3s: required so that pods can reach the API server (running on port 6443 by default)
42 | ];
43 | };
44 | };
45 |
46 | services = {
47 | openssh.enable = true;
48 | k3s = {
49 | enable = true;
50 | role = "server";
51 | extraFlags = toString [
52 | "--disable=traefik"
53 | ];
54 | };
55 | };
56 |
57 | programs.git = {
58 | enable = true;
59 | config = {
60 | safe = {
61 | directory = [ "/localpv" ];
62 | };
63 | };
64 | };
65 |
66 | systemd.tmpfiles.rules = [
67 | "L+ /usr/local/bin - - - - /run/current-system/sw/bin/"
68 | ];
69 |
70 | environment = {
71 | variables = {
72 | KUBECONFIG = "/etc/rancher/k3s/k3s.yaml";
73 | CI_K3S = "true";
74 | GOPATH = "/localpv/nix/.go";
75 | EDITOR = "vim";
76 | };
77 |
78 | shellAliases = {
79 | k = "kubectl";
80 | ke = "kubectl -n openebs";
81 | };
82 |
83 | shellInit = ''
84 | export PATH=$GOPATH/bin:$PATH
85 | cd /localpv
86 | '';
87 |
88 | systemPackages = with pkgs; [ vim docker-client k9s e2fsprogs xfsprogs ];
89 | };
90 | }
91 |
--------------------------------------------------------------------------------
/pkg/kubernetes/api/core/v1/volume/volume.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2018-2020 The OpenEBS Authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package volume
16 |
17 | import (
18 | corev1 "k8s.io/api/core/v1"
19 | )
20 |
21 | // Volume is a wrapper over named volume api object, used
22 | // within Pods. It provides build, validations and other common
23 | // logic to be used by various feature specific callers.
24 | type Volume struct {
25 | object *corev1.Volume
26 | }
27 |
28 | type volumeBuildOption func(*Volume)
29 |
30 | // NewForAPIObject returns a new instance of Volume
31 | func NewForAPIObject(obj *corev1.Volume, opts ...volumeBuildOption) *Volume {
32 | v := &Volume{object: obj}
33 | for _, o := range opts {
34 | o(v)
35 | }
36 | return v
37 | }
38 |
39 | // Predicate defines an abstraction
40 | // to determine conditional checks
41 | // against the provided volume instance
42 | type Predicate func(*Volume) bool
43 |
44 | // IsNil returns true if the Volume instance
45 | // is nil
46 | func (v *Volume) IsNil() bool {
47 | return v.object == nil
48 | }
49 |
50 | // IsNil is predicate to filter out nil Volume
51 | // instances
52 | func IsNil() Predicate {
53 | return func(v *Volume) bool {
54 | return v.IsNil()
55 | }
56 | }
57 |
58 | // PredicateList holds a list of predicate
59 | type PredicateList []Predicate
60 |
61 | // all returns true if all the predicates
62 | // succeed against the provided volume
63 | // instance
64 | func (l PredicateList) all(v *Volume) bool {
65 | for _, pred := range l {
66 | if !pred(v) {
67 | return false
68 | }
69 | }
70 | return true
71 | }
72 |
--------------------------------------------------------------------------------
/Makefile.buildx.mk:
--------------------------------------------------------------------------------
1 | # Copyright 2020 The OpenEBS Authors
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | # default list of platforms for which multiarch image is built
16 | ifeq (${PLATFORMS}, )
17 | export PLATFORMS="linux/amd64,linux/arm64,linux/arm/v7,linux/ppc64le"
18 | endif
19 |
20 | # if IMG_RESULT is unspecified, by default the image will be pushed to registry
21 | ifeq (${IMG_RESULT}, load)
22 | export PUSH_ARG="--load"
23 | # if load is specified, image will be built only for the build machine architecture.
24 | export PLATFORMS="local"
25 | else ifeq (${IMG_RESULT}, cache)
26 | # if cache is specified, image will only be available in the build cache, it won't be pushed or loaded
27 | # therefore no PUSH_ARG will be specified
28 | else
29 | export PUSH_ARG="--push"
30 | endif
31 |
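# Example (assumed invocation): "IMG_RESULT=load make docker.buildx.provisioner-localpv"
# builds only for the local architecture and loads the image into the local docker daemon.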
32 | .PHONY: docker.buildx
33 | docker.buildx:
34 | export DOCKER_CLI_EXPERIMENTAL=enabled
35 | @if ! docker buildx ls | grep -q container-builder; then\
36 | docker buildx create --platform ${PLATFORMS} --name container-builder --use;\
37 | fi
38 | @docker buildx build --platform ${PLATFORMS} \
39 | -t "$(DOCKERX_IMAGE_NAME)" ${DBUILD_ARGS} -f $(PWD)/buildscripts/$(COMPONENT)/$(COMPONENT).Dockerfile \
40 | . ${PUSH_ARG}
41 | @echo "--> Build docker image: $(DOCKERX_IMAGE_NAME)"
42 | @echo
43 |
44 | .PHONY: docker.buildx.provisioner-localpv
45 | docker.buildx.provisioner-localpv: DOCKERX_IMAGE_NAME=$(PROVISIONER_LOCALPV_IMAGE_TAG)
46 | docker.buildx.provisioner-localpv: COMPONENT=$(PROVISIONER_LOCALPV)
47 | docker.buildx.provisioner-localpv: docker.buildx
48 |
49 | .PHONY: buildx.push.provisioner-localpv
50 | buildx.push.provisioner-localpv:
51 | BUILDX=true DIMAGE=${IMAGE_ORG}/provisioner-localpv ./buildscripts/push.sh
52 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/backup_and_restore/run_e2e_test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: batch/v1
3 | kind: Job
4 | metadata:
5 | generateName: e2e-velero-backup-restore-
6 | namespace: e2e
7 | spec:
8 | template:
9 | metadata:
10 | name: e2e
11 | labels:
12 | app: velero-backup-restore
13 |
14 | spec:
15 | serviceAccountName: e2e
16 | restartPolicy: Never
17 | containers:
18 | - name: ansibletest
19 | image: openebs/localpv-e2e:ci
20 | imagePullPolicy: IfNotPresent
21 |
22 | env:
23 | - name: ANSIBLE_STDOUT_CALLBACK
24 | value: default
25 |
26 | - name: APP_LABEL
27 | value: 'name=percona'
28 |
29 | # Application pvc
30 | - name: APP_PVC
31 | value: percona-mysql-claim
32 |
33 | # Application namespace
34 | - name: APP_NAMESPACE
35 | value: app-percona-ns
36 |
37 | - name: DB_USER_NAME
38 | value: root
39 |
40 | - name: DB_PASSWORD
41 | value: k8sDem0
42 |
43 | - name: DEPLOY_TYPE
44 | value: deployment
45 |
46 | - name: BACKUP_NAME
47 | value: percona-backup
48 |
49 | - name: OPERATOR_NAMESPACE
50 | value: "openebs"
51 |
52 | - name: VELERO_PLUGIN_NAME
53 | value: "openebs/velero-plugin:ci"
54 |
55 | - name: VELERO_VERSION
56 | value: "v1.1.0"
57 |
58 | #Supported values are {'minio', 'GCP'}
59 | - name: STORAGE_BUCKET
60 | value: "minio"
61 |
62 | - name: GCP_PROJECT_ID
63 | value: "openebs-ci"
64 |
65 | #Supported values are {'default', 'user-defined'}; can be set only when {bucket-type=minio, local-snapshot=false}
66 | - name: PROFILE
67 | value: "default"
68 |
69 | # supported values are: true & false (Deprovision velero namespace after test completion)
70 | - name: DEPROVISION_VELERO
71 | value: "false"
72 |
73 | command: ["/bin/bash"]
74 | args: ["-c", "ansible-playbook ./e2e-tests/experiments/functional/backup_and_restore/test.yml -i /etc/ansible/hosts -vv; exit 0"]
75 |
--------------------------------------------------------------------------------
/cmd/provisioner-localpv/app/env.go:
--------------------------------------------------------------------------------
1 | package app
2 |
3 | import (
4 | menv "github.com/openebs/maya/pkg/env/v1alpha1"
5 | k8sEnv "k8s.io/utils/env"
6 | )
7 |
8 | // This file defines the environment variable names that are specific
9 | // to this provisioner. In addition to the variables defined in this file,
10 | // provisioner also uses the following:
11 | // OPENEBS_NAMESPACE
12 | // NODE_NAME
13 | // OPENEBS_SERVICE_ACCOUNT
14 | // OPENEBS_IO_K8S_MASTER
15 | // OPENEBS_IO_KUBE_CONFIG
16 |
17 | const (
18 | // ProvisionerHelperImage is the environment variable that provides the
19 | // container image to be used to launch the helper pods managing the
20 | // host path
21 | ProvisionerHelperImage menv.ENVKey = "OPENEBS_IO_HELPER_IMAGE"
22 |
23 | // ProvisionerHelperPodHostNetwork is the environment variable that provides the
24 | // host network mode to be used to launch the helper pods
25 | ProvisionerHelperPodHostNetwork string = "OPENEBS_IO_HELPER_POD_HOST_NETWORK"
26 |
27 | // ProvisionerBasePath is the environment variable that provides the
28 | // default base path on the node where host-path PVs will be provisioned.
29 | ProvisionerBasePath menv.ENVKey = "OPENEBS_IO_BASE_PATH"
30 |
31 | // ProvisionerImagePullSecrets is the environment variable that provides the
32 | // image pull secrets the helper (init) pod uses when pulling the helper image from a registry that requires authentication
33 | ProvisionerImagePullSecrets menv.ENVKey = "OPENEBS_IO_IMAGE_PULL_SECRETS"
34 | )
35 |
36 | var (
37 | defaultHelperImage = "openebs/linux-utils:latest"
38 | defaultBasePath = "/var/openebs/local"
39 | )
40 |
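// getOpenEBSNamespace returns the namespace the provisioner runs in, read from the OPENEBS_NAMESPACE environment variable.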
41 | func getOpenEBSNamespace() string {
42 | return menv.Get(menv.OpenEBSNamespace)
43 | }
44 | func getDefaultHelperImage() string {
45 | return menv.GetOrDefault(ProvisionerHelperImage, defaultHelperImage)
46 | }
47 | func getHelperPodHostNetwork() bool {
48 | val, _ := k8sEnv.GetBool(ProvisionerHelperPodHostNetwork, false)
49 | return val
50 | }
51 |
52 | func getDefaultBasePath() string {
53 | return menv.GetOrDefault(ProvisionerBasePath, defaultBasePath)
54 | }
55 |
56 | func getOpenEBSServiceAccountName() string {
57 | return menv.Get(menv.OpenEBSServiceAccount)
58 | }
59 | func getOpenEBSImagePullSecrets() string {
60 | return menv.Get(ProvisionerImagePullSecrets)
61 | }
62 |
--------------------------------------------------------------------------------
/cmd/provisioner-localpv/app/types.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2019 The OpenEBS Authors.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 |
16 | */
17 |
18 | package app
19 |
20 | import (
21 | "context"
22 |
23 | mconfig "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
24 | v1 "k8s.io/api/core/v1"
25 | clientset "k8s.io/client-go/kubernetes"
26 | )
27 |
28 | const (
29 | SnapshotKind string = "VolumeSnapshot"
30 | PVCKind string = "PersistentVolumeClaim"
31 | SnapshotAPIGroup string = "snapshot.storage.k8s.io"
32 | )
33 |
34 | // Provisioner struct has the configuration and utilities required
35 | // across the different work-flows.
36 | type Provisioner struct {
37 | kubeClient *clientset.Clientset
38 | namespace string
39 | helperImage string
40 | // defaultConfig is the default configurations
41 | // provided from ENV or Code
42 | defaultConfig []mconfig.Config
43 | // getVolumeConfig is a reference to a function
44 | getVolumeConfig GetVolumeConfigFn
45 | }
46 |
47 | // VolumeConfig struct contains the merged configuration of the PVC
48 | // and the associated SC. The configuration is derived from the
49 | // annotation `cas.openebs.io/config`. The configuration will be
50 | // in the following json format:
51 | //
52 | // {
53 | // Key1:{
54 | // enabled: true
55 | // value: "string value"
56 | // },
57 | // Key2:{
58 | // enabled: true
59 | // value: "string value"
60 | // },
61 | // }
62 | type VolumeConfig struct {
63 | pvName string
64 | pvcName string
65 | scName string
66 | options map[string]interface{}
67 | configData map[string]interface{}
68 | configList map[string]interface{}
69 | }
70 |
71 | // GetVolumeConfigFn allows plugging in a custom function
72 | // for fetching the volume configuration,
73 | // which makes it easy to unit test the provisioner
74 | type GetVolumeConfigFn func(ctx context.Context, pvName string, pvc *v1.PersistentVolumeClaim) (*VolumeConfig, error)
75 |
--------------------------------------------------------------------------------
/pkg/kubernetes/api/core/v1/pod/buildlist.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2019-2020 The OpenEBS Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package pod
18 |
19 | import (
20 | corev1 "k8s.io/api/core/v1"
21 | )
22 |
23 | // ListBuilder enables building an instance of
24 | // Podlist
25 | type ListBuilder struct {
26 | list *PodList
27 | filters PredicateList
28 | }
29 |
30 | // NewListBuilder returns an instance of ListBuilder
31 | func NewListBuilder() *ListBuilder {
32 | return &ListBuilder{list: &PodList{items: []*Pod{}}}
33 | }
34 |
35 | // ListBuilderForAPIList returns an instance of ListBuilder from an API PodList
36 | func ListBuilderForAPIList(pods *corev1.PodList) *ListBuilder {
37 | b := &ListBuilder{list: &PodList{}}
38 | if pods == nil {
39 | return b
40 | }
41 | for _, p := range pods.Items {
42 | p := p
43 | b.list.items = append(b.list.items, &Pod{object: &p})
44 | }
45 | return b
46 | }
47 |
48 | // ListBuilderForObjectList returns an instance of ListBuilder from API Pods
49 | func ListBuilderForObjectList(pods ...*Pod) *ListBuilder {
50 | b := &ListBuilder{list: &PodList{}}
51 | if pods == nil {
52 | return b
53 | }
54 | for _, p := range pods {
55 | p := p
56 | b.list.items = append(b.list.items, p)
57 | }
58 | return b
59 | }
60 |
61 | // List returns the list of pod
62 | // instances that was built by this
63 | // builder
64 | func (b *ListBuilder) List() *PodList {
65 | if len(b.filters) == 0 {
66 | return b.list
67 | }
68 | filtered := &PodList{}
69 | for _, pod := range b.list.items {
70 | if b.filters.all(pod) {
71 | filtered.items = append(filtered.items, pod)
72 | }
73 | }
74 | return filtered
75 | }
76 |
77 | // WithFilter add filters on which the pod
78 | // has to be filtered
79 | func (b *ListBuilder) WithFilter(pred ...Predicate) *ListBuilder {
80 | b.filters = append(b.filters, pred...)
81 | return b
82 | }
83 |
--------------------------------------------------------------------------------
/e2e-tests/Dockerfile:
--------------------------------------------------------------------------------
1 |
2 | # Copyright 2020-2021 The OpenEBS Authors. All rights reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 |
17 | FROM ubuntu:18.04
18 |
19 | LABEL maintainer="OpenEBS"
20 |
21 | #Installing necessary ubuntu packages
22 | RUN rm -rf /var/lib/apt/lists/* && \
23 | apt-get clean && \
24 | apt-get update --fix-missing || true && \
25 | apt-get install -y python python-pip netcat iproute2 jq sshpass bc git \
26 | curl openssh-client
27 |
28 | #Installing gcloud cli
29 | RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \
30 | apt-get install apt-transport-https ca-certificates gnupg -y && \
31 | curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - && \
32 | apt-get update && apt-get install google-cloud-sdk -y
33 |
34 | #Installing ansible
35 | RUN pip install ansible==2.7.3
36 | RUN pip install ruamel.yaml.clib==0.1.2
37 |
38 | #Installing openshift
39 | RUN pip install openshift==0.11.2
40 |
41 | #Installing jmespath
42 | RUN pip install jmespath
43 |
44 | RUN touch /mnt/parameters.yml
45 |
46 | #Installing Kubectl
47 | ENV KUBE_LATEST_VERSION="v1.12.0"
48 | RUN curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl && \
49 | chmod +x /usr/local/bin/kubectl && \
50 | curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-07-26/bin/linux/amd64/aws-iam-authenticator && chmod +x /usr/local/bin/aws-iam-authenticator
51 |
52 | #Adding hosts entries and making ansible folders
53 | RUN mkdir /etc/ansible/ /ansible && \
54 | echo "[local]" >> /etc/ansible/hosts && \
55 | echo "127.0.0.1" >> /etc/ansible/hosts
56 |
57 | #Copying Necessary Files
58 | COPY ./e2e-tests ./e2e-tests
--------------------------------------------------------------------------------
/e2e-tests/apps/percona/deployers/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: False
5 |
6 | vars_files:
7 | - test_vars.yml
8 |
9 | tasks:
10 | - block:
11 |
12 | ## Generating the testname for deployment
13 | - include_tasks: /e2e-tests/utils/fcm/create_testname.yml
14 |
15 | ## RECORD START-OF-TEST IN e2e RESULT CR
16 | - include_tasks: "/e2e-tests/utils/fcm/update_e2e_result_resource.yml"
17 | vars:
18 | status: 'SOT'
19 |
20 | - name: Replace the storage capacity placeholder
21 | replace:
22 | path: "{{ application_deployment }}"
23 | regexp: "volume-capacity"
24 | replace: "{{ lookup('env','CAPACITY') }}"
25 |
26 | - block:
27 |
28 | ## Actual test
29 | ## Creating the namespace and preparing the application for deployment
30 | - include_tasks: /e2e-tests/utils/k8s/pre_create_app_deploy.yml
31 |
32 | - name: Display application deployment spec for verification
33 | debug: var=item
34 | with_file:
35 | - "{{ application_deployment }}"
36 |
37 | ## Deploying the application, upper bound wait time: 900s
38 | - include_tasks: /e2e-tests/utils/k8s/deploy_single_app.yml
39 | vars:
40 | check_app_pod: 'yes'
41 | delay: 5
42 | retries: 180
43 |
44 | ## Fetching the pod name
45 | - include_tasks: /e2e-tests/utils/k8s/fetch_app_pod.yml
46 |
47 | ## Checking the db is ready for connection
48 | - include_tasks: /e2e-tests/utils/scm/applications/mysql/check_db_connection.yml
49 |
50 | - set_fact:
51 | flag: "Pass"
52 |
53 | when: "'deprovision' not in action"
54 |
55 | - block:
56 |
57 | - name: Deprovisioning the Application
58 | include_tasks: "/e2e-tests/utils/k8s/deprovision_deployment.yml"
59 | vars:
60 | app_deployer: "{{ application_deployment }}"
61 |
62 | - set_fact:
63 | flag: "Pass"
64 |
65 | when: "'deprovision' is in action"
66 |
67 | rescue:
68 | - set_fact:
69 | flag: "Fail"
70 |
71 | always:
72 | ## RECORD END-OF-TEST IN e2e RESULT CR
73 | - include_tasks: /e2e-tests/utils/fcm/update_e2e_result_resource.yml
74 | vars:
75 | status: 'EOT'
76 |
--------------------------------------------------------------------------------
/cmd/provisioner-localpv/app/helper_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2019 The OpenEBS Authors.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 |
16 | This code was taken from https://github.com/rancher/local-path-provisioner
17 | and modified to work with the configuration options used by OpenEBS
18 | */
19 |
20 | package app
21 |
22 | import (
23 | "testing"
24 | )
25 |
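// TestConvertToK verifies that convertToK converts a percentage limit-grace value and a PVC capacity (in bytes) into the expected kilobyte string.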
26 | func TestConvertToK(t *testing.T) {
27 | type args struct {
28 | limit string
29 | pvcStorage int64
30 | }
31 | tests := map[string]struct {
32 | name string
33 | args args
34 | want string
35 | wantErr bool
36 | }{
37 | "Missing limit grace": {
38 | args: args{
39 | limit: "",
40 | pvcStorage: 5000000000,
41 | },
42 | want: "0k",
43 | wantErr: false,
44 | },
45 | "Present limit with grace": {
46 | args: args{
47 | limit: "0%",
48 | pvcStorage: 5000,
49 | },
50 | want: "5k",
51 | wantErr: false,
52 | },
53 | "Present limit grace exceeding 100%": {
54 | args: args{
55 | limit: "200%",
56 | pvcStorage: 5000000,
57 | },
58 | want: "9766k",
59 | wantErr: false,
60 | },
61 | "Present limit grace with decimal%": {
62 | args: args{
63 | limit: ".5%",
64 | pvcStorage: 1000,
65 | },
66 | want: "1k", // the final result of limit can't be a float
67 | wantErr: false,
68 | },
69 | "Present limit grace with invalid pattern": {
70 | args: args{
71 | limit: "10",
72 | pvcStorage: 10000,
73 | },
74 | want: "",
75 | wantErr: true,
76 | },
77 | "Present limit grace with only %": {
78 | args: args{
79 | limit: "%",
80 | pvcStorage: 10000,
81 | },
82 | want: "",
83 | wantErr: true,
84 | },
85 | }
86 | for name, tt := range tests {
87 | t.Run(name, func(t *testing.T) {
88 | got, err := convertToK(tt.args.limit, tt.args.pvcStorage)
89 | if (err != nil) != tt.wantErr {
90 | t.Errorf("convertToK() error = %v, wantErr %v", err, tt.wantErr)
91 | return
92 | }
93 | if got != tt.want {
94 | t.Errorf("convertToK() = %v, want %v", got, tt.want)
95 | }
96 | })
97 | }
98 | }
99 |
--------------------------------------------------------------------------------
/cmd/provisioner-localpv/main.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2019 The OpenEBS Authors.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package main
18 |
19 | import (
20 | "flag"
21 | "os"
22 |
23 | "github.com/spf13/pflag"
24 | "k8s.io/klog/v2"
25 |
26 | "github.com/openebs/dynamic-localpv-provisioner/cmd/provisioner-localpv/app"
27 | "github.com/openebs/dynamic-localpv-provisioner/pkg/logger"
28 | )
29 |
30 | func init() {
31 | // Declare klog CLI flags
32 | klog.InitFlags(flag.CommandLine)
33 | // NOTE: As of klog/v2@v2.40.1 the --logtostderr=true option cannot be
34 | // used alongside other klog flags to write logs in a file or
35 | // directory.
36 | // The --alsologtostderr=true option can be used alongside other
37 | // klog flags to write logs to a file or directory. Disabling
38 | // this flag will disable logging to stderr (while
39 | // --logtostderr=false is set).
40 | // Ref: https://github.com/kubernetes/klog/issues/60
41 | // User flags will be honored at Parse time.
42 | flag.CommandLine.Set("logtostderr", "false")
43 | flag.CommandLine.Set("alsologtostderr", "true")
44 |
45 | // Merge klog CLI flags to the global flagset
46 | // The pflag.Commandline FlagSet will be parsed
47 | // in the run() function in this package, before
48 | // initializing logging and cobra command execution.
49 | // Ref: github.com/spf13/pflag#supporting-go-flags-when-using-pflag
50 | pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
51 | }
52 |
53 | func main() {
54 | if err := run(); err != nil {
55 | os.Exit(1)
56 | }
57 | os.Exit(0)
58 | }
59 |
60 | // run starts the dynamic provisioner for Local PVs
61 | func run() error {
62 | // Create new cobra command
63 | cmd, err := app.StartProvisioner()
64 | if err != nil {
65 | return err
66 | }
67 |
68 | // Merge all flags from the Cobra Command to the global FlagSet
69 | // and Parse them
70 | pflag.CommandLine.AddFlagSet(cmd.Flags())
71 | pflag.Parse()
72 |
73 | // NOTE: Logging must start after CLI flags have been parsed
74 | // Initialize logs and Flush logs on exit
75 | logger.InitLogging()
76 | defer logger.FinishLogging()
77 |
78 | // Run new command
79 | return cmd.Execute()
80 | }
81 |
--------------------------------------------------------------------------------
/tests/bdd/xfs_quota.feature:
--------------------------------------------------------------------------------
1 | Feature: Hostpath XFS Quota Local PV
2 |
3 | Scenario: HostPath XFS Quota Local PV with Unsupported Filesystem
4 | Given a sparse file "disk.img"
5 | And a loop device is created on top of disk.img
6 |
7 | When a StorageClass is created with the following attributes:
8 | | name | sc-hp-xfs |
9 | | BasePath | /path/to/hostpath |
10 | | XFSQuotaEnabled | "true" |
11 | | softLimit | "20%" |
12 | | hardLimit | "50%" |
13 | | provisionerName | openebs.io/local |
14 | | volumeBindingMode | WaitForFirstConsumer |
15 | | reclaimPolicy | Delete |
16 | And a minix filesystem is written into the loop device
17 | And the minix filesystem is mounted with project quota enabled
18 | And a PVC "pvc-hp-xfs" is created with the StorageClass "sc-hp-xfs"
19 | And a Pod is created with PVC "pvc-hp-xfs"
20 | Then the Pod should be in pending state
21 | And the PVC should be in pending state
22 |
23 | When the Pod "busybox-hostpath" is deleted
24 | Then the Pod should be deleted successfully
25 |
26 | When the PVC "pvc-hp-xfs" is deleted
27 | Then the PVC should be deleted successfully
28 |
29 | Scenario: HostPath XFS Quota Local PV with XFS Filesystem
30 | Given a sparse file "disk.img"
31 | And a loop device is created on top of disk.img
32 |
33 | When a StorageClass is created with the following attributes:
34 | | name | sc-hp-xfs |
35 | | BasePath | /path/to/hostpath |
36 | | XFSQuotaEnabled | "true" |
37 | | provisionerName | openebs.io/local |
38 | | volumeBindingMode | WaitForFirstConsumer |
39 | | reclaimPolicy | Delete |
40 | And the loop device is formatted with XFS filesystem
41 | And the xfs filesystem is mounted with project quota enabled
42 | And a PVC "pvc-hp-xfs" is created with the StorageClass "sc-hp-xfs"
43 | And a Pod is created with PVC "pvc-hp-xfs"
44 | Then the Pod should be up and running
45 |
46 | When data is written more than the quota limit into the hostpath volume
47 | Then the container process should not be able to write more than the enforced limit
48 |
49 | When the Pod consuming PVC "pvc-hp-xfs" is deleted
50 | Then the Pod should be deleted successfully
51 |
52 | When the PVC "pvc-hp-xfs" is deleted
53 | Then the PVC should be deleted successfully
54 | And the Provisioner should delete the PV
55 |
--------------------------------------------------------------------------------
/tests/bdd/pvc_cas_config.feature:
--------------------------------------------------------------------------------
1 | Feature: Volume Provisioning/De-provisioning with Additive and Conflicting CAS-configs on PVC and SC
2 |
3 | Scenario: Additive CAS-configs on PVC and SC
4 | When a StorageClass is created with the following attributes:
5 | | name | sc-additive-cas-config |
6 | | BasePath | /path/to/hostpath |
7 | | provisionerName | openebs.io/local |
8 | | volumeBindingMode | WaitForFirstConsumer |
9 | | reclaimPolicy | Delete |
10 | And a PVC "pvc-additive-cas-config" is created with the following attributes:
11 | | name | pvc-additive-cas-config |
12 | | storageClass | sc-hp |
13 | | NodeAffinityLabels | "kubernetes.io/os", "kubernetes.io/arch" |
14 | | accessModes | ReadWriteOnce |
15 | | capacity | 2Gi |
16 | And a Deployment is created with PVC "pvc-additive-cas-config"
17 | Then the Pod should be up and running
18 | And a bound PV should be created
19 | And the PVC NodeAffinityLabels CAS-configs should be set correctly on the PV
20 |
21 | When the application Deployment is deleted
22 | Then the Pod should be deleted
23 |
24 | When the PVC is deleted
25 | Then the PV should be deleted
26 |
27 | Scenario: Conflicting CAS-configs on PVC and SC
28 | When a StorageClass is created with the following attributes:
29 | | name | sc-conflicting-cas-config |
30 | | BasePath | /path/to/hostpath |
31 | | NodeAffinityLabels | "kubernetes.io/hostname" |
32 | | provisionerName | openebs.io/local |
33 | | volumeBindingMode | WaitForFirstConsumer |
34 | | reclaimPolicy | Delete |
35 | And a PVC "pvc-conflicting-cas-config" is created with the following attributes:
36 | | name | pvc-conflicting-cas-config |
37 | | storageClass | sc-conflicting-cas-config |
38 | | NodeAffinityLabels | "kubernetes.io/os", "kubernetes.io/arch" |
39 | | accessModes | ReadWriteOnce |
40 | | capacity | 2Gi |
41 | And a Deployment is created with PVC "pvc-conflicting-cas-config"
42 | Then a Pod should be up and running
43 | And a bound PV should be created
44 | And the SC NodeAffinityLabels CAS-config should be set correctly on the PV
45 |
46 | When the application Deployment is deleted
47 | Then the Pod should be deleted
48 |
49 | When the PVC is deleted
50 | Then the PV should be deleted
51 |
--------------------------------------------------------------------------------
/tests/bdd/ext4_quota.feature:
--------------------------------------------------------------------------------
1 | Feature: Hostpath EXT4 Quota Local PV
2 |
3 | Scenario: HostPath EXT4 Quota Local PV with Unsupported Filesystem
4 | Given a sparse file "disk.img"
5 | And a loop device is created on top of disk.img
6 |
7 | When a StorageClass is created with the following attributes:
8 | | name | sc-hp-ext4 |
9 | | BasePath | /path/to/hostpath |
10 | | EXT4QuotaEnabled | "true" |
11 | | softLimit | "20%" |
12 | | hardLimit | "50%" |
13 | | provisionerName | openebs.io/local |
14 | | volumeBindingMode | WaitForFirstConsumer |
15 | | reclaimPolicy | Delete |
16 | And a minix filesystem is written into the loop device
17 | And the minix filesystem is mounted with project quota enabled
18 | And a PVC "pvc-hp-ext4" is created with the StorageClass "sc-hp-ext4"
19 | And a Pod is created with PVC "pvc-hp-ext4"
20 | Then the Pod should be in pending state
21 | And the PVC should be in pending state
22 |
23 | When the Pod "busybox-hostpath" is deleted
24 | Then the Pod should be deleted successfully
25 |
26 | When the PVC "pvc-hp-ext4" is deleted
27 | Then the PVC should be deleted successfully
28 |
29 | Scenario: HostPath EXT4 Quota Local PV with EXT4 Filesystem
30 | Given a sparse file "disk.img"
31 | And a loop device is created on top of disk.img
32 |
33 | When a StorageClass is created with valid EXT4 quota parameters
34 | Then the StorageClass should have the following attributes:
35 | | name | sc-hp-ext4 |
36 | | BasePath | /path/to/hostpath |
37 | | EXT4QuotaEnabled | "true" |
38 | | provisionerName | openebs.io/local |
39 | | volumeBindingMode | WaitForFirstConsumer |
40 | | reclaimPolicy | Delete |
41 |
42 | When the loop device is formatted with EXT4 filesystem
43 | And the ext4 filesystem is mounted with project quota enabled
44 | And a PVC "pvc-hp-ext4" is created with the StorageClass "sc-hp-ext4"
45 | And a Pod is created with PVC "pvc-hp-ext4"
46 | Then the Pod should be up and running
47 |
48 | When data is written more than the quota limit into the hostpath volume
49 | Then the container process should not be able to write more than the enforced limit
50 |
51 | When the Pod consuming PVC "pvc-hp-ext4" is deleted
52 | Then the Pod should be deleted successfully
53 |
54 | When the PVC "pvc-hp-ext4" is deleted
55 | Then the PVC should be deleted successfully
56 | And the Provisioner should delete the PV
57 |
--------------------------------------------------------------------------------
/cmd/provisioner-localpv/app/backward_compatability.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2019 The OpenEBS Authors.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package app
18 |
19 | import (
20 | "context"
21 |
22 | mconfig "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
23 | blockdeviceclaim "github.com/openebs/maya/pkg/blockdeviceclaim/v1alpha1"
24 | "github.com/pkg/errors"
25 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
26 | clientset "k8s.io/client-go/kubernetes"
27 | "k8s.io/klog/v2"
28 | )
29 |
30 | // This function performs the preupgrade related tasks for 1.0 to 1.1
31 | func performPreupgradeTasks(ctx context.Context, kubeClient *clientset.Clientset) error {
32 | return addLocalPVFinalizerOnAssociatedBDCs(ctx, kubeClient)
33 | }
34 |
35 | // Add localpv finalizer on the BDCs that are used by PVs provisioned from localpv provisioner
36 | func addLocalPVFinalizerOnAssociatedBDCs(ctx context.Context, kubeClient *clientset.Clientset) error {
37 | // Get the list of PVs that are provisioned by device based local pv provisioner
38 | pvList, err := kubeClient.CoreV1().PersistentVolumes().List(
39 | ctx,
40 | metav1.ListOptions{
41 | LabelSelector: string(mconfig.CASTypeKey) + "=local-device",
42 | })
43 | if err != nil {
44 | return errors.Wrap(err, "failed to list localpv based pv(s)")
45 | }
46 |
47 | for _, pvObj := range pvList.Items {
48 | bdcName := "bdc-" + pvObj.Name
49 |
50 | bdcObj, err := blockdeviceclaim.NewKubeClient().WithNamespace(getOpenEBSNamespace()).
51 | Get(ctx, bdcName, metav1.GetOptions{})
52 | if err != nil {
53 | // BDCs may not exist if the PV reclaimPolicy is set
54 | // to 'Retain' and the BDCs have been manually removed
55 | // Ref: github.com/openebs/openebs/issues/3363
56 | // TODO: Clean this part of the code up a bit.
57 | klog.Warningf("failed to get bdc %v", bdcName)
58 | continue
59 | }
60 |
61 | // Add finalizer only if deletionTimestamp is not set
62 | if !bdcObj.DeletionTimestamp.IsZero() {
63 | continue
64 | }
65 |
66 | // Add finalizer on associated BDC
67 | _, err = blockdeviceclaim.BuilderForAPIObject(bdcObj).BDC.AddFinalizer(LocalPVFinalizer)
68 | if err != nil {
69 | return errors.Wrapf(err, "failed to add localpv finalizer on BDC %v",
70 | bdcObj.Name)
71 | }
72 | }
73 | return nil
74 | }
75 |
--------------------------------------------------------------------------------
/deploy/helm/charts/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* vim: set filetype=mustache: */}}
2 | {{/*
3 | Expand the name of the chart.
4 | */}}
5 | {{- define "localpv.name" -}}
6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
7 | {{- end -}}
8 |
9 | {{/*
10 | Create a default fully qualified localpv provisioner name.
11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
12 | If release name contains chart name it will be used as a full name.
13 | */}}
14 | {{- define "localpv.fullname" -}}
15 | {{- if .Values.fullnameOverride -}}
16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
17 | {{- else -}}
18 | {{- $name := default .Chart.Name .Values.nameOverride -}}
19 | {{- if contains $name .Release.Name -}}
20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}}
21 | {{- else -}}
22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
23 | {{- end -}}
24 | {{- end -}}
25 | {{- end -}}
26 |
27 | {{/*
28 | Create chart name and version as used by the chart label.
29 | */}}
30 | {{- define "localpv.chart" -}}
31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
32 | {{- end -}}
33 |
34 |
35 | {{/*
36 | Meta labels
37 | */}}
38 | {{- define "localpv.common.metaLabels" -}}
39 | chart: {{ template "localpv.chart" . }}
40 | heritage: {{ .Release.Service }}
41 | {{- end -}}
42 |
43 | {{/*
44 | Selector labels
45 | */}}
46 | {{- define "localpv.selectorLabels" -}}
47 | app: {{ template "localpv.name" . }}
48 | release: {{ .Release.Name }}
49 | component: {{ .Values.localpv.name | quote }}
50 | {{- end -}}
51 |
52 | {{/*
53 | Component labels
54 | */}}
55 | {{- define "localpv.componentLabels" -}}
56 | openebs.io/component-name: openebs-{{ .Values.localpv.name }}
57 | {{- end -}}
58 |
59 | {{/*
60 | Common labels
61 | */}}
62 | {{- define "localpv.labels" -}}
63 | {{ include "localpv.common.metaLabels" . }}
64 | {{ include "localpv.selectorLabels" . }}
65 | {{ include "localpv.componentLabels" . }}
66 | {{- end -}}
67 |
68 | {{/*
69 | Create the name of the service account to use
70 | */}}
71 | {{- define "localpv.serviceAccountName" -}}
72 | {{- if .Values.serviceAccount.create -}}
73 | {{ default (include "localpv.fullname" .) .Values.serviceAccount.name }}
74 | {{- else -}}
75 | {{ default "default" .Values.serviceAccount.name }}
76 | {{- end -}}
77 | {{- end -}}
78 |
79 | {{/*
80 | Creates the tolerations based on the global tolerations, with early eviction
81 | Usage:
82 | {{ include "tolerations_with_early_eviction" . }}
83 | */}}
84 | {{- define "tolerations_with_early_eviction" -}}
85 | {{- if .Values.earlyEvictionTolerations }}
86 | {{- toYaml .Values.earlyEvictionTolerations | nindent 8 }}
87 | {{- end }}
88 | {{- if .Values.localpv.tolerations }}
89 | {{- toYaml .Values.localpv.tolerations | nindent 8 }}
90 | {{- end }}
91 | {{- end }}
92 |
--------------------------------------------------------------------------------
/pkg/kubernetes/api/core/v1/persistentvolume/buildlist.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2020 The OpenEBS Authors
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package persistentvolume
18 |
19 | import (
20 | errors "github.com/pkg/errors"
21 | corev1 "k8s.io/api/core/v1"
22 | )
23 |
24 | // ListBuilder enables building an instance of
25 | // PVList
26 | type ListBuilder struct {
27 | list *PVList
28 | filters PredicateList
29 | errs []error
30 | }
31 |
32 | // NewListBuilder returns an instance of ListBuilder
33 | func NewListBuilder() *ListBuilder {
34 | return &ListBuilder{list: &PVList{}}
35 | }
36 |
37 | // ListBuilderForAPIObjects builds the ListBuilder object based on PV api list
38 | func ListBuilderForAPIObjects(pvs *corev1.PersistentVolumeList) *ListBuilder {
39 | b := &ListBuilder{list: &PVList{}}
40 | if pvs == nil {
41 | b.errs = append(b.errs, errors.New("failed to build pv list: missing api list"))
42 | return b
43 | }
44 | for _, pv := range pvs.Items {
45 | pv := pv
46 | b.list.items = append(b.list.items, &PV{object: &pv})
47 | }
48 | return b
49 | }
50 |
51 | // ListBuilderForObjects builds the ListBuilder object based on PVList
52 | func ListBuilderForObjects(pvs *PVList) *ListBuilder {
53 | b := &ListBuilder{}
54 | if pvs == nil {
55 | b.errs = append(b.errs, errors.New("failed to build pv list: missing object list"))
56 | return b
57 | }
58 | b.list = pvs
59 | return b
60 | }
61 |
62 | // List returns the list of pv
63 | // instances that was built by this
64 | // builder
65 | func (b *ListBuilder) List() (*PVList, error) {
66 | if len(b.errs) > 0 {
67 | return nil, errors.Errorf("failed to list pv: %+v", b.errs)
68 | }
69 | if len(b.filters) == 0 {
70 | return b.list, nil
71 | }
72 | filteredList := &PVList{}
73 | for _, pv := range b.list.items {
74 | if b.filters.all(pv) {
75 | filteredList.items = append(filteredList.items, pv)
76 | }
77 | }
78 | return filteredList, nil
79 | }
80 |
81 | // Len returns the number of items present
82 | // in the PVList of a builder
83 | func (b *ListBuilder) Len() (int, error) {
84 | l, err := b.List()
85 | if err != nil {
86 | return 0, err
87 | }
88 | return l.Len(), nil
89 | }
90 |
91 | // APIList builds core API PV list using listbuilder
92 | func (b *ListBuilder) APIList() (*corev1.PersistentVolumeList, error) {
93 | l, err := b.List()
94 | if err != nil {
95 | return nil, err
96 | }
97 | return l.ToAPIList(), nil
98 | }
99 |
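100 | // Example usage (illustrative sketch, not part of the original API surface;
101 | // apiPVList is an assumed *corev1.PersistentVolumeList fetched from the
102 | // Kubernetes API):
103 | //
104 | //	list, err := ListBuilderForAPIObjects(apiPVList).List()
105 | //	if err != nil {
106 | //		// handle the builder error
107 | //	}
108 | //	_ = list.Len() // number of PVs built from the API list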
--------------------------------------------------------------------------------
/tests/README.md:
--------------------------------------------------------------------------------
1 | # Local PV Provisioner BDD
2 |
3 | Local PV Provisioner BDD tests are developed using ginkgo & gomega libraries.
4 |
5 | ## How to run the tests?
6 |
7 | ### Pre-requisites
8 |
9 | - These tests are meant to be run in a single-node Kubernetes (v1.16+)
10 | cluster with a single available blockdevice that has no filesystem
11 | on it (i.e., it should not be mounted).
12 |
13 | - Some of the tests require the 'xfsprogs' and 'quota' packages to run.
14 | For Ubuntu, you may need the quota_v2 kernel module; installing the
15 | 'linux-image-extra-virtual' package provides the kernel module.
16 | ```bash
17 | $ #For Ubuntu/Debian
18 | $ sudo apt-get update && sudo apt-get install -y xfsprogs quota linux-image-extra-virtual
19 | $ ##The kernel module package name may be different depending on the OS image
20 | $ ##E.g.: linux-modules-extra-`uname -r`
21 | $ #For CentOS/RHEL
22 | $ sudo yum install -y xfsprogs quota
23 | ```
24 |
25 | - You will require the Ginkgo binary to be able to run the tests.
26 | Install the Ginkgo binary using the following command:
27 | ```bash
28 | $ go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo@v2.14.0
29 | ```
30 |
31 | - Get your Kubernetes Cluster ready and make sure you can run
32 | kubectl from your development machine.
33 | Note down the path to the `kubeconfig` file used by kubectl
34 | to access your cluster. Example: /home/<user>/.kube/config
35 |
36 | - Set the KUBECONFIG environment variable on your
37 | development machine to point to the kubeconfig file.
38 | Example: `export KUBECONFIG=$HOME/.kube/config`
39 |
40 | If you do not set this ENV, you will have to pass the file
41 | to the Ginkgo CLI (see below)
42 |
43 | - The tests should not be run in parallel, as doing so may leave
44 | no blockdevice available for some of the tests.
45 |
46 | - Install required OpenEBS LocalPV Provisioner components
47 | Example: `kubectl apply -f https://openebs.github.io/charts/openebs-operator-lite.yaml`
48 |
49 | ### Run tests
50 |
51 | Run the tests from within the localpv 'tests' folder.
52 | >**Note:** The tests require privileges to create loop devices and to create
53 | directories in the '/var' directory.
54 |
55 | ```bash
56 | $ cd /tests
57 | $ sudo -E env "PATH=$PATH" ginkgo -v
58 | ```
59 | In case the KUBECONFIG env is not configured, you can run:
60 | ```bash
61 | $ sudo -E env "PATH=$PATH" ginkgo -v -kubeconfig=/path/to/kubeconfig
62 | ```
63 |
64 | If your OpenEBS LocalPV components are in a different Kubernetes namespace than 'openebs', you may use the '-openebs-namespace' flag:
65 | ```bash
66 | $ sudo -E env "PATH=$PATH" ginkgo -v -openebs-namespace=<your-openebs-namespace>
67 | ```
68 |
69 | >**Tip:** Raising a pull request to this repo's 'develop' branch (or any one of the release branches) will automatically run the BDD tests in GitHub Actions. You can verify your code changes by moving to the 'Checks' tab in your pull request page, and checking the results of the 'integration-test' check.
70 |
--------------------------------------------------------------------------------
/docs/installation/platforms/microk8s.md:
--------------------------------------------------------------------------------
1 | # Install Dynamic-LocalPV-Provisioner on MicroK8s
2 |
3 | The Dynamic-LocalPV-Provisioner may be installed into a MicroK8s cluster in ANY ONE of the following ways:
4 |
5 | ## Using the OpenEBS Addon
6 |
7 | MicroK8s (v1.21 onwards) ships with an OpenEBS Addon which deploys LocalPV, cStor and Jiva storage engine control-plane components. Enable the addon using the following command:
8 |
9 | ```console
10 | microk8s enable openebs
11 | ```
12 |
13 | Once installation succeeds, you may verify the creation of the Dynamic-LocalPV-Provisioner components using the following commands:
14 |
15 | ```console
16 | microk8s kubectl get pods -n openebs
17 | microk8s kubectl get storageclass
18 | ```
19 |
20 | ## Using the OpenEBS Helm Chart
21 |
22 | Using the helm chart directly lets you customize your Dynamic-LocalPV-Provisioner deployment ([Helm chart README](https://github.com/openebs/charts/blob/develop/charts/openebs/README.md)). You will need to use the Helm3 MicroK8s Addon for this.
23 |
24 | ```console
25 | microk8s enable helm3
26 | ```
27 |
28 | Add the openebs helm chart repo
29 |
30 | ```console
31 | microk8s helm3 repo add openebs https://openebs.github.io/charts
32 | microk8s helm3 repo update
33 | ```
34 |
35 | Install the helm chart.
36 |
37 | ```console
38 | $ #Default installation command. This sets the default directories under '/var/snap/microk8s/common'
39 | $ microk8s helm3 install openebs openebs/openebs -n openebs --create-namespace \
40 | --set localprovisioner.basePath="/var/snap/microk8s/common/var/openebs/local" \
41 | --set ndm.sparse.path="/var/snap/microk8s/common/var/openebs/sparse" \
42 | --set varDirectoryPath.baseDir="/var/snap/microk8s/common/var/openebs"
43 | ```
44 |
45 | Once installation succeeds, you may verify the creation of the Dynamic-LocalPV-Provisioner components using the following commands:
46 |
47 | ```console
48 | microk8s kubectl get pods -n openebs
49 | microk8s kubectl get storageclass
50 | ```
51 |
52 | ## Using Operator YAML
53 |
54 | You may also install the Dynamic-LocalPV-Provisioner using the openebs-operator-lite.yaml and openebs-lite-sc.yaml files. Use the following commands to install using the Operator YAMLs, while creating the default directories under '/var/snap/microk8s/common':
55 |
56 | ```console
57 | #Apply openebs-operator-lite.yaml
58 | curl -fSsL https://openebs.github.io/charts/openebs-operator-lite.yaml | sed 's|\(/var/openebs\)|/var/snap/microk8s/common\1|g' | kubectl apply -f -
59 | #Apply openebs-lite-sc.yaml
60 | curl -fSsL https://openebs.github.io/charts/openebs-lite-sc.yaml | sed 's|\(/var/openebs\)|/var/snap/microk8s/common\1|g' | kubectl apply -f -
61 | ```
62 |
63 | Once installation succeeds, you may verify the creation of the Dynamic-LocalPV-Provisioner components using the following commands:
64 |
65 | ```console
66 | microk8s kubectl get pods -n openebs
67 | microk8s kubectl get storageclass
68 | ```
69 |
70 | For instructions on using the StorageClasses and creating volumes, refer to the [quickstart](https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md).
71 |
--------------------------------------------------------------------------------
/scripts/validate-chart-version.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Write output to error output stream.
4 | echo_stderr() {
5 | echo -e "${1}" >&2
6 | }
7 |
8 | die()
9 | {
10 | local _return="${2:-1}"
11 | echo_stderr "$1"
12 | exit "${_return}"
13 | }
14 |
15 | set -euo pipefail
16 |
17 | # Set the path to the Chart.yaml file
18 | SCRIPT_DIR="$(dirname "$(realpath "${BASH_SOURCE[0]:-"$0"}")")"
19 | ROOT_DIR="$SCRIPT_DIR/.."
20 | CHART_DIR="$ROOT_DIR/deploy/helm/charts"
21 | CHART_YAML="$CHART_DIR/Chart.yaml"
22 |
23 | # Check if the Chart.yaml file exists
24 | if [ ! -f "$CHART_YAML" ]; then
25 | die "Chart.yaml file not found in $CHART_YAML"
26 | fi
27 |
28 | # Extract the chart version and app version using yq
29 | CHART_VERSION=$(yq e '.version' "$CHART_YAML")
30 | APP_VERSION=$(yq e '.appVersion' "$CHART_YAML")
31 |
32 | # Check if extraction was successful
33 | if [ -z "$CHART_VERSION" ] || [ -z "$APP_VERSION" ]; then
34 | die "Failed to extract versions from Chart.yaml"
35 | fi
36 |
37 | # Print the extracted versions
38 | echo "Chart Version: $CHART_VERSION"
39 | echo "App Version: $APP_VERSION"
40 |
41 | # Validate that the versions are valid semver
42 | if [ "$(semver validate "$CHART_VERSION")" != "valid" ]; then
43 | die "Invalid chart version: $CHART_VERSION"
44 | fi
45 |
46 | if [ "$(semver validate "$APP_VERSION")" != "valid" ]; then
47 | die "Invalid app version: $APP_VERSION"
48 | fi
49 |
50 | help() {
51 | cat <<EOF
52 | Usage: $(basename "$0") [OPTIONS]
53 | 
54 | Options:
55 |   -b, --branch <branch-name>  Name of the branch on which this workflow is running.
56 | 
57 | Examples:
58 | $(basename "$0") --branch develop
59 | EOF
60 | }
61 |
62 | # Parse arguments
63 | while [ "$#" -gt 0 ]; do
64 | case $1 in
65 | -b|--branch)
66 | BRANCH_NAME=$2
67 | shift
68 | ;;
69 | -h|--help)
70 | help
71 | exit 0
72 | ;;
73 | *)
74 | help
75 | die "Unknown option: $1"
76 | ;;
77 | esac
78 | shift
79 | done
80 |
81 | # Extract major and minor version from the branch name
82 | extract_major_minor() {
83 | echo "$1" | awk -F/ '{print $2}'
84 | }
85 |
86 | if [[ "$BRANCH_NAME" == "develop" ]]; then
87 | if [[ "$CHART_VERSION" != *"-develop" ]]; then
88 | die "Chart version must include '-develop' for develop branch"
89 | fi
90 | if [[ "$APP_VERSION" != *"-develop" ]]; then
91 | die "App version must include '-develop' for develop branch"
92 | fi
93 | elif [[ "$BRANCH_NAME" =~ ^(release/[0-9]+\.[0-9]+)$ ]]; then
94 | RELEASE_VERSION=$(extract_major_minor "$BRANCH_NAME")
95 | if [[ "$CHART_VERSION" != "$RELEASE_VERSION."*"-prerelease" ]]; then
96 | die "Chart version must be in format $RELEASE_VERSION.X-prerelease for release branch"
97 | fi
98 | if [[ "$APP_VERSION" != "$RELEASE_VERSION."*"-prerelease" ]]; then
99 | die "App version must be in format $RELEASE_VERSION.X-prerelease for release branch"
100 | fi
101 | else
102 | die "Unknown branch name: $BRANCH_NAME"
103 | fi
104 |
105 |
--------------------------------------------------------------------------------
/e2e-tests/hack/crds.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | annotations:
6 | controller-gen.kubebuilder.io/version: v0.4.0
7 | creationTimestamp: null
8 | name: e2eresults.e2e.io
9 | spec:
10 | group: e2e.io
11 | names:
12 | kind: E2eResult
13 | listKind: E2eResultList
14 | plural: e2eresults
15 | shortNames:
16 | - e2er
17 | singular: e2eresult
18 | scope: Cluster
19 | versions:
20 | - name: v1alpha1
21 | schema:
22 | openAPIV3Schema:
23 | description: E2eResult represents an e2e result
24 | properties:
25 | apiVersion:
26 | description: 'APIVersion defines the versioned schema of this representation
27 | of an object. Servers should convert recognized schemas to the latest
28 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
29 | type: string
30 | kind:
31 | description: 'Kind is a string value representing the REST resource this
32 | object represents. Servers may infer this from the endpoint the client
33 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
34 | type: string
35 | metadata:
36 | type: object
37 | spec:
38 | description: Spec i.e. specifications of the E2eResult
39 | properties:
40 | testMetadata:
41 | description: TestMetadata holds information on the testcase
42 | properties:
43 | app:
44 | description: App is the name of the application running
45 | nullable: true
46 | type: string
47 | chaostype:
48 | description: Chaostype is the type of test case
49 | nullable: true
50 | type: string
51 | type: object
52 | testStatus:
53 | description: TestStatus holds the state of the testcase, manually updated
54 | by json merge patch. 'result' is the useful value today, but 'phase'
55 | use is anticipated in the future
56 | properties:
57 | phase:
58 | description: Phase tells whether the test case is running or completed
59 | nullable: true
60 | type: string
61 | result:
62 | description: Result tells whether the test case passed or failed
63 | nullable: true
64 | type: string
65 | type: object
66 | type: object
67 | status:
68 | description: Status of E2eResult
69 | type: object
70 | type: object
71 | served: true
72 | storage: true
73 | status:
74 | acceptedNames:
75 | kind: ""
76 | plural: ""
77 | conditions: []
78 | storedVersions: []
79 |
--------------------------------------------------------------------------------
/docs/tutorials/backup_and_restore/velero/minio.md:
--------------------------------------------------------------------------------
1 | # Deploy single-server MinIO with Hostpath LocalPV storage
2 |
3 | Follow the instructions below to create a minimal MinIO deployment on a Kubernetes cluster. This setup creates a single MinIO server with 4 volumes, and is not recommended for use in production environments.
4 |
5 | **Prerequisites:**
6 | 1. Kubernetes v1.19 or above
7 | 2. kubectl CLI
8 |
9 | ## Step 1: Create namespace
10 |
11 | We will create the MinIO Tenant in the namespace which we will use for Velero.
12 |
13 | ```console
14 | $ kubectl create namespace velero
15 | ```
16 |
17 | ## Step 2: Create MinIO Operator and MinIO Tenant using Helm Chart
18 |
19 | We will use the MinIO Helm chart to create the MinIO Operator and also a single-server MinIO tenant. We will use 'minio' and 'minio123' as the default Access Key and Secret Key respectively. This is a minimal install; refer to the [MinIO helm chart documentation](https://github.com/minio/operator/blob/master/helm/minio-operator/README.md) for more options.
20 |
21 | ```console
22 | $ #helm repo add minio https://operator.min.io/
23 | $ #helm repo update
24 | $ helm install minio-operator minio/minio-operator \
25 | --namespace minio-operator \
26 | --create-namespace \
27 | --set tenants[0].name="openebs-backup-minio" \
28 | --set tenants[0].namespace="velero" \
29 | --set tenants[0].certificate.requestAutoCert=false \
30 | --set tenants[0].pools[0].servers=1 \
31 | --set tenants[0].pools[0].volumesPerServer=4 \
32 | --set tenants[0].pools[0].size=10Gi \
33 | --set tenants[0].pools[0].storageClassName="openebs-hostpath" \
34 | --set tenants[0].secrets.enabled=true \
35 | --set tenants[0].secrets.name="openebs-backup-minio-creds-secret" \
36 | --set tenants[0].secrets.accessKey="minio" \
37 | --set tenants[0].secrets.secretKey="minio123"
38 | ```
39 |
40 | Verify if the MinIO Operator Pod and the MinIO Console Pod got created.
41 | ```console
42 | $ kubectl get pods -n minio-operator
43 |
44 | NAME READY STATUS RESTARTS AGE
45 | minio-operator-67bc7fc5d6-sb2wq 1/1 Running 0 120m
46 | minio-operator-console-59db5db85-k2qbw 1/1 Running 0 120m
47 | ```
48 |
49 | Verify the status of the Secrets, Tenant objects and Pods in the 'velero' namespace.
50 | ```console
51 | $ kubectl get tenants,secrets,pods -n velero
52 |
53 | NAME STATE AGE
54 | tenant.minio.min.io/openebs-backup-minio Initialized 6h47m
55 |
56 | NAME TYPE DATA AGE
57 | secret/default-token-kqlr8 kubernetes.io/service-account-token 3 6h49m
58 | secret/openebs-backup-minio-creds-secret Opaque 2 6h41m
59 | secret/operator-tls Opaque 1 6h41m
60 | secret/operator-webhook-secret Opaque 3 6h41m
61 |
62 | NAME READY STATUS RESTARTS AGE
63 | pod/openebs-backup-minio-ss-0-0 1/1 Running 0 6h40m
64 | ```
65 |
66 | You can use the 'mc' MinIO client to create buckets in the MinIO object store.
67 |
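68 | A minimal sketch, assuming 'mc' is installed locally and the MinIO service has been port-forwarded to localhost:9000 (the Service name and port below are assumptions; check `kubectl get svc -n velero`):
69 | 
70 | ```console
71 | $ kubectl -n velero port-forward svc/minio 9000:80 &
72 | $ mc alias set backup-minio http://localhost:9000 minio minio123
73 | $ mc mb backup-minio/velero
74 | ```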
--------------------------------------------------------------------------------
/e2e-tests/experiments/functional/localpv-provisioning-selected-device/README.md:
--------------------------------------------------------------------------------
1 | ## Experiment Metadata
2 |
3 | | Type | Description | Storage | Applications | K8s Platform |
4 | | ---------- | ------------------------------------------------------------ | ------- | ------------ | ------------ |
5 | | Functional | Ensure that local PV can be provisioned on selected block device | OpenEBS | Any | Any |
6 |
7 | ## Entry-Criteria
8 |
9 | - K8s nodes should be ready.
10 | - OpenEBS should be running.
11 | - Unclaimed block device should be available
12 |
13 | ## Exit-Criteria
14 |
15 | - Volume should be created on the labelled block device
16 |
17 | ## Procedure
18 |
19 | - This functional test checks if the local pv can be provisioned on the selected block device.
20 |
21 | - This e2ebook accepts its parameters as job environment variables, and can accordingly be run as either a positive or a negative test case.
22 | 
23 | For the positive test case, we first label the block devices, then provision the volume and verify that provisioning succeeds. For the negative test case, we first provision the volume (the claim for the persistent volume should remain in the Pending state), then label the block device and verify that provisioning is successfully reconciled. In both cases, we finally verify that the block device is selected only from the list of tagged block devices.
24 | 
25 | 1. A certain block device will be labelled with `openebs.io/block-device-tag=< tag-value >` (an illustrative labelling command is included at the end of this document)
26 |
27 | 2. The `< tag-value >` can be passed to Local PV storage class via cas annotations. If the value is present, then Local PV device provisioner will set the following additional selector on the BDC:
28 | `openebs.io/block-device-tag=< tag-value >`
29 |
30 | - The storage class spec will be built as follows:
31 |
32 | ```yaml
33 | apiVersion: storage.k8s.io/v1
34 | kind: StorageClass
35 | metadata:
36 | name: openebs-device-mongo
37 | annotations:
38 | openebs.io/cas-type: local
39 | cas.openebs.io/config: |
40 | - name: StorageType
41 | value: "device"
42 | - name: BlockDeviceSelectors
43 | data:
44 | openebs.io/block-device-tag: "{{ device_tag }}"
45 | provisioner: openebs.io/local
46 | volumeBindingMode: WaitForFirstConsumer
47 | reclaimPolicy: Delete
48 | ```
49 |
50 | - Upon using the above storage class, the PV should be provisioned on the tagged block device
51 |
52 | - Finally, checking if the tagged BD alone is used by BDC being part of the volume.
53 |
54 | ## e2ebook Environment Variables
55 |
56 | | Parameters | Description |
57 | | ------------- | ------------------------------------------------------------ |
58 | | APP_NAMESPACE | Namespace where the application and volume are deployed. |
59 | | PVC | Name of PVC to be created |
60 | | OPERATOR_NS | Namespace where OpenEBS is running |
61 | | BD_TAG | The label value to be used by the key `openebs.io/block-device-tag=< tag-value >` |
62 | | TEST_CASE_TYPE| Run the test for `positive` or `negative` cases |
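63 | 
64 | ## Example: Tagging a block device
65 | 
66 | A minimal illustrative sketch of step 1 above, assuming OpenEBS runs in the `openebs` namespace (see OPERATOR_NS); the blockdevice name and tag value here are hypothetical:
67 | 
68 | ```console
69 | kubectl -n openebs label blockdevice blockdevice-0123456789abcdef openebs.io/block-device-tag=mongo
70 | ```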
--------------------------------------------------------------------------------
/e2e-tests/apps/busybox/liveness/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: False
5 |
6 | vars_files:
7 | - vars.yml
8 |
9 | tasks:
10 | - block:
11 | - block:
12 |
13 | - name: Record test instance/run ID
14 | set_fact:
15 | run_id: "{{ lookup('env','RUN_ID') }}"
16 |
17 | - name: Construct testname appended with runID
18 | set_fact:
19 | test_name: "{{ test_name }}-{{ run_id }}"
20 |
21 | when: lookup('env','RUN_ID')
22 |
23 | ## RECORD START-OF-TEST IN e2e RESULT CR
24 | - include_tasks: /e2e-tests/utils/fcm/update_e2e_result_resource.yml
25 | vars:
26 | status: 'SOT'
27 |
28 | - block:
29 |
30 | - name: Getting the application pod name
31 | shell: kubectl get pod -n {{ namespace }} -l {{ app_label }} -o jsonpath={.items[0].metadata.name}
32 | register: pod_name
33 |
34 | - name: Replacing the placeholder for pod-name
35 | replace:
36 | path: "{{ busybox_liveness }}"
37 | regexp: "pod-name"
38 | replace: "{{ pod_name.stdout }}"
39 |
40 | - name: Replacing the placeholder for namespace
41 | replace:
42 | path: "{{ busybox_liveness }}"
43 | regexp: "app-namespace"
44 | replace: "{{ namespace }}"
45 |
46 | - name: Replacing the placeholder for liveness-retry-count
47 | replace:
48 | path: "{{ busybox_liveness }}"
49 | regexp: "liveness-retry-count"
50 | replace: "{{ liveness_retry }}"
51 |
52 | - name: Replacing the placeholder for liveness-timeout
53 | replace:
54 | path: "{{ busybox_liveness }}"
55 | regexp: "liveness-timeout-seconds"
56 | replace: "{{ liveness_timeout }}"
57 |
58 | - name: Creating busybox-liveness job
59 | shell: kubectl create -f {{ busybox_liveness }}
60 |
61 | - name: Verifying whether liveness pod is started successfully
62 | shell: kubectl get pod -n {{ namespace }} -l liveness=busybox-liveness -o jsonpath={.items[0].status.phase}
63 | register: pod_status
64 | until: "'Running' in pod_status.stdout"
65 | delay: 60
66 | retries: 20
67 |
68 | - set_fact:
69 | flag: "Pass"
70 |
71 | when: "'deprovision' not in action"
72 |
73 | - block:
74 | - name: Getting the busybox liveness job
75 | shell: kubectl get job -l liveness=busybox-liveness -n {{ namespace }} -o jsonpath='{.items[0].metadata.name}'
76 | register: liveness_job
77 |
78 | - name: Deleting busybox liveness job
79 | shell: kubectl delete job {{ liveness_job.stdout }} -n {{ namespace }}
80 |
81 | - set_fact:
82 | flag: "Pass"
83 |
84 | when: "'deprovision' is in action"
85 |
86 |
87 | rescue:
88 | - set_fact:
89 | flag: "Fail"
90 |
91 | always:
92 |
93 | ## RECORD END-OF-TEST IN e2e RESULT CR
94 | - include_tasks: /e2e-tests/utils/fcm/update_e2e_result_resource.yml
95 | vars:
96 | status: 'EOT'
97 |
98 |
--------------------------------------------------------------------------------
/e2e-tests/experiments/chaos/app_pod_failure/README.md:
--------------------------------------------------------------------------------
1 | ## Experiment Metadata
2 |
3 | | Type | Description | Storage | Applications | K8s Platform |
4 | | ----- | ------------------------ | ------- | ------------- | ------------ |
5 | | Chaos | Fail the application pod | OpenEBS | Percona MySQL | Any |
6 |
7 | ## Entry-Criteria
8 |
9 | - Application services are accessible & pods are healthy
10 | - Application writes are successful
11 |
12 | ## Exit-Criteria
13 |
14 | - Application services are accessible & pods are healthy
15 | - Data written prior to chaos is successfully retrieved/read
16 | - Database consistency is maintained as per db integrity check utils
17 | - Storage target pods are healthy
18 |
19 | ## Notes
20 |
21 | - Typically used as a disruptive test, to cause loss of access to storage by failing the application pod.
22 | - Tests Recovery workflow of the application pod.
23 |
24 | ## Associated Utils
25 |
26 | - `pod_failure_by_sigkill.yaml`
27 |
28 | ## e2ebook Environment Variables
29 |
30 | ### Application
31 |
32 | | Parameter | Description |
33 | | ------------- | ------------------------------------------------------------ |
34 | | APP_NAMESPACE | Namespace in which application pods are deployed |
35 | | APP_LABEL | Unique Labels in `key=value` format of application deployment |
36 |
37 | ### Health Checks
38 |
39 | | Parameter | Description |
40 | | ---------------------- | ------------------------------------------------------------ |
41 | | LIVENESS_APP_NAMESPACE | Namespace in which external liveness pods are deployed, if any |
42 | | LIVENESS_APP_LABEL | Unique Labels in `key=value` format for external liveness pod, if any |
43 | | DATA_PERSISTENCE | Specify the application name against which data consistency has to be ensured. Example: busybox,mysql |
44 |
45 |
46 | ## Procedure
47 |
48 | This experiment kills the application container and verifies that the container is scheduled back and the data is intact. Based on the CRI used, it uses the relevant util to kill the application container.
49 |
50 | After injecting the chaos into the component specified via environmental variable, e2e experiment observes the behaviour of corresponding OpenEBS PV and the application which consumes the volume.
51 |
52 | ### Data consistency check
53 |
54 | Based on the value of the env DATA_PERSISTENCE, the corresponding data consistency util will be executed. At present, only busybox and percona-mysql are supported. Along with specifying the env in the e2e experiment, the user needs to pass a name for the configmap, with the required data-consistency-specific parameters provided via the configmap in the format below. It is recommended to use the test name for the configmap and to mount the configmap as a volume in the e2e pod (a sketch of creating such a configmap is included at the end of this document). The following snippet holds the parameters required for validating data consistency in the busybox application:
55 | 
56 |     parameters.yml: |
57 |       blocksize: 4k
58 |       blockcount: 1024
59 |       testfile: difiletest
60 | 
61 |
62 | For percona-mysql, the following parameters are to be injected into the configmap:
63 | 
64 |     parameters.yml: |
65 |       dbuser: root
66 |       dbpassword: k8sDem0
67 |       dbname: tdb
68 | The configmap data will be utilised by the e2e experiment as its variables while executing the scenario.
69 |
70 | Based on the data provided, e2e checks if the data is consistent after recovering from induced chaos.
71 |
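72 | A minimal sketch of creating such a configmap from a local parameters.yml file; the configmap name and namespace below are placeholders to be replaced with the test name and the e2e pod's namespace:
73 | 
74 |     kubectl create configmap <test-name> -n <e2e-namespace> --from-file=parameters.yml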
--------------------------------------------------------------------------------
/e2e-tests/experiments/chaos/local_pv_disk_reuse/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: False
5 |
6 | vars_files:
7 | - test_vars.yml
8 |
9 | tasks:
10 | - block:
11 |
12 | ## Generating the testname for deployment
13 | - include_tasks: /e2e-tests/utils/fcm/create_testname.yml
14 |
15 | ## RECORD START-OF-TEST IN e2e RESULT CR
16 | - include_tasks: /e2e-tests/utils/fcm/update_e2e_result_resource.yml
17 | vars:
18 | status: 'SOT'
19 |
20 | - name: Obtain the Node name
21 | shell: kubectl get nodes --no-headers -o custom-columns=:.metadata.name | grep -v master
22 | args:
23 | executable: /bin/bash
24 | register: node_name
25 |
26 | - name: Check if an unclaimed BD is available on all nodes
27 | shell: >
28 | kubectl get bd -n {{ operator_ns }} -l kubernetes.io/hostname={{ item }} -o custom-columns=:.status.claimState --no-headers
29 | args:
30 | executable: /bin/bash
31 | register: bd_status
32 | until: "'Unclaimed' in bd_status.stdout"
33 | delay: 5
34 | retries: 6
35 | with_items:
36 | - "{{ node_name.stdout_lines }}"
37 |
38 | ## Creating namespaces and making the application for deployment
39 | - include_tasks: /e2e-tests/utils/k8s/pre_create_app_deploy.yml
40 | vars:
41 | application_deployment: "{{ application_statefulset }}"
42 |
43 | - name: Replace the volume capacity placeholder with the value from env
44 | replace:
45 | path: "{{ application_statefulset }}"
46 | regexp: "teststorage"
47 | replace: "{{ lookup('env','PV_CAPACITY') }}"
48 |
49 | ## Deploying the application
50 | - include_tasks: /e2e-tests/utils/k8s/deploy_single_app.yml
51 | vars:
52 | application_deployment: "{{ application_statefulset }}"
53 | check_app_pod: 'no'
54 | delay: 10
55 | retries: 20
56 |
57 | - name: Deprovisioning the Application
58 | include_tasks: /e2e-tests/utils/k8s/deprovision_statefulset.yml
59 | vars:
60 | app_deployer: "{{ application_statefulset }}"
61 |
62 | ## Creating namespaces and making the application for deployment
63 | - include_tasks: /e2e-tests/utils/k8s/pre_create_app_deploy.yml
64 | vars:
65 | application_deployment: "{{ application_statefulset }}"
66 |
67 | - name: Replace the volume capacity placeholder with the value from env
68 | replace:
69 | path: "{{ application_statefulset }}"
70 | regexp: "teststorage"
71 | replace: "{{ lookup('env','PV_CAPACITY') }}"
72 |
73 | ## Deploying the application
74 | - include_tasks: /e2e-tests/utils/k8s/deploy_single_app.yml
75 | vars:
76 | application_deployment: "{{ application_statefulset }}"
77 | check_app_pod: 'no'
78 | delay: 10
79 | retries: 20
80 |
81 | - set_fact:
82 | flag: "Pass"
83 |
84 | rescue:
85 | - name: Setting fail flag
86 | set_fact:
87 | flag: "Fail"
88 |
89 | always:
90 | ## RECORD END-OF-TEST IN e2e RESULT CR
91 | - include_tasks: /e2e-tests/utils/fcm/update_e2e_result_resource.yml
92 | vars:
93 | status: 'EOT'
94 |
--------------------------------------------------------------------------------
/e2e-tests/apps/busybox/deployers/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: False
5 |
6 | vars_files:
7 | - test_vars.yml
8 |
9 | tasks:
10 | - block:
11 |
12 | ## Generating the testname for deployment
13 | - include_tasks: /e2e-tests/utils/fcm/create_testname.yml
14 |
15 | ## RECORD START-OF-TEST IN e2e RESULT CR
16 | - include_tasks: /e2e-tests/utils/fcm/update_e2e_result_resource.yml
17 | vars:
18 | status: 'SOT'
19 |
20 | - block:
21 | - block:
22 | ## Creating namespaces and making the application for deployment
23 | - include_tasks: /e2e-tests/utils/k8s/pre_create_app_deploy.yml
24 | vars:
25 | application_deployment: "{{ application_statefulset }}"
26 |
27 | - name: Replace the volume capacity placeholder with the value from env
28 | replace:
29 | path: "{{ application_statefulset }}"
30 | regexp: "teststorage"
31 | replace: "{{ lookup('env','PV_CAPACITY') }}"
32 |
33 | ## Deploying the application
34 | - include_tasks: /e2e-tests/utils/k8s/deploy_single_app.yml
35 | vars:
36 | application_deployment: "{{ application_statefulset }}"
37 | check_app_pod: 'no'
38 | delay: 10
39 | retries: 20
40 | when: "'deprovision' not in action"
41 |
42 | - name: Deprovisioning the Application
43 | include_tasks: /e2e-tests/utils/k8s/deprovision_statefulset.yml
44 | vars:
45 | app_deployer: "{{ application_statefulset }}"
46 | when: "'deprovision' is in action"
47 |
48 | when: lookup('env','DEPLOY_TYPE') == 'statefulset'
49 |
50 | - block:
51 | - block:
52 | ## Creating namespaces and making the application for deployment
53 | - include_tasks: /e2e-tests/utils/k8s/pre_create_app_deploy.yml
54 |
55 | - name: Replace the volume capacity placeholder with the value from env
56 | replace:
57 | path: "{{ application_deployment }}"
58 | regexp: "teststorage"
59 | replace: "{{ lookup('env','PV_CAPACITY') }}"
60 |
61 | ## Deploying the application
62 | - include_tasks: /e2e-tests/utils/k8s/deploy_single_app.yml
63 | vars:
64 | check_app_pod: 'yes'
65 | delay: 10
66 | retries: 20
67 | when: "'deprovision' not in action"
68 |
69 | - name: Deprovisioning the Application
70 | include_tasks: "/e2e-tests/utils/k8s/deprovision_deployment.yml"
71 | vars:
72 | app_deployer: "{{ application_deployment }}"
73 | when: "'deprovision' is in action"
74 |
75 | when: lookup('env','DEPLOY_TYPE') == 'deployment'
76 |
77 | - name: Setting pass flag
78 | set_fact:
79 | flag: "Pass"
80 |
81 | rescue:
82 | - name: Setting fail flag
83 | set_fact:
84 | flag: "Fail"
85 |
86 | always:
87 | ## RECORD END-OF-TEST IN e2e RESULT CR
88 | - include_tasks: /e2e-tests/utils/fcm/update_e2e_result_resource.yml
89 | vars:
90 | status: 'EOT'
91 |
--------------------------------------------------------------------------------
/docs/tutorials/backup_and_restore/backup.md:
--------------------------------------------------------------------------------
1 | # Backups using Velero and Restic
2 |
3 | You can create backups of Hostpath LocalPV volumes using Velero and Restic. Follow the steps below:
4 |
5 | ## Step 1: Prepare object-storage
6 |
7 | You will need an object-store to store your volume backups (Velero remote backup). You may use an AWS S3 bucket, a GCP storage bucket, or a MinIO instance for this.
8 |
9 | In this guide, we'll use a minimal MinIO instance installation. [Click here](./velero/minio.md) for instructions on setting up your own minimal single-server MinIO.
10 |
11 | > **Note**: Refer to the [official MinIO documentation](https://docs.min.io/docs/) for up-to-date instructions on setting up MinIO.
12 |
13 | ## Step 2: Install Velero with Restic
14 |
15 | You will need Velero with Restic to create backups. In this guide, we'll use the above MinIO as our default backup storage location. [Click here](./velero/velero_with_restic.md) for instructions on setting up Velero with Restic and creating a backupstoragelocation object.
16 |
17 | > **Note**: Refer to the [official Velero documentation](https://velero.io/docs/v1.6/restic/#setup-restic) for up-to-date instructions on setting up Velero with Restic.
18 |
19 | ## Step 3: Create backup
20 |
21 | We will 'exec' into the Velero container's shell and run the following commands.
22 |
23 | Get the Pod name for the Velero Pod running in 'velero' namespace.
24 |
25 | ```console
26 | $ kubectl -n velero get pods
27 |
28 | NAME READY STATUS RESTARTS AGE
29 | openebs-backup-minio-ss-0-0 1/1 Running 0 7h23m
30 | restic-2xwsf 1/1 Running 0 7h12m
31 | velero-7dd57b857-2gd25 1/1 Running 0 7h12m
32 | ```
33 |
34 | 'Exec' into the Pod's velero container.
35 |
36 | ```console
37 | kubectl -n velero exec -it velero-7dd57b857-2gd25 -c velero -- /bin/bash
38 | ```
39 |
40 | Verify if the following command lists the backup-location 'default' as 'Available'.
41 |
42 | ```console
43 | $ ./velero backup-location get
44 |
45 | NAME PROVIDER BUCKET/PREFIX PHASE LAST VALIDATED ACCESS MODE DEFAULT
46 | default aws velero Available 2021-09-04 01:05:06 +0000 UTC ReadWrite true
47 | ```
48 |
49 | Create a backup. We will use the `--default-volumes-to-restic` flag to use the Restic plugin for volumes. Use the `--wait` flag to wait for the backup to complete or fail before the command returns.
50 |
51 | ```console
52 | $ ./velero create backup my-localpv-backup --include-namespaces <your-app-namespace> --default-volumes-to-restic --wait
53 |
54 | Backup request "my-localpv-backup" submitted successfully.
55 | Waiting for backup to complete. You may safely press ctrl-c to stop waiting - your backup will continue in the background.
56 | ....
57 | Backup completed with status: Completed. You may check for more information using the commands `velero backup describe my-localpv-backup` and `velero backup logs my-localpv-backup`.
58 | ```
59 |
60 | Verify the status of the backup using the following command:
61 |
62 | ```console
63 | $ ./velero backup get
64 | NAME STATUS ERRORS WARNINGS CREATED EXPIRES STORAGE LOCATION SELECTOR
65 | my-localpv-backup Completed 0 0 2021-09-04 01:13:36 +0000 UTC 29d default
66 | ```
67 |
68 | ```console
69 | exit
70 | ```
71 |
72 | For more information on using Velero, refer to the Velero documentation at [velero.io/docs](https://velero.io/docs).
73 |
--------------------------------------------------------------------------------
/pkg/kubernetes/api/core/v1/persistentvolumeclaim/persistentvolumeclaim.go:
--------------------------------------------------------------------------------
1 | // Copyright © 2018-2020 The OpenEBS Authors
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | package persistentvolumeclaim
16 |
17 | import (
18 | "strings"
19 |
20 | corev1 "k8s.io/api/core/v1"
21 | )
22 |
23 | // PVC is a wrapper over persistentvolumeclaim api
24 | // object. It provides build, validations and other common
25 | // logic to be used by various feature specific callers.
26 | type PVC struct {
27 | object *corev1.PersistentVolumeClaim
28 | }
29 |
30 | // PVCList is a wrapper over persistentvolumeclaim api
31 | // object. It provides build, validations and other common
32 | // logic to be used by various feature specific callers.
33 | type PVCList struct {
34 | items []*PVC
35 | }
36 |
37 | // Len returns the number of items present
38 | // in the PVCList
39 | func (p *PVCList) Len() int {
40 | return len(p.items)
41 | }
42 |
43 | // ToAPIList converts PVCList to API PVCList
44 | func (p *PVCList) ToAPIList() *corev1.PersistentVolumeClaimList {
45 | plist := &corev1.PersistentVolumeClaimList{}
46 | for _, pvc := range p.items {
47 | plist.Items = append(plist.Items, *pvc.object)
48 | }
49 | return plist
50 | }
51 |
52 | type pvcBuildOption func(*PVC)
53 |
54 | // NewForAPIObject returns a new instance of PVC
55 | func NewForAPIObject(obj *corev1.PersistentVolumeClaim, opts ...pvcBuildOption) *PVC {
56 | p := &PVC{object: obj}
57 | for _, o := range opts {
58 | o(p)
59 | }
60 | return p
61 | }
62 |
63 | // Predicate defines an abstraction
64 | // to determine conditional checks
65 | // against the provided pvc instance
66 | type Predicate func(*PVC) bool
67 |
68 | // IsBound returns true if the pvc is bound
69 | func (p *PVC) IsBound() bool {
70 | return p.object.Status.Phase == corev1.ClaimBound
71 | }
72 |
73 | // IsBound is a predicate to select pvcs
74 | // which are bound
75 | func IsBound() Predicate {
76 | return func(p *PVC) bool {
77 | return p.IsBound()
78 | }
79 | }
80 |
81 | // IsNil returns true if the PVC instance
82 | // is nil
83 | func (p *PVC) IsNil() bool {
84 | return p.object == nil
85 | }
86 |
87 | // IsNil is a predicate to filter out nil PVC
88 | // instances
89 | func IsNil() Predicate {
90 | return func(p *PVC) bool {
91 | return p.IsNil()
92 | }
93 | }
94 |
95 | // ContainsName is a filter function to select pvcs
96 | // whose name contains the given string
97 | func ContainsName(name string) Predicate {
98 | return func(p *PVC) bool {
99 | return strings.Contains(p.object.GetName(), name)
100 | }
101 | }
102 |
103 | // PredicateList holds a list of predicate
104 | type PredicateList []Predicate
105 |
106 | // all returns true if all the predicates
107 | // succeed against the provided pvc
108 | // instance
109 | func (l PredicateList) all(p *PVC) bool {
110 | for _, pred := range l {
111 | if !pred(p) {
112 | return false
113 | }
114 | }
115 | return true
116 | }
117 |
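118 | // Example usage (illustrative sketch, not part of the original source;
119 | // pvcList is an assumed *PVCList): selecting bound claims with the
120 | // predicates above.
121 | //
122 | //	preds := PredicateList{IsBound()}
123 | //	for _, pvc := range pvcList.items {
124 | //		if preds.all(pvc) {
125 | //			// pvc is bound; process it
126 | //		}
127 | //	}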
--------------------------------------------------------------------------------
/e2e-tests/utils/k8s/deprovision_statefulset.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # This Utility task file can delete the application and its underlying resources such as pvc and service from K8s cluster
4 | # This accepts application namespace, application label and application manifest file as input parameters.
5 | # The parameters used are
6 | # - app_deployer ( Deployment yaml file )
7 | # - app_ns ( application namespace )
8 | # - app_label ( application label)
9 | # An illustrative usage example is included at the end of this file.
10 |
11 | - block:
12 |
13 | - name: Check if the statefulset application exists.
14 | shell: kubectl get pods -n {{ app_ns }} -l {{ app_label }}
15 | register: pods
16 | failed_when: "'No resources found' in pods.stdout"
17 |
18 | - name: Obtaining PVCs related to the application.
19 | shell: kubectl get pvc -n {{ app_ns }} -l {{ app_label }} --no-headers -o custom-columns=:.metadata.name
20 | register: pvc_list
21 |
22 | - name: Obtaining the PV names.
23 | shell: kubectl get pvc -l {{ app_label }} -n {{ app_ns }} --no-headers -o custom-columns=:.spec.volumeName
24 | register: pv_list
25 |
26 | ## Replacing the item names in the respective deployer spec file.
27 |
28 | - name: Replace the PVC name in application deployer spec.
29 | replace:
30 | path: "{{ app_deployer }}"
31 | regexp: "testclaim"
32 | replace: "{{ lookup('env','APP_PVC') }}"
33 | when: app_pvc is defined
34 |
35 | - name: Replace the storageclass placeholder with provider
36 | replace:
37 | path: "{{ app_deployer }}"
38 | regexp: "testclass"
39 | replace: "{{ lookup('env','PROVIDER_STORAGE_CLASS') }}"
40 | when: storage_class is defined
41 |
42 | - block:
43 |
44 | - name: Get the application replica values from env
45 | set_fact:
46 | app_rkey: "{{ app_replica.split('=')[0] }}"
47 | app_rvalue: "{{ app_replica.split('=')[1] }}"
48 |
49 | - name: Replace the application label placeholder
50 | replace:
51 | path: "{{ app_deployer }}"
52 | regexp: "rkey: rvalue"
53 | replace: "{{ app_rkey }}: {{ app_rvalue }}"
54 |
55 | when: app_replica is defined
56 |
57 | - block:
58 |
59 | - name: Get the application label values from env
60 | set_fact:
61 | app_lkey: "{{ app_label.split('=')[0] }}"
62 | app_lvalue: "{{ app_label.split('=')[1] }}"
63 |
64 | - name: Replace the application label placeholder
65 | replace:
66 | path: "{{ app_deployer }}"
67 | regexp: "lkey: lvalue"
68 | replace: "{{ app_lkey }}: {{ app_lvalue }}"
69 |
70 | when: app_label is defined
71 |
72 | - name: Delete the application and its related service.
73 | shell: kubectl delete -f {{ app_deployer }} -n {{ app_ns }}
74 | register: app_status
75 | until: 'app_status.rc == 0'
76 | delay: 30
77 | retries: 10
78 |
79 | - name: Deleting the PVC
80 | shell: kubectl delete pvc {{ item }} -n {{ app_ns }}
81 | args:
82 | executable: /bin/bash
83 | with_items:
84 | - "{{ pvc_list.stdout_lines }}"
85 |
86 | - name: Check if the PVCs are deleted
87 | shell: kubectl get pvc -n {{ app_ns }}
88 | register: list_pvc
89 | until: "'No resources found' in list_pvc.stderr"
90 | delay: 30
91 | retries: 15
92 |
93 | - name: Delete the namespace.
94 | shell: kubectl delete ns {{ app_ns }}
95 | args:
96 | executable: /bin/bash
97 |
98 |
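99 | # Example usage (illustrative; mirrors the call in
100 | # /e2e-tests/experiments/chaos/local_pv_disk_reuse/test.yml; app_ns and
101 | # app_label are expected to be set as described above):
102 | #
103 | # - name: Deprovisioning the Application
104 | #   include_tasks: /e2e-tests/utils/k8s/deprovision_statefulset.yml
105 | #   vars:
106 | #     app_deployer: "{{ application_statefulset }}"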
--------------------------------------------------------------------------------
/e2e-tests/experiments/localpv-provisioner/release_tag_provision.yml:
--------------------------------------------------------------------------------
1 | - block:
2 |
3 | - name: Downloading openebs operator yaml for rc tag
4 | get_url:
5 | url: "{{ hostpath_operator_link }}"
6 | dest: "{{ playbook_dir }}/{{ ci_device_operator }}"
7 | force: yes
8 | register: ci_operator
9 |
10 | - name: Downloading NDM operator yaml for rc tag
11 | get_url:
12 | url: "{{ ndm_operator_link }}"
13 | dest: "{{ playbook_dir }}/{{ ndm_operator }}"
14 | force: yes
15 | register: rc_operator
16 |
17 | - name: Change the OpenEBS LocalPV Provisioner Image
18 | replace:
19 | path: "{{ ci_device_operator }}"
20 | regexp: openebs/provisioner-localpv:ci
21 | replace: "openebs/provisioner-localpv:{{ release_tag }}"
22 |
23 | - name: Change the OpenEBS component labels to desired version in Operator yaml
24 | replace:
25 | path: "{{ ci_device_operator }}"
26 | regexp: 'openebs.io/version: dev'
27 | replace: "openebs.io/version: {{ release_tag }}"
28 |
29 | - name: Change the OpenEBS CleanUP Job Image
30 | replace:
31 | path: "{{ ci_device_operator }}"
32 | regexp: openebs/linux-utils:latest
33 | after: '- name: OPENEBS_IO_HELPER_IMAGE'
34 | replace: "openebs/linux-utils:{{ release_tag }}"
35 |
36 | - name: Change the OpenEBS component labels to desired version in Operator yaml
37 | replace:
38 | path: "{{ ndm_operator }}"
39 | regexp: 'openebs.io/version: dev'
40 | replace: "openebs.io/version: {{ release_tag }}"
41 |
42 | - name: Change the Image tag to newer version in operator yaml
43 | replace:
44 | path: "{{ ndm_operator }}"
45 | regexp: ':ci'
46 | replace: ":{{ ndm_version }}"
47 |
48 | - block:
49 | - name: Applying openebs operator
50 | shell: kubectl apply -f "{{ ci_device_operator }}"
51 | args:
52 | executable: /bin/bash
53 |
54 | - name: Applying openebs operator
55 | shell: kubectl apply -f "{{ ndm_operator }}"
56 | args:
57 | executable: /bin/bash
58 | when: lookup('env','ACTION') == "provision"
59 |
60 | - block:
61 |
62 | - name: Deleting openebs operator
63 | shell: kubectl delete -f "{{ ndm_operator }}"
64 | args:
65 | executable: /bin/bash
66 | ignore_errors: true
67 |
68 | - name: Deleting openebs operator
69 | shell: kubectl delete -f "{{ ci_device_operator }}"
70 | args:
71 | executable: /bin/bash
72 | ignore_errors: true
73 |
74 | when: lookup('env','ACTION') == "deprovision"
75 |
76 | when: rc_tag != ""
77 |
78 |
79 | - block:
80 |
81 | - name: Downloading openebs operator yaml for release tag
82 | get_url:
83 | url: "{{ release_operator_link }}"
84 | dest: "{{ playbook_dir }}/{{ hostpath_operator }}"
85 | force: yes
86 | register: ci_operator
87 |
88 | - name: Applying openebs operator
89 | shell: kubectl apply -f "{{ hostpath_operator }}"
90 | args:
91 | executable: /bin/bash
92 | when: lookup('env','ACTION') == "provision"
93 |
94 | - name: Deleting openebs operator
95 | shell: kubectl delete -f "{{ hostpath_operator }}"
96 | args:
97 | executable: /bin/bash
98 | when: lookup('env','ACTION') == "deprovision"
99 |
100 | when: rc_tag == ""
101 |
--------------------------------------------------------------------------------
/e2e-tests/utils/scm/applications/busybox/busybox_data_persistence.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - block:
3 |
4 | - name: Create some test data in the busybox app
5 | shell: >
6 | kubectl exec {{ pod_name }} -n {{ ns }}
7 | -- sh -c "{{ item }}"
8 | args:
9 | executable: /bin/bash
10 | register: result
11 | failed_when: "result.rc != 0"
12 | with_items:
13 | - "dd if=/dev/urandom of=/busybox/{{ testfile }} bs={{ blocksize }} count={{ blockcount }}"
14 | - "md5sum /busybox/{{ testfile }} > /busybox/{{ testfile }}-pre-chaos-md5"
15 | - "sync;sync;sync"
16 |
17 | when: status == "LOAD"
18 |
19 | - block:
20 |
21 | - name: Kill the application pod
22 | shell: >
23 | kubectl delete pod {{ pod_name }} -n {{ ns }}
24 | args:
25 | executable: /bin/bash
26 |
27 | - name: Verify if the application pod is deleted
28 | shell: >
29 | kubectl get pods -n {{ ns }}
30 | args:
31 | executable: /bin/bash
32 | register: podstatus
33 | until: '"{{ pod_name }}" not in podstatus.stdout'
34 | retries: 2
35 | delay: 150
36 |
37 | - name: Obtain the newly created pod name for application
38 | shell: >
39 | kubectl get pods -n {{ ns }} -l {{ label }} -o jsonpath='{.items[].metadata.name}'
40 | args:
41 | executable: /bin/bash
42 | register: newpod_name
43 |
44 | - name: Checking application pod is in running state
45 | shell: kubectl get pods -n {{ ns }} -o jsonpath='{.items[?(@.metadata.name=="{{ newpod_name.stdout }}")].status.phase}'
46 | register: result
47 | until: "((result.stdout.split()|unique)|length) == 1 and 'Running' in result.stdout"
48 | delay: 2
49 | retries: 150
50 |
51 | - name: Get the container status of application.
52 | shell: >
53 | kubectl get pods -n {{ ns }} -o jsonpath='{.items[?(@.metadata.name=="{{ newpod_name.stdout }}")].status.containerStatuses[].state}' | grep running
54 | args:
55 | executable: /bin/bash
56 | register: containerStatus
57 | until: "'running' in containerStatus.stdout"
58 | delay: 2
59 | retries: 150
60 |
61 | - name: Check the md5sum of stored data file
62 | shell: >
63 | kubectl exec {{ newpod_name.stdout }} -n {{ ns }}
64 | -- sh -c "md5sum /busybox/{{ testfile }} > /busybox/{{ testfile }}-post-chaos-md5"
65 | args:
66 | executable: /bin/bash
67 | register: status
68 | failed_when: "status.rc != 0"
69 |
70 | - name: Verify whether data is consistent
71 | shell: >
72 | kubectl exec {{ newpod_name.stdout }} -n {{ ns }}
73 | -- sh -c "diff /busybox/{{ testfile }}-pre-chaos-md5 /busybox/{{ testfile }}-post-chaos-md5"
74 | args:
75 | executable: /bin/bash
76 | register: result
77 | failed_when: "result.rc != 0 or result.stdout != ''"
78 |
79 | when: status == "VERIFY"
80 |
81 | - block:
82 |
83 | - name: Obtain the current pod name for application
84 | shell: >
85 | kubectl get pods -n {{ ns }} -l {{ label }} -o jsonpath='{.items[].metadata.name}'
86 | args:
87 | executable: /bin/bash
88 | register: newpod_name
89 |
90 | - name: Delete/drop the files
91 | shell: >
92 | kubectl exec {{ newpod_name.stdout }} -n {{ ns }}
93 | -- sh -c "rm -f /busybox/{{ testfile }}*"
94 | args:
95 | executable: /bin/bash
96 | register: status
97 |
98 | - name: Verify successful file delete
99 | shell: >
100 | kubectl exec {{ newpod_name.stdout }} -n {{ ns }}
101 | -- ls /busybox/
102 | args:
103 | executable: /bin/bash
104 | register: result
105 | failed_when: "testfile in result.stdout"
106 |
107 | when: status == "DELETE"
108 |
109 |
--------------------------------------------------------------------------------
/e2e-tests/utils/k8s/deprovision_deployment.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This Utility task file can delete the application and its underlying resources such as pvc and service from K8s cluster
3 | # This accepts application namespace, application label and application manifest file as input parameters.
4 | # The parameters used are
5 | # - app_deployer ( Deployment spec yaml file )
6 | # - app_ns ( application namespace )
7 | # - app_label ( application label)
8 | #
9 | - block:
10 |
11 | - name: Check if the application to be deleted is available.
12 | k8s_facts:
13 | kind: Pod
14 | label_selectors:
15 | - "{{ app_label }}"
16 | namespace: "{{ app_ns }}"
17 | register: po_name
18 | until: "{{ po_name | json_query('resources[*].status.phase') | unique | length==1}}"
19 | delay: 5
20 | retries: 60
21 |
22 | - name: Obtaining the PVC name using application label.
23 | set_fact:
24 | pvc_name: "{{ po_name.resources.0.spec.volumes.0.persistentVolumeClaim.claimName }}"
25 | pod_name: "{{ po_name.resources.0.metadata.name }}"
26 |
27 | - name: Obtaining the PV name from PVC name.
28 | k8s_facts:
29 | kind: PersistentVolumeClaim
30 | namespace: "{{ app_ns }}"
31 | name: "{{ pvc_name }}"
32 | register: pv_name
33 |
34 | - set_fact:
35 | pvname: "{{ pv_name | json_query('resources[0].spec.volumeName') }}"
36 |
37 | ## Replacing the item names in the respective deployer spec file.
38 | - name: Replace the PVC name in application deployer spec.
39 | replace:
40 | path: "{{ app_deployer }}"
41 | regexp: "testclaim"
42 | replace: "{{ lookup('env','APP_PVC') }}"
43 | when: app_pvc is defined
44 |
45 | - name: Replace the storageclass placeholder with provider
46 | replace:
47 | path: "{{ app_deployer }}"
48 | regexp: "testclass"
49 | replace: "{{ lookup('env','PROVIDER_STORAGE_CLASS') }}"
50 | when: storage_class is defined
51 |
52 | - block:
53 |
54 | - name: Get the application label values from env
55 | set_fact:
56 | app_lkey: "{{ app_label.split('=')[0] }}"
57 | app_lvalue: "{{ app_label.split('=')[1] }}"
58 |
59 | - name: Replace the application label placeholder
60 | replace:
61 | path: "{{ app_deployer }}"
62 | regexp: "lkey: lvalue"
63 | replace: "{{ app_lkey }}: {{ app_lvalue }}"
64 | when: app_label is defined
65 |
66 | - name: Delete the application deployment.
67 | shell: kubectl delete -f {{ app_deployer }} -n {{ app_ns }}
68 | args:
69 | executable: /bin/bash
70 | ignore_errors: true
71 |
72 | - name: Check if the PVC is deleted.
73 | k8s_facts:
74 | kind: PersistentVolumeClaim
75 | namespace: "{{ app_ns }}"
76 | label_selectors:
77 | - "{{ app_label }}"
78 | register: resource_list
79 | until: resource_list.resources | length < 1
80 | delay: 5
81 | retries: 120
82 |
83 | - name: Check if the pods are deleted in the namespaces
84 | shell: >
85 | kubectl get pods -n {{ app_ns }}
86 | args:
87 | executable: /bin/bash
88 | register: result
89 | until: "pod_name not in result.stdout"
90 | delay: 5
91 | retries: 60
92 |
93 | - name: Delete the namespace.
94 | k8s:
95 | state: absent
96 | kind: Namespace
97 | name: "{{ app_ns }}"
98 |
99 | - name: Check if the PV is deleted.
100 | k8s_facts:
101 | kind: PersistentVolume
102 | name: "{{ pvname }}"
103 | label_selectors:
104 | - "{{ app_label }}"
105 | register: pv_result
106 | failed_when: "pv_result.resources | length > 0"
107 |
--------------------------------------------------------------------------------