├── .gitignore
├── LICENSE
├── README.md
├── docs
│   ├── blog_posts.md
│   ├── csi-filesystem-options.md
│   ├── csi-read-write-many.md
│   ├── csi-snapshot-clones.md
│   ├── csi-topology.md
│   ├── csi-values-validation.md
│   ├── csi-volume-expansion.md
│   ├── csi-volume-import.md
│   ├── custom-storageclasses.md
│   ├── examples
│   │   ├── clone
│   │   │   ├── clone.yaml
│   │   │   └── pvc.yaml
│   │   ├── fsoptions
│   │   │   ├── pure-block-xfs.yaml
│   │   │   └── pure-file-nfs.yaml
│   │   ├── rwx
│   │   │   ├── pod-block-many.yaml
│   │   │   ├── pod-file-many.yaml
│   │   │   ├── pvc-block-many.yaml
│   │   │   └── pvc-file-many.yaml
│   │   ├── snapshot
│   │   │   ├── pvc.yaml
│   │   │   ├── restore-snapshot.yaml
│   │   │   └── snapshot.yaml
│   │   ├── topology
│   │   │   ├── pod-delay-binding.yaml
│   │   │   ├── pure-block-delay-binding.yaml
│   │   │   ├── pure-block-restrict-provisioning.yaml
│   │   │   ├── pvc-delay-binding.ymal
│   │   │   └── statefulset-topology.yaml
│   │   ├── volexpansion
│   │   │   ├── pod-block.yaml
│   │   │   ├── pod-file.yaml
│   │   │   ├── pvc-block.yaml
│   │   │   └── pvc-file.yaml
│   │   └── volumeimport
│   │       ├── pod-raw.yaml
│   │       ├── pod.yaml
│   │       ├── pv-import-block.yaml
│   │       ├── pv-import-file.yaml
│   │       ├── pv-import-raw.yaml
│   │       └── pvc-import.yaml
│   ├── flex-csi-upgrade.md
│   ├── flex-snapshot-for-flasharray.md
│   └── flex-volume-using-labels.md
├── index.html
├── index.yaml
├── pure-csi-1.0.0.tgz
├── pure-csi-1.0.1.tgz
├── pure-csi-1.0.2.tgz
├── pure-csi-1.0.3.tgz
├── pure-csi-1.0.4.tgz
├── pure-csi-1.0.5.tgz
├── pure-csi-1.0.6.tgz
├── pure-csi-1.0.7.tgz
├── pure-csi-1.0.8.tgz
├── pure-csi-1.1.0.tgz
├── pure-csi-1.1.1.tgz
├── pure-csi-1.2.0.tgz
├── pure-k8s-plugin-2.0.0.tgz
├── pure-k8s-plugin-2.0.1.tgz
├── pure-k8s-plugin-2.1.0.tgz
├── pure-k8s-plugin-2.1.1.tgz
├── pure-k8s-plugin-2.1.2.tgz
├── pure-k8s-plugin-2.2.0.tgz
├── pure-k8s-plugin-2.2.1.tgz
├── pure-k8s-plugin-2.3.0.tgz
├── pure-k8s-plugin-2.3.1.tgz
├── pure-k8s-plugin-2.3.2.tgz
├── pure-k8s-plugin-2.4.0.tgz
├── pure-k8s-plugin-2.4.1.tgz
├── pure-k8s-plugin-2.5.0.tgz
├── pure-k8s-plugin-2.5.1.tgz
├── pure-k8s-plugin-2.5.2.tgz
├── pure-k8s-plugin-2.5.4.tgz
├── pure-k8s-plugin-2.5.5.tgz
├── pure-k8s-plugin-2.5.6.tgz
├── pure-k8s-plugin-2.5.7.tgz
├── pure-k8s-plugin-2.5.8.tgz
├── pure-k8s-plugin-2.6.0.tgz
├── pure-k8s-plugin-2.6.1.tgz
├── pure-k8s-plugin-2.7.0.tgz
├── pure-k8s-plugin-2.7.1.tgz
├── operator-csi-plugin
│   ├── Dockerfile
│   ├── README.md
│   ├── build.sh
│   ├── install.sh
│   ├── install_ose4.sh
│   ├── licenses
│   │   └── LICENSE
│   ├── ose_4_clusterrole_patch.yaml
│   ├── update.sh
│   ├── values.yaml
│   └── watches.yaml
├── operator-k8s-plugin
│   ├── Dockerfile
│   ├── README.md
│   ├── build.sh
│   ├── install.sh
│   ├── licenses
│   │   └── LICENSE
│   ├── update.sh
│   ├── upgrade.sh
│   ├── values.yaml
│   └── watches.yaml
├── pure-csi
│   ├── .helmignore
│   ├── Chart.yaml
│   ├── README.md
│   ├── pure-storage.png
│   ├── snapshotclass.yaml
│   ├── snapshotclass_ose44.yaml
│   ├── templates
│   │   ├── _helpers.tpl
│   │   ├── node-configure.yaml
│   │   ├── node.yaml
│   │   ├── provisioner.yaml
│   │   ├── rbac.yaml
│   │   ├── scc.yaml
│   │   ├── secret.yaml
│   │   ├── service.yaml
│   │   └── storageclass.yaml
│   ├── values.schema.json
│   └── values.yaml
├── pure-k8s-plugin
│   ├── .helmignore
│   ├── Chart.yaml
│   ├── README.md
│   ├── pure-storage.png
│   ├── templates
│   │   ├── _helpers.tpl
│   │   ├── clusterrolebinding.yaml
│   │   ├── pure-flex-daemon.yaml
│   │   ├── pure-provisioner.yaml
│   │   ├── secret.yaml
│   │   └── storageclass.yaml
│   └── values.yaml
├── scripts
│   └── pso-collect-logs.sh
├── tests
│   ├── common
│   │   ├── generate-version.sh
│   │   ├── helm-utils.sh
│   │   └── minikube-utils.sh
│   └── pure-k8s-plugin
│       ├── README.md
│       └── test-upgrade.sh
└── update.sh
/.gitignore:
--------------------------------------------------------------------------------
1 | !.gitignore
2 | !*.go
3 | *.swp
4 | *.swo
5 | *~
6 | .idea
7 | /operator/helm-charts/**
8 | operator-csi-plugin/helm-charts/
9 | operator-k8s-plugin/helm-charts/
10 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Pure Service Orchestrator (PSO) Helm Charts
2 |
3 | ## !!NOTICE!! - PSO is going to be EOL July 31, 2022. New customers should start with [Portworx](https://portworx.com/).
4 |
5 | ## Feature-Frozen
6 | Pure Service Orchestrator 5.x, which is installed using this Helm chart (pure-csi), is in feature freeze. All new features will be going into our new [6.x release (pso-csi)](https://github.com/purestorage/pso-csi).
7 | Bug reports for 5.x will still be addressed, and PSO 5.x (pure-csi) is currently still the required plugin for Google Anthos and OpenShift 3.11.
8 |
9 | ## What is PSO?
10 |
11 | Pure Service Orchestrator (PSO) delivers storage-as-a-service for containers, giving developers the agility of public cloud with the reliability and security of on-premises infrastructure.
12 |
13 | **Smart Provisioning**
14 | PSO automatically makes the best provisioning decision for each storage request – in real-time – by assessing multiple factors such as performance load, the capacity and health of your arrays, and policy tags.
15 |
16 | **Elastic Scaling**
17 | Uniting all your Pure FlashArray™ and FlashBlade™ arrays on a single shared infrastructure, and supporting file and block as needed, PSO makes adding new arrays effortless, so you can scale as your environment grows.
18 |
19 | **Transparent Recovery**
20 | To ensure your services stay robust, PSO self-heals – so you’re protected against data corruption caused by issues such as node failure, array performance limits, and low disk space.
21 |
22 | ## Software Pre-Requisites
23 |
24 | - #### Operating Systems Supported*:
25 | - CentOS 7
26 | - CoreOS (Ladybug 1298.6.0 and above)
27 | - RHEL 7
28 | - Ubuntu 16.04
29 | - Ubuntu 18.04
30 | - #### Environments Supported*:
31 | - Refer to the README for the type of PSO installation required
32 | - #### Other software dependencies:
33 | - Latest Linux multipath software package for your operating system (Required)
34 | - Latest Filesystem utilities/drivers (XFS by default, Required)
35 | - Latest iSCSI initiator software for your operating system (Optional, required for iSCSI connectivity)
36 | - Latest NFS software package for your operating system (Optional, required for NFS connectivity)
37 | - Latest FC initiator software for your operating system (Optional, required for FC connectivity, *FC Supported on Bare-metal K8s installations only*)
38 | - #### FlashArray and FlashBlade:
39 | - The FlashArray and/or FlashBlade should be connected to the compute nodes using [Pure's best practices](https://support.purestorage.com/Solutions/Linux/Reference/Linux_Recommended_Settings)
40 | - #### FlashArray User Privileges
41 | - It is recommended to use a specific FlashArray user, and associated API token, for PSO access control to enable easier array auditing.
42 | - The PSO user can be local or based on a Directory Service controlled account (assuming DS is configured on the array).
43 | - The PSO user requires a minimum role level of `storage_admin`.
44 | - #### FlashBlade User Privileges
45 | - If the FlashBlade is configured to use Directory Services for array management, then a DS controlled account and its associated API token can be used for PSO.
46 | - The PSO user requires a minimum array management role level of `storage_admin`.
47 | - Currently there is no option to create additional local users on a FlashBlade.
48 |
49 | _* Please see release notes for details_
50 |
51 | ## Hardware Pre-Requisites
52 |
53 | PSO can be used with any of the following hardware appliances and associated minimum version of appliance code:
54 | * Pure Storage FlashArray (minimum Purity code version 4.8)
55 | * Pure Storage FlashBlade (minimum Purity version 2.2.0)
56 |
57 | ## Installation
58 |
59 | PSO can be deployed via an Operator or from the Helm chart.
60 |
61 | ### PSO Operator
62 |
63 | PSO has Operator-based install available for both its FlexVolume (**deprecated**) plugin and CSI plugin. This install method does not need Helm installation.
64 |
65 | Pure Flex Operator is the preferred installation method for FlexVolume on OpenShift version 3.11. The CSI Operator should be used for OpenShift 4.1 and 4.2.
66 |
67 | **Note** Use the CSI Helm3 install method for OpenShift 4.3 and higher with the adoption of Helm3 in OpenShift.
68 |
69 | For installation, see the [Flex Operator Documentation](./operator-k8s-plugin/README.md#overview) or the [CSI Operator Documentation](./operator-csi-plugin/README.md#overview).
70 |
71 | ### Helm Chart
72 |
73 | **pure-k8s-plugin** deploys the PSO FlexVolume plugin on your Kubernetes cluster (the FlexVolume driver is now deprecated).
74 | 
75 | **pure-csi** deploys the PSO CSI plugin on your Kubernetes cluster.
76 |
77 | #### Helm Setup
78 |
79 | Install Helm by following the official documents:
80 | 1. For Kubernetes
81 | https://docs.helm.sh/using_helm#install-helm
82 |
83 | 2. For OpenShift
84 | **In OpenShift 3.11 the Red Hat-preferred installation method is to use an Operator. Follow the instructions in the [PSO operator directory](./operator/README.md).**
85 |
86 |
87 | Refer to the [k8s-plugin README](./pure-k8s-plugin/README.md) or the [csi-plugin README](./pure-csi/README.md) for further installation steps.
88 |
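Both charts are configured through a `values.yaml` file that lists the backend arrays. As a quick orientation before following the chart READMEs, here is a minimal sketch of the `arrays` section; the endpoints and API tokens below are placeholders, and the full set of supported keys is documented in each chart's README.

```yaml
arrays:
  FlashArrays:
    - MgmtEndPoint: "1.2.3.4"                              # placeholder management VIP
      APIToken: "a526a4c6-18b0-a8c9-1afa-3499293574bb"     # placeholder API token
  FlashBlades:
    - MgmtEndPoint: "1.2.3.5"                              # placeholder management VIP
      APIToken: "T-c4925090-c9bf-4033-8537-d24ee5669135"   # placeholder API token
      NFSEndPoint: "1.2.3.6"                               # data VIP used for NFS mounts
```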
89 | ## PSO on the Internet
90 |
91 | [Checkout a list of some blogs related to Pure Service Orchestrator](./docs/blog_posts.md)
92 |
93 | ## Contributing
94 | We welcome contributions. The PSO Helm Charts project is under [Apache 2.0 license](https://github.com/purestorage/helm-charts/blob/master/LICENSE). We accept contributions via GitHub pull requests.
95 |
96 | ## Report a Bug
97 | For filing bugs, suggesting improvements, or requesting new features, please open an [issue](https://github.com/purestorage/helm-charts/issues).
98 |
--------------------------------------------------------------------------------
/docs/blog_posts.md:
--------------------------------------------------------------------------------
1 | # Blog Posts Relating to Pure Service Orchestrator
2 |
3 | ## Introduction
4 |
5 | This page provides links to some external blog posts that reference Pure Service Orchestrator.
6 |
7 | Blogs hosted outside of the official Pure Storage blog site are the general musings and thoughts of their authors, who may be Pure employees, but these thoughts and musings are entirely their own.
8 |
9 | No support or approval by Pure Storage is given or implied.
10 |
11 | * [PSO Analytics: Visibility into how Kubernetes Applications use Storage](https://medium.com/@joshua_robinson/pso-analytics-visibility-into-how-kubernetes-applications-use-storage-e7bda52c3bf)
12 | * [Kubernetes Topology for StatefulSet Storage Redundancy with PSO](https://blog.2vcps.io/2020/04/18/kubernetes-topology-for-statefulset-storage-redundancy-with-pso/)
13 | * [Kasten: Raising the Bar for Kubernetes Backup and Mobility](https://blog.purestorage.com/kasten-kubernetes-backup-mobility/)
14 | * [PSO wrt DKS & UCP](https://blog.2vcps.io/2020/03/19/pso-wrt-dks-ucp/)
15 | * [Migrating k8s Stateful Apps with Pure Storage](https://blog.2vcps.io/2020/03/18/migrating-k8s-stateful-apps-with-pure-storage/)
16 | * [New Release: Pure Service Orchestrator 5.0.2](https://blog.2vcps.io/2019/11/18/new-release-pure-service-orchestrator-5-0-2/)
17 | * [Installing PSO in a PKS Cluster using the Operator](https://blog.2vcps.io/2019/11/05/installing-pso-in-a-pks-cluster-using-the-operator/)
18 | * [All-Flash Platform-as-a-Service: Pure Storage and Red Hat OpenShift Reference Architecture](https://blog.purestorage.com/paas-pure-storage-red-hat-openshift-reference-architecture/)
19 | * [Storage-as-a-Service for SQL Server 2019 Big Data Clusters](https://blog.purestorage.com/storage-as-a-service-for-sql-server-2019-big-data-clusters/)
20 | * [Choosing the Right Infrastructure for A SQL Server 2019 Big Cluster](https://blog.purestorage.com/infrastructure-sql-server-big-cluster/)
21 | * [Jupyter as a Service on FlashBlade](https://towardsdatascience.com/jupyter-as-a-service-on-flashblade-3c9ec27f8fcf)
22 | * [Pure Service Orchestrator is Validated for Enterprise PKS](https://blog.2vcps.io/2019/12/10/pure-service-orchestrator-is-validated-for-enterprise-pks/)
23 |
--------------------------------------------------------------------------------
/docs/csi-filesystem-options.md:
--------------------------------------------------------------------------------
1 |
2 | # Using Per-Volume FileSystem Options with Kubernetes
3 |
4 | ## Introduction
5 |
6 | The Pure Service Orchestrator Kubernetes CSI driver includes support for per-volume filesystem (FS) options starting with version 5.0.5.
7 | The feature allows Kubernetes end-users to create persistent volumes with customizable filesystem options on a per-volume basis.
8 | Users can customize the filesystem type (`FsType`) with create options (`CreateOptions`) during the volume staging phase and customize mount options (`MountOptions`) during the volume publish phase.
9 |
10 | The feature leverages Kubernetes `StorageClass` to carry the customized FS options to the underlying storage backend.
11 | Before this feature, users could only set these parameters via [configuration](../pure-csi/README.md) in the values.yaml file, so all persistent volumes used the same options and the settings could not be changed after PSO had loaded.
12 | With this feature, users can customize the FS options for persistent volumes on-the-fly through various StorageClass settings to meet different application needs.
13 |
14 |
15 | ## Dependencies
16 |
17 | The following dependencies must be true before the customized filesystem options can be used:
18 |
19 | * Kubernetes already running, deployed, configured, etc.
20 | * For the `MountOptions` feature, ensure you have Kubernetes 1.8+ installed.
21 | * PSO correctly installed and using [Pure CSI Driver v5.0.5](https://github.com/purestorage/helm-charts/releases/tag/5.0.5)+.
22 |
23 | ## FileSystem Options
24 | PSO leverages the Kubernetes `StorageClass` to pass the customized FS options to the underlying storage backend. If FS options are specified in the `StorageClass`, they override the default values from values.yaml.
25 | The default values only apply when no FS options are set in the `StorageClass`.
26 | ### FsType
27 | The CSI external-provisioner allows users to set `FsType` via the key-value parameters map in the `StorageClass`. You can use the pre-defined key `"csi.storage.k8s.io/fstype"` to set up the `FsType` like this:
28 |
29 | ```yaml
30 | parameters:
31 | csi.storage.k8s.io/fstype: vfat
32 | ```
33 |
34 | ### CreateOptions
35 | PSO allows users to set `CreateOptions` via the key-value parameters map in the `StorageClass`. You can use the pre-defined key `"createoptions"` to set up the `CreateOptions` like this:
36 |
37 | ```yaml
38 | parameters:
39 | createoptions: -F 32 -f 2 -S 512 -s 1 -R 32
40 | ```
41 |
42 | ### MountOptions
43 | Persistent Volumes that are dynamically created by a `StorageClass` will have the `MountOptions` specified in the _mountOptions_ field of the `StorageClass`. You can set the options like this:
44 | ```
45 | mountOptions:
46 | - nosuid
47 | - discard
48 | ```
49 |
50 | **Notes:**
51 |
52 | 1. _**Native mkfs and mount support**:_
53 | Please make sure your worker nodes support the `FsType` with the correct `CreateOptions` you specify in the `StorageClass`.
54 |
55 | During the volume staging phase, PSO passes these parameters to the driver, which creates the filesystem for the volume using a command of the form `mkfs.<fstype> <createoptions> <device>` (e.g. `mkfs.xfs -q /dev/dm-0`). A failed mkfs operation will lead to pod volume attachment failures and the pod will be stuck in the Pending state. The same is true when you mount with incorrect `MountOptions`.
56 | For **FlashBlade**, make sure your worker nodes have the NFS utilities package installed:
57 | ``` bash
58 | yum install -y nfs-utils
59 | ```
60 |
61 | 2. _**FlashBlade support:**_ When your backend storage type is FlashBlade (FB), PSO will ignore the `FsType` and `CreateOptions` parameters by default, since FB does not allow users to format the filesystem when attaching volumes. The default filesystem for FB is `nfs`. However, users can still specify `MountOptions` to mount volumes.
62 |
63 | 3. _**Kubernetes default filesystem:**_
64 | Kubernetes uses `ext4` as its default filesystem. If users do not specify `FsType` in the `StorageClass`, K8s will pass the default `ext4` to the driver.
65 | PSO recommends using `xfs` to achieve the best performance.
66 |
67 | 4. _**Default "discard" mount option:**_ By default, PSO automatically adds a "discard" option while mounting the volume to achieve the best performance unless users specifically add the "nodiscard" option, which PSO does not recommend.
68 |
69 | ## Example of StorageClass for FlashArray
70 |
71 | ```yaml
72 | kind: StorageClass
73 | apiVersion: storage.k8s.io/v1
74 | metadata:
75 | name: pure-block-xfs
76 | labels:
77 | kubernetes.io/cluster-service: "true"
78 | provisioner: pure-csi
79 | parameters:
80 | backend: block
81 | csi.storage.k8s.io/fstype: xfs
82 | createoptions: -q
83 | mountOptions:
84 | - discard
85 | ```
86 | To apply:
87 | ```bash
88 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/fsoptions/pure-block-xfs.yaml
89 | ```
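Once the class exists, a PVC consumes it by name like any other StorageClass; a minimal sketch (the claim name is illustrative):

```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pure-claim-xfs   # illustrative name
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  # Matches the StorageClass defined above
  storageClassName: pure-block-xfs
```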
90 | ## Example of StorageClass for FlashBlade
91 |
92 | ```yaml
93 | kind: StorageClass
94 | apiVersion: storage.k8s.io/v1
95 | metadata:
96 | name: pure-file-nfs
97 | labels:
98 | kubernetes.io/cluster-service: "true"
99 | provisioner: pure-csi
100 | parameters:
101 | backend: file
102 | mountOptions:
103 | - nfsvers=3
104 | - tcp
105 | ```
106 | To apply:
107 | ```bash
108 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/fsoptions/pure-file-nfs.yaml
109 | ```
--------------------------------------------------------------------------------
/docs/csi-read-write-many.md:
--------------------------------------------------------------------------------
1 |
2 | # Using Read-Write-Many (RWX) volumes with Kubernetes
3 |
4 | ## Introduction
5 |
6 | The Pure Service Orchestrator Kubernetes CSI driver includes support for Read-Write-Many (RWX) block volumes on FlashArray
7 | starting with version 5.1.0. This feature allows Kubernetes end-users to create persistent block volumes that may be mounted into
8 | multiple pods simultaneously. Persistent volume claims created this way can be mounted exactly the same as a normal pod, only
9 | requiring that `accessModes` contains `ReadWriteMany`.
10 |
11 | ## Restrictions
12 | Read-Write-Many cannot be used with all combinations of storage classes. Specifically, we prohibit mounting a block volume
13 | as a filesystem with RWX.
14 |
15 | | Backend Type | Mount Type | Access Mode | Valid? |
16 | |--------------|------------|-------------|--------|
17 | | Block | Block | RWO | Yes |
18 | | Block | Block | RWX | Yes |
19 | | Block | File | RWO | Yes |
20 | | Block | File | RWX | **No** |
21 | | File | File | RWO | Yes |
22 | | File | File | RWX | Yes |
23 |
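For reference, the only PVC-side requirements for RWX on a block backend are `volumeMode: Block` and the `ReadWriteMany` access mode (a valid row in the table above); this matches the linked example files:

```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pure-claim-raw-block
spec:
  # Raw block mode keeps the combination valid per the table above
  volumeMode: Block
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
  storageClassName: pure-block
```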
24 | ## Examples
25 | To use these examples, install the Pure CSI plugin and apply the following example files.
26 |
27 | ### For FlashArray/Cloud Block Store
28 |
29 | FlashArrays can only be used for RWX volumes with [raw block mounts](https://kubernetes.io/blog/2019/03/07/raw-block-volume-support-to-beta/), as shown in the following example files.
30 |
31 | [Raw Block PVC](examples/rwx/pvc-block-many.yaml)
32 |
33 | [Example Pods](examples/rwx/pod-block-many.yaml)
34 |
35 | To apply:
36 | ```bash
37 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/rwx/pvc-block-many.yaml
38 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/rwx/pod-block-many.yaml
39 | # The raw block device will be mounted at /dev/pure-block-device
40 | ```
41 |
42 | **A note on caching:** while testing using the above examples, it can be remarkably annoying to test writing data between
43 | pods, as it can be difficult to ensure the caches are flushed. One easy way to test the shared block storage is
44 | `dd if=/dev/urandom of=/dev/pure-block-device bs=512 count=1 oflag=direct` (where `oflag=direct` will bypass caches), and
45 | then read using `dd if=/dev/pure-block-device of=/dev/stdout bs=512 count=1 iflag=direct` (where `iflag=direct` will bypass
46 | caches again).
47 |
48 | ### For FlashBlade
49 |
50 | FlashBlade shares can be easily used for RWX volumes since they use NFS, as shown in the following example files.
51 |
52 | [File PVC](examples/rwx/pvc-file-many.yaml)
53 |
54 | [Example Pods](examples/rwx/pod-file-many.yaml)
55 |
56 | To apply:
57 | ```bash
58 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/rwx/pvc-file-many.yaml
59 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/rwx/pod-file-many.yaml
60 | # The NFS volume will be mounted at /data
61 | ```
62 |
--------------------------------------------------------------------------------
/docs/csi-snapshot-clones.md:
--------------------------------------------------------------------------------
1 |
2 | # Using CSI Snapshots and Clones with Kubernetes
3 |
4 | ## Introduction
5 |
6 | The Pure Service Orchestrator Kubernetes CSI driver includes support for snapshots and clones. These features allow Kubernetes end users to capture point-in-time copies of their persistent volume claims, and mount those copies in other Kubernetes Pods, or recover from a snapshot. This enables several use cases, some of which include:
7 |
8 | 1. Test / Develop against copies of production data quickly (no need to copy large amounts of data)
9 | 2. Backup / Restore production volumes.
10 |
11 | These features use native Kubernetes APIs to call the feature-set in the underlying storage backend. Currently, only the FlashArray backend can fully support snapshots and clones.
12 |
13 | ## Dependencies
14 |
15 | The following dependencies must be true before the snapshot and clone functionality can be used:
16 |
17 | * Kubernetes already running, deployed, configured, etc.
18 | * PSO correctly installed and using [Pure CSI Driver v5.0.5](https://github.com/purestorage/helm-charts/releases/tag/5.0.5)+.
19 | * For the snapshot feature, ensure you have Kubernetes 1.13+ installed and the `VolumeSnapshotDataSource` feature gate is enabled. This feature gate is set to `true` by default from 1.17 and therefore does not need to be set from this version onwards.
20 | * For the clone feature, ensure you have Kubernetes 1.15+ installed and the `VolumePVCDataSource` feature gate is enabled. This feature graduated to GA in 1.18 and is therefore no longer required in that and subsequent versions.
21 |
22 | ### Enabling Feature Gates
23 |
24 | To ensure that snapshot and clone functionality can be utilised by the CSI driver, use the following commands to verify that the correct feature gates are open in your Kubernetes deployment.
25 |
26 | Note that most Kubernetes deployments have proprietary methods for enabling feature gates, so check with your deployment vendor where applicable.
27 |
28 | In general, ensure that the `kubelet` process is started with the following switches:
29 |
30 | ```bash
31 | --feature-gates=VolumeSnapshotDataSource=true,VolumePVCDataSource=true
32 | ```
33 |
34 | **Note:**
35 | * `VolumePVCDataSource` gate is no longer required from Kubernetes 1.18 (feature went GA at this version)
36 | * `VolumeSnapshotDataSource` gate is no longer required from Kubernetes 1.17 (defaults to true from this version)
37 |
38 | More details on feature-gate alpha and beta support can be found [here](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features)
39 |
40 | Detailed below are the methods to enable feature gates in a few common deployment tools:
41 |
42 | #### kubespray
43 |
44 | Edit the file `roles/kubespray-defaults/defaults/main.yaml` and add the following lines in the appropriate locations
45 |
46 | ```yaml
47 | volume_clones: True
48 | volume_snapshots: True
49 |
50 | feature_gate_snap_clone:
51 | - "VolumeSnapshotDataSource={{ volume_snapshots | string }}"
52 | - "VolumePVCDataSource={{ volume_clones | string }}"
53 | ```
54 |
55 | Update the `kube_feature_gates` parameter to enable the feature gates
56 |
57 | ```yaml
58 | kube_feature_gates: |-
59 | {{ feature_gate_snap_clone }}
60 | ```
61 |
62 | #### kubeadm
63 |
64 | Edit your kubeadm configuration and modify the `kind` config for the cluster apiServer. An example config would be:
65 |
66 | ```yaml
67 | kind: Cluster
68 | apiVersion: kind.sigs.k8s.io/v1alpha3
69 | # patch the generated kubeadm config with some extra settings
70 | kubeadmConfigPatches:
71 | - |
72 | apiVersion: kubeadm.k8s.io/v1beta2
73 | kind: ClusterConfiguration
74 | metadata:
75 | name: config
76 | apiServer:
77 | extraArgs:
78 | "feature-gates": "VolumeSnapshotDataSource=true"
79 | ```
80 |
81 | #### kops
82 |
83 | Edit the kops `cluster.yaml` and add the following for `kind: Cluster`:
84 |
85 | ```yaml
86 | spec:
87 | kubelet:
88 | featureGates:
89 | VolumeSnapshotDataSource: "true"
90 | VolumePVCDataSource: "true"
91 | ```
92 |
93 | #### OpenShift
94 |
95 | CSI snapshot and clone support is only available from OpenShift 4.3.
96 |
97 | To enable these features in OpenShift edit the Feature Gate Custom Resource, named `cluster`, in the `openshift-config` project. Add `VolumeSnapshotDataSource` and `VolumePVCDataSource` as enabled feature gates.
98 |
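For reference, a `FeatureGate` resource enabling these gates might look like the sketch below. The `CustomNoUpgrade` feature set is an assumption based on the generic OpenShift 4 feature-gate mechanism (note that enabling it blocks cluster upgrades), so verify against the OpenShift documentation for your version:

```yaml
apiVersion: config.openshift.io/v1
kind: FeatureGate
metadata:
  name: cluster
spec:
  # CustomNoUpgrade allows arbitrary feature gates but prevents upgrades
  featureSet: CustomNoUpgrade
  customNoUpgrade:
    enabled:
      - VolumeSnapshotDataSource
      - VolumePVCDataSource
```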
99 | #### Docker Kubernetes Service
100 |
101 | Install UCP with the `--storage-expt-enabled` flag. This will enable all the k8s 1.14 capable feature gates, including support for volume snapshots.
102 | **Note:** Volume clones are not supported in DKS due to the version of Kubernetes deployed by Docker EE 3.0.
103 |
104 | #### Platform9 Managed Kubernetes
105 | 
106 | Currently the deployment GUI for PMK does not allow for changing feature-gates, so to enable feature-gates on PMK it is first necessary to build your cluster using the Platform9 tools and then enable the feature-gates after deployment.
107 |
108 | Once the cluster is deployed, perform the following on each of the master nodes:
109 |
110 | Edit the file `/opt/pf9/pf9-kube/conf/masterconfig/base/master.yaml` and change the two references of
111 |
112 | ```
113 | - "--feature-gates=PodPriority=true"
114 | ```
115 |
116 | to
117 |
118 | ```
119 | - "--feature-gates=PodPriority=true,VolumePVCDataSource=true,VolumeSnapshotDataSource=true"
120 | ```
121 |
122 | Once completed, reboot the master nodes in series.
123 |
124 | ### Validating Feature Gates
125 |
126 | To validate if your feature gates have been correctly set, check the `api-server` pod in the `kube-system` namespace for one of the nodes in the cluster:
127 |
128 | ```
129 | kubectl describe -n kube-system pod kube-api-server- | grep feature-gates
130 | ```
131 |
132 | This should result in the following if the feature gates are correctly set.
133 |
134 | ```
135 | --feature-gates=VolumeSnapshotDataSource=True,VolumePVCDataSource=True
136 | ```
137 |
138 | ### Examples
139 |
140 | Once you have correctly installed PSO on a Kubernetes deployment and the appropriate feature gates have been enabled, the following examples can be used to show the use of the snapshot and clone functionality.
141 | 
142 | These examples start with the assumption that a PVC called `pure-claim` has been created by PSO under a block-related storage class, for example the `pure-block` storage class provided by the PSO installation.
143 |
144 | #### Creating snapshots
145 |
146 | Use the following YAML to create a snapshot of the PVC `pure-claim`:
147 |
148 | ```yaml
149 | apiVersion: snapshot.storage.k8s.io/v1alpha1
150 | kind: VolumeSnapshot
151 | metadata:
152 | name: volumesnapshot-1
153 | spec:
154 | snapshotClassName: pure-snapshotclass
155 | source:
156 | name: pure-claim
157 | kind: PersistentVolumeClaim
158 | ```
159 | To give it a try:
160 | ```bash
161 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/pure-csi/snapshotclass.yaml
162 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/snapshot/pvc.yaml
163 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/snapshot/snapshot.yaml
164 | ```
165 | This will create a snapshot called `volumesnapshot-1`, whose status can be checked with
166 | 
167 | 
168 | ```bash
169 | kubectl describe volumesnapshot volumesnapshot-1
170 | ```
171 |
172 | #### Restoring a Snapshot
173 |
174 | Use the following YAML to restore a snapshot to create a new PVC `pvc-restore-from-volumesnapshot-1`:
175 |
176 | ```yaml
177 | kind: PersistentVolumeClaim
178 | apiVersion: v1
179 | metadata:
180 | name: pvc-restore-from-volumesnapshot-1
181 | spec:
182 | accessModes:
183 | - ReadWriteOnce
184 | resources:
185 | requests:
186 | storage: 10Gi
187 | storageClassName: pure-block
188 | dataSource:
189 | kind: VolumeSnapshot
190 | name: volumesnapshot-1
191 | apiGroup: snapshot.storage.k8s.io
192 | ```
193 | To give it a try:
194 | ```bash
195 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/snapshot/restore-snapshot.yaml
196 | ```
197 | **NOTE:** Recovery of a volume snapshot to overwrite its parent persistent volume is not supported in the CSI specification; however, this can be achieved with a FlashArray based PVC and snapshot using the following steps:
198 |
199 | 1. Reduce the application deployment replica count to zero to ensure there is no active I/O through the PVC.
200 | 2. Log on to the FlashArray hosting the underlying PV and perform a snapshot restore through the GUI. More details can be found in the FlashArray Users Guide. This can also be achieved using the `purefa_snap` Ansible module, see [here](https://github.com/Pure-Storage-Ansible/FlashArray-Collection/blob/master/collections/ansible_collections/purestorage/flasharray/docs/purefa_snap.rst) for more details.
201 | 3. Increase the deployment replica count to 1 and allow the application to restart using the recovered PV.
202 |
203 | #### Create a clone of a PVC
204 |
205 | Use the following YAML to create a clone called `clone-of-pure-claim` of the PVC `pure-claim`:
206 | **Note:** both `clone-of-pure-claim` and `pure-claim` must use the same `storageClassName`.
207 |
208 | ```yaml
209 | apiVersion: v1
210 | kind: PersistentVolumeClaim
211 | metadata:
212 | name: clone-of-pure-claim
213 | spec:
214 | accessModes:
215 | - ReadWriteOnce
216 | storageClassName: pure
217 | resources:
218 | requests:
219 | storage: 10Gi
220 | dataSource:
221 | kind: PersistentVolumeClaim
222 | name: pure-claim
223 | ```
224 | To give it a try:
225 | ```bash
226 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/clone/pvc.yaml
227 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/clone/clone.yaml
228 | ```
229 | **Notes:**
230 |
231 | 1. _Application consistency:_
232 | The snapshot API does not have any application consistency functionality. If an application-consistent snapshot is needed, the application pods need to be frozen/quiesced from an IO perspective before the snapshot is called. The application then needs to be unquiesced after the snapshot(s) has been created.
233 |
--------------------------------------------------------------------------------
/docs/csi-values-validation.md:
--------------------------------------------------------------------------------
1 |
2 | # values.yaml file pre-install validation
3 |
4 | ## Introduction
5 |
6 | We allow PSO users to provide their own version of values.yaml, then merge the provided key-value pairs with the default values.yaml under the pure-csi directory. One improvement identified from past experience is that adding validation before installation eliminates some of the common errors users have encountered. For example, we rely on users to provide FlashArray and FlashBlade properties; without validation, it was possible for users to inject unwanted characters into their endpoint or API token, or even provide a different object type than intended. With Helm 3 now the standard, we leverage its JSON schema validation functionality and have added validation based on the needs of our backend, while at the same time making it easier for users to identify issues early on.
7 |
8 | ## Restrictions
9 | JSON validation is by default case-sensitive, so we kindly ask users to be mindful when writing their own values.yaml files. We recommend copying directly from our default file and changing only the values, not the keys. Otherwise, users will likely see validation errors at runtime.
10 |
11 | ## How does it work?
12 | Under normal circumstances, there are no extra tasks that users need to perform. Both
13 | ```bash
14 | helm install
15 | helm upgrade
16 | ```
17 | will trigger the validation, and installation will not continue if errors are thrown.
18 |
19 | It is also possible that a values.yaml file users have been using will fail validation for the first time if the case of a key does not match. For example, "NFSEndPoint" is a required property under FlashBlades, but providing the key as "nfsEndPoint" or "NfsEndPoint" will not pass validation, as shown below.
20 |
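As an illustrative sketch of the casing rules, the snippet below shows a FlashBlades entry with the exact keys the schema expects (the endpoint and token values are placeholders):

```yaml
arrays:
  FlashBlades:
    - MgmtEndPoint: "1.2.3.5"                              # placeholder
      APIToken: "T-c4925090-c9bf-4033-8537-d24ee5669135"   # placeholder
      # "NFSEndPoint" is required; "nfsEndPoint" or "NfsEndPoint" would fail validation
      NFSEndPoint: "1.2.3.6"
```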
21 | If installation fails validation and you believe valid inputs have been provided, please reach out to the PSO team.
22 |
23 |
24 |
--------------------------------------------------------------------------------
/docs/csi-volume-expansion.md:
--------------------------------------------------------------------------------
1 |
2 | # Expanding CSI persistent volumes with Kubernetes
3 |
4 | ## Introduction
5 |
6 | The Pure Service Orchestrator Kubernetes CSI driver includes support for CSI volume expansion starting with version 5.2.0.
7 |
8 | The feature allows Kubernetes end-users to expand a persistent volume (PV) after creation by resizing a dynamically provisioned persistent volume claim (PVC).
9 | End-users can edit the `allowVolumeExpansion` boolean flag in a Kubernetes `StorageClass` to control whether resizing of its PVCs is allowed, as sketched below.
10 |
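For reference, a StorageClass with expansion enabled looks like the following sketch (the class name is illustrative; the StorageClasses bundled with PSO 5.2.0+ already set this flag):

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: pure-block-expandable   # illustrative name
provisioner: pure-csi
parameters:
  backend: block
# Permits resizing of PVCs provisioned from this class
allowVolumeExpansion: true
```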
11 | ## Prerequisites
12 |
13 | The following prerequisites must be true for the feature to work properly:
14 | 
15 | * Only dynamically provisioned PVCs can be resized.
16 | * Only volume expansion is allowed; shrinking a volume is not.
17 | * The `StorageClass` that provisions the PVC must support resize. The `allowVolumeExpansion` flag is set to `true` by default in all PSO StorageClasses since version 5.2.0.
18 | * The PVC `accessMode` must be `ReadWriteOnce` or `ReadWriteMany`.
19 |
20 | ### Note
21 |
22 | Any PVC created using a StorageClass with `backend: block` in its parameters will only be resized upon a (re)start of the pod bound to the PVC.
23 |
24 | ## Dependencies
25 |
26 | * Kubernetes already running, deployed, configured, etc.
27 | Kubernetes has supported CSI volume expansion as beta since 1.16, with GA expected in Kubernetes 1.19, so ensure you have Kubernetes 1.16+ installed. [More Info](https://kubernetes-csi.github.io/docs/volume-expansion.html)
28 | * PSO correctly installed and using [Pure CSI Driver v5.2.0](https://github.com/purestorage/helm-charts/releases/tag/5.2.0)+.
29 |
30 |
31 | ## Example usages
32 |
33 | The PSO CSI driver supports the `ONLINE` volume expansion capability, i.e. expanding an in-use PersistentVolumeClaim.
34 | For more details, please check the [CSI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md).
35 |
36 | ### FlashArray StorageClass "pure-block" volume expansion
37 |
38 | #### 1. Ensure `allowVolumeExpansion` is set to `true` in `pure-block` StorageClass:
39 |
40 | To ensure that the StorageClass has the correct setting, run the following command:
41 |
42 | ```bash
43 | kubectl patch sc pure-block --type='json' -p='[{"op": "add", "path": "/allowVolumeExpansion", "value": true }]'
44 | ```
45 |
46 | #### 2. Create a PVC:
47 |
48 | Example PVC:
49 |
50 | ```yaml
51 | kind: PersistentVolumeClaim
52 | apiVersion: v1
53 | metadata:
54 | # Referenced in pod.yaml for the volume spec
55 | name: pure-claim-block
56 | spec:
57 | accessModes:
58 | - ReadWriteOnce
59 | resources:
60 | requests:
61 | storage: 10Gi
62 | storageClassName: pure-block
63 | ```
64 |
65 | To create:
66 |
67 | ```bash
68 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/volexpansion/pvc-block.yaml
69 | ```
70 |
71 | #### 3. Start a Pod to use the PVC:
72 |
73 | Example Pod:
74 |
75 | ```yaml
76 | apiVersion: v1
77 | kind: Pod
78 | metadata:
79 | name: nginx
80 | spec:
81 | volumes:
82 | - name: pure-vol
83 | persistentVolumeClaim:
84 | claimName: pure-claim-block
85 | containers:
86 | - name: nginx
87 | image: nginx
88 |     # Configure a mount for the volume we defined above
89 | volumeMounts:
90 | - name: pure-vol
91 | mountPath: /data
92 | ports:
93 | - containerPort: 80
94 | ```
95 |
96 | To create:
97 |
98 | ```bash
99 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/volexpansion/pod-block.yaml
100 | ```
101 |
102 | #### 4. Expand the PVC:
103 |
104 | Patch the PVC to a larger size, e.g. 20Gi:
105 |
106 | ```bash
107 | kubectl patch pvc pure-claim-block -p='{"spec": {"resources": {"requests": {"storage": "20Gi"}}}}'
108 | ```
109 |
110 | Check that the PV has already been resized successfully, but notice the PVC size has not changed, because a Pod (re-)start is required:
111 |
112 | ```bash
113 | # kubectl get pvc pure-claim-block
114 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
115 | pure-claim-block Bound pvc-b621957b-2828-4b75-a737-251916c05cb6 10Gi RWO pure-block 56s
116 | # kubectl get pv
117 | NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
118 | pvc-b621957b-2828-4b75-a737-251916c05cb6 20Gi RWO Delete Bound default/pure-claim-block pure-block 68s
119 |
120 | ```
121 | Check the PVC conditions:
122 |
123 | ```bash
124 | # kubectl get pvc pure-claim-block -o yaml
125 | ...
126 | status:
127 | conditions:
128 | message: Waiting for user to (re-)start a pod to finish file system resize of volume on node.
129 | status: "True"
130 | type: FileSystemResizePending
131 | phase: Bound
132 | ```
133 |
134 | #### 5. Restart the Pod:
135 |
136 | To restart:
137 |
138 | ```bash
139 | kubectl delete -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/volexpansion/pod-block.yaml
140 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/volexpansion/pod-block.yaml
141 | ```
142 | Verify the PVC is resized successfully after Pod is running:
143 | ```bash
144 |
145 | # kubectl get pvc pure-claim-block
146 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
147 | pure-claim-block Bound pvc-b621957b-2828-4b75-a737-251916c05cb6 20Gi RWO pure-block 2m46s
148 | ```
149 |
150 | ### FlashBlade StorageClass "pure-file" volume expansion
151 |
152 | The procedure should be exactly the same as FlashArray StorageClass `pure-block` volume expansion, except no Pod (re-)start is required.
153 | The PV and PVC should show the updated size immediately.
154 |
155 | #### 1. Ensure `allowVolumeExpansion` is set to `true` in `pure-file` StorageClass:
156 |
157 | To ensure that the StorageClass has the correct setting, run the following command:
158 |
159 | ```bash
160 | kubectl patch sc pure-file --type='json' -p='[{"op": "add", "path": "/allowVolumeExpansion", "value": true }]'
161 | ```
162 |
163 | #### 2. Create a PVC:
164 |
165 | ```bash
166 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/volexpansion/pvc-file.yaml
167 | ```
168 |
169 | #### 3. Start a Pod to use the PVC:
170 |
171 | ```bash
172 | kubectl apply -f https://raw.githubusercontent.com/purestorage/helm-charts/master/docs/examples/volexpansion/pod-file.yaml
173 | ```
174 |
175 | #### 4. Expand the PVC:
176 |
177 | ```bash
178 | kubectl patch pvc pure-claim-file -p='{"spec": {"resources": {"requests": {"storage": "20Gi"}}}}'
179 | ```
180 |
181 | Check that both the PV and PVC are immediately expanded. No pod restart is required.
182 |
183 | ```bash
184 | # kubectl get pvc pure-claim-file
185 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
186 | pure-claim-file Bound pvc-2ba56b33-3412-2965-f4e4-983de21ba772 20Gi RWO pure-file 25s
187 | # kubectl get pv
188 | NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
189 | pvc-2ba56b33-3412-2965-f4e4-983de21ba772 20Gi RWO Delete Bound default/pure-claim-file pure-file 27s
190 | ```
191 |
192 |
--------------------------------------------------------------------------------
/docs/csi-volume-import.md:
--------------------------------------------------------------------------------
1 | # Import volumes into Kubernetes
2 |
3 | ## Use Cases
4 | - Reinstall Kubernetes or migrate volumes from one Kubernetes cluster to another
5 | - Disaster recovery from a backup volume
6 | - Containerize legacy applications
7 |
8 | ## Dependencies
9 | * Kubernetes v1.15+ (lower versions might work but have not been tested)
10 | * Pure CSI driver 5.2.0+
11 | * Supports both FA (raw block and filesystem) and FB (filesystem)
12 |
13 | ## Import Guidance
14 | The beauty of the volume import feature is that it requires no changes or additional annotations to persistent volume objects. Take the scenario of migrating volumes from one Kubernetes cluster to another: users can export persistent volume objects (and persistent volume claim objects) from the old cluster and deploy them to a new Kubernetes cluster with no additional changes.
15 |
16 | ```diff
17 | -Caution: important notes about reclaim policy
18 | ```
19 | >Both Delete and Retain reclaim policies are supported on imported volumes, where the reclaim policy is configured in persistent volume object. If users delete a persistent volume claim and the corresponding persistent volume has `Delete` as reclaim policy, both the persistent volume object and backend volume will be deleted automatically.
20 | >
21 | >If users delete a persistent volume (PV) object before deleting the associated persistent volume claim (PVC) the backend volume will *NOT* be deleted regardless of the reclaim policy setting for the PV. This is consistent with the behaviour of any dynamically provisioned volume.
22 |
23 | If users want to import a volume that was created outside of Kubernetes, persistent volume and persistent volume claim objects can be manually created using the following steps:
24 |
25 | 1. Create and deploy a persistent volume object with `volumeHandle` configured to be the name of the volume in the backend, and `claimRef` to be the name of the persistent volume claim of your choice.
26 |
27 | Here is an example, but more examples can be found at: [examples/volumeimport](./examples/volumeimport)
28 | ```yaml
29 | apiVersion: v1
30 | kind: PersistentVolume
31 | metadata:
32 | annotations:
33 | pv.kubernetes.io/provisioned-by: pure-csi
34 | name: pv-import
35 | spec:
36 | accessModes:
37 | - ReadWriteOnce
38 | capacity:
39 | storage: 1Gi
40 | claimRef:
41 | apiVersion: v1
42 | kind: PersistentVolumeClaim
43 | # TODO: change to the PVC you want to bind this PV.
44 | # If you don't pre-bind PVC here, the PV might be automatically bound to a PVC by scheduler.
45 | name: pvc-import
46 | # Namespace of the PVC
47 | namespace: default
48 | csi:
49 | driver: pure-csi
50 | # TODO: change to the volume name in backend.
51 | # Volume with any name that exists in backend can be imported, and will not be renamed.
52 | volumeHandle: ttt-pvc-a90d7d5f-da6c-44db-a306-a4cc122f9dd3
53 | volumeAttributes:
54 | backend: file
55 | # TODO: configure your desired reclaim policy,
56 | # Use Retain if you don't want your volume to get deleted when the PV is deleted.
57 | persistentVolumeReclaimPolicy: Delete
58 | storageClassName: pure-file
59 | volumeMode: Filesystem
60 | ```
61 |
62 | 2. Create and deploy a persistent volume claim object with volumeName configured to the persistent volume created at step 1.
63 |
64 | Here is an example, but more examples can be found at: [examples/volumeimport](./examples/volumeimport)
65 |
66 | ```yaml
67 | apiVersion: "v1"
68 | kind: "PersistentVolumeClaim"
69 | metadata:
70 | name: pvc-import
71 | spec:
72 | accessModes:
73 | - "ReadWriteOnce"
74 | resources:
75 | requests:
76 | storage: "1Gi"
77 | # Note: These two fields are not required for pre-bound PV.
78 | # storageClassName: pure-block
79 | # volumeMode: Filesystem
80 |
81 | # TODO: Change to the name of the imported PV.
82 | volumeName: pv-import
83 | ```
84 |
85 | 3. Use the persistent volume claim.
86 |
87 | ```yaml
88 | apiVersion: v1
89 | kind: Pod
90 | metadata:
91 | name: nginx
92 | spec:
93 | # Specify a volume that uses the claim defined in pvc.yaml
94 | volumes:
95 | - name: pure-vol
96 | persistentVolumeClaim:
97 | claimName: pvc-import
98 | containers:
99 | - name: nginx
100 | image: nginx
101 |     # Configure a mount for the volume we defined above
102 | volumeMounts:
103 | - name: pure-vol
104 | mountPath: /data
105 | ports:
106 | - containerPort: 80
107 | ```
108 |
--------------------------------------------------------------------------------
/docs/custom-storageclasses.md:
--------------------------------------------------------------------------------
1 | # Using StorageClasses with Pure Service Orchestrator
2 |
3 | ***NOTE THAT THIS ONLY APPLIES TO THE CSI VERSION OF PSO***
4 |
5 | PSO creates persistent volumes within the federated storage pool of backend appliances defined using criteria defined in the `StorageClass` used to request the persistent volume.
6 |
7 | ## What is a `StorageClass`
8 |
9 | A `StorageClass` allows administrators to define various storage configurations to provide high availability, serve quality-of-service requirements, tune file system options, define backup policies, and more. PSO utilizes these storage classes to decide where to provision volumes and how to make the file system.
10 |
11 | For more information on `StorageClass` go [here](https://kubernetes.io/docs/concepts/storage/storage-classes/)
12 |
13 | ## Provided Storage Classes
14 |
15 | As part of the installation of PSO, three storage classes are created which can be used to simply create persistent volumes on a file or a block backend device within the federated storage pool using all the default settings PSO has provided.
16 |
17 | Two of these are called `pure-file` and `pure-block` and, as their names suggest, they will create persistent volumes from the block providing or file providing backends within the federated pool of backend devices. At all times the load-balancing algorithm within PSO will ensure that the persistent volume is created on the most appropriate backend given the block or file requirement, even if there are multiple block or file providing appliances in the pool.
18 |
19 | The third `StorageClass` is simply called `pure`. This is provided primarily as a legacy class for backwards compatibility with early PSO releases. By default this storage class uses block storage appliances to provision block-based persistent volumes; however, this can be modified in the PSO configuration `values.yaml`.
20 |
21 | ## Default `StorageClass`
22 |
23 | Within the PSO configuration file is a setting to enable the `pure` `StorageClass` as the default class for the Kubernetes cluster. To enable `pure` as the default class set the following parameter in `values.yaml`
24 |
25 | ```yaml
26 | storageClass:
27 | isPureDefault: true
28 | ```
29 |
30 | As mentioned above, the `pure` class uses block-based backend appliances to provision persistent volumes from. It is possible to change this default setting to use file-backed appliances from the pool.
31 |
32 | You may wish to do this if you only have FlashBlades in your PSO configuration, as these can only provide file-based persistent volumes.
33 |
34 | To change the backend type used by the `pure` `StorageClass` to file rather than block, change the following line within the values.yaml configuration file:
35 |
36 | ```yaml
37 | pureBackend: file  # located under the storageClass: section of values.yaml
38 | ```
39 |
40 | ## Creating your own Storage Classes for PSO to use
41 |
42 | With the increasing options available to configure the persistent volumes supported by PSO it may be necessary to create additional storage classes that will use PSO to provide specific configurations of persistent volume.
43 |
44 | For example, if you wish to provide raw block volumes (shortly to be supported by PSO) then you will need to request these from a `StorageClass` that knows how to do this. Alternatively, you may have a requirement for some block-based persistent volumes to be formatted with the `btrfs` file system rather than the default `xfs` file system provided by PSO through the `pure-block` `StorageClass`, or the default Kubernetes filesystem of `ext4`.
45 |
46 | [**NOTE:** Pure does not recommend using `ext4` as a filesystem for persistent volumes in containers]
47 |
48 | With the addition of [per volume filesystem options](csi-filesystem-options.md), the ability to use a different `StorageClass` for different requirements becomes critical.
49 |
50 | To create a StorageClass that will use PSO, use the following template, modifying as required for your custom storage class.
51 |
52 | ```yaml
53 | kind: StorageClass
54 | apiVersion: storage.k8s.io/v1
55 | metadata:
56 | name:
57 | labels:
58 | kubernetes.io/cluster-service: "true"
59 | provisioner: pure-csi
60 | parameters:
61 | ```
62 |
63 | The `parameters` section is where you add your own custom settings; an illustrative example follows below. More details of some of the options available in the parameters section can be found [here](csi-filesystem-options.md)
64 |
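As an illustrative example, the `btrfs` case mentioned above could be expressed with the per-volume filesystem options like this (the class name is hypothetical, and the worker nodes must have the btrfs utilities installed):

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: pure-block-btrfs   # hypothetical name
  labels:
    kubernetes.io/cluster-service: "true"
provisioner: pure-csi
parameters:
  backend: block
  csi.storage.k8s.io/fstype: btrfs
```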
65 | Once you have created your `StorageClass` definition in a YAML file, create it in Kubernetes using the command
66 |
67 | ```bash
68 | kubectl apply -f <filename>.yaml
69 | ```
70 |
71 | If you wish to ensure your new `StorageClass` is the default class then add the following into the metadata section of the definition:
72 |
73 | ```yaml
74 | annotations:
75 |     storageclass.kubernetes.io/is-default-class: "true"
76 | ```
77 |
78 | **NOTE:** If a `StorageClass` is already flagged as default, your new `StorageClass` will not take its place; instead you will have multiple default storage classes. In this case Kubernetes will completely ignore the default flags and you may fail to create the persistent volumes you expected.
79 |
80 |
--------------------------------------------------------------------------------
/docs/examples/clone/clone.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: clone-of-pure-claim
5 | spec:
6 | accessModes:
7 | - ReadWriteOnce
8 | storageClassName: pure
9 | resources:
10 | requests:
11 | storage: 10Gi
12 | dataSource:
13 | kind: PersistentVolumeClaim
14 | name: pure-claim
15 |
--------------------------------------------------------------------------------
/docs/examples/clone/pvc.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | # Referenced in pod.yaml for the volume spec
5 | name: pure-claim
6 | spec:
7 | accessModes:
8 | - ReadWriteOnce
9 | resources:
10 | requests:
11 | storage: 10Gi
12 | # Matches the name defined in deployment/storageclass.yaml
13 | storageClassName: pure
14 |
--------------------------------------------------------------------------------
/docs/examples/fsoptions/pure-block-xfs.yaml:
--------------------------------------------------------------------------------
1 | kind: StorageClass
2 | apiVersion: storage.k8s.io/v1
3 | metadata:
4 | name: pure-block-xfs
5 | labels:
6 | kubernetes.io/cluster-service: "true"
7 | provisioner: pure-csi
8 | parameters:
9 | backend: block
10 | csi.storage.k8s.io/fstype: xfs
11 | createoptions: -q
12 | mountOptions:
13 | - discard
14 |
--------------------------------------------------------------------------------
/docs/examples/fsoptions/pure-file-nfs.yaml:
--------------------------------------------------------------------------------
1 | kind: StorageClass
2 | apiVersion: storage.k8s.io/v1
3 | metadata:
4 | name: pure-file-nfs
5 | labels:
6 | kubernetes.io/cluster-service: "true"
7 | provisioner: pure-csi
8 | parameters:
9 | backend: file
10 | mountOptions:
11 | - nfsvers=3
12 | - tcp
13 |
--------------------------------------------------------------------------------
/docs/examples/rwx/pod-block-many.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx-raw-block-1
5 | labels:
6 | app: "nginx-raw-block"
7 | spec:
8 | # Specify a volume that uses the claim defined in pvc.yaml
9 | volumes:
10 | - name: pure-vol
11 | persistentVolumeClaim:
12 | claimName: pure-claim-raw-block
13 | containers:
14 | - name: nginx
15 | image: nginx
16 | # Configure a device mount for the volume we defined above
17 | volumeDevices:
18 | - name: pure-vol
19 | devicePath: /dev/pure-block-device
20 | ports:
21 | - containerPort: 80
22 | affinity:
23 | podAntiAffinity:
24 | requiredDuringSchedulingIgnoredDuringExecution:
25 | - labelSelector:
26 | matchExpressions:
27 | - key: app
28 | operator: In
29 | values:
30 | - "nginx-raw-block"
31 | topologyKey: "kubernetes.io/hostname"
32 | ---
33 | apiVersion: v1
34 | kind: Pod
35 | metadata:
36 | name: nginx-raw-block-2
37 | labels:
38 | app: "nginx-raw-block"
39 | spec:
40 | # Specify a volume that uses the claim defined in pvc.yaml
41 | volumes:
42 | - name: pure-vol
43 | persistentVolumeClaim:
44 | claimName: pure-claim-raw-block
45 | containers:
46 | - name: nginx
47 | image: nginx
48 | # Configure a device mount for the volume we defined above
49 | volumeDevices:
50 | - name: pure-vol
51 | devicePath: /dev/pure-block-device
52 | ports:
53 | - containerPort: 80
54 | affinity:
55 | podAntiAffinity:
56 | requiredDuringSchedulingIgnoredDuringExecution:
57 | - labelSelector:
58 | matchExpressions:
59 | - key: app
60 | operator: In
61 | values:
62 | - "nginx-raw-block"
63 | topologyKey: "kubernetes.io/hostname"
64 |
--------------------------------------------------------------------------------
/docs/examples/rwx/pod-file-many.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx-1
5 | labels:
6 | app: "nginx-file-many"
7 | spec:
8 | # Specify a volume that uses the claim defined in pvc.yaml
9 | volumes:
10 | - name: pure-vol
11 | persistentVolumeClaim:
12 | claimName: pure-claim
13 | containers:
14 | - name: nginx
15 | image: nginx
16 |       # Configure a mount for the volume we defined above
17 | volumeMounts:
18 | - name: pure-vol
19 | mountPath: /data
20 | ports:
21 | - containerPort: 80
22 | affinity:
23 | podAntiAffinity:
24 | requiredDuringSchedulingIgnoredDuringExecution:
25 | - labelSelector:
26 | matchExpressions:
27 | - key: app
28 | operator: In
29 | values:
30 | - "nginx-file-many"
31 | topologyKey: "kubernetes.io/hostname"
32 | ---
33 | apiVersion: v1
34 | kind: Pod
35 | metadata:
36 | name: nginx-2
37 | labels:
38 | app: "nginx-file-many"
39 | spec:
40 | # Specify a volume that uses the claim defined in pvc.yaml
41 | volumes:
42 | - name: pure-vol
43 | persistentVolumeClaim:
44 | claimName: pure-claim
45 | containers:
46 | - name: nginx
47 | image: nginx
48 |       # Configure a mount for the volume we defined above
49 | volumeMounts:
50 | - name: pure-vol
51 | mountPath: /data
52 | ports:
53 | - containerPort: 80
54 | affinity:
55 | podAntiAffinity:
56 | requiredDuringSchedulingIgnoredDuringExecution:
57 | - labelSelector:
58 | matchExpressions:
59 | - key: app
60 | operator: In
61 | values:
62 | - "nginx-file-many"
63 | topologyKey: "kubernetes.io/hostname"
64 |
--------------------------------------------------------------------------------
/docs/examples/rwx/pvc-block-many.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | # Referenced in pod.yaml for the volume spec
5 | name: pure-claim-raw-block
6 | spec:
7 | # This specifically is what allows the PVC to be used as a raw block device
8 | volumeMode: Block
9 | accessModes:
10 | - ReadWriteMany
11 | resources:
12 | requests:
13 | storage: 10Gi
14 | # Matches the name defined in deployment/storageclass.yaml
15 | storageClassName: pure-block
16 |
--------------------------------------------------------------------------------
/docs/examples/rwx/pvc-file-many.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | # Referenced in pod.yaml for the volume spec
5 | name: pure-claim
6 | spec:
7 | accessModes:
8 | - ReadWriteMany
9 | resources:
10 | requests:
11 | storage: 10Gi
12 | # Matches the name defined in deployment/storageclass.yaml
13 | storageClassName: pure-file
14 |
--------------------------------------------------------------------------------
/docs/examples/snapshot/pvc.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | # Referenced in pod.yaml for the volume spec
5 | name: pure-claim
6 | spec:
7 | accessModes:
8 | - ReadWriteOnce
9 | resources:
10 | requests:
11 | storage: 10Gi
12 | # Matches the name defined in deployment/storageclass.yaml
13 | storageClassName: pure
14 |
--------------------------------------------------------------------------------
/docs/examples/snapshot/restore-snapshot.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: pvc-restore-from-volumesnapshot-1
5 | spec:
6 | accessModes:
7 | - ReadWriteOnce
8 | resources:
9 | requests:
10 | storage: 10Gi
11 | storageClassName: pure-block
12 | dataSource:
13 | kind: VolumeSnapshot
14 | name: volumesnapshot-1
15 | apiGroup: snapshot.storage.k8s.io
16 |
--------------------------------------------------------------------------------
/docs/examples/snapshot/snapshot.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: snapshot.storage.k8s.io/v1alpha1
2 | kind: VolumeSnapshot
3 | metadata:
4 | name: volumesnapshot-1
5 | spec:
6 | snapshotClassName: pure-snapshotclass
7 | source:
8 | name: pure-claim
9 | kind: PersistentVolumeClaim
10 |
--------------------------------------------------------------------------------
/docs/examples/topology/pod-delay-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: pod-delay-binding
5 | spec:
6 | affinity:
7 | nodeAffinity:
8 | requiredDuringSchedulingIgnoredDuringExecution:
9 | nodeSelectorTerms:
10 | - matchExpressions:
11 | - key: topology.purestorage.com/region
12 | operator: In
13 | values:
14 | - region-0
15 | # Specify a volume that uses the claim defined in pvc.yaml
16 | volumes:
17 | - name: pure-vol
18 | persistentVolumeClaim:
19 | claimName: pure-delay-binding
20 | containers:
21 | - name: nginx
22 | image: nginx
23 |     # Configure a mount for the volume we defined above
24 | volumeMounts:
25 | - name: pure-vol
26 | mountPath: /data
27 | ports:
28 | - containerPort: 80
29 |
--------------------------------------------------------------------------------
/docs/examples/topology/pure-block-delay-binding.yaml:
--------------------------------------------------------------------------------
1 | kind: StorageClass
2 | apiVersion: storage.k8s.io/v1
3 | metadata:
4 | name: pure-block-delay-binding
5 | labels:
6 | kubernetes.io/cluster-service: "true"
7 | chart: pure-csi
8 | release: "pure-storage-driver"
9 | provisioner: pure-csi
10 | volumeBindingMode: WaitForFirstConsumer
11 | parameters:
12 | backend: block
13 |
--------------------------------------------------------------------------------
/docs/examples/topology/pure-block-restrict-provisioning.yaml:
--------------------------------------------------------------------------------
1 | kind: StorageClass
2 | apiVersion: storage.k8s.io/v1
3 | metadata:
4 | name: pure-block-restrict-provisioning
5 | provisioner: pure-csi
6 | parameters:
7 | backend: block
8 | allowedTopologies:
9 | - matchLabelExpressions:
10 | - key: topology.purestorage.com/rack
11 | values:
12 | - rack-0
13 | - rack-1
14 |
--------------------------------------------------------------------------------
/docs/examples/topology/pvc-delay-binding.ymal:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | # Referenced in pod.yaml for the volume spec
5 | name: pure-delay-binding
6 | spec:
7 | accessModes:
8 | - ReadWriteOnce
9 | resources:
10 | requests:
11 | storage: 10Gi
12 | # Matches the name defined in deployment/storageclass.yaml
13 | storageClassName: pure-block-delay-binding
14 |
--------------------------------------------------------------------------------
/docs/examples/topology/statefulset-topology.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | name: web
5 | spec:
6 | replicas: 4
7 | selector:
8 | matchLabels:
9 | app: nginx
10 | serviceName: nginx
11 | template:
12 | metadata:
13 | labels:
14 | app: nginx
15 | spec:
16 | affinity:
17 | nodeAffinity:
18 | requiredDuringSchedulingIgnoredDuringExecution:
19 | nodeSelectorTerms:
20 |             - matchExpressions:
21 |               - key: topology.purestorage.com/region
22 |                 operator: In
23 |                 values:
24 |                 - region-0
25 |                 - region-1
26 |         podAntiAffinity:
27 |           requiredDuringSchedulingIgnoredDuringExecution:
28 |           - labelSelector:
29 |               matchExpressions:
30 |               - key: app
31 |                 operator: In
32 |                 values:
33 |                 - nginx
34 |             topologyKey: failure-domain.beta.kubernetes.io/zone
35 |       containers:
36 |       - name: nginx
37 |         image: gcr.io/google_containers/nginx-slim:0.8
38 |         ports:
39 |         - containerPort: 80
40 |           name: web
41 |         volumeMounts:
42 |         - name: www
43 |           mountPath: /usr/share/nginx/html
44 |         - name: logs
45 |           mountPath: /logs
46 |   volumeClaimTemplates:
47 |   - metadata:
48 |       name: www
49 |     spec:
50 |       accessModes: [ "ReadWriteOnce" ]
51 |       storageClassName: pure-block-delay-binding
52 |       resources:
53 |         requests:
54 |           storage: 5Gi
55 |   - metadata:
56 |       name: logs
57 |     spec:
58 |       accessModes: [ "ReadWriteOnce" ]
59 |       storageClassName: pure-block-delay-binding
60 |       resources:
61 |         requests:
62 |           storage: 1Gi
63 |
--------------------------------------------------------------------------------
/docs/examples/volexpansion/pod-block.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx
5 | spec:
6 | volumes:
7 | - name: pure-vol
8 | persistentVolumeClaim:
9 | claimName: pure-claim-block
10 | containers:
11 | - name: nginx
12 | image: nginx
13 |     # Configure a mount for the volume we defined above
14 | volumeMounts:
15 | - name: pure-vol
16 | mountPath: /data
17 | ports:
18 | - containerPort: 80
19 |
--------------------------------------------------------------------------------
/docs/examples/volexpansion/pod-file.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx-file
5 | spec:
6 | volumes:
7 | - name: pure-vol
8 | persistentVolumeClaim:
9 | claimName: pure-claim-file
10 | containers:
11 | - name: nginx
12 | image: nginx
13 | volumeMounts:
14 | - name: pure-vol
15 | mountPath: /data
16 | ports:
17 | - containerPort: 80
18 |
--------------------------------------------------------------------------------
/docs/examples/volexpansion/pvc-block.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | # Referenced in pod.yaml for the volume spec
5 | name: pure-claim-block
6 | spec:
7 | accessModes:
8 | - ReadWriteOnce
9 | resources:
10 | requests:
11 | storage: 10Gi
12 | storageClassName: pure-block
13 |
--------------------------------------------------------------------------------
/docs/examples/volexpansion/pvc-file.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: pure-claim-file
5 | spec:
6 | accessModes:
7 | - ReadWriteOnce
8 | resources:
9 | requests:
10 | storage: 10Gi
11 | storageClassName: pure-file
12 |
--------------------------------------------------------------------------------
/docs/examples/volumeimport/pod-raw.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx-raw
5 | spec:
6 | # Specify a volume that uses the claim defined in pvc.yaml
7 | volumes:
8 | - name: pure-vol
9 | persistentVolumeClaim:
10 | claimName: pvc-import
11 | containers:
12 | - name: nginx
13 | image: nginx
14 | # Configure a device mount for the volume we defined above
15 | volumeDevices:
16 | - name: pure-vol
17 | devicePath: /dev/pure-block-device
18 | ports:
19 | - containerPort: 80
20 |
--------------------------------------------------------------------------------
/docs/examples/volumeimport/pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx
5 | spec:
6 | # Specify a volume that uses the claim defined in pvc.yaml
7 | volumes:
8 | - name: pure-vol
9 | persistentVolumeClaim:
10 | claimName: pvc-import
11 | containers:
12 | - name: nginx
13 | image: nginx
14 |     # Configure a mount for the volume we defined above
15 | volumeMounts:
16 | - name: pure-vol
17 | mountPath: /data
18 | ports:
19 | - containerPort: 80
20 |
--------------------------------------------------------------------------------
/docs/examples/volumeimport/pv-import-block.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | annotations:
5 | pv.kubernetes.io/provisioned-by: pure-csi
6 | name: pv-import
7 | spec:
8 | accessModes:
9 | - ReadWriteOnce
10 | capacity:
11 | storage: 1Gi
12 | claimRef:
13 | apiVersion: v1
14 | kind: PersistentVolumeClaim
15 |     # TODO: change to the PVC you want to bind this PV to.
16 |     # If you don't pre-bind a PVC here, the PV might be automatically bound to a PVC by the scheduler.
17 | name: pvc-import
18 | # Namespace of the PVC
19 | namespace: default
20 | csi:
21 | driver: pure-csi
22 |     # TODO: change to the name of the volume on the backend.
23 |     # A volume with any name that exists on the backend can be imported; it will not be renamed.
24 | volumeHandle: ns03276-pvc-2031faf1-8348-4ac8-9737-1a0a9989cad7
25 | volumeAttributes:
26 | backend: block
27 | # TODO: configure your desired reclaim policy,
28 | # Use Retain if you don't want your volume to get deleted when the PV is deleted.
29 | persistentVolumeReclaimPolicy: Delete
30 | storageClassName: pure-block
31 | volumeMode: Filesystem
32 |
--------------------------------------------------------------------------------
/docs/examples/volumeimport/pv-import-file.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | annotations:
5 | pv.kubernetes.io/provisioned-by: pure-csi
6 | name: pv-import
7 | spec:
8 | accessModes:
9 | - ReadWriteOnce
10 | capacity:
11 | storage: 1Gi
12 | claimRef:
13 | apiVersion: v1
14 | kind: PersistentVolumeClaim
15 |     # TODO: change to the PVC you want to bind this PV to.
16 |     # If you don't pre-bind a PVC here, the PV might be automatically bound to a PVC by the scheduler.
17 | name: pvc-import
18 | # Namespace of the PVC
19 | namespace: default
20 | csi:
21 | driver: pure-csi
22 |     # TODO: change to the name of the volume on the backend.
23 |     # A volume with any name that exists on the backend can be imported; it will not be renamed.
24 | volumeHandle: ttt-pvc-a90d7d5f-da6c-44db-a306-a4cc122f9dd3
25 | volumeAttributes:
26 | backend: file
27 | # TODO: configure your desired reclaim policy,
28 | # Use Retain if you don't want your volume to get deleted when the PV is deleted.
29 | persistentVolumeReclaimPolicy: Delete
30 | storageClassName: pure-file
31 | volumeMode: Filesystem
32 |
--------------------------------------------------------------------------------
/docs/examples/volumeimport/pv-import-raw.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | annotations:
5 | pv.kubernetes.io/provisioned-by: pure-csi
6 | name: pv-import
7 | spec:
8 | accessModes:
9 | - ReadWriteOnce
10 | capacity:
11 | storage: 1Gi
12 | claimRef:
13 | apiVersion: v1
14 | kind: PersistentVolumeClaim
15 |     # TODO: change to the PVC you want to bind this PV to.
16 |     # If you don't pre-bind a PVC here, the PV might be automatically bound to a PVC by the scheduler.
17 | name: pvc-import
18 | # Namespace of the PVC
19 | namespace: default
20 | csi:
21 | driver: pure-csi
22 |     # TODO: change to the name of the volume on the backend.
23 |     # A volume with any name that exists on the backend can be imported; it will not be renamed.
24 | volumeHandle: ns04132-pvc-540d6142-2e86-45ba-939d-c0be5d8fd335
25 | volumeAttributes:
26 | backend: block
27 | # TODO: configure your desired reclaim policy,
28 | # Use Retain if you don't want your volume to get deleted when the PV is deleted.
29 | persistentVolumeReclaimPolicy: Delete
30 | storageClassName: pure-block
31 | volumeMode: Block
32 |
--------------------------------------------------------------------------------
/docs/examples/volumeimport/pvc-import.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: "v1"
2 | kind: "PersistentVolumeClaim"
3 | metadata:
4 | name: pvc-import
5 | spec:
6 | accessModes:
7 | - "ReadWriteOnce"
8 | resources:
9 | requests:
10 | storage: "1Gi"
11 |   # Note: These two fields are not required for a pre-bound PV.
12 | # storageClassName: pure-block
13 | # volumeMode: Filesystem
14 |
15 | # TODO: Change to the name of the imported PV.
16 | volumeName: pv-import
17 |
--------------------------------------------------------------------------------
/docs/flex-csi-upgrade.md:
--------------------------------------------------------------------------------
1 | # Upgrading from PSO FlexDriver to PSO CSI
2 |
3 | ## Introduction
4 |
5 | With the deprecation of FlexDriver by the Kubernetes community, all external persistent storage volumes should now be managed by a CSI driver.
6 |
7 | Unfortunately, there is no seamless way to migrate a volume managed by a FlexDriver to being managed by a CSI driver and the PSO FlexDriver cannot be run in parallel with the PSO CSI driver.
8 |
9 | This document provides one strategy to migrate volumes created and managed by the PSO FlexDriver to management by the PSO CSI driver.
10 |
11 | Note that this requires all access to the persistent volumes to be stopped whilst this migration takes place.
12 |
13 | ## Scale Down Applications
14 |
15 | The first phase of the upgrade is to scale down all your deployments and statefulsets to zero, to ensure that no PVCs or PVs are being accessed by applications.
16 |
17 | Use the `kubectl scale --replicas=0` command to perform this.
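A minimal sketch of this (the `my-app` namespace is hypothetical; note the current replica counts first so they can be restored later):

```bash
# Record the current replica counts for the final scale-up phase
kubectl get deployments,statefulsets -n my-app

# Scale every deployment and statefulset in the namespace down to zero
kubectl scale deployments --all --replicas=0 -n my-app
kubectl scale statefulsets --all --replicas=0 -n my-app
```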
18 |
19 | ## Upgrade to PSO CSI Driver
20 |
21 | The second phase of the migration process is to upgrade the PSO driver from the Flex version to the CSI version.
22 |
23 | During this upgrade all existing persistent volumes and volume claims are unaffected.
24 |
25 | First, uninstall the PSO FlexDriver by running the `helm delete` command. If you have installed the FlexDriver using the Operator then follow the process [here](../operator-k8s-plugin#uninstall) to uninstall.
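For example, if the FlexDriver release was installed with Helm 2 under the (hypothetical) release name `pure-storage-driver`:

```bash
helm delete --purge pure-storage-driver
```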
26 |
27 | Secondly, install the PSO CSI driver using the instructions provided [here](../pure-csi#how-to-install). Note that this procedure requires Helm 3.
28 |
29 | At this point the PSO driver has been upgraded to use CSI and all new persistent volumes created will be managed by the CSI process.
30 |
31 | All volumes managed by the uninstalled FlexDriver process will still exist, but cannot, at this point, be managed by the CSI process.
32 |
33 | ## Migrating Flex PVs to CSI control
34 |
35 | **This process requires that the PSO CSI version installed is a minimum of 5.2.0**
36 |
37 | ### Identify all existing FlexDriver-controlled persistent volumes
38 |
39 | You can determine whether a PV is FlexDriver-controlled by using the command:
40 |
41 | ```bash
42 | kubectl get pv -o json | jq -j '.items[] | "PV: \(.metadata.name), Driver: \(.spec.flexVolume.driver), PVC: \(.spec.claimRef.name), Namespace: \(.spec.claimRef.namespace)\n"'
43 | ```
44 | Persistent volumes where the driver equals `pure/flex` need to be migrated to CSI control.
45 |
46 | Once a PV has been identified as requiring migration, run the following command against it:
47 |
48 | ```bash
49 | kubectl patch pv <pv-name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
50 | ```
51 |
52 | ### Delete the PVC and PV (don't panic!!)
53 |
54 | **Before proceeding keep a record of which PVC was bound to which PV - you will need this later in the process**
55 |
56 | * Delete the associated PVC and notice that the associated PV is not deleted
57 |
58 | * Manually delete the PV
59 |
60 | These actions will leave the underlying volume on the backend storage ready for import.
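For example (placeholder names):

```bash
# Delete the PVC; the PV survives because its reclaim policy is now Retain
kubectl delete pvc <pvc-name> -n <namespace>

# Delete the PV object itself; the backend volume is left untouched
kubectl delete pv <pv-name>
```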
61 |
62 | ### Importing the backend volume into CSI control
63 |
64 | Now that the PVC and PV have been deleted from Kubernetes, it is necessary to import the underlying volume on the backend back into Kubernetes control, but using the CSI driver.
65 |
66 | To achieve this use the volume import facility in PSO as documented [here](./csi-volume-import.md).
67 |
68 | * In this step the `volumeHandle` referenced will be the PV name prefixed by the Pure namespace defined for your PSO installation, plus a hyphen. The Pure namespace setting is available in the PSO installation `values.yaml`.
69 |
70 | For example:
71 |
72 | A PV called `pvc-70c5a426-c704-4478-b034-2d233ec673bc` and a Pure namespace of `k8s` will require the `volumeHandle` to be `k8s-pvc-70c5a426-c704-4478-b034-2d233ec673bc`.
73 |
74 | * The `name` setting in `claimRef` must match the PVC name linked to the PV name you are importing. **Reference the record of these you obtained earlier**.
75 |
76 | * Finally, ensure that the `persistentVolumeReclaimPolicy` is set to `Delete`. This will ensure that when the time comes for the PV to be deleted, the CSI driver will correctly delete the backend volume. A minimal import definition is sketched below.
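Putting these three settings together, a minimal sketch of the import PV (names are illustrative; the full set of fields is in the volume import document linked above):

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pvc-70c5a426-c704-4478-b034-2d233ec673bc
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 10Gi
  claimRef:
    apiVersion: v1
    kind: PersistentVolumeClaim
    # The PVC that was bound to this PV before the migration
    name: my-flex-claim
    namespace: default
  csi:
    driver: pure-csi
    # <pure namespace>-<PV name>, here assuming a Pure namespace of "k8s"
    volumeHandle: k8s-pvc-70c5a426-c704-4478-b034-2d233ec673bc
    volumeAttributes:
      backend: block
  persistentVolumeReclaimPolicy: Delete
  storageClassName: pure-block
  volumeMode: Filesystem
```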
77 |
78 | ## Scale Up Applications
79 |
80 | The final phase of the upgrade is to scale up all your deployments and statefulsets to their original replica size.
81 |
82 | Use the `kubectl scale --replicas=<original-count>` command to perform this.
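For example, restoring a hypothetical deployment to its recorded replica count:

```bash
kubectl scale deployment my-app --replicas=3 -n my-app
```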
83 |
--------------------------------------------------------------------------------
/docs/flex-snapshot-for-flasharray.md:
--------------------------------------------------------------------------------
1 |
2 | # Using FlashArray Snapshots with Kubernetes / OpenShift
3 |
4 | ## Introduction
5 |
6 | The Pure Service Orchestrator Kubernetes FlexVolume driver integration includes support for FlashArray snapshots. This allows Kubernetes end users to capture point-in-time copies of their FlashArray backed persistent volume claims, and mount those copies in other Kubernetes Pods. This enables several use cases, some of which are:
7 |
8 | 1. Test / Develop against copies of production data quickly (no need to copy large amounts of data)
9 | 2. Backup/Restore production volumes.
10 |
11 | The Kubernetes native snapshot API is not yet available and is currently under development. Pure's snapshot integration, therefore, is an extra command-line tool that enables developers to manage snapshots. See the examples below.
12 |
13 | ### The "snapshot" CLI
14 | The snapshot CLI has the following format:
15 | ```
16 | snapshot create -n <namespace> <pvc-name>
17 | ```
18 | _Inputs:_
19 | `namespace`: Kubernetes namespace in which the PVC is created.
20 | `pvc-name`: Name of the PVC whose backend volumes you need to snapshot.
21 |
22 | _Output:_
23 | Name of the snapshot.
24 | ```
25 | snapshot delete <snapshotname>
26 | ```
27 | _Inputs:_
28 | `snapshotname` : String returned from the output of `snapshot create` command.
29 |
30 | _Output:_
31 | None.
32 | Exit code 0 means success, otherwise you will see an error message.
33 |
34 | ### Running the snapshot CLI
35 |
36 | The snapshot CLI is deployed as a binary in Pure's dynamic provisioner pod. You do not need to download this binary to your computer. Instead you will use `kubectl exec` to run this binary.
37 | To create a snapshot you must get the name of the pure provisioner pod in the Kubernetes cluster. This name is randomly generated by Kubernetes and can be retrieved by running:
38 | ```
39 | # kubectl get -o name -l app=pure-provisioner pod | cut -d/ -f2
40 | ```
41 | Now you can execute the snapshot command using the `<pod-name>` just discovered:
42 | ```
43 | # kubectl exec <pod-name> -- snapshot create -n <namespace> <pvc-name>
44 | ```
45 | To delete a snapshot use the following command:
46 | ```
47 | # kubectl exec <pod-name> -- snapshot delete <snapshotname>
48 | ```
49 |
50 | ### Examples
51 |
52 | #### Creating snapshots
53 | ```
54 | # kubectl exec pure-provisioner-6d9878fd47-wp41b -- snapshot create -n k8s_ns1 pvc1
55 | ```
56 | Output : `k8s-pvc-b9dd0972-c8b3-11e7-9ee8-fa163eb1e272.883661`
57 |
58 | where `k8s-pvc-b9dd0972-c8b3-11e7-9ee8-fa163eb1e272.883661` is the new snapshot name.
59 |
60 | #### Deleting snapshots
61 | ```
62 | # kubectl exec pure-provisioner-6d9878fd47-wp41b -- snapshot delete k8s-pvc-b9dd0972-c8b3-11e7-9ee8-fa163eb1e272.883661
63 | ```
64 |
65 | ### Example workflow
66 |
67 | **Step 1** : Running your app with data on a FlashArray volume
68 |
69 | There are 2 steps to run an app with data on a FlashArray volume:
70 |
71 | 1. **Create persistent volume claim**
72 |
73 | Example of a FlashArray PVC yaml file (e.g. pvc-fa.yaml):
74 | ```
75 | kind: PersistentVolumeClaim
76 | apiVersion: v1
77 | metadata:
78 | # Referenced in pod.yaml for the volume spec
79 | name: pure-fa-claim
80 | spec:
81 | accessModes:
82 | - ReadWriteOnce
83 | resources:
84 | requests:
85 | storage: 10Gi
86 | # Matches the name defined in deployment/storageclass.yaml
87 | storageClassName: pure-block
88 | ```
89 | Create the PVC and make sure it is bound
90 | ```
91 | # kubectl create -f pvc-fa.yaml
92 | # kubectl get pvc pure-fa-claim
93 | ```
94 | 2. **Create app with the PVC**
95 |
96 | Example of a yaml file for app (e.g. nginx) with PVC (nginx.yaml)
97 | ```
98 | apiVersion: v1
99 | kind: Pod
100 | metadata:
101 | name: nginx
102 | namespace: default
103 | spec:
104 | # Specify a volume that uses the claim defined in pvc-fa.yaml
105 | volumes:
106 | - name: pure-vol
107 | persistentVolumeClaim:
108 | claimName: pure-fa-claim
109 | containers:
110 | - name: nginx
111 | image: nginx
112 |     # Configure a mount for the volume we defined above
113 | volumeMounts:
114 | - name: pure-vol
115 | mountPath: /data
116 | ports:
117 | - containerPort: 80
118 | ```
119 | Create an app of nginx:
120 | ```
121 | # kubectl create -f nginx.yaml
122 | ```
123 | **Step 2** : Creating a snapshot of your data
124 | ```
125 | # kubectl exec pure-provisioner-6d9878fd47-wp41b -- snapshot create -n default pure-fa-claim
126 | ```
127 | Output for success: (snapshot name)
128 | ```
129 | k8s-pvc-b9dd0972-c8b3-11e7-9ee8-fa163eb1e272.883661
130 | ```
131 | **Step 3** : Mounting the snapshot volume
132 |
133 | There are 2 steps to mount the snapshot volume:
134 |
135 | 1. **Create a FlashArray volume (PVC) from a snapshot**
136 |
137 | Example of a yaml file (e.g. pure-fa-snapshot-pvc.yaml)
138 | ```
139 | kind: PersistentVolumeClaim
140 | apiVersion: v1
141 | metadata:
142 | # Referenced in nginx-snapshot.yaml for the volume spec
143 | name: pure-fa-snapshot-claim
144 | annotations:
145 | snapshot.beta.purestorage.com/name: k8s-pvc-b9dd0972-c8b3-11e7-9ee8-fa163eb1e272.883661
146 | spec:
147 | accessModes:
148 | - ReadWriteOnce
149 |   # storage size must be exactly the same as the snapshot's
150 | resources:
151 | requests:
152 | storage: 10Gi
153 | # Matches the name defined in deployment/storageclass.yaml
154 | storageClassName: pure-block
155 | ```
156 | Create PVC and make sure it is bound
157 | ```
158 | # kubectl create -f pure-fa-snapshot-pvc.yaml
159 | # kubectl get pvc pure-fa-snapshot-claim
160 | ```
161 | 2. **Mount the snapshot volume into an app**
162 |
163 | Example of a yaml file (e.g nginx-snapshot.yaml) for an app with snapshot volume (PVC)
164 | ```
165 | apiVersion: v1
166 | kind: Pod
167 | metadata:
168 | name: nginx-snapshot
169 | namespace: default
170 | spec:
171 | # Specify a volume that uses the claim defined in pure-fa-snapshot-pvc.yaml
172 | volumes:
173 | - name: pure-vol-snapshot
174 | persistentVolumeClaim:
175 | claimName: pure-fa-snapshot-claim
176 | containers:
177 | - name: nginx-snapshot
178 | image: nginx
179 |     # Configure a mount for the volume we defined above
180 | volumeMounts:
181 | - name: pure-vol-snapshot
182 | mountPath: /data
183 | ports:
184 | - containerPort: 80
185 | ```
186 | Create nginx app
187 | ```
188 | # kubectl create -f nginx-snapshot.yaml
189 | ```
190 | **Step 4** : Cleaning up the snapshot volume
191 |
192 | There are 2 steps to clean up the snapshot volume:
193 |
194 | 1. **Delete the app which mounts the snapshot volume**
195 | ```
196 | # kubectl delete pod nginx-snapshot
197 | ```
198 | 2. **Delete the snapshot volume**
199 | ```
200 | # kubectl delete pvc pure-fa-snapshot-claim
201 | ```
202 | **Step 5** : Cleaning up the snapshot
203 | ```
204 | # kubectl exec pure-provisioner-6d9878fd47-wp41b -- snapshot delete k8s-pvc-b9dd0972-c8b3-11e7-9ee8-fa163eb1e272.883661
205 | ```
206 |
207 |
208 | **Notes:**
209 |
210 | 1. _Application consistency:_
211 | The snapshot CLI does not have any app consistency functionality. If an application consistent snapshot is needed, the application pods need to be frozen/quiesced from an IO perspective before the snapshot CLI is called. The application then needs to be unquiesced after the snapshot CLI has been used.
212 |
213 | 2. _Migration to the native Kubernetes snapshot API:_
214 | After Kubernetes releases native snapshot support, Pure will provide a non-disruptive path to migration from the current snapshot CLI to the native Kubernetes interface.
215 |
--------------------------------------------------------------------------------
/docs/flex-volume-using-labels.md:
--------------------------------------------------------------------------------
1 | # Using Labels with the FlexVolume Driver
2 |
3 | ## Introduction
4 | The Pure Service Orchestrator Kubernetes FlexVolume driver includes the capability to provision volumes with storage backend and node requirements using **label selectors**. Labels can be added to each FlashArray or FlashBlade with the
5 | main `values.yaml` configuration file.
6 | These **labels** are completely customizable and can be any key-value-pair required by the cluster administrator.
7 | There can be as many labels as required, there can be no labels, and different arrays can share the same labels. The world is your oyster!
8 |
9 | ## Example configuration
10 | Here is an example of how to configure **labels** in a `values.yaml` configuration file for two FlashArrays:
11 | ```yaml
12 | arrays:
13 | FlashArrays:
14 | - MgmtEndPoint: "xx.xx.xx.xx"
15 | APIToken: "3863412d-c8c9-64be-e7f5-1ef8b4d7b221"
16 | Labels:
17 | rack: 33
18 | user: prod
19 | - MgmtEndPoint: "yy.yy.yy.yy"
20 | APIToken: "e0770d27-adfd-a46b-42fa-0c3ebb5e4356"
21 | Labels:
22 | rack: 34
23 | user: prod
24 | ```
25 |
26 | In this example we can see that each array has two labels. One label, `user`, is common to the two arrays, and the other, `rack`, is unique to each array.
27 |
28 | ## Using **labels**
29 |
30 | The `label` definition can be used by selectors in your persistent volume claim (PVC) template. This can then be expanded to limit the
31 | worker nodes that can actually use these PVCs using the concept of node affinity. These constructs can help you manage the topology of your persistent storage.
32 |
33 | ### PV Topology and Affinity Control
34 | To create a PV on a specific array, or within a group of backend arrays, the PVC definition must contain the following within the `spec:` section of the PVC template:
35 | ```yaml
36 | spec:
37 | selector:
38 | matchExpressions:
39 | - key: user
40 | operator: In
41 | values: ["prod"]
42 | ```
43 | This example ensures that the PV is created by PSO on an array with the `user: prod` key-value pair. If there are multiple arrays with this label, PSO will load balance
44 | across only those arrays to determine the most appropriate location for the PV.
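A complete PVC combining the selector with a storage class might look like the following sketch (the claim name `prod-claim` is illustrative; `pure-block` is one of the default PSO storage classes):

```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: prod-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: pure-block
  # Only provision on arrays labeled user: prod
  selector:
    matchExpressions:
      - key: user
        operator: In
        values: ["prod"]
```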
45 |
46 | Additionally, PVs can be limited to only allow access by specific worker nodes using the concept of Node Affinity. This node affinity can be limited to an
47 | individual node, or a group (or zone) of nodes.
48 |
49 | See the following examples:
50 |
51 | 1. Limiting a PV to a specific worker node
52 | ```yaml
53 | apiVersion: v1
54 | kind: PersistentVolume
55 | metadata:
56 |   name: pure-volume-1
57 | spec:
58 | capacity:
59 | storage: 100Gi
60 | storageClassName: pure-block
61 | local:
62 | path: /data
63 | nodeAffinity:
64 | required:
65 | nodeSelectorTerms:
66 | - matchExpressions:
67 | - key: kubernetes.io/hostname
68 | operator: In
69 | values:
70 | - node-1
71 | ```
72 | 2. Limiting to a group (or zone) of nodes (in this case to nodes labeled as being in Rack 33)
73 | ```yaml
74 | apiVersion: v1
75 | kind: PersistentVolume
76 | metadata:
77 |   name: pure-volume-1
78 | spec:
79 | capacity:
80 | storage: 100Gi
81 | storageClassName: pure-block
82 | nodeAffinity:
83 | required:
84 | nodeSelectorTerms:
85 | - matchExpressions:
86 | - key: failure-domain.beta.kubernetes.io/zone
87 | operator: In
88 | values:
89 | - rack-33
90 | ```
91 | 3. Limiting to any worker node in one or more zones
92 | ```yaml
93 | apiVersion: v1
94 | kind: PersistentVolume
95 | metadata:
96 |   name: pure-volume-1
97 | spec:
98 | capacity:
99 | storage: 100Gi
100 | storageClassName: pure-block
101 | nodeAffinity:
102 | required:
103 | nodeSelectorTerms:
104 | - matchExpressions:
105 | - key: failure-domain.beta.kubernetes.io/zone
106 | operator: In
107 | values:
108 | - rack-33
109 | - rack-34
110 | ```
111 | To ensure that these `nodeAffinity` rules are understood, it is necessary to correctly label your worker nodes:
112 | ```bash
113 | kubectl label node prod01 failure-domain.beta.kubernetes.io/zone="rack-33"
114 | kubectl label node prod02 failure-domain.beta.kubernetes.io/zone="rack-33"
115 | kubectl label node prod03 failure-domain.beta.kubernetes.io/zone="rack-34"
116 | kubectl label node prod04 failure-domain.beta.kubernetes.io/zone="rack-34"
117 | ```
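You can then verify the labels, for example:

```bash
kubectl get nodes -L failure-domain.beta.kubernetes.io/zone
```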
118 | Additionally, you can constrain a specific application to use only worker nodes and PVs located in the same rack.
119 | An example of this would be where applications need their persistent storage to be close to the worker node running the application, such as an application
120 | that must run on a GPU-enabled node and needs its PVs to have the minimal separation to reduce latency. This can be achieved by ensuring the application
121 | deployment template contains the `selector` labels in the PVC definition section (as shown above) and the following (example) code in the Pod definition section:
122 | ```yaml
123 | spec:
124 | template:
125 | spec:
126 | affinity:
127 | nodeAffinity:
128 | requiredDuringSchedulingIgnoredDuringExecution:
129 | nodeSelectorTerms:
130 | - matchExpressions:
131 | - key: "failure-domain.beta.kubernetes.io/zone"
132 | operator: In
133 | values: ["rack-33"]
134 | ```
135 | With some creative scripting of deployment templates it would even be possible to create a disaster tolerant deployment of an application such as MongoDB
136 | that controls its own data replication using replica pods, by ensuring that each replica node is deployed to a different zone/rack and that the
137 | persistent storage for that replica is only provided from a storage array in the same rack. This gives tolerance to an entire rack failing,
138 | with no data loss for the application, because the array in the failed rack is not providing storage to a replica in another rack.
139 |
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | purestorage/helm-charts
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/docs/pure-csi-1.0.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-csi-1.0.0.tgz
--------------------------------------------------------------------------------
/docs/pure-csi-1.0.1.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-csi-1.0.1.tgz
--------------------------------------------------------------------------------
/docs/pure-csi-1.0.2.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-csi-1.0.2.tgz
--------------------------------------------------------------------------------
/docs/pure-csi-1.0.3.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-csi-1.0.3.tgz
--------------------------------------------------------------------------------
/docs/pure-csi-1.0.4.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-csi-1.0.4.tgz
--------------------------------------------------------------------------------
/docs/pure-csi-1.0.5.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-csi-1.0.5.tgz
--------------------------------------------------------------------------------
/docs/pure-csi-1.0.6.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-csi-1.0.6.tgz
--------------------------------------------------------------------------------
/docs/pure-csi-1.0.7.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-csi-1.0.7.tgz
--------------------------------------------------------------------------------
/docs/pure-csi-1.0.8.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-csi-1.0.8.tgz
--------------------------------------------------------------------------------
/docs/pure-csi-1.1.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-csi-1.1.0.tgz
--------------------------------------------------------------------------------
/docs/pure-csi-1.1.1.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-csi-1.1.1.tgz
--------------------------------------------------------------------------------
/docs/pure-csi-1.2.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-csi-1.2.0.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.0.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.0.0.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.0.1.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.0.1.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.1.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.1.0.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.1.1.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.1.1.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.1.2.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.1.2.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.2.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.2.0.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.2.1.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.2.1.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.3.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.3.0.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.3.1.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.3.1.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.3.2.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.3.2.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.4.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.4.0.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.4.1.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.4.1.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.5.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.5.0.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.5.1.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.5.1.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.5.2.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.5.2.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.5.4.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.5.4.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.5.5.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.5.5.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.5.6.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.5.6.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.5.7.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.5.7.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.5.8.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.5.8.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.6.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.6.0.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.6.1.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.6.1.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.7.0.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.7.0.tgz
--------------------------------------------------------------------------------
/docs/pure-k8s-plugin-2.7.1.tgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/docs/pure-k8s-plugin-2.7.1.tgz
--------------------------------------------------------------------------------
/operator-csi-plugin/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM quay.io/operator-framework/helm-operator:v0.13.0
2 | MAINTAINER Pure Storage, Inc.
3 | LABEL name="pure-csi" vendor="Pure Storage" version="5.2.0" release="1.0" summary="Pure Storage CSI Operator" description="Pure Service Orchestrator CSI Operator"
4 | COPY helm-charts/ ${HOME}/helm-charts/
5 | COPY watches.yaml ${HOME}/watches.yaml
6 | COPY licenses /licenses
7 |
--------------------------------------------------------------------------------
/operator-csi-plugin/README.md:
--------------------------------------------------------------------------------
1 | **The CSI Operator should only be used for installation of PSO in OpenShift 4.2 and 4.3 environments**
2 |
3 | For all other deployments of Kubernetes, including OpenShift 4.4 and higher, use the Helm installation process.
4 |
5 | # Pure CSI Operator
6 |
7 | ## Overview
8 |
9 | The Pure CSI Operator packages and deploys the Pure Service Orchestrator (PSO) CSI plugin on OpenShift for dynamic provisioning of persistent volumes on FlashArray and FlashBlade storage appliances.
10 | This Operator is created as a [Custom Resource Definition](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions) from the [pure-csi Helm chart](https://github.com/purestorage/helm-charts#purestorage-helm-charts-and-helm-operator) using the [Operator-SDK](https://github.com/operator-framework/operator-sdk#overview).
11 | This installation process does not require Helm installation.
12 |
13 | ## Platform and Software Dependencies
14 |
15 | - #### Environments Supported*:
16 | - Red Hat OpenShift 4.3 - 4.4
17 | - #### Other software dependencies:
18 | - Latest linux multipath software package for your operating system (Required)
19 | - Latest Filesystem utilities/drivers (XFS by default, Required)
20 | - Latest iSCSI initiator software for your operating system (Optional, required for iSCSI connectivity)
21 | - Latest NFS software package for your operating system (Optional, required for NFS connectivity)
22 | - Latest FC initiator software for your operating system (Optional, required for FC connectivity, *FC Supported on Bare-metal installations only*)
23 | - #### FlashArray and FlashBlade:
24 | - The FlashArray and/or FlashBlade should be connected to the compute nodes using [Pure's best practices](https://support.purestorage.com/Solutions/Linux/Reference/Linux_Recommended_Settings)
25 |
26 | _* Please see release notes for details_
27 |
28 | ## CSI Snapshot and Clone feature
29 |
30 | More details on using the snapshot and clone functionality can be found [here](../docs/csi-snapshot-clones.md)
31 |
32 | ## Using Per-Volume FileSystem Options
33 |
34 | More details on using customized filesystem options can be found [here](../docs/csi-filesystem-options.md).
35 |
36 | ## Using Read-Write-Many (RWX) volumes
37 |
38 | More details on using Read-Write-Many (RWX) volumes with Kubernetes can be found [here](../docs/csi-read-write-many.md)
39 |
40 | ## PSO use of StorageClass
41 |
42 | Whilst there are some example `StorageClass` definitions provided by the PSO installation, refer [here](../docs/custom-storageclasses.md) for more details on these default storage classes and how to create your own custom storage classes that can be used by PSO.
43 |
44 | ## Installation
45 |
46 | Clone this GitHub repository, selecting the version of the operator you wish to install. We recommend using the latest released version. Information on this can be found [here](https://github.com/purestorage/helm-charts/releases)
47 |
48 | ```bash
49 | git clone --branch <version> https://github.com/purestorage/helm-charts.git
50 | cd operator-csi-plugin
51 | ```
52 |
53 | Create your own `values.yaml`. The easiest way is to copy the default [./values.yaml](./values.yaml) with `wget`.
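For example, assuming you want the copy on the `master` branch:

```bash
wget https://raw.githubusercontent.com/purestorage/helm-charts/master/operator-csi-plugin/values.yaml
```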
54 |
55 | The pure-csi-operator namespace/project is created by the install script (see below).
56 |
57 | Run the install script to set up the Pure CSI Operator.
58 |
59 | ```bash
60 | # For OpenShift 4.3 only
61 | install.sh --image=<image> --namespace=<namespace> --orchestrator=openshift -f <values.yaml>
62 | # For OpenShift 4.4 and 4.5
63 | install_ose4.sh --image=<image> --namespace=<namespace> --orchestrator=openshift -f <values.yaml>
64 | ```
65 |
66 | Parameter list:
67 | 1. ``image`` is the Pure CSI Operator image. If unspecified, ``image`` resolves to the released version at [quay.io/purestorage/pso-operator](https://quay.io/purestorage/pso-operator).
68 | 2. ``namespace`` is the namespace/project in which the Pure CSI Operator and its entities will be installed. If unspecified, the operator creates and installs in the ``pure-csi-operator`` namespace.
69 | **Pure CSI Operator MUST be installed in a new project with no other pods. Otherwise an uninstall may delete pods that are not related to the Pure CSI Operator.**
70 | 3. ``values.yaml`` contains the customized helm-chart configuration parameters. This is a **required parameter** and must contain the list of all backend FlashArray and FlashBlade storage appliances. All parameters that need a non-default value must be specified in this file.
71 | Refer to [Configuration for values.yaml.](../pure-csi/README.md#configuration)
72 |
73 | ### Install script steps:
74 |
75 | The install script will do the following:
76 | 1. Create New Project.
77 | The script creates a new project (if it does not already exist) with the given namespace. If no namespace parameter is specified, the ``pure-csi-operator`` namespace is used.
78 | 2. Create a Custom Resource Definition (CRD) for the Pure CSI Operator.
79 | The script waits for the CRD to be published in the cluster. If after 10 seconds the API server has not set up the CRD, the script times out. To wait longer, pass the parameter
80 | ``--timeout=<seconds>`` to the install script.
81 | 3. Create RBAC rules for the Operator.
82 | The Pure CSI Operator needs the following Cluster-level Roles and RoleBindings.
83 |
84 |
85 | | Resource | Permissions | Notes |
86 | | ------------- |:-------------:| -----:|
87 | | Namespace | Get | Pure CSI Operator needs the ability to get created namespaces |
88 | | Storageclass | Create/Delete | Create and clean up storage classes to be used for provisioning |
89 | | ClusterRoleBinding | Create/Delete/Get | PSO Operator needs to create and clean up a ClusterRoleBinding used by the external-provisioner sidecar and cluster-driver-registrar sidecar (only K8s 1.13) |
90 |
91 | The operator also needs all the Cluster-level Roles that are needed by the external-provisioner and cluster-driver-registrar sidecars.
92 | In addition, the operator needs access to multiple resources in the project/namespace that it is deployed in to function correctly. Hence it is recommended to install the Pure CSI Operator in its own non-default namespace.
93 |
94 |
95 |
96 | 4. Creates a deployment for the Operator.
97 | Finally, the script creates and deploys the operator using the customized parameters passed in the ``values.yaml`` file.
98 |
99 | ### Apply changes in ``values.yaml``
100 |
101 | The ``update.sh`` script is used to apply changes from ``values.yaml`` as follows.
102 |
103 | ```bash
104 | ./update.sh -f values.yaml
105 | ```
106 |
107 | ## Uninstall
108 |
109 | To uninstall the Pure CSI Operator, run
110 | ```bash
111 | oc delete PSOPlugin/psoplugin-operator -n <pure-csi-operator-installed-namespace>
112 | oc delete all --all -n <pure-csi-operator-installed-namespace>
113 | ```
114 |
115 | where ``pure-csi-operator-installed-namespace`` is the project/namespace in which the Pure CSI Operator is installed. It is **strongly recommended** to install the Pure CSI Operator in a new project and not add any other pods to this project/namespace. Any pods in this project will be cleaned up on an uninstall.
116 |
117 | To completely remove the CustomResourceDefinition used by the Operator run
118 |
119 | ```bash
120 | oc delete crd psoplugins.purestorage.com
121 | ```
122 |
123 | # License
124 |
125 | https://www.purestorage.com/content/dam/pdf/en/legal/pure-storage-plugin-end-user-license-agreement.pdf
126 |
--------------------------------------------------------------------------------
/operator-csi-plugin/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -xe
4 | PSO_OPERATOR_IMG_TAG=${PSO_OPERATOR_IMG_TAG:-pso-operator:latest}
5 | IMG_DIR=$(dirname $0)
6 | HELM_DIR=${IMG_DIR}/..
7 | if [ -d "${IMG_DIR}/helm-charts" ]; then rm -rf ${IMG_DIR}/helm-charts; fi
8 | mkdir -p ${IMG_DIR}/helm-charts
9 | cp -r ${HELM_DIR}/pure-csi ${IMG_DIR}/helm-charts
10 |
11 | docker build -t ${PSO_OPERATOR_IMG_TAG} ${IMG_DIR}
12 |
--------------------------------------------------------------------------------
/operator-csi-plugin/ose_4_clusterrole_patch.yaml:
--------------------------------------------------------------------------------
1 | - apiGroups:
2 | - snapshot.storage.k8s.io
3 | resources:
4 | - "volumesnapshotcontents/status"
5 | verbs:
6 | - update
7 |
--------------------------------------------------------------------------------
/operator-csi-plugin/update.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Script to Update PSO Arrays configurations after modification of values.yaml
3 |
4 | usage()
5 | {
6 |     echo "Usage: $0 -f <values.yaml>"
7 | exit
8 | }
9 |
10 | if [[ "$1" == "-h" || "$1" == "--help" ]]; then
11 | usage
12 | fi
13 |
14 | while (("$#")); do
15 | case "$1" in
16 | -f)
17 | if [ "$#" -lt 2 ]; then
18 | usage
19 | exit
20 | fi
21 | VALUESFILE="$2"
22 | shift
23 | shift
24 | ;;
25 | -h|--help|*)
26 | usage
27 | exit
28 | ;;
29 | esac
30 | done
31 |
32 | if [[ -z ${VALUESFILE} || ! -f ${VALUESFILE} ]]; then
33 |     echo "File ${VALUESFILE} for values.yaml does not exist"
34 |     usage
35 |     exit 1
36 | fi
37 |
38 | # Find out if this is OpenShift
39 |
40 | OC=/usr/bin/oc
41 |
42 | if [ -f "$OC" ]; then
43 | KUBECTL=oc
44 | ORCHESTRATOR=openshift
45 | else
46 | KUBECTL=kubectl
47 | ORCHESTRATOR=k8s
48 | fi
49 |
50 | # Discover which namespace we have installed PSO in
51 |
52 | NAMESPACE=`$KUBECTL get statefulset --all-namespaces | grep pure-provisioner | awk '{print $1}' -`
53 | if [ -z $NAMESPACE ]; then
54 | echo "Error: Please confirm Namespace for PSO"
55 | exit 1
56 | fi
57 |
58 | # Discover the image we are currently using
59 |
60 | IMAGE=`$KUBECTL describe deployment pso-operator -n $NAMESPACE | grep Image | awk '{print $2}' -`
61 | if [ -z $IMAGE ]; then
62 | echo "Error: Failed to identify image being used"
63 | exit 1
64 | fi
65 |
66 | # Quietly Reinstall PSO
67 |
68 | ./install.sh --image=$IMAGE --namespace=$NAMESPACE --orchestrator=$ORCHESTRATOR -f $VALUESFILE > /dev/null 2>&1
69 |
70 | $KUBECTL rollout status sts pure-provisioner -n $NAMESPACE >/dev/null 2>&1
71 |
72 |
--------------------------------------------------------------------------------
/operator-csi-plugin/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for csi-plugin.
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | image:
6 | name: purestorage/k8s
7 | tag: 5.2.0
8 | pullPolicy: Always
9 |
10 | csi:
11 | provisioner:
12 | image:
13 | name: quay.io/k8scsi/csi-provisioner
14 | pullPolicy: Always
15 | snapshotter:
16 | image:
17 | name: quay.io/k8scsi/csi-snapshotter
18 | pullPolicy: Always
19 | clusterDriverRegistrar:
20 | image:
21 | name: quay.io/k8scsi/csi-cluster-driver-registrar
22 | pullPolicy: Always
23 | nodeDriverRegistrar:
24 | image:
25 | name: quay.io/k8scsi/csi-node-driver-registrar
26 | pullPolicy: Always
27 | livenessProbe:
28 | image:
29 | name: quay.io/k8scsi/livenessprobe
30 | pullPolicy: Always
31 |
32 | # this option is to enable/disable the debug mode of this app
33 | # for pure-csi-driver
34 | app:
35 | debug: false
36 |
37 | # do you want to set pure as the default storageclass?
38 | storageclass:
39 | isPureDefault: false
40 | # set the type of backend you want for the 'pure' storageclass
41 | # pureBackend: file
42 |
43 | # specify the service account name for this app
44 | clusterrolebinding:
45 | serviceAccount:
46 | name: pure
47 |
48 | # support ISCSI or FC, not case sensitive
49 | flasharray:
50 | sanType: ISCSI
51 | defaultFSType: xfs
52 | defaultFSOpt: "-q"
53 | defaultMountOpt: ""
54 | preemptAttachments: "true"
55 | iSCSILoginTimeout: 20
56 | iSCSIAllowedCIDR: ""
57 |
58 | flashblade:
59 | snapshotDirectoryEnabled: "false"
60 |
61 | # namespace.pure is the backend storage namespace where
62 | # volumes/shares/etc will be created.
63 | # Values for this can only include alphanumeric characters and underscores.
64 | # Hyphens are not allowed.
65 | namespace:
66 | pure: k8s
67 |
68 | # support k8s or openshift
69 | orchestrator:
70 | # name is either 'k8s' or 'openshift'
71 | name: k8s
72 |
73 | # arrays specify what storage arrays should be managed by the plugin, this is
74 | # required to be set upon installation. For FlashArrays you must set the "MgmtEndPoint"
75 | # and "APIToken", and for FlashBlades you need the additional "NfsEndPoint" parameter.
76 | # The labels are optional, and can be any key-value pair for use with the "fleet"
77 | # provisioner. An example is shown below:
78 | arrays:
79 | #FlashArrays:
80 | # - MgmtEndPoint: "1.2.3.4"
81 | # APIToken: "a526a4c6-18b0-a8c9-1afa-3499293574bb"
82 | # Labels:
83 | # topology.purestorage.com/rack: "22"
84 | # topology.purestorage.com/env: "prod"
85 | # - MgmtEndPoint: "1.2.3.5"
86 | # APIToken: "b526a4c6-18b0-a8c9-1afa-3499293574bb"
87 | #FlashBlades:
88 | # - MgmtEndPoint: "1.2.3.6"
89 | # APIToken: "T-c4925090-c9bf-4033-8537-d24ee5669135"
90 | # NFSEndPoint: "1.2.3.7"
91 | # Labels:
92 | # topology.purestorage.com/rack: "7b"
93 | # topology.purestorage.com/env: "dev"
94 | # - MgmtEndPoint: "1.2.3.8"
95 | # APIToken: "T-d4925090-c9bf-4033-8537-d24ee5669135"
96 | # NFSEndPoint: "1.2.3.9"
97 | # Labels:
98 | # topology.purestorage.com/rack: "6a"
99 |
100 | mounter:
101 | # These values map directly to yaml in the daemonset spec, see the kubernetes docs for info
102 | nodeSelector: {}
103 | # disktype: ssd
104 | # These values map directly to yaml in the daemonset spec, see the kubernetes docs for info
105 | tolerations: []
106 | # - operator: Exists
107 | # These values map directly to yaml in the daemonset spec, see the kubernetes docs for info
108 | affinity: {}
109 | # nodeAffinity:
110 | # requiredDuringSchedulingIgnoredDuringExecution:
111 | # nodeSelectorTerms:
112 | # - matchExpressions:
113 | # - key: e2e-az-NorthSouth
114 | # operator: In
115 | # values:
116 | # - e2e-az-North
117 | # - e2e-az-South
118 |
119 | provisioner:
120 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
121 | nodeSelector: {}
122 | # disktype: ssd
123 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
124 | tolerations: []
125 | # - operator: Exists
126 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
127 | affinity: {}
128 | # nodeAffinity:
129 | # requiredDuringSchedulingIgnoredDuringExecution:
130 | # nodeSelectorTerms:
131 | # - matchExpressions:
132 | # - key: e2e-az-NorthSouth
133 | # operator: In
134 | # values:
135 | # - e2e-az-North
136 | # - e2e-az-South
137 |
--------------------------------------------------------------------------------
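For illustration only, a filled-in `arrays:` section for the values file above might look like the sketch below; the endpoints and API tokens are placeholders, and the key names follow the commented example (note this CSI chart spells the FlashBlade data address `NFSEndPoint`):

```yaml
arrays:
  FlashArrays:
    - MgmtEndPoint: "192.0.2.10"         # placeholder management address
      APIToken: "<flasharray-api-token>" # placeholder token
      Labels:
        topology.purestorage.com/rack: "22"
  FlashBlades:
    - MgmtEndPoint: "192.0.2.20"         # placeholder management address
      APIToken: "<flashblade-api-token>" # placeholder token
      NFSEndPoint: "192.0.2.21"          # placeholder data (NFS) address
```
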
/operator-csi-plugin/watches.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - version: v1
3 | group: purestorage.com
4 | kind: PSOPlugin
5 | chart: /opt/helm/helm-charts/pure-csi
6 |
--------------------------------------------------------------------------------
/operator-k8s-plugin/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM quay.io/operator-framework/helm-operator:v0.13.0
2 | MAINTAINER Pure Storage, Inc.
3 | LABEL name="pure-flex" vendor="Pure Storage" version="2.7.1" release="1.0" summary="Pure Storage FlexDriver Operator" description="Pure Service Orchestrator FlexDriver Operator"
4 | COPY helm-charts/ ${HOME}/helm-charts/
5 | COPY watches.yaml ${HOME}/watches.yaml
6 | COPY licenses /licenses
7 |
--------------------------------------------------------------------------------
/operator-k8s-plugin/README.md:
--------------------------------------------------------------------------------
1 | **The Flex Volume Driver has been deprecated in favour of the CSI Driver**
2 |
3 | Flex should only be used where the CSI driver is not supported because the Kubernetes version is too low.
4 |
5 | # Pure Flex Operator
6 |
7 | ## Overview
8 | Pure Flex Operator is the preferred install method for PSO on OpenShift 3.11.
9 | The Pure Flex Operator packages and deploys the Pure Service Orchestrator (PSO) Flexvolume driver on OpenShift for dynamic provisioning of persistent volumes on FlashArray and FlashBlade storage appliances.
10 | This Operator is created as a [Custom Resource Definition](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions) from the [pure-k8s-plugin Helm chart](https://github.com/purestorage/helm-charts#purestorage-helm-charts-and-helm-operator) using the [Operator-SDK](https://github.com/operator-framework/operator-sdk#overview).
11 | This installation process does not require Helm installation.
12 |
13 |
14 | ## Platform and Software Dependencies
15 | - #### Operating Systems Supported*:
16 | - CentOS 7
17 | - CoreOS (Ladybug 1298.6.0 and above)
18 | - RHEL 7
19 | - Ubuntu 16.04
20 | - Ubuntu 18.04
21 | - #### Environments Supported*:
22 | - Kubernetes 1.11
23 | - Access to a user account that has cluster-admin privileges.
24 | - OpenShift 3.11
25 | - Access to a user account that has cluster-admin privileges.
26 | - [Dynamic provisioning](https://docs.openshift.com/container-platform/3.11/install_config/persistent_storage/dynamically_provisioning_pvs.html#overview) enabled in the master nodes.
27 | - [Controller attach-detach disabled](https://docs.openshift.com/container-platform/3.11/install_config/persistent_storage/enabling_controller_attach_detach.html#configuring-nodes-to-enable-controller-managed-attachment-and-detachment) in all nodes the flex driver is running on.
28 | - #### Other software dependencies:
29 | - Latest linux multipath software package for your operating system (Required)
30 | - Latest Filesystem utilities/drivers (XFS by default, Required)
31 | - Latest iSCSI initiator software for your operating system (Optional, required for iSCSI connectivity)
32 | - Latest NFS software package for your operating system (Optional, required for NFS connectivity)
33 | - Latest FC initiator software for your operating system (Optional, required for FC connectivity, *FC Supported on Bare-metal K8s installations only*)
34 | - #### FlashArray and FlashBlade:
35 | - The FlashArray and/or FlashBlade should be connected to the compute nodes using [Pure's best practices](https://support.purestorage.com/Solutions/Linux/Reference/Linux_Recommended_Settings)
36 |
37 | _* Please see release notes for details_
38 |
39 | ## Installation
40 |
41 | Clone this GitHub repository, selecting the version of the operator you wish to install. We recommend using the latest released version.
42 | ```
43 | git clone --branch <version> https://github.com/purestorage/helm-charts.git
44 | cd helm-charts/operator-k8s-plugin
45 | ```
46 |
47 | Create your own `values.yaml`. The easiest way is to download the default [./values.yaml](./values.yaml) (for example with `wget`) and edit it.
48 |
49 | Run the install script to set up the PSO-operator.
50 |
51 | ```bash
52 | install.sh --image=<operator-image> --namespace=<namespace> --orchestrator=<k8s|openshift> -f <values.yaml>
53 | ```
54 |
55 | Parameter list:
56 | 1. ``image`` is the Pure Flex Operator image. If unspecified, ``image`` resolves to the released version at [quay.io/purestorage/pso-operator](https://quay.io/purestorage/pso-operator).
57 | 2. ``namespace`` is the namespace/project in which the Pure Flex Operator and its entities will be installed. If unspecified, the operator creates and installs in the ``pso-operator`` namespace.
58 | **Pure Flex Operator MUST be installed in a new project with no other pods. Otherwise an uninstall may delete pods that are not related to the Pure Flex operator.**
59 | 3. ``orchestrator`` should be either ``k8s`` or ``openshift`` depending on which orchestrator is being used. If unspecified, ``k8s`` is assumed.
60 | 4. ``values.yaml`` contains the customized helm-chart configuration parameters. This is a **required parameter** and must contain the list of all backend FlashArray and FlashBlade storage appliances. All parameters that need a non-default value must be specified in this file.
61 | Refer to [Configuration for values.yaml.](../pure-k8s-plugin/README.md#configuration)
62 |
63 | ### Install script steps:
64 | The install script will do the following:
65 | 1. Create New Project.
66 | The script creates a new project (if it does not already exist) with the given namespace. If no namespace parameter is specified, the ``pso-operator`` namespace is used.
67 | **OpenShift Note**: In OpenShift 3.11, the default node-selector for a project does not allow PSO Operator to mount volumes on master and infra nodes.
68 | If you want to mount volumes on master and infra nodes OR run pods in the default namespace using volumes mounted by PSO, then set `--node-selector` flag to `""` when running the install script as follows.
69 |
70 | ```bash
71 | install.sh --image=<operator-image> --namespace=<namespace> --orchestrator=<k8s|openshift> --node-selector="" -f <values.yaml>
72 | ```
73 |
74 | 2. Create a Custom Resource Definition (CRD) for the PSO Operator.
75 | The script waits for the CRD to be published in the cluster. If after 10 seconds the API server has not set up the CRD, the script times out. To wait longer, pass the parameter
76 | ``--timeout=<seconds>`` to the install script.
77 |
78 | 3. Create RBAC rules for the Operator.
79 | The Pure Flex Operator needs the following Cluster-level Roles and RoleBindings.
80 |
81 |
82 | | Resource | Permissions | Notes |
83 | | ------------- | ------------- | ----- |
84 | | Namespace | Get | PSO Operator needs the ability to get created namespaces |
85 | | Storageclass | Create/Delete | Create and cleanup storage classes to be used for Provisioning |
86 | | ClusterRoleBinding | Create/Delete/Get | PSO Operator needs to create and cleanup a ClusterRoleBinding called ``pure-provisioner-rights`` to the ClusterRole ``system:persistent-volume-provisioner`` for provisioning PVs |
87 |
88 | In addition, the operator needs access to multiple resources in the project/namespace that it is deployed in to function correctly. Hence it is recommended to install the PSO-operator in a non-default namespace.
89 |
90 | 4. Create a Deployment for the Operator.
91 | Finally, the script creates and deploys the operator using the customized parameters passed in the ``values.yaml`` file.
92 |
93 | ### Apply changes in ``values.yaml``
94 | The ``update.sh`` script is used to apply changes from ``values.yaml`` as follows.
95 |
96 | ```bash
97 | ./update.sh -f values.yaml
98 | ```
99 |
100 | ## Using Snapshots with a FlashArray
101 |
102 | More details on using the snapshot functionality can be found [here](../docs/flex-snapshot-for-flasharray.md)
103 |
104 | ## Using Labels to control volume topology
105 |
106 | More details on using configuration labels can be found [here](../docs/flex-volume-using-labels.md)
107 |
108 | ## Upgrading FlexDriver Operator version
109 | To upgrade the version of your FlexDriver perform the following actions:
110 | 1. Update your `helm-charts` directory using `git fetch` and `git rebase`. If you have modified any files you will need to commit these before performing the `rebase`. For more details see [here](https://git-scm.com/docs/git-rebase) and [here](https://git-scm.com/book/en/v2/Git-Branching-Rebasing).
111 | 2. Ensure that your local `values.yaml` file is modified to reflect the `tag` version of the `purestorage/k8s` FlexDriver image you wish to upgrade to, for example: `2.7.0`
112 | 3. Run the `upgrade.sh` script as follows:
113 |
114 | ```bash
115 | ./upgrade.sh -f values.yaml --version=<operator-version>
116 | ```
117 |
118 | where ``<operator-version>`` refers to the PSO Operator image version you wish to upgrade to, such as `0.2.0`.
119 |
120 | **NOTE:** The Operator image version and FlexDriver version must be compatible
121 |
122 | ## Uninstall FlexDriver Operator
123 | To uninstall the Pure FlexVolume Operator, run
124 |
125 | ```bash
126 | kubectl delete PSOPlugin/psoplugin-operator -n <pure-k8s-operator-installed-namespace>
127 | kubectl delete all --all -n <pure-k8s-operator-installed-namespace>
128 | ```
129 |
130 | where ``pure-k8s-operator-installed-namespace`` is the project/namespace in which the Pure FlexDriver Operator is installed. It is **strongly recommended** to install the Pure FlexDriver Operator in a new project and not add any other pods to this project/namespace. Any pods in this project will be cleaned up on an uninstall.
131 |
132 | To completely remove the CustomResourceDefinition used by the Operator run
133 |
134 | ```bash
135 | kubectl delete crd psoplugins.purestorage.com
136 | ```
137 |
138 | If you are using OpenShift, replace `kubectl` with `oc` in the above commands.
139 |
140 | # License
141 | https://www.purestorage.com/content/dam/pdf/en/legal/pure-storage-plugin-end-user-license-agreement.pdf
142 |
--------------------------------------------------------------------------------
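Putting the README's steps together, a hypothetical end-to-end install on plain Kubernetes could look like the following; the branch, operator image tag, and namespace are illustrative placeholders:

```bash
git clone --branch 2.7.1 https://github.com/purestorage/helm-charts.git
cd helm-charts/operator-k8s-plugin
cp values.yaml my-values.yaml   # edit the arrays: section before installing
./install.sh --image=quay.io/purestorage/pso-operator:v0.2.1 \
             --namespace=pso-operator \
             --orchestrator=k8s \
             -f my-values.yaml
```
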
/operator-k8s-plugin/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -xe
4 | PSO_FLEX_OPERATOR_IMG_TAG=${PSO_FLEX_OPERATOR_IMG_TAG:-pso-operator:latest}
5 | IMG_DIR=$(dirname $0)
6 | HELM_DIR=${IMG_DIR}/..
7 | if [ -d "${IMG_DIR}/helm-charts" ]; then rm -rf ${IMG_DIR}/helm-charts; fi
8 | mkdir -p ${IMG_DIR}/helm-charts
9 | cp -r ${HELM_DIR}/pure-k8s-plugin ${IMG_DIR}/helm-charts
10 |
11 | docker build -t ${PSO_FLEX_OPERATOR_IMG_TAG} ${IMG_DIR}
12 |
--------------------------------------------------------------------------------
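`build.sh` above stages the `pure-k8s-plugin` chart next to the Dockerfile and builds the operator image, with the tag taken from the `PSO_FLEX_OPERATOR_IMG_TAG` environment variable it reads. A usage sketch from the repository root (the image name is a placeholder):

```bash
PSO_FLEX_OPERATOR_IMG_TAG=registry.example.com/pso-operator:dev ./operator-k8s-plugin/build.sh
```
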
/operator-k8s-plugin/install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | IMAGE=quay.io/purestorage/pso-operator:v0.2.1
3 | NAMESPACE=pso-operator
4 | KUBECTL=kubectl
5 | ORCHESTRATOR=k8s
6 | NODESELECTOR="unset"
7 |
8 | usage()
9 | {
10 | echo "Usage : $0 --image= --namespace= --orchestrator= -f "
11 | }
12 |
13 | if [[ "$1" == "-h" || "$1" == "--help" ]]; then
14 | usage
15 | exit
16 | fi
17 |
18 | while (("$#")); do
19 | case "$1" in
20 | --image=*)
21 | IMAGE="${1#*=}"
22 | shift
23 | ;;
24 | --namespace=*)
25 | NAMESPACE="${1#*=}"
26 | shift
27 | ;;
28 | --orchestrator=*)
29 | ORCHESTRATOR="${1#*=}"
30 | if [[ "${ORCHESTRATOR}" == "k8s" || "${ORCHESTRATOR}" == "K8s" ]]; then
31 | KUBECTL=kubectl
32 | elif [[ "${ORCHESTRATOR}" == "openshift" ]]; then
33 | KUBECTL=oc
34 | else
35 | echo "orchestrator can only be 'k8s' or 'openshift'"
36 | usage
37 | exit
38 | fi
39 | shift
40 | ;;
41 | --node-selector=*)
42 | NODESELECTOR="${1#*=}"
43 | shift
44 | ;;
45 | -f)
46 | if [ "$#" -lt 2 ]; then
47 | usage
48 | exit
49 | fi
50 | VALUESFILE="$2"
51 | shift
52 | shift
53 | ;;
54 | -h|--help|*)
55 | usage
56 | exit
57 | ;;
58 | esac
59 | done
60 |
61 | CLUSTERROLEAPIVERSION="$(${KUBECTL} explain ClusterRole | grep "VERSION:" | awk '{ print $2 }')"
62 | CLUSTERROLEBINDINGAPIVERSION="$(${KUBECTL} explain ClusterRoleBinding | grep "VERSION:" | awk '{ print $2 }')"
63 | ROLEAPIVERSION="$(${KUBECTL} explain Role | grep "VERSION:" | awk '{ print $2 }')"
64 | if [[ "${ORCHESTRATOR}" == "openshift" ]]; then
65 | ROLEBINDINGAPIVERSION="rbac.authorization.k8s.io/v1beta1"
66 | else
67 | ROLEBINDINGAPIVERSION="$(${KUBECTL} explain RoleBinding | grep "VERSION:" | awk '{ print $2 }')"
68 | fi
69 | DEPLOYMENTAPIVERSION="$(${KUBECTL} explain Deployment | grep "VERSION:" | awk '{ print $2 }')"
70 |
71 | if [[ -z ${VALUESFILE} || ! -f ${VALUESFILE} ]]; then
72 | echo "File ${VALUESFILE} does not exist"
73 | usage
74 | exit 1
75 | fi
76 |
77 | KUBECTL_NS="${KUBECTL} apply -n ${NAMESPACE} -f"
78 |
79 | # 1. Create the namespace
80 | if [[ "${KUBECTL}" == "kubectl" ]]; then
81 | $KUBECTL create namespace ${NAMESPACE}
82 | else
83 | if [[ "${NODESELECTOR}" == "unset" ]]; then
84 | # Use openshift default node-selector
85 | $KUBECTL adm new-project ${NAMESPACE}
86 | else
87 | $KUBECTL adm new-project ${NAMESPACE} --node-selector=${NODESELECTOR}
88 | fi
89 |
90 | # Since this plugin needs to mount external volumes to containers, create a SCC to allow the flex-daemon pod to
91 | # use the hostPath volume plugin
92 | echo '
93 | kind: SecurityContextConstraints
94 | apiVersion: v1
95 | metadata:
96 | name: hostpath
97 | allowPrivilegedContainer: true
98 | allowHostDirVolumePlugin: true
99 | runAsUser:
100 | type: RunAsAny
101 | seLinuxContext:
102 | type: RunAsAny
103 | fsGroup:
104 | type: RunAsAny
105 | supplementalGroups:
106 | type: RunAsAny
107 | ' | $KUBECTL create -f -
108 |
109 | # Grant this SCC to the service account creating the flex-daemonset
110 | # extract the clusterrolebinding.serviceAccount.name from the values.yaml file if it exists.
111 | SVC_ACCNT=$( cat ${VALUESFILE} | sed 's/#.*$//' | awk '/clusterrolebinding:/,0' | grep 'name:' | sed 's/^.*://; s/ *$//; /^$/d;' | head -1)
112 | if [[ -z ${SVC_ACCNT} ]]; then
113 | SVC_ACCNT=pure
114 | fi
115 | $KUBECTL adm policy add-scc-to-user hostpath -n ${NAMESPACE} -z ${SVC_ACCNT}
116 | fi
117 |
118 | # 2. Create CRD and wait until TIMEOUT seconds for the CRD to be established.
119 | counter=0
120 | TIMEOUT=10
121 | echo "
122 | apiVersion: apiextensions.k8s.io/v1beta1
123 | kind: CustomResourceDefinition
124 | metadata:
125 | name: psoplugins.purestorage.com
126 | spec:
127 | group: purestorage.com
128 | names:
129 | kind: PSOPlugin
130 | listKind: PSOPluginList
131 | plural: psoplugins
132 | singular: psoplugin
133 | scope: Namespaced
134 | versions:
135 | - name: v1
136 | served: true
137 | storage: true
138 | subresources:
139 | status: {} " | ${KUBECTL} apply -f -
140 |
141 | while true; do
142 | result=$(${KUBECTL} get crd/psoplugins.purestorage.com -o jsonpath='{.status.conditions[?(.type == "Established")].status}{"\n"}' | grep -i true)
143 | if [ $? -eq 0 ]; then
144 | break
145 | fi
146 | counter=$(($counter+1))
147 | if [ $counter -gt $TIMEOUT ]; then
148 | break
149 | fi
150 | sleep 1
151 | done
152 |
153 | if [ $counter -gt $TIMEOUT ]; then
154 | echo "Timed out waiting for CRD"
155 | exit 1
156 | fi
157 |
158 |
159 | # 3. Create RBAC for the PSO-Operator
160 | echo "
161 | kind: ClusterRole
162 | apiVersion: ${CLUSTERROLEAPIVERSION}
163 | metadata:
164 | name: pso-operator
165 | rules:
166 | - apiGroups:
167 | - purestorage.com
168 | resources:
169 | - \"*\"
170 | verbs:
171 | - \"*\"
172 | - apiGroups:
173 | - \"\"
174 | resources:
175 | - namespaces
176 | verbs:
177 | - get
178 | - apiGroups:
179 | - storage.k8s.io
180 | resources:
181 | - storageclasses
182 | verbs:
183 | - \"create\"
184 | - \"delete\"
185 | # PSO operator needs to create/delete a ClusterRole and ClusterRoleBinding for provisioning PVs
186 | - apiGroups:
187 | - rbac.authorization.k8s.io
188 | resources:
189 | - clusterrolebindings
190 | - clusterroles
191 | verbs:
192 | - \"create\"
193 | - \"delete\"
194 | - \"get\"
195 | # On Openshift ClusterRoleBindings belong to a different apiGroup.
196 | - apiGroups:
197 | - authorization.openshift.io
198 | resources:
199 | - clusterrolebindings
200 | - clusterroles
201 | verbs:
202 | - \"create\"
203 | - \"delete\"
204 | - \"get\"
205 | # Need the same permissions as pure-provisioner-clusterrole to be able to create it
206 | - apiGroups:
207 | - \"\"
208 | resources:
209 | - persistentvolumes
210 | verbs:
211 | - \"create\"
212 | - \"delete\"
213 | - \"get\"
214 | - \"list\"
215 | - \"watch\"
216 | - \"update\"
217 | - apiGroups:
218 | - \"\"
219 | resources:
220 | - persistentvolumeclaims
221 | verbs:
222 | - \"get\"
223 | - \"list\"
224 | - \"update\"
225 | - \"watch\"
226 | - apiGroups:
227 | - storage.k8s.io
228 | resources:
229 | - storageclasses
230 | verbs:
231 | - \"get\"
232 | - \"list\"
233 | - \"watch\"
234 | - apiGroups:
235 | - \"\"
236 | resources:
237 | - \"events\"
238 | verbs:
239 | - \"create\"
240 | - \"patch\"
241 | - \"update\"
242 | - \"watch\"
243 |
244 | ---
245 | kind: ClusterRoleBinding
246 | apiVersion: ${CLUSTERROLEBINDINGAPIVERSION}
247 | metadata:
248 | name: pso-operator-role
249 | subjects:
250 | - kind: ServiceAccount
251 | name: default
252 | namespace: REPLACE_NAMESPACE
253 | roleRef:
254 | kind: ClusterRole
255 | name: pso-operator
256 | apiGroup: rbac.authorization.k8s.io
257 |
258 | ---
259 | kind: Role
260 | apiVersion: ${ROLEAPIVERSION}
261 | metadata:
262 | name: pso-operator
263 | rules:
264 | - apiGroups:
265 | - \"\"
266 | resources:
267 | - pods
268 | - services
269 | - endpoints
270 | - configmaps
271 | - secrets
272 | - serviceaccounts
273 | verbs:
274 | - \"*\"
275 | - apiGroups:
276 | - \"\"
277 | resources:
278 | - namespaces
279 | verbs:
280 | - get
281 | - apiGroups:
282 | - apps
283 | resources:
284 | - deployments
285 | - daemonsets
286 | - replicasets
287 | verbs:
288 | - \"*\"
289 | - apiGroups:
290 | - extensions
291 | resources:
292 | - daemonsets
293 | verbs:
294 | - \"*\"
295 | - apiGroups:
296 | - rbac.authorization.k8s.io
297 | resources:
298 | - roles
299 | - rolebindings
300 | verbs:
301 | - \"*\"
302 | - apiGroups:
303 | - authorization.openshift.io
304 | resources:
305 | - roles
306 | - rolebindings
307 | verbs:
308 | - \"*\"
309 |
310 | ---
311 |
312 | kind: RoleBinding
313 | apiVersion: ${ROLEBINDINGAPIVERSION}
314 | metadata:
315 | name: default-account-pso-operator
316 | subjects:
317 | - kind: ServiceAccount
318 | name: default
319 | roleRef:
320 | kind: Role
321 | name: pso-operator
322 | apiGroup: rbac.authorization.k8s.io
323 | " | sed "s|REPLACE_NAMESPACE|${NAMESPACE}|" | ${KUBECTL_NS} -
324 |
325 | # 4. Create a PSO-Operator Deployment
326 | echo "
327 | apiVersion: ${DEPLOYMENTAPIVERSION}
328 | kind: Deployment
329 | metadata:
330 | name: pso-operator
331 | spec:
332 | replicas: 1
333 | selector:
334 | matchLabels:
335 | name: pso-operator
336 | template:
337 | metadata:
338 | labels:
339 | name: pso-operator
340 | spec:
341 | serviceAccountName: default
342 | containers:
343 | - name: pso-operator
344 | # Replace this with the built image name
345 | image: REPLACE_IMAGE
346 | ports:
347 | - containerPort: 60000
348 | name: metrics
349 | imagePullPolicy: Always
350 | env:
351 | - name: WATCH_NAMESPACE
352 | valueFrom:
353 | fieldRef:
354 | fieldPath: metadata.namespace
355 | - name: POD_NAME
356 | valueFrom:
357 | fieldRef:
358 | fieldPath: metadata.name
359 | - name: OPERATOR_NAME
360 | value: \"pso-operator\"
361 | " | sed "s|REPLACE_IMAGE|${IMAGE}|" | ${KUBECTL_NS} -
362 |
363 | # 5. Use the values.yaml file to create a customized PSO operator instance
364 | ( echo '
365 | apiVersion: purestorage.com/v1
366 | kind: PSOPlugin
367 | metadata:
368 | name: psoplugin-operator
369 | namespace: REPLACE_NAMESPACE
370 | spec:
371 | # Add fields here' | sed "s|REPLACE_NAMESPACE|${NAMESPACE}|"; sed 's/.*/ &/' ${VALUESFILE}) | ${KUBECTL_NS} -
372 |
373 |
--------------------------------------------------------------------------------
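After `install.sh` completes, a few optional sanity checks can confirm that the CRD was established and that the operator Deployment and its custom resource exist (assuming the default `pso-operator` namespace):

```bash
kubectl get crd psoplugins.purestorage.com
kubectl -n pso-operator get deployment pso-operator
kubectl -n pso-operator get psoplugin psoplugin-operator -o yaml
```
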
/operator-k8s-plugin/update.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Script to Update PSO Arrays configurations after modification of values.yaml
3 |
4 | usage()
5 | {
6 | echo "Usage : $0 -f "
7 | exit
8 | }
9 |
10 | if [[ "$1" == "-h" || "$1" == "--help" ]]; then
11 | usage
12 | fi
13 |
14 | while (("$#")); do
15 | case "$1" in
16 | -f)
17 | if [ "$#" -lt 2 ]; then
18 | usage
19 | exit
20 | fi
21 | VALUESFILE="$2"
22 | shift
23 | shift
24 | ;;
25 | -h|--help|*)
26 | usage
27 | exit
28 | ;;
29 | esac
30 | done
31 |
32 | if [[ -z ${VALUESFILE} || ! -f ${VALUESFILE} ]]; then
33 | usage
34 | echo "File ${VALUESFILE} for values.yaml does not exist"
35 | exit 1
36 | fi
37 |
38 | # Find out if this is OpenShift
39 |
40 | OC=/usr/bin/oc
41 |
42 | if [ -f "$OC" ]; then
43 | KUBECTL=oc
44 | ORCHESTRATOR=openshift
45 | else
46 | KUBECTL=kubectl
47 | ORCHESTRATOR=k8s
48 | fi
49 |
50 | # Discover which namespace we have installed PSO in
51 |
52 | NAMESPACE=`$KUBECTL get deployment --all-namespaces | grep pure-provisioner | awk '{print $1}' -`
53 | if [ -z "$NAMESPACE" ]; then
54 | echo "Error: Please confirm Namespace for PSO"
55 | exit 1
56 | fi
57 |
58 | # Discover the image we are currently using
59 |
60 | IMAGE=`$KUBECTL describe deployment pso-operator -n $NAMESPACE | grep Image | awk '{print $2}' -`
61 | if [ -z "$IMAGE" ]; then
62 | echo "Error: Failed to identify image being used"
63 | exit 1
64 | fi
65 |
66 | # Quietly Reinstall PSO
67 |
68 | ./install.sh --image=$IMAGE --namespace=$NAMESPACE --orchestrator=$ORCHESTRATOR -f $VALUESFILE > /dev/null 2>&1
69 |
70 | $KUBECTL rollout status deployment pure-provisioner -n $NAMESPACE >/dev/null 2>&1
71 |
72 |
--------------------------------------------------------------------------------
/operator-k8s-plugin/upgrade.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Script to Upgrade PSO FlexDriver
3 |
4 | usage()
5 | {
6 | echo "Usage : $0 --version= -f "
7 | exit
8 | }
9 |
10 | if [[ "$1" == "-h" || "$1" == "--help" ]]; then
11 | usage
12 | fi
13 |
14 | while (("$#")); do
15 | case "$1" in
16 | --version=*)
17 | VERSION="${1#*=}"
18 | NEW_VERSION=`echo $VERSION | awk '{print tolower($1)}' -`
19 | V_CHAR=`echo $NEW_VERSION | awk '{print substr($1,1,1)}' -`
20 | if [ "$V_CHAR" != "v" ]; then
21 | NEW_VERSION=`echo $NEW_VERSION | awk '$0="v"$0' -`
22 | fi
23 | shift
24 | ;;
25 | -f)
26 | if [ "$#" -lt 2 ]; then
27 | usage
28 | exit
29 | fi
30 | VALUESFILE="$2"
31 | shift
32 | shift
33 | ;;
34 | -h|--help|*)
35 | usage
36 | exit
37 | ;;
38 | esac
39 | done
40 |
41 | if [[ -z ${VALUESFILE} || ! -f ${VALUESFILE} ]]; then
42 | usage
43 | echo "File ${VALUESFILE} for values.yaml does not exist"
44 | exit 1
45 | fi
46 |
47 | # Find out if this is OpenShift
48 |
49 | OC=/usr/bin/oc
50 |
51 | if [ -f "$OC" ]; then
52 | KUBECTL=oc
53 | ORCHESTRATOR=openshift
54 | else
55 | KUBECTL=kubectl
56 | ORCHESTRATOR=k8s
57 | fi
58 |
59 | # Discover which namespace we have installed PSO in
60 |
61 | NAMESPACE=`$KUBECTL get deployment --all-namespaces | grep pure-provisioner | awk '{print $1}' -`
62 | if [ -z "$NAMESPACE" ]; then
63 | echo "Error: Failed to identify namespace for PSO. Please ensure it is installed and running properly"
64 | exit 1
65 | fi
66 |
67 | # Discover the image we are currently using
68 | # For dark-sites we retain the registry location for the upgrade
69 |
70 | IMAGE_LOC=`$KUBECTL describe deployment pso-operator -n $NAMESPACE | grep Image | sed 's/ //g' | awk 'BEGIN{FS=":"};{print $2}' -`
71 | IMAGE_VER=`$KUBECTL describe deployment pso-operator -n $NAMESPACE | grep Image | sed 's/ //g' | awk 'BEGIN{FS=":"};{print $3}' -`
72 |
73 | if [ -z "$IMAGE_VER" ]; then
74 | echo "Error: Failed to identify image being used"
75 | exit 1
76 | fi
77 |
78 | if [ "$IMAGE_VER" == "$NEW_VERSION" ]; then
79 | echo "Error: New version already installed"
80 | exit 1
81 | fi
82 |
83 | # Quietly Upgrade PSO
84 |
85 | COLON=":"
86 |
87 | ./install.sh --image=$IMAGE_LOC$COLON$NEW_VERSION --namespace=$NAMESPACE --orchestrator=$ORCHESTRATOR -f $VALUESFILE > /dev/null 2>&1
88 |
89 | $KUBECTL rollout status deployment pure-provisioner -n $NAMESPACE >/dev/null 2>&1
90 |
--------------------------------------------------------------------------------
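`upgrade.sh` lower-cases the requested version and prepends a `v` when missing, so `--version=0.2.1` and `--version=v0.2.1` are equivalent:

```bash
./upgrade.sh -f values.yaml --version=0.2.1   # normalized internally to v0.2.1
```
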
/operator-k8s-plugin/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for k8s-plugin.
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | image:
6 | name: purestorage/k8s
7 | tag: 2.7.1
8 | pullPolicy: Always
9 |
10 | # this option is to enable/disable the debug mode of this app
11 | # for pure-provisioner and pure-flex-daemon
12 | app:
13 | debug: false
14 |
15 | # do you want to set pure as the default storageclass?
16 | storageclass:
17 | isPureDefault: false
18 | # set the type of backend you want for the 'pure' storageclass
19 | # pureBackend: file
20 |
21 | # specify the service account name for this app
22 | clusterrolebinding:
23 | serviceAccount:
24 | name: pure
25 |
26 | # supports ISCSI or FC (the value is case-insensitive)
27 | flasharray:
28 | sanType: ISCSI
29 | defaultFSType: xfs
30 | defaultFSOpt: "-q"
31 | defaultMountOpt: ""
32 | preemptAttachments: "true"
33 | iSCSILoginTimeout: 20
34 | iSCSIAllowedCIDR: ""
35 |
36 | flashblade:
37 | snapshotDirectoryEnabled: "false"
38 |
39 | # namespace.pure is the backend storage namespace where volumes/shares/etc
40 | # will be created.
42 | namespace:
43 | pure: k8s
44 |
45 | # support k8s or openshift
46 | # if you want to install flex into a different place, you need to
47 | # override the flexPath.
48 | orchestrator:
49 | # name is either 'k8s' or 'openshift'
50 | name: k8s
51 |
52 | # flexPath is for image.tag >= 2.0
53 | # `flexPath` needs to align with kubelet "volume-plugin-dir" configuration
54 | # by default in Kubernetes it is '/usr/libexec/kubernetes/kubelet-plugins/volume/exec'
55 | # Select an option below or customize for the environment.
56 |
57 | # Default for Kubernetes and OpenShift on RHEL Server
58 | flexPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
59 |
60 | # Default for Openshift 3.10+ on RHEL Atomic (containerized kubelet/origin-node)
61 | #flexPath: /etc/origin/kubelet-plugins/volume/exec
62 |
63 | # Default for Openshift 3.9 and lower with RHEL Atomic
64 | #flexPath : /usr/libexec/kubernetes/kubelet-plugins/volume/exec
65 |
66 | # Default for RKE
67 | #flexPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
68 |
69 | # Default for GKE
70 | #flexPath: /home/kubernetes/flexvolume
71 |
72 | # Default for Kubespray
73 | #flexPath: /var/lib/kubelet/volume-plugins
74 |
75 | #Default for OpenStack Magnum
76 | #flexPath: /var/lib/kubelet/volumeplugins
77 |
78 | # arrays specify what storage arrays should be managed by the plugin, this is
79 | # required to be set upon installation. For FlashArrays you must set the "MgmtEndPoint"
80 | # and "APIToken", and for FlashBlades you need the additional "NfsEndPoint" parameter.
81 | # The labels are optional, and can be any key-value pair for use with the "fleet"
82 | # provisioner. An example is shown below:
83 | arrays:
84 | #FlashArrays:
85 | # - MgmtEndPoint: "1.2.3.4"
86 | # APIToken: "a526a4c6-18b0-a8c9-1afa-3499293574bb"
87 | # Labels:
88 | # rack: "22"
89 | # env: "prod"
90 | # - MgmtEndPoint: "1.2.3.5"
91 | # APIToken: "b526a4c6-18b0-a8c9-1afa-3499293574bb"
92 | #FlashBlades:
93 | # - MgmtEndPoint: "1.2.3.6"
94 | # APIToken: "T-c4925090-c9bf-4033-8537-d24ee5669135"
95 | # NfsEndPoint: "1.2.3.7"
96 | # Labels:
97 | # rack: "7b"
98 | # env: "dev"
99 | # - MgmtEndPoint: "1.2.3.8"
100 | # APIToken: "T-d4925090-c9bf-4033-8537-d24ee5669135"
101 | # NfsEndPoint: "1.2.3.9"
102 | # Labels:
103 | # rack: "6a"
104 |
105 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
106 | nodeSelector: {}
107 | # disktype: ssd
108 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
109 | tolerations: []
110 | # - operator: Exists
111 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
112 | affinity: {}
113 | # nodeAffinity:
114 | # requiredDuringSchedulingIgnoredDuringExecution:
115 | # nodeSelectorTerms:
116 | # - matchExpressions:
117 | # - key: e2e-az-NorthSouth
118 | # operator: In
119 | # values:
120 | # - e2e-az-North
121 | # - e2e-az-South
122 |
123 | flexDaemon:
124 | # These values map directly to yaml in the daemonset spec, see the kubernetes docs for info
125 | nodeSelector: {}
126 | # disktype: ssd
127 | # These values map directly to yaml in the daemonset spec, see the kubernetes docs for info
128 | tolerations: []
129 | # - operator: Exists
130 | # These values map directly to yaml in the daemonset spec, see the kubernetes docs for info
131 | affinity: {}
132 | # nodeAffinity:
133 | # requiredDuringSchedulingIgnoredDuringExecution:
134 | # nodeSelectorTerms:
135 | # - matchExpressions:
136 | # - key: e2e-az-NorthSouth
137 | # operator: In
138 | # values:
139 | # - e2e-az-North
140 | # - e2e-az-South
141 |
142 | provisioner:
143 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
144 | nodeSelector: {}
145 | # disktype: ssd
146 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
147 | tolerations: []
148 | # - operator: Exists
149 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
150 | affinity: {}
151 | # nodeAffinity:
152 | # requiredDuringSchedulingIgnoredDuringExecution:
153 | # nodeSelectorTerms:
154 | # - matchExpressions:
155 | # - key: e2e-az-NorthSouth
156 | # operator: In
157 | # values:
158 | # - e2e-az-North
159 | # - e2e-az-South
160 |
--------------------------------------------------------------------------------
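`flexPath` must match the kubelet's `--volume-plugin-dir`. One quick check on a node is sketched below; the flag may instead be set in a kubelet config file, in which case inspect that file:

```bash
# look for an explicit volume plugin directory on the running kubelet
ps -ef | grep '[k]ubelet' | tr ' ' '\n' | grep -- --volume-plugin-dir
# no output means the kubelet default applies:
# /usr/libexec/kubernetes/kubelet-plugins/volume/exec
```
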
/operator-k8s-plugin/watches.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - version: v1
3 | group: purestorage.com
4 | kind: PSOPlugin
5 | chart: /opt/helm/helm-charts/pure-k8s-plugin
6 |
--------------------------------------------------------------------------------
/pure-csi/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *~
18 | # Various IDEs
19 | .project
20 | .idea/
21 | *.tmproj
22 |
--------------------------------------------------------------------------------
/pure-csi/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | description: A Helm chart for Pure Service Orchestrator CSI driver
3 | name: pure-csi
4 | version: 1.2.0
5 | appVersion: 1.2.0
6 | icon: https://raw.githubusercontent.com/purestorage/helm-charts/master/pure-csi/pure-storage.png
7 | keywords:
8 | - purestorage
9 | maintainers:
10 | - email: sales@purestorage.com
11 | name: Pure Storage, Inc.
12 |
--------------------------------------------------------------------------------
/pure-csi/pure-storage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/pure-csi/pure-storage.png
--------------------------------------------------------------------------------
/pure-csi/snapshotclass.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: snapshot.storage.k8s.io/v1alpha1
2 | kind: VolumeSnapshotClass
3 | metadata:
4 | name: pure-snapshotclass
5 | annotations:
6 | snapshot.storage.kubernetes.io/is-default-class: "true"
7 | snapshotter: pure-csi
8 | reclaimPolicy: Delete
9 |
--------------------------------------------------------------------------------
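A snapshot then references this class through the alpha API's `snapshotClassName` field. A minimal sketch, where the snapshot and PVC names are hypothetical:

```yaml
apiVersion: snapshot.storage.k8s.io/v1alpha1
kind: VolumeSnapshot
metadata:
  name: my-pvc-snap                 # hypothetical snapshot name
spec:
  snapshotClassName: pure-snapshotclass
  source:
    name: my-pvc                    # hypothetical PVC to snapshot
    kind: PersistentVolumeClaim
```
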
/pure-csi/snapshotclass_ose44.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: snapshot.storage.k8s.io/v1beta1
2 | kind: VolumeSnapshotClass
3 | metadata:
4 | name: pure-snapshotclass
5 | driver: pure-csi
6 | deletionPolicy: Delete
7 |
--------------------------------------------------------------------------------
/pure-csi/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Expand the name of the chart.
3 | */}}
4 | {{- define "pure-csi.name" -}}
5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
6 | {{- end -}}
7 |
8 | {{/* Create a chart_labels for each resources
9 | */}}
10 | {{- define "pure_csi.labels" -}}
11 | generator: helm
12 | chart: {{ .Chart.Name }}
13 | release: {{ .Release.Name | quote }}
14 | {{- end -}}
15 |
16 | {{/*
17 | Create a default fully qualified app name.
18 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
19 | If release name contains chart name it will be used as a full name.
20 | */}}
21 | {{- define "pure-csi.fullname" -}}
22 | {{- if .Values.fullnameOverride -}}
23 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
24 | {{- else -}}
25 | {{- $name := default .Chart.Name .Values.nameOverride -}}
26 | {{- if contains $name .Release.Name -}}
27 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}}
28 | {{- else -}}
29 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
30 | {{- end -}}
31 | {{- end -}}
32 | {{- end -}}
33 |
34 | {{/*
35 | Create chart name and version as used by the chart label.
36 | */}}
37 | {{- define "pure-csi.chart" -}}
38 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
39 | {{- end -}}
40 |
41 | {{/*
42 | Return the appropriate apiVersion for deployment.
43 | */}}
44 | {{- define "deployment.apiVersion" -}}
45 | {{- if semverCompare ">=1.9-0" .Capabilities.KubeVersion.GitVersion -}}
46 | {{- print "apps/v1" -}}
47 | {{- else -}}
48 | {{- print "apps/v1beta1" -}}
49 | {{- end -}}
50 | {{- end -}}
51 |
52 | {{/*
53 | Return the appropriate apiVersion for daemonset.
54 | */}}
55 | {{- define "daemonset.apiVersion" -}}
56 | {{- if semverCompare ">=1.9-0" .Capabilities.KubeVersion.GitVersion -}}
57 | {{- print "apps/v1" -}}
58 | {{- else -}}
59 | {{- print "apps/v1beta1" -}}
60 | {{- end -}}
61 | {{- end -}}
62 |
63 | {{/*
64 | Return the appropriate apiVersion for RBAC APIs.
65 | */}}
66 | {{- define "rbac.apiVersion" -}}
67 | {{- if semverCompare "^1.8-0" .Capabilities.KubeVersion.GitVersion -}}
68 | {{- print "rbac.authorization.k8s.io/v1" -}}
69 | {{- else -}}
70 | {{- print "rbac.authorization.k8s.io/v1beta1" -}}
71 | {{- end -}}
72 | {{- end -}}
73 |
74 | {{/*
75 | Return the appropriate apiVersion for statefulset.
76 | */}}
77 | {{- define "statefulset.apiVersion" -}}
78 | {{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}}
79 | {{- print "apps/v1beta2" -}}
80 | {{- else -}}
81 | {{- print "apps/v1" -}}
82 | {{- end -}}
83 | {{- end -}}
84 |
--------------------------------------------------------------------------------
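These helpers are consumed with `template` in the chart's manifests; for example, `provisioner.yaml` selects its StatefulSet apiVersion this way. A minimal usage sketch:

```yaml
kind: StatefulSet
apiVersion: {{ template "statefulset.apiVersion" . }}
```
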
/pure-csi/templates/node-configure.yaml:
--------------------------------------------------------------------------------
1 | # Automatic node configuration has only been tested on GKE.
2 | {{ if (.Capabilities.KubeVersion.GitVersion | regexMatch "gke") }}
3 | apiVersion: apps/v1
4 | kind: DaemonSet
5 | metadata:
6 | name: pso-node-config
7 | namespace: {{ .Release.Namespace }}
8 | spec:
9 | selector:
10 | matchLabels:
11 | name: pso-node-config
12 | template:
13 | metadata:
14 | labels:
15 | name: pso-node-config
16 | spec:
17 | hostPID: true
18 | containers:
19 | - name: pso-node-config
20 | image: "{{ .Values.image.name }}:{{ .Values.image.tag }}"
21 | imagePullPolicy: {{ .Values.image.pullPolicy }}
22 | command: ["/bin/sh", "-c", "/node-configure.sh"]
23 | securityContext:
24 | privileged: true
25 | {{ end }}
26 |
--------------------------------------------------------------------------------
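The GKE gate above matches against the API server's git version string (GKE clusters report versions like `v1.14.10-gke.27`). To see what the template would match on a live cluster:

```bash
kubectl version -o json | grep gitVersion
```
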
/pure-csi/templates/node.yaml:
--------------------------------------------------------------------------------
1 | # On K8s 1.12 and 1.13 you need to create this CRD on the cluster before the helm install.
2 | # This is because CSI is still alpha on these versions and the CSIDriver object is a CRD.
3 | # Our helm chart cannot create this CRD because it affects other CSI-drivers that may be installed
4 | # on the cluster.
5 | # See https://kubernetes-csi.github.io/docs/csi-driver-object.html for details.
6 | # The CRD definition is inlined here in comments if you want to copy and apply it.
7 | #
8 | # {{ if and (eq .Capabilities.KubeVersion.Major "1") (lt .Capabilities.KubeVersion.Minor "14") }}
9 | # apiVersion: apiextensions.k8s.io/v1beta1
10 | # kind: CustomResourceDefinition
11 | # metadata:
12 | # name: csidrivers.csi.storage.k8s.io
13 | # labels:
14 | # addonmanager.kubernetes.io/mode: Reconcile
15 | # spec:
16 | # group: csi.storage.k8s.io
17 | # names:
18 | # kind: CSIDriver
19 | # plural: csidrivers
20 | # scope: Cluster
21 | # validation:
22 | # openAPIV3Schema:
23 | # properties:
24 | # spec:
25 | # description: Specification of the CSI Driver.
26 | # properties:
27 | # attachRequired:
28 | # description: Indicates this CSI volume driver requires an attach operation,
29 | # and that Kubernetes should call attach and wait for any attach operation
30 | # to complete before proceeding to mount.
31 | # type: boolean
32 | # podInfoOnMountVersion:
33 | # description: Indicates this CSI volume driver requires additional pod
34 | # information (like podName, podUID, etc.) during mount operations.
35 | # type: string
36 | # version: v1alpha1
37 | # {{ end }}
38 | ---
39 | {{ if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
40 | # For Kubernetes v1.13, this is not needed, because
41 | # we should be using cluster-driver-registrar sidecar
42 | # container.
43 | # For v1.14, this object needs to be created manually.
44 | # When controller-attach-detach comes in, we will start
45 | # using the controller sidecar, and we can delete
46 | # this object.
47 | apiVersion: storage.k8s.io/v1beta1
48 | kind: CSIDriver
49 | metadata:
50 | name: pure-csi
51 | spec:
52 | attachRequired: false
53 | {{ end }}
54 | ---
55 | kind: DaemonSet
56 | apiVersion: {{ template "daemonset.apiVersion" . }}
57 | metadata:
58 | name: pure-csi
59 | namespace: {{ .Release.Namespace }}
60 | labels:
61 | {{ include "pure_csi.labels" . | indent 4}}
62 |
63 | spec:
64 | updateStrategy:
65 | type: RollingUpdate
66 | rollingUpdate:
67 | # CSI node plugin may not be running on some nodes in the cluster like master/infra nodes. This affects the maxUnavailable nodes for a RollingUpdate.
68 | # Set maxUnavailable to 100% so that a Rolling Update is possible for any cluster configuration.
69 | maxUnavailable: 100%
70 | selector:
71 | matchLabels:
72 | app: pure-csi
73 | template:
74 | metadata:
75 | labels:
76 | app: pure-csi
77 | {{ include "pure_csi.labels" . | indent 8}}
78 | spec:
79 | serviceAccountName: {{ .Values.clusterrolebinding.serviceAccount.name }}
80 | hostNetwork: true
81 | hostPID: true
82 | containers:
83 | - name: node-driver-registrar
84 | {{- with .Values.csi.nodeDriverRegistrar.image }}
85 | image: {{ .name | default "quay.io/k8scsi/csi-node-driver-registrar" }}:v1.3.0
86 | imagePullPolicy: {{ .pullPolicy }}
87 | {{- end }}
88 | args:
89 | - --csi-address=/csi/csi.sock
90 | - --kubelet-registration-path={{ .Values.orchestrator.basePath | default "/var/lib/kubelet" }}/plugins/pure-csi/csi.sock
91 | securityContext:
92 | privileged: true
93 | env:
94 | - name: KUBE_NODE_NAME
95 | valueFrom:
96 | fieldRef:
97 | apiVersion: v1
98 | fieldPath: spec.nodeName
99 | volumeMounts:
100 | - mountPath: /csi
101 | name: socket-dir
102 | - mountPath: /registration
103 | name: registration-dir
104 | - mountPath: /csi-data-dir
105 | name: csi-data-dir
106 |
107 | - name: pure-csi-container
108 | image: "{{ .Values.image.name }}:{{ .Values.image.tag }}"
109 | imagePullPolicy: {{ .Values.image.pullPolicy }}
110 | command:
111 | - "/csi-server"
112 | - "-endpoint=$(CSI_ENDPOINT)"
113 | - "-nodeid=$(KUBE_NODE_NAME)"
114 | {{- if eq .Values.app.debug true}}
115 | - "-debug"
116 | {{- end}}
117 | env:
118 | - name: CSI_ENDPOINT
119 | value: unix:///csi/csi.sock
120 | - name: KUBE_NODE_NAME
121 | valueFrom:
122 | fieldRef:
123 | apiVersion: v1
124 | fieldPath: spec.nodeName
125 | - name: PURE_DISCOVERY_CONF
126 | value: /etc/pure/pure.json
127 | - name: PURE_FLASHARRAY_SAN_TYPE
128 | value: {{ .Values.flasharray.sanType | upper }}
129 | - name: PURE_K8S_NAMESPACE
130 | value: {{ .Values.namespace.pure }}
131 | - name: PURE_DEFAULT_BLOCK_FS_TYPE
132 | value: {{ .Values.flasharray.defaultFSType }}
133 | - name: PURE_DEFAULT_BLOCK_FS_OPT
134 | value: "{{ .Values.flasharray.defaultFSOpt }}"
135 | {{- $defaultMountOptString := "" }}
136 | # support either string or list for .Values.flasharray.defaultMountOpt
137 | {{- if kindIs "string" .Values.flasharray.defaultMountOpt }}
138 | {{- $defaultMountOptString = .Values.flasharray.defaultMountOpt }}
139 | {{- else if or (kindIs "array" .Values.flasharray.defaultMountOpt) (kindIs "slice" .Values.flasharray.defaultMountOpt) }}
140 | {{- range .Values.flasharray.defaultMountOpt }}
141 | {{- $defaultMountOptString = printf "%s %s" $defaultMountOptString . }}
142 | {{- end}}
143 | {{- end}}
144 | - name: PURE_DEFAULT_BLOCK_MNT_OPT
145 | value: "{{$defaultMountOptString |trim}}"
146 | - name: PURE_PREEMPT_RWO_ATTACHMENTS_DEFAULT
147 | value: "{{ .Values.flasharray.preemptAttachments }}"
148 | - name: PURE_ISCSI_ALLOWED_CIDRS
149 | value: "{{ .Values.flasharray.iSCSIAllowedCIDR }}"
150 | - name: PURE_ISCSI_LOGIN_TIMEOUT
151 | value: "{{ .Values.flasharray.iSCSILoginTimeout }}"
152 | securityContext:
153 | privileged: true
154 | ports:
155 | - containerPort: 9898
156 | name: healthz
157 | protocol: TCP
158 | livenessProbe:
159 | failureThreshold: 5
160 | httpGet:
161 | path: /healthz
162 | port: healthz
163 | initialDelaySeconds: 10
164 | timeoutSeconds: 3
165 | periodSeconds: 2
166 | volumeMounts:
167 | - mountPath: /etc/pure
168 | name: config
169 | readOnly: true
170 | - mountPath: /dev
171 | name: dev
172 | - mountPath: /csi
173 | name: socket-dir
174 | - mountPath: /var/lib/kubelet/pods
175 | mountPropagation: Bidirectional
176 | name: mountpoint-dir
177 | - mountPath: /var/lib/kubelet/plugins
178 | mountPropagation: Bidirectional
179 | name: plugins-dir
180 | - mountPath: /csi-data-dir
181 | name: csi-data-dir
182 |
183 | - name: liveness-probe
184 | volumeMounts:
185 | - mountPath: /csi
186 | name: socket-dir
187 | {{- with .Values.csi.livenessProbe.image }}
188 | image: {{ .name | default "quay.io/k8scsi/livenessprobe" }}:v2.0.0
189 | imagePullPolicy: {{ .pullPolicy }}
190 | {{- end }}
191 | args:
192 | - --csi-address=/csi/csi.sock
193 | - --probe-timeout=3s
194 | - --health-port=9898
195 | - --v=0
196 | volumes:
197 | - secret:
198 | secretName: pure-provisioner-secret
199 | name: config
200 | - hostPath:
201 | path: /dev
202 | name: dev
203 | - hostPath:
204 | path: {{ .Values.orchestrator.basePath | default "/var/lib/kubelet" }}/plugins/pure-csi
205 | type: DirectoryOrCreate
206 | name: socket-dir
207 | - hostPath:
208 | path: {{ .Values.orchestrator.basePath | default "/var/lib/kubelet" }}/pods
209 | type: DirectoryOrCreate
210 | name: mountpoint-dir
211 | - hostPath:
212 | path: {{ .Values.orchestrator.basePath | default "/var/lib/kubelet" }}/plugins_registry
213 | type: Directory
214 | name: registration-dir
215 | - hostPath:
216 | path: {{ .Values.orchestrator.basePath | default "/var/lib/kubelet" }}/plugins
217 | type: Directory
218 | name: plugins-dir
219 | - hostPath:
220 | # 'path' is where PV data is persisted on host.
221 | # using /tmp is also possible, though PVs will not be available after plugin container recreation or host reboot
222 | path: /var/lib/pure-csi-data/
223 | type: DirectoryOrCreate
224 | name: csi-data-dir
225 | {{- if .Values.mounter -}}
226 | {{- with .Values.mounter.nodeSelector | default .Values.nodeSelector }}
227 | nodeSelector:
228 | {{ toYaml . | indent 8 }}
229 | {{- end }}
230 | {{- with .Values.mounter.affinity | default .Values.affinity }}
231 | affinity:
232 | {{ toYaml . | indent 8 }}
233 | {{- end }}
234 | {{- with .Values.mounter.tolerations | default .Values.tolerations }}
235 | tolerations:
236 | {{ toYaml . | indent 8 }}
237 | {{- end }}
238 | {{- else -}}
239 | # Look at the flexDaemon nodeSelector values too because we may be using a flex-plugin values.yaml and we honor the old values.yaml
240 | {{- if .Values.flexDaemon -}}
241 | {{- with .Values.flexDaemon.nodeSelector | default .Values.nodeSelector }}
242 | nodeSelector:
243 | {{ toYaml . | indent 8 }}
244 | {{- end }}
245 | {{- with .Values.flexDaemon.affinity | default .Values.affinity }}
246 | affinity:
247 | {{ toYaml . | indent 8 }}
248 | {{- end }}
249 | {{- with .Values.flexDaemon.tolerations | default .Values.tolerations }}
250 | tolerations:
251 | {{ toYaml . | indent 8 }}
252 | {{- end }}
253 | {{- end -}}
254 | {{- end -}}
255 |
--------------------------------------------------------------------------------
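One detail worth calling out from the template above: `flasharray.defaultMountOpt` is accepted either as a single string or as a list, and both are flattened into the space-separated `PURE_DEFAULT_BLOCK_MNT_OPT` environment variable. The two hypothetical values snippets below render identically:

```yaml
# string form
flasharray:
  defaultMountOpt: "discard noatime"
---
# list form; the template joins this to "discard noatime"
flasharray:
  defaultMountOpt:
    - discard
    - noatime
```
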
/pure-csi/templates/provisioner.yaml:
--------------------------------------------------------------------------------
1 | # Service defined here, plus serviceName below in StatefulSet,
2 | # are needed only because of condition explained in
3 | # https://github.com/kubernetes/kubernetes/issues/69608
4 | kind: Service
5 | apiVersion: v1
6 | metadata:
7 | name: pure-provisioner
8 | labels:
9 | app: pure-provisioner
10 | spec:
11 | selector:
12 | app: pure-provisioner
13 | ports:
14 | - name: dummy
15 | port: 12345
16 | ---
17 | # Why does this need to be a statefulset?
18 | # Because we need only one copy of the provisioner running
19 | kind: StatefulSet
20 | apiVersion: {{ template "statefulset.apiVersion" . }}
21 | metadata:
22 | name: pure-provisioner
23 | namespace: {{ .Release.Namespace }}
24 | labels:
25 | {{ include "pure_csi.labels" . | indent 4}}
26 |
27 | spec:
28 | serviceName: "pure-provisioner"
29 | replicas: 1
30 | selector:
31 | matchLabels:
32 | app: pure-provisioner
33 | template:
34 | metadata:
35 | labels:
36 | app: pure-provisioner
37 | {{ include "pure_csi.labels" . | indent 8}}
38 | spec:
39 | serviceAccountName: {{ .Values.clusterrolebinding.serviceAccount.name }}
40 | containers:
41 | - name: pure-csi-container
42 | image: "{{ .Values.image.name }}:{{ .Values.image.tag }}"
43 | imagePullPolicy: {{ .Values.image.pullPolicy }}
44 | command:
45 | - "/csi-server"
46 | - "-endpoint=$(CSI_ENDPOINT)"
47 | - "-nodeid=$(KUBE_NODE_NAME)"
48 | {{- if eq .Values.app.debug true}}
49 | - "-debug"
50 | {{- end}}
51 | volumeMounts:
52 | - name: socket-dir
53 | mountPath: /csi
54 | - name: config
55 | mountPath: /etc/pure
56 | readOnly: true
57 | env:
58 | - name: CSI_ENDPOINT
59 | value: unix:///csi/csi.sock
60 | - name: PURE_DISCOVERY_CONF
61 | value: /etc/pure/pure.json
62 | - name: PURE_K8S_NAMESPACE
63 | value: {{ .Values.namespace.pure }}
64 | - name: PURE_DEFAULT_BLOCK_FS_TYPE
65 | value: {{ .Values.flasharray.defaultFSType }}
66 | - name: PURE_DEFAULT_ENABLE_FB_NFS_SNAPSHOT
67 | value: {{ quote .Values.flashblade.snapshotDirectoryEnabled }}
68 |
69 | # This is the external provisioner sidecar
70 | - name: csi-provisioner
71 | {{- with .Values.csi.provisioner.image }}
72 | image: {{ .name | default "quay.io/k8scsi/csi-provisioner" }}:v1.4.0
73 | imagePullPolicy: {{ .pullPolicy }}
74 | {{- end }}
75 | args:
76 | - --csi-address=/csi/csi.sock
77 | - --connection-timeout=15s
78 | - --feature-gates=Topology={{ .Values.storagetopology.enable }}
79 | {{- if and .Values.storagetopology.strictTopology .Values.storagetopology.enable}}
80 | - --strict-topology
81 | {{- end}}
82 | volumeMounts:
83 | - name: socket-dir
84 | mountPath: /csi
85 | # Google Anthos (which is built on GKE) prohibits alpha snapshotters
86 | {{- if not (.Capabilities.KubeVersion.GitVersion | regexMatch "gke") }}
87 | - name: csi-snapshotter
88 | {{- with .Values.csi.snapshotter.image }}
89 | image: {{ .name | default "quay.io/k8scsi/csi-snapshotter" }}:v1.2.2
90 | imagePullPolicy: {{ .pullPolicy }}
91 | {{- end }}
92 | args:
93 | - "--csi-address=/csi/csi.sock"
94 | - "--connection-timeout=15s"
95 | - "--leader-election=false"
96 | volumeMounts:
97 | - name: socket-dir
98 | mountPath: /csi
99 | {{- end }}
100 | # The csi-resizer sidecar that watches the Kubernetes API server for PersistentVolumeClaim updates.
101 | # Does not scale with more replicas, only one is elected as leader and running.
102 | # PSO requires K8s 1.16+ for CSI VolumeExpansion
103 | {{ if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }}
104 | - name: csi-resizer
105 | {{- with .Values.csi.resizer.image }}
106 | image: {{ .name | default "quay.io/k8scsi/csi-resizer" }}:v0.5.0
107 | imagePullPolicy: {{ .pullPolicy }}
108 | {{- end }}
109 | args:
110 | - "--csi-address=/csi/csi.sock"
111 | - "--csiTimeout=15s"
112 | - "--leader-election=false"
113 | volumeMounts:
114 | - name: socket-dir
115 | mountPath: /csi
116 | {{ end }}
117 | # This is the cluster-driver-registrar sidecar that allows helm-install without CRD-hooks for the CSIDriver CRD
118 | # The reason we do not want a crd-hook with helm-chart is to avoid upgrade issues like: https://github.com/helm/helm/issues/4489
119 | {{ if and (eq .Capabilities.KubeVersion.Major "1") (eq .Capabilities.KubeVersion.Minor "13") }}
120 | - name: cluster-driver-registrar
121 | {{- with .Values.csi.clusterDriverRegistrar.image }}
122 | image: {{ .name | default "quay.io/k8scsi/csi-cluster-driver-registrar" }}:v1.0.1
123 | imagePullPolicy: {{ .pullPolicy }}
124 | {{- end }}
125 | args:
126 | - "--csi-address=/csi/csi.sock"
127 | - "--driver-requires-attachment=false"
128 | volumeMounts:
129 | - name: socket-dir
130 | mountPath: /csi
131 | {{ end }}
132 | volumes:
133 | - name: socket-dir
134 | emptyDir: {}
135 | - name: config
136 | secret:
137 | secretName: pure-provisioner-secret
138 | {{- with .Values.provisioner.nodeSelector | default .Values.nodeSelector }}
139 | nodeSelector:
140 | {{ toYaml . | indent 8 }}
141 | {{- end }}
142 | {{- with .Values.provisioner.affinity | default .Values.affinity }}
143 | affinity:
144 | {{ toYaml . | indent 8 }}
145 | {{- end }}
146 | {{- with .Values.provisioner.tolerations | default .Values.tolerations }}
147 | tolerations:
148 | {{ toYaml . | indent 8 }}
149 | {{- end }}
150 |
--------------------------------------------------------------------------------
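Which sidecars actually run alongside the provisioner depends on the version gates above: the snapshotter is skipped on GKE, the resizer requires Kubernetes 1.16+, and the cluster-driver-registrar only appears on 1.13. One way to inspect the rendered result on a live cluster (the namespace is a placeholder):

```bash
kubectl -n <namespace> get statefulset pure-provisioner \
  -o jsonpath='{.spec.template.spec.containers[*].name}'
```
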
/pure-csi/templates/rbac.yaml:
--------------------------------------------------------------------------------
1 | # This file is downloaded from
2 | # https://raw.githubusercontent.com/kubernetes-csi/external-provisioner/master/deploy/kubernetes/rbac.yaml
3 |
4 | # This YAML file contains all RBAC objects that are necessary to run external
5 | # CSI provisioner.
6 | #
7 | # In production, each CSI driver deployment has to be customized:
8 | # - to avoid conflicts, use non-default namespace and different names
9 | # for non-namespaced entities like the ClusterRole
10 | # - decide whether the deployment replicates the external CSI
11 | # provisioner, in which case leadership election must be enabled;
12 | # this influences the RBAC setup, see below
13 |
14 | apiVersion: v1
15 | kind: ServiceAccount
16 | metadata:
17 | name: {{ .Values.clusterrolebinding.serviceAccount.name }}
18 | namespace: {{ .Release.Namespace }}
19 |
20 | ---
21 | kind: ClusterRole
22 | apiVersion: {{ template "rbac.apiVersion" . }}
23 | metadata:
24 | name: external-provisioner-runner
25 | labels:
26 | {{ include "pure_csi.labels" . | indent 4}}
27 | rules:
28 | # The following rule should be uncommented for plugins that require secrets
29 | # for provisioning.
30 | # - apiGroups: [""]
31 | # resources: ["secrets"]
32 | # verbs: ["get", "list"]
33 | - apiGroups: [""]
34 | resources: ["persistentvolumes"]
35 | verbs: ["get", "list", "watch", "create", "delete"]
36 | - apiGroups: [""]
37 | resources: ["persistentvolumeclaims"]
38 | verbs: ["get", "list", "watch", "update"]
39 | - apiGroups: ["storage.k8s.io"]
40 | resources: ["storageclasses"]
41 | verbs: ["get", "list", "watch"]
42 | - apiGroups: [""]
43 | resources: ["events"]
44 | verbs: ["list", "watch", "create", "update", "patch"]
45 | - apiGroups: ["snapshot.storage.k8s.io"]
46 | resources: ["volumesnapshots"]
47 | verbs: ["get", "list", "watch", "update"]
48 | - apiGroups: ["snapshot.storage.k8s.io"]
49 | resources: ["volumesnapshots/status"]
50 | verbs: ["update"]
51 | - apiGroups: ["snapshot.storage.k8s.io"]
52 | resources: ["volumesnapshotcontents"]
53 | verbs: ["create", "get", "list", "watch", "update", "delete"]
54 | - apiGroups: ["storage.k8s.io"]
55 | resources: ["csinodes"]
56 | verbs: ["get", "list", "watch"]
57 | - apiGroups: [""]
58 | resources: ["nodes"]
59 | verbs: ["get", "list", "watch"]
60 | - apiGroups: ["snapshot.storage.k8s.io"]
61 | resources: ["volumesnapshotclasses"]
62 | verbs: ["get", "list", "watch"]
63 | - apiGroups: ["apiextensions.k8s.io"]
64 | resources: ["customresourcedefinitions"]
65 | verbs: ["create", "list", "watch", "delete", "get", "update"]
66 | - apiGroups: [""]
67 | resources: ["pods", "nodes"]
68 | verbs: ["get", "watch", "list"]
69 |
70 | ---
71 | kind: ClusterRoleBinding
72 | apiVersion: {{ template "rbac.apiVersion" . }}
73 | metadata:
74 | name: csi-provisioner-role
75 | labels:
76 | {{ include "pure_csi.labels" . | indent 4}}
77 | subjects:
78 | - kind: ServiceAccount
79 | name: {{ .Values.clusterrolebinding.serviceAccount.name }}
80 | namespace: {{ .Release.Namespace }}
81 | roleRef:
82 | kind: ClusterRole
83 | name: external-provisioner-runner
84 | apiGroup: rbac.authorization.k8s.io
85 |
86 | ---
87 | kind: ClusterRoleBinding
88 | apiVersion: {{ template "rbac.apiVersion" . }}
89 | metadata:
90 | name: csi-snapshotter-role
91 | labels:
92 | {{ include "pure_csi.labels" . | indent 4}}
93 | subjects:
94 | - kind: ServiceAccount
95 | name: {{ .Values.clusterrolebinding.serviceAccount.name }}
96 | namespace: {{ .Release.Namespace }}
97 | roleRef:
98 | kind: ClusterRole
99 | name: external-provisioner-runner
100 | apiGroup: rbac.authorization.k8s.io
101 | ---
102 | kind: ClusterRoleBinding
103 | apiVersion: {{ template "rbac.apiVersion" . }}
104 | metadata:
105 | name: pure-topology-role
106 | subjects:
107 | - kind: ServiceAccount
108 | name: default
109 | namespace: {{ .Release.Namespace }}
110 | roleRef:
111 | kind: ClusterRole
112 | name: external-provisioner-runner
113 | apiGroup: rbac.authorization.k8s.io
114 | ---
115 |
116 | {{ if and (eq .Capabilities.KubeVersion.Major "1") (eq .Capabilities.KubeVersion.Minor "13") }}
117 | # This file is downloaded from https://github.com/kubernetes-csi/cluster-driver-registrar/archive/v1.0.1.tar.gz
118 | # NOTE: this repo is only for Kubernetes versions 1.12 and 1.13. The rbac in the master branch is not maintained so use the
119 | # one in the released branch.
120 |
121 | # This YAML file contains all RBAC objects that are necessary to run cluster-driver-registrar sidecar because our
122 | # CSI driver does not yet support ControllerPublish/Unpublish and we need the 'SkipAttach' functionality.
123 | # Also, this sidecar is not used in 1.14, so this is only for 1.13 right now.
124 | # NOTE: Kubernetes 1.12 needs a different sidecar : https://github.com/kubernetes-csi/driver-registrar.
125 | #
126 | # In production, each CSI driver deployment has to be customized:
127 | # - to avoid conflicts, use non-default namespace and different names
128 | # for non-namespaced entities like the ClusterRole
129 | # - decide whether the deployment replicates the external CSI
130 | # provisioner, in which case leadership election must be enabled;
131 | # this influences the RBAC setup, see below
132 |
133 | kind: ClusterRole
134 | apiVersion: {{ template "rbac.apiVersion" . }}
135 | metadata:
136 | name: driver-registrar-runner
137 | labels:
138 | {{ include "pure_csi.labels" . | indent 4}}
139 | rules:
140 | - apiGroups: [""]
141 | resources: ["events"]
142 | verbs: ["get", "list", "watch", "create", "update", "patch"]
143 | # The following permissions are only needed when running
144 | # driver-registrar without the --kubelet-registration-path
145 | # parameter, i.e. when using driver-registrar instead of
146 | # kubelet to update the csi.volume.kubernetes.io/nodeid
147 | # annotation. That mode of operation is going to be deprecated
148 | # and should not be used anymore, but is needed on older
149 | # Kubernetes versions.
150 | # - apiGroups: [""]
151 | # resources: ["nodes"]
152 | # verbs: ["get", "update", "patch"]
153 | # ** This is needed for Helm to install the CRD for CSIDriver
154 | - apiGroups: ["apiextensions.k8s.io"]
155 | resources: ["customresourcedefinitions"]
156 | verbs: ['*']
157 | - apiGroups: ["csi.storage.k8s.io"]
158 | resources: ["csidrivers"]
159 | verbs: ['*']
160 | ---
161 | kind: ClusterRoleBinding
162 | apiVersion: {{ template "rbac.apiVersion" . }}
163 | metadata:
164 | name: csi-driver-registrar-role
165 | labels:
166 | {{ include "pure_csi.labels" . | indent 4}}
167 | subjects:
168 | - kind: ServiceAccount
169 | name: {{ .Values.clusterrolebinding.serviceAccount.name }}
170 | namespace: {{ .Release.Namespace }}
171 | roleRef:
172 | kind: ClusterRole
173 | name: driver-registrar-runner
174 | apiGroup: rbac.authorization.k8s.io
175 |
176 | {{ end }}
177 |
178 | {{ if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }}
179 | # PSO requires K8s 1.16+ for CSI VolumeExpansion
180 | # This file is downloaded from https://github.com/kubernetes-csi/external-resizer/blob/master/deploy/kubernetes/rbac.yaml
181 | ---
182 | kind: ClusterRole
183 | apiVersion: {{ template "rbac.apiVersion" . }}
184 | metadata:
185 | name: external-resizer-runner
186 | labels:
187 | {{ include "pure_csi.labels" . | indent 4}}
188 | rules:
189 | # The following rule should be uncommented for plugins that require secrets
190 | # for provisioning.
191 | # - apiGroups: [""]
192 | # resources: ["secrets"]
193 | # verbs: ["get", "list", "watch"]
194 | - apiGroups: [""]
195 | resources: ["persistentvolumes"]
196 | verbs: ["get", "list", "watch", "update", "patch"]
197 | - apiGroups: [""]
198 | resources: ["persistentvolumeclaims"]
199 | verbs: ["get", "list", "watch"]
200 | - apiGroups: [""]
201 | resources: ["persistentvolumeclaims/status"]
202 | verbs: ["update", "patch"]
203 | - apiGroups: [""]
204 | resources: ["events"]
205 | verbs: ["list", "watch", "create", "update", "patch"]
206 | ---
207 |
208 | kind: ClusterRoleBinding
209 | apiVersion: {{ template "rbac.apiVersion" . }}
210 | metadata:
211 | name: csi-resizer-role
212 | labels:
213 | {{ include "pure_csi.labels" . | indent 4}}
214 | subjects:
215 | - kind: ServiceAccount
216 | name: {{ .Values.clusterrolebinding.serviceAccount.name }}
217 | namespace: {{ .Release.Namespace }}
218 | roleRef:
219 | kind: ClusterRole
220 | name: external-resizer-runner
221 | apiGroup: rbac.authorization.k8s.io
222 | {{ end }}
223 |
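
As a quick sanity check after installation (a sketch; the `pso` namespace is an assumption, while `pure` is the chart's default `clusterrolebinding.serviceAccount.name`), `kubectl auth can-i` can confirm the bindings above took effect:

```bash
# Impersonate the provisioner service account and probe two of the verbs
# granted by the external-provisioner-runner ClusterRole.
NS=pso   # adjust to the release namespace
kubectl auth can-i create persistentvolumes \
  --as="system:serviceaccount:${NS}:pure"
kubectl auth can-i update volumesnapshots.snapshot.storage.k8s.io \
  --as="system:serviceaccount:${NS}:pure"
```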
--------------------------------------------------------------------------------
/pure-csi/templates/scc.yaml:
--------------------------------------------------------------------------------
1 | {{- if eq .Values.orchestrator.name "openshift"}}
2 | apiVersion: security.openshift.io/v1
3 | kind: SecurityContextConstraints
4 | metadata:
5 | name: pso-scc
6 |
7 | allowHostDirVolumePlugin: true
8 | allowHostIPC: false
9 | allowHostNetwork: true
10 | allowHostPID: true
11 | allowHostPorts: true
12 | allowPrivilegeEscalation: true
13 | allowPrivilegedContainer: true
14 | allowedCapabilities: null
15 |
16 | defaultAddCapabilities: null
17 | fsGroup:
18 | type: RunAsAny
19 | groups: []
20 | priority: null
21 | readOnlyRootFilesystem: false
22 | requiredDropCapabilities: null
23 | runAsUser:
24 | type: RunAsAny
25 | seLinuxContext:
26 | type: RunAsAny
27 | supplementalGroups:
28 | type: RunAsAny
29 | users:
30 | - system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.clusterrolebinding.serviceAccount.name }}
31 | volumes:
32 | # Allow all volume types (we specifically use hostPath and secrets)
33 | - '*'
34 | {{- end}}
35 |
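
The constraint above only renders when the chart is told it is running on OpenShift. A hypothetical Helm 2-style install and follow-up check (release name and namespace are assumptions):

```bash
# Enable the SCC by declaring the orchestrator, then verify it grants the
# chart's service account.
helm install pure/pure-csi --name pure-storage-driver \
  --namespace pso --set orchestrator.name=openshift
oc get scc pso-scc -o yaml | grep -A2 'users:'
```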
--------------------------------------------------------------------------------
/pure-csi/templates/secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: pure-provisioner-secret
5 | namespace: {{ .Release.Namespace }}
6 | labels:
7 | {{ include "pure_csi.labels" . | indent 4}}
8 | data:
9 | pure.json: {{ .Values.arrays | toJson | b64enc | quote }}
10 |
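
Since the array configuration is just `values.arrays` serialized to JSON and base64-encoded, it can be recovered for troubleshooting (the namespace is an assumption):

```bash
# Decode the rendered pure.json back out of the secret.
kubectl get secret pure-provisioner-secret -n pso \
  -o jsonpath='{.data.pure\.json}' | base64 -d
```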
--------------------------------------------------------------------------------
/pure-csi/templates/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ include "pure-csi.fullname" . }}
5 | labels:
6 | app.kubernetes.io/name: {{ include "pure-csi.name" . }}
7 | helm.sh/chart: {{ include "pure-csi.chart" . }}
8 | app.kubernetes.io/instance: {{ .Release.Name }}
9 | app.kubernetes.io/managed-by: {{ .Release.Service }}
10 | spec:
11 | selector:
12 | app.kubernetes.io/name: {{ include "pure-csi.name" . }}
13 | app.kubernetes.io/instance: {{ .Release.Name }}
14 | ports:
15 | - name: dummy
16 | port: 12345
17 |
18 |
--------------------------------------------------------------------------------
/pure-csi/templates/storageclass.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.storageclass.createBuiltIn }}
2 | apiVersion: storage.k8s.io/v1
3 | kind: StorageClass
4 | metadata:
5 | name: pure
6 | annotations:
7 | storageclass.kubernetes.io/is-default-class: "{{ .Values.storageclass.isPureDefault }}"
8 | labels:
9 | kubernetes.io/cluster-service: "true"
10 | {{ include "pure_csi.labels" . | indent 4}}
11 | provisioner: pure-csi # This must match the name of the CSIDriver and the name of the CSI plugin returned by the 'GetPluginInfo' RPC
12 | parameters:
13 | backend: {{ .Values.storageclass.pureBackend | default "block" | quote }}
14 | {{- if ne .Values.storageclass.pureBackend "file"}}
15 | csi.storage.k8s.io/fstype: {{ .Values.flasharray.defaultFSType | default "xfs" | quote }}
16 | createoptions: {{ .Values.flasharray.defaultFSOpt | default "-q" | quote }}
17 | {{- end }}
18 | # PSO requires K8s 1.16+ for CSI VolumeExpansion
19 | {{ if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }}
20 | allowVolumeExpansion: true
21 | {{ end }}
22 | {{- if eq .Values.storageclass.pureBackend "file"}}
23 | mountOptions:
24 | {{- else -}}
25 | # support either string or list for .Values.flasharray.defaultMountOpt
26 | {{- if or (kindIs "array" .Values.flasharray.defaultMountOpt) (kindIs "slice" .Values.flasharray.defaultMountOpt) }}
27 | mountOptions:
28 | {{- range .Values.flasharray.defaultMountOpt }}
29 | - {{ . }}
30 | {{- end }}
31 | {{- else if kindIs "string" .Values.flasharray.defaultMountOpt}}
32 | mountOptions:
33 | - {{ .Values.flasharray.defaultMountOpt }}
34 | {{- end }}
35 | {{- end }}
36 | ---
37 | kind: StorageClass
38 | apiVersion: storage.k8s.io/v1
39 | metadata:
40 | name: pure-file
41 | labels:
42 | kubernetes.io/cluster-service: "true"
43 | {{ include "pure_csi.labels" . | indent 4}}
44 | provisioner: pure-csi # This must match the name of the CSIDriver and the name of the CSI plugin returned by the 'GetPluginInfo' RPC
45 | parameters:
46 | backend: file
47 | # PSO requires K8s 1.16+ for CSI VolumeExpansion
48 | {{ if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }}
49 | allowVolumeExpansion: true
50 | {{ end }}
51 | ---
52 | kind: StorageClass
53 | apiVersion: storage.k8s.io/v1
54 | metadata:
55 | name: pure-block
56 | labels:
57 | kubernetes.io/cluster-service: "true"
58 | {{ include "pure_csi.labels" . | indent 4}}
59 | provisioner: pure-csi # This must match the name of the CSIDriver and the name of the CSI plugin returned by the 'GetPluginInfo' RPC
60 | parameters:
61 | backend: block
62 | csi.storage.k8s.io/fstype: xfs
63 | createoptions: -q
64 | # PSO requires K8s 1.16+ for CSI VolumeExpansion
65 | {{ if semverCompare ">=1.16-0" .Capabilities.KubeVersion.GitVersion }}
66 | allowVolumeExpansion: true
67 | {{ end }}
68 | # support either string or list for .Values.flasharray.defaultMountOpt
69 | {{- if or (kindIs "array" .Values.flasharray.defaultMountOpt) (kindIs "slice" .Values.flasharray.defaultMountOpt) }}
70 | mountOptions:
71 | {{- range .Values.flasharray.defaultMountOpt }}
72 | - {{ . }}
73 | {{- end }}
74 | {{- else if kindIs "string" .Values.flasharray.defaultMountOpt}}
75 | mountOptions:
76 | - {{ .Values.flasharray.defaultMountOpt }}
77 | {{- end }}
78 | {{- end }}
79 |
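
The template accepts `flasharray.defaultMountOpt` as either a single string or a list and renders the same `mountOptions` block for both. A sketch of checking this locally with `helm template` (the file names are arbitrary):

```bash
# Render the chart with each form and compare the mountOptions output.
cat > mountopt-list.yaml <<'EOF'
flasharray:
  defaultMountOpt:
    - discard
EOF
cat > mountopt-string.yaml <<'EOF'
flasharray:
  defaultMountOpt: "discard"
EOF
helm template ./pure-csi -f mountopt-list.yaml   | grep -A2 'mountOptions:'
helm template ./pure-csi -f mountopt-string.yaml | grep -A2 'mountOptions:'
```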
--------------------------------------------------------------------------------
/pure-csi/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for csi-plugin.
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | image:
6 | name: "purestorage/k8s"
7 | tag: "5.2.0"
8 | pullPolicy: "Always"
9 |
10 | csi:
11 | provisioner:
12 | image:
13 | name: "quay.io/k8scsi/csi-provisioner"
14 | pullPolicy: "Always"
15 | snapshotter:
16 | image:
17 | name: "quay.io/k8scsi/csi-snapshotter"
18 | pullPolicy: "Always"
19 | resizer:
20 | image:
21 | name: "quay.io/k8scsi/csi-resizer"
22 | pullPolicy: "Always"
23 | clusterDriverRegistrar:
24 | image:
25 | name: "quay.io/k8scsi/csi-cluster-driver-registrar"
26 | pullPolicy: "Always"
27 | nodeDriverRegistrar:
28 | image:
29 | name: "quay.io/k8scsi/csi-node-driver-registrar"
30 | pullPolicy: "Always"
31 | livenessProbe:
32 | image:
33 | name: "quay.io/k8scsi/livenessprobe"
34 | pullPolicy: "Always"
35 |
36 | # this option is to enable/disable the csi topology feature
37 | # for pure-csi-driver
38 | storagetopology:
39 | enable: false
40 | strictTopology: false
41 |
42 |
43 | # this option is to enable/disable the debug mode of this app
44 | # for pure-csi-driver
45 | app:
46 | debug: false
47 |
48 | # do you want to set pure as the default storageclass?
49 | storageclass:
50 | # create the built-in StorageClasses 'pure', 'pure-file' and 'pure-block'?
51 | createBuiltIn: true
52 | isPureDefault: false
53 | # set the type of backend you want for the 'pure' storageclass
54 | pureBackend: "block"
55 |
56 | # specify the service account name for this app
57 | clusterrolebinding:
58 | serviceAccount:
59 | name: "pure"
60 |
61 | # support ISCSI or FC, not case sensitive
62 | flasharray:
63 | sanType: "ISCSI"
64 | defaultFSType: "xfs"
65 | defaultFSOpt: "-q"
66 | defaultMountOpt:
67 | - discard
68 | preemptAttachments: "true"
69 | iSCSILoginTimeout: 20
70 | iSCSIAllowedCIDR: ""
71 |
72 | flashblade:
73 | snapshotDirectoryEnabled: "false"
74 |
75 | # namespace.pure below is the backend storage namespace where volumes/shares/etc
76 | # will be created; the other namespace this app uses is the Helm release
77 | # namespace, where the plugin itself runs.
78 | # Values for namespace.pure can only include alphanumeric characters and underscores. Hyphens are not allowed.
79 | namespace:
80 | pure: "k8s"
81 |
82 | # support k8s or openshift
83 | orchestrator:
84 | # name is either 'k8s' or 'openshift'
85 | name: "k8s"
86 | # Use this to specify the base path of Kubelet. It should contain the "plugins", "plugins_registry", and "pods" directories.
87 | # Default is /var/lib/kubelet. For CoreOS and Rancher, this is usually /opt/rke/var/lib/kubelet unless configured otherwise.
88 | basePath: "/var/lib/kubelet"
89 |
90 | # arrays specifies which storage arrays the plugin should manage; it must
91 | # be set upon installation. For FlashArrays you must set the "MgmtEndPoint"
92 | # and "APIToken", and for FlashBlades you need the additional "NFSEndPoint" parameter.
93 | # The labels are optional, and can be any key-value pair for use with the "fleet"
94 | # provisioner. An example is shown below:
95 | # arrays:
96 | # FlashArrays:
97 | # - MgmtEndPoint: "1.2.3.4"
98 | # APIToken: "a526a4c6-18b0-a8c9-1afa-3499293574bb"
99 | # Labels:
100 | # topology.purestorage.com/rack: "22"
101 | # topology.purestorage.com/env: "prod"
102 | # - MgmtEndPoint: "1.2.3.5"
103 | # APIToken: "b526a4c6-18b0-a8c9-1afa-3499293574bb"
104 | # FlashBlades:
105 | # - MgmtEndPoint: "1.2.3.6"
106 | # APIToken: "T-c4925090-c9bf-4033-8537-d24ee5669135"
107 | # NFSEndPoint: "1.2.3.7"
108 | # Labels:
109 | # topology.purestorage.com/rack: "7b"
110 | # topology.purestorage.com/env: "dev"
111 | # - MgmtEndPoint: "1.2.3.8"
112 | # APIToken: "T-d4925090-c9bf-4033-8537-d24ee5669135"
113 | # NFSEndPoint: "1.2.3.9"
114 | # Labels:
115 | # topology.purestorage.com/rack: "6a"
116 | arrays:
117 | FlashArrays: []
118 | FlashBlades: []
119 |
120 |
121 | mounter:
122 | # These values map directly to yaml in the daemonset spec, see the kubernetes docs for info
123 | nodeSelector: {}
124 | # disktype: ssd
125 | # These values map directly to yaml in the daemonset spec, see the kubernetes docs for info
126 | tolerations: []
127 | # - operator: Exists
128 | # These values map directly to yaml in the daemonset spec, see the kubernetes docs for info
129 | affinity: {}
130 | # nodeAffinity:
131 | # requiredDuringSchedulingIgnoredDuringExecution:
132 | # nodeSelectorTerms:
133 | # - matchExpressions:
134 | # - key: e2e-az-NorthSouth
135 | # operator: In
136 | # values:
137 | # - e2e-az-North
138 | # - e2e-az-South
139 |
140 | provisioner:
141 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
142 | nodeSelector: {}
143 | # disktype: ssd
144 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
145 | tolerations: []
146 | # - operator: Exists
147 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
148 | affinity: {}
149 | # nodeAffinity:
150 | # requiredDuringSchedulingIgnoredDuringExecution:
151 | # nodeSelectorTerms:
152 | # - matchExpressions:
153 | # - key: e2e-az-NorthSouth
154 | # operator: In
155 | # values:
156 | # - e2e-az-North
157 | # - e2e-az-South
158 |
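
Putting the pieces together, a minimal install sketch built from the commented example above (the endpoint, token, release name, and namespace are all placeholders):

```bash
# One FlashArray, installed with Helm 2-style flags from the Pure repo.
cat > my-arrays.yaml <<'EOF'
arrays:
  FlashArrays:
    - MgmtEndPoint: "1.2.3.4"
      APIToken: "a526a4c6-18b0-a8c9-1afa-3499293574bb"
EOF
helm repo add pure https://purestorage.github.io/helm-charts
helm install pure/pure-csi --name pure-storage-driver \
  --namespace pso -f my-arrays.yaml
```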
--------------------------------------------------------------------------------
/pure-k8s-plugin/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *~
18 | # Various IDEs
19 | .project
20 | .idea/
21 | *.tmproj
22 |
--------------------------------------------------------------------------------
/pure-k8s-plugin/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | description: A Helm chart for Pure Storage persistent storage driver for Kubernetes/OpenShift
3 | name: pure-k8s-plugin
4 | version: 2.7.1
5 | appVersion: 2.7.1
6 | icon: https://raw.githubusercontent.com/purestorage/helm-charts/master/pure-k8s-plugin/pure-storage.png
7 | keywords:
8 | - purestorage
9 | maintainers:
10 | - email: sales@purestorage.com
11 | name: Pure Storage, Inc.
12 |
--------------------------------------------------------------------------------
/pure-k8s-plugin/pure-storage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/purestorage/helm-charts/c591f6a2eb7cbbaec60b62c32a757c11d0119d99/pure-k8s-plugin/pure-storage.png
--------------------------------------------------------------------------------
/pure-k8s-plugin/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* Create a chart_labels for each resources
2 | */}}
3 | {{- define "pure_k8s_plugin.labels" -}}
4 | generator: helm
5 | chart: {{ .Chart.Name }}
6 | release: {{ .Release.Name | quote }}
7 | {{- end -}}
8 |
9 | {{/* Define the flexpath to install pureflex
10 | */}}
11 | {{ define "pure_k8s_plugin.flexpath" -}}
12 | {{ if .Values.flexPath -}}
13 | {{ .Values.flexPath }}
14 | {{ else if eq .Values.orchestrator.name "k8s" -}}
15 | {{ .Values.orchestrator.k8s.flexPath }}
16 | {{ else if eq .Values.orchestrator.name "openshift" -}}
17 | {{ .Values.orchestrator.openshift.flexPath }}
18 | {{- end -}}
19 | {{- end -}}
20 |
21 | {{/*
22 | Return the appropriate apiVersion for deployment.
23 | */}}
24 | {{- define "deployment.apiVersion" -}}
25 | {{- if semverCompare ">=1.9-0" .Capabilities.KubeVersion.GitVersion -}}
26 | {{- print "apps/v1" -}}
27 | {{- else -}}
28 | {{- print "apps/v1beta1" -}}
29 | {{- end -}}
30 | {{- end -}}
31 |
32 | {{/*
33 | Return the appropriate apiVersion for daemonset.
34 | */}}
35 | {{- define "daemonset.apiVersion" -}}
36 | {{- if semverCompare ">=1.9-0" .Capabilities.KubeVersion.GitVersion -}}
37 | {{- print "apps/v1" -}}
38 | {{- else -}}
39 | {{- print "apps/v1beta1" -}}
40 | {{- end -}}
41 | {{- end -}}
42 |
43 | {{/*
44 | Return the appropriate apiVersion for RBAC APIs.
45 | */}}
46 | {{- define "rbac.apiVersion" -}}
47 | {{- if semverCompare "^1.8-0" .Capabilities.KubeVersion.GitVersion -}}
48 | {{- print "rbac.authorization.k8s.io/v1" -}}
49 | {{- else -}}
50 | {{- print "rbac.authorization.k8s.io/v1beta1" -}}
51 | {{- end -}}
52 | {{- end -}}
53 |
54 |
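
The `semverCompare` guards above pick API versions based on the target cluster. One way to watch them switch is rendering with different `--kube-version` values (flag behavior varies across Helm releases, so treat this as a sketch):

```bash
# rbac.apiVersion should flip between v1 and v1beta1 around Kubernetes 1.8.
helm template ./pure-k8s-plugin --kube-version 1.8 | grep -m1 'rbac.authorization'
helm template ./pure-k8s-plugin --kube-version 1.7 | grep -m1 'rbac.authorization'
```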
--------------------------------------------------------------------------------
/pure-k8s-plugin/templates/clusterrolebinding.yaml:
--------------------------------------------------------------------------------
1 | # Create a service account for installation
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ .Values.clusterrolebinding.serviceAccount.name }}
6 | namespace: {{ .Release.Namespace }}
7 |
8 | ---
9 |
10 | apiVersion: {{ template "rbac.apiVersion" . }}
11 | kind: ClusterRole
12 | metadata:
13 | labels:
14 | {{ include "pure_k8s_plugin.labels" . | indent 4}}
15 | name: pure-provisioner-clusterrole
16 | rules:
17 | - apiGroups: [""]
18 | resources: ["persistentvolumes"]
19 | verbs: ["create", "delete", "get", "list", "watch", "update"]
20 | - apiGroups: [""]
21 | resources: ["persistentvolumeclaims"]
22 | verbs: ["get", "list", "update", "watch"]
23 | - apiGroups: ["storage.k8s.io"]
24 | resources: ["storageclasses"]
25 | verbs: ["get", "list", "watch"]
26 | - apiGroups: [""]
27 | resources: ["events"]
28 | verbs: ["create", "patch", "update", "watch"]
29 |
30 | ---
31 |
32 | # Assign cluster role to the service account
33 | apiVersion: {{ template "rbac.apiVersion" . }}
34 | kind: ClusterRoleBinding
35 | metadata:
36 | name: pure-provisioner-rights
37 | labels:
38 | {{ include "pure_k8s_plugin.labels" . | indent 4}}
39 | roleRef:
40 | apiGroup: rbac.authorization.k8s.io
41 | kind: ClusterRole
42 | name: pure-provisioner-clusterrole
43 | subjects:
44 | - kind: ServiceAccount
45 | name: {{ .Values.clusterrolebinding.serviceAccount.name }}
46 | namespace: {{ .Release.Namespace }}
47 |
48 | ---
49 |
50 | apiVersion: {{ template "rbac.apiVersion" . }}
51 | kind: Role
52 | metadata:
53 | name: pure-provisioner-role
54 | namespace: {{ .Release.Namespace }}
55 | labels:
56 | {{ include "pure_k8s_plugin.labels" . | indent 4}}
57 | rules:
58 | - apiGroups: [""]
59 | resources: ["endpoints"]
60 | verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
61 |
62 | ---
63 |
64 | # Assign role to the service account
65 | apiVersion: {{ template "rbac.apiVersion" . }}
66 | kind: RoleBinding
67 | metadata:
68 | name: pure-provisioner-rights-ns
69 | namespace: {{ .Release.Namespace }}
70 | labels:
71 | {{ include "pure_k8s_plugin.labels" . | indent 4}}
72 | roleRef:
73 | apiGroup: rbac.authorization.k8s.io
74 | kind: Role
75 | name: pure-provisioner-role
76 | subjects:
77 | - kind: ServiceAccount
78 | name: {{ .Values.clusterrolebinding.serviceAccount.name }}
79 | namespace: {{ .Release.Namespace }}
80 |
--------------------------------------------------------------------------------
/pure-k8s-plugin/templates/pure-flex-daemon.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: {{ template "daemonset.apiVersion" . }}
2 | kind: DaemonSet
3 | metadata:
4 | name: pure-flex
5 | namespace: {{ .Release.Namespace }}
6 | labels:
7 | {{ include "pure_k8s_plugin.labels" . | indent 4}}
8 | spec:
9 | updateStrategy:
10 | type: RollingUpdate
11 | rollingUpdate:
12 | # flex-daemon may not be running on some nodes in the cluster like master/infra nodes. This affects the maxUnavailable nodes for a RollingUpdate.
13 | # Set maxUnavailable to 100% so that a Rolling Update is possible for any cluster configuration.
14 | maxUnavailable: 100%
15 | selector:
16 | matchLabels:
17 | app: pure-flex
18 | template:
19 | metadata:
20 | labels:
21 | app: pure-flex
22 | {{ include "pure_k8s_plugin.labels" . | indent 8}}
23 | spec:
24 | serviceAccountName: {{ .Values.clusterrolebinding.serviceAccount.name }}
25 | containers:
26 | - name: pure-flex
27 | image: "{{ .Values.image.name }}:{{ .Values.image.tag }}"
28 | lifecycle:
29 | preStop:
30 | exec:
31 | command:
32 | - "/uninstaller"
33 | imagePullPolicy: {{ .Values.image.pullPolicy }}
34 | securityContext:
35 | privileged: true
36 | command:
37 | - "/pure-flex-daemon"
38 | - "--kubeletBaseDir=/kubelet-plugins"
39 | {{- if eq .Values.app.debug true}}
40 | - "--debug"
41 | {{- end}}
42 | - "--daemon"
43 | - "install"
44 | volumeMounts:
45 | - name: config
46 | mountPath: /etc/pure
47 | readOnly: true
48 | - name: kubelet-plugins
49 | mountPath: /kubelet-plugins
50 | env:
51 | - name: PURE_DISCOVERY_CONF
52 | value: /etc/pure/pure.json
53 | - name: PURE_FLASHARRAY_SAN_TYPE
54 | value: {{ .Values.flasharray.sanType | upper }}
55 | - name: PURE_K8S_NAMESPACE
56 | value: {{ .Values.namespace.pure }}
57 | - name: PURE_DEFAULT_BLOCK_FS_TYPE
58 | value: {{ .Values.flasharray.defaultFSType }}
59 | - name: PURE_DEFAULT_BLOCK_FS_OPT
60 | value: "{{ .Values.flasharray.defaultFSOpt }}"
61 | - name: PURE_DEFAULT_BLOCK_MNT_OPT
62 | value: "{{ .Values.flasharray.defaultMountOpt }}"
63 | - name: PURE_PREEMPT_RWO_ATTACHMENTS_DEFAULT
64 | value: "{{ .Values.flasharray.preemptAttachments }}"
65 | - name: PURE_ISCSI_ALLOWED_CIDRS
66 | value: "{{ .Values.flasharray.iSCSIAllowedCIDR }}"
67 | - name: PURE_ISCSI_LOGIN_TIMEOUT
68 | value: "{{ .Values.flasharray.iSCSILoginTimeout }}"
69 | volumes:
70 | - name: config
71 | secret:
72 | secretName: pure-provisioner-secret
73 | - name: kubelet-plugins
74 | hostPath:
75 | path: {{ template "pure_k8s_plugin.flexpath" .}}
76 | {{- with .Values.flexDaemon.nodeSelector | default .Values.nodeSelector }}
77 | nodeSelector:
78 | {{ toYaml . | indent 8 }}
79 | {{- end }}
80 | {{- with .Values.flexDaemon.affinity | default .Values.affinity }}
81 | affinity:
82 | {{ toYaml . | indent 8 }}
83 | {{- end }}
84 | {{- with .Values.flexDaemon.tolerations | default .Values.tolerations }}
85 | tolerations:
86 | {{ toYaml . | indent 8 }}
87 | {{- end }}
88 |
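
Because `maxUnavailable` is 100%, an upgrade replaces every flex pod in a single pass; the rollout can still be watched normally (namespace assumed):

```bash
# Watch the daemonset converge after a chart or image upgrade.
kubectl rollout status ds/pure-flex -n pso
kubectl get ds pure-flex -n pso -o wide
```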
--------------------------------------------------------------------------------
/pure-k8s-plugin/templates/pure-provisioner.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: {{ template "deployment.apiVersion" . }}
2 | kind: Deployment
3 | metadata:
4 | name: pure-provisioner
5 | namespace: {{ .Release.Namespace }}
6 | labels:
7 | {{ include "pure_k8s_plugin.labels" . | indent 4}}
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: pure-provisioner
13 | template:
14 | metadata:
15 | labels:
16 | app: pure-provisioner
17 | {{ include "pure_k8s_plugin.labels" . | indent 8}}
18 | spec:
19 | serviceAccountName: {{ .Values.clusterrolebinding.serviceAccount.name }}
20 | containers:
21 | - name: pure-provisioner
22 | image: "{{ .Values.image.name }}:{{ .Values.image.tag }}"
23 | imagePullPolicy: {{ .Values.image.pullPolicy }}
24 | command:
25 | - /k8s-provisioner
26 | - '-logtostderr=true'
27 | - '-log_file_max_size=32'
28 | - '-stderrthreshold=2'
29 | {{- if eq .Values.app.debug true}}
30 | - '--debug'
31 | {{- end}}
32 | volumeMounts:
33 | - name: config
34 | mountPath: /etc/pure
35 | readOnly: true
36 | env:
37 | - name: PURE_DISCOVERY_CONF
38 | value: /etc/pure/pure.json
39 | - name: PURE_K8S_NAMESPACE
40 | value: {{ .Values.namespace.pure }}
41 | - name: PURE_DEFAULT_BLOCK_FS_TYPE
42 | value: {{ .Values.flasharray.defaultFSType }}
43 | - name: PURE_DEFAULT_ENABLE_FB_NFS_SNAPSHOT
44 | value: {{ quote .Values.flashblade.snapshotDirectoryEnabled }}
45 | volumes:
46 | - name: config
47 | secret:
48 | secretName: pure-provisioner-secret
49 | {{- with .Values.provisioner.nodeSelector | default .Values.nodeSelector }}
50 | nodeSelector:
51 | {{ toYaml . | indent 8 }}
52 | {{- end }}
53 | {{- with .Values.provisioner.affinity | default .Values.affinity }}
54 | affinity:
55 | {{ toYaml . | indent 8 }}
56 | {{- end }}
57 | {{- with .Values.provisioner.tolerations | default .Values.tolerations }}
58 | tolerations:
59 | {{ toYaml . | indent 8 }}
60 | {{- end }}
61 |
--------------------------------------------------------------------------------
/pure-k8s-plugin/templates/secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: pure-provisioner-secret
5 | namespace: {{ .Release.Namespace }}
6 | labels:
7 | {{ include "pure_k8s_plugin.labels" . | indent 4}}
8 | data:
9 | pure.json: {{ .Values.arrays | toJson | b64enc | quote }}
10 |
--------------------------------------------------------------------------------
/pure-k8s-plugin/templates/storageclass.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.storageclass.createBuiltIn }}
2 | apiVersion: storage.k8s.io/v1
3 | kind: StorageClass
4 | metadata:
5 | name: pure
6 | annotations:
7 | storageclass.kubernetes.io/is-default-class: "{{ .Values.storageclass.isPureDefault }}"
8 | labels:
9 | kubernetes.io/cluster-service: "true"
10 | {{ include "pure_k8s_plugin.labels" . | indent 4}}
11 | provisioner: pure-provisioner
12 | parameters:
13 | backend: {{ .Values.storageclass.pureBackend | default "block" | quote }}
14 | ---
15 | kind: StorageClass
16 | apiVersion: storage.k8s.io/v1
17 | metadata:
18 | name: pure-file
19 | labels:
20 | kubernetes.io/cluster-service: "true"
21 | {{ include "pure_k8s_plugin.labels" . | indent 4}}
22 | provisioner: pure-provisioner
23 | parameters:
24 | backend: file
25 | ---
26 | kind: StorageClass
27 | apiVersion: storage.k8s.io/v1
28 | metadata:
29 | name: pure-block
30 | labels:
31 | kubernetes.io/cluster-service: "true"
32 | {{ include "pure_k8s_plugin.labels" . | indent 4}}
33 | provisioner: pure-provisioner
34 | parameters:
35 | backend: block
36 | {{- end }}
37 |
--------------------------------------------------------------------------------
/pure-k8s-plugin/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for k8s-plugin.
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | image:
6 | name: purestorage/k8s
7 | tag: 2.7.1
8 | pullPolicy: Always
9 |
10 | # this option is to enable/disable the debug mode of this app
11 | # for pure-provisioner and pure-flex-daemon
12 | app:
13 | debug: false
14 |
15 | # do you want to set pure as the default storageclass?
16 | storageclass:
17 | # create the built-in StorageClasses 'pure', 'pure-file' and 'pure-block'?
18 | createBuiltIn: true
19 | isPureDefault: false
20 | # set the type of backend you want for the 'pure' storageclass
21 | pureBackend: block
22 |
23 | # specify the service account name for this app
24 | clusterrolebinding:
25 | serviceAccount:
26 | name: pure
27 |
28 | # support ISCSI or FC, not case sensitive
29 | flasharray:
30 | sanType: ISCSI
31 | defaultFSType: xfs
32 | defaultFSOpt: "-q"
33 | defaultMountOpt: ""
34 | preemptAttachments: "true"
35 | iSCSILoginTimeout: 20
36 | iSCSIAllowedCIDR: ""
37 |
38 | flashblade:
39 | snapshotDirectoryEnabled: "false"
40 |
41 | # namespace.pure below is the backend storage namespace where volumes/shares/etc
42 | # will be created; the other namespace this app uses is the Helm release
43 | # namespace, where the plugin itself runs.
44 | namespace:
45 | pure: k8s
46 |
47 | # support k8s or openshift
48 | # if you want to install the flex driver into a different place, you need to
49 | # override flexPath.
50 | orchestrator:
51 | # name is either 'k8s' or 'openshift'
52 | name: k8s
53 |
54 | # flexPath is for image.tag >= 2.0
55 | # `flexPath` needs to align with kubelet "volume-plugin-dir" configuration
56 | # by default in Kubernetes it is '/usr/libexec/kubernetes/kubelet-plugins/volume/exec'
57 | # Select an option below or customize for the environment.
58 |
59 | # Default for Kubernetes and OpenShift on RHEL Server
60 | flexPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
61 |
62 | # Default for Openshift 3.10+ on RHEL Atomic (containerized kubelet/origin-node)
63 | #flexPath: /etc/origin/kubelet-plugins/volume/exec
64 |
65 | # Default for Openshift 3.9 and lower with RHEL Atomic
66 | #flexPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
67 |
68 | # Default for RKE
69 | #flexPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
70 |
71 | # Default for GKE
72 | #flexPath: /home/kubernetes/flexvolume
73 |
74 | # Default for Kubespray
75 | #flexPath: /var/lib/kubelet/volume-plugins
76 |
77 | # Default for OpenStack Magnum
78 | #flexPath: /var/lib/kubelet/volumeplugins
79 |
80 | # arrays specifies which storage arrays the plugin should manage; it must
81 | # be set upon installation. For FlashArrays you must set the "MgmtEndPoint"
82 | # and "APIToken", and for FlashBlades you need the additional "NfsEndPoint" parameter.
83 | # The labels are optional, and can be any key-value pair for use with the "fleet"
84 | # provisioner. An example is shown below:
85 | # arrays:
86 | # FlashArrays:
87 | # - MgmtEndPoint: "1.2.3.4"
88 | # APIToken: "a526a4c6-18b0-a8c9-1afa-3499293574bb"
89 | # Labels:
90 | # rack: "22"
91 | # env: "prod"
92 | # - MgmtEndPoint: "1.2.3.5"
93 | # APIToken: "b526a4c6-18b0-a8c9-1afa-3499293574bb"
94 | # FlashBlades:
95 | # - MgmtEndPoint: "1.2.3.6"
96 | # APIToken: "T-c4925090-c9bf-4033-8537-d24ee5669135"
97 | # NfsEndPoint: "1.2.3.7"
98 | # Labels:
99 | # rack: "7b"
100 | # env: "dev"
101 | # - MgmtEndPoint: "1.2.3.8"
102 | # APIToken: "T-d4925090-c9bf-4033-8537-d24ee5669135"
103 | # NfsEndPoint: "1.2.3.9"
104 | # Labels:
105 | # rack: "6a"
106 |
107 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
108 | nodeSelector: {}
109 | # disktype: ssd
110 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
111 | tolerations: []
112 | # - operator: Exists
113 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
114 | affinity: {}
115 | # nodeAffinity:
116 | # requiredDuringSchedulingIgnoredDuringExecution:
117 | # nodeSelectorTerms:
118 | # - matchExpressions:
119 | # - key: e2e-az-NorthSouth
120 | # operator: In
121 | # values:
122 | # - e2e-az-North
123 | # - e2e-az-South
124 |
125 | flexDaemon:
126 | # These values map directly to yaml in the daemonset spec, see the kubernetes docs for info
127 | nodeSelector: {}
128 | # disktype: ssd
129 | # These values map directly to yaml in the daemonset spec, see the kubernetes docs for info
130 | tolerations: []
131 | # - operator: Exists
132 | # These values map directly to yaml in the daemonset spec, see the kubernetes docs for info
133 | affinity: {}
134 | # nodeAffinity:
135 | # requiredDuringSchedulingIgnoredDuringExecution:
136 | # nodeSelectorTerms:
137 | # - matchExpressions:
138 | # - key: e2e-az-NorthSouth
139 | # operator: In
140 | # values:
141 | # - e2e-az-North
142 | # - e2e-az-South
143 |
144 | provisioner:
145 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
146 | nodeSelector: {}
147 | # disktype: ssd
148 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
149 | tolerations: []
150 | # - operator: Exists
151 | # These values map directly to yaml in the deployment spec, see the kubernetes docs for info
152 | affinity: {}
153 | # nodeAffinity:
154 | # requiredDuringSchedulingIgnoredDuringExecution:
155 | # nodeSelectorTerms:
156 | # - matchExpressions:
157 | # - key: e2e-az-NorthSouth
158 | # operator: In
159 | # values:
160 | # - e2e-az-North
161 | # - e2e-az-South
162 |
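
For the `flexPath` variants listed above, the override is a single `--set`. A hypothetical GKE install, reusing the repo and values-file conventions from earlier:

```bash
# GKE keeps its flexvolume plugin directory under /home/kubernetes.
helm install pure/pure-k8s-plugin --name pure-storage-driver \
  --namespace pso -f my-arrays.yaml \
  --set flexPath=/home/kubernetes/flexvolume
```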
--------------------------------------------------------------------------------
/scripts/pso-collect-logs.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | FULL_MODE="false"
3 | while [ -n "$1" ]; do # while loop starts
4 | case "$1" in
5 |
6 | --full)
7 | echo "--full option specified"
8 | FULL_MODE="true";;
9 |
10 | --help)
11 |           echo -e "Usage: pso-collect-logs.sh [OPTION]"
12 |           echo -e "If kubeconfig is not configured, please run: export KUBECONFIG=[kube-config-file]\n"
13 |           echo -e "--full: full log mode; also collects pod information outside of the PSO and kube-system namespaces. Please make sure there is no sensitive information."
14 | exit;;
15 |
16 | *)
17 | echo "Option $1 not recognized, use --help for more information."
18 | exit;;
19 |
20 | esac
21 | shift
22 | done
23 |
24 | if [ "$FULL_MODE" == "false" ]; then
25 | tput setaf 2;
26 |   echo -e "Will not collect user application info. If there is a PVC mount issue, please run with the --full option to collect info for all pods in all namespaces; make sure there is no sensitive info."
27 | tput sgr0
28 | fi
29 |
30 | KUBECTL=kubectl
31 |
32 | if [ -n "$(which oc)" ]; then
33 | echo "oc exists, use oc instead of kubectl"
34 | KUBECTL=oc
35 | fi
36 |
37 | LOG_DIR=./pso-logs
38 | PSO_NS=$($KUBECTL get pod -o wide --all-namespaces | grep pure-provisioner- | awk '{print $1}')
39 |
40 | tput setaf 2;
41 | echo -e "PSO namespace is $PSO_NS, overwriting log dir $LOG_DIR\n"
42 | tput sgr0
43 |
44 | PODS=$($KUBECTL get pod -n $PSO_NS | awk '{print $1}' | grep -e pure-)
45 |
46 | if [ -d "$LOG_DIR" ]
47 | then
48 |     rm -r $LOG_DIR
49 | fi
50 | mkdir $LOG_DIR
51 |
52 | if [[ $KUBECTL == "oc" ]]; then
53 | echo "collect scc info"
54 | # Get log for scc which only exists in openshift cluster, in case oc exists but the cluster is k8s cluster
55 | # we will get "resource type does not exist", which is ok.
56 | oc get scc -o wide > $LOG_DIR/scc.log 2>/dev/null
57 | echo -e "\n" >> $LOG_DIR/scc.log
58 | oc describe scc >> $LOG_DIR/scc.log 2>/dev/null
59 | fi
60 |
61 | for pod in $PODS
62 | do
63 | echo "collect logs for pod $pod"
64 |
65 | # For flex driver
66 | if [[ $pod == *"pure-flex-"* ]]; then
67 | $KUBECTL logs $pod -c pure-flex -n $PSO_NS > $LOG_DIR/$pod.log
68 | fi
69 |
70 | if [[ $pod == *"pure-provisioner-"* ]]; then
71 | $KUBECTL logs $pod -c pure-provisioner -n $PSO_NS > $LOG_DIR/$pod.log
72 | fi
73 | # For csi driver
74 | if [[ $pod == *"pure-csi-"* ]] || [[ $pod == *"pure-provisioner-0"* ]] ; then
75 | $KUBECTL logs $pod -c pure-csi-container -n $PSO_NS > $LOG_DIR/$pod.log
76 | fi
77 |
78 | done
79 |
80 | if [ "$FULL_MODE" == "true" ]; then
81 | echo "collect info for all pods in all namespaces"
82 | $KUBECTL get pod --all-namespaces -o wide > $LOG_DIR/all-pods.log
83 | echo -e "\n" >> $LOG_DIR/all-pods.log
84 | $KUBECTL describe pod --all-namespaces >> $LOG_DIR/all-pods.log
85 | else
86 | echo "collect info for pods in PSO namespace $PSO_NS and kube-system namespace"
87 | $KUBECTL get pod -n $PSO_NS -o wide > $LOG_DIR/all-pods.log
88 | $KUBECTL get pod -n kube-system -o wide >> $LOG_DIR/all-pods.log
89 | echo -e "\n" >> $LOG_DIR/all-pods.log
90 | $KUBECTL describe pod -n $PSO_NS >> $LOG_DIR/all-pods.log
91 | $KUBECTL describe pod -n kube-system >> $LOG_DIR/all-pods.log
92 | fi
93 |
94 | echo "collect logs for all nodes"
95 | $KUBECTL get node -o wide > $LOG_DIR/all-nodes.log
96 |
97 | echo "collect logs for all pvcs"
98 | $KUBECTL get pvc -o wide > $LOG_DIR/all-pvcs.log
99 | echo -e "\n" >> $LOG_DIR/all-pvcs.log
100 | $KUBECTL describe pvc >> $LOG_DIR/all-pvcs.log
101 |
102 | echo "collect logs for all pvs"
103 | $KUBECTL get pv -o wide > $LOG_DIR/all-pvs.log
104 | echo -e "\n" >> $LOG_DIR/all-pvs.log
105 | $KUBECTL describe pv >> $LOG_DIR/all-pvs.log
106 |
107 | echo "collect logs for all resources in PSO namespace"
108 | $KUBECTL get all -o wide -n $PSO_NS > $LOG_DIR/all-resource.log
109 | echo -e "\n" >> $LOG_DIR/all-resource.log
110 | # Suppress potential error: Error from server (NotFound): the server could not find the requested resource
111 | $KUBECTL describe all -n $PSO_NS >> $LOG_DIR/all-resource.log 2>/dev/null
112 |
113 | COMPRESS_FILE=pso-logs-$(date "+%Y.%m.%d-%H.%M.%S").tar.gz
114 | tput setaf 2;
115 | echo -e "Compressing log folder $LOG_DIR into $COMPRESS_FILE"
116 | tput sgr0
117 | tar -czvf $COMPRESS_FILE $LOG_DIR >/dev/null
118 |
119 |
120 |
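
Typical invocation (the kubeconfig path is an example):

```bash
export KUBECONFIG=$HOME/.kube/config
./scripts/pso-collect-logs.sh --full
tar -tzf pso-logs-*.tar.gz | head   # peek at what was captured
```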
--------------------------------------------------------------------------------
/tests/common/generate-version.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Copyright 2017, Pure Storage Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # First see if we are on a tag
18 | MD5_BIN=md5sum
19 | if [ `uname -s` == "Darwin" ]; then
20 | MD5_BIN=md5
21 | fi
22 |
23 | GIT_TAG=$(git describe --tags --exact-match 2> /dev/null)
24 | MATCH_RESULT=$?
25 |
26 | VERSION=""
27 | if [[ "${MATCH_RESULT}" == 0 ]]; then
28 | VERSION="${GIT_TAG}"
29 | else
30 | LATEST_TAG=$(git describe --abbrev=0 --tags 2> /dev/null)
31 |
32 | # If there are no tags to be found..
33 | if [[ -z "${LATEST_TAG}" ]]; then
34 | LATEST_TAG="unknown"
35 | fi
36 |
37 | CURRENT_REV=$(git rev-parse --verify HEAD | cut -c1-8)
38 | VERSION="${LATEST_TAG}-${CURRENT_REV}"
39 | fi
40 |
41 | diffHash() {
42 | { git diff --full-index; $(git ls-files --others --exclude-standard | while read -r i; do git diff --full-index -- /dev/null "$i"; done); } 2>&1 | \
43 | ${MD5_BIN} | cut -d ' ' -f 1 | cut -c1-8
44 | }
45 |
46 | if test -n "$(git status --porcelain)"; then
47 | VERSION+="-$(diffHash)"
48 | fi
49 |
50 | echo ${VERSION}
51 |
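
Illustrative outputs under the three branches above (the hashes are placeholders):

```bash
./tests/common/generate-version.sh
# on an exact tag            -> 2.7.1
# past a tag, clean tree     -> 2.7.1-1a2b3c4d
# with uncommitted changes   -> 2.7.1-1a2b3c4d-5e6f7a8b
```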
--------------------------------------------------------------------------------
/tests/common/helm-utils.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Copyright 2017, Pure Storage Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | HELM_CHARTS_REPO_URL_DEFAULT=https://purestorage.github.io/helm-charts
18 | HELM_CHARTS_REPO_NAME_DEFAULT=pure
19 |
20 | function init_helm {
21 | local chart_repo_url=${1:-${HELM_CHARTS_REPO_URL_DEFAULT}}
22 | local chart_repo_name=${2:-${HELM_CHARTS_REPO_NAME_DEFAULT}}
23 | helm init
24 | local n=0
25 | while true; do
26 | [ $n -lt ${CHECK_LIMIT} ]
27 | n=$[$n+1]
28 | sleep ${CHECK_INTERVAL}
29 | local readyTillers=$(kubectl get rs -l name=tiller -n ${TILLER_NAMESPACE} -o json | jq -r '.items[].status.readyReplicas')
30 | [[ ${readyTillers} == [0-9]* ]] || continue
31 | [ ${readyTillers} -gt 0 ] && break
32 | done
33 | # test if helm is working
34 | helm list
35 | helm repo add ${chart_repo_name} ${chart_repo_url}
36 | helm repo update
37 | }
38 |
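
Note that `init_helm` relies on `CHECK_LIMIT`/`CHECK_INTERVAL` (defined in `minikube-utils.sh`) and on `TILLER_NAMESPACE` being set by the caller, mirroring how `test-upgrade.sh` wires it up:

```bash
source tests/common/minikube-utils.sh   # provides CHECK_LIMIT/CHECK_INTERVAL
TILLER_NAMESPACE=kube-system
source tests/common/helm-utils.sh
init_helm    # defaults to the purestorage repo URL and the 'pure' repo name
```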
--------------------------------------------------------------------------------
/tests/common/minikube-utils.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Copyright 2017, Pure Storage Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | CHECK_LIMIT=30
18 | CHECK_INTERVAL=10
19 |
20 | function start_minikube {
21 | local minikube_instance_name=$1
22 | if [ "${minikube_instance_name}" == "" ]; then
23 | echo "Must provide a minikube instance name to create"
24 | return 1
25 | fi
26 | local instance=$(echo ${minikube_instance_name} | sed 's/-/_/g')
27 | eval _minikube_vm_driver_$instance=${2:-virtualbox}
28 | eval _delete_minikube_$instance=true
29 | if eval [ "\${_minikube_vm_driver_$instance}" == "none" ]; then
30 | if pgrep kubelet; then
31 |             echo "Found an existing minikube. Please check it, then stop and delete it carefully before retrying"
32 | # when exit, don't delete the minikube
33 | eval _delete_minikube_$instance=false
34 | return 1
35 | fi
36 | else
37 | if minikube status -p ${minikube_instance_name}; then
38 |             echo "Found an existing minikube (${minikube_instance_name}). Please check it, then stop and delete it carefully before retrying"
39 | # when exit, don't delete the minikube
40 | eval _delete_minikube_$instance=false
41 | return 1
42 | fi
43 | fi
44 |
45 | echo "Starting a minikube(${minikube_instance_name}) for testing ..."
46 | # start a minikube for test
47 | eval minikube start --vm-driver \${_minikube_vm_driver_$instance} -p ${minikube_instance_name}
48 | # verify minikube
49 | local n=0
50 | while true; do
51 | [ $n -lt ${CHECK_LIMIT} ]
52 | n=$[$n+1]
53 | sleep ${CHECK_INTERVAL}
54 | kubectl get pods --all-namespaces | grep kube-system | grep -v Running || break
55 | done
56 | }
57 |
58 | function cleanup_minikube() {
59 | local minikube_instance_name=$1
60 | if [ "${minikube_instance_name}" == "" ]; then
61 | echo "Must provide a minikube instance name to stop and delete"
62 | return 1
63 | fi
64 | local instance=$(echo ${minikube_instance_name} | sed 's/-/_/g')
65 | if eval [ ! -z "\${_delete_minikube_$instance}" ]; then
66 | if eval [ "\${_delete_minikube_$instance}" == "true" ]; then
67 | minikube delete -p ${minikube_instance_name}
68 | if eval [ "\${_minikube_vm_driver_$instance}" == "none" ]; then
69 | # cleanup all the docker containers created by minikube
70 | docker ps -a -q -f name="k8s" -f status=exited | xargs docker rm -f
71 | fi
72 | fi
73 | fi
74 | eval unset _delete_minikube_$instance
75 | eval unset _minikube_vm_driver_$instance
76 | }
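
A sketch of the start/cleanup pairing as the upgrade test uses it:

```bash
source tests/common/minikube-utils.sh
trap 'cleanup_minikube pure-k8s-plugin' EXIT
start_minikube pure-k8s-plugin virtualbox   # instance name, vm driver
```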
--------------------------------------------------------------------------------
/tests/pure-k8s-plugin/README.md:
--------------------------------------------------------------------------------
1 | # Tests
2 |
3 | ## Pre-requisites
4 |
5 | To run the tests, the following tools must be installed first:
6 | * Minikube
7 | https://kubernetes.io/docs/tasks/tools/install-minikube/
8 | * Kubectl
9 | https://kubernetes.io/docs/tasks/tools/install-kubectl/
10 | * Helm
11 | https://docs.helm.sh/using_helm/#installing-helm
12 |
13 | ## Upgrade test
14 | `test-upgrade.sh` tests the pure-k8s-plugin Helm chart upgrade from any GA version to the current development code.
15 |
16 | * Set up environment variables for the test (optional)
17 | The following test environment variables are available:
18 | * MINIKUBE_VM_DRIVER | `virtualbox` (default)
19 | * TEST_CHARTS_REPO_URL | `https://purestorage.github.io/helm-charts` (default)
20 | * TEST_CHART_GA_VERSION | `latest` (default)
21 |
22 | You can set a different value for any of them:
23 | ```bash
24 | export MINIKUBE_VM_DRIVER=none
25 | export TEST_CHARTS_REPO_URL=https://purestorage.github.io/helm-charts
26 | export TEST_CHART_GA_VERSION=latest
27 | ```
28 |
29 | * Run a test:
30 | ```bash
31 | ./tests/pure-k8s-plugin/test-upgrade.sh
32 | ```
33 |
--------------------------------------------------------------------------------
/tests/pure-k8s-plugin/test-upgrade.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Copyright 2017, Pure Storage Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | set -xe
18 |
19 | # This script tests the upgrade from the latest GA version to the current development one
20 | script_abs_path=`cd $(dirname $0); echo $(pwd)/$(basename $0)`
21 | WORKSPACE=$(dirname ${script_abs_path})/../..
22 |
23 | TEST_CHART_NAME=pure-k8s-plugin
24 |
25 | CHARTS_DIR=${WORKSPACE}
26 | CHARTS_TESTS_DIR=${WORKSPACE}/tests
27 |
28 | MINIKUBE_VM_DRIVER=${MINIKUBE_VM_DRIVER:-virtualbox}
29 | MINIKUBE_INSTANCE_NAME=${TEST_CHART_NAME}
30 |
31 | CHECK_LIMIT=30
32 | CHECK_INTERVAL=10
33 |
34 | function verify_chart_installation {
35 | # verify for pure-provisioner
36 | local imageInstalled=$(kubectl get deploy pure-provisioner -o json | jq -r '.spec.template.spec.containers[].image')
37 | [ "${imageInstalled}" == "purestorage/k8s:${IMAGE_TAG}" ]
38 |
39 | local desiredProvisioner=1
40 | local n=0
41 | while true; do
42 | [ $n -lt ${CHECK_LIMIT} ]
43 | n=$[$n+1]
44 | sleep ${CHECK_INTERVAL}
45 | local readyProvisioner=$(kubectl get deploy pure-provisioner -o json | jq -r '.status.readyReplicas')
46 | [ "${readyProvisioner}" == "${desiredProvisioner}" ] && break
47 | done
48 |
49 | # verify for pure-flex
50 | local imageInstalled=$(kubectl get ds pure-flex -o json | jq -r '.spec.template.spec.containers[].image')
51 | [ "${imageInstalled}" == "purestorage/k8s:${IMAGE_TAG}" ]
52 |
53 | local desiredFlexes=$(kubectl get ds pure-flex -o json | jq -r '.status.desiredNumberScheduled')
54 | n=0
55 | while true; do
56 | [ $n -lt ${CHECK_LIMIT} ]
57 | n=$[$n+1]
58 | sleep ${CHECK_INTERVAL}
59 | local readyFlexes=$(kubectl get ds pure-flex -o json | jq -r '.status.numberReady')
60 | [ "${readyFlexes}" == "${desiredFlexes}" ] && break
61 | done
62 | }
63 |
64 | export KUBECONFIG=${CHARTS_TESTS_DIR}/${TEST_CHART_NAME}/kube.conf
65 | export HELM_HOME=${CHARTS_TESTS_DIR}/${TEST_CHART_NAME}/helm
66 |
67 | source ${CHARTS_TESTS_DIR}/common/minikube-utils.sh
68 |
69 | function final_steps() {
70 | if [ -e ${CHARTS_DIR}/${TEST_CHART_NAME}/Chart.yaml.bak ]; then
71 | mv ${CHARTS_DIR}/${TEST_CHART_NAME}/Chart.yaml.bak ${CHARTS_DIR}/${TEST_CHART_NAME}/Chart.yaml
72 | fi
73 | cleanup_minikube ${MINIKUBE_INSTANCE_NAME}
74 | rm -rf ${KUBECONFIG} ${HELM_HOME}
75 | }
76 | trap final_steps EXIT
77 |
78 | start_minikube ${MINIKUBE_INSTANCE_NAME} ${MINIKUBE_VM_DRIVER}
79 |
80 | TEST_CHARTS_REPO_URL=${TEST_CHARTS_REPO_URL:-https://purestorage.github.io/helm-charts}
81 | TEST_CHARTS_REPO_NAME=pure
82 |
83 | TILLER_NAMESPACE=kube-system
84 | source ${CHARTS_TESTS_DIR}/common/helm-utils.sh
85 | init_helm ${TEST_CHARTS_REPO_URL} ${TEST_CHARTS_REPO_NAME}
86 |
87 | CHART_VERSION_LIST=$(helm search ${TEST_CHARTS_REPO_NAME}/${TEST_CHART_NAME} -l | grep ${TEST_CHART_NAME} | awk '{print $2}')
88 | LATEST_CHART_VERSION=$(helm search ${TEST_CHARTS_REPO_NAME}/${TEST_CHART_NAME} | grep ${TEST_CHART_NAME} | awk '{print $2}')
89 | IMAGE_TAG=${TEST_CHART_GA_VERSION:-latest}
90 | isValidVersion=0
91 | if [ "${IMAGE_TAG}" == "latest" ]; then
92 | IMAGE_TAG=${LATEST_CHART_VERSION}
93 | else
94 | for v in ${CHART_VERSION_LIST}; do
95 | if [ "$v" == ${IMAGE_TAG} ]; then
96 | isValidVersion=1
97 | break
98 | fi
99 | done
100 | if [ $isValidVersion -ne 1 ]; then
101 | echo "Failure: Invalid chart version ${IMAGE_TAG}"
102 | false
103 | fi
104 | fi
105 |
106 | echo "Installing the helm chart of ${TEST_CHART_NAME} ..."
107 | TEST_CHART_INSTANCE=pure
108 | # for testing upgrade only, set arrays to empty
109 | helm install -n ${TEST_CHART_INSTANCE} ${TEST_CHARTS_REPO_NAME}/${TEST_CHART_NAME} --version ${IMAGE_TAG} --set arrays=""
110 |
111 | echo "Verifying the installation ..."
112 | verify_chart_installation
113 | kubectl get all -o wide
114 |
115 | echo "Upgrading the helm chart of ${TEST_CHART_NAME} ..."
116 | CHART_DEV_VERSION=$(sh ${CHARTS_TESTS_DIR}/common/generate-version.sh)
117 | sed -i.bak "s/version: [0-9.]*/version: ${CHART_DEV_VERSION}/" ${CHARTS_DIR}/${TEST_CHART_NAME}/Chart.yaml
118 | IMAGE_TAG=$(grep ' tag:' ${CHARTS_DIR}/${TEST_CHART_NAME}/values.yaml | cut -d':' -f2 | tr -d ' ')
119 | helm upgrade ${TEST_CHART_INSTANCE} ${CHARTS_DIR}/${TEST_CHART_NAME} --version ${CHART_DEV_VERSION} --set arrays=""
120 |
121 | echo "Verifying the upgrade ..."
122 | verify_chart_installation
123 | kubectl get all -o wide
124 |
125 | helm history ${TEST_CHART_INSTANCE}
126 |
--------------------------------------------------------------------------------
/update.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -ex
4 |
5 | SCRIPT_DIR=$(dirname $0)
6 | cd ${SCRIPT_DIR}
7 |
8 | for CHART_FILE in $(find "${SCRIPT_DIR}" -name Chart.yaml); do
9 | CHART_DIR=$(dirname "${CHART_FILE}")
10 | CHART_NAME=$(basename "${CHART_DIR}")
11 | helm package "${CHART_DIR}"
12 | mv "${SCRIPT_DIR}"/"${CHART_NAME}"*.tgz "${SCRIPT_DIR}/docs/"
13 | done
14 |
15 | helm repo index docs --url https://purestorage.github.io/helm-charts
16 | echo "Updated ${SCRIPT_DIR}/docs/index.yaml"
17 |
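
Running this from the repo root repackages every chart into `docs/` and regenerates the index; those files are what get committed and served from the chart repository URL:

```bash
./update.sh
git status docs/   # new .tgz packages plus the refreshed index.yaml
```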
--------------------------------------------------------------------------------