├── deploy ├── .gitignore ├── group_vars │ └── all.yml ├── templates │ └── gcs-manifests │ │ ├── gcs-namespace.yml.j2 │ │ ├── gcs-storage-virtblock.yml.j2 │ │ ├── gcs-storage-snapshot.yml.j2 │ │ ├── gcs-prometheus-operator-metrics.yml.j2 │ │ ├── gcs-prometheus-alertmanager-cluster.yml.j2 │ │ ├── gcs-operator-crd.yml.j2 │ │ ├── gcs-prometheus-etcd.yml.j2 │ │ ├── gcs-mixins.yml.j2 │ │ ├── gcs-etcd-cluster.yml.j2 │ │ ├── gcs-gd2-services.yml.j2 │ │ ├── gcs-prometheus-kube-metrics.yml.j2 │ │ ├── gcs-prometheus-bundle.yml.j2 │ │ ├── gcs-operator.yml.j2 │ │ ├── gcs-etcd-operator.yml.j2 │ │ ├── gcs-prometheus-operator.yml.j2 │ │ ├── gcs-grafana.yml.j2 │ │ ├── gcs-gd2.yml.j2 │ │ ├── gcs-prometheus-node-exporter.yml.j2 │ │ ├── gcs-prometheus-kube-state-metrics.yml.j2 │ │ ├── gcs-virtblock-csi.yml.j2 │ │ └── gcs-fs-csi.yml.j2 ├── tasks │ ├── deploy-gd2.yml │ ├── create-gd2-manifests.yml │ └── add-devices-to-peer.yml ├── vagrant-reset.yml ├── examples │ ├── inventory-gcs-only.example │ ├── app-using-gcs-volume.yml │ ├── inventory-gcs-kubespray.example │ └── app-using-gcs-volume-clone.yml ├── ansible.cfg ├── deploy-k8s.yml ├── vagrant-playbook.yml ├── prepare.sh ├── Vagrantfile ├── README.md └── deploy-gcs.yml ├── .gitignore ├── .gitmodules ├── README.md ├── doc └── deploying_gcs_on_bare_metal_machines.md └── LICENSE /deploy/.gitignore: -------------------------------------------------------------------------------- 1 | kubeconfig 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | .vagrant 3 | .envrc 4 | *.retry 5 | gcs-venv 6 | kubectl 7 | -------------------------------------------------------------------------------- /deploy/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | gcs_namespace: "gcs" 2 | monitoring_namespace: "monitoring" 3 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "deploy/kubespray"] 2 | path = deploy/kubespray 3 | url = https://github.com/kubernetes-incubator/kubespray 4 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-namespace.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: {{ gcs_namespace }} 6 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-storage-virtblock.yml.j2: -------------------------------------------------------------------------------- 1 | ## Deploy the Gluster Virtual Block StorageClass 2 | --- 3 | kind: StorageClass 4 | apiVersion: storage.k8s.io/v1 5 | metadata: 6 | name: glustervirtblock-csi 7 | provisioner: org.gluster.glustervirtblock 8 | -------------------------------------------------------------------------------- /deploy/tasks/deploy-gd2.yml: -------------------------------------------------------------------------------- 1 | ## This file is included as a loop from the deploy-gcs playbook 2 | --- 3 | - name: GCS | GD2 Cluster | Set fact kube_hostname 4 | set_fact: 5 | kube_hostname: "{{ gcs_node }}" 6 | 7 | - name: GCS | GD2 Cluster | Deploy glusterd2 on {{ gcs_node }} 8 | kube: 9 | kubectl: "{{ kubectl }}" 10 | file: "{{ manifests_dir }}/gcs-gd2-{{ gcs_node }}.yml" 11 | 
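The `kube:` module used in the task above is the Ansible module bundled with kubespray, picked up via the `library = ./kubespray/library` setting in ansible.cfg later in this listing. Each loop iteration essentially pushes the per-node manifest rendered by create-gd2-manifests.yml into the cluster, roughly like the illustrative command below; the manifest path, node name and kubeconfig location here are assumptions, not values taken from the playbooks.

```
# Rough, illustrative equivalent of one iteration of the task above.
kubectl --kubeconfig ./kubeconfig apply -f "<manifests_dir>/gcs-gd2-<gcs_node>.yml"
```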
-------------------------------------------------------------------------------- /deploy/vagrant-reset.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reset Kubernetes 3 | import_playbook: ./kubespray/reset.yml 4 | 5 | - name: Reset disks 6 | hosts: all 7 | gather_facts: false 8 | become: true 9 | 10 | tasks: 11 | - name: Rescan for PVs to update lvm cache on host 12 | command: pvscan --cache 13 | 14 | - name: Delete provisioned VGs 15 | command: pvremove --force --force -y {{ item }} 16 | loop: "{{ gcs_disks }}" 17 | -------------------------------------------------------------------------------- /deploy/tasks/create-gd2-manifests.yml: -------------------------------------------------------------------------------- 1 | ## This file is included as a loop from the deploy-gcs playbook 2 | --- 3 | - name: GCS Pre | Manifests | Create GD2 manifests for {{ gcs_node }} | Set fact kube_hostname 4 | set_fact: 5 | kube_hostname: "{{ gcs_node }}" 6 | 7 | - name: GCS Pre | Manifests | Create GD2 manifests for {{ gcs_node }} | Create gcs-gd2-{{ gcs_node }}.yml 8 | template: 9 | src: "gcs-manifests/gcs-gd2.yml.j2" 10 | dest: "{{ manifests_dir }}/gcs-gd2-{{ gcs_node }}.yml" 11 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-storage-snapshot.yml.j2: -------------------------------------------------------------------------------- 1 | ## Deploy the StorageClass 2 | --- 3 | kind: StorageClass 4 | apiVersion: storage.k8s.io/v1 5 | metadata: 6 | name: glusterfs-csi 7 | annotations: 8 | storageclass.kubernetes.io/is-default-class: "true" 9 | provisioner: org.gluster.glusterfs 10 | 11 | ## Deploy the SnapshotClass 12 | --- 13 | apiVersion: snapshot.storage.k8s.io/v1alpha1 14 | kind: VolumeSnapshotClass 15 | metadata: 16 | name: glusterfs-csi-snap 17 | snapshotter: org.gluster.glusterfs 18 | -------------------------------------------------------------------------------- /deploy/examples/inventory-gcs-only.example: -------------------------------------------------------------------------------- 1 | ## Only the ansible master needs to have passwordless ssh setup 2 | master ansible_host=10.10.10.1 3 | 4 | ## List all the kube nodes that will form the GCS cluster 5 | ## Ensure that their hostnames are correct 6 | gcs1 gcs_disks='["/dev/sdb", "/dev/sdc", "/dev/sdd"]' 7 | gcs2 gcs_disks='["/dev/sdb", "/dev/sdc", "/dev/sdd"]' 8 | gcs3 gcs_disks='["/dev/sdb", "/dev/sdc", "/dev/sdd", "/dev/sde"]' 9 | 10 | 11 | [kube-master] 12 | master 13 | 14 | [gcs-node] 15 | gcs1 16 | gcs2 17 | gcs3 18 | -------------------------------------------------------------------------------- /deploy/ansible.cfg: -------------------------------------------------------------------------------- 1 | [ssh_connection] 2 | pipelining=True 3 | ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null 4 | 5 | [defaults] 6 | host_key_checking=False 7 | gathering = smart 8 | fact_caching = jsonfile 9 | fact_caching_connection = /tmp 10 | stdout_callback = skippy 11 | callback_whitelist = profile_tasks 12 | deprecation_warnings=False 13 | inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds 14 | library = ./kubespray/library 15 | roles_path = ./kubespray/roles 16 | inventory = ./.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory 17 | 18 | [inventory] 19 | ignore_patterns = artifacts, credentials 20 | 
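Note that the `inventory` setting in ansible.cfg above points at the inventory generated by Vagrant for the local test cluster. When targeting your own machines you would normally override it on the command line instead; for example, starting from the sample inventory shipped in this repository (the exact path is up to you):

```
ansible-playbook -i examples/inventory-gcs-only.example --become deploy-gcs.yml
```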
-------------------------------------------------------------------------------- /deploy/tasks/add-devices-to-peer.yml: -------------------------------------------------------------------------------- 1 | ## This file is included as a loop from the deploy-gcs playbook 2 | --- 3 | - name: GCS | GD2 Cluster | Add devices | Set facts 4 | set_fact: 5 | kube_hostname: "{{ peer.name.split('-')[1]}}" 6 | 7 | - name: GCS | GD2 Cluster | Add devices | Add devices for {{ kube_hostname }} 8 | uri: 9 | url: "{{ gd2_client_endpoint }}/v1/devices/{{ peer.id }}" 10 | method: POST 11 | body: "{ \"device\": \"{{ disk }}\"}" 12 | body_format: json 13 | status_code: 201 14 | register: res 15 | until: res.status is defined and res.status == 201 16 | delay: 10 17 | retries: 50 18 | loop: "{{ hostvars[kube_hostname].gcs_disks }}" 19 | loop_control: 20 | loop_var: disk 21 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-prometheus-operator-metrics.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: prometheus-operator 6 | name: prometheus-operator 7 | namespace: {{ gcs_namespace }} 8 | spec: 9 | clusterIP: None 10 | ports: 11 | - name: http 12 | port: 8080 13 | targetPort: http 14 | selector: 15 | k8s-app: prometheus-operator 16 | --- 17 | apiVersion: monitoring.coreos.com/v1 18 | kind: ServiceMonitor 19 | metadata: 20 | labels: 21 | k8s-app: prometheus-operator 22 | name: prometheus-operator 23 | namespace: {{ gcs_namespace }} 24 | spec: 25 | endpoints: 26 | - honorLabels: true 27 | port: http 28 | selector: 29 | matchLabels: 30 | k8s-app: prometheus-operator 31 | 32 | -------------------------------------------------------------------------------- /deploy/examples/app-using-gcs-volume.yml: -------------------------------------------------------------------------------- 1 | # Create a PVC first 2 | --- 3 | kind: PersistentVolumeClaim 4 | apiVersion: v1 5 | metadata: 6 | name: gcs-example-volume 7 | spec: 8 | storageClassName: glusterfs-csi 9 | accessModes: 10 | - ReadWriteMany 11 | resources: 12 | requests: 13 | storage: 1Gi 14 | # Next create a Pod that binds to the PVC 15 | --- 16 | kind: Pod 17 | apiVersion: v1 18 | metadata: 19 | name: gcs-example 20 | labels: 21 | app: redis 22 | spec: 23 | containers: 24 | - name: redis 25 | image: redis:latest 26 | volumeMounts: 27 | - mountPath: /data 28 | name: gcs-example-volume 29 | volumes: 30 | - name: gcs-example-volume 31 | persistentVolumeClaim: 32 | claimName: gcs-example-volume 33 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-prometheus-alertmanager-cluster.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Alertmanager 3 | metadata: 4 | name: alert 5 | namespace: {{ gcs_namespace }} 6 | spec: 7 | replicas: 2 8 | --- 9 | apiVersion: v1 10 | data: 11 | alertmanager.yaml: Imdsb2JhbCI6IAogICJyZXNvbHZlX3RpbWVvdXQiOiAiNW0iCiJyZWNlaXZlcnMiOiAKLSAibmFtZSI6ICJudWxsIgoicm91dGUiOiAKICAiZ3JvdXBfYnkiOiAKICAtICJqb2IiCiAgImdyb3VwX2ludGVydmFsIjogIjVtIgogICJncm91cF93YWl0IjogIjMwcyIKICAicmVjZWl2ZXIiOiAibnVsbCIKICAicmVwZWF0X2ludGVydmFsIjogIjEyaCIKICAicm91dGVzIjogCiAgLSAibWF0Y2giOiAKICAgICAgImFsZXJ0bmFtZSI6ICJEZWFkTWFuc1N3aXRjaCIKICAgICJyZWNlaXZlciI6ICJudWxsIg== 12 | kind: Secret 13 | metadata: 14 | name: alertmanager-alert 15 | namespace: 
{{ gcs_namespace }} 16 | type: Opaque 17 | 18 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-operator-crd.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1beta1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: glusterclusters.operator.gluster.org 6 | spec: 7 | group: operator.gluster.org 8 | names: 9 | kind: GlusterCluster 10 | listKind: GlusterClusterList 11 | plural: glusterclusters 12 | singular: glustercluster 13 | scope: Namespaced 14 | version: v1alpha1 15 | 16 | --- 17 | apiVersion: apiextensions.k8s.io/v1beta1 18 | kind: CustomResourceDefinition 19 | metadata: 20 | name: glusternodes.operator.gluster.org 21 | spec: 22 | group: operator.gluster.org 23 | names: 24 | kind: GlusterNode 25 | listKind: GlusterNodeList 26 | plural: glusternodes 27 | singular: glusternode 28 | scope: Namespaced 29 | version: v1alpha1 30 | -------------------------------------------------------------------------------- /deploy/examples/inventory-gcs-kubespray.example: -------------------------------------------------------------------------------- 1 | kube1 ansible_host=10.10.10.1 2 | kube2 ansible_host=10.10.10.2 gcs_disks='["/dev/sdb", "/dev/sdc", "/dev/sdd"]' 3 | kube3 ansible_host=10.10.10.3 gcs_disks='["/dev/sdb", "/dev/sdc", "/dev/sdd"]' 4 | kube4 ansible_host=10.10.10.4 gcs_disks='["/dev/sdb", "/dev/sdc", "/dev/sdd", "/dev/sde"]' 5 | 6 | ## Hosts that will run etcd for the Kubernetes cluster 7 | [etcd] 8 | kube1 9 | kube2 10 | kube3 11 | 12 | ## Hosts that will be kubernetes master nodes 13 | [kube-master] 14 | kube1 15 | 16 | ## Hosts that will be kuberenetes nodes 17 | [kube-node] 18 | kube1 19 | kube2 20 | kube3 21 | kube4 22 | 23 | ## The full kubernetes cluster 24 | [k8s-cluster:children] 25 | kube-master 26 | kube-node 27 | 28 | ## Hosts that will be used for GCS. 29 | ## Systems grouped here need to define 'gcs_disks' as hostvars, which are the disks that will be used by GCS to provision storage. 
30 | [gcs-node] 31 | kube2 32 | kube3 33 | kube4 34 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-prometheus-etcd.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: etcd-operator 6 | name: etcd-operator 7 | namespace: {{ gcs_namespace }} 8 | spec: 9 | ports: 10 | - name: metrics 11 | port: 8080 12 | selector: 13 | app.kubernetes.io/component: etcd 14 | app.kubernetes.io/name: etcd-operator 15 | --- 16 | apiVersion: monitoring.coreos.com/v1 17 | kind: ServiceMonitor 18 | metadata: 19 | labels: 20 | k8s-app: etcd-operator 21 | name: etcd-operator 22 | namespace: {{ gcs_namespace }} 23 | spec: 24 | endpoints: 25 | - port: metrics 26 | path: metrics 27 | selector: 28 | matchLabels: 29 | k8s-app: etcd-operator 30 | --- 31 | apiVersion: monitoring.coreos.com/v1 32 | kind: ServiceMonitor 33 | metadata: 34 | labels: 35 | k8s-app: etcd-metrics 36 | name: etcd-metrics 37 | namespace: {{ gcs_namespace }} 38 | spec: 39 | endpoints: 40 | - port: client 41 | path: metrics 42 | selector: 43 | matchLabels: 44 | app: etcd 45 | etcd_cluster: etcd 46 | 47 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-mixins.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: gluster-mixins 5 | namespace: {{ gcs_namespace }} 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: gluster-mixins 11 | image: quay.io/gluster/gluster-mixins:latest 12 | restartPolicy: Never 13 | serviceAccountName: gluster-mixins 14 | --- 15 | kind: ServiceAccount 16 | apiVersion: v1 17 | metadata: 18 | name: gluster-mixins 19 | namespace: {{ gcs_namespace }} 20 | --- 21 | kind: Role 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | metadata: 24 | name: gluster-mixins 25 | namespace: {{ gcs_namespace }} 26 | rules: 27 | - apiGroups: 28 | - monitoring.coreos.com 29 | resources: 30 | - prometheusrules 31 | verbs: 32 | - '*' 33 | - apiGroups: 34 | - "" 35 | resources: 36 | - configmaps 37 | verbs: 38 | - '*' 39 | --- 40 | apiVersion: rbac.authorization.k8s.io/v1beta1 41 | kind: RoleBinding 42 | metadata: 43 | name: gluster-mixins 44 | namespace: {{ gcs_namespace }} 45 | subjects: 46 | - kind: ServiceAccount 47 | name: gluster-mixins 48 | namespace: {{ gcs_namespace }} 49 | roleRef: 50 | kind: Role 51 | name: gluster-mixins 52 | apiGroup: rbac.authorization.k8s.io 53 | 54 | -------------------------------------------------------------------------------- /deploy/deploy-k8s.yml: -------------------------------------------------------------------------------- 1 | # This playbook should only contain general customization required to deploy a 2 | # K8S cluster ready to run GCS. Any specific customizations with respect to the 3 | # vagrant environment needs to be done in vagrant-playbook. 
4 | --- 5 | - name: Deploy K8S 6 | import_playbook: "kubespray/cluster.yml" 7 | vars: 8 | dns_mode: "coredns" 9 | docker_mount_flags: "shared" 10 | kube_network_plugin: "flannel" 11 | kube_version: "v1.13.1" 12 | local_volumes_enabled: true 13 | kube_feature_gates: ["VolumeSnapshotDataSource=true"] 14 | 15 | - name: Fetch config 16 | hosts: kube-master[0] 17 | become: true 18 | tasks: 19 | - name: Retrieve kubectl config 20 | fetch: 21 | dest: ./kubeconfig 22 | flat: yes 23 | src: /root/.kube/config 24 | 25 | - name: Copy kube config for vagrant user 26 | hosts: kube-master 27 | remote_user: vagrant 28 | tasks: 29 | - name: Create a directory 30 | file: 31 | path: /home/vagrant/.kube 32 | state: directory 33 | group: vagrant 34 | owner: vagrant 35 | mode: 755 36 | 37 | - name: Copy kube config for vagrant user 38 | copy: 39 | remote_src: yes 40 | src: /etc/kubernetes/admin.conf 41 | dest: /home/vagrant/.kube/config 42 | -------------------------------------------------------------------------------- /deploy/examples/app-using-gcs-volume-clone.yml: -------------------------------------------------------------------------------- 1 | #create a snapshot of already available cs_volume 2 | --- 3 | apiVersion: snapshot.storage.k8s.io/v1alpha1 4 | kind: VolumeSnapshot 5 | metadata: 6 | name: glusterfs-csi-ss 7 | spec: 8 | snapshotClassName: glusterfs-csi-snap 9 | source: 10 | name: gcs-example-volume 11 | kind: PersistentVolumeClaim 12 | 13 | 14 | #restore a PVC from snapshot 15 | --- 16 | apiVersion: v1 17 | kind: PersistentVolumeClaim 18 | metadata: 19 | name: glusterfs-pv-restore 20 | spec: 21 | storageClassName: glusterfs-csi 22 | dataSource: 23 | name: glusterfs-csi-ss 24 | kind: VolumeSnapshot 25 | apiGroup: snapshot.storage.k8s.io 26 | accessModes: 27 | - ReadWriteMany 28 | resources: 29 | requests: 30 | storage: 1Gi 31 | 32 | #create an app to use restored PVC 33 | --- 34 | apiVersion: v1 35 | kind: Pod 36 | metadata: 37 | name: redis-pvc-restore 38 | labels: 39 | name: redis-pvc-restore 40 | spec: 41 | containers: 42 | - name: redis-pvc-restore 43 | image: redis:latest 44 | imagePullPolicy: IfNotPresent 45 | volumeMounts: 46 | - mountPath: /data 47 | name: glusterfscsivol 48 | volumes: 49 | - name: glusterfscsivol 50 | persistentVolumeClaim: 51 | claimName: glusterfs-pv-restore 52 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-etcd-cluster.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: EtcdCluster 3 | apiVersion: etcd.database.coreos.com/v1beta2 4 | metadata: 5 | name: etcd 6 | namespace: {{ gcs_namespace }} 7 | labels: 8 | app.kubernetes.io/part-of: gcs 9 | app.kubernetes.io/component: etcd 10 | app.kubernetes.io/name: etcd-cluster 11 | spec: 12 | pod: 13 | affinity: 14 | podAntiAffinity: 15 | preferredDuringSchedulingIgnoredDuringExecution: 16 | - weight: 100 17 | podAffinityTerm: 18 | labelSelector: 19 | matchExpressions: 20 | - key: etcd_cluster 21 | operator: In 22 | values: 23 | - etcd 24 | topologyKey: kubernetes.io/hostname 25 | etcdEnv: 26 | # https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/maintenance.md#auto-compaction 27 | - name: ETCD_AUTO_COMPACTION_MODE 28 | value: "periodic" 29 | - name: ETCD_AUTO_COMPACTION_RETENTION 30 | value: "5m" 31 | - name: ETCD_SNAPSHOT_COUNT 32 | value: "10000" 33 | - name: ETCD_QUOTA_BACKEND_BYTES 34 | value: "8589934592" # 8 * 1024 * 1024 * 1024 = 8GiB 35 | - name: ETCD_MAX_REQUEST_BYTES 36 | 
value: "5242880" # 5 * 1024 * 1024 = 5MB 37 | size: 3 38 | version: 3.3.8 39 | 40 | # TODO: Setup backup 41 | -------------------------------------------------------------------------------- /deploy/vagrant-playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Any specific customization required in the vagrant environment needs to be 3 | # done in this play 4 | - name: Pre-deploy bootstrapping 5 | hosts: all 6 | become: true 7 | gather_facts: false 8 | tags: 9 | - predeploy 10 | vars: 11 | kmods: 12 | - dm_mirror 13 | - dm_snapshot 14 | - dm_thin_pool 15 | 16 | tasks: 17 | - name: Extend root VG 18 | lvg: 19 | vg: atomicos 20 | pvs: /dev/vda2,/dev/vdb 21 | state: present 22 | 23 | - name: Extend the root LV and FS to occupy remaining space 24 | lvol: 25 | vg: atomicos 26 | lv: root 27 | size: 100%FREE 28 | resizefs: true 29 | tags: diskextend 30 | 31 | - name: Load required kernel modules 32 | modprobe: 33 | name: "{{ item }}" 34 | state: present 35 | with_items: "{{ kmods }}" 36 | 37 | - name: Persist loaded modules 38 | copy: 39 | dest: "/etc/modules-load.d/gluster-{{ item }}.conf" 40 | content: "{{ item }}" 41 | with_items: "{{ kmods }}" 42 | 43 | - name: Install packages 44 | command: "rpm-ostree install {{ item }}" 45 | with_items: 46 | # socat is needed for Helm 47 | - socat 48 | 49 | - name: Reboot to make layered packages available 50 | shell: sleep 2 && systemctl reboot 51 | async: 1 52 | poll: 0 53 | 54 | - name: Wait for host to be available 55 | wait_for_connection: 56 | delay: 15 57 | 58 | 59 | - import_playbook: "./deploy-k8s.yml" 60 | vars: 61 | # local_release_dir needs to be set for atomic hosts. 62 | local_release_dir: "/var/vagrant/temp" 63 | - import_playbook: "./deploy-gcs.yml" 64 | -------------------------------------------------------------------------------- /deploy/prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## This script sets up the environment to allow GCS deployments using kubespray and ansible 4 | 5 | DEP_NOT_FOUND=() 6 | function check_dep() { 7 | if ! which $1 &>/dev/null; then 8 | echo $1 not found 9 | DEP_NOT_FOUND+=($2) 10 | return 1 11 | fi 12 | return 0 13 | } 14 | 15 | check_dep ansible ansible 16 | check_dep virtualenv python-virtualenv 17 | if check_dep vagrant vagrant; then 18 | if ! (vagrant plugin list | grep -q vagrant-libvirt); then 19 | echo vagrant-libvirt not found 20 | DEP_NOT_FOUND+=(vagrant-libvirt) 21 | else 22 | cat < "none" # (requires vagrant-libvirt 0.44 which is not in Fedora yet) 30 | lv.random :model => 'random' 31 | lv.channel :type => 'unix', :target_name => 'org.qemu.guest_agent.0', :target_type => 'virtio' 32 | 33 | lv.storage :file, :device => "vdb", :size => '20G' 34 | 35 | disks = [] 36 | (2..4).each do |d| 37 | lv.storage :file, :device => "vd#{driveletters[d]}", :size => '1024G' 38 | disks.push "/dev/vd#{driveletters[d]}" 39 | end 40 | host_vars[vm_name] = {"gcs_disks" => disks.to_s} 41 | end 42 | # TODO: Maybe support other providers... 
like VirtualBox 43 | 44 | if i == 3 45 | vm.vm.provision :ansible do |ansible| 46 | ansible.playbook = "vagrant-playbook.yml" 47 | ansible.become = true 48 | ansible.limit = "all" 49 | ansible.groups = { 50 | "etcd" => ["kube[1:3]"], 51 | "kube-master" => ["kube[1:2]"], 52 | "kube-node" => ["kube[1:3]"], 53 | "k8s-cluster:children" => ["kube-master", "kube-node"], 54 | "gcs-node" => ["kube[1:3]"] 55 | } 56 | ansible.host_vars = host_vars 57 | end 58 | end 59 | end 60 | end 61 | end 62 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-operator.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: anthill 6 | namespace: {{ gcs_namespace }} 7 | rules: 8 | - apiGroups: [""] 9 | resources: 10 | - pods 11 | - services 12 | - endpoints 13 | - persistentvolumeclaims 14 | - events 15 | - configmaps 16 | - secrets 17 | verbs: ["create", "get", "list", "watch", "update", "delete"] 18 | - apiGroups: ["apps"] 19 | resources: 20 | - deployments 21 | - daemonsets 22 | - replicasets 23 | - statefulsets 24 | verbs: ["create", "get", "list", "watch", "update", "delete"] 25 | - apiGroups: ["monitoring.coreos.com"] 26 | resources: 27 | - servicemonitors 28 | verbs: ["get", "create"] 29 | - apiGroups: ["operator.gluster.org"] 30 | resources: 31 | - glusterclusters 32 | - glusternodes 33 | verbs: ["create", "get", "list", "watch", "update", "delete"] 34 | 35 | --- 36 | apiVersion: v1 37 | kind: ServiceAccount 38 | metadata: 39 | name: anthill 40 | namespace: {{ gcs_namespace }} 41 | 42 | --- 43 | kind: RoleBinding 44 | apiVersion: rbac.authorization.k8s.io/v1 45 | metadata: 46 | name: anthill 47 | namespace: {{ gcs_namespace }} 48 | subjects: 49 | - kind: ServiceAccount 50 | name: anthill 51 | namespace: {{ gcs_namespace }} 52 | roleRef: 53 | kind: Role 54 | name: anthill 55 | apiGroup: rbac.authorization.k8s.io 56 | 57 | --- 58 | apiVersion: apps/v1 59 | kind: Deployment 60 | metadata: 61 | name: anthill 62 | namespace: {{ gcs_namespace }} 63 | labels: 64 | app.kubernetes.io/part-of: gcs 65 | app.kubernetes.io/component: operator 66 | app.kubernetes.io/name: anthill 67 | spec: 68 | replicas: 1 69 | selector: 70 | matchLabels: 71 | name: anthill 72 | template: 73 | metadata: 74 | labels: 75 | name: anthill 76 | spec: 77 | serviceAccountName: anthill 78 | containers: 79 | - name: anthill 80 | image: quay.io/gluster/anthill:latest 81 | ports: 82 | - containerPort: 60000 83 | name: metrics 84 | command: 85 | - anthill 86 | imagePullPolicy: Always 87 | env: 88 | - name: WATCH_NAMESPACE 89 | valueFrom: 90 | fieldRef: 91 | fieldPath: metadata.namespace 92 | - name: POD_NAME 93 | valueFrom: 94 | fieldRef: 95 | fieldPath: metadata.name 96 | - name: OPERATOR_NAME 97 | value: "anthill" 98 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-etcd-operator.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ServiceAccount 3 | apiVersion: v1 4 | metadata: 5 | name: etcd-operator 6 | namespace: {{ gcs_namespace }} 7 | --- 8 | kind: ClusterRole 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | metadata: 11 | name: etcd-operator 12 | namespace: {{ gcs_namespace }} 13 | rules: 14 | - apiGroups: 15 | - etcd.database.coreos.com 16 | resources: 17 | - etcdclusters 18 | - etcdbackups 19 | - etcdrestores 20 | verbs: 21 | 
- "*" 22 | - apiGroups: 23 | - apiextensions.k8s.io 24 | resources: 25 | - customresourcedefinitions 26 | verbs: 27 | - "*" 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - pods 32 | - services 33 | - endpoints 34 | - persistentvolumeclaims 35 | - events 36 | verbs: 37 | - "*" 38 | - apiGroups: 39 | - apps 40 | resources: 41 | - deployments 42 | verbs: 43 | - "*" 44 | - apiGroups: 45 | - "" 46 | resources: 47 | - secrets 48 | verbs: 49 | - get 50 | --- 51 | kind: ClusterRoleBinding 52 | apiVersion: rbac.authorization.k8s.io/v1 53 | metadata: 54 | name: etcd-operator 55 | namespace: {{ gcs_namespace }} 56 | roleRef: 57 | apiGroup: rbac.authorization.k8s.io 58 | kind: ClusterRole 59 | name: etcd-operator 60 | subjects: 61 | - kind: ServiceAccount 62 | name: etcd-operator 63 | namespace: {{ gcs_namespace }} 64 | --- 65 | kind: Deployment 66 | apiVersion: apps/v1 67 | metadata: 68 | name: etcd-operator 69 | namespace: {{ gcs_namespace }} 70 | labels: 71 | app.kubernetes.io/part-of: gcs 72 | app.kubernetes.io/component: etcd 73 | app.kubernetes.io/name: etcd-operator 74 | spec: 75 | replicas: 1 76 | selector: 77 | matchLabels: 78 | app.kubernetes.io/part-of: gcs 79 | app.kubernetes.io/component: etcd 80 | app.kubernetes.io/name: etcd-operator 81 | template: 82 | metadata: 83 | labels: 84 | app.kubernetes.io/part-of: gcs 85 | app.kubernetes.io/component: etcd 86 | app.kubernetes.io/name: etcd-operator 87 | namespace: {{ gcs_namespace }} 88 | spec: 89 | serviceAccountName: etcd-operator 90 | containers: 91 | - name: etcd-operator 92 | image: quay.io/coreos/etcd-operator:v0.9.2 93 | command: 94 | - etcd-operator 95 | env: 96 | - name: MY_POD_NAMESPACE 97 | valueFrom: 98 | fieldRef: 99 | fieldPath: metadata.namespace 100 | - name: MY_POD_NAME 101 | valueFrom: 102 | fieldRef: 103 | fieldPath: metadata.name 104 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-prometheus-operator.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-operator 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus-operator 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-operator 12 | namespace: {{ gcs_namespace }} 13 | --- 14 | apiVersion: rbac.authorization.k8s.io/v1 15 | kind: ClusterRole 16 | metadata: 17 | name: prometheus-operator 18 | rules: 19 | - apiGroups: 20 | - apiextensions.k8s.io 21 | resources: 22 | - customresourcedefinitions 23 | verbs: 24 | - '*' 25 | - apiGroups: 26 | - monitoring.coreos.com 27 | resources: 28 | - alertmanagers 29 | - prometheuses 30 | - prometheuses/finalizers 31 | - alertmanagers/finalizers 32 | - servicemonitors 33 | - prometheusrules 34 | verbs: 35 | - '*' 36 | - apiGroups: 37 | - apps 38 | resources: 39 | - statefulsets 40 | verbs: 41 | - '*' 42 | - apiGroups: 43 | - "" 44 | resources: 45 | - configmaps 46 | - secrets 47 | verbs: 48 | - '*' 49 | - apiGroups: 50 | - "" 51 | resources: 52 | - pods 53 | verbs: 54 | - list 55 | - delete 56 | - apiGroups: 57 | - "" 58 | resources: 59 | - services 60 | - endpoints 61 | verbs: 62 | - get 63 | - create 64 | - update 65 | - apiGroups: 66 | - "" 67 | resources: 68 | - nodes 69 | verbs: 70 | - list 71 | - watch 72 | - apiGroups: 73 | - "" 74 | resources: 75 | - namespaces 76 | verbs: 77 | - get 78 | - list 79 | - watch 80 | --- 81 | apiVersion: apps/v1beta2 82 | kind: 
Deployment 83 | metadata: 84 | labels: 85 | k8s-app: prometheus-operator 86 | name: prometheus-operator 87 | namespace: {{ gcs_namespace }} 88 | spec: 89 | replicas: 1 90 | selector: 91 | matchLabels: 92 | k8s-app: prometheus-operator 93 | template: 94 | metadata: 95 | labels: 96 | k8s-app: prometheus-operator 97 | spec: 98 | containers: 99 | - args: 100 | - --kubelet-service=kube-system/kubelet 101 | - --logtostderr=true 102 | - --config-reloader-image=quay.io/coreos/configmap-reload:v0.0.1 103 | - --prometheus-config-reloader=quay.io/coreos/prometheus-config-reloader:v0.25.0 104 | image: quay.io/coreos/prometheus-operator:v0.25.0 105 | name: prometheus-operator 106 | ports: 107 | - containerPort: 8080 108 | name: http 109 | resources: 110 | limits: 111 | cpu: 200m 112 | memory: 200Mi 113 | requests: 114 | cpu: 100m 115 | memory: 100Mi 116 | securityContext: 117 | allowPrivilegeEscalation: false 118 | readOnlyRootFilesystem: true 119 | nodeSelector: 120 | beta.kubernetes.io/os: linux 121 | securityContext: 122 | runAsNonRoot: true 123 | runAsUser: 65534 124 | serviceAccountName: prometheus-operator 125 | --- 126 | apiVersion: v1 127 | kind: ServiceAccount 128 | metadata: 129 | name: prometheus-operator 130 | namespace: {{ gcs_namespace }} 131 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-grafana.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: data 5 | labels: 6 | name: data 7 | namespace: {{ gcs_namespace }} 8 | data: 9 | config: |- 10 | 11 | apiVersion: 1 12 | 13 | datasources: 14 | - name: Prometheus 15 | type: prometheus 16 | url: http://prometheus.gcs.svc:9090 17 | access: proxy 18 | basicAuth: false 19 | --- 20 | apiVersion: extensions/v1beta1 21 | kind: Deployment 22 | metadata: 23 | labels: 24 | app: grafana 25 | name: grafana 26 | namespace: {{ gcs_namespace }} 27 | spec: 28 | replicas: 1 29 | selector: 30 | matchLabels: 31 | app: grafana 32 | strategy: 33 | rollingUpdate: 34 | maxSurge: 1 35 | maxUnavailable: 1 36 | type: RollingUpdate 37 | template: 38 | metadata: 39 | creationTimestamp: null 40 | labels: 41 | app: grafana 42 | spec: 43 | containers: 44 | - name: grafana 45 | image: grafana/grafana:5.3.2 46 | ports: 47 | - containerPort: 3000 48 | protocol: TCP 49 | resources: 50 | limits: 51 | cpu: 500m 52 | memory: 2500Mi 53 | requests: 54 | cpu: 100m 55 | memory: 100Mi 56 | volumeMounts: 57 | - mountPath: /etc/grafana/provisioning/datasources 58 | name: data 59 | readOnly: false 60 | - mountPath: /etc/grafana/provisioning/dashboards 61 | name: grafana-dashboards 62 | readOnly: false 63 | - mountPath: /grafana-dashboard-definitions/0/grafana-dashboard-k8s-storage-resources-glusterfs-pv 64 | name: grafana-dashboard-k8s-storage-resources-glusterfs-pv 65 | readOnly: false 66 | restartPolicy: Always 67 | securityContext: {} 68 | terminationGracePeriodSeconds: 30 69 | volumes: 70 | - name: data 71 | configMap: 72 | name: data 73 | items: 74 | - key: config 75 | path: datasource.yml 76 | - configMap: 77 | name: grafana-dashboards 78 | name: grafana-dashboards 79 | - configMap: 80 | name: grafana-dashboard-k8s-storage-resources-glusterfs-pv 81 | name: grafana-dashboard-k8s-storage-resources-glusterfs-pv 82 | --- 83 | apiVersion: v1 84 | kind: Service 85 | metadata: 86 | name: grafana 87 | namespace: {{ gcs_namespace }} 88 | spec: 89 | type: NodePort 90 | ports: 91 | - name: http 92 | nodePort: 30800 93 | port: 
80 94 | protocol: TCP 95 | targetPort: 3000 96 | selector: 97 | app: grafana 98 | --- 99 | apiVersion: v1 100 | data: 101 | dashboards.yaml: |- 102 | { 103 | "apiVersion": 1, 104 | "providers": [ 105 | { 106 | "folder": "", 107 | "name": "0", 108 | "options": { 109 | "path": "/grafana-dashboard-definitions/0" 110 | }, 111 | "orgId": 1, 112 | "type": "file" 113 | } 114 | ] 115 | } 116 | kind: ConfigMap 117 | metadata: 118 | name: grafana-dashboards 119 | namespace: gcs 120 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-gd2.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: StatefulSet 3 | apiVersion: apps/v1 4 | metadata: 5 | name: gluster-{{ kube_hostname.split(".")[0] }} 6 | namespace: {{ gcs_namespace }} 7 | labels: 8 | app.kubernetes.io/part-of: gcs 9 | app.kubernetes.io/component: glusterfs 10 | app.kubernetes.io/name: glusterd2 11 | spec: 12 | serviceName: glusterd2 13 | selector: 14 | matchLabels: 15 | app.kubernetes.io/part-of: gcs 16 | app.kubernetes.io/component: glusterfs 17 | app.kubernetes.io/name: glusterd2 18 | template: 19 | metadata: 20 | labels: 21 | app.kubernetes.io/part-of: gcs 22 | app.kubernetes.io/component: glusterfs 23 | app.kubernetes.io/name: glusterd2 24 | spec: 25 | affinity: 26 | nodeAffinity: 27 | requiredDuringSchedulingIgnoredDuringExecution: 28 | nodeSelectorTerms: 29 | - matchExpressions: 30 | - key: kubernetes.io/hostname 31 | operator: In 32 | values: 33 | - {{ kube_hostname }} 34 | containers: 35 | - name: glusterd2 36 | image: docker.io/gluster/glusterd2-nightly 37 | livenessProbe: 38 | httpGet: 39 | path: /ping 40 | port: 24007 41 | initialDelaySeconds: 10 42 | periodSeconds: 60 43 | env: 44 | - name: GD2_ETCDENDPOINTS 45 | value: "http://etcd-client.{{ gcs_namespace }}:2379" 46 | - name: GD2_CLUSTER_ID 47 | value: "{{ gcs_gd2_clusterid }}" 48 | - name: GD2_CLIENTADDRESS 49 | value: "gluster-{{ kube_hostname.split(".")[0] }}-0.glusterd2.{{ gcs_namespace }}:24007" 50 | - name: GD2_ENDPOINTS 51 | value: "http://gluster-{{ kube_hostname.split(".")[0] }}-0.glusterd2.{{ gcs_namespace }}:24007" 52 | - name: GD2_PEERADDRESS 53 | value: "gluster-{{ kube_hostname.split(".")[0] }}-0.glusterd2.{{ gcs_namespace }}:24008" 54 | # TODO: Remove RESTAUTH false once we enable setting auth token 55 | # using secrets 56 | - name: GD2_RESTAUTH 57 | value: "false" 58 | securityContext: 59 | capabilities: {} 60 | privileged: true 61 | volumeMounts: 62 | - name: gluster-dev 63 | mountPath: "/dev" 64 | - name: gluster-cgroup 65 | mountPath: "/sys/fs/cgroup" 66 | readOnly: true 67 | - name: gluster-lvm 68 | mountPath: "/run/lvm" 69 | - name: gluster-udev 70 | mountPath: "/run/udev" 71 | - name: gluster-kmods 72 | mountPath: "/usr/lib/modules" 73 | readOnly: true 74 | - name: glusterd2-statedir 75 | mountPath: "/var/lib/glusterd2" 76 | - name: glusterd2-logdir 77 | mountPath: "/var/log/glusterd2" 78 | - name: rsyslog-sidecar 79 | image: docker.io/gluster/gluster-rsyslog 80 | volumeMounts: 81 | - name: glusterd2-logdir 82 | mountPath: "/var/log/glusterd2" 83 | volumes: 84 | - name: gluster-dev 85 | hostPath: 86 | path: "/dev" 87 | - name: gluster-cgroup 88 | hostPath: 89 | path: "/sys/fs/cgroup" 90 | - name: gluster-lvm 91 | hostPath: 92 | path: "/run/lvm" 93 | - name: gluster-udev 94 | hostPath: 95 | path: "/run/udev" 96 | - name: gluster-kmods 97 | hostPath: 98 | path: "/usr/lib/modules" 99 | - name: glusterd2-statedir 100 | hostPath: 101 | path: 
"/var/lib/glusterd2" 102 | type: DirectoryOrCreate 103 | - name: glusterd2-logdir 104 | hostPath: 105 | path: "/var/log/glusterd2" 106 | type: DirectoryOrCreate 107 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-prometheus-node-exporter.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: node-exporter 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | --- 19 | apiVersion: rbac.authorization.k8s.io/v1 20 | kind: ClusterRoleBinding 21 | metadata: 22 | name: node-exporter 23 | roleRef: 24 | apiGroup: rbac.authorization.k8s.io 25 | kind: ClusterRole 26 | name: node-exporter 27 | subjects: 28 | - kind: ServiceAccount 29 | name: node-exporter 30 | namespace: {{ gcs_namespace }} 31 | --- 32 | apiVersion: apps/v1beta2 33 | kind: DaemonSet 34 | metadata: 35 | labels: 36 | app: node-exporter 37 | name: node-exporter 38 | namespace: {{ gcs_namespace }} 39 | spec: 40 | selector: 41 | matchLabels: 42 | app: node-exporter 43 | template: 44 | metadata: 45 | labels: 46 | app: node-exporter 47 | spec: 48 | containers: 49 | - args: 50 | - --web.listen-address=127.0.0.1:9100 51 | - --path.procfs=/host/proc 52 | - --path.sysfs=/host/sys 53 | - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/) 54 | - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$ 55 | image: quay.io/prometheus/node-exporter:v0.16.0 56 | name: node-exporter 57 | resources: 58 | limits: 59 | cpu: 250m 60 | memory: 180Mi 61 | requests: 62 | cpu: 102m 63 | memory: 180Mi 64 | volumeMounts: 65 | - mountPath: /host/proc 66 | name: proc 67 | readOnly: false 68 | - mountPath: /host/sys 69 | name: sys 70 | readOnly: false 71 | - mountPath: /host/root 72 | mountPropagation: HostToContainer 73 | name: root 74 | readOnly: true 75 | - args: 76 | - --secure-listen-address=$(IP):9100 77 | - --upstream=http://127.0.0.1:9100/ 78 | env: 79 | - name: IP 80 | valueFrom: 81 | fieldRef: 82 | fieldPath: status.podIP 83 | image: quay.io/coreos/kube-rbac-proxy:v0.4.0 84 | name: kube-rbac-proxy 85 | ports: 86 | - containerPort: 9100 87 | hostPort: 9100 88 | name: https 89 | resources: 90 | limits: 91 | cpu: 20m 92 | memory: 40Mi 93 | requests: 94 | cpu: 10m 95 | memory: 20Mi 96 | hostNetwork: true 97 | hostPID: true 98 | nodeSelector: 99 | beta.kubernetes.io/os: linux 100 | securityContext: 101 | runAsNonRoot: true 102 | runAsUser: 65534 103 | serviceAccountName: node-exporter 104 | tolerations: 105 | - effect: NoSchedule 106 | key: node-role.kubernetes.io/master 107 | volumes: 108 | - hostPath: 109 | path: /proc 110 | name: proc 111 | - hostPath: 112 | path: /sys 113 | name: sys 114 | - hostPath: 115 | path: / 116 | name: root 117 | --- 118 | apiVersion: v1 119 | kind: ServiceAccount 120 | metadata: 121 | name: node-exporter 122 | namespace: {{ gcs_namespace }} 123 | --- 124 | apiVersion: v1 125 | kind: Service 126 | metadata: 127 | labels: 128 | k8s-app: node-exporter 129 | name: node-exporter 130 | namespace: {{ gcs_namespace }} 131 | spec: 132 | clusterIP: None 133 | ports: 134 | - name: https 135 | port: 9100 
136 | targetPort: https 137 | selector: 138 | app: node-exporter 139 | --- 140 | apiVersion: monitoring.coreos.com/v1 141 | kind: ServiceMonitor 142 | metadata: 143 | labels: 144 | k8s-app: node-exporter 145 | name: node-exporter 146 | namespace: {{ gcs_namespace }} 147 | spec: 148 | endpoints: 149 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 150 | interval: 30s 151 | port: https 152 | scheme: https 153 | tlsConfig: 154 | insecureSkipVerify: true 155 | jobLabel: k8s-app 156 | selector: 157 | matchLabels: 158 | k8s-app: node-exporter 159 | 160 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | As of now, one can already use Gluster as storage for containers by making use of different projects available in github repositories associated with Gluster (especially GlusterD1) & Heketi through [gluster-kubernetes](https://github.com/gluster/gluster-kubernetes) . The goal of the GCS initiative is to provide a new stack ( especially with [GD2](https://github.com/gluster/glusterd2), [gluster-csi-driver](https://github.com/gluster/gluster-csi-driver) ) focused on easier integration, much more opinionated bases install, a better upgrade experience to deploy Gluster for container storage. We are primarily focused on integration with Kubernetes (k8s) through this initiative. 2 | 3 | ## Quickstart - Try it out 4 | 5 | The [deploy/ directory](deploy/) contains instructions for installing GCS, 6 | either in a Vagrant-based test environment, or on your own cluster. 7 | 8 | ## Key projects for GCS 9 | 10 | ### Glusterd2 (GD2) 11 | 12 | Repo: https://github.com/gluster/glusterd2 13 | 14 | The challenge we have with current management layer of Gluster (glusterd) is that it is not designed for a service oriented architecture. Heketi overcame this limitation and made Gluster consumable in k8s by providing all the necessary hooks needed for supporting Persistent Volume Claims. 15 | 16 | Glusterd2 provides a service oriented architecture for volume & cluster management. Gd2 also intends to provide many of the Heketi functionalities needed by Kubernetes natively. Hence we are working on merging Heketi with gd2 and you can follow more of this action in the issues associated with the gd2 github repository. 17 | 18 | ### anthill / operator 19 | Repo: https://github.com/gluster/anthill 20 | 21 | This project aims to add an operator for Gluster in Kubernetes., Since it is relatively new, there are areas where you can contribute to make the operator experience better (please refer to the list of issues). This project intends to make the whole Gluster experience in k8s much smoother by automatic management of operator tasks like installation, rolling upgrades etc. 22 | 23 | ### gluster-csi-driver 24 | Repo: http://github.com/gluster/gluster-csi-driver 25 | 26 | This project will provide CSI (Container Storage Interface) compliant drivers for GlusterFS & gluster-block in k8s. 27 | 28 | ### gluster-kubernetes 29 | Repo: https://github.com/gluster/gluster-kubernetes 30 | 31 | This project is intended to provide all the required installation and management steps for getting gluster up and running in k8s. 32 | 33 | ### GlusterFS 34 | Repo: https://github.com/gluster/glusterfs 35 | 36 | GlusterFS is the main and the core repository of Gluster. To support storage in container world, we don’t need all the features of Gluster. 
Hence, we would be focusing on a stack which would be absolutely required in k8s. This would allow us to plan and execute tests well, and also provide users with a setup which works without too many options to tweak. 37 | 38 | Notice that glusterfs default volumes would continue to work as of now, but the translator stack which is used in GCS will be much leaner and geared to work optimally in k8s. 39 | 40 | ### Monitoring 41 | Repo: https://github.com/gluster/gluster-prometheus 42 | 43 | As k8s ecosystem provides its own native monitoring mechanisms, we intend to have this project be the placeholder for required monitoring plugins. The scope of this project is currently WIP and we welcome your inputs to shape the project. 44 | 45 | More details on this can be found at: https://lists.gluster.org/pipermail/gluster-users/2018-July/034435.html 46 | 47 | ### gluster-mixins 48 | Repo: https://github.com/gluster/gluster-mixins 49 | 50 | A set of Grafana dashboards and Prometheus alerts for Gluster. 51 | 52 | The scope of this project is to provide Gluster specific Grafana dashboard configs and Prometheus rule files using Prometheus Mixins. 53 | 54 | ### Gluster-Containers 55 | 56 | Repo: https://github.com/gluster/gluster-containers 57 | 58 | This repository provides container specs / Dockerfiles that can be used with a container runtime like cri-o & docker. 59 | 60 | ### gluster-block 61 | Repo: https://github.com/gluster/gluster-block 62 | 63 | This project intends to expose files in a gluster volume as block devices. Gluster-block enables supporting ReadWriteOnce (RWO) PVCs and the corresponding workloads in Kubernetes using gluster as the underlying storage technology. 64 | 65 | Gluster-block is intended to be consumed by stateful RWO applications like databases and k8s infrastructure services like logging, metrics etc. gluster-block is more preferred than file based Persistent Volumes in K8s for stateful/transactional workloads as it provides better performance & consistency guarantees. 66 | 67 | ---- 68 | 69 | Note that this is not an exhaustive or final list of projects involved with GCS. We will continue to update the project list depending on the new requirements and priorities that we discover in this journey. 70 | 71 | We welcome you to join this journey by looking up the repositories and contributing to them. As always, we are happy to hear your thoughts about this initiative and please stay tuned as we provide periodic updates about GCS here! 72 | 73 | -------------------------------------------------------------------------------- /doc/deploying_gcs_on_bare_metal_machines.md: -------------------------------------------------------------------------------- 1 | ### Deploying kubernetes + GCS on bare metal machines 2 | 3 | #### Cluster: 4 | 5 | Total number of nodes: 3 {Node1, Node2, Node3} 6 | 7 | #### Node configurations: 8 | 9 | OS: Fedora 29
10 | RAM: 128 GB
11 | CPUs: 24
12 | 13 | ### Deploying Kubernetes on bare metal machines: 14 | 15 | ##### Cluster details: 16 | 17 | Total number of nodes: 3 {Node1: kube1, Node2: kube2, Node3: kube3}
18 | Master nodes: 1 {kube1}
19 | Kube nodes: 3
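The playbooks refer to the machines by the names kube1, kube2 and kube3, and the GCS hosts must be reachable under the same names that kubernetes uses as node names. If the machines are not already named this way, it can help to set the hostname on each node up front; a minimal example for the first node (repeat with the matching name on the other nodes):

```
$ hostnamectl set-hostname kube1
```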
20 | 21 | #### Steps to install Kubernetes: 22 | 23 | 1. Install ansible on the master node:
24 | `$ yum install ansible` 25 | 26 | 2. To ensure ansible is able to ssh into the nodes, add the public key of the master node (kube1) as an authorized 27 | key on the other kube nodes
28 | `$ cat ~/.ssh/id_rsa.pub | ssh root@kube2 'cat >> ~/.ssh/authorized_keys'` 29 | 30 | 3. Stop firewalld on all machines
31 | `$ systemctl stop firewalld` 32 | 33 | 4. We will install kubernetes via the deploy-k8s script in the GCS repository. Clone the GCS repository on the master 34 | node (kube1):
35 | `$ git clone --recurse-submodules git@github.com:gluster/gcs.git` 36 | 37 | 5. To install kubernetes we need to create an inventory file listing the kubernetes nodes.
38 | 39 | A template for the GCS inventory file can be found under the deploy directory in the gcs repository:
40 | `https://github.com/gluster/gcs/blob/master/deploy/examples/inventory-gcs-kubespray.example` 41 | 42 | We will be deploying GCS after deploying kubernetes, so we will have to provide the GCS 43 | nodes in the inventory file. We will also mention the devices that we want to use to create volumes. 44 | Since we are using only 3 nodes, we will provide devices for all 3 nodes and list 45 | all the nodes in the gcs-node group. 46 | ``` 47 | kube1 ansible_host=<node1 ip> gcs_disks='["<device1>", "<device2>"]' 48 | kube2 ansible_host=<node2 ip> gcs_disks='["<device1>", "<device2>"]' 49 | kube3 ansible_host=<node3 ip> gcs_disks='["<device1>", "<device2>"]' 50 | 51 | ## Hosts that will run etcd for the Kubernetes cluster 52 | [etcd] 53 | kube1 54 | kube2 55 | kube3 56 | 57 | ## Hosts that will be kubernetes master nodes 58 | [kube-master] 59 | kube1 60 | 61 | ## Hosts that will be kubernetes nodes 62 | [kube-node] 63 | kube1 64 | kube2 65 | kube3 66 | 67 | ## The full kubernetes cluster 68 | [k8s-cluster:children] 69 | kube-master 70 | kube-node 71 | 72 | ## Hosts that will be used for GCS. 73 | ## Systems grouped here need to define 'gcs_disks' as hostvars, which are the disks that will be used by GCS to 74 | provision storage. 75 | [gcs-node] 76 | kube1 77 | kube2 78 | kube3 79 | ``` 80 | 81 | 82 | 6. To deploy kubernetes we need to execute the deploy-k8s.yml file using ansible and provide the inventory file created in Step 5.
83 | The deploy-k8s.yml file is present under the deploy directory in the gcs repository.
84 | `$ ansible-playbook -i <path to inventory file> <path to gcs repo>/deploy-k8s.yml` 86 | 87 | 7. Check whether all the nodes are in the Ready state.
88 | `$ kubectl get nodes`
89 | 90 | The status of all the nodes should be in the READY state. 91 | 92 | > Note: If deploying kubernetes fails, please check the troubleshooting section. 93 | 94 | ### Deploying GCS on the kubernetes cluster: 95 | 96 | #### Steps to install GCS: 97 | 98 | 1. To deploy GCS on the kubernetes cluster we will execute the deploy-gcs.yml file using ansible from the master node (kube1). The deploy-gcs.yml file is present under the deploy directory.
99 | `$ ansible-playbook -i <path to inventory file> <path to gcs repo>/deploy-gcs.yml` 100 | 101 | ### Testing the deployment 102 | 103 | To test whether the deployment was successful or not, you can run the following command on the master node.
104 | `$ kubectl get pods -n gcs`
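To also see which node each pod was scheduled on (the glusterd2 pods are pinned one per gcs node by the per-node manifests), the wide output format can be handy:

```
$ kubectl get pods -n gcs -o wide
```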
105 | 106 | All the pods should be in the `Running` state. 107 | 108 | ### Steps to create a volume from kubernetes. 109 | 110 | Follow 111 | [glusterfs](https://github.com/gluster/gluster-csi-driver/blob/master/README.md#create-a-glusterfs-storage-class-rwx) 112 | to create and bind a RWX PVC 113 | 114 | Follow 115 | [gluster-virtblock](https://github.com/gluster/gluster-csi-driver/blob/master/README.md#create-a-gluster-virtual-block-storage-class) 116 | to create and bind a RWO PVC 117 | 118 | ### Scale Testing results: 119 | 120 | We deployed GCS by following the steps mentioned above. We created 1000 volumes in batches of 250, and deleted 1000 volumes in batches of 10. The scale testing results can be found in this [spreadsheet](https://docs.google.com/spreadsheets/d/1nqySz3R2uR7MUPWWxzJMVWETMob-Mkwh67HmP6QHPDQ/edit?usp=sharing). 121 | 122 | ### Remove GCS setup 123 | 124 | To remove the gcs setup, run:
125 | `$ kubectl delete namespace gcs` 126 | 127 | > Note: Wait for the command to complete. 128 | 129 | ### Remove kubernetes cluster 130 | 131 | To remove the kubernetes setup, run:
132 | `$ ansible-playbook -i <path to inventory file> <path to gcs repo>/deploy/kubespray/reset.yml` 133 | 134 | ### Troubleshooting 135 | 136 | * If deploying kubernetes fails with the error: `Failure: "No package docker-ce available."`
137 | Then one must manually install docker-ce on the system. 138 | ##### In Centos: 139 | 140 | ``` 141 | $ yum install docker 142 | $ systemctl status docker (to check whether docker is runnning or not) 143 | ``` 144 | 145 | ##### In Fedora 146 | 147 | ``` 148 | $ sudo dnf -y install dnf-plugins-core 149 | $ sudo dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo 150 | $ sudo dnf config-manager --set-enabled docker-ce-edge 151 | $ sudo dnf config-manager --set-enabled docker-ce-test 152 | $ sudo dnf install docker-ce 153 | $ sudo systemctl enable docker 154 | $ sudo systemctl start docker 155 | ``` 156 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-prometheus-kube-state-metrics.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: kube-state-metrics 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - secrets 11 | - nodes 12 | - pods 13 | - services 14 | - resourcequotas 15 | - replicationcontrollers 16 | - limitranges 17 | - persistentvolumeclaims 18 | - persistentvolumes 19 | - namespaces 20 | - endpoints 21 | verbs: 22 | - list 23 | - watch 24 | - apiGroups: 25 | - extensions 26 | resources: 27 | - daemonsets 28 | - deployments 29 | - replicasets 30 | verbs: 31 | - list 32 | - watch 33 | - apiGroups: 34 | - apps 35 | resources: 36 | - statefulsets 37 | - daemonsets 38 | - deployments 39 | - replicasets 40 | verbs: 41 | - list 42 | - watch 43 | - apiGroups: 44 | - batch 45 | resources: 46 | - cronjobs 47 | - jobs 48 | verbs: 49 | - list 50 | - watch 51 | - apiGroups: 52 | - autoscaling 53 | resources: 54 | - horizontalpodautoscalers 55 | verbs: 56 | - list 57 | - watch 58 | - apiGroups: 59 | - authentication.k8s.io 60 | resources: 61 | - tokenreviews 62 | verbs: 63 | - create 64 | - apiGroups: 65 | - authorization.k8s.io 66 | resources: 67 | - subjectaccessreviews 68 | verbs: 69 | - create 70 | - apiGroups: 71 | - policy 72 | resources: 73 | - poddisruptionbudgets 74 | verbs: 75 | - list 76 | - watch 77 | --- 78 | apiVersion: rbac.authorization.k8s.io/v1 79 | kind: ClusterRoleBinding 80 | metadata: 81 | name: kube-state-metrics 82 | roleRef: 83 | apiGroup: rbac.authorization.k8s.io 84 | kind: ClusterRole 85 | name: kube-state-metrics 86 | subjects: 87 | - kind: ServiceAccount 88 | name: kube-state-metrics 89 | namespace: {{ gcs_namespace }} 90 | --- 91 | apiVersion: apps/v1beta2 92 | kind: Deployment 93 | metadata: 94 | labels: 95 | app: kube-state-metrics 96 | name: kube-state-metrics 97 | namespace: {{ gcs_namespace }} 98 | spec: 99 | replicas: 1 100 | selector: 101 | matchLabels: 102 | app: kube-state-metrics 103 | template: 104 | metadata: 105 | labels: 106 | app: kube-state-metrics 107 | spec: 108 | containers: 109 | - args: 110 | - --secure-listen-address=:8443 111 | - --upstream=http://127.0.0.1:8081/ 112 | image: quay.io/coreos/kube-rbac-proxy:v0.4.0 113 | name: kube-rbac-proxy-main 114 | ports: 115 | - containerPort: 8443 116 | name: https-main 117 | resources: 118 | limits: 119 | cpu: 20m 120 | memory: 40Mi 121 | requests: 122 | cpu: 10m 123 | memory: 20Mi 124 | - args: 125 | - --secure-listen-address=:9443 126 | - --upstream=http://127.0.0.1:8082/ 127 | image: quay.io/coreos/kube-rbac-proxy:v0.4.0 128 | name: kube-rbac-proxy-self 129 | ports: 130 | - containerPort: 9443 131 | name: https-self 132 | resources: 133 | limits: 
134 | cpu: 20m 135 | memory: 40Mi 136 | requests: 137 | cpu: 10m 138 | memory: 20Mi 139 | - args: 140 | - --host=127.0.0.1 141 | - --port=8081 142 | - --telemetry-host=127.0.0.1 143 | - --telemetry-port=8082 144 | image: quay.io/coreos/kube-state-metrics:v1.4.0 145 | name: kube-state-metrics 146 | resources: 147 | limits: 148 | cpu: 100m 149 | memory: 150Mi 150 | requests: 151 | cpu: 100m 152 | memory: 150Mi 153 | - command: 154 | - /pod_nanny 155 | - --container=kube-state-metrics 156 | - --cpu=100m 157 | - --extra-cpu=2m 158 | - --memory=150Mi 159 | - --extra-memory=30Mi 160 | - --threshold=5 161 | - --deployment=kube-state-metrics 162 | env: 163 | - name: MY_POD_NAME 164 | valueFrom: 165 | fieldRef: 166 | apiVersion: v1 167 | fieldPath: metadata.name 168 | - name: MY_POD_NAMESPACE 169 | valueFrom: 170 | fieldRef: 171 | apiVersion: v1 172 | fieldPath: metadata.namespace 173 | image: quay.io/coreos/addon-resizer:1.0 174 | name: addon-resizer 175 | resources: 176 | limits: 177 | cpu: 50m 178 | memory: 30Mi 179 | requests: 180 | cpu: 10m 181 | memory: 30Mi 182 | nodeSelector: 183 | beta.kubernetes.io/os: linux 184 | securityContext: 185 | runAsNonRoot: true 186 | runAsUser: 65534 187 | serviceAccountName: kube-state-metrics 188 | --- 189 | apiVersion: v1 190 | kind: ServiceAccount 191 | metadata: 192 | name: kube-state-metrics 193 | namespace: {{ gcs_namespace }} 194 | --- 195 | apiVersion: v1 196 | kind: Service 197 | metadata: 198 | labels: 199 | k8s-app: kube-state-metrics 200 | name: kube-state-metrics 201 | namespace: {{ gcs_namespace }} 202 | spec: 203 | clusterIP: None 204 | ports: 205 | - name: https-main 206 | port: 8443 207 | targetPort: https-main 208 | - name: https-self 209 | port: 9443 210 | targetPort: https-self 211 | selector: 212 | app: kube-state-metrics 213 | --- 214 | apiVersion: monitoring.coreos.com/v1 215 | kind: ServiceMonitor 216 | metadata: 217 | labels: 218 | k8s-app: kube-state-metrics 219 | name: kube-state-metrics 220 | namespace: {{ gcs_namespace }} 221 | spec: 222 | endpoints: 223 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 224 | honorLabels: true 225 | interval: 30s 226 | port: https-main 227 | scheme: https 228 | scrapeTimeout: 30s 229 | tlsConfig: 230 | insecureSkipVerify: true 231 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 232 | interval: 30s 233 | port: https-self 234 | scheme: https 235 | tlsConfig: 236 | insecureSkipVerify: true 237 | jobLabel: k8s-app 238 | selector: 239 | matchLabels: 240 | k8s-app: kube-state-metrics 241 | 242 | -------------------------------------------------------------------------------- /deploy/README.md: -------------------------------------------------------------------------------- 1 | # GCS Deployment scripts 2 | 3 | This repository contains playbooks to deploy a GCS cluster on Kubernetes. It also contains a Vagrantfile to setup a local GCS cluster using vagrant-libvirt. 4 | 5 | > _IMP: Clone this repository with submodules_ 6 | > git clone --recurse-submodules -j8 git@github.com:gluster/gcs.git 7 | 8 | ## Available playbooks 9 | 10 | ### deploy-k8s.yml 11 | 12 | This playbook deploys a kubernetes cluster on the configured nodes, and creates a copy of the kube-config to allow kubectl from the Ansible host. 13 | Kubernetes is configured to use Flannel as the network plugin. 14 | 15 | > TODO: 16 | > - Describe the minimum required ansible inventory format. 
Until then, use ./kubespray/inventory/sample/hosts.ini. 17 | 18 | 19 | ### deploy-gcs.yml 20 | 21 | This playbook deploys GCS on a Kubernetes cluster. All of GCS, except for the StorageClass, is deployed into its own namespace. The playbook deploys the following: 22 | 23 | - etcd-operator 24 | - etcd-cluster with 3 nodes 25 | - glusterd2-cluster with a pod on each kube node and configured to use the deployed etcd-cluster 26 | - glusterd2-client service providing a single ReST access point to GD2 27 | - glusterfs-csi-driver configured to use the glusterd2-client service to reach GD2 volume management APIs 28 | - glustervirtblock-csi-driver configured to use the glusterd2-client service to reach GD2 virtblock management plugin APIs 29 | 30 | > NOTE: The glusterd2-cluster deployment is currently insecure, with ReST API auth disabled and without any TLS. This leaves the cluster open to direct access by other applications on the Kubernetes cluster. 31 | 32 | Uses the inventory defined for deploy-k8s.yml. 33 | 34 | At the moment this is a playbook, but it really should become a role. 35 | 36 | > TODO: 37 | > - Enable secure deployment with ReST auth and TLS. 38 | > - Convert to role 39 | > - Add more configurability 40 | 41 | ### vagrant-playbook.yml 42 | 43 | This playbook combines all the above to provision the local cluster brought up by Vagrant. 44 | 45 | ## Helper scripts 46 | 47 | ### prepare.sh 48 | 49 | This script prepares the local environment to run the deployment. 50 | 51 | ## How to use 52 | 53 | ### Requirements 54 | 55 | - ansible 56 | - python-virtualenv 57 | 58 | #### Optional 59 | 60 | - vagrant, vagrant-libvirt - Only required if you want to bring up the vagrant-powered local cluster 61 | - kubectl - Only required if you plan to manage the kube cluster from the Ansible host 62 | 63 | ### External kube cluster 64 | 65 | Using the deploy-gcs.yml playbook on an externally set up Kubernetes cluster requires a custom inventory file. 66 | An example of this custom inventory is in `examples/inventory-gcs-only.example`. 67 | 68 | Please read the [Ansible inventory](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) documentation to learn about the Ansible inventory file format. 69 | 70 | The custom inventory file needs the following: 71 | 72 | - One or more Kubernetes master hosts must be defined, and must be set up with password-less ssh. 73 | - One or more gcs hosts must be defined as well, but they need not have password-less ssh. 74 | - The hostnames used for the gcs hosts must be the ones used by Kubernetes as the node names. If unsure, get the correct names using `kubectl get nodes`. 75 | - The gcs hosts must define the disks to be used by GCS, as a `gcs_disks` hostvar. 76 | - A `kube-master` group must be defined, with the master hosts as members. 77 | - A `gcs-node` group must be defined, with the gcs hosts as members. 78 | 79 | With the inventory file defined, run the deploy-gcs playbook as follows (pass the inventory with `-i` if it is not set in `ansible.cfg`): 80 | 81 | ``` 82 | (gcs-venv) $ ansible-playbook --become deploy-gcs.yml 83 | ``` 84 | 85 | ### Local cluster using Vagrant 86 | 87 | The provided Vagrantfile brings up a 3-node Kubernetes cluster, with three 1000GB 88 | virtual disks attached to each node. 89 | 90 | > Note that the virtual disks are created in the default storage pool of libvirt, 91 | > though they are all sparse images. The default storage pool is set to 92 | > /var/lib/libvirt/images, so if the cluster is used heavily for GCS 93 | > testing, it is possible to run out of space on your root partition.
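If disk space is a concern, it can be worth checking how much room the default libvirt pool has before bringing the cluster up. A quick sanity check (a sketch, assuming the default pool is named `default` and uses the path mentioned above):

```
$ virsh pool-info default
$ df -h /var/lib/libvirt/images
```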
94 | 95 | - Run the `prepare.sh` script to perform preliminary checks and prepare the virtualenv. 96 | 97 | ``` 98 | $ ./prepare.sh 99 | ``` 100 | 101 | - Activate the virtualenv 102 | ``` 103 | $ source gcs-venv/bin/activate 104 | (gcs-venv) $ 105 | ``` 106 | 107 | - Bring up the cluster. This will take a long time. 108 | 109 | ``` 110 | (gcs-venv) $ vagrant up 111 | ``` 112 | 113 | Once the cluster is up, you can begin using it by: 114 | - SSHing into one of the Kube nodes and running the `kubectl` commands in there 115 | 116 | ``` 117 | (gcs-venv) $ vagrant ssh kube1 118 | ``` 119 | 120 | #### Resetting the cluster 121 | 122 | If the Vagrant VMs are restarted (say, because of a host reboot), the Kubernetes cluster cannot come back up. In such a case, reset the cluster and re-deploy kube and GCS. 123 | 124 | ``` 125 | (gcs-venv) $ ansible-playbook --become vagrant-reset.yml 126 | . 127 | . 128 | . 129 | (gcs-venv) $ ansible-playbook --skip-tags predeploy --become vagrant-playbook.yml 130 | . 131 | . 132 | . 133 | ``` 134 | 135 | This will create a brand new cluster. If this fails, destroy the Vagrant environment and start fresh. 136 | 137 | ``` 138 | (gcs-venv) $ vagrant destroy -f 139 | . 140 | . 141 | . 142 | (gcs-venv) $ vagrant up 143 | ``` 144 | 145 | ### Deploying an app with GCS backed volume 146 | 147 | An example config is provided for creating a PersistentVolumeClaim against GCS and using the claim in an app, in [examples/app-using-gcs-volume.yml](examples/app-using-gcs-volume.yml). 148 | 149 | SSH to the master node: 150 | ``` 151 | (gcs-venv) $ vagrant ssh kube1 152 | ``` 153 | 154 | Create the config file on the node and deploy the application along with the PVC as below: 155 | ``` 156 | [vagrant@kube1 ~]$ kubectl create -f app-using-gcs-volume.yml 157 | ``` 158 | 159 | ### Deploying an app with GCS backed cloned volume 160 | 161 | An example config is provided for creating a snapshot of a volume, creating a PersistentVolumeClaim against GCS from the snapshot, and using the claim in an app, in [examples/app-using-gcs-volume-clone.yml](examples/app-using-gcs-volume-clone.yml). 162 | 163 | Create the config file on the node and deploy the application as below: 164 | 165 | ``` 166 | [vagrant@kube1 ~]$ kubectl create -f app-using-gcs-volume-clone.yml 167 | ``` 168 | 169 | ### Accessing glustercli from glusterd2 pods 170 | 171 | - List glusterd2 pods 172 | 173 | ``` 174 | kubectl get po -ngcs --selector=app.kubernetes.io/name=glusterd2 175 | NAME READY STATUS RESTARTS AGE 176 | gluster-kube1-0 1/1 Running 0 16h 177 | gluster-kube2-0 1/1 Running 0 16h 178 | gluster-kube3-0 1/1 Running 0 16h 179 | ``` 180 | 181 | - Step inside any one of the glusterd2 pods 182 | 183 | ``` 184 | [vagrant@kube1 ~]$ kubectl exec -it gluster-kube1-0 /bin/bash -ngcs 185 | [root@gluster-kube1-0 /]# 186 | ``` 187 | 188 | - Access glustercli inside the pod 189 | 190 | ``` 191 | [root@gluster-kube1-0 /]# glustercli peer list 192 | +--------------------------------------+---------+-----------------------------+-----------------------------+--------+-----+ 193 | | ID | NAME | CLIENT ADDRESSES | PEER ADDRESSES | ONLINE | PID | 194 | +--------------------------------------+---------+-----------------------------+-----------------------------+--------+-----+ 195 | | 33346529-d8df-45ff-aeaf-3befed986435 | gluster-kube1-0 | gluster-kube1-0.glusterd2.gcs:24007 | gluster-kube1-0.glusterd2.gcs:24008 | yes | 21 | 196 | | 4527724d-e5de-4b58-a6e8-8f15490fa6b5 | gluster-kube3-0 | gluster-kube3-0.glusterd2.gcs:24007 | gluster-kube3-0.glusterd2.gcs:24008 | yes | 21 | 197 | | d35ef7e1-1846-4826-8447-2d1f92de0881 | gluster-kube2-0 | gluster-kube2-0.glusterd2.gcs:24007 | gluster-kube2-0.glusterd2.gcs:24008 | yes | 21 | 198 | +--------------------------------------+---------+-----------------------------+-----------------------------+--------+-----+ 199 | ``` 200 |
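The same peer information is also exposed over the glusterd2 ReST API through the glusterd2-client service, which is what the CSI drivers and the deploy-gcs.yml playbook use. A minimal check from the master node (a sketch; port 24007 and the /v1/peers endpoint are the ones used by the playbook, and the service ClusterIP is reachable from the kube nodes):

```
[vagrant@kube1 ~]$ kubectl -ngcs get service glusterd2-client
[vagrant@kube1 ~]$ curl http://<glusterd2-client ClusterIP>:24007/v1/peers
```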
201 | ### Accessing Prometheus UI and Grafana Dashboard to view the metrics 202 | 203 | To access the Prometheus UI, open a web browser on the host machine and go to ```http://localhost:9090/graph```. The available metrics can be viewed and selected from the dropdown tab; press Execute to show the values. 204 | 205 | To access the Grafana Dashboard: 206 | 207 | 1) Open a web browser on the host machine and go to ```http://localhost:9000``` to view the Grafana UI. 208 | 209 | 2) Enter ```admin``` as both the username and password. 210 | 211 | 3) Now you should be able to see the dashboards that GCS provides by default. The default GCS dashboard and alert configuration is provided by the [gluster mixins](https://github.com/gluster/gluster-mixins) project. If there is a need to create additional dashboards, please follow steps 4) to 6). 212 | 213 | 4) Select New Dashboard (no need to add a data source, because the Prometheus data source is added by default in the deployment) --> Graph --> Panel Title --> Edit. 214 | 215 | 5) Select Prometheus as the data source in the data source drop-down menu. 216 | 217 | 6) You can now use the expression tab to look up metrics via autocompletion. 218 | 219 | -------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-virtblock-csi.yml.j2: -------------------------------------------------------------------------------- 1 | ## Deploy Gluster Virtual Block CSI attacher 2 | --- 3 | kind: StatefulSet 4 | apiVersion: apps/v1 5 | metadata: 6 | name: csi-glustervirtblock-attacher 7 | namespace: {{ gcs_namespace }} 8 | labels: 9 | app.kubernetes.io/part-of: gcs 10 | app.kubernetes.io/component: csi-driver 11 | app.kubernetes.io/name: csi-attacher 12 | spec: 13 | serviceName: csi-attacher 14 | replicas: 1 15 | selector: 16 | matchLabels: 17 | app.kubernetes.io/part-of: gcs 18 | app.kubernetes.io/component: csi-driver 19 | app.kubernetes.io/name: csi-attacher 20 | template: 21 | metadata: 22 | labels: 23 | app.kubernetes.io/part-of: gcs 24 | app.kubernetes.io/component: csi-driver 25 | app.kubernetes.io/name: csi-attacher 26 | spec: 27 | serviceAccountName: csi-attacher 28 | containers: 29 | - name: csi-attacher 30 | image: quay.io/k8scsi/csi-attacher:v1.0.1 31 | args: 32 | - "--v=5" 33 | - "--csi-address=$(ADDRESS)" 34 | env: 35 | - name: MY_NAME 36 | valueFrom: 37 | fieldRef: 38 | fieldPath: metadata.name 39 | - name: MY_NAMESPACE 40 | valueFrom: 41 | fieldRef: 42 | fieldPath: metadata.namespace 43 | - name: ADDRESS 44 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 45 | volumeMounts: 46 | - name: socket-dir 47 | mountPath: /var/lib/csi/sockets/pluginproxy/ 48 | 49 | - name: glustervirtblock-attacher 50 | image: docker.io/gluster/glustervirtblock-csi-driver:latest 51 | args: 52 | - "--nodeid=$(NODE_ID)" 53 | - "--v=5" 54 | - "--endpoint=$(CSI_ENDPOINT)" 55 | - "--resturl=$(REST_URL)" 56 | - "--resttimeout=120" #glusterd2 client timeout in sec 57 | env: 58 | - name: NODE_ID 59 | valueFrom: 60 | fieldRef: 61 | fieldPath: spec.nodeName 62 | - name: CSI_ENDPOINT 63 | value: unix://plugin/csi.sock 64 | - name: REST_URL 65 | value: http://glusterd2-client.{{ gcs_namespace }}:24007 66 |
volumeMounts: 67 | - name: socket-dir 68 | mountPath: /plugin 69 | volumes: 70 | - name: socket-dir 71 | emptyDir: 72 | 73 | ## Deploy Gluster Virtual Block CSI NodePlugin 74 | --- 75 | kind: DaemonSet 76 | apiVersion: apps/v1 77 | metadata: 78 | name: csi-glustervirtblock-nodeplugin 79 | namespace: {{ gcs_namespace }} 80 | labels: 81 | app.kubernetes.io/part-of: gcs 82 | app.kubernetes.io/component: csi-driver 83 | app.kubernetes.io/name: csi-nodeplugin 84 | spec: 85 | selector: 86 | matchLabels: 87 | app.kubernetes.io/part-of: gcs 88 | app.kubernetes.io/component: csi-driver 89 | app.kubernetes.io/name: csi-nodeplugin 90 | template: 91 | metadata: 92 | labels: 93 | app.kubernetes.io/part-of: gcs 94 | app.kubernetes.io/component: csi-driver 95 | app.kubernetes.io/name: csi-nodeplugin 96 | namespace: {{ gcs_namespace }} 97 | spec: 98 | serviceAccount: csi-nodeplugin 99 | containers: 100 | - name: csi-node-driver-registrar 101 | image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.1 102 | args: 103 | - "--v=5" 104 | - "--csi-address=$(ADDRESS)" 105 | - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" 106 | lifecycle: 107 | preStop: 108 | exec: 109 | command: ["/bin/sh", "-c", "rm -rf /registration/org.gluster.glustervirtblock /registration/org.gluster.glustervirtblock-reg.sock"] 110 | env: 111 | - name: ADDRESS 112 | value: /plugin/csi.sock 113 | - name: DRIVER_REG_SOCK_PATH 114 | value: /var/lib/kubelet/plugins_registry/org.gluster.glustervirtblock/csi.sock 115 | - name: KUBE_NODE_NAME 116 | valueFrom: 117 | fieldRef: 118 | fieldPath: spec.nodeName 119 | volumeMounts: 120 | - name: plugin-dir 121 | mountPath: /plugin 122 | - name: registration-dir 123 | mountPath: /registration 124 | - name: glustervirtblock-nodeplugin 125 | securityContext: 126 | privileged: true 127 | capabilities: 128 | add: ["CAP_MKNOD", "CAP_SYS_ADMIN", "SYS_ADMIN"] 129 | allowPrivilegeEscalation: true 130 | image: docker.io/gluster/glustervirtblock-csi-driver:latest 131 | args: 132 | - "--nodeid=$(NODE_ID)" 133 | - "--v=5" 134 | - "--endpoint=$(CSI_ENDPOINT)" 135 | - "--resturl=$(REST_URL)" 136 | - "--resttimeout=120" #glusterd2 client timeout in sec 137 | env: 138 | - name: NODE_ID 139 | valueFrom: 140 | fieldRef: 141 | fieldPath: spec.nodeName 142 | - name: CSI_ENDPOINT 143 | value: unix://plugin/csi.sock 144 | - name: REST_URL 145 | value: http://glusterd2-client.{{ gcs_namespace }}:24007 146 | volumeMounts: 147 | - name: gluster-dev 148 | mountPath: /dev 149 | - name: plugin-dir 150 | mountPath: /plugin 151 | - name: pods-mount-dir 152 | mountPath: /var/lib/kubelet/pods 153 | mountPropagation: "Bidirectional" 154 | volumes: 155 | - name: gluster-dev 156 | hostPath: 157 | path: /dev 158 | - name: plugin-dir 159 | hostPath: 160 | path: /var/lib/kubelet/plugins_registry/org.gluster.glustervirtblock 161 | type: DirectoryOrCreate 162 | - name: pods-mount-dir 163 | hostPath: 164 | path: /var/lib/kubelet/pods 165 | type: Directory 166 | - name: registration-dir 167 | hostPath: 168 | path: /var/lib/kubelet/plugins_registry/ 169 | type: Directory 170 | 171 | ## Deploy Gluster Virtual block CSI Provisioner 172 | --- 173 | kind: StatefulSet 174 | apiVersion: apps/v1 175 | metadata: 176 | name: csi-glustervirtblock-provisioner 177 | namespace: {{ gcs_namespace }} 178 | labels: 179 | app.kubernetes.io/part-of: gcs 180 | app.kubernetes.io/component: csi-driver 181 | app.kubernetes.io/name: csi-provisioner 182 | spec: 183 | serviceName: csi-glustervirtblock-provisioner 184 | replicas: 1 185 | selector: 186 | matchLabels: 
187 | app.kubernetes.io/part-of: gcs 188 | app.kubernetes.io/component: csi-driver 189 | app.kubernetes.io/name: csi-provisioner 190 | template: 191 | metadata: 192 | name: csi-provisioner 193 | namespace: {{ gcs_namespace }} 194 | labels: 195 | app.kubernetes.io/part-of: gcs 196 | app.kubernetes.io/component: csi-driver 197 | app.kubernetes.io/name: csi-provisioner 198 | spec: 199 | serviceAccountName: csi-provisioner 200 | containers: 201 | - name: csi-provisioner 202 | image: quay.io/k8scsi/csi-provisioner:v1.0.1 203 | args: 204 | - "--provisioner=org.gluster.glustervirtblock" 205 | - "--csi-address=$(ADDRESS)" 206 | - "--connection-timeout=15s" 207 | env: 208 | - name: ADDRESS 209 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 210 | volumeMounts: 211 | - name: socket-dir 212 | mountPath: /var/lib/csi/sockets/pluginproxy/ 213 | - name: csi-cluster-driver-registrar 214 | image: quay.io/k8scsi/csi-cluster-driver-registrar:v1.0.1 215 | args: 216 | - "--v=5" 217 | - "--pod-info-mount-version=\"v1\"" 218 | - "--csi-address=$(ADDRESS)" 219 | env: 220 | - name: ADDRESS 221 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 222 | volumeMounts: 223 | - name: socket-dir 224 | mountPath: /var/lib/csi/sockets/pluginproxy/ 225 | - name: glustervirtblock-provisioner 226 | securityContext: 227 | privileged: true 228 | capabilities: 229 | add: ["CAP_SYS_ADMIN", "SYS_ADMIN"] 230 | allowPrivilegeEscalation: true 231 | image: docker.io/gluster/glustervirtblock-csi-driver:latest 232 | args: 233 | - "--nodeid=$(NODE_ID)" 234 | - "--v=5" 235 | - "--endpoint=$(CSI_ENDPOINT)" 236 | - "--resturl=$(REST_URL)" 237 | - "--resttimeout=120" #glusterd2 client timeout in sec 238 | env: 239 | - name: NODE_ID 240 | valueFrom: 241 | fieldRef: 242 | fieldPath: spec.nodeName 243 | - name: CSI_ENDPOINT 244 | value: unix://plugin/csi.sock 245 | - name: REST_URL 246 | value: http://glusterd2-client.{{ gcs_namespace }}:24007 247 | volumeMounts: 248 | - name: socket-dir 249 | mountPath: /plugin 250 | volumes: 251 | - name: socket-dir 252 | emptyDir: 253 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
-------------------------------------------------------------------------------- /deploy/templates/gcs-manifests/gcs-fs-csi.yml.j2: -------------------------------------------------------------------------------- 1 | ## Deploy GCS CSI attacher 2 | --- 3 | kind: ServiceAccount 4 | apiVersion: v1 5 | metadata: 6 | name: csi-attacher 7 | namespace: {{ gcs_namespace }} 8 | --- 9 | kind: ClusterRole 10 | apiVersion: rbac.authorization.k8s.io/v1 11 | metadata: 12 | name: external-attacher-runner 13 | namespace: {{ gcs_namespace }} 14 | rules: 15 | - apiGroups: [""] 16 | resources: ["persistentvolumes"] 17 | verbs: ["get", "list", "watch", "update"] 18 | - apiGroups: [""] 19 | resources: ["nodes"] 20 | verbs: ["get", "list", "watch"] 21 | - apiGroups: ["csi.storage.k8s.io"] 22 | resources: ["csinodeinfos"] 23 | verbs: ["get", "list", "watch"] 24 | - apiGroups: ["storage.k8s.io"] 25 | resources: ["volumeattachments"] 26 | verbs: ["get", "list", "watch", "update"] 27 | --- 28 | kind: ClusterRoleBinding 29 | apiVersion: rbac.authorization.k8s.io/v1 30 | metadata: 31 | name: csi-attacher-role 32 | namespace: {{ gcs_namespace }} 33 | subjects: 34 | - kind: ServiceAccount 35 | name: csi-attacher 36 | namespace: {{ gcs_namespace }} 37 | roleRef: 38 | kind: ClusterRole 39 | name: external-attacher-runner 40 | apiGroup: rbac.authorization.k8s.io 41 | --- 42 | kind: StatefulSet 43 | apiVersion: apps/v1 44 | metadata: 45 | name: csi-glusterfsplugin-attacher 46 | namespace: {{ gcs_namespace }} 47 | labels: 48 | app.kubernetes.io/part-of: gcs 49 | app.kubernetes.io/component: csi-driver 50 | app.kubernetes.io/name: csi-attacher 51 | spec: 52 | serviceName: csi-attacher 53 | replicas: 1 54 | selector: 55 | matchLabels: 56 | app.kubernetes.io/part-of: gcs 57 | app.kubernetes.io/component: csi-driver 58 | app.kubernetes.io/name: csi-attacher 59 | template: 60 | metadata: 61 | labels: 62 | app.kubernetes.io/part-of: gcs 63 | app.kubernetes.io/component: csi-driver 64 | app.kubernetes.io/name: csi-attacher 65 | spec: 66 | serviceAccountName: csi-attacher 67 | containers: 68 | - name: csi-attacher 69 | image: quay.io/k8scsi/csi-attacher:v1.0.1 70 | args: 71 | - "--v=5" 72 | - "--csi-address=$(ADDRESS)" 73 | env: 74 | - name: MY_NAME 75 | valueFrom: 76 | fieldRef: 77 | fieldPath: metadata.name 78 | - name: MY_NAMESPACE 79 | valueFrom: 80 | fieldRef: 81 | fieldPath: metadata.namespace 82 | - name: ADDRESS 83 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 84 | volumeMounts: 85 | - name: socket-dir 86 | mountPath: /var/lib/csi/sockets/pluginproxy/ 87 | 88 | - name: glusterfs 89 | image: docker.io/gluster/glusterfs-csi-driver:latest 90 | args: 91 | - "--nodeid=$(NODE_ID)" 92 | - "--v=5" 93 | - "--endpoint=$(CSI_ENDPOINT)" 94 | - "--resturl=$(REST_URL)" 95 | - "--resttimeout=120" #glusterd2 client timeout in sec 96 | env: 97 | - name: NODE_ID 98 | valueFrom: 99 | fieldRef: 100 | fieldPath: spec.nodeName 101 | - name: CSI_ENDPOINT 102 | value: unix://plugin/csi.sock 103 | - name: REST_URL 104 | value: http://glusterd2-client.{{ gcs_namespace }}:24007 105 | volumeMounts: 106 | - name: socket-dir 107 | mountPath: /plugin 108 | volumes: 109 | - name: socket-dir 110 | emptyDir: 111 | 112 | ## Deploy GCS CSI NodePlugin 113 | --- 114 | kind: ServiceAccount 115 | apiVersion: v1 116 | metadata: 117 | name: csi-nodeplugin 118 | namespace: {{ gcs_namespace }} 119 | --- 120 | kind: ClusterRole 121 | apiVersion: rbac.authorization.k8s.io/v1 122 | metadata: 123 | name: csi-nodeplugin 124 | namespace: {{ gcs_namespace }} 125 | 
rules: 126 | - apiGroups: [""] 127 | resources: ["events"] 128 | verbs: ["get", "list", "watch", "create", "update", "patch"] 129 | --- 130 | kind: ClusterRoleBinding 131 | apiVersion: rbac.authorization.k8s.io/v1 132 | metadata: 133 | name: csi-nodeplugin 134 | namespace: {{ gcs_namespace }} 135 | subjects: 136 | - kind: ServiceAccount 137 | name: csi-nodeplugin 138 | namespace: {{ gcs_namespace }} 139 | roleRef: 140 | kind: ClusterRole 141 | name: csi-nodeplugin 142 | apiGroup: rbac.authorization.k8s.io 143 | --- 144 | kind: DaemonSet 145 | apiVersion: apps/v1 146 | metadata: 147 | name: csi-glusterfsplugin-nodeplugin 148 | namespace: {{ gcs_namespace }} 149 | labels: 150 | app.kubernetes.io/part-of: gcs 151 | app.kubernetes.io/component: csi-driver 152 | app.kubernetes.io/name: csi-nodeplugin 153 | spec: 154 | selector: 155 | matchLabels: 156 | app.kubernetes.io/part-of: gcs 157 | app.kubernetes.io/component: csi-driver 158 | app.kubernetes.io/name: csi-nodeplugin 159 | template: 160 | metadata: 161 | labels: 162 | app.kubernetes.io/part-of: gcs 163 | app.kubernetes.io/component: csi-driver 164 | app.kubernetes.io/name: csi-nodeplugin 165 | namespace: {{ gcs_namespace }} 166 | spec: 167 | serviceAccount: csi-nodeplugin 168 | containers: 169 | - name: csi-node-driver-registrar 170 | image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.1 171 | args: 172 | - "--v=5" 173 | - "--csi-address=$(ADDRESS)" 174 | - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" 175 | lifecycle: 176 | preStop: 177 | exec: 178 | command: ["/bin/sh", "-c", "rm -rf /registration/org.gluster.glusterfs /registration/org.gluster.glusterfs-reg.sock"] 179 | env: 180 | - name: ADDRESS 181 | value: /plugin/csi.sock 182 | - name: DRIVER_REG_SOCK_PATH 183 | value: /var/lib/kubelet/plugins_registry/org.gluster.glusterfs/csi.sock 184 | - name: KUBE_NODE_NAME 185 | valueFrom: 186 | fieldRef: 187 | fieldPath: spec.nodeName 188 | volumeMounts: 189 | - name: plugin-dir 190 | mountPath: /plugin 191 | - name: registration-dir 192 | mountPath: /registration 193 | - name: gluster-nodeplugin 194 | securityContext: 195 | privileged: true 196 | capabilities: 197 | add: ["SYS_ADMIN"] 198 | allowPrivilegeEscalation: true 199 | image: docker.io/gluster/glusterfs-csi-driver:latest 200 | args: 201 | - "--nodeid=$(NODE_ID)" 202 | - "--v=5" 203 | - "--endpoint=$(CSI_ENDPOINT)" 204 | - "--resturl=$(REST_URL)" 205 | - "--resttimeout=120" #glusterd2 client timeout in sec 206 | env: 207 | - name: NODE_ID 208 | valueFrom: 209 | fieldRef: 210 | fieldPath: spec.nodeName 211 | - name: CSI_ENDPOINT 212 | value: unix://plugin/csi.sock 213 | - name: REST_URL 214 | value: http://glusterd2-client.{{ gcs_namespace }}:24007 215 | volumeMounts: 216 | - name: plugin-dir 217 | mountPath: /plugin 218 | - name: pods-mount-dir 219 | mountPath: /var/lib/kubelet/pods 220 | mountPropagation: "Bidirectional" 221 | volumes: 222 | - name: plugin-dir 223 | hostPath: 224 | path: /var/lib/kubelet/plugins_registry/org.gluster.glusterfs 225 | type: DirectoryOrCreate 226 | - name: pods-mount-dir 227 | hostPath: 228 | path: /var/lib/kubelet/pods 229 | type: Directory 230 | - name: registration-dir 231 | hostPath: 232 | path: /var/lib/kubelet/plugins_registry/ 233 | type: Directory 234 | 235 | ## Deploy GCS CSI Provisioner 236 | --- 237 | apiVersion: v1 238 | kind: ServiceAccount 239 | metadata: 240 | name: csi-provisioner 241 | namespace: {{ gcs_namespace }} 242 | --- 243 | kind: ClusterRole 244 | apiVersion: rbac.authorization.k8s.io/v1 245 | metadata: 246 | name: 
external-provisioner-runner 247 | namespace: {{ gcs_namespace }} 248 | rules: 249 | - apiGroups: [""] 250 | resources: ["persistentvolumes"] 251 | verbs: ["get", "list", "watch", "create", "delete"] 252 | - apiGroups: [""] 253 | resources: ["persistentvolumeclaims"] 254 | verbs: ["get", "list", "watch", "update"] 255 | - apiGroups: ["storage.k8s.io"] 256 | resources: ["storageclasses"] 257 | verbs: ["get", "list", "watch"] 258 | - apiGroups: [""] 259 | resources: ["events"] 260 | verbs: ["list", "watch", "create", "update", "patch","delete","get"] 261 | - apiGroups: [""] 262 | resources: ["secrets"] 263 | verbs: ["get", "list"] 264 | - apiGroups: [""] 265 | resources: ["endpoints"] 266 | verbs: ["get", "list", "watch", "create", "update"] 267 | - apiGroups: ["snapshot.storage.k8s.io"] 268 | resources: ["volumesnapshotclasses"] 269 | verbs: ["get", "list", "watch"] 270 | - apiGroups: ["snapshot.storage.k8s.io"] 271 | resources: ["volumesnapshotcontents"] 272 | verbs: ["create", "get", "list", "watch", "update", "delete"] 273 | - apiGroups: ["snapshot.storage.k8s.io"] 274 | resources: ["volumesnapshots"] 275 | verbs: ["get", "list", "watch", "update"] 276 | - apiGroups: ["apiextensions.k8s.io"] 277 | resources: ["customresourcedefinitions"] 278 | verbs: ["create", "list", "watch", "delete"] 279 | - apiGroups: ["csi.storage.k8s.io"] 280 | resources: ["csidrivers"] 281 | verbs: ["create", "delete"] 282 | 283 | --- 284 | kind: ClusterRoleBinding 285 | apiVersion: rbac.authorization.k8s.io/v1 286 | metadata: 287 | name: csi-provisioner-role 288 | namespace: {{ gcs_namespace }} 289 | subjects: 290 | - kind: ServiceAccount 291 | name: csi-provisioner 292 | namespace: {{ gcs_namespace }} 293 | roleRef: 294 | kind: ClusterRole 295 | name: external-provisioner-runner 296 | apiGroup: rbac.authorization.k8s.io 297 | --- 298 | kind: StatefulSet 299 | apiVersion: apps/v1 300 | metadata: 301 | name: csi-glusterfsplugin-provisioner 302 | namespace: {{ gcs_namespace }} 303 | labels: 304 | app.kubernetes.io/part-of: gcs 305 | app.kubernetes.io/component: csi-driver 306 | app.kubernetes.io/name: csi-provisioner 307 | spec: 308 | serviceName: csi-glusterfsplugin-provisioner 309 | replicas: 1 310 | selector: 311 | matchLabels: 312 | app.kubernetes.io/part-of: gcs 313 | app.kubernetes.io/component: csi-driver 314 | app.kubernetes.io/name: csi-provisioner 315 | template: 316 | metadata: 317 | name: csi-provisioner 318 | namespace: {{ gcs_namespace }} 319 | labels: 320 | app.kubernetes.io/part-of: gcs 321 | app.kubernetes.io/component: csi-driver 322 | app.kubernetes.io/name: csi-provisioner 323 | spec: 324 | serviceAccountName: csi-provisioner 325 | containers: 326 | - name: csi-provisioner 327 | image: quay.io/k8scsi/csi-provisioner:v1.0.1 328 | args: 329 | - "--provisioner=org.gluster.glusterfs" 330 | - "--csi-address=$(ADDRESS)" 331 | - "--connection-timeout=15s" 332 | env: 333 | - name: ADDRESS 334 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 335 | volumeMounts: 336 | - name: socket-dir 337 | mountPath: /var/lib/csi/sockets/pluginproxy/ 338 | - name: csi-snapshotter 339 | image: quay.io/k8scsi/csi-snapshotter:v1.0.1 340 | args: 341 | - "--csi-address=$(ADDRESS)" 342 | - "--connection-timeout=15s" 343 | env: 344 | - name: ADDRESS 345 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 346 | imagePullPolicy: "IfNotPresent" 347 | volumeMounts: 348 | - name: socket-dir 349 | mountPath: /var/lib/csi/sockets/pluginproxy/ 350 | - name: csi-cluster-driver-registrar 351 | image: 
quay.io/k8scsi/csi-cluster-driver-registrar:v1.0.1 352 | args: 353 | - "--v=5" 354 | - "--pod-info-mount-version=\"v1\"" 355 | - "--csi-address=$(ADDRESS)" 356 | env: 357 | - name: ADDRESS 358 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 359 | volumeMounts: 360 | - name: socket-dir 361 | mountPath: /var/lib/csi/sockets/pluginproxy/ 362 | 363 | - name: gluster-provisioner 364 | image: docker.io/gluster/glusterfs-csi-driver:latest 365 | args: 366 | - "--nodeid=$(NODE_ID)" 367 | - "--v=5" 368 | - "--endpoint=$(CSI_ENDPOINT)" 369 | - "--resturl=$(REST_URL)" 370 | - "--resttimeout=120" #glusterd2 client timeout in sec 371 | env: 372 | - name: NODE_ID 373 | valueFrom: 374 | fieldRef: 375 | fieldPath: spec.nodeName 376 | - name: CSI_ENDPOINT 377 | value: unix://plugin/csi.sock 378 | - name: REST_URL 379 | value: http://glusterd2-client.{{ gcs_namespace }}:24007 380 | volumeMounts: 381 | - name: socket-dir 382 | mountPath: /plugin 383 | volumes: 384 | - name: socket-dir 385 | emptyDir: 386 | -------------------------------------------------------------------------------- /deploy/deploy-gcs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deploy GCS 3 | hosts: kube-master[0] 4 | become: true 5 | gather_facts: false 6 | vars: 7 | kubectl: /usr/local/bin/kubectl 8 | 9 | pre_tasks: 10 | - name: GCS Pre | Cluster ID 11 | block: 12 | - name: GCS Pre | Cluster ID | Generate a UUID 13 | command: uuidgen 14 | register: uuidgen_result 15 | 16 | - name: GCS Pre | Cluster ID | Set gcs_gd2_clusterid fact 17 | set_fact: 18 | gcs_gd2_clusterid: "{{ uuidgen_result.stdout }}" 19 | 20 | - name: GCS Pre | Manifests directory 21 | block: 22 | - name: GCS Pre | Manifests directory | Create a temporary directory 23 | tempfile: 24 | state: directory 25 | prefix: gcs-manifests 26 | register: tempdir 27 | 28 | - name: GCS Pre | Manifests directory | Set manifests_dir fact 29 | set_fact: 30 | manifests_dir: "{{ tempdir.path }}" 31 | 32 | - name: GCS Pre | Manifests | Sync GCS manifests 33 | template: 34 | src: "gcs-manifests/{{ item }}.j2" 35 | dest: "{{ manifests_dir }}/{{ item }}" 36 | loop: 37 | - gcs-namespace.yml 38 | - gcs-etcd-operator.yml 39 | - gcs-etcd-cluster.yml 40 | - gcs-gd2-services.yml 41 | - gcs-fs-csi.yml 42 | - gcs-storage-snapshot.yml 43 | - gcs-virtblock-csi.yml 44 | - gcs-storage-virtblock.yml 45 | - gcs-prometheus-operator.yml 46 | - gcs-prometheus-bundle.yml 47 | - gcs-prometheus-alertmanager-cluster.yml 48 | - gcs-prometheus-operator-metrics.yml 49 | - gcs-prometheus-kube-state-metrics.yml 50 | - gcs-prometheus-node-exporter.yml 51 | - gcs-prometheus-kube-metrics.yml 52 | - gcs-prometheus-etcd.yml 53 | - gcs-grafana.yml 54 | - gcs-operator-crd.yml 55 | - gcs-operator.yml 56 | - gcs-mixins.yml 57 | 58 | - name: GCS Pre | Manifests | Create GD2 manifests 59 | include_tasks: tasks/create-gd2-manifests.yml 60 | loop: "{{ groups['gcs-node'] }}" 61 | loop_control: 62 | loop_var: gcs_node 63 | 64 | post_tasks: 65 | - name: GCS Post | Manifests | Delete 66 | file: 67 | path: "{{ manifests_dir }}" 68 | state: absent 69 | 70 | tasks: 71 | - name: GCS | Namespace | Create GCS namespace 72 | kube: 73 | kubectl: "{{ kubectl }}" 74 | file: "{{ manifests_dir }}/gcs-namespace.yml" 75 | 76 | - name: GCS | ETCD Operator 77 | block: 78 | - name: GCS | ETCD Operator | Deploy etcd-operator 79 | kube: 80 | kubectl: "{{ kubectl }}" 81 | file: "{{ manifests_dir }}/gcs-etcd-operator.yml" 82 | 83 | - name: GCS | ETCD Operator | Wait for etcd-operator to be 
available 84 | command: "{{ kubectl }} -n{{ gcs_namespace }} -ojsonpath='{.status.availableReplicas}' get deployment etcd-operator" 85 | register: result 86 | until: result.stdout|int == 1 87 | delay: 10 88 | retries: 50 89 | 90 | - name: GCS | Anthill 91 | block: 92 | - name: GCS | Anthill | Register CRDs 93 | kube: 94 | kubectl: "{{ kubectl }}" 95 | file: "{{ manifests_dir }}/gcs-operator-crd.yml" 96 | 97 | - name: Wait for GlusterCluster CRD to be registered 98 | command: "{{ kubectl }} get customresourcedefinitions glusterclusters.operator.gluster.org" 99 | changed_when: false 100 | register: result 101 | until: result.rc == 0 102 | delay: 10 103 | retries: 30 104 | 105 | - name: Wait for GlusterNode CRD to be registered 106 | command: "{{ kubectl }} get customresourcedefinitions glusternodes.operator.gluster.org" 107 | changed_when: false 108 | register: result 109 | until: result.rc == 0 110 | delay: 10 111 | retries: 30 112 | 113 | - name: GCS | Anthill | Deploy operator 114 | kube: 115 | kubectl: "{{ kubectl }}" 116 | file: "{{ manifests_dir }}/gcs-operator.yml" 117 | 118 | - name: GCS | ETCD Cluster 119 | block: 120 | - name: GCS | ETCD Cluster | Deploy etcd-cluster 121 | kube: 122 | kubectl: "{{ kubectl }}" 123 | file: "{{ manifests_dir }}/gcs-etcd-cluster.yml" 124 | register: result 125 | until: not result.failed 126 | delay: 5 127 | retries: 5 128 | 129 | - name: GCS | ETCD Cluster | Get etcd-client service 130 | command: "{{ kubectl }} -n{{ gcs_namespace }} -ojsonpath='{.spec.clusterIP}' get service etcd-client" 131 | register: etcd_client_service 132 | until: etcd_client_service.rc == 0 133 | delay: 5 134 | retries: 5 135 | 136 | - name: GCS | ETCD Cluster | Set etcd_client_endpoint 137 | set_fact: 138 | etcd_client_endpoint: "http://{{ etcd_client_service.stdout }}:2379" 139 | cacheable: true 140 | 141 | - name: GCS | ETCD Cluster | Wait for etcd-cluster to become ready 142 | uri: 143 | url: "{{ etcd_client_endpoint }}/v2/members" 144 | register: result 145 | until: result.status is defined and (result.status == 200 and result.json.members|length == 3) 146 | delay: 10 147 | retries: 50 148 | 149 | - name: GCS | GD2 Cluster 150 | block: 151 | - name: GCS | GD2 Cluster | Deploy GD2 services 152 | kube: 153 | kubectl: "{{ kubectl }}" 154 | file: "{{ manifests_dir }}/gcs-gd2-services.yml" 155 | 156 | - name: GCS | GD2 Cluster | Deploy GD2 157 | include_tasks: ./tasks/deploy-gd2.yml 158 | loop: "{{ groups['gcs-node'] }}" 159 | loop_control: 160 | loop_var: gcs_node 161 | 162 | - name: GCS | GD2 Cluster | Get glusterd2-client service 163 | command: "{{ kubectl }} -n{{ gcs_namespace }} -ojsonpath='{.spec.clusterIP}' get service glusterd2-client " 164 | register: gd2_client_service 165 | until: gd2_client_service.rc == 0 166 | delay: 5 167 | retries: 5 168 | 169 | - name: GCS | GD2 Cluster | Set gd2_client_endpoint 170 | set_fact: 171 | gd2_client_endpoint: "http://{{ gd2_client_service.stdout }}:24007" 172 | cacheable: true 173 | 174 | - name: GCS | GD2 Cluster | Wait for glusterd2-cluster to become ready 175 | uri: 176 | url: "{{ gd2_client_endpoint }}/v1/peers" 177 | register: peers_resp 178 | until: peers_resp.status is defined and (peers_resp.status == 200 and peers_resp.json|length == groups['kube-node']|length) 179 | delay: 10 180 | retries: 50 181 | 182 | - name: GCS | GD2 Cluster | Add devices 183 | include_tasks: ./tasks/add-devices-to-peer.yml 184 | loop: "{{ peers_resp.json }}" 185 | loop_control: 186 | loop_var: peer 187 | 188 | - name: GCS | CSI Driver 189 | block: 190 
| - name: GCS | CSI Driver | Deploy csi driver 191 | kube: 192 | kubectl: "{{ kubectl }}" 193 | file: "{{ manifests_dir }}/gcs-fs-csi.yml" 194 | 195 | - name: GCS | CSI Driver | Wait for csi-provisioner to become available 196 | command: "{{ kubectl }} -n{{ gcs_namespace }} -ojsonpath={.status.readyReplicas} get statefulset csi-glusterfsplugin-provisioner" 197 | register: result 198 | until: result.stdout|int == 1 199 | delay: 10 200 | retries: 50 201 | 202 | - name: GCS | CSI Driver | Wait for csi-attacher to become available 203 | command: "{{ kubectl }} -n{{ gcs_namespace }} -ojsonpath={.status.readyReplicas} get statefulset csi-glusterfsplugin-attacher" 204 | register: result 205 | until: result.stdout|int == 1 206 | delay: 10 207 | retries: 50 208 | 209 | - name: GCS | CSI Driver | Wait for csi-nodeplugin to become available 210 | command: "{{ kubectl }} -n{{ gcs_namespace }} -ojsonpath={.status.numberAvailable} get daemonset csi-glusterfsplugin-nodeplugin" 211 | register: result 212 | until: result.stdout|int == groups['kube-node']|length 213 | delay: 10 214 | retries: 50 215 | 216 | - name: GCS | Storage | Snapshot | Create Storage and Snapshot class 217 | kube: 218 | kubectl: "{{ kubectl }}" 219 | file: "{{ manifests_dir }}/gcs-storage-snapshot.yml" 220 | 221 | - name: GCS | Virtual Block CSI Driver 222 | block: 223 | - name: GCS | Virtual Block CSI Driver | Deploy virtual block csi driver 224 | kube: 225 | kubectl: "{{ kubectl }}" 226 | file: "{{ manifests_dir }}/gcs-virtblock-csi.yml" 227 | 228 | - name: GCS | Virtual Block CSI Driver | Wait for virtual block csi-provisioner to become available 229 | command: "{{ kubectl }} -n{{ gcs_namespace }} -ojsonpath={.status.readyReplicas} get statefulset csi-glustervirtblock-provisioner" 230 | register: result 231 | until: result.stdout|int == 1 232 | delay: 10 233 | retries: 50 234 | 235 | - name: GCS | Virtual Block CSI Driver | Wait for virtual block csi-attacher to become available 236 | command: "{{ kubectl }} -n{{ gcs_namespace }} -ojsonpath={.status.readyReplicas} get statefulset csi-glustervirtblock-attacher" 237 | register: result 238 | until: result.stdout|int == 1 239 | delay: 10 240 | retries: 50 241 | 242 | - name: GCS | Virtual Block CSI Driver | Wait for virtual block csi-nodeplugin to become available 243 | command: "{{ kubectl }} -n{{ gcs_namespace }} -ojsonpath={.status.numberAvailable} get daemonset csi-glustervirtblock-nodeplugin" 244 | register: result 245 | until: result.stdout|int == groups['kube-node']|length 246 | delay: 10 247 | retries: 50 248 | 249 | - name: GCS | Virtual Block Storage | Create Virtual Block Storage class 250 | kube: 251 | kubectl: "{{ kubectl }}" 252 | file: "{{ manifests_dir }}/gcs-storage-virtblock.yml" 253 | 254 | - name: GCS | Prometheus Operator 255 | block: 256 | - name: GCS | Prometheus Operator | Deploy Prometheus Operator 257 | kube: 258 | kubectl: "{{ kubectl }}" 259 | file: "{{ manifests_dir }}/gcs-prometheus-operator.yml" 260 | 261 | - name: GCS | Prometheus Operator | Wait for the Prometheus Operator to become ready 262 | command: "{{ kubectl }} -n{{ gcs_namespace }} -ojsonpath='{.status.availableReplicas}' get deployment prometheus-operator" 263 | register: result 264 | until: result.stdout|int == 1 265 | delay: 10 266 | retries: 50 267 | 268 | - name: GCS | Prometheus Objects 269 | block: 270 | - name: Check if the Custrom Resource Definitions are set 271 | command: "{{ kubectl }} get customresourcedefinitions servicemonitors.monitoring.coreos.com" 272 | register: result 273 | 
until: result.rc == 0 274 | delay: 10 275 | retries: 30 276 | 277 | - name: Check if Service Monitor CRD is registered 278 | command: "{{ kubectl }} get servicemonitors -n{{ gcs_namespace }}" 279 | register: result 280 | until: result.rc == 0 281 | delay: 10 282 | retries: 30 283 | 284 | - name: Check if Prometheus CRD object is registered 285 | command: "{{ kubectl }} get Prometheus -n{{ gcs_namespace }}" 286 | register: result 287 | until: result.rc == 0 288 | delay: 10 289 | retries: 30 290 | 291 | - name: GCS | Prometheus Objects | Deploy services, ServiceMonitor and Prometheus Objects for Gluster Storage Monitoring 292 | kube: 293 | kubectl: "{{ kubectl }}" 294 | file: "{{ manifests_dir }}/gcs-prometheus-bundle.yml" 295 | 296 | - name: GCS | Kube-State-Metrics Exporter Deployment and corresponding Prometheus Objects 297 | kube: 298 | kubectl: "{{ kubectl }}" 299 | file: "{{ manifests_dir }}/gcs-prometheus-kube-state-metrics.yml" 300 | 301 | - name: GCS | Prometheus Objects in order to monitor Kubelet, APIServer and CoreDNS 302 | kube: 303 | kubectl: "{{ kubectl }}" 304 | file: "{{ manifests_dir }}/gcs-prometheus-kube-metrics.yml" 305 | 306 | - name: GCS | Prometheus Objects in order to monitor etcd operator and the etcd cluster 307 | kube: 308 | kubectl: "{{ kubectl }}" 309 | file: "{{ manifests_dir }}/gcs-prometheus-etcd.yml" 310 | 311 | - name: GCS | Node-Exporter Deployment and corresponding Prometheus Objects 312 | kube: 313 | kubectl: "{{ kubectl }}" 314 | file: "{{ manifests_dir }}/gcs-prometheus-node-exporter.yml" 315 | 316 | - name: GCS | Prometheus Objects for monitoring the Prometheus Operator (meta-monitoring) 317 | kube: 318 | kubectl: "{{ kubectl }}" 319 | file: "{{ manifests_dir }}/gcs-prometheus-operator-metrics.yml" 320 | 321 | - name: GCS | Alertmanager Cluster 322 | kube: 323 | kubectl: "{{ kubectl }}" 324 | file: "{{ manifests_dir }}/gcs-prometheus-alertmanager-cluster.yml" 325 | 326 | - name: GCS | Gluster Mixins | Deploy Grafana Dashboard and Prometheus Alert Rules 327 | kube: 328 | kubectl: "{{ kubectl }}" 329 | file: "{{ manifests_dir }}/gcs-mixins.yml" 330 | 331 | - name: GCS | Grafana Dashboard | Deploy Grafana Dashboard 332 | kube: 333 | kubectl: "{{ kubectl }}" 334 | file: "{{ manifests_dir }}/gcs-grafana.yml" 335 | 336 | --------------------------------------------------------------------------------
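Once the deploy-gcs.yml playbook completes, a quick way to confirm that the components it deployed are up is to list the workloads in the GCS namespace (a sketch; `gcs` is the default `gcs_namespace` from group_vars, and the resource names are the ones created by the manifests above):

```
$ kubectl -ngcs get pods
$ kubectl -ngcs get statefulsets,daemonsets,deployments
```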