├── helm ├── kube-linstor │ ├── README.md │ ├── .helmignore │ ├── templates │ │ ├── csi-driver.yaml │ │ ├── controller-secret.yaml │ │ ├── db-tls.yaml │ │ ├── configurator-configmap.yaml │ │ ├── drbd-reactor-configmap.yaml │ │ ├── stork-service.yaml │ │ ├── drbd-reactor-service.yaml │ │ ├── podsecuritypolicy.yaml │ │ ├── stork-configmap.yaml │ │ ├── stork-rbac.yaml │ │ ├── controller-configmap.yaml │ │ ├── controller-service.yaml │ │ ├── satellite-rbac.yaml │ │ ├── issuer.yaml │ │ ├── csi-node-rbac.yaml │ │ ├── controller-rbac.yaml │ │ ├── satellite-configmap.yaml │ │ ├── satellite-tls.yaml │ │ ├── ha-controller-rbac.yaml │ │ ├── _helpers.tpl │ │ ├── configurator-deployment.yaml │ │ ├── controller-tls.yaml │ │ ├── stork-scheduler-rbac.yaml │ │ ├── stork-scheduler-deployment.yaml │ │ ├── stork-deployment.yaml │ │ ├── ha-controller-deployment.yaml │ │ ├── csi-node-daemonset.yaml │ │ ├── csi-controller-deployment.yaml │ │ ├── csi-controller-rbac.yaml │ │ ├── controller-deployment.yaml │ │ └── satellite-daemonset.yaml │ ├── Chart.yaml │ ├── scripts │ │ ├── configurator.controller │ │ ├── configurator.node │ │ └── functions.sh │ └── values.yaml └── pv-hostpath │ ├── values.yaml │ ├── .helmignore │ ├── Chart.yaml │ └── templates │ └── pv.yaml ├── dockerfiles ├── linstor-ha-controller │ └── Dockerfile ├── linstor-csi │ └── Dockerfile ├── linstor-stork │ └── Dockerfile ├── linstor-controller │ └── Dockerfile └── linstor-satellite │ └── Dockerfile ├── hack └── version-bump.sh ├── .github └── stale.yml ├── examples ├── linstor-db.yaml └── linstor.yaml ├── docs ├── BACKUP.md └── UPGRADE.md ├── README.md └── LICENSE /helm/kube-linstor/README.md: -------------------------------------------------------------------------------- 1 | ../../README.md -------------------------------------------------------------------------------- /helm/pv-hostpath/values.yaml: -------------------------------------------------------------------------------- 1 | size: 100Gi 2 | 
-------------------------------------------------------------------------------- /helm/kube-linstor/.helmignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .git 3 | -------------------------------------------------------------------------------- /helm/pv-hostpath/.helmignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .git 3 | -------------------------------------------------------------------------------- /helm/pv-hostpath/Chart.yaml: -------------------------------------------------------------------------------- 1 | name: pv-hostpath 2 | description: HostPath Physical Volume 3 | version: 1.0.0 4 | maintainers: 5 | - name: kvaps 6 | email: kvapss@gmail.com 7 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/csi-driver.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.csi.enabled }} 2 | --- 3 | apiVersion: storage.k8s.io/v1 4 | kind: CSIDriver 5 | metadata: 6 | name: linstor.csi.linbit.com 7 | spec: 8 | attachRequired: true 9 | podInfoOnMount: true 10 | {{- end }} 11 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/controller-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . -}} 2 | {{- $config := include "linstor.controllerConfig" . -}} 3 | {{- if .Values.controller.enabled }} 4 | --- 5 | apiVersion: v1 6 | kind: Secret 7 | metadata: 8 | name: {{ template "linstor.fullname" . 
}}-controller 9 | data: 10 | linstor.toml: {{ $config | b64enc }} 11 | {{- end }} 12 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/db-tls.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . -}} 2 | {{- if .Values.controller.enabled }} 3 | {{- if .Values.controller.db.tls }} 4 | --- 5 | apiVersion: v1 6 | kind: Secret 7 | metadata: 8 | name: {{ $fullName }}-db-tls 9 | type: kubernetes.io/tls 10 | data: 11 | tls.crt: {{ b64enc .Values.controller.db.cert }} 12 | tls.key: {{ b64enc .Values.controller.db.key }} 13 | ca.crt: {{ b64enc .Values.controller.db.ca }} 14 | {{- end }} 15 | {{- end }} -------------------------------------------------------------------------------- /dockerfiles/linstor-ha-controller/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.15 as builder 2 | 3 | RUN git clone https://github.com/piraeusdatastore/piraeus-ha-controller /usr/local/go/piraeus-ha-controller \ 4 | && cd /usr/local/go/piraeus-ha-controller \ 5 | && git reset --hard v0.1.3 \ 6 | && cd cmd/piraeus-ha-controller \ 7 | && go build \ 8 | && mv ./piraeus-ha-controller / 9 | 10 | FROM debian:buster 11 | COPY --from=builder /piraeus-ha-controller /piraeus-ha-controller 12 | ENTRYPOINT ["/piraeus-ha-controller"] 13 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/configurator-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- if .Values.configurator.enabled }} 3 | --- 4 | apiVersion: v1 5 | kind: ConfigMap 6 | metadata: 7 | name: {{ $fullName }}-configurator 8 | data: 9 | functions.sh: | 10 | {{- .Files.Get "scripts/functions.sh" | nindent 4 }} 11 | configurator.controller: | 12 | {{- tpl (.Files.Get "scripts/configurator.controller") . | nindent 4}} 13 | configurator.node: | 14 | {{- tpl (.Files.Get "scripts/configurator.node") . | nindent 4 }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /dockerfiles/linstor-csi/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.15 as builder 2 | 3 | RUN git clone https://github.com/linbit/linstor-csi/ /usr/local/go/linstor-csi/ \ 4 | && cd /usr/local/go/linstor-csi \ 5 | && git reset --hard v0.13.1 \ 6 | && make -f container.mk staticrelease \ 7 | && mv ./linstor-csi-linux-amd64 / 8 | 9 | FROM debian:buster 10 | RUN apt-get update \ 11 | && apt-get install -y --no-install-recommends \ 12 | xfsprogs \ 13 | e2fsprogs \ 14 | && apt-get clean \ 15 | && rm -rf /var/lib/apt/lists/* 16 | COPY --from=builder /linstor-csi-linux-amd64 /linstor-csi 17 | ENTRYPOINT ["/linstor-csi"] 18 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/drbd-reactor-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- if .Values.reactor.enabled }} 3 | --- 4 | apiVersion: v1 5 | kind: ConfigMap 6 | metadata: 7 | name: {{ $fullName }}-drbd-reactor 8 | namespace: {{ .Release.Namespace }} 9 | data: 10 | drbd-reactor.toml: |+ 11 | snippets = "/etc/drbd-reactor.d" 12 | statistics-poll-interval = {{ .Values.reactor.pollInterval }} 13 | [[log]] 14 | level = "info" 15 | file = "/dev/stdout" 16 | [[prometheus]] 17 | enums = true 18 | address = "0.0.0.0:{{ .Values.reactor.port }}" 19 | {{- end }} -------------------------------------------------------------------------------- /dockerfiles/linstor-stork/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.15 as builder 2 | RUN apt-get update \ 3 | && apt-get install -y --no-install-recommends \ 4 | go-dep \ 5 | && apt-get clean \ 6 | && rm -rf /var/lib/apt/lists/* 7 | 8 | RUN git clone https://github.com/libopenstorage/stork /go/src/github.com/libopenstorage/stork \ 9 | && cd /go/src/github.com/libopenstorage/stork \ 10 | && git reset --hard v2.6.4 11 | 12 | WORKDIR /go/src/github.com/libopenstorage/stork 13 | 14 | RUN make vendor 15 | 16 | RUN make stork storkctl \ 17 | && mv bin/stork bin/linux/storkctl / 18 | 19 | FROM debian:buster 20 | COPY --from=builder /stork /storkctl / 21 | ENTRYPOINT ["/stork"] 22 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/stork-service.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . -}} 2 | {{- if .Values.stork.enabled }} 3 | --- 4 | apiVersion: v1 5 | kind: Service 6 | metadata: 7 | name: {{ $fullName }}-stork 8 | namespace: {{ .Release.Namespace }} 9 | labels: 10 | app: {{ $fullName }}-stork 11 | {{- with .Values.stork.service.labels }} 12 | {{- toYaml . | nindent 4 }} 13 | {{- end }} 14 | {{- with .Values.stork.service.annotations }} 15 | annotations: 16 | {{- toYaml . 
| nindent 4 }} 17 | {{- end }} 18 | spec: 19 | ports: 20 | - name: extender 21 | port: 8099 22 | - name: webhook 23 | port: 443 24 | selector: 25 | app: {{ $fullName }}-stork 26 | {{- end }} 27 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/drbd-reactor-service.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . -}} 2 | {{- if .Values.reactor.enabled }} 3 | --- 4 | apiVersion: v1 5 | kind: Service 6 | metadata: 7 | name: {{ $fullName }}-drbd-reactor 8 | namespace: {{ .Release.Namespace }} 9 | labels: 10 | app: {{ $fullName }}-drbd-reactor 11 | {{- with .Values.reactor.service.labels }} 12 | {{- toYaml . | nindent 4 }} 13 | {{- end }} 14 | {{- with .Values.reactor.service.annotations }} 15 | annotations: 16 | {{- toYaml . | nindent 4 }} 17 | {{- end }} 18 | spec: 19 | clusterIP: None 20 | ports: 21 | - name: metrics 22 | port: {{ .Values.reactor.port }} 23 | selector: 24 | app: {{ $fullName }}-satellite 25 | {{- end }} -------------------------------------------------------------------------------- /helm/kube-linstor/Chart.yaml: -------------------------------------------------------------------------------- 1 | name: linstor 2 | description: Containerized LINSTOR SDS for Kubernetes, ready for production use. 
3 | version: 1.14.0 4 | appVersion: 1.14.0 5 | icon: https://hsto.org/getpro/habr/post_images/e47/594/c07/e47594c0721332fb46493d20339bb1be.png 6 | keywords: 7 | - linstor 8 | - drbd 9 | - storage 10 | - csi 11 | home: https://github.com/kvaps/kube-linstor 12 | sources: 13 | - https://hub.docker.com/r/kvaps/linstor-controller 14 | - https://hub.docker.com/r/kvaps/linstor-satellite 15 | - https://hub.docker.com/r/kvaps/linstor-csi 16 | - https://hub.docker.com/r/kvaps/linstor-stork 17 | - https://hub.docker.com/r/kvaps/linstor-ha-controller 18 | maintainers: 19 | - name: kvaps 20 | email: kvapss@gmail.com 21 | -------------------------------------------------------------------------------- /hack/version-bump.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | EC=0 3 | 4 | version=$1 5 | [ -z "$version" ] && echo "version is not specified as first argument" && exit 1 6 | 7 | echo "bumping version to $version" 8 | 9 | sed -i "s/raw\/v[0-9]\+\.[0-9]\+\.[0-9]\+/raw\/v${version}/" README.md 10 | 11 | for f in README.md docs/UPGRADE.md; do 12 | sed -i "s/\(linstor --version\) [0-9]\+\.[0-9]\+\.[0-9]\+/\1 ${version}/" "$f" 13 | git diff --exit-code "$f" && echo "$f not changed" && EC=1 14 | done 15 | 16 | f=helm/kube-linstor/Chart.yaml 17 | sed -i "s/^\(version\|appVersion\): [0-9]\+\.[0-9]\+\.[0-9]\+/\1: ${version}/" "$f" 18 | git diff --exit-code "$f" && echo "$f not changed" && EC=1 19 | 20 | if [ "$EC" != 0 ]; then 21 | echo 22 | echo "not all files were changed!" 23 | fi 24 | exit "$EC" 25 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- if .Values.podSecurityPolicy.enabled }} 3 | apiVersion: policy/v1beta1 4 | kind: PodSecurityPolicy 5 | metadata: 6 | name: {{ $fullName }} 7 | annotations: 8 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' 9 | spec: 10 | allowPrivilegeEscalation: true 11 | allowedCapabilities: 12 | - '*' 13 | fsGroup: 14 | rule: RunAsAny 15 | hostIPC: true 16 | hostNetwork: true 17 | hostPID: true 18 | hostPorts: 19 | - max: 65535 20 | min: 0 21 | privileged: true 22 | runAsUser: 23 | rule: RunAsAny 24 | seLinux: 25 | rule: RunAsAny 26 | supplementalGroups: 27 | rule: RunAsAny 28 | volumes: 29 | - '*' 30 | {{- end }} 31 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/stork-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . -}} 2 | {{- if .Values.stork.enabled }} 3 | --- 4 | apiVersion: v1 5 | kind: ConfigMap 6 | metadata: 7 | name: {{ $fullName }}-stork 8 | namespace: {{ .Release.Namespace }} 9 | data: 10 | policy.cfg: |- 11 | { 12 | "kind": "Policy", 13 | "apiVersion": "v1", 14 | "extenders": [ 15 | { 16 | "urlPrefix": "http://{{ $fullName }}-stork.{{ .Release.Namespace }}.svc:8099", 17 | "apiVersion": "v1beta1", 18 | "filterVerb": "filter", 19 | "prioritizeVerb": "prioritize", 20 | "weight": 5, 21 | "enableHttps": false, 22 | "nodeCacheCapable": false 23 | } 24 | ] 25 | } 26 | {{- end }} 27 | 28 | 29 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/stork-rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- if .Values.stork.enabled }} 3 | --- 4 | kind: ClusterRole 5 | apiVersion: rbac.authorization.k8s.io/v1 6 | metadata: 7 | name: {{ $fullName }}-stork 8 | rules: 9 | - apiGroups: ["*"] 10 | resources: ["*"] 11 | verbs: ["*"] 12 | --- 13 | kind: ClusterRoleBinding 14 | apiVersion: rbac.authorization.k8s.io/v1 15 | metadata: 16 | name: {{ $fullName }}-stork 17 | roleRef: 18 | kind: ClusterRole 19 | name: {{ $fullName }}-stork 20 | apiGroup: rbac.authorization.k8s.io 21 | subjects: 22 | - kind: ServiceAccount 23 | name: {{ $fullName }}-stork 24 | namespace: {{ .Release.Namespace }} 25 | --- 26 | apiVersion: v1 27 | kind: ServiceAccount 28 | metadata: 29 | name: {{ $fullName }}-stork 30 | {{- end }} 31 | -------------------------------------------------------------------------------- /.github/stale.yml: -------------------------------------------------------------------------------- 1 | # Number of days of inactivity before an issue becomes stale 2 | daysUntilStale: 90 3 | # Number of days of inactivity before a stale issue is closed 4 | daysUntilClose: 30 5 | # Issues with these labels will never be considered stale 6 | exemptLabels: 7 | - frozen 8 | staleLabel: stale 9 | # Comment to post when marking an issue as stale. Set to `false` to disable 10 | markComment: > 11 | This issue has been automatically marked as stale because it has not had 12 | recent activity. It will be closed if no further activity occurs. Any further update will 13 | cause the issue/pull request to no longer be considered stale. Thank you for your contributions. 14 | # Comment to post when closing a stale issue. Set to `false` to disable 15 | closeComment: > 16 | This issue is being automatically closed due to inactivity. 
17 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/controller-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . -}} 2 | {{- if .Values.controller.enabled }} 3 | --- 4 | apiVersion: v1 5 | kind: ConfigMap 6 | metadata: 7 | name: {{ $fullName }}-client 8 | namespace: {{ .Release.Namespace }} 9 | data: 10 | linstor-client.conf: |+ 11 | [global] 12 | {{- if not .Values.controller.ssl.enabled }} 13 | controllers = linstor://localhost:{{ .Values.controller.port }},linstor://{{ $fullName }}-controller:{{ .Values.controller.port }} 14 | {{- else }} 15 | controllers = linstor+ssl://localhost:{{ .Values.controller.ssl.port }},linstor+ssl://{{ $fullName }}-controller:{{ .Values.controller.ssl.port }} 16 | certfile = /tls/tls.crt 17 | keyfile = /tls/tls.key 18 | cafile = /tls/ca.crt 19 | {{- end }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/controller-service.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . -}} 2 | {{- if .Values.controller.enabled }} 3 | --- 4 | apiVersion: v1 5 | kind: Service 6 | metadata: 7 | name: {{ $fullName }}-controller 8 | namespace: {{ .Release.Namespace }} 9 | labels: 10 | app: {{ $fullName }}-controller 11 | {{- with .Values.controller.service.labels }} 12 | {{- toYaml . | nindent 4 }} 13 | {{- end }} 14 | {{- with .Values.controller.service.annotations }} 15 | annotations: 16 | {{- toYaml . | nindent 4 }} 17 | {{- end }} 18 | spec: 19 | ports: 20 | - name: restapi 21 | port: {{ .Values.controller.port }} 22 | - name: ssl 23 | port: {{ .Values.controller.ssl.port }} 24 | # NOTE: No selector here! 
A selector would automatically add all matching and ready pods to the endpoint 25 | {{- end }} 26 | -------------------------------------------------------------------------------- /helm/pv-hostpath/templates/pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: {{ .Release.Name }} 5 | spec: 6 | capacity: 7 | storage: {{ required "A valid .Values.size entry required!" .Values.size }} 8 | volumeMode: Filesystem 9 | accessModes: 10 | - ReadWriteOnce 11 | hostPath: 12 | path: {{ required "A valid .Values.path entry required!" .Values.path }} 13 | type: DirectoryOrCreate 14 | claimRef: 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | name: {{ .Release.Name }} 18 | namespace: {{ .Release.Namespace }} 19 | nodeAffinity: 20 | required: 21 | nodeSelectorTerms: 22 | - matchExpressions: 23 | - key: kubernetes.io/hostname 24 | operator: In 25 | values: 26 | - {{ required "A valid .Values.node entry required!" 
.Values.node }} 27 | 28 | -------------------------------------------------------------------------------- /examples/linstor-db.yaml: -------------------------------------------------------------------------------- 1 | # This is an example values for stolon chart 2 | # 3 | # All values described here: 4 | # https://github.com/helm/charts/blob/master/stable/stolon/values.yaml 5 | 6 | superuserPassword: hackme 7 | replicationPassword: hackme 8 | persistence: 9 | enabled: true 10 | size: 10G 11 | 12 | keeper: 13 | replicaCount: 3 14 | nodeSelector: 15 | node-role.kubernetes.io/master: "" 16 | tolerations: 17 | - effect: NoSchedule 18 | key: node-role.kubernetes.io/master 19 | 20 | proxy: 21 | replicaCount: 3 22 | nodeSelector: 23 | node-role.kubernetes.io/master: "" 24 | tolerations: 25 | - effect: NoSchedule 26 | key: node-role.kubernetes.io/master 27 | 28 | sentinel: 29 | replicaCount: 3 30 | nodeSelector: 31 | node-role.kubernetes.io/master: "" 32 | tolerations: 33 | - effect: NoSchedule 34 | key: node-role.kubernetes.io/master 35 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/satellite-rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- if and .Values.satellite.enabled .Values.podSecurityPolicy.enabled }} 3 | --- 4 | apiVersion: v1 5 | kind: ServiceAccount 6 | metadata: 7 | name: {{ $fullName }}-satellite-sa 8 | --- 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | kind: Role 11 | metadata: 12 | name: {{ $fullName }}-satellite-role 13 | rules: 14 | - apiGroups: ["extensions"] 15 | resources: ["podsecuritypolicies"] 16 | resourceNames: ["{{ $fullName }}"] 17 | verbs: ["use"] 18 | --- 19 | kind: RoleBinding 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | metadata: 22 | name: {{ $fullName }}-satellite-binding 23 | roleRef: 24 | kind: Role 25 | name: {{ $fullName }}-satellite-role 26 | apiGroup: rbac.authorization.k8s.io 27 | subjects: 28 | - kind: ServiceAccount 29 | name: {{ $fullName }}-satellite-sa 30 | {{- end }} 31 | -------------------------------------------------------------------------------- /helm/kube-linstor/scripts/configurator.controller: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | . $(dirname $0)/functions.sh 4 | 5 | load_controller_params 6 | wait_controller 7 | 8 | {{- with .Values.configurator.controller }} 9 | {{- with .props }} 10 | configure_controller_props {{ toJson . | quote }} 11 | {{- end }} 12 | 13 | {{- $selectFilter := dict }} 14 | {{- range .resourceGroups }} 15 | {{- range $k, $v := .selectFilter }} 16 | {{- $_ := set $selectFilter (snakecase $k) $v }} 17 | {{- end }} 18 | configure_resource_group {{ required "A valid .Values.configurator.controller.resourceGroups[].name entry required!" .name | quote }} {{ toJson $selectFilter | quote }} {{ toJson (.props | default (dict)) | quote }} 19 | {{- $rg_name := .name }} 20 | {{- range .volumeGroups }} 21 | configure_volume_group {{ $rg_name | quote }} {{ required "A valid .Values.configurator.controller.resourceGroups[].volumeGroups[].volumeNumber entry required!" 
.volumeNumber | quote }} {{ toJson (.props | default (dict)) | quote }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | finish 27 | -------------------------------------------------------------------------------- /examples/linstor.yaml: -------------------------------------------------------------------------------- 1 | # This is an example values for kube-linstor chart 2 | # 3 | # All values described here: 4 | # https://github.com/kvaps/kube-linstor/blob/master/helm/kube-linstor/values.yaml 5 | 6 | controller: 7 | db: 8 | user: linstor 9 | password: hackme 10 | connectionUrl: jdbc:postgresql://linstor-db-stolon-proxy/linstor 11 | nodeSelector: 12 | node-role.kubernetes.io/master: "" 13 | tolerations: 14 | - effect: NoSchedule 15 | key: node-role.kubernetes.io/master 16 | 17 | satellite: 18 | tolerations: 19 | - effect: NoSchedule 20 | key: node-role.kubernetes.io/master 21 | - effect: NoSchedule 22 | key: node.kubernetes.io/unschedulable 23 | 24 | csi: 25 | controller: 26 | nodeSelector: 27 | node-role.kubernetes.io/master: "" 28 | tolerations: 29 | - effect: NoSchedule 30 | key: node-role.kubernetes.io/master 31 | node: 32 | tolerations: 33 | - effect: NoSchedule 34 | key: node-role.kubernetes.io/master 35 | - effect: NoSchedule 36 | key: node.kubernetes.io/unschedulable 37 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/issuer.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- if or .Values.controller.enabled .Values.satellite.enabled }} 3 | {{- if or .Values.controller.ssl.enabled .Values.satellite.ssl.enabled }} 4 | {{- if eq .Values.controller.ssl.method "cert-manager" }} 5 | --- 6 | apiVersion: cert-manager.io/v1 7 | kind: Issuer 8 | metadata: 9 | name: {{ $fullName }}-selfsigning-issuer 10 | spec: 11 | selfSigned: {} 12 | --- 13 | apiVersion: cert-manager.io/v1 14 | kind: Certificate 15 | metadata: 16 | name: {{ $fullName }}-ca-tls 17 | spec: 18 | commonName: {{ $fullName }}-ca 19 | secretName: {{ $fullName }}-ca-tls 20 | duration: 87600h # 3650d 21 | renewBefore: 8760h # 365d 22 | usages: 23 | - "signing" 24 | - "key encipherment" 25 | - "cert sign" 26 | isCA: true 27 | issuerRef: 28 | name: "{{ $fullName }}-selfsigning-issuer" 29 | kind: Issuer 30 | --- 31 | apiVersion: cert-manager.io/v1 32 | kind: Issuer 33 | metadata: 34 | name: {{ $fullName }}-ca-issuer 35 | spec: 36 | ca: 37 | secretName: {{ $fullName }}-ca-tls 38 | {{- end }} 39 | {{- end }} 40 | {{- end }} 41 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/csi-node-rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- if .Values.csi.enabled }} 3 | --- 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | kind: ClusterRole 6 | metadata: 7 | name: {{ $fullName }}-csi-driver-registrar-role 8 | rules: 9 | - apiGroups: [""] 10 | resources: ["events"] 11 | verbs: ["get", "list", "watch", "create", "update", "patch"] 12 | {{- if .Values.podSecurityPolicy.enabled }} 13 | - apiGroups: ["extensions"] 14 | resources: ["podsecuritypolicies"] 15 | resourceNames: ["{{ $fullName }}"] 16 | verbs: ["use"] 17 | {{- end }} 18 | --- 19 | apiVersion: rbac.authorization.k8s.io/v1 20 | kind: ClusterRoleBinding 21 | metadata: 22 | name: {{ $fullName }}-csi-driver-registrar-binding 23 | roleRef: 24 | apiGroup: rbac.authorization.k8s.io 25 | kind: ClusterRole 26 | name: {{ $fullName }}-csi-driver-registrar-role 27 | subjects: 28 | - kind: ServiceAccount 29 | name: {{ $fullName }}-csi-node-sa 30 | namespace: {{ .Release.Namespace }} 31 | --- 32 | apiVersion: v1 33 | kind: ServiceAccount 34 | metadata: 35 | name: {{ $fullName }}-csi-node-sa 36 | {{- end }} 37 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/controller-rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- if .Values.controller.enabled }} 3 | --- 4 | kind: Role 5 | apiVersion: rbac.authorization.k8s.io/v1 6 | metadata: 7 | name: {{ $fullName }}-controller 8 | rules: 9 | - apiGroups: ["coordination.k8s.io"] 10 | resources: ["leases"] 11 | verbs: ["create"] 12 | - apiGroups: ["coordination.k8s.io"] 13 | resources: ["leases"] 14 | verbs: ["get", "update"] 15 | resourceNames: ["{{ $fullName }}-controller"] 16 | - apiGroups: [""] 17 | resources: ["endpoints", "endpoints/restricted"] 18 | verbs: ["create", "patch", "update"] 19 | {{- if .Values.podSecurityPolicy.enabled }} 20 | - apiGroups: ["extensions"] 21 | resources: ["podsecuritypolicies"] 22 | resourceNames: ["{{ $fullName }}"] 23 | verbs: ["use"] 24 | {{- end }} 25 | --- 26 | kind: RoleBinding 27 | apiVersion: rbac.authorization.k8s.io/v1 28 | metadata: 29 | name: {{ $fullName }}-controller 30 | roleRef: 31 | kind: Role 32 | name: {{ $fullName }}-controller 33 | apiGroup: rbac.authorization.k8s.io 34 | subjects: 35 | - kind: ServiceAccount 36 | name: {{ $fullName }}-controller 37 | --- 38 | apiVersion: v1 39 | kind: ServiceAccount 40 | metadata: 41 | name: {{ $fullName }}-controller 42 | {{- end }} 43 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/satellite-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- if .Values.satellite.enabled }} 3 | {{- if or .Values.satellite.overwriteDrbdConf .Values.satellite.ssl.enabled }} 4 | --- 5 | apiVersion: v1 6 | kind: ConfigMap 7 | metadata: 8 | name: {{ $fullName }}-satellite 9 | data: 10 | {{- if .Values.satellite.overwriteDrbdConf }} 11 | drbd.conf: |+ 12 | include "drbd.d/global_common.conf"; 13 | include "drbd.d/*.res"; 14 | 15 | global_common.conf: |+ 16 | global { 17 | usage-count no; 18 | udev-always-use-vnr; 19 | } 20 | common { 21 | handlers { 22 | } 23 | startup { 24 | } 25 | options { 26 | } 27 | disk { 28 | } 29 | net { 30 | } 31 | } 32 | {{- end }} 33 | linstor_satellite.toml: |+ 34 | [netcom] 35 | {{- if not .Values.satellite.ssl.enabled }} 36 | port={{ .Values.satellite.port }} 37 | {{- else }} 38 | type="ssl" 39 | port={{ .Values.satellite.ssl.port }} 40 | server_certificate="/config/ssl/keystore.jks" 41 | trusted_certificates="/config/ssl/certificates.jks" 42 | key_password="linstor" 43 | keystore_password="linstor" 44 | truststore_password="linstor" 45 | ssl_protocol="TLSv1.2" 46 | {{- end }} 47 | {{- end }} 48 | {{- end }} 49 | -------------------------------------------------------------------------------- /docs/BACKUP.md: -------------------------------------------------------------------------------- 1 | # Perform backups and database management 2 | 3 | If you're using stolon as back-end for your LINSTOR installation you can easily perform the backup of your database: 4 | ```bash 5 | kubectl exec -n linstor sts/linstor-db-stolon-keeper -- \ 6 | sh -c 'PGPASSWORD=$(cat $STKEEPER_PG_SU_PASSWORDFILE) pg_dump -c -h linstor-db-stolon-proxy -U stolon linstor | gzip' \ 7 | > linstor-backup.sql.gz 8 | ``` 9 | 10 | If you need to restore database from backup: 11 | ```bash 12 | kubectl exec -i -n linstor sts/linstor-db-stolon-keeper -- \ 13 | sh -c 'zcat | PGPASSWORD=$(cat $STKEEPER_PG_SU_PASSWORDFILE) psql -h linstor-db-stolon-proxy -U stolon -d linstor' \ 14 | < linstor-backup.sql.gz 15 | ``` 16 | 17 | 
--- 18 | 19 | To check the state of the stolon cluster, run 20 | ```bash 21 | kubectl exec -n linstor sts/linstor-db-stolon-keeper -- stolonctl --cluster-name linstor-db-stolon --store-backend kubernetes --kube-resource-kind=configmap status 22 | ``` 23 | --- 24 | 25 | If something has gone wrong, you can always connect to your database to perform manual actions: 26 | 27 | ```bash 28 | kubectl exec -ti -n linstor sts/linstor-db-stolon-keeper -- \ 29 | sh -c 'PGPASSWORD=$(cat $STKEEPER_PG_SU_PASSWORDFILE) psql -h linstor-db-stolon-proxy -U stolon linstor' 30 | ``` 31 | 32 | Note: if you have a user name other than `linstor`, you need to make the schema visible on each connect: 33 | ```sql 34 | SET search_path TO "LINSTOR",public; 35 | ``` 36 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/satellite-tls.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- $cn := printf "%s-satellite" $fullName -}} 3 | {{- $altName1 := printf "%s.%s" $cn .Release.Namespace }} 4 | {{- $altName2 := printf "%s.%s.svc" $cn .Release.Namespace }} 5 | {{- if .Values.satellite.enabled }} 6 | {{- if .Values.satellite.ssl.enabled }} 7 | {{- if eq .Values.controller.ssl.method "helm" }} 8 | --- 9 | apiVersion: v1 10 | kind: Secret 11 | metadata: 12 | name: {{ $fullName }}-satellite-tls 13 | annotations: 14 | "helm.sh/resource-policy": "keep" 15 | "helm.sh/hook": "pre-install" 16 | "helm.sh/hook-delete-policy": "before-hook-creation" 17 | "directives.qbec.io/update-policy": "never" 18 | type: kubernetes.io/tls 19 | data: 20 | {{- with genSelfSignedCert $cn nil (list $cn $altName1 $altName2 "localhost") 3650 }} 21 | tls.crt: {{ b64enc .Cert }} 22 | tls.key: {{ b64enc .Key }} 23 | {{- end }} 24 | {{- else if eq .Values.controller.ssl.method "cert-manager" }} 25 | --- 26 | apiVersion: cert-manager.io/v1 27 | kind: Certificate 28 | metadata: 29 | name: {{ $fullName }}-satellite-tls 30 | spec: 31 | secretName: {{ $fullName }}-satellite-tls 32 | commonName: {{ $cn }} 33 | dnsNames: 34 | - {{ $cn }} 35 | - {{ $altName1 }} 36 | - {{ $altName2 }} 37 | - localhost 38 | duration: 87600h # 3650d 39 | usages: 40 | - "signing" 41 | - "key encipherment" 42 | - "server auth" 43 | issuerRef: 44 | name: {{ $fullName }}-ca-issuer 45 | kind: Issuer 46 | group: cert-manager.io 47 | {{- else }} 48 | {{- fail ".Values.controller.ssl.method must be set to either 'helm' or 'cert-manager'" }} 49 | {{- end }} 50 | {{- end }} 51 | {{- end }} 52 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/ha-controller-rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- if .Values.haController.enabled }} 3 | --- 4 | kind: ClusterRole 5 | apiVersion: rbac.authorization.k8s.io/v1 6 | metadata: 7 | name: {{ $fullName }}-ha-controller 8 | rules: 9 | - apiGroups: [""] 10 | resources: ["pods"] 11 | verbs: ["list", "watch", "delete"] 12 | - apiGroups: ["storage.k8s.io"] 13 | resources: ["volumeattachments"] 14 | verbs: ["list", "watch", "delete"] 15 | - apiGroups: [""] 16 | resources: ["events"] 17 | verbs: ["create"] 18 | - apiGroups: [""] 19 | resources: ["persistentvolumeclaims"] 20 | verbs: ["list", "watch"] 21 | - apiGroups: ["coordination.k8s.io"] 22 | resources: ["leases"] 23 | verbs: ["create"] 24 | - apiGroups: ["coordination.k8s.io"] 25 | resources: ["leases"] 26 | verbs: ["get", "update"] 27 | resourceNames: ["{{ $fullName }}-ha-controller"] 28 | {{- if .Values.podSecurityPolicy.enabled }} 29 | - apiGroups: ["extensions"] 30 | resources: ["podsecuritypolicies"] 31 | resourceNames: ["{{ $fullName }}"] 32 | verbs: ["use"] 33 | - apiGroups: ["extensions"] 34 | resources: ["podsecuritypolicies"] 35 | resourceNames: ["{{ $fullName }}"] 36 | verbs: ["use"] 37 | {{- end }} 38 | --- 39 | kind: ClusterRoleBinding 40 | apiVersion: rbac.authorization.k8s.io/v1 41 | metadata: 42 | name: {{ $fullName }}-ha-controller 43 | roleRef: 44 | kind: ClusterRole 45 | name: {{ $fullName }}-ha-controller 46 | apiGroup: rbac.authorization.k8s.io 47 | subjects: 48 | - kind: ServiceAccount 49 | name: {{ $fullName }}-ha-controller 50 | namespace: {{ .Release.Namespace }} 51 | --- 52 | apiVersion: v1 53 | kind: ServiceAccount 54 | metadata: 55 | name: {{ $fullName }}-ha-controller 56 | {{- end }} 57 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=gohtmltmpl: */}} 2 | {{/* 3 | Expand the name of the chart. 
4 | */}} 5 | {{- define "linstor.name" -}} 6 | {{- default "linstor" .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | */}} 13 | {{- define "linstor.fullname" -}} 14 | {{- if .Values.fullnameOverride -}} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 16 | {{- else -}} 17 | {{- $name := default "linstor" .Values.nameOverride -}} 18 | {{- if or (contains $name .Release.Name) (eq (.Release.Name | upper) "RELEASE-NAME") -}} 19 | {{- $name | trunc 63 | trimSuffix "-" -}} 20 | {{- else -}} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 22 | {{- end -}} 23 | {{- end -}} 24 | {{- end -}} 25 | 26 | {{/* 27 | Generates linstor.toml config file 28 | */}} 29 | {{- define "linstor.controllerConfig" -}} 30 | [db] 31 | user = "{{ .Values.controller.db.user }}" 32 | password = "{{ .Values.controller.db.password }}" 33 | connection_url = "{{ .Values.controller.db.connectionUrl }}" 34 | {{- if .Values.controller.db.tls }} 35 | ca_certificate = "/tls/db/ca.crt" 36 | client_certificate = "/tls/db/tls.crt" 37 | client_key_pkcs8_pem = "/tls/db/tls.key" 38 | {{- end }} 39 | {{- with .Values.controller.db.etcdPrefix }} 40 | [db.etcd] 41 | prefix = "{{ . 
}}" 42 | {{- end }} 43 | [http] 44 | port = {{ .Values.controller.port }} 45 | {{- if or .Values.controller.ssl.enabled }} 46 | [https] 47 | enabled = true 48 | port = {{ .Values.controller.ssl.port }} 49 | keystore = "/config/ssl/keystore.jks" 50 | keystore_password = "linstor" 51 | truststore = "/config/ssl/trustore_client.jks" 52 | truststore_password = "linstor" 53 | {{- end }} 54 | {{ end }} 55 | -------------------------------------------------------------------------------- /helm/kube-linstor/scripts/configurator.node: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | . $(dirname $0)/functions.sh 4 | echo "Node hostname: $HOSTNAME" 5 | 6 | load_satellite_params 7 | load_controller_params 8 | wait_satellite 9 | wait_controller 10 | {{- with .Values.configurator.autoJoinNodes }} 11 | register_node 12 | {{- end }} 13 | 14 | {{- range .Values.configurator.nodes }} 15 | if [[ $HOSTNAME =~ {{ required "A valid .Values.configurator.nodes[].regex entry required!" .regex }} ]]; then 16 | echo 'Handling configuration for "{{ .regex }}"' 17 | {{- with .props }} 18 | configure_node_props {{ toJson . | quote }} 19 | {{- end }} 20 | {{- range .interfaces }} 21 | configure_interface {{ required "A valid .Values.configurator.nodes[].interfaces[].name entry required!" .name | quote }} {{ required "A valid .Values.configurator.nodes[].interfaces[].ip entry required!" 
.ip | quote }} 22 | {{- end }} 23 | {{- range .storagePools }} 24 | {{- if has .providerKind (list "LVM" "LVM_THIN" "ZFS" "ZFS_THIN") }} 25 | {{- if eq .providerKind "LVM" }} 26 | if check_lvm_pool {{ index .props "StorDriver/LvmVg" }}; then 27 | {{- else if eq .providerKind "LVM_THIN" }} 28 | if check_lvmthin_pool {{ index .props "StorDriver/LvmVg" }}/{{ index .props "StorDriver/ThinPool" }}; then 29 | {{- else if eq .providerKind "ZFS" }} 30 | if check_zfs_pool {{ index .props "StorDriver/ZPool" }}; then 31 | {{- else if eq .providerKind "ZFS_THIN" }} 32 | if check_zfs_pool {{ index .props "StorDriver/ZPoolThin" }}; then 33 | {{- end }} 34 | {{- end }} 35 | configure_storage_pool {{ required "A valid .Values.configurator.nodes[].storagePools[].name entry required!" .name | quote }} {{ required "A valid .Values.configurator.nodes[].storagePools[].providerKind entry required!" .providerKind | quote }} {{ toJson (.props | default (dict)) | quote }} 36 | {{- if has .providerKind (list "LVM" "LVM_THIN" "ZFS" "ZFS_THIN") }} 37 | fi 38 | {{- end }} 39 | {{- end }} 40 | fi 41 | {{- end }} 42 | 43 | finish 44 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/configurator-deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- if and .Values.configurator.enabled .Values.configurator.controller }} 3 | --- 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | labels: 8 | app: {{ $fullName }}-configurator 9 | name: {{ $fullName }}-configurator 10 | namespace: {{ .Release.Namespace }} 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | app: {{ $fullName }}-configurator 16 | template: 17 | metadata: 18 | labels: 19 | app: {{ $fullName }}-configurator 20 | annotations: 21 | checksum/scripts: {{ printf "%s\n%s" (.Files.Get "scripts/functions.sh") (tpl (.Files.Get "scripts/configurator.controller") .) | sha256sum }} 22 | spec: 23 | imagePullSecrets: 24 | {{- toYaml .Values.satellite.image.pullSecrets | nindent 8 }} 25 | containers: 26 | - name: configurator-controller 27 | {{- with .Values.satellite.image }} 28 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 29 | imagePullPolicy: {{ .pullPolicy }} 30 | {{- end }} 31 | command: 32 | - /bin/bash 33 | - /scripts/configurator.controller 34 | env: 35 | - name: LS_CONTROLLERS 36 | {{- if not .Values.controller.ssl.enabled }} 37 | value: http://{{ $fullName }}-controller:{{ .Values.controller.port }} 38 | {{- else }} 39 | value: https://{{ $fullName }}-controller:{{ .Values.controller.ssl.port }} 40 | {{- end }} 41 | volumeMounts: 42 | - name: scripts 43 | mountPath: /scripts 44 | - name: client-config 45 | mountPath: /etc/linstor 46 | {{- if .Values.controller.ssl.enabled }} 47 | - name: client-tls 48 | mountPath: /tls/client 49 | {{- end }} 50 | serviceAccountName: {{ $fullName }}-controller 51 | priorityClassName: system-node-critical 52 | {{- with .Values.controller.nodeSelector }} 53 | nodeSelector: 54 | {{- toYaml . | nindent 8 }} 55 | {{- end }} 56 | {{- with .Values.controller.tolerations }} 57 | tolerations: 58 | {{- toYaml . 
| nindent 6 }} 59 | {{- end }} 60 | volumes: 61 | - name: scripts 62 | configMap: 63 | name: {{ $fullName }}-configurator 64 | - name: client-config 65 | configMap: 66 | name: {{ $fullName }}-client 67 | {{- if .Values.controller.ssl.enabled }} 68 | - name: client-tls 69 | secret: 70 | secretName: {{ $fullName }}-client-tls 71 | {{- end }} 72 | {{- end }} 73 | 74 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/controller-tls.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . -}} 2 | {{- $cn := printf "%s-controller" $fullName -}} 3 | {{- $altName1 := printf "%s.%s" $cn .Release.Namespace }} 4 | {{- $altName2 := printf "%s.%s.svc" $cn .Release.Namespace }} 5 | {{- if .Values.controller.enabled }} 6 | {{- if or .Values.controller.ssl.enabled .Values.satellite.ssl.enabled }} 7 | {{- if eq .Values.controller.ssl.method "helm" }} 8 | {{- $ca := genCA (printf "%s-ca" $fullName) 3650 -}} 9 | --- 10 | apiVersion: v1 11 | kind: Secret 12 | metadata: 13 | name: {{ $fullName }}-controller-tls 14 | annotations: 15 | "helm.sh/resource-policy": "keep" 16 | "helm.sh/hook": "pre-install" 17 | "helm.sh/hook-delete-policy": "before-hook-creation" 18 | "directives.qbec.io/update-policy": "never" 19 | type: kubernetes.io/tls 20 | data: 21 | {{- with genSignedCert $cn (list "127.0.0.1") (list $cn $altName1 $altName2 "localhost") 3650 $ca }} 22 | tls.crt: {{ b64enc .Cert }} 23 | tls.key: {{ b64enc .Key }} 24 | ca.crt: {{ b64enc $ca.Cert }} 25 | {{- end }} 26 | {{- if or .Values.controller.ssl.enabled }} 27 | --- 28 | apiVersion: v1 29 | kind: Secret 30 | metadata: 31 | name: {{ $fullName }}-client-tls 32 | annotations: 33 | "helm.sh/resource-policy": "keep" 34 | "helm.sh/hook": "pre-install" 35 | "helm.sh/hook-delete-policy": "before-hook-creation" 36 | "directives.qbec.io/update-policy": "never" 37 | type: kubernetes.io/tls 38 | data: 39 
| {{- with genSignedCert (printf "%s-client" $fullName) nil nil 3650 $ca }} 40 | tls.crt: {{ b64enc .Cert }} 41 | tls.key: {{ b64enc .Key }} 42 | ca.crt: {{ b64enc $ca.Cert }} 43 | {{- end }} 44 | {{- end }} 45 | {{- else if eq .Values.controller.ssl.method "cert-manager" }} 46 | --- 47 | apiVersion: cert-manager.io/v1 48 | kind: Certificate 49 | metadata: 50 | name: {{ $fullName }}-controller-tls 51 | spec: 52 | secretName: {{ $fullName }}-controller-tls 53 | commonName: {{ $cn }} 54 | dnsNames: 55 | - {{ $cn }} 56 | - {{ $altName1 }} 57 | - {{ $altName2 }} 58 | - localhost 59 | duration: 87600h # 3650d 60 | usages: 61 | - "signing" 62 | - "key encipherment" 63 | - "server auth" 64 | issuerRef: 65 | name: {{ $fullName }}-ca-issuer 66 | kind: Issuer 67 | group: cert-manager.io 68 | --- 69 | apiVersion: cert-manager.io/v1 70 | kind: Certificate 71 | metadata: 72 | name: {{ $fullName }}-client-tls 73 | spec: 74 | secretName: {{ $fullName }}-client-tls 75 | commonName: {{ $fullName }}-client 76 | duration: 87600h # 3650d 77 | usages: 78 | - "signing" 79 | - "key encipherment" 80 | - "client auth" 81 | issuerRef: 82 | name: {{ $fullName }}-ca-issuer 83 | kind: Issuer 84 | group: cert-manager.io 85 | {{- else }} 86 | {{- fail ".Values.controller.ssl.method is not set to " }} 87 | {{- end }} 88 | {{- end }} 89 | {{- end }} 90 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/stork-scheduler-rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- if .Values.stork.enabled }} 3 | --- 4 | kind: ClusterRole 5 | apiVersion: rbac.authorization.k8s.io/v1 6 | metadata: 7 | name: {{ $fullName }}-stork-scheduler 8 | rules: 9 | - apiGroups: [""] 10 | resources: ["endpoints"] 11 | verbs: ["get", "update"] 12 | - apiGroups: [""] 13 | resources: ["configmaps"] 14 | verbs: ["get", "list", "watch"] 15 | - apiGroups: [""] 16 | resources: ["events"] 17 | verbs: ["create", "patch", "update"] 18 | - apiGroups: ["events.k8s.io"] 19 | resources: ["events"] 20 | verbs: ["create", "patch", "update"] 21 | - apiGroups: [""] 22 | resources: ["endpoints"] 23 | verbs: ["create"] 24 | - apiGroups: [""] 25 | resourceNames: ["kube-scheduler"] 26 | resources: ["endpoints"] 27 | verbs: ["delete", "get", "patch", "update"] 28 | - apiGroups: [""] 29 | resources: ["nodes"] 30 | verbs: ["get", "list", "watch"] 31 | - apiGroups: [""] 32 | resources: ["pods"] 33 | verbs: ["delete", "get", "list", "watch"] 34 | - apiGroups: [""] 35 | resources: ["bindings", "pods/binding"] 36 | verbs: ["create"] 37 | - apiGroups: [""] 38 | resources: ["pods/status"] 39 | verbs: ["patch", "update"] 40 | - apiGroups: [""] 41 | resources: ["replicationcontrollers", "services"] 42 | verbs: ["get", "list", "watch"] 43 | - apiGroups: ["*"] 44 | resources: ["replicasets"] 45 | verbs: ["get", "list", "watch"] 46 | - apiGroups: ["apps"] 47 | resources: ["statefulsets"] 48 | verbs: ["get", "list", "watch"] 49 | - apiGroups: ["policy"] 50 | resources: ["poddisruptionbudgets"] 51 | verbs: ["get", "list", "watch"] 52 | - apiGroups: [""] 53 | resources: ["persistentvolumeclaims", "persistentvolumes"] 54 | verbs: ["get", "list", "watch"] 55 | - apiGroups: ["storage.k8s.io"] 56 | resources: ["storageclasses", "csinodes"] 57 | verbs: ["get", "list", "watch"] 58 | - apiGroups: ["coordination.k8s.io"] 59 | resources: ["leases"] 60 | verbs: ["get", "create", "update"] 61 | - apiGroups: ["storage.k8s.io"] 62 | resources: ["csidrivers", "csistoragecapacities"] 63 | verbs: 
["get", "list", "watch"] 64 | {{- if .Values.podSecurityPolicy.enabled }} 65 | - apiGroups: ["extensions"] 66 | resources: ["podsecuritypolicies"] 67 | resourceNames: ["{{ $fullName }}"] 68 | verbs: ["use"] 69 | {{- end }} 70 | --- 71 | kind: ClusterRoleBinding 72 | apiVersion: rbac.authorization.k8s.io/v1 73 | metadata: 74 | name: {{ $fullName }}-stork-scheduler 75 | roleRef: 76 | kind: ClusterRole 77 | name: {{ $fullName }}-stork-scheduler 78 | apiGroup: rbac.authorization.k8s.io 79 | subjects: 80 | - kind: ServiceAccount 81 | name: {{ $fullName }}-stork-scheduler 82 | namespace: {{ .Release.Namespace }} 83 | --- 84 | apiVersion: v1 85 | kind: ServiceAccount 86 | metadata: 87 | name: {{ $fullName }}-stork-scheduler 88 | {{- end }} 89 | -------------------------------------------------------------------------------- /docs/UPGRADE.md: -------------------------------------------------------------------------------- 1 | # Upgrade notes 2 | 3 | 4 | If you're using stolon as back-end for your LINSTOR installation you can easily perform the backup of your database: 5 | 6 | ```bash 7 | kubectl exec -n linstor sts/linstor-db-stolon-keeper -- \ 8 | sh -c 'PGPASSWORD=$(cat $STKEEPER_PG_SU_PASSWORDFILE) pg_dump -c -h linstor-db-stolon-proxy -U stolon linstor | gzip' \ 9 | > linstor-backup.sql.gz 10 | ``` 11 | 12 | It is always recommended to perform the backup before each LINSTOR upgrade. 13 | 14 | 15 | 16 | Also if you were using `helm template` to perform the installation as described in [README.md for v1.1.2](https://github.com/kvaps/kube-linstor/tree/v1.1.2), I would suggest you switch to Helm v3, however the helm template method should also work fine, we're using it with [qbec](https://qbec.io/). 
17 | 18 | Anyway you can perform the upgrade by simply replacing resources in your Kubernetes cluster thus 19 | 20 | --- 21 | 22 | ## Upgrading helm repo index 23 | 24 | ``` 25 | helm repo update 26 | ``` 27 | 28 | ## Upgrading stolon 29 | 30 | 31 | ***Helm way:*** 32 | 33 | ```bash 34 | helm upgrade linstor-db kvaps/stolon --namespace linstor -f linstor-db.yaml 35 | ``` 36 | 37 | ***Templated manifests:*** 38 | 39 | - (optional) Remove all stolon resources except generated ones. 40 | The generated resources (like PVC's and `stolon-cluster-linstor-db-stolon` configmap), should remain in the cluster even after you remove mentioned statefulsets for them. 41 | 42 | - Install new resources with the same names, they should start using old PVCs and `linstor-db-stolon` configmap to reload cluster state. Specify `--set job.autoCreateCluster=false` option for stolon chart. 43 | 44 | ## Upgrading LINSTOR 45 | 46 | 47 | ***Helm way:*** 48 | 49 | ```bash 50 | helm upgrade linstor kvaps/linstor --version 1.14.0 --namespace linstor -f linstor.yaml 51 | ``` 52 | 53 | ***Templated manifests:*** 54 | 55 | - (optional) Remove all LINSTOR resources, it does not store any state in the Kubernetes cluster, so you can do that without fear. The LINSTOR state is stored only in the database. 56 | 57 | - Create new LINSTOR resources. Check the controller log, it should perform the schema migration for the database. 58 | 59 | --- 60 | 61 | If you're upgrading from an old version you can see your nodes in `Offline` state, that's because the latest version enables mutual ssl authentication for the linstor-satellites. 
62 | 63 | You can easily fix that by executing this command in your linstor-controller pod: 64 | ```bash 65 | linstor n l | awk '/(PLAIN)/ { print "linstor n i m -p 3367 --communication-type SSL " $2 " default" }' | sh -ex 66 | ``` 67 | --- 68 | 69 | The v1.9.0 release also introduces a shorter release name: `linstor-` instead of `linstor-linstor-`, this change shouldn't break anything, however it will regenerate SSL certificates. 70 | If you are using LINSTOR API externally, you might need to update the client certificates or keep the old release name prefix by specifying `--set fullnameOverride=linstor-linstor` option. 71 | 72 | See [#18](https://github.com/kvaps/kube-linstor/issues/18) for more details. 73 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/stork-scheduler-deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . -}} 2 | {{- if .Values.storkScheduler.enabled }} 3 | --- 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | labels: 8 | app: {{ $fullName }}-stork-scheduler 9 | name: {{ $fullName }}-stork-scheduler 10 | namespace: {{ .Release.Namespace }} 11 | spec: 12 | replicas: {{ .Values.storkScheduler.replicaCount }} 13 | selector: 14 | matchLabels: 15 | app: {{ $fullName }}-stork-scheduler 16 | template: 17 | metadata: 18 | labels: 19 | app: {{ $fullName }}-stork-scheduler 20 | spec: 21 | imagePullSecrets: 22 | {{- toYaml .Values.storkScheduler.image.pullSecrets | nindent 8 }} 23 | {{- if or .Values.storkScheduler.affinity .Values.storkScheduler.podAntiAffinity }} 24 | affinity: 25 | {{- with .Values.storkScheduler.affinity }} 26 | {{- toYaml . 
| nindent 8 }} 27 | {{- end }} 28 | {{- if eq .Values.storkScheduler.podAntiAffinity "hard" }} 29 | podAntiAffinity: 30 | requiredDuringSchedulingIgnoredDuringExecution: 31 | - topologyKey: "{{ .Values.storkScheduler.podAntiAffinityTopologyKey }}" 32 | labelSelector: 33 | matchLabels: 34 | app: {{ $fullName }}-controller 35 | {{- else if eq .Values.storkScheduler.podAntiAffinity "soft" }} 36 | podAntiAffinity: 37 | preferredDuringSchedulingIgnoredDuringExecution: 38 | - weight: 1 39 | podAffinityTerm: 40 | topologyKey: "{{ .Values.storkScheduler.podAntiAffinityTopologyKey }}" 41 | labelSelector: 42 | matchLabels: 43 | app: {{ $fullName }}-controller 44 | {{- end }} 45 | {{- end }} 46 | containers: 47 | - name: stork 48 | image: {{ .Values.storkScheduler.image.repository }}:{{ .Values.storkScheduler.image.tag }} 49 | imagePullPolicy: {{ .Values.storkScheduler.image.pullPolicy }} 50 | command: 51 | - /usr/local/bin/kube-scheduler 52 | - --address=0.0.0.0 53 | - --scheduler-name=stork 54 | - --policy-configmap={{ $fullName }}-stork 55 | - --policy-configmap-namespace=$(NAMESPACE) 56 | - --leader-elect=true 57 | - --leader-elect-resource-name={{ $fullName }}-stork-scheduler 58 | - --leader-elect-resource-namespace=$(NAMESPACE) 59 | env: 60 | - name: NAMESPACE 61 | valueFrom: 62 | fieldRef: 63 | fieldPath: metadata.namespace 64 | livenessProbe: 65 | httpGet: 66 | path: /healthz 67 | port: 10251 68 | initialDelaySeconds: 15 69 | affinity: 70 | podAntiAffinity: 71 | requiredDuringSchedulingIgnoredDuringExecution: 72 | - labelSelector: 73 | matchExpressions: 74 | - key: "app" 75 | operator: In 76 | values: 77 | - {{ $fullName }}-stork-scheduler 78 | topologyKey: "kubernetes.io/hostname" 79 | serviceAccountName: {{ $fullName }}-stork-scheduler 80 | priorityClassName: system-node-critical 81 | {{- with .Values.storkScheduler.nodeSelector }} 82 | nodeSelector: 83 | {{- toYaml . 
| nindent 8 }} 84 | {{- end }} 85 | {{- with .Values.storkScheduler.tolerations }} 86 | tolerations: 87 | {{- toYaml . | nindent 6 }} 88 | {{- end }} 89 | {{- end }} 90 | -------------------------------------------------------------------------------- /dockerfiles/linstor-controller/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:buster as builder 2 | 3 | ARG VERSION=1.14.0 4 | 5 | ENV DEBIAN_FRONTEND noninteractive 6 | RUN apt-get update \ 7 | && apt-get -y upgrade \ 8 | && apt-get -y install build-essential git default-jdk-headless dh-systemd gradle python3-all 9 | 10 | RUN git clone https://github.com/LINBIT/linstor-server.git /linstor-server 11 | WORKDIR /linstor-server 12 | RUN git checkout v${VERSION} 13 | 14 | RUN make debrelease \ 15 | && rm -rf /root/.gradle/caches/ \ 16 | && mv linstor-server-${VERSION}.tar.gz /linstor-server_${VERSION}.orig.tar.gz \ 17 | && tar -C / -xvf /linstor-server_${VERSION}.orig.tar.gz 18 | 19 | WORKDIR /linstor-server-${VERSION} 20 | RUN dpkg-buildpackage -us -uc 21 | 22 | # ------------------------------------------------------------------------------ 23 | 24 | FROM debian:buster as client-builder 25 | 26 | ARG API_VERSION=1.9.0 27 | ARG CLIENT_VERSION=1.9.0 28 | 29 | ENV DEBIAN_FRONTEND noninteractive 30 | RUN apt-get update \ 31 | && apt-get -y upgrade \ 32 | && apt-get -y install build-essential debhelper git python3-all python3-setuptools help2man bash-completion docbook-xsl xsltproc 33 | 34 | RUN git clone --recurse-submodules https://github.com/LINBIT/linstor-api-py /linstor-api-py 35 | WORKDIR /linstor-api-py 36 | RUN git checkout v${API_VERSION} \ 37 | && make debrelease \ 38 | && mv ./dist/python-linstor-${API_VERSION}.tar.gz ../python-linstor_${API_VERSION}.orig.tar.gz \ 39 | && tar -C / -xvf /python-linstor_${API_VERSION}.orig.tar.gz 40 | WORKDIR /python-linstor-${API_VERSION} 41 | RUN dpkg-buildpackage -us -uc 42 | 43 | RUN rm -rf /linstor-api-py \ 44 | && 
mv /python-linstor-${API_VERSION} /linstor-api-py 45 | 46 | RUN git clone https://github.com/LINBIT/linstor-client.git /linstor-client 47 | WORKDIR /linstor-client 48 | RUN git checkout v${CLIENT_VERSION} \ 49 | && make debrelease \ 50 | && mv dist/linstor-client-${CLIENT_VERSION}.tar.gz /linstor-client_${CLIENT_VERSION}.orig.tar.gz \ 51 | && tar -C / -xvf /linstor-client_${CLIENT_VERSION}.orig.tar.gz 52 | WORKDIR /linstor-client-${CLIENT_VERSION} 53 | RUN dpkg-buildpackage -us -uc 54 | 55 | # ------------------------------------------------------------------------------ 56 | 57 | FROM golang:1.15 as k8s-await-election-builder 58 | 59 | RUN git clone https://github.com/LINBIT/k8s-await-election/ /usr/local/go/k8s-await-election/ \ 60 | && cd /usr/local/go/k8s-await-election \ 61 | && git reset --hard v0.2.4 \ 62 | && make \ 63 | && mv ./out/k8s-await-election-amd64 /k8s-await-election 64 | 65 | # ------------------------------------------------------------------------------ 66 | 67 | FROM debian:buster 68 | 69 | COPY --from=builder /linstor-common_*_all.deb /linstor-controller_*_all.deb /packages/ 70 | COPY --from=client-builder /python-linstor_*_all.deb /linstor-client_*_all.deb /packages/ 71 | COPY --from=k8s-await-election-builder /k8s-await-election /k8s-await-election 72 | 73 | # Install repos and system upgrade 74 | ENV DEBIAN_FRONTEND noninteractive 75 | RUN apt-get -y update \ 76 | && apt-get -y upgrade \ 77 | && apt-get clean \ 78 | && rm -rf /var/lib/apt/lists/* 79 | 80 | # Install linstor-controller 81 | RUN apt-get update \ 82 | && apt-get install -y default-jre-headless python3-all python3-natsort \ 83 | && dpkg -i packages/*.deb \ 84 | && sed -i "s|'$| \"-Djdk.tls.acknowledgeCloseNotify=true\"'|" \ 85 | /usr/share/linstor-server/bin/Controller \ 86 | && apt-get clean \ 87 | && rm -rf /var/lib/apt/lists/* \ 88 | && mkdir -p /config /logs \ 89 | && /usr/share/linstor-server/bin/linstor-config create-db-file \ 90 | /data/linstordb > /config/linstor.toml 
91 | 92 | ENTRYPOINT [ "/usr/share/linstor-server/bin/Controller", "--logs=/logs", "--config-directory=/config" ] 93 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/stork-deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . -}} 2 | {{- if .Values.stork.enabled }} 3 | --- 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | labels: 8 | app: {{ $fullName }}-stork 9 | name: {{ $fullName }}-stork 10 | namespace: {{ .Release.Namespace }} 11 | spec: 12 | replicas: {{ .Values.stork.replicaCount }} 13 | selector: 14 | matchLabels: 15 | app: {{ $fullName }}-stork 16 | template: 17 | metadata: 18 | labels: 19 | app: {{ $fullName }}-stork 20 | spec: 21 | imagePullSecrets: 22 | {{- toYaml .Values.stork.image.pullSecrets | nindent 8 }} 23 | {{- if or .Values.stork.affinity .Values.stork.podAntiAffinity }} 24 | affinity: 25 | {{- with .Values.stork.affinity }} 26 | {{- toYaml . 
| nindent 8 }} 27 | {{- end }} 28 | {{- if eq .Values.stork.podAntiAffinity "hard" }} 29 | podAntiAffinity: 30 | requiredDuringSchedulingIgnoredDuringExecution: 31 | - topologyKey: "{{ .Values.stork.podAntiAffinityTopologyKey }}" 32 | labelSelector: 33 | matchLabels: 34 | app: {{ $fullName }}-controller 35 | {{- else if eq .Values.stork.podAntiAffinity "soft" }} 36 | podAntiAffinity: 37 | preferredDuringSchedulingIgnoredDuringExecution: 38 | - weight: 1 39 | podAffinityTerm: 40 | topologyKey: "{{ .Values.stork.podAntiAffinityTopologyKey }}" 41 | labelSelector: 42 | matchLabels: 43 | app: {{ $fullName }}-controller 44 | {{- end }} 45 | {{- end }} 46 | containers: 47 | - name: stork 48 | image: {{ .Values.stork.image.repository }}:{{ .Values.stork.image.tag }} 49 | imagePullPolicy: {{ .Values.stork.image.pullPolicy }} 50 | command: 51 | - /stork 52 | - --driver=linstor 53 | - --leader-elect=true 54 | - --lock-object-name={{ $fullName }}-stork 55 | - --lock-object-namespace=$(NAMESPACE) 56 | - --health-monitor=false 57 | - --snapshotter=false 58 | - --cluster-domain-controllers=false 59 | env: 60 | - name: NAMESPACE 61 | valueFrom: 62 | fieldRef: 63 | fieldPath: metadata.namespace 64 | - name: LS_CONTROLLERS 65 | {{- if not .Values.controller.ssl.enabled }} 66 | value: http://{{ $fullName }}-controller:{{ .Values.controller.port }} 67 | {{- else }} 68 | value: https://{{ $fullName }}-controller:{{ .Values.controller.ssl.port }} 69 | - name: LS_USER_CERTIFICATE 70 | valueFrom: 71 | secretKeyRef: 72 | key: tls.crt 73 | name: {{ $fullName }}-client-tls 74 | - name: LS_USER_KEY 75 | valueFrom: 76 | secretKeyRef: 77 | key: tls.key 78 | name: {{ $fullName }}-client-tls 79 | - name: LS_ROOT_CA 80 | valueFrom: 81 | secretKeyRef: 82 | key: ca.crt 83 | name: {{ $fullName }}-client-tls 84 | {{- end }} 85 | ports: 86 | - name: extender 87 | containerPort: 8099 88 | - name: webhook 89 | containerPort: 443 90 | serviceAccountName: {{ $fullName }}-stork 91 | priorityClassName: 
system-node-critical 92 | {{- with .Values.stork.nodeSelector }} 93 | nodeSelector: 94 | {{- toYaml . | nindent 8 }} 95 | {{- end }} 96 | {{- with .Values.stork.tolerations }} 97 | tolerations: 98 | {{- toYaml . | nindent 6 }} 99 | {{- end }} 100 | {{- end }} 101 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/ha-controller-deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . -}} 2 | {{- if .Values.haController.enabled }} 3 | --- 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | labels: 8 | app: {{ $fullName }}-ha-controller 9 | name: {{ $fullName }}-ha-controller 10 | namespace: {{ .Release.Namespace }} 11 | spec: 12 | replicas: {{ .Values.haController.replicaCount }} 13 | selector: 14 | matchLabels: 15 | app: {{ $fullName }}-ha-controller 16 | template: 17 | metadata: 18 | labels: 19 | app: {{ $fullName }}-ha-controller 20 | spec: 21 | imagePullSecrets: 22 | {{- toYaml .Values.haController.image.pullSecrets | nindent 8 }} 23 | {{- if or .Values.haController.affinity .Values.haController.podAntiAffinity }} 24 | affinity: 25 | {{- with .Values.haController.affinity }} 26 | {{- toYaml . 
| nindent 8 }} 27 | {{- end }} 28 | {{- if eq .Values.haController.podAntiAffinity "hard" }} 29 | podAntiAffinity: 30 | requiredDuringSchedulingIgnoredDuringExecution: 31 | - topologyKey: "{{ .Values.haController.podAntiAffinityTopologyKey }}" 32 | labelSelector: 33 | matchLabels: 34 | app: {{ $fullName }}-controller 35 | {{- else if eq .Values.haController.podAntiAffinity "soft" }} 36 | podAntiAffinity: 37 | preferredDuringSchedulingIgnoredDuringExecution: 38 | - weight: 1 39 | podAffinityTerm: 40 | topologyKey: "{{ .Values.haController.podAntiAffinityTopologyKey }}" 41 | labelSelector: 42 | matchLabels: 43 | app: {{ $fullName }}-controller 44 | {{- end }} 45 | {{- end }} 46 | containers: 47 | - name: linstor-ha-controller 48 | {{- with .Values.haController.image }} 49 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 50 | imagePullPolicy: {{ .pullPolicy }} 51 | {{- end }} 52 | command: 53 | - /piraeus-ha-controller 54 | - --leader-election=true 55 | - --leader-election-lease-name=$(NAME) 56 | - --leader-election-namespace=$(NAMESPACE) 57 | - --leader-election-resource-name={{ $fullName }}-ha-controller 58 | - --v=5 59 | livenessProbe: 60 | httpGet: 61 | path: /healthz 62 | port: 8080 63 | scheme: HTTP 64 | env: 65 | - name: LS_CONTROLLERS 66 | {{- if not .Values.controller.ssl.enabled }} 67 | value: http://{{ $fullName }}-controller:{{ .Values.controller.port }} 68 | {{- else }} 69 | value: https://{{ $fullName }}-controller:{{ .Values.controller.ssl.port }} 70 | - name: LS_USER_CERTIFICATE 71 | valueFrom: 72 | secretKeyRef: 73 | key: tls.crt 74 | name: {{ $fullName }}-client-tls 75 | - name: LS_USER_KEY 76 | valueFrom: 77 | secretKeyRef: 78 | key: tls.key 79 | name: {{ $fullName }}-client-tls 80 | - name: LS_ROOT_CA 81 | valueFrom: 82 | secretKeyRef: 83 | key: ca.crt 84 | name: {{ $fullName }}-client-tls 85 | {{- end }} 86 | - name: NAME 87 | valueFrom: 88 | fieldRef: 89 | fieldPath: metadata.name 90 | - name: NAMESPACE 91 
| valueFrom: 92 | fieldRef: 93 | fieldPath: metadata.namespace 94 | serviceAccountName: {{ $fullName }}-ha-controller 95 | priorityClassName: system-node-critical 96 | {{- with .Values.haController.nodeSelector }} 97 | nodeSelector: 98 | {{- toYaml . | nindent 8 }} 99 | {{- end }} 100 | {{- with .Values.haController.tolerations }} 101 | tolerations: 102 | {{- toYaml . | nindent 6 }} 103 | {{- end }} 104 | {{- end }} 105 | -------------------------------------------------------------------------------- /dockerfiles/linstor-satellite/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:buster as builder 2 | 3 | ARG VERSION=1.14.0 4 | 5 | ENV DEBIAN_FRONTEND noninteractive 6 | RUN apt-get update \ 7 | && apt-get -y upgrade \ 8 | && apt-get -y install build-essential git default-jdk-headless dh-systemd gradle python3-all 9 | 10 | RUN git clone https://github.com/LINBIT/linstor-server.git /linstor-server 11 | WORKDIR /linstor-server 12 | RUN git checkout v${VERSION} 13 | 14 | RUN make debrelease \ 15 | && rm -rf /root/.gradle/caches/ \ 16 | && mv linstor-server-${VERSION}.tar.gz /linstor-server_${VERSION}.orig.tar.gz \ 17 | && tar -C / -xvf /linstor-server_${VERSION}.orig.tar.gz 18 | 19 | WORKDIR /linstor-server-${VERSION} 20 | RUN dpkg-buildpackage -us -uc 21 | 22 | # ------------------------------------------------------------------------------ 23 | 24 | FROM debian:buster as utils-builder 25 | 26 | ARG UTILS_VERSION=9.18.2 27 | 28 | ENV DEBIAN_FRONTEND noninteractive 29 | RUN apt-get update \ 30 | && apt-get -y upgrade \ 31 | && apt-get -y install build-essential debhelper git flex clitest xsltproc locales libxml2-utils po4a bash-completion dh-systemd docbook-xsl udev asciidoctor 32 | 33 | RUN git clone --recurse-submodules https://github.com/LINBIT/drbd-utils /drbd-utils 34 | WORKDIR /drbd-utils 35 | RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ 36 | locale-gen 37 | ENV LANG 
en_US.UTF-8 38 | ENV LANGUAGE en_US:en 39 | ENV LC_ALL en_US.UTF-8 40 | RUN git checkout v${UTILS_VERSION} \ 41 | && git submodule update --init --force --checkout \ 42 | && install /dev/null /usr/local/bin/lbvers.py \ 43 | && ./autogen.sh \ 44 | && ./configure \ 45 | && make debrelease VERSION=${UTILS_VERSION} \ 46 | && mv drbd-utils-${UTILS_VERSION}.tar.gz ../drbd-utils_${UTILS_VERSION}.orig.tar.gz \ 47 | && tar -C / -xvf ../drbd-utils_${UTILS_VERSION}.orig.tar.gz 48 | WORKDIR /drbd-utils-${UTILS_VERSION} 49 | RUN dpkg-buildpackage -us -uc 50 | 51 | ARG THIN_SEND_RECV_VERSION=0.24 52 | RUN git clone --recurse-submodules https://github.com/LINBIT/thin-send-recv /thin-send-recv 53 | WORKDIR /thin-send-recv 54 | RUN git checkout v${THIN_SEND_RECV_VERSION} \ 55 | && make debrelease \ 56 | && mv thin-send-recv-${THIN_SEND_RECV_VERSION}.tar.gz ../thin-send-recv_${THIN_SEND_RECV_VERSION}.orig.tar.gz \ 57 | && tar -C / -xvf ../thin-send-recv_${THIN_SEND_RECV_VERSION}.orig.tar.gz 58 | WORKDIR /thin-send-recv-${THIN_SEND_RECV_VERSION} 59 | RUN dpkg-buildpackage -us -uc 60 | 61 | # ------------------------------------------------------------------------------ 62 | 63 | FROM debian:buster as reactor-builder 64 | 65 | ARG REACTOR_VERSION=0.4.3 66 | 67 | ENV DEBIAN_FRONTEND noninteractive 68 | RUN apt-get update \ 69 | && apt-get -y upgrade \ 70 | && apt-get -y install build-essential debhelper git cargo rustc dh-python 71 | 72 | RUN git clone --recurse-submodules https://github.com/LINBIT/drbd-reactor /drbd-reactor 73 | WORKDIR /drbd-reactor 74 | RUN git checkout v${REACTOR_VERSION} \ 75 | && install /dev/null /usr/local/bin/lbvers.py \ 76 | && make debrelease VERSION=${REACTOR_VERSION} \ 77 | && mv drbd-reactor-${REACTOR_VERSION}.tar.gz ../drbd-reactor_${REACTOR_VERSION}.orig.tar.gz \ 78 | && tar -C / -xvf ../drbd-reactor_${REACTOR_VERSION}.orig.tar.gz 79 | WORKDIR /drbd-reactor-${REACTOR_VERSION} 80 | RUN dpkg-buildpackage -us -uc 81 | 82 | # 
------------------------------------------------------------------------------ 83 | 84 | FROM debian:buster 85 | 86 | COPY --from=builder /linstor-common_*_all.deb /linstor-satellite_*_all.deb /packages/ 87 | COPY --from=utils-builder /python-linstor_*_all.deb /drbd-utils_*_amd64.deb /thin-send-recv_*_amd64.deb /packages/ 88 | COPY --from=reactor-builder /drbd-reactor_*_amd64.deb /packages/ 89 | 90 | # Install repos and system upgrade 91 | ENV DEBIAN_FRONTEND noninteractive 92 | RUN apt-get -y update \ 93 | && apt-get -y upgrade \ 94 | && apt-get clean \ 95 | && rm -rf /var/lib/apt/lists/* 96 | 97 | # Install linstor-satellite 98 | RUN apt-get update \ 99 | && apt-get install -y default-jre-headless thin-provisioning-tools python3-toml \ 100 | && dpkg -i packages/*.deb \ 101 | && sed -i "s|'$| \"-Djdk.tls.acknowledgeCloseNotify=true\"'|" \ 102 | /usr/share/linstor-server/bin/Satellite \ 103 | && apt-get clean \ 104 | && rm -rf /var/lib/apt/lists/* \ 105 | && mkdir -p /config /logs 106 | 107 | # Install additional tools 108 | RUN sed -i '/^deb / s/main/main contrib/' /etc/apt/sources.list \ 109 | && apt-get update \ 110 | && apt-get install --no-install-recommends -y zfsutils-linux cryptsetup nvme-cli zstd socat curl \ 111 | && apt-get download lvm2 \ 112 | && dpkg --unpack lvm2*.deb \ 113 | && rm -f lvm2*.deb /var/lib/dpkg/info/lvm2.postinst \ 114 | && apt-get -fy install \ 115 | && apt-get clean \ 116 | && rm -rf /var/lib/apt/lists/* \ 117 | && sed -i /etc/lvm/lvm.conf \ 118 | -e "s%#\? \?\(use_lvmetad *=\).*%\1 0%" \ 119 | -e "s%#\? \?\(error_when_full *=\).*%\1 1%" \ 120 | -e "s%#\? 
\?\(global_filter *=\).*%\1 [ \"r|/dev/drbd.*|\", \"r|/dev/dm-.*|\", \"r|/dev/zd.*|\" ]%" 121 | 122 | ENTRYPOINT [ "/usr/share/linstor-server/bin/Satellite", "--logs=/logs", "--config-directory=/config" ] 123 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/csi-node-daemonset.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . -}} 2 | {{- if .Values.csi.enabled }} 3 | --- 4 | apiVersion: apps/v1 5 | kind: DaemonSet 6 | metadata: 7 | name: {{ $fullName }}-csi-node 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: {{ $fullName }}-csi-node 12 | role: linstor-csi 13 | template: 14 | metadata: 15 | labels: 16 | app: {{ $fullName }}-csi-node 17 | role: linstor-csi 18 | spec: 19 | containers: 20 | - name: csi-node-driver-registrar 21 | {{- with .Values.csi.image.csiNodeDriverRegistrar }} 22 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 23 | imagePullPolicy: {{ .pullPolicy }} 24 | {{- end }} 25 | args: 26 | - --v=5 27 | # No --timeout here, it's a very recent addition and not very useful for a single call that should return 28 | # static information 29 | - --csi-address=$(ADDRESS) 30 | - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) 31 | env: 32 | - name: ADDRESS 33 | value: /csi/csi.sock 34 | - name: DRIVER_REG_SOCK_PATH 35 | value: /var/lib/kubelet/plugins/linstor.csi.linbit.com/csi.sock 36 | - name: KUBE_NODE_NAME 37 | valueFrom: 38 | fieldRef: 39 | fieldPath: spec.nodeName 40 | lifecycle: 41 | preStop: 42 | exec: 43 | command: 44 | - /bin/sh 45 | - -c 46 | - rm -rf /registration/linstor.csi.linbit.com /registration/linstor.csi.linbit.com-reg.sock 47 | volumeMounts: 48 | - mountPath: /csi/ 49 | name: plugin-dir 50 | - mountPath: /registration/ 51 | name: registration-dir 52 | - name: csi-livenessprobe 53 | {{- with .Values.csi.image.csiLivenessProbe }} 54 | image: "{{ 
.repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 55 | imagePullPolicy: {{ .pullPolicy }} 56 | {{- end }} 57 | args: 58 | - --csi-address=$(CSI_ENDPOINT) 59 | env: 60 | - name: CSI_ENDPOINT 61 | value: unix:///csi/csi.sock 62 | volumeMounts: 63 | - mountPath: /csi/ 64 | name: plugin-dir 65 | - name: linstor-csi-plugin 66 | {{- with .Values.csi.image.linstorCsiPlugin }} 67 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 68 | imagePullPolicy: {{ .pullPolicy }} 69 | {{- end }} 70 | args: 71 | - --csi-endpoint=$(CSI_ENDPOINT) 72 | - --node=$(KUBE_NODE_NAME) 73 | - --linstor-endpoint=$(LS_CONTROLLERS) 74 | - --log-level=debug 75 | livenessProbe: 76 | httpGet: 77 | path: /healthz 78 | port: 9808 79 | env: 80 | - name: CSI_ENDPOINT 81 | value: unix:///csi/csi.sock 82 | - name: KUBE_NODE_NAME 83 | valueFrom: 84 | fieldRef: 85 | fieldPath: spec.nodeName 86 | - name: LS_CONTROLLERS 87 | {{- if not .Values.controller.ssl.enabled }} 88 | value: http://{{ $fullName }}-controller:{{ .Values.controller.port }} 89 | {{- else }} 90 | value: https://{{ $fullName }}-controller:{{ .Values.controller.ssl.port }} 91 | - name: LS_USER_CERTIFICATE 92 | valueFrom: 93 | secretKeyRef: 94 | key: tls.crt 95 | name: {{ $fullName }}-client-tls 96 | - name: LS_USER_KEY 97 | valueFrom: 98 | secretKeyRef: 99 | key: tls.key 100 | name: {{ $fullName }}-client-tls 101 | - name: LS_ROOT_CA 102 | valueFrom: 103 | secretKeyRef: 104 | key: ca.crt 105 | name: {{ $fullName }}-client-tls 106 | {{- end }} 107 | securityContext: 108 | allowPrivilegeEscalation: true 109 | capabilities: 110 | add: 111 | - SYS_ADMIN 112 | privileged: true 113 | volumeMounts: 114 | - mountPath: /csi 115 | name: plugin-dir 116 | - mountPath: /var/lib/kubelet 117 | mountPropagation: Bidirectional 118 | name: pods-mount-dir 119 | - mountPath: /dev 120 | name: device-dir 121 | imagePullSecrets: 122 | {{- toYaml .Values.csi.image.pullSecrets | nindent 8 }} 123 | {{- 
with .Values.csi.node.nodeSelector }} 124 | nodeSelector: 125 | {{- toYaml . | nindent 8 }} 126 | {{- end }} 127 | {{- with .Values.csi.node.tolerations }} 128 | tolerations: 129 | {{- toYaml . | nindent 6 }} 130 | {{- end }} 131 | priorityClassName: system-node-critical 132 | serviceAccount: {{ $fullName }}-csi-node-sa 133 | volumes: 134 | - name: registration-dir 135 | hostPath: 136 | path: /var/lib/kubelet/plugins_registry/ 137 | type: DirectoryOrCreate 138 | - name: plugin-dir 139 | hostPath: 140 | path: /var/lib/kubelet/plugins/linstor.csi.linbit.com/ 141 | type: DirectoryOrCreate 142 | - name: pods-mount-dir 143 | hostPath: 144 | path: /var/lib/kubelet 145 | type: Directory 146 | - name: device-dir 147 | hostPath: 148 | path: /dev 149 | {{- end }} 150 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/csi-controller-deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- if .Values.csi.enabled }} 3 | --- 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | name: {{ $fullName }}-csi-controller 8 | spec: 9 | replicas: {{ .Values.csi.controller.replicaCount }} 10 | selector: 11 | matchLabels: 12 | app: {{ $fullName }}-csi-controller 13 | role: linstor-csi 14 | template: 15 | metadata: 16 | labels: 17 | app: {{ $fullName }}-csi-controller 18 | role: linstor-csi 19 | spec: 20 | containers: 21 | - name: csi-provisioner 22 | {{- with .Values.csi.image.csiProvisioner }} 23 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 24 | imagePullPolicy: {{ .pullPolicy }} 25 | {{- end }} 26 | args: 27 | - --csi-address=$(ADDRESS) 28 | - --v=5 29 | - --timeout=1m 30 | # restore old default fstype 31 | - --default-fstype=ext4 32 | - --feature-gates=Topology={{ .Values.csi.controller.csiProvisioner.topology }} 33 | - --leader-election=true 34 | - --leader-election-namespace=$(NAMESPACE) 35 | env: 36 | - name: ADDRESS 37 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 38 | - name: NAMESPACE 39 | valueFrom: 40 | fieldRef: 41 | fieldPath: metadata.namespace 42 | volumeMounts: 43 | - mountPath: /var/lib/csi/sockets/pluginproxy/ 44 | name: socket-dir 45 | - name: csi-attacher 46 | {{- with .Values.csi.image.csiAttacher }} 47 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 48 | imagePullPolicy: {{ .pullPolicy }} 49 | {{- end }} 50 | args: 51 | - --v=5 52 | - --csi-address=$(ADDRESS) 53 | - --timeout=1m 54 | - --leader-election=true 55 | - --leader-election-namespace=$(NAMESPACE) 56 | env: 57 | - name: ADDRESS 58 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 59 | - name: NAMESPACE 60 | valueFrom: 61 | fieldRef: 62 | fieldPath: metadata.namespace 63 | volumeMounts: 64 | - mountPath: /var/lib/csi/sockets/pluginproxy/ 65 | name: socket-dir 66 | - name: csi-resizer 67 | {{- with .Values.csi.image.csiResizer }} 68 | image: "{{ .repository }}{{ if .digest 
}}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 69 | imagePullPolicy: {{ .pullPolicy }} 70 | {{- end }} 71 | args: 72 | - --v=5 73 | - --csi-address=$(ADDRESS) 74 | - --timeout=1m 75 | # LINSTOR can resize while in use, no need to check if volume is in use 76 | - --handle-volume-inuse-error=false 77 | - --leader-election=true 78 | - --leader-election-namespace=$(NAMESPACE) 79 | env: 80 | - name: ADDRESS 81 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 82 | - name: NAMESPACE 83 | valueFrom: 84 | fieldRef: 85 | fieldPath: metadata.namespace 86 | volumeMounts: 87 | - mountPath: /var/lib/csi/sockets/pluginproxy/ 88 | name: socket-dir 89 | - name: csi-snapshotter 90 | {{- with .Values.csi.image.csiSnapshotter }} 91 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 92 | imagePullPolicy: {{ .pullPolicy }} 93 | {{- end }} 94 | args: 95 | - --timeout=1m 96 | - --csi-address=$(ADDRESS) 97 | - --leader-election=true 98 | - --leader-election-namespace=$(NAMESPACE) 99 | env: 100 | - name: ADDRESS 101 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 102 | - name: NAMESPACE 103 | valueFrom: 104 | fieldRef: 105 | fieldPath: metadata.namespace 106 | volumeMounts: 107 | - mountPath: /var/lib/csi/sockets/pluginproxy/ 108 | name: socket-dir 109 | - name: csi-livenessprobe 110 | {{- with .Values.csi.image.csiLivenessProbe }} 111 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 112 | imagePullPolicy: {{ .pullPolicy }} 113 | {{- end }} 114 | args: 115 | - --csi-address=$(CSI_ENDPOINT) 116 | env: 117 | - name: CSI_ENDPOINT 118 | value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock 119 | volumeMounts: 120 | - name: socket-dir 121 | mountPath: /var/lib/csi/sockets/pluginproxy/ 122 | - name: linstor-csi-plugin 123 | {{- with .Values.csi.image.linstorCsiPlugin }} 124 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 125 | imagePullPolicy: {{ .pullPolicy }} 126 | {{- 
end }} 127 | args: 128 | - --csi-endpoint=$(CSI_ENDPOINT) 129 | - --node=$(KUBE_NODE_NAME) 130 | - --linstor-endpoint=$(LS_CONTROLLERS) 131 | - --log-level=debug 132 | livenessProbe: 133 | httpGet: 134 | path: /healthz 135 | port: 9808 136 | env: 137 | - name: CSI_ENDPOINT 138 | value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock 139 | - name: KUBE_NODE_NAME 140 | valueFrom: 141 | fieldRef: 142 | fieldPath: spec.nodeName 143 | - name: LS_CONTROLLERS 144 | {{- if not .Values.controller.ssl.enabled }} 145 | value: http://{{ $fullName }}-controller:{{ .Values.controller.port }} 146 | {{- else }} 147 | value: https://{{ $fullName }}-controller:{{ .Values.controller.ssl.port }} 148 | - name: LS_USER_CERTIFICATE 149 | valueFrom: 150 | secretKeyRef: 151 | key: tls.crt 152 | name: {{ $fullName }}-client-tls 153 | - name: LS_USER_KEY 154 | valueFrom: 155 | secretKeyRef: 156 | key: tls.key 157 | name: {{ $fullName }}-client-tls 158 | - name: LS_ROOT_CA 159 | valueFrom: 160 | secretKeyRef: 161 | key: ca.crt 162 | name: {{ $fullName }}-client-tls 163 | {{- end }} 164 | volumeMounts: 165 | - name: socket-dir 166 | mountPath: /var/lib/csi/sockets/pluginproxy/ 167 | imagePullSecrets: 168 | {{- toYaml .Values.csi.image.pullSecrets | nindent 8 }} 169 | {{- if or .Values.csi.controller.affinity .Values.csi.controller.podAntiAffinity }} 170 | affinity: 171 | {{- with .Values.csi.controller.affinity }} 172 | {{- toYaml . 
| nindent 8 }} 173 | {{- end }} 174 | {{- if eq .Values.csi.controller.podAntiAffinity "hard" }} 175 | podAntiAffinity: 176 | requiredDuringSchedulingIgnoredDuringExecution: 177 | - topologyKey: "{{ .Values.csi.controller.podAntiAffinityTopologyKey }}" 178 | labelSelector: 179 | matchLabels: 180 | app: {{ $fullName }}-csi-controller 181 | {{- else if eq .Values.csi.controller.podAntiAffinity "soft" }} 182 | podAntiAffinity: 183 | preferredDuringSchedulingIgnoredDuringExecution: 184 | - weight: 1 185 | podAffinityTerm: 186 | topologyKey: "{{ .Values.csi.controller.podAntiAffinityTopologyKey }}" 187 | labelSelector: 188 | matchLabels: 189 | app: {{ $fullName }}-csi-controller 190 | {{- end }} 191 | {{- end }} 192 | {{- with .Values.csi.controller.nodeSelector }} 193 | nodeSelector: 194 | {{- toYaml . | nindent 8 }} 195 | {{- end }} 196 | {{- with .Values.csi.controller.tolerations }} 197 | tolerations: 198 | {{- toYaml . | nindent 6 }} 199 | {{- end }} 200 | priorityClassName: system-cluster-critical 201 | serviceAccount: {{ $fullName }}-csi-controller-sa 202 | volumes: 203 | - emptyDir: {} 204 | name: socket-dir 205 | {{- end }} 206 | -------------------------------------------------------------------------------- /helm/kube-linstor/scripts/functions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | curl_and_log() { 4 | echo "request: $curl $@" | awk '{print " " $0}' 5 | $curl -o - -w "\n%{http_code}\n" "$@" | awk '{l[NR] = $0} END {printf " response: "; for (i=1; i<=NR-1; i++) print l[i]}; END{printf "\n status: " $0 ; if ($0<200||$0>299) {print " (error)"; exit 1} else print " (ok)"}' 6 | } 7 | 8 | load_controller_params() { 9 | echo "Loading controller parameters" 10 | if [ -z "$LS_CONTROLLERS" ]; then 11 | echo "Variable LS_CONTROLLERS is not set!" 
12 | exit 1 13 | fi 14 | curl="curl -sS -H Content-Type:application/json" 15 | if [ -f /tls/client/ca.crt ]; then 16 | curl="$curl --cacert /tls/client/ca.crt" 17 | fi 18 | if [ -f /tls/client/tls.crt ] && [ -f /tls/client/tls.key ]; then 19 | curl="$curl --cert /tls/client/tls.crt --key /tls/client/tls.key" 20 | fi 21 | controller_port=$(echo "$LS_CONTROLLERS" | awk -F'[/:]+' '{print $NF}') 22 | controller_address=$(echo "$LS_CONTROLLERS" | awk -F'[/:]+' '{print $(NF-1)}') 23 | } 24 | 25 | load_satellite_params() { 26 | echo "Loading satellite parameters" 27 | case "" in 28 | $NODE_NAME) 29 | echo "Variable NODE_NAME is not set!" 30 | exit 1 31 | ;; 32 | $NODE_IP) 33 | echo "Variable NODE_IP is not set!" 34 | exit 1 35 | ;; 36 | esac 37 | satellite_config=/config/linstor_satellite.toml 38 | satellite_encryption_type=${NODE_ENCRYPTION_TYPE:-$(awk -F= '$1 == " type" {gsub("\"","",$2); print $2}' "$satellite_config")} 39 | satellite_port=${NODE_PORT:-$(awk -F= '$1 == " port" {gsub("\"","",$2); print $2}' "$satellite_config")} 40 | satellite_encryption_type=${satellite_encryption_type:-Plain} 41 | satellite_port=${satellite_port:-3366} 42 | } 43 | 44 | wait_tcp_port(){ 45 | until printf "" 2>/dev/null >"/dev/tcp/$1/$2"; do 46 | sleep 1 47 | done 48 | } 49 | 50 | wait_satellite(){ 51 | echo "Waiting linstor-satellite to launch on localhost:$satellite_port..." 52 | wait_tcp_port localhost "$satellite_port" 53 | echo "Service linstor-satellite launched" 54 | } 55 | 56 | wait_controller(){ 57 | echo "Waiting linstor-controller to launch on $controller_address:$controller_port..." 58 | wait_tcp_port "$controller_address" "$controller_port" 59 | echo "Service linstor-controller launched" 60 | } 61 | 62 | configure_controller_props(){ 63 | echo "Setting controller properties..." 
64 | curl_and_log -X POST -d "{\"override_props\": $1}" "$LS_CONTROLLERS/v1/controller/properties" 65 | echo 66 | } 67 | 68 | configure_resource_group(){ 69 | local rg_name=$1 70 | local rg_selectfilter_json=$2 71 | local rg_props_json=$3 72 | 73 | local rg_json="$(cat </dev/null; then 84 | echo "Resource-group $rg_name already exists, updating..." 85 | curl_and_log -X PUT -d "{\"select_filter\": $rg_selectfilter_json, \"override_props\": $rg_props_json}" "$LS_CONTROLLERS/v1/resource-groups/$rg_name" 86 | else 87 | echo "Resource-group $rg_name does not exists, adding..." 88 | curl_and_log -X POST -d "$rg_json" "$LS_CONTROLLERS/v1/resource-groups" 89 | fi 90 | echo 91 | } 92 | 93 | configure_volume_group(){ 94 | local rg_name=$1 95 | local vg_number=$2 96 | local vg_props_json=$3 97 | 98 | local vg_json="$(cat </dev/null; then 108 | echo "Volume-group $vg_number already exists for resource-group $rg_name, updating..." 109 | curl_and_log -X PUT -d "{\"override_props\": $vg_props_json}" "$LS_CONTROLLERS/v1/resource-groups/$rg_name/volume-groups/$vg_number" 110 | else 111 | echo "Volume-group $vg_number does not exists for resource-group $rg_name, adding..." 112 | curl_and_log -X POST -d "$vg_json" "$LS_CONTROLLERS/v1/resource-groups/$rg_name/volume-groups" 113 | fi 114 | echo 115 | } 116 | 117 | register_node(){ 118 | echo "Checking if node $NODE_NAME exists in cluster" 119 | if $curl -f "$LS_CONTROLLERS/v1/nodes/${NODE_NAME}" >/dev/null; then 120 | echo "Node $NODE_NAME already exists in cluster, skip adding..." 121 | return 0 122 | fi 123 | echo "Node $NODE_NAME does not exists in cluster" 124 | 125 | echo "Adding node $NODE_NAME to the cluster" 126 | node_json="$(cat </dev/null; then 171 | echo "Interface $interface_name already exists on node $NODE_NAME, updating..." 
172 | curl_and_log -X PUT -d "$interface_json" "$LS_CONTROLLERS/v1/nodes/${NODE_NAME}/net-interfaces/$interface_name" 173 | else 174 | echo "Interface $interface_name does not exists on node $NODE_NAME, adding..." 175 | curl_and_log -X POST -d "$interface_json" "$LS_CONTROLLERS/v1/nodes/${NODE_NAME}/net-interfaces" 176 | fi 177 | echo 178 | } 179 | 180 | configure_node_props(){ 181 | echo "Setting node properties for $NODE_NAME..." 182 | curl_and_log -X PUT -d "{\"override_props\": $1}" "$LS_CONTROLLERS/v1/nodes/${NODE_NAME}" 183 | echo 184 | } 185 | 186 | configure_storage_pool(){ 187 | local sp_name=$1 188 | local sp_provider=$2 189 | local sp_props_json=$3 190 | 191 | local sp_json="$(cat </dev/null; then 203 | echo "Storage-pool $sp_name already exists on node $NODE_NAME, updating..." 204 | curl_and_log -X PUT -d "{\"override_props\": $sp_props_json}" "$LS_CONTROLLERS/v1/nodes/${NODE_NAME}/storage-pools/$sp_name" 205 | else 206 | echo "Storage-pool $sp_name does not exists on node $NODE_NAME, adding..." 
207 | curl_and_log -X POST -d "$sp_json" "$LS_CONTROLLERS/v1/nodes/${NODE_NAME}/storage-pools" 208 | fi 209 | echo 210 | } 211 | 212 | check_lvm_pool(){ 213 | if vgs "$1" >/dev/null 2>&1; then 214 | echo "Volume group $1 found on node $NODE_NAME" 215 | return 0 216 | else 217 | echo "Volume group $1 didn't found on node $NODE_NAME" 218 | return 1 219 | fi 220 | } 221 | 222 | check_lvmthin_pool(){ 223 | if lvs "$1" >/dev/null 2>&1; then 224 | echo "Logical volume $1 found on node $NODE_NAME" 225 | return 0 226 | else 227 | echo "Logical volume $1 didn't found on node $NODE_NAME" 228 | return 1 229 | fi 230 | } 231 | 232 | check_zfs_pool(){ 233 | if zfs list "$1" >/dev/null 2>&1; then 234 | echo "ZFS dataset $1 found on node $NODE_NAME" 235 | return 0 236 | else 237 | echo "zfs dataset $1 didn't found on node $NODE_NAME" 238 | return 1 239 | fi 240 | } 241 | 242 | finish(){ 243 | echo "Configuration has been successfully finished" 244 | exec sleep infinity 245 | } 246 | 247 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/csi-controller-rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . 
-}} 2 | {{- if .Values.csi.enabled }} 3 | --- 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | kind: ClusterRole 6 | metadata: 7 | name: {{ $fullName }}-csi-attacher-role 8 | rules: 9 | - apiGroups: [""] 10 | resources: ["persistentvolumes"] 11 | verbs: ["get", "list", "watch", "patch"] 12 | - apiGroups: ["storage.k8s.io"] 13 | resources: ["csinodes"] 14 | verbs: ["get", "list", "watch"] 15 | - apiGroups: ["storage.k8s.io"] 16 | resources: ["volumeattachments"] 17 | verbs: ["get", "list", "watch", "patch"] 18 | - apiGroups: ["storage.k8s.io"] 19 | resources: ["volumeattachments/status"] 20 | verbs: ["patch"] 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | kind: Role 24 | metadata: 25 | name: {{ $fullName }}-csi-attacher-role 26 | rules: 27 | - apiGroups: ["coordination.k8s.io"] 28 | resources: ["leases"] 29 | verbs: ["get", "watch", "list", "delete", "update", "create"] 30 | --- 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | kind: ClusterRole 33 | metadata: 34 | name: {{ $fullName }}-csi-provisioner-role 35 | rules: 36 | - apiGroups: [""] 37 | resources: ["persistentvolumes"] 38 | verbs: ["get", "list", "watch", "create", "delete"] 39 | - apiGroups: [""] 40 | resources: ["persistentvolumeclaims"] 41 | verbs: ["get", "list", "watch", "update"] 42 | - apiGroups: ["storage.k8s.io"] 43 | resources: ["storageclasses"] 44 | verbs: ["get", "list", "watch"] 45 | - apiGroups: [""] 46 | resources: ["events"] 47 | verbs: ["list", "watch", "create", "update", "patch"] 48 | - apiGroups: ["snapshot.storage.k8s.io"] 49 | resources: ["volumesnapshots"] 50 | verbs: ["get", "list"] 51 | - apiGroups: ["snapshot.storage.k8s.io"] 52 | resources: ["volumesnapshotcontents"] 53 | verbs: ["get", "list"] 54 | - apiGroups: ["storage.k8s.io"] 55 | resources: ["csinodes"] 56 | verbs: ["get", "list", "watch"] 57 | - apiGroups: [""] 58 | resources: ["nodes"] 59 | verbs: ["get", "list", "watch"] 60 | - apiGroups: ["storage.k8s.io"] 61 | resources: ["volumeattachments"] 62 | verbs: 
["get", "list", "watch"] 63 | --- 64 | apiVersion: rbac.authorization.k8s.io/v1 65 | kind: Role 66 | metadata: 67 | name: {{ $fullName }}-csi-provisioner-role 68 | rules: 69 | - apiGroups: ["coordination.k8s.io"] 70 | resources: ["leases"] 71 | verbs: ["get", "watch", "list", "delete", "update", "create"] 72 | {{- if .Values.podSecurityPolicy.enabled }} 73 | - apiGroups: ["extensions"] 74 | resources: ["podsecuritypolicies"] 75 | resourceNames: ["{{ $fullName }}"] 76 | verbs: ["use"] 77 | {{- end }} 78 | --- 79 | apiVersion: rbac.authorization.k8s.io/v1 80 | kind: ClusterRole 81 | metadata: 82 | name: {{ $fullName }}-csi-resizer-role 83 | rules: 84 | - apiGroups: [""] 85 | resources: ["persistentvolumes"] 86 | verbs: ["get", "list", "watch", "patch"] 87 | - apiGroups: [""] 88 | resources: ["persistentvolumeclaims"] 89 | verbs: ["get", "list", "watch"] 90 | - apiGroups: [""] 91 | resources: ["pods"] 92 | verbs: ["get", "list", "watch"] 93 | - apiGroups: [""] 94 | resources: ["persistentvolumeclaims/status"] 95 | verbs: ["patch"] 96 | - apiGroups: [""] 97 | resources: ["events"] 98 | verbs: ["list", "watch", "create", "update", "patch"] 99 | --- 100 | apiVersion: rbac.authorization.k8s.io/v1 101 | kind: Role 102 | metadata: 103 | name: {{ $fullName }}-csi-resizer-role 104 | rules: 105 | - apiGroups: ["coordination.k8s.io"] 106 | resources: ["leases"] 107 | verbs: ["get", "watch", "list", "delete", "update", "create"] 108 | --- 109 | kind: ClusterRole 110 | apiVersion: rbac.authorization.k8s.io/v1 111 | metadata: 112 | name: {{ $fullName }}-csi-snapshotter-role 113 | rules: 114 | - apiGroups: [""] 115 | resources: ["persistentvolumes"] 116 | verbs: ["get", "list", "watch"] 117 | - apiGroups: [""] 118 | resources: ["persistentvolumeclaims"] 119 | verbs: ["get", "list", "watch", "update"] 120 | - apiGroups: ["storage.k8s.io"] 121 | resources: ["storageclasses"] 122 | verbs: ["get", "list", "watch"] 123 | - apiGroups: [""] 124 | resources: ["events"] 125 | verbs: 
["list", "watch", "create", "update", "patch"] 126 | - apiGroups: ["snapshot.storage.k8s.io"] 127 | resources: ["volumesnapshotclasses"] 128 | verbs: ["get", "list", "watch"] 129 | - apiGroups: ["snapshot.storage.k8s.io"] 130 | resources: ["volumesnapshotcontents"] 131 | verbs: ["create", "get", "list", "watch", "update", "delete"] 132 | - apiGroups: ["snapshot.storage.k8s.io"] 133 | resources: ["volumesnapshotcontents/status"] 134 | verbs: ["update"] 135 | - apiGroups: ["snapshot.storage.k8s.io"] 136 | resources: ["volumesnapshots"] 137 | verbs: ["get", "list", "watch", "update"] 138 | - apiGroups: ["apiextensions.k8s.io"] 139 | resources: ["customresourcedefinitions"] 140 | verbs: ["create", "list", "watch", "delete"] 141 | - apiGroups: ["snapshot.storage.k8s.io"] 142 | resources: ["volumesnapshots/status"] 143 | verbs: ["update"] 144 | --- 145 | apiVersion: rbac.authorization.k8s.io/v1 146 | kind: Role 147 | metadata: 148 | name: {{ $fullName }}-csi-snapshotter-role 149 | rules: 150 | - apiGroups: ["coordination.k8s.io"] 151 | resources: ["leases"] 152 | verbs: ["get", "watch", "list", "delete", "update", "create"] 153 | --- 154 | apiVersion: rbac.authorization.k8s.io/v1 155 | kind: ClusterRoleBinding 156 | metadata: 157 | name: {{ $fullName }}-csi-attacher-binding 158 | roleRef: 159 | apiGroup: rbac.authorization.k8s.io 160 | kind: ClusterRole 161 | name: {{ $fullName }}-csi-attacher-role 162 | subjects: 163 | - kind: ServiceAccount 164 | name: {{ $fullName }}-csi-controller-sa 165 | namespace: {{ .Release.Namespace }} 166 | --- 167 | apiVersion: rbac.authorization.k8s.io/v1 168 | kind: RoleBinding 169 | metadata: 170 | name: {{ $fullName }}-csi-attacher-binding 171 | roleRef: 172 | apiGroup: rbac.authorization.k8s.io 173 | kind: Role 174 | name: {{ $fullName }}-csi-attacher-role 175 | subjects: 176 | - kind: ServiceAccount 177 | name: {{ $fullName }}-csi-controller-sa 178 | --- 179 | apiVersion: rbac.authorization.k8s.io/v1 180 | kind: ClusterRoleBinding 181 | 
metadata: 182 | name: {{ $fullName }}-csi-provisioner-binding 183 | roleRef: 184 | apiGroup: rbac.authorization.k8s.io 185 | kind: ClusterRole 186 | name: {{ $fullName }}-csi-provisioner-role 187 | subjects: 188 | - kind: ServiceAccount 189 | name: {{ $fullName }}-csi-controller-sa 190 | namespace: {{ .Release.Namespace }} 191 | --- 192 | apiVersion: rbac.authorization.k8s.io/v1 193 | kind: RoleBinding 194 | metadata: 195 | name: {{ $fullName }}-csi-provisioner-binding 196 | roleRef: 197 | apiGroup: rbac.authorization.k8s.io 198 | kind: Role 199 | name: {{ $fullName }}-csi-provisioner-role 200 | subjects: 201 | - kind: ServiceAccount 202 | name: {{ $fullName }}-csi-controller-sa 203 | --- 204 | kind: ClusterRoleBinding 205 | apiVersion: rbac.authorization.k8s.io/v1 206 | metadata: 207 | name: {{ $fullName }}-csi-resizer-binding 208 | roleRef: 209 | kind: ClusterRole 210 | name: {{ $fullName }}-csi-resizer-role 211 | apiGroup: rbac.authorization.k8s.io 212 | subjects: 213 | - kind: ServiceAccount 214 | name: {{ $fullName }}-csi-controller-sa 215 | namespace: {{ .Release.Namespace }} 216 | --- 217 | apiVersion: rbac.authorization.k8s.io/v1 218 | kind: RoleBinding 219 | metadata: 220 | name: {{ $fullName }}-csi-resizer-binding 221 | roleRef: 222 | apiGroup: rbac.authorization.k8s.io 223 | kind: Role 224 | name: {{ $fullName }}-csi-resizer-role 225 | subjects: 226 | - kind: ServiceAccount 227 | name: {{ $fullName }}-csi-controller-sa 228 | --- 229 | kind: ClusterRoleBinding 230 | apiVersion: rbac.authorization.k8s.io/v1 231 | metadata: 232 | name: {{ $fullName }}-csi-snapshotter-binding 233 | roleRef: 234 | kind: ClusterRole 235 | name: {{ $fullName }}-csi-snapshotter-role 236 | apiGroup: rbac.authorization.k8s.io 237 | subjects: 238 | - kind: ServiceAccount 239 | name: {{ $fullName }}-csi-controller-sa 240 | namespace: {{ .Release.Namespace }} 241 | --- 242 | apiVersion: rbac.authorization.k8s.io/v1 243 | kind: RoleBinding 244 | metadata: 245 | name: {{ $fullName 
}}-csi-snapshotter-binding 246 | roleRef: 247 | apiGroup: rbac.authorization.k8s.io 248 | kind: Role 249 | name: {{ $fullName }}-csi-snapshotter-role 250 | subjects: 251 | - kind: ServiceAccount 252 | name: {{ $fullName }}-csi-controller-sa 253 | --- 254 | apiVersion: v1 255 | kind: ServiceAccount 256 | metadata: 257 | name: {{ $fullName }}-csi-controller-sa 258 | {{- end }} 259 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/controller-deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . -}} 2 | {{- if .Values.controller.enabled }} 3 | --- 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | labels: 8 | app: {{ $fullName }}-controller 9 | name: {{ $fullName }}-controller 10 | namespace: {{ .Release.Namespace }} 11 | spec: 12 | replicas: {{ .Values.controller.replicaCount }} 13 | strategy: 14 | type: Recreate 15 | selector: 16 | matchLabels: 17 | app: {{ $fullName }}-controller 18 | template: 19 | metadata: 20 | labels: 21 | app: {{ $fullName }}-controller 22 | annotations: 23 | checksum/config: {{ include (print $.Template.BasePath "/controller-secret.yaml") . | sha256sum }} 24 | spec: 25 | imagePullSecrets: 26 | {{- toYaml .Values.controller.image.pullSecrets | nindent 8 }} 27 | {{- if or .Values.controller.affinity .Values.controller.podAntiAffinity }} 28 | affinity: 29 | {{- with .Values.controller.affinity }} 30 | {{- toYaml . 
| nindent 8 }} 31 | {{- end }} 32 | {{- if eq .Values.controller.podAntiAffinity "hard" }} 33 | podAntiAffinity: 34 | requiredDuringSchedulingIgnoredDuringExecution: 35 | - topologyKey: "{{ .Values.controller.podAntiAffinityTopologyKey }}" 36 | labelSelector: 37 | matchLabels: 38 | app: {{ $fullName }}-controller 39 | {{- else if eq .Values.controller.podAntiAffinity "soft" }} 40 | podAntiAffinity: 41 | preferredDuringSchedulingIgnoredDuringExecution: 42 | - weight: 1 43 | podAffinityTerm: 44 | topologyKey: "{{ .Values.controller.podAntiAffinityTopologyKey }}" 45 | labelSelector: 46 | matchLabels: 47 | app: {{ $fullName }}-controller 48 | {{- end }} 49 | {{- end }} 50 | {{- if or .Values.controller.ssl.enabled .Values.satellite.ssl.enabled .Values.controller.initSettings.enabled }} 51 | initContainers: 52 | {{- if or .Values.controller.ssl.enabled .Values.satellite.ssl.enabled }} 53 | - name: load-certs 54 | {{- with .Values.controller.image }} 55 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 56 | imagePullPolicy: {{ .pullPolicy }} 57 | {{- end }} 58 | command: 59 | - /bin/bash 60 | - -xec 61 | - |- 62 | tmp="$(mktemp -u).p12" 63 | openssl pkcs12 -export -in /tls/controller/tls.crt -inkey /tls/controller/tls.key -out "$tmp" -name linstor-controller -passin 'pass:linstor' -passout 'pass:linstor' 64 | rm -f /config/ssl/keystore.jks /config/ssl/certificates.jks /config/ssl/trustore_client.jks 65 | keytool -importkeystore -srcstorepass linstor -deststorepass linstor -keypass linstor -srckeystore "$tmp" -destkeystore /config/ssl/keystore.jks 66 | {{- if .Values.controller.ssl.enabled }} 67 | keytool -importcert -noprompt -deststorepass linstor -keypass linstor -file /tls/client/ca.crt -alias linstor-ca -destkeystore /config/ssl/trustore_client.jks 68 | {{- end }} 69 | {{- if .Values.satellite.ssl.enabled }} 70 | keytool -importcert -noprompt -deststorepass linstor -keypass linstor -file /tls/satellite/tls.crt -alias 
linstor-satellite -destkeystore /config/ssl/certificates.jks 71 | {{- end }} 72 | rm -f "$tmp" 73 | volumeMounts: 74 | - name: config 75 | mountPath: /config 76 | - name: config-ssl 77 | mountPath: /config/ssl 78 | {{- if or .Values.controller.ssl.enabled .Values.satellite.ssl.enabled }} 79 | - name: controller-tls 80 | mountPath: /tls/controller 81 | {{- end }} 82 | {{- if .Values.controller.ssl.enabled }} 83 | - name: client-tls 84 | mountPath: /tls/client 85 | {{- end }} 86 | {{- if .Values.satellite.ssl.enabled }} 87 | - name: satellite-tls 88 | mountPath: /tls/satellite 89 | {{- end }} 90 | {{- end }} 91 | {{- if .Values.controller.initSettings.enabled }} 92 | - name: init-settings 93 | {{- with .Values.controller.image }} 94 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 95 | imagePullPolicy: {{ .pullPolicy }} 96 | {{- end }} 97 | command: 98 | - /bin/bash 99 | - -xec 100 | - |- 101 | echo "setcfgval namespace(netcom) key(PlainConnector/bindaddress) value({{ .Values.controller.initSettings.plainConnectorBindAddress }}) 102 | {{- if .Values.controller.initSettings.disableUserSecurity }} 103 | setSecLvl secLvl(NO_SECURITY){{ end }} 104 | shutdown" | /usr/share/linstor-server/bin/Controller -d --logs=/logs --config-directory=/config 105 | volumeMounts: 106 | - name: config 107 | mountPath: /config 108 | - name: logs 109 | mountPath: /logs 110 | {{- end }} 111 | {{- end }} 112 | containers: 113 | - name: linstor-controller 114 | {{- with .Values.controller.image }} 115 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 116 | imagePullPolicy: {{ .pullPolicy }} 117 | {{- end }} 118 | command: 119 | - /k8s-await-election 120 | - /usr/share/linstor-server/bin/Controller 121 | - --logs=/logs 122 | - --config-directory=/config 123 | livenessProbe: 124 | httpGet: 125 | path: / 126 | port: 9999 127 | scheme: HTTP 128 | ports: 129 | - name: restapi 130 | containerPort: {{ 
.Values.controller.port }} 131 | {{- if .Values.controller.ssl.enabled }} 132 | - name: ssl 133 | containerPort: {{ .Values.controller.ssl.port }} 134 | {{- end }} 135 | volumeMounts: 136 | - name: config 137 | mountPath: /config 138 | - name: client-config 139 | mountPath: /etc/linstor 140 | {{- if or .Values.controller.ssl.enabled .Values.satellite.ssl.enabled }} 141 | - name: config-ssl 142 | mountPath: /config/ssl 143 | readOnly: true 144 | {{- end }} 145 | {{- if .Values.controller.ssl.enabled }} 146 | - name: client-tls 147 | mountPath: /tls 148 | {{- end }} 149 | {{- if .Values.controller.db.tls }} 150 | - name: db-tls 151 | mountPath: /tls/db 152 | {{- end }} 153 | - name: logs 154 | mountPath: /logs 155 | env: 156 | - name: K8S_AWAIT_ELECTION_ENABLED 157 | value: "1" 158 | - name: K8S_AWAIT_ELECTION_NAME 159 | value: {{ $fullName }}-controller 160 | - name: K8S_AWAIT_ELECTION_LOCK_NAME 161 | value: {{ $fullName }}-controller 162 | - name: K8S_AWAIT_ELECTION_LOCK_NAMESPACE 163 | valueFrom: 164 | fieldRef: 165 | fieldPath: metadata.namespace 166 | - name: K8S_AWAIT_ELECTION_IDENTITY 167 | valueFrom: 168 | fieldRef: 169 | apiVersion: v1 170 | fieldPath: metadata.name 171 | - name: K8S_AWAIT_ELECTION_STATUS_ENDPOINT 172 | value: :9999 173 | - name: K8S_AWAIT_ELECTION_SERVICE_NAME 174 | value: {{ $fullName }}-controller 175 | - name: K8S_AWAIT_ELECTION_SERVICE_NAMESPACE 176 | valueFrom: 177 | fieldRef: 178 | fieldPath: metadata.namespace 179 | - name: K8S_AWAIT_ELECTION_SERVICE_PORTS_JSON 180 | value: '{{ with include (print $.Template.BasePath "/controller-service.yaml") . | fromYaml }}{{ .spec.ports | toJson }}{{ end }}' 181 | - name: K8S_AWAIT_ELECTION_POD_IP 182 | valueFrom: 183 | fieldRef: 184 | fieldPath: status.podIP 185 | serviceAccountName: {{ $fullName }}-controller 186 | priorityClassName: system-node-critical 187 | {{- with .Values.controller.nodeSelector }} 188 | nodeSelector: 189 | {{- toYaml . 
| nindent 8 }} 190 | {{- end }} 191 | {{- with .Values.controller.tolerations }} 192 | tolerations: 193 | {{- toYaml . | nindent 6 }} 194 | {{- end }} 195 | volumes: 196 | - name: config 197 | secret: 198 | secretName: {{ $fullName }}-controller 199 | - name: logs 200 | hostPath: 201 | path: /var/log/{{ $fullName }}-controller 202 | - name: client-config 203 | configMap: 204 | name: {{ $fullName }}-client 205 | {{- if or .Values.controller.ssl.enabled .Values.satellite.ssl.enabled }} 206 | - name: config-ssl 207 | emptyDir: {} 208 | {{- end }} 209 | {{- if or .Values.controller.ssl.enabled .Values.satellite.ssl.enabled }} 210 | - name: controller-tls 211 | secret: 212 | secretName: {{ $fullName }}-controller-tls 213 | {{- end }} 214 | {{- if .Values.controller.ssl.enabled }} 215 | - name: client-tls 216 | secret: 217 | secretName: {{ $fullName }}-client-tls 218 | {{- end }} 219 | {{- if .Values.satellite.ssl.enabled }} 220 | - name: satellite-tls 221 | secret: 222 | secretName: {{ $fullName }}-satellite-tls 223 | {{- end }} 224 | {{- if .Values.controller.db.tls }} 225 | - name: db-tls 226 | secret: 227 | secretName: {{ $fullName }}-db-tls 228 | {{- end }} 229 | {{- end }} 230 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kube-Linstor 2 | 3 | Containerized Linstor Storage easy to run in your Kubernetes cluster. 4 | 5 | > :warning: **This project currently unmaintained** 6 | > 7 | > I decided join forces over creation the union solution for running LINSTOR on Kubenretes, and contribute missing features of this chart to upstream project: [piraeus-operator](https://github.com/piraeusdatastore/piraeus-operator). 8 | > 9 | > If you want to upgrade, you can easily switch between them, see https://github.com/kvaps/kube-linstor/issues/53#issuecomment-1028006151 for more details. 
10 | > 11 | > Dockerfiles continue to be maintained in the [Deckhouse](https://github.com/deckhouse/deckhouse/tree/main/modules/031-linstor/images) project. 12 | 13 | ## Images 14 | 15 | | Image | Latest Tag | 16 | |:---------------------------------|:-----------------------------------------------------------------------------------------| 17 | | **[linstor-controller]** | [![linstor-controller-version]](https://hub.docker.com/r/kvaps/linstor-controller) | 18 | | **[linstor-satellite]** | [![linstor-satellite-version]](https://hub.docker.com/r/kvaps/linstor-satellite) | 19 | | **[linstor-csi]** | [![linstor-csi-version]](https://hub.docker.com/r/kvaps/linstor-csi) | 20 | | **[linstor-stork]** | [![linstor-stork-version]](https://hub.docker.com/r/kvaps/linstor-stork) | 21 | | **[linstor-ha-controller]** | [![linstor-ha-controller-version]](https://hub.docker.com/r/kvaps/linstor-ha-controller) | 22 | 23 | [linstor-controller]: dockerfiles/linstor-controller/Dockerfile 24 | [linstor-controller-version]: https://img.shields.io/docker/v/kvaps/linstor-controller.svg?sort=semver 25 | [linstor-satellite]: dockerfiles/linstor-satellite/Dockerfile 26 | [linstor-satellite-version]: https://img.shields.io/docker/v/kvaps/linstor-satellite.svg?sort=semver 27 | [linstor-csi]: dockerfiles/linstor-csi/Dockerfile 28 | [linstor-csi-version]: https://img.shields.io/docker/v/kvaps/linstor-csi.svg?sort=semver 29 | [linstor-stork]: dockerfiles/linstor-stork/Dockerfile 30 | [linstor-stork-version]: https://img.shields.io/docker/v/kvaps/linstor-stork.svg?sort=semver 31 | [linstor-ha-controller]: dockerfiles/linstor-ha-controller/Dockerfile 32 | [linstor-ha-controller-version]: https://img.shields.io/docker/v/kvaps/linstor-ha-controller.svg?sort=semver 33 | 34 | ## Requirements 35 | 36 | * Working Kubernetes cluster (`v1.18` or higher). 37 | * DRBD9 kernel module installed on each satellite node. 38 | * PostgreSQL database / etcd or any other backing store for redundancy.
39 | * [Snapshot Controller](https://kubernetes-csi.github.io/docs/snapshot-controller.html#snapshot-controller) (optional) 40 | 41 | ## QuickStart 42 | 43 | Kube-Linstor consists of several components: 44 | 45 | * **Linstor-controller** - Controller is the main control point for Linstor. It provides an API for clients and communicates with satellites for creating and monitoring DRBD-devices. 46 | * **Linstor-satellite** - Satellites run on every node. They listen and perform controller tasks, and operate directly with LVM and ZFS subsystems. 47 | * **Linstor-csi** - CSI driver provides compatibility level for adding Linstor support for Kubernetes. 48 | * **Linstor-stork** - Stork is a scheduler extender plugin for Kubernetes which allows a storage driver to give the Kubernetes scheduler hints about where to place a new pod so that it is optimally located for storage performance. 49 | 50 | #### Preparation 51 | 52 | [Install Helm](https://helm.sh/docs/intro/). 53 | 54 | > **_NOTE:_** 55 | > Commands below are provided for Helm v3, but Helm v2 is also supported. 56 | > You can use `helm template` instead of `helm install`, this works as well. 57 | 58 | Create `linstor` namespace. 59 | ``` 60 | kubectl create ns linstor 61 | ``` 62 | 63 | Install Helm repository: 64 | ``` 65 | helm repo add kvaps https://kvaps.github.io/charts 66 | ``` 67 | 68 | #### Database 69 | 70 | * Install [stolon](https://github.com/kvaps/stolon-chart) chart: 71 | 72 | ```bash 73 | # download example values 74 | curl -LO https://github.com/kvaps/kube-linstor/raw/v1.14.0/examples/linstor-db.yaml 75 | 76 | # install release 77 | helm install linstor-db kvaps/stolon \ 78 | --namespace linstor \ 79 | -f linstor-db.yaml 80 | ``` 81 | 82 | > **_NOTE:_** 83 | > The current example will deploy a stolon cluster on your Kubernetes-master nodes 84 | 85 | > **_NOTE:_** 86 | > In case of updating your stolon, add the `--set job.autoCreateCluster=false` flag to avoid reinitializing your cluster.
87 | 88 | * Create Persistent Volumes: 89 | ```bash 90 | helm install data-linstor-db-stolon-keeper-0 kvaps/pv-hostpath \ 91 | --namespace linstor \ 92 | --set path=/var/lib/linstor-db \ 93 | --set node=node1 94 | 95 | helm install data-linstor-db-stolon-keeper-1 kvaps/pv-hostpath \ 96 | --namespace linstor \ 97 | --set path=/var/lib/linstor-db \ 98 | --set node=node2 99 | 100 | helm install data-linstor-db-stolon-keeper-2 kvaps/pv-hostpath \ 101 | --namespace linstor \ 102 | --set path=/var/lib/linstor-db \ 103 | --set node=node3 104 | ``` 105 | 106 | Parameters `name` and `namespace` **must match** the PVC's name and namespace of your database, `node` should match exact node name. 107 | 108 | Check your PVC/PV list after creation, if everything right, they should obtain **Bound** status. 109 | 110 | * Connect to database: 111 | ```bash 112 | kubectl exec -ti -n linstor sts/linstor-db-stolon-keeper -- bash 113 | PGPASSWORD=$(cat $STKEEPER_PG_SU_PASSWORDFILE) psql -h linstor-db-stolon-proxy -U stolon postgres 114 | ``` 115 | 116 | * Create user and database for linstor: 117 | ```bash 118 | CREATE DATABASE linstor; 119 | CREATE USER linstor WITH PASSWORD 'hackme'; 120 | GRANT ALL PRIVILEGES ON DATABASE linstor TO linstor; 121 | ``` 122 | 123 | #### Linstor 124 | 125 | * Install kube-linstor chart: 126 | 127 | ```bash 128 | # download example values 129 | curl -LO https://github.com/kvaps/kube-linstor/raw/v1.14.0/examples/linstor.yaml 130 | 131 | # install release 132 | helm install linstor kvaps/linstor --version 1.14.0 \ 133 | --namespace linstor \ 134 | -f linstor.yaml 135 | ``` 136 | 137 | > **_NOTE:_** 138 | > The current example will deploy linstor- and csi-controllers on your Kubernetes-master nodes and satellites on all nodes in the cluster. 
139 | 140 | 141 | ## Install snapshot-controller 142 | 143 | https://kubernetes-csi.github.io/docs/snapshot-controller.html#deployment 144 | 145 | ## Usage 146 | 147 | The satellite nodes will register themselves on controller automatically by init-container. 148 | 149 | You can get interactive linstor shell by simple exec into **linstor-controller** pod: 150 | 151 | ```bash 152 | kubectl exec -ti -n linstor deploy/linstor-controller -- linstor interactive 153 | ``` 154 | 155 | Refer to [official linstor documentation](https://docs.linbit.com/docs/linstor-guide/) to define ***storage pools*** on them and configure ***resource groups***. 156 | 157 | #### SSL notes 158 | 159 | This chart enables SSL encryption for control-plane by default. It does not affect the DRBD performance but makes your LINSTOR setup more secure. 160 | 161 | If you want to have external access, you need to download certificates for linstor client: 162 | 163 | ```bash 164 | kubectl get secrets --namespace linstor linstor-client-tls \ 165 | -o go-template='{{ range $k, $v := .data }}{{ $v | base64decode }}{{ end }}' 166 | ``` 167 | 168 | Then follow [official linstor documentation](https://www.linbit.com/drbd-user-guide/users-guide-linstor/#s-rest-api-https-restricted-client) to configure the client. 169 | 170 | ## Additional Information 171 | 172 | * [Perform backups and database management](docs/BACKUP.md) 173 | * [Upgrade notes](docs/UPGRADE.md) 174 | 175 | ## How Kube-Linstor compares to other DRBD-on-Kubernetes solutions 176 | 177 | ### Piraeus Operator 178 | 179 | [Piraeus Operator][piraeus-operator] is the operator that powers [Piraeus][piraeus], [LINBIT][linbit]'s official Software Defined Storage (SDS) solution for Kubernetes. 
The dependencies of Kube-Linstor and Piraeus Operator are mostly shared, as both projects aim to create and administer LINSTOR clusters, but there are some differences in methodology and features: 180 | 181 | - Kube-Linstor aims to be simple to operate, with less built-in logic for more straight-forward administration. To achieve this goal Kube-Linstor installs via a simple Helm chart, and installs primarily Kubernetes-native resources (Deployments, DaemonSets, etc). 182 | - Piraeus Operator relies heavily on a [Custom Resource Definition][k8s-crd]-driven approach to bootstrapping pieces of infrastructure like the [Linstor-Server][linstor-server] (satellites, etc) itself. With Piraeus Operator you create CRDs that manage the creation of Kubernetes-native resources 183 | - Kube-Linstor directly contains the Deployments, DaemonSets and other Kubernetes-native resources as necessary 184 | - Both Piraeus Operator and Kube-Linstor offer automatic configuration of nodes, storage pools and other LINSTOR-related resources. Where Piraeus Operator accomplishes this with CRDs, Kube-Linstor uses a simple shell script with template helpers integrated into the Helm chart 185 | - Piraeus Operator offers automatic DRBD9 Kernel Module Injection Image. Kube-Linstor expects the DRBD9 kernel module to be pre-installed on all nodes.
186 | 187 | [piraeus-operator]: https://github.com/piraeusdatastore/piraeus-operator 188 | [piraeus]: https://piraeus.io/ 189 | [linstor-server]: https://github.com/LINBIT/linstor-server 190 | [k8s-crd]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/ 191 | [linbit]: https://linbit.com 192 | 193 | ## Licenses 194 | 195 | * **[This project](LICENSE)** under **Apache License** 196 | * **[linstor-server]**, **[drbd]** and **[drbd-utils]** is **GPL** licensed by LINBIT 197 | * **[linstor-csi]** under **Apache License** by LINBIT 198 | * **[stork]** under **Apache License** 199 | 200 | [linstor-server]: https://github.com/LINBIT/linstor-server/blob/master/COPYING 201 | [drbd]: https://github.com/LINBIT/drbd-9.0/blob/master/COPY 202 | [drbd-utils]: https://github.com/LINBIT/drbd-utils/blob/master/COPYING 203 | [linstor-csi]: https://github.com/piraeusdatastore/linstor-csi/blob/master/LICENSE 204 | [stork]: https://github.com/libopenstorage/stork/blob/master/LICENSE 205 | -------------------------------------------------------------------------------- /helm/kube-linstor/values.yaml: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------------ 2 | # Linstor-controller is main control point for Linstor, it provides API for 3 | # clients and communicates with satellites for creating and monitor DRBD-devices 4 | # ------------------------------------------------------------------------------ 5 | 6 | controller: 7 | enabled: true 8 | image: 9 | repository: ghcr.io/kvaps/linstor-controller 10 | tag: v1.14.0 11 | pullPolicy: IfNotPresent 12 | pullSecrets: 13 | - name: regsecret 14 | 15 | replicaCount: 2 16 | podAntiAffinity: soft 17 | podAntiAffinityTopologyKey: kubernetes.io/hostname 18 | affinity: {} 19 | 20 | port: 3370 21 | ssl: 22 | enabled: true 23 | # Certificates generation method: 24 | method: helm 25 | port: 3371 26 | 27 | 
#hostNetwork: true 28 | 29 | service: 30 | labels: {} 31 | annotations: 32 | prometheus.io/path: "/metrics?error_reports=false" 33 | prometheus.io/port: "3370" 34 | prometheus.io/scrape: "true" 35 | 36 | # nodeSelector: 37 | # node-role.kubernetes.io/master: "" 38 | 39 | # tolerations: 40 | # - key: node-role.kubernetes.io/master 41 | # operator: Exists 42 | # effect: NoSchedule 43 | 44 | initSettings: 45 | enabled: false 46 | # Set plain connector listen to localhost 47 | plainConnectorBindAddress: "127.0.0.1" 48 | # Disable user security (required for setting global options) 49 | disableUserSecurity: true 50 | 51 | # Database config 52 | db: 53 | user: linstor 54 | password: linstor 55 | connectionUrl: jdbc:h2:/data/linstordb 56 | 57 | # PostgreSQL example 58 | # db: 59 | # user: linstor 60 | # password: linstor 61 | # connectionUrl: jdbc:postgresql://linstor-db-stolon-proxy/linstor 62 | 63 | # Etcd example 64 | # db: 65 | # connectionUrl: etcd://node1:2379,node2:2379 66 | # tls: true 67 | # cert: | 68 | # -----BEGIN CERTIFICATE----- 69 | # MIIC2DCCAcCgAwIBAgIBATANBgkqh ... 70 | # key: | 71 | # -----BEGIN RSA PRIVATE KEY----- 72 | # MIIEpAIBAAKCAQEA0xRXCs7WUhmHZ ... 73 | # ca: | 74 | # -----BEGIN CERTIFICATE----- 75 | # MIIDDzCCAfegAwIBAgIRAKTcgKx3g ... 
76 | # etcdPrefix: "/LINSTOR/" 77 | 78 | # ------------------------------------------------------------------------------ 79 | # Linstor-satellites run on every node, they listen and perform controller tasks 80 | # They operate directly with LVM and ZFS subsystems 81 | # ------------------------------------------------------------------------------ 82 | 83 | satellite: 84 | enabled: true 85 | image: 86 | repository: ghcr.io/kvaps/linstor-satellite 87 | tag: v1.14.0 88 | pullPolicy: IfNotPresent 89 | pullSecrets: 90 | - name: regsecret 91 | 92 | port: 3366 93 | ssl: 94 | enabled: true 95 | # Certificates generation method: helm/cert-manager 96 | method: helm 97 | port: 3367 98 | 99 | # Overwrite drbd.conf and global_common.conf files. This option will enable 100 | # usage-count=no and udev-always-use-vnr options by default 101 | overwriteDrbdConf: true 102 | 103 | # How many nodes can simultaneously download new image 104 | update: 105 | maxUnavailable: 40 106 | 107 | # ------------------------------------------------------------------------------ 108 | # drbd-reactor is an optional component that performs DRBD events processing. 109 | # It serves a prometheus compatible endpoint and exposes various DRBD metrics. 110 | # ------------------------------------------------------------------------------ 111 | reactor: 112 | enabled: true 113 | port: 9942 114 | pollInterval: 60 115 | service: 116 | labels: {} 117 | annotations: 118 | prometheus.io/path: "/" 119 | prometheus.io/port: "9942" 120 | prometheus.io/scrape: "true" 121 | 122 | # nodeSelector: 123 | # linstor-satellite: "" 124 | 125 | # tolerations: 126 | # - key: node-role.kubernetes.io/master 127 | # operator: Exists 128 | # effect: NoSchedule 129 | 130 | # ------------------------------------------------------------------------------ 131 | # Linstor configuration script parameters. It enables automatic joining of nodes to 132 | # the cluster and allows you to declaratively configure basic linstor entities.
133 | # ------------------------------------------------------------------------------ 134 | configurator: 135 | enabled: true 136 | 137 | # Join the nodes automatically at init 138 | autoJoinNodes: true 139 | 140 | #controller: 141 | # props: 142 | # DrbdOptions/Net/csums-alg: crc32 143 | # resourceGroups: 144 | # - name: DfltRscGrp 145 | # props: {} 146 | # volumeGroups: 147 | # - volumeNumber: 0 148 | # props: {} 149 | # selectFilter: 150 | # PlaceCount: 2 151 | # StoragePoolList: ["lvm-thin"] 152 | # LayerStack: 153 | # - DRBD 154 | # - STORAGE 155 | 156 | #nodes: 157 | #- regex: ".*" # Regular expression to match node names 158 | # props: 159 | # Aux/aaa: bbb 160 | # interfaces: 161 | # - name: data 162 | # ip: 10.29.0.0/16 # specify subnet for additional interface on node 163 | # storagePools: 164 | # - name: DfltDisklessStorPool 165 | # providerKind: DISKLESS 166 | # props: 167 | # PrefNic: data 168 | # - name: lvm-thick 169 | # providerKind: LVM 170 | # props: 171 | # StorDriver/LvmVg: drbdpool 172 | # - name: lvm-thin 173 | # providerKind: LVM_THIN 174 | # props: 175 | # StorDriver/LvmVg: drbdpool 176 | # StorDriver/ThinPool: thinpool 177 | # - name: my-linstor-zpool 178 | # providerKind: ZFS 179 | # props: 180 | # StorDriver/ZPool: for-linstor 181 | # - name: my-linstor-zpool-thin 182 | # providerKind: ZFS_THIN 183 | # props: 184 | # StorDriver/ZPoolThin: for-linstor 185 | 186 | # ------------------------------------------------------------------------------ 187 | # Linstor CSI driver provides compatibility level for adding Linstor support 188 | # for Kubernetes 189 | # ------------------------------------------------------------------------------ 190 | 191 | csi: 192 | enabled: true 193 | image: 194 | pullSecrets: 195 | - name: regsecret 196 | linstorCsiPlugin: 197 | repository: ghcr.io/kvaps/linstor-csi 198 | tag: v1.14.0 199 | pullPolicy: IfNotPresent 200 | csiProvisioner: 201 | repository: k8s.gcr.io/sig-storage/csi-provisioner 202 | tag: v2.2.2 203 | 
pullPolicy: IfNotPresent 204 | csiAttacher: 205 | repository: k8s.gcr.io/sig-storage/csi-attacher 206 | tag: v3.2.1 207 | pullPolicy: IfNotPresent 208 | csiResizer: 209 | repository: k8s.gcr.io/sig-storage/csi-resizer 210 | tag: v1.2.0 211 | pullPolicy: IfNotPresent 212 | csiSnapshotter: 213 | repository: k8s.gcr.io/sig-storage/csi-snapshotter 214 | tag: v4.1.1 215 | pullPolicy: IfNotPresent 216 | csiNodeDriverRegistrar: 217 | repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar 218 | tag: v2.2.0 219 | pullPolicy: IfNotPresent 220 | csiLivenessProbe: 221 | repository: k8s.gcr.io/sig-storage/livenessprobe 222 | tag: v2.3.0 223 | pullPolicy: IfNotPresent 224 | 225 | controller: 226 | replicaCount: 2 227 | podAntiAffinity: soft 228 | podAntiAffinityTopologyKey: kubernetes.io/hostname 229 | affinity: {} 230 | 231 | csiProvisioner: 232 | topology: false 233 | 234 | # nodeSelector: 235 | # node-role.kubernetes.io/master: "" 236 | 237 | # tolerations: 238 | # - key: node-role.kubernetes.io/master 239 | # operator: Exists 240 | # effect: NoSchedule 241 | 242 | node: {} 243 | # nodeSelector: 244 | # linstor-satellite: "" 245 | 246 | # tolerations: 247 | # - key: node-role.kubernetes.io/master 248 | # operator: Exists 249 | # effect: NoSchedule 250 | 251 | haController: 252 | enabled: true 253 | image: 254 | repository: ghcr.io/kvaps/linstor-ha-controller 255 | tag: v1.14.0 256 | pullPolicy: IfNotPresent 257 | pullSecrets: 258 | - name: regsecret 259 | 260 | replicaCount: 2 261 | podAntiAffinity: soft 262 | podAntiAffinityTopologyKey: kubernetes.io/hostname 263 | affinity: {} 264 | 265 | # nodeSelector: 266 | # node-role.kubernetes.io/master: "" 267 | 268 | # tolerations: 269 | # - key: node-role.kubernetes.io/master 270 | # operator: Exists 271 | # effect: NoSchedule 272 | 273 | # ------------------------------------------------------------------------------ 274 | # Stork is a scheduler extender plugin for Kubernetes which allows a storage 275 | # driver to give the 
Kubernetes scheduler hints about where to place a new pod 276 | # so that it is optimally located for storage performance 277 | # ------------------------------------------------------------------------------ 278 | 279 | stork: 280 | enabled: true 281 | image: 282 | repository: ghcr.io/kvaps/linstor-stork 283 | tag: v1.14.0 284 | pullPolicy: IfNotPresent 285 | pullSecrets: 286 | - name: regsecret 287 | 288 | replicaCount: 2 289 | podAntiAffinity: soft 290 | podAntiAffinityTopologyKey: kubernetes.io/hostname 291 | affinity: {} 292 | 293 | service: 294 | labels: {} 295 | annotations: {} 296 | 297 | # nodeSelector: 298 | # node-role.kubernetes.io/master: "" 299 | 300 | # tolerations: 301 | # - key: node-role.kubernetes.io/master 302 | # operator: Exists 303 | # effect: NoSchedule 304 | 305 | storkScheduler: 306 | enabled: true 307 | image: 308 | repository: k8s.gcr.io/kube-scheduler 309 | tag: v1.20.6 310 | pullPolicy: IfNotPresent 311 | pullSecrets: 312 | - name: regsecret 313 | 314 | replicaCount: 2 315 | podAntiAffinity: soft 316 | podAntiAffinityTopologyKey: kubernetes.io/hostname 317 | affinity: {} 318 | 319 | # nodeSelector: 320 | # node-role.kubernetes.io/master: "" 321 | 322 | # tolerations: 323 | # - key: node-role.kubernetes.io/master 324 | # operator: Exists 325 | # effect: NoSchedule 326 | 327 | # ------------------------------------------------------------------------------ 328 | # Specify if a Pod Security Policy for linstor components must be created 329 | # Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ 330 | # ------------------------------------------------------------------------------ 331 | 332 | podSecurityPolicy: 333 | enabled: false 334 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR 
USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /helm/kube-linstor/templates/satellite-daemonset.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "linstor.fullname" . -}} 2 | {{- if .Values.satellite.enabled }} 3 | --- 4 | apiVersion: apps/v1 5 | kind: DaemonSet 6 | metadata: 7 | name: {{ $fullName }}-satellite 8 | namespace: {{ .Release.Namespace }} 9 | spec: 10 | selector: 11 | matchLabels: 12 | app: {{ $fullName }}-satellite 13 | template: 14 | metadata: 15 | labels: 16 | app: {{ $fullName }}-satellite 17 | annotations: 18 | checksum/config: {{ include (print $.Template.BasePath "/satellite-configmap.yaml") . | sha256sum }} 19 | {{- if .Values.configurator.enabled }} 20 | checksum/scripts: {{ printf "%s\n%s" (.Files.Get "scripts/functions.sh") (tpl (.Files.Get "scripts/configurator.satellite") .) 
| sha256sum }} 21 | {{- end }} 22 | spec: 23 | {{- if or .Values.satellite.ssl.enabled .Values.satellite.overwriteDrbdConf }} 24 | initContainers: 25 | {{- if .Values.satellite.ssl.enabled }} 26 | - name: load-certs 27 | {{- with .Values.satellite.image }} 28 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 29 | imagePullPolicy: {{ .pullPolicy }} 30 | {{- end }} 31 | command: 32 | - /bin/sh 33 | - -exc 34 | - | 35 | tmp="$(mktemp -u).p12" 36 | openssl pkcs12 -export -in /tls/satellite/tls.crt -inkey /tls/satellite/tls.key -out "$tmp" -name linstor-satellite -passin 'pass:linstor' -passout 'pass:linstor' 37 | rm -f /config/ssl/keystore.jks /config/ssl/certificates.jks 38 | keytool -importkeystore -srcstorepass linstor -deststorepass linstor -keypass linstor -srckeystore "$tmp" -destkeystore /config/ssl/keystore.jks 39 | keytool -importcert -noprompt -deststorepass linstor -keypass linstor -file /tls/controller/ca.crt -alias linstor-ca -destkeystore /config/ssl/certificates.jks 40 | keytool -importcert -noprompt -deststorepass linstor -keypass linstor -file /tls/controller/tls.crt -alias linstor-controller -destkeystore /config/ssl/certificates.jks 41 | rm -f "$tmp" 42 | volumeMounts: 43 | - name: config-ssl 44 | mountPath: /config/ssl 45 | - name: controller-tls 46 | mountPath: /tls/controller 47 | - name: satellite-tls 48 | mountPath: /tls/satellite 49 | {{- end }} 50 | {{- if .Values.satellite.overwriteDrbdConf }} 51 | - command: 52 | - /bin/sh 53 | - -exc 54 | - | 55 | cat /config/drbd.conf > /etc/drbd.conf 56 | cp -f /config/global_common.conf /etc/drbd.d/global_common.conf 57 | {{- with .Values.satellite.image }} 58 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 59 | imagePullPolicy: {{ .pullPolicy }} 60 | {{- end }} 61 | name: load-drbd-conf 62 | securityContext: 63 | privileged: true 64 | volumeMounts: 65 | - name: config 66 | mountPath: /config 67 | - name: etc-drbd-conf 68 | 
mountPath: /etc/drbd.conf 69 | - name: etc-drbd-d 70 | mountPath: /etc/drbd.d 71 | - name: usr-local-sbin 72 | mountPath: /host-bin 73 | - name: logs 74 | mountPath: /logs 75 | {{- end }} 76 | {{- end }} 77 | containers: 78 | - name: linstor-satellite 79 | {{- with .Values.satellite.image }} 80 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 81 | imagePullPolicy: {{ .pullPolicy }} 82 | {{- end }} 83 | securityContext: 84 | privileged: true 85 | ports: 86 | {{- if not .Values.satellite.ssl.enabled }} 87 | - containerPort: {{ .Values.satellite.port }} 88 | hostPort: {{ .Values.satellite.port }} 89 | protocol: TCP 90 | {{- else }} 91 | - containerPort: {{ .Values.satellite.ssl.port }} 92 | hostPort: {{ .Values.satellite.ssl.port }} 93 | protocol: TCP 94 | {{- end }} 95 | readinessProbe: 96 | tcpSocket: 97 | {{- if not .Values.satellite.ssl.enabled }} 98 | port: {{ .Values.satellite.port }} 99 | {{- else }} 100 | port: {{ .Values.satellite.ssl.port }} 101 | {{- end }} 102 | periodSeconds: 10 103 | initialDelaySeconds: 5 104 | volumeMounts: 105 | - name: config 106 | mountPath: /config 107 | {{- if .Values.satellite.ssl.enabled }} 108 | - name: config-ssl 109 | mountPath: /config/ssl 110 | readOnly: true 111 | {{- end }} 112 | - name: etc-drbd-conf 113 | mountPath: /etc/drbd.conf 114 | - name: etc-drbd-d 115 | mountPath: /etc/drbd.d 116 | - name: var-lib-drbd 117 | mountPath: /var/lib/drbd 118 | - name: var-lib-linstor 119 | mountPath: /var/lib/linstor 120 | - name: var-lib-linstor-d 121 | mountPath: /var/lib/linstor.d 122 | - name: lib-modules 123 | mountPath: /lib/modules 124 | - name: dev 125 | mountPath: /dev 126 | - name: etc-lvm 127 | mountPath: /etc/lvm 128 | - name: run-lock-lvm 129 | mountPath: /run/lock/lvm 130 | - name: logs 131 | mountPath: /logs 132 | - name: run-lvm 133 | mountPath: /run/lvm 134 | - name: run-udev 135 | mountPath: /run/udev 136 | {{- if .Values.reactor.enabled }} 137 | - name: drbd-reactor 138 | {{- 
with .Values.satellite.image }} 139 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 140 | imagePullPolicy: {{ .pullPolicy }} 141 | {{- end }} 142 | securityContext: 143 | privileged: true 144 | ports: 145 | - containerPort: {{ .Values.reactor.port }} 146 | name: metrics 147 | protocol: TCP 148 | command: 149 | - /usr/sbin/drbd-reactor 150 | volumeMounts: 151 | - name: drbd-reactor-config 152 | mountPath: /etc/drbd-reactor.toml 153 | subPath: drbd-reactor.toml 154 | {{- end }} 155 | - name: configurator 156 | {{- with .Values.satellite.image }} 157 | image: "{{ .repository }}{{ if .digest }}@{{ .digest }}{{ else }}:{{ .tag }}{{ end }}" 158 | imagePullPolicy: {{ .pullPolicy }} 159 | {{- end }} 160 | command: 161 | - /bin/bash 162 | - /scripts/configurator.node 163 | livenessProbe: 164 | exec: 165 | command: 166 | - sh 167 | - -c 168 | - | 169 | cn() { curl -sS {{ if .Values.controller.ssl.enabled }}--cacert /tls/client/ca.crt --cert /tls/client/tls.crt --key /tls/client/tls.key{{ end }} -X $1 $LS_CONTROLLERS/v1/nodes/$NODE_NAME$2; } 170 | ! 
cn GET | grep -q '"connection_status":"OFFLINE"' || cn PUT /reconnect 171 | initialDelaySeconds: 30 172 | periodSeconds: 10 173 | successThreshold: 1 174 | failureThreshold: 10 175 | env: 176 | - name: LS_CONTROLLERS 177 | {{- if not .Values.controller.ssl.enabled }} 178 | value: http://{{ $fullName }}-controller:{{ .Values.controller.port }} 179 | {{- else }} 180 | value: https://{{ $fullName }}-controller:{{ .Values.controller.ssl.port }} 181 | {{- end }} 182 | {{- if .Values.satellite.ssl.enabled }} 183 | - name: NODE_PORT 184 | value: "{{ .Values.satellite.ssl.port }}" 185 | - name: NODE_ENCRYPTION_TYPE 186 | value: "ssl" 187 | {{- else }} 188 | - name: NODE_PORT 189 | value: "{{ .Values.satellite.port }}" 190 | - name: NODE_ENCRYPTION_TYPE 191 | value: "Plain" 192 | {{- end }} 193 | - name: NODE_NAME 194 | valueFrom: 195 | fieldRef: 196 | fieldPath: spec.nodeName 197 | - name: NODE_IP 198 | valueFrom: 199 | fieldRef: 200 | fieldPath: status.podIP 201 | securityContext: 202 | privileged: true 203 | volumeMounts: 204 | - name: scripts 205 | mountPath: /scripts 206 | {{- if .Values.controller.ssl.enabled }} 207 | - name: client-tls 208 | mountPath: /tls/client 209 | {{- end }} 210 | - name: config 211 | mountPath: /config 212 | {{- if .Values.satellite.ssl.enabled }} 213 | - name: config-ssl 214 | mountPath: /config/ssl 215 | readOnly: true 216 | {{- end }} 217 | {{- if .Values.configurator.enabled }} 218 | - name: etc-drbd-conf 219 | mountPath: /etc/drbd.conf 220 | - name: etc-drbd-d 221 | mountPath: /etc/drbd.d 222 | - name: var-lib-drbd 223 | mountPath: /var/lib/drbd 224 | - name: var-lib-linstor 225 | mountPath: /var/lib/linstor 226 | - name: var-lib-linstor-d 227 | mountPath: /var/lib/linstor.d 228 | - name: lib-modules 229 | mountPath: /lib/modules 230 | - name: dev 231 | mountPath: /dev 232 | - name: etc-lvm 233 | mountPath: /etc/lvm 234 | - name: run-lock-lvm 235 | mountPath: /run/lock/lvm 236 | - name: logs 237 | mountPath: /logs 238 | - name: run-lvm 
239 | mountPath: /run/lvm 240 | - name: run-udev 241 | mountPath: /run/udev 242 | {{- end }} 243 | hostIPC: true 244 | hostNetwork: true 245 | hostPID: true 246 | dnsPolicy: ClusterFirstWithHostNet 247 | imagePullSecrets: 248 | {{- toYaml .Values.satellite.image.pullSecrets | nindent 8 }} 249 | {{- if .Values.podSecurityPolicy.enabled }} 250 | serviceAccountName: {{ $fullName }}-satellite-sa 251 | {{- end }} 252 | priorityClassName: system-node-critical 253 | {{- with .Values.satellite.nodeSelector }} 254 | nodeSelector: 255 | {{- toYaml . | nindent 8 }} 256 | {{- end }} 257 | {{- with .Values.satellite.tolerations }} 258 | tolerations: 259 | {{- toYaml . | nindent 6 }} 260 | {{- end }} 261 | volumes: 262 | - name: etc-drbd-conf 263 | hostPath: 264 | path: /etc/drbd.conf 265 | type: FileOrCreate 266 | - name: etc-drbd-d 267 | hostPath: 268 | path: /etc/drbd.d 269 | - name: var-lib-drbd 270 | hostPath: 271 | path: /var/lib/drbd 272 | - name: var-lib-linstor 273 | hostPath: 274 | path: /var/lib/linstor 275 | - name: var-lib-linstor-d 276 | hostPath: 277 | path: /var/lib/linstor.d 278 | - name: lib-modules 279 | hostPath: 280 | path: /lib/modules 281 | - name: usr-local-sbin 282 | hostPath: 283 | path: /usr/local/sbin 284 | - name: dev 285 | hostPath: 286 | path: /dev 287 | - name: etc-lvm 288 | hostPath: 289 | path: /etc/lvm 290 | - name: run-lock-lvm 291 | hostPath: 292 | path: /run/lock/lvm 293 | - name: config 294 | configMap: 295 | name: {{ $fullName }}-satellite 296 | {{- if .Values.satellite.ssl.enabled }} 297 | - name: config-ssl 298 | emptyDir: {} 299 | - name: controller-tls 300 | secret: 301 | secretName: {{ $fullName }}-controller-tls 302 | - name: satellite-tls 303 | secret: 304 | secretName: {{ $fullName }}-satellite-tls 305 | {{- end }} 306 | {{- if .Values.configurator.enabled }} 307 | - name: scripts 308 | configMap: 309 | name: {{ $fullName }}-configurator 310 | {{- end }} 311 | {{- if and .Values.controller.ssl.enabled .Values.configurator.enabled 
}} 312 | - name: client-tls 313 | secret: 314 | secretName: {{ $fullName }}-client-tls 315 | {{- end }} 316 | - name: logs 317 | hostPath: 318 | path: /var/log/{{ $fullName }}-satellite 319 | - name: run-lvm 320 | hostPath: 321 | path: /run/lvm 322 | - name: run-udev 323 | hostPath: 324 | path: /run/udev 325 | {{- if .Values.reactor.enabled }} 326 | - name: drbd-reactor-config 327 | configMap: 328 | name: {{ $fullName }}-drbd-reactor 329 | {{- end}} 330 | updateStrategy: 331 | rollingUpdate: 332 | maxUnavailable: {{ .Values.satellite.update.maxUnavailable }} 333 | type: RollingUpdate 334 | {{- end }} 335 | --------------------------------------------------------------------------------