├── tests
│   ├── __init__.py
│   ├── run
│   │   ├── __init__.py
│   │   └── smoke.py
│   ├── steps
│   │   ├── __init__.py
│   │   ├── minikube.py
│   │   ├── system.py
│   │   ├── helm.py
│   │   ├── users.py
│   │   ├── kubernetes.py
│   │   └── deployment.py
│   ├── helpers
│   │   ├── __init__.py
│   │   └── argparser.py
│   ├── scenarios
│   │   ├── __init__.py
│   │   └── smoke.py
│   ├── requirements
│   │   ├── __init__.py
│   │   └── helm.md
│   ├── requirements.txt
│   └── fixtures
│       ├── 01-minimal-single-node.yaml
│       ├── upgrade
│       │   ├── initial.yaml
│       │   └── upgrade.yaml
│       ├── 05-persistence-disabled.yaml
│       ├── 04-external-keeper.yaml
│       ├── 07-eks-io-optimized.yaml
│       ├── 03-sharded-advanced.yaml
│       ├── 02-replicated-with-users.yaml
│       └── 06-eks-multi-zone-production.yaml
├── charts
│   ├── clickhouse
│   │   ├── .gitignore
│   │   ├── examples
│   │   │   ├── values-simple.yaml
│   │   │   ├── values-existing-keeper.yaml
│   │   │   ├── values-production.yaml
│   │   │   ├── values-production-2zones-3replicas.yaml
│   │   │   ├── values-production-2zones-3replicas-mini.yaml
│   │   │   └── values-production-eks.yaml
│   │   ├── Chart.lock
│   │   ├── templates
│   │   │   ├── credentials.yaml
│   │   │   ├── serviceaccount.yaml
│   │   │   ├── NOTES.txt
│   │   │   ├── chk.yaml
│   │   │   ├── chi.yaml
│   │   │   └── _helpers.tpl
│   │   ├── Chart.yaml
│   │   ├── values.yaml
│   │   ├── values.schema.json
│   │   └── README.md
│   └── clickhouse-eks
│       ├── templates
│       │   ├── credentials.yaml
│       │   └── chi.yaml
│       ├── Chart.yaml
│       ├── values.yaml
│       └── README.md
├── .gitignore
├── .helmdocsignore
├── .envrc.sample
├── templates
│   ├── eks-README.md.gotmpl
│   ├── README.md.gotmpl
│   ├── install.gotmpl
│   ├── clickhouse-install.gotmpl
│   └── clickhouse-README.md.gotmpl
├── scripts
│   ├── lint.sh
│   └── validate.sh
├── .github
│   ├── workflows
│   │   ├── tests.yml
│   │   └── release.yml
│   └── actions
│       └── test-setup
│           └── action.yml
├── Makefile
├── devbox.json
├── devbox.lock
├── README.md
└── LICENSE
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/run/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/steps/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/helpers/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/scenarios/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/requirements/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/charts/clickhouse/.gitignore:
--------------------------------------------------------------------------------
1 | charts
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .envrc
2 | __pycache__
3 | .venv
4 | .idea
5 |
--------------------------------------------------------------------------------
/tests/requirements.txt:
--------------------------------------------------------------------------------
1 | requests==2.32.3
2 | testflows==2.4.13
3 | testflows.texts==2.0.211217.1011222
4 | PyYAML==6.0.1
--------------------------------------------------------------------------------
/charts/clickhouse/examples/values-simple.yaml:
--------------------------------------------------------------------------------
1 | clickhouse:
2 | defaultUser:
3 | password: "password"
4 | persistence:
5 | size: 5Gi
6 |
--------------------------------------------------------------------------------
/.helmdocsignore:
--------------------------------------------------------------------------------
1 | # Ignore the clickhouse chart when processing other charts
2 | charts/clickhouse
3 | # Ignore the deprecated clickhouse-eks chart when processing other charts
4 | charts/clickhouse-eks
--------------------------------------------------------------------------------
/charts/clickhouse-eks/templates/credentials.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: clickhouse-credentials
5 | type: Opaque
6 | stringData:
7 | user: "{{ .Values.clickhouse.user }}"
8 | password: "{{ .Values.clickhouse.password }}"
9 |
--------------------------------------------------------------------------------
/charts/clickhouse-eks/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: clickhouse-eks
3 | description: A Helm chart for ClickHouse running on AWS EKS across AZs using a nodeSelector to pin resources to run on specific VM types
4 | type: application
5 | version: 0.1.9
6 | appVersion: "1.16.0"
7 |
--------------------------------------------------------------------------------
/charts/clickhouse/Chart.lock:
--------------------------------------------------------------------------------
1 | dependencies:
2 | - name: altinity-clickhouse-operator
3 | repository: https://helm.altinity.com
4 | version: 0.25.5
5 | digest: sha256:47275c2271832926b4c1041b238d068708bb847395dc1ab708bca183f6a707e4
6 | generated: "2025-10-31T11:49:40.820796477-04:00"
7 |
--------------------------------------------------------------------------------
/.envrc.sample:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Automatically sets up your devbox environment whenever you cd into this
4 | # directory via our direnv integration:
5 |
6 | eval "$(devbox generate direnv --print-envrc)"
7 |
8 | # check out https://www.jetify.com/docs/devbox/ide_configuration/direnv/
9 | # for more details
10 |
--------------------------------------------------------------------------------
/charts/clickhouse/templates/credentials.yaml:
--------------------------------------------------------------------------------
1 | {{- if not .Values.clickhouse.defaultUser.password_secret_name }}
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 | name: {{ include "clickhouse.credentialsName" . }}
6 | type: Opaque
7 | stringData:
8 | user: "default"
9 | password: "{{ .Values.clickhouse.defaultUser.password }}"
10 | {{- end }}
11 |
--------------------------------------------------------------------------------
/charts/clickhouse/examples/values-existing-keeper.yaml:
--------------------------------------------------------------------------------
1 | clickhouse:
2 | replicasCount: 2
3 | keeper:
4 | # use keeper instance at another host
5 | host: "replicated-clickhouse-keeper"
6 | # disable the built-in keeper instance
7 | keeper:
8 | enabled: false
9 | # disable the built-in operator installation
10 | operator:
11 | enabled: false
12 |
--------------------------------------------------------------------------------
/tests/helpers/argparser.py:
--------------------------------------------------------------------------------
1 | import os
2 | from testflows.core import Secret
3 |
4 |
5 | def argparser(parser):
6 | """Parse common arguments for the tests."""
7 |
8 | parser.add_argument(
9 | "--feature",
10 | metavar="name",
11 | type=str,
12 | help="Test Feature name",
13 | required=False,
14 | )
15 |
16 | pass
17 |
--------------------------------------------------------------------------------
/templates/eks-README.md.gotmpl:
--------------------------------------------------------------------------------
1 | {{ template "chart.header" . }}
2 |
3 | {{ template "chart.badgesSection" . }}
4 |
5 | {{ template "chart.description" . }}
6 |
7 | {{ template "extra.install" . }}
8 |
9 | {{ template "chart.homepageLine" . }}
10 |
11 | {{ template "chart.maintainersSection" . }}
12 |
13 | {{ template "chart.sourcesSection" . }}
14 |
15 | {{ template "chart.valuesSection" . }}
--------------------------------------------------------------------------------
/charts/clickhouse/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.clickhouse.serviceAccount.create -}}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ include "clickhouse.serviceAccountName" . }}
6 | labels:
7 | {{- include "clickhouse.labels" . | nindent 4 }}
8 | {{- with .Values.clickhouse.serviceAccount.annotations }}
9 | annotations:
10 | {{- toYaml . | nindent 4 }}
11 | {{- end }}
12 | {{- end }}
--------------------------------------------------------------------------------
/charts/clickhouse/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: clickhouse
3 | description: A Helm chart for creating a ClickHouse® Cluster with the Altinity Operator for ClickHouse
4 | type: application
5 | version: 0.3.7
6 | appVersion: "25.3.6.10034"
7 |
8 | dependencies:
9 | - name: altinity-clickhouse-operator
10 | repository: https://helm.altinity.com
11 | version: 0.25.5
12 | alias: operator
13 | condition: operator.enabled
14 |
--------------------------------------------------------------------------------
/scripts/lint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -euo pipefail
3 |
4 | GIT_REPO_ROOT=$(git rev-parse --show-toplevel)
5 | CHARTS_DIRECTORY="${GIT_REPO_ROOT}/charts"
6 |
7 | FAILED=()
8 |
9 | cd ${CHARTS_DIRECTORY}
10 | for d in */; do
11 | echo "Linting chart ${d} w/ helm v3"
12 | helm lint ${CHARTS_DIRECTORY}/${d} || FAILED+=("${d}")
13 | done
14 |
15 | if [[ "${#FAILED[@]}" -eq 0 ]]; then
16 | echo "All charts passed linting!"
17 | exit 0
18 | else
19 | for chart in "${FAILED[@]}"; do
20 | printf "%40s ❌\n" "$chart"
21 | done
22 | fi
23 |
--------------------------------------------------------------------------------
/templates/README.md.gotmpl:
--------------------------------------------------------------------------------
1 | {{ template "chart.header" . }}
2 |
3 | {{ template "chart.badgesSection" . }}
4 |
5 | {{ template "chart.description" . }}
6 |
7 | Since [Release 0.24.0](https://docs.altinity.com/releasenotes/altinity-kubernetes-operator-release-notes/#release-0240) of the Altinity Operator, Keeper can be managed with a custom resource. **This chart is deprecated** and may not receive further updates.
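
For reference, a minimal sketch of a Keeper managed through that custom resource (the name and replica count are illustrative):

```yaml
apiVersion: "clickhouse-keeper.altinity.com/v1"
kind: "ClickHouseKeeperInstallation"
metadata:
  name: example-keeper
spec:
  configuration:
    clusters:
      - name: "keeper"
        layout:
          replicasCount: 3
```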
8 |
9 | {{ template "chart.homepageLine" . }}
10 |
11 | {{ template "chart.maintainersSection" . }}
12 |
13 | {{ template "chart.sourcesSection" . }}
14 |
15 | {{ template "chart.valuesSection" . }}
16 |
--------------------------------------------------------------------------------
/tests/fixtures/01-minimal-single-node.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Minimal single-node deployment (baseline test)
3 | # Tests: Basic deployment, no keeper, minimal config
4 | # Expected pods: 1 ClickHouse
5 | clickhouse:
6 | replicasCount: 1
7 | shardsCount: 1
8 |
9 | defaultUser:
10 | password: "MinimalPassword123"
11 | allowExternalAccess: true
12 |
13 | persistence:
14 | enabled: true
15 | size: 2Gi
16 | accessMode: ReadWriteOnce
17 |
18 | service:
19 | type: ClusterIP
20 |
21 | keeper:
22 | enabled: false
23 |
24 | operator:
25 | enabled: true
26 |
27 |
28 |
--------------------------------------------------------------------------------
/scripts/validate.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -euo pipefail
3 |
4 | GIT_REPO_ROOT=$(git rev-parse --show-toplevel)
5 | CHARTS_DIRECTORY="${GIT_REPO_ROOT}/charts"
6 |
7 | FAILED=()
8 |
9 | cd ${CHARTS_DIRECTORY}
10 | for d in */; do
11 | echo "Validating chart ${d} w/ helm v3"
12 | helm template ${CHARTS_DIRECTORY}/${d} | kubeconform --strict --ignore-missing-schemas || FAILED+=("${d}")
13 | done
14 |
15 | if [[ "${#FAILED[@]}" -eq 0 ]]; then
16 | echo "All charts passed validations!"
17 | exit 0
18 | else
19 | for chart in "${FAILED[@]}"; do
20 | printf "%40s ❌\n" "$chart"
21 | done
22 | fi
--------------------------------------------------------------------------------
/tests/fixtures/upgrade/initial.yaml:
--------------------------------------------------------------------------------
1 | nameOverride: "initial"
2 |
3 | clickhouse:
4 | replicasCount: 2
5 | shardsCount: 1
6 |
7 | image:
8 | repository: "altinity/clickhouse-server"
9 | tag: "25.3.6.10034.altinitystable"
10 | pullPolicy: "IfNotPresent"
11 |
12 | persistence:
13 | enabled: true
14 | size: "5Gi"
15 | accessMode: "ReadWriteOnce"
16 |
17 | lbService:
18 | enabled: false
19 |
20 | defaultUser:
21 | password: "SimplePassword"
22 | allowExternalAccess: false
23 |
24 | keeper:
25 | enabled: true
26 | replicaCount: 1
27 |
28 | localStorage:
29 | size: "2Gi"
30 |
31 |
32 |
--------------------------------------------------------------------------------
/tests/run/smoke.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import sys
3 | import os
4 |
5 | from testflows.core import *
6 |
7 | append_path(sys.path, "../..")
8 |
9 | from tests.helpers.argparser import argparser
10 |
11 |
12 | @TestModule
13 | @Name("smoke")
14 | @ArgumentParser(argparser)
15 | def regression(self, feature):
16 | """Execute smoke tests."""
17 |
18 | self.context.altinity_repo = "https://helm.altinity.com"
19 | self.context.version = "25.3.6.10034.altinitystable"
20 | self.context.local_chart_path = os.path.join(os.getcwd(), "charts", "clickhouse")
21 | Feature(run=load(f"tests.scenarios.smoke", "feature"))
22 |
23 |
24 | if main():
25 | regression()
26 |
--------------------------------------------------------------------------------
/tests/fixtures/05-persistence-disabled.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Ephemeral storage deployment
3 | # Tests: Deployment without persistent volumes, replicas with ephemeral storage
4 | # Expected pods: 2 ClickHouse + 3 Keeper = 5 total
5 | nameOverride: "ephemeral"
6 |
7 | clickhouse:
8 | replicasCount: 2
9 | shardsCount: 1
10 |
11 | defaultUser:
12 | password: "EphemeralPassword123"
13 | allowExternalAccess: true
14 |
15 | # Test deployment without persistence
16 | persistence:
17 | enabled: false
18 |
19 | service:
20 | type: ClusterIP
21 |
22 | keeper:
23 | enabled: true
24 | replicaCount: 3
25 | localStorage:
26 | size: 2Gi
27 |
28 | operator:
29 | enabled: true
30 |
31 |
32 |
--------------------------------------------------------------------------------
/charts/clickhouse/examples/values-production.yaml:
--------------------------------------------------------------------------------
1 | clickhouse:
2 | antiAffinity: true
3 | zones:
4 | - us-west-2a
5 | - us-west-2b
6 | nodeSelector:
7 | node.kubernetes.io/instance-type: "m7i.large"
8 | persistence:
9 | enabled: true
10 | size: 50Gi
11 | storageClass: gp3-encrypted
12 |
13 | keeper:
14 | enabled: true
15 | replicaCount: 3
16 | zoneSpread: true
17 | localStorage:
18 | size: 5Gi
19 | storageClass: gp3-encrypted
20 | metricsPort: "7000"
21 | settings:
22 | prometheus/endpoint: /metrics
23 | prometheus/port: 7000
24 | prometheus/metrics: true
25 | prometheus/events: true
26 | prometheus/asynchronous_metrics: true
27 | prometheus/status_info: true
28 | podAnnotations:
29 | prometheus.io/port: "7000"
30 | prometheus.io/scrape: "true"
31 |
--------------------------------------------------------------------------------
/.github/workflows/tests.yml:
--------------------------------------------------------------------------------
1 | name: Regression Tests
2 | run-name: ${{ github.actor }} is running ${{ github.event.inputs.suite }} suite
3 |
4 | on:
5 | workflow_dispatch:
6 | inputs:
7 | suite:
8 | description: 'Suite to run'
9 | required: true
10 | default: 'smoke'
11 | type: choice
12 | options:
13 | - 'smoke'
14 |
15 | jobs:
16 | smoke:
17 | name: smoke
18 | if: ${{ inputs.suite == 'smoke' }}
19 | runs-on: ubuntu-latest
20 | timeout-minutes: 60
21 |
22 | steps:
23 | - name: Checkout
24 | uses: actions/checkout@v4
25 | with:
26 | ref: ${{ github.ref }}
27 | fetch-depth: 0
28 |
29 | - name: Setup
30 | uses: ./.github/actions/test-setup
31 |
32 | - name: Run Test Module
33 | run: |
34 | python3 ./tests/run/smoke.py
35 |
--------------------------------------------------------------------------------
/tests/fixtures/upgrade/upgrade.yaml:
--------------------------------------------------------------------------------
1 | nameOverride: "custom"
2 |
3 | clickhouse:
4 | replicasCount: 3
5 | shardsCount: 2
6 |
7 | image:
8 | repository: "altinity/clickhouse-server"
9 | tag: "25.3.6.10034.altinitystable"
10 | pullPolicy: "IfNotPresent"
11 |
12 | persistence:
13 | enabled: true
14 | size: "10Gi"
15 | accessMode: "ReadWriteOnce"
16 |
17 | lbService:
18 | enabled: true
19 | loadBalancerSourceRanges:
20 | - "0.0.0.0/0"
21 |
22 | defaultUser:
23 | password: "SuperSecret"
24 | allowExternalAccess: true
25 | hostIP: "0.0.0.0/0"
26 |
27 | users:
28 | - name: "analytics"
29 | password: "AnalyticsPassword123"
30 | password_sha256_hex: "a085c76ed0e7818e8a5c106cc01ea81d8b6a46500ee98c3be432297f47d7b99f"
31 | grants:
32 | - "GRANT SELECT ON default.*"
33 |
34 | keeper:
35 | enabled: true
36 | replicaCount: 3
37 |
38 | localStorage:
39 | size: "5Gi"
40 |
41 |
42 |
43 |
--------------------------------------------------------------------------------
/charts/clickhouse/examples/values-production-2zones-3replicas.yaml:
--------------------------------------------------------------------------------
1 | clickhouse:
2 | antiAffinity: true
3 | replicasCount: 3
4 | nodeSelector:
5 | node.kubernetes.io/instance-type: "m7i.large"
6 | persistence:
7 | enabled: true
8 | size: 50Gi
9 | storageClass: gp3-encrypted
10 | topologySpreadConstraints:
11 | - maxSkew: 1
12 | topologyKey: topology.kubernetes.io/zone
13 | whenUnsatisfiable: ScheduleAnyway
14 | labelSelector:
15 | matchLabels:
16 | clickhouse-keeper.altinity.com/cluster: chk-test
17 |
18 | keeper:
19 | enabled: true
20 | replicaCount: 3
21 | zoneSpread: true
22 | localStorage:
23 | size: 5Gi
24 | storageClass: gp3-encrypted
25 | metricsPort: "7000"
26 | settings:
27 | prometheus/endpoint: /metrics
28 | prometheus/port: 7000
29 | prometheus/metrics: true
30 | prometheus/events: true
31 | prometheus/asynchronous_metrics: true
32 | prometheus/status_info: true
33 | podAnnotations:
34 | prometheus.io/port: "7000"
35 | prometheus.io/scrape: "true"
36 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | REPO_ROOT ?= $(shell git rev-parse --show-toplevel)
2 | BUILD_DIR ?= $(dir $(realpath -s $(firstword $(MAKEFILE_LIST))))/build
3 | VERSION ?= $(shell git describe --tags --always --dirty)
4 |
5 | $(shell mkdir -p ${BUILD_DIR})
6 |
7 | docs:
8 | # Generate docs for clickhouse chart with custom template
9 | helm-docs --chart-search-root=charts/clickhouse --ignore-file=/dev/null --template-files=templates/clickhouse-README.md.gotmpl
10 | # Generate docs for clickhouse-eks chart with install template (no deprecation notice)
11 | helm-docs --chart-search-root=charts/clickhouse-eks --ignore-file=/dev/null --template-files=templates/install.gotmpl --template-files=templates/eks-README.md.gotmpl
12 | # Trim whitespace from generated README files
13 | for file in $$(find charts -name "README.md"); do \
14 | sed -i -e '1,2{/^[[:space:]]*$$/d;}' -e 's/[[:space:]]*$$//' "$$file"; \
15 | done
16 |
17 | verify:
18 | ${REPO_ROOT}/scripts/validate.sh
19 | ${REPO_ROOT}/scripts/lint.sh
20 |
21 | version:
22 | @echo ${VERSION}
23 |
24 | help:
25 | @grep -E '^[a-zA-Z_-]+:.*$$' $(MAKEFILE_LIST) | sort
26 |
27 | .PHONY: docs verify version help
28 |
--------------------------------------------------------------------------------
/tests/fixtures/04-external-keeper.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Deployment using external keeper (operator disabled)
3 | # Tests: External keeper configuration, replicas without built-in keeper,
4 | # custom namespace domain pattern, persistence without logs
5 | # Expected pods: 4 ClickHouse (2 shards x 2 replicas) + 0 Keeper = 4 total
6 | # NOTE: Requires external keeper at specified host
7 | nameOverride: "external"
8 | namespaceDomainPattern: "%s.svc.custom.local"
9 |
10 | clickhouse:
11 | replicasCount: 2
12 | shardsCount: 2
13 |
14 | defaultUser:
15 | password: "ExternalKeeperPassword123"
16 | allowExternalAccess: false
17 |
18 | # Point to external keeper instance
19 | keeper:
20 | host: "external-clickhouse-keeper.default.svc.cluster.local"
21 | port: 2181
22 |
23 | persistence:
24 | enabled: true
25 | size: 10Gi
26 | accessMode: ReadWriteOnce
27 |
28 | service:
29 | type: ClusterIP
30 |
31 | # Built-in keeper disabled (using external)
32 | keeper:
33 | enabled: false
34 |
35 | # Operator disabled (assumes operator already installed cluster-wide)
36 | operator:
37 | enabled: false
38 |
39 |
40 |
--------------------------------------------------------------------------------
/charts/clickhouse-eks/values.yaml:
--------------------------------------------------------------------------------
1 | all:
2 | metadata:
3 | labels:
4 | # -- The name of the application group
5 | application_group: "eks"
6 |
7 | clickhouse:
8 |
9 | # -- Metadata name
10 | name: eks
11 | # -- Cluster name
12 | cluster: dev
13 | # -- AWS availability zones for creating replicas
14 | zones:
15 | - us-east-1a
16 | - us-east-1a
17 | - us-east-1c
18 | # -- AWS instance type
19 | node_selector: m6i.large
20 |
21 | # -- Name of the keeper cluster
22 | keeper_name: keeper-eks
23 |
24 | # -- Storage class for ClickHouse data
25 | storage_class_name: gp2
26 | # -- Storage size for ClickHouse data
27 | storage: 50Gi
28 |
29 | # -- ClickHouse server image
30 | image: "altinity/clickhouse-server:24.3.12.76.altinitystable"
31 |
32 | # -- Extra volumes for clickhouse pods
33 | extraVolumes: []
34 |
35 | # -- Extra containers for clickhouse pods
36 | extraContainers: []
37 |
38 | # -- Possible service types are `cluster-ip`, `internal-loadbalancer` and `external-loadbalancer`
39 | service_type: cluster-ip
40 |
41 | # -- ClickHouse user name
42 | user: default
43 |
44 | # -- ClickHouse user password
45 | password:
46 |
--------------------------------------------------------------------------------
/devbox.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://raw.githubusercontent.com/jetify-com/devbox/0.13.10/.schema/devbox.schema.json",
3 | "packages": [
4 | "python312",
5 | "kubernetes-helm",
6 | "kubectl",
7 | "kubeconform",
8 | "xvfb-run",
9 | "helm-docs"
10 | ],
11 | "shell": {
12 | "init_hook": [
13 | ". $VENV_DIR/bin/activate",
14 | "echo 'Devbox environment ready!'",
15 | "echo 'Python version:' && python3 --version",
16 | "echo 'Virtual environment activated'",
17 | "echo ''",
18 | "echo 'To install test dependencies, run:'",
19 | "echo ' devbox run test-install'",
20 | "echo ''",
21 | "echo 'Available commands:'",
22 | "echo ' make docs - Generate chart documentation'",
23 | "echo ' make verify - Validate and lint charts'",
24 | "echo ' make version - Show version information'",
25 | "echo ' devbox run test - Run smoke tests'"
26 | ],
27 | "scripts": {
28 | "test": "timeout 600 xvfb-run -a python3 ./tests/run/smoke.py",
29 | "test-install": "pip install -r tests/requirements.txt"
30 | }
31 | },
32 | "env": {
33 | "PYTHONUNBUFFERED": "1",
34 | "MPLBACKEND": "Agg"
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/charts/clickhouse/examples/values-production-2zones-3replicas-mini.yaml:
--------------------------------------------------------------------------------
1 | clickhouse:
2 | antiAffinity: true
3 | replicasCount: 3
4 | lbService:
5 | enabled: true
6 | serviceAnnotations:
7 | minikube.io/lb: "internal"
8 | nodeSelector:
9 | node.kubernetes.io/instance-type: "minikube-node"
10 | persistence:
11 | enabled: true
12 | size: 5Gi
13 | topologySpreadConstraints:
14 | - maxSkew: 1
15 | topologyKey: topology.kubernetes.io/zone
16 | whenUnsatisfiable: ScheduleAnyway
17 | labelSelector:
18 | matchLabels:
19 | clickhouse-keeper.altinity.com/cluster: chk-test
20 |
21 | keeper:
22 | enabled: true
23 | replicaCount: 3
24 | zoneSpread: true
25 | localStorage:
26 | size: 1Gi
27 | metricsPort: "7000"
28 | settings:
29 | prometheus/endpoint: /metrics
30 | prometheus/port: 7000
31 | prometheus/metrics: true
32 | prometheus/events: true
33 | prometheus/asynchronous_metrics: true
34 | prometheus/status_info: true
35 | podAnnotations:
36 | prometheus.io/port: "7000"
37 | prometheus.io/scrape: "true"
38 | resources:
39 | cpuRequestsMs: 250
40 | memoryRequestsMiB: "128Mi"
41 | cpuLimitsMs: 500
42 | memoryLimitsMiB: "128Mi"
43 |
--------------------------------------------------------------------------------
/charts/clickhouse/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | {{ if .Release.IsInstall }}
2 |
3 | ---------------------------------------------------------------------------------
4 | Thank you for using Altinity Helm Charts!
5 | ---------------------------------------------------------------------------------
6 |
7 | {{- if .Values.operator.enabled }}
8 |
9 | The Altinity Operator for ClickHouse is being installed first.
10 |
11 | {{- else }}
12 |
13 | Your configuration requires the Altinity Operator for ClickHouse in your cluster.
14 | Visit the Altinity docs for instructions on how to install the Operator:
15 |
16 | https://docs.altinity.com/
17 |
18 | {{- end }}
19 |
20 | Your ClickHouseInstallation resource has been created.
21 |
22 | Once the cluster is available you can connect with:
23 |
24 | kubectl -n {{ .Release.Namespace }} exec -it \
25 | chi-{{ include "clickhouse.fullname" . }}-{{ include "clickhouse.clustername" . }}-0-0-0 \
26 | -- clickhouse-client
27 |
28 | Or if you'd like to connect a client you can use port forwarding:
29 |
30 | kubectl -n {{ .Release.Namespace }} port-forward \
31 | services/{{ include "clickhouse.serviceTemplateName" . }} \
32 | 9000:9000
33 |
34 | Happy querying!
35 |
36 | {{- end }}
37 |
--------------------------------------------------------------------------------
/charts/clickhouse/examples/values-production-eks.yaml:
--------------------------------------------------------------------------------
1 | operator:
2 | enabled: false
3 |
4 | clickhouse:
5 | antiAffinity: true
6 | zones:
7 | - us-east-1a
8 | - us-east-1b
9 | nodeSelector:
10 | node.kubernetes.io/instance-type: "m6i.large"
11 | persistence:
12 | enabled: true
13 | size: 50Gi
14 | storageClass: gp3-encrypted
15 | tolerations:
16 | - key: "dedicated"
17 | operator: "Equal"
18 | value: "clickhouse"
19 | effect: "NoSchedule"
20 |
21 | keeper:
22 | enabled: true
23 | replicaCount: 3
24 | zoneSpread: false
25 | localStorage:
26 | size: 5Gi
27 | storageClass: gp3-encrypted
28 | metricsPort: "7000"
29 | settings:
30 | prometheus/endpoint: /metrics
31 | prometheus/port: 7000
32 | prometheus/metrics: true
33 | prometheus/events: true
34 | prometheus/asynchronous_metrics: true
35 | prometheus/status_info: true
36 | podAnnotations:
37 | prometheus.io/port: "7000"
38 | prometheus.io/scrape: "true"
39 | tolerations:
40 | - key: "dedicated"
41 | operator: "Equal"
42 | value: "clickhouse"
43 | effect: "NoSchedule"
44 | resources:
45 | cpuRequestsMs: 1
46 | memoryRequestsMiB: 1Gi
47 | cpuLimitsMs: 2
48 | memoryLimitsMiB: 3Gi
49 |
--------------------------------------------------------------------------------
/tests/steps/minikube.py:
--------------------------------------------------------------------------------
1 | from tests.steps.system import *
2 | from tests.steps.kubernetes import use_context
3 |
4 |
5 | @TestStep(Given)
6 | def minikube_start(self, cpus, memory):
7 | """Start minikube."""
8 |
9 | run(cmd=f"minikube start --driver=docker --cpus={cpus} --memory={memory}")
10 |
11 |
12 | @TestStep(Given)
13 | def minikube_delete(self):
14 | """Delete minikube."""
15 |
16 | run(cmd="minikube delete")
17 |
18 |
19 | @TestStep(When)
20 | def minikube_status(self):
21 | """Check if minikube is running."""
22 |
23 | try:
24 | result = run(cmd="minikube status", check=False)
25 | return result.returncode == 0 and "Running" in result.stdout
26 | except Exception:
27 | return False
28 |
29 |
30 | @TestStep(When)
31 | def minikube_stop(self):
32 | """Stop minikube."""
33 |
34 | run(cmd="minikube stop")
35 |
36 |
37 | @TestStep(Given)
38 | def setup_minikube_environment(self, cpus=4, memory="6g", clean_up=True):
39 | """Set up minikube environment with context."""
40 |
41 | if minikube_status():
42 | minikube_stop()
43 |
44 | minikube_start(cpus=cpus, memory=memory)
45 |
46 | use_context(context_name="minikube")
47 |
48 | yield
49 |
50 | if clean_up:
51 | cleanup_minikube_environment()
52 |
53 |
54 | @TestStep(Finally)
55 | def cleanup_minikube_environment(self):
56 | """Clean up minikube environment."""
57 |
58 | minikube_delete()
59 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release Charts
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | paths-ignore:
8 | - '**/README.md'
9 | jobs:
10 | release:
11 | permissions:
12 | contents: write
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: Checkout
16 | uses: actions/checkout@v3
17 | with:
18 | fetch-depth: 0
19 |
20 | - name: Configure Git
21 | run: |
22 | git config user.name "$GITHUB_ACTOR"
23 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
24 |
25 | - name: Add Operator Chart Repo
26 | run: |
27 | helm repo add altinity-operator https://docs.altinity.com/clickhouse-operator
28 | helm repo add altinity https://helm.altinity.com/
29 |
30 | - name: Run chart-releaser
31 | uses: helm/chart-releaser-action@v1.6.0
32 | with:
33 | charts_dir: charts
34 | skip_existing: true
35 | env:
36 | CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
37 |
38 | test:
39 | needs: release
40 | runs-on: ubuntu-latest
41 | timeout-minutes: 60
42 | steps:
43 | - name: Checkout
44 | uses: actions/checkout@v4
45 | with:
46 | ref: ${{ github.ref }}
47 | fetch-depth: 0
48 |
49 | - name: Setup
50 | uses: ./.github/actions/test-setup
51 |
52 | - name: Run Test Module
53 | run: |
54 | python3 ./tests/run/smoke.py
55 |
--------------------------------------------------------------------------------
/templates/install.gotmpl:
--------------------------------------------------------------------------------
1 | {{ define "extra.install" -}}
2 |
3 | ## Installing the Chart
4 |
5 | ```sh
6 | # add the altinity chart repository
7 | helm repo add altinity https://helm.altinity.com
8 |
9 | # use this command to install {{ template "chart.name" . }} chart (it will also create a `clickhouse` namespace)
10 | helm install clickhouse altinity/{{ template "chart.name" . }} --namespace clickhouse --create-namespace
11 | ```
12 |
13 | > Use `-f` flag to override default values: `helm install -f newvalues.yaml`
14 |
15 | ## Upgrading the Chart
16 | ```sh
17 | # get latest repository versions
18 | helm repo update
19 |
20 | # upgrade to a newer version using the release name (`clickhouse`)
21 | helm upgrade clickhouse altinity/{{ template "chart.name" . }} --namespace clickhouse
22 | ```
23 |
24 | ## Uninstalling the Chart
25 |
26 | ```sh
27 | # uninstall using the release name (`clickhouse`)
28 | helm uninstall clickhouse --namespace clickhouse
29 | ```
30 |
31 | > This command removes all the Kubernetes components associated with the chart and deletes the release.
32 |
33 | ## Connecting to your ClickHouse Cluster
34 |
35 | ```sh
36 | # list your pods
37 | kubectl get pods --namespace clickhouse
38 |
39 | # pick any of your available pods and connect through the clickhouse-client
40 | kubectl exec -it chi-eks-dev-0-0-0 --namespace clickhouse -- clickhouse-client
41 | ```
42 |
43 | > Use `kubectl port-forward` to access your ClickHouse cluster from outside the cluster: `kubectl port-forward service/clickhouse-eks 9000:9000 & clickhouse-client`
44 |
45 | {{- end }}
46 |
47 |
48 |
--------------------------------------------------------------------------------
/tests/fixtures/07-eks-io-optimized.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # IO-optimized EKS deployment for high-performance workloads
3 | # Tests: High IOPS storage (io2), instance store, multi-zone, large volumes
4 | # Expected pods: 4 ClickHouse (2 shards x 2 replicas) + 3 Keeper = 7 total
5 | # NOTE: REQUIRES EKS CLUSTER with io2 storage class and i4i instance types
6 | nameOverride: "io-optimized"
7 |
8 | clickhouse:
9 | replicasCount: 2
10 | shardsCount: 2
11 |
12 | zones:
13 | - us-west-2a
14 | - us-west-2b
15 |
16 | antiAffinity: true
17 | antiAffinityScope: "ClickHouseInstallation"
18 |
19 | defaultUser:
20 | password: "IOOptimizedPassword123"
21 | allowExternalAccess: false
22 |
23 | persistence:
24 | enabled: true
25 | size: 500Gi
26 | storageClass: io2
27 | accessMode: ReadWriteOnce
28 |
29 | service:
30 | type: ClusterIP
31 |
32 | # High-performance instance types with NVMe
33 | nodeSelector:
34 | node.kubernetes.io/instance-type: "i4i.2xlarge"
35 | disktype: nvme
36 |
37 | topologySpreadConstraints:
38 | - maxSkew: 1
39 | topologyKey: topology.kubernetes.io/zone
40 | whenUnsatisfiable: DoNotSchedule
41 | labelSelector:
42 | matchLabels:
43 | app.kubernetes.io/name: clickhouse
44 |
45 | keeper:
46 | enabled: true
47 | replicaCount: 3
48 | zoneSpread: true
49 | localStorage:
50 | size: 50Gi
51 | storageClass: io2
52 | nodeSelector:
53 | node.kubernetes.io/instance-type: "i4i.large"
54 | disktype: nvme
55 |
56 | operator:
57 | enabled: true
58 |
59 |
60 |
--------------------------------------------------------------------------------
/tests/steps/system.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import sys
3 | import yaml
4 | import tempfile
5 | from pathlib import Path
6 | from testflows.core import *
7 |
8 |
9 | @TestStep(When)
10 | def run(self, cmd, check=True):
11 | """Execute a shell command."""
12 | note(f"> {cmd}")
13 | result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
14 |
15 | if check and result.returncode != 0:
16 | note(result.stderr)
17 | sys.exit(result.returncode)
18 |
19 | return result
20 |
21 |
22 | @TestStep(Given)
23 | def get_values_file(self, values):
24 | """Create a temporary values file for Helm."""
25 | with tempfile.NamedTemporaryFile(mode="w", suffix=".yaml", delete=False) as f:
26 | yaml.dump(values, f)
27 | temp_file = Path(f.name)
28 |
29 | yield str(temp_file)
30 |
31 | temp_file.unlink(missing_ok=True)
32 |
33 |
34 | @TestStep(When)
35 | def values_argument(self, values=None, values_file=None):
36 | """Get Helm command arguments for values file or dict.
37 |
38 | Args:
39 | values: Dictionary of values to use (will be converted to temp file)
40 | values_file: Path to values file (relative to tests/ directory)
41 |
42 | Returns:
43 | String with --values argument for helm command, or empty string if no values
44 | """
45 | if not values and not values_file:
46 | return ""
47 |
48 | if values_file:
49 | tests_dir = Path(__file__).parent.parent
50 | full_path = tests_dir / values_file
51 | return f" --values {full_path}"
52 |
53 | temp_values_file = get_values_file(values=values)
54 | return f" --values {temp_values_file}"
55 |
--------------------------------------------------------------------------------
/templates/clickhouse-install.gotmpl:
--------------------------------------------------------------------------------
1 | {{ define "extra.clickhouse.install" -}}
2 |
3 | ## Installing the Chart
4 |
5 | ```sh
6 | # add the altinity chart repository
7 | helm repo add altinity https://helm.altinity.com
8 |
9 | # use this command to install {{ template "chart.name" . }} chart (it will also create a `clickhouse` namespace)
10 | helm install release-name altinity/{{ template "chart.name" . }} --namespace clickhouse --create-namespace
11 | ```
12 |
13 | > Use `-f` flag to override default values: `helm install -f newvalues.yaml`
14 |
15 | ## Upgrading the Chart
16 | ```sh
17 | # get latest repository versions
18 | helm repo update
19 |
20 | # upgrade to a newer version using the release name (`clickhouse`)
21 | helm upgrade clickhouse altinity/{{ template "chart.name" . }} --namespace clickhouse
22 | ```
23 |
24 | ## Uninstalling the Chart
25 |
26 | ```sh
27 | # uninstall using the release name (`clickhouse`)
28 | helm uninstall clickhouse --namespace clickhouse
29 | ```
30 |
31 | **Note:** If you installed the Altinity Operator with this chart, your ClickHouse Installations will hang because the Operator will be deleted before their finalizers complete. To resolve this you must manually edit each `chi` resource and remove the finalizer.
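
For example, one way to clear a stuck finalizer (the installation name is illustrative):

```sh
# list ClickHouseInstallation resources that are stuck deleting
kubectl get chi --namespace clickhouse

# clear the finalizers so deletion can complete
kubectl patch chi <installation-name> --namespace clickhouse --type=merge -p '{"metadata":{"finalizers":null}}'
```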
32 |
33 | PVCs created by this helm chart will not be automatically deleted and must be deleted manually. An easy way to do this is to delete the namespace:
34 |
35 | ```sh
36 | kubectl delete namespace clickhouse
37 | ```
38 |
39 | > This command removes all the Kubernetes components associated with the chart and deletes the release.
40 |
41 | ## Connecting to your ClickHouse Cluster
42 |
43 | ```sh
44 | # list your pods
45 | kubectl get pods --namespace clickhouse
46 |
47 | # pick any of your available pods and connect through the clickhouse-client
48 | kubectl exec -it chi-clickhouse-0-0-0 --namespace clickhouse -- clickhouse-client
49 | ```
50 |
51 | > Use `kubectl port-forward` to access your ClickHouse cluster from outside the cluster: `kubectl port-forward service/<clickhouse-service-name> 9000:9000 & clickhouse-client`
52 |
53 | {{- end }}
54 |
55 |
--------------------------------------------------------------------------------
/devbox.lock:
--------------------------------------------------------------------------------
1 | {
2 | "lockfile_version": "1",
3 | "packages": {
4 | "github:NixOS/nixpkgs/nixpkgs-unstable": {
5 | "last_modified": "2025-10-20T13:06:07Z",
6 | "resolved": "github:NixOS/nixpkgs/cb82756ecc37fa623f8cf3e88854f9bf7f64af93?lastModified=1760965567&narHash=sha256-0JDOal5P7xzzAibvD0yTE3ptyvoVOAL0rcELmDdtSKg%3D"
7 | },
8 | "helm-docs": {
9 | "resolved": "github:NixOS/nixpkgs/cb82756ecc37fa623f8cf3e88854f9bf7f64af93?narHash=sha256-0JDOal5P7xzzAibvD0yTE3ptyvoVOAL0rcELmDdtSKg%3D#helm-docs",
10 | "source": "nixpkg"
11 | },
12 | "kubeconform": {
13 | "resolved": "github:NixOS/nixpkgs/cb82756ecc37fa623f8cf3e88854f9bf7f64af93?narHash=sha256-0JDOal5P7xzzAibvD0yTE3ptyvoVOAL0rcELmDdtSKg%3D#kubeconform",
14 | "source": "nixpkg",
15 | "systems": {
16 | "x86_64-linux": {
17 | "outputs": [
18 | {
19 | "path": "/nix/store/d6w4kb2i2z02fwg6z2w589xihvfpqmvp-kubeconform-0.7.0",
20 | "default": true
21 | }
22 | ]
23 | }
24 | }
25 | },
26 | "kubectl": {
27 | "resolved": "github:NixOS/nixpkgs/cb82756ecc37fa623f8cf3e88854f9bf7f64af93?narHash=sha256-0JDOal5P7xzzAibvD0yTE3ptyvoVOAL0rcELmDdtSKg%3D#kubectl",
28 | "source": "nixpkg",
29 | "systems": {
30 | "x86_64-linux": {
31 | "outputs": [
32 | {
33 | "path": "/nix/store/5fs5bi2b03j7b1s05h317iszhm8q23x7-kubectl-1.34.1",
34 | "default": true
35 | },
36 | {
37 | "name": "man",
38 | "path": "/nix/store/zsl5hlc1rgy52ljdkl0f5vfcp4vfs9gr-kubectl-1.34.1-man",
39 | "default": true
40 | }
41 | ]
42 | }
43 | }
44 | },
45 | "kubernetes-helm": {
46 | "resolved": "github:NixOS/nixpkgs/cb82756ecc37fa623f8cf3e88854f9bf7f64af93?narHash=sha256-0JDOal5P7xzzAibvD0yTE3ptyvoVOAL0rcELmDdtSKg%3D#kubernetes-helm",
47 | "source": "nixpkg"
48 | },
49 | "python312": {
50 | "plugin_version": "0.0.4",
51 | "resolved": "github:NixOS/nixpkgs/cb82756ecc37fa623f8cf3e88854f9bf7f64af93?narHash=sha256-0JDOal5P7xzzAibvD0yTE3ptyvoVOAL0rcELmDdtSKg%3D#python312",
52 | "source": "nixpkg"
53 | },
54 | "xvfb-run": {
55 | "resolved": "github:NixOS/nixpkgs/cb82756ecc37fa623f8cf3e88854f9bf7f64af93?narHash=sha256-0JDOal5P7xzzAibvD0yTE3ptyvoVOAL0rcELmDdtSKg%3D#xvfb-run",
56 | "source": "nixpkg"
57 | }
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/.github/actions/test-setup/action.yml:
--------------------------------------------------------------------------------
1 | name: 'Common Setup'
2 | description: 'Common setup steps for all test jobs'
3 |
4 | runs:
5 | using: 'composite'
6 | steps:
7 | - name: Set Runner IP and SSH Command
8 | shell: bash
9 | run: |
10 | export RUNNER_IP=$(hostname -I | cut -d ' ' -f 1)
11 | export RUNNER_SSH_COMMAND="ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$RUNNER_IP"
12 | echo "Runner IP: $RUNNER_IP"
13 | echo "Runner SSH Command: $RUNNER_SSH_COMMAND"
14 | uname -i
15 |
16 | - name: Cache Python dependencies
17 | uses: actions/cache@v4
18 | with:
19 | path: ~/.cache/pip
20 | key: ${{ runner.os }}-pip-${{ hashFiles('./tests/requirements.txt') }}
21 | restore-keys: |
22 | ${{ runner.os }}-pip-
23 |
24 | - name: System Setup
25 | run: |
26 | sudo apt-get update -y
27 | sudo apt-get install -y wget gnupg software-properties-common jq curl unzip git
28 | export RUNNER_IP=$(hostname -I | cut -d ' ' -f 1)
29 | export RUNNER_SSH_COMMAND="ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$RUNNER_IP"
30 | uname -i
31 | sudo rm -rf /var/lib/apt/lists/*
32 | sudo rm -rf /var/cache/debconf
33 | sudo rm -rf /tmp/*
34 | shell: bash
35 |
36 | - name: Set up Python
37 | uses: actions/setup-python@v4
38 | with:
39 | python-version: '3.12.3'
40 | cache: 'pip'
41 | cache-dependency-path: ./tests/requirements.txt
42 |
43 | - name: Install Python Dependencies
44 | run: |
45 | python3 -m pip install --upgrade pip
46 | pip install -r ./tests/requirements.txt
47 | shell: bash
48 |
49 | - name: Set PYTHONPATH
50 | run: echo "PYTHONPATH=${{ github.workspace }}" >> $GITHUB_ENV
51 | shell: bash
52 |
53 | - name: Install Helm
54 | run: |
55 | curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
56 | helm version
57 | shell: bash
58 |
59 | - name: Install Minikube
60 | run: |
61 | # Install kubectl
62 | curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
63 | chmod +x kubectl
64 | sudo mv kubectl /usr/local/bin/
65 |
66 | # Install minikube
67 | curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
68 | sudo install minikube-linux-amd64 /usr/local/bin/minikube
69 |
70 |
71 | # Verify installations
72 | kubectl version --client
73 | minikube version
74 | docker --version
75 | shell: bash
76 |
77 |
--------------------------------------------------------------------------------
/tests/fixtures/03-sharded-advanced.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Sharded cluster with advanced features
3 | # Tests: Sharding (3 shards x 2 replicas), anti-affinity, cluster secrets,
4 | # LoadBalancer service, service account, node selectors, tolerations,
5 | # topology spread constraints, keeper with 5 replicas
6 | # Expected pods: 6 ClickHouse (3 shards x 2 replicas) + 5 Keeper = 11 total
7 | nameOverride: "sharded"
8 |
9 | clickhouse:
10 | replicasCount: 2
11 | shardsCount: 3
12 |
13 | # Test anti-affinity at shard scope
14 | antiAffinity: true
15 | antiAffinityScope: "Shard"
16 |
17 | defaultUser:
18 | password: "ShardedPassword123"
19 | allowExternalAccess: true
20 |
21 | # Test cluster secret for secure inter-node communication
22 | clusterSecret:
23 | enabled: true
24 | auto: true
25 | secure: false
26 |
27 | persistence:
28 | enabled: true
29 | size: 15Gi
30 | accessMode: ReadWriteOnce
31 |
32 | service:
33 | type: ClusterIP
34 |
35 | # Test LoadBalancer service with IP restrictions
36 | lbService:
37 | enabled: true
38 | loadBalancerSourceRanges:
39 | - "10.0.0.0/8"
40 | - "172.16.0.0/12"
41 | serviceAnnotations:
42 | service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
43 | service.beta.kubernetes.io/aws-load-balancer-scheme: "internal"
44 | serviceLabels:
45 | exposed: "true"
46 |
47 | # Test service account creation
48 | serviceAccount:
49 | create: true
50 | annotations:
51 | eks.amazonaws.com/role-arn: "arn:aws:iam::123456789012:role/clickhouse-role"
52 | name: "clickhouse-sa"
53 |
54 | # Test node selection
55 | nodeSelector:
56 | disktype: ssd
57 | workload: database
58 |
59 | # Test tolerations
60 | tolerations:
61 | - key: "dedicated"
62 | operator: "Equal"
63 | value: "clickhouse"
64 | effect: "NoSchedule"
65 | - key: "high-memory"
66 | operator: "Exists"
67 | effect: "NoSchedule"
68 |
69 | # Test topology spread constraints
70 | topologySpreadConstraints:
71 | - maxSkew: 1
72 | topologyKey: kubernetes.io/hostname
73 | whenUnsatisfiable: ScheduleAnyway
74 | labelSelector:
75 | matchLabels:
76 | app.kubernetes.io/name: clickhouse
77 |
78 | podAnnotations:
79 | prometheus.io/scrape: "true"
80 | prometheus.io/port: "8001"
81 |
82 | podLabels:
83 | app: clickhouse
84 | tier: database
85 |
86 | # Test keeper with 5 replicas (high availability)
87 | keeper:
88 | enabled: true
89 | replicaCount: 5
90 | localStorage:
91 | size: 10Gi
92 | nodeSelector:
93 | disktype: ssd
94 | tolerations:
95 | - key: "dedicated"
96 | operator: "Equal"
97 | value: "clickhouse"
98 | effect: "NoSchedule"
99 | resources:
100 | cpuRequestsMs: 200
101 | memoryRequestsMiB: 1Gi
102 | cpuLimitsMs: 1000
103 | memoryLimitsMiB: 2Gi
104 |
105 | operator:
106 | enabled: true
107 |
108 |
109 |
--------------------------------------------------------------------------------
/charts/clickhouse-eks/README.md:
--------------------------------------------------------------------------------
1 | # clickhouse-eks
2 |
3 | ![Version: 0.1.9](https://img.shields.io/badge/Version-0.1.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.16.0](https://img.shields.io/badge/AppVersion-1.16.0-informational?style=flat-square)
4 |
5 | A Helm chart for ClickHouse running on AWS EKS across AZs using a nodeSelector to pin resources to run on specific VM types
6 |
7 | ## Installing the Chart
8 |
9 | ```sh
10 | # add the altinity chart repository
11 | helm repo add altinity https://helm.altinity.com
12 |
13 | # use this command to install clickhouse-eks chart (it will also create a `clickhouse` namespace)
14 | helm install clickhouse altinity/clickhouse-eks --namespace clickhouse --create-namespace
15 | ```
16 |
17 | > Use `-f` flag to override default values: `helm install -f newvalues.yaml`
18 |
19 | ## Upgrading the Chart
20 | ```sh
21 | # get latest repository versions
22 | helm repo update
23 |
24 | # upgrade to a newer version using the release name (`clickhouse`)
25 | helm upgrade clickhouse altinity/clickhouse-eks --namespace clickhouse
26 | ```
27 |
28 | ## Uninstalling the Chart
29 |
30 | ```sh
31 | # uninstall using the release name (`clickhouse`)
32 | helm uninstall clickhouse --namespace clickhouse
33 | ```
34 |
35 | > This command removes all the Kubernetes components associated with the chart and deletes the release.
36 |
37 | ## Connecting to your ClickHouse Cluster
38 |
39 | ```sh
40 | # list your pods
41 | kubectl get pods --namespace clickhouse
42 |
43 | # pick any of your available pods and connect through the clickhouse-client
44 | kubectl exec -it chi-eks-dev-0-0-0 --namespace clickhouse -- clickhouse-client
45 | ```
46 |
47 | > Use `kubectl port-forward` to access your ClickHouse cluster from outside the cluster: `kubectl port-forward service/clickhouse-eks 9000:9000 & clickhouse-client`
48 |
49 | ## Values
50 |
51 | | Key | Type | Default | Description |
52 | |-----|------|---------|-------------|
53 | | all.metadata.labels.application_group | string | `"eks"` | The name of the application group |
54 | | clickhouse.cluster | string | `"dev"` | Cluster name |
55 | | clickhouse.extraContainers | list | `[]` | Extra containers for clickhouse pods |
56 | | clickhouse.extraVolumes | list | `[]` | Extra volumes for clickhouse pods |
57 | | clickhouse.image | string | `"altinity/clickhouse-server:24.3.12.76.altinitystable"` | ClickHouse server image |
58 | | clickhouse.keeper_name | string | `"keeper-eks"` | Name of the keeper cluster |
59 | | clickhouse.name | string | `"eks"` | Metadata name |
60 | | clickhouse.node_selector | string | `"m6i.large"` | AWS instance type |
61 | | clickhouse.password | string | `nil` | ClickHouse user password |
62 | | clickhouse.service_type | string | `"cluster-ip"` | Possible service types are `cluster-ip`, `internal-loadbalancer` and `external-loadbalancer` |
63 | | clickhouse.storage | string | `"50Gi"` | Storage size for ClickHouse data |
64 | | clickhouse.storage_class_name | string | `"gp2"` | Storage class for ClickHouse data |
65 | | clickhouse.user | string | `"default"` | ClickHouse user name |
66 | | clickhouse.zones | list | `["us-east-1a","us-east-1a","us-east-1c"]` | AWS availability zones for creating replicas |
--------------------------------------------------------------------------------
/tests/fixtures/02-replicated-with-users.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Replicated deployment with comprehensive user management and persistence
3 | # Tests: Replication, keeper (3 replicas), multiple users, log volumes,
4 | # pod annotations/labels, service annotations, extraConfig
5 | # Expected pods: 3 ClickHouse + 3 Keeper = 6 total
6 | nameOverride: "replicated"
7 |
8 | clickhouse:
9 | replicasCount: 3
10 | shardsCount: 1
11 |
12 | defaultUser:
13 | password: "AdminPassword123"
14 | allowExternalAccess: false
15 | hostIP: "10.0.0.0/8"
16 |
17 | # Test multiple users with various permission levels
18 | users:
19 | - name: readonly
20 | password_sha256_hex: "5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8"
21 | password: "password" # Plain password for testing (Helm ignores this field)
22 | hostIP: "0.0.0.0/0"
23 | accessManagement: 0
24 | grants:
25 | - "GRANT SELECT ON default.*"
26 | - "GRANT SELECT ON system.*"
27 |
28 | - name: analytics
29 | password_sha256_hex: "a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a27ae3"
30 | password: "123" # Plain password for testing (Helm ignores this field)
31 | hostIP:
32 | - "10.0.0.0/8"
33 | - "172.16.0.0/12"
34 | accessManagement: 0
35 | grants:
36 | - "GRANT SELECT ON *.*"
37 |
38 | - name: appuser
39 | password_sha256_hex: "8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92"
40 | password: "123456" # Plain password for testing (Helm ignores this field)
41 | hostIP: "0.0.0.0/0"
42 | accessManagement: 1
43 |
44 | # Test persistence with separate log volumes
45 | persistence:
46 | enabled: true
47 | size: 10Gi
48 | accessMode: ReadWriteOnce
49 | logs:
50 | enabled: true
51 | size: 5Gi
52 | accessMode: ReadWriteOnce
53 |
54 | service:
55 | type: ClusterIP
56 | serviceAnnotations:
57 | prometheus.io/scrape: "true"
58 | prometheus.io/port: "8001"
59 | prometheus.io/path: "/metrics"
60 | serviceLabels:
61 | app: clickhouse
62 | tier: database
63 | environment: test
64 |
65 | # Test pod annotations and labels
66 | podAnnotations:
67 | prometheus.io/scrape: "true"
68 | prometheus.io/port: "8001"
69 | backup.velero.io/backup-volumes: "data,logs"
70 | app.version: "v1.0"
71 |
72 | podLabels:
73 | app: clickhouse
74 | tier: database
75 | environment: test
76 | team: data-engineering
77 |
78 | # Test custom ClickHouse configuration
79 | extraConfig: |
80 |
81 | 1000
82 | 100
83 | 30
84 | 0
85 |
86 | information
87 |
88 |
89 |
90 | keeper:
91 | enabled: true
92 | replicaCount: 3
93 | localStorage:
94 | size: 5Gi
95 | podAnnotations:
96 | backup.velero.io/backup-volumes: "data"
97 | resources:
98 | cpuRequestsMs: 100
99 | memoryRequestsMiB: 512Mi
100 | cpuLimitsMs: 500
101 | memoryLimitsMiB: 1Gi
102 |
103 | operator:
104 | enabled: true
105 |
106 |
107 |
--------------------------------------------------------------------------------
/tests/steps/helm.py:
--------------------------------------------------------------------------------
1 | from tests.steps.system import *
2 | import os
3 |
4 |
5 | @TestStep(Given)
6 | def ensure_dependencies(self, chart_path=None):
7 | """Ensure Helm chart dependencies are built.
8 |
9 | Args:
10 | chart_path: Path to the chart directory (defaults to context.local_chart_path)
11 | """
12 | if chart_path is None:
13 | chart_path = self.context.local_chart_path
14 |
15 | with Given("Altinity Helm repo and build dependencies"):
16 | # Add repo with force update to handle already existing repos
17 | run(
18 | cmd=f"helm repo add altinity {self.context.altinity_repo} --force-update",
19 | check=False,
20 | )
21 | run(cmd="helm repo update")
22 | # Build dependencies in the same context so repo is available
23 | run(cmd=f"helm dependency build {chart_path}", check=True)
24 |
25 |
26 | @TestStep(Given)
27 | def install(
28 | self,
29 | namespace,
30 | release_name,
31 | values=None,
32 | values_file=None,
33 | local=True,
34 | clean_up=True,
35 | ):
36 | """Install ClickHouse Operator using Altinity Helm charts with optional custom values.
37 |
38 | Args:
39 | namespace: Kubernetes namespace
40 | release_name: Helm release name
41 | values: Dictionary of values to use (will be converted to temp file)
42 | values_file: Path to values file (relative to tests/ directory)
43 | local: Whether to use local chart or remote
44 | """
45 |
46 | chart_path = self.context.local_chart_path if local else "altinity/clickhouse"
47 |
48 | if local:
49 | # Ensure dependencies are built for local charts
50 | ensure_dependencies()
51 | else:
52 | with Given("Altinity Helm repo"):
53 | run(cmd=f"helm repo add altinity {self.context.altinity_repo} || true")
54 | run(cmd="helm repo update")
55 |
56 | cmd = f"helm install {release_name} {chart_path} --namespace {namespace} --create-namespace"
57 | cmd += values_argument(values=values, values_file=values_file)
58 |
59 | with When("install ClickHouse Operator"):
60 | r = run(cmd=cmd, check=True)
61 |
62 | yield r
63 |
64 | if clean_up:
65 | with Finally("uninstall ClickHouse Operator"):
66 | uninstall(namespace=namespace, release_name=release_name)
67 |
68 |
69 | @TestStep(Finally)
70 | def uninstall(self, namespace, release_name):
71 | """Uninstall ClickHouse Operator."""
72 |
73 | run(cmd=f"helm uninstall {release_name} -n {namespace}", check=False)
74 |
75 |
76 | @TestStep(When)
77 | def upgrade(self, namespace, release_name, values=None, values_file=None, local=True):
78 | """Upgrade an existing Helm release with optional custom values.
79 |
80 | Args:
81 | namespace: Kubernetes namespace
82 | release_name: Helm release name
83 | values: Dictionary of values to use (will be converted to temp file)
84 | values_file: Path to values file (relative to tests/ directory)
85 | local: Whether to use local chart or remote
86 | """
87 |
88 | chart_path = self.context.local_chart_path if local else "altinity/clickhouse"
89 |
90 | if local:
91 | # Ensure dependencies are built for local charts
92 | ensure_dependencies()
93 |
94 | cmd = f"helm upgrade {release_name} {chart_path} --namespace {namespace}"
95 | cmd += values_argument(values=values, values_file=values_file)
96 |
97 | r = run(cmd=cmd)
98 |
99 | return r
100 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Altinity Helm Charts for ClickHouse®
2 |
3 | Helm charts for use with the Altinity Operator for ClickHouse®
4 |
5 | ## Running ClickHouse on Kubernetes with the Altinity Operator
6 | A complete Kubernetes deployment of ClickHouse includes:
7 |
8 | - The Altinity Operator for ClickHouse
9 | - The Altinity Operator CRDs
10 | - A `ClickHouseInstallation` Custom Resource defining your ClickHouse cluster settings
11 | - A `ClickHouseKeeperInstallation` Custom Resource defining your Keeper deployment (optional for single-node instances)
12 | - A custom storage class (optional) - we recommend gp3 in production.
13 |
14 | For convenience, the [ClickHouse](./charts/clickhouse) chart in this repo includes the [Operator Helm Chart](https://github.com/Altinity/clickhouse-operator/tree/master/deploy/helm/clickhouse-operator) as a dependency.
15 |
16 | These samples demonstrate straightforward Helm charts that can be used to deploy ClickHouse and ClickHouse Keeper. The examples are intended as starting points for more complex configurations and do not cover all possible use cases.
17 |
18 | For more complex configurations, follow the [Installation Guide](https://docs.altinity.com/altinitykubernetesoperator/quickstartinstallation/) from the documentation to install the Operator and create a custom `ClickHouseInstallation` resource.
19 |
20 | **Installing the Operator first provides better control when uninstalling clusters.**
21 |
22 | > Please refer to the Altinity Operator project instructions for details on upgrading the operator with Helm, including applying the custom resource definition files independently.
23 |
24 | ## Helm Charts
25 |
26 | - **[clickhouse](./charts/clickhouse/)**: All-in-one chart to deploy a ClickHouse cluster (and optionally Keeper and the Altinity Operator)
27 | - **[clickhouse-eks](./charts/clickhouse-eks/)**: An EKS-specific chart for high-availability ClickHouse clusters.
28 |
29 | ## Community
30 |
31 | These charts are a community effort sponsored by Altinity. The best way to reach us or ask questions is:
32 |
33 | * Join the [Altinity Slack](https://altinity.com/slack) - Chat with the developers and other users
34 | * Log an [issue on GitHub](https://github.com/Altinity/helm-charts/issues) - Ask questions, log bugs and feature requests
35 |
36 | ## Contributing
37 | We welcome contributions from the community! If you encounter issues or have improvements to suggest, please log an issue or submit a PR.
38 |
39 | ## Legal
40 | All code, unless specified otherwise, is licensed under the [Apache-2.0](LICENSE) license.
41 | Copyright (c) 2025 Altinity, Inc.
42 | Altinity.Cloud®, and Altinity Stable® are registered trademarks of Altinity, Inc. ClickHouse® is a registered trademark of ClickHouse, Inc.; Altinity is not affiliated with or associated with ClickHouse, Inc. Kubernetes, MySQL, and PostgreSQL are trademarks and property of their respective owners.
43 |
44 | ## Commercial Support
45 |
46 | Altinity is the primary maintainer of the operator. It is the basis of Altinity.Cloud and
47 | is also used in self-managed installations. Altinity offers a range of
48 | services related to ClickHouse and analytic applications on Kubernetes.
49 |
50 | - [Official website](https://altinity.com/) - Get a high level overview of Altinity and our offerings.
51 | - [Altinity.Cloud](https://altinity.com/cloud-database/) - Run ClickHouse in our cloud or yours.
52 | - [Altinity Support](https://altinity.com/support/) - Get Enterprise-class support for ClickHouse.
53 | - [Slack](https://altinity.com/slack) - Talk directly with ClickHouse users and Altinity devs.
54 | - [Contact us](https://hubs.la/Q020sH3Z0) - Contact Altinity with your questions or issues.
55 | - [Free consultation](https://hubs.la/Q020sHkv0) - Get a free consultation with a ClickHouse expert today.
56 |
--------------------------------------------------------------------------------
/charts/clickhouse-eks/templates/chi.yaml:
--------------------------------------------------------------------------------
1 | # ClickHouse using default storage class.
2 | apiVersion: "clickhouse.altinity.com/v1"
3 | kind: "ClickHouseInstallation"
4 | metadata:
5 |
6 | name: "{{ .Values.clickhouse.name }}"
7 | labels:
8 | application_group: "{{ .Values.all.metadata.labels.application_group }}"
9 | spec:
10 | defaults:
11 | templates:
12 | serviceTemplate: "{{ .Values.clickhouse.service_type }}"
13 | configuration:
14 | users:
15 | {{ .Values.clickhouse.user }}/networks/ip: 0.0.0.0/0
16 | {{ .Values.clickhouse.user }}/access_management: 1
17 | {{ .Values.clickhouse.user }}/password:
18 | valueFrom:
19 | secretKeyRef:
20 | name: clickhouse-credentials
21 | key: password
22 | clusters:
23 | - name: "{{ .Values.clickhouse.cluster }}"
24 | layout:
25 | shards:
26 | - replicas:
27 | {{- range .Values.clickhouse.zones }}
28 | - templates:
29 | podTemplate: replica-in-zone-{{ . }}
30 | {{- end }}
31 | templates:
32 | podTemplate: replica
33 | volumeClaimTemplate: storage
34 | zookeeper:
35 | nodes:
36 | - host: "{{ .Values.clickhouse.keeper_name }}"
37 | port: 2181
38 | templates:
39 | podTemplates:
40 | {{- $root := . }}
41 | {{- range $zone := $root.Values.clickhouse.zones }}
42 | - name: replica-in-zone-{{ $zone }}
43 | zone:
44 | values:
45 | - {{ $zone }}
46 | podDistribution:
47 | - type: ClickHouseAntiAffinity
48 | scope: ClickHouseInstallation
49 | spec:
50 | containers:
51 | - name: clickhouse
52 | image: {{ $root.Values.clickhouse.image }}
53 | {{- with $root.Values.clickhouse.extraContainers }}
54 | {{- toYaml . | nindent 12 }}
55 | {{- end }}
56 | nodeSelector:
57 | node.kubernetes.io/instance-type: {{ $root.Values.clickhouse.node_selector }}
58 | tolerations:
59 | - key: "dedicated"
60 | operator: "Equal"
61 | value: "clickhouse"
62 | effect: "NoSchedule"
63 | {{- with $root.Values.clickhouse.extraVolumes }}
64 | volumes:
65 | {{- toYaml . | nindent 12 }}
66 | {{- end }}
67 | {{- end }}
68 | volumeClaimTemplates:
69 | - name: storage
70 | reclaimPolicy: Retain
71 | metadata:
72 | labels:
73 | application_group: "{{ .Values.all.metadata.labels.application_group }}"
74 | spec:
75 | storageClassName: "{{ .Values.clickhouse.storage_class_name }}"
76 | accessModes:
77 | - ReadWriteOnce
78 | resources:
79 | requests:
80 | storage: "{{ .Values.clickhouse.storage }}"
81 | serviceTemplates:
82 | - name: cluster-ip
83 | metadata:
84 | labels:
85 | application_group: "{{ .Values.all.metadata.labels.application_group }}"
86 | spec:
87 | type: ClusterIP
88 | clusterIP: None
89 | ports:
90 | - name: http
91 | port: 8123
92 | targetPort: 8123
93 | - name: tcp
94 | port: 9000
95 | targetPort: 9000
96 |
97 | - name: loadbalancer-internal
98 | metadata:
99 | labels:
100 | application_group: "{{ .Values.all.metadata.labels.application_group }}"
101 | annotations:
102 | cloud.google.com/load-balancer-type: "Internal"
103 | service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
104 | service.beta.kubernetes.io/azure-load-balancer-internal: "true"
105 | service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
106 | service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true"
107 | spec:
108 | type: "LoadBalancer"
109 | ports:
110 | - name: http
111 | port: 8123
112 | targetPort: 8123
113 | - name: tcp
114 | port: 9000
115 | targetPort: 9000
116 |
117 | - name: loadbalancer-external
118 | metadata:
119 | labels:
120 | application_group: "{{ .Values.all.metadata.labels.application_group }}"
121 | spec:
122 | type: "LoadBalancer"
123 | ports:
124 | - name: http
125 | port: 8123
126 | targetPort: 8123
127 | - name: tcp
128 | port: 9000
129 | targetPort: 9000
130 |
--------------------------------------------------------------------------------
/tests/fixtures/06-eks-multi-zone-production.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Production-grade EKS multi-zone deployment with all features
3 | # Tests: Multi-zone (3 AZs), anti-affinity, cluster secrets (secure),
4 | # LoadBalancer, service account, node selectors, tolerations,
5 | # topology spread, users, log volumes, extraConfig, keeper metrics
6 | # Expected pods: 6 ClickHouse (2 shards x 3 replicas across zones) + 3 Keeper = 9 total
7 | # NOTE: REQUIRES EKS CLUSTER with zone labels and gp3 storage class
8 | nameOverride: "production"
9 |
10 | clickhouse:
11 | replicasCount: 3
12 | shardsCount: 2
13 |
14 | # Deploy across three availability zones
15 | zones:
16 | - us-west-2a
17 | - us-west-2b
18 | - us-west-2c
19 |
20 | # Pod anti-affinity at shard scope
21 | antiAffinity: true
22 | antiAffinityScope: "Shard"
23 |
24 | defaultUser:
25 | password: "ProductionPassword123"
26 | allowExternalAccess: false
27 | hostIP: "10.0.0.0/8"
28 |
29 | # Production users
30 | users:
31 | - name: appreadonly
32 | password_sha256_hex: "5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8"
33 | hostIP: "10.0.0.0/8"
34 | accessManagement: 0
35 | grants:
36 | - "GRANT SELECT ON *.*"
37 |
38 | - name: appwriter
39 | password_sha256_hex: "a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a27ae3"
40 | hostIP: "10.0.0.0/8"
41 | accessManagement: 0
42 | grants:
43 | - "GRANT SELECT, INSERT ON default.*"
44 |
45 | # Secure cluster communication over SSL
46 | clusterSecret:
47 | enabled: true
48 | auto: true
49 | secure: true
50 |
51 | # Production storage with logs
52 | persistence:
53 | enabled: true
54 | size: 100Gi
55 | storageClass: gp3-encrypted
56 | accessMode: ReadWriteOnce
57 | logs:
58 | enabled: true
59 | size: 20Gi
60 | storageClass: gp3
61 | accessMode: ReadWriteOnce
62 |
63 | service:
64 | type: ClusterIP
65 | serviceAnnotations:
66 | prometheus.io/scrape: "true"
67 | prometheus.io/port: "8001"
68 |
69 | # Internal LoadBalancer for external access
70 | lbService:
71 | enabled: true
72 | loadBalancerSourceRanges:
73 | - "10.0.0.0/8"
74 | serviceAnnotations:
75 | service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
76 | service.beta.kubernetes.io/aws-load-balancer-scheme: "internal"
77 |
78 | # IAM role for service account
79 | serviceAccount:
80 | create: true
81 | annotations:
82 | eks.amazonaws.com/role-arn: "arn:aws:iam::123456789012:role/clickhouse-production"
83 |
84 | # EKS node selection
85 | nodeSelector:
86 | node.kubernetes.io/instance-type: "r6i.2xlarge"
87 | workload: database
88 |
89 | # Dedicated node tolerations
90 | tolerations:
91 | - key: "dedicated"
92 | operator: "Equal"
93 | value: "clickhouse"
94 | effect: "NoSchedule"
95 |
96 | # Enforce zone distribution
97 | topologySpreadConstraints:
98 | - maxSkew: 1
99 | topologyKey: topology.kubernetes.io/zone
100 | whenUnsatisfiable: DoNotSchedule
101 | labelSelector:
102 | matchLabels:
103 | app.kubernetes.io/name: clickhouse
104 | - maxSkew: 2
105 | topologyKey: kubernetes.io/hostname
106 | whenUnsatisfiable: ScheduleAnyway
107 | labelSelector:
108 | matchLabels:
109 | app.kubernetes.io/name: clickhouse
110 |
111 | podAnnotations:
112 | prometheus.io/scrape: "true"
113 | prometheus.io/port: "8001"
114 | backup.velero.io/backup-volumes: "data,logs"
115 |
116 | podLabels:
117 | app: clickhouse
118 | tier: database
119 | environment: production
120 |
121 | # Production-optimized configuration
122 | extraConfig: |
123 |
124 | 4096
125 | 200
126 | 0.8
127 | 0
128 |
129 | 16
130 |
131 |
132 | warning
133 |
134 |
135 |
136 | keeper:
137 | enabled: true
138 | replicaCount: 3
139 | zoneSpread: true
140 | localStorage:
141 | size: 20Gi
142 | storageClass: gp3-encrypted
143 | metricsPort: "7000"
144 | settings:
145 | prometheus/endpoint: /metrics
146 | prometheus/port: 7000
147 | prometheus/metrics: true
148 | prometheus/events: true
149 | prometheus/asynchronous_metrics: true
150 | prometheus/status_info: true
151 | nodeSelector:
152 | node.kubernetes.io/instance-type: "m6i.large"
153 | tolerations:
154 | - key: "dedicated"
155 | operator: "Equal"
156 | value: "clickhouse"
157 | effect: "NoSchedule"
158 | podAnnotations:
159 | prometheus.io/port: "7000"
160 | prometheus.io/scrape: "true"
161 | resources:
162 | cpuRequestsMs: 500
163 | memoryRequestsMiB: 2Gi
164 | cpuLimitsMs: 2000
165 | memoryLimitsMiB: 4Gi
166 |
167 | operator:
168 | enabled: true
169 |
170 |
171 |
--------------------------------------------------------------------------------
/charts/clickhouse/templates/chk.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.keeper.enabled }}
2 | {{- $cluster_name := tpl (include "clickhouse.clustername" . ) . -}}
3 | {{- $keeper_host := tpl (include "clickhouse.keeper.host" . ) . -}}
4 | {{- $count := (.Values.keeper.replicaCount | int) -}}
5 | ---
6 | apiVersion: "clickhouse-keeper.altinity.com/v1"
7 | kind: "ClickHouseKeeperInstallation"
8 | metadata:
9 | name: {{ include "clickhouse.fullname" . }}
10 | labels:
11 | {{- include "clickhouse.labels" . | nindent 4 }}
12 | spec:
13 | configuration:
14 | clusters:
15 | - name: "{{ $cluster_name }}"
16 | layout:
17 | replicas:
18 | {{- range $i, $e := until $count }}
19 | - name: "keeper-{{ $i }}"
20 | templates:
21 | podTemplate: "keeper-{{ $i }}"
22 | dataVolumeClaimTemplate: "keeper-{{ $i }}"
23 | replicaServiceTemplate: "keeper-{{ $i }}"
24 | {{- end }}
25 | {{- if not (empty .Values.keeper.settings) }}
26 | settings:
27 | {{- range $k, $v := .Values.keeper.settings }}
28 | {{ $k }}: {{ $v }}
29 | {{- end }}
30 | {{- end }}
31 | defaults:
32 | storageManagement:
33 | provisioner: Operator
34 | reclaimPolicy: Retain
35 | templates:
36 | serviceTemplates:
37 | {{- range $i, $e := until $count }}
38 | - name: "keeper-{{ $i }}"
39 | generateName: "{{ $keeper_host }}-{{ $i }}"
40 | spec:
41 | type: ClusterIP
42 | publishNotReadyAddresses: true
43 | ports:
44 | - name: zk
45 | port: 2181
46 | targetPort: 2181
47 | - name: raft
48 | port: 9444
49 | targetPort: 9444
50 | {{- if not (empty $.Values.keeper.metricsPort) }}
51 | - name: metrics
52 | port: {{ $.Values.keeper.metricsPort }}
53 | targetPort: {{ $.Values.keeper.metricsPort }}
54 | {{- end }}
55 | {{- end }}
56 | volumeClaimTemplates:
57 | {{- range $i, $e := until $count }}
58 | - name: "keeper-{{ $i }}"
59 | {{- with $.Values.keeper.volumeClaimAnnotations }}
60 | metadata:
61 | annotations:
62 | {{- toYaml . | nindent 12 }}
63 | {{- end }}
64 | spec:
65 | accessModes:
66 | - ReadWriteOnce
67 | resources:
68 | requests:
69 | storage: {{ $.Values.keeper.localStorage.size }}
70 | {{- if $.Values.keeper.localStorage.storageClass }}
71 | storageClassName: "{{ $.Values.keeper.localStorage.storageClass }}"
72 | {{- end }}
73 | {{- end }}
74 | podTemplates:
75 | {{- range $i, $e := until $count }}
76 | - name: "keeper-{{ $i }}"
77 | generateName: "{{ $keeper_host }}-{{ $i }}"
78 | {{- with $.Values.keeper.podAnnotations }}
79 | metadata:
80 | annotations:
81 | {{- toYaml . | nindent 12 }}
82 | {{- end }}
83 | spec:
84 | {{- if $.Values.keeper.nodeSelector }}
85 | affinity:
86 | nodeAffinity:
87 | requiredDuringSchedulingIgnoredDuringExecution:
88 | nodeSelectorTerms:
89 | - matchExpressions:
90 | {{- range $k, $v := $.Values.keeper.nodeSelector }}
91 | - key: {{ $k }}
92 | operator: In
93 | values:
94 | - {{ $v | quote }}
95 | {{- end }}
96 | {{- end }}
97 | topologySpreadConstraints:
98 | {{- if $.Values.keeper.zoneSpread }}
99 | - maxSkew: 1
100 | topologyKey: topology.kubernetes.io/zone
101 | whenUnsatisfiable: ScheduleAnyway
102 | labelSelector:
103 | matchLabels:
104 | clickhouse-keeper.altinity.com/cluster: "{{ $cluster_name }}"
105 | {{- end }}
106 | - maxSkew: 1
107 | topologyKey: kubernetes.io/hostname
108 | whenUnsatisfiable: DoNotSchedule
109 | labelSelector:
110 | matchLabels:
111 | clickhouse-keeper.altinity.com/cluster: "{{ $cluster_name }}"
112 | {{- with $.Values.keeper.tolerations }}
113 | tolerations:
114 | {{- toYaml . | nindent 12 }}
115 | {{- end }}
116 | containers:
117 | - name: clickhouse-keeper
118 | image: "{{ $.Values.keeper.image }}:{{ $.Values.keeper.tag }}"
119 | {{- if not (empty $.Values.keeper.metricsPort) }}
120 | ports:
121 | - name: metrics
122 | containerPort: {{ $.Values.keeper.metricsPort }}
123 | {{- end }}
124 | resources:
125 | requests:
126 | cpu: "{{ $.Values.keeper.resources.cpuRequestsMs }}m"
127 | memory: "{{ $.Values.keeper.resources.memoryRequestsMiB }}"
128 | limits:
129 | cpu: "{{ $.Values.keeper.resources.cpuLimitsMs }}m"
130 | memory: "{{ $.Values.keeper.resources.memoryLimitsMiB }}"
131 | {{- end }}
132 | {{- if not (empty .Values.namespaceDomainPattern) }}
133 | namespaceDomainPattern: {{ .Values.namespaceDomainPattern | quote }}
134 | {{- end }}
135 | {{- end }}
136 |
--------------------------------------------------------------------------------
/templates/clickhouse-README.md.gotmpl:
--------------------------------------------------------------------------------
1 | {{ template "chart.header" . }}
2 |
3 | {{ template "chart.badgesSection" . }}
4 |
5 | {{ template "chart.description" . }}
6 |
7 | ## Features
8 |
9 | - Single-node or multi-node ClickHouse clusters
10 | - Sharding and replication
11 | - ClickHouse Keeper integration
12 | - Persistent storage configuration
13 | - Init scripts
14 |
15 | {{ template "chart.requirementsSection" . }}
16 |
17 | ## Installing the Chart
18 |
19 | ```sh
20 | # add the altinity chart repository
21 | helm repo add altinity https://helm.altinity.com
22 |
23 | # use this command to install clickhouse chart (it will also create a `clickhouse` namespace)
24 | helm install release-name altinity/clickhouse --namespace clickhouse --create-namespace
25 | ```
26 |
27 | Note that by default the chart includes the Altinity Operator. For most production use cases you will want to disable this and install the operator explicitly from its own helm chart.
28 |
29 |
30 | ```sh
31 | # add the altinity operator chart repository
32 | helm repo add altinity-operator https://docs.altinity.com/clickhouse-operator
33 |
34 | # create the namespace
35 | kubectl create namespace clickhouse
36 |
37 | # install operator into namespace
38 | helm install clickhouse-operator altinity-operator/altinity-clickhouse-operator \
39 | --namespace clickhouse
40 |
41 | # add the altinity chart repository
42 | helm repo add altinity https://helm.altinity.com
43 |
44 | # install the clickhouse chart without the operator
45 | helm install release-name altinity/clickhouse --namespace clickhouse \
46 | --set operator.enabled=false
47 | ```
48 |
49 | > Yes, we're aware that the domains for the helm repos are a bit odd. We're working on it.
50 |
51 | ## Upgrading the Chart
52 |
53 | ### Upgrading from 0.2.x to 0.3.0
54 |
55 | **IMPORTANT**: Version 0.3.0 introduces a change that improves reconciliation timing by embedding templates directly in the ClickHouseInstallation resource instead of using separate ClickHouseInstallationTemplate resources.
56 |
57 | After upgrading, delete the old ClickHouseInstallationTemplate resources that were created by version 0.2.x:
58 |
59 | ```sh
60 | # List all ClickHouseInstallationTemplate resources
61 | kubectl get clickhouseinstallationtemplates -n clickhouse
62 |
63 | # Delete them (replace with your actual release name)
64 | kubectl delete clickhouseinstallationtemplates -n clickhouse \
65 | <release-name>-clickhouse-pod \
66 | <release-name>-clickhouse-service \
67 | <release-name>-clickhouse-service-lb \
68 | <release-name>-clickhouse-data \
69 | <release-name>-clickhouse-logs
70 | ```
71 |
72 | The ClickHouseInstallation will be updated automatically with embedded templates, resulting in faster reconciliation.
73 |
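To confirm the reconciliation finished, you can list the installation and check its status (this assumes the release lives in the `clickhouse` namespace):

```sh
# the status column shows whether the operator has finished reconciling
kubectl get clickhouseinstallations -n clickhouse
```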
74 | ### Standard Upgrade Process
75 | ```sh
76 | # get latest repository versions
77 | helm repo update
78 |
79 | # upgrade to a newer version using the release name (`clickhouse`)
80 | helm upgrade clickhouse altinity/clickhouse --namespace clickhouse
81 | ```
82 |
83 |
84 | ## Uninstalling the Chart
85 |
86 | ```sh
87 | # uninstall using the release name (`clickhouse`)
88 | helm uninstall clickhouse --namespace clickhouse
89 | ```
90 |
91 | **Note:** If you installed the Altinity Operator with this chart, deleting your ClickHouse installations will hang because the Operator is removed before their finalizers can complete. To resolve this, manually edit each `chi` resource and remove the finalizer.
92 |
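For example, the finalizers can be cleared with `kubectl patch` (here `clickhouse` is a placeholder for your installation's name):

```sh
# remove all finalizers so the stuck ClickHouseInstallation can be deleted
kubectl patch chi clickhouse -n clickhouse --type=merge -p '{"metadata":{"finalizers":[]}}'
```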
93 | PVCs created by this helm chart will not be automatically deleted and must be deleted manually. An easy way to do this is to delete the namespace:
94 |
95 | ```sh
96 | kubectl delete namespace clickhouse
97 | ```
98 |
99 | > This command removes all the Kubernetes components associated with the chart and deletes the release.
100 |
101 | ## Connecting to your ClickHouse Cluster
102 |
103 | ```sh
104 | # list your pods
105 | kubectl get pods --namespace clickhouse
106 |
107 | # pick any of your available pods and connect through the clickhouse-client
108 | kubectl exec -it chi-clickhouse-0-0-0 --namespace clickhouse -- clickhouse-client
109 | ```
110 |
111 | > Use `kubectl port-forward` to access your ClickHouse cluster from outside the cluster: `kubectl port-forward service/clickhouse-eks 9000:9000 & clickhouse-client`
112 |
113 | {{ template "chart.homepageLine" . }}
114 |
115 | {{ template "chart.maintainersSection" . }}
116 |
117 | {{ template "chart.sourcesSection" . }}
118 |
119 | ## Using Init Scripts with ConfigMap
120 |
121 | The chart allows mounting a ConfigMap containing initialization scripts that will be executed during the ClickHouse container startup.
122 |
123 | ### How to use:
124 |
125 | 1. Create a ConfigMap containing your initialization scripts:
126 |
127 | ```bash
128 | kubectl create configmap my-init-scripts --from-file=01_create_database.sh --from-file=02_create_tables.sh
129 | ```
130 |
131 | 2. Enable the initScripts feature in your Helm values:
132 |
133 | ```yaml
134 | clickhouse:
135 | initScripts:
136 | enabled: true
137 | configMapName: my-init-scripts
138 | alwaysRun: true # Set to true to always run scripts on container restart
139 | ```
140 |
141 | The scripts will be mounted at `/docker-entrypoint-initdb.d/` in the ClickHouse container and executed in alphabetical order during startup.
142 |
143 | ### Example Script Format
144 |
145 | ```bash
146 | #!/bin/bash
147 | set -e
148 | clickhouse client -n <<-EOSQL
149 | CREATE DATABASE IF NOT EXISTS my_database;
150 | CREATE TABLE IF NOT EXISTS my_database.my_table (
151 | id UInt64,
152 | data String
153 | ) ENGINE = MergeTree()
154 | ORDER BY id;
155 | EOSQL
156 | ```
157 |
158 | {{ template "chart.valuesSection" . }}
159 |
--------------------------------------------------------------------------------
/tests/scenarios/smoke.py:
--------------------------------------------------------------------------------
1 | from testflows.core import *
2 |
3 | import os
4 | import tests.steps.kubernetes as kubernetes
5 | import tests.steps.minikube as minikube
6 | import tests.steps.helm as helm
7 | import tests.steps.clickhouse as clickhouse
8 | from tests.steps.deployment import HelmState
9 |
10 |
11 | FIXTURES = [
12 | "fixtures/01-minimal-single-node.yaml",
13 | "fixtures/02-replicated-with-users.yaml",
14 | # "fixtures/03-sharded-advanced.yaml",
15 | # "fixtures/04-external-keeper.yaml",
16 | # "fixtures/05-persistence-disabled.yaml",
17 | ]
18 |
19 | UPGRADE_SCENARIOS = [
20 | ("fixtures/upgrade/initial.yaml", "fixtures/upgrade/upgrade.yaml"),
21 | ]
22 |
23 |
24 | @TestScenario
25 | def check_deployment(self, fixture_file, skip_external_keeper=True):
26 | """Test a single ClickHouse deployment configuration.
27 |
28 | Args:
29 | fixture_file: Path to the fixture YAML file
30 | skip_external_keeper: Skip if fixture requires external keeper
31 | """
32 | fixture_name = os.path.basename(fixture_file).replace(".yaml", "")
33 | # Keep release name and namespace under 11 chars to avoid Kubernetes naming issues
34 | short_name = f"t{fixture_name[:9]}"
35 | release_name = short_name
36 | namespace = short_name
37 |
38 | with Given("paths to fixture file"):
39 | tests_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
40 | values_path = os.path.join(tests_dir, fixture_file)
41 |
42 | with And("load fixture configuration"):
43 | state = HelmState(values_path)
44 | note(f"Testing fixture: {fixture_file}")
45 | note(f"Expected pods: {state.get_expected_pod_count()}")
46 |
47 | if skip_external_keeper and "external-keeper" in fixture_name:
48 | skip("Skipping external keeper test (requires pre-existing keeper)")
49 | return
50 |
51 | with When("install ClickHouse with fixture configuration"):
52 | kubernetes.use_context(context_name="minikube")
53 | helm.install(
54 | namespace=namespace, release_name=release_name, values_file=fixture_file
55 | )
56 |
57 | with Then("verify deployment state"):
58 | state.verify_all(namespace=namespace)
59 |
60 | # Add Keeper HA test for replicated deployments with 3+ keepers
61 | if "replicated" in fixture_name:
62 | with And("test Keeper high availability (chaos test)"):
63 | admin_password = state.clickhouse_config.get("defaultUser", {}).get(
64 | "password", ""
65 | )
66 | clickhouse.test_keeper_high_availability(
67 | namespace=namespace, admin_password=admin_password
68 | )
69 |
70 | # Verify metrics endpoint is accessible
71 | with And("verify metrics endpoint"):
72 | clickhouse.verify_metrics_endpoint(namespace=namespace)
73 |
74 | with Finally("cleanup deployment"):
75 | helm.uninstall(namespace=namespace, release_name=release_name)
76 | kubernetes.delete_namespace(namespace=namespace)
77 |
78 |
79 | @TestScenario
80 | def check_upgrade(self, initial_fixture, upgrade_fixture):
81 | """Test ClickHouse Operator upgrade process.
82 |
83 | Args:
84 | initial_fixture: Path to initial configuration YAML
85 | upgrade_fixture: Path to upgraded configuration YAML
86 | """
87 |     release_name = "upgrade"
88 |     namespace = "upgrade"
89 |
90 | with Given("paths to fixture files"):
91 | tests_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
92 | initial_values_path = os.path.join(tests_dir, initial_fixture)
93 | upgrade_values_path = os.path.join(tests_dir, upgrade_fixture)
94 |
95 | with And("define Helm states for initial and upgraded configurations"):
96 | initial_state = HelmState(initial_values_path)
97 | upgrade_state = HelmState(upgrade_values_path)
98 | note(f"Initial pods: {initial_state.get_expected_pod_count()}")
99 | note(f"Upgraded pods: {upgrade_state.get_expected_pod_count()}")
100 |
101 | with When("install ClickHouse with initial configuration"):
102 | kubernetes.use_context(context_name="minikube")
103 | helm.install(
104 | namespace=namespace, release_name=release_name, values_file=initial_fixture
105 | )
106 |
107 | with Then("verify initial deployment state"):
108 | initial_state.verify_all(namespace=namespace)
109 |
110 | # Only test data survival if nameOverride stays the same (in-place upgrade)
111 | initial_name = initial_state.values.get("nameOverride", "")
112 | upgrade_name = upgrade_state.values.get("nameOverride", "")
113 | is_inplace_upgrade = initial_name == upgrade_name
114 |
115 | if is_inplace_upgrade:
116 | with And("create test data for upgrade survival verification"):
117 | admin_password = initial_state.clickhouse_config.get("defaultUser", {}).get(
118 | "password", ""
119 | )
120 | clickhouse.create_test_data(
121 | namespace=namespace,
122 | admin_password=admin_password,
123 | table_name="pre_upgrade_data",
124 | test_value=f"upgrade_survival_{namespace}",
125 | )
126 | else:
127 | note(
128 | f"Skipping data survival test: nameOverride changed from '{initial_name}' to '{upgrade_name}' (cluster replacement scenario)"
129 | )
130 |
131 | with When("upgrade ClickHouse to new configuration"):
132 | helm.upgrade(
133 | namespace=namespace, release_name=release_name, values_file=upgrade_fixture
134 | )
135 |
136 | with Then("verify upgraded deployment state"):
137 | upgrade_state.verify_all(namespace=namespace)
138 |
139 | if is_inplace_upgrade:
140 | with And("verify data survived the upgrade"):
141 | admin_password = upgrade_state.clickhouse_config.get("defaultUser", {}).get(
142 | "password", ""
143 | )
144 | clickhouse.verify_data_survival(
145 | namespace=namespace,
146 | admin_password=admin_password,
147 | table_name="pre_upgrade_data",
148 | expected_value=f"upgrade_survival_{namespace}",
149 | )
150 | else:
151 | note(f"Data survival verification skipped for cluster replacement scenario")
152 |
153 | with And("verify metrics endpoint"):
154 | clickhouse.verify_metrics_endpoint(namespace=namespace)
155 |
156 | with Finally("cleanup deployment"):
157 | helm.uninstall(namespace=namespace, release_name=release_name)
158 | kubernetes.delete_namespace(namespace=namespace)
159 |
160 |
161 | @TestFeature
162 | def check_all_fixtures(self):
163 | """Test all fixture configurations."""
164 |
165 | for fixture in FIXTURES:
166 | Scenario(
167 | test=check_deployment,
168 | name=f"deploy_{os.path.basename(fixture).replace('.yaml', '')}",
169 | )(fixture_file=fixture, skip_external_keeper=True)
170 |
171 |
172 | @TestFeature
173 | def check_all_upgrades(self):
174 | """Test all upgrade scenarios."""
175 |
176 | for initial, upgraded in UPGRADE_SCENARIOS:
177 | scenario_name = f"{os.path.basename(initial).replace('.yaml', '')}_to_{os.path.basename(upgraded).replace('.yaml', '')}"
178 | Scenario(
179 | test=check_upgrade,
180 | name=f"upgrade_{scenario_name}",
181 | )(initial_fixture=initial, upgrade_fixture=upgraded)
182 |
183 |
184 | @TestFeature
185 | @Name("comprehensive")
186 | def feature(self):
187 | """Run all comprehensive smoke tests."""
188 |
189 | with Given("minikube environment"):
190 | minikube.setup_minikube_environment()
191 | kubernetes.use_context(context_name="minikube")
192 |
193 | Feature(run=check_all_fixtures)
194 |
195 | Feature(run=check_all_upgrades)
196 |
--------------------------------------------------------------------------------
/charts/clickhouse/values.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # @ignore
3 | nameOverride: ""
4 | # @ignore
5 | fullnameOverride: ""
6 |
7 | # -- Custom domain pattern used for DNS names of `Service` and `Pod` resources.
8 | # Typically defined by the custom cluster domain of the Kubernetes cluster.
9 | # The pattern follows the `%s` C-style printf format, e.g. '%s.svc.my.test'.
10 | # If not specified, the default namespace domain suffix is `.svc.cluster.local`.
11 | namespaceDomainPattern: ""
12 |
13 | # Configure ClickHouse Installation and Pod Template
14 | clickhouse:
15 | defaultUser:
16 | password: ""
17 | # -- Name of an existing Kubernetes secret containing the default user password.
18 | # If set, the password will be read from the secret instead of using the password field.
19 | # The secret should contain a key named 'password'.
20 | password_secret_name: ""
21 | # -- Allow the default user to access ClickHouse from any IP.
22 | # If set, will override `hostIP` to always be `0.0.0.0/0`.
23 | allowExternalAccess: false
24 | # defaultUser.hostIP -- (string) Set a mask for IPs allowed for the
25 | # default user. Should not be set if allowExternalAccess is set.
26 | # @default -- 127.0.0.1/32
27 | hostIP: 127.0.0.1/32
28 |
29 | # -- Configure additional ClickHouse users.
30 | users: []
31 | # users:
32 | # - name: app_user
33 | # hostIP: ["10.0.0.0/8"] # default: "0.0.0.0/0"
34 | # accessManagement: 1 # default: 0
35 | # password_secret_name: "app-user-secret" # secret must contain 'password' key
36 | # grants:
37 | # - "GRANT SELECT ON database.*"
38 |
39 | # -- number of replicas. If greater than 1, keeper must be enabled
40 | # or a keeper host should be provided under clickhouse.keeper.host.
41 | # Will be ignored if `zones` is set.
42 | replicasCount: 1
43 |
44 | # -- number of shards.
45 | shardsCount: 1
46 |
47 | # -- Cluster secret configuration for secure inter-node communication
48 | clusterSecret:
49 | # -- Whether to enable secret-based cluster communication
50 | enabled: false
51 | # -- Whether to secure this behind the SSL port
52 | secure: false
53 | # -- Auto-generate cluster secret (recommended for security)
54 | auto: true
55 | # -- Plaintext cluster secret value (not recommended for production)
56 | value: ""
57 | # -- Reference to an existing Kubernetes secret containing the cluster secret
58 | valueFrom:
59 | secretKeyRef:
60 | # -- Name of the secret containing the cluster secret
61 | name: ""
62 | # -- Key in the secret that contains the cluster secret value
63 | key: "secret"
64 |
65 | # Specify which zones to run in.
66 | # When set, one replica is created per zone for each shard and `replicasCount` is ignored.
67 | zones: []
68 |
69 | # If enabled, will prevent ClickHouse pods from running on the same node
70 | antiAffinity: false
71 |
72 | # -- Scope for anti-affinity policy when antiAffinity is enabled.
73 | # Determines the level at which pod distribution is enforced.
74 | # Available scopes:
75 | # - ClickHouseInstallation: Pods from the same installation won't run on the same node (default)
76 | # - Shard: Pods from the same shard won't run on the same node
77 | # - Replica: Pods from the same replica won't run on the same node
78 | # - Cluster: Pods from the same cluster won't run on the same node
79 | # - Namespace: Pods from the same namespace won't run on the same node
80 | # @default -- ClickHouseInstallation
81 | antiAffinityScope: "ClickHouseInstallation"
82 |
83 | # -- Keeper connection settings for ClickHouse instances.
84 | keeper:
85 | # -- Specify a keeper host.
86 | # Should be left empty if `clickhouse-keeper.enabled` is `true`.
87 | # Will override the defaults set from `clickhouse-keeper.enabled`.
88 | host: ""
89 | # -- Override the default keeper port
90 | port: 2181
91 |
92 | persistence:
93 | # -- enable storage
94 | enabled: true
95 | # -- volume size (per replica)
96 | size: 10Gi
97 | accessMode: ReadWriteOnce
98 | ## -- StorageClass for PV (e.g. gp2, standard, ...)
99 | storageClass: ""
100 | logs:
101 | # -- enable pvc for logs
102 | enabled: false
103 | # -- size for logs pvc
104 | size: 10Gi
105 | accessMode: ReadWriteOnce
106 |
107 | image:
108 | repository: altinity/clickhouse-server
109 | pullPolicy: IfNotPresent
110 | # -- Override the image tag for a specific version
111 | tag: "25.3.6.10034.altinitystable"
112 |
113 | service:
114 | # e.g. `LoadBalancer`, `NodePort`
115 | type: ClusterIP
116 | serviceAnnotations: {}
117 | serviceLabels: {}
118 |
119 | lbService:
120 | enabled: false
121 | # -- Specify source IP ranges to the LoadBalancer service.
122 | # If supported by the platform, this will restrict traffic through the cloud-provider load-balancer
123 | # to the specified client IPs. This is ignored if the cloud-provider does not support the feature.
124 | loadBalancerSourceRanges: []
125 | serviceAnnotations: {}
126 | serviceLabels: {}
127 |
128 | # @ignore
129 | imagePullSecrets: []
130 |
131 | serviceAccount:
132 | # -- Specifies whether a service account should be created
133 | create: false
134 | # -- Annotations to add to the service account
135 | annotations: {}
136 | # -- The name of the service account to use.
137 | # If not set and create is true, a name is generated using the fullname template
138 | name: ""
139 |
140 | podAnnotations: {}
141 | podLabels: {}
142 |
143 | # @ignore
144 | podSecurityContext: {}
145 | # runAsUser: 101
146 | # runAsGroup: 101
147 |
148 | # @ignore
149 | securityContext: {}
150 |
151 | # @ignore
152 | nodeSelector: {}
153 |
154 | # @ignore
155 | tolerations: []
156 |
157 | # @ignore
158 | affinity: {}
159 |
160 | # @ignore
161 | topologySpreadConstraints: []
162 |
163 |
164 | # -- Additional ports to expose in the ClickHouse container
165 | # Example:
166 | # extraPorts:
167 | # - name: custom-port
168 | # containerPort: 8080
169 | extraPorts: []
170 |
172 | # -- Miscellaneous config for ClickHouse (in xml format)
172 | extraConfig: |
173 |
174 | <clickhouse>
175 | </clickhouse>
176 | extraUsers: |
177 |
178 | <clickhouse>
179 | </clickhouse>
180 | # -- Extra containers for clickhouse pods
181 | extraContainers: []
182 |
183 | # -- Extra volumes for clickhouse pods
184 | extraVolumes: []
185 |
186 | # -- Init scripts ConfigMap configuration
187 | initScripts:
188 | # -- Set to true to enable init scripts feature
189 | enabled: false
190 | # -- Name of an existing ConfigMap containing init scripts
191 | # The scripts will be mounted at /docker-entrypoint-initdb.d/
192 | configMapName: ""
193 | # -- Set to true to always run init scripts on container startup
194 | alwaysRun: true
195 |
196 | # CHK parameters
197 | keeper:
198 | # -- Whether to enable Keeper.
199 | # Required for replicated tables.
200 | enabled: false
201 | # -- Number of keeper replicas.
202 | # Must be an odd number.
203 | # !! DO NOT CHANGE AFTER INITIAL DEPLOYMENT
204 | replicaCount: 3
205 | image: "altinity/clickhouse-keeper"
206 | tag: "25.3.6.10034.altinitystable"
207 | # Configure multiple aspects and behaviors of the `clickhouse-keeper`
208 | # instance.
209 | settings: {}
210 | localStorage:
211 | size: 5Gi
212 | storageClass: ""
213 | nodeSelector: {}
214 | tolerations: []
215 | podAnnotations: {}
216 | volumeClaimAnnotations: {}
217 | # topologySpreadConstraints over `zone`; by default there is only
218 | # podAntiAffinity by hostname
219 | zoneSpread: false
220 | metricsPort: ""
221 | resources:
222 | cpuRequestsMs: 100
223 | memoryRequestsMiB: 512Mi
224 | cpuLimitsMs: 500
225 | memoryLimitsMiB: 1Gi
226 |
227 | operator:
228 | # -- Whether to enable the Altinity Operator for ClickHouse.
229 | # Disable if you already have the Operator installed cluster-wide.
230 | enabled: true
231 |
232 |
--------------------------------------------------------------------------------
/charts/clickhouse/templates/chi.yaml:
--------------------------------------------------------------------------------
1 | {{- $service_name := tpl (include "clickhouse.serviceTemplateName" . ) . -}}
2 | ---
3 | apiVersion: "clickhouse.altinity.com/v1"
4 | kind: ClickHouseInstallation
5 | metadata:
6 | name: {{ include "clickhouse.fullname" . }}
7 | labels:
8 | {{- include "clickhouse.labels" . | nindent 4 }}
9 | spec:
10 | defaults:
11 | templates:
12 | serviceTemplate: {{ $service_name }}
13 | {{- if .Values.clickhouse.lbService.enabled }}
14 | clusterServiceTemplate: {{ $service_name }}-lb
15 | {{- end }}
16 | podTemplate: {{ include "clickhouse.podTemplateName" . }}
17 | {{- if .Values.clickhouse.persistence.enabled }}
18 | dataVolumeClaimTemplate: {{ include "clickhouse.volumeClaimTemplateName" . }}
19 | {{- end }}
20 | {{- if .Values.clickhouse.persistence.logs.enabled }}
21 | logVolumeClaimTemplate: {{ include "clickhouse.logsVolumeClaimTemplateName" . }}
22 | {{- end }}
23 | templates:
24 | podTemplates:
25 | - name: {{ include "clickhouse.podTemplateName" . }}
26 | {{ include "clickhouse.podTemplateBase" . }}
27 | {{- if not (empty .Values.clickhouse.zones) -}}
28 | {{- $originalContext := . -}}
29 | {{- range .Values.clickhouse.zones }}
30 | - name: {{ include "clickhouse.podTemplateName" $originalContext }}-{{ . }}
31 | {{ include "clickhouse.podTemplateBase" $originalContext }}
32 | zone:
33 | values:
34 | - {{ . }}
35 | {{- end }}
36 | {{- end }}
37 | serviceTemplates:
38 | - name: "{{ $service_name }}"
39 | metadata:
40 | {{- with .Values.clickhouse.service.serviceAnnotations }}
41 | annotations:
42 | {{- toYaml . | nindent 12 }}
43 | {{- end }}
44 | labels:
45 | {{- include "clickhouse.labels" . | nindent 12 }}
46 | {{- with .Values.clickhouse.service.serviceLabels }}
47 | {{- toYaml . | nindent 12 }}
48 | {{- end }}
49 | spec:
50 | type: {{ .Values.clickhouse.service.type }}
51 | ports:
52 | - name: http
53 | port: 8123
54 | targetPort: 8123
55 | - name: tcp
56 | port: 9000
57 | targetPort: 9000
58 | selector:
59 | {{- include "clickhouse.selectorLabels" . | nindent 12 }}
60 | {{- if .Values.clickhouse.lbService.enabled }}
61 | - name: "{{ $service_name }}-lb"
62 | metadata:
63 | labels:
64 | {{- include "clickhouse.labels" . | nindent 12 }}
65 | {{- with .Values.clickhouse.lbService.serviceLabels }}
66 | {{- toYaml . | nindent 12 }}
67 | {{- end }}
68 | {{- with .Values.clickhouse.lbService.serviceAnnotations }}
69 | annotations:
70 | {{- toYaml . | nindent 12 }}
71 | {{- end }}
72 | spec:
73 | type: "LoadBalancer"
74 | {{- if .Values.clickhouse.lbService.loadBalancerSourceRanges }}
75 | loadBalancerSourceRanges:
76 | {{- toYaml .Values.clickhouse.lbService.loadBalancerSourceRanges | nindent 12 }}
77 | {{- end }}
78 | ports:
79 | - name: http
80 | port: 8123
81 | targetPort: 8123
82 | - name: tcp
83 | port: 9000
84 | targetPort: 9000
85 | selector:
86 | {{- include "clickhouse.selectorLabels" . | nindent 12 }}
87 | {{- end }}
88 | {{- if or .Values.clickhouse.persistence.enabled .Values.clickhouse.persistence.logs.enabled }}
89 | volumeClaimTemplates:
90 | {{- if .Values.clickhouse.persistence.enabled }}
91 | - name: {{ include "clickhouse.volumeClaimTemplateName" . }}
92 | reclaimPolicy: Retain
93 | spec:
94 | {{- with .Values.clickhouse.persistence.accessMode }}
95 | accessModes:
96 | - {{ . }}
97 | {{- end }}
98 | {{- with .Values.clickhouse.persistence.storageClass }}
99 | storageClassName: {{ . }}
100 | {{- end }}
101 | resources:
102 | requests:
103 | storage: {{ .Values.clickhouse.persistence.size }}
104 | {{- end }}
105 | {{- if .Values.clickhouse.persistence.logs.enabled }}
106 | - name: {{ include "clickhouse.logsVolumeClaimTemplateName" . }}
107 | reclaimPolicy: Retain
108 | spec:
109 | {{- with .Values.clickhouse.persistence.logs.accessMode }}
110 | accessModes:
111 | - {{ . }}
112 | {{- end }}
113 | {{- with .Values.clickhouse.persistence.logs.storageClass }}
114 | storageClassName: {{ . }}
115 | {{- end }}
116 | resources:
117 | requests:
118 | storage: {{ .Values.clickhouse.persistence.logs.size }}
119 | {{- end }}
120 | {{- end }}
121 | configuration:
122 | users:
123 | default/networks/ip: {{ include "clickhouse.defaultUser.ip" . | quote }}
124 | default/access_management: 1
125 | default/named_collection_control: 1
126 | default/show_named_collections: 1
127 | default/show_named_collections_secrets: 1
128 | default/password:
129 | valueFrom:
130 | secretKeyRef:
131 | name: {{ .Values.clickhouse.defaultUser.password_secret_name | default (include "clickhouse.credentialsName" .) | quote }}
132 | key: password
133 | {{- range .Values.clickhouse.users }}
134 | {{ required "A user must have a name" .name }}/access_management: {{ .accessManagement | default 0}}
135 | {{- if kindIs "slice" .hostIP }}
136 | {{ .name }}/networks/ip:
137 | {{- range .hostIP }}
138 | - {{ . | quote }}
139 | {{- end }}
140 | {{- else }}
141 | {{ .name }}/networks/ip: {{ .hostIP | default "0.0.0.0/0" | quote }}
142 | {{- end }}
143 | {{- if .grants }}
144 | {{ .name }}/grants/query:
145 | {{- range .grants }}
146 | - {{ . | quote}}
147 | {{- end }}
148 | {{- end }}
149 | {{- if .password_secret_name }}
150 | {{ .name }}/password:
151 | valueFrom:
152 | secretKeyRef:
153 | name: {{ .password_secret_name | quote }}
154 | key: password
155 | {{- else if .password_sha256_hex }}
156 | {{ .name }}/password_sha256_hex: {{ .password_sha256_hex | quote }}
157 | {{- end }}
158 | {{- end }}
159 | clusters:
160 | - name: {{ include "clickhouse.clustername" . }}
161 | {{- if .Values.clickhouse.clusterSecret.enabled }}
162 | {{- if .Values.clickhouse.clusterSecret.secure }}
163 | secure: "yes"
164 | {{- end }}
165 | secret:
166 | {{- if .Values.clickhouse.clusterSecret.auto }}
167 | auto: "true"
168 | {{- else if .Values.clickhouse.clusterSecret.value }}
169 | value: {{ .Values.clickhouse.clusterSecret.value | quote }}
170 | {{- else if .Values.clickhouse.clusterSecret.valueFrom.secretKeyRef.name }}
171 | valueFrom:
172 | secretKeyRef:
173 | name: {{ .Values.clickhouse.clusterSecret.valueFrom.secretKeyRef.name | quote }}
174 | key: {{ .Values.clickhouse.clusterSecret.valueFrom.secretKeyRef.key | quote }}
175 | {{- end }}
176 | {{- end }}
177 | layout:
178 | {{- if (empty .Values.clickhouse.zones) }}
179 | shardsCount: {{ .Values.clickhouse.shardsCount | default 1 }}
180 | replicasCount: {{ .Values.clickhouse.replicasCount | default 1 }}
181 | {{- else }}
182 | shards:
183 | {{- $originalContext := . -}}
184 | {{- $shardsCount := .Values.clickhouse.shardsCount | default 1 | int -}}
185 | {{- range $shardIndex := until $shardsCount }}
186 | - name: shard{{ $shardIndex }}
187 | replicas:
188 | {{- range $zone := $originalContext.Values.clickhouse.zones }}
189 | - templates:
190 | podTemplate: {{ include "clickhouse.podTemplateName" $originalContext }}-{{ $zone }}
191 | {{- end -}}
192 | {{- end -}}
193 | {{- end -}}
194 | {{- $keeper_host := tpl (include "clickhouse.keeper.host" . ) . -}}
195 | {{- if not (empty $keeper_host) }}
196 | zookeeper:
197 | nodes:
198 | - host: {{ $keeper_host }}
199 | port: {{ .Values.clickhouse.keeper.port }}
200 | {{- end }}
201 | {{- $extraConfig := tpl (include "clickhouse.extraConfig" . ) . -}}
202 | {{- $extraUsers := tpl (include "clickhouse.extraUsers" . ) . -}}
203 | {{- if not (and (empty $extraConfig) (empty $extraUsers)) }}
204 | files:
205 | {{- if not (empty $extraConfig) }}
206 | config.d/extra_config.xml: |
207 | {{- tpl $extraConfig . | nindent 10 }}
208 | {{- end }}
209 | {{- if not (empty $extraUsers) }}
210 | users.d/extra_users.xml: |
211 | {{- tpl $extraUsers . | nindent 10 }}
212 | {{- end }}
213 | {{- end }}
214 | {{- if not (empty .Values.namespaceDomainPattern) }}
215 | namespaceDomainPattern: {{ .Values.namespaceDomainPattern | quote }}
216 | {{- end }}
217 |
218 | {{ include "validate.clickhouse.keeper" . }}
219 |
--------------------------------------------------------------------------------
/charts/clickhouse/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Validations
3 | */}}
4 | {{- define "validate.clickhouse.keeper" -}}
5 | {{- if and (or (gt (.Values.clickhouse.replicasCount | int) 1) (not (empty .Values.clickhouse.zones))) (not (or .Values.keeper.enabled .Values.clickhouse.keeper.host)) }}
6 | {{- fail "When 'clickhouse.replicasCount' > 1, either 'keeper.enabled' must be true or 'clickhouse.keeper.host' must be set." }}
7 | {{- end -}}
8 | {{- end -}}
9 |
10 | {{/*
11 | Expand the name of the chart.
12 | */}}
13 | {{- define "clickhouse.name" -}}
14 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
15 | {{- end }}
16 |
17 | {{/*
18 | Create a default fully qualified app name.
19 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
20 | If release name contains chart name it will be used as a full name.
21 | */}}
22 | {{- define "clickhouse.fullname" -}}
23 | {{- if .Values.fullnameOverride }}
24 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
25 | {{- else }}
26 | {{- $name := default .Chart.Name .Values.nameOverride }}
27 | {{- if contains $name .Release.Name }}
28 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
29 | {{- else }}
30 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
31 | {{- end }}
32 | {{- end }}
33 | {{- end }}
34 |
35 | {{- define "clickhouse.version" -}}
36 | {{ .Values.clickhouse.image.repository }}:{{ .Values.clickhouse.image.tag | default .Chart.AppVersion }}
37 | {{- end }}
38 |
39 | {{/*
40 | Create chart name and version as used by the chart label.
41 | */}}
42 | {{- define "clickhouse.chart" -}}
43 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
44 | {{- end }}
45 |
46 | {{/*
47 | Cluster Name
48 | */}}
49 | {{- define "clickhouse.clustername" -}}
50 | {{- printf "%s" .Release.Name | replace "+" "_" | trunc 15 | trimSuffix "-" }}
51 | {{- end }}
52 |
53 | {{/*
54 | Pod Distribution
55 | */}}
56 | {{- define "clickhouse.podDistribution" -}}
57 | {{- if .Values.clickhouse.antiAffinity -}}
58 | - type: ClickHouseAntiAffinity
59 | scope: {{ .Values.clickhouse.antiAffinityScope | default "ClickHouseInstallation" }}
60 | {{- else -}}
61 | []
62 | {{- end }}
63 | {{- end }}
64 |
65 | {{/*
66 | Pod Template Base
67 | */}}
68 | {{- define "clickhouse.podTemplateBase" }}
69 | metadata:
70 | {{- with .Values.clickhouse.podAnnotations }}
71 | annotations:
72 | {{- toYaml . | nindent 12 }}
73 | {{- end }}
74 | labels:
75 | {{- include "clickhouse.labels" . | nindent 12 }}
76 | {{- with .Values.clickhouse.podLabels }}
77 | {{- toYaml . | nindent 12 }}
78 | {{- end }}
79 | podDistribution:
80 | {{- include "clickhouse.podDistribution" . | nindent 10 }}
81 | spec:
82 | {{- with .Values.clickhouse.imagePullSecrets }}
83 | imagePullSecrets:
84 | {{- toYaml . | nindent 12 }}
85 | {{- end }}
86 | {{- if or .Values.clickhouse.serviceAccount.create .Values.clickhouse.serviceAccount.name }}
87 | serviceAccountName: {{ include "clickhouse.serviceAccountName" . }}
88 | {{- end }}
89 | securityContext:
90 | {{- toYaml .Values.clickhouse.podSecurityContext | nindent 12 }}
91 | containers:
92 | - name: {{ .Chart.Name }}
93 | securityContext:
94 | {{- toYaml .Values.clickhouse.securityContext | nindent 16 }}
95 | image: "{{ .Values.clickhouse.image.repository }}:{{ .Values.clickhouse.image.tag | default .Chart.AppVersion }}"
96 | imagePullPolicy: {{ .Values.clickhouse.image.pullPolicy }}
97 | ports:
98 | - name: http
99 | containerPort: 8123
100 | - name: client
101 | containerPort: 9000
102 | - name: interserver
103 | containerPort: 9009
104 | {{- if .Values.clickhouse.extraPorts }}
105 | {{- toYaml .Values.clickhouse.extraPorts | nindent 16 }}
106 | {{- end }}
107 | {{- with .Values.clickhouse.livenessProbe }}
108 | livenessProbe:
109 | {{- toYaml . | nindent 16 }}
110 | {{- end }}
111 | {{- if .Values.clickhouse.initScripts.enabled }}
112 | env:
113 | {{- if .Values.clickhouse.initScripts.alwaysRun }}
114 | - name: CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS
115 | value: "true"
116 | {{- end }}
117 | volumeMounts:
118 | - name: init-scripts-configmap
119 | mountPath: /docker-entrypoint-initdb.d
120 | {{- end }}
121 | resources:
122 | {{- toYaml .Values.clickhouse.resources | nindent 16 }}
123 | {{- with .Values.clickhouse.extraContainers }}
124 | {{- toYaml . | nindent 12 }}
125 | {{- end }}
126 | {{- if or .Values.clickhouse.initScripts.enabled .Values.clickhouse.extraVolumes }}
127 | volumes:
128 | {{- if .Values.clickhouse.initScripts.enabled }}
129 | - name: init-scripts-configmap
130 | configMap:
131 | name: {{ .Values.clickhouse.initScripts.configMapName }}
132 | {{- end }}
133 | {{- with .Values.clickhouse.extraVolumes }}
134 | {{- toYaml . | nindent 12 }}
135 | {{- end }}
136 | {{- end }}
137 | {{- with .Values.clickhouse.nodeSelector }}
138 | nodeSelector:
139 | {{- toYaml . | nindent 12 }}
140 | {{- end }}
141 | {{- with .Values.clickhouse.affinity }}
142 | affinity:
143 | {{- toYaml . | nindent 12 }}
144 | {{- end }}
145 | {{- with .Values.clickhouse.tolerations }}
146 | tolerations:
147 | {{- toYaml . | nindent 12 }}
148 | {{- end }}
149 | {{- with .Values.clickhouse.topologySpreadConstraints }}
150 | topologySpreadConstraints:
151 | {{- toYaml . | nindent 12 }}
152 | {{- end }}
153 | {{- end -}}
154 |
155 | {{/*
156 | Pod Template Name
157 | */}}
158 | {{- define "clickhouse.podTemplateName" -}}
159 | {{- $podDescString := printf "%s-%s" (include "clickhouse.fullname" .) (include "clickhouse.version" .) }}
160 | {{- $podHash := $podDescString | sha256sum | trunc 8 }}
161 | {{- printf "%s-pod-%s" (include "clickhouse.fullname" .) $podHash | replace "+" "_" | trunc 63 | trimSuffix "-" }}
162 | {{- end }}
163 |
164 | {{/*
165 | Service Template Name
166 | */}}
167 | {{- define "clickhouse.serviceTemplateName" -}}
168 | {{- printf "%s-service" (include "clickhouse.fullname" .) | replace "+" "_" | trunc 63 | trimSuffix "-" }}
169 | {{- end }}
170 |
171 | {{/*
172 | Data Volume Claim Template Name
173 | */}}
174 | {{- define "clickhouse.volumeClaimTemplateName" -}}
175 | {{- printf "%s-data" (include "clickhouse.fullname" .) | replace "+" "_" | trunc 63 | trimSuffix "-" }}
176 | {{- end }}
177 |
178 | {{/*
179 | Logs Volume Claim Template Name
180 | */}}
181 | {{- define "clickhouse.logsVolumeClaimTemplateName" -}}
182 | {{- printf "%s-logs" (include "clickhouse.fullname" .) | replace "+" "_" | trunc 63 | trimSuffix "-" }}
183 | {{- end }}
184 |
185 | {{/*
186 | User Credentials Name
187 | */}}
188 | {{- define "clickhouse.credentialsName" -}}
189 | {{- $fullname := include "clickhouse.fullname" . | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
190 | {{- printf "%s-credentials" $fullname -}}
191 | {{- end }}
192 |
193 | {{/*
194 | User Host IP
195 | */}}
196 | {{- define "clickhouse.defaultUser.ip" -}}
197 | {{- if .Values.clickhouse.defaultUser.allowExternalAccess -}}
198 | 0.0.0.0/0
199 | {{- else -}}
200 | {{- if .Values.clickhouse.defaultUser.hostIP -}}
201 | {{ .Values.clickhouse.defaultUser.hostIP }}
202 | {{- else -}}
203 | 127.0.0.1/32
204 | {{- end -}}
205 | {{- end -}}
206 | {{- end -}}
207 |
208 | {{/*
209 | Keeper Host
210 | */}}
211 | {{- define "clickhouse.keeper.host" -}}
212 | {{- if not (empty .Values.clickhouse.keeper.host) -}}
213 | {{ .Values.clickhouse.keeper.host }}
214 | {{- else -}}
215 | {{- if .Values.keeper.enabled -}}
216 | {{- printf "keeper-%s" (include "clickhouse.fullname" .) | replace "+" "_" | trunc 63 | trimSuffix "-" }}
217 | {{- else -}}
218 | {{- end -}}
219 | {{- end -}}
220 | {{- end -}}
221 |
222 | {{/*
223 | Extra Config
224 | */}}
225 | {{- define "clickhouse.extraConfig" -}}
226 | {{- if not (empty .Values.clickhouse.extraConfig) -}}
227 | {{ .Values.clickhouse.extraConfig }}
228 | {{- else -}}
229 | {{- end -}}
230 | {{- end -}}
231 | {{/*
232 | Extra Users
233 | */}}
234 | {{- define "clickhouse.extraUsers" -}}
235 | {{- if not (empty .Values.clickhouse.extraUsers) -}}
236 | {{ .Values.clickhouse.extraUsers }}
237 | {{- else -}}
238 | {{- end -}}
239 | {{- end -}}
240 | {{/*
241 | Common labels
242 | */}}
243 | {{- define "clickhouse.labels" -}}
244 | helm.sh/chart: {{ include "clickhouse.chart" . }}
245 | {{ include "clickhouse.selectorLabels" . }}
246 | {{- if .Chart.AppVersion }}
247 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
248 | {{- end }}
249 | app.kubernetes.io/managed-by: {{ .Release.Service }}
250 | {{- end }}
251 |
252 | {{/*
253 | Selector labels
254 | */}}
255 | {{- define "clickhouse.selectorLabels" -}}
256 | app.kubernetes.io/name: {{ include "clickhouse.name" . }}
257 | app.kubernetes.io/instance: {{ .Release.Name }}
258 | {{- end }}
259 |
260 | {{/*
261 | Create the name of the service account to use
262 | */}}
263 | {{- define "clickhouse.serviceAccountName" -}}
264 | {{- if .Values.clickhouse.serviceAccount.create }}
265 | {{- default (include "clickhouse.fullname" .) .Values.clickhouse.serviceAccount.name }}
266 | {{- else }}
267 | {{- default "default" .Values.clickhouse.serviceAccount.name }}
268 | {{- end }}
269 | {{- end }}
270 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2024 Altinity, Inc
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/tests/requirements/helm.md:
--------------------------------------------------------------------------------
1 | # QA-SRS Altinity.Cloud Anywhere Helm Charts
2 |
3 | ---
4 |
5 | # Software Requirements Specification for Helm
6 |
7 | ---
8 |
9 |
10 | ## Table of Contents
11 |
12 | * 1 [Introduction](#introduction)
13 | * 2 [RQ.SRS.Helm](#rqsrshelm)
14 | * 3 [Helm Chart Example](#helm-chart-example)
15 | * 3.1 [RQ.SRS.Helm.Chart.Values](#rqsrshelmchartvalues)
16 | * 4 [Chart Metadata](#chart-metadata)
17 | * 4.1 [Name Override](#name-override)
18 | * 4.1.1 [RQ.SRS.Helm.Metadata.NameOverride](#rqsrshelmmetadatanameoverride)
19 | * 4.2 [Fullname Override](#fullname-override)
20 | * 4.2.1 [RQ.SRS.Helm.Metadata.FullnameOverride](#rqsrshelmmetadatafullnameoverride)
21 | * 4.3 [Namespace Domain Pattern](#namespace-domain-pattern)
22 | * 4.3.1 [RQ.SRS.Helm.Metadata.NamespaceDomainPattern](#rqsrshelmmetadatanamespacedomainpattern)
23 | * 5 [ClickHouse Configuration](#clickhouse-configuration)
24 | * 5.1 [Default User](#default-user)
25 | * 5.1.1 [RQ.SRS.Helm.ClickHouse.DefaultUser](#rqsrshelmclickhousedefaultuser)
26 | * 5.2 [Users](#users)
27 | * 5.2.1 [RQ.SRS.Helm.ClickHouse.Users](#rqsrshelmclickhouseusers)
28 | * 5.3 [Replicas Count](#replicas-count)
29 | * 5.3.1 [RQ.SRS.Helm.ClickHouse.ReplicasCount](#rqsrshelmclickhousereplicascount)
30 | * 5.4 [Shards Count](#shards-count)
31 | * 5.4.1 [RQ.SRS.Helm.ClickHouse.ShardsCount](#rqsrshelmclickhouseshardscount)
32 | * 5.5 [Zones](#zones)
33 | * 5.5.1 [RQ.SRS.Helm.ClickHouse.Zones](#rqsrshelmclickhousezones)
34 | * 5.6 [Anti Affinity](#anti-affinity)
35 | * 5.6.1 [RQ.SRS.Helm.ClickHouse.AntiAffinity](#rqsrshelmclickhouseantiaffinity)
36 | * 5.7 [Keeper](#keeper)
37 | * 5.7.1 [RQ.SRS.Helm.ClickHouse.Keeper](#rqsrshelmclickhousekeeper)
38 | * 5.8 [Persistence](#persistence)
39 | * 5.8.1 [RQ.SRS.Helm.ClickHouse.Persistence](#rqsrshelmclickhousepersistence)
40 | * 5.9 [ClickHouse Image](#clickhouse-image)
41 | * 5.9.1 [RQ.SRS.Helm.ClickHouse.Image](#rqsrshelmclickhouseimage)
42 | * 5.10 [Service](#service)
43 | * 5.10.1 [RQ.SRS.Helm.ClickHouse.Service](#rqsrshelmclickhouseservice)
44 | * 5.11 [Load Balancer Service](#load-balancer-service)
45 | * 5.11.1 [RQ.SRS.Helm.ClickHouse.LbService](#rqsrshelmclickhouselbservice)
46 | * 5.12 [Pod Settings](#pod-settings)
47 | * 5.12.1 [RQ.SRS.Helm.ClickHouse.PodSettings](#rqsrshelmclickhousepodsettings)
48 | * 5.13 [Extra Config](#extra-config)
49 | * 5.13.1 [RQ.SRS.Helm.ClickHouse.ExtraConfig](#rqsrshelmclickhouseextraconfig)
50 | * 5.14 [Init Scripts](#init-scripts)
51 | * 5.14.1 [RQ.SRS.Helm.ClickHouse.InitScripts](#rqsrshelmclickhouseinitscripts)
52 | * 6 [Keeper Configuration](#keeper-configuration)
53 | * 6.1 [Keeper Enabled](#keeper-enabled)
54 | * 6.1.1 [RQ.SRS.Helm.Keeper.Enabled](#rqsrshelmkeeperenabled)
55 | * 6.2 [Replica Count](#replica-count)
56 | * 6.2.1 [RQ.SRS.Helm.Keeper.ReplicaCount](#rqsrshelmkeeperreplicacount)
57 | * 6.3 [Keeper Image](#keeper-image)
58 | * 6.3.1 [RQ.SRS.Helm.Keeper.Image](#rqsrshelmkeeperimage)
59 | * 6.4 [Storage](#storage)
60 | * 6.4.1 [RQ.SRS.Helm.Keeper.Storage](#rqsrshelmkeeperstorage)
61 | * 6.5 [Resources](#resources)
62 | * 6.5.1 [RQ.SRS.Helm.Keeper.Resources](#rqsrshelmkeeperresources)
63 | * 7 [Operator Configuration](#operator-configuration)
64 | * 7.1 [Operator Enabled](#operator-enabled)
65 | * 7.1.1 [RQ.SRS.Helm.Operator.Enabled](#rqsrshelmoperatorenabled)
66 | * 8 [Terminology](#terminology)
67 | * 9 [Helm Chart](#helm-chart)
68 | * 10 [Values.yaml](#valuesyaml)
69 | * 11 [Release](#release)
70 | * 12 [CRD](#crd)
71 | * 13 [PVC](#pvc)
72 | * 14 [Pod Anti-Affinity](#pod-anti-affinity)
73 |
74 | ## Introduction
75 |
76 | Altinity.Cloud Anywhere lets you take advantage of Altinity’s zero-maintenance ClickHouse SaaS platform in your own
77 | Kubernetes cluster. Customers bring their own Kubernetes (BYOK) environments, and Altinity deploys ClickHouse clusters on
78 | top of them using Helm charts.
79 |
80 | This specification describes requirements related to using Helm for deploying and configuring Altinity.Cloud Anywhere
81 | in the customer’s infrastructure.
82 |
83 | ---
84 |
85 |
86 |
87 | ## RQ.SRS.Helm
88 | version: 1.0
89 |
90 | The [Helm Chart] SHALL allow users to deploy and configure ClickHouse environments in [Kubernetes] clusters.
91 |
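For example, the chart can be installed from the Altinity Helm repository (repository URL and install commands as shown in the chart README):

```sh
# add the Altinity chart repository and install the chart into its own namespace
helm repo add altinity https://helm.altinity.com
helm install release-name altinity/clickhouse --namespace clickhouse --create-namespace
```
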
92 | ## Helm Chart Example
93 |
94 | ### RQ.SRS.Helm.Chart.Values
95 | version: 1.0
96 |
97 | The Helm chart SHALL provide a `values.yaml` file where users define their desired environment configuration.
98 |
99 | ```yaml
100 | clickhouse:
101 | replicasCount: 3
102 | shardsCount: 2
103 | antiAffinity: true
104 | persistence:
105 | enabled: true
106 | size: 100Gi
107 | service:
108 | type: ClusterIP
109 | ```
110 |
111 | ---
112 |
113 | ## Chart Metadata
114 |
115 | ### Name Override
116 |
117 | #### RQ.SRS.Helm.Metadata.NameOverride
118 | version: 1.0
119 |
120 | The `values.yaml` SHALL support `nameOverride` to override the chart name.
121 |
122 | ```yaml
123 | nameOverride: "custom-clickhouse"
124 | ```
125 |
126 | If invalid characters are used (e.g., spaces, special characters), Helm SHALL raise a template rendering error.
127 |
128 | ### Fullname Override
129 |
130 | #### RQ.SRS.Helm.Metadata.FullnameOverride
131 | version: 1.0
132 |
133 | The `values.yaml` SHALL support `fullnameOverride` to override the full release name.
134 |
135 | ```yaml
136 | fullnameOverride: "acme-clickhouse-prod"
137 | ```
138 |
139 | ### Namespace Domain Pattern
140 |
141 | #### RQ.SRS.Helm.Metadata.NamespaceDomainPattern
142 | version: 1.0
143 |
144 | The `values.yaml` SHALL support `namespaceDomainPattern` for specifying a custom Kubernetes cluster domain.
145 |
146 | ```yaml
147 | namespaceDomainPattern: "acme.k8s.cluster.local"
148 | ```
149 |
150 | If empty, the default `cluster.local` SHALL be used.
151 |
152 | ---
153 |
154 | ## ClickHouse Configuration
155 |
156 | ### Default User
157 |
158 | #### RQ.SRS.Helm.ClickHouse.DefaultUser
159 | version: 1.0
160 |
161 | The chart SHALL configure a default ClickHouse user.
162 |
163 | ```yaml
164 | clickhouse:
165 | defaultUser:
166 | password: "SuperSecret"
167 | allowExternalAccess: true
168 | hostIP: "0.0.0.0/0"
169 | ```
170 |
171 | Error Handling:
172 |
173 | * If `password` is empty → Helm SHALL reject with: *"defaultUser.password is required"*.
174 | * If `hostIP` is invalid → Helm SHALL raise an error during CRD validation.
175 |
176 | ### Users
177 |
178 | #### RQ.SRS.Helm.ClickHouse.Users
179 | version: 1.0
180 |
181 | The chart SHALL allow defining additional users.
182 |
183 | ```yaml
184 | clickhouse:
185 | users:
186 | - name: analytics
187 | password_secret_name: analytics-secret
188 | grants:
189 | - "GRANT SELECT ON default.*"
190 | ```
191 |
192 | * `name` MUST match regex `^[a-zA-Z0-9]+$`.
193 | * If invalid → Helm SHALL raise: *"Invalid username format"*.
194 | * Either `password_secret_name` OR `password_sha256_hex` SHALL be required, as illustrated below.
195 |
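An illustrative configuration using the hashed-password variant (the user name and hash below are placeholders, not values taken from the chart):

```yaml
clickhouse:
  users:
    - name: reporting
      password_sha256_hex: "<sha256 hex digest of the password>"
      grants:
        - "GRANT SELECT ON default.*"
```
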
196 | ### Replicas Count
197 |
198 | #### RQ.SRS.Helm.ClickHouse.ReplicasCount
199 | version: 1.0
200 |
201 | The `replicasCount` SHALL define the number of ClickHouse replicas.
202 |
203 | ```yaml
204 | clickhouse:
205 | replicasCount: 3
206 | ```
207 |
208 | * If greater than 1, `keeper.enabled` MUST be `true` or `keeper.host` MUST be provided.
209 |
210 | Error Handling:
211 |
212 | * If `replicasCount > 1` but Keeper not enabled → Helm SHALL raise: *"Keeper required for replicasCount > 1"*.
213 |
214 | ### Shards Count
215 |
216 | #### RQ.SRS.Helm.ClickHouse.ShardsCount
217 | version: 1.0
218 |
219 | The `shardsCount` SHALL define the number of shards.
220 |
221 | ```yaml
222 | clickhouse:
223 | shardsCount: 2
224 | ```
225 |
226 | If set to 0 → Helm SHALL raise: *"shardsCount must be at least 1"*.
227 |
228 | ### Zones
229 |
230 | #### RQ.SRS.Helm.ClickHouse.Zones
231 | version: 1.0
232 |
233 | The `zones` SHALL define the Kubernetes zones for ClickHouse pods.
234 |
235 | ```yaml
236 | clickhouse:
237 | zones: ["zone-a", "zone-b"]
238 | ```
239 |
240 | If the zone list does not match the cluster topology → scheduling SHALL fail.
241 |
242 | ### Anti Affinity
243 |
244 | #### RQ.SRS.Helm.ClickHouse.AntiAffinity
245 | version: 1.0
246 |
247 | The `antiAffinity` flag SHALL enforce pod anti-affinity.
248 |
249 | ```yaml
250 | clickhouse:
251 | antiAffinity: true
252 | ```
253 |
254 | If enabled, ClickHouse pods SHALL not run on the same node.
255 |
256 | ### Keeper
257 |
258 | #### RQ.SRS.Helm.ClickHouse.Keeper
259 | version: 1.0
260 |
261 | The chart SHALL allow external or embedded Keeper.
262 |
263 | ```yaml
264 | clickhouse:
265 | keeper:
266 | host: "keeper-service"
267 | port: 2181
268 | ```
269 |
270 | If `replicasCount > 1` but Keeper is not configured, Helm SHALL raise an error.
271 |
272 | ### Persistence
273 |
274 | #### RQ.SRS.Helm.ClickHouse.Persistence
275 | version: 1.0
276 |
277 | The chart SHALL support persistent volumes.
278 |
279 | ```yaml
280 | clickhouse:
281 | persistence:
282 | enabled: true
283 | size: 100Gi
284 | accessMode: ReadWriteOnce
285 | ```
286 |
287 | Error Handling:
288 |
289 | * If `enabled: true` but `size` missing → Helm SHALL raise: *"persistence.size required"*.
290 |
291 | ### ClickHouse Image
292 |
293 | #### RQ.SRS.Helm.ClickHouse.Image
294 | version: 1.0
295 |
296 | The chart SHALL support custom image repo, tag, and pullPolicy.
297 |
298 | ```yaml
299 | clickhouse:
300 | image:
301 | repository: altinity/clickhouse-server
302 | tag: "24.3"
303 | pullPolicy: IfNotPresent
304 | ```
305 |
306 | ### Service
307 |
308 | #### RQ.SRS.Helm.ClickHouse.Service
309 | version: 1.0
310 |
311 | The chart SHALL configure a Kubernetes service.
312 |
313 | ```yaml
314 | clickhouse:
315 | service:
316 | type: ClusterIP
317 | ```
318 |
319 | If invalid type is specified → Helm SHALL raise: *"Invalid service type"*.
320 |
321 | ### Load Balancer Service
322 |
323 | #### RQ.SRS.Helm.ClickHouse.LbService
324 | version: 1.0
325 |
326 | The chart SHALL support LoadBalancer service.
327 |
328 | ```yaml
329 | clickhouse:
330 | lbService:
331 | enabled: true
332 | loadBalancerSourceRanges: ["0.0.0.0/0"]
333 | ```
334 |
335 | If `enabled: true` without ranges → default SHALL be `0.0.0.0/0`.
336 |
337 | ### Pod Settings
338 |
339 | #### RQ.SRS.Helm.ClickHouse.PodSettings
340 | version: 1.0
341 |
342 | The chart SHALL support pod annotations, labels, security context, tolerations, etc.
343 |
344 | ```yaml
345 | clickhouse:
346 | podLabels:
347 | app: clickhouse
348 | ```
349 |
350 | ### Extra Config
351 |
352 | #### RQ.SRS.Helm.ClickHouse.ExtraConfig
353 | version: 1.0
354 |
355 | The chart SHALL allow XML config overrides.
356 |
357 | ```yaml
358 | clickhouse:
359 | extraConfig: |
360 |     <clickhouse>
361 |       <!-- any valid ClickHouse server setting may be placed here -->
362 |       <max_connections>300</max_connections>
363 |     </clickhouse>
364 |
365 | ```
366 |
367 | ### Init Scripts
368 |
369 | #### RQ.SRS.Helm.ClickHouse.InitScripts
370 | version: 1.0
371 |
372 | The chart SHALL allow init scripts.
373 |
374 | ```yaml
375 | clickhouse:
376 | initScripts:
377 | enabled: true
378 | configMapName: "ch-init-scripts"
379 | ```
380 |
381 | If enabled without configMapName → Helm SHALL raise: *"initScripts.configMapName required"*.
382 |
383 | ---
384 |
385 | ## Keeper Configuration
386 |
387 | ### Keeper Enabled
388 |
389 | #### RQ.SRS.Helm.Keeper.Enabled
390 | version: 1.0
391 |
392 | ```yaml
393 | keeper:
394 | enabled: true
395 | ```
396 |
397 | ### Replica Count
398 |
399 | #### RQ.SRS.Helm.Keeper.ReplicaCount
400 | version: 1.0
401 |
402 | The `replicaCount` SHALL define the number of Keeper replicas and MUST be odd.
403 |
404 | ```yaml
405 | keeper:
406 | replicaCount: 3
407 | ```
408 |
409 | If even → Helm SHALL raise: *"Keeper replicaCount must be odd"*.
410 |
411 | ### Keeper Image
412 |
413 | #### RQ.SRS.Helm.Keeper.Image
414 | version: 1.0
415 |
416 | The chart SHALL allow overriding the Keeper image repository and tag.
417 |
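For example (the repository and tag shown match the chart defaults documented in the chart README):

```yaml
keeper:
  image: "altinity/clickhouse-keeper"
  tag: "25.3.6.10034.altinitystable"
```
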
418 | ### Storage
419 |
420 | #### RQ.SRS.Helm.Keeper.Storage
421 | version: 1.0
422 |
423 | The chart SHALL allow persistent storage for Keeper.
424 |
425 | ```yaml
426 | keeper:
427 | localStorage:
428 | size: 20Gi
429 | storageClass: fast-ssd
430 | ```
431 |
432 | ### Resources
433 |
434 | #### RQ.SRS.Helm.Keeper.Resources
435 | version: 1.0
436 |
437 | The chart SHALL allow CPU/memory requests and limits.
438 |
439 | ```yaml
440 | keeper:
441 | resources:
442 | cpuRequestsMs: 500
443 | memoryRequestsMiB: "512Mi"
444 | ```
445 |
446 | ---
447 |
448 | ## Operator Configuration
449 |
450 | ### Operator Enabled
451 |
452 | #### RQ.SRS.Helm.Operator.Enabled
453 | version: 1.0
454 |
455 | The chart SHALL allow enabling the Altinity Operator.
456 |
457 | ```yaml
458 | operator:
459 | enabled: true
460 | ```
461 |
462 | ---
463 |
464 | ## Terminology
465 |
466 | ## Helm Chart
467 |
468 | A collection of Kubernetes YAML manifests packaged with metadata and configurable values.
469 |
470 | ## Values.yaml
471 |
472 | Configuration file where users define parameters.
473 |
474 | ## Release
475 |
476 | A deployed instance of a Helm chart.
477 |
478 | ## CRD
479 |
480 | Custom Resource Definition – extends Kubernetes API.
481 |
482 | ## PVC
483 |
484 | PersistentVolumeClaim for stateful storage.
485 |
486 | ## Pod Anti-Affinity
487 |
488 | Kubernetes scheduling constraint preventing multiple pods from running on the same node.
489 |
490 | ---
491 |
--------------------------------------------------------------------------------
/tests/steps/users.py:
--------------------------------------------------------------------------------
1 | from tests.steps.system import *
2 | import json
3 | import hashlib
4 | import tests.steps.kubernetes as kubernetes
5 | import tests.steps.clickhouse as clickhouse
6 |
7 |
8 | @TestStep(When)
9 | def get_user_grants(
10 | self,
11 | namespace,
12 | pod_name,
13 | user,
14 | password="",
15 | admin_user="default",
16 | admin_password="",
17 | ):
18 | """Get grants for a specific user."""
19 | query = f"SHOW GRANTS FOR {user} FORMAT JSON"
20 |
21 | try:
22 | result = clickhouse.execute_clickhouse_query(
23 | namespace=namespace,
24 | pod_name=pod_name,
25 | query=query,
26 | user=admin_user,
27 | password=admin_password,
28 | check=False,
29 | )
30 |
31 | if result.returncode == 0 and result.stdout:
32 | data = json.loads(result.stdout)
33 | if data.get("data") and data.get("meta"):
34 | col_name = data["meta"][0]["name"]
35 | return [row[col_name] for row in data["data"]]
36 | except Exception as e:
37 | note(f"Failed to get grants for user {user}: {e}")
38 |
39 | return []
40 |
41 |
42 | @TestStep(When)
43 | def check_user_has_permission(
44 | self, namespace, pod_name, user, password, permission_query
45 | ):
46 | """Check if user can execute a specific query."""
47 | try:
48 | result = clickhouse.execute_clickhouse_query(
49 | namespace=namespace,
50 | pod_name=pod_name,
51 | query=permission_query,
52 | user=user,
53 | password=password,
54 | check=False,
55 | )
56 | return result.returncode == 0
57 |     except Exception:
58 | return False
59 |
60 |
61 | @TestStep(Then)
62 | def verify_user_exists(self, namespace, user_name, admin_password=""):
63 | """Verify that a user exists in ClickHouse."""
64 | clickhouse_pods = clickhouse.get_clickhouse_pods(namespace=namespace)
65 | assert len(clickhouse_pods) > 0, "No ClickHouse pods found"
66 |
67 | pod_name = clickhouse_pods[0]
68 |
69 | query = f"SELECT name FROM system.users WHERE name = '{user_name}'"
70 | result = clickhouse.execute_clickhouse_query(
71 | namespace=namespace,
72 | pod_name=pod_name,
73 | query=query,
74 | user="default",
75 | password=admin_password,
76 | check=False,
77 | )
78 |
79 | assert result.returncode == 0, f"Failed to query system.users"
80 | assert user_name in result.stdout, f"User '{user_name}' not found in system.users"
81 | note(f"✓ User exists: {user_name}")
82 |
83 |
84 | @TestStep(Then)
85 | def verify_user_connectivity(self, namespace, user, password):
86 | """Verify that a user can connect to ClickHouse."""
87 | clickhouse_pods = clickhouse.get_clickhouse_pods(namespace=namespace)
88 | assert len(clickhouse_pods) > 0, "No ClickHouse pods found"
89 |
90 | pod_name = clickhouse_pods[0]
91 |
92 | result = clickhouse.test_clickhouse_connection(
93 | namespace=namespace, pod_name=pod_name, user=user, password=password
94 | )
95 |
96 | assert result, f"Failed to connect to ClickHouse with user '{user}'"
97 | note(f"✓ User connection successful: {user}")
98 |
99 |
100 | @TestStep(Then)
101 | def verify_user_password_hash(
102 | self, namespace, user, expected_hash, plaintext_password, admin_password=""
103 | ):
104 | """Verify that the user's password hash configuration is correct."""
105 | clickhouse_pods = clickhouse.get_clickhouse_pods(namespace=namespace)
106 | assert len(clickhouse_pods) > 0, "No ClickHouse pods found"
107 |
108 | pod_name = clickhouse_pods[0]
109 |
110 | query = (
111 | f"SELECT name, auth_type FROM system.users WHERE name = '{user}' FORMAT JSON"
112 | )
113 | result = clickhouse.execute_clickhouse_query(
114 | namespace=namespace,
115 | pod_name=pod_name,
116 | query=query,
117 | user="default",
118 | password=admin_password,
119 | check=False,
120 | )
121 |
122 | assert result.returncode == 0, f"Failed to query auth type for user '{user}'"
123 |
124 | data = json.loads(result.stdout)
125 | if not data.get("data") or len(data["data"]) == 0:
126 | raise AssertionError(f"User '{user}' not found in system.users")
127 |
128 | user_data = data["data"][0]
129 | auth_types = user_data.get("auth_type", [])
130 |
131 | assert (
132 | "sha256_password" in auth_types
133 | ), f"User '{user}' is not configured with SHA256 authentication. Auth types: {auth_types}"
134 |
135 | computed_hash = hashlib.sha256(plaintext_password.encode("utf-8")).hexdigest()
136 |
137 | assert computed_hash.lower() == expected_hash.lower(), (
138 | f"Computed hash from password doesn't match expected hash for user '{user}'. "
139 | f"Expected: {expected_hash}, Computed: {computed_hash}"
140 | )
141 |
142 | note(f"✓ User '{user}' SHA256 hash verified: {expected_hash[:16]}...")
143 |
144 |
145 | @TestStep(Then)
146 | def verify_user_grants(self, namespace, user, expected_grants, admin_password=""):
147 | """Verify that a user has expected grants."""
148 | clickhouse_pods = clickhouse.get_clickhouse_pods(namespace=namespace)
149 | assert len(clickhouse_pods) > 0, "No ClickHouse pods found"
150 |
151 | pod_name = clickhouse_pods[0]
152 |
153 | actual_grants = get_user_grants(
154 | namespace=namespace,
155 | pod_name=pod_name,
156 | user=user,
157 | admin_user="default",
158 | admin_password=admin_password,
159 | )
160 |
161 | assert (
162 | actual_grants
163 | ), f"Failed to retrieve grants for user '{user}' - check admin privileges or ClickHouse configuration"
164 |
165 | for expected_grant in expected_grants:
166 | expected_normalized = " ".join(expected_grant.split())
167 |
168 | found = False
169 | for actual_grant in actual_grants:
170 | actual_normalized = " ".join(actual_grant.split())
171 | if expected_normalized.lower() in actual_normalized.lower():
172 | found = True
173 | note(f"✓ Grant verified: {expected_grant}")
174 | break
175 |
176 | assert (
177 | found
178 | ), f"Grant '{expected_grant}' not found for user '{user}'. Actual grants: {actual_grants}"
179 |
180 |
181 | @TestStep(Then)
182 | def verify_user_access_management(
183 | self, namespace, user, expected_access_management, admin_password=""
184 | ):
185 | """Verify user's access_management setting from CHI spec."""
186 | chi_info = clickhouse.get_chi_info(namespace=namespace)
187 | assert chi_info is not None, "ClickHouseInstallation not found"
188 |
189 | users_config = chi_info.get("spec", {}).get("configuration", {}).get("users", {})
190 | access_mgmt_key = f"{user}/access_management"
191 |
192 | actual_access_mgmt = users_config.get(access_mgmt_key)
193 |
194 | assert (
195 | actual_access_mgmt is not None
196 | ), f"access_management not configured for user '{user}' in CHI"
197 | assert (
198 | actual_access_mgmt == expected_access_management
199 | ), f"Expected access_management={expected_access_management}, got {actual_access_mgmt}"
200 |
201 | note(f"✓ User '{user}' access_management: {expected_access_management}")
202 |
203 |
204 | @TestStep(Then)
205 | def verify_user_host_ip(self, namespace, user, expected_host_ip):
206 | """Verify user's hostIP network restrictions from CHI spec."""
207 | chi_info = clickhouse.get_chi_info(namespace=namespace)
208 | assert chi_info is not None, "ClickHouseInstallation not found"
209 |
210 | users_config = chi_info.get("spec", {}).get("configuration", {}).get("users", {})
211 | networks_key = f"{user}/networks/ip"
212 |
213 | actual_host_ip = users_config.get(networks_key)
214 |
215 | assert actual_host_ip is not None, f"hostIP not configured for user '{user}' in CHI"
216 |
217 | if isinstance(expected_host_ip, list) and not isinstance(actual_host_ip, list):
218 | expected_host_ip = (
219 | expected_host_ip[0] if len(expected_host_ip) == 1 else expected_host_ip
220 | )
221 |
222 | assert (
223 | actual_host_ip == expected_host_ip
224 | ), f"Expected hostIP={expected_host_ip}, got {actual_host_ip}"
225 |
226 | note(f"✓ User '{user}' hostIP: {actual_host_ip}")
227 |
228 |
229 | @TestStep(Then)
230 | def verify_user_permissions(self, namespace, user, password, permission_tests):
231 | """Verify user has specific permissions by testing queries."""
232 | clickhouse_pods = clickhouse.get_clickhouse_pods(namespace=namespace)
233 | assert len(clickhouse_pods) > 0, "No ClickHouse pods found"
234 |
235 | pod_name = clickhouse_pods[0]
236 |
237 | for description, query in permission_tests.items():
238 | has_permission = check_user_has_permission(
239 | namespace=namespace,
240 | pod_name=pod_name,
241 | user=user,
242 | password=password,
243 | permission_query=query,
244 | )
245 |
246 | if has_permission:
247 | note(f"✓ User '{user}' can: {description}")
248 | else:
249 | note(f"✗ User '{user}' cannot: {description}")
250 |
251 |
252 | @TestStep(Then)
253 | def verify_readonly_user(self, namespace, user, password=""):
254 | """Verify that a user has read-only permissions."""
255 | clickhouse_pods = clickhouse.get_clickhouse_pods(namespace=namespace)
256 | assert len(clickhouse_pods) > 0, "No ClickHouse pods found"
257 |
258 | pod_name = clickhouse_pods[0]
259 |
260 | can_select = check_user_has_permission(
261 | namespace=namespace,
262 | pod_name=pod_name,
263 | user=user,
264 | password=password,
265 | permission_query="SELECT 1 FROM system.tables LIMIT 1",
266 | )
267 |
268 | if can_select:
269 | note(f"✓ User '{user}' can perform SELECT queries")
270 | else:
271 | note(f"⊘ User '{user}' cannot perform SELECT queries (might be expected)")
272 |
273 | can_insert = check_user_has_permission(
274 | namespace=namespace,
275 | pod_name=pod_name,
276 | user=user,
277 | password=password,
278 | permission_query="INSERT INTO system.query_log VALUES ()",
279 | )
280 |
281 | if not can_insert:
282 | note(f"✓ User '{user}' correctly denied INSERT permissions")
283 | else:
284 | note(f"⚠ Warning: User '{user}' has INSERT permissions (expected read-only)")
285 |
286 |
287 | @TestStep(Then)
288 | def verify_all_users(self, namespace, default_user_config=None, users_config=None):
289 | """Comprehensive verification of all user configurations."""
290 | clickhouse_pods = clickhouse.get_clickhouse_pods(namespace=namespace)
291 | if not clickhouse_pods:
292 | note("No ClickHouse pods found, skipping user verification")
293 | return
294 |
295 | pod_name = clickhouse_pods[0]
296 | admin_password = ""
297 |
298 | if default_user_config:
299 | if "password" in default_user_config:
300 | admin_password = default_user_config["password"]
301 | verify_user_connectivity(
302 | namespace=namespace, user="default", password=admin_password
303 | )
304 |
305 | note(f"✓ Default user verified")
306 |
307 | if users_config:
308 | for user_config in users_config:
309 | user_name = user_config.get("name")
310 | if not user_name:
311 | continue
312 |
313 | note(f"Verifying user: {user_name}")
314 |
315 | verify_user_exists(
316 | namespace=namespace, user_name=user_name, admin_password=admin_password
317 | )
318 |
319 | if "password" in user_config:
320 | verify_user_connectivity(
321 | namespace=namespace,
322 | user=user_name,
323 | password=user_config["password"],
324 | )
325 |
326 | if "password_sha256_hex" in user_config:
327 | verify_user_password_hash(
328 | namespace=namespace,
329 | user=user_name,
330 | expected_hash=user_config["password_sha256_hex"],
331 | plaintext_password=user_config["password"],
332 | admin_password=admin_password,
333 | )
334 | elif "password_sha256_hex" in user_config:
335 | note(
336 | f"⊘ User '{user_name}' uses hashed password but no plaintext provided for connectivity test"
337 | )
338 |
339 | if "accessManagement" in user_config:
340 | verify_user_access_management(
341 | namespace=namespace,
342 | user=user_name,
343 | expected_access_management=user_config["accessManagement"],
344 | admin_password=admin_password,
345 | )
346 |
347 | if "hostIP" in user_config:
348 | verify_user_host_ip(
349 | namespace=namespace,
350 | user=user_name,
351 | expected_host_ip=user_config["hostIP"],
352 | )
353 |
354 | if "grants" in user_config and user_config["grants"]:
355 | verify_user_grants(
356 | namespace=namespace,
357 | user=user_name,
358 | expected_grants=user_config["grants"],
359 | admin_password=admin_password,
360 | )
361 |
362 | if "readonly" in user_name.lower() and "password" in user_config:
363 | verify_readonly_user(
364 | namespace=namespace,
365 | user=user_name,
366 | password=user_config["password"],
367 | )
368 |
369 | if "permission_tests" in user_config and "password" in user_config:
370 | verify_user_permissions(
371 | namespace=namespace,
372 | user=user_name,
373 | password=user_config["password"],
374 | permission_tests=user_config["permission_tests"],
375 | )
376 |
377 | note(f"✓ User '{user_name}' verification complete")
378 |
--------------------------------------------------------------------------------
/charts/clickhouse/values.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "http://json-schema.org/draft-07/schema#",
3 | "type": "object",
4 | "properties": {
5 | "nameOverride": {
6 | "type": "string",
7 | "description": "Overrides the name of the chart."
8 | },
9 | "fullnameOverride": {
10 | "type": "string",
11 | "description": "Overrides the full name of the chart."
12 | },
13 | "namespaceDomainPattern": {
14 | "type": "string",
15 | "description": "Custom cluster domain of Kubernetes cluster."
16 | },
17 | "clickhouse": {
18 | "type": "object",
19 | "properties": {
20 | "defaultUser": {
21 | "type": "object",
22 | "properties": {
23 | "password": {
24 | "type": "string",
25 | "description": "Password for the default ClickHouse user."
26 | },
27 | "allowExternalAccess": {
28 | "type": "boolean",
29 | "description": "Allow the default user to access ClickHouse from any IP."
30 | },
31 | "hostIP": {
32 | "type": "string",
33 | "description": "Mask for IPs allowed for the default user.",
34 | "default": "127.0.0.1/32"
35 | }
36 | }
37 | },
38 | "users": {
39 | "type": "array",
40 | "description": "Users to initialize in ClickHouse",
41 | "items": {
42 | "type": "object",
43 | "properties": {
44 | "name": {
45 | "type": "string",
46 | "description": "Name for the user",
47 | "pattern": "^[a-zA-Z0-9]+$"
48 | },
49 | "hostIP": {
50 | "oneOf": [
51 | {
52 | "type": "string",
53 | "description": "Single IP mask allowed for this user."
54 | },
55 | {
56 | "type": "array",
57 | "description": "Multiple IP masks allowed for this user.",
58 | "items": {
59 | "type": "string"
60 | }
61 | }
62 | ],
63 | "description": "Mask for IPs allowed for this user. Can be a string or array of strings.",
64 | "default": "0.0.0.0/0"
65 | },
66 | "password_secret_name": {
67 | "type": "string",
68 | "description": "Name of a Kubernetes secret containing the plaintext password"
69 | },
70 | "password_sha256_hex": {
71 | "type": "string",
72 | "description": "SHA256 of the password (ignored if password_secret_name is passed)"
73 | },
74 | "grants": {
75 | "type": "array",
76 | "description": "List of grants to provide for the user",
77 | "items": {
78 | "type": "string",
79 | "examples": [
80 | "GRANT SELECT ON default.*",
81 | "GRANT SELECT, DELETE ON foo.bar"
82 | ]
83 | }
84 | }
85 | }
86 | }
87 | },
88 | "replicasCount": {
89 | "type": "integer",
90 | "description": "Number of ClickHouse replicas. If greater than 1, Keeper must be enabled or a Keeper host must be provided.",
91 | "default": 1
92 | },
93 | "shardsCount": {
94 | "type": "integer",
95 | "description": "Number of shards.",
96 | "default": 1
97 | },
98 | "zones": {
99 | "type": "array",
100 | "description": "Specify the zones for ClickHouse pods.",
101 | "items": {
102 | "type": "string"
103 | }
104 | },
105 | "antiAffinity": {
106 | "type": "boolean",
107 | "description": "If enabled, prevents ClickHouse pods from running on the same node."
108 | },
109 | "antiAffinityScope": {
110 | "type": "string",
111 | "description": "Scope for anti-affinity policy when antiAffinity is enabled.",
112 | "enum": [
113 | "Shard",
114 | "Replica",
115 | "Cluster",
116 | "ClickHouseInstallation",
117 | "Namespace"
118 | ],
119 | "default": "ClickHouseInstallation"
120 | },
121 | "keeper": {
122 | "type": "object",
123 | "properties": {
124 | "host": {
125 | "type": "string",
126 | "description": "Keeper host, if specified."
127 | },
128 | "port": {
129 | "type": "integer",
130 | "description": "Override the default Keeper port.",
131 | "default": 2181
132 | }
133 | }
134 | },
135 | "persistence": {
136 | "type": "object",
137 | "properties": {
138 | "enabled": {
139 | "type": "boolean",
140 | "description": "Enable storage for ClickHouse."
141 | },
142 | "size": {
143 | "type": "string",
144 | "description": "Volume size for ClickHouse storage."
145 | },
146 | "accessMode": {
147 | "type": "string",
148 | "description": "Access mode for ClickHouse storage."
149 | },
150 | "storageClass": {
151 | "type": "string",
152 | "description": "Storage class for the persistent volume."
153 | },
154 | "logs": {
155 | "type": "object",
156 | "properties": {
157 | "enabled": {
158 | "type": "boolean",
159 | "description": "Enable PVC for logs."
160 | },
161 | "size": {
162 | "type": "string",
163 | "description": "Volume size for logs."
164 | },
165 | "accessMode": {
166 | "type": "string",
167 | "description": "Access mode for logs storage."
168 | }
169 | }
170 | }
171 | }
172 | },
173 | "image": {
174 | "type": "object",
175 | "properties": {
176 | "repository": {
177 | "type": "string",
178 | "description": "Repository for the ClickHouse image."
179 | },
180 | "pullPolicy": {
181 | "type": "string",
182 | "description": "Image pull policy."
183 | },
184 | "tag": {
185 | "type": "string",
186 | "description": "Tag for the ClickHouse image."
187 | }
188 | }
189 | },
190 | "service": {
191 | "type": "object",
192 | "properties": {
193 | "type": {
194 | "type": "string",
195 | "description": "Service type, e.g., LoadBalancer or ClusterIP."
196 | },
197 | "serviceAnnotations": {
198 | "type": "object",
199 | "description": "Annotations for ClickHouse service."
200 | },
201 | "serviceLabels": {
202 | "type": "object",
203 | "description": "Labels for ClickHouse service."
204 | }
205 | }
206 | },
207 | "lbService": {
208 | "type": "object",
209 | "properties": {
210 | "enabled": {
211 | "type": "boolean",
212 | "description": "Enable LoadBalancer service for ClickHouse."
213 | },
214 | "loadBalancerSourceRanges": {
215 | "type": "array",
216 | "description": "Restricts traffic to specified client IPs.",
217 | "items": {
218 | "type": "string"
219 | }
220 | },
221 | "serviceAnnotations": {
222 | "type": "object",
223 | "description": "Annotations for LoadBalancer service."
224 | },
225 | "serviceLabels": {
226 | "type": "object",
227 | "description": "Labels for LoadBalancer service."
228 | }
229 | }
230 | },
231 | "imagePullSecrets": {
232 | "type": "array",
233 | "description": "Image pull secrets for ClickHouse pods.",
234 | "items": {
235 | "type": "object"
236 | }
237 | },
238 | "podAnnotations": {
239 | "type": "object",
240 | "description": "Annotations for ClickHouse pods."
241 | },
242 | "podLabels": {
243 | "type": "object",
244 | "description": "Labels for ClickHouse pods."
245 | },
246 | "podSecurityContext": {
247 | "type": "object",
248 | "description": "Pod security context for ClickHouse pods."
249 | },
250 | "securityContext": {
251 | "type": "object",
252 | "description": "Security context for ClickHouse containers."
253 | },
254 | "nodeSelector": {
255 | "type": "object",
256 | "description": "Node selector for ClickHouse pods."
257 | },
258 | "tolerations": {
259 | "type": "array",
260 | "description": "Tolerations for ClickHouse pods.",
261 | "items": {
262 | "type": "object"
263 | }
264 | },
265 | "affinity": {
266 | "type": "object",
267 | "description": "Affinity for ClickHouse pods."
268 | },
269 | "topologySpreadConstraints": {
270 | "type": "array",
271 | "description": "Topology spread constraints for ClickHouse pods.",
272 | "items": {
273 | "type": "object"
274 | }
275 | },
276 | "extraConfig": {
277 | "type": "string",
278 | "description": "Miscellaneous config for ClickHouse in XML format."
279 | },
280 | "extraUsers": {
281 | "type": "string",
282 | "description": "Miscellaneous users config for ClickHouse in XML format."
283 | },
284 | "extraContainers": {
285 | "type": "array",
286 | "description": "Extra containers for Clickhouse pod.",
287 | "items": {
288 | "type": "object"
289 | }
290 | },
291 | "extraVolumes": {
292 | "type": "array",
293 | "description": "Extra volumes for Clickhouse pod.",
294 | "items": {
295 | "type": "object"
296 | }
297 | },
298 | "initScripts": {
299 | "type": "object",
300 | "description": "Configuration for init scripts via ConfigMap.",
301 | "properties": {
302 | "enabled": {
303 | "type": "boolean",
304 | "description": "Enable init scripts feature.",
305 | "default": false
306 | },
307 | "configMapName": {
308 | "type": "string",
309 | "description": "Name of an existing ConfigMap containing init scripts to be mounted at /docker-entrypoint-initdb.d/."
310 | },
311 | "alwaysRun": {
312 | "type": "boolean",
313 | "description": "Set to true to always run init scripts on container startup.",
314 | "default": true
315 | }
316 | }
317 | }
318 | }
319 | },
320 | "keeper": {
321 | "type": "object",
322 | "properties": {
323 | "enabled": {
324 | "type": "boolean",
325 | "description": "Enable Keeper for ClickHouse replication."
326 | },
327 | "replicaCount": {
328 | "type": "integer",
329 | "description": "Number of Keeper replicas. Must be an odd number.",
330 | "default": 3
331 | },
332 | "image": {
333 | "type": "string",
334 | "description": "Image repository for Keeper."
335 | },
336 | "tag": {
337 | "type": "string",
338 | "description": "Image tag for Keeper."
339 | },
340 | "settings": {
341 | "type": "object",
342 | "description": "Configuration settings for ClickHouse Keeper."
343 | },
344 | "localStorage": {
345 | "type": "object",
346 | "properties": {
347 | "size": {
348 | "type": "string",
349 | "description": "Volume size for Keeper storage."
350 | },
351 | "storageClass": {
352 | "type": "string",
353 | "description": "Storage class for Keeper persistent volume."
354 | }
355 | }
356 | },
357 | "nodeSelector": {
358 | "type": "object",
359 | "description": "Node selector for Keeper pods."
360 | },
361 | "tolerations": {
362 | "type": "array",
363 | "description": "Tolerations for Keeper pods.",
364 | "items": {
365 | "type": "object"
366 | }
367 | },
368 | "podAnnotations": {
369 | "type": "object",
370 | "description": "Annotations for Keeper pods."
371 | },
372 | "volumeClaimAnnotations": {
373 | "type": "object",
374 | "description": "Annotations for Keeper volume claims."
375 | },
376 | "zoneSpread": {
377 | "type": "boolean",
378 | "description": "Enable topology spread constraints over zone for Keeper."
379 | },
380 | "metricsPort": {
381 | "type": "string",
382 | "description": "Port for Keeper metrics."
383 | },
384 | "resources": {
385 | "type": "object",
386 | "properties": {
387 | "cpuRequestsMs": {
388 | "type": "number",
389 | "description": "CPU requests in millicores for Keeper."
390 | },
391 | "memoryRequestsMiB": {
392 | "type": "string",
393 | "description": "Memory requests for Keeper."
394 | },
395 | "cpuLimitsMs": {
396 | "type": "number",
397 | "description": "CPU limits in millicores for Keeper."
398 | },
399 | "memoryLimitsMiB": {
400 | "type": "string",
401 | "description": "Memory limits for Keeper."
402 | }
403 | }
404 | }
405 | }
406 | },
407 | "operator": {
408 | "type": "object",
409 | "properties": {
410 | "enabled": {
411 | "type": "boolean",
412 | "description": "Enable Altinity Operator for ClickHouse."
413 | }
414 | }
415 | }
416 | }
417 | }
418 |
--------------------------------------------------------------------------------
/charts/clickhouse/README.md:
--------------------------------------------------------------------------------
1 | # clickhouse
2 |
3 |
4 | A Helm chart for creating a ClickHouse® Cluster with the Altinity Operator for ClickHouse
5 |
6 | ## Features
7 |
8 | - Single-node or multi-node ClickHouse clusters
9 | - Sharding and replication
10 | - ClickHouse Keeper integration
11 | - Persistent storage configuration
12 | - Init scripts
13 |
14 | ## Requirements
15 |
16 | | Repository | Name | Version |
17 | |------------|------|---------|
18 | | https://helm.altinity.com | operator(altinity-clickhouse-operator) | 0.25.5 |
19 |
20 | ## Installing the Chart
21 |
22 | ```sh
23 | # add the altinity chart repository
24 | helm repo add altinity https://helm.altinity.com
25 |
26 | # use this command to install clickhouse chart (it will also create a `clickhouse` namespace)
27 | helm install release-name altinity/clickhouse --namespace clickhouse --create-namespace
28 | ```
29 |
30 | Note that by default the chart includes the Altinity Operator. For most production use cases you will want to disable this and install the operator explicitly from its own helm chart.
31 |
32 | ```sh
33 | # add the altinity operator chart repository
34 | helm repo add altinity-operator https://docs.altinity.com/clickhouse-operator
35 |
36 | # create the namespace
37 | kubectl create namespace clickhouse
38 |
39 | # install operator into namespace
40 | helm install clickhouse-operator altinity-operator/altinity-clickhouse-operator \
41 | --namespace clickhouse
42 |
43 | # add the altinity chart repository
44 | helm repo add altinity https://helm.altinity.com
45 |
46 | # install the clickhouse chart without the operator
47 | helm install release-name altinity/clickhouse --namespace clickhouse \
48 | --set operator.enabled=false
49 | ```
50 |
51 | > Yes, we're aware that the domains for the helm repos are a bit odd. We're working on it.
52 |
53 | ### Configuring the bundled operator
54 |
55 | The ClickHouse chart vendors the Altinity ClickHouse Operator as a dependency using the
56 | `operator` alias. Any values you pass under the `operator` key are forwarded to the
57 | dependency chart unchanged, which means you can configure the operator exactly the same
58 | way you would when installing it directly. By default the dependency installs into the
59 | same namespace as the Helm release, watches all namespaces, and creates cluster-scoped
60 | RBAC resources.
61 |
62 | Common examples include overriding the namespace where the operator runs and toggling
63 | `rbac.namespaceScoped`:
64 |
65 | ```sh
66 | helm install release-name altinity/clickhouse \
67 | --namespace test --create-namespace \
68 | --set operator.namespaceOverride=test \
69 | --set operator.rbac.namespaceScoped=true
70 | ```
71 |
72 | When you are running multiple operators across different namespaces, install a separate
73 | release into each namespace and scope it to that namespace only. Set the operator's
74 | `namespaceOverride`, enable `rbac.namespaceScoped`, and restrict `watch.namespaces` to the
75 | release namespace so each operator manages only its own resources.
76 |
77 | ```sh
78 | helm install second-release altinity/clickhouse \
79 | --namespace test \
80 | --set operator.namespaceOverride=test \
81 | --set operator.rbac.namespaceScoped=true \
82 | --set operator.configs.files.config\\.yaml.watch.namespaces=\{test\}
83 | ```
84 |
85 | Consult the [Altinity ClickHouse Operator chart documentation](https://helm.altinity.com/)
86 | for the full list of available options. Any of those settings can be applied through the
87 | `operator` value prefix when installing or upgrading this chart.
88 |
89 | ## Upgrading the Chart
90 |
91 | ### Upgrading from 0.2.x to 0.3.0
92 |
93 | **IMPORTANT**: Version 0.3.0 introduces a change that improves reconciliation timing by embedding templates directly in the ClickHouseInstallation resource instead of using separate ClickHouseInstallationTemplate resources.
94 |
95 | After upgrading, delete the old ClickHouseInstallationTemplate resources that were created by version 0.2.x:
96 |
97 | ```sh
98 | # List all ClickHouseInstallationTemplate resources
99 | kubectl get clickhouseinstallationtemplates -n clickhouse
100 |
101 | # Delete them (replace <release-name> with your actual release name)
102 | kubectl delete clickhouseinstallationtemplates -n clickhouse \
103 |   <release-name>-clickhouse-pod \
104 |   <release-name>-clickhouse-service \
105 |   <release-name>-clickhouse-service-lb \
106 |   <release-name>-clickhouse-data \
107 |   <release-name>-clickhouse-logs
108 | ```
109 |
110 | The ClickHouseInstallation will be updated automatically with embedded templates, resulting in faster reconciliation.
111 |
112 | ### Standard Upgrade Process
113 | ```sh
114 | # get latest repository versions
115 | helm repo update
116 |
117 | # upgrade to a newer version using the release name (`clickhouse`)
118 | helm upgrade clickhouse altinity/clickhouse --namespace clickhouse
119 | ```
120 |
121 | ## Uninstalling the Chart
122 |
123 | ```sh
124 | # uninstall using the release name (`clickhouse`)
125 | helm uninstall clickhouse --namespace clickhouse
126 | ```
127 |
128 | **Note:** If you installed the Altinity Operator with this chart, your ClickHouse Installations will hang because the Operator will be deleted before their finalizers complete. To resolve this you must manually edit each `chi` resource and remove the finalizer.
129 |
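For example, a stuck installation can be released by clearing its finalizers. This is a sketch; `<chi-name>` is a placeholder for your actual ClickHouseInstallation name:

```sh
# list the ClickHouseInstallation resources that are stuck
kubectl get chi --namespace clickhouse

# clear the finalizers on a stuck installation so deletion can complete
kubectl patch chi <chi-name> --namespace clickhouse --type merge -p '{"metadata":{"finalizers":[]}}'
```
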
130 | PVCs created by this helm chart will not be automatically deleted and must be deleted manually. An easy way to do this is to delete the namespace:
131 |
132 | ```sh
133 | kubectl delete namespace clickhouse
134 | ```
135 |
136 | > This command removes all the Kubernetes components associated with the chart and deletes the release.
137 |
138 | ## Connecting to your ClickHouse Cluster
139 |
140 | ```sh
141 | # list your pods
142 | kubectl get pods --namespace clickhouse
143 |
144 | # pick any of your available pods and connect through the clickhouse-client
145 | kubectl exec -it chi-clickhouse-0-0-0 --namespace clickhouse -- clickhouse-client
146 | ```
147 |
148 | > Use `kubectl port-forward` to access your ClickHouse cluster from outside: `kubectl port-forward service/clickhouse-eks 9000:9000 & clickhouse-client`
149 |
150 | ## Using Init Scripts with ConfigMap
151 |
152 | The chart allows mounting a ConfigMap containing initialization scripts that will be executed during the ClickHouse container startup.
153 |
154 | ### How to use:
155 |
156 | 1. Create a ConfigMap containing your initialization scripts:
157 |
158 | ```bash
159 | kubectl create configmap my-init-scripts --from-file=01_create_database.sh --from-file=02_create_tables.sh
160 | ```
161 |
162 | 2. Enable the initScripts feature in your Helm values:
163 |
164 | ```yaml
165 | clickhouse:
166 | initScripts:
167 | enabled: true
168 | configMapName: my-init-scripts
169 | alwaysRun: true # Set to true to always run scripts on container restart
170 | ```
171 |
172 | The scripts will be mounted at `/docker-entrypoint-initdb.d/` in the ClickHouse container and executed in alphabetical order during startup.
173 |
174 | ### Example Script Format
175 |
176 | ```bash
177 | #!/bin/bash
178 | set -e
179 | clickhouse client -n <<-EOSQL
180 | CREATE DATABASE IF NOT EXISTS my_database;
181 | CREATE TABLE IF NOT EXISTS my_database.my_table (
182 | id UInt64,
183 | data String
184 | ) ENGINE = MergeTree()
185 | ORDER BY id;
186 | EOSQL
187 | ```
188 |
189 | ## Values
190 |
191 | | Key | Type | Default | Description |
192 | |-----|------|---------|-------------|
193 | | clickhouse.antiAffinity | bool | `false` | |
194 | | clickhouse.antiAffinityScope | string | ClickHouseInstallation | Scope for anti-affinity policy when antiAffinity is enabled. Determines the level at which pod distribution is enforced. Available scopes: - ClickHouseInstallation: Pods from the same installation won't run on the same node (default) - Shard: Pods from the same shard won't run on the same node - Replica: Pods from the same replica won't run on the same node - Cluster: Pods from the same cluster won't run on the same node - Namespace: Pods from the same namespace won't run on the same node |
195 | | clickhouse.clusterSecret | object | `{"auto":true,"enabled":false,"secure":false,"value":"","valueFrom":{"secretKeyRef":{"key":"secret","name":""}}}` | Cluster secret configuration for secure inter-node communication |
196 | | clickhouse.clusterSecret.auto | bool | `true` | Auto-generate cluster secret (recommended for security) |
197 | | clickhouse.clusterSecret.enabled | bool | `false` | Whether to enable secret-based cluster communication |
198 | | clickhouse.clusterSecret.secure | bool | `false` | Whether to secure this behind the SSL port |
199 | | clickhouse.clusterSecret.value | string | `""` | Plaintext cluster secret value (not recommended for production) |
200 | | clickhouse.clusterSecret.valueFrom | object | `{"secretKeyRef":{"key":"secret","name":""}}` | Reference to an existing Kubernetes secret containing the cluster secret |
201 | | clickhouse.clusterSecret.valueFrom.secretKeyRef.key | string | `"secret"` | Key in the secret that contains the cluster secret value |
202 | | clickhouse.clusterSecret.valueFrom.secretKeyRef.name | string | `""` | Name of the secret containing the cluster secret |
203 | | clickhouse.defaultUser.allowExternalAccess | bool | `false` | Allow the default user to access ClickHouse from any IP. If set, will override `hostIP` to always be `0.0.0.0/0`. |
204 | | clickhouse.defaultUser.hostIP | string | `"127.0.0.1/32"` | |
205 | | clickhouse.defaultUser.password | string | `""` | |
206 | | clickhouse.defaultUser.password_secret_name | string | `""` | Name of an existing Kubernetes secret containing the default user password. If set, the password will be read from the secret instead of using the password field. The secret should contain a key named 'password'. |
207 | | clickhouse.extraConfig | string | `"<clickhouse>\n</clickhouse>\n"` | Miscellaneous config for ClickHouse (in xml format) |
208 | | clickhouse.extraContainers | list | `[]` | |
209 | | clickhouse.extraPorts | list | `[]` | Additional ports to expose in the ClickHouse container Example: extraPorts: - name: custom-port containerPort: 8080 |
210 | | clickhouse.extraUsers | string | `"<clickhouse>\n</clickhouse>\n"` | Additional users config for ClickHouse (in xml format) |
211 | | clickhouse.extraVolumes | list | `[]` | Extra volumes for clickhouse pods |
212 | | clickhouse.image.pullPolicy | string | `"IfNotPresent"` | |
213 | | clickhouse.image.repository | string | `"altinity/clickhouse-server"` | |
214 | | clickhouse.image.tag | string | `"25.3.6.10034.altinitystable"` | Override the image tag for a specific version |
215 | | clickhouse.initScripts | object | `{"alwaysRun":true,"configMapName":"","enabled":false}` | Init scripts ConfigMap configuration |
216 | | clickhouse.initScripts.alwaysRun | bool | `true` | Set to true to always run init scripts on container startup |
217 | | clickhouse.initScripts.configMapName | string | `""` | Name of an existing ConfigMap containing init scripts The scripts will be mounted at /docker-entrypoint-initdb.d/ |
218 | | clickhouse.initScripts.enabled | bool | `false` | Set to true to enable init scripts feature |
219 | | clickhouse.keeper | object | `{"host":"","port":2181}` | Keeper connection settings for ClickHouse instances. |
220 | | clickhouse.keeper.host | string | `""` | Specify a keeper host. Should be left empty if `clickhouse-keeper.enabled` is `true`. Will override the defaults set from `clickhouse-keeper.enabled`. |
221 | | clickhouse.keeper.port | int | `2181` | Override the default keeper port |
222 | | clickhouse.lbService.enabled | bool | `false` | |
223 | | clickhouse.lbService.loadBalancerSourceRanges | list | `[]` | Specify source IP ranges to the LoadBalancer service. If supported by the platform, this will restrict traffic through the cloud-provider load-balancer to the specified client IPs. This is ignored if the cloud-provider does not support the feature. |
224 | | clickhouse.lbService.serviceAnnotations | object | `{}` | |
225 | | clickhouse.lbService.serviceLabels | object | `{}` | |
226 | | clickhouse.persistence.accessMode | string | `"ReadWriteOnce"` | |
227 | | clickhouse.persistence.enabled | bool | `true` | enable storage |
228 | | clickhouse.persistence.logs.accessMode | string | `"ReadWriteOnce"` | |
229 | | clickhouse.persistence.logs.enabled | bool | `false` | enable pvc for logs |
230 | | clickhouse.persistence.logs.size | string | `"10Gi"` | size for logs pvc |
231 | | clickhouse.persistence.size | string | `"10Gi"` | volume size (per replica) |
232 | | clickhouse.persistence.storageClass | string | `""` | |
233 | | clickhouse.podAnnotations | object | `{}` | |
234 | | clickhouse.podLabels | object | `{}` | |
235 | | clickhouse.replicasCount | int | `1` | number of replicas. If greater than 1, keeper must be enabled or a keeper host should be provided under clickhouse.keeper.host. Will be ignored if `zones` is set. |
236 | | clickhouse.service.serviceAnnotations | object | `{}` | |
237 | | clickhouse.service.serviceLabels | object | `{}` | |
238 | | clickhouse.service.type | string | `"ClusterIP"` | |
239 | | clickhouse.serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
240 | | clickhouse.serviceAccount.create | bool | `false` | Specifies whether a service account should be created |
241 | | clickhouse.serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
242 | | clickhouse.shardsCount | int | `1` | number of shards. |
243 | | clickhouse.users | list | `[]` | Configure additional ClickHouse users. |
244 | | clickhouse.zones | list | `[]` | |
245 | | keeper.enabled | bool | `false` | Whether to enable Keeper. Required for replicated tables. |
246 | | keeper.image | string | `"altinity/clickhouse-keeper"` | |
247 | | keeper.localStorage.size | string | `"5Gi"` | |
248 | | keeper.localStorage.storageClass | string | `""` | |
249 | | keeper.metricsPort | string | `""` | |
250 | | keeper.nodeSelector | object | `{}` | |
251 | | keeper.podAnnotations | object | `{}` | |
252 | | keeper.replicaCount | int | `3` | Number of keeper replicas. Must be an odd number. !! DO NOT CHANGE AFTER INITIAL DEPLOYMENT |
253 | | keeper.resources.cpuLimitsMs | int | `3` | |
254 | | keeper.resources.cpuRequestsMs | int | `2` | |
255 | | keeper.resources.memoryLimitsMiB | string | `"3Gi"` | |
256 | | keeper.resources.memoryRequestsMiB | string | `"3Gi"` | |
257 | | keeper.settings | object | `{}` | |
258 | | keeper.tag | string | `"25.3.6.10034.altinitystable"` | |
259 | | keeper.tolerations | list | `[]` | |
260 | | keeper.volumeClaimAnnotations | object | `{}` | |
261 | | keeper.zoneSpread | bool | `false` | |
262 | | namespaceDomainPattern | string | `""` | Custom domain pattern used for DNS names of `Service` and `Pod` resources. Typically defined by the custom cluster domain of the Kubernetes cluster. The pattern follows the `%s` C-style printf format, e.g. '%s.svc.my.test'. If not specified, the default namespace domain suffix is `.svc.cluster.local`. |
263 | | operator.enabled | bool | `true` | Whether to enable the Altinity Operator for ClickHouse. Disable if you already have the Operator installed cluster-wide. |
264 |
--------------------------------------------------------------------------------
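
Note on the table above: the dotted parameter names are flattened paths into a nested values structure. As a rough illustration only (the key names follow the table, the specific values are placeholders, and PyYAML is used here simply because the deployment steps below already load values files with it), such an override nests like this:

import yaml  # PyYAML, the same library the test steps use to load values files

# Illustrative override; the values are examples, not recommendations.
values_override = {
    "clickhouse": {
        "replicasCount": 3,
        "shardsCount": 1,
        "keeper": {"host": "", "port": 2181},
        "persistence": {"enabled": True, "size": "10Gi"},
    },
    "keeper": {"enabled": True, "replicaCount": 3},
}

print(yaml.safe_dump(values_override, sort_keys=False))
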
/tests/steps/kubernetes.py:
--------------------------------------------------------------------------------
1 | from tests.steps.system import *
2 | import json
3 | import time
4 |
5 |
6 | @TestStep(When)
7 | def get_pods(self, namespace):
8 | """Get the list of pods in the specified namespace and return in a list."""
9 |
10 | pods = run(cmd=f"minikube kubectl -- get pods -n {namespace} -o json")
11 | pods = json.loads(pods.stdout)["items"]
12 |
13 | return [p["metadata"]["name"] for p in pods]
14 |
15 |
16 | @TestStep(When)
17 | def debug_namespace_state(self, namespace, expected_count=None, current_count=None):
18 | """Print detailed debugging information about namespace state.
19 |
20 | Args:
21 | namespace: Kubernetes namespace to debug
22 | expected_count: Expected number of pods (optional)
23 | current_count: Current number of pods (optional)
24 | """
25 | pods = get_pods(namespace=namespace)
26 |
27 |     if expected_count is not None and current_count is not None:
28 | note(f"❌ TIMEOUT: Expected {expected_count} pods, found {current_count}")
29 |
30 | # Show all pods and their states
31 | if pods:
32 | note(f"📋 Current pods: {', '.join(pods)}")
33 | for pod_name in pods:
34 | try:
35 | pod_info = get_pod_info(namespace=namespace, pod_name=pod_name)
36 | phase = pod_info["status"].get("phase", "Unknown")
37 | conditions = pod_info["status"].get("conditions", [])
38 | ready = any(
39 | c["type"] == "Ready" and c["status"] == "True" for c in conditions
40 | )
41 |
42 | # Get container statuses
43 | container_statuses = pod_info["status"].get("containerStatuses", [])
44 | container_info = []
45 | for cs in container_statuses:
46 | state = cs.get("state", {})
47 | if "waiting" in state:
48 | reason = state["waiting"].get("reason", "Unknown")
49 | message = state["waiting"].get("message", "")
50 | container_info.append(f"Waiting: {reason} - {message}")
51 | elif "terminated" in state:
52 | reason = state["terminated"].get("reason", "Unknown")
53 | container_info.append(f"Terminated: {reason}")
54 | elif "running" in state:
55 | container_info.append("Running")
56 |
57 | note(
58 | f" • {pod_name}: Phase={phase}, Ready={ready}, Containers=[{', '.join(container_info)}]"
59 | )
60 | except Exception as e:
61 | note(f" • {pod_name}: Failed to get info - {str(e)}")
62 | else:
63 | note(f"📋 No pods found in namespace {namespace}")
64 |
65 | # Get all resources in namespace to see what's being created
66 | note(f"\n📦 All resources in namespace {namespace}:")
67 | all_resources = run(cmd=f"kubectl get all -n {namespace}", check=False)
68 | if all_resources.returncode == 0:
69 | note(all_resources.stdout)
70 |
71 | # Get recent events to see why pods aren't being created
72 | note(f"\n📅 Recent events in namespace {namespace}:")
73 | events_result = run(
74 | cmd=f"kubectl get events -n {namespace} --sort-by='.lastTimestamp' | tail -20",
75 | check=False,
76 | )
77 | if events_result.returncode == 0:
78 | note(events_result.stdout)
79 |
80 | # Check for pending pods and describe them
81 | note(f"\n🔍 Describing all pods:")
82 | describe_result = run(cmd=f"kubectl describe pods -n {namespace}", check=False)
83 | if describe_result.returncode == 0:
84 | note(describe_result.stdout)
85 |
86 | # Check CHI (ClickHouseInstallation) configuration
87 | note(f"\n🔧 ClickHouseInstallation resources:")
88 | chi_result = run(cmd=f"kubectl get chi -n {namespace} -o yaml", check=False)
89 | if chi_result.returncode == 0:
90 | note(chi_result.stdout)
91 |
92 |
93 | @TestStep(When)
94 | def get_pod_info(self, namespace, pod_name):
95 | """Get detailed information for a specific pod.
96 |
97 | Args:
98 | namespace: Kubernetes namespace
99 | pod_name: Name of the pod
100 |
101 | Returns:
102 | Dict with pod information
103 | """
104 | pod_info = run(cmd=f"kubectl get pod {pod_name} -n {namespace} -o json")
105 | return json.loads(pod_info.stdout)
106 |
107 |
108 | @TestStep(Then)
109 | def check_status(self, pod_name, namespace, status="Running"):
110 | """Check if the specified pod is in the desired status and ready."""
111 |
112 | pod_info = get_pod_info(namespace=namespace, pod_name=pod_name)
113 | phase = pod_info["status"]["phase"]
114 | conditions = pod_info["status"].get("conditions", [])
115 | ready = any(c["type"] == "Ready" and c["status"] == "True" for c in conditions)
116 | return phase == status and ready
117 |
118 |
119 | @TestStep(Given)
120 | def use_context(self, context_name):
121 | """Set the kubectl context to the specified context name."""
122 |
123 | run(cmd=f"kubectl config use-context {context_name}")
124 |
125 |
126 | @TestStep(When)
127 | def wait_for_pod_count(self, namespace, expected_count, timeout=300):
128 | """Wait until the number of pods in the specified namespace matches the expected count."""
129 |
130 | start_time = time.time()
131 | last_count = -1
132 | while True:
133 | pods = get_pods(namespace=namespace)
134 | current_count = len(pods)
135 |
136 | # Log when pod count changes
137 | if current_count != last_count:
138 | note(f"Pod count in {namespace}: {current_count}/{expected_count}")
139 | last_count = current_count
140 |
141 | if current_count == expected_count:
142 | return pods
143 |
144 | if time.time() - start_time > timeout:
145 | # Show detailed debugging info before failing
146 | debug_namespace_state(
147 | namespace=namespace,
148 | expected_count=expected_count,
149 | current_count=current_count,
150 | )
151 |
152 | raise TimeoutError(
153 | f"Timeout waiting for {expected_count} pods in namespace {namespace}. Found {current_count} pods."
154 | )
155 | time.sleep(5)
156 |
157 |
158 | @TestStep(When)
159 | def get_pvcs(self, namespace):
160 | """Get the list of PVCs in the specified namespace."""
161 |
162 | pvcs = run(cmd=f"kubectl get pvc -n {namespace} -o json")
163 | pvcs = json.loads(pvcs.stdout)["items"]
164 |
165 | return [p["metadata"]["name"] for p in pvcs]
166 |
167 |
168 | @TestStep(When)
169 | def get_pvc_info(self, namespace, pvc_name):
170 | """Get detailed information for a specific PVC.
171 |
172 | Args:
173 | namespace: Kubernetes namespace
174 | pvc_name: Name of the PVC
175 |
176 | Returns:
177 | Dict with PVC information
178 | """
179 | pvc_info = run(cmd=f"kubectl get pvc {pvc_name} -n {namespace} -o json")
180 | return json.loads(pvc_info.stdout)
181 |
182 |
183 | @TestStep(When)
184 | def get_pvc_storage_size(self, namespace, pvc_name):
185 | """Get storage size for a specific PVC.
186 |
187 | Args:
188 | namespace: Kubernetes namespace
189 | pvc_name: Name of the PVC
190 |
191 | Returns:
192 | Storage size string (e.g., "5Gi") or None if not found
193 | """
194 | pvc_data = get_pvc_info(namespace=namespace, pvc_name=pvc_name)
195 | return (
196 | pvc_data.get("spec", {}).get("resources", {}).get("requests", {}).get("storage")
197 | )
198 |
199 |
200 | @TestStep(When)
201 | def get_services(self, namespace):
202 | """Get the list of services in the specified namespace."""
203 |
204 | services = run(cmd=f"kubectl get svc -n {namespace} -o json")
205 | services = json.loads(services.stdout)["items"]
206 |
207 | return [s["metadata"]["name"] for s in services]
208 |
209 |
210 | @TestStep(When)
211 | def get_service_info(self, service_name, namespace):
212 | """Get the full service information as a dictionary."""
213 |
214 | service_info = run(cmd=f"kubectl get svc {service_name} -n {namespace} -o json")
215 | service_info = json.loads(service_info.stdout)
216 |
217 | return service_info
218 |
219 |
220 | @TestStep(When)
221 | def get_service_type(self, service_name, namespace):
222 | """Get the type of a specific service."""
223 |
224 | service_info = get_service_info(service_name=service_name, namespace=namespace)
225 |
226 | return service_info["spec"]["type"]
227 |
228 |
229 | @TestStep(When)
230 | def get_pod_nodes(self, namespace, pod_names):
231 | """Get the nodes where the specified pods are running."""
232 |
233 | nodes = []
234 | for pod_name in pod_names:
235 | pod_info = get_pod_info(namespace=namespace, pod_name=pod_name)
236 | nodes.append(pod_info["spec"]["nodeName"])
237 |
238 | return nodes
239 |
240 |
241 | @TestStep(When)
242 | def get_pod_image(self, namespace, pod_name):
243 | """Get the image used by a specific pod."""
244 |
245 | pod_info = get_pod_info(namespace=namespace, pod_name=pod_name)
246 | return pod_info["spec"]["containers"][0]["image"]
247 |
248 |
249 | @TestStep(When)
250 | def get_statefulsets(self, namespace):
251 | """Get the list of StatefulSets in the specified namespace."""
252 |
253 | statefulsets = run(cmd=f"kubectl get statefulsets -n {namespace} -o json")
254 | statefulsets = json.loads(statefulsets.stdout)
255 |
256 | return [s["metadata"]["name"] for s in statefulsets["items"]]
257 |
258 |
259 | @TestStep(When)
260 | def wait_for_pods_running(self, namespace, timeout=300):
261 | """Wait until all pods in the namespace are running and ready."""
262 |
263 | start_time = time.time()
264 | while True:
265 | pods = get_pods(namespace=namespace)
266 | all_running = True
267 |
268 | for pod in pods:
269 | if not check_status(pod_name=pod, namespace=namespace, status="Running"):
270 | all_running = False
271 | break
272 |
273 | if all_running:
274 | return pods
275 |
276 | if time.time() - start_time > timeout:
277 | # Get status of all pods for debugging
278 | pod_statuses = []
279 | for pod in pods:
280 | status = (
281 | "Running"
282 | if check_status(pod_name=pod, namespace=namespace, status="Running")
283 | else "Not Running"
284 | )
285 | pod_statuses.append(f"{pod}: {status}")
286 | raise TimeoutError(
287 | f"Timeout waiting for pods to be running. Pod statuses: {pod_statuses}"
288 | )
289 |
290 | time.sleep(10)
291 |
292 |
293 | @TestStep(Then)
294 | def verify_pvc_storage_size(self, namespace, expected_size):
295 | """Verify that at least one PVC has the expected storage size."""
296 |
297 | pvcs = get_pvcs(namespace=namespace)
298 | assert len(pvcs) > 0, "No PVCs found for persistence"
299 | note(f"Created PVCs: {pvcs}")
300 |
301 | # Verify at least one PVC has the expected size
302 | for pvc in pvcs:
303 | storage_size = get_pvc_storage_size(namespace=namespace, pvc_name=pvc)
304 | if storage_size == expected_size:
305 | note(f"PVC {pvc} has correct storage size: {storage_size}")
306 | return pvc
307 |
308 | raise AssertionError(f"No PVC found with expected storage size {expected_size}")
309 |
310 |
311 | @TestStep(Then)
312 | def verify_loadbalancer_service_exists(self, namespace):
313 | """Verify that at least one LoadBalancer service exists."""
314 |
315 | services = get_services(namespace=namespace)
316 | lb_services = [
317 | s
318 | for s in services
319 | if get_service_type(service_name=s, namespace=namespace) == "LoadBalancer"
320 | ]
321 | assert len(lb_services) > 0, "LoadBalancer service not found"
322 | note(f"LoadBalancer services found: {lb_services}")
323 |
324 | return lb_services[0]
325 |
326 |
327 | @TestStep(Then)
328 | def verify_loadbalancer_source_ranges(self, namespace, service_name, expected_ranges):
329 | """Verify LoadBalancer service has correct source ranges."""
330 |
331 | service_info = get_service_info(service_name=service_name, namespace=namespace)
332 | source_ranges = service_info["spec"].get("loadBalancerSourceRanges", [])
333 |
334 | assert (
335 | source_ranges == expected_ranges
336 | ), f"Expected source ranges {expected_ranges}, got {source_ranges}"
337 | note(f"LoadBalancer source ranges verified: {source_ranges}")
338 |
339 |
340 | @TestStep(Then)
341 | def verify_loadbalancer_ports(self, namespace, service_name, expected_ports):
342 | """Verify LoadBalancer service has correct ports.
343 |
344 | Args:
345 | namespace: Kubernetes namespace
346 | service_name: Name of the service
347 | expected_ports: Dict mapping port names to port numbers, e.g. {"http": 8123, "tcp": 9000}
348 | """
349 |
350 | service_info = get_service_info(service_name=service_name, namespace=namespace)
351 | ports = service_info["spec"]["ports"]
352 | port_names = [p["name"] for p in ports]
353 |
354 | with By("verifying LoadBalancer ports"):
355 | for port_name in expected_ports.keys():
356 | assert (
357 | port_name in port_names
358 | ), f"Expected port '{port_name}' not found in {port_names}"
359 |
360 | with And("verifying port numbers"):
361 | for port in ports:
362 | if port["name"] in expected_ports:
363 | expected_port = expected_ports[port["name"]]
364 | assert (
365 | port["port"] == expected_port
366 | ), f"Expected {port['name']} port {expected_port}, got {port['port']}"
367 | note(f"Port {port['name']}: {port['port']}")
368 |
369 | note(f"All LoadBalancer ports verified")
370 |
371 |
372 | @TestStep(Then)
373 | def verify_loadbalancer_service(self, namespace, expected_ranges=None):
374 | """Verify LoadBalancer service exists and has correct configuration.
375 |
376 | Args:
377 | namespace: Kubernetes namespace
378 | expected_ranges: Optional list of expected source ranges
379 |
380 | Returns:
381 | Service name of the LoadBalancer
382 | """
383 | services = get_services(namespace=namespace)
384 | lb_services = [
385 | s
386 | for s in services
387 | if get_service_type(service_name=s, namespace=namespace) == "LoadBalancer"
388 | ]
389 |
390 | assert len(lb_services) > 0, "LoadBalancer service not found"
391 | lb_service_name = lb_services[0]
392 |
393 | if expected_ranges:
394 | service_info = get_service_info(
395 | service_name=lb_service_name, namespace=namespace
396 | )
397 | source_ranges = service_info["spec"].get("loadBalancerSourceRanges", [])
398 | assert (
399 | source_ranges == expected_ranges
400 | ), f"Expected source ranges {expected_ranges}, got {source_ranges}"
401 |
402 | note(f"✓ LoadBalancer service: {lb_service_name}")
403 | return lb_service_name
404 |
405 |
406 | @TestStep(Then)
407 | def verify_pvc_access_mode(
408 | self, namespace, expected_access_mode, pvc_name_filter, resource_matcher=None
409 | ):
410 | """Verify PVC access mode for PVCs matching filter.
411 |
412 | Args:
413 | namespace: Kubernetes namespace
414 | expected_access_mode: Expected access mode (e.g., "ReadWriteOnce")
415 | pvc_name_filter: String to filter PVC names (e.g., "data", "logs")
416 | resource_matcher: Optional function to check if PVC belongs to target resource
417 |
418 | Returns:
419 | Name of verified PVC
420 | """
421 | pvcs = get_pvcs(namespace=namespace)
422 |
423 | # Find matching PVCs
424 | for pvc in pvcs:
425 | if pvc_name_filter in pvc.lower():
426 | # Apply resource matcher if provided
427 | if resource_matcher and not resource_matcher(resource_name=pvc):
428 | continue
429 |
430 | pvc_info = get_pvc_info(namespace=namespace, pvc_name=pvc)
431 | access_modes = pvc_info.get("spec", {}).get("accessModes", [])
432 |
433 | assert (
434 | expected_access_mode in access_modes
435 | ), f"Expected accessMode {expected_access_mode} in PVC {pvc}, got {access_modes}"
436 |
437 | note(f"✓ PVC {pvc_name_filter} accessMode: {expected_access_mode}")
438 | return pvc
439 |
440 | raise AssertionError(f"No {pvc_name_filter} PVC found for verification")
441 |
442 |
443 | @TestStep(When)
444 | def get_endpoints_info(self, namespace, endpoints_name):
445 | """Get detailed information about Kubernetes endpoints.
446 |
447 | Args:
448 | namespace: Kubernetes namespace
449 | endpoints_name: Name of the endpoints resource
450 |
451 | Returns:
452 | dict: Endpoints information
453 | """
454 | endpoints_info = run(
455 | cmd=f"kubectl get endpoints {endpoints_name} -n {namespace} -o json"
456 | )
457 | return json.loads(endpoints_info.stdout)
458 |
459 |
460 | @TestStep(When)
461 | def get_secrets(self, namespace):
462 | """Get list of secret names in a namespace.
463 |
464 | Args:
465 | namespace: Kubernetes namespace
466 |
467 | Returns:
468 | list: List of secret names
469 | """
470 | secrets_info = run(cmd=f"kubectl get secrets -n {namespace} -o json")
471 | secrets_data = json.loads(secrets_info.stdout)
472 | return [item["metadata"]["name"] for item in secrets_data.get("items", [])]
473 |
474 |
475 | @TestStep(Finally)
476 | def delete_namespace(self, namespace):
477 | """Delete a Kubernetes namespace.
478 |
479 | Args:
480 | namespace: Kubernetes namespace to delete
481 | """
482 | note(f"Deleting namespace: {namespace}")
483 |
484 | # Just delete the namespace and force-remove finalizers if it hangs
485 | run(
486 | cmd=f"timeout 15 kubectl delete namespace {namespace} --wait=true 2>/dev/null || "
487 | f'kubectl patch namespace {namespace} -p \'{{"metadata":{{"finalizers":null}}}}\' --type=merge 2>/dev/null',
488 | check=False,
489 | )
490 |
491 | note(f"✓ Namespace {namespace} deleted")
492 |
493 |
494 | @TestStep(When)
495 | def delete_pod(self, namespace, pod_name):
496 | """Delete a Kubernetes pod.
497 |
498 | Args:
499 | namespace: Kubernetes namespace
500 | pod_name: Name of the pod to delete
501 | """
502 | run(cmd=f"kubectl delete pod {pod_name} -n {namespace}", check=True)
503 | note(f"✓ Pod {pod_name} deleted from namespace {namespace}")
504 |
--------------------------------------------------------------------------------
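
Taken together, these steps follow one pattern: poll kubectl output, log whenever the observed state changes, and dump namespace diagnostics before raising on timeout. Below is a minimal sketch of how a scenario might compose them; it is illustrative only — the scenario name, namespace, and expected pod count are assumptions — and it relies on a testflows test context just as the calls in deployment.py do.

from testflows.core import *

import tests.steps.kubernetes as kubernetes


@TestScenario
def single_node_pods(self, namespace="clickhouse-smoke"):
    """Illustrative scenario: wait for one pod, check readiness, then clean up."""
    with When("the release pod is created"):
        kubernetes.wait_for_pod_count(namespace=namespace, expected_count=1)

    with And("every pod reports Running and Ready"):
        pods = kubernetes.wait_for_pods_running(namespace=namespace)
        note(f"running pods: {pods}")

    with Finally("the namespace is removed"):
        kubernetes.delete_namespace(namespace=namespace)
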
/tests/steps/deployment.py:
--------------------------------------------------------------------------------
1 | """
2 | Deployment verification helper for ClickHouse Helm chart tests.
3 |
4 | This module provides the HelmState class which acts as an orchestrator
5 | to verify deployments match their expected configuration.
6 | """
7 |
8 | from testflows.core import *
9 | import tests.steps.kubernetes as kubernetes
10 | import tests.steps.clickhouse as clickhouse
11 | import tests.steps.users as users
12 | import yaml
13 | from pathlib import Path
14 |
15 |
16 | @TestStep(Then)
17 | def wait_for_clickhouse_deployment(
18 | self,
19 | namespace: str,
20 | expected_pod_count: int = 2,
21 | expected_clickhouse_count: int = None,
22 | ):
23 | """Wait for ClickHouse deployment to be ready with all pods running.
24 |
25 | This is a common pattern used across most test scenarios:
26 | 1. Wait for expected number of pods to be created
27 | 2. Wait for all pods to be running
28 | 3. Wait for ClickHouse pods specifically to be running
29 |
30 | Args:
31 | namespace: Kubernetes namespace
32 | expected_pod_count: Total number of pods expected (default: 2)
33 | expected_clickhouse_count: Number of ClickHouse pods expected (default: same as total)
34 | """
35 | if expected_clickhouse_count is None:
36 | expected_clickhouse_count = expected_pod_count
37 |
38 | with When(f"wait for {expected_pod_count} pods to be created"):
39 | kubernetes.wait_for_pod_count(
40 | namespace=namespace, expected_count=expected_pod_count
41 | )
42 |
43 | with And("wait for all pods to be running"):
44 | pods = kubernetes.wait_for_pods_running(namespace=namespace)
45 | note(f"All {len(pods)} pods are now running and ready")
46 |
47 | with And("wait for ClickHouse pods to be running"):
48 | clickhouse_pods = clickhouse.wait_for_clickhouse_pods_running(
49 | namespace=namespace, expected_count=expected_clickhouse_count
50 | )
51 | note(f"ClickHouse pods running: {clickhouse_pods}")
52 |
53 |
54 | class HelmState:
55 | """Orchestrator for verifying Helm deployment state.
56 |
57 | This class reads a Helm values file and decides which verification checks
58 | to run based on the configuration. All actual verification logic is delegated
59 | to appropriate step functions in kubernetes.py and clickhouse.py.
60 | """
61 |
62 | def __init__(self, values_file_path):
63 | """Initialize HelmState with a values file.
64 |
65 | Args:
66 | values_file_path: Path to the Helm values YAML file
67 | """
68 | self.values_file = Path(values_file_path)
69 | with open(self.values_file, "r") as f:
70 | self.values = yaml.safe_load(f)
71 |
72 | self.clickhouse_config = self.values.get("clickhouse", {})
73 | self.keeper_config = self.values.get("keeper", {})
74 |
75 | def get_expected_pod_count(self):
76 | """Total pods = ClickHouse pods + Keeper pods."""
77 | ch_pods = self.get_expected_clickhouse_pod_count()
78 | keeper_pods = self.get_expected_keeper_count()
79 | return ch_pods + keeper_pods
80 |
81 | def get_expected_clickhouse_pod_count(self):
82 | """ClickHouse pods = replicas × shards."""
83 | replicas = self.clickhouse_config.get("replicasCount", 1)
84 | shards = self.clickhouse_config.get("shardsCount", 1)
85 | return replicas * shards
86 |
87 | def get_expected_keeper_count(self):
88 | """Keeper pod count (0 if not enabled)."""
89 | if not self.keeper_config.get("enabled", False):
90 | return 0
91 |         return self.keeper_config.get("replicaCount", 3)  # chart default is 3 keeper replicas
92 |
93 | def verify_deployment(self, namespace):
94 | """Wait for and verify deployment is ready."""
95 | expected_total = self.get_expected_pod_count()
96 | expected_ch = self.get_expected_clickhouse_pod_count()
97 | expected_keeper = self.get_expected_keeper_count()
98 |
99 | note(
100 | f"Expected pods - Total: {expected_total}, ClickHouse: {expected_ch}, Keeper: {expected_keeper}"
101 | )
102 |
103 | wait_for_clickhouse_deployment(
104 | namespace=namespace,
105 | expected_pod_count=expected_total,
106 | expected_clickhouse_count=expected_ch,
107 | )
108 |
109 | clickhouse.verify_clickhouse_pod_count(
110 | namespace=namespace, expected_count=expected_ch
111 | )
112 |
113 | if expected_keeper > 0:
114 | clickhouse.verify_keeper_pod_count(
115 | namespace=namespace, expected_count=expected_keeper
116 | )
117 |
118 | def verify_cluster_topology(self, namespace):
119 | """Verify replicas and shards counts match configuration."""
120 | expected_replicas = self.clickhouse_config.get("replicasCount", 1)
121 | expected_shards = self.clickhouse_config.get("shardsCount", 1)
122 |
123 | clickhouse.verify_chi_cluster_topology(
124 | namespace=namespace,
125 | expected_replicas=expected_replicas,
126 | expected_shards=expected_shards,
127 | )
128 |
129 | def verify_name_override(self, namespace):
130 | """Verify custom name is used in resources."""
131 | name_override = self.values.get("nameOverride")
132 | clickhouse.verify_custom_name_in_resources(
133 | namespace=namespace, custom_name=name_override
134 | )
135 | note(f"✓ nameOverride: {name_override}")
136 |
137 | def verify_persistence(self, namespace):
138 | """Verify persistence storage configuration."""
139 | persistence_config = self.clickhouse_config.get("persistence", {})
140 | expected_size = persistence_config.get("size")
141 | expected_access_mode = persistence_config.get("accessMode", "ReadWriteOnce")
142 |
143 | clickhouse.verify_persistence_configuration(
144 | namespace=namespace, expected_size=expected_size
145 | )
146 |
147 | clickhouse.verify_clickhouse_pvc_size(
148 | namespace=namespace, expected_size=expected_size
149 | )
150 |
151 | kubernetes.verify_pvc_access_mode(
152 | namespace=namespace,
153 | expected_access_mode=expected_access_mode,
154 | pvc_name_filter="data",
155 | resource_matcher=clickhouse.is_clickhouse_resource,
156 | )
157 |
158 | def verify_service(self, namespace):
159 | """Verify LoadBalancer service configuration."""
160 | lb_config = self.clickhouse_config.get("lbService", {})
161 | expected_ranges = lb_config.get("loadBalancerSourceRanges")
162 |
163 | kubernetes.verify_loadbalancer_service(
164 | namespace=namespace, expected_ranges=expected_ranges
165 | )
166 |
167 | def verify_users(self, namespace):
168 | """Verify comprehensive user configuration including permissions and grants."""
169 | default_user = self.clickhouse_config.get("defaultUser", {})
170 | user_configs = self.clickhouse_config.get("users")
171 |
172 | users.verify_all_users(
173 | namespace=namespace,
174 | default_user_config=default_user,
175 | users_config=user_configs,
176 | )
177 |
178 | if default_user.get("hostIP"):
179 | users.verify_user_host_ip(
180 | namespace=namespace,
181 | user="default",
182 | expected_host_ip=default_user["hostIP"],
183 | )
184 |
185 | note(f"✓ All users verified")
186 |
187 | def verify_keeper(self, namespace):
188 | """Verify Keeper pods are running."""
189 |         expected_count = self.keeper_config.get("replicaCount", 3)  # chart default is 3
190 |
191 | clickhouse.verify_keeper_pods_running(
192 | namespace=namespace, expected_count=expected_count
193 | )
194 | note(f"✓ Keeper: {expected_count} pods running")
195 |
196 | def verify_image(self, namespace):
197 | """Verify pods use correct image tag."""
198 | image_config = self.clickhouse_config.get("image", {})
199 | expected_tag = image_config.get("tag")
200 |
201 | clickhouse.verify_image_tag(namespace=namespace, expected_tag=expected_tag)
202 |
203 | def verify_pod_annotations(self, namespace):
204 | """Verify pod annotations configuration."""
205 | pod_annotations = self.clickhouse_config.get("podAnnotations", {})
206 |
207 | clickhouse.verify_pod_annotations(
208 | namespace=namespace, expected_annotations=pod_annotations
209 | )
210 | note(f"✓ Pod annotations: {len(pod_annotations)} verified")
211 |
212 | def verify_pod_labels(self, namespace):
213 | """Verify pod labels configuration."""
214 | pod_labels = self.clickhouse_config.get("podLabels", {})
215 |
216 | clickhouse.verify_pod_labels(namespace=namespace, expected_labels=pod_labels)
217 | note(f"✓ Pod labels: {len(pod_labels)} verified")
218 |
219 | def verify_service_annotations(self, namespace):
220 | """Verify service annotations configuration."""
221 | service_config = self.clickhouse_config.get("service", {})
222 | service_annotations = service_config.get("serviceAnnotations", {})
223 | service_type = service_config.get("type", "ClusterIP")
224 |
225 | clickhouse.verify_service_annotations(
226 | namespace=namespace,
227 | expected_annotations=service_annotations,
228 | service_type=service_type,
229 | )
230 | note(f"✓ Service annotations: {len(service_annotations)} verified")
231 |
232 | def verify_service_labels(self, namespace):
233 | """Verify service labels configuration."""
234 | service_config = self.clickhouse_config.get("service", {})
235 | service_labels = service_config.get("serviceLabels", {})
236 | service_type = service_config.get("type", "ClusterIP")
237 |
238 | clickhouse.verify_service_labels(
239 | namespace=namespace,
240 | expected_labels=service_labels,
241 | service_type=service_type,
242 | )
243 | note(f"✓ Service labels: {len(service_labels)} verified")
244 |
245 | def verify_log_persistence(self, namespace):
246 | """Verify log persistence volumes configuration."""
247 | persistence_config = self.clickhouse_config.get("persistence", {})
248 | logs_config = persistence_config.get("logs", {})
249 |
250 | if logs_config.get("enabled"):
251 | expected_log_size = logs_config.get("size")
252 | expected_access_mode = logs_config.get("accessMode", "ReadWriteOnce")
253 |
254 | clickhouse.verify_log_persistence(
255 | namespace=namespace, expected_log_size=expected_log_size
256 | )
257 | note(f"✓ Log persistence: {expected_log_size}")
258 |
259 | kubernetes.verify_pvc_access_mode(
260 | namespace=namespace,
261 | expected_access_mode=expected_access_mode,
262 | pvc_name_filter="logs",
263 | resource_matcher=clickhouse.is_clickhouse_resource,
264 | )
265 |
266 | def verify_extra_config(self, namespace):
267 | """Verify extraConfig custom ClickHouse configuration."""
268 | extra_config = self.clickhouse_config.get("extraConfig", "")
269 | admin_password = self.clickhouse_config.get("defaultUser", {}).get(
270 | "password", ""
271 | )
272 |
273 | if extra_config:
274 | config_keys = clickhouse.extract_extra_config_keys(
275 | extra_config_xml=extra_config
276 | )
277 |
278 | clickhouse.verify_extra_config(
279 | namespace=namespace, expected_config_keys=config_keys
280 | )
281 |
282 | config_values = clickhouse.parse_extra_config_values(
283 | extra_config_xml=extra_config
284 | )
285 |
286 | if config_values:
287 | clickhouse.verify_extra_config_values(
288 | namespace=namespace,
289 | expected_config_values=config_values,
290 | admin_password=admin_password,
291 | )
292 |
293 | note(f"✓ ExtraConfig verified")
294 |
295 | def verify_keeper_storage(self, namespace):
296 | """Verify Keeper storage configuration."""
297 | local_storage = self.keeper_config.get("localStorage", {})
298 | storage_size = local_storage.get("size")
299 |
300 | if storage_size:
301 | clickhouse.verify_keeper_storage(
302 | namespace=namespace, expected_storage_size=storage_size
303 | )
304 | note(f"✓ Keeper storage: {storage_size}")
305 |
306 | def verify_keeper_annotations(self, namespace):
307 | """Verify Keeper pod annotations."""
308 | keeper_annotations = self.keeper_config.get("podAnnotations", {})
309 |
310 | if keeper_annotations:
311 | clickhouse.verify_keeper_annotations(
312 | namespace=namespace, expected_annotations=keeper_annotations
313 | )
314 | note(f"✓ Keeper annotations: {len(keeper_annotations)} verified")
315 |
316 | def verify_keeper_resources(self, namespace):
317 | """Verify Keeper resource requests and limits."""
318 | resources_config = self.keeper_config.get("resources", {})
319 |
320 | if resources_config:
321 | expected_resources = clickhouse.convert_helm_resources_to_k8s(
322 | helm_resources=resources_config
323 | )
324 |
325 | if expected_resources:
326 | clickhouse.verify_keeper_resources(
327 | namespace=namespace, expected_resources=expected_resources
328 | )
329 | note(f"✓ Keeper resources verified")
330 |
331 | def verify_replication_health(self, namespace):
332 | """Verify replication health through system tables."""
333 | admin_password = self.clickhouse_config.get("defaultUser", {}).get(
334 | "password", ""
335 | )
336 | expected_replicas = self.clickhouse_config.get("replicasCount", 1)
337 | expected_shards = self.clickhouse_config.get("shardsCount", 1)
338 |
339 | if expected_replicas > 1 or expected_shards > 1:
340 | # Cluster name equals namespace (which equals release_name in test setup)
341 | clickhouse.verify_system_clusters(
342 | namespace=namespace,
343 | cluster_name=namespace,
344 | expected_shards=expected_shards,
345 | expected_replicas=expected_replicas,
346 | admin_password=admin_password,
347 | )
348 |
349 | if expected_replicas > 1:
350 | clickhouse.verify_system_replicas_health(
351 | namespace=namespace, admin_password=admin_password
352 | )
353 |
354 | note(f"✓ Replication health verified")
355 |
356 | def verify_replication_working(self, namespace):
357 | """Verify replication actually works by creating and replicating a test table."""
358 | admin_password = self.clickhouse_config.get("defaultUser", {}).get(
359 | "password", ""
360 | )
361 | expected_replicas = self.clickhouse_config.get("replicasCount", 1)
362 |
363 | if expected_replicas > 1:
364 | clickhouse.verify_replication_working(
365 | namespace=namespace, admin_password=admin_password
366 | )
367 | note(f"✓ Replication data test passed")
368 |
369 | def verify_service_endpoints(self, namespace):
370 | """Verify service endpoints count matches expected ClickHouse replicas."""
371 | expected_ch_count = self.get_expected_clickhouse_pod_count()
372 |
373 | clickhouse.verify_service_endpoints(
374 | namespace=namespace, expected_endpoint_count=expected_ch_count
375 | )
376 | note(f"✓ Service endpoints: {expected_ch_count}")
377 |
378 | def verify_secrets(self, namespace):
379 | """Verify Kubernetes secrets exist for credentials."""
380 | clickhouse.verify_secrets_exist(namespace=namespace)
381 | note(f"✓ Secrets verified")
382 |
383 | def verify_all(self, namespace):
384 | """Run all verification checks based on configuration.
385 |
386 | This is the main orchestrator - it decides which checks to run
387 | based on the Helm values configuration.
388 | """
389 | note(f"Verifying deployment state from: {self.values_file.name}")
390 |
391 | self.verify_deployment(namespace=namespace)
392 | self.verify_cluster_topology(namespace=namespace)
393 |
394 | expected_replicas = self.clickhouse_config.get("replicasCount", 1)
395 | expected_shards = self.clickhouse_config.get("shardsCount", 1)
396 | if expected_replicas > 1 or expected_shards > 1:
397 | self.verify_replication_health(namespace=namespace)
398 |
399 | if expected_replicas > 1:
400 | self.verify_replication_working(namespace=namespace)
401 |
402 | self.verify_service_endpoints(namespace=namespace)
403 | self.verify_secrets(namespace=namespace)
404 |
405 | if self.values.get("nameOverride"):
406 | self.verify_name_override(namespace=namespace)
407 |
408 | if self.clickhouse_config.get("persistence", {}).get("enabled"):
409 | self.verify_persistence(namespace=namespace)
410 |
411 | if (
412 | self.clickhouse_config.get("persistence", {})
413 | .get("logs", {})
414 | .get("enabled")
415 | ):
416 | self.verify_log_persistence(namespace=namespace)
417 |
418 | if self.clickhouse_config.get("lbService", {}).get("enabled"):
419 | self.verify_service(namespace=namespace)
420 |
421 | if self.clickhouse_config.get("defaultUser") or self.clickhouse_config.get(
422 | "users"
423 | ):
424 | self.verify_users(namespace=namespace)
425 |
426 | if self.clickhouse_config.get("podAnnotations"):
427 | self.verify_pod_annotations(namespace=namespace)
428 |
429 | if self.clickhouse_config.get("podLabels"):
430 | self.verify_pod_labels(namespace=namespace)
431 |
432 | if self.clickhouse_config.get("service", {}).get("serviceAnnotations"):
433 | self.verify_service_annotations(namespace=namespace)
434 |
435 | if self.clickhouse_config.get("service", {}).get("serviceLabels"):
436 | self.verify_service_labels(namespace=namespace)
437 |
438 | if self.clickhouse_config.get("extraConfig"):
439 | self.verify_extra_config(namespace=namespace)
440 |
441 | if self.keeper_config.get("enabled"):
442 | self.verify_keeper(namespace=namespace)
443 |
444 | if self.keeper_config.get("localStorage", {}).get("size"):
445 | self.verify_keeper_storage(namespace=namespace)
446 |
447 | if self.keeper_config.get("podAnnotations"):
448 | self.verify_keeper_annotations(namespace=namespace)
449 |
450 | if self.keeper_config.get("resources"):
451 | self.verify_keeper_resources(namespace=namespace)
452 |
453 | if self.clickhouse_config.get("image", {}).get("tag"):
454 | self.verify_image(namespace=namespace)
455 |
--------------------------------------------------------------------------------
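
For completeness, a sketch of how HelmState might be driven from a scenario. The fixture path and namespace are placeholders, and the expected-count arithmetic simply follows get_expected_pod_count above: replicasCount × shardsCount ClickHouse pods plus keeper replicas (for example, 2 × 1 ClickHouse pods and 3 keeper pods give 5 pods in total).

from testflows.core import *

from tests.steps.deployment import HelmState


@TestScenario
def verify_release(self, namespace="my-release"):
    """Illustrative: verify a deployed release against the values it was installed with."""
    # Placeholder path; a real scenario passes the values file used for `helm install`.
    state = HelmState("values.yaml")

    note(
        f"expecting {state.get_expected_clickhouse_pod_count()} ClickHouse pods "
        f"and {state.get_expected_keeper_count()} keeper pods "
        f"({state.get_expected_pod_count()} total)"
    )

    # Runs only the checks the values file enables (persistence, users, keeper, image tag, ...).
    state.verify_all(namespace=namespace)
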