├── .actionlint.yaml
├── .dockerignore
├── .envrc.sample
├── .gitattributes
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── 01-normal-issue.md
│   │   ├── 02-bug_report.yml
│   │   ├── bug_report.yml
│   │   ├── config.yml
│   │   ├── new_version.md
│   │   └── normal-issue.md
│   ├── PULL_REQUEST_TEMPLATE
│   │   ├── pre-release-getting-started-script.md
│   │   └── pre-release-rust-deps.md
│   ├── actionlint.yaml
│   ├── pull_request_template.md
│   └── workflows
│       ├── build.yml
│       ├── general_daily_security.yml
│       ├── integration-test.yml
│       └── pr_pre-commit.yaml
├── .gitignore
├── .hadolint.yaml
├── .markdownlint.yaml
├── .pre-commit-config.yaml
├── .pylintrc
├── .readme
│   ├── README.md.j2
│   ├── partials
│   │   ├── borrowed
│   │   │   ├── documentation.md.j2
│   │   │   ├── footer.md.j2
│   │   │   ├── header.md.j2
│   │   │   ├── links.md.j2
│   │   │   ├── overview_blurb.md.j2
│   │   │   └── related_reading.md.j2
│   │   └── main.md.j2
│   └── static
│       └── borrowed
│           ├── Icon_Stackable.svg
│           ├── sdp_overview.png
│           └── stackable_overview.png
├── .vscode
│   ├── launch.json
│   └── settings.json
├── .yamllint.yaml
├── CHANGELOG.md
├── Cargo.lock
├── Cargo.nix
├── Cargo.toml
├── LICENSE
├── Makefile
├── NOTICE
├── README.md
├── Tiltfile
├── crate-hashes.json
├── default.nix
├── deny.toml
├── deploy
│   ├── DO_NOT_EDIT.md
│   ├── config-spec
│   │   └── properties.yaml
│   ├── helm
│   │   ├── .gitignore
│   │   ├── chart_testing.yaml
│   │   ├── ct.yaml
│   │   └── zookeeper-operator
│   │       ├── .helmignore
│   │       ├── Chart.yaml
│   │       ├── README.md
│   │       ├── configs
│   │       │   └── properties.yaml
│   │       ├── crds
│   │       │   └── crds.yaml
│   │       ├── templates
│   │       │   ├── _helpers.tpl
│   │       │   ├── _telemetry.tpl
│   │       │   ├── configmap.yaml
│   │       │   ├── deployment.yaml
│   │       │   ├── roles.yaml
│   │       │   └── serviceaccount.yaml
│   │       └── values.yaml
│   └── stackable-operators-ns.yaml
├── docker
│   └── Dockerfile
├── docs
│   ├── antora.yml
│   ├── modules
│   │   └── zookeeper
│   │       ├── examples
│   │       │   ├── example-znode.yaml
│   │       │   ├── getting_started
│   │       │   │   └── code
│   │       │   │       ├── getting_started.sh
│   │       │   │       ├── getting_started.sh.j2
│   │       │   │       ├── install_output.txt
│   │       │   │       ├── install_output.txt.j2
│   │       │   │       ├── test_getting_started_helm.sh
│   │       │   │       ├── test_getting_started_stackablectl.sh
│   │       │   │       ├── znode.yaml
│   │       │   │       ├── zookeeper.yaml
│   │       │   │       └── zookeeper.yaml.j2
│   │       │   └── usage_guide
│   │       │       ├── example-cluster-tls-authentication-class.yaml
│   │       │       ├── example-cluster-tls-authentication-secret.yaml
│   │       │       ├── example-cluster-tls-authentication.yaml
│   │       │       ├── example-cluster-tls-encryption.yaml
│   │       │       ├── example-secret-operator-tls-secret.yaml
│   │       │       └── znode
│   │       │           ├── example-znode-discovery.yaml
│   │       │           ├── example-znode-druid.yaml
│   │       │           ├── example-znode-kafka.yaml
│   │       │           └── example-znode.yaml
│   │       ├── images
│   │       │   └── zookeeper_overview.drawio.svg
│   │       ├── pages
│   │       │   ├── getting_started
│   │       │   │   ├── first_steps.adoc
│   │       │   │   ├── index.adoc
│   │       │   │   └── installation.adoc
│   │       │   ├── index.adoc
│   │       │   ├── reference
│   │       │   │   ├── commandline-parameters.adoc
│   │       │   │   ├── crds.adoc
│   │       │   │   ├── discovery.adoc
│   │       │   │   ├── environment-variables.adoc
│   │       │   │   └── index.adoc
│   │       │   ├── usage_guide
│   │       │   │   ├── authentication.adoc
│   │       │   │   ├── cluster_operations.adoc
│   │       │   │   ├── encryption.adoc
│   │       │   │   ├── index.adoc
│   │       │   │   ├── isolating_clients_with_znodes.adoc
│   │       │   │   ├── listenerclass.adoc
│   │       │   │   ├── log_aggregation.adoc
│   │       │   │   ├── monitoring.adoc
│   │       │   │   ├── operations
│   │       │   │   │   ├── cluster-operations.adoc
│   │       │   │   │   ├── graceful-shutdown.adoc
│   │       │   │   │   ├── index.adoc
│   │       │   │   │   ├── pod-disruptions.adoc
│   │       │   │   │   └── pod-placement.adoc
│   │       │   │   ├── overrides.adoc
│   │       │   │   ├── resource_configuration.adoc
│   │       │   │   └── using_multiple_role_groups.adoc
│   │       │   └── znodes.adoc
│   │       └── partials
│   │           ├── nav.adoc
│   │           └── supported-versions.adoc
│   └── templating_vars.yaml
├── examples
│   └── simple-zookeeper-tls-cluster.yaml
├── nix
│   ├── README.md
│   ├── meta.json
│   ├── sources.json
│   └── sources.nix
├── renovate.json
├── rust-toolchain.toml
├── rust
│   └── operator-binary
│       ├── Cargo.toml
│       ├── build.rs
│       └── src
│           ├── command.rs
│           ├── config
│           │   ├── jvm.rs
│           │   └── mod.rs
│           ├── crd
│           │   ├── affinity.rs
│           │   ├── authentication.rs
│           │   ├── mod.rs
│           │   ├── security.rs
│           │   └── tls.rs
│           ├── discovery.rs
│           ├── main.rs
│           ├── operations
│           │   ├── graceful_shutdown.rs
│           │   ├── mod.rs
│           │   └── pdb.rs
│           ├── product_logging.rs
│           ├── utils.rs
│           ├── zk_controller.rs
│           └── znode_controller.rs
├── rustfmt.toml
├── scripts
│   ├── docs_templating.sh
│   ├── ensure_one_trailing_newline.py
│   ├── generate-manifests.sh
│   ├── render_readme.sh
│   ├── run-tests
│   └── run_tests.sh
├── shell.nix
└── tests
    ├── README-templating.md
    ├── infrastructure.yaml
    ├── kuttl-test.yaml.jinja2
    ├── release.yaml
    ├── templates
    │   ├── .gitkeep
    │   └── kuttl
    │       ├── cluster-operation
    │       │   ├── 00-patch-ns.yaml.j2
    │       │   ├── 05-assert.yaml.j2
    │       │   ├── 05-install-vector-aggregator-discovery-configmap.yaml.j2
    │       │   ├── 10-assert.yaml.j2
    │       │   ├── 10-install-zookeeper.yaml.j2
    │       │   ├── 20-assert.yaml.j2
    │       │   ├── 20-stop-zookeeper.yaml.j2
    │       │   ├── 30-assert.yaml.j2
    │       │   ├── 30-pause-zookeeper.yaml.j2
    │       │   ├── 40-assert.yaml.j2
    │       │   └── 40-restart-zookeeper.yaml.j2
    │       ├── delete-rolegroup
    │       │   ├── 00-patch-ns.yaml.j2
    │       │   ├── 05-assert.yaml
    │       │   ├── 05-install-zookeeper.yaml.j2
    │       │   ├── 10-assert.yaml
    │       │   ├── 10-errors.yaml
    │       │   └── 10-remove-secondary.yaml.j2
    │       ├── logging
    │       │   ├── 00-patch-ns.yaml.j2
    │       │   ├── 05-assert.yaml
    │       │   ├── 05-install-zookeeper-vector-aggregator.yaml
    │       │   ├── 10-create-configmap-with-prepared-logs.yaml
    │       │   ├── 11-assert.yaml
    │       │   ├── 11-install-zookeeper.yaml.j2
    │       │   ├── 20-assert.yaml
    │       │   ├── 20-install-check.yaml
    │       │   ├── 30-assert.yaml.j2
    │       │   ├── 30-prepare-test-zookeeper.yaml.j2
    │       │   ├── prepared-logs.log4j.xml
    │       │   ├── test_log_aggregation.py
    │       │   └── zookeeper-vector-aggregator-values.yaml.j2
    │       ├── smoke
    │       │   ├── 00-limit-range.yaml
    │       │   ├── 00-patch-ns.yaml.j2
    │       │   ├── 10-assert.yaml.j2
    │       │   ├── 10-install-zookeeper.yaml.j2
    │       │   ├── 11-assert.yaml
    │       │   ├── 12-assert.yaml
    │       │   ├── 20-assert.yaml
    │       │   ├── 20-install-check.yaml
    │       │   ├── 21-assert.yaml.j2
    │       │   ├── 21-prepare-test-zookeeper.yaml.j2
    │       │   ├── test_heap.sh
    │       │   ├── test_tls.sh.j2
    │       │   └── test_zookeeper.py
    │       └── znode
    │           ├── 00-patch-ns.yaml.j2
    │           ├── 10-assert.yaml
    │           ├── 10-install-zookeeper.yaml.j2
    │           ├── 20-assert.yaml
    │           └── 20-set-znode-override.yaml
    └── test-definition.yaml
--------------------------------------------------------------------------------
/.actionlint.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | self-hosted-runner:
3 |   # Ubicloud machines we are using
4 |   labels:
5 |     - ubicloud-standard-8-arm
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | debug/
2 | target/
3 | **/*.rs.bk
4 | 
5 | .idea/
6 | *.iws
7 | 
8 | Cargo.nix
9 | crate-hashes.json
10 | result
11 | image.tar
12 | 
13 | # We do NOT want to ignore .git because we use the `built` crate to gather the current git commit hash at built time
14 | # This means we need the .git directory in our Docker image, it will be thrown away and won't be included in the final image
--------------------------------------------------------------------------------
/.envrc.sample:
--------------------------------------------------------------------------------
1 | # vim: syntax=conf
2 | #
3 | # If you use direnv, you can autoload the nix shell:
4 | # You will need to allow the directory the first time.
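# A typical first-time setup (assuming direnv and nix are already installed):
#   cp .envrc.sample .envrc
#   direnv allow .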
5 | use nix
6 | 
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | nix/** linguist-generated
2 | Cargo.nix linguist-generated
3 | crate-hashes.json linguist-generated
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/01-normal-issue.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Normal issue
3 | about: This is just a normal empty issue with a simple checklist
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 | 
8 | ---
9 | 
10 | ## Issue checklist
11 | 
12 | This is a simple checklist of things to bear in mind when creating a new issue.
13 | 
14 | - [ ] **Describe the use-case**: As far as possible, use the pattern "As a [type of user], I would like [feature/functionality] to be able to do [specific action]." This helps identify the feature and the problem it addresses.
15 | - [ ] **Indicate importance and urgency**: Use a scale (e.g., low, medium, high) to indicate the level of importance and urgency.
16 | - [ ] **Work-around**: If there is a known work-around, describe it briefly.
17 | - [ ] **Environment**: Describe the environment where the issue occurs (e.g., SDP version, K8S version, etc.).
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/02-bug_report.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: "🐛 Bug Report"
3 | description: "If something isn't working as expected 🤔."
4 | labels: ["type/bug"]
5 | body:
6 |   - type: markdown
7 |     attributes:
8 |       value: Thanks for taking the time to file a bug report! Please fill out this form as completely as possible.
9 | 
10 |   - type: input
11 |     attributes:
12 |       label: Affected Stackable version
13 |       description: Which version of the Stackable Operator do you see this bug in?
14 | 
15 |   - type: input
16 |     attributes:
17 |       label: Affected Apache ZooKeeper version
18 |       description: Which version of Apache ZooKeeper do you see this bug in?
19 | 
20 |   - type: textarea
21 |     attributes:
22 |       label: Current and expected behavior
23 |       description: A clear and concise description of what the operator is doing and what you would expect.
24 |     validations:
25 |       required: true
26 | 
27 |   - type: textarea
28 |     attributes:
29 |       label: Possible solution
30 |       description: "If you have suggestions on a fix for the bug."
31 | 
32 |   - type: textarea
33 |     attributes:
34 |       label: Additional context
35 |       description: "Add any other context about the problem here. Or a screenshot if applicable."
36 | 
37 |   - type: textarea
38 |     attributes:
39 |       label: Environment
40 |       description: |
41 |         What type of Kubernetes cluster are you running against (k3s/eks/aks/gke/other), and any other information about your environment?
42 |       placeholder: |
43 |         Examples:
44 |         Output of `kubectl version --short`
45 | 
46 |   - type: dropdown
47 |     attributes:
48 |       label: Would you like to work on fixing this bug?
49 |       description: |
50 |         **NOTE**: Let us know if you would like to submit a PR for this. We are more than happy to help you through the process.
51 |       options:
52 |         - "yes"
53 |         - "no"
54 |         - "maybe"
55 | 
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: "🐛 Bug Report"
3 | description: "If something isn't working as expected 🤔."
4 | labels: ["type/bug"]
5 | body:
6 |   - type: markdown
7 |     attributes:
8 |       value: Thanks for taking the time to file a bug report! Please fill out this form as completely as possible.
9 | 
10 |   - type: input
11 |     attributes:
12 |       label: Affected Stackable version
13 |       description: Which version of the Stackable Operator do you see this bug in?
14 | 
15 |   - type: input
16 |     attributes:
17 |       label: Affected Apache ZooKeeper version
18 |       description: Which version of Apache ZooKeeper do you see this bug in?
19 | 
20 |   - type: textarea
21 |     attributes:
22 |       label: Current and expected behavior
23 |       description: A clear and concise description of what the operator is doing and what you would expect.
24 |     validations:
25 |       required: true
26 | 
27 |   - type: textarea
28 |     attributes:
29 |       label: Possible solution
30 |       description: "If you have suggestions on a fix for the bug."
31 | 
32 |   - type: textarea
33 |     attributes:
34 |       label: Additional context
35 |       description: "Add any other context about the problem here. Or a screenshot if applicable."
36 | 
37 |   - type: textarea
38 |     attributes:
39 |       label: Environment
40 |       description: |
41 |         What type of Kubernetes cluster are you running against (k3s/eks/aks/gke/other), and any other information about your environment?
42 |       placeholder: |
43 |         Examples:
44 |         Output of `kubectl version --short`
45 | 
46 |   - type: dropdown
47 |     attributes:
48 |       label: Would you like to work on fixing this bug?
49 |       description: |
50 |         **NOTE**: Let us know if you would like to submit a PR for this. We are more than happy to help you through the process.
51 |       options:
52 |         - "yes"
53 |         - "no"
54 |         - "maybe"
55 | 
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | ---
2 | blank_issues_enabled: true
3 | contact_links:
4 |   - name: 🙋🏾 Question
5 |     about: Use this to ask a question about this project
6 |     url: https://github.com/orgs/stackabletech/discussions/new?category=q-a
7 |   - name: 🚀 Feature Requests and other things
8 |     about: Open an issue with your feature request or any other issue not covered elsewhere
9 |     url: https://github.com/stackabletech/zookeeper-operator/issues/new
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/new_version.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: New Version
3 | about: Request support for a new product version
4 | title: "[NEW VERSION]"
5 | labels: ''
6 | assignees: ''
7 | 
8 | ---
9 | 
10 | ## Which new version of Apache ZooKeeper should we support?
11 | 
12 | Please specify the version, version range, or version numbers to support, and also add these to the issue title.
13 | 
14 | ## Additional information
15 | 
16 | If possible, provide a link to release notes/changelog
17 | 
18 | ## Changes required
19 | 
20 | Are there any upstream changes that we need to support?
21 | e.g. new features, changed features, deprecated features etc.
22 | 
23 | ## Implementation checklist
24 | 
25 | 
29 | 
30 | - [ ] Update the Docker image
31 | - [ ] Update documentation to include supported version(s)
32 | - [ ] Update and test getting started guide with updated version(s)
33 | - [ ] Update operator to support the new version (if needed)
34 | - [ ] Update integration tests to use the new versions (in addition to, or replacing, old versions)
35 | - [ ] Update examples to use new versions
36 | 
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/normal-issue.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Normal issue
3 | about: This is just a normal empty issue with a simple checklist
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 | 
8 | ---
9 | 
10 | ## Issue checklist
11 | 
12 | This is a simple checklist of things to bear in mind when creating a new issue.
13 | 
14 | - [ ] Describe the use-case, as far as possible. For instance, using the pattern "As a XXXX, I would like XXXX to be able to do XXXX" helps to identify the feature as well as the problem it is intended to address.
15 | - [ ] Indicate an approximate level of importance and urgency.
16 | - [ ] Indicate whether there is a known work-around that can be used until the issue has been resolved.
17 | 
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE/pre-release-getting-started-script.md:
--------------------------------------------------------------------------------
1 | ## Check and Update Getting Started Script
2 | 
3 | 
7 | 
8 | 
11 | 
12 | Part of 
13 | 
14 | > [!NOTE]
15 | > During a Stackable release we need to check (and optionally update) the
16 | > getting-started scripts to ensure they still work after product and operator
17 | > updates.
18 | 
19 | ```shell
20 | # Some of the scripts are in a code/ subdirectory
21 | # pushd docs/modules/superset/examples/getting_started
22 | # pushd docs/modules/superset/examples/getting_started/code
23 | pushd $(fd -td getting_started | grep examples); cd code 2>/dev/null || true
24 | 
25 | # Make a fresh cluster (~12 seconds)
26 | kind delete cluster && kind create cluster
27 | ./getting_started.sh stackablectl
28 | 
29 | # Make a fresh cluster (~12 seconds)
30 | kind delete cluster && kind create cluster
31 | ./getting_started.sh helm
32 | 
33 | popd
34 | ```
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE/pre-release-rust-deps.md:
--------------------------------------------------------------------------------
1 | ## Bump Rust Dependencies for Stackable Release YY.M.X
2 | 
3 | 
7 | 
8 | 
11 | 
12 | Part of 
13 | 
14 | > [!NOTE]
15 | > During a Stackable release we need to update various Rust dependencies before
16 | > entering the final release period to ensure we run the latest versions of
17 | > crates. These bumps also include previously updated and released crates from
18 | > the `operator-rs` repository.
19 | 
20 | ### Tasks
21 | 
22 | - [ ] Bump Rust Dependencies, see below for more details.
23 | - [ ] Add changelog entry stating which important crates were bumped (including the version).
24 | 
25 | > [!NOTE]
26 | > The bumping / updating of Rust dependencies is done in multiple steps:
27 | >
28 | > 1. Update the minimum version in the root `Cargo.toml` manifest.
29 | > 2. Run the `cargo update` command, which also updates the `Cargo.lock` file.
30 | > 3. Lastly, run `make regenerate-nix` to update the `Cargo.nix` file.
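In shell terms, steps 2 and 3 of the note boil down to the following sketch (which tags to raise in `Cargo.toml` depends on the release at hand):

```shell
# 1. edit the root Cargo.toml, raising e.g. the pinned stackable-operator tag
# 2. refresh the lockfile
cargo update
# 3. regenerate the Nix expression from the new lockfile
make regenerate-nix
```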
31 | 
32 | ### Bump Rust Dependencies
33 | 
34 | - [ ] Bump `stackable-operator` and friends
35 | - [ ] Bump `product-config`
36 | - [ ] Bump all other dependencies
--------------------------------------------------------------------------------
/.github/actionlint.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | self-hosted-runner:
3 |   # Ubicloud machines we are using
4 |   labels:
5 |     - ubicloud-standard-8-arm
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ## Description
2 | 
3 | *Please add a description here. This will become the commit message of the merge request later.*
4 | 
5 | ## Definition of Done Checklist
6 | 
7 | - Not all of these items are applicable to all PRs; the author should update this template to leave in only the boxes that are relevant
8 | - Please make sure all these things are done and tick the boxes
9 | 
10 | ### Author
11 | 
12 | - [ ] Changes are OpenShift compatible
13 | - [ ] CRD changes approved
14 | - [ ] CRD documentation for all fields, following the [style guide](https://docs.stackable.tech/home/nightly/contributor/docs/style-guide).
15 | - [ ] Helm chart can be installed and the deployed operator works
16 | - [ ] Integration tests passed (for non-trivial changes)
17 | - [ ] Changes need to be "offline" compatible
18 | - [ ] Links to generated (nightly) docs added
19 | - [ ] Release note snippet added
20 | 
21 | ### Reviewer
22 | 
23 | - [ ] Code contains useful comments
24 | - [ ] Code contains useful logging statements
25 | - [ ] (Integration-)Test cases added
26 | - [ ] Documentation added or updated. Follows the [style guide](https://docs.stackable.tech/home/nightly/contributor/docs/style-guide).
27 | - [ ] Changelog updated 28 | - [ ] Cargo.toml only contains references to git tags (not specific commits or branches) 29 | 30 | ### Acceptance 31 | 32 | - [ ] Feature Tracker has been updated 33 | - [ ] Proper release label has been added 34 | - [ ] Links to generated (nightly) docs added 35 | - [ ] Release note snippet added 36 | - [ ] Add `type/deprecation` label & add to the [deprecation schedule](https://github.com/orgs/stackabletech/projects/44/views/1) 37 | - [ ] Add `type/experimental` label & add to the [experimental features tracker](https://github.com/orgs/stackabletech/projects/47) 38 | -------------------------------------------------------------------------------- /.github/workflows/general_daily_security.yml: -------------------------------------------------------------------------------- 1 | # ============= 2 | # This file is automatically generated from the templates in stackabletech/operator-templating 3 | # DON'T MANUALLY EDIT THIS FILE 4 | # ============= 5 | --- 6 | name: Daily Security Audit 7 | 8 | on: 9 | schedule: 10 | - cron: '15 4 * * *' 11 | workflow_dispatch: 12 | 13 | permissions: {} 14 | 15 | jobs: 16 | audit: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 20 | with: 21 | persist-credentials: false 22 | - uses: rustsec/audit-check@69366f33c96575abad1ee0dba8212993eecbe998 # v2.0.0 23 | with: 24 | token: ${{ secrets.GITHUB_TOKEN }} 25 | -------------------------------------------------------------------------------- /.github/workflows/integration-test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Integration Test 3 | run-name: | 4 | Integration Test on ${{ inputs.test-platform }}-${{ inputs.test-architecture }} (${{ inputs.test-run == 'all' && 'all' || format('{0}={1}', inputs.test-run, inputs.test-parameter) }}) 5 | 6 | env: 7 | DEFAULT_TEST_PLATFORM: kind-1.31.0 8 | DEFAULT_TEST_ARCHITECTURE: amd64 9 | DEFAULT_TEST_RUN: all 10 | DEFAULT_TEST_PARAMETER: "" # Unused when the test-run is 'all' 11 | TEST_PLATFORM: ${{ inputs.test-platform }} 12 | TEST_ARCHITECTURE: ${{ inputs.test-architecture }} 13 | TEST_RUN: ${{ inputs.test-run }} 14 | TEST_PARAMETER: ${{ inputs.test-parameter }} 15 | 16 | on: 17 | # schedule: 18 | # At 00:00 on Sunday. See: https://crontab.guru/#0_0_*_*_0 19 | # - cron: "0 0 * * 0" 20 | workflow_dispatch: 21 | inputs: 22 | test-platform: 23 | description: | 24 | The test platform to run on 25 | required: true 26 | type: choice 27 | options: 28 | - kind-1.31.2 29 | - kind-1.30.6 30 | - rke2-1.31.2 31 | - rke2-1.30.6 32 | - k3s-1.31.2 33 | - k3s-1.30.6 34 | - aks-1.29 35 | - aks-1.28 36 | - aks-1.27 37 | - eks-1.29 38 | - eks-1.28 39 | - eks-1.27 40 | - gke-1.29 41 | - gke-1.28 42 | - gke-1.27 43 | - okd-4.15 44 | - okd-4.14 45 | - okd-4.13 46 | test-architecture: 47 | description: | 48 | The architecture the tests will run on. 
Consult the run-integration-test action README for 49 | more details on supported architectures for each distribution 50 | required: true 51 | type: choice 52 | options: 53 | - amd64 54 | - arm64 55 | test-run: 56 | description: Type of test run 57 | required: true 58 | type: choice 59 | options: 60 | - all 61 | - test-suite 62 | - test 63 | test-parameter: 64 | description: Parameter to `--test-suite` or `--test` (ignored for `all`) 65 | default: smoke 66 | 67 | jobs: 68 | test: 69 | name: Run Integration Test 70 | runs-on: ubuntu-latest 71 | steps: 72 | - name: Override integration test options for scheduled run 73 | if: github.event_name == 'schedule' 74 | shell: bash 75 | run: | 76 | set -euo pipefail 77 | 78 | echo "TEST_PLATFORM=$DEFAULT_TEST_PLATFORM" | tee -a "$GITHUB_ENV" 79 | echo "TEST_ARCHITECTURE=$DEFAULT_TEST_ARCHITECTURE" | tee -a "$GITHUB_ENV" 80 | echo "TEST_RUN=$DEFAULT_TEST_RUN" | tee -a "$GITHUB_ENV" 81 | echo "TEST_PARAMETER=$DEFAULT_TEST_PARAMETER" | tee -a "$GITHUB_ENV" 82 | 83 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 84 | with: 85 | submodules: recursive 86 | 87 | - name: Run Integration Test 88 | id: test 89 | uses: stackabletech/actions/run-integration-test@5901c3b1455488820c4be367531e07c3c3e82538 # v0.4.0 90 | with: 91 | test-platform: ${{ env.TEST_PLATFORM }}-${{ env.TEST_ARCHITECTURE }} 92 | test-run: ${{ env.TEST_RUN }} 93 | test-parameter: ${{ env.TEST_PARAMETER }} 94 | replicated-api-token: ${{ secrets.REPLICATED_API_TOKEN }} 95 | 96 | - name: Send Notification 97 | if: ${{ failure() }} 98 | env: 99 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_INTEGRATION_TEST_TOKEN }} 100 | uses: slackapi/slack-github-action@fcfb566f8b0aab22203f066d80ca1d7e4b5d05b3 # v1.27.1 101 | with: 102 | channel-id: "C07UYJYSMSN" # notifications-integration-tests 103 | payload: | 104 | { 105 | "text": "Integration Test *${{ github.repository }}* failed", 106 | "attachments": [ 107 | { 108 | "pretext": "Started at ${{ steps.test.outputs.start-time }}, failed at ${{ steps.test.outputs.end-time }}", 109 | "color": "#aa0000", 110 | "actions": [ 111 | { 112 | "type": "button", 113 | "text": "Go to integration test run", 114 | "url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" 115 | } 116 | ] 117 | } 118 | ] 119 | } 120 | -------------------------------------------------------------------------------- /.github/workflows/pr_pre-commit.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: pre-commit 3 | 4 | on: 5 | pull_request: 6 | merge_group: 7 | 8 | env: 9 | CARGO_TERM_COLOR: always 10 | NIX_PKG_MANAGER_VERSION: "2.28.3" 11 | RUST_TOOLCHAIN_VERSION: "nightly-2025-05-26" 12 | HADOLINT_VERSION: "v2.12.0" 13 | PYTHON_VERSION: "3.12" 14 | 15 | jobs: 16 | pre-commit: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - name: Install host dependencies 20 | uses: awalsh128/cache-apt-pkgs-action@5902b33ae29014e6ca012c5d8025d4346556bd40 # v1.4.3 21 | with: 22 | packages: protobuf-compiler krb5-user libkrb5-dev libclang-dev liblzma-dev libssl-dev pkg-config apt-transport-https 23 | version: ubuntu-latest 24 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 25 | with: 26 | persist-credentials: false 27 | submodules: recursive 28 | fetch-depth: 0 29 | - uses: stackabletech/actions/run-pre-commit@9aae2d1c14239021bfa33c041010f6fb7adec815 # v0.8.2 30 | with: 31 | python-version: ${{ env.PYTHON_VERSION }} 32 | rust: ${{ env.RUST_TOOLCHAIN_VERSION }} 33 | hadolint: 
${{ env.HADOLINT_VERSION }} 34 | nix: ${{ env.NIX_PKG_MANAGER_VERSION }} 35 | nix-github-token: ${{ secrets.GITHUB_TOKEN }} 36 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | tests/_work/ 2 | debug/ 3 | target/ 4 | **/*.rs.bk 5 | 6 | .idea/ 7 | *.iws 8 | *.iml 9 | 10 | *.tgz 11 | 12 | result 13 | image.tar 14 | 15 | tilt_options.json 16 | 17 | .direnv/ 18 | .direnvrc 19 | .envrc 20 | 21 | .DS_Store 22 | -------------------------------------------------------------------------------- /.hadolint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ignored: 3 | # Warning: Use the -y switch to avoid manual input dnf install -y 4 | # https://github.com/hadolint/hadolint/wiki/DL3038 5 | # Reason: We set `assumeyes=True` in dnf.conf in our base image 6 | - DL3038 7 | 8 | # Warning: Specify version with dnf install -y - 9 | # https://github.com/hadolint/hadolint/wiki/DL3041 10 | # Reason: It's good advice, but we're not set up to pin versions just yet 11 | - DL3041 12 | -------------------------------------------------------------------------------- /.markdownlint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # All defaults or options can be checked here: 3 | # https://github.com/DavidAnson/markdownlint/blob/main/schema/.markdownlint.yaml 4 | 5 | # Default state for all rules 6 | default: true 7 | 8 | # MD013/line-length - Line length 9 | MD013: 10 | # Number of characters 11 | line_length: 9999 12 | # Number of characters for headings 13 | heading_line_length: 9999 14 | # Number of characters for code blocks 15 | code_block_line_length: 9999 16 | 17 | # MD033/no-inline-html 18 | MD033: 19 | allowed_elements: [h1, img, p] 20 | 21 | # MD024/no-duplicate-heading/no-duplicate-header - Multiple headings with the same content 22 | MD024: 23 | # Only check sibling headings 24 | siblings_only: true 25 | 26 | # MD041/first-line-heading/first-line-h1 First line in a file should be a top-level heading 27 | MD041: false # Github issues and PRs already have titles, and H1 is enormous in the description box. 28 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | exclude: ^(Cargo\.nix|crate-hashes\.json|nix/.*)$ 3 | 4 | default_language_version: 5 | node: system 6 | 7 | repos: 8 | - repo: https://github.com/pre-commit/pre-commit-hooks 9 | rev: cef0300fd0fc4d2a87a85fa2093c6b283ea36f4b # 5.0.0 10 | hooks: 11 | - id: trailing-whitespace 12 | - id: end-of-file-fixer 13 | - id: detect-aws-credentials 14 | args: ["--allow-missing-credentials"] 15 | - id: detect-private-key 16 | 17 | - repo: https://github.com/adrienverge/yamllint 18 | rev: 79a6b2b1392eaf49cdd32ac4f14be1a809bbd8f7 # 1.37.0 19 | hooks: 20 | - id: yamllint 21 | 22 | - repo: https://github.com/igorshubovych/markdownlint-cli 23 | rev: 192ad822316c3a22fb3d3cc8aa6eafa0b8488360 # 0.45.0 24 | hooks: 25 | - id: markdownlint 26 | types: [text] 27 | files: \.md(\.j2)*$ 28 | 29 | - repo: https://github.com/koalaman/shellcheck-precommit 30 | rev: 2491238703a5d3415bb2b7ff11388bf775372f29 # 0.10.0 31 | hooks: 32 | - id: shellcheck 33 | args: ["--severity=info"] 34 | 35 | # WARNING (@NickLarsenNZ): Nix users need to install ruff first. 
36 | # If you do not, you will need to delete the cached ruff binary shown in the 37 | # error message 38 | - repo: https://github.com/astral-sh/ruff-pre-commit 39 | rev: d19233b89771be2d89273f163f5edc5a39bbc34a # 0.11.12 40 | hooks: 41 | # Run the linter. 42 | - id: ruff 43 | # Run the formatter. 44 | - id: ruff-format 45 | 46 | - repo: https://github.com/rhysd/actionlint 47 | rev: 03d0035246f3e81f36aed592ffb4bebf33a03106 # 1.7.7 48 | hooks: 49 | - id: actionlint 50 | 51 | - repo: https://github.com/hadolint/hadolint 52 | rev: b3555ba9c2bfd9401e79f2f0da68dd1ae38e10c7 # 2.12.0 53 | hooks: 54 | - id: hadolint 55 | 56 | - repo: local 57 | hooks: 58 | - id: regenerate-charts 59 | name: regenerate-charts 60 | language: system 61 | entry: make regenerate-charts 62 | stages: [pre-commit, pre-merge-commit] 63 | pass_filenames: false 64 | files: \.rs$|Cargo\.(toml|lock) 65 | 66 | - id: regenerate-nix 67 | name: regenerate-nix 68 | language: system 69 | entry: make regenerate-nix 70 | stages: [pre-commit, pre-merge-commit] 71 | pass_filenames: false 72 | files: Cargo\.lock 73 | 74 | - id: cargo-test 75 | name: cargo-test 76 | language: system 77 | entry: cargo test 78 | stages: [pre-commit, pre-merge-commit] 79 | pass_filenames: false 80 | files: \.rs$|Cargo\.(toml|lock) 81 | 82 | - id: cargo-rustfmt 83 | name: cargo-rustfmt 84 | language: system 85 | entry: cargo +nightly-2025-05-26 fmt --all -- --check 86 | stages: [pre-commit, pre-merge-commit] 87 | pass_filenames: false 88 | files: \.rs$ 89 | 90 | - id: cargo-clippy 91 | name: cargo-clippy 92 | language: system 93 | entry: cargo clippy --all-targets -- -D warnings 94 | stages: [pre-commit, pre-merge-commit] 95 | pass_filenames: false 96 | files: \.rs$ 97 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MESSAGES CONTROL] 2 | 3 | # These rules are for missing docstrings which doesn't matter much for most of our simple scripts 4 | disable=C0114,C0115,C0116 5 | 6 | [FORMAT] 7 | 8 | max-line-length=999 9 | indent-string=' ' 10 | -------------------------------------------------------------------------------- /.readme/README.md.j2: -------------------------------------------------------------------------------- 1 | {%- set title="Stackable Operator for Apache ZooKeeper" -%} 2 | {%- set operator_name="zookeeper" -%} 3 | {%- set operator_docs_slug="zookeeper" -%} 4 | {%- set related_reading_links=[] -%} 5 | 6 | {% filter trim %} 7 | {%- include "partials/borrowed/header.md.j2" -%} 8 | {% endfilter %} 9 | 10 | {% filter trim %} 11 | {%- include "partials/borrowed/links.md.j2" -%} 12 | {% endfilter %} 13 | 14 | {% filter trim %} 15 | {%- include "partials/main.md.j2" -%} 16 | {% endfilter %} 17 | 18 | {% filter trim %} 19 | {%- include "partials/borrowed/footer.md.j2" -%} 20 | {% endfilter %} 21 | 22 | {% filter trim %} 23 | {%- include "partials/borrowed/related_reading.md.j2" -%} 24 | {% endfilter %} 25 | -------------------------------------------------------------------------------- /.readme/partials/borrowed/documentation.md.j2: -------------------------------------------------------------------------------- 1 | 2 | ## Documentation 3 | 4 | The stable documentation for this operator can be found in our [Stackable Data Platform documentation](https://docs.stackable.tech/home/stable/{{operator_docs_slug}}). 
5 | If you are interested in the most recent state of this repository, check out the [nightly docs](https://docs.stackable.tech/home/nightly/{{operator_docs_slug}}) instead.
6 | 
7 | The documentation for all Stackable products can be found at [docs.stackable.tech](https://docs.stackable.tech).
8 | 
9 | If you have a question about the Stackable Data Platform, contact us via our [homepage](https://stackable.tech/) or ask a public question in our [Discussions forum](https://github.com/orgs/stackabletech/discussions).
--------------------------------------------------------------------------------
/.readme/partials/borrowed/footer.md.j2:
--------------------------------------------------------------------------------
1 | 
2 | ## About The Stackable Data Platform
3 | 
4 | This operator is written and maintained by [Stackable](https://stackable.tech) and it is part of a larger data platform.
5 | 
6 | ![Stackable Data Platform Overview](./.readme/static/borrowed/stackable_overview.png)
7 | 
8 | Stackable makes it easy to operate data applications in any Kubernetes cluster.
9 | 
10 | The data platform offers many operators, with new ones added continuously. All our operators are designed and built to be easily interconnected and consistent to work with.
11 | 
12 | [Stackable GmbH](https://stackable.tech/) is the company behind the Stackable Data Platform, offering professional services, paid support plans and custom development.
13 | 
14 | We love open-source!
15 | 
16 | ## Supported Platforms
17 | 
18 | We develop and test our operators on the following cloud platforms:
19 | 
20 | * AKS on Microsoft Azure
21 | * EKS on Amazon Web Services (AWS)
22 | * GKE on Google Cloud Platform (GCP)
23 | * [IONOS Cloud Managed Kubernetes](https://cloud.ionos.com/managed/kubernetes)
24 | * K3s
25 | * Kubernetes (for an up-to-date list of supported versions, please check the release notes in our [docs](https://docs.stackable.tech))
26 | * Red Hat OpenShift
27 | 
28 | ## Other Operators
29 | 
30 | These are the operators that are currently part of the Stackable Data Platform:
31 | 
32 | * [Stackable Operator for Apache Airflow](https://github.com/stackabletech/airflow-operator)
33 | * [Stackable Operator for Apache Druid](https://github.com/stackabletech/druid-operator)
34 | * [Stackable Operator for Apache HBase](https://github.com/stackabletech/hbase-operator)
35 | * [Stackable Operator for Apache Hadoop HDFS](https://github.com/stackabletech/hdfs-operator)
36 | * [Stackable Operator for Apache Hive](https://github.com/stackabletech/hive-operator)
37 | * [Stackable Operator for Apache Kafka](https://github.com/stackabletech/kafka-operator)
38 | * [Stackable Operator for Apache NiFi](https://github.com/stackabletech/nifi-operator)
39 | * [Stackable Operator for Apache Spark](https://github.com/stackabletech/spark-k8s-operator)
40 | * [Stackable Operator for Apache Superset](https://github.com/stackabletech/superset-operator)
41 | * [Stackable Operator for Trino](https://github.com/stackabletech/trino-operator)
42 | * [Stackable Operator for Apache ZooKeeper](https://github.com/stackabletech/zookeeper-operator)
43 | 
44 | And our internal operators:
45 | 
46 | * [Commons Operator](https://github.com/stackabletech/commons-operator)
47 | * [Listener Operator](https://github.com/stackabletech/listener-operator)
48 | * [OpenPolicyAgent Operator](https://github.com/stackabletech/opa-operator)
49 | * [Secret Operator](https://github.com/stackabletech/secret-operator)
50 | 
51 | ## Contributing
52 | 
53 | Contributions are welcome.
54 | Follow our [Contributors Guide](https://docs.stackable.tech/home/stable/contributor/index.html) to learn how you can contribute.
55 | All contributors will have to sign a [Contributor License Agreement](https://github.com/stackabletech/.github/blob/main/cla.md).
56 | This is enforced automatically when you submit a Pull Request, where a bot will guide you through the process.
57 | 
58 | ## License
59 | 
60 | [Open Software License version 3.0](./LICENSE).
61 | 
62 | ## Support
63 | 
64 | Get started with the community edition! If you want professional support, [we offer subscription plans and custom licensing](https://stackable.tech/en/plans/).
65 | 
66 | ## Sponsor
67 | 
68 | If you want to support our work but don't need professional support, please consider [sponsoring](https://github.com/sponsors/stackabletech) our work.
69 | 
--------------------------------------------------------------------------------
/.readme/partials/borrowed/header.md.j2:
--------------------------------------------------------------------------------
1 | 
2 | <p align="center">
3 |   <img src="./.readme/static/borrowed/Icon_Stackable.svg" alt="Stackable Logo"/>
4 | </p>
5 | 
6 | <h1 align="center">{{title}}</h1>
7 | 
--------------------------------------------------------------------------------
/.readme/partials/borrowed/links.md.j2:
--------------------------------------------------------------------------------
1 | 
2 | [![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg)](https://GitHub.com/stackabletech/{{operator_name}}-operator/graphs/commit-activity)
3 | [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-green.svg)](https://docs.stackable.tech/home/stable/contributor/index.html)
4 | [![License OSL3.0](https://img.shields.io/badge/license-OSL3.0-green)](./LICENSE)
5 | 
6 | [Documentation](https://docs.stackable.tech/home/stable/{{operator_docs_slug}}) {% if quickstart_link %}| [Quickstart]({{quickstart_link}}) {% endif %}| [Stackable Data Platform](https://stackable.tech/) | [Platform Docs](https://docs.stackable.tech/) | [Discussions](https://github.com/orgs/stackabletech/discussions) | [Discord](https://discord.gg/7kZ3BNnCAF)
7 | 
--------------------------------------------------------------------------------
/.readme/partials/borrowed/overview_blurb.md.j2:
--------------------------------------------------------------------------------
1 | 
2 | It is part of the Stackable Data Platform, a curated selection of the best open source data apps like Apache Kafka, Apache Druid, Trino or Apache Spark, [all](#other-operators) working together seamlessly. Based on Kubernetes, it runs everywhere – [on prem or in the cloud](#supported-platforms).
3 | 
--------------------------------------------------------------------------------
/.readme/partials/borrowed/related_reading.md.j2:
--------------------------------------------------------------------------------
1 | 
2 | {%- if related_reading_links -%}
3 | ## Related Reading
4 | {% for (text, link) in related_reading_links %}
5 | * [{{text}}]({{link}})
6 | {%- endfor %}
7 | {%- endif -%}
8 | 
--------------------------------------------------------------------------------
/.readme/partials/main.md.j2:
--------------------------------------------------------------------------------
1 | This is a Kubernetes operator to manage [Apache ZooKeeper](https://zookeeper.apache.org/) ensembles.
2 | 
3 | {% filter trim %}
4 | {%- include "partials/borrowed/overview_blurb.md.j2" -%}
5 | {% endfilter %}
6 | 
7 | ## Installation
8 | 
9 | You can install the operator using [stackablectl or helm](https://docs.stackable.tech/home/stable/{{operator_name}}/getting_started/installation).
10 | 
11 | Read on to get started with it, or see it in action in one of our [demos](https://stackable.tech/en/demos/).
12 | 
13 | ## Getting Started
14 | 
15 | You can follow this [tutorial](https://docs.stackable.tech/home/stable/{{operator_name}}/getting_started/first_steps).
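For orientation, the two installation paths mentioned above typically look like this (a sketch only — channel and version flags are omitted, and the Helm repository URL is the one Stackable documents; see the linked installation guide for the authoritative commands):

```shell
# with stackablectl
stackablectl operator install zookeeper

# or with Helm
helm repo add stackable-stable https://repo.stackable.tech/repository/helm-stable/
helm install zookeeper-operator stackable-stable/zookeeper-operator
```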
16 | 
17 | {% filter trim %}
18 | {%- include "partials/borrowed/documentation.md.j2" -%}
19 | {% endfilter %}
20 | 
--------------------------------------------------------------------------------
/.readme/static/borrowed/Icon_Stackable.svg:
--------------------------------------------------------------------------------
(SVG markup not captured in this dump; the file is the Stackable logo icon.)
--------------------------------------------------------------------------------
/.readme/static/borrowed/sdp_overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stackabletech/zookeeper-operator/a0493366550a8f6b6c41980950d10baece58597c/.readme/static/borrowed/sdp_overview.png
--------------------------------------------------------------------------------
/.readme/static/borrowed/stackable_overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stackabletech/zookeeper-operator/a0493366550a8f6b6c41980950d10baece58597c/.readme/static/borrowed/stackable_overview.png
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 |   "version": "0.2.0",
3 |   "configurations": [
4 |     {
5 |       "type": "lldb",
6 |       "request": "launch",
7 |       "name": "Debug operator binary",
8 |       "cargo": {
9 |         "args": ["build"],
10 |         "filter": {
11 |           "name": "stackable-{[ operator.name }]",
12 |           "kind": "bin"
13 |         }
14 |       },
15 |       "args": ["run"],
16 |       "cwd": "${workspaceFolder}"
17 |     }
18 |   ]
19 | }
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 |   "rust-analyzer.rustfmt.overrideCommand": [
3 |     "rustfmt",
4 |     "+nightly-2025-05-26",
5 |     "--edition",
6 |     "2024",
7 |     "--"
8 |   ],
9 | }
--------------------------------------------------------------------------------
/.yamllint.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | extends: default
3 | 
4 | ignore: |
5 |   deploy/helm/**/templates
6 | 
7 | rules:
8 |   line-length: disable
9 |   truthy:
10 |     check-keys: false
11 |   comments:
12 |     min-spaces-from-content: 1 # Needed due to https://github.com/adrienverge/yamllint/issues/443
13 |   indentation:
14 |     indent-sequences: consistent
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | members = ["rust/operator-binary"]
3 | resolver = "2"
4 | 
5 | [workspace.package]
6 | version = "0.0.0-dev"
7 | authors = ["Stackable GmbH "]
8 | license = "OSL-3.0"
9 | edition = "2021"
10 | repository = "https://github.com/stackabletech/zookeeper-operator"
11 | 
12 | [workspace.dependencies]
13 | product-config = { git = "https://github.com/stackabletech/product-config.git", tag = "0.7.0" }
14 | stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", features = ["telemetry", "versioned"], tag = "stackable-operator-0.93.1" }
15 | 
16 | anyhow = "1.0"
17 | built = { version = "0.8", features = ["chrono", "git2"] }
18 | clap = "4.5"
19 | const_format = "0.2"
20 | fnv = "1.0"
21 | futures = { version = "0.3", features = ["compat"] }
22 | indoc = "2.0"
23 | pin-project = "1.1"
24 | semver = "1.0"
25 | serde =
{ version = "1.0", features = ["derive"] } 26 | serde_json = "1.0" 27 | serde_yaml = "0.9" 28 | snafu = "0.8" 29 | strum = { version = "0.27", features = ["derive"] } 30 | tokio = { version = "1.40", features = ["full"] } 31 | tokio-zookeeper = "0.4" 32 | tracing = "0.1" 33 | 34 | #[patch."https://github.com/stackabletech/operator-rs"] 35 | # stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "main" } 36 | 37 | [patch.crates-io] 38 | # tokio-zookeeper = { path = "../tokio-zookeeper" } 39 | # tokio-zookeeper = { git = "https://github.com/stackabletech/tokio-zookeeper.git", branch = "bugfix/whatever-thread-safety" } 40 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | This product includes software developed at 2 | The Apache Software Foundation (http://www.apache.org/). 3 | 4 | The Initial Developer of parts of the code in util::is_valid_zookeeper_path 5 | was the Apache Software Foundation (http://www.apache.org/). 6 | The code has been copied from: 7 | https://github.com/apache/zookeeper/blob/release-3.7.0/zookeeper-server/src/main/java/org/apache/zookeeper/common/PathUtils.java#L41 8 | and adapted to this project by Stackable GmbH. 9 | The code was originally licensed under the Apache License Version 2.0, 10 | a copy of which can be obtained at http://www.apache.org/licenses/LICENSE-2.0 11 | -------------------------------------------------------------------------------- /Tiltfile: -------------------------------------------------------------------------------- 1 | # If tilt_options.json exists read it and load the default_registry value from it 2 | settings = read_json('tilt_options.json', default={}) 3 | registry = settings.get('default_registry', 'oci.stackable.tech/sandbox') 4 | 5 | # Configure default registry either read from config file above, or with default value of "oci.stackable.tech/sandbox" 6 | default_registry(registry) 7 | 8 | meta = read_json('nix/meta.json') 9 | operator_name = meta['operator']['name'] 10 | 11 | custom_build( 12 | registry + '/' + operator_name, 13 | 'make regenerate-nix && nix-build . -A docker --argstr dockerName "${EXPECTED_REGISTRY}/' + operator_name + '" && ./result/load-image | docker load', 14 | deps=['rust', 'Cargo.toml', 'Cargo.lock', 'default.nix', "nix", 'build.rs', 'vendor'], 15 | ignore=['*.~undo-tree~'], 16 | # ignore=['result*', 'Cargo.nix', 'target', *.yaml], 17 | outputs_image_ref_to='result/ref', 18 | ) 19 | 20 | # Load the latest CRDs from Nix 21 | watch_file('result') 22 | if os.path.exists('result'): 23 | k8s_yaml('result/crds.yaml') 24 | 25 | # We need to set the correct image annotation on the operator Deployment to use e.g. 
26 | # oci.stackable.tech/sandbox/opa-operator:7y19m3d8clwxlv34v5q2x4p7v536s00g instead of 27 | # oci.stackable.tech/sandbox/opa-operator:0.0.0-dev (which does not exist) 28 | k8s_kind('Deployment', image_json_path='{.spec.template.metadata.annotations.internal\\.stackable\\.tech/image}') 29 | 30 | # Exclude stale CRDs from Helm chart, and apply the rest 31 | helm_crds, helm_non_crds = filter_yaml( 32 | helm( 33 | 'deploy/helm/' + operator_name, 34 | name=operator_name, 35 | namespace="stackable-operators", 36 | set=[ 37 | 'image.repository=' + registry + '/' + operator_name, 38 | ], 39 | ), 40 | api_version = "^apiextensions\\.k8s\\.io/.*$", 41 | kind = "^CustomResourceDefinition$", 42 | ) 43 | k8s_yaml(helm_non_crds) 44 | -------------------------------------------------------------------------------- /crate-hashes.json: -------------------------------------------------------------------------------- 1 | { 2 | "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.93.1#k8s-version@0.1.3": "16faz0f3dsv095hk94kmb7mk3pr6lban1v3k0g6yawak6gk5xln6", 3 | "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.93.1#stackable-operator-derive@0.3.1": "16faz0f3dsv095hk94kmb7mk3pr6lban1v3k0g6yawak6gk5xln6", 4 | "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.93.1#stackable-operator@0.93.1": "16faz0f3dsv095hk94kmb7mk3pr6lban1v3k0g6yawak6gk5xln6", 5 | "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.93.1#stackable-shared@0.0.1": "16faz0f3dsv095hk94kmb7mk3pr6lban1v3k0g6yawak6gk5xln6", 6 | "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.93.1#stackable-telemetry@0.6.0": "16faz0f3dsv095hk94kmb7mk3pr6lban1v3k0g6yawak6gk5xln6", 7 | "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.93.1#stackable-versioned-macros@0.7.1": "16faz0f3dsv095hk94kmb7mk3pr6lban1v3k0g6yawak6gk5xln6", 8 | "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.93.1#stackable-versioned@0.7.1": "16faz0f3dsv095hk94kmb7mk3pr6lban1v3k0g6yawak6gk5xln6", 9 | "git+https://github.com/stackabletech/product-config.git?tag=0.7.0#product-config@0.7.0": "0gjsm80g6r75pm3824dcyiz4ysq1ka4c1if6k1mjm9cnd5ym0gny" 10 | } -------------------------------------------------------------------------------- /default.nix: -------------------------------------------------------------------------------- 1 | { sources ? import ./nix/sources.nix # managed by https://github.com/nmattia/niv 2 | , nixpkgs ? sources.nixpkgs 3 | , pkgs ? import nixpkgs {} 4 | , cargo ? import ./Cargo.nix { 5 | inherit nixpkgs pkgs; release = false; 6 | defaultCrateOverrides = pkgs.defaultCrateOverrides // { 7 | prost-build = attrs: { 8 | buildInputs = [ pkgs.protobuf ]; 9 | }; 10 | tonic-reflection = attrs: { 11 | buildInputs = [ pkgs.rustfmt ]; 12 | }; 13 | csi-grpc = attrs: { 14 | nativeBuildInputs = [ pkgs.protobuf ]; 15 | }; 16 | stackable-secret-operator = attrs: { 17 | buildInputs = [ pkgs.protobuf pkgs.rustfmt ]; 18 | }; 19 | stackable-opa-user-info-fetcher = attrs: { 20 | # TODO: why is this not pulled in via libgssapi-sys? 21 | buildInputs = [ pkgs.krb5 ]; 22 | }; 23 | krb5-sys = attrs: { 24 | nativeBuildInputs = [ pkgs.pkg-config ]; 25 | buildInputs = [ pkgs.krb5 ]; 26 | LIBCLANG_PATH = "${pkgs.libclang.lib}/lib"; 27 | # Clang's resource directory is located at ${pkgs.clang.cc.lib}/lib/clang/. 
28 | # Starting with Clang 16, only the major version is used for the resource directory, 29 | # whereas the full version was used in prior Clang versions (see 30 | # https://github.com/llvm/llvm-project/commit/e1b88c8a09be25b86b13f98755a9bd744b4dbf14). 31 | # The clang wrapper ${pkgs.clang} provides a symlink to the resource directory, which 32 | # we use instead. 33 | BINDGEN_EXTRA_CLANG_ARGS = "-I${pkgs.glibc.dev}/include -I${pkgs.clang}/resource-root/include"; 34 | }; 35 | libgssapi-sys = attrs: { 36 | buildInputs = [ pkgs.krb5 ]; 37 | LIBCLANG_PATH = "${pkgs.libclang.lib}/lib"; 38 | BINDGEN_EXTRA_CLANG_ARGS = "-I${pkgs.glibc.dev}/include -I${pkgs.clang}/resource-root/include"; 39 | }; 40 | }; 41 | } 42 | , meta ? pkgs.lib.importJSON ./nix/meta.json 43 | , dockerName ? "oci.stackable.tech/sandbox/${meta.operator.name}" 44 | , dockerTag ? null 45 | }: 46 | rec { 47 | inherit cargo sources pkgs meta; 48 | build = cargo.allWorkspaceMembers; 49 | entrypoint = build+"/bin/stackable-${meta.operator.name}"; 50 | crds = pkgs.runCommand "${meta.operator.name}-crds.yaml" {} 51 | '' 52 | ${entrypoint} crd > $out 53 | ''; 54 | 55 | dockerImage = pkgs.dockerTools.streamLayeredImage { 56 | name = dockerName; 57 | tag = dockerTag; 58 | contents = [ 59 | # Common debugging tools 60 | pkgs.bashInteractive pkgs.coreutils pkgs.util-linuxMinimal 61 | # Kerberos 5 must be installed globally to load plugins correctly 62 | pkgs.krb5 63 | # Make the whole cargo workspace available on $PATH 64 | build 65 | ]; 66 | config = { 67 | Env = 68 | let 69 | fileRefVars = { 70 | PRODUCT_CONFIG = deploy/config-spec/properties.yaml; 71 | }; 72 | in pkgs.lib.concatLists (pkgs.lib.mapAttrsToList (env: path: pkgs.lib.optional (pkgs.lib.pathExists path) "${env}=${path}") fileRefVars); 73 | Entrypoint = [ entrypoint ]; 74 | Cmd = [ "run" ]; 75 | }; 76 | }; 77 | docker = pkgs.linkFarm "listener-operator-docker" [ 78 | { 79 | name = "load-image"; 80 | path = dockerImage; 81 | } 82 | { 83 | name = "ref"; 84 | path = pkgs.writeText "${dockerImage.name}-image-tag" "${dockerImage.imageName}:${dockerImage.imageTag}"; 85 | } 86 | { 87 | name = "image-repo"; 88 | path = pkgs.writeText "${dockerImage.name}-repo" dockerImage.imageName; 89 | } 90 | { 91 | name = "image-tag"; 92 | path = pkgs.writeText "${dockerImage.name}-tag" dockerImage.imageTag; 93 | } 94 | { 95 | name = "crds.yaml"; 96 | path = crds; 97 | } 98 | ]; 99 | 100 | # need to use vendored crate2nix because of https://github.com/kolloch/crate2nix/issues/264 101 | crate2nix = import sources.crate2nix {}; 102 | tilt = pkgs.tilt; 103 | 104 | regenerateNixLockfiles = pkgs.writeScriptBin "regenerate-nix-lockfiles" 105 | '' 106 | #!/usr/bin/env bash 107 | set -euo pipefail 108 | echo Running crate2nix 109 | ${crate2nix}/bin/crate2nix generate 110 | 111 | # crate2nix adds a trailing newline (see 112 | # https://github.com/nix-community/crate2nix/commit/5dd04e6de2fbdbeb067ab701de8ec29bc228c389). 113 | # The pre-commit hook trailing-whitespace wants to remove it again 114 | # (see https://github.com/pre-commit/pre-commit-hooks?tab=readme-ov-file#trailing-whitespace). 115 | # So, remove the trailing newline already here to avoid that an 116 | # unnecessary change is shown in Git. 
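# (`sed '$d'` deletes the last line of the file. BSD/macOS sed requires an
# explicit backup-suffix argument after -i — an empty string here — while GNU
# sed does not, hence the Darwin special case below.)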
117 | if [[ "$(uname)" == "Darwin" ]]; then 118 | sed -i \"\" '$d' Cargo.nix 119 | else 120 | sed -i '$d' Cargo.nix 121 | fi 122 | ''; 123 | } 124 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | # This file is the source of truth for all our repos! 2 | # This includes repos not templated by operator-templating, please copy/paste the file for this repos. 3 | 4 | # TIP: Use "cargo deny check" to check if everything is fine 5 | 6 | [graph] 7 | targets = [ 8 | { triple = "x86_64-unknown-linux-gnu" }, 9 | { triple = "aarch64-unknown-linux-gnu" }, 10 | { triple = "x86_64-unknown-linux-musl" }, 11 | { triple = "aarch64-apple-darwin" }, 12 | { triple = "x86_64-apple-darwin" }, 13 | ] 14 | 15 | [advisories] 16 | yanked = "deny" 17 | ignore = [ 18 | # https://rustsec.org/advisories/RUSTSEC-2023-0071 19 | # "rsa" crate: Marvin Attack: potential key recovery through timing sidechannel 20 | # 21 | # No patch is yet available, however work is underway to migrate to a fully constant-time implementation 22 | # So we need to accept this, as of SDP 25.3 we are not using the rsa crate to create certificates used in production 23 | # setups. 24 | # 25 | # https://github.com/RustCrypto/RSA/issues/19 is the tracking issue 26 | "RUSTSEC-2023-0071", 27 | ] 28 | 29 | [bans] 30 | multiple-versions = "allow" 31 | 32 | [licenses] 33 | unused-allowed-license = "allow" 34 | confidence-threshold = 1.0 35 | allow = [ 36 | "Apache-2.0", 37 | "BSD-2-Clause", 38 | "BSD-3-Clause", 39 | "CC0-1.0", 40 | "ISC", 41 | "LicenseRef-ring", 42 | "LicenseRef-webpki", 43 | "MIT", 44 | "MPL-2.0", 45 | "OpenSSL", # Needed for the ring and/or aws-lc-sys crate. See https://github.com/stackabletech/operator-templating/pull/464 for details 46 | "Unicode-3.0", 47 | "Unicode-DFS-2016", 48 | "Zlib", 49 | "Unlicense", 50 | ] 51 | private = { ignore = true } 52 | 53 | [[licenses.clarify]] 54 | name = "ring" 55 | expression = "LicenseRef-ring" 56 | license-files = [ 57 | { path = "LICENSE", hash = 0xbd0eed23 }, 58 | ] 59 | 60 | [[licenses.clarify]] 61 | name = "webpki" 62 | expression = "LicenseRef-webpki" 63 | license-files = [ 64 | { path = "LICENSE", hash = 0x001c7e6c }, 65 | ] 66 | 67 | [sources] 68 | unknown-registry = "deny" 69 | unknown-git = "deny" 70 | 71 | [sources.allow-org] 72 | github = ["stackabletech"] 73 | -------------------------------------------------------------------------------- /deploy/DO_NOT_EDIT.md: -------------------------------------------------------------------------------- 1 | # DO NOT EDIT 2 | 3 | These Helm charts and manifests are automatically generated. 4 | Please do not edit anything except for files explicitly mentioned below in this 5 | directory manually. 
6 | 7 | The following files are ok to edit: 8 | 9 | - helm/zookeeper-operator/templates/roles.yaml 10 | - helm/zookeeper-operator/values.yaml 11 | 12 | The details are in-motion but check this repository for a few details: 13 | 14 | -------------------------------------------------------------------------------- /deploy/helm/.gitignore: -------------------------------------------------------------------------------- 1 | *.tgz 2 | -------------------------------------------------------------------------------- /deploy/helm/chart_testing.yaml: -------------------------------------------------------------------------------- 1 | remote: origin 2 | target-branch: main 3 | chart-dirs: 4 | - deploy/helm 5 | all: true 6 | -------------------------------------------------------------------------------- /deploy/helm/ct.yaml: -------------------------------------------------------------------------------- 1 | # This file is used for chart-testing (https://github.com/helm/chart-testing) 2 | # The name "ct.yaml" is not very self-descriptive but it is the default that chart-testing is looking for 3 | --- 4 | remote: origin 5 | target-branch: main 6 | chart-dirs: 7 | - deploy/helm 8 | all: true 9 | -------------------------------------------------------------------------------- /deploy/helm/zookeeper-operator/.helmignore: -------------------------------------------------------------------------------- 1 | # ============= 2 | # This file is automatically generated from the templates in stackabletech/operator-templating 3 | # DON'T MANUALLY EDIT THIS FILE 4 | # ============= 5 | 6 | # Patterns to ignore when building packages. 7 | # This supports shell glob matching, relative path matching, and 8 | # negation (prefixed with !). Only one pattern per line. 9 | .DS_Store 10 | # Common VCS dirs 11 | .git/ 12 | .gitignore 13 | .bzr/ 14 | .bzrignore 15 | .hg/ 16 | .hgignore 17 | .svn/ 18 | # Common backup files 19 | *.swp 20 | *.bak 21 | *.tmp 22 | *.orig 23 | *~ 24 | # Various IDEs 25 | .project 26 | .idea/ 27 | *.tmproj 28 | .vscode/ 29 | -------------------------------------------------------------------------------- /deploy/helm/zookeeper-operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v2 3 | name: zookeeper-operator 4 | version: "0.0.0-dev" 5 | appVersion: "0.0.0-dev" 6 | description: The Stackable Operator for Apache ZooKeeper 7 | home: https://github.com/stackabletech/zookeeper-operator 8 | maintainers: 9 | - name: Stackable 10 | url: https://www.stackable.tech 11 | -------------------------------------------------------------------------------- /deploy/helm/zookeeper-operator/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Helm Chart for Stackable Operator for Apache ZooKeeper 3 | 4 | This Helm Chart can be used to install Custom Resource Definitions and the Operator for Apache ZooKeeper provided by Stackable. 
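Once the chart is installed (see below), its CRDs let you describe a ZooKeeper ensemble declaratively. A minimal, hypothetical example — the field layout follows the upstream docs, the `productVersion` value is illustrative, and the authoritative schema lives in `crds/crds.yaml`:

```shell
kubectl apply -f - <<EOF
apiVersion: zookeeper.stackable.tech/v1alpha1
kind: ZookeeperCluster
metadata:
  name: simple-zk
spec:
  image:
    productVersion: "3.9.2" # illustrative; pick a currently supported version
  servers:
    roleGroups:
      default:
        replicas: 3
EOF
```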
5 | 6 | ## Requirements 7 | 8 | - Create a [Kubernetes Cluster](../Readme.md) 9 | - Install [Helm](https://helm.sh/docs/intro/install/) 10 | 11 | ## Install the Stackable Operator for Apache ZooKeeper 12 | 13 | ```bash 14 | # From the root of the operator repository 15 | make compile-chart 16 | 17 | helm install zookeeper-operator deploy/helm/zookeeper-operator 18 | ``` 19 | 20 | ## Usage of the CRDs 21 | 22 | The usage of this operator and its CRDs is described in the [documentation](https://docs.stackable.tech/zookeeper/index.html) 23 | 24 | The operator has example requests included in the [`/examples`](https://github.com/stackabletech/zookeeper-operator/tree/main/examples) directory. 25 | 26 | ## Links 27 | 28 | 29 | -------------------------------------------------------------------------------- /deploy/helm/zookeeper-operator/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "operator.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-operator" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Expand the name of the chart. 10 | */}} 11 | {{- define "operator.appname" -}} 12 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 13 | {{- end }} 14 | 15 | {{/* 16 | Create a default fully qualified app name. 17 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 18 | If release name contains chart name it will be used as a full name. 19 | */}} 20 | {{- define "operator.fullname" -}} 21 | {{- if .Values.fullnameOverride }} 22 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 23 | {{- else }} 24 | {{- $name := default .Chart.Name .Values.nameOverride }} 25 | {{- if contains $name .Release.Name }} 26 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 27 | {{- else }} 28 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 29 | {{- end }} 30 | {{- end }} 31 | {{- end }} 32 | 33 | {{/* 34 | Create chart name and version as used by the chart label. 35 | */}} 36 | {{- define "operator.chart" -}} 37 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 38 | {{- end }} 39 | 40 | {{/* 41 | Common labels 42 | */}} 43 | {{- define "operator.labels" -}} 44 | helm.sh/chart: {{ include "operator.chart" . }} 45 | {{ include "operator.selectorLabels" . }} 46 | {{- if .Chart.AppVersion }} 47 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 48 | {{- end }} 49 | app.kubernetes.io/managed-by: {{ .Release.Service }} 50 | {{- end }} 51 | 52 | {{/* 53 | Selector labels 54 | */}} 55 | {{- define "operator.selectorLabels" -}} 56 | app.kubernetes.io/name: {{ include "operator.appname" . }} 57 | app.kubernetes.io/instance: {{ .Release.Name }} 58 | {{- with .Values.labels }} 59 | {{ toYaml . }} 60 | {{- end }} 61 | {{- end }} 62 | 63 | {{/* 64 | Create the name of the service account to use 65 | */}} 66 | {{- define "operator.serviceAccountName" -}} 67 | {{- if .Values.serviceAccount.create }} 68 | {{- default (include "operator.fullname" .) .Values.serviceAccount.name }} 69 | {{- else }} 70 | {{- default "default" .Values.serviceAccount.name }} 71 | {{- end }} 72 | {{- end }} 73 | 74 | {{/* 75 | Labels for Kubernetes objects created by helm test 76 | */}} 77 | {{- define "operator.testLabels" -}} 78 | helm.sh/test: {{ include "operator.chart" . 
}} 79 | {{- end }} 80 | -------------------------------------------------------------------------------- /deploy/helm/zookeeper-operator/templates/_telemetry.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Create a list of telemetry related env vars. 3 | */}} 4 | {{- define "telemetry.envVars" -}} 5 | {{- with .Values.telemetry }} 6 | {{- if not .consoleLog.enabled }} 7 | - name: CONSOLE_LOG_DISABLED 8 | value: "true" 9 | {{- end }} 10 | {{- if and .consoleLog.enabled .consoleLog.level }} 11 | - name: CONSOLE_LOG_LEVEL 12 | value: {{ .consoleLog.level }} 13 | {{ end }} 14 | {{- if and .consoleLog.enabled .consoleLog.format }} 15 | - name: CONSOLE_LOG_FORMAT 16 | value: {{ .consoleLog.format }} 17 | {{ end }} 18 | {{- if .fileLog.enabled }} 19 | - name: FILE_LOG_DIRECTORY 20 | value: /stackable/logs/{{ include "operator.appname" $ }} 21 | {{- end }} 22 | {{- if and .fileLog.enabled .fileLog.level }} 23 | - name: FILE_LOG_LEVEL 24 | value: {{ .fileLog.level }} 25 | {{- end }} 26 | {{- if and .fileLog.enabled .fileLog.rotationPeriod }} 27 | - name: FILE_LOG_ROTATION_PERIOD 28 | value: {{ .fileLog.rotationPeriod }} 29 | {{- end }} 30 | {{- if and .fileLog.enabled .fileLog.maxFiles }} 31 | - name: FILE_LOG_MAX_FILES 32 | value: {{ quote .fileLog.maxFiles }} 33 | {{- end }} 34 | {{- if .otelLogExporter.enabled }} 35 | - name: OTEL_LOG_EXPORTER_ENABLED 36 | value: "true" 37 | {{- end }} 38 | {{- if and .otelLogExporter.enabled .otelLogExporter.level }} 39 | - name: OTEL_LOG_EXPORTER_LEVEL 40 | value: {{ .otelLogExporter.level }} 41 | {{- end }} 42 | {{- if and .otelLogExporter.enabled .otelLogExporter.endpoint }} 43 | - name: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT 44 | value: {{ .otelLogExporter.endpoint }} 45 | {{- end }} 46 | {{- if .otelTraceExporter.enabled }} 47 | - name: OTEL_TRACE_EXPORTER_ENABLED 48 | value: "true" 49 | {{- end }} 50 | {{- if and .otelTraceExporter.enabled .otelTraceExporter.level }} 51 | - name: OTEL_TRACE_EXPORTER_LEVEL 52 | value: {{ .otelTraceExporter.level }} 53 | {{- end }} 54 | {{- if and .otelTraceExporter.enabled .otelTraceExporter.endpoint }} 55 | - name: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT 56 | value: {{ .otelTraceExporter.endpoint }} 57 | {{- end }} 58 | {{- end }} 59 | {{- end }} 60 | -------------------------------------------------------------------------------- /deploy/helm/zookeeper-operator/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | data: 4 | {{ (.Files.Glob "configs/*").AsConfig | indent 2 }} 5 | kind: ConfigMap 6 | metadata: 7 | name: {{ include "operator.fullname" . }}-configmap 8 | labels: 9 | {{- include "operator.labels" . | nindent 4 }} 10 | -------------------------------------------------------------------------------- /deploy/helm/zookeeper-operator/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: {{ include "operator.fullname" . }}-deployment 6 | labels: 7 | {{- include "operator.labels" . | nindent 4 }} 8 | spec: 9 | replicas: 1 10 | strategy: 11 | type: Recreate 12 | selector: 13 | matchLabels: 14 | {{- include "operator.selectorLabels" . 
| nindent 6 }} 15 | template: 16 | metadata: 17 | annotations: 18 | internal.stackable.tech/image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 19 | checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} 20 | {{- with .Values.podAnnotations }} 21 | {{- toYaml . | nindent 8 }} 22 | {{- end }} 23 | labels: 24 | {{- include "operator.selectorLabels" . | nindent 8 }} 25 | spec: 26 | {{- with .Values.image.pullSecrets }} 27 | imagePullSecrets: 28 | {{- toYaml . | nindent 8 }} 29 | {{- end }} 30 | serviceAccountName: {{ include "operator.fullname" . }}-serviceaccount 31 | securityContext: 32 | {{- toYaml .Values.podSecurityContext | nindent 8 }} 33 | containers: 34 | - name: {{ include "operator.appname" . }} 35 | securityContext: 36 | {{- toYaml .Values.securityContext | nindent 12 }} 37 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 38 | imagePullPolicy: {{ .Values.image.pullPolicy }} 39 | resources: 40 | {{- toYaml .Values.resources | nindent 12 }} 41 | volumeMounts: 42 | - mountPath: /etc/stackable/{{ include "operator.appname" . }}/config-spec 43 | name: config-spec 44 | env: 45 | - name: OPERATOR_IMAGE 46 | # Tilt can use annotations as image paths, but not env variables 47 | valueFrom: 48 | fieldRef: 49 | fieldPath: metadata.annotations['internal.stackable.tech/image'] 50 | {{- if .Values.kubernetesClusterDomain }} 51 | - name: KUBERNETES_CLUSTER_DOMAIN 52 | value: {{ .Values.kubernetesClusterDomain | quote }} 53 | {{- end }} 54 | {{- include "telemetry.envVars" . | nindent 12 }} 55 | volumes: 56 | - name: config-spec 57 | configMap: 58 | name: {{ include "operator.fullname" . }}-configmap 59 | {{- with .Values.nodeSelector }} 60 | nodeSelector: 61 | {{- toYaml . | nindent 8 }} 62 | {{- end }} 63 | {{- with .Values.affinity }} 64 | affinity: 65 | {{- toYaml . | nindent 8 }} 66 | {{- end }} 67 | {{- with .Values.tolerations }} 68 | tolerations: 69 | {{- toYaml . | nindent 8 }} 70 | {{- end }} 71 | -------------------------------------------------------------------------------- /deploy/helm/zookeeper-operator/templates/roles.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ include "operator.fullname" . }}-clusterrole 6 | labels: 7 | {{- include "operator.labels" . | nindent 4 }} 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - nodes 13 | verbs: 14 | - list 15 | - watch 16 | - apiGroups: 17 | - "" 18 | resources: 19 | - pods 20 | - configmaps 21 | - secrets 22 | - services 23 | - endpoints 24 | - serviceaccounts 25 | verbs: 26 | - create 27 | - delete 28 | - get 29 | - list 30 | - patch 31 | - update 32 | - watch 33 | - apiGroups: 34 | - rbac.authorization.k8s.io 35 | resources: 36 | - rolebindings 37 | verbs: 38 | - create 39 | - delete 40 | - get 41 | - list 42 | - patch 43 | - update 44 | - watch 45 | - apiGroups: 46 | - rbac.authorization.k8s.io 47 | resources: 48 | - clusterroles 49 | verbs: 50 | - bind 51 | resourceNames: 52 | - {{ include "operator.name" . 
}}-clusterrole 53 | - apiGroups: 54 | - apps 55 | resources: 56 | - statefulsets 57 | verbs: 58 | - get 59 | - create 60 | - delete 61 | - list 62 | - patch 63 | - update 64 | - watch 65 | - apiGroups: 66 | - batch 67 | resources: 68 | - jobs 69 | verbs: 70 | - create 71 | - delete 72 | - get 73 | - list 74 | - patch 75 | - update 76 | - watch 77 | - apiGroups: 78 | - policy 79 | resources: 80 | - poddisruptionbudgets 81 | verbs: 82 | - create 83 | - delete 84 | - get 85 | - list 86 | - patch 87 | - update 88 | - watch 89 | - apiGroups: 90 | - apiextensions.k8s.io 91 | resources: 92 | - customresourcedefinitions 93 | verbs: 94 | - get 95 | - apiGroups: 96 | - authentication.stackable.tech 97 | resources: 98 | - authenticationclasses 99 | verbs: 100 | - get 101 | - list 102 | - watch 103 | - apiGroups: 104 | - events.k8s.io 105 | resources: 106 | - events 107 | verbs: 108 | - create 109 | - patch 110 | - apiGroups: 111 | - {{ include "operator.name" . }}.stackable.tech 112 | resources: 113 | - {{ include "operator.name" . }}clusters 114 | - {{ include "operator.name" . }}znodes 115 | verbs: 116 | - get 117 | - list 118 | - patch 119 | - watch 120 | - apiGroups: 121 | - {{ include "operator.name" . }}.stackable.tech 122 | resources: 123 | - {{ include "operator.name" . }}clusters/status 124 | - {{ include "operator.name" . }}znodes/status 125 | verbs: 126 | - patch 127 | --- 128 | apiVersion: rbac.authorization.k8s.io/v1 129 | kind: ClusterRole 130 | metadata: 131 | name: {{ include "operator.name" . }}-clusterrole 132 | labels: 133 | {{- include "operator.labels" . | nindent 4 }} 134 | rules: 135 | - apiGroups: 136 | - "" 137 | resources: 138 | - configmaps 139 | - secrets 140 | - serviceaccounts 141 | verbs: 142 | - get 143 | - apiGroups: 144 | - events.k8s.io 145 | resources: 146 | - events 147 | verbs: 148 | - create 149 | {{ if .Capabilities.APIVersions.Has "security.openshift.io/v1" }} 150 | - apiGroups: 151 | - security.openshift.io 152 | resources: 153 | - securitycontextconstraints 154 | resourceNames: 155 | - nonroot-v2 156 | verbs: 157 | - use 158 | {{ end }} 159 | -------------------------------------------------------------------------------- /deploy/helm/zookeeper-operator/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | {{ if .Values.serviceAccount.create -}} 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: {{ include "operator.fullname" . }}-serviceaccount 7 | labels: 8 | {{- include "operator.labels" . | nindent 4 }} 9 | {{- with .Values.serviceAccount.annotations }} 10 | annotations: 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | --- 14 | apiVersion: rbac.authorization.k8s.io/v1 15 | # This ClusterRoleBinding grants the operator's ServiceAccount the permissions defined in the operator ClusterRole. 16 | kind: ClusterRoleBinding 17 | metadata: 18 | name: {{ include "operator.fullname" . }}-clusterrolebinding 19 | labels: 20 | {{- include "operator.labels" . | nindent 4 }} 21 | subjects: 22 | - kind: ServiceAccount 23 | name: {{ include "operator.fullname" . }}-serviceaccount 24 | namespace: {{ .Release.Namespace }} 25 | roleRef: 26 | kind: ClusterRole 27 | name: {{ include "operator.fullname" .
}}-clusterrole 28 | apiGroup: rbac.authorization.k8s.io 29 | {{- end }} 30 | -------------------------------------------------------------------------------- /deploy/helm/zookeeper-operator/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for zookeeper-operator. 2 | --- 3 | image: 4 | repository: oci.stackable.tech/sdp/zookeeper-operator 5 | pullPolicy: IfNotPresent 6 | pullSecrets: [] 7 | 8 | nameOverride: "" 9 | fullnameOverride: "" 10 | 11 | serviceAccount: 12 | # Specifies whether a service account should be created 13 | create: true 14 | # Annotations to add to the service account 15 | annotations: {} 16 | # The name of the service account to use. 17 | # If not set and create is true, a name is generated using the fullname template 18 | name: "" 19 | 20 | # Provide additional labels which get attached to all deployed resources 21 | labels: 22 | stackable.tech/vendor: Stackable 23 | 24 | podAnnotations: {} 25 | 26 | podSecurityContext: {} 27 | # fsGroup: 2000 28 | 29 | securityContext: {} 30 | # capabilities: 31 | # drop: 32 | # - ALL 33 | # readOnlyRootFilesystem: true 34 | # runAsNonRoot: true 35 | # runAsUser: 1000 36 | 37 | resources: 38 | limits: 39 | cpu: 100m 40 | memory: 128Mi 41 | requests: 42 | cpu: 100m 43 | memory: 128Mi 44 | 45 | nodeSelector: {} 46 | 47 | tolerations: [] 48 | 49 | affinity: {} 50 | 51 | # When running on a non-default Kubernetes cluster domain, the cluster domain can be configured here. 52 | # See the https://docs.stackable.tech/home/stable/guides/kubernetes-cluster-domain guide for details. 53 | # kubernetesClusterDomain: my-cluster.local 54 | 55 | # See all available options and detailed explanations about the concept here: 56 | # https://docs.stackable.tech/home/stable/concepts/telemetry/ 57 | telemetry: 58 | consoleLog: 59 | enabled: true 60 | fileLog: 61 | enabled: false 62 | rotationPeriod: hourly 63 | maxFiles: 6 64 | otelLogExporter: 65 | enabled: false 66 | otelTraceExporter: 67 | enabled: false 68 | -------------------------------------------------------------------------------- /deploy/stackable-operators-ns.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: stackable-operators 6 | -------------------------------------------------------------------------------- /docs/antora.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: home 3 | version: "nightly" 4 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/example-znode.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperZnode 4 | metadata: 5 | name: example-znode # <1> 6 | spec: 7 | clusterRef: 8 | name: zookeeper-cluster # <2> 9 | namespace: my-namespace # <3> 10 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/getting_started/code/getting_started.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # DO NOT EDIT THE SCRIPT 5 | # Instead, update the j2 template, and regenerate it for dev with `make render-docs`. 
6 | 7 | # This script contains all the code snippets from the guide, as well as some assert tests 8 | # to test if the instructions in the guide work. The user *could* use it, but it is intended 9 | # for testing only. 10 | # The script will install the operator(s), create a product instance and interact with it. 11 | 12 | if [ $# -eq 0 ] 13 | then 14 | echo "Installation method argument ('helm' or 'stackablectl') required." 15 | exit 1 16 | fi 17 | 18 | cd "$(dirname "$0")" 19 | 20 | case "$1" in 21 | "helm") 22 | echo "Installing Operators with Helm" 23 | # tag::helm-install-operators[] 24 | helm install --wait commons-operator oci://oci.stackable.tech/sdp-charts/commons-operator --version 0.0.0-dev 25 | helm install --wait secret-operator oci://oci.stackable.tech/sdp-charts/secret-operator --version 0.0.0-dev 26 | helm install --wait listener-operator oci://oci.stackable.tech/sdp-charts/listener-operator --version 0.0.0-dev 27 | helm install --wait zookeeper-operator oci://oci.stackable.tech/sdp-charts/zookeeper-operator --version 0.0.0-dev 28 | # end::helm-install-operators[] 29 | ;; 30 | "stackablectl") 31 | echo "installing Operators with stackablectl" 32 | # tag::stackablectl-install-operators[] 33 | stackablectl operator install \ 34 | commons=0.0.0-dev \ 35 | secret=0.0.0-dev \ 36 | listener=0.0.0-dev \ 37 | zookeeper=0.0.0-dev 38 | # end::stackablectl-install-operators[] 39 | ;; 40 | *) 41 | echo "Need to give 'helm' or 'stackablectl' as an argument for which installation method to use!" 42 | exit 1 43 | ;; 44 | esac 45 | 46 | echo "Creating ZooKeeper cluster" 47 | # tag::install-zookeeper[] 48 | kubectl apply -f zookeeper.yaml 49 | # end::install-zookeeper[] 50 | 51 | sleep 15 52 | 53 | ### Connect to cluster 54 | 55 | echo "Awaiting ZooKeeper rollout finish" 56 | # tag::watch-zookeeper-rollout[] 57 | kubectl rollout status --watch --timeout=5m statefulset/simple-zk-server-default 58 | # end::watch-zookeeper-rollout[] 59 | 60 | # kubectl run sometimes misses log output, which is why we use run/logs/delete. 61 | # Issue for reference: https://github.com/kubernetes/kubernetes/issues/27264 62 | zkCli_ls() { 63 | # tag::zkcli-ls[] 64 | kubectl run my-pod \ 65 | --stdin --tty --quiet --restart=Never \ 66 | --image oci.stackable.tech/sdp/zookeeper:3.9.3-stackable0.0.0-dev -- \ 67 | bin/zkCli.sh -server simple-zk-server-default:2282 ls / > /dev/null && \ 68 | kubectl logs my-pod && \ 69 | kubectl delete pods my-pod 70 | # end::zkcli-ls[] 71 | } 72 | 73 | ls_result=$(zkCli_ls) >/dev/null 2>&1 74 | 75 | 76 | if echo "$ls_result" | grep '^\[zookeeper\]' > /dev/null; then 77 | echo "zkCli.sh ls command worked" 78 | else 79 | echo "zkCli.sh ls command did not work. command output:" 80 | echo "$ls_result" 81 | exit 1 82 | fi 83 | 84 | ### ZNode 85 | 86 | echo "Applying ZNode" 87 | # tag::apply-znode[] 88 | kubectl apply -f znode.yaml 89 | # end::apply-znode[] 90 | 91 | sleep 5 92 | 93 | ls_result=$(zkCli_ls) > /dev/null 2>&1 94 | 95 | if echo "$ls_result" | grep '^\[znode-.\{8\}-.\{4\}-.\{4\}-.\{4\}-.\{12\}, zookeeper\]' > /dev/null; then 96 | echo "zkCli.sh ls command worked" 97 | else 98 | echo "zkCli.sh ls command did not work. 
command output:" 99 | echo "$ls_result" 100 | exit 1 101 | fi 102 | 103 | get_configmap() { 104 | # tag::get-znode-cm 105 | kubectl describe configmap simple-znode 106 | # end::get-znode-cm 107 | } 108 | 109 | cm_output=$(get_configmap) 110 | 111 | # shellcheck disable=SC2181 # wont't fix this now, but ideally we should enable bash strict mode so we can avoid success checks. 112 | if [[ $? == 0 ]]; then 113 | echo "ConfigMap retrieved." 114 | else 115 | echo "Could not get ConfigMap 'simple-znode'" 116 | exit 1 117 | fi 118 | 119 | if echo "$cm_output" | grep 2282/znode > /dev/null; then 120 | echo "ConfigMap contains a reference of the ZNode" 121 | else 122 | echo "ConfigMap doesn't seem to reference the ZNode" 123 | exit 1 124 | fi 125 | 126 | echo "Script ran successfully!" 127 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/getting_started/code/getting_started.sh.j2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # DO NOT EDIT THE SCRIPT 5 | # Instead, update the j2 template, and regenerate it for dev with `make render-docs`. 6 | 7 | # This script contains all the code snippets from the guide, as well as some assert tests 8 | # to test if the instructions in the guide work. The user *could* use it, but it is intended 9 | # for testing only. 10 | # The script will install the operator(s), create a product instance and interact with it. 11 | 12 | if [ $# -eq 0 ] 13 | then 14 | echo "Installation method argument ('helm' or 'stackablectl') required." 15 | exit 1 16 | fi 17 | 18 | cd "$(dirname "$0")" 19 | 20 | case "$1" in 21 | "helm") 22 | echo "Installing Operators with Helm" 23 | # tag::helm-install-operators[] 24 | helm install --wait commons-operator oci://{{ helm.repo_url }}/{{ helm.repo_name }}/commons-operator --version {{ versions.commons }} 25 | helm install --wait secret-operator oci://{{ helm.repo_url }}/{{ helm.repo_name }}/secret-operator --version {{ versions.secret }} 26 | helm install --wait listener-operator oci://{{ helm.repo_url }}/{{ helm.repo_name }}/listener-operator --version {{ versions.listener }} 27 | helm install --wait zookeeper-operator oci://{{ helm.repo_url }}/{{ helm.repo_name }}/zookeeper-operator --version {{ versions.zookeeper }} 28 | # end::helm-install-operators[] 29 | ;; 30 | "stackablectl") 31 | echo "installing Operators with stackablectl" 32 | # tag::stackablectl-install-operators[] 33 | stackablectl operator install \ 34 | commons={{ versions.commons }} \ 35 | secret={{ versions.secret }} \ 36 | listener={{ versions.listener }} \ 37 | zookeeper={{ versions.zookeeper }} 38 | # end::stackablectl-install-operators[] 39 | ;; 40 | *) 41 | echo "Need to give 'helm' or 'stackablectl' as an argument for which installation method to use!" 42 | exit 1 43 | ;; 44 | esac 45 | 46 | echo "Creating ZooKeeper cluster" 47 | # tag::install-zookeeper[] 48 | kubectl apply -f zookeeper.yaml 49 | # end::install-zookeeper[] 50 | 51 | sleep 15 52 | 53 | ### Connect to cluster 54 | 55 | echo "Awaiting ZooKeeper rollout finish" 56 | # tag::watch-zookeeper-rollout[] 57 | kubectl rollout status --watch --timeout=5m statefulset/simple-zk-server-default 58 | # end::watch-zookeeper-rollout[] 59 | 60 | # kubectl run sometimes misses log output, which is why we use run/logs/delete. 
61 | # Issue for reference: https://github.com/kubernetes/kubernetes/issues/27264 62 | zkCli_ls() { 63 | # tag::zkcli-ls[] 64 | kubectl run my-pod \ 65 | --stdin --tty --quiet --restart=Never \ 66 | --image oci.stackable.tech/sdp/zookeeper:3.9.3-stackable{{ versions.zookeeper }} -- \ 67 | bin/zkCli.sh -server simple-zk-server-default:2282 ls / > /dev/null && \ 68 | kubectl logs my-pod && \ 69 | kubectl delete pods my-pod 70 | # end::zkcli-ls[] 71 | } 72 | 73 | ls_result=$(zkCli_ls) >/dev/null 2>&1 74 | 75 | 76 | if echo "$ls_result" | grep '^\[zookeeper\]' > /dev/null; then 77 | echo "zkCli.sh ls command worked" 78 | else 79 | echo "zkCli.sh ls command did not work. command output:" 80 | echo "$ls_result" 81 | exit 1 82 | fi 83 | 84 | ### ZNode 85 | 86 | echo "Applying ZNode" 87 | # tag::apply-znode[] 88 | kubectl apply -f znode.yaml 89 | # end::apply-znode[] 90 | 91 | sleep 5 92 | 93 | ls_result=$(zkCli_ls) > /dev/null 2>&1 94 | 95 | if echo "$ls_result" | grep '^\[znode-.\{8\}-.\{4\}-.\{4\}-.\{4\}-.\{12\}, zookeeper\]' > /dev/null; then 96 | echo "zkCli.sh ls command worked" 97 | else 98 | echo "zkCli.sh ls command did not work. command output:" 99 | echo "$ls_result" 100 | exit 1 101 | fi 102 | 103 | get_configmap() { 104 | # tag::get-znode-cm[] 105 | kubectl describe configmap simple-znode 106 | # end::get-znode-cm[] 107 | } 108 | 109 | cm_output=$(get_configmap) 110 | 111 | # shellcheck disable=SC2181 # won't fix this now, but ideally we should enable bash strict mode so we can avoid success checks. 112 | if [[ $? == 0 ]]; then 113 | echo "ConfigMap retrieved." 114 | else 115 | echo "Could not get ConfigMap 'simple-znode'" 116 | exit 1 117 | fi 118 | 119 | if echo "$cm_output" | grep 2282/znode > /dev/null; then 120 | echo "ConfigMap contains a reference of the ZNode" 121 | else 122 | echo "ConfigMap doesn't seem to reference the ZNode" 123 | exit 1 124 | fi 125 | 126 | echo "Script ran successfully!" 127 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/getting_started/code/install_output.txt: -------------------------------------------------------------------------------- 1 | Installed commons=0.0.0-dev operator 2 | Installed secret=0.0.0-dev operator 3 | Installed listener=0.0.0-dev operator 4 | Installed zookeeper=0.0.0-dev operator 5 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/getting_started/code/install_output.txt.j2: -------------------------------------------------------------------------------- 1 | Installed commons={{ versions.commons }} operator 2 | Installed secret={{ versions.secret }} operator 3 | Installed listener={{ versions.listener }} operator 4 | Installed zookeeper={{ versions.zookeeper }} operator 5 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/getting_started/code/test_getting_started_helm.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | set -euo pipefail 3 | 4 | cd "$(dirname "$0")" 5 | ./getting_started.sh helm 6 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/getting_started/code/test_getting_started_stackablectl.sh: -------------------------------------------------------------------------------- 1 | #!
/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | cd "$(dirname "$0")" 5 | ./getting_started.sh stackablectl 6 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/getting_started/code/znode.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperZnode 4 | metadata: 5 | name: simple-znode 6 | spec: 7 | clusterRef: 8 | name: simple-zk 9 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/getting_started/code/zookeeper.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperCluster 4 | metadata: 5 | name: simple-zk 6 | spec: 7 | clusterConfig: 8 | listenerClass: external-unstable 9 | image: 10 | productVersion: 3.9.3 11 | servers: 12 | roleGroups: 13 | default: 14 | replicas: 3 15 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/getting_started/code/zookeeper.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperCluster 4 | metadata: 5 | name: simple-zk 6 | spec: 7 | clusterConfig: 8 | listenerClass: external-unstable 9 | image: 10 | productVersion: 3.9.3 11 | servers: 12 | roleGroups: 13 | default: 14 | replicas: 3 15 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/usage_guide/example-cluster-tls-authentication-class.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: authentication.stackable.tech/v1alpha1 3 | kind: AuthenticationClass 4 | metadata: 5 | name: zk-client-tls # <2> 6 | spec: 7 | provider: 8 | tls: 9 | clientCertSecretClass: zk-client-auth-secret # <3> 10 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/usage_guide/example-cluster-tls-authentication-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.stackable.tech/v1alpha1 3 | kind: SecretClass 4 | metadata: 5 | name: zk-client-auth-secret # <4> 6 | spec: 7 | backend: 8 | autoTls: 9 | ca: 10 | secret: 11 | name: secret-provisioner-tls-zk-client-ca 12 | namespace: default 13 | autoGenerate: true 14 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/usage_guide/example-cluster-tls-authentication.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperCluster 4 | metadata: 5 | name: simple-zk 6 | spec: 7 | image: 8 | productVersion: 3.9.3 9 | clusterConfig: 10 | authentication: 11 | - authenticationClass: zk-client-tls # <1> 12 | servers: 13 | roleGroups: 14 | default: 15 | replicas: 3 16 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/usage_guide/example-cluster-tls-encryption.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperCluster 4 | metadata: 5 | name: simple-zk 6 | spec: 7 | image: 8 | productVersion: 
3.9.3 9 | clusterConfig: 10 | tls: 11 | serverSecretClass: tls # <1> 12 | quorumSecretClass: tls # <2> 13 | servers: 14 | roleGroups: 15 | default: 16 | replicas: 3 17 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/usage_guide/example-secret-operator-tls-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.stackable.tech/v1alpha1 3 | kind: SecretClass 4 | metadata: 5 | name: tls 6 | spec: 7 | backend: 8 | autoTls: 9 | ca: 10 | secret: 11 | name: secret-provisioner-tls-ca 12 | namespace: default 13 | autoGenerate: true 14 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/usage_guide/znode/example-znode-discovery.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: ... # <1> 6 | namespace: ... 7 | data: 8 | ZOOKEEPER: pod-1:2181,pod-2:2181,pod-3:2181/{path} # <2> 9 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/usage_guide/znode/example-znode-druid.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperZnode 4 | metadata: 5 | name: druid-znode # <1> 6 | namespace: druid-ns # <2> 7 | spec: 8 | clusterRef: # <3> 9 | name: my-zookeeper 10 | namespace: data 11 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/usage_guide/znode/example-znode-kafka.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperZnode 4 | metadata: 5 | name: kafka-znode # <1> 6 | namespace: data # <2> 7 | spec: 8 | clusterRef: # <3> 9 | name: my-zookeeper 10 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/examples/usage_guide/znode/example-znode.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperZnode 4 | metadata: 5 | name: example-znode 6 | spec: 7 | clusterRef: # <1> 8 | name: zookeeper-cluster 9 | namespace: my-namespace # <2> 10 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/getting_started/first_steps.adoc: -------------------------------------------------------------------------------- 1 | = First steps 2 | 3 | Now that the operator is installed, it is time to deploy a ZooKeeper cluster and connect to it. 4 | 5 | == Deploy ZooKeeper 6 | 7 | The ZooKeeper cluster is deployed with a very simple resource definition. 8 | Create a file called `zookeeper.yaml`: 9 | 10 | [source,yaml] 11 | include::example$getting_started/code/zookeeper.yaml[] 12 | 13 | and apply it: 14 | [source,bash] 15 | include::example$getting_started/code/getting_started.sh[tag=install-zookeeper] 16 | 17 | The operator creates a ZooKeeper cluster with three replicas. 18 | Use kubectl to observe the status of the cluster: 19 | 20 | [source,bash] 21 | include::example$getting_started/code/getting_started.sh[tag=watch-zookeeper-rollout] 22 | 23 | The operator deploys readiness probes to make sure the replicas are ready and have established a quorum.
24 | Only then is the StatefulSet actually marked as `Ready`. 25 | You see 26 | 27 | ---- 28 | partitioned roll out complete: 3 new pods have been updated... 29 | ---- 30 | 31 | The ZooKeeper cluster is now ready. 32 | 33 | == Deploy a ZNode 34 | 35 | ZooKeeper manages its data in a hierarchical node system. 36 | You can look at the nodes using the zkCli tool. 37 | It is included inside the Stackable ZooKeeper container, and you can invoke it using `kubectl run`: 38 | 39 | [source,bash] 40 | include::example$getting_started/code/getting_started.sh[tag=zkcli-ls] 41 | 42 | NOTE: You might wonder why the logs are used instead of the output from `kubectl run`. 43 | This is because `kubectl run` sometimes loses lines of the output, a link:https://github.com/kubernetes/kubernetes/issues/27264[known issue]. 44 | 45 | Among the log output you see the current list of nodes in the root directory `/`: 46 | 47 | [source] 48 | ---- 49 | [zookeeper] 50 | ---- 51 | 52 | The `zookeeper` node contains ZooKeeper configuration data. 53 | 54 | It is useful to separate different applications sharing a ZooKeeper cluster onto different nodes, and the Stackable Operator uses xref:znodes.adoc[ZNodes] for this. 55 | ZNodes are created with manifest files of the kind `ZookeeperZnode`. 56 | Create a file called `znode.yaml` with the following contents: 57 | 58 | [source,yaml] 59 | include::example$getting_started/code/znode.yaml[] 60 | 61 | And apply it: 62 | 63 | [source,bash] 64 | include::example$getting_started/code/getting_started.sh[tag=apply-znode] 65 | 66 | Use the same command as before to list the nodes: 67 | 68 | [source,bash] 69 | include::example$getting_started/code/getting_started.sh[tag=zkcli-ls] 70 | 71 | and the ZNode has appeared in the output: 72 | 73 | ---- 74 | [znode-4e0a6098-057a-42cc-926e-276ea6305e09, zookeeper] 75 | ---- 76 | 77 | == The discovery ConfigMap 78 | 79 | The operator creates a ConfigMap with connection information, named after the ZNode - in this case `simple-znode`. 80 | Have a look at it using 81 | 82 | [source,bash] 83 | kubectl describe configmap simple-znode 84 | 85 | You see an output similar to this: 86 | 87 | [source] 88 | ZOOKEEPER: 89 | ---- 90 | simple-zk-server-default-0.simple-zk-server-default.default.svc.cluster.local:2282,simple-zk-server-default-1.simple-zk-server-default.default.svc.cluster.local:2282/znode-2a9d12be-bfee-49dc-9030-2cb3c3dd80d3 91 | ZOOKEEPER_CHROOT: 92 | ---- 93 | /znode-2a9d12be-bfee-49dc-9030-2cb3c3dd80d3 94 | ZOOKEEPER_HOSTS: 95 | ---- 96 | simple-zk-server-default-0.simple-zk-server-default.default.svc.cluster.local:2282,simple-zk-server-default-1.simple-zk-server-default.default.svc.cluster.local:2282 97 | 98 | The `ZOOKEEPER` entry contains a ZooKeeper connection string that you can use to connect to this specific ZNode. 99 | The `ZOOKEEPER_CHROOT` and `ZOOKEEPER_HOSTS` entries contain the ZNode path and the host list, respectively. 100 | You can use these three entries mounted into a pod to connect to ZooKeeper at this specific ZNode and read/write in that ZNode directory. 101 | 102 | 103 | Great! 104 | This step concludes the Getting started guide. 105 | You have installed the ZooKeeper Operator and its dependencies and set up your first ZooKeeper cluster as well as your first ZNode. 106 | 107 | == What's next 108 | 109 | Have a look at the xref:usage_guide/index.adoc[] to learn more about configuration options for your ZooKeeper cluster like setting up encryption or authentication.
110 | You can also have a look at the xref:znodes.adoc[] page to learn more about ZNodes. 111 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/getting_started/index.adoc: -------------------------------------------------------------------------------- 1 | = Getting started 2 | 3 | This guide gets you started with Apache ZooKeeper using the Stackable Operator. 4 | It guides you through the installation of the Operator and its dependencies, setting up your first ZooKeeper cluster and connecting to it as well as setting up your first xref:znodes.adoc[ZNode]. 5 | 6 | == Prerequisites 7 | 8 | You need: 9 | 10 | * a Kubernetes cluster 11 | * kubectl 12 | * optional: Helm 13 | 14 | Resource sizing depends on cluster type(s), usage and scope, but as a starting point a minimum of the following resources is recommended for this operator: 15 | 16 | * 0.2 cores (e.g. i5 or similar) 17 | * 256MB RAM 18 | 19 | == What's next 20 | 21 | The Guide is divided into two steps: 22 | 23 | * xref:getting_started/installation.adoc[Installing the Operators]. 24 | * xref:getting_started/first_steps.adoc[Setting up the ZooKeeper cluster]. 25 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/getting_started/installation.adoc: -------------------------------------------------------------------------------- 1 | = Installation 2 | :description: Install the Stackable operator for Apache ZooKeeper using stackablectl or Helm. 3 | 4 | There are multiple ways to install the Stackable Operator for Apache Zookeeper. 5 | xref:management:stackablectl:index.adoc[] is the preferred way, but Helm is also supported. 6 | OpenShift users may prefer installing the operator from the RedHat Certified Operator catalog using the OpenShift web console. 7 | 8 | [tabs] 9 | ==== 10 | stackablectl (recommended):: 11 | + 12 | -- 13 | `stackablectl` is the command line tool to interact with Stackable operators and the recommended way to install 14 | Operators. Follow the xref:management:stackablectl:installation.adoc[installation steps] for your platform. 15 | 16 | After you have installed `stackablectl`, use it to install the ZooKeeper Operator and its dependencies: 17 | 18 | [source,bash] 19 | ---- 20 | include::example$getting_started/code/getting_started.sh[tag=stackablectl-install-operators] 21 | ---- 22 | 23 | The tool prints 24 | 25 | [source] 26 | include::example$getting_started/code/install_output.txt[] 27 | 28 | TIP: Consult the xref:management:stackablectl:quickstart.adoc[] to learn more about how to use `stackablectl`. For 29 | example, you can use the `--cluster kind` flag to create a Kubernetes cluster with link:https://kind.sigs.k8s.io/[kind]. 30 | -- 31 | 32 | Helm:: 33 | + 34 | -- 35 | You can also use Helm to install the operators. 36 | 37 | NOTE: `helm repo` subcommands are not supported for OCI registries. The operators are installed directly, without adding the Helm Chart repository first. 38 | 39 | Install the Stackable Operators: 40 | [source,bash] 41 | ---- 42 | include::example$getting_started/code/getting_started.sh[tag=helm-install-operators] 43 | ---- 44 | 45 | Helm deploys the operators in Kubernetes Deployments and applies the CRDs for the ZooKeeperCluster Stacklet. 46 | -- 47 | ==== 48 | 49 | == What's next? 50 | 51 | Use the operator to xref:getting_started/first_steps.adoc[deploy a ZooKeeper Stacklet]. 
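Before moving on, you can do a quick sanity check that the operators are actually running. A minimal sketch (it assumes the default `stackable.tech/vendor=Stackable` label that the Helm charts in this repository attach to all deployed resources):

[source,bash]
----
# All four operator Deployments should eventually report READY 1/1
kubectl get deployments --selector stackable.tech/vendor=Stackable
----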
52 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/index.adoc: -------------------------------------------------------------------------------- 1 | = Stackable Operator for Apache ZooKeeper 2 | :description: Manage Apache ZooKeeper ensembles with the Stackable Kubernetes operator. Supports ZooKeeper versions, custom images, and integrates with Hadoop, Kafka, and more. 3 | :keywords: Stackable operator, Hadoop, Apache ZooKeeper, Kubernetes, k8s, operator, metadata, storage, cluster 4 | :zookeeper: https://zookeeper.apache.org/ 5 | :github: https://github.com/stackabletech/zookeeper-operator/ 6 | :crd: {crd-docs-base-url}/zookeeper-operator/{crd-docs-version}/ 7 | :crd-zookeepercluster: {crd-docs}/zookeeper.stackable.tech/zookeepercluster/v1alpha1/ 8 | :crd-zookeeperznode: {crd-docs}/zookeeper.stackable.tech/zookeeperznode/v1alpha1/ 9 | :feature-tracker: https://features.stackable.tech/unified 10 | 11 | [.link-bar] 12 | * {github}[GitHub {external-link-icon}^] 13 | * {feature-tracker}[Feature Tracker {external-link-icon}^] 14 | * {crd}[CRD documentation {external-link-icon}^] 15 | 16 | The Stackable operator for Apache ZooKeeper is a Kubernetes operator for deploying and managing {zookeeper}[Apache ZooKeeper] ensembles. 17 | Apache ZooKeeper is an open-source distributed coordination service that facilitates synchronization, configuration management and leader election in distributed systems. 18 | ZooKeeper is often used for these tasks in the Apache Hadoop ecosystem. 19 | Within the Stackable Platform, the Stackable operators for xref:hbase:index.adoc[Apache HBase], xref:hdfs:index.adoc[Apache Hadoop HDFS], xref:kafka:index.adoc[Apache Kafka], xref:nifi:index.adoc[Apache NiFi] and xref:druid:index.adoc[Apache Druid] depend on the ZooKeeper operator. 20 | 21 | == Getting started 22 | 23 | Get started with Apache ZooKeeper and the Stackable operator by following the xref:getting_started/index.adoc[Getting started] guide, which guides you through the xref:getting_started/installation.adoc[installation] process. 24 | Afterward, consult the xref:usage_guide/index.adoc[Usage guide] to learn more about configuring ZooKeeper for your needs. 25 | You can also deploy a <<demos,demo>> to see an example deployment of ZooKeeper together with other data products. 26 | 27 | == Operator model 28 | 29 | The operator manages two custom resources: _ZookeeperCluster_ and _ZookeeperZnode_. 30 | ZooKeeper only has a single process that it runs, so the cluster resource only has a single corresponding xref:concepts:roles-and-role-groups.adoc[role] called _server_. 31 | 32 | image::zookeeper_overview.drawio.svg[A diagram depicting the Kubernetes resources created by the Stackable operator for Apache ZooKeeper] 33 | 34 | For every role group, the operator creates a ConfigMap and a StatefulSet, which can have multiple replicas (Pods). 35 | Every role group is accessible through its own Service, and there is a Service for the whole Cluster. 36 | 37 | The operator creates a xref:concepts:service_discovery.adoc[service discovery ConfigMap] for the ZooKeeper instance, as well as for each ZNode. 38 | The discovery ConfigMaps contain information on how to connect to ZooKeeper. 39 | The ZNode discovery ConfigMap gives access information for the ZNode. 40 | xref:zookeeper:znodes.adoc[Learn more about ZNodes].
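To make the model concrete, this is roughly how you could list what the operator creates for a cluster. A sketch; the names assume the `simple-zk` cluster with the single `default` role group from the getting started guide, and the `app.kubernetes.io/instance` label is an assumption here:

[source,bash]
----
# StatefulSet, Services and ConfigMaps belonging to the simple-zk cluster
kubectl get statefulsets,services,configmaps -l app.kubernetes.io/instance=simple-zk
----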
41 | 42 | == Dependencies 43 | 44 | Apache ZooKeeper and the Stackable operator have no dependencies besides the xref:commons-operator:index.adoc[], xref:secret-operator:index.adoc[] and xref:listener-operator:index.adoc[]. 45 | 46 | == [[demos]]Demos 47 | 48 | Apache ZooKeeper is a dependency of xref:hbase:index.adoc[Apache HBase], xref:hdfs:index.adoc[Apache Hadoop HDFS], xref:kafka:index.adoc[Apache Kafka] and xref:nifi:index.adoc[Apache NiFi], thus any demo that uses one or more of these components also deploys a ZooKeeper ensemble. 49 | Here is the list of the demos that include ZooKeeper: 50 | 51 | * xref:demos:data-lakehouse-iceberg-trino-spark.adoc[] 52 | * xref:demos:hbase-hdfs-load-cycling-data.adoc[] 53 | * xref:demos:jupyterhub-pyspark-hdfs-anomaly-detection-taxi-data.adoc[] 54 | * xref:demos:logging.adoc[] 55 | * xref:demos:nifi-kafka-druid-earthquake-data.adoc[] 56 | * xref:demos:nifi-kafka-druid-water-level-data.adoc[] 57 | 58 | == Supported versions 59 | 60 | The Stackable operator for Apache ZooKeeper currently supports the ZooKeeper versions listed below. 61 | To use a specific ZooKeeper version in your ZookeeperCluster, you have to specify an image - this is explained in the xref:concepts:product-image-selection.adoc[] documentation. 62 | The operator also supports running images from a custom registry or running entirely customized images; both of these cases are explained under xref:concepts:product-image-selection.adoc[] as well. 63 | 64 | include::partial$supported-versions.adoc[] 65 | 66 | == Useful links 67 | 68 | * The {github}[zookeeper-operator {external-link-icon}^] GitHub repository 69 | * The operator feature overview in the {feature-tracker}[feature tracker {external-link-icon}^] 70 | * The {crd-zookeepercluster}[ZookeeperCluster {external-link-icon}^] and {crd-zookeeperznode}[ZookeeperZnode {external-link-icon}^] CRD documentation 71 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/reference/commandline-parameters.adoc: -------------------------------------------------------------------------------- 1 | = Command line parameters 2 | 3 | This operator accepts the following command line parameters: 4 | 5 | == product-config 6 | 7 | *Default value*: `/etc/stackable/zookeeper-operator/config-spec/properties.yaml` 8 | 9 | *Required*: false 10 | 11 | *Multiple values:* false 12 | 13 | [source] 14 | ---- 15 | cargo run -- run --product-config /foo/bar/properties.yaml 16 | ---- 17 | 18 | == watch-namespace 19 | 20 | *Default value*: All namespaces 21 | 22 | *Required*: false 23 | 24 | *Multiple values:* false 25 | 26 | The operator **only** watches for resources in the provided namespace `test`: 27 | 28 | [source] 29 | ---- 30 | cargo run -- run --watch-namespace test 31 | ---- 32 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/reference/crds.adoc: -------------------------------------------------------------------------------- 1 | = CRD Reference 2 | 3 | Find the full CRD reference for the Stackable Operator for Apache ZooKeeper at: {crd-docs-base-url}/zookeeper-operator/{crd-docs-version}.
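If the operator is installed, the same schema can also be inspected directly from the cluster. A sketch; the CRD name is derived from the `zookeeper.stackable.tech` API group used throughout this repository:

[source,bash]
----
# Dump the full ZookeeperCluster CRD as served by the API server
kubectl get crd zookeeperclusters.zookeeper.stackable.tech -o yaml

# Or browse the schema field by field
kubectl explain zookeepercluster.spec
----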
4 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/reference/discovery.adoc: -------------------------------------------------------------------------------- 1 | = Discovery Profiles 2 | :page-aliases: discovery.adoc 3 | 4 | The Stackable Operator for Apache ZooKeeper creates a number of discovery profiles, which are client configuration bundles 5 | that allow access to the Apache ZooKeeper cluster. These are published into the Kubernetes cluster as 6 | https://kubernetes.io/docs/concepts/configuration/configmap/[ConfigMap] objects. 7 | 8 | Discovery profiles are generated for each ZookeeperCluster and xref:znodes.adoc[ZookeeperZnode] object, 9 | the name of which is the "base name" of the profile. 10 | 11 | == Profiles 12 | 13 | === Default 14 | 15 | This profile allows access to the Apache ZooKeeper cluster from inside the Kubernetes cluster, and connects directly to the Pod identity. 16 | 17 | The name of the ConfigMap created for this discovery profile is `$BASENAME`. 18 | 19 | === NodePort 20 | 21 | This profile allows access to the Apache ZooKeeper cluster from anywhere, inside or outside of the Kubernetes cluster. 22 | 23 | The name of the ConfigMap created for this discovery profile is `$BASENAME-nodeport`. 24 | 25 | == Contents 26 | 27 | Each discovery profile contains the following fields: 28 | 29 | `ZOOKEEPER`:: A connection string, as accepted by https://zookeeper.apache.org/doc/r3.9.3/apidocs/zookeeper-server/org/apache/zookeeper/ZooKeeper.html#ZooKeeper-java.lang.String-int-org.apache.zookeeper.Watcher-[the official Java client], e.g. `test-zk-server-default-0.test-zk-server-default.kuttl-test-proper-spaniel.svc.cluster.local:2282,test-zk-server-default-1.test-zk-server-default.kuttl-test-proper-spaniel.svc.cluster.local:2282/znode-4e169890-d2eb-4d62-9515-e4786f0ac58e` 30 | `ZOOKEEPER_HOSTS`:: A comma-separated list of `node1:port1,node2:port2,...`, e.g. `test-zk-server-default-0.test-zk-server-default.kuttl-test-proper-spaniel.svc.cluster.local:2282,test-zk-server-default-1.test-zk-server-default.kuttl-test-proper-spaniel.svc.cluster.local:2282` 31 | `ZOOKEEPER_CHROOT`:: The name of the root ZNode associated with the discovery profile; it should be used if (and only if) connecting using `ZOOKEEPER_HOSTS` (rather than `ZOOKEEPER`), e.g. `/znode-4e169890-d2eb-4d62-9515-e4786f0ac58e` in case of a ZNode discovery or `/` in case of a ZookeeperServer discovery 32 | `ZOOKEEPER_CLIENT_PORT`:: The port clients should use when connecting, e.g. `2282` 33 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/reference/environment-variables.adoc: -------------------------------------------------------------------------------- 1 | = Environment variables 2 | 3 | This operator accepts the following environment variables: 4 | 5 | == KUBERNETES_CLUSTER_DOMAIN 6 | 7 | *Default value*: cluster.local 8 | 9 | *Required*: false 10 | 11 | *Multiple values*: false 12 | 13 | This instructs the operator which value it should use for the Kubernetes `clusterDomain` setting. 14 | Make sure to keep this in sync with whatever setting your cluster uses. 15 | Please see the documentation xref:guides:kubernetes-cluster-domain.adoc[on configuring the Kubernetes cluster domain] for more information on this feature.
16 | 17 | [source] 18 | ---- 19 | export KUBERNETES_CLUSTER_DOMAIN=mycluster.local 20 | cargo run -- run 21 | ---- 22 | 23 | or via docker: 24 | 25 | [source] 26 | ---- 27 | docker run \ 28 | --name zookeeper-operator \ 29 | --network host \ 30 | --env KUBECONFIG=/home/stackable/.kube/config \ 31 | --env KUBERNETES_CLUSTER_DOMAIN=mycluster.local \ 32 | --mount type=bind,source="$HOME/.kube/config",target="/home/stackable/.kube/config" \ 33 | oci.stackable.tech/sdp/zookeeper-operator:0.0.0-dev 34 | ---- 35 | 36 | == PRODUCT_CONFIG 37 | 38 | *Default value*: `/etc/stackable/zookeeper-operator/config-spec/properties.yaml` 39 | 40 | *Required*: false 41 | 42 | *Multiple values*: false 43 | 44 | [source] 45 | ---- 46 | export PRODUCT_CONFIG=/foo/bar/properties.yaml 47 | cargo run -- run 48 | ---- 49 | 50 | or via docker: 51 | 52 | ---- 53 | docker run \ 54 | --name zookeeper-operator \ 55 | --network host \ 56 | --env KUBECONFIG=/home/stackable/.kube/config \ 57 | --env PRODUCT_CONFIG=/my/product/config.yaml \ 58 | --mount type=bind,source="$HOME/.kube/config",target="/home/stackable/.kube/config" \ 59 | oci.stackable.tech/sdp/zookeeper-operator:0.0.0-dev 60 | ---- 61 | 62 | == WATCH_NAMESPACE 63 | 64 | *Default value*: All namespaces 65 | 66 | *Required*: false 67 | 68 | *Multiple values*: false 69 | 70 | The operator **only** watches for resources in the provided namespace `test`: 71 | 72 | [source] 73 | ---- 74 | export WATCH_NAMESPACE=test 75 | cargo run -- run 76 | ---- 77 | 78 | or via docker: 79 | 80 | [source] 81 | ---- 82 | docker run \ 83 | --name zookeeper-operator \ 84 | --network host \ 85 | --env KUBECONFIG=/home/stackable/.kube/config \ 86 | --env WATCH_NAMESPACE=test \ 87 | --mount type=bind,source="$HOME/.kube/config",target="/home/stackable/.kube/config" \ 88 | oci.stackable.tech/sdp/zookeeper-operator:0.0.0-dev 89 | ---- 90 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/reference/index.adoc: -------------------------------------------------------------------------------- 1 | = Reference 2 | 3 | Consult the reference documentation section to find exhaustive information on: 4 | 5 | * Descriptions and default values of all properties in the CRDs used by this operator in the xref:reference/crds.adoc[]. 6 | * The properties in the xref:reference/discovery.adoc[]. 7 | * The xref:reference/commandline-parameters.adoc[] and xref:reference/environment-variables.adoc[] accepted by the operator. 8 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/authentication.adoc: -------------------------------------------------------------------------------- 1 | = Authentication 2 | :description: Enable TLS authentication for ZooKeeper with Stackable's Kubernetes operator. 3 | 4 | The communication between nodes (server to server) is encrypted via TLS by default. 5 | In order to enforce TLS authentication for client-to-server communication, you can set an xref:concepts:authentication.adoc[AuthenticationClass] reference in the `spec.clusterConfig.authentication` property. 6 | 7 | Currently it is possible to configure a single form of authentication (of type TLS) by adding one (and only one) entry 8 | in the `authentication` sequence as shown in the example below. Additional authentication methods, such as Kerberos, are 9 | not yet supported. 
10 | 11 | [source,yaml] 12 | ---- 13 | include::example$usage_guide/example-cluster-tls-authentication.yaml[] 14 | include::example$usage_guide/example-cluster-tls-authentication-class.yaml[] 15 | include::example$usage_guide/example-cluster-tls-authentication-secret.yaml[] 16 | ---- 17 | <1> The `clusterConfig.authentication.authenticationClass` can be set to use TLS for authentication. This is optional. 18 | <2> The referenced AuthenticationClass that references a SecretClass to provide certificates. 19 | <3> The reference to a SecretClass. 20 | <4> The SecretClass that is referenced by the AuthenticationClass in order to provide certificates. 21 | 22 | If both `spec.clusterConfig.tls.server.secretClass` and `spec.clusterConfig.authentication.authenticationClass` are set, 23 | the authentication class takes precedence over the secret class. 24 | The cluster is encrypted and authenticates only against the authentication class. 25 | 26 | WARNING: Due to a https://issues.apache.org/jira/browse/ZOOKEEPER-4276[bug] in ZooKeeper, the `clientPort` property in 27 | combination with `client.portUnification=true` is used instead of the `secureClientPort`. This means that unencrypted 28 | and unauthenticated access to the ZooKeeper cluster is still possible. 29 | 30 | == Learn more 31 | 32 | * Read the xref:concepts:authentication.adoc[authentication concept] of the Stackable Data Platform. 33 | * Read the {crd-docs}/authentication.stackable.tech/authenticationclass/v1alpha1/[AuthenticationClass reference {external-link-icon}^]. 34 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/cluster_operations.adoc: -------------------------------------------------------------------------------- 1 | 2 | = Cluster Operation 3 | 4 | The managed ZooKeeper instances can be configured with different cluster operations like pausing reconciliation or stopping the cluster. See xref:concepts:cluster_operations.adoc[cluster operations] for more details. 5 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/encryption.adoc: -------------------------------------------------------------------------------- 1 | = Encryption 2 | :description: Quorum and client communication in ZooKeeper are encrypted via TLS by default. Customize certificates with the Secret Operator for added security 3 | 4 | The quorum and client communication are encrypted by default via TLS. 5 | This requires the xref:secret-operator:index.adoc[Secret Operator] to be present in order to provide certificates. 6 | The utilized certificates can be changed in a top-level config. 7 | 8 | [source,yaml] 9 | ---- 10 | include::example$usage_guide/example-cluster-tls-encryption.yaml[] 11 | ---- 12 | <1> The `tls.server.secretClass` refers to the client-to-server encryption. Defaults to the `tls` secret. 13 | <2> The `tls.quorum.secretClass` refers to the server-to-server quorum encryption. Defaults to the `tls` secret. 14 | 15 | The `tls` secret is deployed from the xref:secret-operator:index.adoc[Secret Operator] and looks like this: 16 | 17 | [source,yaml] 18 | ---- 19 | include::example$usage_guide/example-secret-operator-tls-secret.yaml[] 20 | ---- 21 | 22 | You can create your own secrets and reference them e.g. in the `tls.secretClass` to use different certificates. 
23 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/index.adoc: -------------------------------------------------------------------------------- 1 | = Usage guide 2 | 3 | This section provides more in depth information about specific aspects of using the Stackable Operator for Apache ZooKeeper. 4 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/isolating_clients_with_znodes.adoc: -------------------------------------------------------------------------------- 1 | = Isolating clients with ZNodes 2 | :description: Isolate clients in ZooKeeper with unique ZNodes for each product. Set up ZNodes for each product and connect them using discovery ConfigMaps. 3 | 4 | ZooKeeper is a dependency of many products supported by the Stackable Data Platform. 5 | To ensure that all products can use the same ZooKeeper cluster safely, it is important to isolate them which is done using xref:znodes.adoc[]. 6 | 7 | This guide shows you how to set up multiple ZNodes to use with different products from the Stackable Data Platform, using Kafka and Druid as an example. 8 | For an explanation of the ZNode concept, read the xref:znodes.adoc[] concept page. 9 | 10 | == Prerequisites 11 | 12 | To follow this guide, you should have 13 | 14 | * Access to a Kubernetes cluster 15 | * The Stackable Operator for Apache ZooKeeper installed in said cluster 16 | * A ZookeeperCluster already deployed on the cluster 17 | 18 | If you have not yet set up the Operator and ZookeeperCluster, follow the xref:getting_started/index.adoc[getting started guide]. 19 | 20 | == Steps 21 | 22 | 23 | This guide assumes the ZookeeperCluster is called `my-zookeeper` and is running in a `data` namespace. 24 | 25 | === Setting up the ZNodes 26 | 27 | To set up a Kafka and Druid instance to use the ZookeeperCluster, two ZNodes are required, one for each product. 28 | This guide assumes the Kafka instance is running in the same namespace as the ZooKeeper, while the Druid instance is running in its own namespace called `druid-ns`. 29 | 30 | First, the Druid ZNode: 31 | 32 | [source,yaml] 33 | ---- 34 | include::example$usage_guide/znode/example-znode-druid.yaml[] 35 | ---- 36 | <1> The name of the Druid ZNode. 37 | <2> The namespace where the ZNode should be created. This should be the same as the namespace of the product or client that wants to use the ZNode. 38 | <3> The ZooKeeper cluster reference. Since ZooKeeper is running in a different namespace, both the cluster name and namespace need to be given. 39 | 40 | And the Kafka ZNode: 41 | 42 | [source,yaml] 43 | ---- 44 | include::example$usage_guide/znode/example-znode-kafka.yaml[] 45 | ---- 46 | <1> The name of the Kafka ZNode. 47 | <2> The namespace where the ZNode should be created. Since Kafka is running in the same namespace as ZooKeeper, this is the namespace of `my-zookeeper`. 48 | <3> The ZooKeeper cluster reference. The namespace is omitted here because the ZooKeeper is in the same namespace as the ZNode object. 49 | 50 | The Stackable Operator for ZooKeeper watches for ZookeeperZnode objects. 51 | If one is found it creates the ZNode _inside_ the ZooKeeper cluster and also creates a xref:concepts:service_discovery.adoc[discovery ConfigMap] in the same namespace as the ZookeeperZnode with the same name as the ZookeeperZnode. 
52 | 53 | In this example, two ConfigMaps are created: 54 | 55 | * The Druid ZNode discovery ConfigMap `druid-znode` in the `druid-ns` namespace 56 | * The Kafka ZNode discovery ConfigMap `kafka-znode` in the `data` namespace 57 | 58 | === Connecting the products to the ZNodes 59 | 60 | The ConfigMaps with the names and namespaces as given above look similar to this: 61 | 62 | [source,yaml] 63 | ---- 64 | include::example$usage_guide/znode/example-znode-discovery.yaml[] 65 | ---- 66 | <1> Name and namespace as specified above 67 | <2> `$PATH` is a unique and unpredictable path that is generated by the operator 68 | 69 | This ConfigMap can then be mounted into other Pods and the `ZOOKEEPER` key can be used to connect to the ZooKeeper instance and the correct ZNode. 70 | 71 | All products that need a ZNode can be configured with a `zookeeperConfigMapName` property. 72 | As the name implies, this property references the discovery ConfigMap for the requested ZNode. 73 | 74 | For Kafka: 75 | 76 | [source,yaml] 77 | ---- 78 | --- 79 | apiVersion: kafka.stackable.tech/v1alpha1 80 | kind: KafkaCluster 81 | metadata: 82 | name: my-kafka 83 | namespace: data 84 | spec: 85 | zookeeperConfigMapName: kafka-znode 86 | ... 87 | ---- 88 | 89 | And for Druid: 90 | 91 | [source,yaml] 92 | ---- 93 | --- 94 | apiVersion: druid.stackable.tech/v1alpha1 95 | kind: DruidCluster 96 | metadata: 97 | name: my-druid 98 | namespace: druid-ns 99 | spec: 100 | zookeeperConfigMapName: druid-znode 101 | ... 102 | ---- 103 | 104 | The Stackable Operators for Kafka and Druid use the discovery ConfigMaps to connect Kafka and Druid Pods with different ZNodes in a shared ZooKeeper cluster. 105 | 106 | == What's next 107 | 108 | You can find out more about the discovery ConfigMap xref:discovery.adoc[] and the xref:znodes.adoc[] in the concepts documentation. 109 | 110 | == Restoring from backups 111 | 112 | For security reasons, a unique ZNode path is generated every time the same ZookeeperZnode object is recreated, even if it has the same name. 113 | 114 | If a ZookeeperZnode needs to be associated with an existing ZNode path, the field `status.znodePath` can be set to the desired path. 115 | Note that since this is a subfield of `status`, it must explicitly be updated on the `status` subresource, and requires RBAC permissions to replace the `zookeeperznodes/status` resource. 116 | For example: 117 | 118 | [source,bash] 119 | ---- 120 | kubectl get zookeeperznode/test-znode -o json -n $NAMESPACE \ 121 | | jq '.status.znodePath = "/znode-override"' \ 122 | | kubectl replace -f- --subresource=status 123 | ---- 124 | 125 | NOTE: The auto-generated ZNode will still be kept, and should be cleaned up by an administrator. 126 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/listenerclass.adoc: -------------------------------------------------------------------------------- 1 | = Service exposition with ListenerClasses 2 | 3 | Apache ZooKeeper offers an API. The Operator deploys a service called `<name>` (where `<name>` is the name of the ZookeeperCluster) through which ZooKeeper can be reached. 4 | 5 | This service can have either the `cluster-internal` or `external-unstable` type. `external-stable` is not supported for ZooKeeper at the moment. 6 | Read more about the types in the xref:concepts:service-exposition.adoc[service exposition] documentation at platform level.
7 | 8 | This is how the listener class is configured: 9 | 10 | [source,yaml] 11 | ---- 12 | spec: 13 | clusterConfig: 14 | listenerClass: cluster-internal # <1> 15 | ---- 16 | <1> The default `cluster-internal` setting. 17 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/log_aggregation.adoc: -------------------------------------------------------------------------------- 1 | = Log aggregation 2 | :description: The logs can be forwarded to a Vector log aggregator by providing a discovery ConfigMap for the aggregator and by enabling the log agent. 3 | 4 | The logs can be forwarded to a Vector log aggregator by providing a discovery ConfigMap for the aggregator and by enabling the log agent: 5 | 6 | [source,yaml] 7 | ---- 8 | spec: 9 | clusterConfig: 10 | vectorAggregatorConfigMapName: vector-aggregator-discovery 11 | servers: 12 | config: 13 | logging: 14 | enableVectorAgent: true 15 | containers: 16 | prepare: 17 | console: 18 | level: INFO 19 | file: 20 | level: INFO 21 | loggers: 22 | ROOT: 23 | level: INFO 24 | roleGroups: 25 | default: 26 | replicas: 1 27 | ---- 28 | 29 | Further information on how to configure logging can be found in xref:concepts:logging.adoc[]. 30 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/monitoring.adoc: -------------------------------------------------------------------------------- 1 | = Monitoring 2 | :description: The managed ZooKeeper instances are automatically configured to export Prometheus metrics. 3 | 4 | The managed ZooKeeper instances are automatically configured to export Prometheus metrics. 5 | See xref:operators:monitoring.adoc[] for more details. 6 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/operations/cluster-operations.adoc: -------------------------------------------------------------------------------- 1 | 2 | = Cluster Operation 3 | 4 | The managed ZooKeeper instances can be configured with different cluster operations like pausing reconciliation or stopping the cluster. See xref:concepts:operations/cluster_operations.adoc[cluster operations] for more details. 5 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/operations/graceful-shutdown.adoc: -------------------------------------------------------------------------------- 1 | = Graceful shutdown 2 | 3 | You can configure the graceful shutdown as described in xref:concepts:operations/graceful_shutdown.adoc[]. 4 | 5 | == Servers 6 | 7 | By default, ZooKeeper servers have `2 minutes` to shut down gracefully. 8 | 9 | The ZooKeeper server process will receive a `SIGTERM` signal when Kubernetes wants to terminate the Pod. 10 | If the process has still not exited after the graceful shutdown timeout runs out, Kubernetes issues a `SIGKILL` signal. 11 | 12 | This is equivalent to executing the `bin/zkServer.sh stop` command, which internally executes `kill <pid>` (https://github.com/apache/zookeeper/blob/74db005175a4ec545697012f9069cb9dcc8cdda7/bin/zkServer.sh#L219[code]). 13 | 14 | However, there is no acknowledgement message in the log indicating a graceful shutdown.
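The timeout itself can be raised or lowered per role. A minimal sketch, assuming the `gracefulShutdownTimeout` key that the operator reads from the merged role configuration (the `30m` value is illustrative):

[source,yaml]
----
spec:
  servers:
    config:
      # Assumption: this duration is translated by the operator into the
      # Pod's terminationGracePeriodSeconds; the default is 2 minutes.
      gracefulShutdownTimeout: 30m
    roleGroups:
      default:
        replicas: 3
----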
15 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/operations/index.adoc: -------------------------------------------------------------------------------- 1 | = Operations 2 | 3 | This section of the documentation is intended for the operations teams that maintain a Stackable Data Platform installation. 4 | 5 | Read the xref:concepts:operations/index.adoc[concepts page on operations] for the necessary details to operate the platform in a production environment. 6 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/operations/pod-disruptions.adoc: -------------------------------------------------------------------------------- 1 | = Allowed Pod disruptions 2 | 3 | You can configure the permitted Pod disruptions for ZooKeeper nodes as described in xref:concepts:operations/pod_disruptions.adoc[]. 4 | 5 | Unless you configure something else or disable the provided PodDisruptionBudgets (PDBs), the following PDBs are written: 6 | 7 | == Servers 8 | The provided PDBs only allow a single server to be offline at any given time, regardless of the number of replicas or `roleGroups`. 9 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/operations/pod-placement.adoc: -------------------------------------------------------------------------------- 1 | = Pod placement 2 | 3 | You can configure the Pod placement of the ZooKeeper pods as described in xref:concepts:operations/pod_placement.adoc[]. 4 | 5 | The default affinities created by the operator are: 6 | 7 | 1. Distribute all the ZooKeeper Pods (weight 70) 8 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/overrides.adoc: -------------------------------------------------------------------------------- 1 | 2 | = Configuration and environment overrides 3 | 4 | The cluster definition also supports overriding configuration properties and environment variables, either per role or per role group, where the more specific override (role group) has precedence over the less specific one (role). 5 | 6 | IMPORTANT: Overriding certain properties which are set by the operator (such as the ports) can interfere with the operator and can lead to problems. 7 | 8 | == Configuration properties 9 | 10 | For a role or role group, at the same level as `config`, you can specify `configOverrides` for the `zoo.cfg` and `security.properties` files. 11 | 12 | === Overriding entries in zoo.cfg 13 | 14 | For example, if you want to set the `4lw.commands.whitelist` to allow the `ruok` administrative command, it can be configured in the `ZookeeperCluster` resource like so: 15 | 16 | [source,yaml] 17 | ---- 18 | servers: 19 | roleGroups: 20 | default: 21 | configOverrides: 22 | zoo.cfg: 23 | 4lw.commands.whitelist: "srvr, ruok" 24 | replicas: 1 25 | ---- 26 | 27 | Just as for the `config`, it is possible to specify this at role level as well: 28 | 29 | [source,yaml] 30 | ---- 31 | servers: 32 | configOverrides: 33 | zoo.cfg: 34 | 4lw.commands.whitelist: "srvr, ruok" 35 | roleGroups: 36 | default: 37 | replicas: 1 38 | ---- 39 | 40 | All property values must be strings. 41 | 42 | For a full list of configuration options, refer to the Apache ZooKeeper https://zookeeper.apache.org/doc/r3.9.3/zookeeperAdmin.html#sc_configuration[Configuration Reference].
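To check that an override actually reached the rendered configuration, you can inspect the role group ConfigMap. This is a sketch under assumptions: it presumes a cluster named `simple-zk`, and that the ConfigMap follows the `<cluster>-server-<rolegroup>` naming used for the role group StatefulSet (e.g. `test-zk-server-default` in the smoke tests) and carries a `zoo.cfg` key:

[source,bash]
----
# Print the rendered zoo.cfg from the role group ConfigMap
kubectl get configmap simple-zk-server-default \
  -o jsonpath='{.data.zoo\.cfg}'
# With the override above, the output should contain a line like:
# 4lw.commands.whitelist=srvr, ruok
----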
43 | 44 | === Overriding entries in security.properties 45 | 46 | The `security.properties` file is used to configure JVM security properties. It is very seldom that users need to tweak any of these, but there is one use case that stands out, and that users need to be aware of: the JVM DNS cache. 47 | 48 | The JVM manages its own cache of successfully resolved host names as well as a cache of host names that cannot be resolved. 49 | Some products of the Stackable platform are very sensitive to the contents of these caches, and their performance is heavily affected by them. 50 | As of version 3.8.1, Apache ZooKeeper always requires up-to-date IP addresses to maintain its quorum. 51 | To guarantee this, the negative DNS cache of the JVM needs to be disabled. This can be achieved by setting the TTL of entries in the negative cache to zero, like this: 52 | 53 | [source,yaml] 54 | ---- 55 | servers: 56 | configOverrides: 57 | security.properties: 58 | networkaddress.cache.ttl: "5" 59 | networkaddress.cache.negative.ttl: "0" 60 | ---- 61 | 62 | NOTE: The operator configures DNS caching by default as shown in the example above. 63 | 64 | For details on JVM security, see https://docs.oracle.com/en/java/javase/11/security/java-security-overview1.html 65 | 66 | == Environment variables 67 | 68 | In a similar fashion, environment variables can be (over)written. For example per role group: 69 | 70 | [source,yaml] 71 | ---- 72 | servers: 73 | roleGroups: 74 | default: 75 | envOverrides: 76 | MY_ENV_VAR: "MY_VALUE" 77 | replicas: 1 78 | ---- 79 | 80 | or per role: 81 | 82 | [source,yaml] 83 | ---- 84 | servers: 85 | envOverrides: 86 | MY_ENV_VAR: "MY_VALUE" 87 | roleGroups: 88 | default: 89 | replicas: 1 90 | ---- 91 | 92 | == Pod overrides 93 | 94 | The ZooKeeper operator also supports Pod overrides, allowing you to override any property that you can set on a Kubernetes Pod. 95 | Read the xref:concepts:overrides.adoc#pod-overrides[Pod overrides documentation] to learn more about this feature. 96 | 97 | == JVM argument overrides 98 | 99 | Stackable operators automatically determine the set of needed JVM arguments, such as memory settings or trust- and keystores. 100 | Using JVM argument overrides you can configure the JVM arguments xref:concepts:overrides.adoc#jvm-argument-overrides[according to the concepts page]. 101 | 102 | One thing that is different for ZooKeeper is that all heap-related arguments are passed in via the environment variable `ZK_SERVER_HEAP`, and all other arguments via `SERVER_JVMFLAGS`. 103 | `ZK_SERVER_HEAP` can *not* have a unit suffix; it is always an integer representing the available heap in megabytes. 104 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/resource_configuration.adoc: -------------------------------------------------------------------------------- 1 | = Storage and resource configuration 2 | :description: Configure ZooKeeper storage with PersistentVolumeClaims and set resource requests for CPU, memory, and storage.
3 | :pvcs: https://kubernetes.io/docs/concepts/storage/persistent-volumes 4 | 5 | == Storage for data volumes 6 | 7 | You can mount volumes where data is stored by specifying {pvcs}[PersistentVolumeClaims] for each individual role group: 8 | 9 | [source,yaml] 10 | ---- 11 | servers: 12 | roleGroups: 13 | default: 14 | config: 15 | resources: 16 | storage: 17 | data: 18 | capacity: 2Gi 19 | ---- 20 | 21 | In the above example, all ZooKeeper nodes in the default group will store data (the location of the property `dataDir`) on a `2Gi` volume. 22 | 23 | You can also configure which StorageClass to use, consult the xref:concepts:resources.adoc#storageclass[resources docs] to learn more. 24 | 25 | == Resource requests 26 | 27 | include::home:concepts:stackable_resource_requests.adoc[] 28 | 29 | A minimal HA setup consisting of 3 ZooKeeper instances has the following https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/[resource requirements]: 30 | 31 | * `1350m` CPU request 32 | * `3900m` CPU limit 33 | * `1920m` memory request and limit 34 | * `3072Mi` persistent storage 35 | 36 | Corresponding to the values above, the operator uses the following resource defaults: 37 | 38 | [source,yaml] 39 | ---- 40 | servers: 41 | roleGroups: 42 | default: 43 | config: 44 | resources: 45 | memory: 46 | limit: '512Mi' 47 | cpu: 48 | min: '200m' 49 | max: '800m' 50 | storage: 51 | data: 52 | capacity: '1Gi' 53 | ---- 54 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/usage_guide/using_multiple_role_groups.adoc: -------------------------------------------------------------------------------- 1 | = Using multiple role groups 2 | :description: ZooKeeper uses myid for server identification. Avoid conflicts in multiple role groups by setting myidOffset for unique IDs in each StatefulSet. 3 | :ordinal-index: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#ordinal-index 4 | 5 | ZooKeeper uses a unique ID called _myid_ to identify each server in the cluster. 6 | The Stackable Operator for Apache ZooKeeper assigns the _myid_ to each Pod from the {ordinal-index}[ordinal index] given to the Pod by Kubernetes. 7 | This index is unique over the Pods in the StatefulSet of the xref:concepts:roles-and-role-groups.adoc[role group]. 8 | 9 | When using multiple role groups in a cluster, this will lead to different ZooKeeper Pods using the same _myid_. 10 | Each role group is represented by its own StatefulSet, and therefore always identified starting with `0`. 11 | 12 | In order to avoid this _myid_ conflict, a property `myidOffset` needs to be specified in each rolegroup. 13 | The `myidOffset` defaults to zero, but if specified will be added to the ordinal index of the Pod. 14 | 15 | == Example configuration 16 | 17 | Here the property is used on the second role group in a ZooKeeperCluster: 18 | 19 | [source,yaml] 20 | ---- 21 | apiVersion: zookeeper.stackable.tech/v1alpha1 22 | kind: ZookeeperCluster 23 | metadata: 24 | name: zookeeper 25 | spec: 26 | servers: 27 | roleGroups: 28 | primary: 29 | replicas: 2 30 | secondary: 31 | replicas: 1 32 | config: 33 | myidOffset: 10 # <1> 34 | ---- 35 | <1> The `myidOffset` property set to 10 for the secondary role group 36 | 37 | The `secondary` role group _myid_ starts from id `10`. 38 | The `primary` role group will start from `0`. 
39 | This means the replicas of the role group `primary` should not be scaled higher than `10`, which results in `10` `primary` Pods using a _myid_ from `0` to `9`, followed by the `secondary` Pods starting at _myid_ `10`. 40 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/pages/znodes.adoc: -------------------------------------------------------------------------------- 1 | = ZNodes 2 | :description: Manage ZooKeeper ZNodes with the ZookeeperZnode resource. Each client should use a unique root ZNode to prevent conflicts. Network access to ZooKeeper is required. 3 | 4 | Apache ZooKeeper organizes all data into a hierarchical system of https://zookeeper.apache.org/doc/r3.9.3/zookeeperProgrammers.html#ch_zkDataModel[ZNodes], 5 | which act as both files (they can have data associated with them) and folders (they can contain other ZNodes) when compared to a traditional (POSIX-like) file system. 6 | 7 | In order to isolate different clients using the same ZooKeeper cluster, each client application should be assigned a unique root ZNode, which it can then organize 8 | as it sees fit. This can be thought of as a namespace for that client, and prevents clashes between different clients. 9 | 10 | The Stackable Operator for Apache ZooKeeper manages ZNodes using the _ZookeeperZnode_ resource. 11 | 12 | IMPORTANT: The Operator connects directly to ZooKeeper to manage the ZNodes inside the ZooKeeper ensemble. This means that network access to the ZooKeeper pods is necessary. If your Kubernetes cluster restricts network access, you need to configure a NetworkPolicy to allow the operator to connect to ZooKeeper. 13 | 14 | == Configuring ZNodes 15 | 16 | ZNodes are configured with the ZookeeperZnode CustomResource. 17 | If a ZookeeperZnode resource is created, the operator creates the respective tree in ZooKeeper. 18 | Also, if the resource in Kubernetes is deleted, so is the data in ZooKeeper. 19 | 20 | CAUTION: The operator automatically deletes the ZNode from the ZooKeeper cluster if the Kubernetes ZookeeperZnode object is deleted. 21 | Recreating the ZookeeperZnode object will not restore access to the data. 22 | 23 | Here is an example of a ZookeeperZnode: 24 | 25 | [source,yaml] 26 | ---- 27 | include::example$example-znode.yaml[] 28 | ---- 29 | <1> The name of the ZNode in ZooKeeper. It is the same as the name of the Kubernetes resource. 30 | <2> Reference to the `ZookeeperCluster` object where the ZNode should be created. 31 | <3> The namespace of the `ZookeeperCluster`. 32 | Can be omitted and defaults to the namespace of the ZNode object. 33 | 34 | When a ZNode is created, the operator creates the required tree in ZooKeeper and a xref:concepts:service_discovery.adoc[discovery ConfigMap] with a xref:discovery.adoc[] for this ZNode. This discovery ConfigMap is used by other operators to configure clients with access to the ZNode. 35 | 36 | The operator _does not_ manage the contents of the ZNode. 37 | 38 | 39 | == Creating a ZNode per dependant 40 | 41 | To ensure that a product that uses ZooKeeper is running smoothly, you should make sure that each Stacklet or product instance is operating with its own ZNode. 42 | For example, a Kafka and a Hadoop cluster should not share the same ZNode. Also, no two Kafka instances should share the same ZNode; see the sketch below. 43 | 44 | Have a look at the xref:usage_guide/isolating_clients_with_znodes.adoc[] guide for hands-on instructions on how to set up multiple ZNodes for different Stacklets.
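As a sketch of that rule (the names `my-kafka-1-znode`, `my-kafka-2-znode` and the cluster name `simple-zk` are illustrative; the manifest shape follows `examples/simple-zookeeper-tls-cluster.yaml` in this repository), two Kafka instances sharing one ZooKeeper would each get their own ZookeeperZnode:

[source,yaml]
----
---
apiVersion: zookeeper.stackable.tech/v1alpha1
kind: ZookeeperZnode
metadata:
  name: my-kafka-1-znode # one ZNode for the first Kafka instance
spec:
  clusterRef:
    name: simple-zk
---
apiVersion: zookeeper.stackable.tech/v1alpha1
kind: ZookeeperZnode
metadata:
  name: my-kafka-2-znode # the second instance gets its own ZNode
spec:
  clusterRef:
    name: simple-zk
----

Each object results in its own discovery ConfigMap, so the two instances never see each other's data.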
45 | 46 | == Split responsibilities for ZooKeeper and ZNodes 47 | 48 | One reason for the design of using multiple resources to configure the ZNodes instead of specifying them inside the ZookeeperCluster itself, was to allow different people in an organization to manage them separately. 49 | 50 | The ZookeeperCluster might be under the responsibility of a cluster administrator, and access control might prevent anyone from creating or modifying the ZookeeperCluster. 51 | 52 | ZNodes however are product specific and need to be managed by product teams that do not have cluster wide administration rights. 53 | 54 | == What's next 55 | 56 | Have a look at the usage guide for ZNodes: xref:usage_guide/isolating_clients_with_znodes.adoc[] or the CRD reference for the {crd-docs}/zookeeper.stackable.tech/zookeeperznode/v1alpha1/[ZookeeperZnode {external-link-icon}^] CustomResource. 57 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/partials/nav.adoc: -------------------------------------------------------------------------------- 1 | * xref:zookeeper:getting_started/index.adoc[] 2 | ** xref:zookeeper:getting_started/installation.adoc[] 3 | ** xref:zookeeper:getting_started/first_steps.adoc[] 4 | * Concepts 5 | ** xref:zookeeper:znodes.adoc[] 6 | * xref:zookeeper:usage_guide/index.adoc[] 7 | ** xref:zookeeper:usage_guide/listenerclass.adoc[] 8 | ** xref:zookeeper:usage_guide/encryption.adoc[] 9 | ** xref:zookeeper:usage_guide/authentication.adoc[] 10 | ** xref:zookeeper:usage_guide/resource_configuration.adoc[] 11 | ** xref:zookeeper:usage_guide/monitoring.adoc[] 12 | ** xref:zookeeper:usage_guide/log_aggregation.adoc[] 13 | ** xref:zookeeper:usage_guide/using_multiple_role_groups.adoc[] 14 | ** xref:zookeeper:usage_guide/isolating_clients_with_znodes.adoc[] 15 | ** xref:zookeeper:usage_guide/overrides.adoc[] 16 | ** xref:zookeeper:usage_guide/operations/index.adoc[] 17 | *** xref:zookeeper:usage_guide/operations/cluster-operations.adoc[] 18 | *** xref:zookeeper:usage_guide/operations/pod-placement.adoc[] 19 | *** xref:zookeeper:usage_guide/operations/pod-disruptions.adoc[] 20 | *** xref:zookeeper:usage_guide/operations/graceful-shutdown.adoc[] 21 | * xref:zookeeper:reference/index.adoc[] 22 | ** xref:zookeeper:reference/crds.adoc[] 23 | *** {crd-docs}/zookeeper.stackable.tech/zookeepercluster/v1alpha1/[ZookeeperCluster {external-link-icon}^] 24 | *** {crd-docs}/zookeeper.stackable.tech/zookeeperznode/v1alpha1/[ZookeeperZnode {external-link-icon}^] 25 | ** xref:zookeeper:reference/discovery.adoc[] 26 | ** xref:zookeeper:reference/commandline-parameters.adoc[] 27 | ** xref:zookeeper:reference/environment-variables.adoc[] 28 | -------------------------------------------------------------------------------- /docs/modules/zookeeper/partials/supported-versions.adoc: -------------------------------------------------------------------------------- 1 | // The version ranges supported by Zookeeper-Operator 2 | // This is a separate file, since it is used by both the direct ZooKeeper documentation, and the overarching 3 | // Stackable Platform documentation. 
4 | 5 | - 3.9.3 (LTS) 6 | -------------------------------------------------------------------------------- /docs/templating_vars.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | helm: 3 | repo_name: sdp-charts 4 | repo_url: oci.stackable.tech 5 | versions: 6 | commons: 0.0.0-dev 7 | secret: 0.0.0-dev 8 | listener: 0.0.0-dev 9 | zookeeper: 0.0.0-dev 10 | -------------------------------------------------------------------------------- /examples/simple-zookeeper-tls-cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperCluster 4 | metadata: 5 | name: simple-zk 6 | spec: 7 | image: 8 | productVersion: 3.9.3 9 | clusterConfig: 10 | authentication: 11 | - authenticationClass: zk-client-tls 12 | tls: 13 | serverSecretClass: tls 14 | quorumSecretClass: tls 15 | servers: 16 | roleGroups: 17 | default: 18 | replicas: 3 19 | --- 20 | apiVersion: authentication.stackable.tech/v1alpha1 21 | kind: AuthenticationClass 22 | metadata: 23 | name: zk-client-tls 24 | spec: 25 | provider: 26 | tls: 27 | clientCertSecretClass: zk-client-auth-secret 28 | --- 29 | apiVersion: secrets.stackable.tech/v1alpha1 30 | kind: SecretClass 31 | metadata: 32 | name: zk-client-auth-secret 33 | spec: 34 | backend: 35 | autoTls: 36 | ca: 37 | secret: 38 | name: secret-provisioner-tls-zk-client-ca 39 | namespace: default 40 | autoGenerate: true 41 | --- 42 | apiVersion: zookeeper.stackable.tech/v1alpha1 43 | kind: ZookeeperZnode 44 | metadata: 45 | name: simple-znode 46 | spec: 47 | clusterRef: 48 | name: simple-zk 49 | # Optional when ZookeeperZnode is in the same Namespace as the ZookeeperCluster 50 | # namespace: default 51 | -------------------------------------------------------------------------------- /nix/README.md: -------------------------------------------------------------------------------- 1 | 5 | 6 | # Updating nix dependencies 7 | 8 | ## Run the following for an operator 9 | 10 | > [!NOTE] 11 | > We track the `master` branch of crate2nix as that is relatively up to date, but the releases are infrequent. 12 | 13 | ```shell 14 | niv update crate2nix 15 | niv update nixpkgs 16 | niv update beku.py -b X.Y.Z # Using the release tag 17 | ``` 18 | 19 | ### Test 20 | 21 | - Run make `regenerate-nix` to ensure crate2nix works 22 | - Run a smoke test to ensure beku.py works. 23 | - Run `make run-dev` to ensure nixpkgs are fine. 
24 | 25 | ## Update operator-templating 26 | 27 | Do the same as above, but from `template/` 28 | -------------------------------------------------------------------------------- /nix/meta.json: -------------------------------------------------------------------------------- 1 | {"operator": {"name": "zookeeper-operator", "pretty_string": "Apache ZooKeeper", "product_string": "zookeeper", "url": "stackabletech/zookeeper-operator.git"}} 2 | -------------------------------------------------------------------------------- /nix/sources.json: -------------------------------------------------------------------------------- 1 | { 2 | "beku.py": { 3 | "branch": "0.0.10", 4 | "description": "Test suite expander for Stackable Kuttl tests.", 5 | "homepage": null, 6 | "owner": "stackabletech", 7 | "repo": "beku.py", 8 | "rev": "fc75202a38529a4ac6776dd8a5dfee278d927f58", 9 | "sha256": "152yary0p11h87yabv74jnwkghsal7lx16az0qlzrzdrs6n5v8id", 10 | "type": "tarball", 11 | "url": "https://github.com/stackabletech/beku.py/archive/fc75202a38529a4ac6776dd8a5dfee278d927f58.tar.gz", 12 | "url_template": "https://github.com///archive/.tar.gz" 13 | }, 14 | "crate2nix": { 15 | "branch": "master", 16 | "description": "nix build file generator for rust crates", 17 | "homepage": "", 18 | "owner": "kolloch", 19 | "repo": "crate2nix", 20 | "rev": "be31feae9a82c225c0fd1bdf978565dc452a483a", 21 | "sha256": "14d0ymlrwk7dynv35qcw4xn0dylfpwjmf6f8znflbk2l6fk23l12", 22 | "type": "tarball", 23 | "url": "https://github.com/kolloch/crate2nix/archive/be31feae9a82c225c0fd1bdf978565dc452a483a.tar.gz", 24 | "url_template": "https://github.com///archive/.tar.gz" 25 | }, 26 | "nixpkgs": { 27 | "branch": "nixpkgs-unstable", 28 | "description": "Nix Packages collection", 29 | "homepage": "", 30 | "owner": "NixOS", 31 | "repo": "nixpkgs", 32 | "rev": "b1bebd0fe266bbd1820019612ead889e96a8fa2d", 33 | "sha256": "0fl2dji5whjydbxby9b7kqyqx9m4k44p72x1q28kfnx5m67nyqij", 34 | "type": "tarball", 35 | "url": "https://github.com/NixOS/nixpkgs/archive/b1bebd0fe266bbd1820019612ead889e96a8fa2d.tar.gz", 36 | "url_template": "https://github.com///archive/.tar.gz" 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "local>stackabletech/.github:renovate-config" 5 | ], 6 | "ignorePaths": [".github/workflows/build.yml", ".github/workflows/general_daily_security.yml", ".github/workflows/integration-test.yml", ".github/workflows/pr_pre-commit.yaml"] 7 | } 8 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | # DO NOT EDIT, this file is generated by operator-templating 2 | [toolchain] 3 | channel = "1.85.0" 4 | profile = "default" 5 | -------------------------------------------------------------------------------- /rust/operator-binary/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "stackable-zookeeper-operator" 3 | description = "Stackable Operator for Apache ZooKeeper" 4 | version.workspace = true 5 | authors.workspace = true 6 | license.workspace = true 7 | edition.workspace = true 8 | repository.workspace = true 9 | publish = false 10 | build = "build.rs" 11 | 12 | [dependencies] 13 | product-config.workspace = true 
14 | stackable-operator.workspace = true 15 | 16 | anyhow.workspace = true 17 | clap.workspace = true 18 | const_format.workspace = true 19 | fnv.workspace = true 20 | futures.workspace = true 21 | indoc.workspace = true 22 | pin-project.workspace = true 23 | semver.workspace = true 24 | serde.workspace = true 25 | serde_json.workspace = true 26 | snafu.workspace = true 27 | strum.workspace = true 28 | tokio-zookeeper.workspace = true 29 | tokio.workspace = true 30 | tracing.workspace = true 31 | 32 | [dev-dependencies] 33 | serde_yaml.workspace = true 34 | 35 | [build-dependencies] 36 | built.workspace = true 37 | -------------------------------------------------------------------------------- /rust/operator-binary/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | built::write_built_file().unwrap(); 3 | } 4 | -------------------------------------------------------------------------------- /rust/operator-binary/src/command.rs: -------------------------------------------------------------------------------- 1 | use crate::crd::{STACKABLE_CONFIG_DIR, STACKABLE_DATA_DIR, STACKABLE_RW_CONFIG_DIR}; 2 | 3 | pub fn create_init_container_command_args() -> Vec { 4 | vec![ 5 | // copy config files to a writeable empty folder in order to set key and 6 | // truststore passwords in the init container via script 7 | format!( 8 | "echo copying {conf} to {rw_conf}", 9 | conf = STACKABLE_CONFIG_DIR, 10 | rw_conf = STACKABLE_RW_CONFIG_DIR 11 | ), 12 | format!( 13 | "cp -RL {conf}/* {rw_conf}", 14 | conf = STACKABLE_CONFIG_DIR, 15 | rw_conf = STACKABLE_RW_CONFIG_DIR 16 | ), 17 | format!( 18 | "expr $MYID_OFFSET + $(echo $POD_NAME | sed 's/.*-//') > {dir}/myid", 19 | dir = STACKABLE_DATA_DIR 20 | ), 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /rust/operator-binary/src/config/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod jvm; 2 | -------------------------------------------------------------------------------- /rust/operator-binary/src/crd/affinity.rs: -------------------------------------------------------------------------------- 1 | use stackable_operator::{ 2 | commons::affinity::{StackableAffinityFragment, affinity_between_role_pods}, 3 | k8s_openapi::api::core::v1::PodAntiAffinity, 4 | }; 5 | 6 | use crate::crd::{APP_NAME, ZookeeperRole}; 7 | 8 | pub fn get_affinity(cluster_name: &str, role: &ZookeeperRole) -> StackableAffinityFragment { 9 | let affinity_between_role_pods = 10 | affinity_between_role_pods(APP_NAME, cluster_name, &role.to_string(), 70); 11 | 12 | StackableAffinityFragment { 13 | pod_affinity: None, 14 | pod_anti_affinity: Some(PodAntiAffinity { 15 | preferred_during_scheduling_ignored_during_execution: Some(vec![ 16 | affinity_between_role_pods, 17 | ]), 18 | required_during_scheduling_ignored_during_execution: None, 19 | }), 20 | node_affinity: None, 21 | node_selector: None, 22 | } 23 | } 24 | 25 | #[cfg(test)] 26 | mod tests { 27 | 28 | use std::collections::BTreeMap; 29 | 30 | use stackable_operator::{ 31 | commons::affinity::StackableAffinity, 32 | k8s_openapi::{ 33 | api::core::v1::{PodAffinityTerm, PodAntiAffinity, WeightedPodAffinityTerm}, 34 | apimachinery::pkg::apis::meta::v1::LabelSelector, 35 | }, 36 | kube::runtime::reflector::ObjectRef, 37 | role_utils::RoleGroupRef, 38 | }; 39 | 40 | use crate::crd::{affinity::ZookeeperRole, v1alpha1}; 41 | 42 | #[test] 43 | fn test_affinity_defaults() { 44 | let input = 
r#" 45 | apiVersion: zookeeper.stackable.tech/v1alpha1 46 | kind: ZookeeperCluster 47 | metadata: 48 | name: simple-zk 49 | spec: 50 | image: 51 | productVersion: 3.9.3 52 | clusterConfig: 53 | authentication: 54 | - authenticationClass: zk-client-tls 55 | tls: 56 | serverSecretClass: tls 57 | quorumSecretClass: tls 58 | servers: 59 | roleGroups: 60 | default: 61 | replicas: 3 62 | "#; 63 | let zk: v1alpha1::ZookeeperCluster = 64 | serde_yaml::from_str(input).expect("illegal test input"); 65 | 66 | let rolegroup_ref = RoleGroupRef { 67 | cluster: ObjectRef::from_obj(&zk), 68 | role: ZookeeperRole::Server.to_string(), 69 | role_group: "default".to_string(), 70 | }; 71 | 72 | let expected: StackableAffinity = StackableAffinity { 73 | pod_affinity: None, 74 | pod_anti_affinity: Some(PodAntiAffinity { 75 | required_during_scheduling_ignored_during_execution: None, 76 | preferred_during_scheduling_ignored_during_execution: Some(vec![ 77 | WeightedPodAffinityTerm { 78 | pod_affinity_term: PodAffinityTerm { 79 | label_selector: Some(LabelSelector { 80 | match_expressions: None, 81 | match_labels: Some(BTreeMap::from([ 82 | ( 83 | "app.kubernetes.io/name".to_string(), 84 | "zookeeper".to_string(), 85 | ), 86 | ( 87 | "app.kubernetes.io/instance".to_string(), 88 | "simple-zk".to_string(), 89 | ), 90 | ( 91 | "app.kubernetes.io/component".to_string(), 92 | "server".to_string(), 93 | ), 94 | ])), 95 | }), 96 | namespace_selector: None, 97 | namespaces: None, 98 | topology_key: "kubernetes.io/hostname".to_string(), 99 | // NOTE (@Techassi): Both these fields were added in 100 | // Kubernetes 1.30, and cannot be used for now. 101 | match_label_keys: None, 102 | mismatch_label_keys: None, 103 | }, 104 | weight: 70, 105 | }, 106 | ]), 107 | }), 108 | 109 | node_affinity: None, 110 | node_selector: None, 111 | }; 112 | 113 | let affinity = zk 114 | .merged_config(&ZookeeperRole::Server, &rolegroup_ref) 115 | .unwrap() 116 | .affinity; 117 | 118 | assert_eq!(affinity, expected); 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /rust/operator-binary/src/crd/tls.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use stackable_operator::{ 3 | schemars::{self, JsonSchema}, 4 | versioned::versioned, 5 | }; 6 | 7 | const TLS_DEFAULT_SECRET_CLASS: &str = "tls"; 8 | 9 | #[versioned(version(name = "v1alpha1"))] 10 | pub mod versioned { 11 | #[derive(Clone, Deserialize, Debug, Eq, JsonSchema, PartialEq, Serialize)] 12 | #[serde(rename_all = "camelCase")] 13 | pub struct ZookeeperTls { 14 | /// The [SecretClass](DOCS_BASE_URL_PLACEHOLDER/secret-operator/secretclass) to use for 15 | /// internal quorum communication. Use mutual verification between Zookeeper Nodes 16 | /// (mandatory). This setting controls: 17 | /// - Which cert the servers should use to authenticate themselves against other servers 18 | /// - Which ca.crt to use when validating the other server 19 | /// 20 | /// Defaults to `tls` 21 | #[serde(default = "quorum_tls_default")] 22 | pub quorum_secret_class: String, 23 | 24 | /// The [SecretClass](DOCS_BASE_URL_PLACEHOLDER/secret-operator/secretclass) to use for 25 | /// client connections. This setting controls: 26 | /// - If TLS encryption is used at all 27 | /// - Which cert the servers should use to authenticate themselves against the client 28 | /// 29 | /// Defaults to `tls`. 
30 | #[serde( 31 | default = "server_tls_default", 32 | skip_serializing_if = "Option::is_none" 33 | )] 34 | pub server_secret_class: Option, 35 | } 36 | } 37 | 38 | /// Default TLS settings. Internal and server communication default to "tls" secret class. 39 | pub fn default_zookeeper_tls() -> Option { 40 | Some(v1alpha1::ZookeeperTls { 41 | quorum_secret_class: quorum_tls_default(), 42 | server_secret_class: server_tls_default(), 43 | }) 44 | } 45 | 46 | /// Helper methods to provide defaults in the CRDs and tests 47 | pub fn server_tls_default() -> Option { 48 | Some(TLS_DEFAULT_SECRET_CLASS.into()) 49 | } 50 | 51 | /// Helper methods to provide defaults in the CRDs and tests 52 | pub fn quorum_tls_default() -> String { 53 | TLS_DEFAULT_SECRET_CLASS.into() 54 | } 55 | -------------------------------------------------------------------------------- /rust/operator-binary/src/operations/graceful_shutdown.rs: -------------------------------------------------------------------------------- 1 | use snafu::{ResultExt, Snafu}; 2 | use stackable_operator::builder::pod::PodBuilder; 3 | 4 | use crate::crd::v1alpha1; 5 | 6 | #[derive(Debug, Snafu)] 7 | pub enum Error { 8 | #[snafu(display("Failed to set terminationGracePeriod"))] 9 | SetTerminationGracePeriod { 10 | source: stackable_operator::builder::pod::Error, 11 | }, 12 | } 13 | 14 | pub fn add_graceful_shutdown_config( 15 | merged_config: &v1alpha1::ZookeeperConfig, 16 | pod_builder: &mut PodBuilder, 17 | ) -> Result<(), Error> { 18 | // This must be always set by the merge mechanism, as we provide a default value, 19 | // users can not disable graceful shutdown. 20 | if let Some(graceful_shutdown_timeout) = merged_config.graceful_shutdown_timeout { 21 | pod_builder 22 | .termination_grace_period(&graceful_shutdown_timeout) 23 | .context(SetTerminationGracePeriodSnafu)?; 24 | } 25 | 26 | Ok(()) 27 | } 28 | -------------------------------------------------------------------------------- /rust/operator-binary/src/operations/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod graceful_shutdown; 2 | pub mod pdb; 3 | -------------------------------------------------------------------------------- /rust/operator-binary/src/operations/pdb.rs: -------------------------------------------------------------------------------- 1 | use snafu::{ResultExt, Snafu}; 2 | use stackable_operator::{ 3 | builder::pdb::PodDisruptionBudgetBuilder, client::Client, cluster_resources::ClusterResources, 4 | commons::pdb::PdbConfig, kube::ResourceExt, 5 | }; 6 | 7 | use crate::{ 8 | crd::{APP_NAME, OPERATOR_NAME, ZookeeperRole, v1alpha1}, 9 | zk_controller::ZK_CONTROLLER_NAME, 10 | }; 11 | 12 | #[derive(Snafu, Debug)] 13 | pub enum Error { 14 | #[snafu(display("Cannot create PodDisruptionBudget for role [{role}]"))] 15 | CreatePdb { 16 | source: stackable_operator::builder::pdb::Error, 17 | role: String, 18 | }, 19 | #[snafu(display("Cannot apply PodDisruptionBudget [{name}]"))] 20 | ApplyPdb { 21 | source: stackable_operator::cluster_resources::Error, 22 | name: String, 23 | }, 24 | } 25 | 26 | pub async fn add_pdbs( 27 | pdb: &PdbConfig, 28 | zookeeper: &v1alpha1::ZookeeperCluster, 29 | role: &ZookeeperRole, 30 | client: &Client, 31 | cluster_resources: &mut ClusterResources, 32 | ) -> Result<(), Error> { 33 | if !pdb.enabled { 34 | return Ok(()); 35 | } 36 | let max_unavailable = pdb.max_unavailable.unwrap_or(match role { 37 | ZookeeperRole::Server => max_unavailable_servers(), 38 | }); 39 | let pdb = 
PodDisruptionBudgetBuilder::new_with_role( 40 | zookeeper, 41 | APP_NAME, 42 | &role.to_string(), 43 | OPERATOR_NAME, 44 | ZK_CONTROLLER_NAME, 45 | ) 46 | .with_context(|_| CreatePdbSnafu { 47 | role: role.to_string(), 48 | })? 49 | .with_max_unavailable(max_unavailable) 50 | .build(); 51 | let pdb_name = pdb.name_any(); 52 | cluster_resources 53 | .add(client, pdb) 54 | .await 55 | .with_context(|_| ApplyPdbSnafu { name: pdb_name })?; 56 | 57 | Ok(()) 58 | } 59 | 60 | fn max_unavailable_servers() -> u16 { 61 | 1 62 | } 63 | -------------------------------------------------------------------------------- /rust/operator-binary/src/product_logging.rs: -------------------------------------------------------------------------------- 1 | use snafu::{ResultExt, Snafu}; 2 | use stackable_operator::{ 3 | builder::configmap::ConfigMapBuilder, 4 | memory::BinaryMultiple, 5 | product_logging::{ 6 | self, 7 | spec::{ContainerLogConfig, ContainerLogConfigChoice}, 8 | }, 9 | role_utils::RoleGroupRef, 10 | }; 11 | 12 | use crate::crd::{ 13 | LOG4J_CONFIG_FILE, LOGBACK_CONFIG_FILE, LoggingFramework, MAX_ZK_LOG_FILES_SIZE, 14 | STACKABLE_LOG_DIR, ZOOKEEPER_LOG_FILE, ZookeeperRole, v1alpha1, 15 | }; 16 | 17 | #[derive(Snafu, Debug)] 18 | pub enum Error { 19 | #[snafu(display("object has no namespace"))] 20 | ObjectHasNoNamespace, 21 | 22 | #[snafu(display("failed to retrieve the ConfigMap {cm_name}"))] 23 | ConfigMapNotFound { 24 | source: stackable_operator::client::Error, 25 | cm_name: String, 26 | }, 27 | 28 | #[snafu(display("failed to retrieve the entry {entry} for ConfigMap {cm_name}"))] 29 | MissingConfigMapEntry { 30 | entry: &'static str, 31 | cm_name: String, 32 | }, 33 | 34 | #[snafu(display("crd validation failure"))] 35 | CrdValidationFailure { source: crate::crd::Error }, 36 | } 37 | 38 | type Result = std::result::Result; 39 | 40 | const CONSOLE_CONVERSION_PATTERN: &str = "%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n"; 41 | 42 | /// Extend the role group ConfigMap with logging and Vector configurations 43 | pub fn extend_role_group_config_map( 44 | zk: &v1alpha1::ZookeeperCluster, 45 | role: ZookeeperRole, 46 | rolegroup: &RoleGroupRef, 47 | cm_builder: &mut ConfigMapBuilder, 48 | ) -> Result<()> { 49 | let logging = zk 50 | .logging(&role, rolegroup) 51 | .context(CrdValidationFailureSnafu)?; 52 | 53 | if let Some(ContainerLogConfig { 54 | choice: Some(ContainerLogConfigChoice::Automatic(log_config)), 55 | }) = logging.containers.get(&v1alpha1::Container::Zookeeper) 56 | { 57 | match zk.logging_framework() { 58 | LoggingFramework::LOG4J => { 59 | cm_builder.add_data( 60 | LOG4J_CONFIG_FILE, 61 | product_logging::framework::create_log4j_config( 62 | &format!("{STACKABLE_LOG_DIR}/zookeeper"), 63 | ZOOKEEPER_LOG_FILE, 64 | MAX_ZK_LOG_FILES_SIZE 65 | .scale_to(BinaryMultiple::Mebi) 66 | .floor() 67 | .value as u32, 68 | CONSOLE_CONVERSION_PATTERN, 69 | log_config, 70 | ), 71 | ); 72 | } 73 | LoggingFramework::LOGBACK => { 74 | cm_builder.add_data( 75 | LOGBACK_CONFIG_FILE, 76 | product_logging::framework::create_logback_config( 77 | &format!("{STACKABLE_LOG_DIR}/zookeeper"), 78 | ZOOKEEPER_LOG_FILE, 79 | MAX_ZK_LOG_FILES_SIZE 80 | .scale_to(BinaryMultiple::Mebi) 81 | .floor() 82 | .value as u32, 83 | CONSOLE_CONVERSION_PATTERN, 84 | log_config, 85 | None, 86 | ), 87 | ); 88 | } 89 | } 90 | } 91 | 92 | let vector_log_config = if let Some(ContainerLogConfig { 93 | choice: Some(ContainerLogConfigChoice::Automatic(log_config)), 94 | }) = 
logging.containers.get(&v1alpha1::Container::Vector) 95 | { 96 | Some(log_config) 97 | } else { 98 | None 99 | }; 100 | 101 | if logging.enable_vector_agent { 102 | cm_builder.add_data( 103 | product_logging::framework::VECTOR_CONFIG_FILE, 104 | product_logging::framework::create_vector_config(rolegroup, vector_log_config), 105 | ); 106 | } 107 | 108 | Ok(()) 109 | } 110 | -------------------------------------------------------------------------------- /rust/operator-binary/src/utils.rs: -------------------------------------------------------------------------------- 1 | use stackable_operator::kvp::ObjectLabels; 2 | 3 | use crate::{APP_NAME, OPERATOR_NAME}; 4 | 5 | /// Creates recommended `ObjectLabels` to be used in deployed resources 6 | pub fn build_recommended_labels<'a, T>( 7 | owner: &'a T, 8 | controller_name: &'a str, 9 | app_version: &'a str, 10 | role: &'a str, 11 | role_group: &'a str, 12 | ) -> ObjectLabels<'a, T> { 13 | ObjectLabels { 14 | owner, 15 | app_name: APP_NAME, 16 | app_version, 17 | operator_name: OPERATOR_NAME, 18 | controller_name, 19 | role, 20 | role_group, 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | # This file includes unstable features, so you need to run "cargo +nightly fmt" to format your code. 2 | # It's also ok to use the stable toolchain by simple running "cargo fmt", but using the nigthly formatter is prefered. 3 | 4 | # https://doc.rust-lang.org/nightly/edition-guide/rust-2024/rustfmt-style-edition.html 5 | style_edition = "2024" 6 | imports_granularity = "Crate" 7 | group_imports = "StdExternalCrate" 8 | reorder_impl_items = true 9 | use_field_init_shorthand = true 10 | -------------------------------------------------------------------------------- /scripts/docs_templating.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # Reads a file with variables to insert into templates, and templates all .*.j2 files 5 | # in the 'docs' directory. 6 | # 7 | # dependencies 8 | # pip install jinja2-cli 9 | 10 | docs_dir="$(dirname "$0")/../docs" 11 | templating_vars_file="$docs_dir/templating_vars.yaml" 12 | 13 | # Check if files need templating 14 | if [[ -z $(find "$docs_dir" -name '*.j2') ]]; 15 | then 16 | echo "No files need templating, exiting." 17 | exit 18 | fi 19 | 20 | # Check if jinja2 is there 21 | if ! command -v jinja2 &> /dev/null 22 | then 23 | echo "jinja2 could not be found. Use 'pip install jinja2-cli' to install it." 24 | exit 1 25 | fi 26 | 27 | # Check if templating vars file exists 28 | if [[ ! -f "$templating_vars_file" ]]; 29 | then 30 | echo "$templating_vars_file does not exist, cannot start templating." 31 | fi 32 | 33 | find "$docs_dir" -name '*.j2' | 34 | while read -r file 35 | do 36 | new_file_name=${file%.j2} # Remove .j2 suffix 37 | echo "templating $new_file_name" 38 | jinja2 "$file" "$templating_vars_file" -o "$new_file_name" 39 | done 40 | 41 | echo "done" 42 | -------------------------------------------------------------------------------- /scripts/ensure_one_trailing_newline.py: -------------------------------------------------------------------------------- 1 | """ 2 | Given the location of a file, trims all trailing blank lines and 3 | places a single one. Used as post-processing step for README rendering. 
4 | """ 5 | 6 | import re 7 | import unittest 8 | 9 | BLANK_LINE_REGEX_PATTERN = r"^\s*$" 10 | 11 | 12 | def has_trailing_newline(line): 13 | return line[-1:] == "\n" 14 | 15 | 16 | def process_lines(lines): 17 | trim_count = 0 18 | # trim trailing blank lines 19 | for line in lines[::-1]: 20 | if re.match(BLANK_LINE_REGEX_PATTERN, line): 21 | trim_count += 1 22 | else: 23 | break 24 | 25 | cutoff_index = len(lines) - trim_count 26 | new_lines = lines[:cutoff_index] 27 | 28 | # maybe add a newline character to the last sensible line 29 | if not has_trailing_newline(new_lines[-1]): 30 | new_lines[-1] = new_lines[-1] + "\n" 31 | 32 | # add a trailing blank line without newline 33 | new_lines.append("") 34 | return new_lines 35 | 36 | 37 | class TestCoreMethods(unittest.TestCase): 38 | def test_trailing_new_line(self): 39 | self.assertTrue(has_trailing_newline("something\n")) 40 | self.assertTrue(has_trailing_newline("\n")) 41 | self.assertFalse(has_trailing_newline("nope")) 42 | 43 | def test_trailing_real_line(self): 44 | lines = ["bla\n", "useful"] 45 | processed_lines = process_lines(lines) 46 | self.assertEqual(len(processed_lines), 3) 47 | self.assertTrue(has_trailing_newline(processed_lines[0])) 48 | self.assertTrue(has_trailing_newline(processed_lines[1])) 49 | self.assertFalse(has_trailing_newline(processed_lines[2])) 50 | 51 | def test_lots_of_empties(self): 52 | lines = ["bla\n", "\n", "\n", "\n", "\n"] 53 | processed_lines = process_lines(lines) 54 | self.assertEqual(len(processed_lines), 2) 55 | self.assertEqual(processed_lines[-1], "") 56 | 57 | def test_one_trailing_new_line(self): 58 | lines = ["bla\n", "\n"] 59 | processed_lines = process_lines(lines) 60 | self.assertEqual(len(processed_lines), 2) 61 | self.assertEqual(processed_lines[-1], "") 62 | 63 | def test_one_trailing_blank_line(self): 64 | lines = ["bla\n", ""] 65 | processed_lines = process_lines(lines) 66 | self.assertEqual(len(processed_lines), 2) 67 | self.assertEqual(processed_lines[-1], "") 68 | 69 | 70 | if __name__ == "__main__": 71 | # to run tests for this script: 72 | # python3 -m unittest ensure_one_trailing_newline.py 73 | 74 | import sys 75 | 76 | if len(sys.argv) != 2: 77 | print("Usage: {} filename_to_trim".format(sys.argv[0])) 78 | exit(1) 79 | 80 | file_name = sys.argv[1] 81 | 82 | lines = [] 83 | with open(file_name, "r") as f: 84 | lines = f.readlines() 85 | 86 | lines = process_lines(lines) 87 | 88 | with open(file_name, "w") as f: 89 | f.write("".join(lines)) 90 | -------------------------------------------------------------------------------- /scripts/generate-manifests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script reads a Helm chart from deploy/helm/zookeeper-operator and 3 | # generates manifest files into deploy/manifestss 4 | set -e 5 | 6 | tmp=$(mktemp -d ./manifests-XXXXX) 7 | 8 | helm template --output-dir "$tmp" \ 9 | --include-crds \ 10 | --name-template zookeeper-operator \ 11 | deploy/helm/zookeeper-operator 12 | 13 | for file in "$tmp"/zookeeper-operator/*/*; do 14 | yq eval -i 'del(.. | select(has("app.kubernetes.io/managed-by")) | ."app.kubernetes.io/managed-by")' /dev/stdin < "$file" 15 | yq eval -i 'del(.. 
| select(has("helm.sh/chart")) | ."helm.sh/chart")' /dev/stdin < "$file" 16 | sed -i '/# Source: .*/d' "$file" 17 | done 18 | 19 | cp -r "$tmp"/zookeeper-operator/*/* deploy/manifests/ 20 | 21 | rm -rf "$tmp" 22 | -------------------------------------------------------------------------------- /scripts/render_readme.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # Check if jinja2 is there 5 | if ! command -v jinja2 &> /dev/null 6 | then 7 | echo "jinja2 could not be found. Use 'pip install jinja2-cli' to install it." 8 | exit 1 9 | fi 10 | 11 | SCRIPT_DIR=$(dirname "$0") 12 | cd "$SCRIPT_DIR/../.readme" 13 | jinja2 README.md.j2 -o ../README.md 14 | cd .. 15 | 16 | python3 scripts/ensure_one_trailing_newline.py README.md 17 | -------------------------------------------------------------------------------- /scripts/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ./scripts/run-tests "$@" 4 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | let 2 | self = import ./. {}; 3 | inherit (self) sources pkgs meta; 4 | 5 | beku = pkgs.callPackage (sources."beku.py" + "/beku.nix") {}; 6 | cargoDependencySetOfCrate = crate: [ crate ] ++ pkgs.lib.concatMap cargoDependencySetOfCrate (crate.dependencies ++ crate.buildDependencies); 7 | cargoDependencySet = pkgs.lib.unique (pkgs.lib.flatten (pkgs.lib.mapAttrsToList (crateName: crate: cargoDependencySetOfCrate crate.build) self.cargo.workspaceMembers)); 8 | in pkgs.mkShell rec { 9 | name = meta.operator.name; 10 | 11 | packages = with pkgs; [ 12 | ## cargo et-al 13 | rustup # this breaks pkg-config if it is in the nativeBuildInputs 14 | cargo-udeps 15 | 16 | ## Extra dependencies for use in a pure env (nix-shell --pure) 17 | ## These are mosuly useful for maintainers of this shell.nix 18 | ## to ensure all the dependencies are caught. 19 | # cacert 20 | # vim nvim nano 21 | ]; 22 | 23 | # derivation runtime dependencies 24 | buildInputs = pkgs.lib.concatMap (crate: crate.buildInputs) cargoDependencySet; 25 | 26 | # build time dependencies 27 | nativeBuildInputs = pkgs.lib.concatMap (crate: crate.nativeBuildInputs) cargoDependencySet ++ (with pkgs; [ 28 | beku 29 | docker 30 | gettext # for the proper envsubst 31 | git 32 | jq 33 | kind 34 | kubectl 35 | kubernetes-helm 36 | kuttl 37 | nix # this is implied, but needed in the pure env 38 | # tilt already defined in default.nix 39 | which 40 | yq-go 41 | ]); 42 | 43 | LIBCLANG_PATH = "${pkgs.libclang.lib}/lib"; 44 | BINDGEN_EXTRA_CLANG_ARGS = "-I${pkgs.glibc.dev}/include -I${pkgs.clang}/resource-root/include"; 45 | } 46 | -------------------------------------------------------------------------------- /tests/README-templating.md: -------------------------------------------------------------------------------- 1 | # Test Scenario Templating 2 | 3 | ## Introduction 4 | 5 | The tests in this directory are designed to be expanded into multiple test scenarios based on test dimensions that can be defined in a dimensions file. 6 | 7 | ## Defining Test Dimensions 8 | 9 | The dimensions file currently has to be named `test-definition.yaml` and reside in the same directory as the `kuttl-test.yaml.jinja2` file. 10 | 11 | An example of a minimal folder structure will be given further down in this file. 
12 | 13 | An example of the content for the test definition file is shown here: 14 | 15 | ````yaml 16 | dimensions: 17 | - name: spark 18 | values: 19 | - 3.2.1 20 | - 3.2.2 21 | - 3.2.3 22 | - name: hadoop 23 | values: 24 | - 3.1.0 25 | - 3.2.0 26 | - name: aws 27 | values: 28 | - abc 29 | - xyz 30 | tests: 31 | - name: spark-pi-public-s3 32 | dimensions: 33 | - spark 34 | - hadoop 35 | ```` 36 | 37 | This file defines three dimensions for this test to be considered. 38 | It also defines one test case named _spark-pi-public-s3_ and the dimensions that this test case should use. 39 | In this example the test case uses only two of the three dimensions defined, so a run of this test case would be expanded into the following test structure: 40 | 41 | ````text 42 | └── spark-pi-public-s3 43 | ├── spark-3.2.1_hadoop-3.1.0 44 | ├── spark-3.2.1_hadoop-3.2.0 45 | ├── spark-3.2.2_hadoop-3.1.0 46 | ├── spark-3.2.2_hadoop-3.2.0 47 | ├── spark-3.2.3_hadoop-3.1.0 48 | └── spark-3.2.3_hadoop-3.2.0 49 | ```` 50 | 51 | The name of a test case defined under `tests` in this file has to refer back to a directory in the `templates/kuttl` directory, which will be used to create the test scenarios. 52 | 53 | Given the test-definition.yaml example shown above, the following folder structure would create those test scenarios. 54 | 55 | ````text 56 | tests 57 | ├── kuttl-test.yaml.jinja2 58 | ├── templates 59 | │ └── kuttl 60 | │ └── spark-pi-public-s3 61 | └── test-definition.yaml 62 | ```` 63 | 64 | The `kuttl-test.yaml.jinja2` cannot currently be edited, as it comes from the operator templating and any changes would be overwritten again. 65 | This should be fairly easy to solve and we can look at this as soon as it becomes necessary. 66 | 67 | ## Using 68 | 69 | ### Requirements 70 | 71 | To run tests locally you need the following things installed: 72 | 73 | - python3 (version >= 3.9) 74 | - the pyyaml library 75 | - jq 76 | 77 | ### Running 78 | 79 | To run tests, please execute the following command from the root of the operator repository: 80 | 81 | `scripts/run_tests.sh --parallel 2` 82 | 83 | This will expand the test templates into all defined test scenarios and execute kuttl to test these scenarios. Any arguments are passed on to `kuttl`. 84 | -------------------------------------------------------------------------------- /tests/infrastructure.yaml: -------------------------------------------------------------------------------- 1 | instance-size: medium 2 | disk: 100 3 | nodes: 8 4 | parallelism: 1 5 | -------------------------------------------------------------------------------- /tests/kuttl-test.yaml.jinja2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestSuite 4 | testDirs: 5 | {% for testcase in testinput.tests %} 6 | - ./tests/{{ testcase.name }} 7 | {% endfor %} 8 | 9 | startKIND: false 10 | suppress: ["events"] 11 | parallel: 2 12 | 13 | # The timeout (in seconds) is used when namespaces are created or 14 | # deleted, and, if not overridden, in TestSteps, TestAsserts, and 15 | # Commands. If not set, the timeout is 30 seconds by default. 16 | # 17 | # The deletion of a namespace can take a while until all resources, 18 | # especially PersistentVolumeClaims, are gracefully shut down. If the 19 | # timeout is reached in the meantime, even a successful test case is 20 | # considered a failure.
21 | # 22 | # For instance, the termination grace period of the Vector aggregator in 23 | # the logging tests is set to 60 seconds. If there are log entries 24 | # that could not yet be forwarded to the external aggregator defined in 25 | # the VECTOR_AGGREGATOR environment variable, the test aggregator 26 | # uses this grace period to keep trying to forward the events. In this 27 | # case, deleting a namespace with several Pods takes about 90 seconds. 28 | timeout: 300 29 | -------------------------------------------------------------------------------- /tests/release.yaml: -------------------------------------------------------------------------------- 1 | # Contains all operators required to run the test suite. 2 | --- 3 | releases: 4 | # Do not change the name of the release as it's referenced from run-tests 5 | tests: 6 | releaseDate: 1970-01-01 7 | description: Integration test 8 | products: 9 | commons: 10 | operatorVersion: 0.0.0-dev 11 | secret: 12 | operatorVersion: 0.0.0-dev 13 | listener: 14 | operatorVersion: 0.0.0-dev 15 | zookeeper: 16 | operatorVersion: 0.0.0-dev 17 | -------------------------------------------------------------------------------- /tests/templates/.gitkeep: https://raw.githubusercontent.com/stackabletech/zookeeper-operator/a0493366550a8f6b6c41980950d10baece58597c/tests/templates/.gitkeep -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/00-patch-ns.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if test_scenario['values']['openshift'] == 'true' %} 2 | # see https://github.com/stackabletech/issues/issues/566 3 | --- 4 | apiVersion: kuttl.dev/v1beta1 5 | kind: TestStep 6 | commands: 7 | - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' 8 | timeout: 120 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/05-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 5 | --- 6 | apiVersion: v1 7 | kind: ConfigMap 8 | metadata: 9 | name: vector-aggregator-discovery 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/05-install-vector-aggregator-discovery-configmap.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: vector-aggregator-discovery 7 | data: 8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/10-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | commands: 6 | - script: kubectl -n $NAMESPACE wait --for=condition=available zookeeperclusters.zookeeper.stackable.tech/test-zk --timeout 601s 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: test-zk-server-default 12 | spec: 13 | template: 14 | spec: 15
| containers: 16 | - name: zookeeper 17 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 18 | - name: vector 19 | {% endif %} 20 | status: 21 | readyReplicas: 1 22 | replicas: 1 23 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/10-install-zookeeper.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: vector-aggregator-discovery 7 | data: 8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} 9 | {% endif %} 10 | --- 11 | apiVersion: zookeeper.stackable.tech/v1alpha1 12 | kind: ZookeeperCluster 13 | metadata: 14 | name: test-zk 15 | spec: 16 | image: 17 | {% if test_scenario['values']['zookeeper-latest'].find(",") > 0 %} 18 | custom: "{{ test_scenario['values']['zookeeper-latest'].split(',')[1] }}" 19 | productVersion: "{{ test_scenario['values']['zookeeper-latest'].split(',')[0] }}" 20 | {% else %} 21 | productVersion: "{{ test_scenario['values']['zookeeper-latest'] }}" 22 | {% endif %} 23 | pullPolicy: IfNotPresent 24 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 25 | clusterConfig: 26 | vectorAggregatorConfigMapName: vector-aggregator-discovery 27 | {% endif %} 28 | servers: 29 | config: 30 | logging: 31 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 32 | roleGroups: 33 | default: 34 | replicas: 1 35 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/20-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | commands: 6 | - script: kubectl -n $NAMESPACE wait --for=condition=stopped zookeeperclusters.zookeeper.stackable.tech/test-zk --timeout 301s 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: test-zk-server-default 12 | spec: 13 | template: 14 | spec: 15 | containers: 16 | - name: zookeeper 17 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 18 | - name: vector 19 | {% endif %} 20 | status: 21 | availableReplicas: 0 22 | replicas: 0 23 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/20-stop-zookeeper.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperCluster 4 | metadata: 5 | name: test-zk 6 | spec: 7 | image: 8 | {% if test_scenario['values']['zookeeper-latest'].find(",") > 0 %} 9 | custom: "{{ test_scenario['values']['zookeeper-latest'].split(',')[1] }}" 10 | productVersion: "{{ test_scenario['values']['zookeeper-latest'].split(',')[0] }}" 11 | {% else %} 12 | productVersion: "{{ test_scenario['values']['zookeeper-latest'] }}" 13 | {% endif %} 14 | pullPolicy: IfNotPresent 15 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 16 | clusterConfig: 17 | # Discovery ConfigMap for the Vector log aggregator 18 | vectorAggregatorConfigMapName: vector-aggregator-discovery 19 | {% endif %} 20 | clusterOperation: 21 | stopped: true 22 | servers: 23 | config: 24 | logging: 25 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 26 | roleGroups: 27 | default: 28 | replicas: 1 29 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/30-assert.yaml.j2:
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | commands: 6 | - script: kubectl -n $NAMESPACE wait --for=condition=reconciliationPaused zookeeperclusters.zookeeper.stackable.tech/test-zk --timeout 301s 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: test-zk-server-default 12 | spec: 13 | template: 14 | spec: 15 | containers: 16 | - name: zookeeper 17 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 18 | - name: vector 19 | {% endif %} 20 | status: 21 | availableReplicas: 0 22 | replicas: 0 23 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/30-pause-zookeeper.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperCluster 4 | metadata: 5 | name: test-zk 6 | spec: 7 | image: 8 | {% if test_scenario['values']['zookeeper-latest'].find(",") > 0 %} 9 | custom: "{{ test_scenario['values']['zookeeper-latest'].split(',')[1] }}" 10 | productVersion: "{{ test_scenario['values']['zookeeper-latest'].split(',')[0] }}" 11 | {% else %} 12 | productVersion: "{{ test_scenario['values']['zookeeper-latest'] }}" 13 | {% endif %} 14 | pullPolicy: IfNotPresent 15 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 16 | clusterConfig: 17 | # Discovery ConfigMap for the Vector log aggregator 18 | vectorAggregatorConfigMapName: vector-aggregator-discovery 19 | {% endif %} 20 | clusterOperation: 21 | stopped: false 22 | reconciliationPaused: true 23 | servers: 24 | config: 25 | logging: 26 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 27 | roleGroups: 28 | default: 29 | replicas: 1 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/40-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | commands: 6 | - script: kubectl -n $NAMESPACE wait --for=condition=available zookeeperclusters.zookeeper.stackable.tech/test-zk --timeout 301s 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: test-zk-server-default 12 | spec: 13 | template: 14 | spec: 15 | containers: 16 | - name: zookeeper 17 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 18 | - name: vector 19 | {% endif %} 20 | status: 21 | readyReplicas: 1 22 | replicas: 1 23 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/40-restart-zookeeper.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperCluster 4 | metadata: 5 | name: test-zk 6 | spec: 7 | image: 8 | {% if test_scenario['values']['zookeeper-latest'].find(",") > 0 %} 9 | custom: "{{ test_scenario['values']['zookeeper-latest'].split(',')[1] }}" 10 | productVersion: "{{ test_scenario['values']['zookeeper-latest'].split(',')[0] }}" 11 | {% else %} 12 | productVersion: "{{ test_scenario['values']['zookeeper-latest'] }}" 13 | {% endif %} 14 | pullPolicy: IfNotPresent 15 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 16 | clusterConfig: 17 | # Discovery ConfigMap for the Vector log aggregator 18 | vectorAggregatorConfigMapName: vector-aggregator-discovery 19 | {% endif %} 20 | clusterOperation: 21 | stopped: false 22 | reconciliationPaused: false
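# Resetting both clusterOperation flags lets the operator reconcile the cluster again and scale the servers back up (verified by 40-assert.yaml.j2).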
23 | servers: 24 | config: 25 | logging: 26 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 27 | roleGroups: 28 | default: 29 | replicas: 1 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/delete-rolegroup/00-patch-ns.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if test_scenario['values']['openshift'] == 'true' %} 2 | # see https://github.com/stackabletech/issues/issues/566 3 | --- 4 | apiVersion: kuttl.dev/v1beta1 5 | kind: TestStep 6 | commands: 7 | - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' 8 | timeout: 120 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/delete-rolegroup/05-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: test-zk-server-primary 10 | status: 11 | readyReplicas: 3 12 | replicas: 3 13 | --- 14 | apiVersion: apps/v1 15 | kind: StatefulSet 16 | metadata: 17 | name: test-zk-server-secondary 18 | status: 19 | readyReplicas: 2 20 | replicas: 2 21 | -------------------------------------------------------------------------------- /tests/templates/kuttl/delete-rolegroup/05-install-zookeeper.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: vector-aggregator-discovery 7 | data: 8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} 9 | {% endif %} 10 | --- 11 | apiVersion: zookeeper.stackable.tech/v1alpha1 12 | kind: ZookeeperCluster 13 | metadata: 14 | name: test-zk 15 | spec: 16 | image: 17 | {% if test_scenario['values']['zookeeper'].find(",") > 0 %} 18 | custom: "{{ test_scenario['values']['zookeeper'].split(',')[1] }}" 19 | productVersion: "{{ test_scenario['values']['zookeeper'].split(',')[0] }}" 20 | {% else %} 21 | productVersion: "{{ test_scenario['values']['zookeeper'] }}" 22 | {% endif %} 23 | pullPolicy: IfNotPresent 24 | clusterConfig: 25 | tls: 26 | serverSecretClass: null 27 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 28 | vectorAggregatorConfigMapName: vector-aggregator-discovery 29 | {% endif %} 30 | servers: 31 | config: 32 | logging: 33 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 34 | resources: 35 | storage: 36 | data: 37 | capacity: '1Gi' 38 | cpu: 39 | max: '500m' 40 | min: '250m' 41 | memory: 42 | limit: '0.5Gi' 43 | roleGroups: 44 | primary: 45 | replicas: 3 46 | config: 47 | myidOffset: 10 48 | secondary: 49 | replicas: 2 50 | config: 51 | myidOffset: 20 52 | -------------------------------------------------------------------------------- /tests/templates/kuttl/delete-rolegroup/10-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: test-zk-server-primary 10 | status: 11 | readyReplicas: 3 12 | replicas: 3 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/delete-rolegroup/10-errors.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: test-zk-server-secondary 6 | -------------------------------------------------------------------------------- /tests/templates/kuttl/delete-rolegroup/10-remove-secondary.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperCluster 4 | metadata: 5 | name: test-zk 6 | spec: 7 | image: 8 | {% if test_scenario['values']['zookeeper'].find(",") > 0 %} 9 | custom: "{{ test_scenario['values']['zookeeper'].split(',')[1] }}" 10 | productVersion: "{{ test_scenario['values']['zookeeper'].split(',')[0] }}" 11 | {% else %} 12 | productVersion: "{{ test_scenario['values']['zookeeper'] }}" 13 | {% endif %} 14 | pullPolicy: IfNotPresent 15 | clusterConfig: 16 | tls: 17 | serverSecretClass: null 18 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 19 | # Discovery ConfigMap for the Vector log aggregator 20 | vectorAggregatorConfigMapName: vector-aggregator-discovery 21 | {% endif %} 22 | servers: 23 | config: 24 | logging: 25 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 26 | resources: 27 | storage: 28 | data: 29 | capacity: '1Gi' 30 | cpu: 31 | max: '500m' 32 | min: '250m' 33 | memory: 34 | limit: '0.5Gi' 35 | roleGroups: 36 | primary: 37 | replicas: 3 38 | config: 39 | myidOffset: 10 40 | secondary: 41 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/00-patch-ns.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if test_scenario['values']['openshift'] == 'true' %} 2 | # see https://github.com/stackabletech/issues/issues/566 3 | --- 4 | apiVersion: kuttl.dev/v1beta1 5 | kind: TestStep 6 | commands: 7 | - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' 8 | timeout: 120 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/05-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: zookeeper-vector-aggregator 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/05-install-zookeeper-vector-aggregator.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: >- 6 | helm install zookeeper-vector-aggregator vector 7 | --namespace $NAMESPACE 8 | --version 0.43.0 9 | --repo https://helm.vector.dev 10 | --values zookeeper-vector-aggregator-values.yaml 11 | --- 12 | apiVersion: v1 13 | kind: ConfigMap 14 | metadata: 15 | name: zookeeper-vector-aggregator-discovery 16 | data: 17 | ADDRESS: zookeeper-vector-aggregator:6123 18 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/10-create-configmap-with-prepared-logs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: > 6 | kubectl create configmap
prepared-logs 7 | --from-file=prepared-logs.log4j.xml 8 | --namespace=$NAMESPACE 9 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/11-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | metadata: 5 | name: install-test-zk 6 | timeout: 600 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: test-zk-server-automatic-log-config 12 | status: 13 | readyReplicas: 1 14 | replicas: 1 15 | --- 16 | apiVersion: apps/v1 17 | kind: StatefulSet 18 | metadata: 19 | name: test-zk-server-custom-log-config 20 | status: 21 | readyReplicas: 1 22 | replicas: 1 23 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/11-install-zookeeper.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: zk-log-config 6 | data: 7 | logback.xml: | 8 | <configuration> 9 | <appender name="FILE" class="ch.qos.logback.core.FileAppender"> 10 | <file>/stackable/log/zookeeper/zookeeper.log4j.xml</file> 11 | <encoder class="ch.qos.logback.core.encoder.LayoutWrappingEncoder"> 12 | <layout class="ch.qos.logback.classic.log4j.XMLLayout" /> 13 | </encoder> 14 | </appender> 15 | <root level="INFO"> 16 | <appender-ref ref="FILE" /> 17 | </root> 18 | </configuration> 19 | log4j.properties: | 20 | log4j.rootLogger=INFO, FILE 21 | log4j.appender.FILE=org.apache.log4j.FileAppender 22 | log4j.appender.FILE.File=/stackable/log/zookeeper/zookeeper.log4j.xml 23 | log4j.appender.FILE.layout=org.apache.log4j.xml.XMLLayout 24 | --- 25 | apiVersion: zookeeper.stackable.tech/v1alpha1 26 | kind: ZookeeperCluster 27 | metadata: 28 | name: test-zk 29 | spec: 30 | image: 31 | {% if test_scenario['values']['zookeeper'].find(",") > 0 %} 32 | custom: "{{ test_scenario['values']['zookeeper'].split(',')[1] }}" 33 | productVersion: "{{ test_scenario['values']['zookeeper'].split(',')[0] }}" 34 | {% else %} 35 | productVersion: "{{ test_scenario['values']['zookeeper'] }}" 36 | {% endif %} 37 | pullPolicy: IfNotPresent 38 | clusterConfig: 39 | vectorAggregatorConfigMapName: zookeeper-vector-aggregator-discovery 40 | servers: 41 | roleGroups: 42 | automatic-log-config: 43 | replicas: 1 44 | config: 45 | logging: 46 | enableVectorAgent: true 47 | containers: 48 | prepare: 49 | console: 50 | level: INFO 51 | file: 52 | level: INFO 53 | loggers: 54 | ROOT: 55 | level: INFO 56 | vector: 57 | console: 58 | level: INFO 59 | file: 60 | level: INFO 61 | loggers: 62 | ROOT: 63 | level: INFO 64 | zookeeper: 65 | console: 66 | level: INFO 67 | file: 68 | level: INFO 69 | loggers: 70 | ROOT: 71 | level: INFO 72 | podOverrides: 73 | spec: 74 | containers: 75 | - name: vector 76 | volumeMounts: 77 | - name: prepared-logs 78 | mountPath: /stackable/log/prepared-logs 79 | volumes: 80 | - name: prepared-logs 81 | configMap: 82 | name: prepared-logs 83 | custom-log-config: 84 | replicas: 1 85 | config: 86 | logging: 87 | enableVectorAgent: true 88 | containers: 89 | zookeeper: 90 | custom: 91 | configMap: zk-log-config 92 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/20-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | metadata: 5 | name: install-zk-test-helper 6 | timeout: 300 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: zk-test-helper 12 | status: 13 | readyReplicas: 1 14 | replicas: 1 15 | --------------------------------------------------------------------------------
/tests/templates/kuttl/logging/20-install-check.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: zk-test-helper 6 | labels: 7 | app: zk-test-helper 8 | spec: 9 | clusterIP: None 10 | selector: 11 | app: zk-test-helper 12 | --- 13 | apiVersion: apps/v1 14 | kind: StatefulSet 15 | metadata: 16 | name: zk-test-helper 17 | labels: 18 | app: zk-test-helper 19 | spec: 20 | replicas: 1 21 | serviceName: zk-test-helper 22 | selector: 23 | matchLabels: 24 | app: zk-test-helper 25 | template: 26 | metadata: 27 | labels: 28 | app: zk-test-helper 29 | spec: 30 | containers: 31 | - name: zk-test-helper 32 | image: oci.stackable.tech/sdp/testing-tools:0.2.0-stackable0.0.0-dev 33 | stdin: true 34 | tty: true 35 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/30-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | metadata: 5 | name: test-logs 6 | commands: 7 | - script: kubectl exec -n $NAMESPACE zk-test-helper-0 -- python /tmp/test_log_aggregation.py -n $NAMESPACE 8 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/30-prepare-test-zookeeper.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: kubectl cp -n $NAMESPACE ./test_log_aggregation.py zk-test-helper-0:/tmp 6 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/prepared-logs.log4j.xml: -------------------------------------------------------------------------------- 1 | <log4j:event logger="testLogger" timestamp="1577836800000" level="INFO" thread="main"> 2 | <log4j:message>Valid log event with all possible tags and attributes</log4j:message> 3 | <log4j:throwable>TestException</log4j:throwable> 4 | <log4j:locationInfo class="TestClass" method="testMethod" file="TestFile.java" line="1"/> 5 | <log4j:properties> 6 | <log4j:data name="testKey" value="testValue"/> 7 | </log4j:properties> 8 | </log4j:event> 9 | <log4j:event logger="testLogger" level="INFO" thread="main"> 10 | <log4j:message>Invalid log event without a timestamp</log4j:message> 11 | </log4j:event> 12 | <log4j:event logger="testLogger" timestamp="unparsable" level="INFO" thread="main"> 13 | <log4j:message>Invalid log event with an unparsable timestamp</log4j:message> 14 | </log4j:event> 15 | <log4j:event timestamp="1577836800000" level="INFO" thread="main"> 16 | <log4j:message>Invalid log event without a logger</log4j:message> 17 | </log4j:event> 18 | <log4j:event logger="testLogger" timestamp="1577836800000" thread="main"> 19 | <log4j:message>Invalid log event without a level</log4j:message> 20 | </log4j:event> 21 | <log4j:event logger="testLogger" timestamp="1577836800000" level="UNKNOWN" thread="main"> 22 | <log4j:message>Invalid log event with an unknown level</log4j:message> 23 | </log4j:event> 24 | <log4j:event logger="testLogger" timestamp="1577836800000" level="INFO" thread="main"> 25 | </log4j:event> 26 | <log4j:event logger="testLogger" timestamp="1577836800000" level="INFO" thread="main"> 27 | <log4j:message>Valid log event before the one with the noevent tag</log4j:message> 28 | </log4j:event> 29 | <log4j:noevent logger="testLogger" timestamp="1577836800000" level="INFO" thread="main"> 30 | </log4j:noevent> 31 | <log4j:message>Invalid log event without the event tag</log4j:message> 32 | <log4j:event logger="testLogger" timestamp="1577836800000" level="INFO" thread="main"> 33 | <log4j:message>Unparsable log event 34 | </log4j:event> 35 | <log4j:event logger="testLogger" timestamp="1577836800000" level="INFO" thread="main"> 36 | <log4j:message>Valid log event after the unparsable one</log4j:message> 37 | </log4j:event> 38 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/test_log_aggregation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import requests 3 | 4 | 5 | def check_sent_events(): 6 | response = requests.post( 7 | 'http://zookeeper-vector-aggregator:8686/graphql', 8 | json={ 9 | 'query': """ 10 | { 11 | transforms(first:100) { 12 | nodes { 13 | componentId 14 | metrics { 15 | sentEventsTotal { 16 | sentEventsTotal 17 | } 18 | } 19 | } 20 | } 21 | } 22 | """ 23 | } 24 | ) 25 | 26 | assert response.status_code == 200, \ 27 | 'Cannot access the API of the vector aggregator.'
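# Every transform configured in the aggregator appears as a GraphQL node; its sentEventsTotal metric counts the events that passed the corresponding filter.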
28 | 29 | result = response.json() 30 | 31 | transforms = result['data']['transforms']['nodes'] 32 | for transform in transforms: 33 | sentEvents = transform['metrics']['sentEventsTotal'] 34 | componentId = transform['componentId'] 35 | 36 | if componentId == 'filteredInvalidEvents': 37 | assert sentEvents is None or \ 38 | sentEvents['sentEventsTotal'] == 0, \ 39 | 'Invalid log events were sent.' 40 | else: 41 | assert sentEvents is not None and \ 42 | sentEvents['sentEventsTotal'] > 0, \ 43 | f'No events were sent in "{componentId}".' 44 | 45 | 46 | if __name__ == '__main__': 47 | check_sent_events() 48 | print('Test successful!') 49 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/zookeeper-vector-aggregator-values.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | role: Aggregator 3 | service: 4 | ports: 5 | - name: api 6 | port: 8686 7 | protocol: TCP 8 | targetPort: 8686 9 | - name: vector 10 | port: 6123 11 | protocol: TCP 12 | targetPort: 6000 13 | customConfig: 14 | api: 15 | address: 0.0.0.0:8686 16 | enabled: true 17 | sources: 18 | vector: 19 | address: 0.0.0.0:6000 20 | type: vector 21 | version: "2" 22 | transforms: 23 | validEvents: 24 | type: filter 25 | inputs: [vector] 26 | condition: is_null(.errors) 27 | filteredAutomaticLogConfigPrepare: 28 | type: filter 29 | inputs: [validEvents] 30 | condition: |- 31 | .pod == "test-zk-server-automatic-log-config-0" && .container == "prepare" 32 | filteredAutomaticLogConfigVector: 33 | type: filter 34 | inputs: [validEvents] 35 | condition: |- 36 | .pod == "test-zk-server-automatic-log-config-0" && .container == "vector" 37 | filteredAutomaticLogConfigZookeeper: 38 | type: filter 39 | inputs: [validEvents] 40 | condition: |- 41 | .pod == "test-zk-server-automatic-log-config-0" && .container == "zookeeper" 42 | filteredCustomLogConfigPrepare: 43 | type: filter 44 | inputs: [validEvents] 45 | condition: |- 46 | .pod == "test-zk-server-custom-log-config-0" && .container == "prepare" 47 | filteredCustomLogConfigVector: 48 | type: filter 49 | inputs: [validEvents] 50 | condition: |- 51 | .pod == "test-zk-server-custom-log-config-0" && .container == "vector" 52 | filteredCustomLogConfigZookeeper: 53 | type: filter 54 | inputs: [validEvents] 55 | condition: |- 56 | .pod == "test-zk-server-custom-log-config-0" && .container == "zookeeper" 57 | filteredInvalidEvents: 58 | type: filter 59 | inputs: [vector] 60 | condition: |- 61 | .timestamp == from_unix_timestamp!(0) || 62 | is_null(.level) || 63 | is_null(.logger) || 64 | is_null(.message) 65 | sinks: 66 | test: 67 | inputs: [filtered*] 68 | type: blackhole 69 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 70 | aggregator: 71 | inputs: [vector] 72 | type: vector 73 | address: {{ lookup('env', 'VECTOR_AGGREGATOR') }} 74 | buffer: 75 | # Avoid back pressure from VECTOR_AGGREGATOR. The test should 76 | # not fail if the aggregator is not available. 
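# drop_newest discards the newest events once the buffer is full instead of blocking the pipeline.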
77 | when_full: drop_newest 78 | {% endif %} 79 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/00-limit-range.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: LimitRange 4 | metadata: 5 | name: limit-request-ratio 6 | spec: 7 | limits: 8 | - type: "Container" 9 | maxLimitRequestRatio: 10 | cpu: 5 11 | memory: 1 12 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/00-patch-ns.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if test_scenario['values']['openshift'] == 'true' %} 2 | # see https://github.com/stackabletech/issues/issues/566 3 | --- 4 | apiVersion: kuttl.dev/v1beta1 5 | kind: TestStep 6 | commands: 7 | - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' 8 | timeout: 120 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/10-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | metadata: 5 | name: install-test-zk 6 | timeout: 600 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: test-zk-server-primary 12 | spec: 13 | template: 14 | spec: 15 | containers: 16 | - name: zookeeper 17 | resources: 18 | limits: 19 | cpu: 500m 20 | memory: 512Mi 21 | requests: 22 | cpu: 250m 23 | memory: 512Mi 24 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 25 | - name: vector 26 | {% endif %} 27 | terminationGracePeriodSeconds: 120 28 | status: 29 | readyReplicas: 2 30 | replicas: 2 31 | --- 32 | apiVersion: apps/v1 33 | kind: StatefulSet 34 | metadata: 35 | name: test-zk-server-secondary 36 | spec: 37 | template: 38 | spec: 39 | containers: 40 | - name: zookeeper 41 | resources: 42 | limits: 43 | cpu: 600m # From podOverrides 44 | memory: 512Mi 45 | requests: 46 | cpu: 300m # From podOverrides 47 | memory: 512Mi 48 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 49 | - name: vector 50 | {% endif %} 51 | terminationGracePeriodSeconds: 120 52 | status: 53 | readyReplicas: 1 54 | replicas: 1 55 | --- 56 | apiVersion: v1 57 | kind: PersistentVolumeClaim 58 | metadata: 59 | name: data-test-zk-server-primary-0 60 | spec: 61 | resources: 62 | requests: 63 | storage: 1Gi 64 | status: 65 | phase: Bound 66 | --- 67 | apiVersion: v1 68 | kind: PersistentVolumeClaim 69 | metadata: 70 | name: data-test-zk-server-secondary-0 71 | spec: 72 | resources: 73 | requests: 74 | storage: 2Gi 75 | status: 76 | phase: Bound 77 | --- 78 | apiVersion: policy/v1 79 | kind: PodDisruptionBudget 80 | metadata: 81 | name: test-zk-server 82 | status: 83 | expectedPods: 3 84 | currentHealthy: 3 85 | disruptionsAllowed: 1 86 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/10-install-zookeeper.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: vector-aggregator-discovery 7 | data: 8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} 9 | {% endif %} 10 | --- 11 | apiVersion: zookeeper.stackable.tech/v1alpha1 12 | kind: ZookeeperCluster 13 | metadata: 14 | name: test-zk 15 | spec: 16 | 
image: 17 | {% if test_scenario['values']['zookeeper'].find(",") > 0 %} 18 | custom: "{{ test_scenario['values']['zookeeper'].split(',')[1] }}" 19 | productVersion: "{{ test_scenario['values']['zookeeper'].split(',')[0] }}" 20 | {% else %} 21 | productVersion: "{{ test_scenario['values']['zookeeper'] }}" 22 | {% endif %} 23 | pullPolicy: IfNotPresent 24 | clusterConfig: 25 | {% if test_scenario['values']['use-server-tls'] == 'true' %} 26 | tls: 27 | serverSecretClass: zk-client-secret 28 | {% else %} 29 | tls: 30 | serverSecretClass: null 31 | {% endif %} 32 | {% if test_scenario['values']['use-client-auth-tls'] == 'true' %} 33 | authentication: 34 | - authenticationClass: zk-client-auth-tls 35 | {% endif %} 36 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 37 | vectorAggregatorConfigMapName: vector-aggregator-discovery 38 | {% endif %} 39 | servers: 40 | config: 41 | logging: 42 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 43 | resources: 44 | storage: 45 | data: 46 | capacity: '1Gi' 47 | cpu: 48 | max: '500m' 49 | min: '250m' 50 | memory: 51 | limit: '512Mi' 52 | envOverrides: 53 | COMMON_VAR: role-value # overridden by role group below 54 | ROLE_VAR: role-value # only defined here at role level 55 | configOverrides: 56 | zoo.cfg: 57 | prop.common: role 58 | prop.role: role 59 | roleGroups: 60 | primary: 61 | replicas: 2 62 | config: 63 | myidOffset: 10 64 | envOverrides: 65 | COMMON_VAR: group-value # overrides role value 66 | GROUP_VAR: group-value # only defined here at group level 67 | configOverrides: 68 | zoo.cfg: 69 | prop.common: group 70 | prop.group: group 71 | secondary: 72 | replicas: 1 73 | config: 74 | myidOffset: 20 75 | resources: 76 | storage: 77 | data: 78 | capacity: '2Gi' 79 | podOverrides: 80 | spec: 81 | containers: 82 | - name: zookeeper 83 | resources: 84 | requests: 85 | cpu: 300m 86 | limits: 87 | cpu: 600m 88 | {% if test_scenario['values']['use-client-auth-tls'] == 'true' %} 89 | --- 90 | apiVersion: authentication.stackable.tech/v1alpha1 91 | kind: AuthenticationClass 92 | metadata: 93 | name: zk-client-auth-tls 94 | spec: 95 | provider: 96 | tls: 97 | clientCertSecretClass: zk-client-auth-secret 98 | --- 99 | apiVersion: secrets.stackable.tech/v1alpha1 100 | kind: SecretClass 101 | metadata: 102 | name: zk-client-auth-secret 103 | spec: 104 | backend: 105 | autoTls: 106 | ca: 107 | secret: 108 | name: secret-provisioner-tls-zk-client-auth-ca 109 | namespace: default 110 | autoGenerate: true 111 | {% endif %} 112 | {% if test_scenario['values']['use-server-tls'] == 'true' %} 113 | --- 114 | apiVersion: secrets.stackable.tech/v1alpha1 115 | kind: SecretClass 116 | metadata: 117 | name: zk-client-secret 118 | spec: 119 | backend: 120 | autoTls: 121 | ca: 122 | secret: 123 | name: secret-provisioner-tls-zk-client-ca 124 | namespace: default 125 | autoGenerate: true 126 | {% endif %} 127 | --- 128 | apiVersion: zookeeper.stackable.tech/v1alpha1 129 | kind: ZookeeperZnode 130 | metadata: 131 | name: test-znode 132 | spec: 133 | clusterRef: 134 | name: test-zk 135 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/11-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | commands: 6 | # 7 | # Test envOverrides 8 | # 9 | - script: | 10 | kubectl -n $NAMESPACE get sts test-zk-server-primary -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == 
"zookeeper") | .env[] | select (.name == "COMMON_VAR" and .value == "group-value")' 11 | kubectl -n $NAMESPACE get sts test-zk-server-primary -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "zookeeper") | .env[] | select (.name == "GROUP_VAR" and .value == "group-value")' 12 | kubectl -n $NAMESPACE get sts test-zk-server-primary -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "zookeeper") | .env[] | select (.name == "ROLE_VAR" and .value == "role-value")' 13 | # 14 | # Test configOverrides 15 | # 16 | - script: | 17 | kubectl -n $NAMESPACE get cm test-zk-server-primary -o yaml | yq -e '.data."zoo.cfg"' | grep "prop.common=group" 18 | kubectl -n $NAMESPACE get cm test-zk-server-primary -o yaml | yq -e '.data."zoo.cfg"' | grep "prop.group=group" 19 | kubectl -n $NAMESPACE get cm test-zk-server-primary -o yaml | yq -e '.data."zoo.cfg"' | grep "prop.role=role" 20 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/12-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This test checks if the containerdebug-state.json file is present and valid 3 | apiVersion: kuttl.dev/v1beta1 4 | kind: TestAssert 5 | timeout: 120 6 | commands: 7 | - script: kubectl exec -n $NAMESPACE --container zookeeper test-zk-server-primary-0 -- cat /stackable/log/containerdebug-state.json | jq --exit-status '"valid JSON"' 8 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/20-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | metadata: 5 | name: install-zk-test-helper 6 | timeout: 300 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: zk-test-helper 12 | status: 13 | readyReplicas: 1 14 | replicas: 1 15 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/20-install-check.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: zk-test-helper 6 | labels: 7 | app: zk-test-helper 8 | spec: 9 | clusterIP: None 10 | selector: 11 | app: zk-test-helper 12 | --- 13 | apiVersion: apps/v1 14 | kind: StatefulSet 15 | metadata: 16 | name: zk-test-helper 17 | labels: 18 | app: zk-test-helper 19 | spec: 20 | replicas: 1 21 | serviceName: zk-test-helper 22 | selector: 23 | matchLabels: 24 | app: zk-test-helper 25 | template: 26 | metadata: 27 | labels: 28 | app: zk-test-helper 29 | spec: 30 | containers: 31 | - name: zk-test-helper 32 | image: oci.stackable.tech/sdp/testing-tools:0.2.0-stackable0.0.0-dev 33 | stdin: true 34 | tty: true 35 | resources: 36 | requests: 37 | memory: "128Mi" 38 | cpu: "512m" 39 | limits: 40 | memory: "128Mi" 41 | cpu: "1" 42 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/21-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | metadata: 5 | name: test-regorule 6 | commands: 7 | - script: kubectl exec -n $NAMESPACE zk-test-helper-0 -- python /tmp/test_zookeeper.py -n $NAMESPACE 8 | - script: kubectl exec -n $NAMESPACE test-zk-server-primary-0 --container='zookeeper' -- /tmp/test_heap.sh 9 | {% if 
test_scenario['values']['use-client-auth-tls'] == 'true' or test_scenario['values']['use-server-tls'] == 'true' %} 10 | - script: kubectl exec -n $NAMESPACE test-zk-server-primary-0 --container='zookeeper' -- /tmp/test_tls.sh $NAMESPACE 11 | {% endif %} 12 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/21-prepare-test-zookeeper.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: kubectl cp -n $NAMESPACE ./test_zookeeper.py zk-test-helper-0:/tmp 6 | - script: kubectl cp -n $NAMESPACE ./test_heap.sh test-zk-server-primary-0:/tmp --container='zookeeper' 7 | {% if test_scenario['values']['use-client-auth-tls'] == 'true' or test_scenario['values']['use-server-tls'] == 'true' %} 8 | - script: kubectl cp -n $NAMESPACE ./test_tls.sh test-zk-server-primary-0:/tmp --container='zookeeper' 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/test_heap.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Usage: test_heap.sh 3 | 4 | # 0.5Gi memory limit = 512Mi; the heap gets 80% of that: 512 * 0.8 = 409.6 -> 409 5 | EXPECTED_HEAP=409 6 | 7 | # Check if ZK_SERVER_HEAP is set to the correct calculated value 8 | if [[ $ZK_SERVER_HEAP == "$EXPECTED_HEAP" ]] 9 | then 10 | echo "[SUCCESS] ZK_SERVER_HEAP set to $EXPECTED_HEAP" 11 | else 12 | echo "[ERROR] ZK_SERVER_HEAP not set or set with wrong value: $ZK_SERVER_HEAP" 13 | exit 1 14 | fi 15 | 16 | echo "[SUCCESS] All heap settings tests successful!" 17 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/test_tls.sh.j2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Usage: test_tls.sh namespace 3 | 4 | NAMESPACE=$1 5 | 6 | {% if test_scenario['values']['use-client-auth-tls'] == 'true' or test_scenario['values']['use-server-tls'] == 'true' %} 7 | SERVER="test-zk-server-primary-1.test-zk-server-primary.${NAMESPACE}.svc.cluster.local:2282" 8 | {% else %} 9 | SERVER="test-zk-server-primary-1.test-zk-server-primary.${NAMESPACE}.svc.cluster.local:2181" 10 | {% endif %} 11 | 12 | # just to be safe... 13 | unset QUORUM_STORE_SECRET 14 | unset CLIENT_STORE_SECRET 15 | unset CLIENT_JVMFLAGS 16 | 17 | echo "Start TLS testing..." 18 | ############################################################################ 19 | # Test the plaintext unsecured connection 20 | ############################################################################ 21 | if ! /stackable/zookeeper/bin/zkCli.sh -server "${SERVER}" ls / &> /dev/null; 22 | then 23 | echo "[ERROR] Could not establish insecure connection!" 24 | exit 1 25 | fi 26 | echo "[SUCCESS] Insecure client connection established!"
27 | 28 | ############################################################################ 29 | # We set the correct client tls credentials and expect to be able to connect 30 | ############################################################################ 31 | CLIENT_STORE_SECRET="$(< /stackable/rwconfig/zoo.cfg grep "ssl.keyStore.password" | cut -d "=" -f2)" 32 | export CLIENT_STORE_SECRET 33 | export CLIENT_JVMFLAGS=" 34 | -Dzookeeper.authProvider.x509=org.apache.zookeeper.server.auth.X509AuthenticationProvider 35 | -Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty 36 | -Dzookeeper.client.secure=true 37 | -Dzookeeper.ssl.keyStore.location=/stackable/server_tls/keystore.p12 38 | -Dzookeeper.ssl.keyStore.password=${CLIENT_STORE_SECRET} 39 | -Dzookeeper.ssl.trustStore.location=/stackable/server_tls/truststore.p12 40 | -Dzookeeper.ssl.trustStore.password=${CLIENT_STORE_SECRET}" 41 | 42 | if ! /stackable/zookeeper/bin/zkCli.sh -server "${SERVER}" ls / &> /dev/null; 43 | then 44 | echo "[ERROR] Could not establish secure connection using client certificates!" 45 | exit 1 46 | fi 47 | echo "[SUCCESS] Secure and authenticated client connection established!" 48 | 49 | ############################################################################ 50 | # We set the (wrong) quorum tls credentials and expect to fail (wrong certificate) 51 | ############################################################################ 52 | QUORUM_STORE_SECRET="$(< /stackable/rwconfig/zoo.cfg grep "ssl.quorum.keyStore.password" | cut -d "=" -f2)" 53 | export QUORUM_STORE_SECRET 54 | export CLIENT_JVMFLAGS=" 55 | -Dzookeeper.authProvider.x509=org.apache.zookeeper.server.auth.X509AuthenticationProvider 56 | -Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty 57 | -Dzookeeper.client.secure=true 58 | -Dzookeeper.ssl.keyStore.location=/stackable/quorum_tls/keystore.p12 59 | -Dzookeeper.ssl.keyStore.password=${QUORUM_STORE_SECRET} 60 | -Dzookeeper.ssl.trustStore.location=/stackable/quorum_tls/truststore.p12 61 | -Dzookeeper.ssl.trustStore.password=${QUORUM_STORE_SECRET}" 62 | 63 | if /stackable/zookeeper/bin/zkCli.sh -server "${SERVER}" ls / &> /dev/null; 64 | then 65 | echo "[ERROR] Could establish secure connection with quorum certificates (should not be happening)!" 66 | exit 1 67 | fi 68 | echo "[SUCCESS] Could not establish secure connection with (wrong) quorum certificates!" 69 | 70 | echo "All TLS tests successful!" 
71 | exit 0 72 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/test_zookeeper.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import requests 4 | import time 5 | import sys 6 | sys.tracebacklimit = 0 7 | 8 | 9 | def print_request_error_and_sleep(message, err, retry_count): 10 | print("[" + str(retry_count) + "] " + message, err) 11 | time.sleep(5) 12 | 13 | 14 | def try_get(url): 15 | retries = 3 16 | for i in range(retries): 17 | try: 18 | r = requests.get(url, timeout=5) 19 | r.raise_for_status() 20 | return r 21 | except requests.exceptions.HTTPError as errh: 22 | print_request_error_and_sleep("Http Error: ", errh, i) 23 | except requests.exceptions.ConnectionError as errc: 24 | print_request_error_and_sleep("Error Connecting: ", errc, i) 25 | except requests.exceptions.Timeout as errt: 26 | print_request_error_and_sleep("Timeout Error: ", errt, i) 27 | except requests.exceptions.RequestException as err: 28 | print_request_error_and_sleep("Error: ", err, i) 29 | 30 | exit(-1) 31 | 32 | 33 | def check_ruok(hosts): 34 | cmd_ruok = "ruok" 35 | 36 | for host in hosts: 37 | url = host + ":8080/commands/" + cmd_ruok 38 | response = try_get(url).json() 39 | 40 | if "command" in response and response["command"] == cmd_ruok \ 41 | and "error" in response and response["error"] is None: 42 | continue 43 | else: 44 | print("Error[" + cmd_ruok + "] for [" + url + "]: received " + str( 45 | response) + " - expected {'command': 'ruok', 'error': None} ") 46 | exit(-1) 47 | 48 | 49 | def check_monitoring(hosts): 50 | for host in hosts: 51 | url = host + ":9505" 52 | response = try_get(url) 53 | 54 | if response.ok: 55 | continue 56 | else: 57 | print("Error for [" + url + "]: could not access monitoring") 58 | exit(-1) 59 | 60 | 61 | if __name__ == '__main__': 62 | all_args = argparse.ArgumentParser(description="Test ZooKeeper.") 63 | all_args.add_argument("-n", "--namespace", help="The namespace to run in", required=True) 64 | args = vars(all_args.parse_args()) 65 | namespace = args["namespace"] 66 | 67 | host_primary_0 = "http://test-zk-server-primary-0.test-zk-server-primary." + namespace + ".svc.cluster.local" 68 | host_primary_1 = "http://test-zk-server-primary-1.test-zk-server-primary." + namespace + ".svc.cluster.local" 69 | host_secondary = "http://test-zk-server-secondary-0.test-zk-server-secondary." 
+ namespace + ".svc.cluster.local" 70 | 71 | hosts = [host_primary_0, host_primary_1, host_secondary] 72 | 73 | check_ruok(hosts) 74 | check_monitoring(hosts) 75 | 76 | print("Test successful!") 77 | -------------------------------------------------------------------------------- /tests/templates/kuttl/znode/00-patch-ns.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if test_scenario['values']['openshift'] == 'true' %} 2 | # see https://github.com/stackabletech/issues/issues/566 3 | --- 4 | apiVersion: kuttl.dev/v1beta1 5 | kind: TestStep 6 | commands: 7 | - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' 8 | timeout: 120 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/znode/10-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: test-zk-server-default 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | --- 14 | apiVersion: v1 15 | kind: ConfigMap 16 | metadata: 17 | name: test-znode 18 | -------------------------------------------------------------------------------- /tests/templates/kuttl/znode/10-install-zookeeper.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: vector-aggregator-discovery 7 | data: 8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} 9 | {% endif %} 10 | --- 11 | apiVersion: zookeeper.stackable.tech/v1alpha1 12 | kind: ZookeeperCluster 13 | metadata: 14 | name: test-zk 15 | spec: 16 | image: 17 | {% if test_scenario['values']['zookeeper-latest'].find(",") > 0 %} 18 | custom: "{{ test_scenario['values']['zookeeper-latest'].split(',')[1] }}" 19 | productVersion: "{{ test_scenario['values']['zookeeper-latest'].split(',')[0] }}" 20 | {% else %} 21 | productVersion: "{{ test_scenario['values']['zookeeper-latest'] }}" 22 | {% endif %} 23 | pullPolicy: IfNotPresent 24 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 25 | clusterConfig: 26 | vectorAggregatorConfigMapName: vector-aggregator-discovery 27 | {% endif %} 28 | servers: 29 | config: 30 | logging: 31 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 32 | roleGroups: 33 | default: 34 | replicas: 1 35 | --- 36 | apiVersion: zookeeper.stackable.tech/v1alpha1 37 | kind: ZookeeperZnode 38 | metadata: 39 | name: test-znode 40 | spec: 41 | clusterRef: 42 | name: test-zk 43 | -------------------------------------------------------------------------------- /tests/templates/kuttl/znode/20-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | --- 5 | apiVersion: v1 6 | kind: ConfigMap 7 | metadata: 8 | name: test-znode 9 | data: 10 | ZOOKEEPER_CHROOT: /znode-override 11 | -------------------------------------------------------------------------------- /tests/templates/kuttl/znode/20-set-znode-override.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: kubectl get zookeeperznode/test-znode -o json -n $NAMESPACE | jq 
'.status.znodePath = "/znode-override"' | kubectl replace -f- --subresource=status 6 | -------------------------------------------------------------------------------- /tests/test-definition.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | dimensions: 3 | - name: zookeeper 4 | values: 5 | - 3.9.3 6 | # To use a custom image, add a comma and the full name after the product version 7 | # - 3.9.3,oci.stackable.tech/sdp/zookeeper:3.9.3-stackable0.0.0-dev 8 | - name: zookeeper-latest 9 | values: 10 | - 3.9.3 11 | # To use a custom image, add a comma and the full name after the product version 12 | # - 3.9.3,oci.stackable.tech/sdp/zookeeper:3.9.3-stackable0.0.0-dev 13 | - name: use-server-tls 14 | values: 15 | - "true" 16 | - "false" 17 | - name: use-client-auth-tls 18 | values: 19 | - "true" 20 | - "false" 21 | - name: openshift 22 | values: 23 | - "false" 24 | tests: 25 | - name: smoke 26 | dimensions: 27 | - zookeeper 28 | - use-server-tls 29 | - use-client-auth-tls 30 | - openshift 31 | - name: delete-rolegroup 32 | dimensions: 33 | - zookeeper 34 | - openshift 35 | - name: znode 36 | dimensions: 37 | - zookeeper-latest 38 | - openshift 39 | - name: logging 40 | dimensions: 41 | - zookeeper 42 | - openshift 43 | - name: cluster-operation 44 | dimensions: 45 | - zookeeper-latest 46 | - openshift 47 | suites: 48 | - name: nightly 49 | patch: 50 | - dimensions: 51 | - name: zookeeper 52 | expr: last 53 | - name: use-server-tls 54 | expr: "true" 55 | - name: use-client-auth-tls 56 | expr: "true" 57 | - name: smoke-latest 58 | select: 59 | - smoke 60 | patch: 61 | - dimensions: 62 | - expr: last 63 | - name: openshift 64 | patch: 65 | - dimensions: 66 | - expr: last 67 | - dimensions: 68 | - name: zookeeper 69 | expr: last 70 | - name: use-server-tls 71 | expr: "true" 72 | - name: use-client-auth-tls 73 | expr: "true" 74 | - name: openshift 75 | expr: "true" 76 | --------------------------------------------------------------------------------