├── tests
├── templates
│ ├── .gitkeep
│ └── kuttl
│ │ ├── smoke
│ │ ├── certs
│ │ │ ├── root-ca.crt.srl
│ │ │ ├── client.csr.pem
│ │ │ ├── generate.sh
│ │ │ ├── tls.crt
│ │ │ ├── client.crt.pem
│ │ │ ├── ca.crt
│ │ │ ├── root-ca.crt.pem
│ │ │ ├── tls.key
│ │ │ ├── client.key.pem
│ │ │ └── root-ca.key.pem
│ │ ├── 00-limit-range.yaml
│ │ ├── 70-assert.yaml
│ │ ├── 10-assert.yaml.j2
│ │ ├── 80-prepare-test-metastore.yaml
│ │ ├── 10-install-vector-aggregator-discovery-configmap.yaml.j2
│ │ ├── 62-assert.yaml
│ │ ├── 50-assert.yaml
│ │ ├── 40-install-postgres.yaml.j2
│ │ ├── 00-patch-ns.yaml.j2
│ │ ├── 40-assert.yaml
│ │ ├── 80-assert.yaml
│ │ ├── 30-setup-minio.yaml.j2
│ │ ├── 70-install-test-metastore.yaml
│ │ ├── 60-assert.yaml.j2
│ │ ├── helm-bitnami-postgresql-values.yaml.j2
│ │ ├── 61-assert.yaml
│ │ ├── helm-bitnami-minio-values.yaml.j2
│ │ ├── test_metastore_opa.py
│ │ └── 60-install-hive.yaml.j2
│ │ ├── orphaned-resources
│ │ ├── 03-assert.yaml
│ │ ├── 00-assert.yaml.j2
│ │ ├── 00-install-vector-aggregator-discovery-configmap.yaml.j2
│ │ ├── 00-patch-ns.yaml.j2
│ │ ├── 03-errors.yaml
│ │ ├── 04-errors.yaml
│ │ ├── 04-change-rolegroup.yaml
│ │ ├── 01-assert.yaml
│ │ ├── 04-assert.yaml
│ │ ├── 03-remove-role-group.yaml.j2
│ │ └── 01-install-hive.yaml.j2
│ │ ├── external-access
│ │ ├── 10-listener-classes.yaml
│ │ ├── listener-classes.yaml
│ │ ├── 20-install-hive.yaml
│ │ ├── 00-limit-range.yaml
│ │ ├── 00-install-vector-aggregator-discovery-configmap.yaml.j2
│ │ ├── 00-patch-ns.yaml.j2
│ │ ├── helm-bitnami-postgresql-values.yaml.j2
│ │ ├── 20-assert.yaml
│ │ └── install-hive.yaml.j2
│ │ ├── logging
│ │ ├── 06-test-log-aggregation.yaml
│ │ ├── 06-assert.yaml
│ │ ├── 00-assert.yaml.j2
│ │ ├── 05-assert.yaml
│ │ ├── 01-assert.yaml
│ │ ├── 03-create-configmap-with-prepared-logs.yaml
│ │ ├── 00-install-vector-aggregator-discovery-configmap.yaml.j2
│ │ ├── 02-install-postgres.yaml.j2
│ │ ├── 00-patch-ns.yaml.j2
│ │ ├── 02-assert.yaml
│ │ ├── 04-assert.yaml
│ │ ├── 01-install-hbase-vector-aggregator.yaml
│ │ ├── 05-install-hive-test-runner.yaml
│ │ ├── helm-bitnami-postgresql-values.yaml.j2
│ │ ├── test_log_aggregation.py
│ │ └── hive-vector-aggregator-values.yaml.j2
│ │ ├── kerberos-hdfs
│ │ ├── 35-assert.yaml
│ │ ├── 70-assert.yaml
│ │ ├── 10-assert.yaml.j2
│ │ ├── 20-assert.yaml
│ │ ├── 10-install-vector-aggregator-discovery-configmap.yaml.j2
│ │ ├── 01-assert.yaml.j2
│ │ ├── 40-install-postgres.yaml.j2
│ │ ├── 00-patch-ns.yaml.j2
│ │ ├── 40-assert.yaml
│ │ ├── 30-assert.yaml
│ │ ├── 20-install-zk.yaml.j2
│ │ ├── 00-rbac.yaml.j2
│ │ ├── 60-assert.yaml.j2
│ │ ├── helm-bitnami-postgresql-values.yaml.j2
│ │ ├── 60-install-hive.yaml.j2
│ │ ├── 30-install-hdfs.yaml.j2
│ │ ├── 02-create-kerberos-secretclass.yaml.j2
│ │ └── 35-access-hdfs.yaml.j2
│ │ ├── kerberos-s3
│ │ ├── 70-assert.yaml
│ │ ├── 10-assert.yaml.j2
│ │ ├── 10-install-vector-aggregator-discovery-configmap.yaml.j2
│ │ ├── 01-assert.yaml.j2
│ │ ├── 40-install-postgres.yaml.j2
│ │ ├── 00-patch-ns.yaml.j2
│ │ ├── 40-assert.yaml
│ │ ├── 30-setup-minio.yaml.j2
│ │ ├── 00-rbac.yaml.j2
│ │ ├── 60-assert.yaml.j2
│ │ ├── helm-bitnami-postgresql-values.yaml.j2
│ │ ├── 02-create-kerberos-secretclass.yaml.j2
│ │ ├── helm-bitnami-minio-values.yaml.j2
│ │ └── 60-install-hive.yaml.j2
│ │ ├── upgrade
│ │ ├── 00-limit-range.yaml
│ │ ├── 10-assert.yaml.j2
│ │ ├── 10-install-vector-aggregator-discovery-configmap.yaml.j2
│ │ ├── 20-install-postgres.yaml.j2
│ │ ├── 00-patch-ns.yaml.j2
│ │ ├── 20-assert.yaml
│ │ ├── 31-upgrade-hive.yaml.j2
│ │ ├── 30-assert.yaml.j2
│ │ ├── 31-assert.yaml.j2
│ │ ├── helm-bitnami-postgresql-values.yaml.j2
│ │ └── 30-install-hive.yaml.j2
│ │ ├── resources
│ │ ├── 00-assert.yaml.j2
│ │ ├── 00-install-vector-aggregator-discovery-configmap.yaml.j2
│ │ ├── 00-patch-ns.yaml.j2
│ │ ├── 20-assert.yaml
│ │ ├── 10-assert.yaml.j2
│ │ └── 10-install-hive.yaml.j2
│ │ └── cluster-operation
│ │ ├── 00-assert.yaml.j2
│ │ ├── 00-install-vector-aggregator-discovery-configmap.yaml.j2
│ │ ├── 00-patch-ns.yaml.j2
│ │ ├── 10-assert.yaml
│ │ ├── 40-assert.yaml
│ │ ├── 20-assert.yaml
│ │ ├── 30-assert.yaml
│ │ ├── 20-stop-hive.yaml.j2
│ │ ├── 30-pause-hive.yaml.j2
│ │ ├── 40-restart-hive.yaml.j2
│ │ └── 10-install-hive.yaml.j2
├── interu.yaml
├── release.yaml
├── kuttl-test.yaml.jinja2
└── README-templating.md
├── docs
├── antora.yml
├── modules
│ └── hive
│ │ ├── examples
│ │ └── getting_started
│ │ │ ├── test_getting_started_helm.sh
│ │ │ ├── test_getting_started_stackablectl.sh
│ │ │ ├── install-operator-output.txt
│ │ │ ├── install-operator-output.txt.j2
│ │ │ ├── hive-minio-credentials.yaml
│ │ │ ├── hive-minio-credentials-secret-class.yaml
│ │ │ ├── hive-minio-s3-connection.yaml
│ │ │ ├── release.yaml
│ │ │ ├── release.yaml.j2
│ │ │ ├── stackablectl-hive-postgres-minio-stack.yaml
│ │ │ ├── minio-stack.yaml
│ │ │ ├── hive-test-helper.yaml
│ │ │ ├── minio-stack.yaml.j2
│ │ │ ├── hive-postgres-s3.yaml
│ │ │ ├── hive-postgres-s3.yaml.j2
│ │ │ ├── postgres-stack.yaml
│ │ │ └── postgres-stack.yaml.j2
│ │ ├── pages
│ │ ├── reference
│ │ │ ├── crds.adoc
│ │ │ ├── index.adoc
│ │ │ ├── commandline-parameters.adoc
│ │ │ ├── environment-variables.adoc
│ │ │ └── discovery.adoc
│ │ ├── usage-guide
│ │ │ ├── operations
│ │ │ │ ├── cluster-operations.adoc
│ │ │ │ ├── index.adoc
│ │ │ │ ├── pod-disruptions.adoc
│ │ │ │ ├── graceful-shutdown.adoc
│ │ │ │ └── pod-placement.adoc
│ │ │ ├── monitoring.adoc
│ │ │ ├── index.adoc
│ │ │ ├── logging.adoc
│ │ │ ├── listenerclass.adoc
│ │ │ ├── resources.adoc
│ │ │ └── data-storage.adoc
│ │ ├── required-external-components.adoc
│ │ ├── troubleshooting
│ │ │ └── index.adoc
│ │ └── getting_started
│ │ │ ├── index.adoc
│ │ │ └── first_steps.adoc
│ │ └── partials
│ │ ├── supported-versions.adoc
│ │ └── nav.adoc
└── templating_vars.yaml
├── rust
└── operator-binary
│ ├── src
│ ├── config
│ │ └── mod.rs
│ ├── operations
│ │ ├── mod.rs
│ │ ├── graceful_shutdown.rs
│ │ └── pdb.rs
│ └── crd
│ │ └── security.rs
│ ├── build.rs
│ └── Cargo.toml
├── scripts
├── run_tests.sh
├── render_readme.sh
├── generate-manifests.sh
└── docs_templating.sh
├── .gitattributes
├── deploy
├── helm
│ ├── chart_testing.yaml
│ ├── ct.yaml
│ └── hive-operator
│ │ ├── templates
│ │ ├── configmap.yaml
│ │ ├── service.yaml
│ │ ├── _maintenance.tpl
│ │ ├── serviceaccount.yaml
│ │ ├── _telemetry.tpl
│ │ └── _helpers.tpl
│ │ ├── Chart.yaml
│ │ ├── .helmignore
│ │ ├── README.md
│ │ └── values.yaml
├── stackable-operators-ns.yaml
└── DO_NOT_EDIT.md
├── .actionlint.yaml
├── .github
├── actionlint.yaml
├── ISSUE_TEMPLATE
│ ├── config.yml
│ ├── normal-issue.md
│ ├── 01-normal-issue.md
│ ├── new_version.md
│ └── 02-bug_report.yml
├── workflows
│ ├── general_daily_security.yml
│ ├── pr_pre-commit.yaml
│ └── integration-test.yml
├── PULL_REQUEST_TEMPLATE
│ ├── pre-release-getting-started-script.md
│ └── pre-release-rust-deps.md
└── pull_request_template.md
├── rust-toolchain.toml
├── .readme
├── static
│ └── borrowed
│ │ ├── stackable_overview.png
│ │ └── Icon_Stackable.svg
├── partials
│ ├── borrowed
│ │ ├── header.md.j2
│ │ ├── related_reading.md.j2
│ │ ├── overview_blurb.md.j2
│ │ ├── documentation.md.j2
│ │ └── links.md.j2
│ └── main.md.j2
└── README.md.j2
├── nix
├── meta.json
├── README.md
└── sources.json
├── .envrc.sample
├── .vscode
├── settings.json
└── launch.json
├── .pylintrc
├── .gitignore
├── renovate.json
├── .dockerignore
├── .hadolint.yaml
├── rustfmt.toml
├── .yamllint.yaml
├── examples
├── simple-hive-cluster.yaml
├── hive-opa-cluster.yaml
└── simple-hive-cluster-postgres-s3.yaml
├── .markdownlint.yaml
├── Cargo.toml
├── shell.nix
├── crate-hashes.json
├── deny.toml
└── Tiltfile
/tests/templates/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/antora.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: home
3 | version: "nightly"
4 |
--------------------------------------------------------------------------------
/rust/operator-binary/src/config/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod jvm;
2 | pub mod opa;
3 |
--------------------------------------------------------------------------------
/scripts/run_tests.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ./scripts/run-tests "$@"
4 |
--------------------------------------------------------------------------------
/rust/operator-binary/src/operations/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod graceful_shutdown;
2 | pub mod pdb;
3 |
--------------------------------------------------------------------------------
/rust/operator-binary/build.rs:
--------------------------------------------------------------------------------
1 | fn main() {
2 | built::write_built_file().unwrap();
3 | }
4 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/certs/root-ca.crt.srl:
--------------------------------------------------------------------------------
1 | 7046F738B5BC7F7DC43E1E7E2EF5B23832C7A59A
2 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | nix/** linguist-generated
2 | Cargo.nix linguist-generated
3 | crate-hashes.json linguist-generated
4 |
--------------------------------------------------------------------------------
/deploy/helm/chart_testing.yaml:
--------------------------------------------------------------------------------
1 | remote: origin
2 | target-branch: main
3 | chart-dirs:
4 | - deploy/helm
5 | all: true
6 |
--------------------------------------------------------------------------------
/deploy/stackable-operators-ns.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: stackable-operators
6 |
--------------------------------------------------------------------------------
/.actionlint.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | self-hosted-runner:
3 | # Ubicloud machines we are using
4 | labels:
5 | - ubicloud-standard-8-arm
6 |
--------------------------------------------------------------------------------
/.github/actionlint.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | self-hosted-runner:
3 | # Ubicloud machines we are using
4 | labels:
5 | - ubicloud-standard-8-arm
6 |
--------------------------------------------------------------------------------
/rust-toolchain.toml:
--------------------------------------------------------------------------------
1 | # DO NOT EDIT, this file is generated by operator-templating
2 | [toolchain]
3 | channel = "1.89.0"
4 | profile = "default"
5 |
--------------------------------------------------------------------------------
/.readme/static/borrowed/stackable_overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stackabletech/hive-operator/HEAD/.readme/static/borrowed/stackable_overview.png
--------------------------------------------------------------------------------
/nix/meta.json:
--------------------------------------------------------------------------------
1 | {"operator": {"name": "hive-operator", "pretty_string": "Apache Hive", "product_string": "hive", "url": "stackabletech/hive-operator.git"}}
2 |
--------------------------------------------------------------------------------
/.envrc.sample:
--------------------------------------------------------------------------------
1 | # vim: syntax=conf
2 | #
3 | # If you use direnv, you can autoload the nix shell:
4 | # You will need to allow the directory the first time.
5 | use nix
6 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/test_getting_started_helm.sh:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env bash
2 | set -euo pipefail
3 |
4 | cd "$(dirname "$0")"
5 | ./getting_started.sh helm
6 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/orphaned-resources/03-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | metadata:
5 | name: remove-datanode
6 | timeout: 600
7 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/test_getting_started_stackablectl.sh:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env bash
2 | set -euo pipefail
3 |
4 | cd "$(dirname "$0")"
5 | ./getting_started.sh stackablectl
6 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/reference/crds.adoc:
--------------------------------------------------------------------------------
1 | = CRD Reference
2 |
3 | Find all CRD references for the Stackable Operator for Apache Hive at: {crd-docs-base-url}/hive-operator/{crd-docs-version}.
4 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "rust-analyzer.rustfmt.overrideCommand": [
3 | "rustfmt",
4 | "+nightly-2025-10-23",
5 | "--edition",
6 | "2024",
7 | "--"
8 | ],
9 | }
10 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/install-operator-output.txt:
--------------------------------------------------------------------------------
1 | Installed commons=0.0.0-dev operator
2 | Installed secret=0.0.0-dev operator
3 | Installed listener=0.0.0-dev operator
4 | Installed hive=0.0.0-dev operator
5 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/external-access/10-listener-classes.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: |
6 | envsubst < listener-classes.yaml | kubectl apply -n "$NAMESPACE" -f -
7 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/06-test-log-aggregation.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: |
6 | kubectl cp ./test_log_aggregation.py "$NAMESPACE/hive-test-runner-0:/tmp"
7 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/external-access/listener-classes.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: listeners.stackable.tech/v1alpha1
3 | kind: ListenerClass
4 | metadata:
5 | name: test-cluster-internal-$NAMESPACE
6 | spec:
7 | serviceType: ClusterIP
8 |
--------------------------------------------------------------------------------
/.pylintrc:
--------------------------------------------------------------------------------
1 | [MESSAGES CONTROL]
2 |
3 | # These rules are for missing docstrings, which don't matter much for most of our simple scripts
4 | disable=C0114,C0115,C0116
5 |
6 | [FORMAT]
7 |
8 | max-line-length=999
9 | indent-string=' '
10 |
--------------------------------------------------------------------------------
/.readme/partials/borrowed/header.md.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | {{title}}
7 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | tests/_work/
2 | debug/
3 | target/
4 | **/*.rs.bk
5 |
6 | .idea/
7 | *.iws
8 | *.iml
9 |
10 | *.tgz
11 |
12 | result
13 | image.tar
14 |
15 | tilt_options.json
16 |
17 | .direnv/
18 | .direnvrc
19 | .envrc
20 |
21 | .DS_Store
22 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/external-access/20-install-hive.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | timeout: 600
5 | commands:
6 | - script: >
7 | envsubst < install-hive.yaml |
8 | kubectl apply -n "$NAMESPACE" -f -
9 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/35-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 600
5 | ---
6 | apiVersion: batch/v1
7 | kind: Job
8 | metadata:
9 | name: access-hdfs
10 | status:
11 | succeeded: 1
12 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/70-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 300
5 | ---
6 | apiVersion: batch/v1
7 | kind: Job
8 | metadata:
9 | name: access-hive
10 | status:
11 | succeeded: 1
12 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-s3/70-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 300
5 | ---
6 | apiVersion: batch/v1
7 | kind: Job
8 | metadata:
9 | name: access-hive
10 | status:
11 | succeeded: 1
12 |
--------------------------------------------------------------------------------
/docs/templating_vars.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | helm:
3 | repo_name: sdp-charts
4 | repo_url: oci.stackable.tech
5 | versions:
6 | commons: 0.0.0-dev
7 | secret: 0.0.0-dev
8 | listener: 0.0.0-dev
9 | hive: 0.0.0-dev
10 | minio: 5.4.0
11 | postgresql: 16.5.0
12 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/06-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | commands:
5 | - script: >-
6 | kubectl exec --namespace="$NAMESPACE" hive-test-runner-0 --
7 | python /tmp/test_log_aggregation.py -n "$NAMESPACE"
8 |
--------------------------------------------------------------------------------
/.readme/partials/borrowed/related_reading.md.j2:
--------------------------------------------------------------------------------
1 |
2 | {%- if related_reading_links -%}
3 | ## Related Reading
4 | {% for (text, link) in related_reading_links %}
5 | * [{{text}}]({{link}})
6 | {%- endfor %}
7 | {%- endif -%}
8 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/00-limit-range.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: LimitRange
4 | metadata:
5 | name: limit-request-ratio
6 | spec:
7 | limits:
8 | - type: "Container"
9 | maxLimitRequestRatio:
10 | cpu: 5
11 | memory: 1
12 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/install-operator-output.txt.j2:
--------------------------------------------------------------------------------
1 | Installed commons={{ versions.commons }} operator
2 | Installed secret={{ versions.secret }} operator
3 | Installed listener={{ versions.listener }} operator
4 | Installed hive={{ versions.hive }} operator
5 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/external-access/00-limit-range.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: LimitRange
4 | metadata:
5 | name: limit-request-ratio
6 | spec:
7 | limits:
8 | - type: "Container"
9 | maxLimitRequestRatio:
10 | cpu: 5
11 | memory: 1
12 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/70-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 300
5 | ---
6 | apiVersion: apps/v1
7 | kind: StatefulSet
8 | metadata:
9 | name: test-metastore
10 | status:
11 | readyReplicas: 1
12 | replicas: 1
13 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/upgrade/00-limit-range.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: LimitRange
4 | metadata:
5 | name: limit-request-ratio
6 | spec:
7 | limits:
8 | - type: "Container"
9 | maxLimitRequestRatio:
10 | cpu: 5
11 | memory: 1
12 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/00-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
5 | ---
6 | apiVersion: v1
7 | kind: ConfigMap
8 | metadata:
9 | name: vector-aggregator-discovery
10 | {% endif %}
11 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/05-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 300
5 | ---
6 | apiVersion: apps/v1
7 | kind: StatefulSet
8 | metadata:
9 | name: hive-test-runner
10 | status:
11 | readyReplicas: 1
12 | replicas: 1
13 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/resources/00-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
5 | ---
6 | apiVersion: v1
7 | kind: ConfigMap
8 | metadata:
9 | name: vector-aggregator-discovery
10 | {% endif %}
11 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/10-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
5 | ---
6 | apiVersion: v1
7 | kind: ConfigMap
8 | metadata:
9 | name: vector-aggregator-discovery
10 | {% endif %}
11 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/upgrade/10-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
5 | ---
6 | apiVersion: v1
7 | kind: ConfigMap
8 | metadata:
9 | name: vector-aggregator-discovery
10 | {% endif %}
11 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/10-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
5 | ---
6 | apiVersion: v1
7 | kind: ConfigMap
8 | metadata:
9 | name: vector-aggregator-discovery
10 | {% endif %}
11 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-s3/10-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
5 | ---
6 | apiVersion: v1
7 | kind: ConfigMap
8 | metadata:
9 | name: vector-aggregator-discovery
10 | {% endif %}
11 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/01-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 600
5 | ---
6 | apiVersion: apps/v1
7 | kind: StatefulSet
8 | metadata:
9 | name: hive-vector-aggregator
10 | status:
11 | readyReplicas: 1
12 | replicas: 1
13 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/cluster-operation/00-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
5 | ---
6 | apiVersion: v1
7 | kind: ConfigMap
8 | metadata:
9 | name: vector-aggregator-discovery
10 | {% endif %}
11 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/03-create-configmap-with-prepared-logs.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: >
6 | kubectl create configmap prepared-logs
7 | --from-file=prepared-logs.log4j2.xml
8 | --namespace="$NAMESPACE"
9 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/orphaned-resources/00-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
5 | ---
6 | apiVersion: v1
7 | kind: ConfigMap
8 | metadata:
9 | name: vector-aggregator-discovery
10 | {% endif %}
11 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/hive-minio-credentials.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 | name: hive-s3-secret
6 | labels:
7 | secrets.stackable.tech/class: hive-s3-secret-class
8 | stringData:
9 | accessKey: hive
10 | secretKey: hivehive
11 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/20-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 300
5 | ---
6 | apiVersion: apps/v1
7 | kind: StatefulSet
8 | metadata:
9 | name: hdfs-zk-server-default
10 | status:
11 | readyReplicas: 1
12 | replicas: 1
13 |
--------------------------------------------------------------------------------
/deploy/helm/ct.yaml:
--------------------------------------------------------------------------------
1 | # This file is used for chart-testing (https://github.com/helm/chart-testing)
2 | # The name "ct.yaml" is not very self-descriptive but it is the default that chart-testing is looking for
3 | ---
4 | remote: origin
5 | target-branch: main
6 | chart-dirs:
7 | - deploy/helm
8 | all: true
9 |
--------------------------------------------------------------------------------
/deploy/helm/hive-operator/templates/configmap.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | data:
4 | {{ (.Files.Glob "configs/*").AsConfig | indent 2 }}
5 | kind: ConfigMap
6 | metadata:
7 | name: {{ include "operator.fullname" . }}-configmap
8 | labels:
9 | {{- include "operator.labels" . | nindent 4 }}
10 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/hive-minio-credentials-secret-class.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: secrets.stackable.tech/v1alpha1
3 | kind: SecretClass
4 | metadata:
5 | name: hive-s3-secret-class
6 | spec:
7 | backend:
8 | k8sSearch:
9 | searchNamespace:
10 | pod: {}
11 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/80-prepare-test-metastore.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: kubectl cp -n "$NAMESPACE" ./test_metastore.py test-metastore-0:/tmp
6 | - script: kubectl cp -n "$NAMESPACE" ./test_metastore_opa.py test-metastore-0:/tmp
7 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/hive-minio-s3-connection.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: s3.stackable.tech/v1alpha1
3 | kind: S3Connection
4 | metadata:
5 | name: minio
6 | spec:
7 | host: minio
8 | port: 9000
9 | accessStyle: Path
10 | credentials:
11 | secretClass: hive-s3-secret-class
12 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/usage-guide/operations/cluster-operations.adoc:
--------------------------------------------------------------------------------
1 |
2 | = Cluster operation
3 |
4 | Hive installations can be configured with different cluster operations like pausing reconciliation or stopping the cluster. See xref:concepts:operations/cluster_operations.adoc[cluster operations] for more details.
5 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/usage-guide/monitoring.adoc:
--------------------------------------------------------------------------------
1 | = Monitoring
2 | :description: The managed Hive instances are automatically configured to export Prometheus metrics.
3 |
4 | The managed Hive instances are automatically configured to export Prometheus metrics.
5 | See xref:operators:monitoring.adoc[] for more details.
6 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/00-install-vector-aggregator-discovery-configmap.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
2 | ---
3 | apiVersion: v1
4 | kind: ConfigMap
5 | metadata:
6 | name: vector-aggregator-discovery
7 | data:
8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }}
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/10-install-vector-aggregator-discovery-configmap.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
2 | ---
3 | apiVersion: v1
4 | kind: ConfigMap
5 | metadata:
6 | name: vector-aggregator-discovery
7 | data:
8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }}
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/upgrade/10-install-vector-aggregator-discovery-configmap.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
2 | ---
3 | apiVersion: v1
4 | kind: ConfigMap
5 | metadata:
6 | name: vector-aggregator-discovery
7 | data:
8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }}
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/10-install-vector-aggregator-discovery-configmap.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
2 | ---
3 | apiVersion: v1
4 | kind: ConfigMap
5 | metadata:
6 | name: vector-aggregator-discovery
7 | data:
8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }}
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-s3/10-install-vector-aggregator-discovery-configmap.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
2 | ---
3 | apiVersion: v1
4 | kind: ConfigMap
5 | metadata:
6 | name: vector-aggregator-discovery
7 | data:
8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }}
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/resources/00-install-vector-aggregator-discovery-configmap.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
2 | ---
3 | apiVersion: v1
4 | kind: ConfigMap
5 | metadata:
6 | name: vector-aggregator-discovery
7 | data:
8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }}
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/cluster-operation/00-install-vector-aggregator-discovery-configmap.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
2 | ---
3 | apiVersion: v1
4 | kind: ConfigMap
5 | metadata:
6 | name: vector-aggregator-discovery
7 | data:
8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }}
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/external-access/00-install-vector-aggregator-discovery-configmap.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
2 | ---
3 | apiVersion: v1
4 | kind: ConfigMap
5 | metadata:
6 | name: vector-aggregator-discovery
7 | data:
8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }}
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/orphaned-resources/00-install-vector-aggregator-discovery-configmap.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
2 | ---
3 | apiVersion: v1
4 | kind: ConfigMap
5 | metadata:
6 | name: vector-aggregator-discovery
7 | data:
8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }}
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/deploy/helm/hive-operator/Chart.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v2
3 | name: hive-operator
4 | version: "0.0.0-dev"
5 | appVersion: "0.0.0-dev"
6 | description: The Stackable Operator for Apache Hive
7 | home: https://github.com/stackabletech/hive-operator
8 | maintainers:
9 | - name: Stackable
10 | url: https://www.stackable.tech
11 |
--------------------------------------------------------------------------------
/renovate.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json",
3 | "extends": [
4 | "local>stackabletech/.github:renovate-config"
5 | ],
6 | "ignorePaths": [".github/workflows/build.yaml", ".github/workflows/general_daily_security.yml", ".github/workflows/integration-test.yml", ".github/workflows/pr_pre-commit.yaml"]
7 | }
8 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/01-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 300
5 | {% if test_scenario['values']['kerberos-backend'] == 'mit' %}
6 | ---
7 | apiVersion: apps/v1
8 | kind: StatefulSet
9 | metadata:
10 | name: krb5-kdc
11 | status:
12 | readyReplicas: 1
13 | replicas: 1
14 | {% endif %}
15 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-s3/01-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 300
5 | {% if test_scenario['values']['kerberos-backend'] == 'mit' %}
6 | ---
7 | apiVersion: apps/v1
8 | kind: StatefulSet
9 | metadata:
10 | name: krb5-kdc
11 | status:
12 | readyReplicas: 1
13 | replicas: 1
14 | {% endif %}
15 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/usage-guide/operations/index.adoc:
--------------------------------------------------------------------------------
1 | = Operations
2 |
3 | This section of the documentation is intended for the operations teams that maintain a Stackable Data Platform installation.
4 |
5 | Read the xref:concepts:operations/index.adoc[Concepts page on Operations] that contains the necessary details to operate the platform in a production environment.
6 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/62-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # This test checks if the containerdebug-state.json file is present and valid
3 | apiVersion: kuttl.dev/v1beta1
4 | kind: TestAssert
5 | timeout: 60
6 | commands:
7 | - script: kubectl exec -n "$NAMESPACE" --container hive hive-metastore-default-0 -- cat /stackable/log/containerdebug-state.json | jq --exit-status '"valid JSON"'
8 |
--------------------------------------------------------------------------------
/.readme/partials/borrowed/overview_blurb.md.j2:
--------------------------------------------------------------------------------
1 |
2 | It is part of the Stackable Data Platform, a curated selection of the best open source data apps like Apache Kafka, Apache Druid, Trino or Apache Spark, [all](#other-operators) working together seamlessly. Based on Kubernetes, it runs everywhere – [on prem or in the cloud](#supported-platforms).
3 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/50-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 300
5 | commands:
6 | - script: kubectl -n $NAMESPACE rollout status daemonset opa-server-default --timeout 300s
7 | ---
8 | apiVersion: v1
9 | kind: ConfigMap
10 | metadata:
11 | name: hive-opa-bundle
12 | labels:
13 | opa.stackable.tech/bundle: "hms"
14 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/02-install-postgres.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: >-
6 | helm install hive
7 | --version={{ test_scenario['values']['postgres'] }}
8 | --namespace "$NAMESPACE"
9 | -f helm-bitnami-postgresql-values.yaml
10 | --repo https://charts.bitnami.com/bitnami postgresql
11 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/40-install-postgres.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: >-
6 | helm install postgresql
7 | --version={{ test_scenario['values']['postgres'] }}
8 | --namespace "$NAMESPACE"
9 | -f helm-bitnami-postgresql-values.yaml
10 | --repo https://charts.bitnami.com/bitnami postgresql
11 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/usage-guide/index.adoc:
--------------------------------------------------------------------------------
1 | = Usage guide
2 | :page-aliases: usage.adoc
3 |
4 | This section helps you to use and configure the Stackable operator for Apache Hive in various ways.
5 | You should already be familiar with how to set up a basic instance.
6 | Follow the xref:getting_started/index.adoc[] guide to learn how to set up a basic instance with all the required dependencies.
7 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/upgrade/20-install-postgres.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: >-
6 | helm install postgresql
7 | --version={{ test_scenario['values']['postgres'] }}
8 | --namespace "$NAMESPACE"
9 | -f helm-bitnami-postgresql-values.yaml
10 | --repo https://charts.bitnami.com/bitnami postgresql
11 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | debug/
2 | target/
3 | **/*.rs.bk
4 |
5 | .idea/
6 | *.iws
7 |
8 | Cargo.nix
9 | crate-hashes.json
10 | result
11 | image.tar
12 |
13 | # We do NOT want to ignore .git because we use the `built` crate to gather the current git commit hash at build time
14 | # This means we need the .git directory in our Docker image, it will be thrown away and won't be included in the final image
15 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/40-install-postgres.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: >-
6 | helm install postgresql
7 | --version={{ test_scenario['values']['postgres'] }}
8 | --namespace "$NAMESPACE"
9 | -f helm-bitnami-postgresql-values.yaml
10 | --repo https://charts.bitnami.com/bitnami postgresql
11 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-s3/40-install-postgres.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: >-
6 | helm install postgresql
7 | --version={{ test_scenario['values']['postgres'] }}
8 | --namespace "$NAMESPACE"
9 | -f helm-bitnami-postgresql-values.yaml
10 | --repo https://charts.bitnami.com/bitnami postgresql
11 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/00-patch-ns.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if test_scenario['values']['openshift'] == 'true' %}
2 | # see https://github.com/stackabletech/issues/issues/566
3 | ---
4 | apiVersion: kuttl.dev/v1beta1
5 | kind: TestStep
6 | commands:
7 | - script: kubectl patch namespace "$NAMESPACE" -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}'
8 | timeout: 120
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/resources/00-patch-ns.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if test_scenario['values']['openshift'] == 'true' %}
2 | # see https://github.com/stackabletech/issues/issues/566
3 | ---
4 | apiVersion: kuttl.dev/v1beta1
5 | kind: TestStep
6 | commands:
7 | - script: kubectl patch namespace "$NAMESPACE" -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}'
8 | timeout: 120
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/00-patch-ns.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if test_scenario['values']['openshift'] == 'true' %}
2 | # see https://github.com/stackabletech/issues/issues/566
3 | ---
4 | apiVersion: kuttl.dev/v1beta1
5 | kind: TestStep
6 | commands:
7 | - script: kubectl patch namespace "$NAMESPACE" -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}'
8 | timeout: 120
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/40-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 600
5 | ---
6 | apiVersion: v1
7 | kind: Service
8 | metadata:
9 | name: postgresql
10 | labels:
11 | app.kubernetes.io/name: postgresql
12 | ---
13 | apiVersion: apps/v1
14 | kind: StatefulSet
15 | metadata:
16 | name: postgresql
17 | status:
18 | readyReplicas: 1
19 | replicas: 1
20 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/upgrade/00-patch-ns.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if test_scenario['values']['openshift'] == 'true' %}
2 | # see https://github.com/stackabletech/issues/issues/566
3 | ---
4 | apiVersion: kuttl.dev/v1beta1
5 | kind: TestStep
6 | commands:
7 | - script: kubectl patch namespace "$NAMESPACE" -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}'
8 | timeout: 120
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/upgrade/20-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 600
5 | ---
6 | apiVersion: v1
7 | kind: Service
8 | metadata:
9 | name: postgresql
10 | labels:
11 | app.kubernetes.io/name: postgresql
12 | ---
13 | apiVersion: apps/v1
14 | kind: StatefulSet
15 | metadata:
16 | name: postgresql
17 | status:
18 | readyReplicas: 1
19 | replicas: 1
20 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/00-patch-ns.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if test_scenario['values']['openshift'] == 'true' %}
2 | # see https://github.com/stackabletech/issues/issues/566
3 | ---
4 | apiVersion: kuttl.dev/v1beta1
5 | kind: TestStep
6 | commands:
7 | - script: kubectl patch namespace "$NAMESPACE" -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}'
8 | timeout: 120
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/40-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 600
5 | ---
6 | apiVersion: v1
7 | kind: Service
8 | metadata:
9 | name: postgresql
10 | labels:
11 | app.kubernetes.io/name: postgresql
12 | ---
13 | apiVersion: apps/v1
14 | kind: StatefulSet
15 | metadata:
16 | name: postgresql
17 | status:
18 | readyReplicas: 1
19 | replicas: 1
20 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-s3/00-patch-ns.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if test_scenario['values']['openshift'] == 'true' %}
2 | # see https://github.com/stackabletech/issues/issues/566
3 | ---
4 | apiVersion: kuttl.dev/v1beta1
5 | kind: TestStep
6 | commands:
7 | - script: kubectl patch namespace "$NAMESPACE" -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}'
8 | timeout: 120
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-s3/40-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 600
5 | ---
6 | apiVersion: v1
7 | kind: Service
8 | metadata:
9 | name: postgresql
10 | labels:
11 | app.kubernetes.io/name: postgresql
12 | ---
13 | apiVersion: apps/v1
14 | kind: StatefulSet
15 | metadata:
16 | name: postgresql
17 | status:
18 | readyReplicas: 1
19 | replicas: 1
20 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/cluster-operation/00-patch-ns.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if test_scenario['values']['openshift'] == 'true' %}
2 | # see https://github.com/stackabletech/issues/issues/566
3 | ---
4 | apiVersion: kuttl.dev/v1beta1
5 | kind: TestStep
6 | commands:
7 | - script: kubectl patch namespace "$NAMESPACE" -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}'
8 | timeout: 120
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/external-access/00-patch-ns.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if test_scenario['values']['openshift'] == 'true' %}
2 | # see https://github.com/stackabletech/issues/issues/566
3 | ---
4 | apiVersion: kuttl.dev/v1beta1
5 | kind: TestStep
6 | commands:
7 | - script: kubectl patch namespace "$NAMESPACE" -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}'
8 | timeout: 120
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/02-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 600
5 | ---
6 | apiVersion: v1
7 | kind: Service
8 | metadata:
9 | name: hive-postgresql
10 | labels:
11 | app.kubernetes.io/name: postgresql
12 | ---
13 | apiVersion: apps/v1
14 | kind: StatefulSet
15 | metadata:
16 | name: hive-postgresql
17 | status:
18 | readyReplicas: 1
19 | replicas: 1
20 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/orphaned-resources/00-patch-ns.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if test_scenario['values']['openshift'] == 'true' %}
2 | # see https://github.com/stackabletech/issues/issues/566
3 | ---
4 | apiVersion: kuttl.dev/v1beta1
5 | kind: TestStep
6 | commands:
7 | - script: kubectl patch namespace "$NAMESPACE" -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}'
8 | timeout: 120
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/cluster-operation/10-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 900
5 | commands:
6 | - script: kubectl -n "$NAMESPACE" wait --for=condition=available hiveclusters.hive.stackable.tech/test-hive --timeout 601s
7 | ---
8 | apiVersion: apps/v1
9 | kind: StatefulSet
10 | metadata:
11 | name: test-hive-metastore-default
12 | status:
13 | readyReplicas: 1
14 | replicas: 1
15 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/cluster-operation/40-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 600
5 | commands:
6 | - script: kubectl -n "$NAMESPACE" wait --for=condition=available hiveclusters.hive.stackable.tech/test-hive --timeout 601s
7 | ---
8 | apiVersion: apps/v1
9 | kind: StatefulSet
10 | metadata:
11 | name: test-hive-metastore-default
12 | status:
13 | readyReplicas: 1
14 | replicas: 1
15 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/cluster-operation/20-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 300
5 | commands:
6 | - script: kubectl -n "$NAMESPACE" wait --for=condition=stopped hiveclusters.hive.stackable.tech/test-hive --timeout 301s
7 | ---
8 | apiVersion: apps/v1
9 | kind: StatefulSet
10 | metadata:
11 | name: test-hive-metastore-default
12 | status:
13 | availableReplicas: 0
14 | replicas: 0
15 |
--------------------------------------------------------------------------------
/scripts/render_readme.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -euo pipefail
3 |
4 | # Check if jinja2 is there
5 | if ! command -v jinja2 &> /dev/null
6 | then
7 | echo "jinja2 could not be found. Use 'pip install jinja2-cli' to install it."
8 | exit 1
9 | fi
10 |
11 | SCRIPT_DIR=$(dirname "$0")
12 | cd "$SCRIPT_DIR/../.readme"
13 | jinja2 README.md.j2 -o ../README.md
14 | cd ..
15 |
16 | python3 scripts/ensure_one_trailing_newline.py README.md
17 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/reference/index.adoc:
--------------------------------------------------------------------------------
1 | = Reference
2 |
3 | Consult the reference documentation section to find exhaustive information on:
4 |
5 | * Descriptions and default values of all properties in the CRDs used by this operator in the xref:reference/crds.adoc[].
6 | * The properties in the xref:reference/discovery.adoc[].
7 | * The xref:reference/commandline-parameters.adoc[] and xref:reference/environment-variables.adoc[] accepted by the operator.
8 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/cluster-operation/30-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 300
5 | commands:
6 | - script: kubectl -n "$NAMESPACE" wait --for=condition=reconciliationPaused hiveclusters.hive.stackable.tech/test-hive --timeout 301s
7 | ---
8 | apiVersion: apps/v1
9 | kind: StatefulSet
10 | metadata:
11 | name: test-hive-metastore-default
12 | status:
13 | availableReplicas: 0
14 | replicas: 0
15 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/release.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | releases:
3 | hive-getting-started:
4 | releaseDate: 2023-03-14
5 | description: Demo / Test release for Hive getting started guide
6 | products:
7 | commons:
8 | operatorVersion: 0.0.0-dev
9 | hive:
10 | operatorVersion: 0.0.0-dev
11 | listener:
12 | operatorVersion: 0.0.0-dev
13 | secret:
14 | operatorVersion: 0.0.0-dev
15 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/orphaned-resources/03-errors.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: StatefulSet
4 | metadata:
5 | name: test-hive-metastore-remove
6 | ---
7 | apiVersion: v1
8 | kind: Pod
9 | metadata:
10 | name: test-hive-metastore-remove-0
11 | ---
12 | apiVersion: v1
13 | kind: ConfigMap
14 | metadata:
15 | name: test-hive-metastore-remove
16 | ---
17 | apiVersion: v1
18 | kind: Service
19 | metadata:
20 | name: test-hive-metastore-remove
21 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/orphaned-resources/04-errors.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: StatefulSet
4 | metadata:
5 | name: test-hive-metastore-default
6 | ---
7 | apiVersion: v1
8 | kind: Pod
9 | metadata:
10 | name: test-hive-metastore-default-0
11 | ---
12 | apiVersion: v1
13 | kind: ConfigMap
14 | metadata:
15 | name: test-hive-metastore-default
16 | ---
17 | apiVersion: v1
18 | kind: Service
19 | metadata:
20 | name: test-hive-metastore-default
21 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | ---
2 | blank_issues_enabled: true
3 | contact_links:
4 | - name: 🙋🏾 Question
5 | about: Use this to ask a question about this project
6 | url: https://github.com/orgs/stackabletech/discussions/new?category=q-a
7 | - name: 🚀 Feature Requests and other things
8 | about: Open an issue with your feature request or any other issue not covered elsewhere
9 | url: https://github.com/stackabletech/hive-operator/issues/new
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/04-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 900
5 | ---
6 | apiVersion: apps/v1
7 | kind: StatefulSet
8 | metadata:
9 | name: test-hive-metastore-automatic-log-config
10 | status:
11 | readyReplicas: 1
12 | replicas: 1
13 | ---
14 | apiVersion: apps/v1
15 | kind: StatefulSet
16 | metadata:
17 | name: test-hive-metastore-custom-log-config
18 | status:
19 | readyReplicas: 1
20 | replicas: 1
21 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/orphaned-resources/04-change-rolegroup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: hive.stackable.tech/v1alpha1
3 | kind: HiveCluster
4 | metadata:
5 | name: test-hive
6 | spec:
7 | clusterConfig:
8 | database:
9 | connString: jdbc:derby:;databaseName=/tmp/hive;create=true
10 | credentialsSecret: hive-credentials
11 | dbType: derby
12 | metastore:
13 | roleGroups:
14 | default: null
15 | newrolegroup:
16 | replicas: 1
17 |
--------------------------------------------------------------------------------
/.hadolint.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | ignored:
3 | # Warning: Use the -y switch to avoid manual input dnf install -y
4 | # https://github.com/hadolint/hadolint/wiki/DL3038
5 | # Reason: We set `assumeyes=True` in dnf.conf in our base image
6 | - DL3038
7 |
8 | # Warning: Specify version with dnf install -y -
9 | # https://github.com/hadolint/hadolint/wiki/DL3041
10 | # Reason: It's good advice, but we're not set up to pin versions just yet
11 | - DL3041
12 |
--------------------------------------------------------------------------------
/deploy/DO_NOT_EDIT.md:
--------------------------------------------------------------------------------
1 | # DO NOT EDIT
2 |
3 | These Helm charts and manifests are automatically generated.
4 | Please do not manually edit anything in this directory except for the
5 | files explicitly mentioned below.
6 |
7 | The following files are ok to edit:
8 |
9 | - helm/hive-operator/templates/roles.yaml
10 | - helm/hive-operator/values.yaml
11 |
12 | The details are in motion, but check this repository for more information.
13 |
14 |
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | # This file includes unstable features, so you need to run "cargo +nightly fmt" to format your code.
2 | # It's also ok to use the stable toolchain by simply running "cargo fmt", but using the nightly formatter is preferred.
3 |
4 | # https://doc.rust-lang.org/nightly/edition-guide/rust-2024/rustfmt-style-edition.html
5 | style_edition = "2024"
6 | imports_granularity = "Crate"
7 | group_imports = "StdExternalCrate"
8 | reorder_impl_items = true
9 | use_field_init_shorthand = true
10 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/usage-guide/operations/pod-disruptions.adoc:
--------------------------------------------------------------------------------
1 | = Allowed Pod disruptions
2 |
3 | You can configure the permitted Pod disruptions for Hive nodes as described in xref:concepts:operations/pod_disruptions.adoc[].
4 |
5 | Unless you configure something else or disable the default PodDisruptionBudgets (PDBs), the operator writes the following PDBs:
6 |
7 | == Metastores
8 | Allow only a single metastore to be offline at any given time, regardless of the number of replicas or `roleGroups`.
9 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/orphaned-resources/01-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | metadata:
5 | name: install-hive
6 | timeout: 900
7 | ---
8 | apiVersion: apps/v1
9 | kind: StatefulSet
10 | metadata:
11 | name: test-hive-metastore-default
12 | status:
13 | readyReplicas: 1
14 | replicas: 1
15 | ---
16 | apiVersion: apps/v1
17 | kind: StatefulSet
18 | metadata:
19 | name: test-hive-metastore-remove
20 | status:
21 | readyReplicas: 1
22 | replicas: 1
23 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/upgrade/31-upgrade-hive.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: hive.stackable.tech/v1alpha1
3 | kind: HiveCluster
4 | metadata:
5 | name: hive
6 | spec:
7 | image:
8 | {% if test_scenario['values']['hive-new'].find(",") > 0 %}
9 | custom: "{{ test_scenario['values']['hive-new'].split(',')[1] }}"
10 | productVersion: "{{ test_scenario['values']['hive-new'].split(',')[0] }}"
11 | {% else %}
12 | productVersion: "{{ test_scenario['values']['hive-new'] }}"
13 | {% endif %}
14 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/release.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | releases:
3 | hive-getting-started:
4 | releaseDate: 2023-03-14
5 | description: Demo / Test release for Hive getting started guide
6 | products:
7 | commons:
8 | operatorVersion: {{ versions.commons }}
9 | hive:
10 | operatorVersion: {{ versions.hive }}
11 | listener:
12 | operatorVersion: {{ versions.listener }}
13 | secret:
14 | operatorVersion: {{ versions.secret }}
15 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/stackablectl-hive-postgres-minio-stack.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | stacks:
3 | hive-minio-postgres:
4 | stackableRelease: hive-getting-started
5 | description: Stack for Hive getting started guide
6 | stackableOperators:
7 | - commons
8 | - listener
9 | - secret
10 | - hive
11 | labels:
12 | - minio
13 | - postgresql
14 | manifests:
15 | - helmChart: minio-stack.yaml
16 | - helmChart: postgres-stack.yaml
17 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/01-install-hbase-vector-aggregator.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: >-
6 | helm install hive-vector-aggregator vector
7 | --namespace "$NAMESPACE"
8 | --version 0.45.0
9 | --repo https://helm.vector.dev
10 | --values hive-vector-aggregator-values.yaml
11 | ---
12 | apiVersion: v1
13 | kind: ConfigMap
14 | metadata:
15 | name: hive-vector-aggregator-discovery
16 | data:
17 | ADDRESS: hive-vector-aggregator:6123
18 |
--------------------------------------------------------------------------------
/.yamllint.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | extends: default
3 |
4 | ignore: |
5 | deploy/helm/**/templates
6 |
7 | rules:
8 | line-length: disable
9 | truthy:
10 | check-keys: false
11 | comments:
12 | min-spaces-from-content: 1 # Needed due to https://github.com/adrienverge/yamllint/issues/443
13 | indentation:
14 | indent-sequences: consistent
15 | comments-indentation: disable # This is generally useless and interferes with commented example values
16 | braces:
17 | max-spaces-inside: 1
18 | max-spaces-inside-empty: 0
19 |
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "0.2.0",
3 | "configurations": [
4 | {
5 | "type": "lldb",
6 | "request": "launch",
7 | "name": "Debug operator binary",
8 | "cargo": {
9 | "args": ["build"],
10 | "filter": {
11 | "name": "stackable-{[ operator.name ]}",
12 | "kind": "bin"
13 | }
14 | },
15 | "args": ["run"],
16 | "cwd": "${workspaceFolder}"
17 | }
18 | ]
19 | }
20 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/resources/20-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 600
5 | commands:
6 | - script: kubectl -n "$NAMESPACE" get sts hive-metastore-resources-from-role -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "hive") | .env[] | select (.name == "HADOOP_HEAPSIZE" and .value == "3276")'
7 | - script: kubectl -n "$NAMESPACE" get sts hive-metastore-resources-from-role-group -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "hive") | .env[] | select (.name == "HADOOP_HEAPSIZE" and .value == "2457")'
8 |
--------------------------------------------------------------------------------
/tests/interu.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | runners:
3 | amd64:
4 | platform: aks-1.32
5 | ttl: 6h
6 | node-groups:
7 | - name: default
8 | arch: amd64
9 | size: medium
10 | disk-gb: 100
11 | nodes: 3
12 |
13 | profiles:
14 | # TODO (@Techassi): This will be enabled later
15 | # schedule:
16 | # strategy: use-runner
17 | # runner: amd64
18 | # options:
19 | # beku-parallelism: 2
20 | smoke-latest:
21 | strategy: use-runner
22 | runner: amd64
23 | options:
24 | beku-parallelism: 2
25 | beku-test-suite: smoke-latest
26 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/05-install-hive-test-runner.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: StatefulSet
4 | metadata:
5 | name: hive-test-runner
6 | labels:
7 | app: hive-test-runner
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: hive-test-runner
13 | template:
14 | metadata:
15 | labels:
16 | app: hive-test-runner
17 | spec:
18 | containers:
19 | - name: hive-test-runner
20 | image: oci.stackable.tech/sdp/testing-tools:0.2.0-stackable0.0.0-dev
21 | stdin: true
22 | tty: true
23 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/80-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | commands:
5 | - script: kubectl exec -n "$NAMESPACE" test-metastore-0 -- python /tmp/test_metastore.py -d test_metastore -m hive-metastore.$NAMESPACE.svc.cluster.local
6 | - script: kubectl exec -n "$NAMESPACE" test-metastore-0 -- python /tmp/test_metastore.py -d test_metastore -m hive-metastore-default-headless.$NAMESPACE.svc.cluster.local
7 | - script: kubectl exec -n "$NAMESPACE" test-metastore-0 -- python /tmp/test_metastore_opa.py -d db_not_allowed -m hive-metastore.$NAMESPACE.svc.cluster.local
8 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/30-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 600
5 | ---
6 | apiVersion: apps/v1
7 | kind: StatefulSet
8 | metadata:
9 | name: hdfs-namenode-default
10 | status:
11 | readyReplicas: 2
12 | replicas: 2
13 | ---
14 | apiVersion: apps/v1
15 | kind: StatefulSet
16 | metadata:
17 | name: hdfs-journalnode-default
18 | status:
19 | readyReplicas: 1
20 | replicas: 1
21 | ---
22 | apiVersion: apps/v1
23 | kind: StatefulSet
24 | metadata:
25 | name: hdfs-datanode-default
26 | status:
27 | readyReplicas: 1
28 | replicas: 1
29 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/orphaned-resources/04-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | metadata:
5 | name: change-datanode-rolegroup
6 | timeout: 600
7 | ---
8 | apiVersion: apps/v1
9 | kind: StatefulSet
10 | metadata:
11 | name: test-hive-metastore-newrolegroup
12 | ---
13 | apiVersion: v1
14 | kind: Pod
15 | metadata:
16 | name: test-hive-metastore-newrolegroup-0
17 | ---
18 | apiVersion: v1
19 | kind: ConfigMap
20 | metadata:
21 | name: test-hive-metastore-newrolegroup
22 | ---
23 | apiVersion: v1
24 | kind: Service
25 | metadata:
26 | name: test-hive-metastore
27 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/30-setup-minio.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | # NOTE: nothing is actually read/written to Minio/S3 in this test case.
3 | # It is still required to have a Minio instance because, for tables with an S3 location, HMS performs a check
4 | # to ensure that the configured bucket/path actually exist.
5 | apiVersion: kuttl.dev/v1beta1
6 | kind: TestStep
7 | commands:
8 | - script: >-
9 | helm install minio
10 | --namespace "$NAMESPACE"
11 | --version 12.6.4
12 | -f helm-bitnami-minio-values.yaml
13 | --repo https://charts.bitnami.com/bitnami minio
14 | timeout: 240
15 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-s3/30-setup-minio.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | # NOTE: nothing is actually read/written to Minio/S3 in this test case.
3 | # It is still required to have a Minio instance because, for tables with an S3 location, HMS performs a check
4 | # to ensure that the configured bucket/path actually exist.
5 | apiVersion: kuttl.dev/v1beta1
6 | kind: TestStep
7 | commands:
8 | - script: >-
9 | helm install minio
10 | --namespace "$NAMESPACE"
11 | --version 12.6.4
12 | -f helm-bitnami-minio-values.yaml
13 | --repo https://charts.bitnami.com/bitnami minio
14 | timeout: 240
15 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/minio-stack.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | releaseName: minio
3 | name: minio
4 | repo:
5 | name: minio
6 | url: https://charts.min.io/
7 | version: 5.4.0
8 | options:
9 | rootUser: root
10 | rootPassword: rootroot
11 | mode: standalone
12 | users:
13 | - accessKey: hive
14 | secretKey: hivehive
15 | policy: readwrite
16 | buckets:
17 | - name: hive
18 | policy: public
19 | resources:
20 | requests:
21 | memory: 2Gi
22 | service:
23 | type: NodePort
24 | nodePort: null
25 | consoleService:
26 | type: NodePort
27 | nodePort: null
28 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/hive-test-helper.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: StatefulSet
4 | metadata:
5 | name: hive-test-helper
6 | labels:
7 | app: hive-test-helper
8 | spec:
9 | serviceName: hive-test-helper
10 | replicas: 1
11 | selector:
12 | matchLabels:
13 | app: hive-test-helper
14 | template:
15 | metadata:
16 | labels:
17 | app: hive-test-helper
18 | spec:
19 | containers:
20 | - name: hive-test-helper
21 | image: oci.stackable.tech/sdp/testing-tools:0.2.0-stackable0.0.0-dev
22 | stdin: true
23 | tty: true
24 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/usage-guide/operations/graceful-shutdown.adoc:
--------------------------------------------------------------------------------
1 | = Graceful shutdown
2 |
3 | You can configure the graceful shutdown as described in xref:concepts:operations/graceful_shutdown.adoc[].
4 |
5 | == Hive metastores
6 |
7 | As a default, Hive metastores have `5 minutes` to shut down gracefully.
8 |
9 | The Hive metastore process receives a `SIGTERM` signal when Kubernetes wants to terminate the Pod.
10 | After the graceful shutdown timeout runs out, and the process is still running, Kubernetes issues a `SIGKILL` signal.
11 |
12 | However, there is no acknowledgement message in the log indicating a graceful shutdown.
13 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/minio-stack.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | releaseName: minio
3 | name: minio
4 | repo:
5 | name: minio
6 | url: https://charts.min.io/
7 | version: {{ versions.minio }}
8 | options:
9 | rootUser: root
10 | rootPassword: rootroot
11 | mode: standalone
12 | users:
13 | - accessKey: hive
14 | secretKey: hivehive
15 | policy: readwrite
16 | buckets:
17 | - name: hive
18 | policy: public
19 | resources:
20 | requests:
21 | memory: 2Gi
22 | service:
23 | type: NodePort
24 | nodePort: null
25 | consoleService:
26 | type: NodePort
27 | nodePort: null
28 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/20-install-zk.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: zookeeper.stackable.tech/v1alpha1
3 | kind: ZookeeperCluster
4 | metadata:
5 | name: hdfs-zk
6 | spec:
7 | image:
8 | productVersion: "{{ test_scenario['values']['zookeeper-latest'] }}"
9 | pullPolicy: IfNotPresent
10 | clusterConfig:
11 | servers:
12 | config:
13 | logging:
14 | enableVectorAgent: false
15 | roleGroups:
16 | default:
17 | replicas: 1
18 | ---
19 | apiVersion: zookeeper.stackable.tech/v1alpha1
20 | kind: ZookeeperZnode
21 | metadata:
22 | name: hdfs-znode
23 | spec:
24 | clusterRef:
25 | name: hdfs-zk
26 |
--------------------------------------------------------------------------------
/deploy/helm/hive-operator/.helmignore:
--------------------------------------------------------------------------------
1 | # =============
2 | # This file is automatically generated from the templates in stackabletech/operator-templating
3 | # DON'T MANUALLY EDIT THIS FILE
4 | # =============
5 |
6 | # Patterns to ignore when building packages.
7 | # This supports shell glob matching, relative path matching, and
8 | # negation (prefixed with !). Only one pattern per line.
9 | .DS_Store
10 | # Common VCS dirs
11 | .git/
12 | .gitignore
13 | .bzr/
14 | .bzrignore
15 | .hg/
16 | .hgignore
17 | .svn/
18 | # Common backup files
19 | *.swp
20 | *.bak
21 | *.tmp
22 | *.orig
23 | *~
24 | # Various IDEs
25 | .project
26 | .idea/
27 | *.tmproj
28 | .vscode/
29 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/upgrade/30-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 900
5 | ---
6 | apiVersion: apps/v1
7 | kind: StatefulSet
8 | metadata:
9 | name: hive-metastore-default
10 | labels:
11 | {% if test_scenario['values']['hive-old'].find(",") > 0 %}
12 | # Yes, this *might* not work with custom images, I'm sorry!
13 | app.kubernetes.io/version: "{{ test_scenario['values']['hive-old'].split(',')[0] }}-stackable0.0.0-dev"
14 | {% else %}
15 | app.kubernetes.io/version: "{{ test_scenario['values']['hive-old'] }}-stackable0.0.0-dev"
16 | {% endif %}
17 | status:
18 | readyReplicas: 1
19 | replicas: 1
20 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/upgrade/31-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 600
5 | ---
6 | apiVersion: apps/v1
7 | kind: StatefulSet
8 | metadata:
9 | name: hive-metastore-default
10 | labels:
11 | {% if test_scenario['values']['hive-old'].find(",") > 0 %}
12 | # Yes, this *might* not work with custom images, I'm sorry!
13 | app.kubernetes.io/version: "{{ test_scenario['values']['hive-new'].split(',')[0] }}-stackable0.0.0-dev"
14 | {% else %}
15 | app.kubernetes.io/version: "{{ test_scenario['values']['hive-new'] }}-stackable0.0.0-dev"
16 | {% endif %}
17 | status:
18 | readyReplicas: 1
19 | replicas: 1
20 |
--------------------------------------------------------------------------------
/tests/release.yaml:
--------------------------------------------------------------------------------
1 | # Contains all operators required to run the test suite.
2 | ---
3 | releases:
4 | # Do not change the name of the release as it's referenced from run-tests
5 | tests:
6 | releaseDate: 1970-01-01
7 | description: Integration test
8 | products:
9 | commons:
10 | operatorVersion: 0.0.0-dev
11 | secret:
12 | operatorVersion: 0.0.0-dev
13 | listener:
14 | operatorVersion: 0.0.0-dev
15 | zookeeper:
16 | operatorVersion: 0.0.0-dev
17 | hdfs:
18 | operatorVersion: 0.0.0-dev
19 | hive:
20 | operatorVersion: 0.0.0-dev
21 | opa:
22 | operatorVersion: 0.0.0-dev
23 |
--------------------------------------------------------------------------------
/deploy/helm/hive-operator/templates/service.yaml:
--------------------------------------------------------------------------------
1 |
2 | ---
3 | apiVersion: v1
4 | kind: Service
5 | metadata:
6 | # Note(@sbernauer): We could also call the Service something like
7 | # "product-operator-conversion-webhook". However, in the future we will have more webhooks, and
8 | # it seems like an overkill to have a dedicated Service per webhook.
9 | name: {{ include "operator.fullname" . }}
10 | labels:
11 | {{- include "operator.labels" . | nindent 4 }}
12 | spec:
13 | selector:
14 | {{- include "operator.selectorLabels" . | nindent 6 }}
15 | ports:
16 | - name: conversion-webhook
17 | protocol: TCP
18 | port: 8443
19 | targetPort: 8443
20 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/hive-postgres-s3.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: hive.stackable.tech/v1alpha1
3 | kind: HiveCluster
4 | metadata:
5 | name: hive-postgres-s3
6 | spec:
7 | image:
8 | productVersion: 4.1.0
9 | clusterConfig:
10 | database:
11 | connString: jdbc:postgresql://postgresql:5432/hive
12 | credentialsSecret: hive-credentials
13 | dbType: postgres
14 | s3:
15 | reference: minio
16 | metastore:
17 | roleGroups:
18 | default:
19 | replicas: 1
20 | ---
21 | apiVersion: v1
22 | kind: Secret
23 | metadata:
24 | name: hive-credentials
25 | type: Opaque
26 | stringData:
27 | username: hive
28 | password: hive
29 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/hive-postgres-s3.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: hive.stackable.tech/v1alpha1
3 | kind: HiveCluster
4 | metadata:
5 | name: hive-postgres-s3
6 | spec:
7 | image:
8 | productVersion: 4.1.0
9 | clusterConfig:
10 | database:
11 | connString: jdbc:postgresql://postgresql:5432/hive
12 | credentialsSecret: hive-credentials
13 | dbType: postgres
14 | s3:
15 | reference: minio
16 | metastore:
17 | roleGroups:
18 | default:
19 | replicas: 1
20 | ---
21 | apiVersion: v1
22 | kind: Secret
23 | metadata:
24 | name: hive-credentials
25 | type: Opaque
26 | stringData:
27 | username: hive
28 | password: hive
29 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/usage-guide/logging.adoc:
--------------------------------------------------------------------------------
1 | = Log aggregation
2 | :description: The logs can be forwarded to a Vector log aggregator by providing a discovery ConfigMap for the aggregator and by enabling the log agent.
3 |
4 | The logs can be forwarded to a Vector log aggregator by providing a discovery ConfigMap for the aggregator and by enabling the log agent:
5 |
6 | [source,yaml]
7 | ----
8 | spec:
9 | clusterConfig:
10 | vectorAggregatorConfigMapName: vector-aggregator-discovery
11 | metastore:
12 | config:
13 | logging:
14 | enableVectorAgent: true
15 | ----
16 |
17 | Further information on how to configure logging can be found in xref:concepts:logging.adoc[].
18 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/external-access/helm-bitnami-postgresql-values.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | volumePermissions:
3 | enabled: false
4 | securityContext:
5 | runAsUser: auto
6 |
7 | primary:
8 | podSecurityContext:
9 | {% if test_scenario['values']['openshift'] == 'true' %}
10 | enabled: false
11 | {% else %}
12 | enabled: true
13 | {% endif %}
14 | containerSecurityContext:
15 | enabled: false
16 | resources:
17 | requests:
18 | memory: "128Mi"
19 | cpu: "512m"
20 | limits:
21 | memory: "128Mi"
22 | cpu: "1"
23 |
24 | shmVolume:
25 | chmod:
26 | enabled: false
27 |
28 | auth:
29 | username: hive
30 | password: hive
31 | database: hive
32 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/normal-issue.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Normal issue
3 | about: This is just a normal empty issue with a simple checklist
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## Issue checklist
11 |
12 | This is a simple checklist of things to bear in mind when creating a new issue.
13 |
14 | - [ ] Describe the use-case, as far as possible. For instance, using the pattern "As a XXXX, I would like XXXX to be able to do XXXX" helps to identify the feature as well as the problem it is intended to address.
15 | - [ ] Indicate an approximate level of importance and urgency.
16 | - [ ] Indicate if there is a known work-around until such time as the issue has been implemented.
17 |
--------------------------------------------------------------------------------
/.github/workflows/general_daily_security.yml:
--------------------------------------------------------------------------------
1 | # =============
2 | # This file is automatically generated from the templates in stackabletech/operator-templating
3 | # DON'T MANUALLY EDIT THIS FILE
4 | # =============
5 | ---
6 | name: Daily Security Audit
7 |
8 | on:
9 | schedule:
10 | - cron: '15 4 * * *'
11 | workflow_dispatch:
12 |
13 | permissions: {}
14 |
15 | jobs:
16 | audit:
17 | runs-on: ubuntu-latest
18 | steps:
19 | - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
20 | with:
21 | persist-credentials: false
22 | - uses: rustsec/audit-check@69366f33c96575abad1ee0dba8212993eecbe998 # v2.0.0
23 | with:
24 | token: ${{ secrets.GITHUB_TOKEN }}
25 |
--------------------------------------------------------------------------------
/.readme/README.md.j2:
--------------------------------------------------------------------------------
1 |
2 | {%- set title="Stackable Operator for Apache Hive" -%}
3 | {%- set operator_name="hive" -%}
4 | {%- set operator_docs_slug="hive" -%}
5 | {%- set related_reading_links=[] -%}
6 |
7 | {% filter trim %}
8 | {%- include "partials/borrowed/header.md.j2" -%}
9 | {% endfilter %}
10 |
11 | {% filter trim %}
12 | {%- include "partials/borrowed/links.md.j2" -%}
13 | {% endfilter %}
14 |
15 | {% filter trim %}
16 | {%- include "partials/main.md.j2" -%}
17 | {% endfilter %}
18 |
19 | {% filter trim %}
20 | {%- include "partials/borrowed/footer.md.j2" -%}
21 | {% endfilter %}
22 |
23 | {% filter trim %}
24 | {%- include "partials/borrowed/related_reading.md.j2" -%}
25 | {% endfilter %}
26 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/00-rbac.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Role
3 | apiVersion: rbac.authorization.k8s.io/v1
4 | metadata:
5 | name: test-role
6 | rules:
7 | {% if test_scenario['values']['openshift'] == "true" %}
8 | - apiGroups: ["security.openshift.io"]
9 | resources: ["securitycontextconstraints"]
10 | resourceNames: ["privileged"]
11 | verbs: ["use"]
12 | {% endif %}
13 | ---
14 | apiVersion: v1
15 | kind: ServiceAccount
16 | metadata:
17 | name: test-sa
18 | ---
19 | kind: RoleBinding
20 | apiVersion: rbac.authorization.k8s.io/v1
21 | metadata:
22 | name: test-rb
23 | subjects:
24 | - kind: ServiceAccount
25 | name: test-sa
26 | roleRef:
27 | kind: Role
28 | name: test-role
29 | apiGroup: rbac.authorization.k8s.io
30 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-s3/00-rbac.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Role
3 | apiVersion: rbac.authorization.k8s.io/v1
4 | metadata:
5 | name: test-role
6 | rules:
7 | {% if test_scenario['values']['openshift'] == "true" %}
8 | - apiGroups: ["security.openshift.io"]
9 | resources: ["securitycontextconstraints"]
10 | resourceNames: ["privileged"]
11 | verbs: ["use"]
12 | {% endif %}
13 | ---
14 | apiVersion: v1
15 | kind: ServiceAccount
16 | metadata:
17 | name: test-sa
18 | ---
19 | kind: RoleBinding
20 | apiVersion: rbac.authorization.k8s.io/v1
21 | metadata:
22 | name: test-rb
23 | subjects:
24 | - kind: ServiceAccount
25 | name: test-sa
26 | roleRef:
27 | kind: Role
28 | name: test-role
29 | apiGroup: rbac.authorization.k8s.io
30 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/postgres-stack.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | releaseName: postgresql
3 | name: postgresql
4 | repo:
5 | name: bitnami
6 | url: https://charts.bitnami.com/bitnami/
7 | version: 16.5.0
8 | options:
9 | global:
10 | security:
11 | allowInsecureImages: true
12 | image:
13 | repository: bitnamilegacy/postgresql
14 | volumePermissions:
15 | enabled: false
16 | image:
17 | repository: bitnamilegacy/os-shell
18 | securityContext:
19 | runAsUser: auto
20 | metrics:
21 | image:
22 | repository: bitnamilegacy/postgres-exporter
23 | primary:
24 | extendedConfiguration: |
25 | password_encryption=md5
26 | auth:
27 | username: hive
28 | password: hive
29 | database: hive
30 |
--------------------------------------------------------------------------------
/deploy/helm/hive-operator/templates/_maintenance.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Create a list of maintenance related env vars.
3 | */}}
4 | {{- define "maintenance.envVars" -}}
5 | {{- with .Values.maintenance }}
6 | {{- if not .endOfSupportCheck.enabled }}
7 | - name: EOS_DISABLED
8 | value: "true"
9 | {{- end }}
10 | {{- if and .endOfSupportCheck.enabled .endOfSupportCheck.mode }}
11 | - name: EOS_CHECK_MODE
12 | value: {{ .endOfSupportCheck.mode | quote }}
13 | {{- end }}
14 | {{- if and .endOfSupportCheck.enabled .endOfSupportCheck.interval }}
15 | - name: EOS_INTERVAL
16 | value: {{ .endOfSupportCheck.interval | quote }}
17 | {{- end }}
18 | {{- if not .customResourceDefinitions.maintain }}
19 | - name: DISABLE_CRD_MAINTENANCE
20 | value: "true"
21 | {{- end }}
22 | {{- end }}
23 | {{- end }}
24 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/70-install-test-metastore.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: StatefulSet
4 | metadata:
5 | name: test-metastore
6 | labels:
7 | app: test-metastore
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: test-metastore
13 | template:
14 | metadata:
15 | labels:
16 | app: test-metastore
17 | spec:
18 | containers:
19 | - name: test-metastore
20 | image: oci.stackable.tech/sdp/testing-tools:0.2.0-stackable0.0.0-dev
21 | stdin: true
22 | tty: true
23 | resources:
24 | requests:
25 | memory: "128Mi"
26 | cpu: "512m"
27 | limits:
28 | memory: "128Mi"
29 | cpu: "1"
30 |
--------------------------------------------------------------------------------
/docs/modules/hive/examples/getting_started/postgres-stack.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | releaseName: postgresql
3 | name: postgresql
4 | repo:
5 | name: bitnami
6 | url: https://charts.bitnami.com/bitnami/
7 | version: {{ versions.postgresql }}
8 | options:
9 | global:
10 | security:
11 | allowInsecureImages: true
12 | image:
13 | repository: bitnamilegacy/postgresql
14 | volumePermissions:
15 | enabled: false
16 | image:
17 | repository: bitnamilegacy/os-shell
18 | securityContext:
19 | runAsUser: auto
20 | metrics:
21 | image:
22 | repository: bitnamilegacy/postgres-exporter
23 | primary:
24 | extendedConfiguration: |
25 | password_encryption=md5
26 | auth:
27 | username: hive
28 | password: hive
29 | database: hive
30 |
--------------------------------------------------------------------------------
/nix/README.md:
--------------------------------------------------------------------------------
1 |
5 |
6 | # Updating nix dependencies
7 |
8 | ## Run the following for an operator
9 |
10 | > [!NOTE]
11 | > We track the `master` branch of crate2nix as that is relatively up to date, but the releases are infrequent.
12 |
13 | ```shell
14 | niv update crate2nix
15 | niv update nixpkgs
16 | niv update beku.py -b X.Y.Z # Using the release tag
17 | ```
18 |
19 | ### Test
20 |
21 | - Run `make regenerate-nix` to ensure crate2nix works.
22 | - Run a smoke test to ensure beku.py works.
23 | - Run `make run-dev` to ensure nixpkgs are fine.
24 |
25 | ## Update operator-templating
26 |
27 | Do the same as above, but from `template/`
28 |
--------------------------------------------------------------------------------
/.readme/partials/borrowed/documentation.md.j2:
--------------------------------------------------------------------------------
1 |
2 | ## Documentation
3 |
4 | The stable documentation for this operator can be found in our [Stackable Data Platform documentation](https://docs.stackable.tech/home/stable/{{operator_docs_slug}}).
5 | If you are interested in the most recent state of this repository, check out the [nightly docs](https://docs.stackable.tech/home/nightly/{{operator_docs_slug}}) instead.
6 |
7 | The documentation for all Stackable products can be found at [docs.stackable.tech](https://docs.stackable.tech).
8 |
9 | If you have a question about the Stackable Data Platform, contact us via our [homepage](https://stackable.tech/) or ask a public question in our [Discussions forum](https://github.com/orgs/stackabletech/discussions).
10 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/orphaned-resources/03-remove-role-group.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: hive.stackable.tech/v1alpha1
3 | kind: HiveCluster
4 | metadata:
5 | name: test-hive
6 | spec:
7 | image:
8 | {% if test_scenario['values']['hive-latest'].find(",") > 0 %}
9 | custom: "{{ test_scenario['values']['hive-latest'].split(',')[1] }}"
10 | productVersion: "{{ test_scenario['values']['hive-latest'].split(',')[0] }}"
11 | {% else %}
12 | productVersion: "{{ test_scenario['values']['hive-latest'] }}"
13 | {% endif %}
14 | pullPolicy: IfNotPresent
15 | clusterConfig:
16 | database:
17 | connString: jdbc:derby:;databaseName=/tmp/hive;create=true
18 | credentialsSecret: hive-credentials
19 | dbType: derby
20 | metastore:
21 | roleGroups:
22 | remove: null
23 |
--------------------------------------------------------------------------------
/.readme/partials/borrowed/links.md.j2:
--------------------------------------------------------------------------------
1 |
2 | [](https://GitHub.com/stackabletech/{{operator_name}}-operator/graphs/commit-activity)
3 | [](https://docs.stackable.tech/home/stable/contributor/index.html)
4 | [](./LICENSE)
5 |
6 | [Documentation](https://docs.stackable.tech/home/stable/{{operator_docs_slug}}) {% if quickstart_link %}| [Quickstart]({{quickstart_link}}) {% endif %}| [Stackable Data Platform](https://stackable.tech/) | [Platform Docs](https://docs.stackable.tech/) | [Discussions](https://github.com/orgs/stackabletech/discussions) | [Discord](https://discord.gg/7kZ3BNnCAF)
7 |
--------------------------------------------------------------------------------
/scripts/generate-manifests.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # This script reads a Helm chart from deploy/helm/hive-operator and
3 | # generates manifest files into deploy/manifests
4 | set -e
5 |
6 | tmp=$(mktemp -d ./manifests-XXXXX)
7 |
8 | helm template --output-dir "$tmp" \
9 | --include-crds \
10 | --name-template hive-operator \
11 | deploy/helm/hive-operator
12 |
13 | for file in "$tmp"/hive-operator/*/*; do
14 | yq eval -i 'del(.. | select(has("app.kubernetes.io/managed-by")) | ."app.kubernetes.io/managed-by")' /dev/stdin < "$file" # NOTE(review): -i targets /dev/stdin, not "$file" — confirm the edit is actually written back to the file
15 | yq eval -i 'del(.. | select(has("helm.sh/chart")) | ."helm.sh/chart")' /dev/stdin < "$file"
16 | sed -i '/# Source: .*/d' "$file" # strip helm's "# Source:" provenance comments
17 | done
18 |
19 | cp -r "$tmp"/hive-operator/*/* deploy/manifests/
20 |
21 | rm -rf "$tmp"
22 |
--------------------------------------------------------------------------------
/.readme/partials/main.md.j2:
--------------------------------------------------------------------------------
1 |
2 | This is a Kubernetes operator to manage [Apache Hive](https://hive.apache.org/).
3 |
4 | {% filter trim %}
5 | {%- include "partials/borrowed/overview_blurb.md.j2" -%}
6 | {% endfilter %}
7 |
8 | ## Installation
9 |
10 | You can install the operator using [stackablectl or helm](https://docs.stackable.tech/home/stable/{{operator_name}}/getting_started/installation).
11 |
12 | Read on to get started with it, or see it in action in one of our [demos](https://stackable.tech/en/demos/).
13 |
14 | ## Getting Started
15 |
16 | You can follow this [tutorial](https://docs.stackable.tech/home/stable/{{operator_name}}/getting_started/first_steps).
17 |
18 | {% filter trim %}
19 | {%- include "partials/borrowed/documentation.md.j2" -%}
20 | {% endfilter %}
21 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/usage-guide/operations/pod-placement.adoc:
--------------------------------------------------------------------------------
1 | = Pod placement
2 |
3 | You can configure Pod placement for Hive metastores as described in xref:concepts:operations/pod_placement.adoc[].
4 |
5 | By default, the operator configures the following Pod placement constraints:
6 |
7 | [source,yaml]
8 | ----
9 | affinity:
10 | podAntiAffinity:
11 | preferredDuringSchedulingIgnoredDuringExecution:
12 | - podAffinityTerm:
13 | labelSelector:
14 | matchLabels:
15 | app.kubernetes.io/name: hive
16 | app.kubernetes.io/instance: cluster-name
17 | app.kubernetes.io/component: metastore
18 | topologyKey: kubernetes.io/hostname
19 | weight: 70
20 | ----
21 |
22 | In the example above `cluster-name` is the name of the HiveCluster custom resource that owns this Pod.
23 |
--------------------------------------------------------------------------------
/examples/simple-hive-cluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: hive.stackable.tech/v1alpha1
3 | kind: HiveCluster
4 | metadata:
5 | name: simple-hive-derby
6 | spec:
7 | image:
8 | productVersion: 4.1.0
9 | stackableVersion: 0.0.0-dev
10 | clusterConfig:
11 | database:
12 | connString: jdbc:derby:;databaseName=/tmp/hive;create=true
13 | credentialsSecret: hive-credentials
14 | dbType: derby
15 | metastore:
16 | roleGroups:
17 | default:
18 | replicas: 1
19 | config:
20 | resources:
21 | cpu:
22 | min: 300m
23 | max: "2"
24 | memory:
25 | limit: 5Gi
26 | ---
27 | apiVersion: v1
28 | kind: Secret
29 | metadata:
30 | name: hive-credentials
31 | type: Opaque
32 | stringData:
33 | username: APP
34 | password: mine
35 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/usage-guide/listenerclass.adoc:
--------------------------------------------------------------------------------
1 | = Service exposition with ListenerClasses
2 | :description: Configure the Hive service exposure with listener classes: cluster-internal, external-unstable or external-stable
3 |
4 | Apache Hive offers an API.
5 | The operator deploys a xref:listener-operator:listener.adoc[Listener] for the Metastore pods.
6 |
7 | The listener defaults to only being accessible from within the Kubernetes cluster, but this can be changed by setting `.spec.metastore.roleConfig.listenerClass`:
8 |
9 | [source,yaml]
10 | ----
11 | spec:
12 | metastore:
13 | roleConfig:
14 | listenerClass: "cluster-internal" <1>
15 | roleGroups:
16 | default:
17 | replicas: 1
18 | ----
19 | <1> Specify one of `external-stable`, `external-unstable`, `cluster-internal` (the default setting is `cluster-internal`).
20 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/reference/commandline-parameters.adoc:
--------------------------------------------------------------------------------
1 | = Command line parameters
2 |
3 | This operator accepts the following command line parameters:
4 |
5 | == product-config
6 |
7 | *Default value*: `/etc/stackable/hive-operator/config-spec/properties.yaml`
8 |
9 | *Required*: false
10 |
11 | *Multiple values:* false
12 |
13 | [source]
14 | ----
15 | stackable-hive-operator run --product-config /foo/bar/properties.yaml
16 | ----
17 |
18 | == watch-namespace
19 |
20 | *Default value*: All namespaces
21 |
22 | *Required*: false
23 |
24 | *Multiple values:* false
25 |
26 | If provided, the operator **only** watches for resources in the provided namespace.
27 | If not provided, it watches in **all** namespaces.
28 |
29 | .Example: Only watch the `test` namespace
30 | [source,bash]
31 | ----
32 | stackable-hive-operator run --watch-namespace test
33 | ----
34 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/01-normal-issue.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Normal issue
3 | about: This is just a normal empty issue with a simple checklist
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## Issue checklist
11 |
12 | This is a simple checklist of things to bear in mind when creating a new issue.
13 |
14 | - [ ] **Describe the use-case**: As far as possible, use the pattern "As a [type of user], I would like [feature/functionality] to be able to do [specific action]." This helps identify the feature and the problem it addresses.
15 | - [ ] **Indicate importance and urgency**: Use a scale (e.g., low, medium, high) to indicate the level of importance and urgency.
16 | - [ ] **Work-around**: If there is a known work-around, describe it briefly.
17 | - [ ] **Environment**: Describe the environment where the issue occurs (e.g., SDP version, K8S version, etc.).
18 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/helm-bitnami-postgresql-values.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | global:
3 | security:
4 | allowInsecureImages: true # needed starting with Chart version 16.3.0 if modifying images
5 |
6 | image:
7 | repository: bitnamilegacy/postgresql
8 |
9 | volumePermissions:
10 | enabled: false
11 | image:
12 | repository: bitnamilegacy/os-shell
13 | securityContext:
14 | runAsUser: auto
15 |
16 | metrics:
17 | image:
18 | repository: bitnamilegacy/postgres-exporter
19 |
20 | primary:
21 | extendedConfiguration: |
22 | password_encryption=md5
23 | podSecurityContext:
24 | {% if test_scenario['values']['openshift'] == 'true' %}
25 | enabled: false
26 | {% else %}
27 | enabled: true
28 | {% endif %}
29 | containerSecurityContext:
30 | enabled: false
31 |
32 | auth:
33 | username: hive
34 | password: hive
35 | database: hive
36 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/60-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 900
5 | ---
6 | apiVersion: apps/v1
7 | kind: StatefulSet
8 | metadata:
9 | name: hive-metastore-default
10 | spec:
11 | template:
12 | spec:
13 | containers:
14 | - name: hive
15 | resources:
16 | limits:
17 | cpu: "1"
18 | memory: 768Mi
19 | requests:
20 | cpu: 250m
21 | memory: 768Mi
22 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
23 | - name: vector
24 | {% endif %}
25 | terminationGracePeriodSeconds: 300
26 | status:
27 | readyReplicas: 1
28 | replicas: 1
29 | ---
30 | apiVersion: policy/v1
31 | kind: PodDisruptionBudget
32 | metadata:
33 | name: hive-metastore
34 | status:
35 | expectedPods: 1
36 | currentHealthy: 1
37 | disruptionsAllowed: 1
38 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-s3/60-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 900
5 | ---
6 | apiVersion: apps/v1
7 | kind: StatefulSet
8 | metadata:
9 | name: hive-metastore-default
10 | spec:
11 | template:
12 | spec:
13 | containers:
14 | - name: hive
15 | resources:
16 | limits:
17 | cpu: "1"
18 | memory: 768Mi
19 | requests:
20 | cpu: 250m
21 | memory: 768Mi
22 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
23 | - name: vector
24 | {% endif %}
25 | terminationGracePeriodSeconds: 300
26 | status:
27 | readyReplicas: 1
28 | replicas: 1
29 | ---
30 | apiVersion: policy/v1
31 | kind: PodDisruptionBudget
32 | metadata:
33 | name: hive-metastore
34 | status:
35 | expectedPods: 1
36 | currentHealthy: 1
37 | disruptionsAllowed: 1
38 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/60-assert.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 900
5 | ---
6 | apiVersion: apps/v1
7 | kind: StatefulSet
8 | metadata:
9 | name: hive-metastore-default
10 | spec:
11 | template:
12 | spec:
13 | containers:
14 | - name: hive
15 | resources:
16 | limits:
17 | cpu: "1"
18 | memory: 768Mi
19 | requests:
20 | cpu: 250m
21 | memory: 768Mi
22 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
23 | - name: vector
24 | {% endif %}
25 | terminationGracePeriodSeconds: 300
26 | status:
27 | readyReplicas: 1
28 | replicas: 1
29 | ---
30 | apiVersion: policy/v1
31 | kind: PodDisruptionBudget
32 | metadata:
33 | name: hive-metastore
34 | status:
35 | expectedPods: 1
36 | currentHealthy: 1
37 | disruptionsAllowed: 1
38 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/required-external-components.adoc:
--------------------------------------------------------------------------------
1 | = Required external components
2 | :description: Hive Metastore requires a SQL database. Supported options include MySQL, Postgres, Oracle, and MS SQL Server. Stackable Hive supports PostgreSQL by default.
3 |
4 | The Hive Metastore requires a backend SQL database.
5 | Supported databases and versions are:
6 |
7 | * MySQL 5.6.17 and above
8 | * Postgres 9.1.13 and above
9 | * Oracle 11g and above
10 | * MS SQL Server 2008 R2 and above
11 |
12 | Reference: https://cwiki.apache.org/confluence/display/Hive/AdminManual+Metastore+Administration#AdminManualMetastoreAdministration-SupportedBackendDatabasesforMetastore[Hive Metastore documentation]
13 |
14 | The Stackable product images for Apache Hive come with built-in support for PostgreSQL.
15 | See xref:usage-guide/database-driver.adoc[] for details on how to make drivers for other databases (supported by Hive) available.
16 |
--------------------------------------------------------------------------------
/.markdownlint.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # All defaults or options can be checked here:
3 | # https://github.com/DavidAnson/markdownlint/blob/main/schema/.markdownlint.yaml
4 |
5 | # Default state for all rules
6 | default: true
7 |
8 | # MD013/line-length - Line length
9 | MD013:
10 | # Number of characters
11 | line_length: 9999
12 | # Number of characters for headings
13 | heading_line_length: 9999
14 | # Number of characters for code blocks
15 | code_block_line_length: 9999
16 |
17 | # MD033/no-inline-html
18 | MD033:
19 | allowed_elements: [h1, img, p]
20 |
21 | # MD024/no-duplicate-heading/no-duplicate-header - Multiple headings with the same content
22 | MD024:
23 | # Only check sibling headings
24 | siblings_only: true
25 |
26 | # MD041/first-line-heading/first-line-h1 First line in a file should be a top-level heading
27 | MD041: false # Github issues and PRs already have titles, and H1 is enormous in the description box.
28 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/troubleshooting/index.adoc:
--------------------------------------------------------------------------------
1 | = Troubleshooting
2 |
3 | == Probe error 'Thrift Error' while using Kerberos authentication
4 |
5 | In Hive 4.x with Kerberos enabled, health checks cause excessive error logs:
6 |
7 | [source]
8 | ----
9 | ERROR [Metastore-Handler-Pool: Thread-65] server.TThreadPoolServer: Thrift Error occurred during processing of message.
10 | ----
11 |
12 | This is because the health check doesn’t complete SASL authentication. The error is ignorable, though it can be hidden with the following configuration:
13 |
14 | [source,yaml]
15 | ----
16 | spec:
17 | metastore:
18 | config:
19 | logging:
20 | containers:
21 | hive:
22 | loggers:
23 | org.apache.thrift.server.TThreadPoolServer:
24 | level: NONE
25 | ----
26 |
27 | NOTE: This will suppress all logging from TThreadPoolServer, including log events that might be useful for diagnosing issues.
28 |
--------------------------------------------------------------------------------
/rust/operator-binary/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "stackable-hive-operator"
3 | description = "Stackable Operator for Apache Hive"
4 | version.workspace = true
5 | authors.workspace = true
6 | license.workspace = true
7 | edition.workspace = true
8 | repository.workspace = true
9 | publish = false
10 |
11 | [dependencies]
12 | product-config.workspace = true
13 | stackable-operator.workspace = true
14 |
15 | anyhow.workspace = true
16 | clap.workspace = true
17 | const_format.workspace = true
18 | fnv.workspace = true
19 | futures.workspace = true
20 | indoc.workspace = true
21 | pin-project.workspace = true
22 | semver.workspace = true
23 | serde.workspace = true
24 | serde_json.workspace = true
25 | snafu.workspace = true
26 | strum.workspace = true
27 | tokio.workspace = true
28 | tracing.workspace = true
29 |
30 | [dev-dependencies]
31 | rstest.workspace = true
32 | serde_yaml.workspace = true
33 |
34 | [build-dependencies]
35 | built.workspace = true
36 |
--------------------------------------------------------------------------------
/rust/operator-binary/src/operations/graceful_shutdown.rs:
--------------------------------------------------------------------------------
1 | use snafu::{ResultExt, Snafu};
2 | use stackable_operator::builder::pod::PodBuilder;
3 |
4 | use crate::crd::MetaStoreConfig;
5 |
/// Errors that can occur while applying the graceful-shutdown configuration.
#[derive(Debug, Snafu)]
pub enum Error {
    #[snafu(display("Failed to set terminationGracePeriod"))]
    SetTerminationGracePeriod {
        // Underlying error raised by the PodBuilder.
        source: stackable_operator::builder::pod::Error,
    },
}
13 |
14 | pub fn add_graceful_shutdown_config(
15 | merged_config: &MetaStoreConfig,
16 | pod_builder: &mut PodBuilder,
17 | ) -> Result<(), Error> {
18 | // This must be always set by the merge mechanism, as we provide a default value,
19 | // users can not disable graceful shutdown.
20 | if let Some(graceful_shutdown_timeout) = merged_config.graceful_shutdown_timeout {
21 | pod_builder
22 | .termination_grace_period(&graceful_shutdown_timeout)
23 | .context(SetTerminationGracePeriodSnafu)?;
24 | }
25 |
26 | Ok(())
27 | }
28 |
--------------------------------------------------------------------------------
/deploy/helm/hive-operator/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Helm Chart for Stackable Operator for Apache Hive
3 |
4 | This Helm Chart can be used to install Custom Resource Definitions and the Operator for Apache Hive provided by Stackable.
5 |
6 | ## Requirements
7 |
8 | - Create a [Kubernetes Cluster](../Readme.md)
9 | - Install [Helm](https://helm.sh/docs/intro/install/)
10 |
11 | ## Install the Stackable Operator for Apache Hive
12 |
13 | ```bash
14 | # From the root of the operator repository
15 | make compile-chart
16 |
17 | helm install hive-operator deploy/helm/hive-operator
18 | ```
19 |
20 | ## Usage of the CRDs
21 |
22 | The usage of this operator and its CRDs is described in the [documentation](https://docs.stackable.tech/hive/index.html)
23 |
24 | The operator has example requests included in the [`/examples`](https://github.com/stackabletech/hive-operator/tree/main/examples) directory.
25 |
26 | ## Links
27 |
28 |
29 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/getting_started/index.adoc:
--------------------------------------------------------------------------------
1 | = Getting started
2 | :description: Learn to set up Apache Hive with the Stackable Operator. Includes installation, dependencies, and creating a Hive metastore on Kubernetes.
3 |
4 | This guide gets you started with Apache Hive using the Stackable Operator.
5 | It guides you through the installation of the operator, its dependencies and setting up your first Hive metastore instance.
6 |
7 | == Prerequisites
8 |
9 | You need:
10 |
11 | * a Kubernetes cluster
12 | * kubectl
13 | * optional: Helm
14 |
15 | Resource sizing depends on cluster type(s), usage and scope, but as a starting point we recommend a minimum of the following resources for this operator:
16 |
17 | * 0.2 cores (e.g. i5 or similar)
18 | * 256MB RAM
19 |
20 | == What's next
21 |
The guide is divided into two steps:
23 |
24 | * xref:getting_started/installation.adoc[Installing the operators].
25 | * xref:getting_started/first_steps.adoc[Setting up the Hive metastore instance with PostgreSQL and Minio].
26 |
--------------------------------------------------------------------------------
/rust/operator-binary/src/crd/security.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 | use stackable_operator::{
3 | commons::opa::OpaConfig,
4 | schemars::{self, JsonSchema},
5 | };
6 |
/// Authentication settings for the Hive metastore.
#[derive(Clone, Debug, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct AuthenticationConfig {
    /// Kerberos configuration.
    pub kerberos: KerberosConfig,
}
13 |
14 | #[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)]
15 | #[serde(rename_all = "camelCase")]
16 | pub struct AuthorizationConfig {
17 | // no doc - it's in the struct.
18 | #[serde(default, skip_serializing_if = "Option::is_none")]
19 | pub opa: Option,
20 | }
21 |
#[derive(Clone, Debug, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct KerberosConfig {
    /// Name of the SecretClass providing the keytab for the Hive services.
    pub secret_class: String,
}
28 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/helm-bitnami-postgresql-values.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | global:
3 | security:
4 | allowInsecureImages: true # needed starting with Chart version 16.3.0 if modifying images
5 |
6 | image:
7 | repository: bitnamilegacy/postgresql
8 |
9 | volumePermissions:
10 | enabled: false
11 | image:
12 | repository: bitnamilegacy/os-shell
13 | securityContext:
14 | runAsUser: auto
15 |
16 | metrics:
17 | image:
18 | repository: bitnamilegacy/postgres-exporter
19 |
20 | primary:
21 | extendedConfiguration: |
22 | password_encryption=md5
23 | podSecurityContext:
24 | {% if test_scenario['values']['openshift'] == 'true' %}
25 | enabled: false
26 | {% else %}
27 | enabled: true
28 | {% endif %}
29 | containerSecurityContext:
30 | enabled: false
31 | resources:
32 | requests:
33 | memory: "128Mi"
34 | cpu: "512m"
35 | limits:
36 | memory: "128Mi"
37 | cpu: "1"
38 |
39 | auth:
40 | username: hive
41 | password: hive
42 | database: hive
43 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-s3/helm-bitnami-postgresql-values.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | global:
3 | security:
4 | allowInsecureImages: true # needed starting with Chart version 16.3.0 if modifying images
5 |
6 | image:
7 | repository: bitnamilegacy/postgresql
8 |
9 | volumePermissions:
10 | enabled: false
11 | image:
12 | repository: bitnamilegacy/os-shell
13 | securityContext:
14 | runAsUser: auto
15 |
16 | metrics:
17 | image:
18 | repository: bitnamilegacy/postgres-exporter
19 |
20 | primary:
21 | extendedConfiguration: |
22 | password_encryption=md5
23 | podSecurityContext:
24 | {% if test_scenario['values']['openshift'] == 'true' %}
25 | enabled: false
26 | {% else %}
27 | enabled: true
28 | {% endif %}
29 | containerSecurityContext:
30 | enabled: false
31 | resources:
32 | requests:
33 | memory: "128Mi"
34 | cpu: "512m"
35 | limits:
36 | memory: "128Mi"
37 | cpu: "1"
38 |
39 | auth:
40 | username: hive
41 | password: hive
42 | database: hive
43 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/upgrade/helm-bitnami-postgresql-values.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | global:
3 | security:
4 | allowInsecureImages: true # needed starting with Chart version 16.3.0 if modifying images
5 |
6 | image:
7 | repository: bitnamilegacy/postgresql
8 |
9 | volumePermissions:
10 | enabled: false
11 | image:
12 | repository: bitnamilegacy/os-shell
13 | securityContext:
14 | runAsUser: auto
15 |
16 | metrics:
17 | image:
18 | repository: bitnamilegacy/postgres-exporter
19 |
20 | primary:
21 | extendedConfiguration: |
22 | password_encryption=md5
23 | podSecurityContext:
24 | {% if test_scenario['values']['openshift'] == 'true' %}
25 | enabled: false
26 | {% else %}
27 | enabled: true
28 | {% endif %}
29 | containerSecurityContext:
30 | enabled: false
31 | resources:
32 | requests:
33 | memory: "128Mi"
34 | cpu: "512m"
35 | limits:
36 | memory: "128Mi"
37 | cpu: "1"
38 |
39 | auth:
40 | username: hive
41 | password: hive
42 | database: hive
43 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/helm-bitnami-postgresql-values.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | global:
3 | security:
4 | allowInsecureImages: true # needed starting with Chart version 16.3.0 if modifying images
5 |
6 | image:
7 | repository: bitnamilegacy/postgresql
8 |
9 | volumePermissions:
10 | enabled: false
11 | image:
12 | repository: bitnamilegacy/os-shell
13 | securityContext:
14 | runAsUser: auto
15 |
16 | metrics:
17 | image:
18 | repository: bitnamilegacy/postgres-exporter
19 |
20 | primary:
21 | extendedConfiguration: |
22 | password_encryption=md5
23 | podSecurityContext:
24 | {% if test_scenario['values']['openshift'] == 'true' %}
25 | enabled: false
26 | {% else %}
27 | enabled: true
28 | {% endif %}
29 | containerSecurityContext:
30 | enabled: false
31 | resources:
32 | requests:
33 | memory: "128Mi"
34 | cpu: "512m"
35 | limits:
36 | memory: "128Mi"
37 | cpu: "1"
38 |
39 | auth:
40 | username: hive
41 | password: hive
42 | database: hive
43 |
--------------------------------------------------------------------------------
/deploy/helm/hive-operator/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | {{ if .Values.serviceAccount.create -}}
3 | apiVersion: v1
4 | kind: ServiceAccount
5 | metadata:
6 | name: {{ include "operator.fullname" . }}-serviceaccount
7 | labels:
8 | {{- include "operator.labels" . | nindent 4 }}
9 | {{- with .Values.serviceAccount.annotations }}
10 | annotations:
11 | {{- toYaml . | nindent 4 }}
12 | {{- end }}
13 | ---
14 | apiVersion: rbac.authorization.k8s.io/v1
15 | # This cluster role binding allows anyone in the "manager" group to read secrets in any namespace.
16 | kind: ClusterRoleBinding
17 | metadata:
18 | name: {{ include "operator.fullname" . }}-clusterrolebinding
19 | labels:
20 | {{- include "operator.labels" . | nindent 4 }}
21 | subjects:
22 | - kind: ServiceAccount
23 | name: {{ include "operator.fullname" . }}-serviceaccount
24 | namespace: {{ .Release.Namespace }}
25 | roleRef:
26 | kind: ClusterRole
27 | name: {{ include "operator.fullname" . }}-clusterrole
28 | apiGroup: rbac.authorization.k8s.io
29 | {{- end }}
30 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/upgrade/30-install-hive.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: hive.stackable.tech/v1alpha1
3 | kind: HiveCluster
4 | metadata:
5 | name: hive
6 | spec:
7 | image:
8 | {% if test_scenario['values']['hive-old'].find(",") > 0 %}
9 | custom: "{{ test_scenario['values']['hive-old'].split(',')[1] }}"
10 | productVersion: "{{ test_scenario['values']['hive-old'].split(',')[0] }}"
11 | {% else %}
12 | productVersion: "{{ test_scenario['values']['hive-old'] }}"
13 | {% endif %}
14 | pullPolicy: IfNotPresent
15 | clusterConfig:
16 | database:
17 | connString: jdbc:postgresql://postgresql:5432/hive
18 | credentialsSecret: hive-credentials
19 | dbType: postgres
20 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
21 | vectorAggregatorConfigMapName: vector-aggregator-discovery
22 | {% endif %}
23 | metastore:
24 | roleGroups:
25 | default:
26 | replicas: 1
27 | ---
28 | apiVersion: v1
29 | kind: Secret
30 | metadata:
31 | name: hive-credentials
32 | type: Opaque
33 | stringData:
34 | username: hive
35 | password: hive
36 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/external-access/20-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 600
5 | metadata:
6 | name: install-hive
7 | commands:
8 | - script: kubectl -n "$NAMESPACE" wait --for=condition=available hiveclusters.hive.stackable.tech/test-hive --timeout 601s
9 | ---
10 | apiVersion: apps/v1
11 | kind: StatefulSet
12 | metadata:
13 | name: test-hive-metastore-default
14 | status:
15 | readyReplicas: 1
16 | replicas: 1
17 | ---
18 | apiVersion: policy/v1
19 | kind: PodDisruptionBudget
20 | metadata:
21 | name: test-hive-metastore
22 | status:
23 | expectedPods: 1
24 | currentHealthy: 1
25 | disruptionsAllowed: 1
26 | ---
27 | apiVersion: v1
28 | kind: Service
29 | metadata:
30 | name: test-hive-metastore
31 | spec:
32 | type: ClusterIP # cluster-internal
33 | ---
34 | apiVersion: v1
35 | kind: Service
36 | metadata:
37 | name: test-hive-metastore-default-metrics
38 | spec:
39 | type: ClusterIP # exposed metrics
40 | ---
41 | apiVersion: v1
42 | kind: Service
43 | metadata:
44 | name: test-hive-metastore-default-headless
45 | spec:
    type: ClusterIP # headless service
47 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/cluster-operation/20-stop-hive.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: hive.stackable.tech/v1alpha1
3 | kind: HiveCluster
4 | metadata:
5 | name: test-hive
6 | spec:
7 | image:
8 | {% if test_scenario['values']['hive-latest'].find(",") > 0 %}
9 | custom: "{{ test_scenario['values']['hive-latest'].split(',')[1] }}"
10 | productVersion: "{{ test_scenario['values']['hive-latest'].split(',')[0] }}"
11 | {% else %}
12 | productVersion: "{{ test_scenario['values']['hive-latest'] }}"
13 | {% endif %}
14 | pullPolicy: IfNotPresent
15 | clusterConfig:
16 | database:
17 | connString: jdbc:derby:;databaseName=/tmp/hive;create=true
18 | credentialsSecret: hive-credentials
19 | dbType: derby
20 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
21 | vectorAggregatorConfigMapName: vector-aggregator-discovery
22 | {% endif %}
23 | clusterOperation:
24 | stopped: true
25 | reconciliationPaused: false
26 | metastore:
27 | config:
28 | logging:
29 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
30 | roleGroups:
31 | default:
32 | replicas: 1
33 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/cluster-operation/30-pause-hive.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: hive.stackable.tech/v1alpha1
3 | kind: HiveCluster
4 | metadata:
5 | name: test-hive
6 | spec:
7 | image:
8 | {% if test_scenario['values']['hive-latest'].find(",") > 0 %}
9 | custom: "{{ test_scenario['values']['hive-latest'].split(',')[1] }}"
10 | productVersion: "{{ test_scenario['values']['hive-latest'].split(',')[0] }}"
11 | {% else %}
12 | productVersion: "{{ test_scenario['values']['hive-latest'] }}"
13 | {% endif %}
14 | pullPolicy: IfNotPresent
15 | clusterConfig:
16 | database:
17 | connString: jdbc:derby:;databaseName=/tmp/hive;create=true
18 | credentialsSecret: hive-credentials
19 | dbType: derby
20 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
21 | vectorAggregatorConfigMapName: vector-aggregator-discovery
22 | {% endif %}
23 | clusterOperation:
24 | stopped: false
25 | reconciliationPaused: true
26 | metastore:
27 | config:
28 | logging:
29 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
30 | roleGroups:
31 | default:
32 | replicas: 1
33 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/cluster-operation/40-restart-hive.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: hive.stackable.tech/v1alpha1
3 | kind: HiveCluster
4 | metadata:
5 | name: test-hive
6 | spec:
7 | image:
8 | {% if test_scenario['values']['hive-latest'].find(",") > 0 %}
9 | custom: "{{ test_scenario['values']['hive-latest'].split(',')[1] }}"
10 | productVersion: "{{ test_scenario['values']['hive-latest'].split(',')[0] }}"
11 | {% else %}
12 | productVersion: "{{ test_scenario['values']['hive-latest'] }}"
13 | {% endif %}
14 | pullPolicy: IfNotPresent
15 | clusterConfig:
16 | database:
17 | connString: jdbc:derby:;databaseName=/tmp/hive;create=true
18 | credentialsSecret: hive-credentials
19 | dbType: derby
20 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
21 | vectorAggregatorConfigMapName: vector-aggregator-discovery
22 | {% endif %}
23 | clusterOperation:
24 | stopped: false
25 | reconciliationPaused: false
26 | metastore:
27 | config:
28 | logging:
29 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
30 | roleGroups:
31 | default:
32 | replicas: 1
33 |
--------------------------------------------------------------------------------
/scripts/docs_templating.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -euo pipefail

# Reads a file with variables to insert into templates, and templates all *.j2 files
# in the 'docs' directory.
#
# dependencies
# pip install jinja2-cli

docs_dir="$(dirname "$0")/../docs"
templating_vars_file="$docs_dir/templating_vars.yaml"

# Check if files need templating
if [[ -z $(find "$docs_dir" -name '*.j2') ]];
then
  echo "No files need templating, exiting."
  exit
fi

# Check if jinja2 is there
if ! command -v jinja2 &> /dev/null
then
  echo "jinja2 could not be found. Use 'pip install jinja2-cli' to install it."
  exit 1
fi

# Check if templating vars file exists
if [[ ! -f "$templating_vars_file" ]];
then
  echo "$templating_vars_file does not exist, cannot start templating."
  # Bail out: the jinja2 invocations below would fail without the vars file.
  exit 1
fi

find "$docs_dir" -name '*.j2' |
  while read -r file
  do
    new_file_name=${file%.j2} # Remove .j2 suffix
    echo "templating $new_file_name"
    jinja2 "$file" "$templating_vars_file" -o "$new_file_name"
  done

echo "done"
42 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/external-access/install-hive.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: hive.stackable.tech/v1alpha1
3 | kind: HiveCluster
4 | metadata:
5 | name: test-hive
6 | spec:
7 | image:
8 | {% if test_scenario['values']['hive'].find(",") > 0 %}
9 | custom: "{{ test_scenario['values']['hive'].split(',')[1] }}"
10 | productVersion: "{{ test_scenario['values']['hive'].split(',')[0] }}"
11 | {% else %}
12 | productVersion: "{{ test_scenario['values']['hive'] }}"
13 | {% endif %}
14 | pullPolicy: IfNotPresent
15 | clusterConfig:
16 | database:
17 | connString: jdbc:derby:;databaseName=/tmp/hive;create=true
18 | credentialsSecret: hive-credentials
19 | dbType: derby
20 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
21 | vectorAggregatorConfigMapName: vector-aggregator-discovery
22 | {% endif %}
23 | metastore:
24 | roleConfig:
25 | listenerClass: test-cluster-internal-$NAMESPACE
26 | roleGroups:
27 | default:
28 | replicas: 1
29 | ---
30 | apiVersion: v1
31 | kind: Secret
32 | metadata:
33 | name: hive-credentials
34 | type: Opaque
35 | stringData:
36 | username: APP
37 | password: mine
38 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/certs/client.csr.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE REQUEST-----
2 | MIIC0TCCAbkCAQAwXjELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxlc3dpZy1I
3 | b2xzdGVpbjEOMAwGA1UEBwwFV2VkZWwxEjAQBgNVBAoMCVN0YWNrYWJsZTEOMAwG
4 | A1UEAwwFbWluaW8wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCjynVz
5 | +XHB8OCY4psEEYmjobpdzTlowwcSQN+YDPCkBeor0Tb87EgLzJK+JYbupoXBlNIJ
6 | PQaowJEo/SzSk8fu2XSMyvAZY4FWGxJy2yxIxvP/ibOGOYuiPGXK24t6ZcGTUVha
7 | uiZGSgkWrejWWh7MjFS+c1vaYZqB+Q1zPs5PFMlc8l5V/+b8Z7jMJi84mOfIPxjk
8 | vIyJr5UkeF3UfLqJS5y4LF4ty4Ft2iAd7bbfHanfvYmv6UoDVtMXtWo1oQPfv3sh
9 | WrmRLzw6euIAtbXc5CjByIakCbiDnAU8rKg+B1J4mvQgrLwo3qPryJgxSdidmZ2m
10 | ER7Ez+c5B0m/LiIhAgMBAAGgLjAsBgkqhkiG9w0BCQ4xHzAdMBsGA1UdEQQUMBKC
11 | BW1pbmlvgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAIXs2PGYTo8N6IZc
12 | eVVa82AxtJPSaIeRqrTuAiKloQPZxhpEaTSAR8wxWpzyIeilMgp8UuMo2M0euxGM
13 | gxhzOyNXFekDSoLbuvKHidhvJ+rVEVHag3VdKA22P4/OYV8HwP6yXsNWNXK6Sp6J
14 | pKwRE3PpSN4vPbEmbxLndM9SOVghV9RCVdLMPFXg+pfTNPm2H3cYGg4yU+Cdl5Dj
15 | voUOQzRMuvflScf+gzjBIx7xVvwo/d9szsyqPfMyTlK40kU+KGl5Mz+C7Icyljnj
16 | 8F92l4NbDrXpWuyNjoUwEH8Kdb4ioPACHgStTY0Js8vdVS7wWj6ylXPBRUd9Yxxg
17 | BWC7YHc=
18 | -----END CERTIFICATE REQUEST-----
19 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/new_version.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: New Version
3 | about: Request support for a new product version
4 | title: "[NEW VERSION]"
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## Which new version of Apache Hive should we support?
11 |
12 | Please specify the version, version range or version numbers to support, please also add these to the issue title
13 |
14 | ## Additional information
15 |
16 | If possible, provide a link to release notes/changelog
17 |
18 | ## Changes required
19 |
20 | Are there any upstream changes that we need to support?
21 | e.g. new features, changed features, deprecated features etc.
22 |
23 | ## Implementation checklist
24 |
25 |
29 |
30 | - [ ] Update the Docker image
31 | - [ ] Update documentation to include supported version(s)
32 | - [ ] Update and test getting started guide with updated version(s)
33 | - [ ] Update operator to support the new version (if needed)
34 | - [ ] Update integration tests to use the new version(s) (in addition to or replacing old versions)
35 | - [ ] Update examples to use new versions
36 |
--------------------------------------------------------------------------------
/tests/kuttl-test.yaml.jinja2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestSuite
4 | testDirs:
5 | {% for testcase in testinput.tests %}
6 | - ./tests/{{ testcase.name }}
7 | {% endfor %}
8 |
9 | startKIND: false
10 | suppress: ["events"]
11 | parallel: 2
12 |
13 | # The timeout (in seconds) is used when namespaces are created or
14 | # deleted, and, if not overridden, in TestSteps, TestAsserts, and
15 | # Commands. If not set, the timeout is 30 seconds by default.
16 | #
17 | # The deletion of a namespace can take a while until all resources,
18 | # especially PersistentVolumeClaims, are gracefully shut down. If the
19 | # timeout is reached in the meantime, even a successful test case is
20 | # considered a failure.
21 | #
22 | # For instance, the termination grace period of the Vector aggregator in
23 | # the logging tests is set to 60 seconds. If there are logs entries
24 | # which could not be forwarded yet to the external aggregator defined in
25 | # the VECTOR_AGGREGATOR environment variable, then the test aggregator
26 | # uses this period of time by trying to forward the events. In this
27 | # case, deleting a namespace with several Pods takes about 90 seconds.
28 | timeout: 300
29 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE/pre-release-getting-started-script.md:
--------------------------------------------------------------------------------
1 | ## Check and Update Getting Started Script
2 |
3 |
7 |
8 |
11 |
12 | Part of
13 |
14 | > [!NOTE]
15 | > During a Stackable release we need to check (and optionally update) the
16 | > getting-started scripts to ensure they still work after product and operator
17 | > updates.
18 |
19 | ```shell
20 | # Some of the scripts are in a code/ subdirectory
21 | # pushd docs/modules/superset/examples/getting_started
22 | # pushd docs/modules/superset/examples/getting_started/code
23 | pushd $(fd -td getting_started | grep examples); cd code 2>/dev/null || true
24 |
25 | # Make a fresh cluster (~12 seconds)
26 | kind delete cluster && kind create cluster
27 | ./getting_started.sh stackablectl
28 |
29 | # Make a fresh cluster (~12 seconds)
30 | kind delete cluster && kind create cluster
31 | ./getting_started.sh helm
32 |
33 | popd
34 | ```
35 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/cluster-operation/10-install-hive.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: hive.stackable.tech/v1alpha1
3 | kind: HiveCluster
4 | metadata:
5 | name: test-hive
6 | spec:
7 | image:
8 | {% if test_scenario['values']['hive-latest'].find(",") > 0 %}
9 | custom: "{{ test_scenario['values']['hive-latest'].split(',')[1] }}"
10 | productVersion: "{{ test_scenario['values']['hive-latest'].split(',')[0] }}"
11 | {% else %}
12 | productVersion: "{{ test_scenario['values']['hive-latest'] }}"
13 | {% endif %}
14 | pullPolicy: IfNotPresent
15 | clusterConfig:
16 | database:
17 | connString: jdbc:derby:;databaseName=/tmp/hive;create=true
18 | credentialsSecret: hive-credentials
19 | dbType: derby
20 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
21 | vectorAggregatorConfigMapName: vector-aggregator-discovery
22 | {% endif %}
23 | metastore:
24 | config:
25 | logging:
26 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
27 | roleGroups:
28 | default:
29 | replicas: 1
30 | ---
31 | apiVersion: v1
32 | kind: Secret
33 | metadata:
34 | name: hive-credentials
35 | type: Opaque
36 | stringData:
37 | username: APP
38 | password: mine
39 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/orphaned-resources/01-install-hive.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: hive.stackable.tech/v1alpha1
3 | kind: HiveCluster
4 | metadata:
5 | name: test-hive
6 | spec:
7 | image:
8 | {% if test_scenario['values']['hive-latest'].find(",") > 0 %}
9 | custom: "{{ test_scenario['values']['hive-latest'].split(',')[1] }}"
10 | productVersion: "{{ test_scenario['values']['hive-latest'].split(',')[0] }}"
11 | {% else %}
12 | productVersion: "{{ test_scenario['values']['hive-latest'] }}"
13 | {% endif %}
14 | pullPolicy: IfNotPresent
15 | clusterConfig:
16 | database:
17 | connString: jdbc:derby:;databaseName=/tmp/hive;create=true
18 | credentialsSecret: hive-credentials
19 | dbType: derby
20 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
21 | vectorAggregatorConfigMapName: vector-aggregator-discovery
22 | {% endif %}
23 | metastore:
24 | config:
25 | logging:
26 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
27 | roleGroups:
28 | default:
29 | replicas: 1
30 | remove:
31 | replicas: 1
32 | ---
33 | apiVersion: v1
34 | kind: Secret
35 | metadata:
36 | name: hive-credentials
37 | type: Opaque
38 | stringData:
39 | username: APP
40 | password: mine
41 |
--------------------------------------------------------------------------------
/docs/modules/hive/partials/supported-versions.adoc:
--------------------------------------------------------------------------------
1 | // The version ranges supported by Hive-Operator
2 | // This is a separate file, since it is used by both the direct Hive-Operator documentation, and the overarching
3 | // Stackable Platform documentation.
4 |
5 | - 4.2.0 (experimental)
6 | - 4.1.0 (experimental)
7 | - 4.0.1 (LTS)
8 | - 4.0.0 (deprecated)
9 | - 3.1.3 (deprecated)
10 |
11 | === Hive 4 issues
12 |
13 | Hive 4 has known compatibility issues, especially when using it with Iceberg or Trino.
14 | The missing compatibility with Iceberg also means that Spark jobs using this storage may fail.
15 |
16 | Be careful when upgrading Hive (e.g. 4.0.0 to 4.0.1, or 4.0.1 to 4.1.0), as such an upgrade is not easily reversible.
17 | Test the new version before upgrading your production workloads and take backups of your database.
18 |
19 | **Workaround:** If you encounter issues with Hive 4.x, use Hive 3.1.3 instead until these upstream issues are resolved.
20 |
21 | For more details, see:
22 |
23 | * https://github.com/stackabletech/hive-operator/issues/626[Our Stackable tracking issue]
24 | * https://github.com/trinodb/trino/issues/26214[Trino: Repeated ANALYZE fails on Hive metastore 4.0.x]
25 | * https://github.com/apache/iceberg/issues/12878[Iceberg: org.apache.thrift.TApplicationException: Invalid method name: 'get_table']
26 |
--------------------------------------------------------------------------------
/docs/modules/hive/partials/nav.adoc:
--------------------------------------------------------------------------------
1 | * xref:hive:getting_started/index.adoc[]
2 | ** xref:hive:getting_started/installation.adoc[]
3 | ** xref:hive:getting_started/first_steps.adoc[]
4 | * xref:hive:required-external-components.adoc[]
5 | * xref:hive:usage-guide/index.adoc[]
6 | ** xref:hive:usage-guide/listenerclass.adoc[]
7 | ** xref:hive:usage-guide/data-storage.adoc[]
8 | ** xref:hive:usage-guide/derby-example.adoc[]
9 | ** xref:hive:usage-guide/database-driver.adoc[]
10 | ** xref:hive:usage-guide/logging.adoc[]
11 | ** xref:hive:usage-guide/monitoring.adoc[]
12 | ** xref:hive:usage-guide/resources.adoc[]
13 | ** xref:hive:usage-guide/security.adoc[]
14 | ** xref:hive:usage-guide/overrides.adoc[]
15 | ** xref:hive:usage-guide/operations/index.adoc[]
16 | *** xref:hive:usage-guide/operations/cluster-operations.adoc[]
17 | *** xref:hive:usage-guide/operations/pod-placement.adoc[]
18 | *** xref:hive:usage-guide/operations/pod-disruptions.adoc[]
19 | *** xref:hive:usage-guide/operations/graceful-shutdown.adoc[]
20 | * xref:hive:reference/index.adoc[]
21 | ** xref:hive:reference/crds.adoc[]
22 | *** {crd-docs}/hive.stackable.tech/hivecluster/v1alpha1/[HiveCluster {external-link-icon}^]
23 | ** xref:hive:reference/discovery.adoc[]
24 | ** xref:hive:reference/commandline-parameters.adoc[]
25 | ** xref:hive:reference/environment-variables.adoc[]
26 | * xref:hive:troubleshooting/index.adoc[]
27 |
--------------------------------------------------------------------------------
/.github/workflows/pr_pre-commit.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | name: pre-commit
3 |
4 | on:
5 | pull_request:
6 | merge_group:
7 |
8 | env:
9 | CARGO_TERM_COLOR: always
10 | NIX_PKG_MANAGER_VERSION: "2.30.0"
11 | RUST_TOOLCHAIN_VERSION: "nightly-2025-10-23"
12 | HADOLINT_VERSION: "v2.14.0"
13 | PYTHON_VERSION: "3.14"
14 | JINJA2_CLI_VERSION: "0.8.2"
15 |
16 | jobs:
17 | pre-commit:
18 | runs-on: ubuntu-latest
19 | steps:
20 | - name: Install host dependencies
21 | uses: awalsh128/cache-apt-pkgs-action@acb598e5ddbc6f68a970c5da0688d2f3a9f04d05 # v1.6.0
22 | with:
23 | packages: protobuf-compiler krb5-user libkrb5-dev libclang-dev liblzma-dev libssl-dev pkg-config apt-transport-https
24 | version: ubuntu-latest
25 | - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
26 | with:
27 | persist-credentials: false
28 | submodules: recursive
29 | fetch-depth: 0
30 | - uses: stackabletech/actions/run-pre-commit@29bea1b451c0c2e994bd495969286f95bf49ed6a # v0.11.0
31 | with:
32 | python-version: ${{ env.PYTHON_VERSION }}
33 | rust: ${{ env.RUST_TOOLCHAIN_VERSION }}
34 | hadolint: ${{ env.HADOLINT_VERSION }}
35 | nix: ${{ env.NIX_PKG_MANAGER_VERSION }}
36 | nix-github-token: ${{ secrets.GITHUB_TOKEN }}
37 | jinja2-cli: ${{ env.JINJA2_CLI_VERSION }}
38 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/certs/generate.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Generates a self-signed root CA and a client certificate for host "minio"
# (used by the smoke test's MinIO instance), then copies the results to the
# file names expected by the secret-operator: ca.crt, tls.crt, tls.key.

echo "Creating client cert"
FQDN="minio"

echo "Creating Root Certificate Authority"
# 2048-bit RSA private key for the root CA
openssl genrsa \
  -out root-ca.key.pem \
  2048

echo "Self-signing the Root Certificate Authority"
# -nodes: leave the key unencrypted; -days 36500: ~100 years validity so the
# checked-in test fixtures never expire in practice
openssl req \
  -x509 \
  -new \
  -nodes \
  -key root-ca.key.pem \
  -days 36500 \
  -out root-ca.crt.pem \
  -subj "/C=DE/ST=Schleswig-Holstein/L=Wedel/O=Stackable Signing Authority Inc/CN=stackable.de"

# 2048-bit RSA private key for the client certificate
openssl genrsa \
  -out client.key.pem \
  2048

echo "Creating the CSR"
# subjectAltName covers both the service FQDN and localhost for local testing
openssl req -new \
  -key client.key.pem \
  -out client.csr.pem \
  -subj "/C=DE/ST=Schleswig-Holstein/L=Wedel/O=Stackable/CN=${FQDN}" \
  -addext "subjectAltName = DNS:${FQDN}, DNS:localhost"

echo "Signing the client cert with the root ca"
# -copy_extensions copy: carry the SAN extension from the CSR into the
# signed certificate (otherwise it would be dropped)
openssl x509 \
  -req -in client.csr.pem \
  -CA root-ca.crt.pem \
  -CAkey root-ca.key.pem \
  -CAcreateserial \
  -out client.crt.pem \
  -days 36500 \
  -copy_extensions copy

echo "Copying the files to match the api of the secret-operator"
cp root-ca.crt.pem ca.crt
cp client.key.pem tls.key
cp client.crt.pem tls.crt

echo "To create a k8s secret run"
echo "kubectl create secret generic foo --from-file=ca.crt --from-file=tls.crt --from-file=tls.key"
49 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE/pre-release-rust-deps.md:
--------------------------------------------------------------------------------
1 | ## Bump Rust Dependencies for Stackable Release YY.M.X
2 |
3 |
7 |
8 |
11 |
12 | Part of
13 |
14 | > [!NOTE]
15 | > During a Stackable release we need to update various Rust dependencies before
16 | > entering the final release period to ensure we run the latest versions of
17 | > crates. These bumps also include previously updated and released crates from
18 | > the `operator-rs` repository.
19 |
20 | ### Tasks
21 |
22 | - [ ] Bump Rust Dependencies, see below for more details.
23 | - [ ] Add changelog entry stating which important crates were bumped (including the version).
24 |
25 | > [!NOTE]
26 | > The bumping / updating of Rust dependencies is done in multiple steps:
27 | >
28 | > 1. Update the minimum Version in the root `Cargo.toml` manifest.
29 | > 2. Run the `cargo update` command, which also updates the `Cargo.lock` file.
30 | > 3. Lastly, run `make regenerate-nix` to update the `Cargo.nix` file.
31 |
32 | ### Bump Rust Dependencies
33 |
34 | - [ ] Bump `stackable-operator` and friends
35 | - [ ] Bump `product-config`
36 | - [ ] Bump all other dependencies
37 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/certs/tls.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDyDCCArCgAwIBAgIUCI2PNNrtzp6Ql7GkuaFxmDa6UBowDQYJKoZIhvcNAQEL
3 | BQAwezELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxlc3dpZy1Ib2xzdGVpbjEO
4 | MAwGA1UEBwwFV2VkZWwxKDAmBgNVBAoMH1N0YWNrYWJsZSBTaWduaW5nIEF1dGhv
5 | cml0eSBJbmMxFTATBgNVBAMMDHN0YWNrYWJsZS5kZTAgFw0yMzA2MTYxMjUxMDJa
6 | GA8yMTIzMDUyMzEyNTEwMlowXjELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxl
7 | c3dpZy1Ib2xzdGVpbjEOMAwGA1UEBwwFV2VkZWwxEjAQBgNVBAoMCVN0YWNrYWJs
8 | ZTEOMAwGA1UEAwwFbWluaW8wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
9 | AQCjynVz+XHB8OCY4psEEYmjobpdzTlowwcSQN+YDPCkBeor0Tb87EgLzJK+JYbu
10 | poXBlNIJPQaowJEo/SzSk8fu2XSMyvAZY4FWGxJy2yxIxvP/ibOGOYuiPGXK24t6
11 | ZcGTUVhauiZGSgkWrejWWh7MjFS+c1vaYZqB+Q1zPs5PFMlc8l5V/+b8Z7jMJi84
12 | mOfIPxjkvIyJr5UkeF3UfLqJS5y4LF4ty4Ft2iAd7bbfHanfvYmv6UoDVtMXtWo1
13 | oQPfv3shWrmRLzw6euIAtbXc5CjByIakCbiDnAU8rKg+B1J4mvQgrLwo3qPryJgx
14 | SdidmZ2mER7Ez+c5B0m/LiIhAgMBAAGjXzBdMBsGA1UdEQQUMBKCBW1pbmlvggls
15 | b2NhbGhvc3QwHQYDVR0OBBYEFJQ0gD5kEtQr+tDpDSZ7kwZ8H5hGMB8GA1UdIwQY
16 | MBaAFEI3RMMiyiBjyQ1RS8nlORJVd1pBMA0GCSqGSIb3DQEBCwUAA4IBAQBcdhd+
17 | R4JoGvqLBk59dqIUecctuFsrdPxsBiOFhYNgZqedLM0UL5DzyfAHfVO0LfSEDddX
18 | RJL9yL7+kMU0T76cvdC9XVAIE6HUwTo9GYsPqsuyZoVjNpEDJCwY3CvonlJVe4dq
19 | /gAbJMYB+TSmY5yDPz/JFY/XZzYaPb7OdeGujbVT5Ixp97ApS8YIiv73C0wUbc6R
20 | h0rcfRbykSQUh9vgVdXRSR8DT3WCfdqNzNBYXv9mqfW5z4sbGj+l3wUl/I3F/mIw
21 | fyO4Cti4akiGVHlffEy0wkzVaBxhcXj2I3BUThB4ZqjlssieVaFkwvXmmyRT0oEW
22 | 5H+NPHcquS1zPscl
23 | -----END CERTIFICATE-----
24 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/certs/client.crt.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDyDCCArCgAwIBAgIUCI2PNNrtzp6Ql7GkuaFxmDa6UBowDQYJKoZIhvcNAQEL
3 | BQAwezELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxlc3dpZy1Ib2xzdGVpbjEO
4 | MAwGA1UEBwwFV2VkZWwxKDAmBgNVBAoMH1N0YWNrYWJsZSBTaWduaW5nIEF1dGhv
5 | cml0eSBJbmMxFTATBgNVBAMMDHN0YWNrYWJsZS5kZTAgFw0yMzA2MTYxMjUxMDJa
6 | GA8yMTIzMDUyMzEyNTEwMlowXjELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxl
7 | c3dpZy1Ib2xzdGVpbjEOMAwGA1UEBwwFV2VkZWwxEjAQBgNVBAoMCVN0YWNrYWJs
8 | ZTEOMAwGA1UEAwwFbWluaW8wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
9 | AQCjynVz+XHB8OCY4psEEYmjobpdzTlowwcSQN+YDPCkBeor0Tb87EgLzJK+JYbu
10 | poXBlNIJPQaowJEo/SzSk8fu2XSMyvAZY4FWGxJy2yxIxvP/ibOGOYuiPGXK24t6
11 | ZcGTUVhauiZGSgkWrejWWh7MjFS+c1vaYZqB+Q1zPs5PFMlc8l5V/+b8Z7jMJi84
12 | mOfIPxjkvIyJr5UkeF3UfLqJS5y4LF4ty4Ft2iAd7bbfHanfvYmv6UoDVtMXtWo1
13 | oQPfv3shWrmRLzw6euIAtbXc5CjByIakCbiDnAU8rKg+B1J4mvQgrLwo3qPryJgx
14 | SdidmZ2mER7Ez+c5B0m/LiIhAgMBAAGjXzBdMBsGA1UdEQQUMBKCBW1pbmlvggls
15 | b2NhbGhvc3QwHQYDVR0OBBYEFJQ0gD5kEtQr+tDpDSZ7kwZ8H5hGMB8GA1UdIwQY
16 | MBaAFEI3RMMiyiBjyQ1RS8nlORJVd1pBMA0GCSqGSIb3DQEBCwUAA4IBAQBcdhd+
17 | R4JoGvqLBk59dqIUecctuFsrdPxsBiOFhYNgZqedLM0UL5DzyfAHfVO0LfSEDddX
18 | RJL9yL7+kMU0T76cvdC9XVAIE6HUwTo9GYsPqsuyZoVjNpEDJCwY3CvonlJVe4dq
19 | /gAbJMYB+TSmY5yDPz/JFY/XZzYaPb7OdeGujbVT5Ixp97ApS8YIiv73C0wUbc6R
20 | h0rcfRbykSQUh9vgVdXRSR8DT3WCfdqNzNBYXv9mqfW5z4sbGj+l3wUl/I3F/mIw
21 | fyO4Cti4akiGVHlffEy0wkzVaBxhcXj2I3BUThB4ZqjlssieVaFkwvXmmyRT0oEW
22 | 5H+NPHcquS1zPscl
23 | -----END CERTIFICATE-----
24 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/certs/ca.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIID2TCCAsGgAwIBAgIUNjquGYWtyJ5a6wy23Hz2GRcMlwMwDQYJKoZIhvcNAQEL
3 | BQAwezELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxlc3dpZy1Ib2xzdGVpbjEO
4 | MAwGA1UEBwwFV2VkZWwxKDAmBgNVBAoMH1N0YWNrYWJsZSBTaWduaW5nIEF1dGhv
5 | cml0eSBJbmMxFTATBgNVBAMMDHN0YWNrYWJsZS5kZTAgFw0yMzA2MTYxMjUxMDJa
6 | GA8yMTIzMDUyMzEyNTEwMlowezELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxl
7 | c3dpZy1Ib2xzdGVpbjEOMAwGA1UEBwwFV2VkZWwxKDAmBgNVBAoMH1N0YWNrYWJs
8 | ZSBTaWduaW5nIEF1dGhvcml0eSBJbmMxFTATBgNVBAMMDHN0YWNrYWJsZS5kZTCC
9 | ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANnV/vby3Ro57a2qvQRnn0je
10 | eKMU2+F0+lZNCAviGUD5bm8jk91oPZnk0bhQqeyErmDS4WT0zevERIBJJDjfL0D8
11 | 46Be7PiMKe0dGjoqI3z5cOIejc8aLPHSIlgN6lT3fIruS16coQgG4uaKiHF5+eWF
12 | DRULdu6dsYuz6dKjqRiUOhHwDwtUJkDwPv+EItqo0H+MLFLLYM0+lEIae7dN5CQ5
13 | So5WhL2cyv5VJ7lj/EAKCViIEgCmzDRDcRgSSjWyH4bn6yX2026fPIyWJFyEdL/6
14 | jAOJADDR0GyhNOXrEeqhocSMnIbQVqwAT0kMhuXSvwvlrnLyTpG5jZm4lUM34kMC
15 | AwEAAaNTMFEwHQYDVR0OBBYEFEI3RMMiyiBjyQ1RS8nlORJVd1pBMB8GA1UdIwQY
16 | MBaAFEI3RMMiyiBjyQ1RS8nlORJVd1pBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
17 | hvcNAQELBQADggEBAHtKRXdFdtUhtUjodmYQcedADhHZOhBpKinzou4brdk4HfhF
18 | Lr/WFlcRemlV6mBsLpyMuK+Td8ZUEQ6JERLy6lS/c6pOGxnB4aClE8at+C+TjJAO
19 | Vm3WSI6VR1cFXGeZjldVQ6xkQskMJzO7df6iMTPtV5RkMeJXtL6XamEi54rBogNH
20 | Nra+EJBQBl/Ze90NjeYbv20uQpZaaZFaaSmVoNHDpBwla0ouy+MjObC3SpgOq1IC
21 | Pl3NuwNLV8VbOr5HrhQAoKmgSNb3P8vaTVux/X0Yfjy/S7N9kPBaK9mFj74zwV9w
22 | qSQ14Kl5jO3V3hrGWYZEjDOgbZrrEX1KXEust+Q=
23 | -----END CERTIFICATE-----
24 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/certs/root-ca.crt.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIID2TCCAsGgAwIBAgIUNjquGYWtyJ5a6wy23Hz2GRcMlwMwDQYJKoZIhvcNAQEL
3 | BQAwezELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxlc3dpZy1Ib2xzdGVpbjEO
4 | MAwGA1UEBwwFV2VkZWwxKDAmBgNVBAoMH1N0YWNrYWJsZSBTaWduaW5nIEF1dGhv
5 | cml0eSBJbmMxFTATBgNVBAMMDHN0YWNrYWJsZS5kZTAgFw0yMzA2MTYxMjUxMDJa
6 | GA8yMTIzMDUyMzEyNTEwMlowezELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxl
7 | c3dpZy1Ib2xzdGVpbjEOMAwGA1UEBwwFV2VkZWwxKDAmBgNVBAoMH1N0YWNrYWJs
8 | ZSBTaWduaW5nIEF1dGhvcml0eSBJbmMxFTATBgNVBAMMDHN0YWNrYWJsZS5kZTCC
9 | ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANnV/vby3Ro57a2qvQRnn0je
10 | eKMU2+F0+lZNCAviGUD5bm8jk91oPZnk0bhQqeyErmDS4WT0zevERIBJJDjfL0D8
11 | 46Be7PiMKe0dGjoqI3z5cOIejc8aLPHSIlgN6lT3fIruS16coQgG4uaKiHF5+eWF
12 | DRULdu6dsYuz6dKjqRiUOhHwDwtUJkDwPv+EItqo0H+MLFLLYM0+lEIae7dN5CQ5
13 | So5WhL2cyv5VJ7lj/EAKCViIEgCmzDRDcRgSSjWyH4bn6yX2026fPIyWJFyEdL/6
14 | jAOJADDR0GyhNOXrEeqhocSMnIbQVqwAT0kMhuXSvwvlrnLyTpG5jZm4lUM34kMC
15 | AwEAAaNTMFEwHQYDVR0OBBYEFEI3RMMiyiBjyQ1RS8nlORJVd1pBMB8GA1UdIwQY
16 | MBaAFEI3RMMiyiBjyQ1RS8nlORJVd1pBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI
17 | hvcNAQELBQADggEBAHtKRXdFdtUhtUjodmYQcedADhHZOhBpKinzou4brdk4HfhF
18 | Lr/WFlcRemlV6mBsLpyMuK+Td8ZUEQ6JERLy6lS/c6pOGxnB4aClE8at+C+TjJAO
19 | Vm3WSI6VR1cFXGeZjldVQ6xkQskMJzO7df6iMTPtV5RkMeJXtL6XamEi54rBogNH
20 | Nra+EJBQBl/Ze90NjeYbv20uQpZaaZFaaSmVoNHDpBwla0ouy+MjObC3SpgOq1IC
21 | Pl3NuwNLV8VbOr5HrhQAoKmgSNb3P8vaTVux/X0Yfjy/S7N9kPBaK9mFj74zwV9w
22 | qSQ14Kl5jO3V3hrGWYZEjDOgbZrrEX1KXEust+Q=
23 | -----END CERTIFICATE-----
24 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | members = ["rust/operator-binary"]
3 | resolver = "2"
4 |
5 | [workspace.package]
6 | version = "0.0.0-dev"
7 | authors = ["Stackable GmbH <info@stackable.tech>"]
8 | license = "OSL-3.0"
9 | edition = "2021"
10 | repository = "https://github.com/stackabletech/hive-operator"
11 |
12 | [workspace.dependencies]
13 | product-config = { git = "https://github.com/stackabletech/product-config.git", tag = "0.8.0" }
14 | stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", features = ["telemetry", "versioned"], tag = "stackable-operator-0.100.1" }
15 |
16 | anyhow = "1.0"
17 | built = { version = "0.8", features = ["chrono", "git2"] }
18 | clap = "4.5"
19 | const_format = "0.2"
20 | fnv = "1.0"
21 | futures = { version = "0.3", features = ["compat"] }
22 | indoc = "2.0"
23 | # We pin the kube version, as we use a patch for 2.0.1 below
24 | kube = "=2.0.1"
25 | pin-project = "1.1"
26 | rstest = "0.26"
27 | semver = "1.0"
28 | serde = { version = "1.0", features = ["derive"] }
29 | serde_json = "1.0"
30 | serde_yaml = "0.9"
31 | snafu = "0.8"
32 | strum = { version = "0.27", features = ["derive"] }
33 | tokio = { version = "1.40", features = ["full"] }
34 | tracing = "0.1"
35 |
36 | # [patch."https://github.com/stackabletech/operator-rs.git"]
37 | # stackable-operator = { path = "../operator-rs/crates/stackable-operator" }
38 | # stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "main" }
39 |
40 | [patch.crates-io]
41 | kube = { git = "https://github.com/stackabletech/kube-rs", branch = "2.0.1-fix-schema-hoisting" }
42 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/logging/test_log_aggregation.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import requests
3 |
4 |
def check_sent_events():
    """Verify via the vector aggregator's GraphQL API that log events flowed.

    Every transform except "filteredInvalidEvents" must have forwarded at
    least one event; the invalid-events filter must have forwarded none.
    Raises AssertionError when an expectation is violated or the API is
    unreachable.
    """
    resp = requests.post(
        "http://hive-vector-aggregator:8686/graphql",
        json={
            "query": """
{
  transforms(first:100) {
    nodes {
      componentId
      metrics {
        sentEventsTotal {
          sentEventsTotal
        }
      }
    }
  }
}
"""
        },
    )

    assert resp.status_code == 200, "Cannot access the API of the vector aggregator."

    nodes = resp.json()["data"]["transforms"]["nodes"]
    for node in nodes:
        component = node["componentId"]
        sent = node["metrics"]["sentEventsTotal"]

        if component == "filteredInvalidEvents":
            # The invalid-events filter must not forward anything.
            assert sent is None or sent["sentEventsTotal"] == 0, (
                "Invalid log events were sent."
            )
        else:
            # Every other transform must have forwarded at least one event.
            assert sent is not None and sent["sentEventsTotal"] > 0, (
                f'No events were sent in "{component}".'
            )
45 |
46 |
# Script entry point: run the aggregation check; any failed assertion aborts
# with a non-zero exit code before the success message is printed.
if __name__ == "__main__":
    check_sent_events()
    print("Test successful!")
50 |
--------------------------------------------------------------------------------
/shell.nix:
--------------------------------------------------------------------------------
1 | let
2 | self = import ./. {};
3 | inherit (self) sources pkgs meta;
4 |
5 | beku = pkgs.callPackage (sources."beku.py" + "/beku.nix") {};
6 | cargoDependencySetOfCrate = crate: [ crate ] ++ pkgs.lib.concatMap cargoDependencySetOfCrate (crate.dependencies ++ crate.buildDependencies);
7 | cargoDependencySet = pkgs.lib.unique (pkgs.lib.flatten (pkgs.lib.mapAttrsToList (crateName: crate: cargoDependencySetOfCrate crate.build) self.cargo.workspaceMembers));
8 | in pkgs.mkShell rec {
9 | name = meta.operator.name;
10 |
11 | packages = with pkgs; [
12 | ## cargo et-al
13 | rustup # this breaks pkg-config if it is in the nativeBuildInputs
14 | cargo-udeps
15 |
16 | ## Extra dependencies for use in a pure env (nix-shell --pure)
17 |   ## These are mostly useful for maintainers of this shell.nix
18 | ## to ensure all the dependencies are caught.
19 | # cacert
20 | # vim nvim nano
21 | ];
22 |
23 | # derivation runtime dependencies
24 | buildInputs = pkgs.lib.concatMap (crate: crate.buildInputs) cargoDependencySet;
25 |
26 | # build time dependencies
27 | nativeBuildInputs = pkgs.lib.concatMap (crate: crate.nativeBuildInputs) cargoDependencySet ++ (with pkgs; [
28 | beku
29 | docker
30 | gettext # for the proper envsubst
31 | git
32 | jq
33 | kind
34 | kubectl
35 | kubernetes-helm
36 | kuttl
37 | nix # this is implied, but needed in the pure env
38 | # tilt already defined in default.nix
39 | which
40 | yq-go
41 | ]);
42 |
43 | LIBCLANG_PATH = "${pkgs.libclang.lib}/lib";
44 | BINDGEN_EXTRA_CLANG_ARGS = "-I${pkgs.glibc.dev}/include -I${pkgs.clang}/resource-root/include";
45 | }
46 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/60-install-hive.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: |
6 |       kubectl apply -n "$NAMESPACE" -f - <<EOF
7 |       ---
8 |       apiVersion: hive.stackable.tech/v1alpha1
9 |       kind: HiveCluster
10 |       metadata:
11 |         name: test-hive
12 |       spec:
13 |         image:
14 | {% if test_scenario['values']['hive'].find(",") > 0 %}
15 | custom: "{{ test_scenario['values']['hive'].split(',')[1] }}"
16 | productVersion: "{{ test_scenario['values']['hive'].split(',')[0] }}"
17 | {% else %}
18 | productVersion: "{{ test_scenario['values']['hive'] }}"
19 | {% endif %}
20 | pullPolicy: IfNotPresent
21 | clusterConfig:
22 | database:
23 | connString: jdbc:postgresql://postgresql:5432/hive
24 | credentialsSecret: hive-credentials
25 | dbType: postgres
26 | hdfs:
27 | configMap: hdfs
28 | authentication:
29 | kerberos:
30 | secretClass: kerberos-$NAMESPACE
31 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
32 | vectorAggregatorConfigMapName: vector-aggregator-discovery
33 | {% endif %}
34 | metastore:
35 | config:
36 | logging:
37 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
38 | roleGroups:
39 | default:
40 | replicas: 1
41 | EOF
42 | ---
43 | apiVersion: v1
44 | kind: Secret
45 | metadata:
46 | name: hive-credentials
47 | type: Opaque
48 | stringData:
49 | username: hive
50 | password: hive
51 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/61-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | timeout: 600
5 | commands:
6 | #
7 | # Test envOverrides
8 | #
9 | - script: |
10 | kubectl -n "$NAMESPACE" get sts hive-metastore-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "hive") | .env[] | select (.name == "COMMON_VAR" and .value == "group-value")'
11 | kubectl -n "$NAMESPACE" get sts hive-metastore-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "hive") | .env[] | select (.name == "GROUP_VAR" and .value == "group-value")'
12 | kubectl -n "$NAMESPACE" get sts hive-metastore-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "hive") | .env[] | select (.name == "ROLE_VAR" and .value == "role-value")'
13 | #
14 | # Test configOverrides
15 | #
16 | - script: |
17 | kubectl -n "$NAMESPACE" get cm hive-metastore-default -o yaml | yq -e '.data."hive-site.xml"' | yq -p=xml '.configuration.property[] | select(.name == "hive.metastore.warehouse.dir") | .value' | grep -qx "/stackable/warehouse/override"
18 | kubectl -n "$NAMESPACE" get cm hive-metastore-default -o yaml | yq -e '.data."hive-site.xml"' | yq -p=xml '.configuration.property[] | select(.name == "role-var") | .value' | grep -qx "role-value"
19 | kubectl -n "$NAMESPACE" get cm hive-metastore-default -o yaml | yq -e '.data."hive-site.xml"' | yq -p=xml '.configuration.property[] | select(.name == "group-var") | .value' | grep -qx "group-value"
20 | kubectl -n "$NAMESPACE" get cm hive-metastore-default -o yaml | yq -e '.data."hive-site.xml"' | yq -p=xml '.configuration.property[] | select(.name == "common-var") | .value' | grep -qx "group-value"
21 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/30-install-hdfs.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: |
6 | kubectl apply -n "$NAMESPACE" -f - <//archive/.tar.gz"
13 | },
14 | "crate2nix": {
15 | "branch": "master",
16 | "description": "nix build file generator for rust crates",
17 | "homepage": "",
18 | "owner": "kolloch",
19 | "repo": "crate2nix",
20 | "rev": "be31feae9a82c225c0fd1bdf978565dc452a483a",
21 | "sha256": "14d0ymlrwk7dynv35qcw4xn0dylfpwjmf6f8znflbk2l6fk23l12",
22 | "type": "tarball",
23 | "url": "https://github.com/kolloch/crate2nix/archive/be31feae9a82c225c0fd1bdf978565dc452a483a.tar.gz",
24 |     "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
25 | },
26 | "nixpkgs": {
27 | "branch": "nixpkgs-unstable",
28 | "description": "Nix Packages collection",
29 | "homepage": "",
30 | "owner": "NixOS",
31 | "repo": "nixpkgs",
32 | "rev": "a7fc11be66bdfb5cdde611ee5ce381c183da8386",
33 | "sha256": "0h3gvjbrlkvxhbxpy01n603ixv0pjy19n9kf73rdkchdvqcn70j2",
34 | "type": "tarball",
35 | "url": "https://github.com/NixOS/nixpkgs/archive/a7fc11be66bdfb5cdde611ee5ce381c183da8386.tar.gz",
36 |     "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/certs/tls.key:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCjynVz+XHB8OCY
3 | 4psEEYmjobpdzTlowwcSQN+YDPCkBeor0Tb87EgLzJK+JYbupoXBlNIJPQaowJEo
4 | /SzSk8fu2XSMyvAZY4FWGxJy2yxIxvP/ibOGOYuiPGXK24t6ZcGTUVhauiZGSgkW
5 | rejWWh7MjFS+c1vaYZqB+Q1zPs5PFMlc8l5V/+b8Z7jMJi84mOfIPxjkvIyJr5Uk
6 | eF3UfLqJS5y4LF4ty4Ft2iAd7bbfHanfvYmv6UoDVtMXtWo1oQPfv3shWrmRLzw6
7 | euIAtbXc5CjByIakCbiDnAU8rKg+B1J4mvQgrLwo3qPryJgxSdidmZ2mER7Ez+c5
8 | B0m/LiIhAgMBAAECggEAAd3t5suCE27WcIessqgshHP0dts++0W1z+xzX/1NxODY
9 | YXV6Bfn/fDrxtT8iQZgeUC2NE1PhvoyrWuc/2oqarctGu9AYWoG62KtoU2zSHWY/
10 | I7uDE1WWlNvRYTWNanC8exxjQG18wDJZ1itXSxIt5bD3yk/wTRXtt+uJzrV5jocu
11 | chxDLwowiu1Aj6dRCZNBz9TJxyNr50NYW2UXBaT/87XrFFdJwMTVT0B7HOnG7RBV
12 | QlKw8mqVbaNenhcvMjR29sxTzHR+jxIO3BwO6OGj/OFhFBYU7TLXeld1qoe0vb2G
13 | b8hPpGuptr5At9lw1w5wQ3IgiutPNH5qyDyCpEl6EQKBgQDcdbslOfKJj7O2LAyY
14 | FtkTpilE0V3j0qmQ93IjrV4+DRmLMEB299407BUYQQj1/DIbQcoZ2EEcUB5pdeHs
15 | 7DMED6XLHb2JU12+a7sWyCNdKece+T7/IblI8Tt340UlHQ6zSMSDcjvfcFHVgv0s
16 | CajhFx7NkLEXTZr8fT3YIhj4vQKBgQC+MgZ1UmoJw9IAQj2uIU5Cy9xjWeYDTAO/
17 | YaXIzwlge438Mcbb4cN2SaNSGDgV7nu5kqiihpjPYWIiiOBp9kTRHXOdPW47syeI
18 | t3kwrp2zVlUglcMZZ6mmV3QVaANZgjU4RSv4e/VxULjbZafjPtZRsjZGpK0YU1ot
19 | Vj8IeQ7fNQKBgCP+ZMuzJlInUCQSFQxPzqlSm7JMrJOhtWWhwNTqXVSsNttuyVej
20 | KHhjgx4uoBPpVRT2LNUDZb4FprF5OaXA+qNTGrKK7IMbRVbtp+IUUxHG4aFA+HQX
21 | QXUTTa5JQOTKVbgXzV3YrMXSRMojVMp32UbGy5SsZv1zAjbvC8XZ61HRAoGAdBcQ
22 | vhe5xZAS5DmKcHi/ziGkubern6OMPgqaKIHGlW+U8LRpTtj0d4Tm/TrvMOPJ/TE5
23 | YUqKhzpHrhCh+ctpocI6SWWvnRzzKo3imQZcF5TAjQ0ccqtFb9S9dDtyn/XMCjae
24 | aiMvYyUEUFYyLZCzPFZsrp3hUZG+3yFfhApwO2kCgYBHwYaPIdW6Ww7+B2hin0ow
25 | ja3cesvA4jaMPwSLT8ONtU1GBSMfwczMbnPHLrRvB87n9gPaR2wQGUmrFEO3LPX/
26 | kRcOGpYBHpDXEjDhKkWdRuLOFg4HLZdV8ANZlQ0VScE8u3dDDUO89pGDn08qTArl
27 | x9kxsudEVrkerZb5UxFVqQ==
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/certs/client.key.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCjynVz+XHB8OCY
3 | 4psEEYmjobpdzTlowwcSQN+YDPCkBeor0Tb87EgLzJK+JYbupoXBlNIJPQaowJEo
4 | /SzSk8fu2XSMyvAZY4FWGxJy2yxIxvP/ibOGOYuiPGXK24t6ZcGTUVhauiZGSgkW
5 | rejWWh7MjFS+c1vaYZqB+Q1zPs5PFMlc8l5V/+b8Z7jMJi84mOfIPxjkvIyJr5Uk
6 | eF3UfLqJS5y4LF4ty4Ft2iAd7bbfHanfvYmv6UoDVtMXtWo1oQPfv3shWrmRLzw6
7 | euIAtbXc5CjByIakCbiDnAU8rKg+B1J4mvQgrLwo3qPryJgxSdidmZ2mER7Ez+c5
8 | B0m/LiIhAgMBAAECggEAAd3t5suCE27WcIessqgshHP0dts++0W1z+xzX/1NxODY
9 | YXV6Bfn/fDrxtT8iQZgeUC2NE1PhvoyrWuc/2oqarctGu9AYWoG62KtoU2zSHWY/
10 | I7uDE1WWlNvRYTWNanC8exxjQG18wDJZ1itXSxIt5bD3yk/wTRXtt+uJzrV5jocu
11 | chxDLwowiu1Aj6dRCZNBz9TJxyNr50NYW2UXBaT/87XrFFdJwMTVT0B7HOnG7RBV
12 | QlKw8mqVbaNenhcvMjR29sxTzHR+jxIO3BwO6OGj/OFhFBYU7TLXeld1qoe0vb2G
13 | b8hPpGuptr5At9lw1w5wQ3IgiutPNH5qyDyCpEl6EQKBgQDcdbslOfKJj7O2LAyY
14 | FtkTpilE0V3j0qmQ93IjrV4+DRmLMEB299407BUYQQj1/DIbQcoZ2EEcUB5pdeHs
15 | 7DMED6XLHb2JU12+a7sWyCNdKece+T7/IblI8Tt340UlHQ6zSMSDcjvfcFHVgv0s
16 | CajhFx7NkLEXTZr8fT3YIhj4vQKBgQC+MgZ1UmoJw9IAQj2uIU5Cy9xjWeYDTAO/
17 | YaXIzwlge438Mcbb4cN2SaNSGDgV7nu5kqiihpjPYWIiiOBp9kTRHXOdPW47syeI
18 | t3kwrp2zVlUglcMZZ6mmV3QVaANZgjU4RSv4e/VxULjbZafjPtZRsjZGpK0YU1ot
19 | Vj8IeQ7fNQKBgCP+ZMuzJlInUCQSFQxPzqlSm7JMrJOhtWWhwNTqXVSsNttuyVej
20 | KHhjgx4uoBPpVRT2LNUDZb4FprF5OaXA+qNTGrKK7IMbRVbtp+IUUxHG4aFA+HQX
21 | QXUTTa5JQOTKVbgXzV3YrMXSRMojVMp32UbGy5SsZv1zAjbvC8XZ61HRAoGAdBcQ
22 | vhe5xZAS5DmKcHi/ziGkubern6OMPgqaKIHGlW+U8LRpTtj0d4Tm/TrvMOPJ/TE5
23 | YUqKhzpHrhCh+ctpocI6SWWvnRzzKo3imQZcF5TAjQ0ccqtFb9S9dDtyn/XMCjae
24 | aiMvYyUEUFYyLZCzPFZsrp3hUZG+3yFfhApwO2kCgYBHwYaPIdW6Ww7+B2hin0ow
25 | ja3cesvA4jaMPwSLT8ONtU1GBSMfwczMbnPHLrRvB87n9gPaR2wQGUmrFEO3LPX/
26 | kRcOGpYBHpDXEjDhKkWdRuLOFg4HLZdV8ANZlQ0VScE8u3dDDUO89pGDn08qTArl
27 | x9kxsudEVrkerZb5UxFVqQ==
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/deploy/helm/hive-operator/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for hive-operator.
2 | ---
3 | image:
4 | repository: oci.stackable.tech/sdp/hive-operator
5 | pullPolicy: IfNotPresent
6 | pullSecrets: []
7 |
8 | nameOverride: ""
9 | fullnameOverride: ""
10 |
11 | serviceAccount:
12 | # Specifies whether a service account should be created
13 | create: true
14 | # Annotations to add to the service account
15 | annotations: {}
16 | # The name of the service account to use.
17 | # If not set and create is true, a name is generated using the fullname template
18 | name: ""
19 |
20 | podAnnotations: {}
21 |
22 | # Provide additional labels which get attached to all deployed resources
23 | labels:
24 | stackable.tech/vendor: Stackable
25 |
26 | podSecurityContext: {}
27 | # fsGroup: 2000
28 |
29 | securityContext: {}
30 | # capabilities:
31 | # drop:
32 | # - ALL
33 | # readOnlyRootFilesystem: true
34 | # runAsNonRoot: true
35 | # runAsUser: 1000
36 |
37 | resources:
38 | limits:
39 | cpu: 100m
40 | memory: 128Mi
41 | requests:
42 | cpu: 100m
43 | memory: 128Mi
44 |
45 | nodeSelector: {}
46 |
47 | tolerations: []
48 |
49 | affinity: {}
50 |
51 | # When running on a non-default Kubernetes cluster domain, the cluster domain can be configured here.
52 | # See the https://docs.stackable.tech/home/stable/guides/kubernetes-cluster-domain guide for details.
53 | # kubernetesClusterDomain: my-cluster.local
54 |
55 | # See all available options and detailed explanations about the concept here:
56 | # https://docs.stackable.tech/home/stable/concepts/telemetry/
57 | telemetry:
58 | consoleLog:
59 | enabled: true
60 | fileLog:
61 | enabled: false
62 | rotationPeriod: hourly
63 | maxFiles: 6
64 | otelLogExporter:
65 | enabled: false
66 | otelTraceExporter:
67 | enabled: false
68 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/certs/root-ca.key.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDZ1f728t0aOe2t
3 | qr0EZ59I3nijFNvhdPpWTQgL4hlA+W5vI5PdaD2Z5NG4UKnshK5g0uFk9M3rxESA
4 | SSQ43y9A/OOgXuz4jCntHRo6KiN8+XDiHo3PGizx0iJYDepU93yK7ktenKEIBuLm
5 | iohxefnlhQ0VC3bunbGLs+nSo6kYlDoR8A8LVCZA8D7/hCLaqNB/jCxSy2DNPpRC
6 | Gnu3TeQkOUqOVoS9nMr+VSe5Y/xACglYiBIApsw0Q3EYEko1sh+G5+sl9tNunzyM
7 | liRchHS/+owDiQAw0dBsoTTl6xHqoaHEjJyG0FasAE9JDIbl0r8L5a5y8k6RuY2Z
8 | uJVDN+JDAgMBAAECggEABVrQUKu5qapg4FMBIHmXncfyOTgLC6i/ep2cJAajzkgT
9 | YeIDAX9NfFn2mcxJ0QmV68VjSwMFiNUjRfAGVuuNktBknA2ZT6bKZQzBF0rv4mOT
10 | VcugesXO8wbSV03IQ9xtkFC5Q5MgFj1tGHOxVPDFptG1d533h3gS5DdA+S+SuYrn
11 | n8JUqjenVzYgC5CFprDXEy/ZOC/Is/oq/GujC3e6VJINueCOOrVkNKhMtktq8qkg
12 | UtkjZQYP/d0nzR8bYGXN818MBZOg+RyA0asgkDe+n6Lr6gNzaqhECDdITDejq/0h
13 | D8ldKD4v8CYTRAY1AteIAF0jUg2YuWZwgQ8IzL0viQKBgQD2R0AB0RHhxHJsnWoQ
14 | EhWPyD3fo3w8Q5dIxugpP2LcloyOH6Ew5xlwnmPu0wL31D8CHad+vZnUVEqMIZqk
15 | rZy9r0EZc2JmC1Sgpv7NkpY3johBrONIAKsStTpNpr/3EB0rAeQ9KzS0TafeYXKt
16 | buz8Fx3kcBPosLCs9+r/+TDzhQKBgQDib1K0oQV55pFTn9OkoHPTitLsOOwpLxjC
17 | ui8R73PhhvCK3UNftsq3U/Gj6L3wi5ATeE0SC1xCu2ZvU8K7EBRflvoGMmWfAh+O
18 | XMfE21yLyrSgUVhsDeC2VILyxIB0sgcT7uze3TjD2Dm0vg0NQrCNa8euLAn5WIHS
19 | QE6jd8t1JwKBgAlLDP5EjmEvkYXJttveYtPnIXaT67c2cbn8T3xm+OsL/0fJp8J5
20 | pfsa7vhvG/iQGMSSq+RbcTeS6rE4/2Xhaz25JEK6mOby3IGna4wEUQjNpxSbWoQ4
21 | CjyNfCK7/Rhskj0yOBOa0sVO/NumX7ZtriGhGa6qEAZCzJfqTwLTu2YlAoGAaEmt
22 | ZdPjmck/law+5cugjQWbL4DoA+/VD5qAo1oNnQlxMAPITAT8SIM4/6zqDie5K750
23 | gKMK0xFMlGmXfmBhgcUfUktT0nA/6GmC+H+vmBK8LjpI5ztdC5zQ0s79+sEj0WJx
24 | ZhOtWUX1DfGaQUk912SUivttfJHu+M71aQR7iHECgYEAqIPW6opxX5p3bSrfGDY0
25 | vqcxpTLjBAUtCig0UoI+01PiuEudCB9ed2AWkk+h0KzvtKP2VW8r4tYJFeDU2Jt3
26 | s3mcO3Ix1cwHdb2CjzBm4dReyBsIUIzJRPl9spz0cRYhdQkREIHJUoCskwrqCsS0
27 | O9W/M6BZHjhM/7eA8StNHEU=
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/usage-guide/data-storage.adoc:
--------------------------------------------------------------------------------
1 | = Data storage backends
2 | :description: Hive supports metadata storage on S3 and HDFS. Configure S3 with S3Connection and HDFS with configMap in clusterConfig.
3 |
4 | You can operate the Hive metastore service (HMS) without S3 or HDFS.
5 | Its whole purpose is to store metadata such as "Table foo has columns a, b and c and is stored as parquet in local://tmp/hive/foo".
6 |
7 | However, as soon as you start storing metadata in the HMS that refers to `s3a://` or `hdfs://` locations, HMS will actually perform some operations on the filesystem, e.g. checking whether the table location exists and creating it if it is missing.
8 |
9 | So if you are storing tables in S3 (or HDFS for that matter), you need to give the HMS access to that filesystem as well.
10 | The Stackable Operator currently supports S3 and HDFS.
11 |
12 | [#s3]
13 | == S3 support
14 |
15 | HMS supports creating tables in S3 compatible object stores.
16 | To use this feature you need to provide connection details for the object store using the xref:concepts:s3.adoc[S3Connection] in the top level `clusterConfig`.
17 |
18 | An example usage can look like this:
19 |
20 | [source,yaml]
21 | ----
22 | clusterConfig:
23 | s3:
24 | inline:
25 | host: minio
26 | port: 9000
27 | accessStyle: Path
28 | credentials:
29 | secretClass: simple-hive-s3-secret-class
30 | ----
31 |
32 | [#hdfs]
33 | == Apache HDFS support
34 |
35 | As well as S3, HMS also supports creating tables in HDFS.
36 | You can add the HDFS connection in the top level `clusterConfig` as follows:
37 |
38 | [source,yaml]
39 | ----
40 | clusterConfig:
41 | hdfs:
42 | configMap: my-hdfs-cluster # Name of the HdfsCluster
43 | ----
44 |
45 | Read about the xref:hdfs:index.adoc[Stackable Operator for Apache HDFS] to learn more about setting up HDFS.
46 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/resources/10-install-hive.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: hive.stackable.tech/v1alpha1
3 | kind: HiveCluster
4 | metadata:
5 | name: hive
6 | spec:
7 | image:
8 | {% if test_scenario['values']['hive'].find(",") > 0 %}
9 | custom: "{{ test_scenario['values']['hive'].split(',')[1] }}"
10 | productVersion: "{{ test_scenario['values']['hive'].split(',')[0] }}"
11 | {% else %}
12 | productVersion: "{{ test_scenario['values']['hive'] }}"
13 | {% endif %}
14 | pullPolicy: IfNotPresent
15 | clusterConfig:
16 | database:
17 | connString: jdbc:derby:;databaseName=/tmp/hive;create=true
18 | credentialsSecret: hive-credentials
19 | dbType: derby
20 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
21 | vectorAggregatorConfigMapName: vector-aggregator-discovery
22 | {% endif %}
23 | metastore:
24 | config:
25 | logging:
26 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
27 | resources:
28 | cpu:
29 | min: 400m
30 | max: "4"
31 | memory:
32 | limit: 4Gi
33 | roleGroups:
34 | resources-from-role:
35 | replicas: 1
36 | resources-from-role-group:
37 | replicas: 1
38 | config:
39 | resources:
40 | cpu:
41 | min: 300m
42 | max: "3"
43 | memory:
44 | limit: 3Gi
45 | resources-from-pod-overrides:
46 | podOverrides:
47 | spec:
48 | containers:
49 | - name: hive
50 | resources:
51 | requests:
52 | cpu: 500m
53 | limits:
54 | cpu: 3100m
55 | ---
56 | apiVersion: v1
57 | kind: Secret
58 | metadata:
59 | name: hive-credentials
60 | type: Opaque
61 | stringData:
62 | username: APP
63 | password: mine
64 |
--------------------------------------------------------------------------------
/.readme/static/borrowed/Icon_Stackable.svg:
--------------------------------------------------------------------------------
1 |
21 |
--------------------------------------------------------------------------------
/rust/operator-binary/src/operations/pdb.rs:
--------------------------------------------------------------------------------
1 | use snafu::{ResultExt, Snafu};
2 | use stackable_operator::{
3 | builder::pdb::PodDisruptionBudgetBuilder, client::Client, cluster_resources::ClusterResources,
4 | commons::pdb::PdbConfig, kube::ResourceExt,
5 | };
6 |
7 | use crate::{
8 | OPERATOR_NAME,
9 | controller::HIVE_CONTROLLER_NAME,
10 | crd::{APP_NAME, HiveRole, v1alpha1},
11 | };
12 |
13 | #[derive(Snafu, Debug)]
14 | pub enum Error {
15 | #[snafu(display("Cannot create PodDisruptionBudget for role [{role}]"))]
16 | CreatePdb {
17 | source: stackable_operator::builder::pdb::Error,
18 | role: String,
19 | },
20 | #[snafu(display("Cannot apply PodDisruptionBudget [{name}]"))]
21 | ApplyPdb {
22 | source: stackable_operator::cluster_resources::Error,
23 | name: String,
24 | },
25 | }
26 |
27 | pub async fn add_pdbs(
28 | pdb: &PdbConfig,
29 | hive: &v1alpha1::HiveCluster,
30 | role: &HiveRole,
31 | client: &Client,
32 | cluster_resources: &mut ClusterResources,
33 | ) -> Result<(), Error> {
34 | if !pdb.enabled {
35 | return Ok(());
36 | }
37 | let max_unavailable = pdb.max_unavailable.unwrap_or(match role {
38 | HiveRole::MetaStore => max_unavailable_metastores(),
39 | });
40 | let pdb = PodDisruptionBudgetBuilder::new_with_role(
41 | hive,
42 | APP_NAME,
43 | &role.to_string(),
44 | OPERATOR_NAME,
45 | HIVE_CONTROLLER_NAME,
46 | )
47 | .with_context(|_| CreatePdbSnafu {
48 | role: role.to_string(),
49 | })?
50 | .with_max_unavailable(max_unavailable)
51 | .build();
52 | let pdb_name = pdb.name_any();
53 | cluster_resources
54 | .add(client, pdb)
55 | .await
56 | .with_context(|_| ApplyPdbSnafu { name: pdb_name })?;
57 |
58 | Ok(())
59 | }
60 |
61 | fn max_unavailable_metastores() -> u16 {
62 | 1
63 | }
64 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/helm-bitnami-minio-values.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | global:
3 | security:
4 | allowInsecureImages: true # needed starting with Chart version 14.9.0 if modifying images
5 |
6 | image:
7 | repository: bitnamilegacy/minio
8 | clientImage:
9 | repository: bitnamilegacy/minio-client
10 | defaultInitContainers:
11 | volumePermissions: # volumePermissions moved under defaultInitContainers starting with Chart version 17.0.0
12 | enabled: false
13 | image:
14 | repository: bitnamilegacy/os-shell
15 | console:
16 | image:
17 | repository: bitnamilegacy/minio-object-browser
18 |
19 | mode: standalone
20 | disableWebUI: false
21 | extraEnvVars:
22 | - name: BITNAMI_DEBUG
23 | value: "true"
24 | - name: MINIO_LOG_LEVEL
25 | value: DEBUG
26 |
27 | #defaultBuckets: hive
28 |
29 | provisioning:
30 | enabled: true
31 | buckets:
32 | - name: hive
33 | usersExistingSecrets:
34 | - centralized-minio-users
35 | resources:
36 | requests:
37 | memory: 1Gi
38 | cpu: "512m"
39 | limits:
40 | memory: "1Gi"
41 | cpu: "1"
42 | podSecurityContext:
43 | enabled: false
44 | containerSecurityContext:
45 | enabled: false
46 |
47 | # volumePermissions can be removed starting with Chart version 17.0.0, moved under defaultInitContainers
48 | volumePermissions:
49 | enabled: false
50 | image:
51 | repository: bitnamilegacy/os-shell
52 |
53 | podSecurityContext:
54 | enabled: false
55 |
56 | containerSecurityContext:
57 | enabled: false
58 |
59 | persistence:
60 | enabled: false
61 |
62 | resources:
63 | requests:
64 | memory: 1Gi
65 | cpu: "512m"
66 | limits:
67 | memory: "1Gi"
68 | cpu: "1"
69 |
70 | auth:
71 | existingSecret: minio-credentials
72 |
73 | service:
74 | type: NodePort
75 |
76 | {% if test_scenario['values']['s3-use-tls'] == 'true' %}
77 | tls:
78 | enabled: true
79 | existingSecret: minio-tls-certificates
80 | {% endif %}
81 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/02-bug_report.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: "🐛 Bug Report"
3 | description: "If something isn't working as expected 🤔."
4 | labels: ["type/bug"]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: Thanks for taking the time to file a bug report! Please fill out this form as completely as possible.
9 |
10 | - type: input
11 | attributes:
12 | label: Affected Stackable version
13 | description: Which version of the Stackable Operator do you see this bug in?
14 |
15 | #
16 | - type: input
17 | attributes:
18 | label: Affected Apache Hive version
19 | description: Which version of Apache Hive do you see this bug in?
20 | #
21 |
22 | - type: textarea
23 | attributes:
24 | label: Current and expected behavior
25 | description: A clear and concise description of what the operator is doing and what you would expect.
26 | validations:
27 | required: true
28 |
29 | - type: textarea
30 | attributes:
31 | label: Possible solution
32 | description: "If you have suggestions on a fix for the bug."
33 |
34 | - type: textarea
35 | attributes:
36 | label: Additional context
37 | description: "Add any other context about the problem here. Or a screenshot if applicable."
38 |
39 | - type: textarea
40 | attributes:
41 | label: Environment
42 | description: |
43 | What type of kubernetes cluster you are running against (k3s/eks/aks/gke/other) and any other information about your environment?
44 | placeholder: |
45 | Examples:
46 | Output of `kubectl version --short`
47 |
48 | - type: dropdown
49 | attributes:
50 | label: Would you like to work on fixing this bug?
51 | description: |
52 | **NOTE**: Let us know if you would like to submit a PR for this. We are more than happy to help you through the process.
53 | options:
54 | - "yes"
55 | - "no"
56 | - "maybe"
57 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-s3/02-create-kerberos-secretclass.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: |
6 | kubectl apply -n "$NAMESPACE" -f - <-
31 | .pod == "test-hive-metastore-automatic-log-config-0" &&
32 | .container == "hive"
33 | filteredAutomaticLogConfigMetastoreVector:
34 | type: filter
35 | inputs: [validEvents]
36 | condition: >-
37 | .pod == "test-hive-metastore-automatic-log-config-0" &&
38 | .container == "vector"
39 | filteredCustomLogConfigMetastoreHive:
40 | type: filter
41 | inputs: [validEvents]
42 | condition: >-
43 | .pod == "test-hive-metastore-custom-log-config-0" &&
44 | .container == "hive"
45 | filteredCustomLogConfigMetastoreVector:
46 | type: filter
47 | inputs: [validEvents]
48 | condition: >-
49 | .pod == "test-hive-metastore-custom-log-config-0" &&
50 | .container == "vector"
51 | filteredInvalidEvents:
52 | type: filter
53 | inputs: [vector]
54 | condition: |-
55 | .timestamp == from_unix_timestamp!(0) ||
56 | is_null(.level) ||
57 | is_null(.logger) ||
58 | is_null(.message)
59 | sinks:
60 | test:
61 | inputs: [filtered*]
62 | type: blackhole
63 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
64 | aggregator:
65 | inputs: [vector]
66 | type: vector
67 | address: {{ lookup('env', 'VECTOR_AGGREGATOR') }}
68 | buffer:
69 | # Avoid back pressure from VECTOR_AGGREGATOR. The test should
70 | # not fail if the aggregator is not available.
71 | when_full: drop_newest
72 | {% endif %}
73 |
--------------------------------------------------------------------------------
/examples/hive-opa-cluster.yaml:
--------------------------------------------------------------------------------
1 | # helm install postgresql oci://registry-1.docker.io/bitnamicharts/postgresql \
2 | # --version 16.5.0 \
3 | # --namespace default \
4 | # --set image.repository=bitnamilegacy/postgresql \
5 | # --set volumePermissions.image.repository=bitnamilegacy/os-shell \
6 | # --set metrics.image.repository=bitnamilegacy/postgres-exporter \
7 | # --set global.security.allowInsecureImages=true \
8 | # --set auth.username=hive \
9 | # --set auth.password=hive \
10 | # --set auth.database=hive \
11 | # --set primary.extendedConfiguration="password_encryption=md5" \
12 | # --wait
13 | ---
14 | apiVersion: hive.stackable.tech/v1alpha1
15 | kind: HiveCluster
16 | metadata:
17 | name: hive
18 | spec:
19 | image:
20 | productVersion: 4.1.0
21 | pullPolicy: IfNotPresent
22 | clusterConfig:
23 | authorization:
24 | opa:
25 | configMapName: opa
26 | package: hms
27 | database:
28 | connString: jdbc:postgresql://postgresql:5432/hive
29 | credentialsSecret: hive-postgresql-credentials
30 | dbType: postgres
31 | metastore:
32 | roleGroups:
33 | default:
34 | replicas: 1
35 | config:
36 | resources:
37 | cpu:
38 | min: 300m
39 | max: "2"
40 | memory:
41 | limit: 5Gi
42 | ---
43 | apiVersion: v1
44 | kind: Secret
45 | metadata:
46 | name: hive-postgresql-credentials
47 | type: Opaque
48 | stringData:
49 | username: hive
50 | password: hive
51 | ---
52 | apiVersion: opa.stackable.tech/v1alpha1
53 | kind: OpaCluster
54 | metadata:
55 | name: opa
56 | spec:
57 | image:
58 | productVersion: 1.8.0
59 | servers:
60 | config:
61 | logging:
62 | enableVectorAgent: false
63 | containers:
64 | opa:
65 | console:
66 | level: INFO
67 | file:
68 | level: INFO
69 | loggers:
70 | decision:
71 | level: INFO
72 | roleGroups:
73 | default: {}
74 | ---
75 | apiVersion: v1
76 | kind: ConfigMap
77 | metadata:
78 | name: hive-opa-bundle
79 | labels:
80 | opa.stackable.tech/bundle: "hms"
81 | data:
82 | hive.rego: |
83 | package hms
84 |
85 | database_allow = true
86 | table_allow = true
87 | column_allow = true
88 | partition_allow = true
89 | user_allow = true
90 |
--------------------------------------------------------------------------------
/Tiltfile:
--------------------------------------------------------------------------------
1 | # If tilt_options.json exists read it and load the default_registry value from it
2 | settings = read_json('tilt_options.json', default={})
3 | registry = settings.get('default_registry', 'oci.stackable.tech/sandbox')
4 |
5 | # Configure default registry either read from config file above, or with default value of "oci.stackable.tech/sandbox"
6 | default_registry(registry)
7 |
8 | meta = read_json('nix/meta.json')
9 | operator_name = meta['operator']['name']
10 |
11 | custom_build(
12 | registry + '/' + operator_name,
13 | 'make regenerate-nix && nix-build . -A docker --argstr dockerName "${EXPECTED_REGISTRY}/' + operator_name + '" && ./result/load-image | docker load',
14 | deps=['rust', 'Cargo.toml', 'Cargo.lock', 'default.nix', "nix", 'build.rs', 'vendor'],
15 | ignore=['*.~undo-tree~'],
16 | # ignore=['result*', 'Cargo.nix', 'target', *.yaml],
17 | outputs_image_ref_to='result/ref',
18 | )
19 |
20 | # Load the latest CRDs from Nix
21 | watch_file('result')
22 | if os.path.exists('result'):
23 | k8s_yaml('result/crds.yaml')
24 |
25 | # We need to set the correct image annotation on the operator Deployment to use e.g.
26 | # oci.stackable.tech/sandbox/opa-operator:7y19m3d8clwxlv34v5q2x4p7v536s00g instead of
27 | # oci.stackable.tech/sandbox/opa-operator:0.0.0-dev (which does not exist)
28 | k8s_kind('Deployment', image_json_path='{.spec.template.metadata.annotations.internal\\.stackable\\.tech/image}')
29 | k8s_kind('DaemonSet', image_json_path='{.spec.template.metadata.annotations.internal\\.stackable\\.tech/image}')
30 |
31 | # Optionally specify a custom Helm values file to be passed to the Helm deployment below.
32 | # This file can for example be used to set custom telemetry options (like log level) which is not
33 | # supported by helm(set).
34 | helm_values = settings.get('helm_values', None)
35 |
36 | helm_override_image_repository = 'image.repository=' + registry + '/' + operator_name
37 |
38 | # Exclude stale CRDs from Helm chart, and apply the rest
39 | helm_crds, helm_non_crds = filter_yaml(
40 | helm(
41 | 'deploy/helm/' + operator_name,
42 | name=operator_name,
43 | namespace="stackable-operators",
44 | set=[
45 | helm_override_image_repository,
46 | ],
47 | values=helm_values,
48 | ),
49 | api_version = "^apiextensions\\.k8s\\.io/.*$",
50 | kind = "^CustomResourceDefinition$",
51 | )
52 | k8s_yaml(helm_non_crds)
53 |
--------------------------------------------------------------------------------
/.github/workflows/integration-test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Integration Test
3 |
4 | on:
5 | # schedule:
6 | # # At 00:00 on Sunday. See: https://crontab.guru/#0_0_*_*_0
7 | # - cron: "0 0 * * 0"
8 | workflow_dispatch:
9 | inputs:
10 | test-mode:
11 | description: Test mode
12 | required: true
13 | type: choice
14 | options:
15 | - profile
16 | - custom
17 | test-mode-input:
18 | description: |
19 | The profile or the runner used. Eg: `smoke-latest` or `amd64` (see test/interu.yaml)
20 | required: true
21 | test-suite:
22 | description: Name of the test-suite. Only used if test-mode is `custom`
23 | test:
24 |         description: Name of the test. Only used if test-mode is `custom`
25 |
26 | jobs:
27 | test:
28 | name: Run Integration Test
29 | runs-on: ubuntu-latest
30 | # services:
31 | # otel-collector:
32 | # image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-k8s:0.131.1
33 | # volumes:
34 | # - .:/mnt
35 | steps:
36 | - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
37 | with:
38 | persist-credentials: false
39 | submodules: recursive
40 |
41 | # TODO: Enable the scheduled runs which hard-code what profile to use
42 | - name: Run Integration Test
43 | id: test
44 | uses: stackabletech/actions/run-integration-test@29bea1b451c0c2e994bd495969286f95bf49ed6a # v0.11.0
45 | with:
46 | replicated-api-token: ${{ secrets.REPLICATED_API_TOKEN }}
47 | test-mode-input: ${{ inputs.test-mode-input }}
48 | test-suite: ${{ inputs.test-suite }}
49 | test-mode: ${{ inputs.test-mode }}
50 | test: ${{ inputs.test }}
51 |
52 | - name: Send Notification
53 | if: ${{ failure() || github.run_attempt > 1 }}
54 | uses: stackabletech/actions/send-slack-notification@29bea1b451c0c2e994bd495969286f95bf49ed6a # v0.11.0
55 | with:
56 | slack-token: ${{ secrets.SLACK_INTEGRATION_TEST_TOKEN }}
57 | failed-tests: ${{ steps.test.outputs.failed-tests }}
58 | test-health: ${{ steps.test.outputs.health }}
59 | test-result: ${{ steps.test.conclusion }}
60 | channel-id: C07UYJYSMSN # notifications-integration-tests
61 | type: integration-test
62 |
--------------------------------------------------------------------------------
/examples/simple-hive-cluster-postgres-s3.yaml:
--------------------------------------------------------------------------------
1 | # helm install secret-operator \
2 | # oci://oci.stackable.tech/sdp-charts/secret-operator
3 | # helm install minio \
4 | # minio \
5 | # --repo https://charts.bitnami.com/bitnami \
6 | # --set image.repository=bitnamilegacy/minio \
7 | # --set clientImage.repository=bitnamilegacy/minio-client \
8 | # --set defaultInitContainers.volumePermissions.image.repository=bitnamilegacy/os-shell \
9 | # --set console.image.repository=bitnamilegacy/minio-object-browser \
10 | # --set global.security.allowInsecureImages=true \
11 | # --set auth.rootUser=minio-access-key \
12 | # --set auth.rootPassword=minio-secret-key
13 | # helm install hive bitnami/postgresql --version=12.1.5 \
14 | # --set image.repository=bitnamilegacy/postgresql \
15 | # --set volumePermissions.image.repository=bitnamilegacy/os-shell \
16 | # --set metrics.image.repository=bitnamilegacy/postgres-exporter \
17 | # --set global.security.allowInsecureImages=true \
18 | # --set postgresqlUsername=hive \
19 | # --set postgresqlPassword=hive \
20 | # --set postgresqlDatabase=hive
21 | ---
22 | apiVersion: hive.stackable.tech/v1alpha1
23 | kind: HiveCluster
24 | metadata:
25 | name: simple-hive-postgres
26 | spec:
27 | image:
28 | productVersion: 4.1.0
29 | stackableVersion: 0.0.0-dev
30 | clusterConfig:
31 | database:
32 | connString: jdbc:derby:;databaseName=/tmp/hive;create=true
33 | credentialsSecret: hive-credentials
34 | dbType: derby
35 | s3:
36 | inline:
37 | host: test-minio
38 | port: 9000
39 | accessStyle: Path
40 | credentials:
41 | secretClass: simple-hive-s3-secret-class
42 | metastore:
43 | roleGroups:
44 | default:
45 | replicas: 1
46 | ---
47 | apiVersion: secrets.stackable.tech/v1alpha1
48 | kind: SecretClass
49 | metadata:
50 | name: simple-hive-s3-secret-class
51 | spec:
52 | backend:
53 | k8sSearch:
54 | searchNamespace:
55 | pod: {}
56 | ---
57 | apiVersion: v1
58 | kind: Secret
59 | metadata:
60 | name: simple-hive-s3-secret
61 | labels:
62 | secrets.stackable.tech/class: simple-hive-s3-secret-class
63 | stringData:
64 | accessKey: minio-access-key
65 | secretKey: minio-secret-key
66 | ---
67 | apiVersion: v1
68 | kind: Secret
69 | metadata:
70 | name: hive-credentials
71 | type: Opaque
72 | stringData:
73 | username: APP
74 | password: mine
75 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/reference/environment-variables.adoc:
--------------------------------------------------------------------------------
1 | = Environment variables
2 |
3 | This operator accepts the following environment variables:
4 |
5 | == KUBERNETES_CLUSTER_DOMAIN
6 |
7 | *Default value*: cluster.local
8 |
9 | *Required*: false
10 |
11 | *Multiple values*: false
12 |
13 | This instructs the operator, which value it should use for the Kubernetes `clusterDomain` setting.
14 | Make sure to keep this in sync with whatever setting your cluster uses.
15 | Please see the documentation xref:guides:kubernetes-cluster-domain.adoc[on configuring the Kubernetes cluster domain] for more information on this feature.
16 |
17 | [source]
18 | ----
19 | export KUBERNETES_CLUSTER_DOMAIN=mycluster.local
20 | cargo run -- run
21 | ----
22 |
23 | or via docker:
24 |
25 | [source]
26 | ----
27 | docker run \
28 | --name hive-operator \
29 | --network host \
30 | --env KUBECONFIG=/home/stackable/.kube/config \
31 | --env KUBERNETES_CLUSTER_DOMAIN=mycluster.local \
32 | --mount type=bind,source="$HOME/.kube/config",target="/home/stackable/.kube/config" \
33 | oci.stackable.tech/sdp/hive-operator:0.0.0-dev
34 | ----
35 |
36 | == PRODUCT_CONFIG
37 |
38 | *Default value*: `/etc/stackable/hive-operator/config-spec/properties.yaml`
39 |
40 | *Required*: false
41 |
42 | *Multiple values*: false
43 |
44 | [source]
45 | ----
46 | export PRODUCT_CONFIG=/foo/bar/properties.yaml
47 | stackable-hive-operator run
48 | ----
49 |
50 | or via docker:
51 |
52 | ----
53 | docker run \
54 | --name hive-operator \
55 | --network host \
56 | --env KUBECONFIG=/home/stackable/.kube/config \
57 | --env PRODUCT_CONFIG=/my/product/config.yaml \
58 | --mount type=bind,source="$HOME/.kube/config",target="/home/stackable/.kube/config" \
59 | oci.stackable.tech/sdp/hive-operator:0.0.0-dev
60 | ----
61 |
62 | == WATCH_NAMESPACE
63 |
64 | *Default value*: All namespaces
65 |
66 | *Required*: false
67 |
68 | *Multiple values*: false
69 |
70 | The operator **only** watches for resources in the provided namespace `test`:
71 |
72 | [source]
73 | ----
74 | export WATCH_NAMESPACE=test
75 | stackable-hive-operator run
76 | ----
77 |
78 | or via docker:
79 |
80 | [source]
81 | ----
82 | docker run \
83 | --name hive-operator \
84 | --network host \
85 | --env KUBECONFIG=/home/stackable/.kube/config \
86 | --env WATCH_NAMESPACE=test \
87 | --mount type=bind,source="$HOME/.kube/config",target="/home/stackable/.kube/config" \
88 | oci.stackable.tech/sdp/hive-operator:0.0.0-dev
89 | ----
90 |
--------------------------------------------------------------------------------
/deploy/helm/hive-operator/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Expand the name of the chart.
3 | */}}
4 | {{- define "operator.name" -}}
5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-operator" }}
6 | {{- end }}
7 |
{{/*
Full application name of the operator.
Unlike "operator.name" this keeps the "-operator" suffix; only a trailing
dash left over from truncation is trimmed.
*/}}
{{- define "operator.appname" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
14 |
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
.Values.fullnameOverride, when set, takes precedence over both.
*/}}
{{- define "operator.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
32 |
{{/*
Create chart name and version as used by the chart label.
"+" is not a valid character in label values, so it is replaced with "_".
*/}}
{{- define "operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
39 |
{{/*
Common labels applied to all objects created by this chart.
Combines the selector labels with chart, version and managed-by metadata.
*/}}
{{- define "operator.labels" -}}
helm.sh/chart: {{ include "operator.chart" . }}
{{ include "operator.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
51 |
{{/*
Selector labels.
Any user-supplied .Values.labels are merged into the set as well.
*/}}
{{- define "operator.selectorLabels" -}}
app.kubernetes.io/name: {{ include "operator.appname" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- with .Values.labels }}
{{ toYaml . }}
{{- end }}
{{- end }}
62 |
{{/*
Create the name of the service account to use.
When serviceAccount.create is true, falls back to the fullname unless an
explicit serviceAccount.name is given; otherwise defaults to "default".
*/}}
{{- define "operator.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "operator.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
73 |
{{/*
Labels for Kubernetes objects created by "helm test" so they can be
identified and cleaned up separately from regular release objects.
*/}}
{{- define "operator.testLabels" -}}
helm.sh/test: {{ include "operator.chart" . }}
{{- end }}
80 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-s3/60-install-hive.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: |
      kubectl apply -n "$NAMESPACE" -f - <<EOF
      ---
      apiVersion: hive.stackable.tech/v1alpha1
      kind: HiveCluster
      metadata:
        name: hive
      spec:
        image:
{% if test_scenario['values']['hive'].find(',') > 0 %}
15 | custom: "{{ test_scenario['values']['hive'].split(',')[1] }}"
16 | productVersion: "{{ test_scenario['values']['hive'].split(',')[0] }}"
17 | {% else %}
18 | productVersion: "{{ test_scenario['values']['hive'] }}"
19 | {% endif %}
20 | pullPolicy: IfNotPresent
21 | clusterConfig:
22 | database:
23 | connString: jdbc:postgresql://postgresql:5432/hive
24 | credentialsSecret: hive-credentials
25 | dbType: postgres
26 | s3:
27 | reference: minio
28 | authentication:
29 | kerberos:
30 | secretClass: kerberos-$NAMESPACE
31 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
32 | vectorAggregatorConfigMapName: vector-aggregator-discovery
33 | {% endif %}
34 | metastore:
35 | config:
36 | logging:
37 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
38 | roleGroups:
39 | default:
40 | replicas: 1
41 | EOF
42 | ---
43 | apiVersion: s3.stackable.tech/v1alpha1
44 | kind: S3Connection
45 | metadata:
46 | name: minio
47 | spec:
48 | host: minio
49 | port: 9000
50 | accessStyle: Path
51 | credentials:
52 | secretClass: test-hive-s3-secret-class
53 | {% if test_scenario['values']['s3-use-tls'] == 'true' %}
54 | tls:
55 | verification:
56 | server:
57 | caCert:
58 | secretClass: minio-tls-certificates
59 | {% endif %}
60 | ---
61 | apiVersion: secrets.stackable.tech/v1alpha1
62 | kind: SecretClass
63 | metadata:
64 | name: test-hive-s3-secret-class
65 | spec:
66 | backend:
67 | k8sSearch:
68 | searchNamespace:
69 | pod: {}
70 | ---
71 | apiVersion: v1
72 | kind: Secret
73 | metadata:
74 | name: test-hive-s3-secret
75 | labels:
76 | secrets.stackable.tech/class: test-hive-s3-secret-class
77 | stringData:
78 | accessKey: hive
79 | secretKey: hivehive
80 | ---
81 | apiVersion: v1
82 | kind: Secret
83 | metadata:
84 | name: hive-credentials
85 | type: Opaque
86 | stringData:
87 | username: hive
88 | password: hive
89 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/smoke/test_metastore_opa.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from hive_metastore_client import HiveMetastoreClient
3 | from hive_metastore_client.builders import (
4 | DatabaseBuilder,
5 | ColumnBuilder,
6 | SerDeInfoBuilder,
7 | StorageDescriptorBuilder,
8 | TableBuilder,
9 | )
10 | import argparse
11 |
12 |
def table(db_name, table_name, location):
    """Build a Hive Table object describing a compressed, Parquet-backed table.

    The table has a single string column "id" and uses the Parquet SerDe
    together with the matching Parquet input/output formats, stored at
    ``location``.
    """
    id_column = ColumnBuilder("id", "string", "col comment").build()

    parquet_serde = SerDeInfoBuilder(
        serialization_lib="org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"
    ).build()

    descriptor = StorageDescriptorBuilder(
        columns=[id_column],
        location=location,
        input_format="org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat",
        output_format="org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat",
        serde_info=parquet_serde,
        compressed=True,
    ).build()

    return TableBuilder(
        db_name=db_name,
        table_name=table_name,
        storage_descriptor=descriptor,
    ).build()
36 |
37 |
if __name__ == "__main__":
    # Local import: only needed when run as a script.
    import sys

    all_args = argparse.ArgumentParser(
        description="Test hive-metastore-opa-authorizer and rego rules."
    )
    # NOTE(review): the port is kept as a string (argparse default); the client
    # appears to accept it as-is — confirm before switching to type=int.
    all_args.add_argument("-p", "--port", help="Metastore server port", default="9083")
    all_args.add_argument(
        "-d", "--database", help="Test DB name", default="db_not_allowed"
    )
    all_args.add_argument(
        "-m", "--metastore", help="The host or service to connect to", required=True
    )
    args = vars(all_args.parse_args())

    database_name = args["database"]
    port = args["port"]
    host = args["metastore"]

    # Creating database object using builder
    database = DatabaseBuilder(database_name).build()

    print(
        f"[INFO] Trying to access '{database_name}' which is expected to fail due to 'database_allow' authorization policy...!"
    )

    with HiveMetastoreClient(host, port) as hive_client:
        try:
            hive_client.create_database_if_not_exists(database)
        except Exception as e:
            # Any failure is treated as an authorization denial.
            # NOTE(review): this also catches connection/transport errors, so an
            # unreachable metastore would wrongly pass the test — consider
            # narrowing the exception type.
            print(f"[DENIED] {e}")
            print(
                f"[SUCCESS] Test hive-metastore-opa-authorizer succeeded. Could not access database '{database_name}'!"
            )
            sys.exit(0)

    print(
        f"[ERROR] Test hive-metastore-opa-authorizer failed. Could access database '{database_name}'!"
    )
    # sys.exit(1) instead of exit(-1): the builtin exit() comes from the site
    # module and may be absent (python -S, embedded interpreters), and -1 wraps
    # to exit status 255; 1 is the conventional failure code.
    sys.exit(1)
76 |
--------------------------------------------------------------------------------
/tests/README-templating.md:
--------------------------------------------------------------------------------
1 | # Test Scenario Templating
2 |
3 | ## Introduction
4 |
5 | The tests in this directory are designed to be expanded into multiple test scenarios based on test dimensions that can be defined in a dimensions file.
6 |
7 | ## Defining Test Dimensions
8 |
9 | The dimensions file currently has to be named `test-definition.yaml` and reside in the same directory as the `kuttl-test.yaml.j2` file.
10 |
11 | An example of a minimal folder structure will be given further down in this file.
12 |
13 | An example of the content for the test definition file is shown here:
14 |
15 | ````yaml
16 | dimensions:
17 | - name: spark
18 | values:
19 | - 3.2.1
20 | - 3.2.2
21 | - 3.2.3
22 | - name: hadoop
23 | values:
24 | - 3.1.0
25 | - 3.2.0
26 | - name: aws
27 |   values:
28 |     - abc
29 |     - xyz
29 | tests:
30 | - name: spark-pi-public-s3
31 | dimensions:
32 | - spark
33 | - hadoop
34 | ````
35 |
36 | This file defines three dimensions for this test to be considered.
37 | It also defines one test case named _spark-pi-public-s3_ and the dimensions that this test case should use.
38 | In this example the test case uses only two of the three dimensions defined, so a run of this test case would be expanded into the following test structure:
39 |
40 | ````text
41 | └── spark-pi-public-s3
42 | ├── spark-3.2.1_hadoop-3.1.0
43 | ├── spark-3.2.1_hadoop-3.2.0
44 | ├── spark-3.2.2_hadoop-3.1.0
45 | ├── spark-3.2.2_hadoop-3.2.0
46 | ├── spark-3.2.3_hadoop-3.1.0
47 | └── spark-3.2.3_hadoop-3.2.0
48 | ````
49 |
50 | The name of a test case defined under `tests` in this file has to refer back to a directory in the `templates/kuttl` directory, which will be used to create the test scenarios.
51 |
52 | Given the example of a test-definition.yaml shown above, the following folder structure would create the test scenarios shown above.
53 |
54 | ````text
55 | tests
56 | ├── kuttl-test.yaml.j2
57 | ├── templates
58 | │ └── kuttl
59 | │ └── spark-pi-public-s3
60 | └── test-definition.yaml
61 | ````
62 |
63 | The `kuttl-test.yaml.j2` file cannot currently be edited, as it comes from the operator templating and any changes would be overwritten again.
64 | This should be fairly easy to solve and we can look at this as soon as it becomes necessary.
65 |
66 | ## Using
67 |
68 | ### Requirements
69 |
70 | To run tests locally you need the following things installed:
71 |
72 | - python3 (version >= 3.9)
73 | - pyyaml library installed
74 | - jq
75 |
76 | ### Running
77 |
78 | To run tests please execute the following command from the gitroot of the operator repository:
79 |
80 | `scripts/run_tests.sh --parallel 2`
81 |
82 | This will expand the test templates into all defined test scenarios and execute kuttl to test these scenarios. Any arguments are passed on to `kuttl`.
83 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/reference/discovery.adoc:
--------------------------------------------------------------------------------
1 | :clusterName: \{clusterName\}
2 | :namespace: \{namespace\}
3 | :metastorePort: 9083
4 | :roleGroup: \{roleGroup\}
5 | :page-aliases: discovery.adoc
6 |
7 | = Discovery
8 |
9 | The Stackable Operator for Apache Hive publishes a discovery https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#configmap-v1-core[ConfigMap], which exposes a client configuration bundle that allows access to the Apache Hive cluster.
10 |
11 | The bundle includes an Apache Thrift connection string to access the Hive Metastore service. This string may be used by other operators or tools to configure their products with access to Hive. Access is limited to services within the same Kubernetes cluster.
12 |
13 | == Example
14 |
15 | Given the following Hive cluster:
16 |
17 | [source,yaml,subs="normal,callouts"]
18 | ----
19 | apiVersion: hive.stackable.tech/v1alpha1
20 | kind: HiveCluster
21 | metadata:
22 | name: {clusterName} # <1>
23 | namespace: {namespace} # <2>
24 | spec:
25 | clusterConfig:
26 | database:
27 | connString: jdbc:postgresql://postgresql:5432/hive
28 | credentialsSecret: hive-credentials
29 | dbType: postgres
30 | metastore:
31 | roleGroups:
32 | default: # <3>
33 | replicas: 2
34 | ---
35 | apiVersion: v1
36 | kind: Secret
37 | metadata:
38 | name: hive-credentials
39 | type: Opaque
40 | stringData:
41 | username: hive
42 | password: hive
43 | ----
44 | <1> The name of the Hive cluster, which is also the name of the created discovery ConfigMap.
45 | <2> The namespace of the discovery ConfigMap.
46 | <3> A role group name of the metastore role.
47 |
48 | The resulting discovery ConfigMap is `{namespace}/{clusterName}`.
49 |
50 | == Contents
51 |
52 | === Internal access
53 |
54 | The `{namespace}/{clusterName}` discovery ConfigMap contains the following fields where `{clusterName}` represents the name, `{namespace}` the namespace of the cluster and `{roleGroup}` a role group of the `metastore` role:
55 |
56 | `HIVE`::
57 | ====
58 | Contains the thrift protocol connection string for the Hive metastore service:
59 |
60 | [subs="attributes"]
61 | thrift://{clusterName}.{namespace}.svc.cluster.local:{metastorePort}
62 | ====
63 |
64 | WARNING: Using the Hive metastore in high availability mode (replicas > 1) does not work with Derby but instead requires a properly configured database like PostgreSQL or MySQL.
65 |
66 | === External access
67 |
68 | If `spec.clusterConfig.listenerClass` is set to `external-unstable` an additional ConfigMap is generated to expose external access to the cluster. This discovery ConfigMap is reachable via `{namespace}/{clusterName}-nodeport`.
69 |
70 | ====
71 | Contains the thrift protocol connection string for the Hive metastore NodePort service:
72 |
73 | thrift://<node-ip>:<node-port>
74 | thrift://<node-ip>:<node-port>
75 | ====
76 |
--------------------------------------------------------------------------------
/docs/modules/hive/pages/getting_started/first_steps.adoc:
--------------------------------------------------------------------------------
1 | = First steps
2 | :description: Deploy and verify a Hive metastore cluster with PostgreSQL and MinIO. Follow the setup guide and ensure all pods are ready for operation.
3 |
4 | After going through the xref:getting_started/installation.adoc[] section and having installed all the operators, deploy a Hive metastore cluster and its dependencies.
5 | Afterward you can <<_verify_that_it_works, verify that it works>>.
6 |
7 | == Setup
8 |
9 | Several requirements should have already been installed in the xref:getting_started/installation.adoc[Installation guide]:
10 |
11 | * xref:commons-operator:index.adoc[Commons Operator]
12 | * xref:secret-operator:index.adoc[Secret Operator]
13 | * xref:listener-operator:index.adoc[Listener Operator]
14 | * xref:hive:index.adoc[Hive Operator]
15 | * PostgreSQL
16 | * MinIO for S3
17 |
18 | === S3Connection
19 |
20 | In order to connect Hive to MinIO, we need to create several resource files (or concatenate them into a single file).
21 |
22 | An xref:concepts:s3.adoc[S3Connection] to connect to MinIO (`hive-minio-s3-connection.yaml`)
23 |
24 | [source,yaml]
25 | ----
26 | include::example$getting_started/hive-minio-s3-connection.yaml[]
27 | ----
28 |
29 | Credentials for the S3Connection to log into MinIO (`hive-minio-credentials.yaml`)
30 |
31 | [source,yaml]
32 | ----
33 | include::example$getting_started/hive-minio-credentials.yaml[]
34 | ----
35 |
36 | A SecretClass for the MinIO credentials. The credentials were defined during the MinIO installation (`hive-minio-credentials-secret-class.yaml`)
37 |
38 | [source,yaml]
39 | ----
40 | include::example$getting_started/hive-minio-credentials-secret-class.yaml[]
41 | ----
42 |
43 | And lastly the actual Apache Hive cluster definition (`hive-postgres-s3.yaml`). Note how it references the `minio` S3 object
44 |
45 | [source,yaml]
46 | ----
47 | include::example$getting_started/hive-postgres-s3.yaml[]
48 | ----
49 |
50 | Once the files are created apply them to the cluster:
51 |
52 | [source,bash]
53 | ----
54 | include::example$getting_started/getting_started.sh[tag=install-hive]
55 | ----
56 |
57 | == Verify that it works
58 |
59 | Make sure that all the Pods in the StatefulSets are ready:
60 |
61 | [source,bash]
62 | ----
63 | kubectl get statefulset
64 | ----
65 |
66 | The output should show all pods ready:
67 |
68 | [source,bash]
69 | ----
70 | NAME READY AGE
71 | hive-postgres-s3-metastore-default 1/1 48s
72 | ----
73 |
74 | For further testing we recommend to use e.g. the python https://github.com/quintoandar/hive-metastore-client[hive-metastore-client]
75 | in order to e.g. create tables or connect a product like Trino via the xref:trino:index.adoc[Stackable Trino Operator].
76 |
77 | == What's next
78 |
79 | Have a look at the xref:usage-guide/index.adoc[usage guide] to find out more about the features of the Operator.
80 |
--------------------------------------------------------------------------------
/tests/templates/kuttl/kerberos-hdfs/35-access-hdfs.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestStep
4 | commands:
5 | - script: |
      kubectl apply -n "$NAMESPACE" -f - <<EOF
      ---
      apiVersion: hive.stackable.tech/v1alpha1
      kind: HiveCluster
      metadata:
        name: hive
      spec:
        image:
{% if test_scenario['values']['hive'].find(',') > 0 %}
9 | custom: "{{ test_scenario['values']['hive'].split(',')[1] }}"
10 | productVersion: "{{ test_scenario['values']['hive'].split(',')[0] }}"
11 | {% else %}
12 | productVersion: "{{ test_scenario['values']['hive'] }}"
13 | {% endif %}
14 | pullPolicy: IfNotPresent
15 | clusterConfig:
16 | authorization:
17 | opa:
18 | configMapName: opa
19 | package: hms
20 | database:
21 | connString: jdbc:postgresql://postgresql:5432/hive
22 | credentialsSecret: hive-credentials
23 | dbType: postgres
24 | s3:
25 | reference: minio
26 | {% if lookup('env', 'VECTOR_AGGREGATOR') %}
27 | vectorAggregatorConfigMapName: vector-aggregator-discovery
28 | {% endif %}
29 | metastore:
30 | config:
31 | logging:
32 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }}
33 | envOverrides:
34 | COMMON_VAR: role-value # overridden by role group below
35 | ROLE_VAR: role-value # only defined here at role level
36 | configOverrides:
37 | hive-site.xml:
38 | hive.metastore.warehouse.dir: "/stackable/warehouse/override" # Also set by the operator
39 | common-var: role-value # Overridden by role group below
40 | role-var: role-value # Only defined here at role level
41 | roleGroups:
42 | default:
43 | replicas: 1
44 | envOverrides:
45 | COMMON_VAR: group-value # overrides role value
46 | GROUP_VAR: group-value # only defined here at group level
47 | configOverrides:
48 | hive-site.xml:
49 | common-var: group-value
50 | group-var: group-value
51 | ---
52 | apiVersion: s3.stackable.tech/v1alpha1
53 | kind: S3Connection
54 | metadata:
55 | name: minio
56 | spec:
57 | host: minio
58 | port: 9000
59 | accessStyle: Path
60 | credentials:
61 | secretClass: test-hive-s3-secret-class
62 | {% if test_scenario['values']['s3-use-tls'] == 'true' %}
63 | tls:
64 | verification:
65 | server:
66 | caCert:
67 | secretClass: minio-tls-certificates
68 | {% endif %}
69 | ---
70 | apiVersion: secrets.stackable.tech/v1alpha1
71 | kind: SecretClass
72 | metadata:
73 | name: test-hive-s3-secret-class
74 | spec:
75 | backend:
76 | k8sSearch:
77 | searchNamespace:
78 | pod: {}
79 | ---
80 | apiVersion: v1
81 | kind: Secret
82 | metadata:
83 | name: test-hive-s3-secret
84 | labels:
85 | secrets.stackable.tech/class: test-hive-s3-secret-class
86 | stringData:
87 | accessKey: hive
88 | secretKey: hivehive
89 | ---
90 | apiVersion: v1
91 | kind: Secret
92 | metadata:
93 | name: hive-credentials
94 | type: Opaque
95 | stringData:
96 | username: hive
97 | password: hive
98 |
--------------------------------------------------------------------------------