├── .actionlint.yaml ├── .dockerignore ├── .envrc.sample ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── 01-normal-issue.md │ ├── 02-bug_report.yml │ ├── bug_report.yml │ ├── config.yml │ ├── new_version.md │ └── normal-issue.md ├── PULL_REQUEST_TEMPLATE │ ├── pre-release-getting-started-script.md │ └── pre-release-rust-deps.md ├── actionlint.yaml ├── pull_request_template.md └── workflows │ ├── build.yml │ ├── general_daily_security.yml │ ├── integration-test.yml │ └── pr_pre-commit.yaml ├── .gitignore ├── .hadolint.yaml ├── .markdownlint.yaml ├── .pre-commit-config.yaml ├── .pylintrc ├── .readme ├── README.md.j2 ├── partials │ ├── borrowed │ │ ├── documentation.md.j2 │ │ ├── footer.md.j2 │ │ ├── header.md.j2 │ │ ├── links.md.j2 │ │ ├── overview_blurb.md.j2 │ │ └── related_reading.md.j2 │ └── main.md.j2 └── static │ └── borrowed │ ├── Icon_Stackable.svg │ ├── sdp_overview.png │ └── stackable_overview.png ├── .vscode ├── launch.json └── settings.json ├── .yamllint.yaml ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.nix ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── Tiltfile ├── crate-hashes.json ├── default.nix ├── deny.toml ├── deploy ├── DO_NOT_EDIT.md ├── config-spec │ └── properties.yaml ├── helm │ ├── chart_testing.yaml │ ├── ct.yaml │ └── trino-operator │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── README.md │ │ ├── configs │ │ └── properties.yaml │ │ ├── crds │ │ └── crds.yaml │ │ ├── templates │ │ ├── _helpers.tpl │ │ ├── _telemetry.tpl │ │ ├── configmap.yaml │ │ ├── deployment.yaml │ │ ├── roles.yaml │ │ └── serviceaccount.yaml │ │ └── values.yaml └── stackable-operators-ns.yaml ├── docker └── Dockerfile ├── docs ├── antora.yml ├── modules │ └── trino │ │ ├── assets │ │ └── images │ │ │ ├── connect-with-dbeaver-1.png │ │ │ ├── connect-with-dbeaver-2.png │ │ │ ├── connect-with-dbeaver-3.png │ │ │ ├── trino-web-ui-finished.png │ │ │ └── trino-web-ui.png │ │ ├── examples │ │ ├── getting_started │ │ │ └── code │ │ │ │ ├── getting_started.sh │ │ │ │ ├── getting_started.sh.j2 │ │ │ │ ├── install_output.txt │ │ │ │ ├── install_output.txt.j2 │ │ │ │ ├── test_getting_started_helm.sh │ │ │ │ ├── test_getting_started_stackablectl.sh │ │ │ │ ├── trino.yaml │ │ │ │ └── trino.yaml.j2 │ │ └── usage-guide │ │ │ ├── opa-bundle-trino-cm.yaml │ │ │ ├── trino-file-auth-snippet.yaml │ │ │ ├── trino-insecure.yaml │ │ │ ├── trino-ldap-auth-snippet.yaml │ │ │ ├── trino-oidc-auth-snippet.yaml │ │ │ ├── trino-secure-internal-tls.yaml │ │ │ ├── trino-secure-tls-only.yaml │ │ │ └── trino-secure-tls.yaml │ │ ├── images │ │ ├── catalogs.drawio.svg │ │ └── trino_overview.drawio.svg │ │ ├── pages │ │ ├── concepts.adoc │ │ ├── getting_started │ │ │ ├── first_steps.adoc │ │ │ ├── index.adoc │ │ │ └── installation.adoc │ │ ├── index.adoc │ │ ├── reference │ │ │ ├── commandline-parameters.adoc │ │ │ ├── crds.adoc │ │ │ ├── environment-variables.adoc │ │ │ └── index.adoc │ │ └── usage-guide │ │ │ ├── catalogs │ │ │ ├── black-hole.adoc │ │ │ ├── delta-lake.adoc │ │ │ ├── generic.adoc │ │ │ ├── google-sheets.adoc │ │ │ ├── hive.adoc │ │ │ ├── iceberg.adoc │ │ │ ├── index.adoc │ │ │ ├── tpcds.adoc │ │ │ └── tpch.adoc │ │ │ ├── configuration.adoc │ │ │ ├── connect_to_trino.adoc │ │ │ ├── index.adoc │ │ │ ├── listenerclass.adoc │ │ │ ├── log_aggregation.adoc │ │ │ ├── monitoring.adoc │ │ │ ├── operations │ │ │ ├── cluster-operations.adoc │ │ │ ├── graceful-shutdown.adoc │ │ │ ├── index.adoc │ │ │ ├── pod-disruptions.adoc │ │ │ └── pod-placement.adoc │ │ │ ├── overrides.adoc │ │ │ ├── query.adoc │ │ │ ├── 
s3.adoc │ │ │ └── security.adoc │ │ └── partials │ │ ├── nav.adoc │ │ └── supported-versions.adoc └── templating_vars.yaml ├── examples ├── simple-trino-cluster-authentication-opa-authorization-s3.yaml ├── simple-trino-cluster-hive-ha-s3.yaml ├── simple-trino-cluster-resource-limits.yaml ├── simple-trino-cluster-s3.yaml ├── simple-trino-cluster.yaml └── simple-trino-oauth2.yaml ├── nix ├── README.md ├── meta.json ├── sources.json └── sources.nix ├── renovate.json ├── rust-toolchain.toml ├── rust └── operator-binary │ ├── Cargo.toml │ ├── build.rs │ └── src │ ├── authentication │ ├── mod.rs │ ├── oidc │ │ └── mod.rs │ └── password │ │ ├── file.rs │ │ ├── ldap.rs │ │ └── mod.rs │ ├── authorization │ ├── mod.rs │ └── opa.rs │ ├── catalog │ ├── black_hole.rs │ ├── commons.rs │ ├── config.rs │ ├── delta_lake.rs │ ├── generic.rs │ ├── google_sheet.rs │ ├── hive.rs │ ├── iceberg.rs │ ├── mod.rs │ ├── tpcds.rs │ └── tpch.rs │ ├── command.rs │ ├── config │ ├── jvm.rs │ └── mod.rs │ ├── controller.rs │ ├── crd │ ├── affinity.rs │ ├── authentication.rs │ ├── catalog │ │ ├── black_hole.rs │ │ ├── commons.rs │ │ ├── delta_lake.rs │ │ ├── generic.rs │ │ ├── google_sheet.rs │ │ ├── hive.rs │ │ ├── iceberg.rs │ │ ├── mod.rs │ │ ├── tpcds.rs │ │ └── tpch.rs │ ├── discovery.rs │ └── mod.rs │ ├── main.rs │ ├── operations │ ├── graceful_shutdown.rs │ ├── mod.rs │ └── pdb.rs │ └── product_logging.rs ├── rustfmt.toml ├── scripts ├── docs_templating.sh ├── ensure_one_trailing_newline.py ├── generate-manifests.sh ├── render_readme.sh ├── run-tests └── run_tests.sh ├── shell.nix └── tests ├── README-templating.md ├── infrastructure.yaml ├── kuttl-test.yaml.jinja2 ├── release.yaml ├── templates ├── .gitkeep └── kuttl │ ├── authentication │ ├── 00-assert.yaml.j2 │ ├── 00-install-vector-aggregator-discovery-configmap.yaml.j2 │ ├── 00-patch-ns.yaml.j2 │ ├── 00-rbac.yaml.j2 │ ├── 01-assert.yaml │ ├── 01-install-openldap.yaml │ ├── 02-assert.yaml │ ├── 02-create-ldap-user.yaml │ ├── 03-keycloak-realm.yaml │ ├── 04-assert.yaml │ ├── 04-install-keycloak.yaml.j2 │ ├── 05-assert.yaml │ ├── 05-install-test-trino.yaml.j2 │ ├── 11-create-authentication-classes.yaml │ ├── 12-assert.yaml │ ├── 12-create-trino-cluster.yaml │ ├── 20-assert.yaml │ ├── 20-test-trino.yaml │ ├── 30-hot-reloading-add-user.yaml │ ├── 31-assert.yaml │ ├── 32-hot-reloading-remove-user.yaml │ ├── 33-assert.yaml │ ├── add_user.yaml.j2 │ ├── check-active-workers.py │ ├── check-oauth-login.py │ ├── create-authentication-classes.yaml.j2 │ ├── create-trino-cluster.yaml.j2 │ ├── create_ldap_user.sh │ ├── create_ldap_user_other.sh │ ├── install-openldap-other.yaml.j2 │ ├── install-openldap.yaml.j2 │ └── remove_user.yaml.j2 │ ├── cluster-operation │ ├── 00-assert.yaml.j2 │ ├── 00-install-vector-aggregator-discovery-configmap.yaml.j2 │ ├── 00-patch-ns.yaml.j2 │ ├── 10-assert.yaml │ ├── 10-install-trino.yaml.j2 │ ├── 20-assert.yaml │ ├── 20-stop-trino.yaml.j2 │ ├── 30-assert.yaml │ ├── 30-pause-trino.yaml.j2 │ ├── 40-assert.yaml │ └── 40-restart-trino.yaml.j2 │ ├── logging │ ├── 00-patch-ns.yaml.j2 │ ├── 00-rbac.yaml.j2 │ ├── 01-assert.yaml │ ├── 01-install-trino-vector-aggregator.yaml │ ├── 02-create-configmap-with-prepared-logs.yaml │ ├── 03-assert.yaml │ ├── 03-install-trino.yaml.j2 │ ├── 04-assert.yaml │ ├── 04-install-trino-test-runner.yaml │ ├── 05-assert.yaml │ ├── 05-test-log-aggregation.yaml │ ├── prepared-logs.airlift.json │ ├── test_log_aggregation.py │ └── trino-vector-aggregator-values.yaml.j2 │ ├── opa-authorization │ ├── 00-assert.yaml.j2 │ ├── 
00-install-vector-aggregator-discovery-configmap.yaml.j2 │ ├── 00-limit-range.yaml │ ├── 00-patch-ns.yaml.j2 │ ├── 00-rbac.yaml.j2 │ ├── 00-secrets.yaml.j2 │ ├── 01-assert.yaml │ ├── 01-install-postgres.yaml │ ├── 02-install-minio.yaml.j2 │ ├── 03-assert.yaml │ ├── 03-install-hive.yaml.j2 │ ├── 04-assert.yaml │ ├── 04-install-keycloak.yaml.j2 │ ├── 04-keycloak-realm-cm.yaml │ ├── 10-assert.yaml │ ├── 10-install-opa.yaml.j2 │ ├── 11-opa-rego.yaml │ ├── 12-stop-opa.yaml.j2 │ ├── 20-assert.yaml │ ├── 20-install-trino.yaml.j2 │ ├── 30-assert.yaml │ ├── 30-install-opa-check.yaml │ ├── 40-assert.yaml │ ├── 40-copy-scripts.yaml │ ├── check-opa.py.j2 │ ├── helm-bitnami-minio-values.yaml.j2 │ ├── helm-bitnami-postgresql-values.yaml.j2 │ ├── trino_policies.rego │ └── trino_rules │ │ ├── .regal │ │ └── config.yaml │ │ ├── actual_permissions.rego │ │ ├── actual_permissions_test.rego │ │ ├── policies.rego │ │ ├── policies_test.rego │ │ ├── requested_permissions.rego │ │ ├── requested_permissions_test.rego │ │ ├── schema │ │ ├── input.json │ │ └── policies.json │ │ ├── util.rego │ │ ├── util_test.rego │ │ ├── verification.rego │ │ └── verification_test.rego │ ├── orphaned-resources │ ├── 00-assert.yaml.j2 │ ├── 00-install-vector-aggregator-discovery-configmap.yaml.j2 │ ├── 00-patch-ns.yaml.j2 │ ├── 1-assert.yaml │ ├── 1-install-trino.yaml.j2 │ ├── 2-assert.yaml │ ├── 2-errors.yaml │ └── 2-remove-rolegroup.yaml │ ├── resources │ ├── 00-assert.yaml.j2 │ ├── 00-install-vector-aggregator-discovery-configmap.yaml.j2 │ ├── 00-patch-ns.yaml.j2 │ ├── 10-assert.yaml.j2 │ ├── 10-install-trino.yaml.j2 │ └── 20-assert.yaml │ ├── smoke │ ├── 00-assert.yaml.j2 │ ├── 00-install-vector-aggregator-discovery-configmap.yaml.j2 │ ├── 00-limit-range.yaml │ ├── 00-patch-ns.yaml.j2 │ ├── 00-rbac.yaml.j2 │ ├── 00-secrets.yaml.j2 │ ├── 02-install-minio.yaml.j2 │ ├── 04-prepare-bucket.yaml.j2 │ ├── 05-assert.yaml │ ├── 05-install-zookeeper.yaml.j2 │ ├── 06-assert.yaml │ ├── 06-install-hdfs.yaml.j2 │ ├── 07-assert.yaml │ ├── 07-install-postgres.yaml │ ├── 08-assert.yaml │ ├── 08-install-hive.yaml.j2 │ ├── 09-assert.yaml │ ├── 09-install-opa.yaml.j2 │ ├── 10-assert.yaml │ ├── 10-install-trino.yaml.j2 │ ├── 11-assert.yaml │ ├── 12-assert.yaml │ ├── 20-assert.yaml │ ├── 20-install-check.yaml │ ├── 21-assert.yaml │ ├── 21-copy-scripts.yaml │ ├── 30-assert.yaml │ ├── 30-scale-trino.yaml.j2 │ ├── 31-assert.yaml │ ├── certs │ │ ├── ca.crt │ │ ├── client.crt.pem │ │ ├── client.csr.pem │ │ ├── client.key.pem │ │ ├── generate.sh │ │ ├── root-ca.crt.pem │ │ ├── root-ca.crt.srl │ │ ├── root-ca.key.pem │ │ ├── tls.crt │ │ └── tls.key │ ├── check-active-workers.py │ ├── check-opa.py │ ├── check-s3.py │ ├── helm-bitnami-minio-values.yaml.j2 │ ├── helm-bitnami-postgresql-values.yaml.j2 │ └── yellow_tripdata_2021-07.csv │ ├── smoke_aws │ ├── 00-s3connection.yaml │ ├── 00-secretclass.yaml │ ├── 04-prepare-bucket.yaml │ ├── 05-assert.yaml │ ├── 05-install-zookeeper.yaml.j2 │ ├── 06-assert.yaml │ ├── 06-install-hdfs.yaml.j2 │ ├── 07-assert.yaml │ ├── 07-install-postgres.yaml │ ├── 08-assert.yaml │ ├── 08-install-hive.yaml.j2 │ ├── 09-assert.yaml │ ├── 09-install-opa.yaml.j2 │ ├── 10-assert.yaml │ ├── 10-install-trino.yaml.j2 │ ├── 11-assert.yaml │ ├── 20-assert.yaml │ ├── 20-install-check.yaml │ ├── 21-assert.yaml │ ├── 21-copy-scripts.yaml │ ├── README.md │ ├── aws_secret.yaml │ ├── check-active-workers.py │ ├── check-opa.py │ ├── check-s3.py │ ├── helm-bitnami-postgresql-values.yaml.j2 │ └── yellow_tripdata_2021-07.csv │ └── tls │ ├── 
00-assert.yaml.j2 │ ├── 00-install-vector-aggregator-discovery-configmap.yaml.j2 │ ├── 00-patch-ns.yaml.j2 │ ├── 00-rbac.yaml.j2 │ ├── 10-assert.yaml.j2 │ ├── 10-install-trino.yaml.j2 │ ├── 20-assert.yaml │ ├── 20-install-check.yaml.j2 │ ├── 21-assert.yaml.j2 │ ├── 21-install-requirements.yaml.j2 │ ├── check-tls.py │ ├── test-config.json.j2 │ └── untrusted-cert.crt └── test-definition.yaml /.actionlint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | self-hosted-runner: 3 | # Ubicloud machines we are using 4 | labels: 5 | - ubicloud-standard-8-arm 6 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | debug/ 2 | target/ 3 | **/*.rs.bk 4 | 5 | .idea/ 6 | *.iws 7 | 8 | Cargo.nix 9 | crate-hashes.json 10 | result 11 | image.tar 12 | 13 | # We do NOT want to ignore .git because we use the `built` crate to gather the current git commit hash at built time 14 | # This means we need the .git directory in our Docker image, it will be thrown away and won't be included in the final image 15 | -------------------------------------------------------------------------------- /.envrc.sample: -------------------------------------------------------------------------------- 1 | # vim: syntax=conf 2 | # 3 | # If you use direnv, you can autoload the nix shell: 4 | # You will need to allow the directory the first time. 5 | use nix 6 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | nix/** linguist-generated 2 | Cargo.nix linguist-generated 3 | crate-hashes.json linguist-generated 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/01-normal-issue.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Normal issue 3 | about: This is just a normal empty issue with a simple checklist 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Issue checklist 11 | 12 | This is a simple checklist of things to bear in mind when creating a new issue. 13 | 14 | - [ ] **Describe the use-case**: As far as possible, use the pattern "As a [type of user], I would like [feature/functionality] to be able to do [specific action]." This helps identify the feature and the problem it addresses. 15 | - [ ] **Indicate importance and urgency**: Use a scale (e.g., low, medium, high) to indicate the level of importance and urgency. 16 | - [ ] **Work-around**: If there is a known work-around, describe it briefly. 17 | - [ ] **Environment**: Describe the environment where the issue occurs (e.g., SDP version, K8S version, etc.). 18 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/02-bug_report.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "🐛 Bug Report" 3 | description: "If something isn't working as expected 🤔." 4 | labels: ["type/bug"] 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: Thanks for taking the time to file a bug report! Please fill out this form as completely as possible. 9 | 10 | - type: input 11 | attributes: 12 | label: Affected Stackable version 13 | description: Which version of the Stackable Operator do you see this bug in? 
14 | 15 | # - type: input 16 | attributes: 17 | label: Affected Trino version 18 | description: Which version of Trino do you see this bug in? 19 | # 20 | - type: textarea 21 | attributes: 22 | label: Current and expected behavior 23 | description: A clear and concise description of what the operator is doing and what you would expect. 24 | validations: 25 | required: true 26 | 27 | - type: textarea 28 | attributes: 29 | label: Possible solution 30 | description: "If you have suggestions on a fix for the bug." 31 | 32 | - type: textarea 33 | attributes: 34 | label: Additional context 35 | description: "Add any other context about the problem here. Or a screenshot if applicable." 36 | 37 | - type: textarea 38 | attributes: 39 | label: Environment 40 | description: | 41 | What type of kubernetes cluster you are running aginst (k3s/eks/aks/gke/other) and any other information about your environment? 42 | placeholder: | 43 | Examples: 44 | Output of `kubectl version --short` 45 | 46 | - type: dropdown 47 | attributes: 48 | label: Would you like to work on fixing this bug? 49 | description: | 50 | **NOTE**: Let us know if you would like to submit a PR for this. We are more than happy to help you through the process. 51 | options: 52 | - "yes" 53 | - "no" 54 | - "maybe" 55 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "🐛 Bug Report" 3 | description: "If something isn't working as expected 🤔." 4 | labels: ["type/bug"] 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: Thanks for taking the time to file a bug report! Please fill out this form as completely as possible. 9 | 10 | - type: input 11 | attributes: 12 | label: Affected Stackable version 13 | description: Which version of the Stackable Operator do you see this bug in? 14 | 15 | - type: input 16 | attributes: 17 | label: Affected Trino version 18 | description: Which version of Trino do you see this bug in? 19 | 20 | - type: textarea 21 | attributes: 22 | label: Current and expected behavior 23 | description: A clear and concise description of what the operator is doing and what you would expect. 24 | validations: 25 | required: true 26 | 27 | - type: textarea 28 | attributes: 29 | label: Possible solution 30 | description: "If you have suggestions on a fix for the bug." 31 | 32 | - type: textarea 33 | attributes: 34 | label: Additional context 35 | description: "Add any other context about the problem here. Or a screenshot if applicable." 36 | 37 | - type: textarea 38 | attributes: 39 | label: Environment 40 | description: | 41 | What type of kubernetes cluster you are running aginst (k3s/eks/aks/gke/other) and any other information about your environment? 42 | placeholder: | 43 | Examples: 44 | Output of `kubectl version --short` 45 | 46 | - type: dropdown 47 | attributes: 48 | label: Would you like to work on fixing this bug? 49 | description: | 50 | **NOTE**: Let us know if you would like to submit a PR for this. We are more than happy to help you through the process. 
51 | options: 52 | - "yes" 53 | - "no" 54 | - "maybe" 55 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | blank_issues_enabled: true 3 | contact_links: 4 | - name: 🙋🏾 Question 5 | about: Use this to ask a question about this project 6 | url: https://github.com/orgs/stackabletech/discussions/new?category=q-a 7 | - name: 🚀 Feature Requests and other things 8 | about: Open an issue with your feature request or any other issue not covered elsewhere 9 | url: https://github.com/stackabletech/trino-operator/issues/new 10 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/new_version.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: New Version 3 | about: Request support for a new product version 4 | title: "[NEW VERSION]" 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Which new version of Trino should we support? 11 | 12 | Please specify the version, version range or version numbers to support, please also add these to the issue title 13 | 14 | ## Additional information 15 | 16 | If possible, provide a link to release notes/changelog 17 | 18 | ## Changes required 19 | 20 | Are there any upstream changes that we need to support? 21 | e.g. new features, changed features, deprecated features etc. 22 | 23 | ## Implementation checklist 24 | 25 | 29 | 30 | - [ ] Update the Docker image 31 | - [ ] Update documentation to include supported version(s) 32 | - [ ] Update and test getting started guide with updated version(s) 33 | - [ ] Update operator to support the new version (if needed) 34 | - [ ] Update integration tests to test use the new versions (in addition or replacing old versions 35 | - [ ] Update examples to use new versions 36 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/normal-issue.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Normal issue 3 | about: This is just a normal empty issue with a simple checklist 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Issue checklist 11 | 12 | This is a simple checklist of things to bear in mind when creating a new issue. 13 | 14 | - [ ] Describe the use-case, as far is possible. For instance, using the pattern "As a XXXX, I would like XXXX to be able to do XXXX" helps to identify the feature as well as the problem it is intended to address. 15 | - [ ] Indicate an approximate level of importance and urgency. 16 | - [ ] Indicate if there is a known work-around until such time as the issue has been implemented. 17 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE/pre-release-getting-started-script.md: -------------------------------------------------------------------------------- 1 | ## Check and Update Getting Started Script 2 | 3 | 7 | 8 | 11 | 12 | Part of 13 | 14 | > [!NOTE] 15 | > During a Stackable release we need to check (and optionally update) the 16 | > getting-started scripts to ensure they still work after product and operator 17 | > updates. 
18 | 19 | ```shell 20 | # Some of the scripts are in a code/ subdirectory 21 | # pushd docs/modules/superset/examples/getting_started 22 | # pushd docs/modules/superset/examples/getting_started/code 23 | pushd $(fd -td getting_started | grep examples); cd code 2>/dev/null || true 24 | 25 | # Make a fresh cluster (~12 seconds) 26 | kind delete cluster && kind create cluster 27 | ./getting_started.sh stackablectl 28 | 29 | # Make a fresh cluster (~12 seconds) 30 | kind delete cluster && kind create cluster 31 | ./getting_started.sh helm 32 | 33 | popd 34 | ``` 35 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE/pre-release-rust-deps.md: -------------------------------------------------------------------------------- 1 | ## Bump Rust Dependencies for Stackable Release YY.M.X 2 | 3 | 7 | 8 | 11 | 12 | Part of 13 | 14 | > [!NOTE] 15 | > During a Stackable release we need to update various Rust dependencies before 16 | > entering the final release period to ensure we run the latest versions of 17 | > crates. These bumps also include previously updated and released crates from 18 | > the `operator-rs` repository. 19 | 20 | ### Tasks 21 | 22 | - [ ] Bump Rust Dependencies, see below for more details. 23 | - [ ] Add changelog entry stating which important crates were bumped (including the version). 24 | 25 | > [!NOTE] 26 | > The bumping / updating of Rust dependencies is done in multiple steps: 27 | > 28 | > 1. Update the minimum Version in the root `Cargo.toml` manifest. 29 | > 2. Run the `cargo update` command, which also updates the `Cargo.lock` file. 30 | > 3. Lastly, run `make regenerate-nix` to update the `Cargo.nix` file. 31 | 32 | ### Bump Rust Dependencies 33 | 34 | - [ ] Bump `stackable-operator` and friends 35 | - [ ] Bump `product-config` 36 | - [ ] Bump all other dependencies 37 | -------------------------------------------------------------------------------- /.github/actionlint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | self-hosted-runner: 3 | # Ubicloud machines we are using 4 | labels: 5 | - ubicloud-standard-8-arm 6 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 3 | *Please add a description here. This will become the commit message of the merge request later.* 4 | 5 | ## Definition of Done Checklist 6 | 7 | - Not all of these items are applicable to all PRs, the author should update this template to only leave the boxes in that are relevant 8 | - Please make sure all these things are done and tick the boxes 9 | 10 | ### Author 11 | 12 | - [ ] Changes are OpenShift compatible 13 | - [ ] CRD changes approved 14 | - [ ] CRD documentation for all fields, following the [style guide](https://docs.stackable.tech/home/nightly/contributor/docs/style-guide). 15 | - [ ] Helm chart can be installed and deployed operator works 16 | - [ ] Integration tests passed (for non trivial changes) 17 | - [ ] Changes need to be "offline" compatible 18 | - [ ] Links to generated (nightly) docs added 19 | - [ ] Release note snippet added 20 | 21 | ### Reviewer 22 | 23 | - [ ] Code contains useful comments 24 | - [ ] Code contains useful logging statements 25 | - [ ] (Integration-)Test cases added 26 | - [ ] Documentation added or updated. 
Follows the [style guide](https://docs.stackable.tech/home/nightly/contributor/docs/style-guide). 27 | - [ ] Changelog updated 28 | - [ ] Cargo.toml only contains references to git tags (not specific commits or branches) 29 | 30 | ### Acceptance 31 | 32 | - [ ] Feature Tracker has been updated 33 | - [ ] Proper release label has been added 34 | - [ ] Links to generated (nightly) docs added 35 | - [ ] Release note snippet added 36 | - [ ] Add `type/deprecation` label & add to the [deprecation schedule](https://github.com/orgs/stackabletech/projects/44/views/1) 37 | - [ ] Add `type/experimental` label & add to the [experimental features tracker](https://github.com/orgs/stackabletech/projects/47) 38 | -------------------------------------------------------------------------------- /.github/workflows/general_daily_security.yml: -------------------------------------------------------------------------------- 1 | # ============= 2 | # This file is automatically generated from the templates in stackabletech/operator-templating 3 | # DON'T MANUALLY EDIT THIS FILE 4 | # ============= 5 | --- 6 | name: Daily Security Audit 7 | 8 | on: 9 | schedule: 10 | - cron: '15 4 * * *' 11 | workflow_dispatch: 12 | 13 | permissions: {} 14 | 15 | jobs: 16 | audit: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 20 | with: 21 | persist-credentials: false 22 | - uses: rustsec/audit-check@69366f33c96575abad1ee0dba8212993eecbe998 # v2.0.0 23 | with: 24 | token: ${{ secrets.GITHUB_TOKEN }} 25 | -------------------------------------------------------------------------------- /.github/workflows/pr_pre-commit.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: pre-commit 3 | 4 | on: 5 | pull_request: 6 | merge_group: 7 | 8 | env: 9 | CARGO_TERM_COLOR: always 10 | NIX_PKG_MANAGER_VERSION: "2.28.3" 11 | RUST_TOOLCHAIN_VERSION: "nightly-2025-05-26" 12 | HADOLINT_VERSION: "v2.12.0" 13 | PYTHON_VERSION: "3.12" 14 | 15 | jobs: 16 | pre-commit: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - name: Install host dependencies 20 | uses: awalsh128/cache-apt-pkgs-action@5902b33ae29014e6ca012c5d8025d4346556bd40 # v1.4.3 21 | with: 22 | packages: protobuf-compiler krb5-user libkrb5-dev libclang-dev liblzma-dev libssl-dev pkg-config apt-transport-https 23 | version: ubuntu-latest 24 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 25 | with: 26 | persist-credentials: false 27 | submodules: recursive 28 | fetch-depth: 0 29 | - uses: stackabletech/actions/run-pre-commit@9aae2d1c14239021bfa33c041010f6fb7adec815 # v0.8.2 30 | with: 31 | python-version: ${{ env.PYTHON_VERSION }} 32 | rust: ${{ env.RUST_TOOLCHAIN_VERSION }} 33 | hadolint: ${{ env.HADOLINT_VERSION }} 34 | nix: ${{ env.NIX_PKG_MANAGER_VERSION }} 35 | nix-github-token: ${{ secrets.GITHUB_TOKEN }} 36 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | tests/_work/ 2 | debug/ 3 | target/ 4 | **/*.rs.bk 5 | 6 | .idea/ 7 | *.iws 8 | *.iml 9 | 10 | *.tgz 11 | 12 | result 13 | image.tar 14 | 15 | tilt_options.json 16 | 17 | .direnv/ 18 | .direnvrc 19 | .envrc 20 | 21 | .DS_Store 22 | -------------------------------------------------------------------------------- /.hadolint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ignored: 3 | # Warning: Use the -y 
switch to avoid manual input dnf install -y 4 | # https://github.com/hadolint/hadolint/wiki/DL3038 5 | # Reason: We set `assumeyes=True` in dnf.conf in our base image 6 | - DL3038 7 | 8 | # Warning: Specify version with dnf install -y - 9 | # https://github.com/hadolint/hadolint/wiki/DL3041 10 | # Reason: It's good advice, but we're not set up to pin versions just yet 11 | - DL3041 12 | -------------------------------------------------------------------------------- /.markdownlint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # All defaults or options can be checked here: 3 | # https://github.com/DavidAnson/markdownlint/blob/main/schema/.markdownlint.yaml 4 | 5 | # Default state for all rules 6 | default: true 7 | 8 | # MD013/line-length - Line length 9 | MD013: 10 | # Number of characters 11 | line_length: 9999 12 | # Number of characters for headings 13 | heading_line_length: 9999 14 | # Number of characters for code blocks 15 | code_block_line_length: 9999 16 | 17 | # MD033/no-inline-html 18 | MD033: 19 | allowed_elements: [h1, img, p] 20 | 21 | # MD024/no-duplicate-heading/no-duplicate-header - Multiple headings with the same content 22 | MD024: 23 | # Only check sibling headings 24 | siblings_only: true 25 | 26 | # MD041/first-line-heading/first-line-h1 First line in a file should be a top-level heading 27 | MD041: false # Github issues and PRs already have titles, and H1 is enormous in the description box. 28 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MESSAGES CONTROL] 2 | 3 | # These rules are for missing docstrings which doesn't matter much for most of our simple scripts 4 | disable=C0114,C0115,C0116 5 | 6 | [FORMAT] 7 | 8 | max-line-length=999 9 | indent-string=' ' 10 | -------------------------------------------------------------------------------- /.readme/README.md.j2: -------------------------------------------------------------------------------- 1 | {%- set title="Stackable Operator for Trino" -%} 2 | {%- set operator_name="trino" -%} 3 | {%- set operator_docs_slug="trino" -%} 4 | {%- set related_reading_links=[] -%} 5 | 6 | {% filter trim %} 7 | {%- include "partials/borrowed/header.md.j2" -%} 8 | {% endfilter %} 9 | 10 | {% filter trim %} 11 | {%- include "partials/borrowed/links.md.j2" -%} 12 | {% endfilter %} 13 | 14 | {% filter trim %} 15 | {%- include "partials/main.md.j2" -%} 16 | {% endfilter %} 17 | 18 | {% filter trim %} 19 | {%- include "partials/borrowed/footer.md.j2" -%} 20 | {% endfilter %} 21 | 22 | {% filter trim %} 23 | {%- include "partials/borrowed/related_reading.md.j2" -%} 24 | {% endfilter %} 25 | -------------------------------------------------------------------------------- /.readme/partials/borrowed/documentation.md.j2: -------------------------------------------------------------------------------- 1 | 2 | ## Documentation 3 | 4 | The stable documentation for this operator can be found in our [Stackable Data Platform documentation](https://docs.stackable.tech/home/stable/{{operator_docs_slug}}). 5 | If you are interested in the most recent state of this repository, check out the [nightly docs](https://docs.stackable.tech/home/nightly/{{operator_docs_slug}}) instead. 6 | 7 | The documentation for all Stackable products can be found at [docs.stackable.tech](https://docs.stackable.tech). 
8 | 9 | If you have a question about the Stackable Data Platform, contact us via our [homepage](https://stackable.tech/) or ask a public question in our [Discussions forum](https://github.com/orgs/stackabletech/discussions). 10 | -------------------------------------------------------------------------------- /.readme/partials/borrowed/header.md.j2: -------------------------------------------------------------------------------- 1 | 2 |

<p align="center"> 3 |   <img src="./.readme/static/borrowed/Icon_Stackable.svg" alt="Stackable Logo"/> 4 | </p> 5 | 6 | <h1 align="center">{{title}}</h1>
7 | -------------------------------------------------------------------------------- /.readme/partials/borrowed/links.md.j2: -------------------------------------------------------------------------------- 1 | 2 | [![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg)](https://GitHub.com/stackabletech/{{operator_name}}-operator/graphs/commit-activity) 3 | [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-green.svg)](https://docs.stackable.tech/home/stable/contributor/index.html) 4 | [![License OSL3.0](https://img.shields.io/badge/license-OSL3.0-green)](./LICENSE) 5 | 6 | [Documentation](https://docs.stackable.tech/home/stable/{{operator_docs_slug}}) {% if quickstart_link %}| [Quickstart]({{quickstart_link}}) {% endif %}| [Stackable Data Platform](https://stackable.tech/) | [Platform Docs](https://docs.stackable.tech/) | [Discussions](https://github.com/orgs/stackabletech/discussions) | [Discord](https://discord.gg/7kZ3BNnCAF) 7 | -------------------------------------------------------------------------------- /.readme/partials/borrowed/overview_blurb.md.j2: -------------------------------------------------------------------------------- 1 | 2 | It is part of the Stackable Data Platform, a curated selection of the best open source data apps like Apache Kafka, Apache Druid, Trino or Apache Spark, [all](#other-operators) working together seamlessly. Based on Kubernetes, it runs everywhere – [on prem or in the cloud](#supported-platforms). 3 | -------------------------------------------------------------------------------- /.readme/partials/borrowed/related_reading.md.j2: -------------------------------------------------------------------------------- 1 | 2 | {%- if related_reading_links -%} 3 | ## Related Reading 4 | {% for (text, link) in related_reading_links %} 5 | * [{{text}}]({{link}}) 6 | {%- endfor %} 7 | {%- endif -%} 8 | -------------------------------------------------------------------------------- /.readme/partials/main.md.j2: -------------------------------------------------------------------------------- 1 | This is a Kubernetes operator to manage [Trino](https://trino.io/) ensembles. 2 | 3 | {% filter trim %} 4 | {%- include "partials/borrowed/overview_blurb.md.j2" -%} 5 | {% endfilter %} 6 | 7 | ## Installation 8 | 9 | You can install the operator using [stackablectl or helm](https://docs.stackable.tech/home/stable/{{operator_name}}/getting_started/installation). 10 | 11 | Read on to get started with it, or see it in action in one of our [demos](https://stackable.tech/en/demos/). 12 | 13 | ## Getting Started 14 | 15 | You can follow this [tutorial](https://docs.stackable.tech/home/stable/{{operator_name}}/getting_started/first_steps) . 
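As a rough sketch of the stackablectl route (the list of dependency operators — commons, secret and listener — mirrors the install output used in the getting-started guide; exact versions are omitted here and may differ between releases):

```bash
# Install the Trino operator together with the operators it depends on
# (commons, secret and listener).
stackablectl operator install commons secret listener trino
```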
16 | 17 | {% filter trim %} 18 | {%- include "partials/borrowed/documentation.md.j2" -%} 19 | {% endfilter %} 20 | -------------------------------------------------------------------------------- /.readme/static/borrowed/Icon_Stackable.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /.readme/static/borrowed/sdp_overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stackabletech/trino-operator/f2f46fb5cb47938ac2f305b6ec4ce5bd8e915ba4/.readme/static/borrowed/sdp_overview.png -------------------------------------------------------------------------------- /.readme/static/borrowed/stackable_overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stackabletech/trino-operator/f2f46fb5cb47938ac2f305b6ec4ce5bd8e915ba4/.readme/static/borrowed/stackable_overview.png -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "type": "lldb", 6 | "request": "launch", 7 | "name": "Debug operator binary", 8 | "cargo": { 9 | "args": ["build"], 10 | "filter": { 11 | "name": "stackable-{[ operator.name }]", 12 | "kind": "bin" 13 | } 14 | }, 15 | "args": ["run"], 16 | "cwd": "${workspaceFolder}" 17 | } 18 | ] 19 | } 20 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "rust-analyzer.rustfmt.overrideCommand": [ 3 | "rustfmt", 4 | "+nightly-2025-05-26", 5 | "--edition", 6 | "2024", 7 | "--" 8 | ], 9 | } 10 | -------------------------------------------------------------------------------- /.yamllint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | extends: default 3 | 4 | ignore: | 5 | deploy/helm/**/templates 6 | 7 | rules: 8 | line-length: disable 9 | truthy: 10 | check-keys: false 11 | comments: 12 | min-spaces-from-content: 1 # Needed due to https://github.com/adrienverge/yamllint/issues/443 13 | indentation: 14 | indent-sequences: consistent 15 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["rust/operator-binary"] 3 | resolver = "2" 4 | 5 | [workspace.package] 6 | version = "0.0.0-dev" 7 | authors = ["Stackable GmbH "] 8 | license = "OSL-3.0" 9 | edition = "2021" 10 | repository = "https://github.com/stackabletech/trino-operator" 11 | 12 | [workspace.dependencies] 13 | product-config = { git = "https://github.com/stackabletech/product-config.git", tag = "0.7.0" } 14 | stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", features = ["telemetry", "versioned"], tag = "stackable-operator-0.93.1" } 15 | 16 | anyhow = "1.0" 17 | async-trait = "0.1" 18 | built = { version = "0.8", features = ["chrono", "git2"] } 19 | clap = "4.5" 20 | const_format = "0.2" 21 | futures = { version = "0.3", features = ["compat"] } 22 | indoc = "2.0" 23 | openssl = "0.10" 24 | rstest = "0.25" 25 | serde = { 
version = "1.0", features = ["derive"] } 26 | serde_json = "1.0" 27 | serde_yaml = "0.9" 28 | snafu = "0.8" 29 | strum = { version = "0.27", features = ["derive"] } 30 | tokio = { version = "1.40", features = ["full"] } 31 | tracing = "0.1" 32 | 33 | # [patch."https://github.com/stackabletech/operator-rs.git"] 34 | # stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "main" } 35 | # stackable-operator = { path = "../operator-rs/crates/stackable-operator" } 36 | -------------------------------------------------------------------------------- /Tiltfile: -------------------------------------------------------------------------------- 1 | # If tilt_options.json exists read it and load the default_registry value from it 2 | settings = read_json('tilt_options.json', default={}) 3 | registry = settings.get('default_registry', 'oci.stackable.tech/sandbox') 4 | 5 | # Configure default registry either read from config file above, or with default value of "oci.stackable.tech/sandbox" 6 | default_registry(registry) 7 | 8 | meta = read_json('nix/meta.json') 9 | operator_name = meta['operator']['name'] 10 | 11 | custom_build( 12 | registry + '/' + operator_name, 13 | 'make regenerate-nix && nix-build . -A docker --argstr dockerName "${EXPECTED_REGISTRY}/' + operator_name + '" && ./result/load-image | docker load', 14 | deps=['rust', 'Cargo.toml', 'Cargo.lock', 'default.nix', "nix", 'build.rs', 'vendor'], 15 | ignore=['*.~undo-tree~'], 16 | # ignore=['result*', 'Cargo.nix', 'target', *.yaml], 17 | outputs_image_ref_to='result/ref', 18 | ) 19 | 20 | # Load the latest CRDs from Nix 21 | watch_file('result') 22 | if os.path.exists('result'): 23 | k8s_yaml('result/crds.yaml') 24 | 25 | # We need to set the correct image annotation on the operator Deployment to use e.g. 
26 | # oci.stackable.tech/sandbox/opa-operator:7y19m3d8clwxlv34v5q2x4p7v536s00g instead of 27 | # oci.stackable.tech/sandbox/opa-operator:0.0.0-dev (which does not exist) 28 | k8s_kind('Deployment', image_json_path='{.spec.template.metadata.annotations.internal\\.stackable\\.tech/image}') 29 | 30 | # Exclude stale CRDs from Helm chart, and apply the rest 31 | helm_crds, helm_non_crds = filter_yaml( 32 | helm( 33 | 'deploy/helm/' + operator_name, 34 | name=operator_name, 35 | namespace="stackable-operators", 36 | set=[ 37 | 'image.repository=' + registry + '/' + operator_name, 38 | ], 39 | ), 40 | api_version = "^apiextensions\\.k8s\\.io/.*$", 41 | kind = "^CustomResourceDefinition$", 42 | ) 43 | k8s_yaml(helm_non_crds) 44 | -------------------------------------------------------------------------------- /crate-hashes.json: -------------------------------------------------------------------------------- 1 | { 2 | "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.93.1#k8s-version@0.1.3": "16faz0f3dsv095hk94kmb7mk3pr6lban1v3k0g6yawak6gk5xln6", 3 | "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.93.1#stackable-operator-derive@0.3.1": "16faz0f3dsv095hk94kmb7mk3pr6lban1v3k0g6yawak6gk5xln6", 4 | "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.93.1#stackable-operator@0.93.1": "16faz0f3dsv095hk94kmb7mk3pr6lban1v3k0g6yawak6gk5xln6", 5 | "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.93.1#stackable-shared@0.0.1": "16faz0f3dsv095hk94kmb7mk3pr6lban1v3k0g6yawak6gk5xln6", 6 | "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.93.1#stackable-telemetry@0.6.0": "16faz0f3dsv095hk94kmb7mk3pr6lban1v3k0g6yawak6gk5xln6", 7 | "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.93.1#stackable-versioned-macros@0.7.1": "16faz0f3dsv095hk94kmb7mk3pr6lban1v3k0g6yawak6gk5xln6", 8 | "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.93.1#stackable-versioned@0.7.1": "16faz0f3dsv095hk94kmb7mk3pr6lban1v3k0g6yawak6gk5xln6", 9 | "git+https://github.com/stackabletech/product-config.git?tag=0.7.0#product-config@0.7.0": "0gjsm80g6r75pm3824dcyiz4ysq1ka4c1if6k1mjm9cnd5ym0gny" 10 | } -------------------------------------------------------------------------------- /deploy/DO_NOT_EDIT.md: -------------------------------------------------------------------------------- 1 | # DO NOT EDIT 2 | 3 | These Helm charts and manifests are automatically generated. 4 | Please do not edit anything except for files explicitly mentioned below in this 5 | directory manually. 
6 | 7 | The following files are ok to edit: 8 | 9 | - helm/trino-operator/templates/roles.yaml 10 | - helm/trino-operator/values.yaml 11 | 12 | The details are in-motion but check this repository for a few details: 13 | 14 | -------------------------------------------------------------------------------- /deploy/helm/chart_testing.yaml: -------------------------------------------------------------------------------- 1 | remote: origin 2 | target-branch: main 3 | chart-dirs: 4 | - deploy/helm 5 | all: true 6 | -------------------------------------------------------------------------------- /deploy/helm/ct.yaml: -------------------------------------------------------------------------------- 1 | # This file is used for chart-testing (https://github.com/helm/chart-testing) 2 | # The name "ct.yaml" is not very self-descriptive but it is the default that chart-testing is looking for 3 | --- 4 | remote: origin 5 | target-branch: main 6 | chart-dirs: 7 | - deploy/helm 8 | all: true 9 | -------------------------------------------------------------------------------- /deploy/helm/trino-operator/.helmignore: -------------------------------------------------------------------------------- 1 | # ============= 2 | # This file is automatically generated from the templates in stackabletech/operator-templating 3 | # DON'T MANUALLY EDIT THIS FILE 4 | # ============= 5 | 6 | # Patterns to ignore when building packages. 7 | # This supports shell glob matching, relative path matching, and 8 | # negation (prefixed with !). Only one pattern per line. 9 | .DS_Store 10 | # Common VCS dirs 11 | .git/ 12 | .gitignore 13 | .bzr/ 14 | .bzrignore 15 | .hg/ 16 | .hgignore 17 | .svn/ 18 | # Common backup files 19 | *.swp 20 | *.bak 21 | *.tmp 22 | *.orig 23 | *~ 24 | # Various IDEs 25 | .project 26 | .idea/ 27 | *.tmproj 28 | .vscode/ 29 | -------------------------------------------------------------------------------- /deploy/helm/trino-operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v2 3 | name: trino-operator 4 | version: "0.0.0-dev" 5 | appVersion: "0.0.0-dev" 6 | description: The Stackable Operator for Trino 7 | home: https://github.com/stackabletech/trino-operator 8 | maintainers: 9 | - name: Stackable 10 | url: https://www.stackable.tech 11 | -------------------------------------------------------------------------------- /deploy/helm/trino-operator/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Helm Chart for Stackable Operator for Trino 3 | 4 | This Helm Chart can be used to install Custom Resource Definitions and the Operator for Trino provided by Stackable. 5 | 6 | ## Requirements 7 | 8 | - Create a [Kubernetes Cluster](../Readme.md) 9 | - Install [Helm](https://helm.sh/docs/intro/install/) 10 | 11 | ## Install the Stackable Operator for Trino 12 | 13 | ```bash 14 | # From the root of the operator repository 15 | make compile-chart 16 | 17 | helm install trino-operator deploy/helm/trino-operator 18 | ``` 19 | 20 | ## Usage of the CRDs 21 | 22 | The usage of this operator and its CRDs is described in the [documentation](https://docs.stackable.tech/trino/index.html) 23 | 24 | The operator has example requests included in the [`/examples`](https://github.com/stackabletech/trino-operator/tree/main/examples) directory. 
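As a minimal sketch, assuming the operator and its dependency operators are already installed, one of those example manifests can be applied straight from the repository root:

```bash
# Create a simple Trino cluster from one of the bundled examples; the
# examples/ directory also contains variants with S3, Hive, OPA and OAuth2.
kubectl apply -f examples/simple-trino-cluster.yaml

# Observe the coordinator and worker pods starting up
kubectl get pods
```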
25 | 26 | ## Links 27 | 28 | 29 | -------------------------------------------------------------------------------- /deploy/helm/trino-operator/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | data: 4 | {{ (.Files.Glob "configs/*").AsConfig | indent 2 }} 5 | kind: ConfigMap 6 | metadata: 7 | name: {{ include "operator.fullname" . }}-configmap 8 | labels: 9 | {{- include "operator.labels" . | nindent 4 }} 10 | -------------------------------------------------------------------------------- /deploy/helm/trino-operator/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | {{ if .Values.serviceAccount.create -}} 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: {{ include "operator.fullname" . }}-serviceaccount 7 | labels: 8 | {{- include "operator.labels" . | nindent 4 }} 9 | {{- with .Values.serviceAccount.annotations }} 10 | annotations: 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | --- 14 | apiVersion: rbac.authorization.k8s.io/v1 15 | # This cluster role binding allows anyone in the "manager" group to read secrets in any namespace. 16 | kind: ClusterRoleBinding 17 | metadata: 18 | name: {{ include "operator.fullname" . }}-clusterrolebinding 19 | labels: 20 | {{- include "operator.labels" . | nindent 4 }} 21 | subjects: 22 | - kind: ServiceAccount 23 | name: {{ include "operator.fullname" . }}-serviceaccount 24 | namespace: {{ .Release.Namespace }} 25 | roleRef: 26 | kind: ClusterRole 27 | name: {{ include "operator.fullname" . }}-clusterrole 28 | apiGroup: rbac.authorization.k8s.io 29 | {{- end }} 30 | -------------------------------------------------------------------------------- /deploy/helm/trino-operator/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for trino-operator. 2 | --- 3 | image: 4 | repository: oci.stackable.tech/sdp/trino-operator 5 | pullPolicy: IfNotPresent 6 | pullSecrets: [] 7 | 8 | nameOverride: "" 9 | fullnameOverride: "" 10 | 11 | serviceAccount: 12 | # Specifies whether a service account should be created 13 | create: true 14 | # Annotations to add to the service account 15 | annotations: {} 16 | # The name of the service account to use. 17 | # If not set and create is true, a name is generated using the fullname template 18 | name: "" 19 | 20 | podAnnotations: {} 21 | 22 | # Provide additional labels which get attached to all deployed resources 23 | labels: 24 | stackable.tech/vendor: Stackable 25 | 26 | podSecurityContext: {} 27 | # fsGroup: 2000 28 | 29 | securityContext: {} 30 | # capabilities: 31 | # drop: 32 | # - ALL 33 | # readOnlyRootFilesystem: true 34 | # runAsNonRoot: true 35 | # runAsUser: 1000 36 | 37 | resources: 38 | limits: 39 | cpu: 100m 40 | memory: 128Mi 41 | requests: 42 | cpu: 100m 43 | memory: 128Mi 44 | 45 | nodeSelector: {} 46 | 47 | tolerations: [] 48 | 49 | affinity: {} 50 | 51 | # When running on a non-default Kubernetes cluster domain, the cluster domain can be configured here. 52 | # See the https://docs.stackable.tech/home/stable/guides/kubernetes-cluster-domain guide for details. 
53 | # kubernetesClusterDomain: my-cluster.local 54 | 55 | # See all available options and detailed explanations about the concept here: 56 | # https://docs.stackable.tech/home/stable/concepts/telemetry/ 57 | telemetry: 58 | consoleLog: 59 | enabled: true 60 | fileLog: 61 | enabled: false 62 | rotationPeriod: hourly 63 | maxFiles: 6 64 | otelLogExporter: 65 | enabled: false 66 | otelTraceExporter: 67 | enabled: false 68 | -------------------------------------------------------------------------------- /deploy/stackable-operators-ns.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: stackable-operators 6 | -------------------------------------------------------------------------------- /docs/antora.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: home 3 | version: "nightly" 4 | -------------------------------------------------------------------------------- /docs/modules/trino/assets/images/connect-with-dbeaver-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stackabletech/trino-operator/f2f46fb5cb47938ac2f305b6ec4ce5bd8e915ba4/docs/modules/trino/assets/images/connect-with-dbeaver-1.png -------------------------------------------------------------------------------- /docs/modules/trino/assets/images/connect-with-dbeaver-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stackabletech/trino-operator/f2f46fb5cb47938ac2f305b6ec4ce5bd8e915ba4/docs/modules/trino/assets/images/connect-with-dbeaver-2.png -------------------------------------------------------------------------------- /docs/modules/trino/assets/images/connect-with-dbeaver-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stackabletech/trino-operator/f2f46fb5cb47938ac2f305b6ec4ce5bd8e915ba4/docs/modules/trino/assets/images/connect-with-dbeaver-3.png -------------------------------------------------------------------------------- /docs/modules/trino/assets/images/trino-web-ui-finished.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stackabletech/trino-operator/f2f46fb5cb47938ac2f305b6ec4ce5bd8e915ba4/docs/modules/trino/assets/images/trino-web-ui-finished.png -------------------------------------------------------------------------------- /docs/modules/trino/assets/images/trino-web-ui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stackabletech/trino-operator/f2f46fb5cb47938ac2f305b6ec4ce5bd8e915ba4/docs/modules/trino/assets/images/trino-web-ui.png -------------------------------------------------------------------------------- /docs/modules/trino/examples/getting_started/code/install_output.txt: -------------------------------------------------------------------------------- 1 | Installed commons=0.0.0-dev operator 2 | Installed secret=0.0.0-dev operator 3 | Installed listener=0.0.0-dev operator 4 | Installed trino=0.0.0-dev operator 5 | -------------------------------------------------------------------------------- /docs/modules/trino/examples/getting_started/code/install_output.txt.j2: -------------------------------------------------------------------------------- 1 | Installed commons={{ versions.commons }} operator 
2 | Installed secret={{ versions.secret }} operator 3 | Installed listener={{ versions.listener }} operator 4 | Installed trino={{ versions.trino }} operator 5 | -------------------------------------------------------------------------------- /docs/modules/trino/examples/getting_started/code/test_getting_started_helm.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | set -euo pipefail 3 | 4 | cd "$(dirname "$0")" 5 | ./getting_started.sh helm 6 | -------------------------------------------------------------------------------- /docs/modules/trino/examples/getting_started/code/test_getting_started_stackablectl.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | set -euo pipefail 3 | 4 | cd "$(dirname "$0")" 5 | ./getting_started.sh stackablectl 6 | -------------------------------------------------------------------------------- /docs/modules/trino/examples/getting_started/code/trino.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: trino.stackable.tech/v1alpha1 3 | kind: TrinoCluster 4 | metadata: 5 | name: simple-trino 6 | spec: 7 | image: 8 | productVersion: "470" 9 | clusterConfig: 10 | catalogLabelSelector: 11 | matchLabels: 12 | trino: simple-trino 13 | listenerClass: external-unstable 14 | coordinators: 15 | roleGroups: 16 | default: 17 | replicas: 1 18 | workers: 19 | roleGroups: 20 | default: 21 | replicas: 1 22 | -------------------------------------------------------------------------------- /docs/modules/trino/examples/getting_started/code/trino.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: trino.stackable.tech/v1alpha1 3 | kind: TrinoCluster 4 | metadata: 5 | name: simple-trino 6 | spec: 7 | image: 8 | productVersion: "470" 9 | clusterConfig: 10 | catalogLabelSelector: 11 | matchLabels: 12 | trino: simple-trino 13 | listenerClass: external-unstable 14 | coordinators: 15 | roleGroups: 16 | default: 17 | replicas: 1 18 | workers: 19 | roleGroups: 20 | default: 21 | replicas: 1 22 | -------------------------------------------------------------------------------- /docs/modules/trino/examples/usage-guide/opa-bundle-trino-cm.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: opa-bundle-trino 6 | labels: 7 | opa.stackable.tech/bundle: "true" 8 | data: 9 | trino.rego: | 10 | package trino 11 | 12 | default allow = false 13 | 14 | # Allow non-batched access 15 | allow if { 16 | is_admin 17 | } 18 | 19 | # Allow batched access 20 | batch contains i if { 21 | some i 22 | input.action.filterResources[i] 23 | is_admin 24 | } 25 | 26 | # Corner case: filtering columns is done with a single table item, and many columns inside 27 | batch contains i if { 28 | some i 29 | input.action.operation == "FilterColumns" 30 | count(input.action.filterResources) == 1 31 | input.action.filterResources[0].table.columns[i] 32 | is_admin 33 | } 34 | 35 | # Filter rows according to an expression 36 | rowFilters contains row_filter if { 37 | input.action.operation == "GetRowFilters" 38 | 39 | input.action.resource.table.catalogName == "default" 40 | input.action.resource.table.schemaName == "hr" 41 | input.action.resource.table.tableName == "employee" 42 | 43 | row_filter := { 44 | "expression": "user = current_user", 45 | "identity": "system_user", 46 | } 
47 | } 48 | 49 | # Mask columns according to an expression 50 | columnMask := column_mask if { 51 | input.action.operation == "GetColumnMask" 52 | 53 | input.action.resource.column.catalogName == "default" 54 | input.action.resource.column.schemaName == "default" 55 | input.action.resource.column.tableName == "cards" 56 | input.action.resource.column.columnName == "SSN" 57 | 58 | column_mask := { 59 | "expression": "'XXX-XX-' + substring(credit_card, -4)", 60 | "identity": "system_user", 61 | } 62 | } 63 | 64 | is_admin() if { 65 | input.context.identity.user == "admin" 66 | } 67 | -------------------------------------------------------------------------------- /docs/modules/trino/examples/usage-guide/trino-file-auth-snippet.yaml: -------------------------------------------------------------------------------- 1 | # yamllint disable-file 2 | --- 3 | # tag::authentication_class[] 4 | apiVersion: authentication.stackable.tech/v1alpha1 5 | kind: AuthenticationClass 6 | metadata: 7 | name: simple-trino-users 8 | spec: 9 | provider: 10 | static: 11 | userCredentialsSecret: 12 | name: trino-users 13 | # end::authentication_class[] 14 | --- 15 | # tag::secret[] 16 | apiVersion: v1 17 | kind: Secret 18 | metadata: 19 | name: trino-users 20 | type: kubernetes.io/opaque 21 | stringData: 22 | admin: admin 23 | alice: alice 24 | bob: bob 25 | # end::secret[] 26 | --- 27 | # tag::trino[] 28 | apiVersion: trino.stackable.tech/v1alpha1 29 | kind: TrinoCluster 30 | metadata: 31 | name: simple-trino 32 | spec: 33 | clusterConfig: 34 | authentication: 35 | - authenticationClass: simple-trino-users 36 | - authenticationClass: ... 37 | # end::trino[] 38 | -------------------------------------------------------------------------------- /docs/modules/trino/examples/usage-guide/trino-insecure.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: trino.stackable.tech/v1alpha1 3 | kind: TrinoCatalog 4 | metadata: 5 | name: hive 6 | labels: 7 | trino: simple-trino 8 | spec: 9 | connector: 10 | hive: 11 | metastore: 12 | configMap: simple-hive-derby 13 | --- 14 | apiVersion: trino.stackable.tech/v1alpha1 15 | kind: TrinoCluster 16 | metadata: 17 | name: simple-trino 18 | spec: 19 | image: 20 | productVersion: "470" 21 | clusterConfig: 22 | catalogLabelSelector: 23 | matchLabels: 24 | trino: simple-trino 25 | coordinators: 26 | roleGroups: 27 | default: 28 | replicas: 1 29 | workers: 30 | roleGroups: 31 | default: 32 | replicas: 1 33 | --- 34 | apiVersion: hive.stackable.tech/v1alpha1 35 | kind: HiveCluster 36 | metadata: 37 | name: simple-hive-derby 38 | spec: 39 | image: 40 | productVersion: 4.0.0 41 | clusterConfig: 42 | database: 43 | connString: jdbc:derby:;databaseName=/tmp/metastore_db;create=true 44 | user: APP 45 | password: mine 46 | dbType: derby 47 | metastore: 48 | roleGroups: 49 | default: 50 | replicas: 1 51 | -------------------------------------------------------------------------------- /docs/modules/trino/examples/usage-guide/trino-ldap-auth-snippet.yaml: -------------------------------------------------------------------------------- 1 | # yamllint disable-file 2 | --- 3 | # tag::trino[] 4 | apiVersion: trino.stackable.tech/v1alpha1 5 | kind: TrinoCluster 6 | metadata: 7 | name: trino-with-ldap 8 | spec: 9 | clusterConfig: 10 | authentication: 11 | - authenticationClass: my-ldap 12 | # end::trino[] 13 | --- 14 | # tag::authentication_class[] 15 | apiVersion: authentication.stackable.tech/v1alpha1 16 | kind: AuthenticationClass 17 | metadata: 
18 | name: my-ldap 19 | spec: 20 | provider: 21 | ldap: 22 | hostname: openldap.default.svc.cluster.local 23 | searchBase: ou=users,dc=example,dc=org 24 | ... 25 | # end::authentication_class[] 26 | -------------------------------------------------------------------------------- /docs/modules/trino/examples/usage-guide/trino-oidc-auth-snippet.yaml: -------------------------------------------------------------------------------- 1 | # yamllint disable-file 2 | --- 3 | # tag::secret[] 4 | apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | name: oidc-secret 8 | type: kubernetes.io/opaque 9 | stringData: 10 | clientId: trino 11 | clientSecret: trino-client-secret 12 | # end::secret[] 13 | --- 14 | # tag::authentication_class[] 15 | apiVersion: authentication.stackable.tech/v1alpha1 16 | kind: AuthenticationClass 17 | metadata: 18 | name: oidc 19 | spec: 20 | provider: 21 | oidc: 22 | hostname: keycloak.default.svc.cluster.local 23 | port: 8080 24 | rootPath: /realms/stackable/ 25 | scopes: 26 | - openid 27 | principalClaim: preferred_username 28 | ... 29 | # end::authentication_class[] 30 | --- 31 | # tag::trino[] 32 | apiVersion: trino.stackable.tech/v1alpha1 33 | kind: TrinoCluster 34 | metadata: 35 | name: trino-with-ldap 36 | spec: 37 | clusterConfig: 38 | authentication: 39 | - authenticationClass: oidc 40 | oidc: 41 | clientCredentialsSecret: oidc-secret 42 | tls: 43 | serverSecretClass: tls 44 | ... 45 | # end::trino[] 46 | -------------------------------------------------------------------------------- /docs/modules/trino/examples/usage-guide/trino-secure-tls-only.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: trino.stackable.tech/v1alpha1 3 | kind: TrinoCatalog 4 | metadata: 5 | name: hive 6 | labels: 7 | trino: simple-trino 8 | spec: 9 | connector: 10 | hive: 11 | metastore: 12 | configMap: simple-hive-derby 13 | --- 14 | apiVersion: trino.stackable.tech/v1alpha1 15 | kind: TrinoCluster 16 | metadata: 17 | name: simple-trino 18 | spec: 19 | image: 20 | productVersion: "470" 21 | clusterConfig: 22 | tls: 23 | serverSecretClass: trino-tls # <1> 24 | catalogLabelSelector: 25 | matchLabels: 26 | trino: simple-trino # <2> 27 | coordinators: 28 | roleGroups: 29 | default: 30 | replicas: 1 31 | workers: 32 | roleGroups: 33 | default: 34 | replicas: 1 35 | --- 36 | apiVersion: secrets.stackable.tech/v1alpha1 37 | kind: SecretClass 38 | metadata: 39 | name: trino-tls # <1> 40 | spec: 41 | backend: 42 | autoTls: # <3> 43 | ca: 44 | secret: 45 | name: secret-provisioner-trino-tls-ca 46 | namespace: default 47 | autoGenerate: true 48 | --- 49 | apiVersion: hive.stackable.tech/v1alpha1 50 | kind: HiveCluster 51 | metadata: 52 | name: simple-hive-derby 53 | spec: 54 | image: 55 | productVersion: 4.0.0 56 | clusterConfig: 57 | database: 58 | connString: jdbc:derby:;databaseName=/tmp/metastore_db;create=true 59 | user: APP 60 | password: mine 61 | dbType: derby 62 | metastore: 63 | roleGroups: 64 | default: 65 | replicas: 1 66 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/getting_started/index.adoc: -------------------------------------------------------------------------------- 1 | = Getting started 2 | :description: Get started with Trino on Kubernetes using the Stackable Operator. Follow steps for installation, setup, and resource recommendations. 3 | 4 | This guide gets you started with Trino using the Stackable Operator. 
5 | It guides you through the installation of the operator and its dependencies and setting up your first Trino cluster. 6 | 7 | == Prerequisites 8 | 9 | You need: 10 | 11 | * a Kubernetes cluster 12 | * kubectl 13 | * optional: Helm 14 | * optional: Trino CLI tool and curl (for testing) 15 | 16 | Resource sizing depends on cluster type(s), usage and scope, but as a starting point we recommend a minimum of the following resources for this operator: 17 | 18 | * 0.2 cores (e.g. i5 or similar) 19 | * 256MB RAM 20 | 21 | == What's next 22 | 23 | The guide is divided into the following steps: 24 | 25 | * xref:getting_started/installation.adoc[Installing the Operators] 26 | * xref:getting_started/first_steps.adoc[Setting up the Trino cluster] 27 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/reference/commandline-parameters.adoc: -------------------------------------------------------------------------------- 1 | = Command line parameters 2 | 3 | This operator accepts the following command line parameters: 4 | 5 | == product-config 6 | 7 | *Default value*: `/etc/stackable/trino-operator/config-spec/properties.yaml` 8 | 9 | *Required*: false 10 | 11 | *Multiple values:* false 12 | 13 | [source] 14 | ---- 15 | cargo run -- run --product-config /foo/bar/properties.yaml 16 | ---- 17 | 18 | == watch-namespace 19 | 20 | *Default value*: All namespaces 21 | 22 | *Required*: false 23 | 24 | *Multiple values:* false 25 | 26 | The operator **only** watches for resources in the provided namespace `test`: 27 | 28 | [source] 29 | ---- 30 | cargo run -- run --watch-namespace test 31 | ---- 32 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/reference/crds.adoc: -------------------------------------------------------------------------------- 1 | = CRD Reference 2 | 3 | Find all CRD reference for the Stackable Operator for Trino at: {crd-docs-base-url}/trino-operator/{crd-docs-version}. 4 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/reference/index.adoc: -------------------------------------------------------------------------------- 1 | = Reference 2 | 3 | Consult the reference documentation section to find exhaustive information on: 4 | 5 | * Descriptions and default values of all properties in the CRDs used by this operator in the xref:reference/crds.adoc[]. 6 | * The xref:reference/commandline-parameters.adoc[] and xref:reference/environment-variables.adoc[] accepted by the operator. 7 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/usage-guide/catalogs/black-hole.adoc: -------------------------------------------------------------------------------- 1 | = Black Hole 2 | 3 | Primarily the https://trino.io/docs/current/connector/blackhole.html[Black Hole connector] is designed for high performance testing of other components. 4 | It works like the `/dev/null` device on Unix-like operating systems for data writing and like `/dev/null` or `/dev/zero` for data reading. 
5 | 6 | == Example Black Hole catalog configuration 7 | [source,yaml] 8 | ---- 9 | apiVersion: trino.stackable.tech/v1alpha1 10 | kind: TrinoCatalog 11 | metadata: 12 | # The name of the catalog as it will appear in Trino 13 | name: blackhole 14 | # TrinoCluster can use these labels to select which catalogs to include 15 | labels: 16 | trino: simple-trino 17 | spec: 18 | connector: 19 | blackHole: {} # No options needed 20 | ---- 21 | 22 | [NOTE] 23 | ==== 24 | This connector does not work properly with multiple coordinators, since each coordinator has different metadata. 25 | ==== 26 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/usage-guide/catalogs/delta-lake.adoc: -------------------------------------------------------------------------------- 1 | = Delta Lake 2 | 3 | https://delta.io/[Delta Lake] is an open-source storage framework that enables building a Lakehouse architecture with support for multiple compute engines. 4 | It depends on a Hive metastore being present and makes use of the same metastore ConfigMap used by the Hive connector. 5 | 6 | == Example Delta Lake catalog configuration 7 | 8 | [source,yaml] 9 | ---- 10 | apiVersion: trino.stackable.tech/v1alpha1 11 | kind: TrinoCatalog 12 | metadata: 13 | # The name of the catalog as it will appear in Trino 14 | name: delta 15 | labels: 16 | trino: simple-trino 17 | spec: 18 | connector: 19 | # Specify deltaLake here when defining a Delta Lake catalog 20 | deltaLake: 21 | metastore: 22 | configMap: simple-hive 23 | s3: 24 | inline: 25 | host: test-minio 26 | port: 9000 27 | accessStyle: Path 28 | credentials: 29 | secretClass: minio-credentials 30 | ---- 31 | 32 | == Connect to S3 store or HDFS 33 | The Delta Lake connector connects to S3 or HDFS in the same way the xref:usage-guide/catalogs/hive.adoc[] connector does. 34 | Refer to that documentation for access configuration. 35 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/usage-guide/catalogs/google-sheets.adoc: -------------------------------------------------------------------------------- 1 | = Google sheets 2 | :trino-google-sheets-connector: https://trino.io/docs/current/connector/googlesheets.html 3 | 4 | With this connector Trino can connect to Google Sheets. 5 | It needs a service-user to access the Google APIs. 6 | Consult the {trino-google-sheets-connector}[official documentation] on how to use the Google Sheets connector.
7 | 8 | == Example Google sheets catalog configuration 9 | [source,yaml] 10 | ---- 11 | apiVersion: trino.stackable.tech/v1alpha1 12 | kind: TrinoCatalog 13 | metadata: 14 | name: gsheet 15 | labels: 16 | trino: trino 17 | spec: 18 | connector: 19 | googleSheet: 20 | credentialsSecret: gsheet-credentials 21 | metadataSheetId: 1dT4dRWo9tAKBk5GdH-a54dcizuoxOTn98X8igZcnYr8 22 | cache: # optional 23 | sheetsDataMaxCacheSize: 1000 # optional, defaults to 1000 24 | sheetsDataExpireAfterWrite: 5m # optional, defaults to 5m 25 | --- 26 | apiVersion: v1 27 | kind: Secret 28 | metadata: 29 | name: gsheet-credentials 30 | stringData: 31 | credentials: |+ 32 | { 33 | "type": "service_account", 34 | "project_id": "neat-bricolage-231015", 35 | "private_key_id": "XXX", 36 | "private_key": "Private key contents including BEGIN/END PRIVATE KEY lines, and \n for newlines", 37 | "client_email": "trino-550@neat-bricolage-231015.iam.gserviceaccount.com", 38 | "client_id": "XXX", 39 | "auth_uri": "https://accounts.google.com/o/oauth2/auth", 40 | "token_uri": "https://oauth2.googleapis.com/token", 41 | "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", 42 | "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/trino-550%40neat-bricolage-231015.iam.gserviceaccount.com" 43 | } 44 | ---- 45 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/usage-guide/catalogs/iceberg.adoc: -------------------------------------------------------------------------------- 1 | = Apache Iceberg 2 | 3 | Apache Iceberg is a format for huge analytic tables designed to address some of the scaling issues with traditional Hive tables. 4 | Iceberg depends on a Hive metastore being present and makes use of the same metastore ConfigMap used by the Hive connector. 5 | 6 | You can deploy a Hive Metastore Stacklet with the xref:hive:index.adoc[]. 7 | 8 | == Example Iceberg catalog configuration 9 | 10 | [source,yaml] 11 | ---- 12 | apiVersion: trino.stackable.tech/v1alpha1 13 | kind: TrinoCatalog 14 | metadata: 15 | name: iceberg # <1> 16 | labels: 17 | trino: simple-trino 18 | spec: 19 | connector: 20 | # Specify iceberg here when defining an Iceberg catalog 21 | iceberg: 22 | metastore: 23 | configMap: simple-hive # <2> 24 | s3: 25 | inline: 26 | host: test-minio 27 | port: 9000 28 | accessStyle: Path 29 | credentials: 30 | secretClass: minio-credentials 31 | ---- 32 | <1> The name of the catalog as it will appear in Trino 33 | <2> This is the name of your Hive Stacklet 34 | 35 | == Connect to S3 store or HDFS 36 | The Iceberg connector can connect to S3 or HDFS in the same way the xref:usage-guide/catalogs/hive.adoc[] connector does. 37 | Check that documentation on how to configure access. 38 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/usage-guide/catalogs/tpcds.adoc: -------------------------------------------------------------------------------- 1 | = TPC-DS 2 | 3 | The https://trino.io/docs/current/connector/tpcds.html[TPC-DS connector] provides a data source for the TPC-DS benchmark data set. 4 | This is not a stored data set and is instead a virtual data set generated at query time using a deterministic algorithm.
5 | 6 | == Example TPC-DS catalog configuration 7 | [source,yaml] 8 | ---- 9 | apiVersion: trino.stackable.tech/v1alpha1 10 | kind: TrinoCatalog 11 | metadata: 12 | # The name of the catalog as it will appear in Trino 13 | name: tpcds 14 | # TrinoCluster can use these labels to select which catalogs to include 15 | labels: 16 | trino: simple-trino 17 | spec: 18 | connector: 19 | tpcds: {} # No options needed 20 | ---- 21 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/usage-guide/catalogs/tpch.adoc: -------------------------------------------------------------------------------- 1 | = TPC-H 2 | 3 | The https://trino.io/docs/current/connector/tpch.html[TPC-H connector] provides a data source for the TPC-H benchmark data set. 4 | This is not a stored data set and is instead a virtual data set generated at query time using a deterministic algorithm. 5 | 6 | == Example TPC-H catalog configuration 7 | [source,yaml] 8 | ---- 9 | apiVersion: trino.stackable.tech/v1alpha1 10 | kind: TrinoCatalog 11 | metadata: 12 | # The name of the catalog as it will appear in Trino 13 | name: tpch 14 | # TrinoCluster can use these labels to select which catalogs to include 15 | labels: 16 | trino: simple-trino 17 | spec: 18 | connector: 19 | tpch: {} # No options needed 20 | ---- 21 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/usage-guide/index.adoc: -------------------------------------------------------------------------------- 1 | = Usage guide 2 | 3 | This section helps you to use the Trino Stackable operator. 4 | It shows you how to set up a Trino cluster in different ways and to test it with Hive and S3. 5 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/usage-guide/listenerclass.adoc: -------------------------------------------------------------------------------- 1 | = Service exposition with ListenerClasses 2 | 3 | Trino offers a web UI and an API, both of which are exposed by the `coordinator` xref:concepts:roles-and-role-groups.adoc[role]. 4 | The Operator deploys a service called `<name>-coordinator` (where `<name>` is the name of the TrinoCluster) through which Trino can be reached. 5 | 6 | This service can have three different types: `cluster-internal`, `external-unstable` and `external-stable`. 7 | Read more about the types in the xref:concepts:service-exposition.adoc[service exposition] documentation at platform level. 8 | 9 | This is how the ListenerClass is configured: 10 | 11 | [source,yaml] 12 | ---- 13 | spec: 14 | clusterConfig: 15 | listenerClass: cluster-internal # <1> 16 | ---- 17 | <1> The default `cluster-internal` setting.
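
To expose Trino outside the Kubernetes cluster, switch the ListenerClass to one of the external types named above.
The following is a minimal sketch; it assumes the `external-unstable` class is available on your platform and that a stable address is not required:

[source,yaml]
----
spec:
  clusterConfig:
    # Expose the coordinator service outside the cluster.
    # With the unstable class, the assigned address and port are not guaranteed to stay the same.
    listenerClass: external-unstable
----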
18 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/usage-guide/log_aggregation.adoc: -------------------------------------------------------------------------------- 1 | = Log aggregation 2 | :description: The logs can be forwarded to a Vector log aggregator by providing a discovery ConfigMap for the aggregator and by enabling the log agent 3 | 4 | The logs can be forwarded to a Vector log aggregator by providing a discovery ConfigMap for the aggregator and by enabling the log agent: 5 | 6 | [source,yaml] 7 | ---- 8 | spec: 9 | clusterConfig: 10 | vectorAggregatorConfigMapName: vector-aggregator-discovery 11 | coordinators: 12 | config: 13 | logging: 14 | enableVectorAgent: true 15 | containers: 16 | trino: 17 | loggers: 18 | io.trino: 19 | level: INFO 20 | ---- 21 | 22 | Currently, the logs are collected only for `server.log`. 23 | Logging for `http-request.log` is disabled by default. 24 | 25 | Further information on how to configure logging, can be found in xref:concepts:logging.adoc[]. 26 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/usage-guide/monitoring.adoc: -------------------------------------------------------------------------------- 1 | = Monitoring 2 | :description: The managed Trino instances are automatically configured to export Prometheus metrics. 3 | 4 | The managed Trino instances are automatically configured to export Prometheus metrics. 5 | See xref:operators:monitoring.adoc[] for more details. 6 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/usage-guide/operations/cluster-operations.adoc: -------------------------------------------------------------------------------- 1 | 2 | = Cluster Operation 3 | 4 | Trino installations can be configured with different cluster operations like pausing reconciliation or stopping the cluster. See xref:concepts:operations/cluster_operations.adoc[cluster operations] for more details. 5 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/usage-guide/operations/index.adoc: -------------------------------------------------------------------------------- 1 | = Operations 2 | 3 | This section of the documentation is intended for the operations teams that maintain a Stackable Data Platform installation. 4 | 5 | Read on the xref:concepts:operations/index.adoc[concepts page on operations] with the necessary details to operate the platform in a production environment. 6 | -------------------------------------------------------------------------------- /docs/modules/trino/pages/usage-guide/operations/pod-placement.adoc: -------------------------------------------------------------------------------- 1 | = Pod placement 2 | 3 | You can configure the Pod placement of the Trino pods as described in xref:concepts:operations/pod_placement.adoc[]. 4 | 5 | The default affinities created by the operator are: 6 | 7 | 1. Co-locate all the Trino Pods (weight 20) 8 | 2. Distribute all Pods within the same role (coordinators, workers) (weight 70) 9 | 10 | Additionally the operator looks through every `TrinoCatalog` you configure and sets up the following affinities: 11 | 12 | 1. Hive + Iceberg connector: Co-locate the coordinators with the hive metastores (weight 50) 13 | 2. 
Hive + Iceberg connector: Co-locate the workers with the hdfs datanodes (if hdfs is used) (weight 50) 14 | -------------------------------------------------------------------------------- /docs/modules/trino/partials/nav.adoc: -------------------------------------------------------------------------------- 1 | * xref:trino:getting_started/index.adoc[] 2 | ** xref:trino:getting_started/installation.adoc[] 3 | ** xref:trino:getting_started/first_steps.adoc[] 4 | * xref:trino:concepts.adoc[] 5 | * xref:trino:usage-guide/index.adoc[] 6 | ** xref:trino:usage-guide/connect_to_trino.adoc[] 7 | ** xref:trino:usage-guide/listenerclass.adoc[] 8 | ** xref:trino:usage-guide/configuration.adoc[] 9 | ** xref:trino:usage-guide/s3.adoc[] 10 | ** xref:trino:usage-guide/security.adoc[] 11 | ** xref:trino:usage-guide/monitoring.adoc[] 12 | ** xref:trino:usage-guide/log_aggregation.adoc[] 13 | ** xref:trino:usage-guide/query.adoc[] 14 | ** xref:trino:usage-guide/overrides.adoc[] 15 | ** xref:trino:usage-guide/catalogs/index.adoc[] 16 | *** xref:trino:usage-guide/catalogs/black-hole.adoc[] 17 | *** xref:trino:usage-guide/catalogs/delta-lake.adoc[] 18 | *** xref:trino:usage-guide/catalogs/generic.adoc[] 19 | *** xref:trino:usage-guide/catalogs/google-sheets.adoc[] 20 | *** xref:trino:usage-guide/catalogs/hive.adoc[] 21 | *** xref:trino:usage-guide/catalogs/iceberg.adoc[] 22 | *** xref:trino:usage-guide/catalogs/tpcds.adoc[] 23 | *** xref:trino:usage-guide/catalogs/tpch.adoc[] 24 | ** xref:trino:usage-guide/operations/index.adoc[] 25 | *** xref:trino:usage-guide/operations/cluster-operations.adoc[] 26 | *** xref:trino:usage-guide/operations/pod-placement.adoc[] 27 | *** xref:trino:usage-guide/operations/pod-disruptions.adoc[] 28 | *** xref:trino:usage-guide/operations/graceful-shutdown.adoc[] 29 | * xref:trino:reference/index.adoc[] 30 | ** xref:trino:reference/crds.adoc[] 31 | *** {crd-docs}/trino.stackable.tech/trinocluster/v1alpha1/[TrinoCluster {external-link-icon}^] 32 | *** {crd-docs}/trino.stackable.tech/trinocatalog/v1alpha1/[TrinoCatalog {external-link-icon}^] 33 | ** xref:trino:reference/commandline-parameters.adoc[] 34 | ** xref:trino:reference/environment-variables.adoc[] 35 | -------------------------------------------------------------------------------- /docs/modules/trino/partials/supported-versions.adoc: -------------------------------------------------------------------------------- 1 | // The version ranges supported by Trino-Operator 2 | // This is a separate file, since it is used by both the direct Trino documentation, and the overarching 3 | // Stackable Platform documentation. 
4 | 5 | - 470 6 | - 455 (deprecated) 7 | - 451 (LTS) 8 | -------------------------------------------------------------------------------- /docs/templating_vars.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | helm: 3 | repo_name: sdp-charts 4 | repo_url: oci.stackable.tech 5 | versions: 6 | commons: 0.0.0-dev 7 | secret: 0.0.0-dev 8 | listener: 0.0.0-dev 9 | trino: 0.0.0-dev 10 | -------------------------------------------------------------------------------- /examples/simple-trino-cluster-resource-limits.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: trino.stackable.tech/v1alpha1 3 | kind: TrinoCluster 4 | metadata: 5 | name: simple-trino 6 | spec: 7 | image: 8 | productVersion: "470" 9 | clusterConfig: 10 | catalogLabelSelector: {} 11 | coordinators: 12 | roleGroups: 13 | default: 14 | replicas: 1 15 | workers: 16 | roleGroups: 17 | default: 18 | replicas: 1 19 | config: 20 | resources: 21 | storage: 22 | data: 23 | capacity: 3Gi 24 | cpu: 25 | min: 300m 26 | max: "2" 27 | memory: 28 | limit: 3Gi 29 | -------------------------------------------------------------------------------- /examples/simple-trino-cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: trino.stackable.tech/v1alpha1 3 | kind: TrinoCluster 4 | metadata: 5 | name: simple-trino 6 | spec: 7 | image: 8 | productVersion: "470" 9 | clusterConfig: 10 | catalogLabelSelector: 11 | matchLabels: 12 | trino: simple-trino 13 | coordinators: 14 | roleGroups: 15 | default: 16 | replicas: 1 17 | workers: 18 | roleGroups: 19 | default: 20 | replicas: 1 21 | --- 22 | apiVersion: trino.stackable.tech/v1alpha1 23 | kind: TrinoCatalog 24 | metadata: 25 | name: hive 26 | labels: 27 | trino: simple-trino 28 | spec: 29 | connector: 30 | hive: 31 | metastore: 32 | configMap: simple-hive-derby 33 | --- 34 | apiVersion: v1 35 | kind: Secret 36 | metadata: 37 | name: hive-credentials 38 | type: Opaque 39 | stringData: 40 | username: APP 41 | password: mine 42 | --- 43 | apiVersion: hive.stackable.tech/v1alpha1 44 | kind: HiveCluster 45 | metadata: 46 | name: simple-hive-derby 47 | spec: 48 | image: 49 | productVersion: 4.0.0 50 | clusterConfig: 51 | database: 52 | connString: jdbc:derby:;databaseName=/tmp/metastore_db;create=true 53 | credentialsSecret: hive-credentials 54 | dbType: derby 55 | metastore: 56 | roleGroups: 57 | default: 58 | replicas: 1 59 | -------------------------------------------------------------------------------- /nix/README.md: -------------------------------------------------------------------------------- 1 | 5 | 6 | # Updating nix dependencies 7 | 8 | ## Run the following for an operator 9 | 10 | > [!NOTE] 11 | > We track the `master` branch of crate2nix as that is relatively up to date, but the releases are infrequent. 12 | 13 | ```shell 14 | niv update crate2nix 15 | niv update nixpkgs 16 | niv update beku.py -b X.Y.Z # Using the release tag 17 | ``` 18 | 19 | ### Test 20 | 21 | - Run make `regenerate-nix` to ensure crate2nix works 22 | - Run a smoke test to ensure beku.py works. 23 | - Run `make run-dev` to ensure nixpkgs are fine. 
24 | 25 | ## Update operator-templating 26 | 27 | Do the same as above, but from `template/` 28 | -------------------------------------------------------------------------------- /nix/meta.json: -------------------------------------------------------------------------------- 1 | {"operator": {"name": "trino-operator", "pretty_string": "Trino", "product_string": "trino", "url": "stackabletech/trino-operator.git"}} 2 | -------------------------------------------------------------------------------- /nix/sources.json: -------------------------------------------------------------------------------- 1 | { 2 | "beku.py": { 3 | "branch": "0.0.10", 4 | "description": "Test suite expander for Stackable Kuttl tests.", 5 | "homepage": null, 6 | "owner": "stackabletech", 7 | "repo": "beku.py", 8 | "rev": "fc75202a38529a4ac6776dd8a5dfee278d927f58", 9 | "sha256": "152yary0p11h87yabv74jnwkghsal7lx16az0qlzrzdrs6n5v8id", 10 | "type": "tarball", 11 | "url": "https://github.com/stackabletech/beku.py/archive/fc75202a38529a4ac6776dd8a5dfee278d927f58.tar.gz", 12 | "url_template": "https://github.com///archive/.tar.gz" 13 | }, 14 | "crate2nix": { 15 | "branch": "master", 16 | "description": "nix build file generator for rust crates", 17 | "homepage": "", 18 | "owner": "kolloch", 19 | "repo": "crate2nix", 20 | "rev": "be31feae9a82c225c0fd1bdf978565dc452a483a", 21 | "sha256": "14d0ymlrwk7dynv35qcw4xn0dylfpwjmf6f8znflbk2l6fk23l12", 22 | "type": "tarball", 23 | "url": "https://github.com/kolloch/crate2nix/archive/be31feae9a82c225c0fd1bdf978565dc452a483a.tar.gz", 24 | "url_template": "https://github.com///archive/.tar.gz" 25 | }, 26 | "nixpkgs": { 27 | "branch": "nixpkgs-unstable", 28 | "description": "Nix Packages collection", 29 | "homepage": "", 30 | "owner": "NixOS", 31 | "repo": "nixpkgs", 32 | "rev": "b1bebd0fe266bbd1820019612ead889e96a8fa2d", 33 | "sha256": "0fl2dji5whjydbxby9b7kqyqx9m4k44p72x1q28kfnx5m67nyqij", 34 | "type": "tarball", 35 | "url": "https://github.com/NixOS/nixpkgs/archive/b1bebd0fe266bbd1820019612ead889e96a8fa2d.tar.gz", 36 | "url_template": "https://github.com///archive/.tar.gz" 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "local>stackabletech/.github:renovate-config" 5 | ], 6 | "ignorePaths": [".github/workflows/build.yml", ".github/workflows/general_daily_security.yml", ".github/workflows/integration-test.yml", ".github/workflows/pr_pre-commit.yaml"] 7 | } 8 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | # DO NOT EDIT, this file is generated by operator-templating 2 | [toolchain] 3 | channel = "1.85.0" 4 | profile = "default" 5 | -------------------------------------------------------------------------------- /rust/operator-binary/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "stackable-trino-operator" 3 | description = "Stackable Operator for Trino" 4 | version.workspace = true 5 | authors.workspace = true 6 | license.workspace = true 7 | edition.workspace = true 8 | repository.workspace = true 9 | publish = false 10 | build = "build.rs" 11 | 12 | [dependencies] 13 | product-config.workspace = true 14 | stackable-operator.workspace = 
true 15 | 16 | anyhow.workspace = true 17 | async-trait.workspace = true 18 | clap.workspace = true 19 | const_format.workspace = true 20 | futures.workspace = true 21 | indoc.workspace = true 22 | openssl.workspace = true 23 | snafu.workspace = true 24 | strum.workspace = true 25 | tokio.workspace = true 26 | tracing.workspace = true 27 | serde_yaml.workspace = true 28 | serde.workspace = true 29 | serde_json.workspace = true 30 | 31 | [dev-dependencies] 32 | rstest.workspace = true 33 | serde_yaml.workspace = true 34 | 35 | [build-dependencies] 36 | built.workspace = true 37 | -------------------------------------------------------------------------------- /rust/operator-binary/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | built::write_built_file().unwrap(); 3 | } 4 | -------------------------------------------------------------------------------- /rust/operator-binary/src/authorization/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod opa; 2 | -------------------------------------------------------------------------------- /rust/operator-binary/src/catalog/black_hole.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use stackable_operator::client::Client; 3 | 4 | use super::{FromTrinoCatalogError, ToCatalogConfig, config::CatalogConfig}; 5 | use crate::crd::catalog::black_hole::BlackHoleConnector; 6 | 7 | pub const CONNECTOR_NAME: &str = "blackhole"; 8 | 9 | #[async_trait] 10 | impl ToCatalogConfig for BlackHoleConnector { 11 | async fn to_catalog_config( 12 | &self, 13 | catalog_name: &str, 14 | _catalog_namespace: Option, 15 | _client: &Client, 16 | _trino_version: u16, 17 | ) -> Result { 18 | // No additional properties needed 19 | Ok(CatalogConfig::new(catalog_name.to_string(), CONNECTOR_NAME)) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /rust/operator-binary/src/catalog/generic.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use stackable_operator::client::Client; 3 | 4 | use super::{FromTrinoCatalogError, ToCatalogConfig, config::CatalogConfig}; 5 | use crate::crd::catalog::generic::{GenericConnector, Property}; 6 | 7 | #[async_trait] 8 | impl ToCatalogConfig for GenericConnector { 9 | async fn to_catalog_config( 10 | &self, 11 | catalog_name: &str, 12 | _catalog_namespace: Option, 13 | _client: &Client, 14 | _trino_version: u16, 15 | ) -> Result { 16 | let connector_name = &self.connector_name; 17 | let mut config = CatalogConfig::new(catalog_name.to_string(), connector_name); 18 | 19 | for (property_name, property) in &self.properties { 20 | match property { 21 | Property::Value(value) => config.add_property(property_name, value), 22 | Property::ValueFromSecret { 23 | secret_key_selector, 24 | } => { 25 | config.add_env_property_from_secret(property_name, secret_key_selector.clone()) 26 | } 27 | Property::ValueFromConfigMap { 28 | config_map_key_selector, 29 | } => config.add_env_property_from_config_map( 30 | property_name, 31 | config_map_key_selector.clone(), 32 | ), 33 | } 34 | } 35 | 36 | Ok(config) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /rust/operator-binary/src/catalog/tpcds.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | 
use stackable_operator::client::Client; 3 | 4 | use super::{FromTrinoCatalogError, ToCatalogConfig, config::CatalogConfig}; 5 | use crate::crd::catalog::tpcds::TpcdsConnector; 6 | 7 | pub const CONNECTOR_NAME: &str = "tpcds"; 8 | 9 | #[async_trait] 10 | impl ToCatalogConfig for TpcdsConnector { 11 | async fn to_catalog_config( 12 | &self, 13 | catalog_name: &str, 14 | _catalog_namespace: Option, 15 | _client: &Client, 16 | _trino_version: u16, 17 | ) -> Result { 18 | // No additional properties needed 19 | Ok(CatalogConfig::new(catalog_name.to_string(), CONNECTOR_NAME)) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /rust/operator-binary/src/catalog/tpch.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use stackable_operator::client::Client; 3 | 4 | use super::{FromTrinoCatalogError, ToCatalogConfig, config::CatalogConfig}; 5 | use crate::crd::catalog::tpch::TpchConnector; 6 | 7 | pub const CONNECTOR_NAME: &str = "tpch"; 8 | 9 | #[async_trait] 10 | impl ToCatalogConfig for TpchConnector { 11 | async fn to_catalog_config( 12 | &self, 13 | catalog_name: &str, 14 | _catalog_namespace: Option, 15 | _client: &Client, 16 | _trino_version: u16, 17 | ) -> Result { 18 | // No additional properties needed 19 | Ok(CatalogConfig::new(catalog_name.to_string(), CONNECTOR_NAME)) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /rust/operator-binary/src/config/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod jvm; 2 | -------------------------------------------------------------------------------- /rust/operator-binary/src/crd/catalog/black_hole.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use stackable_operator::schemars::{self, JsonSchema}; 3 | 4 | #[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] 5 | #[serde(rename_all = "camelCase")] 6 | pub struct BlackHoleConnector {} 7 | -------------------------------------------------------------------------------- /rust/operator-binary/src/crd/catalog/commons.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use stackable_operator::schemars::{self, JsonSchema}; 3 | 4 | #[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] 5 | #[serde(rename_all = "camelCase")] 6 | pub struct MetastoreConnection { 7 | /// Name of the [discovery ConfigMap](DOCS_BASE_URL_PLACEHOLDER/concepts/service_discovery) providing information about the Hive metastore. 8 | pub config_map: String, 9 | } 10 | 11 | #[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] 12 | #[serde(rename_all = "camelCase")] 13 | pub struct HdfsConnection { 14 | /// Name of the [discovery ConfigMap](DOCS_BASE_URL_PLACEHOLDER/concepts/service_discovery) providing information about the HDFS cluster. 
15 | pub config_map: String, 16 | } 17 | -------------------------------------------------------------------------------- /rust/operator-binary/src/crd/catalog/delta_lake.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use stackable_operator::{ 3 | crd::s3, 4 | schemars::{self, JsonSchema}, 5 | }; 6 | 7 | use super::commons::{HdfsConnection, MetastoreConnection}; 8 | 9 | // This struct is similar to [`super::hive::HiveConnector`], but we do not `#[serde(flatten)]` it here, to avoid changing 10 | // stuff there and missing that these settings don't apply to other connectors (such as Iceberg or Delta Lake). 11 | #[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] 12 | #[serde(rename_all = "camelCase")] 13 | pub struct DeltaLakeConnector { 14 | /// Mandatory connection to a Hive Metastore, which will be used as a storage for metadata. 15 | pub metastore: MetastoreConnection, 16 | 17 | /// Connection to an S3 store. 18 | /// Please make sure that the underlying Hive metastore also has access to the S3 store. 19 | /// Learn more about S3 configuration in the [S3 concept docs](DOCS_BASE_URL_PLACEHOLDER/concepts/s3). 20 | pub s3: Option, 21 | 22 | /// Connection to an HDFS cluster. 23 | /// Please make sure that the underlying Hive metastore also has access to the HDFS. 24 | pub hdfs: Option, 25 | } 26 | -------------------------------------------------------------------------------- /rust/operator-binary/src/crd/catalog/google_sheet.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use stackable_operator::schemars::{self, JsonSchema}; 3 | 4 | #[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] 5 | #[serde(rename_all = "camelCase")] 6 | pub struct GoogleSheetConnector { 7 | /// The Secret containing the Google API JSON key file. 8 | /// The key used from the Secret is `credentials`. 9 | pub credentials_secret: String, 10 | /// Sheet ID of the spreadsheet, that contains the table mapping. 11 | pub metadata_sheet_id: String, 12 | /// Cache the contents of sheets. 13 | /// This is used to reduce Google Sheets API usage and latency. 14 | pub cache: Option, 15 | } 16 | 17 | #[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] 18 | #[serde(rename_all = "camelCase")] 19 | pub struct GoogleSheetConnectorCache { 20 | /// Maximum number of spreadsheets to cache, defaults to 1000. 21 | pub sheets_data_max_cache_size: Option, 22 | /// How long to cache spreadsheet data or metadata, defaults to `5m`. 23 | pub sheets_data_expire_after_write: Option, 24 | } 25 | -------------------------------------------------------------------------------- /rust/operator-binary/src/crd/catalog/hive.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use stackable_operator::{ 3 | crd::s3, 4 | schemars::{self, JsonSchema}, 5 | }; 6 | 7 | use super::commons::{HdfsConnection, MetastoreConnection}; 8 | 9 | #[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] 10 | #[serde(rename_all = "camelCase")] 11 | pub struct HiveConnector { 12 | /// Mandatory connection to a Hive Metastore, which will be used as a storage for metadata. 13 | pub metastore: MetastoreConnection, 14 | 15 | /// Connection to an S3 store. 16 | /// Please make sure that the underlying Hive metastore also has access to the S3 store. 
17 | /// Learn more about S3 configuration in the [S3 concept docs](DOCS_BASE_URL_PLACEHOLDER/concepts/s3). 18 | pub s3: Option, 19 | 20 | /// Connection to an HDFS cluster. 21 | /// Please make sure that the underlying Hive metastore also has access to the HDFS. 22 | pub hdfs: Option, 23 | } 24 | -------------------------------------------------------------------------------- /rust/operator-binary/src/crd/catalog/iceberg.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use stackable_operator::{ 3 | crd::s3, 4 | schemars::{self, JsonSchema}, 5 | }; 6 | 7 | use super::commons::{HdfsConnection, MetastoreConnection}; 8 | 9 | // This struct is similar to [`super::hive::HiveConnector`], but we do not `#[serde(flatten)]` it here, to avoid changing 10 | // stuff there and missing that these settings don't apply to other connectors (such as Iceberg or Delta Lake). 11 | #[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] 12 | #[serde(rename_all = "camelCase")] 13 | pub struct IcebergConnector { 14 | /// Mandatory connection to a Hive Metastore, which will be used as a storage for metadata. 15 | pub metastore: MetastoreConnection, 16 | 17 | /// Connection to an S3 store. 18 | /// Please make sure that the underlying Hive metastore also has access to the S3 store. 19 | /// Learn more about S3 configuration in the [S3 concept docs](DOCS_BASE_URL_PLACEHOLDER/concepts/s3). 20 | pub s3: Option, 21 | 22 | /// Connection to an HDFS cluster. 23 | /// Please make sure that the underlying Hive metastore also has access to the HDFS. 24 | pub hdfs: Option, 25 | } 26 | -------------------------------------------------------------------------------- /rust/operator-binary/src/crd/catalog/tpcds.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use stackable_operator::schemars::{self, JsonSchema}; 3 | 4 | #[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] 5 | #[serde(rename_all = "camelCase")] 6 | pub struct TpcdsConnector {} 7 | -------------------------------------------------------------------------------- /rust/operator-binary/src/crd/catalog/tpch.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use stackable_operator::schemars::{self, JsonSchema}; 3 | 4 | #[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] 5 | #[serde(rename_all = "camelCase")] 6 | pub struct TpchConnector {} 7 | -------------------------------------------------------------------------------- /rust/operator-binary/src/operations/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod graceful_shutdown; 2 | pub mod pdb; 3 | 4 | pub use graceful_shutdown::*; 5 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | # This file includes unstable features, so you need to run "cargo +nightly fmt" to format your code. 2 | # It's also ok to use the stable toolchain by simply running "cargo fmt", but using the nightly formatter is preferred.
3 | 4 | # https://doc.rust-lang.org/nightly/edition-guide/rust-2024/rustfmt-style-edition.html 5 | style_edition = "2024" 6 | imports_granularity = "Crate" 7 | group_imports = "StdExternalCrate" 8 | reorder_impl_items = true 9 | use_field_init_shorthand = true 10 | -------------------------------------------------------------------------------- /scripts/docs_templating.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # Reads a file with variables to insert into templates, and templates all .*.j2 files 5 | # in the 'docs' directory. 6 | # 7 | # dependencies 8 | # pip install jinja2-cli 9 | 10 | docs_dir="$(dirname "$0")/../docs" 11 | templating_vars_file="$docs_dir/templating_vars.yaml" 12 | 13 | # Check if files need templating 14 | if [[ -z $(find "$docs_dir" -name '*.j2') ]]; 15 | then 16 | echo "No files need templating, exiting." 17 | exit 18 | fi 19 | 20 | # Check if jinja2 is there 21 | if ! command -v jinja2 &> /dev/null 22 | then 23 | echo "jinja2 could not be found. Use 'pip install jinja2-cli' to install it." 24 | exit 1 25 | fi 26 | 27 | # Check if templating vars file exists 28 | if [[ ! -f "$templating_vars_file" ]]; 29 | then 30 | echo "$templating_vars_file does not exist, cannot start templating." 31 | fi 32 | 33 | find "$docs_dir" -name '*.j2' | 34 | while read -r file 35 | do 36 | new_file_name=${file%.j2} # Remove .j2 suffix 37 | echo "templating $new_file_name" 38 | jinja2 "$file" "$templating_vars_file" -o "$new_file_name" 39 | done 40 | 41 | echo "done" 42 | -------------------------------------------------------------------------------- /scripts/generate-manifests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script reads a Helm chart from deploy/helm/trino-operator and 3 | # generates manifest files into deploy/manifestss 4 | set -e 5 | 6 | tmp=$(mktemp -d ./manifests-XXXXX) 7 | 8 | helm template --output-dir "$tmp" \ 9 | --include-crds \ 10 | --name-template trino-operator \ 11 | deploy/helm/trino-operator 12 | 13 | for file in "$tmp"/trino-operator/*/*; do 14 | yq eval -i 'del(.. | select(has("app.kubernetes.io/managed-by")) | ."app.kubernetes.io/managed-by")' /dev/stdin < "$file" 15 | yq eval -i 'del(.. | select(has("helm.sh/chart")) | ."helm.sh/chart")' /dev/stdin < "$file" 16 | sed -i '/# Source: .*/d' "$file" 17 | done 18 | 19 | cp -r "$tmp"/trino-operator/*/* deploy/manifests/ 20 | 21 | rm -rf "$tmp" 22 | -------------------------------------------------------------------------------- /scripts/render_readme.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # Check if jinja2 is there 5 | if ! command -v jinja2 &> /dev/null 6 | then 7 | echo "jinja2 could not be found. Use 'pip install jinja2-cli' to install it." 8 | exit 1 9 | fi 10 | 11 | SCRIPT_DIR=$(dirname "$0") 12 | cd "$SCRIPT_DIR/../.readme" 13 | jinja2 README.md.j2 -o ../README.md 14 | cd .. 
15 | 16 | python3 scripts/ensure_one_trailing_newline.py README.md 17 | -------------------------------------------------------------------------------- /scripts/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ./scripts/run-tests "$@" 4 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | let 2 | self = import ./. {}; 3 | inherit (self) sources pkgs meta; 4 | 5 | beku = pkgs.callPackage (sources."beku.py" + "/beku.nix") {}; 6 | cargoDependencySetOfCrate = crate: [ crate ] ++ pkgs.lib.concatMap cargoDependencySetOfCrate (crate.dependencies ++ crate.buildDependencies); 7 | cargoDependencySet = pkgs.lib.unique (pkgs.lib.flatten (pkgs.lib.mapAttrsToList (crateName: crate: cargoDependencySetOfCrate crate.build) self.cargo.workspaceMembers)); 8 | in pkgs.mkShell rec { 9 | name = meta.operator.name; 10 | 11 | packages = with pkgs; [ 12 | ## cargo et-al 13 | rustup # this breaks pkg-config if it is in the nativeBuildInputs 14 | cargo-udeps 15 | 16 | ## Extra dependencies for use in a pure env (nix-shell --pure) 17 | ## These are mosuly useful for maintainers of this shell.nix 18 | ## to ensure all the dependencies are caught. 19 | # cacert 20 | # vim nvim nano 21 | ]; 22 | 23 | # derivation runtime dependencies 24 | buildInputs = pkgs.lib.concatMap (crate: crate.buildInputs) cargoDependencySet; 25 | 26 | # build time dependencies 27 | nativeBuildInputs = pkgs.lib.concatMap (crate: crate.nativeBuildInputs) cargoDependencySet ++ (with pkgs; [ 28 | beku 29 | docker 30 | gettext # for the proper envsubst 31 | git 32 | jq 33 | kind 34 | kubectl 35 | kubernetes-helm 36 | kuttl 37 | nix # this is implied, but needed in the pure env 38 | # tilt already defined in default.nix 39 | which 40 | yq-go 41 | ]); 42 | 43 | LIBCLANG_PATH = "${pkgs.libclang.lib}/lib"; 44 | BINDGEN_EXTRA_CLANG_ARGS = "-I${pkgs.glibc.dev}/include -I${pkgs.clang}/resource-root/include"; 45 | } 46 | -------------------------------------------------------------------------------- /tests/infrastructure.yaml: -------------------------------------------------------------------------------- 1 | cluster-ttl: 6h 2 | instance-size: medium 3 | disk: 100 4 | nodes: 3 5 | parallelism: 1 6 | -------------------------------------------------------------------------------- /tests/kuttl-test.yaml.jinja2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestSuite 4 | testDirs: 5 | {% for testcase in testinput.tests %} 6 | - ./tests/{{ testcase.name }} 7 | {% endfor %} 8 | 9 | startKIND: false 10 | suppress: ["events"] 11 | parallel: 2 12 | 13 | # The timeout (in seconds) is used when namespaces are created or 14 | # deleted, and, if not overridden, in TestSteps, TestAsserts, and 15 | # Commands. If not set, the timeout is 30 seconds by default. 16 | # 17 | # The deletion of a namespace can take a while until all resources, 18 | # especially PersistentVolumeClaims, are gracefully shut down. If the 19 | # timeout is reached in the meantime, even a successful test case is 20 | # considered a failure. 21 | # 22 | # For instance, the termination grace period of the Vector aggregator in 23 | # the logging tests is set to 60 seconds. 
If there are logs entries 24 | # which could not be forwarded yet to the external aggregator defined in 25 | # the VECTOR_AGGREGATOR environment variable, then the test aggregator 26 | # uses this period of time by trying to forward the events. In this 27 | # case, deleting a namespace with several Pods takes about 90 seconds. 28 | timeout: 300 29 | -------------------------------------------------------------------------------- /tests/release.yaml: -------------------------------------------------------------------------------- 1 | # Contains all operators required to run the test suite. 2 | --- 3 | releases: 4 | # Do not change the name of the release as it's referenced from run-tests 5 | tests: 6 | releaseDate: 1970-01-01 7 | description: Integration test 8 | products: 9 | commons: 10 | operatorVersion: 0.0.0-dev 11 | secret: 12 | operatorVersion: 0.0.0-dev 13 | listener: 14 | operatorVersion: 0.0.0-dev 15 | zookeeper: 16 | operatorVersion: 0.0.0-dev 17 | hdfs: 18 | operatorVersion: 0.0.0-dev 19 | hive: 20 | operatorVersion: 0.0.0-dev 21 | opa: 22 | operatorVersion: 0.0.0-dev 23 | trino: 24 | operatorVersion: 0.0.0-dev 25 | -------------------------------------------------------------------------------- /tests/templates/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stackabletech/trino-operator/f2f46fb5cb47938ac2f305b6ec4ce5bd8e915ba4/tests/templates/.gitkeep -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/00-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 5 | --- 6 | apiVersion: v1 7 | kind: ConfigMap 8 | metadata: 9 | name: vector-aggregator-discovery 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/00-install-vector-aggregator-discovery-configmap.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: vector-aggregator-discovery 7 | data: 8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/00-patch-ns.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if test_scenario['values']['openshift'] == 'true' %} 2 | # see https://github.com/stackabletech/issues/issues/566 3 | --- 4 | apiVersion: kuttl.dev/v1beta1 5 | kind: TestStep 6 | commands: 7 | - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' 8 | timeout: 120 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/00-rbac.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Role 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: use-integration-tests-scc 6 | rules: 7 | {% if test_scenario['values']['openshift'] == "true" %} 8 | - apiGroups: ["security.openshift.io"] 9 | resources: ["securitycontextconstraints"] 10 | resourceNames: ["privileged"] 11 | verbs: ["use"] 12 | {% endif 
%} 13 | --- 14 | apiVersion: v1 15 | kind: ServiceAccount 16 | metadata: 17 | name: integration-tests-sa 18 | --- 19 | kind: RoleBinding 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | metadata: 22 | name: use-integration-tests-scc 23 | subjects: 24 | - kind: ServiceAccount 25 | name: integration-tests-sa 26 | roleRef: 27 | kind: Role 28 | name: use-integration-tests-scc 29 | apiGroup: rbac.authorization.k8s.io 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/01-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: openldap 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | --- 14 | apiVersion: apps/v1 15 | kind: StatefulSet 16 | metadata: 17 | name: openldap-other 18 | status: 19 | readyReplicas: 1 20 | replicas: 1 21 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/01-install-openldap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | # We need to replace $NAMESPACE (by KUTTL) in the install-openldap.yaml 6 | - script: eval "echo \"$(cat install-openldap.yaml)\"" | kubectl apply -f - 7 | - script: eval "echo \"$(cat install-openldap-other.yaml)\"" | kubectl apply -f - 8 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/02-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | commands: 5 | # openldap 6 | - script: kubectl exec -n $NAMESPACE openldap-0 -- ldapsearch -H ldap://localhost:1389 -D cn=integrationtest,ou=users,dc=example,dc=org -w integrationtest -b ou=users,dc=example,dc=org > /dev/null 7 | - script: kubectl exec -n $NAMESPACE openldap-0 -- bash -c LDAPTLS_CACERT=/tls/ca.crt ldapsearch -Z -H ldaps://localhost:1636 -D cn=integrationtest,ou=users,dc=example,dc=org -w integrationtest -b ou=users,dc=example,dc=org > /dev/null 8 | # openldap-other 9 | - script: kubectl exec -n $NAMESPACE openldap-other-0 -- ldapsearch -H ldap://localhost:1389 -D cn=integrationtest-other,ou=users,dc=example,dc=org -w integrationtest-other -b ou=users,dc=example,dc=org > /dev/null 10 | - script: kubectl exec -n $NAMESPACE openldap-other-0 -- bash -c LDAPTLS_CACERT=/tls/ca.crt ldapsearch -Z -H ldaps://localhost:1636 -D cn=integrationtest-other,ou=users,dc=example,dc=org -w integrationtest-other -b ou=users,dc=example,dc=org > /dev/null 11 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/02-create-ldap-user.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | metadata: 5 | name: create-ldap-user 6 | commands: 7 | # openldap 8 | - script: kubectl cp -n $NAMESPACE ./create_ldap_user.sh openldap-0:/tmp 9 | - script: kubectl exec -n $NAMESPACE openldap-0 -- sh /tmp/create_ldap_user.sh 10 | # openldap-other 11 | - script: kubectl cp -n $NAMESPACE ./create_ldap_user_other.sh openldap-other-0:/tmp 12 | - script: kubectl exec -n $NAMESPACE openldap-other-0 -- sh /tmp/create_ldap_user_other.sh 13 | 
-------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/03-keycloak-realm.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: keycloak-realms 6 | data: 7 | keycloak-realm.json: |- 8 | { 9 | "realm": "stackable", 10 | "enabled": true, 11 | "users": [ 12 | { 13 | "enabled": true, 14 | "username": "test", 15 | "credentials": [ 16 | { 17 | "type": "password", 18 | "value": "test" 19 | } 20 | ], 21 | "realmRoles": [ 22 | "user" 23 | ] 24 | } 25 | ], 26 | "roles": { 27 | "realm": [ 28 | { 29 | "name": "user", 30 | "description": "User privileges" 31 | } 32 | ] 33 | }, 34 | "clients": [ 35 | { 36 | "clientId": "trino", 37 | "enabled": true, 38 | "clientAuthenticatorType": "client-secret", 39 | "secret": "trino-client-secret", 40 | "redirectUris": [ 41 | "*" 42 | ], 43 | "webOrigins": [ 44 | "*" 45 | ], 46 | "standardFlowEnabled": true, 47 | "protocol": "openid-connect" 48 | } 49 | ] 50 | } 51 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/04-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | --- 6 | apiVersion: apps/v1 7 | kind: Deployment 8 | metadata: 9 | labels: 10 | app: keycloak 11 | name: keycloak 12 | status: 13 | readyReplicas: 1 14 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/05-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: test-trino 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/05-install-test-trino.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: test-trino 6 | labels: 7 | app: test-trino 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: test-trino 13 | template: 14 | metadata: 15 | labels: 16 | app: test-trino 17 | spec: 18 | serviceAccount: integration-tests-sa 19 | {% if test_scenario['values']['openshift'] == 'false' %} 20 | securityContext: 21 | fsGroup: 1000 22 | {% endif %} 23 | containers: 24 | - name: test-trino 25 | image: oci.stackable.tech/sdp/testing-tools:0.2.0-stackable0.0.0-dev 26 | command: ["sleep", "infinity"] 27 | volumeMounts: 28 | - name: tls 29 | mountPath: /stackable/tls 30 | env: 31 | - name: REQUESTS_CA_BUNDLE 32 | value: /stackable/tls/ca.crt 33 | volumes: 34 | - name: tls 35 | csi: 36 | driver: secrets.stackable.tech 37 | volumeAttributes: 38 | secrets.stackable.tech/class: tls 39 | secrets.stackable.tech/scope: pod 40 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/11-create-authentication-classes.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | metadata: 5 | name: create-ldap-user 6 | commands: 7 | # We need to replace $NAMESPACE (by KUTTL) in the 
create-authentication-classes.yaml(.j2) 8 | - script: eval "echo \"$(cat create-authentication-classes.yaml)\"" | kubectl apply -f - 9 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/12-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 1200 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: trino-coordinator-default 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | --- 14 | apiVersion: apps/v1 15 | kind: StatefulSet 16 | metadata: 17 | name: trino-worker-default 18 | status: 19 | readyReplicas: 1 20 | replicas: 1 21 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/12-create-trino-cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | metadata: 5 | name: create-trino-cluster 6 | commands: 7 | # We need to replace $NAMESPACE (by KUTTL) in the create-trino-cluster.yaml(.j2) 8 | - script: eval "echo \"$(cat create-trino-cluster.yaml)\"" | kubectl apply -f - 9 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/20-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | commands: 6 | # file 7 | - script: kubectl exec -n $NAMESPACE test-trino-0 -- python /tmp/check-active-workers.py -u test_user_1 -p test_user_1 -n $NAMESPACE -w 1 8 | - script: kubectl exec -n $NAMESPACE test-trino-0 -- python /tmp/check-active-workers.py -u test_user_2_other -p test_user_2_other -n $NAMESPACE -w 1 9 | # ldap 10 | - script: kubectl exec -n $NAMESPACE test-trino-0 -- python /tmp/check-active-workers.py -u integrationtest -p integrationtest -n $NAMESPACE -w 1 11 | - script: kubectl exec -n $NAMESPACE test-trino-0 -- python /tmp/check-active-workers.py -u integrationtest-other -p integrationtest-other -n $NAMESPACE -w 1 12 | # oidc/oauth2 13 | - script: kubectl exec -n $NAMESPACE test-trino-0 -- python /tmp/check-oauth-login.py https://trino-coordinator-default.$NAMESPACE.svc.cluster.local:8443/ui/ 14 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/20-test-trino.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: kubectl cp -n $NAMESPACE ./check-active-workers.py test-trino-0:/tmp 6 | - script: kubectl cp -n $NAMESPACE ./check-oauth-login.py test-trino-0:/tmp 7 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/30-hot-reloading-add-user.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | # We need to replace $NAMESPACE (by KUTTL) in the add_user.yaml(.j2) 6 | - script: eval "echo \"$(cat add_user.yaml)\"" | kubectl replace -f - 7 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/31-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 
apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | commands: 6 | # file 7 | # new user? 8 | - script: kubectl exec -n $NAMESPACE test-trino-0 -- python /tmp/check-active-workers.py -u hot_reloaded -p hot_reloaded -n $NAMESPACE -w 1 9 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/32-hot-reloading-remove-user.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | # We need to replace $NAMESPACE (by KUTTL) in the remove_user.yaml(.j2) 6 | - script: eval "echo \"$(cat remove_user.yaml)\"" | kubectl replace -f - 7 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/33-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | commands: 6 | # We use the check-active-workers script for the login. Since we want to wait until we can no longer log in, 7 | # we invert the return value at the end. 8 | - script: kubectl exec -n $NAMESPACE test-trino-0 -- python /tmp/check-active-workers.py -u hot_reloaded -p hot_reloaded -n $NAMESPACE -w 1; if [ $? -eq 0 ]; then exit 1; fi 9 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/add_user.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: password-file-users 6 | namespace: $NAMESPACE 7 | stringData: 8 | test_user_1: test_user_1 9 | test_user_2: test_user_2 10 | hot_reloaded: hot_reloaded 11 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/check-active-workers.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import trino 3 | import argparse 4 | import sys 5 | 6 | if not sys.warnoptions: 7 | import warnings 8 | warnings.simplefilter("ignore") 9 | 10 | 11 | def get_connection(username, password, namespace): 12 | host = 'trino-coordinator-default-0.trino-coordinator-default.'
+ namespace + '.svc.cluster.local' 13 | conn = trino.dbapi.connect( 14 | host=host, 15 | port=8443, 16 | user=username, 17 | http_scheme='https', 18 | auth=trino.auth.BasicAuthentication(username, password), 19 | ) 20 | conn._http_session.verify = False 21 | return conn 22 | 23 | 24 | if __name__ == '__main__': 25 | # Construct an argument parser 26 | all_args = argparse.ArgumentParser() 27 | 28 | # Add arguments to the parser 29 | all_args.add_argument("-u", "--user", required=True, 30 | help="Username to connect as") 31 | all_args.add_argument("-p", "--password", required=True, 32 | help="Password for the user") 33 | all_args.add_argument("-n", "--namespace", required=True, 34 | help="Namespace the test is running in") 35 | all_args.add_argument("-w", "--workers", required=True, 36 | help="Expected number of workers to be present") 37 | 38 | args = vars(all_args.parse_args()) 39 | 40 | expected_workers = args['workers'] 41 | conn = get_connection(args['user'], args['password'], args['namespace']) 42 | 43 | cursor = conn.cursor() 44 | cursor.execute("SELECT COUNT(*) as nodes FROM system.runtime.nodes WHERE coordinator=false AND state='active'") 45 | 46 | (active_workers,) = cursor.fetchone() 47 | 48 | if int(active_workers) != int(expected_workers): 49 | print("Mismatch: [expected/active] workers [" + str(expected_workers) + "/" + str(active_workers) + "]") 50 | exit(-1) 51 | 52 | print("Test check-active-workers.py succeeded!") 53 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/check-oauth-login.py: -------------------------------------------------------------------------------- 1 | """Perform the OpenID Connect authentication flow to access a given page. 2 | 3 | This script opens a given URL and expects to be redirected to a 4 | Keycloak login page. It extracts the login action from the HTML content 5 | of the Keycloak page and posts the credentials of a test user to it. 6 | Finally, it tests that Keycloak redirects back to the original page.
7 | """ 8 | import logging 9 | import requests 10 | import sys 11 | import urllib3 12 | from bs4 import BeautifulSoup 13 | 14 | 15 | def test_login_flow(login_url): 16 | session = requests.Session() 17 | 18 | result = session.get(login_url) 19 | 20 | result.raise_for_status() 21 | 22 | html = BeautifulSoup(result.text, 'html.parser') 23 | authenticate_url = html.form['action'] 24 | result = session.post(authenticate_url, data={ 25 | 'username': "test", 26 | 'password': "test" 27 | }) 28 | 29 | result.raise_for_status() 30 | 31 | assert result.url == login_url, \ 32 | "Redirection to the Trino UI expected" 33 | 34 | 35 | def main(): 36 | logging.basicConfig(level=logging.DEBUG) 37 | # disable a warning (InsecureRequestWarning) because it's just noise here 38 | urllib3.disable_warnings() 39 | 40 | login_url = sys.argv[1] 41 | 42 | assert len(login_url) > 0 43 | 44 | test_login_flow(login_url) 45 | 46 | logging.info("Success!") 47 | 48 | 49 | if __name__ == "__main__": 50 | main() 51 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/create-trino-cluster.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: trino-ldap-bind-secret 6 | namespace: $NAMESPACE 7 | labels: 8 | secrets.stackable.tech/class: trino-ldap-bind-$NAMESPACE 9 | stringData: 10 | user: cn=admin,dc=example,dc=org 11 | password: admin 12 | --- 13 | apiVersion: trino.stackable.tech/v1alpha1 14 | kind: TrinoCluster 15 | metadata: 16 | name: trino 17 | namespace: $NAMESPACE 18 | spec: 19 | image: 20 | {% if test_scenario['values']['trino-latest'].find(",") > 0 %} 21 | custom: "{{ test_scenario['values']['trino-latest'].split(',')[1] }}" 22 | productVersion: "'"{{ test_scenario['values']['trino-latest'].split(',')[0] }}"'" 23 | {% else %} 24 | productVersion: "'"{{ test_scenario['values']['trino-latest'] }}"'" 25 | {% endif %} 26 | pullPolicy: IfNotPresent 27 | clusterConfig: 28 | catalogLabelSelector: {} 29 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 30 | vectorAggregatorConfigMapName: vector-aggregator-discovery 31 | {% endif %} 32 | authentication: 33 | - authenticationClass: oidc-$NAMESPACE 34 | oidc: 35 | clientCredentialsSecret: oidc-secret 36 | - authenticationClass: password-$NAMESPACE 37 | - authenticationClass: password-other-$NAMESPACE 38 | - authenticationClass: ldap-$NAMESPACE 39 | - authenticationClass: ldap-other-$NAMESPACE 40 | coordinators: 41 | config: 42 | logging: 43 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 44 | roleGroups: 45 | default: 46 | replicas: 1 47 | workers: 48 | config: 49 | logging: 50 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 51 | roleGroups: 52 | default: 53 | replicas: 1 54 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/create_ldap_user.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # To check the existing users 4 | # ldapsearch -H ldap://localhost:1389 -D cn=admin,dc=example,dc=org -w admin -b ou=users,dc=example,dc=org 5 | 6 | # To check the new user 7 | # ldapsearch -H ldap://localhost:1389 -D cn=integrationtest,ou=users,dc=example,dc=org -w integrationtest -b ou=users,dc=example,dc=org 8 | 9 | cat << 'EOF' | ldapadd -H ldap://localhost:1389 -D cn=admin,dc=example,dc=org -w admin 10 | dn: 
cn=integrationtest,ou=users,dc=example,dc=org 11 | objectClass: inetOrgPerson 12 | objectClass: posixAccount 13 | objectClass: shadowAccount 14 | cn: integrationtest 15 | uid: integrationtest 16 | givenName: Stackable 17 | sn: Integration-Test 18 | mail: integrationtest@stackable.de 19 | uidNumber: 16842 20 | gidNumber: 100 21 | homeDirectory: /home/integrationtest 22 | loginShell: /bin/bash 23 | userPassword: {crypt}x 24 | shadowLastChange: 0 25 | shadowMax: 0 26 | shadowWarning: 0 27 | EOF 28 | 29 | ldappasswd -H ldap://localhost:1389 -D cn=admin,dc=example,dc=org -w admin -s integrationtest "cn=integrationtest,ou=users,dc=example,dc=org" 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/create_ldap_user_other.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # To check the existing users 4 | # ldapsearch -H ldap://localhost:1389 -D cn=admin,dc=example,dc=org -w admin -b ou=users,dc=example,dc=org 5 | 6 | # To check the new user 7 | # ldapsearch -H ldap://localhost:1389 -D cn=integrationtest,ou=users,dc=example,dc=org -w integrationtest -b ou=users,dc=example,dc=org 8 | 9 | cat << 'EOF' | ldapadd -H ldap://localhost:1389 -D cn=admin,dc=example,dc=org -w admin 10 | dn: cn=integrationtest-other,ou=users,dc=example,dc=org 11 | objectClass: inetOrgPerson 12 | objectClass: posixAccount 13 | objectClass: shadowAccount 14 | cn: integrationtest-other 15 | uid: integrationtest-other 16 | givenName: Stackable 17 | sn: Integration-Test 18 | mail: integrationtest-other@stackable.de 19 | uidNumber: 16842 20 | gidNumber: 100 21 | homeDirectory: /home/integrationtest-other 22 | loginShell: /bin/bash 23 | userPassword: {crypt}x 24 | shadowLastChange: 0 25 | shadowMax: 0 26 | shadowWarning: 0 27 | EOF 28 | 29 | ldappasswd -H ldap://localhost:1389 -D cn=admin,dc=example,dc=org -w admin -s integrationtest-other "cn=integrationtest-other,ou=users,dc=example,dc=org" 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/authentication/remove_user.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: password-file-users 6 | namespace: $NAMESPACE 7 | stringData: 8 | test_user_1: test_user_1 9 | test_user_2: test_user_2 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/00-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 5 | --- 6 | apiVersion: v1 7 | kind: ConfigMap 8 | metadata: 9 | name: vector-aggregator-discovery 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/00-install-vector-aggregator-discovery-configmap.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: vector-aggregator-discovery 7 | data: 8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/00-patch-ns.yaml.j2: 
-------------------------------------------------------------------------------- 1 | {% if test_scenario['values']['openshift'] == 'true' %} 2 | # see https://github.com/stackabletech/issues/issues/566 3 | --- 4 | apiVersion: kuttl.dev/v1beta1 5 | kind: TestStep 6 | commands: 7 | - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' 8 | timeout: 120 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/10-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 1200 5 | commands: 6 | - script: kubectl -n $NAMESPACE wait --for=condition=available trinoclusters.trino.stackable.tech/trino --timeout 1201s 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: trino-coordinator-default 12 | status: 13 | readyReplicas: 1 14 | replicas: 1 15 | --- 16 | apiVersion: apps/v1 17 | kind: StatefulSet 18 | metadata: 19 | name: trino-worker-default 20 | status: 21 | readyReplicas: 1 22 | replicas: 1 23 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/10-install-trino.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: trino.stackable.tech/v1alpha1 3 | kind: TrinoCluster 4 | metadata: 5 | name: trino 6 | spec: 7 | image: 8 | {% if test_scenario['values']['trino-latest'].find(",") > 0 %} 9 | custom: "{{ test_scenario['values']['trino-latest'].split(',')[1] }}" 10 | productVersion: "{{ test_scenario['values']['trino-latest'].split(',')[0] }}" 11 | {% else %} 12 | productVersion: "{{ test_scenario['values']['trino-latest'] }}" 13 | {% endif %} 14 | pullPolicy: IfNotPresent 15 | clusterConfig: 16 | catalogLabelSelector: {} 17 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 18 | vectorAggregatorConfigMapName: vector-aggregator-discovery 19 | {% endif %} 20 | coordinators: 21 | config: 22 | logging: 23 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 24 | roleGroups: 25 | default: 26 | replicas: 1 27 | workers: 28 | config: 29 | logging: 30 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 31 | roleGroups: 32 | default: 33 | replicas: 1 34 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/20-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | commands: 6 | - script: kubectl -n $NAMESPACE wait --for=condition=stopped trinoclusters.trino.stackable.tech/trino --timeout 301s 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: trino-coordinator-default 12 | status: 13 | replicas: 0 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: trino-worker-default 19 | status: 20 | replicas: 0 21 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/20-stop-trino.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: trino.stackable.tech/v1alpha1 3 | kind: TrinoCluster 4 | metadata: 5 | name: trino 6 | spec: 7 | image: 8 | {% if 
test_scenario['values']['trino-latest'].find(",") > 0 %} 9 | custom: "{{ test_scenario['values']['trino-latest'].split(',')[1] }}" 10 | productVersion: "{{ test_scenario['values']['trino-latest'].split(',')[0] }}" 11 | {% else %} 12 | productVersion: "{{ test_scenario['values']['trino-latest'] }}" 13 | {% endif %} 14 | pullPolicy: IfNotPresent 15 | clusterConfig: 16 | catalogLabelSelector: {} 17 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 18 | vectorAggregatorConfigMapName: vector-aggregator-discovery 19 | {% endif %} 20 | clusterOperation: 21 | stopped: true 22 | reconciliationPaused: false 23 | coordinators: 24 | config: 25 | logging: 26 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 27 | roleGroups: 28 | default: 29 | replicas: 1 30 | workers: 31 | config: 32 | logging: 33 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 34 | roleGroups: 35 | default: 36 | replicas: 1 37 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/30-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | commands: 6 | - script: kubectl -n $NAMESPACE wait --for=condition=reconciliationPaused trinoclusters.trino.stackable.tech/trino --timeout 601s 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: trino-coordinator-default 12 | status: 13 | replicas: 0 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: trino-worker-default 19 | status: 20 | replicas: 0 21 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/30-pause-trino.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: trino.stackable.tech/v1alpha1 3 | kind: TrinoCluster 4 | metadata: 5 | name: trino 6 | spec: 7 | image: 8 | {% if test_scenario['values']['trino-latest'].find(",") > 0 %} 9 | custom: "{{ test_scenario['values']['trino-latest'].split(',')[1] }}" 10 | productVersion: "{{ test_scenario['values']['trino-latest'].split(',')[0] }}" 11 | {% else %} 12 | productVersion: "{{ test_scenario['values']['trino-latest'] }}" 13 | {% endif %} 14 | pullPolicy: IfNotPresent 15 | clusterConfig: 16 | catalogLabelSelector: {} 17 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 18 | vectorAggregatorConfigMapName: vector-aggregator-discovery 19 | {% endif %} 20 | clusterOperation: 21 | stopped: false 22 | reconciliationPaused: true 23 | coordinators: 24 | config: 25 | logging: 26 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 27 | roleGroups: 28 | default: 29 | replicas: 1 30 | workers: 31 | config: 32 | logging: 33 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 34 | roleGroups: 35 | default: 36 | replicas: 1 37 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/40-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | commands: 6 | - script: kubectl -n $NAMESPACE wait --for=condition=available trinoclusters.trino.stackable.tech/trino --timeout 601s 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: trino-coordinator-default 12 | status: 13 | readyReplicas: 1 
14 | replicas: 1 15 | --- 16 | apiVersion: apps/v1 17 | kind: StatefulSet 18 | metadata: 19 | name: trino-worker-default 20 | status: 21 | readyReplicas: 1 22 | replicas: 1 23 | -------------------------------------------------------------------------------- /tests/templates/kuttl/cluster-operation/40-restart-trino.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: trino.stackable.tech/v1alpha1 3 | kind: TrinoCluster 4 | metadata: 5 | name: trino 6 | spec: 7 | image: 8 | {% if test_scenario['values']['trino-latest'].find(",") > 0 %} 9 | custom: "{{ test_scenario['values']['trino-latest'].split(',')[1] }}" 10 | productVersion: "{{ test_scenario['values']['trino-latest'].split(',')[0] }}" 11 | {% else %} 12 | productVersion: "{{ test_scenario['values']['trino-latest'] }}" 13 | {% endif %} 14 | pullPolicy: IfNotPresent 15 | clusterConfig: 16 | catalogLabelSelector: {} 17 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 18 | vectorAggregatorConfigMapName: vector-aggregator-discovery 19 | {% endif %} 20 | clusterOperation: 21 | stopped: false 22 | reconciliationPaused: false 23 | coordinators: 24 | config: 25 | logging: 26 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 27 | roleGroups: 28 | default: 29 | replicas: 1 30 | workers: 31 | config: 32 | logging: 33 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 34 | roleGroups: 35 | default: 36 | replicas: 1 37 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/00-patch-ns.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if test_scenario['values']['openshift'] == 'true' %} 2 | # see https://github.com/stackabletech/issues/issues/566 3 | --- 4 | apiVersion: kuttl.dev/v1beta1 5 | kind: TestStep 6 | commands: 7 | - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' 8 | timeout: 120 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/00-rbac.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Role 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: use-integration-tests-scc 6 | rules: 7 | {% if test_scenario['values']['openshift'] == "true" %} 8 | - apiGroups: ["security.openshift.io"] 9 | resources: ["securitycontextconstraints"] 10 | resourceNames: ["privileged"] 11 | verbs: ["use"] 12 | {% endif %} 13 | --- 14 | apiVersion: v1 15 | kind: ServiceAccount 16 | metadata: 17 | name: integration-tests-sa 18 | --- 19 | kind: RoleBinding 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | metadata: 22 | name: use-integration-tests-scc 23 | subjects: 24 | - kind: ServiceAccount 25 | name: integration-tests-sa 26 | roleRef: 27 | kind: Role 28 | name: use-integration-tests-scc 29 | apiGroup: rbac.authorization.k8s.io 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/01-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: trino-vector-aggregator 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | --- 14 | apiVersion: v1 15 | kind: ConfigMap 16 | 
metadata: 17 | name: trino-vector-aggregator-discovery 18 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/01-install-trino-vector-aggregator.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: >- 6 | helm install trino-vector-aggregator vector 7 | --namespace $NAMESPACE 8 | --version 0.42.1 9 | --repo https://helm.vector.dev 10 | --values trino-vector-aggregator-values.yaml 11 | --- 12 | apiVersion: v1 13 | kind: ConfigMap 14 | metadata: 15 | name: trino-vector-aggregator-discovery 16 | data: 17 | ADDRESS: trino-vector-aggregator:6123 18 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/02-create-configmap-with-prepared-logs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: > 6 | kubectl create configmap prepared-logs 7 | --from-file=prepared-logs.airlift.json 8 | --namespace=$NAMESPACE 9 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/03-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: test-trino-coordinator-automatic-log-config 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | --- 14 | apiVersion: apps/v1 15 | kind: StatefulSet 16 | metadata: 17 | name: test-trino-worker-automatic-log-config 18 | status: 19 | readyReplicas: 1 20 | replicas: 1 21 | --- 22 | apiVersion: apps/v1 23 | kind: StatefulSet 24 | metadata: 25 | name: test-trino-coordinator-custom-log-config 26 | status: 27 | readyReplicas: 1 28 | replicas: 1 29 | --- 30 | apiVersion: apps/v1 31 | kind: StatefulSet 32 | metadata: 33 | name: test-trino-worker-custom-log-config 34 | status: 35 | readyReplicas: 1 36 | replicas: 1 37 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/04-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: trino-test-runner 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/04-install-trino-test-runner.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: trino-test-runner 6 | labels: 7 | app: trino-test-runner 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: trino-test-runner 13 | template: 14 | metadata: 15 | labels: 16 | app: trino-test-runner 17 | spec: 18 | serviceAccount: integration-tests-sa 19 | containers: 20 | - name: trino-test-runner 21 | image: oci.stackable.tech/sdp/testing-tools:0.2.0-stackable0.0.0-dev 22 | stdin: true 23 | tty: true 24 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/05-assert.yaml: -------------------------------------------------------------------------------- 
1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | commands: 5 | - script: >- 6 | kubectl exec --namespace=$NAMESPACE trino-test-runner-0 -- 7 | python /tmp/test_log_aggregation.py -n $NAMESPACE 8 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/05-test-log-aggregation.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: | 6 | kubectl cp ./test_log_aggregation.py $NAMESPACE/trino-test-runner-0:/tmp 7 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/prepared-logs.airlift.json: -------------------------------------------------------------------------------- 1 | {"timestamp":"2024-01-01T00:00:00.000000000Z","level":"INFO","thread":"main","logger":"TestLogger","message":"Valid log event"} 2 | {"timestamp":"2024-01-01T00:00:00.000000000Z","level":"INFO","thread":"main","logger":"TestLogger","message":"Valid log event with a stack trace","stacktrace":"TestException"} 3 | {"timestamp":"2024-01-01T00:00:00.000000000Z","level":"INFO","logger":"TestLogger","message":"Valid log event without a thread"} 4 | {"level":"INFO","thread":"main","logger":"TestLogger","message":"Invalid log event without a timestamp"} 5 | {"timestamp":"unparsable timestamp","level":"INFO","thread":"main","logger":"TestLogger","message":"Invalid log event with an unparsable timestamp"} 6 | {"timestamp":"2024-01-01T00:00:00.000000000Z","level":"INFO","thread":"main","message":"Invalid log event without a logger"} 7 | {"timestamp":"2024-01-01T00:00:00.000000000Z","level":"CRITICAL","thread":"main","logger":"TestLogger","message":"Invalid log event with an unknown level"} 8 | {"timestamp":"2024-01-01T00:00:00.000000000Z","thread":"main","logger":"TestLogger","message":"Invalid log event without a level"} 9 | {"timestamp":"2024-01-01T00:00:00.000000000Z","level":"INFO","thread":"main","logger":"TestLogger"} 10 | {"timestamp":"2024-01-01T00:00:00.000000000Z","level":"INFO","thread":"main","logger":"TestLogger","message":"Unparsable log event",... 11 | "true" 12 | -------------------------------------------------------------------------------- /tests/templates/kuttl/logging/test_log_aggregation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import requests 3 | 4 | 5 | def check_sent_events(): 6 | response = requests.post( 7 | 'http://trino-vector-aggregator:8686/graphql', 8 | json={ 9 | 'query': """ 10 | { 11 | transforms(first:100) { 12 | nodes { 13 | componentId 14 | metrics { 15 | sentEventsTotal { 16 | sentEventsTotal 17 | } 18 | } 19 | } 20 | } 21 | } 22 | """ 23 | } 24 | ) 25 | 26 | assert response.status_code == 200, \ 27 | 'Cannot access the API of the vector aggregator.' 28 | 29 | result = response.json() 30 | 31 | transforms = result['data']['transforms']['nodes'] 32 | for transform in transforms: 33 | sentEvents = transform['metrics']['sentEventsTotal'] 34 | componentId = transform['componentId'] 35 | 36 | if componentId == 'filteredInvalidEvents': 37 | assert sentEvents is None or \ 38 | sentEvents['sentEventsTotal'] == 0, \ 39 | 'Invalid log events were sent.' 40 | else: 41 | assert sentEvents is not None and \ 42 | sentEvents['sentEventsTotal'] > 0, \ 43 | f'No events were sent in "{componentId}".' 
44 | 45 | 46 | if __name__ == '__main__': 47 | check_sent_events() 48 | print('Test successful!') 49 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/00-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 5 | --- 6 | apiVersion: v1 7 | kind: ConfigMap 8 | metadata: 9 | name: vector-aggregator-discovery 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/00-install-vector-aggregator-discovery-configmap.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: vector-aggregator-discovery 7 | data: 8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/00-limit-range.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: LimitRange 4 | metadata: 5 | name: limit-request-ratio 6 | spec: 7 | limits: 8 | - type: "Container" 9 | maxLimitRequestRatio: 10 | cpu: 5 11 | memory: 1 12 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/00-patch-ns.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if test_scenario['values']['openshift'] == 'true' %} 2 | # see https://github.com/stackabletech/issues/issues/566 3 | --- 4 | apiVersion: kuttl.dev/v1beta1 5 | kind: TestStep 6 | commands: 7 | - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' 8 | timeout: 120 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/00-rbac.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Role 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: use-integration-tests-scc 6 | rules: 7 | {% if test_scenario['values']['openshift'] == "true" %} 8 | - apiGroups: ["security.openshift.io"] 9 | resources: ["securitycontextconstraints"] 10 | resourceNames: ["privileged"] 11 | verbs: ["use"] 12 | {% endif %} 13 | --- 14 | apiVersion: v1 15 | kind: ServiceAccount 16 | metadata: 17 | name: integration-tests-sa 18 | --- 19 | kind: RoleBinding 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | metadata: 22 | name: use-integration-tests-scc 23 | subjects: 24 | - kind: ServiceAccount 25 | name: integration-tests-sa 26 | roleRef: 27 | kind: Role 28 | name: use-integration-tests-scc 29 | apiGroup: rbac.authorization.k8s.io 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/01-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: postgresql 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | 
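The TestAssert just above (opa-authorization/01-assert.yaml) contains no script: KUTTL simply polls the test namespace until a StatefulSet named postgresql reports the listed status fields, and fails the step after the 600-second timeout. A rough manual equivalent for debugging only (a sketch, not part of the test suite; $NAMESPACE stands for the KUTTL-generated namespace):

  kubectl get statefulset postgresql -n "$NAMESPACE" -o jsonpath='{.status.readyReplicas}/{.status.replicas}'
  # the assertion is satisfied once this prints 1/1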
-------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/01-install-postgres.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | timeout: 300 5 | commands: 6 | - script: >- 7 | helm upgrade postgresql 8 | --install 9 | --version=12.5.6 10 | --namespace $NAMESPACE 11 | -f helm-bitnami-postgresql-values.yaml 12 | --repo https://charts.bitnami.com/bitnami postgresql 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/02-install-minio.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: >- 6 | helm install minio 7 | --namespace $NAMESPACE 8 | --version 15.0.7 9 | -f helm-bitnami-minio-values.yaml 10 | oci://registry-1.docker.io/bitnamicharts/minio 11 | timeout: 240 12 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/03-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 900 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: hive-metastore-default 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/03-install-hive.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: hive.stackable.tech/v1alpha1 3 | kind: HiveCluster 4 | metadata: 5 | name: hive 6 | spec: 7 | image: 8 | productVersion: "{{ test_scenario['values']['hive-latest'] }}" 9 | pullPolicy: IfNotPresent 10 | clusterConfig: 11 | database: 12 | connString: jdbc:postgresql://postgresql:5432/hive 13 | credentialsSecret: postgres-credentials 14 | dbType: postgres 15 | s3: 16 | reference: minio 17 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 18 | vectorAggregatorConfigMapName: vector-aggregator-discovery 19 | {% endif %} 20 | metastore: 21 | config: 22 | logging: 23 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 24 | roleGroups: 25 | default: 26 | replicas: 1 27 | --- 28 | apiVersion: v1 29 | kind: Secret 30 | metadata: 31 | name: postgres-credentials 32 | type: Opaque 33 | stringData: 34 | username: hive 35 | password: hive 36 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/04-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | --- 6 | apiVersion: apps/v1 7 | kind: Deployment 8 | metadata: 9 | name: keycloak 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/10-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | commands: 6 | - script: kubectl -n $NAMESPACE rollout status daemonset opa-server-default --timeout 300s 7 | -------------------------------------------------------------------------------- 
/tests/templates/kuttl/opa-authorization/10-install-opa.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: | 6 | kubectl apply -n $NAMESPACE -f - < 0 %} 15 | custom: "{{ test_scenario['values']['opa'].split(',')[1] }}" 16 | productVersion: "{{ test_scenario['values']['opa'].split(',')[0] }}" 17 | {% else %} 18 | productVersion: "{{ test_scenario['values']['opa'] }}" 19 | {% endif %} 20 | pullPolicy: IfNotPresent 21 | clusterConfig: 22 | userInfo: 23 | backend: 24 | keycloak: 25 | hostname: keycloak.$NAMESPACE.svc.cluster.local 26 | port: 8443 27 | tls: 28 | verification: 29 | server: 30 | caCert: 31 | secretClass: keycloak-tls-$NAMESPACE 32 | clientCredentialsSecret: user-info-fetcher-client-credentials 33 | adminRealm: my-dataspace 34 | userRealm: my-dataspace 35 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 36 | vectorAggregatorConfigMapName: vector-aggregator-discovery 37 | {% endif %} 38 | servers: 39 | config: 40 | logging: 41 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 42 | containers: 43 | opa: 44 | loggers: 45 | decision: 46 | level: INFO 47 | roleGroups: 48 | default: {} 49 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/11-opa-rego.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: | 6 | # Trino Rego rules provided by the Stackable Data Platform 7 | kubectl create configmap trino-opa-rules \ 8 | --namespace=$NAMESPACE \ 9 | --from-file=trino_rules/actual_permissions.rego \ 10 | --from-file=trino_rules/policies.rego \ 11 | --from-file=trino_rules/requested_permissions.rego \ 12 | --from-file=trino_rules/util.rego \ 13 | --from-file=trino_rules/verification.rego 14 | kubectl label configmaps trino-opa-rules \ 15 | opa.stackable.tech/bundle=true \ 16 | --namespace=$NAMESPACE 17 | 18 | # Trino policies provided by the customer 19 | kubectl create configmap trino-opa-policies \ 20 | --namespace=$NAMESPACE \ 21 | --from-file=trino_policies.rego 22 | kubectl label configmaps trino-opa-policies \ 23 | opa.stackable.tech/bundle=true \ 24 | --namespace=$NAMESPACE 25 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/12-stop-opa.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: opa.stackable.tech/v1alpha1 3 | kind: OpaCluster 4 | metadata: 5 | name: opa 6 | spec: 7 | image: 8 | productVersion: "{{ test_scenario['values']['opa'] }}" 9 | pullPolicy: IfNotPresent 10 | clusterOperation: 11 | reconciliationPaused: true 12 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 13 | clusterConfig: 14 | vectorAggregatorConfigMapName: vector-aggregator-discovery 15 | {% endif %} 16 | servers: 17 | roleGroups: 18 | default: {} 19 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/20-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 720 5 | commands: 6 | - script: kubectl -n $NAMESPACE wait --for=condition=available=true trinoclusters.trino.stackable.tech/trino --timeout 301s 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | 
metadata: 11 | name: trino-coordinator-default 12 | spec: 13 | template: 14 | spec: 15 | terminationGracePeriodSeconds: 900 16 | status: 17 | readyReplicas: 1 18 | replicas: 1 19 | --- 20 | apiVersion: apps/v1 21 | kind: StatefulSet 22 | metadata: 23 | name: trino-worker-default 24 | spec: 25 | template: 26 | spec: 27 | terminationGracePeriodSeconds: 80 # 10s gracefulShutdownTimeout + 2 x 30s grace period + 10s safety buffer 28 | status: 29 | readyReplicas: 1 30 | replicas: 1 31 | --- 32 | apiVersion: policy/v1 33 | kind: PodDisruptionBudget 34 | metadata: 35 | name: trino-coordinator 36 | status: 37 | expectedPods: 1 38 | currentHealthy: 1 39 | disruptionsAllowed: 1 40 | --- 41 | apiVersion: policy/v1 42 | kind: PodDisruptionBudget 43 | metadata: 44 | name: trino-worker 45 | status: 46 | expectedPods: 1 47 | currentHealthy: 1 48 | disruptionsAllowed: 1 49 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/30-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: trino-test-opa 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/30-install-opa-check.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: trino-test-opa 6 | labels: 7 | app: trino-test-opa 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: trino-test-opa 13 | template: 14 | metadata: 15 | labels: 16 | app: trino-test-opa 17 | spec: 18 | serviceAccount: integration-tests-sa 19 | containers: 20 | - name: trino-test-opa 21 | image: oci.stackable.tech/sdp/testing-tools:0.2.0-stackable0.0.0-dev 22 | command: ["sleep", "infinity"] 23 | resources: 24 | requests: 25 | cpu: "250m" 26 | memory: "64Mi" 27 | limits: 28 | cpu: "500m" 29 | memory: "64Mi" 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/40-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | commands: 6 | - script: kubectl exec -n $NAMESPACE trino-test-opa-0 -- python /tmp/check-opa.py -n $NAMESPACE 7 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/40-copy-scripts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: kubectl cp -n $NAMESPACE ./check-opa.py trino-test-opa-0:/tmp || true 6 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/helm-bitnami-minio-values.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | mode: standalone 3 | disableWebUI: false 4 | extraEnvVars: 5 | - name: BITNAMI_DEBUG 6 | value: "true" 7 | - name: MINIO_LOG_LEVEL 8 | value: DEBUG 9 | 10 | #defaultBuckets: hive 11 | 12 | provisioning: 13 | enabled: true 14 | buckets: 15 | - name: trino 16 | usersExistingSecrets: 17 | - centralized-minio-users 18 | resources: 19 | 
requests: 20 | memory: 1Gi 21 | cpu: "512m" 22 | limits: 23 | memory: "1Gi" 24 | cpu: "1" 25 | podSecurityContext: 26 | enabled: false 27 | containerSecurityContext: 28 | enabled: false 29 | 30 | volumePermissions: 31 | enabled: false 32 | 33 | podSecurityContext: 34 | enabled: false 35 | 36 | containerSecurityContext: 37 | enabled: false 38 | 39 | persistence: 40 | enabled: false 41 | 42 | resources: 43 | requests: 44 | memory: 1Gi 45 | cpu: "512m" 46 | limits: 47 | memory: "1Gi" 48 | cpu: "1" 49 | 50 | auth: 51 | existingSecret: minio-credentials 52 | 53 | service: 54 | type: NodePort 55 | 56 | tls: 57 | enabled: true 58 | existingSecret: minio-tls-certificates 59 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/helm-bitnami-postgresql-values.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | volumePermissions: 3 | enabled: false 4 | securityContext: 5 | runAsUser: auto 6 | 7 | primary: 8 | extendedConfiguration: | 9 | password_encryption=md5 10 | podSecurityContext: 11 | {% if test_scenario['values']['openshift'] == 'true' %} 12 | enabled: false 13 | {% else %} 14 | enabled: true 15 | {% endif %} 16 | containerSecurityContext: 17 | enabled: false 18 | resources: 19 | requests: 20 | memory: "512Mi" 21 | cpu: "512m" 22 | limits: 23 | memory: "512Mi" 24 | cpu: "1" 25 | 26 | auth: 27 | username: hive 28 | password: hive 29 | database: hive 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/trino_rules/.regal/config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | rules: 3 | style: 4 | external-reference: 5 | ignore: 6 | files: 7 | # The policy rules are used in the first_matching_rule 8 | # functions. To avoid these external references to the policy 9 | # rules, it would be necessary to add them as a parameter to 10 | # all functions, and call these functions even from 11 | # verification.rego with this parameter. Therefore, this 12 | # linter warning is ignored because the policy rules are 13 | # seen as the context for the actual_permissions.rego. 14 | - actual_permissions.rego 15 | file-length: 16 | ignore: 17 | files: 18 | # We do not want to split this file. 19 | - requested_permissions.rego 20 | line-length: 21 | ignore: 22 | files: 23 | # `opa fmt` puts the first `with` statement on the same line 24 | # which creates long lines especially in the test file where 25 | # long variable names are used to describe the test case. 26 | - actual_permissions_test.rego 27 | prefer-snake-case: 28 | ignore: 29 | files: 30 | # Entrypoints are named `columnMask` and `rowFilters`. 31 | - verification.rego 32 | rule-length: 33 | ignore: 34 | files: 35 | # The test data is sometimes lengthy but readable. 36 | - "*_test.rego" 37 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/trino_rules/util.rego: -------------------------------------------------------------------------------- 1 | # METADATA 2 | # description: Utility package which extends the built-in functions 3 | package util 4 | 5 | # METADATA 6 | # description: | 7 | # Matches the entire string against a regular expression. 
8 | # 9 | # pattern (string) regular expression 10 | # value (string) value to match against pattern 11 | # 12 | # Returns: 13 | # result (boolean) 14 | match_entire(pattern, value) if { 15 | # Add the anchors ^ and $ 16 | pattern_with_anchors := concat("", ["^", pattern, "$"]) 17 | 18 | regex.match(pattern_with_anchors, value) 19 | } 20 | -------------------------------------------------------------------------------- /tests/templates/kuttl/opa-authorization/trino_rules/util_test.rego: -------------------------------------------------------------------------------- 1 | package util_test 2 | 3 | import data.util 4 | 5 | test_match_entire if { 6 | util.match_entire(`a`, "a") 7 | util.match_entire(`^a`, "a") 8 | util.match_entire(`a$`, "a") 9 | util.match_entire(`^a$`, "a") 10 | not util.match_entire(`a`, "abc") 11 | not util.match_entire(`b`, "abc") 12 | not util.match_entire(`c`, "abc") 13 | } 14 | -------------------------------------------------------------------------------- /tests/templates/kuttl/orphaned-resources/00-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 5 | --- 6 | apiVersion: v1 7 | kind: ConfigMap 8 | metadata: 9 | name: vector-aggregator-discovery 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /tests/templates/kuttl/orphaned-resources/00-install-vector-aggregator-discovery-configmap.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: vector-aggregator-discovery 7 | data: 8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/orphaned-resources/00-patch-ns.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if test_scenario['values']['openshift'] == 'true' %} 2 | # see https://github.com/stackabletech/issues/issues/566 3 | --- 4 | apiVersion: kuttl.dev/v1beta1 5 | kind: TestStep 6 | commands: 7 | - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' 8 | timeout: 120 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/orphaned-resources/1-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | metadata: 5 | name: install-trino 6 | timeout: 1200 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: trino-coordinator-default 12 | status: 13 | readyReplicas: 1 14 | replicas: 1 15 | --- 16 | apiVersion: apps/v1 17 | kind: StatefulSet 18 | metadata: 19 | name: trino-coordinator-remove 20 | status: 21 | readyReplicas: 1 22 | replicas: 1 23 | --- 24 | apiVersion: apps/v1 25 | kind: StatefulSet 26 | metadata: 27 | name: trino-worker-default 28 | status: 29 | readyReplicas: 1 30 | replicas: 1 31 | --- 32 | apiVersion: apps/v1 33 | kind: StatefulSet 34 | metadata: 35 | name: trino-worker-remove 36 | status: 37 | readyReplicas: 1 38 | replicas: 1 39 | -------------------------------------------------------------------------------- 
/tests/templates/kuttl/orphaned-resources/1-install-trino.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: trino.stackable.tech/v1alpha1 3 | kind: TrinoCluster 4 | metadata: 5 | name: trino 6 | spec: 7 | image: 8 | {% if test_scenario['values']['trino-latest'].find(",") > 0 %} 9 | custom: "{{ test_scenario['values']['trino-latest'].split(',')[1] }}" 10 | productVersion: "{{ test_scenario['values']['trino-latest'].split(',')[0] }}" 11 | {% else %} 12 | productVersion: "{{ test_scenario['values']['trino-latest'] }}" 13 | {% endif %} 14 | pullPolicy: IfNotPresent 15 | clusterConfig: 16 | catalogLabelSelector: {} 17 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 18 | vectorAggregatorConfigMapName: vector-aggregator-discovery 19 | {% endif %} 20 | coordinators: 21 | config: 22 | logging: 23 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 24 | roleGroups: 25 | default: 26 | replicas: 1 27 | remove: 28 | replicas: 1 29 | workers: 30 | config: 31 | gracefulShutdownTimeout: 5s # Let the test run faster 32 | logging: 33 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 34 | roleGroups: 35 | default: 36 | replicas: 1 37 | remove: 38 | replicas: 1 39 | -------------------------------------------------------------------------------- /tests/templates/kuttl/orphaned-resources/2-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | metadata: 5 | name: remove-rolegroup 6 | timeout: 600 7 | -------------------------------------------------------------------------------- /tests/templates/kuttl/orphaned-resources/2-errors.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: trino-coordinator-remove 6 | --- 7 | apiVersion: v1 8 | kind: Pod 9 | metadata: 10 | name: trino-coordinator-remove-0 11 | --- 12 | apiVersion: v1 13 | kind: Service 14 | metadata: 15 | name: trino-coordinator-remove 16 | --- 17 | apiVersion: v1 18 | kind: ConfigMap 19 | metadata: 20 | name: trino-coordinator-remove 21 | --- 22 | apiVersion: v1 23 | kind: ConfigMap 24 | metadata: 25 | name: trino-coordinator-remove-catalog 26 | --- 27 | apiVersion: apps/v1 28 | kind: StatefulSet 29 | metadata: 30 | name: trino-worker-remove 31 | --- 32 | apiVersion: v1 33 | kind: Pod 34 | metadata: 35 | name: trino-worker-remove-0 36 | --- 37 | apiVersion: v1 38 | kind: Service 39 | metadata: 40 | name: trino-worker-remove 41 | --- 42 | apiVersion: v1 43 | kind: ConfigMap 44 | metadata: 45 | name: trino-worker-remove 46 | --- 47 | apiVersion: v1 48 | kind: ConfigMap 49 | metadata: 50 | name: trino-worker-remove-catalog 51 | -------------------------------------------------------------------------------- /tests/templates/kuttl/orphaned-resources/2-remove-rolegroup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: trino.stackable.tech/v1alpha1 3 | kind: TrinoCluster 4 | metadata: 5 | name: trino 6 | spec: 7 | coordinators: 8 | roleGroups: 9 | remove: null 10 | workers: 11 | roleGroups: 12 | remove: null 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/resources/00-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: 
TestAssert 4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 5 | --- 6 | apiVersion: v1 7 | kind: ConfigMap 8 | metadata: 9 | name: vector-aggregator-discovery 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /tests/templates/kuttl/resources/00-install-vector-aggregator-discovery-configmap.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: vector-aggregator-discovery 7 | data: 8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/resources/00-patch-ns.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if test_scenario['values']['openshift'] == 'true' %} 2 | # see https://github.com/stackabletech/issues/issues/566 3 | --- 4 | apiVersion: kuttl.dev/v1beta1 5 | kind: TestStep 6 | commands: 7 | - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' 8 | timeout: 120 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/resources/20-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 120 5 | commands: 6 | - script: kubectl get cm -n $NAMESPACE trino-coordinator-resources-default -o yaml | grep -- '-Xmx3276m' | xargs test ! -z 7 | - script: kubectl get cm -n $NAMESPACE trino-worker-resources-from-role -o yaml | grep -- '-Xmx2457m' | xargs test ! -z 8 | - script: kubectl get cm -n $NAMESPACE trino-worker-resources-from-role-group -o yaml | grep -- '-Xmx3276m' | xargs test ! 
-z 9 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/00-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 5 | --- 6 | apiVersion: v1 7 | kind: ConfigMap 8 | metadata: 9 | name: vector-aggregator-discovery 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/00-install-vector-aggregator-discovery-configmap.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: vector-aggregator-discovery 7 | data: 8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/00-limit-range.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: LimitRange 4 | metadata: 5 | name: limit-request-ratio 6 | spec: 7 | limits: 8 | - type: "Container" 9 | maxLimitRequestRatio: 10 | cpu: 5 11 | memory: 1 12 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/00-patch-ns.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if test_scenario['values']['openshift'] == 'true' %} 2 | # see https://github.com/stackabletech/issues/issues/566 3 | --- 4 | apiVersion: kuttl.dev/v1beta1 5 | kind: TestStep 6 | commands: 7 | - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' 8 | timeout: 120 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/00-rbac.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Role 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: use-integration-tests-scc 6 | rules: 7 | {% if test_scenario['values']['openshift'] == "true" %} 8 | - apiGroups: ["security.openshift.io"] 9 | resources: ["securitycontextconstraints"] 10 | resourceNames: ["privileged"] 11 | verbs: ["use"] 12 | {% endif %} 13 | --- 14 | apiVersion: v1 15 | kind: ServiceAccount 16 | metadata: 17 | name: integration-tests-sa 18 | --- 19 | kind: RoleBinding 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | metadata: 22 | name: use-integration-tests-scc 23 | subjects: 24 | - kind: ServiceAccount 25 | name: integration-tests-sa 26 | roleRef: 27 | kind: Role 28 | name: use-integration-tests-scc 29 | apiGroup: rbac.authorization.k8s.io 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/02-install-minio.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: >- 6 | helm install minio 7 | --namespace $NAMESPACE 8 | --version 12.6.4 9 | -f helm-bitnami-minio-values.yaml 10 | --repo https://charts.bitnami.com/bitnami minio 11 | timeout: 240 12 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/04-prepare-bucket.yaml.j2: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | # give minio enough time to start 6 | - command: sleep 5 7 | - script: | 8 | POD=$(kubectl -n $NAMESPACE get pod -l app.kubernetes.io/instance=minio -o name | head -n 1 | sed -e 's#pod/##') 9 | kubectl cp -n $NAMESPACE yellow_tripdata_2021-07.csv $POD:/tmp 10 | kubectl -n $NAMESPACE exec $POD -- mc cp /tmp/yellow_tripdata_2021-07.csv local/trino/taxi-data/ 11 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/05-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: zookeeper-server-default 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/05-install-zookeeper.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperCluster 4 | metadata: 5 | name: zookeeper 6 | spec: 7 | image: 8 | productVersion: "{{ test_scenario['values']['zookeeper'] }}" 9 | pullPolicy: IfNotPresent 10 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 11 | clusterConfig: 12 | vectorAggregatorConfigMapName: vector-aggregator-discovery 13 | {% endif %} 14 | servers: 15 | config: 16 | logging: 17 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 18 | roleGroups: 19 | default: 20 | replicas: 1 21 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/06-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: hdfs-namenode-default 10 | status: 11 | readyReplicas: 2 12 | replicas: 2 13 | --- 14 | apiVersion: apps/v1 15 | kind: StatefulSet 16 | metadata: 17 | name: hdfs-journalnode-default 18 | status: 19 | readyReplicas: 1 20 | replicas: 1 21 | --- 22 | apiVersion: apps/v1 23 | kind: StatefulSet 24 | metadata: 25 | name: hdfs-datanode-default 26 | status: 27 | readyReplicas: 1 28 | replicas: 1 29 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/06-install-hdfs.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: hdfs.stackable.tech/v1alpha1 3 | kind: HdfsCluster 4 | metadata: 5 | name: hdfs 6 | spec: 7 | image: 8 | productVersion: "{{ test_scenario['values']['hdfs'] }}" 9 | pullPolicy: IfNotPresent 10 | clusterConfig: 11 | dfsReplication: 1 12 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 13 | vectorAggregatorConfigMapName: vector-aggregator-discovery 14 | {% endif %} 15 | zookeeperConfigMapName: hdfs-znode 16 | nameNodes: 17 | config: 18 | logging: 19 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 20 | roleGroups: 21 | default: 22 | replicas: 2 23 | dataNodes: 24 | config: 25 | logging: 26 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 27 | roleGroups: 28 | default: 29 | replicas: 1 30 | journalNodes: 31 | config: 32 | logging: 33 | 
enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 34 | roleGroups: 35 | default: 36 | replicas: 1 37 | --- 38 | apiVersion: zookeeper.stackable.tech/v1alpha1 39 | kind: ZookeeperZnode 40 | metadata: 41 | name: hdfs-znode 42 | spec: 43 | clusterRef: 44 | name: zookeeper 45 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/07-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: postgresql 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/07-install-postgres.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | timeout: 300 5 | commands: 6 | - script: >- 7 | helm upgrade postgresql 8 | --install 9 | --version=12.5.6 10 | --namespace $NAMESPACE 11 | -f helm-bitnami-postgresql-values.yaml 12 | --repo https://charts.bitnami.com/bitnami postgresql 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/08-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 900 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: hive-metastore-default 10 | status: 11 | readyReplicas: 2 12 | replicas: 2 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/08-install-hive.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: hive.stackable.tech/v1alpha1 3 | kind: HiveCluster 4 | metadata: 5 | name: hive 6 | spec: 7 | image: 8 | productVersion: "{{ test_scenario['values']['hive'] }}" 9 | pullPolicy: IfNotPresent 10 | clusterConfig: 11 | database: 12 | connString: jdbc:postgresql://postgresql:5432/hive 13 | credentialsSecret: postgres-credentials 14 | dbType: postgres 15 | hdfs: 16 | configMap: hdfs 17 | s3: 18 | reference: minio 19 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 20 | vectorAggregatorConfigMapName: vector-aggregator-discovery 21 | {% endif %} 22 | metastore: 23 | config: 24 | logging: 25 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 26 | roleGroups: 27 | default: 28 | replicas: 2 29 | --- 30 | apiVersion: v1 31 | kind: Secret 32 | metadata: 33 | name: postgres-credentials 34 | type: Opaque 35 | stringData: 36 | username: hive 37 | password: hive 38 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/09-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | commands: 6 | - script: kubectl -n $NAMESPACE rollout status daemonset opa-server-default --timeout 300s 7 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/09-install-opa.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: opa.stackable.tech/v1alpha1 3 | kind: OpaCluster 4 | metadata: 5 | name: opa 6 | spec: 7 | 
image: 8 | productVersion: "{{ test_scenario['values']['opa'] }}" 9 | pullPolicy: IfNotPresent 10 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 11 | clusterConfig: 12 | vectorAggregatorConfigMapName: vector-aggregator-discovery 13 | {% endif %} 14 | servers: 15 | roleGroups: 16 | default: {} 17 | --- 18 | apiVersion: v1 19 | kind: ConfigMap 20 | metadata: 21 | name: simple-trino-opa-bundle 22 | labels: 23 | opa.stackable.tech/bundle: "trino" 24 | data: 25 | trino.rego: | 26 | package trino 27 | 28 | default allow = false 29 | 30 | allow if { 31 | is_admin 32 | } 33 | batch contains i if { 34 | some i 35 | input.action.filterResources[i] 36 | is_admin 37 | } 38 | 39 | allow if { 40 | input.action.operation in ["ExecuteQuery", "AccessCatalog"] 41 | is_bob 42 | } 43 | batch contains i if { 44 | input.action.operation in ["FilterCatalogs"] 45 | some i 46 | input.action.filterResources[i] 47 | is_bob 48 | } 49 | 50 | is_admin() if { 51 | input.context.identity.user == "admin" 52 | } 53 | 54 | is_bob() if { 55 | input.context.identity.user == "bob" 56 | } 57 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/10-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 720 5 | commands: 6 | - script: kubectl -n $NAMESPACE wait --for=condition=available=true trinoclusters.trino.stackable.tech/trino --timeout 301s 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: trino-coordinator-default 12 | spec: 13 | template: 14 | spec: 15 | terminationGracePeriodSeconds: 900 16 | status: 17 | readyReplicas: 1 18 | replicas: 1 19 | --- 20 | apiVersion: apps/v1 21 | kind: StatefulSet 22 | metadata: 23 | name: trino-worker-default 24 | spec: 25 | template: 26 | spec: 27 | terminationGracePeriodSeconds: 75 # 5s gracefulShutdownTimeout + 2 x 30s grace period + 10s safety buffer 28 | status: 29 | readyReplicas: 1 30 | replicas: 1 31 | --- 32 | apiVersion: policy/v1 33 | kind: PodDisruptionBudget 34 | metadata: 35 | name: trino-coordinator 36 | status: 37 | expectedPods: 1 38 | currentHealthy: 1 39 | disruptionsAllowed: 1 40 | --- 41 | apiVersion: policy/v1 42 | kind: PodDisruptionBudget 43 | metadata: 44 | name: trino-worker 45 | status: 46 | expectedPods: 1 47 | currentHealthy: 1 48 | disruptionsAllowed: 1 49 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/11-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | commands: 6 | # 7 | # Test envOverrides 8 | # 9 | - script: | 10 | kubectl -n $NAMESPACE get sts trino-coordinator-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "trino") | .env[] | select (.name == "COMMON_VAR" and .value == "group-value")' 11 | kubectl -n $NAMESPACE get sts trino-coordinator-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "trino") | .env[] | select (.name == "GROUP_VAR" and .value == "group-value")' 12 | kubectl -n $NAMESPACE get sts trino-coordinator-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "trino") | .env[] | select (.name == "ROLE_VAR" and .value == "role-value")' 13 | 14 | kubectl -n $NAMESPACE get sts trino-worker-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "trino") | 
.env[] | select (.name == "COMMON_VAR" and .value == "group-value")' 15 | kubectl -n $NAMESPACE get sts trino-worker-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "trino") | .env[] | select (.name == "GROUP_VAR" and .value == "group-value")' 16 | kubectl -n $NAMESPACE get sts trino-worker-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "trino") | .env[] | select (.name == "ROLE_VAR" and .value == "role-value")' 17 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/12-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This test checks if the containerdebug-state.json file is present and valid 3 | apiVersion: kuttl.dev/v1beta1 4 | kind: TestAssert 5 | timeout: 600 6 | commands: 7 | - script: kubectl exec -n $NAMESPACE --container trino trino-coordinator-default-0 -- cat /stackable/log/containerdebug-state.json | jq --exit-status '"valid JSON"' 8 | - script: kubectl exec -n $NAMESPACE --container trino trino-worker-default-0 -- cat /stackable/log/containerdebug-state.json | jq --exit-status '"valid JSON"' 9 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/20-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: trino-test-helper 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/20-install-check.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: trino-test-helper 6 | labels: 7 | app: trino-test-helper 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: trino-test-helper 13 | template: 14 | metadata: 15 | labels: 16 | app: trino-test-helper 17 | spec: 18 | serviceAccount: integration-tests-sa 19 | containers: 20 | - name: trino-test-helper 21 | image: oci.stackable.tech/sdp/testing-tools:0.2.0-stackable0.0.0-dev 22 | command: ["sleep", "infinity"] 23 | resources: 24 | requests: 25 | cpu: "250m" 26 | memory: "64Mi" 27 | limits: 28 | cpu: "500m" 29 | memory: "64Mi" 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/21-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | commands: 6 | - script: kubectl exec -n $NAMESPACE trino-test-helper-0 -- python /tmp/check-active-workers.py -u admin -p admin -n $NAMESPACE -w 1 7 | - script: kubectl exec -n $NAMESPACE trino-test-helper-0 -- python /tmp/check-opa.py -n $NAMESPACE 8 | - script: kubectl exec -n $NAMESPACE trino-test-helper-0 -- python /tmp/check-s3.py -n $NAMESPACE 9 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/21-copy-scripts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: kubectl cp -n $NAMESPACE ./check-active-workers.py trino-test-helper-0:/tmp || true 6 | - script: kubectl cp -n $NAMESPACE 
./check-opa.py trino-test-helper-0:/tmp || true 7 | - script: kubectl cp -n $NAMESPACE ./check-s3.py trino-test-helper-0:/tmp || true 8 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/30-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: trino-coordinator-default 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | --- 14 | apiVersion: apps/v1 15 | kind: StatefulSet 16 | metadata: 17 | name: trino-worker-default 18 | status: 19 | readyReplicas: 2 20 | replicas: 2 21 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/30-scale-trino.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: >- 6 | kubectl --namespace $NAMESPACE 7 | patch trinoclusters.trino.stackable.tech trino 8 | --type=merge --patch '{"spec":{"workers": {"roleGroups": {"default": {"replicas": 2}}}}}' 9 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/31-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | commands: 6 | - script: kubectl exec -n $NAMESPACE trino-test-helper-0 -- python /tmp/check-active-workers.py -u admin -p admin -n $NAMESPACE -w 2 7 | - script: kubectl exec -n $NAMESPACE trino-test-helper-0 -- python /tmp/check-opa.py -n $NAMESPACE 8 | - script: kubectl exec -n $NAMESPACE trino-test-helper-0 -- python /tmp/check-s3.py -n $NAMESPACE 9 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/certs/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIID2TCCAsGgAwIBAgIUNjquGYWtyJ5a6wy23Hz2GRcMlwMwDQYJKoZIhvcNAQEL 3 | BQAwezELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxlc3dpZy1Ib2xzdGVpbjEO 4 | MAwGA1UEBwwFV2VkZWwxKDAmBgNVBAoMH1N0YWNrYWJsZSBTaWduaW5nIEF1dGhv 5 | cml0eSBJbmMxFTATBgNVBAMMDHN0YWNrYWJsZS5kZTAgFw0yMzA2MTYxMjUxMDJa 6 | GA8yMTIzMDUyMzEyNTEwMlowezELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxl 7 | c3dpZy1Ib2xzdGVpbjEOMAwGA1UEBwwFV2VkZWwxKDAmBgNVBAoMH1N0YWNrYWJs 8 | ZSBTaWduaW5nIEF1dGhvcml0eSBJbmMxFTATBgNVBAMMDHN0YWNrYWJsZS5kZTCC 9 | ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANnV/vby3Ro57a2qvQRnn0je 10 | eKMU2+F0+lZNCAviGUD5bm8jk91oPZnk0bhQqeyErmDS4WT0zevERIBJJDjfL0D8 11 | 46Be7PiMKe0dGjoqI3z5cOIejc8aLPHSIlgN6lT3fIruS16coQgG4uaKiHF5+eWF 12 | DRULdu6dsYuz6dKjqRiUOhHwDwtUJkDwPv+EItqo0H+MLFLLYM0+lEIae7dN5CQ5 13 | So5WhL2cyv5VJ7lj/EAKCViIEgCmzDRDcRgSSjWyH4bn6yX2026fPIyWJFyEdL/6 14 | jAOJADDR0GyhNOXrEeqhocSMnIbQVqwAT0kMhuXSvwvlrnLyTpG5jZm4lUM34kMC 15 | AwEAAaNTMFEwHQYDVR0OBBYEFEI3RMMiyiBjyQ1RS8nlORJVd1pBMB8GA1UdIwQY 16 | MBaAFEI3RMMiyiBjyQ1RS8nlORJVd1pBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI 17 | hvcNAQELBQADggEBAHtKRXdFdtUhtUjodmYQcedADhHZOhBpKinzou4brdk4HfhF 18 | Lr/WFlcRemlV6mBsLpyMuK+Td8ZUEQ6JERLy6lS/c6pOGxnB4aClE8at+C+TjJAO 19 | Vm3WSI6VR1cFXGeZjldVQ6xkQskMJzO7df6iMTPtV5RkMeJXtL6XamEi54rBogNH 20 | Nra+EJBQBl/Ze90NjeYbv20uQpZaaZFaaSmVoNHDpBwla0ouy+MjObC3SpgOq1IC 21 | Pl3NuwNLV8VbOr5HrhQAoKmgSNb3P8vaTVux/X0Yfjy/S7N9kPBaK9mFj74zwV9w 22 | qSQ14Kl5jO3V3hrGWYZEjDOgbZrrEX1KXEust+Q= 
23 | -----END CERTIFICATE----- 24 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/certs/client.crt.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDyDCCArCgAwIBAgIUCI2PNNrtzp6Ql7GkuaFxmDa6UBowDQYJKoZIhvcNAQEL 3 | BQAwezELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxlc3dpZy1Ib2xzdGVpbjEO 4 | MAwGA1UEBwwFV2VkZWwxKDAmBgNVBAoMH1N0YWNrYWJsZSBTaWduaW5nIEF1dGhv 5 | cml0eSBJbmMxFTATBgNVBAMMDHN0YWNrYWJsZS5kZTAgFw0yMzA2MTYxMjUxMDJa 6 | GA8yMTIzMDUyMzEyNTEwMlowXjELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxl 7 | c3dpZy1Ib2xzdGVpbjEOMAwGA1UEBwwFV2VkZWwxEjAQBgNVBAoMCVN0YWNrYWJs 8 | ZTEOMAwGA1UEAwwFbWluaW8wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB 9 | AQCjynVz+XHB8OCY4psEEYmjobpdzTlowwcSQN+YDPCkBeor0Tb87EgLzJK+JYbu 10 | poXBlNIJPQaowJEo/SzSk8fu2XSMyvAZY4FWGxJy2yxIxvP/ibOGOYuiPGXK24t6 11 | ZcGTUVhauiZGSgkWrejWWh7MjFS+c1vaYZqB+Q1zPs5PFMlc8l5V/+b8Z7jMJi84 12 | mOfIPxjkvIyJr5UkeF3UfLqJS5y4LF4ty4Ft2iAd7bbfHanfvYmv6UoDVtMXtWo1 13 | oQPfv3shWrmRLzw6euIAtbXc5CjByIakCbiDnAU8rKg+B1J4mvQgrLwo3qPryJgx 14 | SdidmZ2mER7Ez+c5B0m/LiIhAgMBAAGjXzBdMBsGA1UdEQQUMBKCBW1pbmlvggls 15 | b2NhbGhvc3QwHQYDVR0OBBYEFJQ0gD5kEtQr+tDpDSZ7kwZ8H5hGMB8GA1UdIwQY 16 | MBaAFEI3RMMiyiBjyQ1RS8nlORJVd1pBMA0GCSqGSIb3DQEBCwUAA4IBAQBcdhd+ 17 | R4JoGvqLBk59dqIUecctuFsrdPxsBiOFhYNgZqedLM0UL5DzyfAHfVO0LfSEDddX 18 | RJL9yL7+kMU0T76cvdC9XVAIE6HUwTo9GYsPqsuyZoVjNpEDJCwY3CvonlJVe4dq 19 | /gAbJMYB+TSmY5yDPz/JFY/XZzYaPb7OdeGujbVT5Ixp97ApS8YIiv73C0wUbc6R 20 | h0rcfRbykSQUh9vgVdXRSR8DT3WCfdqNzNBYXv9mqfW5z4sbGj+l3wUl/I3F/mIw 21 | fyO4Cti4akiGVHlffEy0wkzVaBxhcXj2I3BUThB4ZqjlssieVaFkwvXmmyRT0oEW 22 | 5H+NPHcquS1zPscl 23 | -----END CERTIFICATE----- 24 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/certs/client.csr.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIC0TCCAbkCAQAwXjELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxlc3dpZy1I 3 | b2xzdGVpbjEOMAwGA1UEBwwFV2VkZWwxEjAQBgNVBAoMCVN0YWNrYWJsZTEOMAwG 4 | A1UEAwwFbWluaW8wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCjynVz 5 | +XHB8OCY4psEEYmjobpdzTlowwcSQN+YDPCkBeor0Tb87EgLzJK+JYbupoXBlNIJ 6 | PQaowJEo/SzSk8fu2XSMyvAZY4FWGxJy2yxIxvP/ibOGOYuiPGXK24t6ZcGTUVha 7 | uiZGSgkWrejWWh7MjFS+c1vaYZqB+Q1zPs5PFMlc8l5V/+b8Z7jMJi84mOfIPxjk 8 | vIyJr5UkeF3UfLqJS5y4LF4ty4Ft2iAd7bbfHanfvYmv6UoDVtMXtWo1oQPfv3sh 9 | WrmRLzw6euIAtbXc5CjByIakCbiDnAU8rKg+B1J4mvQgrLwo3qPryJgxSdidmZ2m 10 | ER7Ez+c5B0m/LiIhAgMBAAGgLjAsBgkqhkiG9w0BCQ4xHzAdMBsGA1UdEQQUMBKC 11 | BW1pbmlvgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQADggEBAIXs2PGYTo8N6IZc 12 | eVVa82AxtJPSaIeRqrTuAiKloQPZxhpEaTSAR8wxWpzyIeilMgp8UuMo2M0euxGM 13 | gxhzOyNXFekDSoLbuvKHidhvJ+rVEVHag3VdKA22P4/OYV8HwP6yXsNWNXK6Sp6J 14 | pKwRE3PpSN4vPbEmbxLndM9SOVghV9RCVdLMPFXg+pfTNPm2H3cYGg4yU+Cdl5Dj 15 | voUOQzRMuvflScf+gzjBIx7xVvwo/d9szsyqPfMyTlK40kU+KGl5Mz+C7Icyljnj 16 | 8F92l4NbDrXpWuyNjoUwEH8Kdb4ioPACHgStTY0Js8vdVS7wWj6ylXPBRUd9Yxxg 17 | BWC7YHc= 18 | -----END CERTIFICATE REQUEST----- 19 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/certs/client.key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCjynVz+XHB8OCY 3 | 4psEEYmjobpdzTlowwcSQN+YDPCkBeor0Tb87EgLzJK+JYbupoXBlNIJPQaowJEo 4 | /SzSk8fu2XSMyvAZY4FWGxJy2yxIxvP/ibOGOYuiPGXK24t6ZcGTUVhauiZGSgkW 5 | 
rejWWh7MjFS+c1vaYZqB+Q1zPs5PFMlc8l5V/+b8Z7jMJi84mOfIPxjkvIyJr5Uk 6 | eF3UfLqJS5y4LF4ty4Ft2iAd7bbfHanfvYmv6UoDVtMXtWo1oQPfv3shWrmRLzw6 7 | euIAtbXc5CjByIakCbiDnAU8rKg+B1J4mvQgrLwo3qPryJgxSdidmZ2mER7Ez+c5 8 | B0m/LiIhAgMBAAECggEAAd3t5suCE27WcIessqgshHP0dts++0W1z+xzX/1NxODY 9 | YXV6Bfn/fDrxtT8iQZgeUC2NE1PhvoyrWuc/2oqarctGu9AYWoG62KtoU2zSHWY/ 10 | I7uDE1WWlNvRYTWNanC8exxjQG18wDJZ1itXSxIt5bD3yk/wTRXtt+uJzrV5jocu 11 | chxDLwowiu1Aj6dRCZNBz9TJxyNr50NYW2UXBaT/87XrFFdJwMTVT0B7HOnG7RBV 12 | QlKw8mqVbaNenhcvMjR29sxTzHR+jxIO3BwO6OGj/OFhFBYU7TLXeld1qoe0vb2G 13 | b8hPpGuptr5At9lw1w5wQ3IgiutPNH5qyDyCpEl6EQKBgQDcdbslOfKJj7O2LAyY 14 | FtkTpilE0V3j0qmQ93IjrV4+DRmLMEB299407BUYQQj1/DIbQcoZ2EEcUB5pdeHs 15 | 7DMED6XLHb2JU12+a7sWyCNdKece+T7/IblI8Tt340UlHQ6zSMSDcjvfcFHVgv0s 16 | CajhFx7NkLEXTZr8fT3YIhj4vQKBgQC+MgZ1UmoJw9IAQj2uIU5Cy9xjWeYDTAO/ 17 | YaXIzwlge438Mcbb4cN2SaNSGDgV7nu5kqiihpjPYWIiiOBp9kTRHXOdPW47syeI 18 | t3kwrp2zVlUglcMZZ6mmV3QVaANZgjU4RSv4e/VxULjbZafjPtZRsjZGpK0YU1ot 19 | Vj8IeQ7fNQKBgCP+ZMuzJlInUCQSFQxPzqlSm7JMrJOhtWWhwNTqXVSsNttuyVej 20 | KHhjgx4uoBPpVRT2LNUDZb4FprF5OaXA+qNTGrKK7IMbRVbtp+IUUxHG4aFA+HQX 21 | QXUTTa5JQOTKVbgXzV3YrMXSRMojVMp32UbGy5SsZv1zAjbvC8XZ61HRAoGAdBcQ 22 | vhe5xZAS5DmKcHi/ziGkubern6OMPgqaKIHGlW+U8LRpTtj0d4Tm/TrvMOPJ/TE5 23 | YUqKhzpHrhCh+ctpocI6SWWvnRzzKo3imQZcF5TAjQ0ccqtFb9S9dDtyn/XMCjae 24 | aiMvYyUEUFYyLZCzPFZsrp3hUZG+3yFfhApwO2kCgYBHwYaPIdW6Ww7+B2hin0ow 25 | ja3cesvA4jaMPwSLT8ONtU1GBSMfwczMbnPHLrRvB87n9gPaR2wQGUmrFEO3LPX/ 26 | kRcOGpYBHpDXEjDhKkWdRuLOFg4HLZdV8ANZlQ0VScE8u3dDDUO89pGDn08qTArl 27 | x9kxsudEVrkerZb5UxFVqQ== 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/certs/generate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Creating client cert" 4 | FQDN="minio" 5 | 6 | echo "Creating Root Certificate Authority" 7 | openssl genrsa \ 8 | -out root-ca.key.pem \ 9 | 2048 10 | 11 | echo "Self-signing the Root Certificate Authority" 12 | openssl req \ 13 | -x509 \ 14 | -new \ 15 | -nodes \ 16 | -key root-ca.key.pem \ 17 | -days 36500 \ 18 | -out root-ca.crt.pem \ 19 | -subj "/C=DE/ST=Schleswig-Holstein/L=Wedel/O=Stackable Signing Authority Inc/CN=stackable.de" 20 | 21 | openssl genrsa \ 22 | -out client.key.pem \ 23 | 2048 24 | 25 | echo "Creating the CSR" 26 | openssl req -new \ 27 | -key client.key.pem \ 28 | -out client.csr.pem \ 29 | -subj "/C=DE/ST=Schleswig-Holstein/L=Wedel/O=Stackable/CN=${FQDN}" \ 30 | -addext "subjectAltName = DNS:${FQDN}, DNS:localhost" 31 | 32 | echo "Signing the client cert with the root ca" 33 | openssl x509 \ 34 | -req -in client.csr.pem \ 35 | -CA root-ca.crt.pem \ 36 | -CAkey root-ca.key.pem \ 37 | -CAcreateserial \ 38 | -out client.crt.pem \ 39 | -days 36500 \ 40 | -copy_extensions copy 41 | 42 | echo "Copying the files to match the api of the secret-operator" 43 | cp root-ca.crt.pem ca.crt 44 | cp client.key.pem tls.key 45 | cp client.crt.pem tls.crt 46 | 47 | echo "To create a k8s secret run" 48 | echo "kubectl create secret generic foo --from-file=ca.crt --from-file=tls.crt --from-file=tls.key" 49 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/certs/root-ca.crt.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIID2TCCAsGgAwIBAgIUNjquGYWtyJ5a6wy23Hz2GRcMlwMwDQYJKoZIhvcNAQEL 3 | BQAwezELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxlc3dpZy1Ib2xzdGVpbjEO 
4 | MAwGA1UEBwwFV2VkZWwxKDAmBgNVBAoMH1N0YWNrYWJsZSBTaWduaW5nIEF1dGhv 5 | cml0eSBJbmMxFTATBgNVBAMMDHN0YWNrYWJsZS5kZTAgFw0yMzA2MTYxMjUxMDJa 6 | GA8yMTIzMDUyMzEyNTEwMlowezELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxl 7 | c3dpZy1Ib2xzdGVpbjEOMAwGA1UEBwwFV2VkZWwxKDAmBgNVBAoMH1N0YWNrYWJs 8 | ZSBTaWduaW5nIEF1dGhvcml0eSBJbmMxFTATBgNVBAMMDHN0YWNrYWJsZS5kZTCC 9 | ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANnV/vby3Ro57a2qvQRnn0je 10 | eKMU2+F0+lZNCAviGUD5bm8jk91oPZnk0bhQqeyErmDS4WT0zevERIBJJDjfL0D8 11 | 46Be7PiMKe0dGjoqI3z5cOIejc8aLPHSIlgN6lT3fIruS16coQgG4uaKiHF5+eWF 12 | DRULdu6dsYuz6dKjqRiUOhHwDwtUJkDwPv+EItqo0H+MLFLLYM0+lEIae7dN5CQ5 13 | So5WhL2cyv5VJ7lj/EAKCViIEgCmzDRDcRgSSjWyH4bn6yX2026fPIyWJFyEdL/6 14 | jAOJADDR0GyhNOXrEeqhocSMnIbQVqwAT0kMhuXSvwvlrnLyTpG5jZm4lUM34kMC 15 | AwEAAaNTMFEwHQYDVR0OBBYEFEI3RMMiyiBjyQ1RS8nlORJVd1pBMB8GA1UdIwQY 16 | MBaAFEI3RMMiyiBjyQ1RS8nlORJVd1pBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI 17 | hvcNAQELBQADggEBAHtKRXdFdtUhtUjodmYQcedADhHZOhBpKinzou4brdk4HfhF 18 | Lr/WFlcRemlV6mBsLpyMuK+Td8ZUEQ6JERLy6lS/c6pOGxnB4aClE8at+C+TjJAO 19 | Vm3WSI6VR1cFXGeZjldVQ6xkQskMJzO7df6iMTPtV5RkMeJXtL6XamEi54rBogNH 20 | Nra+EJBQBl/Ze90NjeYbv20uQpZaaZFaaSmVoNHDpBwla0ouy+MjObC3SpgOq1IC 21 | Pl3NuwNLV8VbOr5HrhQAoKmgSNb3P8vaTVux/X0Yfjy/S7N9kPBaK9mFj74zwV9w 22 | qSQ14Kl5jO3V3hrGWYZEjDOgbZrrEX1KXEust+Q= 23 | -----END CERTIFICATE----- 24 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/certs/root-ca.crt.srl: -------------------------------------------------------------------------------- 1 | 7046F738B5BC7F7DC43E1E7E2EF5B23832C7A59A 2 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/certs/root-ca.key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDZ1f728t0aOe2t 3 | qr0EZ59I3nijFNvhdPpWTQgL4hlA+W5vI5PdaD2Z5NG4UKnshK5g0uFk9M3rxESA 4 | SSQ43y9A/OOgXuz4jCntHRo6KiN8+XDiHo3PGizx0iJYDepU93yK7ktenKEIBuLm 5 | iohxefnlhQ0VC3bunbGLs+nSo6kYlDoR8A8LVCZA8D7/hCLaqNB/jCxSy2DNPpRC 6 | Gnu3TeQkOUqOVoS9nMr+VSe5Y/xACglYiBIApsw0Q3EYEko1sh+G5+sl9tNunzyM 7 | liRchHS/+owDiQAw0dBsoTTl6xHqoaHEjJyG0FasAE9JDIbl0r8L5a5y8k6RuY2Z 8 | uJVDN+JDAgMBAAECggEABVrQUKu5qapg4FMBIHmXncfyOTgLC6i/ep2cJAajzkgT 9 | YeIDAX9NfFn2mcxJ0QmV68VjSwMFiNUjRfAGVuuNktBknA2ZT6bKZQzBF0rv4mOT 10 | VcugesXO8wbSV03IQ9xtkFC5Q5MgFj1tGHOxVPDFptG1d533h3gS5DdA+S+SuYrn 11 | n8JUqjenVzYgC5CFprDXEy/ZOC/Is/oq/GujC3e6VJINueCOOrVkNKhMtktq8qkg 12 | UtkjZQYP/d0nzR8bYGXN818MBZOg+RyA0asgkDe+n6Lr6gNzaqhECDdITDejq/0h 13 | D8ldKD4v8CYTRAY1AteIAF0jUg2YuWZwgQ8IzL0viQKBgQD2R0AB0RHhxHJsnWoQ 14 | EhWPyD3fo3w8Q5dIxugpP2LcloyOH6Ew5xlwnmPu0wL31D8CHad+vZnUVEqMIZqk 15 | rZy9r0EZc2JmC1Sgpv7NkpY3johBrONIAKsStTpNpr/3EB0rAeQ9KzS0TafeYXKt 16 | buz8Fx3kcBPosLCs9+r/+TDzhQKBgQDib1K0oQV55pFTn9OkoHPTitLsOOwpLxjC 17 | ui8R73PhhvCK3UNftsq3U/Gj6L3wi5ATeE0SC1xCu2ZvU8K7EBRflvoGMmWfAh+O 18 | XMfE21yLyrSgUVhsDeC2VILyxIB0sgcT7uze3TjD2Dm0vg0NQrCNa8euLAn5WIHS 19 | QE6jd8t1JwKBgAlLDP5EjmEvkYXJttveYtPnIXaT67c2cbn8T3xm+OsL/0fJp8J5 20 | pfsa7vhvG/iQGMSSq+RbcTeS6rE4/2Xhaz25JEK6mOby3IGna4wEUQjNpxSbWoQ4 21 | CjyNfCK7/Rhskj0yOBOa0sVO/NumX7ZtriGhGa6qEAZCzJfqTwLTu2YlAoGAaEmt 22 | ZdPjmck/law+5cugjQWbL4DoA+/VD5qAo1oNnQlxMAPITAT8SIM4/6zqDie5K750 23 | gKMK0xFMlGmXfmBhgcUfUktT0nA/6GmC+H+vmBK8LjpI5ztdC5zQ0s79+sEj0WJx 24 | ZhOtWUX1DfGaQUk912SUivttfJHu+M71aQR7iHECgYEAqIPW6opxX5p3bSrfGDY0 25 | vqcxpTLjBAUtCig0UoI+01PiuEudCB9ed2AWkk+h0KzvtKP2VW8r4tYJFeDU2Jt3 26 | 
s3mcO3Ix1cwHdb2CjzBm4dReyBsIUIzJRPl9spz0cRYhdQkREIHJUoCskwrqCsS0 27 | O9W/M6BZHjhM/7eA8StNHEU= 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/certs/tls.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDyDCCArCgAwIBAgIUCI2PNNrtzp6Ql7GkuaFxmDa6UBowDQYJKoZIhvcNAQEL 3 | BQAwezELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxlc3dpZy1Ib2xzdGVpbjEO 4 | MAwGA1UEBwwFV2VkZWwxKDAmBgNVBAoMH1N0YWNrYWJsZSBTaWduaW5nIEF1dGhv 5 | cml0eSBJbmMxFTATBgNVBAMMDHN0YWNrYWJsZS5kZTAgFw0yMzA2MTYxMjUxMDJa 6 | GA8yMTIzMDUyMzEyNTEwMlowXjELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxl 7 | c3dpZy1Ib2xzdGVpbjEOMAwGA1UEBwwFV2VkZWwxEjAQBgNVBAoMCVN0YWNrYWJs 8 | ZTEOMAwGA1UEAwwFbWluaW8wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB 9 | AQCjynVz+XHB8OCY4psEEYmjobpdzTlowwcSQN+YDPCkBeor0Tb87EgLzJK+JYbu 10 | poXBlNIJPQaowJEo/SzSk8fu2XSMyvAZY4FWGxJy2yxIxvP/ibOGOYuiPGXK24t6 11 | ZcGTUVhauiZGSgkWrejWWh7MjFS+c1vaYZqB+Q1zPs5PFMlc8l5V/+b8Z7jMJi84 12 | mOfIPxjkvIyJr5UkeF3UfLqJS5y4LF4ty4Ft2iAd7bbfHanfvYmv6UoDVtMXtWo1 13 | oQPfv3shWrmRLzw6euIAtbXc5CjByIakCbiDnAU8rKg+B1J4mvQgrLwo3qPryJgx 14 | SdidmZ2mER7Ez+c5B0m/LiIhAgMBAAGjXzBdMBsGA1UdEQQUMBKCBW1pbmlvggls 15 | b2NhbGhvc3QwHQYDVR0OBBYEFJQ0gD5kEtQr+tDpDSZ7kwZ8H5hGMB8GA1UdIwQY 16 | MBaAFEI3RMMiyiBjyQ1RS8nlORJVd1pBMA0GCSqGSIb3DQEBCwUAA4IBAQBcdhd+ 17 | R4JoGvqLBk59dqIUecctuFsrdPxsBiOFhYNgZqedLM0UL5DzyfAHfVO0LfSEDddX 18 | RJL9yL7+kMU0T76cvdC9XVAIE6HUwTo9GYsPqsuyZoVjNpEDJCwY3CvonlJVe4dq 19 | /gAbJMYB+TSmY5yDPz/JFY/XZzYaPb7OdeGujbVT5Ixp97ApS8YIiv73C0wUbc6R 20 | h0rcfRbykSQUh9vgVdXRSR8DT3WCfdqNzNBYXv9mqfW5z4sbGj+l3wUl/I3F/mIw 21 | fyO4Cti4akiGVHlffEy0wkzVaBxhcXj2I3BUThB4ZqjlssieVaFkwvXmmyRT0oEW 22 | 5H+NPHcquS1zPscl 23 | -----END CERTIFICATE----- 24 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/certs/tls.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCjynVz+XHB8OCY 3 | 4psEEYmjobpdzTlowwcSQN+YDPCkBeor0Tb87EgLzJK+JYbupoXBlNIJPQaowJEo 4 | /SzSk8fu2XSMyvAZY4FWGxJy2yxIxvP/ibOGOYuiPGXK24t6ZcGTUVhauiZGSgkW 5 | rejWWh7MjFS+c1vaYZqB+Q1zPs5PFMlc8l5V/+b8Z7jMJi84mOfIPxjkvIyJr5Uk 6 | eF3UfLqJS5y4LF4ty4Ft2iAd7bbfHanfvYmv6UoDVtMXtWo1oQPfv3shWrmRLzw6 7 | euIAtbXc5CjByIakCbiDnAU8rKg+B1J4mvQgrLwo3qPryJgxSdidmZ2mER7Ez+c5 8 | B0m/LiIhAgMBAAECggEAAd3t5suCE27WcIessqgshHP0dts++0W1z+xzX/1NxODY 9 | YXV6Bfn/fDrxtT8iQZgeUC2NE1PhvoyrWuc/2oqarctGu9AYWoG62KtoU2zSHWY/ 10 | I7uDE1WWlNvRYTWNanC8exxjQG18wDJZ1itXSxIt5bD3yk/wTRXtt+uJzrV5jocu 11 | chxDLwowiu1Aj6dRCZNBz9TJxyNr50NYW2UXBaT/87XrFFdJwMTVT0B7HOnG7RBV 12 | QlKw8mqVbaNenhcvMjR29sxTzHR+jxIO3BwO6OGj/OFhFBYU7TLXeld1qoe0vb2G 13 | b8hPpGuptr5At9lw1w5wQ3IgiutPNH5qyDyCpEl6EQKBgQDcdbslOfKJj7O2LAyY 14 | FtkTpilE0V3j0qmQ93IjrV4+DRmLMEB299407BUYQQj1/DIbQcoZ2EEcUB5pdeHs 15 | 7DMED6XLHb2JU12+a7sWyCNdKece+T7/IblI8Tt340UlHQ6zSMSDcjvfcFHVgv0s 16 | CajhFx7NkLEXTZr8fT3YIhj4vQKBgQC+MgZ1UmoJw9IAQj2uIU5Cy9xjWeYDTAO/ 17 | YaXIzwlge438Mcbb4cN2SaNSGDgV7nu5kqiihpjPYWIiiOBp9kTRHXOdPW47syeI 18 | t3kwrp2zVlUglcMZZ6mmV3QVaANZgjU4RSv4e/VxULjbZafjPtZRsjZGpK0YU1ot 19 | Vj8IeQ7fNQKBgCP+ZMuzJlInUCQSFQxPzqlSm7JMrJOhtWWhwNTqXVSsNttuyVej 20 | KHhjgx4uoBPpVRT2LNUDZb4FprF5OaXA+qNTGrKK7IMbRVbtp+IUUxHG4aFA+HQX 21 | QXUTTa5JQOTKVbgXzV3YrMXSRMojVMp32UbGy5SsZv1zAjbvC8XZ61HRAoGAdBcQ 22 | vhe5xZAS5DmKcHi/ziGkubern6OMPgqaKIHGlW+U8LRpTtj0d4Tm/TrvMOPJ/TE5 23 | 
YUqKhzpHrhCh+ctpocI6SWWvnRzzKo3imQZcF5TAjQ0ccqtFb9S9dDtyn/XMCjae 24 | aiMvYyUEUFYyLZCzPFZsrp3hUZG+3yFfhApwO2kCgYBHwYaPIdW6Ww7+B2hin0ow 25 | ja3cesvA4jaMPwSLT8ONtU1GBSMfwczMbnPHLrRvB87n9gPaR2wQGUmrFEO3LPX/ 26 | kRcOGpYBHpDXEjDhKkWdRuLOFg4HLZdV8ANZlQ0VScE8u3dDDUO89pGDn08qTArl 27 | x9kxsudEVrkerZb5UxFVqQ== 28 | -----END PRIVATE KEY----- 29 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/helm-bitnami-minio-values.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | mode: standalone 3 | disableWebUI: false 4 | extraEnvVars: 5 | - name: BITNAMI_DEBUG 6 | value: "true" 7 | - name: MINIO_LOG_LEVEL 8 | value: DEBUG 9 | 10 | #defaultBuckets: hive 11 | 12 | provisioning: 13 | enabled: true 14 | buckets: 15 | - name: trino 16 | usersExistingSecrets: 17 | - centralized-minio-users 18 | resources: 19 | requests: 20 | memory: 1Gi 21 | cpu: "512m" 22 | limits: 23 | memory: "1Gi" 24 | cpu: "1" 25 | podSecurityContext: 26 | enabled: false 27 | containerSecurityContext: 28 | enabled: false 29 | 30 | volumePermissions: 31 | enabled: false 32 | 33 | podSecurityContext: 34 | enabled: false 35 | 36 | containerSecurityContext: 37 | enabled: false 38 | 39 | persistence: 40 | enabled: false 41 | 42 | resources: 43 | requests: 44 | memory: 1Gi 45 | cpu: "512m" 46 | limits: 47 | memory: "1Gi" 48 | cpu: "1" 49 | 50 | auth: 51 | existingSecret: minio-credentials 52 | 53 | service: 54 | type: NodePort 55 | 56 | tls: 57 | enabled: true 58 | existingSecret: minio-tls-certificates 59 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke/helm-bitnami-postgresql-values.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | volumePermissions: 3 | enabled: false 4 | securityContext: 5 | runAsUser: auto 6 | 7 | primary: 8 | extendedConfiguration: | 9 | password_encryption=md5 10 | podSecurityContext: 11 | {% if test_scenario['values']['openshift'] == 'true' %} 12 | enabled: false 13 | {% else %} 14 | enabled: true 15 | {% endif %} 16 | containerSecurityContext: 17 | enabled: false 18 | resources: 19 | requests: 20 | memory: "512Mi" 21 | cpu: "512m" 22 | limits: 23 | memory: "512Mi" 24 | cpu: "1" 25 | 26 | auth: 27 | username: hive 28 | password: hive 29 | database: hive 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/00-s3connection.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: s3.stackable.tech/v1alpha1 3 | kind: S3Connection 4 | metadata: 5 | name: aws-s3 6 | spec: 7 | host: s3.amazonaws.com 8 | region: 9 | name: eu-central-1 10 | accessStyle: Path 11 | credentials: 12 | secretClass: s3-credentials-class 13 | tls: 14 | verification: 15 | server: 16 | caCert: 17 | webPki: {} 18 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/00-secretclass.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.stackable.tech/v1alpha1 3 | kind: SecretClass 4 | metadata: 5 | name: s3-credentials-class 6 | spec: 7 | backend: 8 | k8sSearch: 9 | searchNamespace: 10 | pod: {} 11 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/04-prepare-bucket.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: | 6 | set -eu 7 | 8 | echo "Manually create the bucket and load data into it first" 9 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/05-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: zookeeper-server-default 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/05-install-zookeeper.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: zookeeper.stackable.tech/v1alpha1 3 | kind: ZookeeperCluster 4 | metadata: 5 | name: zookeeper 6 | spec: 7 | image: 8 | productVersion: "{{ test_scenario['values']['zookeeper'] }}" 9 | pullPolicy: IfNotPresent 10 | servers: 11 | config: 12 | logging: 13 | roleGroups: 14 | default: 15 | replicas: 1 16 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/06-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: hdfs-namenode-default 10 | status: 11 | readyReplicas: 2 12 | replicas: 2 13 | --- 14 | apiVersion: apps/v1 15 | kind: StatefulSet 16 | metadata: 17 | name: hdfs-journalnode-default 18 | status: 19 | readyReplicas: 1 20 | replicas: 1 21 | --- 22 | apiVersion: apps/v1 23 | kind: StatefulSet 24 | metadata: 25 | name: hdfs-datanode-default 26 | status: 27 | readyReplicas: 1 28 | replicas: 1 29 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/06-install-hdfs.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: hdfs.stackable.tech/v1alpha1 3 | kind: HdfsCluster 4 | metadata: 5 | name: hdfs 6 | spec: 7 | image: 8 | productVersion: "{{ test_scenario['values']['hdfs'] }}" 9 | pullPolicy: IfNotPresent 10 | clusterConfig: 11 | dfsReplication: 1 12 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 13 | vectorAggregatorConfigMapName: vector-aggregator-discovery 14 | {% endif %} 15 | zookeeperConfigMapName: hdfs-znode 16 | nameNodes: 17 | config: 18 | logging: 19 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 20 | roleGroups: 21 | default: 22 | replicas: 2 23 | dataNodes: 24 | config: 25 | logging: 26 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 27 | roleGroups: 28 | default: 29 | replicas: 1 30 | journalNodes: 31 | config: 32 | logging: 33 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 34 | roleGroups: 35 | default: 36 | replicas: 1 37 | --- 38 | apiVersion: zookeeper.stackable.tech/v1alpha1 39 | kind: ZookeeperZnode 40 | metadata: 41 | name: hdfs-znode 42 | spec: 43 | clusterRef: 44 | name: zookeeper 45 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/07-assert.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: postgresql 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/07-install-postgres.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | timeout: 300 5 | commands: 6 | - script: >- 7 | helm upgrade postgresql 8 | --install 9 | --version=12.5.6 10 | --namespace $NAMESPACE 11 | -f helm-bitnami-postgresql-values.yaml 12 | --repo https://charts.bitnami.com/bitnami postgresql 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/08-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 900 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: hive-metastore-default 10 | status: 11 | readyReplicas: 2 12 | replicas: 2 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/08-install-hive.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: hive.stackable.tech/v1alpha1 3 | kind: HiveCluster 4 | metadata: 5 | name: hive 6 | spec: 7 | image: 8 | productVersion: "{{ test_scenario['values']['hive'] }}" 9 | pullPolicy: IfNotPresent 10 | clusterConfig: 11 | database: 12 | connString: jdbc:postgresql://postgresql:5432/hive 13 | credentialsSecret: postgres-credentials 14 | dbType: postgres 15 | hdfs: 16 | configMap: hdfs 17 | s3: 18 | reference: aws-s3 19 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 20 | vectorAggregatorConfigMapName: vector-aggregator-discovery 21 | {% endif %} 22 | metastore: 23 | config: 24 | logging: 25 | enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 26 | roleGroups: 27 | default: 28 | replicas: 2 29 | --- 30 | apiVersion: v1 31 | kind: Secret 32 | metadata: 33 | name: postgres-credentials 34 | type: Opaque 35 | stringData: 36 | username: hive 37 | password: hive 38 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/09-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | commands: 6 | - script: kubectl -n $NAMESPACE rollout status daemonset opa-server-default --timeout 300s 7 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/09-install-opa.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: opa.stackable.tech/v1alpha1 3 | kind: OpaCluster 4 | metadata: 5 | name: opa 6 | spec: 7 | image: 8 | productVersion: "{{ test_scenario['values']['opa'] }}" 9 | pullPolicy: IfNotPresent 10 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 11 | clusterConfig: 12 | vectorAggregatorConfigMapName: vector-aggregator-discovery 13 | {% endif %} 14 | servers: 15 | roleGroups: 16 | default: {} 17 | --- 18 | apiVersion: v1 19 | kind: ConfigMap 20 | metadata: 21 | name: simple-trino-opa-bundle 22 | 
labels: 23 | opa.stackable.tech/bundle: "trino" 24 | data: 25 | trino.rego: | 26 | package trino 27 | 28 | default allow = false 29 | 30 | allow if { 31 | is_admin 32 | } 33 | batch contains i if { 34 | some i 35 | input.action.filterResources[i] 36 | is_admin 37 | } 38 | 39 | allow if { 40 | input.action.operation in ["ExecuteQuery", "AccessCatalog"] 41 | is_bob 42 | } 43 | batch contains i if { 44 | input.action.operation in ["FilterCatalogs"] 45 | some i 46 | input.action.filterResources[i] 47 | is_bob 48 | } 49 | 50 | is_admin() if { 51 | input.context.identity.user == "admin" 52 | } 53 | 54 | is_bob() if { 55 | input.context.identity.user == "bob" 56 | } 57 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/10-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 720 5 | commands: 6 | - script: kubectl -n $NAMESPACE wait --for=condition=available=true trinoclusters.trino.stackable.tech/trino --timeout 301s 7 | --- 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | metadata: 11 | name: trino-coordinator-default 12 | spec: 13 | template: 14 | spec: 15 | terminationGracePeriodSeconds: 900 16 | status: 17 | readyReplicas: 1 18 | replicas: 1 19 | --- 20 | apiVersion: apps/v1 21 | kind: StatefulSet 22 | metadata: 23 | name: trino-worker-default 24 | spec: 25 | template: 26 | spec: 27 | terminationGracePeriodSeconds: 75 # 5s gracefulShutdownTimeout + 2 x 30s grace period + 10s safety buffer 28 | status: 29 | readyReplicas: 1 30 | replicas: 1 31 | --- 32 | apiVersion: policy/v1 33 | kind: PodDisruptionBudget 34 | metadata: 35 | name: trino-coordinator 36 | status: 37 | expectedPods: 1 38 | currentHealthy: 1 39 | disruptionsAllowed: 1 40 | --- 41 | apiVersion: policy/v1 42 | kind: PodDisruptionBudget 43 | metadata: 44 | name: trino-worker 45 | status: 46 | expectedPods: 1 47 | currentHealthy: 1 48 | disruptionsAllowed: 1 49 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/11-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 600 5 | commands: 6 | # 7 | # Test envOverrides 8 | # 9 | - script: | 10 | kubectl -n $NAMESPACE get sts trino-coordinator-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "trino") | .env[] | select (.name == "COMMON_VAR" and .value == "group-value")' 11 | kubectl -n $NAMESPACE get sts trino-coordinator-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "trino") | .env[] | select (.name == "GROUP_VAR" and .value == "group-value")' 12 | kubectl -n $NAMESPACE get sts trino-coordinator-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "trino") | .env[] | select (.name == "ROLE_VAR" and .value == "role-value")' 13 | 14 | kubectl -n $NAMESPACE get sts trino-worker-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "trino") | .env[] | select (.name == "COMMON_VAR" and .value == "group-value")' 15 | kubectl -n $NAMESPACE get sts trino-worker-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name == "trino") | .env[] | select (.name == "GROUP_VAR" and .value == "group-value")' 16 | kubectl -n $NAMESPACE get sts trino-worker-default -o yaml | yq -e '.spec.template.spec.containers[] | select (.name 
== "trino") | .env[] | select (.name == "ROLE_VAR" and .value == "role-value")' 17 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/20-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: trino-test-helper 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/20-install-check.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: trino-test-helper 6 | labels: 7 | app: trino-test-helper 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: trino-test-helper 13 | template: 14 | metadata: 15 | labels: 16 | app: trino-test-helper 17 | spec: 18 | # serviceAccount: integration-tests-sa 19 | containers: 20 | - name: trino-test-helper 21 | image: oci.stackable.tech/sdp/testing-tools:0.2.0-stackable0.0.0-dev 22 | command: ["sleep", "infinity"] 23 | resources: 24 | requests: 25 | cpu: "250m" 26 | memory: "64Mi" 27 | limits: 28 | cpu: "500m" 29 | memory: "64Mi" 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/21-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | commands: 6 | - script: kubectl exec -n $NAMESPACE trino-test-helper-0 -- python /tmp/check-active-workers.py -u admin -p admin -n $NAMESPACE -w 1 7 | - script: kubectl exec -n $NAMESPACE trino-test-helper-0 -- python /tmp/check-opa.py -n $NAMESPACE 8 | - script: kubectl exec -n $NAMESPACE trino-test-helper-0 -- python /tmp/check-s3.py -n $NAMESPACE 9 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/21-copy-scripts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: kubectl cp -n $NAMESPACE ./check-active-workers.py trino-test-helper-0:/tmp || true 6 | - script: kubectl cp -n $NAMESPACE ./check-opa.py trino-test-helper-0:/tmp || true 7 | - script: kubectl cp -n $NAMESPACE ./check-s3.py trino-test-helper-0:/tmp || true 8 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/README.md: -------------------------------------------------------------------------------- 1 | # Smoke test with AWS S3 2 | 3 | This is a variation to the plain smoke tests which use Minio. 4 | 5 | ## Setup a Bucket 6 | 7 | ...and load data into it: 8 | 9 | ```shell 10 | BUCKET_NAME="my-bucket" 11 | aws s3api create-bucket --bucket ${BUCKET_NAME} --region eu-central-1 --create-bucket-configuration LocationConstraint=eu-central-1 12 | aws s3 cp yellow_tripdata_2021-07.csv s3://${BUCKET_NAME}/taxi-data/ 13 | ``` 14 | 15 | You will need to update the bucket name in [check-s3.py](check-s3.py). 16 | 17 | ## Add AWS credentials 18 | 19 | The user or role that the access key belongs to needs to have read/write access to the S3 bucket. 
20 | 21 | Update [aws_secret.yaml](./aws_secret.yaml), and apply it to the cluster: 22 | 23 | ```shell 24 | kubectl apply -f aws_secret.yaml 25 | ``` 26 | 27 | ## Run the tests 28 | 29 | Add a new test definition to [test-definition.yaml](/tests/test-definition.yaml). 30 | 31 | ```yaml 32 | tests: 33 | - name: smoke_aws 34 | dimensions: 35 | - trino 36 | - hive 37 | - opa 38 | - hdfs 39 | - zookeeper 40 | - openshift 41 | ``` 42 | 43 | Then run a tests: 44 | 45 | ```sh 46 | ./scripts/run-tests --test smoke_aws 47 | ``` 48 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/aws_secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: aws-s3-credentials 6 | labels: 7 | secrets.stackable.tech/class: s3-credentials-class 8 | timeout: 240 9 | stringData: 10 | accessKey: AwsAccessKey 11 | secretKey: AwsAccessKey 12 | -------------------------------------------------------------------------------- /tests/templates/kuttl/smoke_aws/helm-bitnami-postgresql-values.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | volumePermissions: 3 | enabled: false 4 | securityContext: 5 | runAsUser: auto 6 | 7 | primary: 8 | extendedConfiguration: | 9 | password_encryption=md5 10 | podSecurityContext: 11 | {% if test_scenario['values']['openshift'] == 'true' %} 12 | enabled: false 13 | {% else %} 14 | enabled: true 15 | {% endif %} 16 | containerSecurityContext: 17 | enabled: false 18 | resources: 19 | requests: 20 | memory: "512Mi" 21 | cpu: "512m" 22 | limits: 23 | memory: "512Mi" 24 | cpu: "1" 25 | 26 | auth: 27 | username: hive 28 | password: hive 29 | database: hive 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/tls/00-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 5 | --- 6 | apiVersion: v1 7 | kind: ConfigMap 8 | metadata: 9 | name: vector-aggregator-discovery 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /tests/templates/kuttl/tls/00-install-vector-aggregator-discovery-configmap.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if lookup('env', 'VECTOR_AGGREGATOR') %} 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: vector-aggregator-discovery 7 | data: 8 | ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/tls/00-patch-ns.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if test_scenario['values']['openshift'] == 'true' %} 2 | # see https://github.com/stackabletech/issues/issues/566 3 | --- 4 | apiVersion: kuttl.dev/v1beta1 5 | kind: TestStep 6 | commands: 7 | - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' 8 | timeout: 120 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/tls/00-rbac.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Role 3 | apiVersion: 
rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: use-integration-tests-scc 6 | rules: 7 | {% if test_scenario['values']['openshift'] == "true" %} 8 | - apiGroups: ["security.openshift.io"] 9 | resources: ["securitycontextconstraints"] 10 | resourceNames: ["privileged"] 11 | verbs: ["use"] 12 | {% endif %} 13 | --- 14 | apiVersion: v1 15 | kind: ServiceAccount 16 | metadata: 17 | name: integration-tests-sa 18 | --- 19 | kind: RoleBinding 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | metadata: 22 | name: use-integration-tests-scc 23 | subjects: 24 | - kind: ServiceAccount 25 | name: integration-tests-sa 26 | roleRef: 27 | kind: Role 28 | name: use-integration-tests-scc 29 | apiGroup: rbac.authorization.k8s.io 30 | -------------------------------------------------------------------------------- /tests/templates/kuttl/tls/10-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 720 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: trino-coordinator-default 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | --- 14 | apiVersion: apps/v1 15 | kind: StatefulSet 16 | metadata: 17 | name: trino-worker-default 18 | status: 19 | readyReplicas: 1 20 | replicas: 1 21 | --- 22 | apiVersion: v1 23 | kind: Service 24 | metadata: 25 | name: trino-coordinator 26 | spec: 27 | ports: 28 | - name: metrics 29 | port: 8081 30 | protocol: TCP 31 | targetPort: 8081 32 | {% if test_scenario['values']['use-tls'] == 'false' %} 33 | - name: http 34 | port: 8080 35 | protocol: TCP 36 | targetPort: 8080 37 | {% else %} 38 | - name: https 39 | port: 8443 40 | protocol: TCP 41 | targetPort: 8443 42 | {% endif %} 43 | -------------------------------------------------------------------------------- /tests/templates/kuttl/tls/20-assert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | --- 6 | apiVersion: apps/v1 7 | kind: StatefulSet 8 | metadata: 9 | name: trino-test-helper 10 | status: 11 | readyReplicas: 1 12 | replicas: 1 13 | -------------------------------------------------------------------------------- /tests/templates/kuttl/tls/20-install-check.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: trino-test-helper 6 | labels: 7 | app: trino-test-helper 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: trino-test-helper 13 | template: 14 | metadata: 15 | labels: 16 | app: trino-test-helper 17 | spec: 18 | serviceAccount: integration-tests-sa 19 | securityContext: 20 | runAsUser: 1000 21 | runAsGroup: 1000 22 | fsGroup: 1000 23 | containers: 24 | - name: trino-test-helper 25 | image: oci.stackable.tech/sdp/testing-tools:0.2.0-stackable0.0.0-dev 26 | command: ["sleep", "infinity"] 27 | {% if test_scenario['values']['use-tls'] == 'true' %} 28 | volumeMounts: 29 | - mountPath: "/stackable/trusted" 30 | name: server-tls-mount 31 | volumes: 32 | - name: server-tls-mount 33 | ephemeral: 34 | volumeClaimTemplate: 35 | metadata: 36 | annotations: 37 | secrets.stackable.tech/class: trino-tls 38 | secrets.stackable.tech/scope: pod,node 39 | spec: 40 | accessModes: ["ReadWriteOnce"] 41 | resources: 42 | requests: 43 | storage: "1" 44 | storageClassName: secrets.stackable.tech 45 | {% endif %} 46 | 
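When the `use-tls` dimension is enabled, the ephemeral volume above is provisioned by the secret-operator for the `trino-tls` SecretClass and mounted into the test helper at `/stackable/trusted`. As a rough sketch of what the TLS verification boils down to (assuming the mounted volume contains a `ca.crt` and that `curl` is available in the testing-tools image; the actual checks live in `check-tls.py`, copied in the next step):

```shell
# Reach the coordinator's HTTPS port using the CA material mounted by the secret-operator
kubectl exec -n $NAMESPACE trino-test-helper-0 -- \
  curl --fail --cacert /stackable/trusted/ca.crt \
  https://trino-coordinator.$NAMESPACE.svc.cluster.local:8443/v1/info
```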
-------------------------------------------------------------------------------- /tests/templates/kuttl/tls/21-assert.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | timeout: 300 5 | commands: 6 | - script: kubectl exec -n $NAMESPACE trino-test-helper-0 -- python /tmp/check-tls.py -n $NAMESPACE 7 | {% if test_scenario['values']['use-internal-tls'] == 'true' %} 8 | - script: kubectl get -n $NAMESPACE configmap trino-coordinator-default -o yaml | grep "internal-communication.shared-secret" 9 | - script: kubectl get -n $NAMESPACE configmap trino-coordinator-default -o yaml | grep "internal-communication.https.truststore.path" 10 | - script: kubectl get -n $NAMESPACE configmap trino-coordinator-default -o yaml | grep "internal-communication.https.truststore.key" 11 | {% endif %} 12 | -------------------------------------------------------------------------------- /tests/templates/kuttl/tls/21-install-requirements.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestStep 4 | commands: 5 | - script: kubectl cp -n $NAMESPACE ./check-tls.py trino-test-helper-0:/tmp || true 6 | - script: kubectl cp -n $NAMESPACE ./test-config.json trino-test-helper-0:/tmp 7 | {% if test_scenario['values']['use-authentication'] == 'true' or test_scenario['values']['use-tls'] == 'true' %} 8 | - script: kubectl cp -n $NAMESPACE ./untrusted-cert.crt trino-test-helper-0:/stackable/untrusted-cert.crt || true 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/tls/test-config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | {% if test_scenario['values']['use-authentication'] == 'true' and test_scenario['values']['use-tls'] == 'true' %} 3 | "useAuthentication": true, 4 | {% else %} 5 | "useAuthentication": false, 6 | {% endif %} 7 | "useTls": {{ test_scenario['values']['use-tls'] }}, 8 | "useInternalTls": {{ test_scenario['values']['use-internal-tls'] }} 9 | } 10 | -------------------------------------------------------------------------------- /tests/templates/kuttl/tls/untrusted-cert.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIID2TCCAsGgAwIBAgIUBIxXEL+7zEuWUcO0r4nPvYFi6xAwDQYJKoZIhvcNAQEL 3 | BQAwezELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxlc3dpZy1Ib2xzdGVpbjEO 4 | MAwGA1UEBwwFV2VkZWwxKDAmBgNVBAoMH1N0YWNrYWJsZSBTaWduaW5nIEF1dGhv 5 | cml0eSBJbmMxFTATBgNVBAMMDHN0YWNrYWJsZS5kZTAgFw0yMjA2MjgxNDI1NDJa 6 | GA8yMTIyMDYwNDE0MjU0MlowezELMAkGA1UEBhMCREUxGzAZBgNVBAgMElNjaGxl 7 | c3dpZy1Ib2xzdGVpbjEOMAwGA1UEBwwFV2VkZWwxKDAmBgNVBAoMH1N0YWNrYWJs 8 | ZSBTaWduaW5nIEF1dGhvcml0eSBJbmMxFTATBgNVBAMMDHN0YWNrYWJsZS5kZTCC 9 | ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALOBnDXGhMt8QV9TZsArVVtP 10 | wp9MxM0DFtUIFc7sbL7WcpkIWkjDZ78L6+fCUnpjeInaFefGsTMBt66daPdZ/grI 11 | 37hRnw/Fd06CcRoqROMivQEYz5xuQtalAVdqteMVPR6pS1g19J0s09ZD3LuJrICe 12 | sW4MzsyGaoz/zKSp6+8JDJKCB3qXIAFWQOCa3oOmoSe86TtN4MSuxWvKqOmUeA65 13 | vwj8DJrYq3sw1291OtCHW+Hoyiai2pp0ofaSajA1gsASa+wrXwqU8cyAOKk0N5Xs 14 | lyewdwHBCAka87FDCRMUI9FgjsDk/kzY/Hw/PKMuGFPt4hpIvX0zE+aTuPhyOrEC 15 | AwEAAaNTMFEwHQYDVR0OBBYEFNDTcQYOC8ULaK5GWVeqJllckTc8MB8GA1UdIwQY 16 | MBaAFNDTcQYOC8ULaK5GWVeqJllckTc8MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI 17 | hvcNAQELBQADggEBAHLzyAQKrbDBQNXX2smWlqX/2JAWM0xSCUGhlgCQITjdrzcv 18 | 
q9g0h/U6RoEEJppaFi5f4ReoqNtMa+eMvmq+Nt8Xt7c1+gJ0fQn08vok8buqiFtI 19 | BSDpwVs65D98DMThQXksGToScOIhFJU8vpUtt79CmbukGyw/uc49rBqejb4xHTPi 20 | srWPJIQkfpPmFAPkjXWsaX7rvXsGABOdfp/qMM3e2X4aO5owe8AihqmKE3XvhmeC 21 | v045+nIoFLz4mfGHiuaWK4Rpwu9HL3jHDdE4Qyn1ZEwvtQD7zE05sdUslv/zJ8gR 22 | jzo+8memTnv8W2/QfmLVnes2TKW3kVjn2YPtNRE= 23 | -----END CERTIFICATE----- 24 | --------------------------------------------------------------------------------