├── .dockerignore ├── .github ├── pull_request_template.md ├── stale.yml └── workflows │ └── docker-image.yml ├── .gitignore ├── ADOPTERS.md ├── Dockerfile ├── LICENSE ├── MAINTAINERS.md ├── Makefile ├── PROJECT ├── README.md ├── apis └── druid │ └── v1alpha1 │ ├── doc.go │ ├── druid_types.go │ ├── druidingestion_types.go │ ├── groupversion_info.go │ └── zz_generated.deepcopy.go ├── chart ├── .helmignore ├── Chart.yaml ├── crds │ ├── druid.apache.org_druidingestions.yaml │ └── druid.apache.org_druids.yaml ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── deployment.yaml │ ├── rbac_leader_election.yaml │ ├── rbac_manager.yaml │ ├── rbac_metrics.yaml │ ├── rbac_proxy.yaml │ ├── service.yaml │ └── service_account.yaml └── values.yaml ├── config ├── crd │ ├── bases │ │ ├── druid.apache.org_druidingestions.yaml │ │ └── druid.apache.org_druids.yaml │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_druid_druidingestions.yaml │ │ ├── cainjection_in_druids.yaml │ │ ├── webhook_in_druid_druidingestions.yaml │ │ └── webhook_in_druids.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ └── manager_config_patch.yaml ├── druid.apache.org_druids.yaml ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── druid_druidingestion_editor_role.yaml │ ├── druid_druidingestion_viewer_role.yaml │ ├── druid_editor_role.yaml │ ├── druid_viewer_role.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── role.yaml │ ├── role_binding.yaml │ └── service_account.yaml └── samples │ ├── druid_v1alpha1_druid.yaml │ └── druid_v1alpha1_druidingestion.yaml ├── controllers ├── druid │ ├── additional_containers.go │ ├── additional_containers_test.go │ ├── configuration.go │ ├── deep_storage_dep_mgmt.go │ ├── druid_controller.go │ ├── druid_controller_test.go │ ├── dynamic_config.go │ ├── ext │ │ ├── deep_storage_default_ext.go │ │ ├── metadata_store_default_ext.go │ │ └── zookeeper_default_ext.go │ ├── finalizers.go │ ├── finalizers_test.go │ ├── handler.go │ ├── handler_test.go │ ├── interface.go │ ├── metadata_store_dep_mgmt.go │ ├── ordering.go │ ├── ordering_test.go │ ├── predicates.go │ ├── status.go │ ├── suite_test.go │ ├── testdata │ │ ├── additional-containers.yaml │ │ ├── broker-config-map.yaml │ │ ├── broker-deployment.yaml │ │ ├── broker-headless-service.yaml │ │ ├── broker-load-balancer-service.yaml │ │ ├── broker-pod-disruption-budget.yaml │ │ ├── broker-statefulset-noprobe.yaml │ │ ├── broker-statefulset-sidecar.yaml │ │ ├── broker-statefulset.yaml │ │ ├── common-config-map.yaml │ │ ├── druid-smoke-test-cluster.yaml │ │ ├── druid-test-cr-noprobe.yaml │ │ ├── druid-test-cr-sidecar.yaml │ │ ├── druid-test-cr.yaml │ │ ├── finalizers.yaml │ │ ├── ordering.yaml │ │ └── volume-expansion.yaml │ ├── types.go │ ├── util.go │ ├── util_test.go │ ├── volume_expansion.go │ ├── volume_expansion_test.go │ ├── zookeeper_dep_mgmt.go │ └── zookeeper_dep_mgmt_test.go └── ingestion │ ├── ingestion_controller.go │ ├── reconciler.go │ └── reconciler_test.go ├── docs ├── README.md ├── api_specifications │ └── druid.md ├── dev_doc.md ├── druid_cr.md ├── examples.md ├── features.md ├── getting_started.md ├── images │ └── druid-operator.png └── kubebuilder_v3_migration.md ├── e2e ├── Dockerfile-testpod ├── 
configs │ ├── druid-cr.yaml │ ├── druid-ingestion-cr.yaml │ ├── druid-mmless.yaml │ ├── extra-common-config.yaml │ ├── kafka-ingestion-native.yaml │ ├── kafka-ingestion.yaml │ ├── minio-operator-override.yaml │ └── minio-tenant-override.yaml ├── druid-ingestion-test.sh ├── e2e.sh ├── kind.sh ├── monitor-task.sh ├── test-extra-common-config.sh └── wikipedia-test.sh ├── examples ├── aws │ └── eks.yaml ├── ingestion.yaml ├── kafka-ingestion-native.yaml ├── kafka-ingestion.yaml ├── tiny-cluster-hpa.yaml ├── tiny-cluster-mmless.yaml ├── tiny-cluster-zk.yaml └── tiny-cluster.yaml ├── go.mod ├── go.sum ├── hack ├── api-docs │ ├── config.json │ └── template │ │ ├── members.tpl │ │ ├── pkg.tpl │ │ └── type.tpl └── boilerplate.go.txt ├── main.go ├── pkg ├── druidapi │ ├── druidapi.go │ └── druidapi_test.go ├── http │ └── http.go └── util │ ├── util.go │ └── util_test.go └── tutorials └── druid-on-kind ├── README.md └── druid-mmless.yaml /.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore build and test binaries. 3 | bin/ 4 | testbin/ 5 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | Fixes #XXXX. 4 | 5 | 6 | 7 | ### Description 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 |
16 | 17 | This PR has: 18 | - [ ] been tested on a real K8S cluster to ensure creation of a brand new Druid cluster works. 19 | - [ ] been tested for backward compatibility on a real K8S cluster by applying the changes introduced here on an existing Druid cluster. If there are any backward incompatible changes then they have been noted in the PR description. 20 | - [ ] added comments explaining the "why" and the intent of the code wherever it would not be obvious to an unfamiliar reader. 21 | - [ ] added documentation for new or modified features or behaviors. 22 |
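For reference, the checks that CI runs for this repository (see `.github/workflows/docker-image.yml`) can usually be reproduced locally with the Makefile targets below before opening the PR; this is a suggested sequence, not an exhaustive list:

```sh
# Unit and Kubebuilder smoke tests
make test
# Lint and render the Helm chart
make helm-lint
make helm-template
# End-to-end tests
make e2e
```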
24 | 25 | ##### Key changed/added files in this PR 26 | * `MyFoo` 27 | * `OurBar` 28 | * `TheirBaz` 29 | -------------------------------------------------------------------------------- /.github/stale.yml: -------------------------------------------------------------------------------- 1 | # Configuration for probot-stale - https://github.com/probot/stale 2 | 3 | exemptMilestones: true 4 | exemptProjects: true 5 | exemptAssignees: true 6 | 7 | # Label applied when closing 8 | staleLabel: stale 9 | 10 | # Configuration settings that are specific to just 'issues' or 'pulls': 11 | pulls: 12 | daysUntilStale: 60 13 | daysUntilClose: 28 14 | markComment: > 15 | This pull request has been marked as stale due to 60 days of inactivity. 16 | It will be closed in 4 weeks if no further activity occurs. If you think 17 | that's incorrect or this pull request should instead be reviewed, please simply 18 | write any comment. Even if closed, you can still revive the PR at any time or 19 | discuss it. 20 | Thank you for your contributions. 21 | unmarkComment: > 22 | This pull request/issue is no longer marked as stale. 23 | closeComment: > 24 | This pull request/issue has been closed due to lack of activity. If you think that 25 | is incorrect, or the pull request requires review, you can revive the PR at any time. 26 | 27 | # Unlike for issues, there are no exempt labels for PRs, apart from "Evergreen". This is 28 | # to foster PR authors to complete their work. 29 | 30 | issues: 31 | daysUntilStale: 60 32 | daysUntilClose: 28 33 | markComment: > 34 | This issue has been marked as stale due to 60 days of inactivity. 35 | It will be closed in 4 weeks if no further activity occurs. If this issue is still 36 | relevant, please simply write any comment. Even if closed, you can still revive the 37 | issue at any time or discuss it. 38 | Thank you for your contributions. 39 | unmarkComment: > 40 | This issue is no longer marked as stale. 41 | closeComment: > 42 | This issue has been closed due to lack of activity. If you think that 43 | is incorrect, or the issue requires additional review, you can revive the issue at 44 | any time. 
45 | -------------------------------------------------------------------------------- /.github/workflows/docker-image.yml: -------------------------------------------------------------------------------- 1 | name: Druid Operator 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | 11 | tests: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v2 15 | - name: Run Kubebuilder smoke and unit tests 16 | run: make test 17 | - name: Run helm lint 18 | run: make helm-lint 19 | - name: Run helm template 20 | run: make helm-template 21 | - name: Run e2e tests 22 | run: make e2e 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Temporary Build Files 2 | build/_output 3 | build/_test 4 | bin 5 | # Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 6 | ### Emacs ### 7 | # -*- mode: gitignore; -*- 8 | *~ 9 | \#*\# 10 | /.emacs.desktop 11 | /.emacs.desktop.lock 12 | *.elc 13 | auto-save-list 14 | tramp 15 | .\#* 16 | # Org-mode 17 | .org-id-locations 18 | *_archive 19 | # flymake-mode 20 | *_flymake.* 21 | # eshell files 22 | /eshell/history 23 | /eshell/lastdir 24 | # elpa packages 25 | /elpa/ 26 | # reftex files 27 | *.rel 28 | # AUCTeX auto folder 29 | /auto/ 30 | # cask packages 31 | .cask/ 32 | dist/ 33 | # Flycheck 34 | flycheck_*.el 35 | # server auth directory 36 | /server/ 37 | # projectiles files 38 | .projectile 39 | projectile-bookmarks.eld 40 | # directory configuration 41 | .dir-locals.el 42 | # saveplace 43 | places 44 | # url cache 45 | url/cache/ 46 | # cedet 47 | ede-projects.el 48 | # smex 49 | smex-items 50 | # company-statistics 51 | company-statistics-cache.el 52 | # anaconda-mode 53 | anaconda-mode/ 54 | ### Go ### 55 | # Binaries for programs and plugins 56 | *.exe 57 | *.exe~ 58 | *.dll 59 | *.so 60 | *.dylib 61 | # Test binary, build with 'go test -c' 62 | *.test 63 | # Output of the go coverage tool, specifically when used with LiteIDE 64 | *.out 65 | ### Vim ### 66 | # swap 67 | .sw[a-p] 68 | .*.sw[a-p] 69 | # session 70 | Session.vim 71 | # temporary 72 | .netrwhist 73 | # auto-generated tag files 74 | tags 75 | ### VisualStudioCode ### 76 | .vscode/* 77 | .history 78 | .idea* 79 | .idea/* 80 | # End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 81 | -------------------------------------------------------------------------------- /ADOPTERS.md: -------------------------------------------------------------------------------- 1 | # Druid Operator Adopters 2 | 3 | This is a list of production adopters of Druid Operator: 4 | 5 | | Company | Industry | 6 | | :--- |:----------------------------------| 7 | |[Dailymotion](https://dailymotion.com/)| Video streaming, Adtech | 8 | |[AppsFlyer](https://www.appsflyer.com/)| Mobile Marketing Software, Adtech | 9 | 10 | Open Source Solutions based on Druid Operator: 11 | | Company | Industry | 12 | | :--- |:----------------------------------| 13 | |[AWS](https://github.com/aws-solutions/scalable-analytics-using-apache-druid-on-aws)| 14 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.21 as builder 3 | ARG TARGETOS 4 | ARG TARGETARCH 5 | 6 | WORKDIR /workspace 7 | # Copy the Go Modules manifests 8 | COPY go.mod go.mod 9 | COPY 
go.sum go.sum 10 | # cache deps before building and copying source so that we don't need to re-download as much 11 | # and so that source changes don't invalidate our downloaded layer 12 | RUN go mod download 13 | 14 | # Copy the go source 15 | COPY main.go main.go 16 | COPY apis/ apis/ 17 | COPY controllers/ controllers/ 18 | COPY pkg/ pkg/ 19 | 20 | # Build 21 | # GOARCH has no default value so that the binary is built for the host where the command 22 | # was called. For example, if we call make docker-build in a local env on Apple Silicon (M1), 23 | # the docker BUILDPLATFORM arg will be linux/arm64, while for Apple x86 it will be linux/amd64. Therefore, 24 | # by leaving it empty we ensure that the container and the binary shipped in it have the same platform. 25 | RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager main.go 26 | 27 | # Use distroless as minimal base image to package the manager binary 28 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 29 | FROM gcr.io/distroless/static:nonroot 30 | WORKDIR / 31 | COPY --from=builder /workspace/manager . 32 | USER 65532:65532 33 | 34 | ENTRYPOINT ["/manager"] 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2019 Splunk, Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | --- 15 | Copyright 2023 Cloudnatively Services, Pvt Ltd 16 | 17 | Licensed under the Apache License, Version 2.0 (the "License"); 18 | you may not use this file except in compliance with the License. 19 | You may obtain a copy of the License at 20 | 21 | http://www.apache.org/licenses/LICENSE-2.0 22 | 23 | Unless required by applicable law or agreed to in writing, software 24 | distributed under the License is distributed on an "AS IS" BASIS, 25 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 26 | See the License for the specific language governing permissions and 27 | limitations under the License. -------------------------------------------------------------------------------- /MAINTAINERS.md: -------------------------------------------------------------------------------- 1 | # Maintainers for the druid operator project. 2 | 3 | - [Adheip Singh](https://github.com/AdheipSingh) 4 | - [TessaIO](https://github.com/TessaIO) 5 | 6 | # Collaborators for the druid operator project 7 | 8 | - [Itamar Marom](https://github.com/itamar-marom) 9 | - [Cyril Corbon](https://github.com/cyril-corbon) 10 | - [Avtar Singh](https://github.com/avtarOPS) 11 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | # Code generated by tool. DO NOT EDIT. 2 | # This file is used to track the info used to scaffold your project 3 | # and allow the plugins to work properly.
4 | # More info: https://book.kubebuilder.io/reference/project-config.html 5 | domain: apache.org 6 | layout: 7 | - go.kubebuilder.io/v3 8 | multigroup: true 9 | projectName: druid-operator 10 | repo: github.com/datainfrahq/druid-operator 11 | resources: 12 | - api: 13 | crdVersion: v1 14 | namespaced: true 15 | controller: true 16 | domain: apache.org 17 | group: druid 18 | kind: Druid 19 | path: github.com/datainfrahq/druid-operator/apis/druid/v1alpha1 20 | version: v1alpha1 21 | - api: 22 | crdVersion: v1 23 | namespaced: true 24 | controller: true 25 | domain: apache.org 26 | group: druid 27 | kind: DruidIngestion 28 | path: github.com/datainfrahq/druid-operator/apis/druid/v1alpha1 29 | version: v1alpha1 30 | version: "3" 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | 3 | DataInfra Logo 4 | 5 |
6 | Kubernetes Operator For Apache Druid 7 |

8 | 9 |
10 | 11 | ![Build Status](https://github.com/datainfrahq/druid-operator/actions/workflows/docker-image.yml/badge.svg) ![Docker pull](https://img.shields.io/docker/pulls/datainfrahq/druid-operator.svg) [![Latest Version](https://img.shields.io/github/tag/datainfrahq/druid-operator)](https://github.com/datainfrahq/druid-operator/releases) [![Slack](https://img.shields.io/badge/slack-brightgreen.svg?logo=slack&label=Community&style=flat&color=%2373DC8C&)](https://kubernetes.slack.com/archives/C04F4M6HT2L) 12 | 13 |
14 | 15 | - Druid Operator provisions and manages [Apache Druid](https://druid.apache.org/) clusters on Kubernetes. 16 | - Druid Operator is designed to provision and manage [Apache Druid](https://druid.apache.org/) in distributed mode only. 17 | - It is built in Golang using [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder). 18 | - Refer to the [Documentation](./docs/README.md) to get started. 19 | - Feel free to join the Kubernetes Slack and the [druid-operator](https://kubernetes.slack.com/archives/C04F4M6HT2L) channel. 20 | 21 | ### Newsletter - Monthly updates on running Druid on Kubernetes 22 | - [Apache Druid on Kubernetes](https://druidonk8s.substack.com/) 23 | 24 | ### Talks and Blogs on Druid Operator 25 | 26 | - [Druid Summit 2023](https://druidsummit.org/agenda?agendaPath=session/1256850) 27 | - [Dok Community](https://www.youtube.com/live/X4A3lWJRGHk?feature=share) 28 | - [Druid Summit](https://youtu.be/UqPrttXRBDg) 29 | - [Druid Operator Blog](https://www.cloudnatively.com/apache-druid-on-kubernetes/) 30 | - [Druid On K8s Without ZK](https://youtu.be/TRYOvkz5Wuw) 31 | - [Building Apache Druid on Kubernetes: How Dailymotion Serves Partner Data](https://youtu.be/FYFq-tGJOQk) 32 | 33 | ### Supported CRs 34 | 35 | - The operator supports CRs of type ```Druid``` and ```DruidIngestion```. 36 | - The ```Druid``` and ```DruidIngestion``` CRs belong to the API group ```druid.apache.org``` and version ```v1alpha1```. 37 | 38 | ### Druid Operator Architecture 39 | 40 | ![Druid Operator](docs/images/druid-operator.png?raw=true "Druid Operator") 41 | 42 | ### Notifications 43 | 44 | - The project moved to Kubebuilder v3, which requires a [manual change](docs/kubebuilder_v3_migration.md) in the operator. 45 | - Users are encouraged to use operator version 0.0.9+. 46 | - The operator has moved from HPA apiVersion autoscaling/v2beta1 to autoscaling/v2. Users will need to update their HPA specs to the v2 API in order to work with the latest druid-operator release. 47 | - druid-operator has moved from Ingress apiVersion networking/v1beta1 to networking/v1. Users will need to update the Ingress spec in the Druid CR to the networking/v1 syntax. If a schema-validated CRD is used, the CRD will also need to be updated. 48 | - The v1.0.0 release of druid-operator is compatible with k8s version 1.25. The HPA API is kept at version v2beta2. 49 | - Release v1.2.2 had a bug for namespace-scoped operator deployments; this is fixed in v1.2.3. 50 | 51 | ### Kubernetes version compatibility 52 | 53 | | druid-operator | 0.0.9 | v1.0.0 | v1.1.0 | v1.2.2 | v1.2.3 | v1.2.4 | v1.2.5 | v1.3.0 | 54 | | :------------- | :-------------: | :-----: | :---: | :---: | :---: | :---: | :---: | :---: | 55 | | kubernetes <= 1.20 | :x:| :x: | :x: | :x: | :x: | :x: | :x: | :x: | 56 | | kubernetes == 1.21 | :white_check_mark:| :x: | :x: | :x: | :x: | :x: | :x: | :x: | 57 | | kubernetes >= 1.22 and <= 1.25 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | 58 | | kubernetes > 1.25 and <= 1.30.1 | :x: | :x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | 59 | 60 | ### Contributors 61 | 62 | 63 | 64 | ### Note 65 | Apache®, [Apache Druid, Druid®](https://druid.apache.org/) are either registered trademarks or trademarks of the Apache Software Foundation in the United States and/or other countries.
This project, druid-operator, is not an Apache Software Foundation project. 66 | -------------------------------------------------------------------------------- /apis/druid/v1alpha1/doc.go: -------------------------------------------------------------------------------- 1 | // +kubebuilder:object:generate=true 2 | // +groupName=druid.apache.org 3 | package v1alpha1 4 | -------------------------------------------------------------------------------- /apis/druid/v1alpha1/druidingestion_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | DataInfra 2023 Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1alpha1 18 | 19 | import ( 20 | druidapi "github.com/datainfrahq/druid-operator/pkg/druidapi" 21 | v1 "k8s.io/api/core/v1" 22 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 23 | "k8s.io/apimachinery/pkg/runtime" 24 | ) 25 | 26 | type DruidIngestionMethod string 27 | 28 | const ( 29 | Kafka DruidIngestionMethod = "kafka" 30 | Kinesis DruidIngestionMethod = "kinesis" 31 | NativeBatchIndexParallel DruidIngestionMethod = "native-batch" 32 | QueryControllerSQL DruidIngestionMethod = "sql" 33 | HadoopIndexHadoop DruidIngestionMethod = "index-hadoop" 34 | ) 35 | 36 | type DruidIngestionSpec struct { 37 | // +optional 38 | Suspend bool `json:"suspend"` 39 | // +required 40 | DruidClusterName string `json:"druidCluster"` 41 | // +required 42 | Ingestion IngestionSpec `json:"ingestion"` 43 | // +optional 44 | Auth druidapi.Auth `json:"auth"` 45 | } 46 | 47 | type IngestionSpec struct { 48 | // +required 49 | Type DruidIngestionMethod `json:"type"` 50 | // +optional 51 | // Spec should be passed in as a JSON string. 52 | // Note: This field is planned for deprecation in favor of nativeSpec. 53 | Spec string `json:"spec,omitempty"` 54 | // +optional 55 | // nativeSpec allows the ingestion specification to be defined in a native Kubernetes format. 56 | // This is particularly useful for environment-specific configurations and will eventually 57 | // replace the JSON-based Spec field. 58 | // Note: Spec will be ignored if nativeSpec is provided. 59 | NativeSpec runtime.RawExtension `json:"nativeSpec,omitempty"` 60 | // +optional 61 | Compaction runtime.RawExtension `json:"compaction,omitempty"` 62 | // +optional 63 | Rules []runtime.RawExtension `json:"rules,omitempty"` 64 | } 65 | 66 | type DruidIngestionStatus struct { 67 | TaskId string `json:"taskId"` 68 | Type string `json:"type,omitempty"` 69 | Status v1.ConditionStatus `json:"status,omitempty"` 70 | Reason string `json:"reason,omitempty"` 71 | Message string `json:"message,omitempty"` 72 | LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` 73 | // CurrentIngestionSpec is a string instead of RawExtension to maintain compatibility with existing 74 | // IngestionSpecs that are stored as JSON strings. 
75 | CurrentIngestionSpec string `json:"currentIngestionSpec.json"` 76 | CurrentRules []runtime.RawExtension `json:"rules,omitempty"` 77 | } 78 | 79 | // +kubebuilder:object:root=true 80 | // +kubebuilder:subresource:status 81 | // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" 82 | // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.ingestionSpec.type" 83 | // Ingestion is the Schema for the Ingestion API 84 | type DruidIngestion struct { 85 | metav1.TypeMeta `json:",inline"` 86 | metav1.ObjectMeta `json:"metadata,omitempty"` 87 | 88 | Spec DruidIngestionSpec `json:"spec"` 89 | Status DruidIngestionStatus `json:"status,omitempty"` 90 | } 91 | 92 | // +kubebuilder:object:root=true 93 | // IngestionList contains a list of Ingestion 94 | type DruidIngestionList struct { 95 | metav1.TypeMeta `json:",inline"` 96 | metav1.ListMeta `json:"metadata,omitempty"` 97 | Items []DruidIngestion `json:"items"` 98 | } 99 | 100 | func init() { 101 | SchemeBuilder.Register(&DruidIngestion{}, &DruidIngestionList{}) 102 | } 103 | -------------------------------------------------------------------------------- /apis/druid/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | */ 4 | 5 | // Package v1alpha1 contains API Schema definitions for the druid v1alpha1 API group 6 | // +kubebuilder:object:generate=true 7 | // +groupName=druid.apache.org 8 | package v1alpha1 9 | 10 | import ( 11 | "k8s.io/apimachinery/pkg/runtime/schema" 12 | "sigs.k8s.io/controller-runtime/pkg/scheme" 13 | ) 14 | 15 | var ( 16 | // GroupVersion is group version used to register these objects 17 | GroupVersion = schema.GroupVersion{Group: "druid.apache.org", Version: "v1alpha1"} 18 | 19 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 20 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 21 | 22 | // AddToScheme adds the types in this group-version to the given scheme. 23 | AddToScheme = SchemeBuilder.AddToScheme 24 | ) 25 | -------------------------------------------------------------------------------- /chart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /chart/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: druid-operator 3 | description: Druid Kubernetes Operator 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. 
This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.3.9 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | appVersion: v1.3.0 24 | # icon 25 | icon: "https://www.apache.org/logos/res/druid/druid-1.png" 26 | -------------------------------------------------------------------------------- /chart/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Refer to https://github.com/datainfrahq/druid-operator/blob/master/docs/README.md to get started. 2 | -------------------------------------------------------------------------------- /chart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "druid-operator.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "druid-operator.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "druid-operator.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "druid-operator.labels" -}} 37 | helm.sh/chart: {{ include "druid-operator.chart" . }} 38 | {{ include "druid-operator.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "druid-operator.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "druid-operator.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | -------------------------------------------------------------------------------- /chart/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | {{- include "druid-operator.labels" . | nindent 4 }} 7 | app.kubernetes.io/component: manager 8 | app.kubernetes.io/created-by: druid-operator 9 | app.kubernetes.io/part-of: druid-operator 10 | control-plane: controller-manager 11 | name: {{ include "druid-operator.fullname" . 
}} 12 | namespace: {{ .Release.Namespace }} 13 | spec: 14 | replicas: {{ .Values.replicaCount }} 15 | selector: 16 | matchLabels: 17 | {{- include "druid-operator.selectorLabels" . | nindent 6 }} 18 | control-plane: controller-manager 19 | template: 20 | metadata: 21 | {{- with .Values.podAnnotations }} 22 | annotations: 23 | {{- toYaml . | nindent 8 }} 24 | {{- end }} 25 | labels: 26 | {{- include "druid-operator.selectorLabels" . | nindent 8 }} 27 | {{- with .Values.podLabels }} 28 | {{ toYaml . | nindent 8 }} 29 | {{- end }} 30 | control-plane: controller-manager 31 | spec: 32 | {{- with .Values.nodeSelector }} 33 | nodeSelector: 34 | {{- toYaml . | nindent 8 }} 35 | {{- end }} 36 | {{- with .Values.affinity }} 37 | affinity: 38 | {{- toYaml . | nindent 8 }} 39 | {{- end }} 40 | {{- with .Values.tolerations }} 41 | tolerations: 42 | {{- toYaml . | nindent 8 }} 43 | {{- end }} 44 | {{- with .Values.imagePullSecrets }} 45 | imagePullSecrets: 46 | {{- toYaml . | nindent 8 }} 47 | {{- end }} 48 | containers: 49 | - args: 50 | - --secure-listen-address=0.0.0.0:8443 51 | - --upstream=http://127.0.0.1:8080/ 52 | - --logtostderr=true 53 | - --v=0 54 | image: "{{ .Values.kube_rbac_proxy.image.repository }}:{{ .Values.kube_rbac_proxy.image.tag }}" 55 | imagePullPolicy: {{ .Values.kube_rbac_proxy.image.pullPolicy }} 56 | name: kube-rbac-proxy 57 | ports: 58 | - containerPort: 8443 59 | name: https 60 | protocol: TCP 61 | resources: 62 | limits: 63 | cpu: 500m 64 | memory: 128Mi 65 | requests: 66 | cpu: 5m 67 | memory: 64Mi 68 | securityContext: 69 | allowPrivilegeEscalation: false 70 | capabilities: 71 | drop: 72 | - ALL 73 | - args: 74 | - --health-probe-bind-address=:8081 75 | - --metrics-bind-address=127.0.0.1:8080 76 | - --leader-elect 77 | env: 78 | {{- range $key, $value := .Values.env }} 79 | - name: {{ $key }} 80 | value: {{ tpl $value $ | quote }} 81 | {{- end }} 82 | - name: POD_NAME 83 | valueFrom: 84 | fieldRef: 85 | fieldPath: metadata.name 86 | command: 87 | - /manager 88 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 89 | imagePullPolicy: {{ .Values.image.pullPolicy }} 90 | {{- with .Values.livenessProbe }} 91 | livenessProbe: 92 | {{ toYaml . | nindent 12 }} 93 | {{- end }} 94 | name: manager 95 | {{- with .Values.readinessProbe }} 96 | readinessProbe: 97 | {{ toYaml . | nindent 12 }} 98 | {{- end }} 99 | resources: 100 | {{- toYaml .Values.resources | nindent 12 }} 101 | securityContext: 102 | {{- toYaml .Values.securityContext | nindent 12 }} 103 | securityContext: 104 | {{- toYaml .Values.podSecurityContext | nindent 8 }} 105 | serviceAccountName: {{ include "druid-operator.fullname" . }} 106 | terminationGracePeriodSeconds: 10 107 | -------------------------------------------------------------------------------- /chart/templates/rbac_leader_election.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | labels: 6 | {{- include "druid-operator.labels" . | nindent 4 }} 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: druid-operator 9 | app.kubernetes.io/part-of: druid-operator 10 | name: {{ include "druid-operator.fullname" . 
}}-leader-election-role 11 | namespace: {{ .Release.Namespace }} 12 | rules: 13 | - apiGroups: 14 | - "" 15 | resources: 16 | - configmaps 17 | verbs: 18 | - get 19 | - list 20 | - watch 21 | - create 22 | - update 23 | - patch 24 | - delete 25 | - apiGroups: 26 | - coordination.k8s.io 27 | resources: 28 | - leases 29 | verbs: 30 | - get 31 | - list 32 | - watch 33 | - create 34 | - update 35 | - patch 36 | - delete 37 | - apiGroups: 38 | - "" 39 | resources: 40 | - events 41 | verbs: 42 | - create 43 | - patch 44 | --- 45 | apiVersion: rbac.authorization.k8s.io/v1 46 | kind: RoleBinding 47 | metadata: 48 | labels: 49 | {{- include "druid-operator.labels" . | nindent 4 }} 50 | app.kubernetes.io/component: rbac 51 | app.kubernetes.io/created-by: druid-operator 52 | app.kubernetes.io/part-of: druid-operator 53 | name: {{ include "druid-operator.fullname" . }}-leader-election-rolebinding 54 | namespace: {{ .Release.Namespace }} 55 | roleRef: 56 | apiGroup: rbac.authorization.k8s.io 57 | kind: Role 58 | name: {{ include "druid-operator.fullname" . }}-leader-election-role 59 | subjects: 60 | - kind: ServiceAccount 61 | name: {{ include "druid-operator.fullname" . }} 62 | namespace: {{ .Release.Namespace }} -------------------------------------------------------------------------------- /chart/templates/rbac_metrics.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | {{- if .Values.global.createClusterRole }} 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | labels: 7 | {{- include "druid-operator.labels" . | nindent 4 }} 8 | app.kubernetes.io/component: kube-rbac-proxy 9 | app.kubernetes.io/created-by: druid-operator 10 | app.kubernetes.io/part-of: druid-operator 11 | name: {{ include "druid-operator.fullname" . }}-metrics-reader 12 | rules: 13 | - nonResourceURLs: 14 | - /metrics 15 | verbs: 16 | - get 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /chart/templates/rbac_proxy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | {{- if .Values.global.createClusterRole }} 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | labels: 7 | {{- include "druid-operator.labels" . | nindent 4 }} 8 | app.kubernetes.io/component: kube-rbac-proxy 9 | app.kubernetes.io/created-by: druid-operator 10 | app.kubernetes.io/part-of: druid-operator 11 | name: {{ include "druid-operator.fullname" . }}-proxy-role 12 | rules: 13 | - apiGroups: 14 | - authentication.k8s.io 15 | resources: 16 | - tokenreviews 17 | verbs: 18 | - create 19 | - apiGroups: 20 | - authorization.k8s.io 21 | resources: 22 | - subjectaccessreviews 23 | verbs: 24 | - create 25 | --- 26 | apiVersion: rbac.authorization.k8s.io/v1 27 | kind: ClusterRoleBinding 28 | metadata: 29 | labels: 30 | {{- include "druid-operator.labels" . | nindent 4 }} 31 | app.kubernetes.io/component: kube-rbac-proxy 32 | app.kubernetes.io/created-by: druid-operator 33 | app.kubernetes.io/part-of: druid-operator 34 | name: {{ include "druid-operator.fullname" . }}-proxy-rolebinding 35 | roleRef: 36 | apiGroup: rbac.authorization.k8s.io 37 | kind: ClusterRole 38 | name: {{ include "druid-operator.fullname" . }}-proxy-role 39 | subjects: 40 | - kind: ServiceAccount 41 | name: {{ include "druid-operator.fullname" . 
}} 42 | namespace: {{ .Release.Namespace }} 43 | {{- end }} 44 | -------------------------------------------------------------------------------- /chart/templates/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | labels: 6 | {{- include "druid-operator.labels" . | nindent 4 }} 7 | app.kubernetes.io/component: manager 8 | app.kubernetes.io/created-by: druid-operator 9 | app.kubernetes.io/part-of: druid-operator 10 | control-plane: controller-manager 11 | name: {{ include "druid-operator.fullname" . }}-metrics-service 12 | namespace: {{ .Release.Namespace }} 13 | spec: 14 | ports: 15 | - name: https 16 | port: 8443 17 | protocol: TCP 18 | targetPort: https 19 | selector: 20 | control-plane: controller-manager 21 | -------------------------------------------------------------------------------- /chart/templates/service_account.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | {{- include "druid-operator.labels" . | nindent 4 }} 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: druid-operator 9 | app.kubernetes.io/part-of: druid-operator 10 | name: {{ include "druid-operator.fullname" . }} 11 | namespace: {{ .Release.Namespace }} 12 | {{- with .Values.serviceAccount.annotations }} 13 | annotations: 14 | {{- toYaml . | nindent 4 }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /chart/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for druid-operator. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | global: 6 | createClusterRole: true 7 | 8 | env: 9 | DENY_LIST: "default,kube-system" # Comma-separated list of namespaces to ignore 10 | RECONCILE_WAIT: "10s" # Reconciliation delay 11 | WATCH_NAMESPACE: "" # Namespace to watch or empty string to watch all namespaces, To watch multiple namespaces add , into string. Ex: WATCH_NAMESPACE: "ns1,ns2,ns3" 12 | #MAX_CONCURRENT_RECONCILES:: "" # MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. 13 | 14 | replicaCount: 1 15 | 16 | image: 17 | repository: datainfrahq/druid-operator 18 | pullPolicy: IfNotPresent 19 | # Overrides the image tag whose default is the chart appVersion. 20 | tag: "" 21 | 22 | kube_rbac_proxy: 23 | image: 24 | repository: gcr.io/kubebuilder/kube-rbac-proxy 25 | pullPolicy: IfNotPresent 26 | tag: "v0.13.1" 27 | 28 | imagePullSecrets: [] 29 | nameOverride: "" 30 | fullnameOverride: "" 31 | 32 | livenessProbe: 33 | httpGet: 34 | path: /healthz 35 | port: 8081 36 | initialDelaySeconds: 15 37 | periodSeconds: 20 38 | 39 | readinessProbe: 40 | httpGet: 41 | path: /readyz 42 | port: 8081 43 | initialDelaySeconds: 5 44 | periodSeconds: 10 45 | 46 | serviceAccount: 47 | # Annotations to add to the service account 48 | annotations: 49 | kubectl.kubernetes.io/default-container: manager 50 | # The name of the service account to use. 
51 | # If not set and create is true, a name is generated using the fullname template 52 | name: "druid-operator-controller-manager" 53 | 54 | podAnnotations: {} 55 | 56 | podLabels: {} 57 | 58 | podSecurityContext: 59 | runAsNonRoot: true 60 | fsGroup: 65532 61 | runAsUser: 65532 62 | runAsGroup: 65532 63 | 64 | securityContext: 65 | allowPrivilegeEscalation: false 66 | capabilities: 67 | drop: 68 | - ALL 69 | 70 | resources: 71 | limits: 72 | cpu: 500m 73 | memory: 128Mi 74 | requests: 75 | cpu: 10m 76 | memory: 64Mi 77 | 78 | nodeSelector: {} 79 | 80 | tolerations: [] 81 | 82 | affinity: 83 | nodeAffinity: 84 | requiredDuringSchedulingIgnoredDuringExecution: 85 | nodeSelectorTerms: 86 | - matchExpressions: 87 | - key: kubernetes.io/arch 88 | operator: In 89 | values: 90 | - amd64 91 | - arm64 92 | - ppc64le 93 | - s390x 94 | - key: kubernetes.io/os 95 | operator: In 96 | values: 97 | - linux 98 | 99 | crd: 100 | enabled: true 101 | keep: true 102 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/druid.apache.org_druids.yaml 6 | - bases/druid.apache.org_druidingestions.yaml 7 | #+kubebuilder:scaffold:crdkustomizeresource 8 | 9 | patchesStrategicMerge: 10 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 11 | # patches here are for enabling the conversion webhook for each CRD 12 | #- patches/webhook_in_druids.yaml 13 | #- patches/webhook_in_druidingestions.yaml 14 | #+kubebuilder:scaffold:crdkustomizewebhookpatch 15 | 16 | # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 17 | # patches here are for enabling the CA injection for each CRD 18 | #- patches/cainjection_in_druids.yaml 19 | #- patches/cainjection_in_druidingestions.yaml 20 | #+kubebuilder:scaffold:crdkustomizecainjectionpatch 21 | 22 | # the following config is for teaching kustomize how to do kustomization for CRDs. 
23 | configurations: 24 | - kustomizeconfig.yaml 25 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhook/clientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | version: v1 14 | group: apiextensions.k8s.io 15 | path: spec/conversion/webhook/clientConfig/service/namespace 16 | create: false 17 | 18 | varReference: 19 | - path: metadata/annotations 20 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_druid_druidingestions.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: druidingestions.druid.apache.org 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_druids.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: druids.druid.apache.org 8 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_druid_druidingestions.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: druidingestions.druid.apache.org 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_druids.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: druids.druid.apache.org 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: druid-operator-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 
7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: druid-operator- 10 | 11 | # Labels to add to all resources and selectors. 12 | #commonLabels: 13 | # someName: someValue 14 | 15 | bases: 16 | - ../crd 17 | - ../rbac 18 | - ../manager 19 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 20 | # crd/kustomization.yaml 21 | #- ../webhook 22 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 23 | #- ../certmanager 24 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 25 | #- ../prometheus 26 | 27 | patchesStrategicMerge: 28 | # Protect the /metrics endpoint by putting it behind auth. 29 | # If you want your controller-manager to expose the /metrics 30 | # endpoint w/o any authn/z, please comment the following line. 31 | - manager_auth_proxy_patch.yaml 32 | 33 | 34 | 35 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 36 | # crd/kustomization.yaml 37 | #- manager_webhook_patch.yaml 38 | 39 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 40 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 41 | # 'CERTMANAGER' needs to be enabled to use ca injection 42 | #- webhookcainjection_patch.yaml 43 | 44 | # the following config is for teaching kustomize how to do var substitution 45 | vars: 46 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 47 | #- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 48 | # objref: 49 | # kind: Certificate 50 | # group: cert-manager.io 51 | # version: v1 52 | # name: serving-cert # this name should match the one in certificate.yaml 53 | # fieldref: 54 | # fieldpath: metadata.namespace 55 | #- name: CERTIFICATE_NAME 56 | # objref: 57 | # kind: Certificate 58 | # group: cert-manager.io 59 | # version: v1 60 | # name: serving-cert # this name should match the one in certificate.yaml 61 | #- name: SERVICE_NAMESPACE # namespace of the service 62 | # objref: 63 | # kind: Service 64 | # version: v1 65 | # name: webhook-service 66 | # fieldref: 67 | # fieldpath: metadata.namespace 68 | #- name: SERVICE_NAME 69 | # objref: 70 | # kind: Service 71 | # version: v1 72 | # name: webhook-service 73 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch inject a sidecar container which is a HTTP proxy for the 2 | # controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
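# Illustrative sketch of how a client reaches the protected /metrics endpoint once this patch is
# applied: the caller authenticates with a ServiceAccount token whose subject is bound to the
# metrics-reader ClusterRole (config/rbac/auth_proxy_client_clusterrole.yaml). Service and
# ServiceAccount names below are placeholders, not values taken from these manifests
# (kubectl create token requires kubectl 1.24+):
#
#   TOKEN=$(kubectl create token <sa-bound-to-metrics-reader> -n <operator-namespace>)
#   curl -k -H "Authorization: Bearer ${TOKEN}" \
#     https://<metrics-service>.<operator-namespace>.svc:8443/metrics
#
# The https port 8443 matches the metrics Service defined in config/rbac/auth_proxy_service.yaml.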
3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | affinity: 12 | nodeAffinity: 13 | requiredDuringSchedulingIgnoredDuringExecution: 14 | nodeSelectorTerms: 15 | - matchExpressions: 16 | - key: kubernetes.io/arch 17 | operator: In 18 | values: 19 | - amd64 20 | - arm64 21 | - ppc64le 22 | - s390x 23 | - key: kubernetes.io/os 24 | operator: In 25 | values: 26 | - linux 27 | containers: 28 | - name: kube-rbac-proxy 29 | securityContext: 30 | allowPrivilegeEscalation: false 31 | capabilities: 32 | drop: 33 | - "ALL" 34 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1 35 | args: 36 | - "--secure-listen-address=0.0.0.0:8443" 37 | - "--upstream=http://127.0.0.1:8080/" 38 | - "--logtostderr=true" 39 | - "--v=0" 40 | ports: 41 | - containerPort: 8443 42 | protocol: TCP 43 | name: https 44 | resources: 45 | limits: 46 | cpu: 500m 47 | memory: 128Mi 48 | requests: 49 | cpu: 5m 50 | memory: 64Mi 51 | - name: manager 52 | args: 53 | - "--health-probe-bind-address=:8081" 54 | - "--metrics-bind-address=127.0.0.1:8080" 55 | - "--leader-elect" 56 | -------------------------------------------------------------------------------- /config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | apiVersion: kustomize.config.k8s.io/v1beta1 4 | kind: Kustomization 5 | images: 6 | - name: controller 7 | newName: datainfrahq/druid-operator 8 | newTag: latest 9 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | app.kubernetes.io/name: namespace 7 | app.kubernetes.io/instance: system 8 | app.kubernetes.io/component: manager 9 | app.kubernetes.io/created-by: druid-operator 10 | app.kubernetes.io/part-of: druid-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: system 13 | --- 14 | apiVersion: apps/v1 15 | kind: Deployment 16 | metadata: 17 | name: controller-manager 18 | namespace: system 19 | labels: 20 | control-plane: controller-manager 21 | app.kubernetes.io/name: deployment 22 | app.kubernetes.io/instance: controller-manager 23 | app.kubernetes.io/component: manager 24 | app.kubernetes.io/created-by: druid-operator 25 | app.kubernetes.io/part-of: druid-operator 26 | app.kubernetes.io/managed-by: kustomize 27 | spec: 28 | selector: 29 | matchLabels: 30 | control-plane: controller-manager 31 | replicas: 1 32 | template: 33 | metadata: 34 | annotations: 35 | kubectl.kubernetes.io/default-container: manager 36 | labels: 37 | control-plane: controller-manager 38 | spec: 39 | # TODO(user): Uncomment the following code to configure the nodeAffinity expression 40 | # according to the platforms which are supported by your solution. 41 | # It is considered best practice to support multiple architectures. You can 42 | # build your manager image using the makefile target docker-buildx. 
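# For example, a multi-arch image can typically be built and pushed with the scaffolded
# docker-buildx target; the IMG variable name below follows the usual Kubebuilder convention
# and is assumed here rather than taken from this repository's Makefile:
#   make docker-buildx IMG=<some-registry>/druid-operator:<tag>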
43 | # affinity: 44 | # nodeAffinity: 45 | # requiredDuringSchedulingIgnoredDuringExecution: 46 | # nodeSelectorTerms: 47 | # - matchExpressions: 48 | # - key: kubernetes.io/arch 49 | # operator: In 50 | # values: 51 | # - amd64 52 | # - arm64 53 | # - ppc64le 54 | # - s390x 55 | # - key: kubernetes.io/os 56 | # operator: In 57 | # values: 58 | # - linux 59 | securityContext: 60 | runAsNonRoot: true 61 | # TODO(user): For common cases that do not require escalating privileges 62 | # it is recommended to ensure that all your Pods/Containers are restrictive. 63 | # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted 64 | # Please uncomment the following code if your project does NOT have to work on old Kubernetes 65 | # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). 66 | # seccompProfile: 67 | # type: RuntimeDefault 68 | containers: 69 | - command: 70 | - /manager 71 | args: 72 | - --leader-elect 73 | image: controller:latest 74 | name: manager 75 | securityContext: 76 | allowPrivilegeEscalation: false 77 | capabilities: 78 | drop: 79 | - "ALL" 80 | livenessProbe: 81 | httpGet: 82 | path: /healthz 83 | port: 8081 84 | initialDelaySeconds: 15 85 | periodSeconds: 20 86 | readinessProbe: 87 | httpGet: 88 | path: /readyz 89 | port: 8081 90 | initialDelaySeconds: 5 91 | periodSeconds: 10 92 | # TODO(user): Configure the resources accordingly based on the project requirements. 93 | # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ 94 | resources: 95 | limits: 96 | cpu: 500m 97 | memory: 128Mi 98 | requests: 99 | cpu: 10m 100 | memory: 64Mi 101 | serviceAccountName: controller-manager 102 | terminationGracePeriodSeconds: 10 103 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | app.kubernetes.io/name: servicemonitor 9 | app.kubernetes.io/instance: controller-manager-metrics-monitor 10 | app.kubernetes.io/component: metrics 11 | app.kubernetes.io/created-by: druid-operator 12 | app.kubernetes.io/part-of: druid-operator 13 | app.kubernetes.io/managed-by: kustomize 14 | name: controller-manager-metrics-monitor 15 | namespace: system 16 | spec: 17 | endpoints: 18 | - path: /metrics 19 | port: https 20 | scheme: https 21 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 22 | tlsConfig: 23 | insecureSkipVerify: true 24 | selector: 25 | matchLabels: 26 | control-plane: controller-manager 27 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrole 6 | app.kubernetes.io/instance: metrics-reader 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: druid-operator 9 | 
app.kubernetes.io/part-of: druid-operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: metrics-reader 12 | rules: 13 | - nonResourceURLs: 14 | - "/metrics" 15 | verbs: 16 | - get 17 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrole 6 | app.kubernetes.io/instance: proxy-role 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: druid-operator 9 | app.kubernetes.io/part-of: druid-operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: proxy-role 12 | rules: 13 | - apiGroups: 14 | - authentication.k8s.io 15 | resources: 16 | - tokenreviews 17 | verbs: 18 | - create 19 | - apiGroups: 20 | - authorization.k8s.io 21 | resources: 22 | - subjectaccessreviews 23 | verbs: 24 | - create 25 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrolebinding 6 | app.kubernetes.io/instance: proxy-rolebinding 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: druid-operator 9 | app.kubernetes.io/part-of: druid-operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: proxy-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: proxy-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | app.kubernetes.io/name: service 7 | app.kubernetes.io/instance: controller-manager-metrics-service 8 | app.kubernetes.io/component: kube-rbac-proxy 9 | app.kubernetes.io/created-by: druid-operator 10 | app.kubernetes.io/part-of: druid-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: controller-manager-metrics-service 13 | namespace: system 14 | spec: 15 | ports: 16 | - name: https 17 | port: 8443 18 | protocol: TCP 19 | targetPort: https 20 | selector: 21 | control-plane: controller-manager 22 | -------------------------------------------------------------------------------- /config/rbac/druid_druidingestion_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit druidingestions. 
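# Illustrative usage (values in angle brackets are placeholders): this ClusterRole is typically
# granted within a namespace through a RoleBinding such as the sketch below.
#   apiVersion: rbac.authorization.k8s.io/v1
#   kind: RoleBinding
#   metadata:
#     name: druidingestion-editor-binding
#     namespace: <druid-namespace>
#   roleRef:
#     apiGroup: rbac.authorization.k8s.io
#     kind: ClusterRole
#     name: druidingestion-editor-role
#   subjects:
#   - kind: User
#     name: <end-user>
#     apiGroup: rbac.authorization.k8s.io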
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: druidingestion-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: druid-operator 10 | app.kubernetes.io/part-of: druid-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: druidingestion-editor-role 13 | rules: 14 | - apiGroups: 15 | - druid.apache.org 16 | resources: 17 | - druidingestions 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - druid.apache.org 28 | resources: 29 | - druidingestions/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /config/rbac/druid_druidingestion_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view druidingestions. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: druidingestion-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: druid-operator 10 | app.kubernetes.io/part-of: druid-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: druidingestion-viewer-role 13 | rules: 14 | - apiGroups: 15 | - druid.apache.org 16 | resources: 17 | - druidingestions 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - druid.apache.org 24 | resources: 25 | - druidingestions/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/rbac/druid_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit druids. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: druid-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: druid-operator 10 | app.kubernetes.io/part-of: druid-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: druid-editor-role 13 | rules: 14 | - apiGroups: 15 | - druid.apache.org 16 | resources: 17 | - druids 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - druid.apache.org 28 | resources: 29 | - druids/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /config/rbac/druid_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view druids. 
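# The rules below are read-only: get/list/watch on Druid clusters and get on their
# status subresource. Bind it the same way as the druidingestion editor role sketched
# earlier for principals that only need to inspect clusters without modifying them.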
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: druid-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: druid-operator 10 | app.kubernetes.io/part-of: druid-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: druid-viewer-role 13 | rules: 14 | - apiGroups: 15 | - druid.apache.org 16 | resources: 17 | - druids 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - druid.apache.org 24 | resources: 25 | - druids/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | # All RBAC will be applied under this service account in 3 | # the deployment namespace. You may comment out this resource 4 | # if your manager will use a service account that exists at 5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding 6 | # subjects if changing service account names. 7 | - service_account.yaml 8 | - role.yaml 9 | - role_binding.yaml 10 | - leader_election_role.yaml 11 | - leader_election_role_binding.yaml 12 | # Comment the following 4 lines if you want to disable 13 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 14 | # which protects your /metrics endpoint. 15 | - auth_proxy_service.yaml 16 | - auth_proxy_role.yaml 17 | - auth_proxy_role_binding.yaml 18 | - auth_proxy_client_clusterrole.yaml 19 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 
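# Leader election is what keeps a single operator replica active at a time. In
# controller-runtime it is backed by Lease objects (and by ConfigMaps in older
# releases), which is why both resource types appear below; the events rule lets
# the elected leader record its leadership transitions.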
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: role 7 | app.kubernetes.io/instance: leader-election-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: druid-operator 10 | app.kubernetes.io/part-of: druid-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: leader-election-role 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - configmaps 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - create 23 | - update 24 | - patch 25 | - delete 26 | - apiGroups: 27 | - coordination.k8s.io 28 | resources: 29 | - leases 30 | verbs: 31 | - get 32 | - list 33 | - watch 34 | - create 35 | - update 36 | - patch 37 | - delete 38 | - apiGroups: 39 | - "" 40 | resources: 41 | - events 42 | verbs: 43 | - create 44 | - patch 45 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: rolebinding 6 | app.kubernetes.io/instance: leader-election-rolebinding 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: druid-operator 9 | app.kubernetes.io/part-of: druid-operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: leader-election-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: Role 15 | name: leader-election-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: manager-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - events 23 | verbs: 24 | - create 25 | - get 26 | - list 27 | - patch 28 | - watch 29 | - apiGroups: 30 | - "" 31 | resources: 32 | - persistentvolumeclaims 33 | verbs: 34 | - create 35 | - delete 36 | - get 37 | - list 38 | - patch 39 | - update 40 | - watch 41 | - apiGroups: 42 | - "" 43 | resources: 44 | - pods 45 | verbs: 46 | - create 47 | - delete 48 | - get 49 | - list 50 | - patch 51 | - update 52 | - watch 53 | - apiGroups: 54 | - "" 55 | resources: 56 | - services 57 | verbs: 58 | - create 59 | - delete 60 | - get 61 | - list 62 | - patch 63 | - update 64 | - watch 65 | - apiGroups: 66 | - apps 67 | resources: 68 | - deployments 69 | verbs: 70 | - create 71 | - delete 72 | - get 73 | - list 74 | - patch 75 | - update 76 | - watch 77 | - apiGroups: 78 | - apps 79 | resources: 80 | - statefulsets 81 | verbs: 82 | - create 83 | - delete 84 | - get 85 | - list 86 | - patch 87 | - update 88 | - watch 89 | - apiGroups: 90 | - autoscaling 91 | resources: 92 | - horizontalpodautoscalers 93 | verbs: 94 | - create 95 | - delete 96 | - get 97 | - list 98 | - patch 99 | - update 100 | - watch 101 | - apiGroups: 102 | - druid.apache.org 103 | resources: 104 | - druids 105 | verbs: 106 | - create 107 | - delete 108 | - get 109 | - list 110 | - patch 111 | - update 112 | - watch 113 | - apiGroups: 114 | - druid.apache.org 115 | resources: 116 | - 
druids/status 117 | verbs: 118 | - get 119 | - patch 120 | - update 121 | - apiGroups: 122 | - networking.k8s.io 123 | resources: 124 | - ingresses 125 | verbs: 126 | - create 127 | - delete 128 | - get 129 | - list 130 | - patch 131 | - update 132 | - watch 133 | - apiGroups: 134 | - policy 135 | resources: 136 | - poddisruptionbudgets 137 | verbs: 138 | - create 139 | - delete 140 | - get 141 | - list 142 | - patch 143 | - update 144 | - watch 145 | - apiGroups: 146 | - storage.k8s.io 147 | resources: 148 | - storageclasses 149 | verbs: 150 | - get 151 | - list 152 | - watch 153 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrolebinding 6 | app.kubernetes.io/instance: manager-rolebinding 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: druid-operator 9 | app.kubernetes.io/part-of: druid-operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: manager-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: manager-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: serviceaccount 6 | app.kubernetes.io/instance: controller-manager 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: druid-operator 9 | app.kubernetes.io/part-of: druid-operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/samples/druid_v1alpha1_druid.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: druid.apache.org/v1alpha1 2 | kind: Druid 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: druid 6 | app.kubernetes.io/instance: druid-sample 7 | app.kubernetes.io/part-of: druid-operator 8 | app.kubernetes.io/managed-by: kustomize 9 | app.kubernetes.io/created-by: druid-operator 10 | name: druid-sample 11 | spec: 12 | # TODO(user): Add fields here 13 | -------------------------------------------------------------------------------- /config/samples/druid_v1alpha1_druidingestion.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: druid.apache.org/v1alpha1 2 | kind: DruidIngestion 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: druidingestion 6 | app.kubernetes.io/instance: druidingestion-sample 7 | name: wikipedia-10 8 | spec: 9 | suspend: false 10 | druidCluster: tiny-cluster 11 | ingestion: 12 | type: native-batch 13 | spec: |- 14 | { 15 | "type" : "index_parallel", 16 | "spec" : { 17 | "dataSchema" : { 18 | "dataSource" : "wikipedia-10", 19 | "dimensionsSpec" : { 20 | "dimensions" : [ 21 | "channel", 22 | "cityName", 23 | "comment", 24 | "countryIsoCode", 25 | "countryName", 26 | "isAnonymous", 27 | "isMinor", 28 | "isNew", 29 | "isRobot", 30 | "isUnpatrolled", 31 | "metroCode", 32 | "namespace", 33 | "page", 34 | "regionIsoCode", 35 | "regionName", 36 | 
"user", 37 | { "name": "added", "type": "long" }, 38 | { "name": "deleted", "type": "long" }, 39 | { "name": "delta", "type": "long" } 40 | ] 41 | }, 42 | "timestampSpec": { 43 | "column": "time", 44 | "format": "iso" 45 | }, 46 | "metricsSpec" : [], 47 | "granularitySpec" : { 48 | "type" : "uniform", 49 | "segmentGranularity" : "day", 50 | "queryGranularity" : "none", 51 | "intervals" : ["2015-09-12/2015-09-13"], 52 | "rollup" : false 53 | } 54 | }, 55 | "ioConfig" : { 56 | "type" : "index_parallel", 57 | "inputSource" : { 58 | "type" : "local", 59 | "baseDir" : "quickstart/tutorial/", 60 | "filter" : "wikiticker-2015-09-12-sampled.json.gz" 61 | }, 62 | "inputFormat" : { 63 | "type": "json" 64 | }, 65 | "appendToExisting" : false 66 | }, 67 | "tuningConfig" : { 68 | "type" : "index_parallel", 69 | "partitionsSpec": { 70 | "type": "dynamic" 71 | }, 72 | "maxRowsInMemory" : 25000 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /controllers/druid/additional_containers.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 7 | v1 "k8s.io/api/core/v1" 8 | ) 9 | 10 | func addAdditionalContainers(m *v1alpha1.Druid, nodeSpec *v1alpha1.DruidNodeSpec, podSpec *v1.PodSpec) { 11 | allAdditional := getAllAdditionalContainers(m, nodeSpec) 12 | 13 | for _, additional := range allAdditional { 14 | container := convertAdditionalContainer(&additional) 15 | 16 | if additional.RunAsInit { 17 | podSpec.InitContainers = append(podSpec.InitContainers, container) 18 | } else { 19 | podSpec.Containers = append(podSpec.Containers, container) 20 | } 21 | } 22 | } 23 | 24 | func getAllAdditionalContainers(m *v1alpha1.Druid, nodeSpec *v1alpha1.DruidNodeSpec) []v1alpha1.AdditionalContainer { 25 | var allAdditional []v1alpha1.AdditionalContainer 26 | if m.Spec.AdditionalContainer != nil { 27 | allAdditional = append(allAdditional, m.Spec.AdditionalContainer...) 28 | } 29 | if nodeSpec.AdditionalContainer != nil { 30 | allAdditional = append(allAdditional, nodeSpec.AdditionalContainer...) 
31 | } 32 | return allAdditional 33 | } 34 | 35 | func convertAdditionalContainer(additional *v1alpha1.AdditionalContainer) v1.Container { 36 | return v1.Container{ 37 | Image: additional.Image, 38 | Name: additional.ContainerName, 39 | Resources: additional.Resources, 40 | VolumeMounts: additional.VolumeMounts, 41 | Command: additional.Command, 42 | Args: additional.Args, 43 | ImagePullPolicy: additional.ImagePullPolicy, 44 | SecurityContext: additional.ContainerSecurityContext, 45 | Env: additional.Env, 46 | EnvFrom: additional.EnvFrom, 47 | } 48 | } 49 | 50 | func validateAdditionalContainersSpec(drd *v1alpha1.Druid) error { 51 | for _, nodeSpec := range drd.Spec.Nodes { 52 | if err := validateNodeAdditionalContainersSpec(drd, &nodeSpec); err != nil { 53 | return err 54 | } 55 | } 56 | return nil 57 | } 58 | 59 | func validateNodeAdditionalContainersSpec(drd *v1alpha1.Druid, nodeSpec *v1alpha1.DruidNodeSpec) error { 60 | allAdditional := getAllAdditionalContainers(drd, nodeSpec) 61 | var containerNames []string 62 | for _, container := range allAdditional { 63 | containerNames = append(containerNames, container.ContainerName) 64 | } 65 | if duplicate, containerName := hasDuplicateString(containerNames); duplicate { 66 | return fmt.Errorf("node group %s has duplicate container name: %s", 67 | nodeSpec.NodeType, containerName) 68 | } 69 | return nil 70 | } 71 | -------------------------------------------------------------------------------- /controllers/druid/additional_containers_test.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 8 | . "github.com/onsi/ginkgo/v2" 9 | . "github.com/onsi/gomega" 10 | v1 "k8s.io/api/apps/v1" 11 | "k8s.io/apimachinery/pkg/types" 12 | ) 13 | 14 | // +kubebuilder:docs-gen:collapse=Imports 15 | 16 | var _ = Describe("Test Additional Containers", func() { 17 | const ( 18 | timeout = time.Second * 45 19 | interval = time.Millisecond * 250 20 | ) 21 | 22 | Context("When adding cluster-level additional containers", func() { 23 | It("Should add the containers to the pod", func() { 24 | By("By creating a Druid object") 25 | filePath := "testdata/additional-containers.yaml" 26 | druid, err := readDruidClusterSpecFromFile(filePath) 27 | Expect(err).Should(BeNil()) 28 | 29 | Expect(k8sClient.Create(ctx, druid)).To(Succeed()) 30 | 31 | existDruid := &v1alpha1.Druid{} 32 | Eventually(func() bool { 33 | err := k8sClient.Get(ctx, types.NamespacedName{Name: druid.Name, Namespace: druid.Namespace}, existDruid) 34 | return err == nil 35 | }, timeout, interval).Should(BeTrue()) 36 | 37 | brokerDeployment := &v1.Deployment{} 38 | Eventually(func() bool { 39 | err := k8sClient.Get(ctx, types.NamespacedName{ 40 | Namespace: druid.Namespace, 41 | Name: fmt.Sprintf("druid-%s-%s", druid.Name, "brokers"), 42 | }, brokerDeployment) 43 | return err == nil 44 | }, timeout, interval).Should(BeTrue()) 45 | 46 | Expect(brokerDeployment.Spec.Template.Spec.Containers).ShouldNot(BeNil()) 47 | 48 | isClusterContainerExists := false 49 | isNodeContainerExists := false 50 | for _, container := range brokerDeployment.Spec.Template.Spec.Containers { 51 | if container.Name == "cluster-level" { 52 | isClusterContainerExists = true 53 | continue 54 | } 55 | if container.Name == "node-level" { 56 | isNodeContainerExists = true 57 | continue 58 | } 59 | } 60 | 61 | Expect(isClusterContainerExists).Should(BeTrue()) 62 | 
Expect(isNodeContainerExists).Should(BeTrue()) 63 | }) 64 | }) 65 | }) 66 | -------------------------------------------------------------------------------- /controllers/druid/configuration.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 8 | v1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/apimachinery/pkg/types" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | ) 13 | 14 | func makeConfigMap(name string, namespace string, labels map[string]string, data map[string]string) (*v1.ConfigMap, error) { 15 | return &v1.ConfigMap{ 16 | TypeMeta: metav1.TypeMeta{ 17 | APIVersion: "v1", 18 | Kind: "ConfigMap", 19 | }, 20 | ObjectMeta: metav1.ObjectMeta{ 21 | Name: name, 22 | Namespace: namespace, 23 | Labels: labels, 24 | }, 25 | Data: data, 26 | }, nil 27 | } 28 | 29 | func makeCommonConfigMap(ctx context.Context, sdk client.Client, m *v1alpha1.Druid, ls map[string]string) (*v1.ConfigMap, error) { 30 | prop := m.Spec.CommonRuntimeProperties 31 | 32 | if m.Spec.Zookeeper != nil { 33 | if zm, err := createZookeeperManager(m.Spec.Zookeeper); err != nil { 34 | return nil, err 35 | } else { 36 | prop = prop + "\n" + zm.Configuration() + "\n" 37 | } 38 | } 39 | 40 | if m.Spec.MetadataStore != nil { 41 | if msm, err := createMetadataStoreManager(m.Spec.MetadataStore); err != nil { 42 | return nil, err 43 | } else { 44 | prop = prop + "\n" + msm.Configuration() + "\n" 45 | } 46 | } 47 | 48 | if m.Spec.DeepStorage != nil { 49 | if dsm, err := createDeepStorageManager(m.Spec.DeepStorage); err != nil { 50 | return nil, err 51 | } else { 52 | prop = prop + "\n" + dsm.Configuration() + "\n" 53 | } 54 | } 55 | 56 | data := map[string]string{ 57 | "common.runtime.properties": prop, 58 | } 59 | 60 | if m.Spec.DimensionsMapPath != "" { 61 | data["metricDimensions.json"] = m.Spec.DimensionsMapPath 62 | } 63 | if m.Spec.HdfsSite != "" { 64 | data["hdfs-site.xml"] = m.Spec.HdfsSite 65 | } 66 | if m.Spec.CoreSite != "" { 67 | data["core-site.xml"] = m.Spec.CoreSite 68 | } 69 | 70 | if err := addExtraCommonConfig(ctx, sdk, m, data); err != nil { 71 | return nil, err 72 | } 73 | 74 | cfg, err := makeConfigMap( 75 | fmt.Sprintf("%s-druid-common-config", m.ObjectMeta.Name), 76 | m.Namespace, 77 | ls, 78 | data) 79 | return cfg, err 80 | } 81 | 82 | func addExtraCommonConfig(ctx context.Context, sdk client.Client, m *v1alpha1.Druid, data map[string]string) error { 83 | if m.Spec.ExtraCommonConfig == nil { 84 | return nil 85 | } 86 | 87 | for _, cmRef := range m.Spec.ExtraCommonConfig { 88 | cm := &v1.ConfigMap{} 89 | if err := sdk.Get(ctx, types.NamespacedName{ 90 | Name: cmRef.Name, 91 | Namespace: cmRef.Namespace}, cm); err != nil { 92 | // If a configMap is not found - output error and keep reconciliation 93 | continue 94 | } 95 | 96 | for fileName, fileContent := range cm.Data { 97 | data[fileName] = fileContent 98 | } 99 | } 100 | 101 | return nil 102 | } 103 | 104 | func makeConfigMapForNodeSpec(nodeSpec *v1alpha1.DruidNodeSpec, m *v1alpha1.Druid, lm map[string]string, nodeSpecUniqueStr string) (*v1.ConfigMap, error) { 105 | 106 | data := map[string]string{ 107 | "runtime.properties": fmt.Sprintf("druid.port=%d\n%s", nodeSpec.DruidPort, nodeSpec.RuntimeProperties), 108 | "jvm.config": fmt.Sprintf("%s\n%s", firstNonEmptyStr(nodeSpec.JvmOptions, m.Spec.JvmOptions), nodeSpec.ExtraJvmOptions), 109 | } 110 | log4jconfig 
:= firstNonEmptyStr(nodeSpec.Log4jConfig, m.Spec.Log4jConfig) 111 | if log4jconfig != "" { 112 | data["log4j2.xml"] = log4jconfig 113 | } 114 | 115 | return makeConfigMap( 116 | fmt.Sprintf("%s-config", nodeSpecUniqueStr), 117 | m.Namespace, 118 | lm, 119 | data) 120 | } 121 | 122 | func getNodeConfigMountPath(nodeSpec *v1alpha1.DruidNodeSpec) string { 123 | return fmt.Sprintf("/druid/conf/druid/%s", nodeSpec.NodeType) 124 | } 125 | -------------------------------------------------------------------------------- /controllers/druid/deep_storage_dep_mgmt.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "reflect" 7 | 8 | "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 9 | "github.com/datainfrahq/druid-operator/controllers/druid/ext" 10 | ) 11 | 12 | var deepStorageExtTypes = map[string]reflect.Type{} 13 | 14 | func init() { 15 | deepStorageExtTypes["default"] = reflect.TypeOf(ext.DefaultDeepStorageManager{}) 16 | } 17 | 18 | // We might have to add more methods to this interface to enable extensions that completely manage 19 | // deploy, upgrade and termination of deep storage. 20 | type deepStorageManager interface { 21 | Configuration() string 22 | } 23 | 24 | func createDeepStorageManager(spec *v1alpha1.DeepStorageSpec) (deepStorageManager, error) { 25 | if t, ok := deepStorageExtTypes[spec.Type]; ok { 26 | v := reflect.New(t).Interface() 27 | if err := json.Unmarshal(spec.Spec, v); err != nil { 28 | return nil, fmt.Errorf("Couldn't unmarshall deepStorage type[%s]. Error[%s].", spec.Type, err.Error()) 29 | } else { 30 | return v.(deepStorageManager), nil 31 | } 32 | } else { 33 | return nil, fmt.Errorf("Can't find type[%s] for DeepStorage Mgmt.", spec.Type) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /controllers/druid/druid_controller.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "time" 7 | 8 | "k8s.io/apimachinery/pkg/api/errors" 9 | "k8s.io/client-go/tools/record" 10 | 11 | "github.com/go-logr/logr" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | ctrl "sigs.k8s.io/controller-runtime" 14 | "sigs.k8s.io/controller-runtime/pkg/client" 15 | "sigs.k8s.io/controller-runtime/pkg/controller" 16 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 17 | 18 | druidv1alpha1 "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 19 | ) 20 | 21 | // DruidReconciler reconciles a Druid object 22 | type DruidReconciler struct { 23 | client.Client 24 | Log logr.Logger 25 | Scheme *runtime.Scheme 26 | // reconcile time duration, defaults to 10s 27 | ReconcileWait time.Duration 28 | Recorder record.EventRecorder 29 | } 30 | 31 | func NewDruidReconciler(mgr ctrl.Manager) *DruidReconciler { 32 | return &DruidReconciler{ 33 | Client: mgr.GetClient(), 34 | Log: ctrl.Log.WithName("controllers").WithName("Druid"), 35 | Scheme: mgr.GetScheme(), 36 | ReconcileWait: LookupReconcileTime(), 37 | Recorder: mgr.GetEventRecorderFor("druid-operator"), 38 | } 39 | } 40 | 41 | // +kubebuilder:rbac:groups=druid.apache.org,resources=druids,verbs=get;list;watch;create;update;patch;delete 42 | // +kubebuilder:rbac:groups=druid.apache.org,resources=druids/status,verbs=get;update;patch 43 | // +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete 44 | // 
+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch;delete 45 | // +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete 46 | // +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete 47 | // +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;patch 48 | // +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete 49 | // +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete 50 | // +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete 51 | // +kubebuilder:rbac:groups=policy,resources=poddisruptionbudgets,verbs=get;list;watch;create;update;patch;delete 52 | // +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete 53 | // +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch 54 | 55 | func (r *DruidReconciler) Reconcile(ctx context.Context, request reconcile.Request) (ctrl.Result, error) { 56 | _ = r.Log.WithValues("druid", request.NamespacedName) 57 | 58 | // Fetch the Druid instance 59 | instance := &druidv1alpha1.Druid{} 60 | err := r.Get(ctx, request.NamespacedName, instance) 61 | if err != nil { 62 | if errors.IsNotFound(err) { 63 | // Request object not found, could have been deleted after reconcile request. 64 | // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. 65 | // Return and don't requeue 66 | return ctrl.Result{}, nil 67 | } 68 | // Error reading the object - requeue the request. 69 | return ctrl.Result{}, err 70 | } 71 | 72 | // Initialize Emit Events 73 | var emitEvent EventEmitter = EmitEventFuncs{r.Recorder} 74 | 75 | // Deploy Druid Cluster 76 | if err := deployDruidCluster(ctx, r.Client, instance, emitEvent); err != nil { 77 | return ctrl.Result{}, err 78 | } 79 | 80 | // Update Druid Dynamic Configs 81 | if err := updateDruidDynamicConfigs(ctx, r.Client, instance, emitEvent); err != nil { 82 | return ctrl.Result{}, err 83 | } 84 | 85 | // If both operations succeed, requeue after specified wait time 86 | return ctrl.Result{RequeueAfter: r.ReconcileWait}, nil 87 | } 88 | 89 | func (r *DruidReconciler) SetupWithManager(mgr ctrl.Manager) error { 90 | return ctrl.NewControllerManagedBy(mgr). 91 | For(&druidv1alpha1.Druid{}). 92 | WithEventFilter(GenericPredicates{}). 93 | WithOptions(controller.Options{ 94 | MaxConcurrentReconciles: getMaxConcurrentReconciles(), 95 | }). 
96 | Complete(r) 97 | } 98 | 99 | func LookupReconcileTime() time.Duration { 100 | val, exists := os.LookupEnv("RECONCILE_WAIT") 101 | if !exists { 102 | return time.Second * 10 103 | } else { 104 | v, err := time.ParseDuration(val) 105 | if err != nil { 106 | logger.Error(err, err.Error()) 107 | // Exit Program if not valid 108 | os.Exit(1) 109 | } 110 | return v 111 | } 112 | } 113 | 114 | func getMaxConcurrentReconciles() int { 115 | var MaxConcurrentReconciles = "MAX_CONCURRENT_RECONCILES" 116 | 117 | nu, found := os.LookupEnv(MaxConcurrentReconciles) 118 | if !found { 119 | return 1 120 | } 121 | return Str2Int(nu) 122 | } 123 | -------------------------------------------------------------------------------- /controllers/druid/dynamic_config.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | 8 | "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 9 | druidapi "github.com/datainfrahq/druid-operator/pkg/druidapi" 10 | internalhttp "github.com/datainfrahq/druid-operator/pkg/http" 11 | "github.com/datainfrahq/druid-operator/pkg/util" 12 | "sigs.k8s.io/controller-runtime/pkg/client" 13 | ) 14 | 15 | // updateDruidDynamicConfigs updates the Druid cluster's dynamic configurations 16 | // for both overlords (middlemanagers) and coordinators. 17 | func updateDruidDynamicConfigs( 18 | ctx context.Context, 19 | client client.Client, 20 | druid *v1alpha1.Druid, 21 | emitEvent EventEmitter, 22 | ) error { 23 | nodeTypes := []string{"middlemanagers", "coordinators"} 24 | 25 | for _, nodeType := range nodeTypes { 26 | nodeConfig, exists := druid.Spec.Nodes[nodeType] 27 | if !exists || nodeConfig.DynamicConfig.Size() == 0 { 28 | // Skip if dynamic configurations are not provided for the node type 29 | continue 30 | } 31 | 32 | dynamicConfig := nodeConfig.DynamicConfig.Raw 33 | 34 | svcName, err := druidapi.GetRouterSvcUrl(druid.Namespace, druid.Name, client) 35 | if err != nil { 36 | emitEvent.EmitEventGeneric( 37 | druid, 38 | string(druidGetRouterSvcUrlFailed), 39 | fmt.Sprintf("Failed to get router service URL for %s", nodeType), 40 | err, 41 | ) 42 | return err 43 | } 44 | 45 | basicAuth, err := druidapi.GetAuthCreds( 46 | ctx, 47 | client, 48 | druid.Spec.Auth, 49 | ) 50 | if err != nil { 51 | emitEvent.EmitEventGeneric( 52 | druid, 53 | string(druidGetAuthCredsFailed), 54 | fmt.Sprintf("Failed to get authentication credentials for %s", nodeType), 55 | err, 56 | ) 57 | return err 58 | } 59 | 60 | // Create the HTTP client with basic authentication 61 | httpClient := internalhttp.NewHTTPClient( 62 | &http.Client{}, 63 | &internalhttp.Auth{BasicAuth: basicAuth}, 64 | ) 65 | 66 | // Determine the URL path for dynamic configurations based on the nodeType 67 | var dynamicConfigPath string 68 | switch nodeType { 69 | case "middlemanagers": 70 | dynamicConfigPath = druidapi.MakePath(svcName, "indexer", "worker") 71 | case "coordinators": 72 | dynamicConfigPath = druidapi.MakePath(svcName, "coordinator", "config") 73 | default: 74 | return fmt.Errorf("unsupported node type: %s", nodeType) 75 | } 76 | 77 | // Fetch current dynamic configurations 78 | currentResp, err := httpClient.Do( 79 | http.MethodGet, 80 | dynamicConfigPath, 81 | nil, 82 | ) 83 | if err != nil { 84 | emitEvent.EmitEventGeneric( 85 | druid, 86 | string(druidFetchCurrentConfigsFailed), 87 | fmt.Sprintf("Failed to fetch current %s dynamic configurations", nodeType), 88 | err, 89 | ) 90 | return err 91 | } 92 | if 
currentResp.StatusCode != http.StatusOK { 93 | err = fmt.Errorf( 94 | "failed to retrieve current Druid %s dynamic configurations. Status code: %d, Response body: %s", 95 | nodeType, currentResp.StatusCode, string(currentResp.ResponseBody), 96 | ) 97 | emitEvent.EmitEventGeneric( 98 | druid, 99 | string(druidFetchCurrentConfigsFailed), 100 | fmt.Sprintf("Failed to fetch current %s dynamic configurations", nodeType), 101 | err, 102 | ) 103 | return err 104 | } 105 | 106 | // Handle empty response body 107 | var currentConfigsJson string 108 | if len(currentResp.ResponseBody) == 0 { 109 | currentConfigsJson = "{}" // Initialize as empty JSON object if response body is empty 110 | } else { 111 | currentConfigsJson = currentResp.ResponseBody 112 | } 113 | 114 | // Compare current and desired configurations 115 | equal, err := util.IncludesJson(currentConfigsJson, string(dynamicConfig)) 116 | if err != nil { 117 | emitEvent.EmitEventGeneric( 118 | druid, 119 | string(druidConfigComparisonFailed), 120 | fmt.Sprintf("Failed to compare %s configurations", nodeType), 121 | err, 122 | ) 123 | return err 124 | } 125 | if equal { 126 | // Configurations are already up-to-date 127 | continue 128 | } 129 | 130 | // Update the Druid cluster's dynamic configurations if needed 131 | respDynamicConfigs, err := httpClient.Do( 132 | http.MethodPost, 133 | dynamicConfigPath, 134 | dynamicConfig, 135 | ) 136 | if err != nil { 137 | emitEvent.EmitEventGeneric( 138 | druid, 139 | string(druidUpdateConfigsFailed), 140 | fmt.Sprintf("Failed to update %s dynamic configurations", nodeType), 141 | err, 142 | ) 143 | return err 144 | } 145 | if respDynamicConfigs.StatusCode != http.StatusOK { 146 | return fmt.Errorf("failed to update Druid %s dynamic configurations", nodeType) 147 | } 148 | 149 | emitEvent.EmitEventGeneric( 150 | druid, 151 | string(druidUpdateConfigsSuccess), 152 | fmt.Sprintf("Successfully updated %s dynamic configurations", nodeType), 153 | nil, 154 | ) 155 | } 156 | 157 | return nil 158 | } 159 | -------------------------------------------------------------------------------- /controllers/druid/ext/deep_storage_default_ext.go: -------------------------------------------------------------------------------- 1 | package ext 2 | 3 | type DefaultDeepStorageManager struct { 4 | Properties string `json:"properties"` 5 | } 6 | 7 | func (p *DefaultDeepStorageManager) Configuration() string { 8 | return p.Properties 9 | } 10 | -------------------------------------------------------------------------------- /controllers/druid/ext/metadata_store_default_ext.go: -------------------------------------------------------------------------------- 1 | package ext 2 | 3 | type DefaultMetadataStoreManager struct { 4 | Properties string `json:"properties"` 5 | } 6 | 7 | func (p *DefaultMetadataStoreManager) Configuration() string { 8 | return p.Properties 9 | } 10 | -------------------------------------------------------------------------------- /controllers/druid/ext/zookeeper_default_ext.go: -------------------------------------------------------------------------------- 1 | package ext 2 | 3 | type DefaultZkManager struct { 4 | Properties string `json:"properties"` 5 | } 6 | 7 | func (p *DefaultZkManager) Configuration() string { 8 | return p.Properties 9 | } 10 | -------------------------------------------------------------------------------- /controllers/druid/finalizers.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "context" 5 | 
"encoding/json" 6 | "fmt" 7 | 8 | "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 9 | appsv1 "k8s.io/api/apps/v1" 10 | v1 "k8s.io/api/core/v1" 11 | "k8s.io/apimachinery/pkg/api/equality" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | "k8s.io/apimachinery/pkg/types" 14 | "sigs.k8s.io/controller-runtime/pkg/client" 15 | ) 16 | 17 | const ( 18 | deletePVCFinalizerName = "deletepvc.finalizers.druid.apache.org" 19 | ) 20 | 21 | var ( 22 | defaultFinalizers []string 23 | ) 24 | 25 | func updateFinalizers(ctx context.Context, sdk client.Client, m *v1alpha1.Druid, emitEvents EventEmitter) error { 26 | desiredFinalizers := m.GetFinalizers() 27 | additionFinalizers := defaultFinalizers 28 | 29 | desiredFinalizers = RemoveString(desiredFinalizers, deletePVCFinalizerName) 30 | if !m.Spec.DisablePVCDeletionFinalizer { 31 | additionFinalizers = append(additionFinalizers, deletePVCFinalizerName) 32 | } 33 | 34 | for _, finalizer := range additionFinalizers { 35 | if !ContainsString(desiredFinalizers, finalizer) { 36 | desiredFinalizers = append(desiredFinalizers, finalizer) 37 | } 38 | } 39 | 40 | if !equality.Semantic.DeepEqual(m.GetFinalizers(), desiredFinalizers) { 41 | m.SetFinalizers(desiredFinalizers) 42 | 43 | finalizersBytes, err := json.Marshal(m.GetFinalizers()) 44 | if err != nil { 45 | return fmt.Errorf("failed to serialize finalizers patch to bytes: %v", err) 46 | } 47 | 48 | patch := []byte(fmt.Sprintf(`[{"op": "replace", "path": "/metadata/finalizers", "value": %s}]`, finalizersBytes)) 49 | 50 | err = sdk.Patch(ctx, m, client.RawPatch(types.JSONPatchType, patch)) 51 | if err != nil { 52 | return err 53 | } 54 | 55 | } 56 | 57 | return nil 58 | } 59 | 60 | func executeFinalizers(ctx context.Context, sdk client.Client, m *v1alpha1.Druid, emitEvents EventEmitter) error { 61 | if m.Spec.DisablePVCDeletionFinalizer == false { 62 | if err := executePVCFinalizer(ctx, sdk, m, emitEvents); err != nil { 63 | return err 64 | } 65 | } 66 | return nil 67 | } 68 | 69 | /* 70 | executePVCFinalizer will execute a PVC deletion of all Druid's PVCs. 71 | Flow: 72 | 1. Get sts List and PVC List 73 | 2. Range and Delete sts first and then delete pvc. PVC must be deleted after sts termination has been executed 74 | else pvc finalizer shall block deletion since a pod/sts is referencing it. 75 | 3. Once delete is executed we block program and return. 
76 | */ 77 | func executePVCFinalizer(ctx context.Context, sdk client.Client, druid *v1alpha1.Druid, eventEmitter EventEmitter) error { 78 | if ContainsString(druid.ObjectMeta.Finalizers, deletePVCFinalizerName) { 79 | pvcLabels := map[string]string{ 80 | "druid_cr": druid.Name, 81 | } 82 | 83 | pvcList, err := readers.List(ctx, sdk, druid, pvcLabels, eventEmitter, func() objectList { return &v1.PersistentVolumeClaimList{} }, func(listObj runtime.Object) []object { 84 | items := listObj.(*v1.PersistentVolumeClaimList).Items 85 | result := make([]object, len(items)) 86 | for i := 0; i < len(items); i++ { 87 | result[i] = &items[i] 88 | } 89 | return result 90 | }) 91 | if err != nil { 92 | return err 93 | } 94 | 95 | stsList, err := readers.List(ctx, sdk, druid, makeLabelsForDruid(druid), eventEmitter, func() objectList { return &appsv1.StatefulSetList{} }, func(listObj runtime.Object) []object { 96 | items := listObj.(*appsv1.StatefulSetList).Items 97 | result := make([]object, len(items)) 98 | for i := 0; i < len(items); i++ { 99 | result[i] = &items[i] 100 | } 101 | return result 102 | }) 103 | if err != nil { 104 | return err 105 | } 106 | 107 | eventEmitter.EmitEventGeneric(druid, string(druidFinalizerTriggered), 108 | fmt.Sprintf("Trigerring finalizer [%s] for CR [%s] in namespace [%s]", deletePVCFinalizerName, druid.Name, druid.Namespace), nil) 109 | 110 | if err = deleteSTSAndPVC(ctx, sdk, druid, stsList, pvcList, eventEmitter); err != nil { 111 | eventEmitter.EmitEventGeneric(druid, string(druidFinalizerFailed), 112 | fmt.Sprintf("Finalizer [%s] failed for CR [%s] in namespace [%s]", deletePVCFinalizerName, druid.Name, druid.Namespace), err) 113 | 114 | return err 115 | } 116 | 117 | eventEmitter.EmitEventGeneric(druid, string(druidFinalizerSuccess), 118 | fmt.Sprintf("Finalizer [%s] success for CR [%s] in namespace [%s]", deletePVCFinalizerName, druid.Name, druid.Namespace), nil) 119 | 120 | // remove our finalizer from the list and update it. 121 | druid.ObjectMeta.Finalizers = RemoveString(druid.ObjectMeta.Finalizers, deletePVCFinalizerName) 122 | 123 | _, err = writers.Update(ctx, sdk, druid, druid, eventEmitter) 124 | if err != nil { 125 | return err 126 | } 127 | 128 | } 129 | return nil 130 | } 131 | -------------------------------------------------------------------------------- /controllers/druid/metadata_store_dep_mgmt.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "reflect" 7 | 8 | "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 9 | "github.com/datainfrahq/druid-operator/controllers/druid/ext" 10 | ) 11 | 12 | var metadataStoreExtTypes = map[string]reflect.Type{} 13 | 14 | func init() { 15 | metadataStoreExtTypes["default"] = reflect.TypeOf(ext.DefaultMetadataStoreManager{}) 16 | } 17 | 18 | // We might have to add more methods to this interface to enable extensions that completely manage 19 | // deploy, upgrade and termination of metadata store. 20 | type metadataStoreManager interface { 21 | Configuration() string 22 | } 23 | 24 | func createMetadataStoreManager(spec *v1alpha1.MetadataStoreSpec) (metadataStoreManager, error) { 25 | if t, ok := metadataStoreExtTypes[spec.Type]; ok { 26 | v := reflect.New(t).Interface() 27 | if err := json.Unmarshal(spec.Spec, v); err != nil { 28 | return nil, fmt.Errorf("Couldn't unmarshall metadataStore type[%s]. 
Error[%s].", spec.Type, err.Error()) 29 | } else { 30 | return v.(metadataStoreManager), nil 31 | } 32 | } else { 33 | return nil, fmt.Errorf("Can't find type[%s] for MetadataStore Mgmt.", spec.Type) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /controllers/druid/ordering.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 4 | 5 | var ( 6 | druidServicesOrder = []string{historical, overlord, middleManager, indexer, broker, coordinator, router} 7 | ) 8 | 9 | type ServiceGroup struct { 10 | key string 11 | spec v1alpha1.DruidNodeSpec 12 | } 13 | 14 | // getNodeSpecsByOrder returns all NodeSpecs of a given Druid object. 15 | // Recommended order is described at http://druid.io/docs/latest/operations/rolling-updates.html 16 | func getNodeSpecsByOrder(m *v1alpha1.Druid) []*ServiceGroup { 17 | 18 | scaledServiceSpecsByNodeType := map[string][]*ServiceGroup{} 19 | for _, t := range druidServicesOrder { 20 | scaledServiceSpecsByNodeType[t] = []*ServiceGroup{} 21 | } 22 | 23 | for key, nodeSpec := range m.Spec.Nodes { 24 | scaledServiceSpec := scaledServiceSpecsByNodeType[nodeSpec.NodeType] 25 | scaledServiceSpecsByNodeType[nodeSpec.NodeType] = append(scaledServiceSpec, &ServiceGroup{key: key, spec: nodeSpec}) 26 | } 27 | 28 | allScaledServiceSpecs := make([]*ServiceGroup, 0, len(m.Spec.Nodes)) 29 | 30 | for _, t := range druidServicesOrder { 31 | allScaledServiceSpecs = append(allScaledServiceSpecs, scaledServiceSpecsByNodeType[t]...) 32 | } 33 | 34 | return allScaledServiceSpecs 35 | } 36 | -------------------------------------------------------------------------------- /controllers/druid/ordering_test.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "time" 5 | 6 | druidv1alpha1 "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 7 | . "github.com/onsi/ginkgo/v2" 8 | .
"github.com/onsi/gomega" 9 | "k8s.io/apimachinery/pkg/types" 10 | ) 11 | 12 | // +kubebuilder:docs-gen:collapse=Imports 13 | 14 | /* 15 | ordering_test 16 | */ 17 | var _ = Describe("Test ordering logic", func() { 18 | const ( 19 | filePath = "testdata/ordering.yaml" 20 | timeout = time.Second * 45 21 | interval = time.Millisecond * 250 22 | ) 23 | 24 | var ( 25 | druid = &druidv1alpha1.Druid{} 26 | ) 27 | 28 | Context("When creating a druid cluster with multiple nodes", func() { 29 | It("Should create the druid object", func() { 30 | By("Creating a new druid") 31 | druidCR, err := readDruidClusterSpecFromFile(filePath) 32 | Expect(err).Should(BeNil()) 33 | Expect(k8sClient.Create(ctx, druidCR)).To(Succeed()) 34 | 35 | By("Getting a newly created druid") 36 | Eventually(func() bool { 37 | err := k8sClient.Get(ctx, types.NamespacedName{Name: druidCR.Name, Namespace: druidCR.Namespace}, druid) 38 | return err == nil 39 | }, timeout, interval).Should(BeTrue()) 40 | }) 41 | It("Should return an ordered list of nodes", func() { 42 | orderedServiceGroups := getNodeSpecsByOrder(druid) 43 | Expect(orderedServiceGroups[0].key).Should(MatchRegexp("historicals")) 44 | Expect(orderedServiceGroups[1].key).Should(MatchRegexp("historicals")) 45 | Expect(orderedServiceGroups[2].key).Should(Equal("overlords")) 46 | Expect(orderedServiceGroups[3].key).Should(Equal("middle-managers")) 47 | Expect(orderedServiceGroups[4].key).Should(Equal("indexers")) 48 | Expect(orderedServiceGroups[5].key).Should(Equal("brokers")) 49 | Expect(orderedServiceGroups[6].key).Should(Equal("coordinators")) 50 | Expect(orderedServiceGroups[7].key).Should(Equal("routers")) 51 | }) 52 | }) 53 | }) 54 | -------------------------------------------------------------------------------- /controllers/druid/predicates.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "fmt" 5 | 6 | "sigs.k8s.io/controller-runtime/pkg/event" 7 | "sigs.k8s.io/controller-runtime/pkg/predicate" 8 | ) 9 | 10 | // All methods to implement GenericPredicates type 11 | // GenericPredicates to be passed to manager 12 | type GenericPredicates struct { 13 | predicate.Funcs 14 | } 15 | 16 | // create() to filter create events 17 | func (GenericPredicates) Create(e event.CreateEvent) bool { 18 | return IgnoreNamespacePredicate(e.Object) && IgnoreIgnoredObjectPredicate(e.Object) 19 | } 20 | 21 | // update() to filter update events 22 | func (GenericPredicates) Update(e event.UpdateEvent) bool { 23 | return IgnoreNamespacePredicate(e.ObjectNew) && IgnoreIgnoredObjectPredicate(e.ObjectNew) 24 | } 25 | 26 | func IgnoreNamespacePredicate(obj object) bool { 27 | namespaces := getEnvAsSlice("DENY_LIST", nil, ",") 28 | 29 | for _, namespace := range namespaces { 30 | if obj.GetNamespace() == namespace { 31 | msg := fmt.Sprintf("druid operator will not re-concile namespace [%s], alter DENY_LIST to re-concile", obj.GetNamespace()) 32 | logger.Info(msg) 33 | return false 34 | } 35 | } 36 | return true 37 | } 38 | 39 | func IgnoreIgnoredObjectPredicate(obj object) bool { 40 | if ignoredStatus := obj.GetAnnotations()[ignoredAnnotation]; ignoredStatus == "true" { 41 | msg := fmt.Sprintf("druid operator will not re-concile ignored Druid [%s], removed annotation to re-concile", obj.GetName()) 42 | logger.Info(msg) 43 | return false 44 | } 45 | return true 46 | } 47 | -------------------------------------------------------------------------------- /controllers/druid/status.go: 
-------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "reflect" 8 | 9 | "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 10 | v1 "k8s.io/api/core/v1" 11 | "k8s.io/apimachinery/pkg/types" 12 | "sigs.k8s.io/controller-runtime/pkg/client" 13 | ) 14 | 15 | // constructor to DruidNodeTypeStatus status 16 | // handles error 17 | func newDruidNodeTypeStatus( 18 | nodeConditionStatus v1.ConditionStatus, 19 | nodeCondition v1alpha1.DruidNodeConditionType, 20 | nodeTierOrType string, 21 | err error) *v1alpha1.DruidNodeTypeStatus { 22 | 23 | var reason string 24 | 25 | if nodeCondition == v1alpha1.DruidClusterReady { 26 | nodeTierOrType = "All" 27 | reason = "All Druid Nodes are in Ready Condition" 28 | } else if nodeCondition == v1alpha1.DruidNodeRollingUpdate { 29 | reason = "Druid Node [" + nodeTierOrType + "] is Rolling Update" 30 | } else if err != nil { 31 | reason = err.Error() 32 | nodeCondition = v1alpha1.DruidNodeErrorState 33 | } 34 | 35 | return &v1alpha1.DruidNodeTypeStatus{ 36 | DruidNode: nodeTierOrType, 37 | DruidNodeConditionStatus: nodeConditionStatus, 38 | DruidNodeConditionType: nodeCondition, 39 | Reason: reason, 40 | } 41 | 42 | } 43 | 44 | // wrapper to patch druid cluster status 45 | func druidClusterStatusPatcher(ctx context.Context, sdk client.Client, updatedStatus v1alpha1.DruidClusterStatus, m *v1alpha1.Druid, emitEvent EventEmitter) error { 46 | 47 | if !reflect.DeepEqual(updatedStatus, m.Status) { 48 | patchBytes, err := json.Marshal(map[string]v1alpha1.DruidClusterStatus{"status": updatedStatus}) 49 | if err != nil { 50 | return fmt.Errorf("failed to serialize status patch to bytes: %v", err) 51 | } 52 | _ = writers.Patch(ctx, sdk, m, m, true, client.RawPatch(types.MergePatchType, patchBytes), emitEvent) 53 | } 54 | return nil 55 | } 56 | 57 | // In case of state change, patch the status and emit event. 58 | // emit events only on state change, to avoid event pollution. 59 | func druidNodeConditionStatusPatch(ctx context.Context, 60 | updatedStatus v1alpha1.DruidClusterStatus, 61 | sdk client.Client, 62 | nodeSpecUniqueStr string, 63 | m *v1alpha1.Druid, 64 | emitEvent EventEmitter, 65 | emptyObjFn func() object) (err error) { 66 | 67 | if !reflect.DeepEqual(updatedStatus.DruidNodeStatus, m.Status.DruidNodeStatus) { 68 | 69 | err = druidClusterStatusPatcher(ctx, sdk, updatedStatus, m, emitEvent) 70 | if err != nil { 71 | return err 72 | } 73 | 74 | obj, err := readers.Get(ctx, sdk, nodeSpecUniqueStr, m, emptyObjFn, emitEvent) 75 | if err != nil { 76 | return err 77 | } 78 | 79 | emitEvent.EmitEventRollingDeployWait(m, obj, nodeSpecUniqueStr) 80 | 81 | return nil 82 | 83 | } 84 | return nil 85 | } 86 | -------------------------------------------------------------------------------- /controllers/druid/suite_test.go: -------------------------------------------------------------------------------- 1 | // +kubebuilder:docs-gen:collapse=Apache License 2 | 3 | package druid 4 | 5 | import ( 6 | "context" 7 | "path/filepath" 8 | "testing" 9 | 10 | ctrl "sigs.k8s.io/controller-runtime" 11 | 12 | . "github.com/onsi/ginkgo/v2" 13 | . 
"github.com/onsi/gomega" 14 | "k8s.io/client-go/kubernetes/scheme" 15 | "k8s.io/client-go/rest" 16 | "sigs.k8s.io/controller-runtime/pkg/client" 17 | "sigs.k8s.io/controller-runtime/pkg/envtest" 18 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 19 | 20 | druidv1alpha1 "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 21 | //+kubebuilder:scaffold:imports 22 | ) 23 | 24 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 25 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 26 | 27 | // +kubebuilder:docs-gen:collapse=Imports 28 | 29 | // Now, let's go through the code generated. 30 | 31 | var ( 32 | cfg *rest.Config 33 | k8sClient client.Client // You'll be using this client in your tests. 34 | testEnv *envtest.Environment 35 | ctx context.Context 36 | cancel context.CancelFunc 37 | emitEvent EventEmitter 38 | ) 39 | 40 | func TestAPIs(t *testing.T) { 41 | RegisterFailHandler(Fail) 42 | 43 | RunSpecs(t, "Controller Suite") 44 | } 45 | 46 | var _ = BeforeSuite(func() { 47 | ctrl.SetLogger(zap.New()) 48 | 49 | ctx, cancel = context.WithCancel(context.TODO()) 50 | 51 | /* 52 | First, the envtest cluster is configured to read CRDs from the CRD directory Kubebuilder scaffolds for you. 53 | */ 54 | By("bootstrapping test environment") 55 | testEnv = &envtest.Environment{ 56 | CRDInstallOptions: envtest.CRDInstallOptions{ 57 | Paths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, 58 | }, 59 | ErrorIfCRDPathMissing: true, 60 | } 61 | 62 | // Then, we start the envtest cluster. 63 | var err error 64 | // cfg is defined in this file globally. 65 | cfg, err = testEnv.Start() 66 | Expect(err).NotTo(HaveOccurred()) 67 | Expect(cfg).NotTo(BeNil()) 68 | 69 | err = druidv1alpha1.AddToScheme(scheme.Scheme) 70 | Expect(err).NotTo(HaveOccurred()) 71 | /* 72 | After the schemas, you will see the following marker. 73 | This marker is what allows new schemas to be added here automatically when a new API is added to the project. 74 | */ 75 | 76 | //+kubebuilder:scaffold:scheme 77 | 78 | // A client is created for our test CRUD operations. 79 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 80 | Expect(err).NotTo(HaveOccurred()) 81 | Expect(k8sClient).NotTo(BeNil()) 82 | 83 | k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ 84 | Scheme: scheme.Scheme, 85 | }) 86 | Expect(err).ToNot(HaveOccurred()) 87 | 88 | err = (&DruidReconciler{ 89 | Client: k8sManager.GetClient(), 90 | Log: ctrl.Log.WithName("controllers").WithName("Druid"), 91 | Scheme: k8sManager.GetScheme(), 92 | ReconcileWait: LookupReconcileTime(), 93 | Recorder: k8sManager.GetEventRecorderFor("druid-operator"), 94 | }).SetupWithManager(k8sManager) 95 | Expect(err).ToNot(HaveOccurred()) 96 | 97 | emitEvent = EmitEventFuncs{k8sManager.GetEventRecorderFor("druid-operator")} 98 | 99 | go func() { 100 | defer GinkgoRecover() 101 | err = k8sManager.Start(ctx) 102 | Expect(err).ToNot(HaveOccurred(), "failed to run manager") 103 | }() 104 | 105 | }) 106 | 107 | /* 108 | Kubebuilder also generates boilerplate functions for cleaning up envtest and actually running your test files in your controllers/ directory. 109 | You won't need to touch these. 
110 | */ 111 | 112 | var _ = AfterSuite(func() { 113 | cancel() 114 | By("tearing down the test environment") 115 | err := testEnv.Stop() 116 | Expect(err).NotTo(HaveOccurred()) 117 | }) 118 | -------------------------------------------------------------------------------- /controllers/druid/testdata/additional-containers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: druid.apache.org/v1alpha1 2 | kind: Druid 3 | metadata: 4 | name: additional-containers 5 | namespace: default 6 | spec: 7 | image: apache/druid:25.0.0 8 | startScript: /druid.sh 9 | rollingDeploy: false 10 | additionalContainer: 11 | - command: 12 | - /bin/sh echo hello 13 | containerName: cluster-level 14 | image: hello-world 15 | securityContext: 16 | fsGroup: 1000 17 | runAsUser: 1000 18 | runAsGroup: 1000 19 | services: 20 | - spec: 21 | type: ClusterIP 22 | commonConfigMountPath: "/opt/druid/conf/druid/cluster/_common" 23 | jvm.options: |- 24 | -server 25 | -XX:MaxDirectMemorySize=10240g 26 | -Duser.timezone=UTC 27 | -Dfile.encoding=UTF-8 28 | -Djava.io.tmpdir=/druid/data 29 | common.runtime.properties: |- 30 | # Metadata Store 31 | druid.metadata.storage.type=derby 32 | druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/druid/data/derbydb/metadata.db;create=true 33 | druid.metadata.storage.connector.host=localhost 34 | druid.metadata.storage.connector.port=1527 35 | druid.metadata.storage.connector.createTables=true 36 | 37 | # Deep Storage 38 | druid.storage.type=local 39 | druid.storage.storageDirectory=/druid/deepstorage 40 | 41 | # Service discovery 42 | druid.selectors.indexing.serviceName=druid/overlord 43 | druid.selectors.coordinator.serviceName=druid/coordinator 44 | nodes: 45 | brokers: 46 | nodeType: "broker" 47 | kind: "Deployment" 48 | druid.port: 8088 49 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/query/broker" 50 | replicas: 1 51 | runtime.properties: |- 52 | druid.service=druid/broker 53 | additionalContainer: 54 | - command: 55 | - /bin/sh echo hello 56 | containerName: node-level 57 | image: hello-world 58 | coordinators: 59 | nodeType: "coordinator" 60 | druid.port: 8080 61 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/master/coordinator-overlord" 62 | replicas: 1 63 | runtime.properties: |- 64 | druid.service=druid/coordinator 65 | druid.coordinator.asOverlord.enabled=true 66 | druid.coordinator.asOverlord.overlordService=druid/overlord 67 | historicals: 68 | nodeType: "historical" 69 | druid.port: 8080 70 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/data/historical" 71 | replicas: 1 72 | runtime.properties: |- 73 | druid.service=druid/historical 74 | druid.segmentCache.locations=[{\"path\":\"/druid/data/segments\",\"maxSize\":10737418240}] 75 | druid.server.maxSize=10737418240 76 | -------------------------------------------------------------------------------- /controllers/druid/testdata/broker-config-map.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | jvm.config: |- 4 | -server 5 | -XX:MaxDirectMemorySize=10240g 6 | -Duser.timezone=UTC 7 | -Dfile.encoding=UTF-8 8 | -Dlog4j.debug 9 | -XX:+ExitOnOutOfMemoryError 10 | -XX:+HeapDumpOnOutOfMemoryError 11 | -XX:+UseG1GC 12 | -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager 13 | -Xmx1G 14 | -Xms1G 15 | log4j2.xml: |- 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | runtime.properties: |- 30 | druid.port=8080 31 | druid.service=druid/broker 
32 | 33 | # HTTP server threads 34 | druid.broker.http.numConnections=5 35 | druid.server.http.numThreads=25 36 | 37 | # Processing threads and buffers 38 | druid.processing.buffer.sizeBytes=1 39 | druid.processing.numMergeBuffers=1 40 | druid.processing.numThreads=1 41 | kind: ConfigMap 42 | metadata: 43 | labels: 44 | app: druid 45 | druid_cr: druid-test 46 | nodeSpecUniqueStr: druid-druid-test-brokers 47 | component: broker 48 | name: druid-druid-test-brokers-config 49 | namespace: test-namespace 50 | annotations: 51 | druidOpResourceHash: O3jmICgrTjJkMBlGlE05W7dGhA0= 52 | -------------------------------------------------------------------------------- /controllers/druid/testdata/broker-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: druid-druid-test-brokers 5 | namespace: test-namespace 6 | labels: 7 | app: druid 8 | druid_cr: druid-test 9 | nodeSpecUniqueStr: druid-druid-test-brokers 10 | component: broker 11 | annotations: 12 | druidOpResourceHash: nQv/LmxctTSRsI5dtBe3jvN5WM8= 13 | kubernetes.io/cluster-scoped-annotation: "cluster" 14 | kubernetes.io/node-scoped-annotation: "broker" 15 | kubernetes.io/override-annotation: "node-scoped-annotation" 16 | spec: 17 | replicas: 2 18 | selector: 19 | matchLabels: 20 | app: druid 21 | druid_cr: druid-test 22 | nodeSpecUniqueStr: druid-druid-test-brokers 23 | component: broker 24 | strategy: 25 | rollingUpdate: {} 26 | type: RollingUpdate 27 | template: 28 | metadata: 29 | labels: 30 | app: druid 31 | druid_cr: druid-test 32 | nodeSpecUniqueStr: druid-druid-test-brokers 33 | component: broker 34 | annotations: 35 | key1: value1 36 | key2: value2 37 | spec: 38 | tolerations: [] 39 | affinity: {} 40 | priorityClassName: high-priority 41 | containers: 42 | - command: 43 | - bin/run-druid.sh 44 | - broker 45 | image: himanshu01/druid:druid-0.12.0-1 46 | name: druid-druid-test-brokers 47 | env: 48 | - name: configMapSHA 49 | value: blah 50 | ports: 51 | - containerPort: 8083 52 | name: random 53 | readinessProbe: 54 | httpGet: 55 | path: /status 56 | port: 8080 57 | livenessProbe: 58 | httpGet: 59 | path: /status 60 | port: 8080 61 | startupProbe: 62 | failureThreshold: 20 63 | httpGet: 64 | path: /druid/broker/v1/readiness 65 | port: 8080 66 | initialDelaySeconds: 5 67 | periodSeconds: 10 68 | successThreshold: 1 69 | timeoutSeconds: 5 70 | resources: 71 | limits: 72 | cpu: "4" 73 | memory: 2Gi 74 | requests: 75 | cpu: "4" 76 | memory: 2Gi 77 | volumeMounts: 78 | - mountPath: /druid/conf/druid/_common 79 | readOnly: true 80 | name: common-config-volume 81 | - mountPath: /druid/conf/druid/broker 82 | readOnly: true 83 | name: nodetype-config-volume 84 | - mountPath: /druid/data 85 | readOnly: true 86 | name: data-volume 87 | securityContext: 88 | fsGroup: 107 89 | runAsUser: 106 90 | volumes: 91 | - configMap: 92 | name: druid-test-druid-common-config 93 | name: common-config-volume 94 | - configMap: 95 | name: druid-druid-test-brokers-config 96 | name: nodetype-config-volume 97 | -------------------------------------------------------------------------------- /controllers/druid/testdata/broker-headless-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | druidOpResourceHash: 5zzzIiXTlupyCeyb/P8llEZN1Ag= 6 | creationTimestamp: null 7 | labels: 8 | app: druid 9 | druid_cr: druid-test 10 | nodeSpecUniqueStr: 
druid-druid-test-brokers 11 | component: broker 12 | name: druid-druid-test-brokers 13 | namespace: test-namespace 14 | spec: 15 | clusterIP: None 16 | ports: 17 | - name: service-port 18 | port: 8080 19 | targetPort: 8080 20 | selector: 21 | app: druid 22 | druid_cr: druid-test 23 | nodeSpecUniqueStr: druid-druid-test-brokers 24 | component: broker 25 | type: ClusterIP 26 | status: 27 | loadBalancer: {} 28 | -------------------------------------------------------------------------------- /controllers/druid/testdata/broker-load-balancer-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 6 | druidOpResourceHash: vQE/6DfWCQnWFVvW3KxfOLBfdlA= 7 | labels: 8 | app: druid 9 | druid_cr: druid-test 10 | nodeSpecUniqueStr: druid-druid-test-brokers 11 | component: broker 12 | name: broker-druid-druid-test-brokers-service 13 | namespace: test-namespace 14 | spec: 15 | ports: 16 | - name: service-port 17 | port: 8090 18 | targetPort: 8080 19 | selector: 20 | app: druid 21 | druid_cr: druid-test 22 | nodeSpecUniqueStr: druid-druid-test-brokers 23 | component: broker 24 | type: LoadBalancer 25 | -------------------------------------------------------------------------------- /controllers/druid/testdata/broker-pod-disruption-budget.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | labels: 5 | app: druid 6 | druid_cr: druid-test 7 | nodeSpecUniqueStr: druid-druid-test-brokers 8 | component: broker 9 | name: druid-druid-test-brokers 10 | namespace: test-namespace 11 | annotations: 12 | druidOpResourceHash: DmYcIjqpkJs9KWZ/tfHgHPBJ/wo= 13 | spec: 14 | maxUnavailable: 1 15 | selector: 16 | matchLabels: 17 | app: druid 18 | druid_cr: druid-test 19 | nodeSpecUniqueStr: druid-druid-test-brokers 20 | component: broker 21 | -------------------------------------------------------------------------------- /controllers/druid/testdata/broker-statefulset-noprobe.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: druid-druid-test-brokers 5 | namespace: test-namespace 6 | labels: 7 | app: druid 8 | druid_cr: druid-test 9 | nodeSpecUniqueStr: druid-druid-test-brokers 10 | component: broker 11 | annotations: 12 | druidOpResourceHash: SfMSTxw3YZTBJP0JKsd5ZBiGgkI= 13 | spec: 14 | podManagementPolicy: Parallel 15 | replicas: 2 16 | selector: 17 | matchLabels: 18 | app: druid 19 | druid_cr: druid-test 20 | nodeSpecUniqueStr: druid-druid-test-brokers 21 | component: broker 22 | serviceName: druid-druid-test-brokers 23 | template: 24 | metadata: 25 | labels: 26 | app: druid 27 | druid_cr: druid-test 28 | nodeSpecUniqueStr: druid-druid-test-brokers 29 | component: broker 30 | annotations: 31 | key1: value1 32 | key2: value2 33 | spec: 34 | tolerations: [] 35 | affinity: {} 36 | containers: 37 | - command: 38 | - bin/run-druid.sh 39 | - broker 40 | image: himanshu01/druid:druid-0.12.0-1 41 | name: druid-druid-test-brokers 42 | env: 43 | - name: configMapSHA 44 | value: blah 45 | ports: 46 | - containerPort: 8083 47 | name: random 48 | readinessProbe: 49 | httpGet: 50 | path: /status 51 | port: 8080 52 | resources: 53 | limits: 54 | cpu: "4" 55 | memory: 2Gi 56 | requests: 57 | cpu: "4" 58 | memory: 2Gi 59 | volumeMounts: 60 | - mountPath: 
/druid/conf/druid/_common 61 | readOnly: true 62 | name: common-config-volume 63 | - mountPath: /druid/conf/druid/broker 64 | readOnly: true 65 | name: nodetype-config-volume 66 | - mountPath: /druid/data 67 | readOnly: true 68 | name: data-volume 69 | securityContext: 70 | fsGroup: 107 71 | runAsUser: 106 72 | volumes: 73 | - configMap: 74 | name: druid-test-druid-common-config 75 | name: common-config-volume 76 | - configMap: 77 | name: druid-druid-test-brokers-config 78 | name: nodetype-config-volume 79 | volumeClaimTemplates: 80 | - metadata: 81 | name: data-volume 82 | spec: 83 | accessModes: 84 | - ReadWriteOnce 85 | resources: 86 | requests: 87 | storage: 2Gi 88 | storageClassName: gp2 89 | -------------------------------------------------------------------------------- /controllers/druid/testdata/broker-statefulset-sidecar.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: druid-druid-test-brokers 5 | namespace: test-namespace 6 | labels: 7 | app: druid 8 | druid_cr: druid-test 9 | nodeSpecUniqueStr: druid-druid-test-brokers 10 | component: broker 11 | annotations: 12 | druidOpResourceHash: Q+HvxwhK3mmgnAm97GEQpeWbDv0= 13 | spec: 14 | podManagementPolicy: Parallel 15 | replicas: 2 16 | selector: 17 | matchLabels: 18 | app: druid 19 | druid_cr: druid-test 20 | nodeSpecUniqueStr: druid-druid-test-brokers 21 | component: broker 22 | serviceName: druid-druid-test-brokers 23 | template: 24 | metadata: 25 | labels: 26 | app: druid 27 | druid_cr: druid-test 28 | nodeSpecUniqueStr: druid-druid-test-brokers 29 | component: broker 30 | annotations: 31 | key1: value1 32 | key2: value2 33 | spec: 34 | tolerations: [] 35 | affinity: {} 36 | containers: 37 | - command: 38 | - bin/run-druid.sh 39 | - broker 40 | image: apache/druid:0.22.1 41 | name: druid-druid-test-brokers 42 | env: 43 | - name: configMapSHA 44 | value: blah 45 | ports: 46 | - containerPort: 8083 47 | name: random 48 | readinessProbe: 49 | httpGet: 50 | path: /status 51 | port: 8080 52 | livenessProbe: 53 | httpGet: 54 | path: /status 55 | port: 8080 56 | startupProbe: 57 | failureThreshold: 20 58 | httpGet: 59 | path: /druid/broker/v1/readiness 60 | port: 8080 61 | initialDelaySeconds: 5 62 | periodSeconds: 10 63 | successThreshold: 1 64 | timeoutSeconds: 5 65 | resources: 66 | limits: 67 | cpu: "4" 68 | memory: 2Gi 69 | requests: 70 | cpu: "4" 71 | memory: 2Gi 72 | volumeMounts: 73 | - mountPath: /druid/conf/druid/_common 74 | readOnly: true 75 | name: common-config-volume 76 | - mountPath: /druid/conf/druid/broker 77 | readOnly: true 78 | name: nodetype-config-volume 79 | - mountPath: /druid/data 80 | readOnly: true 81 | name: data-volume 82 | - command: 83 | - /bin/sidekick 84 | image: universalforwarder-sidekick:next 85 | name: forwarder 86 | resources: 87 | requests: 88 | memory: "1Gi" 89 | cpu: "500m" 90 | limits: 91 | memory: "1Gi" 92 | cpu: "500m" 93 | args: 94 | - -loggingEnabled=true 95 | - -dataCenter=dataCenter 96 | - -environment=environment 97 | - -application=application 98 | - -instance=instance 99 | - -logFiles=logFiles 100 | securityContext: 101 | runAsUser: 506 102 | imagePullPolicy: Always 103 | volumeMounts: 104 | - name: logstore 105 | mountPath: /logstore 106 | env: 107 | - name: SAMPLE_ENV 108 | value: SAMPLE_VALUE 109 | securityContext: 110 | fsGroup: 107 111 | runAsUser: 106 112 | volumes: 113 | - configMap: 114 | name: druid-test-druid-common-config 115 | name: common-config-volume 116 | - 
configMap: 117 | name: druid-druid-test-brokers-config 118 | name: nodetype-config-volume 119 | volumeClaimTemplates: 120 | - metadata: 121 | name: data-volume 122 | spec: 123 | accessModes: 124 | - ReadWriteOnce 125 | resources: 126 | requests: 127 | storage: 2Gi 128 | storageClassName: gp2 129 | -------------------------------------------------------------------------------- /controllers/druid/testdata/broker-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: druid-druid-test-brokers 5 | namespace: test-namespace 6 | labels: 7 | app: druid 8 | druid_cr: druid-test 9 | nodeSpecUniqueStr: druid-druid-test-brokers 10 | component: broker 11 | annotations: 12 | druidOpResourceHash: LXsKmbxQkX+94LkevlKDy4wemRQ= 13 | kubernetes.io/cluster-scoped-annotation: "cluster" 14 | kubernetes.io/node-scoped-annotation: "broker" 15 | kubernetes.io/override-annotation: "node-scoped-annotation" 16 | spec: 17 | podManagementPolicy: Parallel 18 | replicas: 2 19 | selector: 20 | matchLabels: 21 | app: druid 22 | druid_cr: druid-test 23 | nodeSpecUniqueStr: druid-druid-test-brokers 24 | component: broker 25 | serviceName: druid-druid-test-brokers 26 | template: 27 | metadata: 28 | labels: 29 | app: druid 30 | druid_cr: druid-test 31 | nodeSpecUniqueStr: druid-druid-test-brokers 32 | component: broker 33 | annotations: 34 | key1: value1 35 | key2: value2 36 | spec: 37 | tolerations: [] 38 | affinity: {} 39 | priorityClassName: high-priority 40 | containers: 41 | - command: 42 | - bin/run-druid.sh 43 | - broker 44 | image: himanshu01/druid:druid-0.12.0-1 45 | name: druid-druid-test-brokers 46 | env: 47 | - name: configMapSHA 48 | value: blah 49 | ports: 50 | - containerPort: 8083 51 | name: random 52 | readinessProbe: 53 | httpGet: 54 | path: /status 55 | port: 8080 56 | livenessProbe: 57 | httpGet: 58 | path: /status 59 | port: 8080 60 | startupProbe: 61 | failureThreshold: 20 62 | httpGet: 63 | path: /druid/broker/v1/readiness 64 | port: 8080 65 | initialDelaySeconds: 5 66 | periodSeconds: 10 67 | successThreshold: 1 68 | timeoutSeconds: 5 69 | resources: 70 | limits: 71 | cpu: "4" 72 | memory: 2Gi 73 | requests: 74 | cpu: "4" 75 | memory: 2Gi 76 | volumeMounts: 77 | - mountPath: /druid/conf/druid/_common 78 | readOnly: true 79 | name: common-config-volume 80 | - mountPath: /druid/conf/druid/broker 81 | readOnly: true 82 | name: nodetype-config-volume 83 | - mountPath: /druid/data 84 | readOnly: true 85 | name: data-volume 86 | securityContext: 87 | fsGroup: 107 88 | runAsUser: 106 89 | volumes: 90 | - configMap: 91 | name: druid-test-druid-common-config 92 | name: common-config-volume 93 | - configMap: 94 | name: druid-druid-test-brokers-config 95 | name: nodetype-config-volume 96 | volumeClaimTemplates: 97 | - metadata: 98 | name: data-volume 99 | spec: 100 | accessModes: 101 | - ReadWriteOnce 102 | resources: 103 | requests: 104 | storage: 2Gi 105 | storageClassName: gp2 106 | -------------------------------------------------------------------------------- /controllers/druid/testdata/common-config-map.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | common.runtime.properties: | 4 | # 5 | # Extensions 6 | # 7 | druid.extensions.loadList=["druid-datasketches", "druid-s3-extensions", "postgresql-metadata-storage"] 8 | 9 | # 10 | # Logging 11 | # 12 | # Log all runtime properties on startup. 
Disable to avoid logging properties on startup: 13 | druid.startup.logging.logProperties=true 14 | 15 | # 16 | # Indexing service logs 17 | # 18 | # Store indexing logs in an S3 bucket named 'druid-deep-storage' with the 19 | # prefix 'druid/indexing-logs' 20 | druid.indexer.logs.type=s3 21 | druid.indexer.logs.s3Bucket=mybucket 22 | druid.indexer.logs.s3Prefix=druid/indexing-logs 23 | 24 | # 25 | # Service discovery 26 | # 27 | druid.selectors.indexing.serviceName=druid/overlord 28 | druid.selectors.coordinator.serviceName=druid/coordinator 29 | 30 | # 31 | # Monitoring 32 | # 33 | druid.monitoring.monitors=["com.metamx.metrics.JvmMonitor"] 34 | druid.emitter=logging 35 | druid.emitter.logging.logLevel=info 36 | 37 | # Storage type of double columns 38 | # ommiting this will lead to index double as float at the storage layer 39 | druid.indexing.doubleStorage=double 40 | druid.zk.service.host=zookeeper-0.zookeeper,zookeeper-1.zookeeper,zookeeper-2.zookeeper 41 | druid.zk.paths.base=/druid 42 | druid.zk.service.compress=false 43 | 44 | druid.metadata.storage.type=postgresql 45 | druid.metadata.storage.connector.connectURI=jdbc:postgresql://rdsaddr.us-west-2.rds.amazonaws.com:5432/druiddb 46 | druid.metadata.storage.connector.user=iamuser 47 | druid.metadata.storage.connector.password=changeme 48 | druid.metadata.storage.connector.createTables=true 49 | 50 | druid.storage.type=s3 51 | druid.storage.bucket=mybucket 52 | druid.storage.baseKey=druid/segments 53 | druid.s3.accessKey=accesskey 54 | druid.s3.secretKey=secretkey 55 | metricDimensions.json: |- 56 | { 57 | "query/time" : { "dimensions" : ["dataSource", "type"], "type" : "timer"} 58 | } 59 | kind: ConfigMap 60 | metadata: 61 | labels: 62 | app: druid 63 | druid_cr: druid-test 64 | name: druid-test-druid-common-config 65 | namespace: test-namespace 66 | annotations: 67 | druidOpResourceHash: 1/BkoZEvfj+rB80LzeQeu6T7mhs= 68 | -------------------------------------------------------------------------------- /controllers/druid/testdata/finalizers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: druid.apache.org/v1alpha1 2 | kind: Druid 3 | metadata: 4 | name: finalizers 5 | namespace: default 6 | spec: 7 | image: apache/druid:25.0.0 8 | startScript: /druid.sh 9 | rollingDeploy: false 10 | securityContext: 11 | fsGroup: 1000 12 | runAsUser: 1000 13 | runAsGroup: 1000 14 | services: 15 | - spec: 16 | type: ClusterIP 17 | commonConfigMountPath: "/opt/druid/conf/druid/cluster/_common" 18 | jvm.options: |- 19 | -server 20 | -XX:MaxDirectMemorySize=10240g 21 | -Duser.timezone=UTC 22 | -Dfile.encoding=UTF-8 23 | -Djava.io.tmpdir=/druid/data 24 | common.runtime.properties: |- 25 | # Metadata Store 26 | druid.metadata.storage.type=derby 27 | druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/druid/data/derbydb/metadata.db;create=true 28 | druid.metadata.storage.connector.host=localhost 29 | druid.metadata.storage.connector.port=1527 30 | druid.metadata.storage.connector.createTables=true 31 | 32 | # Deep Storage 33 | druid.storage.type=local 34 | druid.storage.storageDirectory=/druid/deepstorage 35 | 36 | # Service discovery 37 | druid.selectors.indexing.serviceName=druid/overlord 38 | druid.selectors.coordinator.serviceName=druid/coordinator 39 | nodes: 40 | brokers: 41 | nodeType: "broker" 42 | kind: "Deployment" 43 | druid.port: 8088 44 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/query/broker" 45 | replicas: 1 46 | runtime.properties: |- 47 | 
druid.service=druid/broker 48 | additionalContainer: 49 | - command: 50 | - /bin/sh echo hello 51 | containerName: node-level 52 | image: hello-world 53 | coordinators: 54 | nodeType: "coordinator" 55 | druid.port: 8080 56 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/master/coordinator-overlord" 57 | replicas: 1 58 | runtime.properties: |- 59 | druid.service=druid/coordinator 60 | druid.coordinator.asOverlord.enabled=true 61 | druid.coordinator.asOverlord.overlordService=druid/overlord 62 | historicals: 63 | nodeType: "historical" 64 | druid.port: 8080 65 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/data/historical" 66 | replicas: 1 67 | runtime.properties: |- 68 | druid.service=druid/historical 69 | druid.segmentCache.locations=[{\"path\":\"/druid/data/segments\",\"maxSize\":10737418240}] 70 | druid.server.maxSize=10737418240 71 | -------------------------------------------------------------------------------- /controllers/druid/testdata/ordering.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: druid.apache.org/v1alpha1 2 | kind: Druid 3 | metadata: 4 | name: ordering 5 | namespace: default 6 | spec: 7 | image: apache/druid:25.0.0 8 | startScript: /druid.sh 9 | rollingDeploy: false 10 | securityContext: 11 | fsGroup: 1000 12 | runAsUser: 1000 13 | runAsGroup: 1000 14 | services: 15 | - spec: 16 | type: ClusterIP 17 | commonConfigMountPath: "/opt/druid/conf/druid/cluster/_common" 18 | jvm.options: |- 19 | -server 20 | -XX:MaxDirectMemorySize=10240g 21 | -Duser.timezone=UTC 22 | -Dfile.encoding=UTF-8 23 | -Djava.io.tmpdir=/druid/data 24 | common.runtime.properties: |- 25 | # Metadata Store 26 | druid.metadata.storage.type=derby 27 | druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/druid/data/derbydb/metadata.db;create=true 28 | druid.metadata.storage.connector.host=localhost 29 | druid.metadata.storage.connector.port=1527 30 | druid.metadata.storage.connector.createTables=true 31 | 32 | # Deep Storage 33 | druid.storage.type=local 34 | druid.storage.storageDirectory=/druid/deepstorage 35 | 36 | # Service discovery 37 | druid.selectors.indexing.serviceName=druid/overlord 38 | druid.selectors.coordinator.serviceName=druid/coordinator 39 | nodes: 40 | indexers: 41 | nodeType: "indexer" 42 | druid.port: 8080 43 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/master/indexers" 44 | replicas: 1 45 | runtime.properties: |- 46 | druid.service=druid/indexer 47 | brokers: 48 | nodeType: "broker" 49 | kind: "Deployment" 50 | druid.port: 8088 51 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/query/broker" 52 | replicas: 1 53 | runtime.properties: |- 54 | druid.service=druid/broker 55 | additionalContainer: 56 | - command: 57 | - /bin/sh echo hello 58 | containerName: node-level 59 | image: hello-world 60 | historicals2: 61 | nodeType: "historical" 62 | druid.port: 8080 63 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/data/historical" 64 | replicas: 1 65 | runtime.properties: |- 66 | druid.service=druid/historical 67 | druid.segmentCache.locations=[{\"path\":\"/druid/data/segments\",\"maxSize\":10737418240}] 68 | druid.server.maxSize=10737418240 69 | routers: 70 | nodeType: "router" 71 | kind: "Deployment" 72 | druid.port: 8088 73 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/query/router" 74 | replicas: 1 75 | runtime.properties: | 76 | # General 77 | druid.service=druid/router 78 | # Service discovery 79 | druid.router.defaultBrokerServiceName=druid/broker 80 | 
druid.router.coordinatorServiceName=druid/coordinator 81 | # Management proxy to coordinator / overlord: required for unified web console. 82 | druid.router.managementProxy.enabled=true 83 | coordinators: 84 | nodeType: "coordinator" 85 | druid.port: 8080 86 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/master/coordinator-overlord" 87 | replicas: 1 88 | runtime.properties: |- 89 | druid.service=druid/coordinator 90 | druid.coordinator.asOverlord.enabled=true 91 | druid.coordinator.asOverlord.overlordService=druid/overlord 92 | middle-managers: 93 | nodeType: "middleManager" 94 | kind: "Deployment" 95 | druid.port: 8091 96 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/data/middleManager" 97 | replicas: 1 98 | runtime.properties: | 99 | # Caching 100 | druid.realtime.cache.useCache=true 101 | druid.realtime.cache.populateCache=true 102 | druid.indexer.runner.javaOptsArray=["-server","-Duser.timezone=UTC","-Dfile.encoding=UTF-8","-XX:+ExitOnOutOfMemoryError","-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager","--add-exports=java.base/jdk.internal.ref=ALL-UNNAMED","--add-exports=java.base/jdk.internal.misc=ALL-UNNAMED","--add-opens=java.base/java.lang=ALL-UNNAMED","--add-opens=java.base/java.io=ALL-UNNAMED","--add-opens=java.base/java.nio=ALL-UNNAMED","--add-opens=java.base/jdk.internal.ref=ALL-UNNAMED","--add-opens=java.base/sun.nio.ch=ALL-UNNAMED"] 103 | druid.indexer.task.restoreTasksOnRestart=true 104 | historicals: 105 | nodeType: "historical" 106 | druid.port: 8080 107 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/data/historical" 108 | replicas: 1 109 | runtime.properties: |- 110 | druid.service=druid/historical 111 | druid.segmentCache.locations=[{\"path\":\"/druid/data/segments\",\"maxSize\":10737418240}] 112 | druid.server.maxSize=10737418240 113 | overlords: 114 | nodeType: "overlord" 115 | druid.port: 8080 116 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/master/overlord" 117 | replicas: 1 118 | runtime.properties: |- 119 | druid.service=druid/overlord 120 | -------------------------------------------------------------------------------- /controllers/druid/testdata/volume-expansion.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: druid.apache.org/v1alpha1 2 | kind: Druid 3 | metadata: 4 | name: volume-expansion 5 | namespace: default 6 | spec: 7 | image: apache/druid:25.0.0 8 | startScript: /druid.sh 9 | rollingDeploy: false 10 | securityContext: 11 | fsGroup: 1000 12 | runAsUser: 1000 13 | runAsGroup: 1000 14 | services: 15 | - spec: 16 | type: ClusterIP 17 | commonConfigMountPath: "/opt/druid/conf/druid/cluster/_common" 18 | jvm.options: |- 19 | -server 20 | -XX:MaxDirectMemorySize=10240g 21 | -Duser.timezone=UTC 22 | -Dfile.encoding=UTF-8 23 | -Djava.io.tmpdir=/druid/data 24 | common.runtime.properties: |- 25 | # Metadata Store 26 | druid.metadata.storage.type=derby 27 | druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/druid/data/derbydb/metadata.db;create=true 28 | druid.metadata.storage.connector.host=localhost 29 | druid.metadata.storage.connector.port=1527 30 | druid.metadata.storage.connector.createTables=true 31 | 32 | # Deep Storage 33 | druid.storage.type=local 34 | druid.storage.storageDirectory=/druid/deepstorage 35 | 36 | # Service discovery 37 | druid.selectors.indexing.serviceName=druid/overlord 38 | druid.selectors.coordinator.serviceName=druid/coordinator 39 | nodes: 40 | brokers: 41 | nodeType: "broker" 42 | kind: "Deployment" 43 | 
druid.port: 8088 44 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/query/broker" 45 | replicas: 1 46 | runtime.properties: |- 47 | druid.service=druid/broker 48 | additionalContainer: 49 | - command: 50 | - /bin/sh echo hello 51 | containerName: node-level 52 | image: hello-world 53 | coordinators: 54 | nodeType: "coordinator" 55 | druid.port: 8080 56 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/master/coordinator-overlord" 57 | replicas: 1 58 | runtime.properties: |- 59 | druid.service=druid/coordinator 60 | druid.coordinator.asOverlord.enabled=true 61 | druid.coordinator.asOverlord.overlordService=druid/overlord 62 | historicals: 63 | nodeType: "historical" 64 | kind: "StatefulSet" 65 | druid.port: 8080 66 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/data/historical" 67 | replicas: 1 68 | runtime.properties: |- 69 | druid.service=druid/historical 70 | druid.segmentCache.locations=[{\"path\":\"/druid/data/segments\",\"maxSize\":10737418240}] 71 | druid.server.maxSize=10737418240 72 | volumeMounts: 73 | - mountPath: /druid/data 74 | name: data-volume 75 | volumeClaimTemplates: 76 | - metadata: 77 | name: data-volume 78 | spec: 79 | storageClassName: default 80 | accessModes: 81 | - ReadWriteOnce 82 | resources: 83 | requests: 84 | storage: 10M 85 | -------------------------------------------------------------------------------- /controllers/druid/types.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | const ( 4 | ignoredAnnotation = "druid.apache.org/ignored" 5 | 6 | broker = "broker" 7 | coordinator = "coordinator" 8 | overlord = "overlord" 9 | middleManager = "middleManager" 10 | indexer = "indexer" 11 | historical = "historical" 12 | router = "router" 13 | ) 14 | -------------------------------------------------------------------------------- /controllers/druid/util.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | "reflect" 8 | "strconv" 9 | "strings" 10 | "time" 11 | ) 12 | 13 | func firstNonEmptyStr(s1 string, s2 string) string { 14 | if len(s1) > 0 { 15 | return s1 16 | } else { 17 | return s2 18 | } 19 | } 20 | 21 | // Note that all the arguments passed to this function must have zero value of Nil. 
22 | func firstNonNilValue(v1, v2 interface{}) interface{} { 23 | if !reflect.ValueOf(v1).IsNil() { 24 | return v1 25 | } else { 26 | return v2 27 | } 28 | } 29 | 30 | // lookup DENY_LIST, default is nil 31 | func getDenyListEnv(key string, defaultVal string) string { 32 | if value, exists := os.LookupEnv(key); exists { 33 | return value 34 | } 35 | return defaultVal 36 | } 37 | 38 | // pass slice of strings for namespaces 39 | func getEnvAsSlice(name string, defaultVal []string, sep string) []string { 40 | valStr := getDenyListEnv(name, "") 41 | if valStr == "" { 42 | return defaultVal 43 | } 44 | // split on "," 45 | val := strings.Split(valStr, sep) 46 | return val 47 | } 48 | 49 | func ContainsString(slice []string, s string) bool { 50 | for _, item := range slice { 51 | if item == s { 52 | return true 53 | } 54 | } 55 | return false 56 | } 57 | 58 | func RemoveString(slice []string, s string) (result []string) { 59 | for _, item := range slice { 60 | if item == s { 61 | continue 62 | } 63 | result = append(result, item) 64 | } 65 | return 66 | } 67 | 68 | // returns pointer to bool 69 | func boolFalse() *bool { 70 | bool := false 71 | return &bool 72 | } 73 | 74 | // to be used in max concurrent reconciles only 75 | // defaulting to return 1 76 | func Str2Int(s string) int { 77 | i, err := strconv.Atoi(s) 78 | if err != nil { 79 | return 1 80 | } 81 | return i 82 | } 83 | 84 | func IsEqualJson(s1, s2 string) (bool, error) { 85 | var o1 interface{} 86 | var o2 interface{} 87 | 88 | var err error 89 | err = json.Unmarshal([]byte(s1), &o1) 90 | if err != nil { 91 | return false, fmt.Errorf("error mashalling string 1 :: %s", err.Error()) 92 | } 93 | err = json.Unmarshal([]byte(s2), &o2) 94 | if err != nil { 95 | return false, fmt.Errorf("error mashalling string 2 :: %s", err.Error()) 96 | } 97 | 98 | return reflect.DeepEqual(o1, o2), nil 99 | } 100 | 101 | // to find the time difference between two epoch times 102 | func timeDifference(epochTime1, epochTime2 int64) int64 { 103 | t1 := time.Unix(epochTime1, 0) 104 | t2 := time.Unix(epochTime2, 0) 105 | 106 | diff := time.Duration(t2.Sub(t1)) 107 | return int64(diff.Seconds()) 108 | } 109 | 110 | func containsString(all []string, string string) bool { 111 | for _, s := range all { 112 | if s == string { 113 | return true 114 | } 115 | } 116 | 117 | return false 118 | } 119 | 120 | func hasDuplicateString(slice []string) (bool, string) { 121 | seen := make(map[string]bool) 122 | for _, s := range slice { 123 | if seen[s] { 124 | return true, s 125 | } 126 | seen[s] = true 127 | } 128 | return false, "" 129 | } 130 | -------------------------------------------------------------------------------- /controllers/druid/util_test.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "encoding/json" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . 
"github.com/onsi/gomega" 8 | v1 "k8s.io/api/core/v1" 9 | 10 | druidv1alpha1 "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 11 | ) 12 | 13 | // +kubebuilder:docs-gen:collapse=Imports 14 | 15 | /* 16 | util test 17 | */ 18 | var _ = Describe("Test util", func() { 19 | Context("When testing util", func() { 20 | It("should test first non nil value", func() { 21 | var js = []byte(` 22 | { 23 | "image": "apache/druid:25.0.0", 24 | "securityContext": { "fsGroup": 107, "runAsUser": 106 }, 25 | "env": [{ "name": "k", "value": "v" }], 26 | "nodes": 27 | { 28 | "brokers": { 29 | "nodeType": "broker", 30 | "druid.port": 8080, 31 | "replicas": 2 32 | } 33 | } 34 | }`) 35 | 36 | clusterSpec := druidv1alpha1.DruidSpec{} 37 | Expect(json.Unmarshal(js, &clusterSpec)).Should(BeNil()) 38 | 39 | By("By testing first non nil value of PodSecurityContext.RunAsUser") 40 | x := firstNonNilValue(clusterSpec.Nodes["brokers"].PodSecurityContext, clusterSpec.PodSecurityContext).(*v1.PodSecurityContext) 41 | Expect(*x.RunAsUser).Should(Equal(int64(106))) 42 | 43 | By("By testing first non nil value of Env.Name") 44 | y := firstNonNilValue(clusterSpec.Nodes["brokers"].Env, clusterSpec.Env).([]v1.EnvVar) 45 | Expect(y[0].Name).Should(Equal("k")) 46 | }) 47 | 48 | It("should test first non empty string", func() { 49 | By("By testing first non empty string 1") 50 | Expect(firstNonEmptyStr("a", "b")).Should(Equal("a")) 51 | 52 | By("By testing first non empty string 2") 53 | Expect(firstNonEmptyStr("", "b")).Should(Equal("b")) 54 | }) 55 | 56 | It("should test contains string", func() { 57 | By("By testing contains string") 58 | Expect(ContainsString([]string{"a", "b"}, "a")).Should(BeTrue()) 59 | }) 60 | 61 | It("should test removes string", func() { 62 | By("By testing removes string") 63 | rs := RemoveString([]string{"a", "b"}, "a") 64 | Expect(rs).Should(Not(ConsistOf("a"))) 65 | }) 66 | 67 | }) 68 | }) 69 | -------------------------------------------------------------------------------- /controllers/druid/volume_expansion_test.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "time" 5 | 6 | druidv1alpha1 "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 7 | . "github.com/onsi/ginkgo/v2" 8 | . 
"github.com/onsi/gomega" 9 | "k8s.io/apimachinery/pkg/types" 10 | ) 11 | 12 | // +kubebuilder:docs-gen:collapse=Imports 13 | 14 | /* 15 | volume_expansion_test 16 | */ 17 | var _ = Describe("Test volume expansion feature", func() { 18 | const ( 19 | filePath = "testdata/volume-expansion.yaml" 20 | timeout = time.Second * 45 21 | interval = time.Millisecond * 250 22 | ) 23 | 24 | var ( 25 | druid = &druidv1alpha1.Druid{} 26 | ) 27 | 28 | Context("When creating a druid cluster with volume expansion", func() { 29 | It("Should create the druid object", func() { 30 | By("Creating a new druid") 31 | druidCR, err := readDruidClusterSpecFromFile(filePath) 32 | Expect(err).Should(BeNil()) 33 | Expect(k8sClient.Create(ctx, druidCR)).To(Succeed()) 34 | 35 | By("Getting a newly created druid") 36 | Eventually(func() bool { 37 | err := k8sClient.Get(ctx, types.NamespacedName{Name: druidCR.Name, Namespace: druidCR.Namespace}, druid) 38 | return err == nil 39 | }, timeout, interval).Should(BeTrue()) 40 | }) 41 | It("Should error on the CR verify stage if storage class is nil", func() { 42 | By("Setting storage class name to nil") 43 | druid.Spec.Nodes["historicals"].VolumeClaimTemplates[0].Spec.StorageClassName = nil 44 | Expect(druid.Spec.Nodes["historicals"].VolumeClaimTemplates[0].Spec.StorageClassName).Should(BeNil()) 45 | 46 | By("Validating the created druid") 47 | Expect(validateVolumeClaimTemplateSpec(druid)).Error() 48 | }) 49 | It("Should error if validate didn't worked and storageClassName does not exists", func() { 50 | By("By getting the historicals nodeSpec") 51 | allNodeSpecs := getNodeSpecsByOrder(druid) 52 | 53 | nodeSpec := &druidv1alpha1.DruidNodeSpec{} 54 | for _, elem := range allNodeSpecs { 55 | if elem.key == "historicals" { 56 | nodeSpec = &elem.spec 57 | } 58 | } 59 | Expect(nodeSpec).ShouldNot(BeNil()) 60 | 61 | By("By calling the expand volume function with storageClass nil") 62 | Expect(isVolumeExpansionEnabled(ctx, k8sClient, druid, nodeSpec, nil)).Error() 63 | }) 64 | }) 65 | }) 66 | -------------------------------------------------------------------------------- /controllers/druid/zookeeper_dep_mgmt.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "reflect" 7 | 8 | "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 9 | "github.com/datainfrahq/druid-operator/controllers/druid/ext" 10 | ) 11 | 12 | var zkExtTypes = map[string]reflect.Type{} 13 | 14 | func init() { 15 | zkExtTypes["default"] = reflect.TypeOf(ext.DefaultZkManager{}) 16 | } 17 | 18 | // We might have to add more methods to this interface to enable extensions that completely manage 19 | // deploy, upgrade and termination of zk cluster. 20 | type zookeeperManager interface { 21 | Configuration() string 22 | } 23 | 24 | func createZookeeperManager(spec *v1alpha1.ZookeeperSpec) (zookeeperManager, error) { 25 | if t, ok := zkExtTypes[spec.Type]; ok { 26 | v := reflect.New(t).Interface() 27 | if err := json.Unmarshal(spec.Spec, v); err != nil { 28 | return nil, fmt.Errorf("Couldn't unmarshall zk type[%s]. 
Error[%s].", spec.Type, err.Error()) 29 | } else { 30 | return v.(zookeeperManager), nil 31 | } 32 | } else { 33 | return nil, fmt.Errorf("Can't find type[%s] for Zookeeper Mgmt.", spec.Type) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /controllers/druid/zookeeper_dep_mgmt_test.go: -------------------------------------------------------------------------------- 1 | package druid 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo/v2" 5 | . "github.com/onsi/gomega" 6 | 7 | druidv1alpha1 "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 8 | ) 9 | 10 | // +kubebuilder:docs-gen:collapse=Imports 11 | 12 | /* 13 | zookeeper_dep_mgmt_test 14 | */ 15 | var _ = Describe("Test zookeeper dep mgmt", func() { 16 | Context("When testing zookeeper dep mgmt", func() { 17 | It("should test zookeeper dep mgmt", func() { 18 | v := druidv1alpha1.ZookeeperSpec{ 19 | Type: "default", 20 | Spec: []byte(`{ "properties": "my-zookeeper-config" }`), 21 | } 22 | 23 | zm, err := createZookeeperManager(&v) 24 | Expect(err).Should(BeNil()) 25 | Expect(zm.Configuration()).Should(Equal("my-zookeeper-config")) 26 | 27 | }) 28 | }) 29 | }) 30 | -------------------------------------------------------------------------------- /controllers/ingestion/ingestion_controller.go: -------------------------------------------------------------------------------- 1 | package ingestion 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "time" 7 | 8 | "k8s.io/apimachinery/pkg/api/errors" 9 | "k8s.io/client-go/tools/record" 10 | 11 | "github.com/go-logr/logr" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | ctrl "sigs.k8s.io/controller-runtime" 14 | "sigs.k8s.io/controller-runtime/pkg/client" 15 | "sigs.k8s.io/controller-runtime/pkg/log" 16 | 17 | "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 18 | druidv1alpha1 "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 19 | ) 20 | 21 | // IngestionReconciler 22 | type DruidIngestionReconciler struct { 23 | client.Client 24 | Log logr.Logger 25 | Scheme *runtime.Scheme 26 | // reconcile time duration, defaults to 10s 27 | ReconcileWait time.Duration 28 | Recorder record.EventRecorder 29 | } 30 | 31 | func NewDruidIngestionReconciler(mgr ctrl.Manager) *DruidIngestionReconciler { 32 | return &DruidIngestionReconciler{ 33 | Client: mgr.GetClient(), 34 | Log: ctrl.Log.WithName("controllers").WithName("Ingestion"), 35 | Scheme: mgr.GetScheme(), 36 | Recorder: mgr.GetEventRecorderFor("druid-ingestion"), 37 | } 38 | } 39 | 40 | // +kubebuilder:rbac:groups=druid.apache.org,resources=ingestions,verbs=get;list;watch;create;update;patch;delete 41 | // +kubebuilder:rbac:groups=druid.apache.org,resources=ingestion/status,verbs=get;update;patch 42 | func (r *DruidIngestionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 43 | logr := log.FromContext(ctx) 44 | 45 | druidIngestionCR := &v1alpha1.DruidIngestion{} 46 | err := r.Get(ctx, req.NamespacedName, druidIngestionCR) 47 | if err != nil { 48 | if errors.IsNotFound(err) { 49 | return ctrl.Result{}, nil 50 | } 51 | return ctrl.Result{}, err 52 | } 53 | 54 | if err := r.do(ctx, druidIngestionCR); err != nil { 55 | logr.Error(err, err.Error()) 56 | return ctrl.Result{}, err 57 | } else { 58 | return ctrl.Result{RequeueAfter: LookupReconcileTime()}, nil 59 | } 60 | 61 | } 62 | 63 | func LookupReconcileTime() time.Duration { 64 | val, exists := os.LookupEnv("RECONCILE_WAIT") 65 | if !exists { 66 | return time.Second * 10 67 | } else { 68 | v, err := 
time.ParseDuration(val) 69 | if err != nil { 70 | // Exit Program if not valid 71 | os.Exit(1) 72 | } 73 | return v 74 | } 75 | } 76 | 77 | func (r *DruidIngestionReconciler) SetupWithManager(mgr ctrl.Manager) error { 78 | return ctrl.NewControllerManagedBy(mgr). 79 | For(&druidv1alpha1.DruidIngestion{}). 80 | Complete(r) 81 | } 82 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | Druid Operator is a Kubernetes controller that manages the lifecycle of [Apache Druid](https://druid.apache.org/) clusters. 4 | The operator simplifies the management of Druid clusters with its custom logic that is configurable via a custom API 5 | (Kubernetes CRD). 6 | 7 | ## Druid Operator Documentation 8 | 9 | * [Getting Started](./getting_started.md) 10 | * API Specifications 11 | * [Druid API](./api_specifications/druid.md) 12 | * [Supported Features](./features.md) 13 | * [Example Specs](./examples.md) 14 | * [Developer Documentation](./dev_doc.md) 15 | * [Migration To Kubebuilder V3 in the Upcoming Version](./kubebuilder_v3_migration.md) 16 | 17 | --- 18 | 19 | :warning: You won't find any documentation about druid itself in this repository. 20 | If you need details about how to architect your druid cluster, you can consult these documents: 21 | 22 | * [Druid introduction]() 23 | * [Druid architecture](https://druid.apache.org/docs/latest/design/architecture.html) 24 | * [Druid configuration reference](https://druid.apache.org/docs/latest/configuration/index.html) 25 | 26 | --- 27 | 28 | [German company iunera has published their druid cluster spec](https://www.iunera.com/) on GitHub, which is used in the context of a software project by the German Ministry for Digital and Transport. The spec has the following features: 29 | 30 | * Kubernetes-native Druid 31 | * K8S jobs instead of MiddleManager with separate [pod-templates](https://github.com/iunera/druid-cluster-config/blob/main/kubernetes/druid/druidcluster/podTemplates/default-task-template.yaml) 32 | * [Service Discovery by Kubernetes](https://github.com/iunera/druid-cluster-config/blob/main/kubernetes/druid/druidcluster/iuneradruid-cluster.yaml#L172), i.e. no ZooKeeper 33 | * [HPA for historical nodes](https://github.com/iunera/druid-cluster-config/blob/main/kubernetes/druid/druidcluster/hpa.yaml) / extended [Metrics Exporter](https://github.com/iunera/druid-cluster-config/blob/main/kubernetes/druid/metrics/druid-exporter.helm.yaml) 34 | * Multiple [Authenticator/Authorizer](https://github.com/iunera/druid-cluster-config/blob/main/kubernetes/druid/druidcluster/iuneradruid-cluster.yaml#L88) (Basic Auth and Azure AD Authentication with pac4j) 35 | * [Examples](https://github.com/iunera/druid-cluster-config/tree/main/_authentication-and-authorization-druid) for authorization and authentication 36 | * Based on druid-operator and [flux-cd](https://fluxcd.io/flux/) 37 | * Secrets managed by [SOPS](https://fluxcd.io/flux/guides/mozilla-sops/) and [ingested as Environment Variables](https://github.com/iunera/druid-cluster-config/blob/main/kubernetes/druid/druidcluster/iuneradruid-cluster.yaml#L245) 38 | * Postgres as Metadata Store (incl. [Helmchart Config](https://github.com/iunera/druid-cluster-config/blob/main/kubernetes/druid/postgres/postgres.helm.yaml)) 39 | * All endpoints TLS encrypted incl.
[Howto](https://github.com/iunera/druid-cluster-config/blob/main/README.md#cluster-internal-tls-encryption) 40 | 41 | Link to the complete config file: https://github.com/iunera/druid-cluster-config 42 | -------------------------------------------------------------------------------- /docs/dev_doc.md: -------------------------------------------------------------------------------- 1 | ## Dev Dependencies 2 | 3 | - Golang 1.20+ 4 | - Kubebuilder v3 5 | - It is recommended to install kind, since the project's e2e tests use it. 6 | 7 | ## Running Operator Locally 8 | We use Kubebuilder, so we work with its `Makefile` plus a few extra custom commands: 9 | ```shell 10 | # If needed, create a kubernetes cluster (requires kind) 11 | make kind 12 | 13 | # Install the CRDs 14 | make install 15 | 16 | # Run the operator locally 17 | make run 18 | ``` 19 | 20 | ## Watch a namespace 21 | ```shell 22 | # Watch all namespaces 23 | export WATCH_NAMESPACE="" 24 | 25 | # Watch a single namespace 26 | export WATCH_NAMESPACE="mynamespace" 27 | 28 | # Watch all namespaces except: kube-system, default 29 | export DENY_LIST="kube-system,default" 30 | ``` 31 | 32 | ## Building The Operator Docker Image 33 | ```shell 34 | make docker-build 35 | 36 | # In case you want to build it with a custom image: 37 | make docker-build IMG=custom-name:custom-tag 38 | ``` 39 | 40 | ## Testing 41 | Before submitting a PR, make sure the tests run successfully. 42 | ```shell 43 | # Run unit tests 44 | make test 45 | 46 | # Run E2E tests (requires kind) 47 | make e2e 48 | ``` 49 | 50 | ## Documentation 51 | If you changed the CRD API, please make sure the documentation is also updated: 52 | ```shell 53 | make api-docs 54 | ``` 55 | 56 | ## Help 57 | The `Makefile` should contain all commands with explanations. You can also run: 58 | ```shell 59 | # For help 60 | make help 61 | ``` 62 | -------------------------------------------------------------------------------- /docs/druid_cr.md: -------------------------------------------------------------------------------- 1 | ## Druid CR Spec 2 | 3 | - The Druid CR has a ```clusterSpec```, which is common to all the deployed druid nodes, and a ```nodeSpec```, which is specific to each druid node group. 4 | - Some key values are ```Required```, i.e., they must be present in the spec for the cluster to be deployed. Others are optional. 5 | - For full details on the spec, refer to ```apis/druid/v1alpha1/druid_types.go``` 6 | - The operator supports both Deployments and StatefulSets for druid nodes. ```kind``` can be set in a druid nodeSpec to ```Deployment``` / ```StatefulSet```. 7 | - ```NOTE: By default, all the nodes are provisioned as StatefulSets.``` 8 | - The following are cluster scoped and common to all the druid nodes. 9 | 10 | ```yaml 11 | spec: 12 | # Enable rolling deploy for druid, not required but suggested for production setup 13 | # more information in features.md and in druid documentation 14 | # http://druid.io/docs/latest/operations/rolling-updates.html 15 | rollingDeploy: true 16 | # Image for druid, Required Key 17 | image: apache/druid:25.0.0 18 | .... 19 | # Optionally specify imagePullSecrets for all nodes. Can also be specified per node 20 | # imagePullSecrets: 21 | # - name: tutu 22 | .... 23 | # Entrypoint for druid cluster, Required Key 24 | startScript: /druid.sh 25 | ... 26 | # Labels added to all pods 27 | podLabels: 28 | .... 29 | # Pod Security Context 30 | securityContext: 31 | ... 32 | # Service Spec created for all nodes 33 | services: 34 | ...
35 | # Mount path for the common runtime, jvm and log4j xml configs inside druid pods. Required Key 36 | commonConfigMountPath: "/opt/druid/conf/druid/cluster/_common" 37 | ... 38 | # JVM Options common for all druid nodes 39 | jvm.options: |- 40 | ... 41 | # log4j.config common for all druid nodes 42 | 43 | log4j.config: |- 44 | # common runtime properties for all druid nodes 45 | common.runtime.properties: | 46 | ``` 47 | 48 | - The following are specific to a node. 49 | 50 | ```yaml 51 | nodes: 52 | # String value, can be anything to define a node name. 53 | brokers: 54 | # nodeType can be broker, historical, middleManager, indexer, router, coordinator and overlord. 55 | # Required Key 56 | nodeType: "broker" 57 | # Optionally specify imagePullSecrets for broker nodes 58 | # imagePullSecrets: 59 | # - name: tutu 60 | # Port for the node 61 | druid.port: 8088 62 | # Mount path where all the node properties get mounted as a configMap 63 | nodeConfigMountPath: "/opt/druid/conf/druid/cluster/query/broker" 64 | # Replica count, required, must be greater than 0. 65 | replicas: 1 66 | # Runtime Properties for the node 67 | # Required Key 68 | runtime.properties: | 69 | ... 70 | ``` 71 | 72 | ## Authentication Setup 73 | 74 | Authentication can be configured to secure communication with the cluster API using credentials stored in Kubernetes secrets. 75 | 76 | Currently this is used for compaction, rules, dynamic configs, and ingestion configurations. 77 | 78 | This applies not only to the `Druid` CR but also to the `DruidIngestion` CR. 79 | 80 | ### Configuring Basic Authentication 81 | 82 | To use basic authentication, you need to create a Kubernetes secret containing the username and password. This secret is then referenced in the Druid CR. 83 | 84 | Steps to Configure Basic Authentication: 85 | 86 | 1. **Create a Kubernetes Secret:** Store your username and password in a Kubernetes secret. Below is an example of how to define the secret in a YAML file: 87 | 88 | ```yaml 89 | apiVersion: v1 90 | kind: Secret 91 | metadata: 92 | name: mycluster-admin-operator 93 | namespace: druid 94 | type: Opaque 95 | data: 96 | OperatorUserName: <base64-encoded-username> 97 | OperatorPassword: <base64-encoded-password> 98 | ``` 99 | 100 | Replace <base64-encoded-username> and <base64-encoded-password> with the base64-encoded values of your desired username and password. 101 | 102 | 2. **Define Authentication in the Druid CR:** Reference the secret in your Druid custom resource. Here is an example `Druid`: 103 | 104 | ```yaml 105 | apiVersion: druid.apache.org/v1alpha1 106 | kind: Druid 107 | metadata: 108 | name: agent 109 | spec: 110 | auth: 111 | secretRef: 112 | name: mycluster-admin-operator 113 | namespace: druid 114 | type: basic-auth 115 | ``` 116 | 117 | This configuration specifies that the Druid cluster should use basic authentication with credentials retrieved from the mycluster-admin-operator secret. 118 | -------------------------------------------------------------------------------- /docs/getting_started.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | The Helm chart is available at the [DataInfra chart repository](https://charts.datainfra.io).
4 | 5 | The operator can be deployed in one of the following modes: 6 | - namespace scope (default) 7 | - cluster scope 8 | 9 | ### Add the Helm repository 10 | ```shell 11 | helm repo add datainfra https://charts.datainfra.io 12 | helm repo update 13 | ``` 14 | 15 | ### Cluster Scope Installation 16 | `NOTE:` the default installation excludes the `default` and `kube-system` namespaces from reconciliation (the default `DENY_LIST`) 17 | ```bash 18 | # Install Druid operator using Helm 19 | helm -n druid-operator-system upgrade -i --create-namespace cluster-druid-operator datainfra/druid-operator 20 | 21 | # ... or generate manifest.yaml to install using other means: 22 | helm -n druid-operator-system template --create-namespace cluster-druid-operator datainfra/druid-operator > manifest.yaml 23 | ``` 24 | 25 | ### Custom Namespaces Installation 26 | ```bash 27 | # Install Druid operator using Helm 28 | kubectl create ns mynamespace 29 | helm -n druid-operator-system upgrade -i --create-namespace --set env.WATCH_NAMESPACE="mynamespace" namespaced-druid-operator datainfra/druid-operator 30 | 31 | # Override the default namespace DENY_LIST 32 | helm -n druid-operator-system upgrade -i --create-namespace --set env.DENY_LIST="kube-system" namespaced-druid-operator datainfra/druid-operator 33 | 34 | # ... or generate manifest.yaml to install using other means: 35 | helm -n druid-operator-system template --set env.WATCH_NAMESPACE="" namespaced-druid-operator datainfra/druid-operator --create-namespace > manifest.yaml 36 | ``` 37 | 38 | ### Uninstall 39 | ```bash 40 | # To avoid destroying existing clusters, helm will not uninstall its CRD. For 41 | # complete cleanup, the annotation needs to be removed first: 42 | kubectl annotate crd druids.druid.apache.org helm.sh/resource-policy- 43 | 44 | # This will uninstall the operator 45 | helm -n druid-operator-system uninstall cluster-druid-operator 46 | ``` 47 | 48 | ## Deploy a sample Druid cluster 49 | Below is an example spec to deploy a tiny Druid cluster. 50 | For full details on the spec, please see the [Druid CRD API reference](api_specifications/druid.md) 51 | or the [Druid API code](../apis/druid/v1alpha1/druid_types.go). 52 | 53 | ```bash 54 | # Deploy a single-node zookeeper 55 | kubectl apply -f examples/tiny-cluster-zk.yaml 56 | 57 | # Deploy the druid cluster spec 58 | # NOTE: add a namespace when applying the cluster if you installed the operator with the default DENY_LIST 59 | kubectl apply -f examples/tiny-cluster.yaml 60 | ``` 61 | 62 | `NOTE:` the above tiny-cluster only works on a single-node kubernetes cluster (e.g. a typical k8s cluster set up for dev 63 | using kind or minikube) as it uses local disk as "deep storage". Other example specs in the `examples/` directory use distributed "deep storage" and therefore expect to be deployed into a k8s cluster with s3-compatible storage. To bootstrap your k8s cluster with s3-compatible storage, you can run `make helm-minio-install`. See the [Makefile](../Makefile) for more details.
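As a quick follow-up to the note above, the sketch below deploys one of the distributed deep-storage examples once s3-compatible storage is available and checks that the operator has picked up the CR. It is a minimal sketch, not part of the original guide: the `druid` namespace and the choice of `examples/tiny-cluster-mmless.yaml` are assumptions, so adjust both to your own `WATCH_NAMESPACE`/`DENY_LIST` setup.

```bash
# Minimal sketch (assumes `make helm-minio-install` has already bootstrapped s3-compatible storage
# and that the target namespace is watched by the operator, i.e. not on its DENY_LIST)
kubectl create ns druid

# Apply an example spec that uses distributed "deep storage"
# (assumed choice; any s3-backed spec from examples/ is applied the same way)
kubectl apply -n druid -f examples/tiny-cluster-mmless.yaml

# Verify the operator reconciled the CR and started creating resources
kubectl -n druid get druids
kubectl -n druid get sts,deploy,svc,cm
```

If the resources do not appear, the debugging commands in the next section are the place to start.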
64 | 65 | 66 | ## Debugging Problems 67 | 68 | ```bash 69 | # get druid-operator pod name 70 | kubectl get po | grep druid-operator 71 | 72 | # check druid-operator pod logs 73 | kubectl logs 74 | 75 | # check the druid spec 76 | kubectl describe druids tiny-cluster 77 | 78 | # check if druid cluster is deployed 79 | kubectl get svc | grep tiny 80 | kubectl get cm | grep tiny 81 | kubectl get sts | grep tiny 82 | ``` 83 | -------------------------------------------------------------------------------- /docs/images/druid-operator.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datainfrahq/druid-operator/99010476155eba85dd8cf33b80b49b89802d7031/docs/images/druid-operator.png -------------------------------------------------------------------------------- /e2e/Dockerfile-testpod: -------------------------------------------------------------------------------- 1 | FROM alpine:3.17.2 2 | RUN apk add --update-cache \ 3 | curl jq\ 4 | && rm -rf /var/cache/apk/* 5 | 6 | ADD e2e/wikipedia-test.sh . 7 | ADD e2e/druid-ingestion-test.sh . 8 | -------------------------------------------------------------------------------- /e2e/configs/druid-ingestion-cr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: druid.apache.org/v1alpha1 2 | kind: DruidIngestion 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: druidingestion 6 | app.kubernetes.io/instance: druidingestion-sample 7 | name: wikipedia-ingestion 8 | spec: 9 | suspend: false 10 | druidCluster: tiny-cluster 11 | ingestion: 12 | type: native-batch 13 | spec: |- 14 | { 15 | "type" : "index_parallel", 16 | "spec" : { 17 | "dataSchema" : { 18 | "dataSource" : "wikipedia-2", 19 | "timestampSpec": { 20 | "column": "time", 21 | "format": "iso" 22 | }, 23 | "dimensionsSpec" : { 24 | "dimensions" : [ 25 | "channel", 26 | "cityName", 27 | "comment", 28 | "countryIsoCode", 29 | "countryName", 30 | "isAnonymous", 31 | "isMinor", 32 | "isNew", 33 | "isRobot", 34 | "isUnpatrolled", 35 | "metroCode", 36 | "namespace", 37 | "page", 38 | "regionIsoCode", 39 | "regionName", 40 | "user", 41 | { "name": "added", "type": "long" }, 42 | { "name": "deleted", "type": "long" }, 43 | { "name": "delta", "type": "long" } 44 | ] 45 | }, 46 | "metricsSpec" : [], 47 | "granularitySpec" : { 48 | "type" : "uniform", 49 | "segmentGranularity" : "day", 50 | "queryGranularity" : "none", 51 | "intervals" : ["2015-09-12/2015-09-13"], 52 | "rollup" : false 53 | } 54 | }, 55 | "ioConfig" : { 56 | "type" : "index_parallel", 57 | "inputSource" : { 58 | "type" : "local", 59 | "baseDir" : "quickstart/tutorial/", 60 | "filter" : "wikiticker-2015-09-12-sampled.json.gz" 61 | }, 62 | "inputFormat" : { 63 | "type" : "json" 64 | }, 65 | "appendToExisting" : false 66 | }, 67 | "tuningConfig" : { 68 | "type" : "index_parallel", 69 | "maxRowsPerSegment" : 5000000, 70 | "maxRowsInMemory" : 25000 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /e2e/configs/kafka-ingestion-native.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: druid.apache.org/v1alpha1 2 | kind: DruidIngestion 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: druidingestion 6 | app.kubernetes.io/instance: druidingestion-sample 7 | name: kafka-2 8 | spec: 9 | suspend: false 10 | druidCluster: tiny-cluster 11 | ingestion: 12 | type: kafka 13 | compaction: 14 | tuningConfig: 15 | type: "kafka" 16 | 
partitionsSpec: 17 | type: "dynamic" 18 | skipOffsetFromLatest: "PT0S" 19 | granularitySpec: 20 | segmentGranularity: "DAY" 21 | rules: 22 | - type: dropByPeriod 23 | period: P1M 24 | includeFuture: true 25 | - type: broadcastByPeriod 26 | period: P1M 27 | includeFuture: true 28 | nativeSpec: 29 | type: kafka 30 | spec: 31 | dataSchema: 32 | dataSource: metrics-kafka-2 33 | timestampSpec: 34 | column: timestamp 35 | format: auto 36 | dimensionsSpec: 37 | dimensions: [] 38 | dimensionExclusions: 39 | - timestamp 40 | - value 41 | metricsSpec: 42 | - name: count 43 | type: count 44 | - name: value_sum 45 | fieldName: value 46 | type: doubleSum 47 | - name: value_min 48 | fieldName: value 49 | type: doubleMin 50 | - name: value_max 51 | fieldName: value 52 | type: doubleMax 53 | granularitySpec: 54 | type: uniform 55 | segmentGranularity: HOUR 56 | queryGranularity: NONE 57 | ioConfig: 58 | topic: metrics 59 | inputFormat: 60 | type: json 61 | consumerProperties: 62 | bootstrap.servers: localhost:9092 63 | taskCount: 1 64 | replicas: 1 65 | taskDuration: PT1H 66 | tuningConfig: 67 | type: kafka 68 | maxRowsPerSegment: 5000000 69 | -------------------------------------------------------------------------------- /e2e/configs/kafka-ingestion.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: druid.apache.org/v1alpha1 2 | kind: DruidIngestion 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: druidingestion 6 | app.kubernetes.io/instance: druidingestion-sample 7 | name: kafka-1 8 | spec: 9 | suspend: false 10 | druidCluster: tiny-cluster 11 | ingestion: 12 | type: kafka 13 | compaction: 14 | tuningConfig: 15 | type: "kafka" 16 | partitionsSpec: 17 | type: "dynamic" 18 | skipOffsetFromLatest: "PT0S" 19 | granularitySpec: 20 | segmentGranularity: "DAY" 21 | rules: 22 | - type: dropByPeriod 23 | period: P1M 24 | includeFuture: true 25 | - type: broadcastByPeriod 26 | period: P1M 27 | includeFuture: true 28 | spec: |- 29 | { 30 | "type": "kafka", 31 | "spec": { 32 | "dataSchema": { 33 | "dataSource": "metrics-kafka", 34 | "timestampSpec": { 35 | "column": "timestamp", 36 | "format": "auto" 37 | }, 38 | "dimensionsSpec": { 39 | "dimensions": [], 40 | "dimensionExclusions": [ 41 | "timestamp", 42 | "value" 43 | ] 44 | }, 45 | "metricsSpec": [ 46 | { 47 | "name": "count", 48 | "type": "count" 49 | }, 50 | { 51 | "name": "value_sum", 52 | "fieldName": "value", 53 | "type": "doubleSum" 54 | }, 55 | { 56 | "name": "value_min", 57 | "fieldName": "value", 58 | "type": "doubleMin" 59 | }, 60 | { 61 | "name": "value_max", 62 | "fieldName": "value", 63 | "type": "doubleMax" 64 | } 65 | ], 66 | "granularitySpec": { 67 | "type": "uniform", 68 | "segmentGranularity": "HOUR", 69 | "queryGranularity": "NONE" 70 | } 71 | }, 72 | "ioConfig": { 73 | "topic": "metrics", 74 | "inputFormat": { 75 | "type": "json" 76 | }, 77 | "consumerProperties": { 78 | "bootstrap.servers": "localhost:9092" 79 | }, 80 | "taskCount": 1, 81 | "replicas": 1, 82 | "taskDuration": "PT1H" 83 | }, 84 | "tuningConfig": { 85 | "type": "kafka", 86 | "maxRowsPerSegment": 5000000 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /e2e/configs/minio-operator-override.yaml: -------------------------------------------------------------------------------- 1 | operator: 2 | replicaCount: 1 3 | env: 4 | - name: MINIO_OPERATOR_TLS_ENABLE 5 | value: "off" 6 | - name: MINIO_CONSOLE_TLS_ENABLE 7 | value: "off" 8 | 
-------------------------------------------------------------------------------- /e2e/configs/minio-tenant-override.yaml: -------------------------------------------------------------------------------- 1 | tenant: 2 | pools: 3 | - name: "minio" 4 | servers: 1 5 | volumesPerServer: 1 6 | certificate: 7 | requestAutoCert: false 8 | buckets: 9 | - name: "druid" 10 | -------------------------------------------------------------------------------- /e2e/druid-ingestion-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | TASK_ID=$1 6 | 7 | echo "Checking Status for task $TASK_ID..." 8 | STATUS=$(curl -s http://druid-tiny-cluster-coordinators.druid.svc:8088/druid/indexer/v1/task/${TASK_ID}/status | jq '.status.status' -r); 9 | while [ $STATUS == "RUNNING" ] 10 | do 11 | sleep 8; 12 | echo "TASK is "$STATUS "..." 13 | STATUS=$(curl -s http://druid-tiny-cluster-coordinators.druid.svc:8088/druid/indexer/v1/task/${TASK_ID}/status | jq '.status.status' -r) 14 | done 15 | 16 | if [ $STATUS == "SUCCESS" ] 17 | then 18 | echo "TASK $TASK_ID COMPLETED SUCCESSFULLY" 19 | sleep 60 # need time for the segments to become queryable 20 | else 21 | echo "TASK $TASK_ID FAILED !!!!" 22 | exit 1 23 | fi 24 | 25 | echo "Querying Data ... " 26 | echo "Running query SELECT COUNT(*) AS \"Count\" FROM \"wikipedia-2\" WHERE isMinor = 'false'" 27 | 28 | cat > query.json </dev/null || true)" != 'true' ]; then 9 | docker run \ 10 | -d --restart=always -p "127.0.0.1:${reg_port}:5000" --name "${reg_name}" \ 11 | registry:2 12 | fi 13 | 14 | if [ $(kind get clusters | grep ^kind$) ] 15 | then 16 | echo "Kind cluster Cluster exists skipping creation ..." 17 | echo "Switching context to kind ..." 18 | kubectl config use-context kind-kind 19 | else 20 | 21 | # create a cluster with the local registry enabled in containerd 22 | cat <" ] 11 | then 12 | echo "Seems to be in progress ..." 13 | elif [ $STAT == 1 ] 14 | then 15 | echo "Job completed Successfully !!!" 16 | break 17 | fi 18 | if [ $i == 9 ] 19 | then 20 | echo "================" 21 | echo "Task Timeout ..." 22 | echo "FAILED EXITING !!!" 23 | echo "================" 24 | exit 1 25 | fi 26 | done 27 | -------------------------------------------------------------------------------- /e2e/test-extra-common-config.sh: -------------------------------------------------------------------------------- 1 | echo "Test: ExtraCommonConfig" 2 | sed -e "s/CM_NAMESPACE/${NAMESPACE}/g" e2e/configs/extra-common-config.yaml | kubectl apply -n "${NAMESPACE}" -f - 3 | sleep 10 4 | # Wait for Druid 5 | for d in $(kubectl get pods -n "${NAMESPACE}" -l app=druid -l druid_cr=tiny-cluster -o name) 6 | do 7 | kubectl wait -n "${NAMESPACE}" "$d" --for=condition=Ready --timeout=5m 8 | done 9 | # wait for druid pods 10 | for s in $(kubectl get sts -n "${NAMESPACE}" -l app="${NAMESPACE}" -l druid_cr=tiny-cluster -o name) 11 | do 12 | kubectl rollout status "$s" -n "${NAMESPACE}" --timeout=5m 13 | done 14 | 15 | extraDataTXT=$(kubectl get configmap -n "${NAMESPACE}" tiny-cluster-druid-common-config -o 'jsonpath={.data.test\.txt}') 16 | if [[ "${extraDataTXT}" != "This Is Test" ]] 17 | then 18 | echo "Bad value for key: test.txt" 19 | echo "Test: ExtraCommonConfig => FAILED\!" 
20 | fi 21 | 22 | extraDataYAML=$(kubectl get configmap -n "${NAMESPACE}" tiny-cluster-druid-common-config -o 'jsonpath={.data.test\.yaml}') 23 | if [[ "${extraDataYAML}" != "YAML" ]] 24 | then 25 | echo "Bad value for key: test.yaml" 26 | echo "Test: ExtraCommonConfig => FAILED\!" 27 | fi 28 | 29 | kubectl delete -f e2e/configs/extra-common-config.yaml -n "${NAMESPACE}" 30 | for d in $(kubectl get pods -n "${NAMESPACE}" -l app=druid -l druid_cr=tiny-cluster -o name) 31 | do 32 | kubectl wait -n "${NAMESPACE}" "$d" --for=delete --timeout=5m 33 | done 34 | 35 | echo "Test: ExtraCommonConfig => SUCCESS\!" -------------------------------------------------------------------------------- /e2e/wikipedia-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | echo "Downloading Index" 3 | wget -q https://raw.githubusercontent.com/apache/druid/master/examples/quickstart/tutorial/wikipedia-index.json 4 | 5 | echo "Creating Task" 6 | task_id=$(curl -s -X 'POST' -H 'Content-Type:application/json' -d @wikipedia-index.json http://druid-tiny-cluster-coordinators.druid.svc:8088/druid/indexer/v1/task | jq '.task' -r) 7 | if [ $? == 0 ] 8 | then 9 | echo "Task created with ID $task_id" 10 | fi 11 | 12 | echo "Checking Status for task ..." 13 | STATUS=$(curl -s http://druid-tiny-cluster-coordinators.druid.svc:8088/druid/indexer/v1/task/$task_id/status | jq '.status.status' -r); 14 | while [ $STATUS == "RUNNING" ] 15 | do 16 | sleep 8; 17 | echo "TASK is "$STATUS "..." 18 | STATUS=$(curl -s http://druid-tiny-cluster-coordinators.druid.svc:8088/druid/indexer/v1/task/$task_id/status | jq '.status.status' -r) 19 | done 20 | 21 | if [ $STATUS == "SUCCESS" ] 22 | then 23 | echo "TASK $task_id COMPLETED SUCCESSFULLY" 24 | sleep 60 # need time for the segments to become queryable 25 | else 26 | echo "TASK $task_id FAILED !!!!" 27 | fi 28 | 29 | echo "Querying Data ... " 30 | echo "Running query SELECT COUNT(*) AS \"Count\" FROM \"wikipedia\" WHERE isMinor = 'false'" 31 | 32 | cat > query.json < 5 | 6 | {{ fieldName . }}
7 | 8 | {{ if linkForType .Type }} 9 | 10 | {{ typeDisplayName .Type }} 11 | 12 | {{ else }} 13 | {{ typeDisplayName .Type }} 14 | {{ end }} 15 | 16 | 17 | 18 | {{ if fieldEmbedded . }} 19 |

20 | (Members of {{ fieldName . }} are embedded into this type.) 21 |

22 | {{ end}} 23 | 24 | {{ if isOptionalMember .}} 25 | (Optional) 26 | {{ end }} 27 | 28 | {{ safe (renderComments .CommentLines) }} 29 | 30 | {{ if and (eq (.Type.Name.Name) "ObjectMeta") }} 31 | Refer to the Kubernetes API documentation for the fields of the 32 | metadata field. 33 | {{ end }} 34 | 35 | {{ if or (eq (fieldName .) "spec") }} 36 |
37 |
38 | 39 | {{ template "members" .Type }} 40 |
41 | {{ end }} 42 | 43 | 44 | {{ end }} 45 | {{ end }} 46 | {{ end }} 47 | -------------------------------------------------------------------------------- /hack/api-docs/template/pkg.tpl: -------------------------------------------------------------------------------- 1 | {{ define "packages" }} 2 |

Druid API reference

3 | 4 | {{ with .packages}} 5 |

Packages:

6 | 13 | {{ end}} 14 | 15 | {{ range .packages }} 16 |

17 | {{- packageDisplayName . -}} 18 |

19 | 20 | {{ with (index .GoPackages 0 )}} 21 | {{ with .DocComments }} 22 | {{ safe (renderComments .) }} 23 | {{ end }} 24 | {{ end }} 25 | 26 | Resource Types: 27 | 28 |
    29 | {{- range (visibleTypes (sortedTypes .Types)) -}} 30 | {{ if isExportedType . -}} 31 |
  • 32 | {{ typeDisplayName . }} 33 |
  • 34 | {{- end }} 35 | {{- end -}} 36 |
37 | 38 | {{ range (visibleTypes (sortedTypes .Types))}} 39 | {{ template "type" . }} 40 | {{ end }} 41 | {{ end }} 42 | 43 |
44 |

This page was automatically generated with gen-crd-api-reference-docs

45 |
46 | {{ end }} 47 | -------------------------------------------------------------------------------- /hack/api-docs/template/type.tpl: -------------------------------------------------------------------------------- 1 | {{ define "type" }} 2 |

3 | {{- .Name.Name }} 4 | {{ if eq .Kind "Alias" }}({{.Underlying}} alias){{ end -}} 5 |

6 | 7 | {{ with (typeReferences .) }} 8 |

9 | (Appears on: 10 | {{- $prev := "" -}} 11 | {{- range . -}} 12 | {{- if $prev -}}, {{ end -}} 13 | {{ $prev = . }} 14 | {{ typeDisplayName . }} 15 | {{- end -}} 16 | ) 17 |

18 | {{ end }} 19 | 20 | {{ with .CommentLines }} 21 | {{ safe (renderComments .) }} 22 | {{ end }} 23 | 24 | {{ if .Members }} 25 |
26 |
27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | {{ if isExportedType . }} 36 | 37 | 40 | 43 | 44 | 45 | 49 | 52 | 53 | {{ end }} 54 | {{ template "members" . }} 55 | 56 |
Field Description
38 | apiVersion
39 | string
41 | {{ apiGroup . }} 42 |
46 | kind
47 | string 48 |
50 | {{ .Name.Name }} 51 |
57 |
58 |
59 | {{ end }} 60 | {{ end }} 61 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package main 18 | 19 | import ( 20 | "flag" 21 | "os" 22 | "strings" 23 | 24 | "github.com/datainfrahq/druid-operator/controllers/druid" 25 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 26 | // to ensure that exec-entrypoint and run can make use of them. 27 | _ "k8s.io/client-go/plugin/pkg/client/auth" 28 | "sigs.k8s.io/controller-runtime/pkg/cache" 29 | 30 | "k8s.io/apimachinery/pkg/runtime" 31 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 32 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 33 | ctrl "sigs.k8s.io/controller-runtime" 34 | "sigs.k8s.io/controller-runtime/pkg/healthz" 35 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 36 | 37 | druidv1alpha1 "github.com/datainfrahq/druid-operator/apis/druid/v1alpha1" 38 | druidingestioncontrollers "github.com/datainfrahq/druid-operator/controllers/ingestion" 39 | //+kubebuilder:scaffold:imports 40 | ) 41 | 42 | var ( 43 | scheme = runtime.NewScheme() 44 | setupLog = ctrl.Log.WithName("setup") 45 | watchNamespace = os.Getenv("WATCH_NAMESPACE") 46 | ) 47 | 48 | func init() { 49 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 50 | 51 | utilruntime.Must(druidv1alpha1.AddToScheme(scheme)) 52 | //+kubebuilder:scaffold:scheme 53 | } 54 | 55 | func main() { 56 | var metricsAddr string 57 | var enableLeaderElection bool 58 | var probeAddr string 59 | flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") 60 | flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") 61 | flag.BoolVar(&enableLeaderElection, "leader-elect", false, 62 | "Enable leader election for controller manager. 
"+ 63 | "Enabling this will ensure there is only one active controller manager.") 64 | opts := zap.Options{ 65 | Development: true, 66 | } 67 | opts.BindFlags(flag.CommandLine) 68 | flag.Parse() 69 | 70 | ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) 71 | 72 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ 73 | Scheme: scheme, 74 | MetricsBindAddress: metricsAddr, 75 | Port: 9443, 76 | HealthProbeBindAddress: probeAddr, 77 | LeaderElection: enableLeaderElection, 78 | LeaderElectionID: "e6946145.apache.org", 79 | Namespace: os.Getenv("WATCH_NAMESPACE"), 80 | NewCache: watchNamespaceCache(), 81 | // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily 82 | // when the Manager ends. This requires the binary to immediately end when the 83 | // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly 84 | // speeds up voluntary leader transitions as the new leader don't have to wait 85 | // LeaseDuration time first. 86 | // 87 | // In the default scaffold provided, the program ends immediately after 88 | // the manager stops, so would be fine to enable this option. However, 89 | // if you are doing or is intended to do any operation such as perform cleanups 90 | // after the manager stops then its usage might be unsafe. 91 | // LeaderElectionReleaseOnCancel: true, 92 | }) 93 | if err != nil { 94 | setupLog.Error(err, "unable to start manager") 95 | os.Exit(1) 96 | } 97 | 98 | if err = (druid.NewDruidReconciler(mgr)).SetupWithManager(mgr); err != nil { 99 | setupLog.Error(err, "unable to create controller", "controller", "Druid") 100 | os.Exit(1) 101 | } 102 | 103 | if err = (druidingestioncontrollers.NewDruidIngestionReconciler(mgr)).SetupWithManager(mgr); err != nil { 104 | setupLog.Error(err, "unable to create controller", "controller", "DruidIngestion") 105 | os.Exit(1) 106 | } 107 | 108 | //+kubebuilder:scaffold:builder 109 | 110 | if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { 111 | setupLog.Error(err, "unable to set up health check") 112 | os.Exit(1) 113 | } 114 | if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { 115 | setupLog.Error(err, "unable to set up ready check") 116 | os.Exit(1) 117 | } 118 | 119 | setupLog.Info("starting manager") 120 | if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { 121 | setupLog.Error(err, "problem running manager") 122 | os.Exit(1) 123 | } 124 | } 125 | 126 | func watchNamespaceCache() cache.NewCacheFunc { 127 | var managerWatchCache cache.NewCacheFunc 128 | ns := strings.Split(watchNamespace, ",") 129 | 130 | if len(ns) > 1 { 131 | for i := range ns { 132 | ns[i] = strings.TrimSpace(ns[i]) 133 | } 134 | managerWatchCache = cache.MultiNamespacedCacheBuilder(ns) 135 | return managerWatchCache 136 | } 137 | managerWatchCache = (cache.NewCacheFunc)(nil) 138 | return managerWatchCache 139 | } 140 | -------------------------------------------------------------------------------- /pkg/druidapi/druidapi.go: -------------------------------------------------------------------------------- 1 | package druidapi 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "net/url" 8 | "path" 9 | 10 | internalhttp "github.com/datainfrahq/druid-operator/pkg/http" 11 | v1 "k8s.io/api/core/v1" 12 | "k8s.io/apimachinery/pkg/types" 13 | "sigs.k8s.io/controller-runtime/pkg/client" 14 | ) 15 | 16 | const ( 17 | DruidRouterPort = "8088" 18 | OperatorUserName = "OperatorUserName" 19 | OperatorPassword = "OperatorPassword" 20 | ) 21 | 22 | type AuthType 
string 23 | 24 | const ( 25 | BasicAuth AuthType = "basic-auth" 26 | ) 27 | 28 | type Auth struct { 29 | // +required 30 | Type AuthType `json:"type"` 31 | // +required 32 | SecretRef v1.SecretReference `json:"secretRef"` 33 | } 34 | 35 | // GetAuthCreds retrieves basic authentication credentials from a Kubernetes secret. 36 | // If the Auth object is empty, it returns an empty BasicAuth object. 37 | // Parameters: 38 | // 39 | // ctx: The context object. 40 | // c: The Kubernetes client. 41 | // auth: The Auth object containing the secret reference. 42 | // 43 | // Returns: 44 | // 45 | // BasicAuth: The basic authentication credentials. 46 | func GetAuthCreds( 47 | ctx context.Context, 48 | c client.Client, 49 | auth Auth, 50 | ) (internalhttp.BasicAuth, error) { 51 | // Check if the mentioned secret exists 52 | if auth != (Auth{}) { 53 | secret := v1.Secret{} 54 | if err := c.Get(ctx, types.NamespacedName{ 55 | Namespace: auth.SecretRef.Namespace, 56 | Name: auth.SecretRef.Name, 57 | }, &secret); err != nil { 58 | return internalhttp.BasicAuth{}, err 59 | } 60 | creds := internalhttp.BasicAuth{ 61 | UserName: string(secret.Data[OperatorUserName]), 62 | Password: string(secret.Data[OperatorPassword]), 63 | } 64 | 65 | return creds, nil 66 | } 67 | 68 | return internalhttp.BasicAuth{}, nil 69 | } 70 | 71 | // MakePath constructs the appropriate path for the specified Druid API. 72 | // Parameters: 73 | // 74 | // baseURL: The base URL of the Druid cluster. For example, http://router-svc.namespace.svc.cluster.local:8088. 75 | // componentType: The type of Druid component. For example, "indexer". 76 | // apiType: The type of Druid API. For example, "worker". 77 | // additionalPaths: Additional path components to be appended to the URL. 78 | // 79 | // Returns: 80 | // 81 | // string: The constructed path. 82 | func MakePath(baseURL, componentType, apiType string, additionalPaths ...string) string { 83 | u, err := url.Parse(baseURL) 84 | if err != nil { 85 | fmt.Println("Error parsing URL:", err) 86 | return "" 87 | } 88 | 89 | // Construct the initial path 90 | u.Path = path.Join("druid", componentType, "v1", apiType) 91 | 92 | // Append additional path components 93 | for _, p := range additionalPaths { 94 | u.Path = path.Join(u.Path, p) 95 | } 96 | 97 | return u.String() 98 | } 99 | 100 | // GetRouterSvcUrl retrieves the URL of the Druid router service. 101 | // Parameters: 102 | // 103 | // namespace: The namespace of the Druid cluster. 104 | // druidClusterName: The name of the Druid cluster. 105 | // c: The Kubernetes client. 106 | // 107 | // Returns: 108 | // 109 | // string: The URL of the Druid router service. 110 | func GetRouterSvcUrl(namespace, druidClusterName string, c client.Client) (string, error) { 111 | listOpts := []client.ListOption{ 112 | client.InNamespace(namespace), 113 | client.MatchingLabels(map[string]string{ 114 | "druid_cr": druidClusterName, 115 | "component": "router", 116 | }), 117 | } 118 | svcList := &v1.ServiceList{} 119 | if err := c.List(context.Background(), svcList, listOpts...); err != nil { 120 | return "", err 121 | } 122 | var svcName string 123 | 124 | for range svcList.Items { 125 | svcName = svcList.Items[0].Name 126 | } 127 | 128 | if svcName == "" { 129 | return "", errors.New("router svc discovery fail") 130 | } 131 | 132 | newName := "http://" + svcName + "." 
+ namespace + ":" + DruidRouterPort 133 | 134 | return newName, nil 135 | } 136 | -------------------------------------------------------------------------------- /pkg/druidapi/druidapi_test.go: -------------------------------------------------------------------------------- 1 | package druidapi 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestMakePath(t *testing.T) { 8 | tests := []struct { 9 | name string 10 | baseURL string 11 | componentType string 12 | apiType string 13 | additionalPaths []string 14 | expected string 15 | }{ 16 | { 17 | name: "NoAdditionalPath", 18 | baseURL: "http://example-druid-service", 19 | componentType: "indexer", 20 | apiType: "task", 21 | expected: "http://example-druid-service/druid/indexer/v1/task", 22 | }, 23 | { 24 | name: "OneAdditionalPath", 25 | baseURL: "http://example-druid-service", 26 | componentType: "indexer", 27 | apiType: "task", 28 | additionalPaths: []string{"extra"}, 29 | expected: "http://example-druid-service/druid/indexer/v1/task/extra", 30 | }, 31 | { 32 | name: "MultipleAdditionalPaths", 33 | baseURL: "http://example-druid-service", 34 | componentType: "coordinator", 35 | apiType: "rules", 36 | additionalPaths: []string{"wikipedia", "history"}, 37 | expected: "http://example-druid-service/druid/coordinator/v1/rules/wikipedia/history", 38 | }, 39 | { 40 | name: "EmptyBaseURL", 41 | baseURL: "", 42 | componentType: "indexer", 43 | apiType: "task", 44 | expected: "druid/indexer/v1/task", 45 | }, 46 | } 47 | 48 | for _, tt := range tests { 49 | t.Run(tt.name, func(t *testing.T) { 50 | actual := MakePath(tt.baseURL, tt.componentType, tt.apiType, tt.additionalPaths...) 51 | if actual != tt.expected { 52 | t.Errorf("makePath() = %v, expected %v", actual, tt.expected) 53 | } 54 | }) 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /pkg/http/http.go: -------------------------------------------------------------------------------- 1 | package http 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | "net/http" 7 | ) 8 | 9 | // DruidHTTP interface 10 | type DruidHTTP interface { 11 | Do(method, url string, body []byte) (*Response, error) 12 | } 13 | 14 | // HTTP client 15 | type DruidClient struct { 16 | HTTPClient *http.Client 17 | Auth *Auth 18 | } 19 | 20 | func NewHTTPClient(client *http.Client, auth *Auth) DruidHTTP { 21 | newClient := &DruidClient{ 22 | HTTPClient: client, 23 | Auth: auth, 24 | } 25 | 26 | return newClient 27 | } 28 | 29 | // Auth mechanisms supported by Druid control plane to authenticate 30 | // with druid clusters 31 | type Auth struct { 32 | BasicAuth BasicAuth 33 | } 34 | 35 | // BasicAuth 36 | type BasicAuth struct { 37 | UserName string 38 | Password string 39 | } 40 | 41 | // Response passed to controller 42 | type Response struct { 43 | ResponseBody string 44 | StatusCode int 45 | } 46 | 47 | // Do method to be used schema and tenant controller. 
48 | func (c *DruidClient) Do(Method, url string, body []byte) (*Response, error) { 49 | 50 | req, err := http.NewRequest(Method, url, bytes.NewBuffer(body)) 51 | if err != nil { 52 | return nil, err 53 | } 54 | 55 | if c.Auth.BasicAuth != (BasicAuth{}) { 56 | req.SetBasicAuth(c.Auth.BasicAuth.UserName, c.Auth.BasicAuth.Password) 57 | } 58 | 59 | req.Header.Add("Content-Type", "application/json") 60 | resp, err := c.HTTPClient.Do(req) 61 | if err != nil { 62 | return nil, err 63 | } 64 | 65 | defer resp.Body.Close() 66 | 67 | responseBody, err := io.ReadAll(resp.Body) 68 | if err != nil { 69 | return nil, err 70 | } 71 | 72 | return &Response{ResponseBody: string(responseBody), StatusCode: resp.StatusCode}, nil 73 | } 74 | -------------------------------------------------------------------------------- /pkg/util/util.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "reflect" 7 | ) 8 | 9 | // ToJsonString marshals the given data into a JSON string. 10 | func ToJsonString(data interface{}) (string, error) { 11 | jsonData, err := json.Marshal(data) 12 | if err != nil { 13 | return "", err 14 | } 15 | return string(jsonData), nil 16 | } 17 | 18 | // IncludesJson checks if all key-value pairs in the desired JSON string are present in the current JSON string. 19 | func IncludesJson(currentJson, desiredJson string) (bool, error) { 20 | var current, desired map[string]interface{} 21 | 22 | // Parse the current JSON string 23 | if err := json.Unmarshal([]byte(currentJson), ¤t); err != nil { 24 | return false, fmt.Errorf("error parsing current JSON: %w", err) 25 | } 26 | 27 | // Parse the desired JSON string 28 | if err := json.Unmarshal([]byte(desiredJson), &desired); err != nil { 29 | return false, fmt.Errorf("error parsing desired JSON: %w", err) 30 | } 31 | 32 | // Check if all key-value pairs in desired are present in current 33 | return includes(current, desired), nil 34 | } 35 | 36 | // includes recursively checks if all key-value pairs in the desired map are present in the current map. 37 | func includes(current, desired map[string]interface{}) bool { 38 | for key, desiredValue := range desired { 39 | currentValue, exists := current[key] 40 | if !exists { 41 | return false 42 | } 43 | 44 | if !reflect.DeepEqual(desiredValue, currentValue) { 45 | switch desiredValueTyped := desiredValue.(type) { 46 | case map[string]interface{}: 47 | currentValueTyped, ok := currentValue.(map[string]interface{}) 48 | if !ok || !includes(currentValueTyped, desiredValueTyped) { 49 | return false 50 | } 51 | case []interface{}: 52 | currentValueTyped, ok := currentValue.([]interface{}) 53 | if !ok || !sliceIncludes(currentValueTyped, desiredValueTyped) { 54 | return false 55 | } 56 | default: 57 | return false 58 | } 59 | } 60 | } 61 | return true 62 | } 63 | 64 | // sliceIncludes checks if all elements of the desired slice are present in the current slice. 
65 | func sliceIncludes(current, desired []interface{}) bool { 66 | for _, desiredItem := range desired { 67 | found := false 68 | for _, currentItem := range current { 69 | if reflect.DeepEqual(desiredItem, currentItem) { 70 | found = true 71 | break 72 | } 73 | } 74 | if !found { 75 | return false 76 | } 77 | } 78 | return true 79 | } 80 | -------------------------------------------------------------------------------- /tutorials/druid-on-kind/README.md: -------------------------------------------------------------------------------- 1 | # Deploying Druid On KIND 2 | 3 | - In this tutorial, we are going to deploy an Apache Druid cluster on KIND. 4 | - This tutorial can easily be run on your local machine. 5 | 6 | ## Prerequisites 7 | To follow this tutorial you will need: 8 | 9 | - The [KIND CLI](https://kind.sigs.k8s.io/) installed. 10 | - The kubectl CLI installed. 11 | - Docker up and running. 12 | 13 | ## Install Kind Cluster 14 | Create a kind cluster on your machine. 15 | 16 | ```kind create cluster --name druid``` 17 | 18 | ## Install Druid Operator 19 | 20 | - Add the Helm repo 21 | ``` 22 | helm repo add datainfra https://charts.datainfra.io 23 | helm repo update 24 | ``` 25 | 26 | - Install the operator 27 | ``` 28 | # Install Druid operator using Helm 29 | helm -n druid-operator-system upgrade -i --create-namespace cluster-druid-operator datainfra/druid-operator 30 | ``` 31 | 32 | ## Apply Druid Custom Resource 33 | 34 | - This Druid CR runs Druid without ZooKeeper, using the Druid Kubernetes extension. 35 | - MiddleManager-less (MM-less) deployment. 36 | - Derby for the metadata store. 37 | - MinIO for deep storage. 38 | 39 | - Run ```make helm-minio-install```. This deploys MinIO using the MinIO operator. 40 | 41 | - Once the MinIO pod is up and running in the druid namespace, apply the Druid CR: 42 | - ```kubectl apply -f tutorials/druid-on-kind/druid-mmless.yaml -n druid``` 43 | 44 | Here's a view of the druid namespace. 45 | 46 | ``` 47 | NAMESPACE NAME READY STATUS RESTARTS AGE 48 | druid druid-tiny-cluster-brokers-5ddcb655cf-plq6x 1/1 Running 0 2d 49 | druid druid-tiny-cluster-cold-0 1/1 Running 0 2d 50 | druid druid-tiny-cluster-coordinators-846df8f545-9qrsw 1/1 Running 1 2d 51 | druid druid-tiny-cluster-hot-0 1/1 Running 0 2d 52 | druid druid-tiny-cluster-routers-5c9677bf9d-qk9q7 1/1 Running 0 2d 53 | druid myminio-ss-0-0 2/2 Running 0 2d 54 | 55 | ``` 56 | 57 | ## Access Router Console 58 | 59 | - Port-forward the router: 60 | - ```kubectl port-forward svc/druid-tiny-cluster-routers 8088 -n druid``` 61 | --------------------------------------------------------------------------------
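Taken together, the `pkg/druidapi` and `pkg/http` packages above form a small client toolkit: `GetRouterSvcUrl` discovers the router service created for a Druid CR, `GetAuthCreds` loads optional basic-auth credentials from a Kubernetes secret, `MakePath` builds the `druid/<component>/v1/<api>` path, and `DruidClient.Do` issues the request. The sketch below is a hypothetical helper (not part of the operator) showing one way these pieces could be wired together; the `druid` namespace and `tiny-cluster` name are taken from the tutorial above, and the task-status endpoint is only an illustrative target.

```go
package example

import (
	"context"
	"fmt"
	"net/http"

	"github.com/datainfrahq/druid-operator/pkg/druidapi"
	internalhttp "github.com/datainfrahq/druid-operator/pkg/http"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// checkTaskStatus is a hypothetical helper that composes the pkg/druidapi and
// pkg/http building blocks; it is not part of the operator code base.
func checkTaskStatus(ctx context.Context, c client.Client, taskID string) (string, error) {
	// Discover http://<router-svc>.<namespace>:8088 for the "tiny-cluster" CR in the "druid" namespace.
	routerURL, err := druidapi.GetRouterSvcUrl("druid", "tiny-cluster", c)
	if err != nil {
		return "", err
	}

	// An empty Auth returns empty credentials, so no Authorization header is sent.
	creds, err := druidapi.GetAuthCreds(ctx, c, druidapi.Auth{})
	if err != nil {
		return "", err
	}

	// Builds <routerURL>/druid/indexer/v1/task/<taskID>/status.
	statusURL := druidapi.MakePath(routerURL, "indexer", "task", taskID, "status")

	httpClient := internalhttp.NewHTTPClient(&http.Client{}, &internalhttp.Auth{BasicAuth: creds})
	resp, err := httpClient.Do(http.MethodGet, statusURL, nil)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("HTTP %d: %s", resp.StatusCode, resp.ResponseBody), nil
}
```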
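The `pkg/util` helpers above implement a one-way, subset-style comparison: `IncludesJson` reports whether every key/value pair in the desired JSON (including nested objects and individual slice elements) is already present in the current JSON — the kind of check a controller might use before re-applying a desired configuration. A small illustrative check, with made-up JSON documents:

```go
package main

import (
	"fmt"

	"github.com/datainfrahq/druid-operator/pkg/util"
)

func main() {
	// currentJson stands in for what a cluster currently reports; desiredJson
	// for what we want to be present. Both documents are invented for this sketch.
	currentJson := `{"lookups":{"tier":"hot"},"rules":[{"type":"loadForever"},{"type":"dropByPeriod"}]}`
	desiredJson := `{"rules":[{"type":"loadForever"}]}`

	// true here: the desired "rules" entry is found among the current slice elements,
	// even though currentJson carries extra keys and extra slice items.
	ok, err := util.IncludesJson(currentJson, desiredJson)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("desired config already included:", ok)
}
```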