├── Chart.yaml ├── charts ├── zookeeper │ ├── OWNERS │ ├── .helmignore │ ├── templates │ │ ├── poddisruptionbudget.yaml │ │ ├── NOTES.txt │ │ ├── service.yaml │ │ ├── config-jmx-exporter.yaml │ │ ├── service-headless.yaml │ │ ├── _helpers.tpl │ │ ├── job-chroots.yaml │ │ └── statefulset.yaml │ ├── Chart.yaml │ ├── README.md │ └── values.yaml ├── solr │ ├── Chart.yaml │ ├── .helmignore │ ├── templates │ │ ├── service.yaml │ │ ├── ingress.yaml │ │ ├── _helpers.tpl │ │ ├── NOTES.txt │ │ └── statefulset.yaml │ └── values.yaml └── cassandra │ ├── sample │ └── create-storage-gce.yaml │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ ├── pdb.yaml │ ├── service.yaml │ ├── _helpers.tpl │ ├── backup │ │ ├── rbac.yaml │ │ └── cronjob.yaml │ ├── NOTES.txt │ └── statefulset.yaml │ ├── values.yaml │ └── README.md ├── secret.yaml ├── requirements.yaml ├── .helmignore ├── templates ├── _helpers.tpl ├── service.yaml ├── ingress.yaml ├── NOTES.txt ├── deployment.yaml └── configmap.yaml ├── Dockerfile ├── README.md └── values.yaml /Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: A Helm chart for Kubernetes 3 | name: atlas 4 | version: 0.1.0 -------------------------------------------------------------------------------- /charts/zookeeper/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - lachie83 3 | - kow3ns 4 | reviewers: 5 | - lachie83 6 | - kow3ns 7 | -------------------------------------------------------------------------------- /charts/solr/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: A Helm chart for Kubernetes 4 | name: solr 5 | version: 0.1.0 6 | -------------------------------------------------------------------------------- /charts/cassandra/sample/create-storage-gce.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: generic 5 | provisioner: kubernetes.io/gce-pd 6 | parameters: 7 | type: pd-ssd 8 | -------------------------------------------------------------------------------- /secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: atlas-zendesk-users-credentials 5 | type: Opaque 6 | data: 7 | users-credentials.properties: YWRtaW49QURNSU46OmRkOGZjZTc4OWZjZWFkNDJmOGI0NjM1ZmZmZTllYTBhMWJmNzAwYzI3MTdhY2Q1NWY1ODhjMTRkMTBkNGMzZjIK 8 | -------------------------------------------------------------------------------- /requirements.yaml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: zookeeper 3 | version: "1.2.0" 4 | repository: "file://charts/zookeeper" 5 | - name: cassandra 6 | version: "0.9.0" 7 | repository: "file://charts/cassandra" 8 | - name: solr 9 | version: "0.1.0" 10 | repository: "file://charts/solr" 11 | -------------------------------------------------------------------------------- /charts/cassandra/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | # Common backup files 9 | *.swp 10 | *.bak 11 | *.tmp 12 | *~ 13 | # Various IDEs 14 | .project 15 | .idea/ 16 | *.tmproj 17 | OWNERS 18 | -------------------------------------------------------------------------------- /.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /charts/solr/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /charts/zookeeper/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /charts/solr/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "solr.fullname" . }}-headless 5 | labels: 6 | app: {{ template "solr.name" . }} 7 | chart: {{ template "solr.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | clusterIP: None 12 | ports: 13 | - port: {{ .Values.ports.client.containerPort }} 14 | protocol: TCP 15 | name: solr-container 16 | selector: 17 | app: {{ template "solr.name" . }} 18 | release: {{ .Release.Name }} 19 | -------------------------------------------------------------------------------- /charts/cassandra/Chart.yaml: -------------------------------------------------------------------------------- 1 | appVersion: 3.11.3 2 | description: Apache Cassandra is a free and open-source distributed database management 3 | system designed to handle large amounts of data across many commodity servers, providing 4 | high availability with no single point of failure. 
5 | engine: gotpl 6 | home: http://cassandra.apache.org 7 | icon: https://upload.wikimedia.org/wikipedia/commons/5/5e/Cassandra_logo.svg 8 | keywords: 9 | - cassandra 10 | - database 11 | - nosql 12 | maintainers: 13 | - email: goonohc@gmail.com 14 | name: KongZ 15 | name: cassandra 16 | version: 0.9.0 17 | -------------------------------------------------------------------------------- /charts/cassandra/templates/pdb.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.podDisruptionBudget -}} 2 | apiVersion: policy/v1beta1 3 | kind: PodDisruptionBudget 4 | metadata: 5 | labels: 6 | app: {{ template "cassandra.name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | heritage: {{ .Release.Service }} 9 | release: {{ .Release.Name }} 10 | name: {{ template "cassandra.fullname" . }} 11 | spec: 12 | selector: 13 | matchLabels: 14 | app: {{ template "cassandra.name" . }} 15 | release: {{ .Release.Name }} 16 | {{ toYaml .Values.podDisruptionBudget | indent 2 }} 17 | {{- end -}} 18 | -------------------------------------------------------------------------------- /templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | */}} 13 | {{- define "fullname" -}} 14 | {{- $name := default .Chart.Name .Values.nameOverride -}} 15 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 16 | {{- end -}} 17 | -------------------------------------------------------------------------------- /charts/zookeeper/templates/poddisruptionbudget.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: {{ template "zookeeper.fullname" . }} 5 | labels: 6 | app: {{ template "zookeeper.name" . }} 7 | chart: {{ template "zookeeper.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | component: server 11 | spec: 12 | selector: 13 | matchLabels: 14 | app: {{ template "zookeeper.name" . }} 15 | release: {{ .Release.Name }} 16 | component: server 17 | {{ toYaml .Values.podDisruptionBudget | indent 2 }} 18 | -------------------------------------------------------------------------------- /charts/zookeeper/Chart.yaml: -------------------------------------------------------------------------------- 1 | appVersion: 3.4.10 2 | description: Centralized service for maintaining configuration information, naming, 3 | providing distributed synchronization, and providing group services. 
4 | home: https://zookeeper.apache.org/ 5 | icon: https://zookeeper.apache.org/images/zookeeper_small.gif 6 | maintainers: 7 | - email: lachlan.evenson@microsoft.com 8 | name: lachie83 9 | - email: owensk@google.com 10 | name: kow3ns 11 | name: zookeeper 12 | sources: 13 | - https://github.com/apache/zookeeper 14 | - https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper 15 | version: 1.2.0 16 | -------------------------------------------------------------------------------- /templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "fullname" . }} 5 | labels: 6 | app: {{ template "name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | type: {{ .Values.service.type }} 12 | ports: 13 | - port: {{ .Values.service.externalPort }} 14 | targetPort: {{ .Values.service.internalPort }} 15 | protocol: TCP 16 | name: {{ .Values.service.name }} 17 | selector: 18 | app: {{ template "name" . }} 19 | release: {{ .Release.Name }} 20 | -------------------------------------------------------------------------------- /charts/zookeeper/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Thank you for installing ZooKeeper on your Kubernetes cluster. More information 2 | about ZooKeeper can be found at https://zookeeper.apache.org/doc/current/ 3 | 4 | Your connection string should look like: 5 | {{ template "zookeeper.fullname" . }}-0.{{ template "zookeeper.fullname" . }}-headless:{{ .Values.service.ports.client.port }},{{ template "zookeeper.fullname" . }}-1.{{ template "zookeeper.fullname" . }}-headless:{{ .Values.service.ports.client.port }},... 6 | 7 | You can also use the client service {{ template "zookeeper.fullname" . }}:{{ .Values.service.ports.client.port }} to connect to an available ZooKeeper server. 8 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:8-jdk-alpine 2 | # Install required packages for installation 3 | RUN apk add --no-cache \ 4 | bash \ 5 | su-exec \ 6 | python 7 | 8 | # Define versions of apache atlas 9 | 10 | # Add local bin archive 11 | COPY apache-atlas-1.1.0-server.tar.gz / 12 | 13 | # Unarchive 14 | RUN set -x \ 15 | && cd / \ 16 | && tar -xzf apache-atlas-1.1.0-server.tar.gz \ 17 | && rm apache-atlas-1.1.0-server.tar.gz 18 | 19 | 20 | WORKDIR /apache-atlas-1.1.0 21 | 22 | EXPOSE 21000 23 | 24 | ENV PATH=$PATH:/apache-atlas-1.1.0/bin 25 | CMD ["/bin/bash", "-c", "/apache-atlas-1.1.0/bin/atlas_start.py; tail -fF /apache-atlas-1.1.0/logs/application.log"] 26 | -------------------------------------------------------------------------------- /charts/zookeeper/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "zookeeper.fullname" . }} 5 | labels: 6 | app: {{ template "zookeeper.name" . }} 7 | chart: {{ template "zookeeper.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | annotations: 11 | {{- with .Values.service.annotations }} 12 | {{ toYaml . 
| indent 4 }} 13 | {{- end }} 14 | spec: 15 | type: {{ .Values.service.type }} 16 | ports: 17 | {{- range $key, $value := .Values.service.ports }} 18 | - name: {{ $key }} 19 | {{ toYaml $value | indent 6 }} 20 | {{- end }} 21 | selector: 22 | app: {{ template "zookeeper.name" . }} 23 | release: {{ .Release.Name }} 24 | -------------------------------------------------------------------------------- /charts/zookeeper/templates/config-jmx-exporter.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.exporters.jmx.enabled }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ .Release.Name }}-jmx-exporter 6 | labels: 7 | app: {{ template "zookeeper.name" . }} 8 | chart: {{ template "zookeeper.chart" . }} 9 | release: {{ .Release.Name }} 10 | heritage: {{ .Release.Service }} 11 | data: 12 | config.yml: |- 13 | hostPort: 127.0.0.1:{{ .Values.env.JMXPORT }} 14 | lowercaseOutputName: {{ .Values.exporters.jmx.config.lowercaseOutputName }} 15 | rules: 16 | {{ .Values.exporters.jmx.config.rules | toYaml | indent 6 }} 17 | ssl: false 18 | startDelaySeconds: {{ .Values.exporters.jmx.config.startDelaySeconds }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /charts/zookeeper/templates/service-headless.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "zookeeper.fullname" . }}-headless 5 | labels: 6 | app: {{ template "zookeeper.name" . }} 7 | chart: {{ template "zookeeper.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | {{- if .Values.headless.annotations }} 11 | annotations: 12 | {{ .Values.headless.annotations | toYaml | trimSuffix "\n" | indent 4 }} 13 | {{- end }} 14 | spec: 15 | clusterIP: None 16 | ports: 17 | {{- range $key, $port := .Values.ports }} 18 | - name: {{ $key }} 19 | port: {{ $port.containerPort }} 20 | targetPort: {{ $port.name }} 21 | protocol: {{ $port.protocol }} 22 | {{- end }} 23 | selector: 24 | app: {{ template "zookeeper.name" . }} 25 | release: {{ .Release.Name }} 26 | -------------------------------------------------------------------------------- /templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $serviceName := include "fullname" . -}} 3 | {{- $servicePort := .Values.service.externalPort -}} 4 | apiVersion: extensions/v1beta1 5 | kind: Ingress 6 | metadata: 7 | name: {{ template "fullname" . }} 8 | labels: 9 | app: {{ template "name" . 
}} 10 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 11 | release: {{ .Release.Name }} 12 | heritage: {{ .Release.Service }} 13 | annotations: 14 | {{- range $key, $value := .Values.ingress.annotations }} 15 | {{ $key }}: {{ $value | quote }} 16 | {{- end }} 17 | spec: 18 | rules: 19 | {{- range $host := .Values.ingress.hosts }} 20 | - host: {{ $host }} 21 | http: 22 | paths: 23 | - path: / 24 | backend: 25 | serviceName: {{ $serviceName }} 26 | servicePort: {{ $servicePort }} 27 | {{- end -}} 28 | {{- if .Values.ingress.tls }} 29 | tls: 30 | {{ toYaml .Values.ingress.tls | indent 4 }} 31 | {{- end -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /charts/solr/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "solr.fullname" . -}} 3 | {{- $ingressPath := .Values.ingress.path -}} 4 | apiVersion: extensions/v1beta1 5 | kind: Ingress 6 | metadata: 7 | name: {{ $fullName }} 8 | labels: 9 | app: {{ template "solr.name" . }} 10 | chart: {{ template "solr.chart" . }} 11 | release: {{ .Release.Name }} 12 | heritage: {{ .Release.Service }} 13 | {{- with .Values.ingress.annotations }} 14 | annotations: 15 | {{ toYaml . | indent 4 }} 16 | {{- end }} 17 | spec: 18 | {{- if .Values.ingress.tls }} 19 | tls: 20 | {{- range .Values.ingress.tls }} 21 | - hosts: 22 | {{- range .hosts }} 23 | - {{ . }} 24 | {{- end }} 25 | secretName: {{ .secretName }} 26 | {{- end }} 27 | {{- end }} 28 | rules: 29 | {{- range .Values.ingress.hosts }} 30 | - host: {{ . }} 31 | http: 32 | paths: 33 | - path: {{ $ingressPath }} 34 | backend: 35 | serviceName: {{ $fullName }} 36 | servicePort: 8983 37 | {{- end }} 38 | {{- end }} 39 | -------------------------------------------------------------------------------- /charts/cassandra/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "cassandra.fullname" . }} 5 | labels: 6 | app: {{ template "cassandra.name" . }} 7 | chart: {{ template "cassandra.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | clusterIP: None 12 | type: {{ .Values.service.type }} 13 | ports: 14 | - name: intra 15 | port: 7000 16 | targetPort: 7000 17 | - name: tls 18 | port: 7001 19 | targetPort: 7001 20 | - name: jmx 21 | port: 7199 22 | targetPort: 7199 23 | - name: cql 24 | port: {{ default 9042 .Values.config.ports.cql }} 25 | targetPort: {{ default 9042 .Values.config.ports.cql }} 26 | - name: thrift 27 | port: {{ default 9160 .Values.config.ports.thrift }} 28 | targetPort: {{ default 9160 .Values.config.ports.thrift }} 29 | {{- if .Values.config.ports.agent }} 30 | - name: agent 31 | port: {{ .Values.config.ports.agent }} 32 | targetPort: {{ .Values.config.ports.agent }} 33 | {{- end }} 34 | selector: 35 | app: {{ template "cassandra.name" . }} 36 | release: {{ .Release.Name }} 37 | -------------------------------------------------------------------------------- /charts/solr/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 
4 | */}} 5 | {{- define "solr.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "solr.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "solr.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /charts/zookeeper/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "zookeeper.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "zookeeper.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "zookeeper.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # atlas-helm-chart 2 | This chart installs Apache Atlas, using Solr for indexing and Cassandra as the backend storage. 3 | 4 | To install the chart, first clone the repo.
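A possible sequence (a sketch; the repository URL below is a placeholder for wherever this chart actually lives):

```sh
# Clone the chart repository (placeholder URL) and vendor its local dependencies
git clone https://github.com/<your-org>/atlas-helm-chart.git
cd atlas-helm-chart
helm dependency update .
```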
Next, create the Secret for user credentials: 5 | 6 | ```sh 7 | kubectl create -f secret.yaml 8 | ``` 9 | 10 | Finally, run the following command to install the chart: 11 | 12 | ```sh 13 | helm install --name <release-name> -f atlas-helm-chart/values.yaml atlas-helm-chart 14 | ``` 15 | This will run the Solr, Atlas, Cassandra, and ZooKeeper pods. 16 | 17 | solr version : 7.5 18 | 19 | atlas version : 1.1.0 20 | 21 | cassandra version : 3.11.3 22 | 23 | zookeeper version : 3.4.x 24 | 25 | Download the source code of Apache Atlas here: [Download](http://atlas.apache.org/Downloads.html) 26 | 27 | ## Build Atlas from source 28 | 29 | ```sh 30 | tar xvfz apache-atlas-1.1.0-sources.tar.gz 31 | 32 | cd apache-atlas-sources-1.1.0/ 33 | 34 | mvn clean -DskipTests install 35 | ``` 36 | ## Packaging Apache Atlas 37 | ```sh 38 | mvn clean -DskipTests package -Pdist 39 | ``` 40 | This will generate the Apache Atlas distribution tarball. 41 | 42 | ## Build Docker image 43 | docker build -t "gcr.io/edw-dev/apache-atlas:1.1.0" . 44 | gcloud docker -- push gcr.io/edw-dev/apache-atlas:1.1.0 -------------------------------------------------------------------------------- /values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for atlas. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | replicaCount: 1 5 | image: 6 | repository: gcr.io/edw-dev/apache-atlas 7 | tag: 1.1.0 8 | pullPolicy: IfNotPresent 9 | config_parameter: 10 | cassandra_clustername: cassandra 11 | cassandra_storage_port: 9042 12 | # solr_zookeeper_url: :2181 13 | service: 14 | name: atlas 15 | type: LoadBalancer 16 | externalPort: 21000 17 | internalPort: 21000 18 | ingress: 19 | enabled: false 20 | # Used to create an Ingress record. 21 | hosts: 22 | - chart-example.local 23 | annotations: 24 | # kubernetes.io/ingress.class: nginx 25 | # kubernetes.io/tls-acme: "true" 26 | tls: 27 | # Secrets must be manually created in the namespace. 28 | # - secretName: chart-example-tls 29 | # hosts: 30 | # - chart-example.local 31 | resources: {} 32 | # We usually recommend not to specify default resources and to leave this as a conscious 33 | # choice for the user. This also increases chances charts run on environments with little 34 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 35 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 36 | # limits: 37 | # cpu: 100m 38 | # memory: 128Mi 39 | # requests: 40 | # cpu: 100m 41 | # memory: 128Mi 42 | -------------------------------------------------------------------------------- /charts/solr/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if .Values.ingress.enabled }} 3 | {{- range .Values.ingress.hosts }} 4 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} 5 | {{- end }} 6 | {{- else if contains "NodePort" .Values.service.type }} 7 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "solr.fullname" . }}) 8 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 9 | echo http://$NODE_IP:$NODE_PORT 10 | {{- else if contains "LoadBalancer" .Values.service.type }} 11 | NOTE: It may take a few minutes for the LoadBalancer IP to be available.
12 | You can watch the status of it by running 'kubectl get svc -w {{ template "solr.fullname" . }}' 13 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "solr.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 14 | echo http://$SERVICE_IP:{{ .Values.service.port }} 15 | {{- else if contains "ClusterIP" .Values.service.type }} 16 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "solr.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 17 | echo "Visit http://127.0.0.1:8080 to use your application" 18 | kubectl port-forward $POD_NAME 8080:{{ .Values.ports.client.containerPort }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if .Values.ingress.enabled }} 3 | {{- range .Values.ingress.hosts }} 4 | http://{{ . }} 5 | {{- end }} 6 | {{- else if contains "NodePort" .Values.service.type }} 7 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "fullname" . }}) 8 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 9 | echo http://$NODE_IP:$NODE_PORT 10 | {{- else if contains "LoadBalancer" .Values.service.type }} 11 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 12 | You can watch the status of it by running 'kubectl get svc -w {{ template "fullname" . }}' 13 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 14 | echo http://$SERVICE_IP:{{ .Values.service.externalPort }} 15 | {{- else if contains "ClusterIP" .Values.service.type }} 16 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 17 | kubectl port-forward $POD_NAME 8080:{{ .Values.service.externalPort }} 18 | echo "Visit http://127.0.0.1:8080 to use your application" 19 | echo "Default username/password is admin/admin" 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /charts/cassandra/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "cassandra.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name.
13 | */}} 14 | {{- define "cassandra.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "cassandra.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{/* 35 | Create the name of the service account to use 36 | */}} 37 | {{- define "cassandra.serviceAccountName" -}} 38 | {{- if .Values.serviceAccount.create -}} 39 | {{ default (include "cassandra.fullname" .) .Values.serviceAccount.name }} 40 | {{- else -}} 41 | {{ default "default" .Values.serviceAccount.name }} 42 | {{- end -}} 43 | {{- end -}} 44 | -------------------------------------------------------------------------------- /charts/cassandra/templates/backup/rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.backup.enabled }} 2 | {{- if .Values.serviceAccount.create }} 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: {{ template "cassandra.serviceAccountName" . }} 7 | labels: 8 | app: {{ template "cassandra.name" . }} 9 | chart: {{ template "cassandra.chart" . }} 10 | release: "{{ .Release.Name }}" 11 | heritage: "{{ .Release.Service }}" 12 | --- 13 | {{- end }} 14 | {{- if .Values.rbac.create }} 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: Role 17 | metadata: 18 | name: {{ template "cassandra.fullname" . }}-backup 19 | labels: 20 | app: {{ template "cassandra.name" . }} 21 | chart: {{ template "cassandra.chart" . }} 22 | release: "{{ .Release.Name }}" 23 | heritage: "{{ .Release.Service }}" 24 | rules: 25 | - apiGroups: [""] 26 | resources: ["pods", "pods/log"] 27 | verbs: ["get", "list"] 28 | - apiGroups: [""] 29 | resources: ["pods/exec"] 30 | verbs: ["create"] 31 | --- 32 | apiVersion: rbac.authorization.k8s.io/v1 33 | kind: RoleBinding 34 | metadata: 35 | name: {{ template "cassandra.fullname" . }}-backup 36 | labels: 37 | app: {{ template "cassandra.name" . }} 38 | chart: {{ template "cassandra.chart" . }} 39 | release: "{{ .Release.Name }}" 40 | heritage: "{{ .Release.Service }}" 41 | roleRef: 42 | apiGroup: rbac.authorization.k8s.io 43 | kind: Role 44 | name: {{ template "cassandra.fullname" . }}-backup 45 | subjects: 46 | - kind: ServiceAccount 47 | name: {{ template "cassandra.serviceAccountName" . 
}} 48 | namespace: {{ .Release.Namespace }} 49 | {{- end }} 50 | {{- end }} 51 | -------------------------------------------------------------------------------- /charts/solr/values.yaml: -------------------------------------------------------------------------------- 1 | replicaCount: 1 2 | 3 | revisionHistoryLimit: 5 4 | 5 | updateStrategy: 6 | type: RollingUpdate 7 | 8 | podManagementPolicy: "OrderedReady" 9 | 10 | zkClientTimeout: 15000 11 | 12 | terminationGracePeriodSeconds: 1800 13 | 14 | ports: 15 | client: 16 | containerPort: 8983 17 | 18 | livenessProbe: 19 | # tcpSocket: 20 | # port: 8983 21 | # initialDelaySeconds: 20 22 | # periodSeconds: 30 23 | # timeoutSeconds: 30 24 | # failureThreshold: 6 25 | # successThreshold: 1 26 | 27 | readinessProbe: 28 | # tcpSocket: 29 | # port: 8983 30 | # initialDelaySeconds: 20 31 | # periodSeconds: 30 32 | # timeoutSeconds: 30 33 | # failureThreshold: 6 34 | # successThreshold: 1 35 | 36 | securityContext: 37 | enabled: true 38 | fsGroup: 8983 39 | runAsUser: 8983 40 | 41 | heap: "2g" 42 | timeZone: "UTC" 43 | logLevel: "INFO" 44 | 45 | image: 46 | repository: solr 47 | tag: 7.5 48 | pullPolicy: IfNotPresent 49 | 50 | service: 51 | type: ClusterIP 52 | port: 80 53 | 54 | ingress: 55 | enabled: true 56 | annotations: {} 57 | # kubernetes.io/ingress.class: nginx 58 | path: / 59 | hosts: 60 | - solr-admin.local 61 | 62 | resources: {} 63 | # limits: 64 | # cpu: 100m 65 | # memory: 128Mi 66 | # requests: 67 | # cpu: 100m 68 | # memory: 128Mi 69 | 70 | nodeSelector: {} 71 | 72 | tolerations: [] 73 | 74 | affinity: {} 75 | 76 | persistence: 77 | enabled: true 78 | # storageClass: "-" 79 | accessMode: ReadWriteOnce 80 | size: 1Gi 81 | 82 | zookeeper: 83 | replicaCount: 1 84 | ports: 85 | client: 86 | containerPort: 2181 87 | -------------------------------------------------------------------------------- /charts/cassandra/templates/backup/cronjob.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.backup.enabled }} 2 | {{- $release := .Release }} 3 | {{- $values := .Values }} 4 | {{- $backup := $values.backup }} 5 | {{- range $index, $schedule := $backup.schedule }} 6 | --- 7 | apiVersion: batch/v1beta1 8 | kind: CronJob 9 | metadata: 10 | name: {{ template "cassandra.fullname" $ }}-backup-{{ $schedule.keyspace }} 11 | labels: 12 | app: {{ template "cassandra.name" $ }} 13 | chart: {{ template "cassandra.chart" $ }} 14 | release: "{{ $release.Name }}" 15 | heritage: "{{ $release.Service }}" 16 | spec: 17 | schedule: {{ $schedule.cron | quote }} 18 | concurrencyPolicy: Forbid 19 | startingDeadlineSeconds: 120 20 | jobTemplate: 21 | spec: 22 | template: 23 | metadata: 24 | annotations: 25 | {{ toYaml $backup.annotations }} 26 | spec: 27 | restartPolicy: OnFailure 28 | serviceAccountName: {{ template "cassandra.fullname" $ }}-backup 29 | containers: 30 | - name: cassandra-backup 31 | image: "{{ $backup.image.repos }}:{{ $backup.image.tag }}" 32 | command: ["cain"] 33 | args: 34 | - backup 35 | - --namespace 36 | - {{ $release.Namespace }} 37 | - --selector 38 | - release={{ $release.Name }} 39 | - --keyspace 40 | - {{ $schedule.keyspace }} 41 | - --dst 42 | - {{ $backup.destination }} 43 | - --parallel 44 | - "0" 45 | {{- with $backup.env }} 46 | env: 47 | {{ toYaml . | indent 12 }} 48 | {{- end }} 49 | {{- with $backup.resources }} 50 | resources: 51 | {{ toYaml . 
| indent 14 }} 52 | {{- end }} 53 | affinity: 54 | podAffinity: 55 | preferredDuringSchedulingIgnoredDuringExecution: 56 | - labelSelector: 57 | matchExpressions: 58 | - key: app 59 | operator: In 60 | values: 61 | - {{ template "cassandra.fullname" $ }} 62 | - key: release 63 | operator: In 64 | values: 65 | - {{ $release.Name }} 66 | topologyKey: "kubernetes.io/hostname" 67 | {{- with $values.tolerations }} 68 | tolerations: 69 | {{ toYaml . | indent 10 }} 70 | {{- end }} 71 | {{- end }} 72 | {{- end }} 73 | -------------------------------------------------------------------------------- /charts/zookeeper/templates/job-chroots.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.jobs.chroots.enabled }} 2 | {{- $root := . }} 3 | {{- $job := .Values.jobs.chroots }} 4 | apiVersion: batch/v1 5 | kind: Job 6 | metadata: 7 | name: {{ template "zookeeper.fullname" . }}-chroots 8 | annotations: 9 | "helm.sh/hook": post-install,post-upgrade 10 | "helm.sh/hook-weight": "-5" 11 | "helm.sh/hook-delete-policy": hook-succeeded 12 | labels: 13 | app: {{ template "zookeeper.name" . }} 14 | chart: {{ template "zookeeper.chart" . }} 15 | release: {{ .Release.Name }} 16 | heritage: {{ .Release.Service }} 17 | component: jobs 18 | job: chroots 19 | spec: 20 | activeDeadlineSeconds: {{ $job.activeDeadlineSeconds }} 21 | backoffLimit: {{ $job.backoffLimit }} 22 | completions: {{ $job.completions }} 23 | parallelism: {{ $job.parallelism }} 24 | template: 25 | metadata: 26 | labels: 27 | app: {{ template "zookeeper.name" . }} 28 | release: {{ .Release.Name }} 29 | component: jobs 30 | job: chroots 31 | spec: 32 | restartPolicy: {{ $job.restartPolicy }} 33 | {{- if .Values.priorityClassName }} 34 | priorityClassName: "{{ .Values.priorityClassName }}" 35 | {{- end }} 36 | containers: 37 | - name: main 38 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 39 | imagePullPolicy: {{ .Values.image.pullPolicy }} 40 | command: 41 | - /bin/bash 42 | - -o 43 | - pipefail 44 | - -euc 45 | {{- $port := .Values.service.ports.client.port }} 46 | - > 47 | sleep 15; 48 | export SERVER={{ template "zookeeper.fullname" $root }}:{{ $port }}; 49 | {{- range $job.config.create }} 50 | echo '==> {{ . }}'; 51 | echo '====> Create chroot if does not exist.'; 52 | zkCli.sh -server {{ template "zookeeper.fullname" $root }}:{{ $port }} get {{ . }} 2>&1 >/dev/null | grep 'cZxid' 53 | || zkCli.sh -server {{ template "zookeeper.fullname" $root }}:{{ $port }} create {{ . }} ""; 54 | echo '====> Confirm chroot exists.'; 55 | zkCli.sh -server {{ template "zookeeper.fullname" $root }}:{{ $port }} get {{ . }} 2>&1 >/dev/null | grep 'cZxid'; 56 | echo '====> Chroot exists.'; 57 | {{- end }} 58 | env: 59 | {{- range $key, $value := $job.env }} 60 | - name: {{ $key | upper | replace "." "_" }} 61 | value: {{ $value | quote }} 62 | {{- end }} 63 | resources: 64 | {{ toYaml $job.resources | indent 12 }} 65 | {{- end -}} 66 | -------------------------------------------------------------------------------- /templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: {{ template "fullname" . }} 5 | labels: 6 | app: {{ template "name" . 
}} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | replicas: {{ .Values.replicaCount }} 12 | template: 13 | metadata: 14 | labels: 15 | app: {{ template "name" . }} 16 | release: {{ .Release.Name }} 17 | spec: 18 | initContainers: 19 | - name: {{ .Chart.Name }}-init 20 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 21 | imagePullPolicy: {{ .Values.image.pullPolicy }} 22 | command: [ 23 | "/bin/bash", 24 | "-c", 25 | "apk update; 26 | apk add curl zip; 27 | zip config.zip /apache-atlas-1.1.0/conf/solr/*; 28 | curl -X POST --header 'Content-Type:text/xml' -d @config.zip 'http://{{ .Release.Name }}-solr-headless:8983/solr/admin/configs?action=CREATE&name=vertex_index'; 29 | curl -X POST --header 'Content-Type:text/xml' -d @config.zip 'http://{{ .Release.Name }}-solr-headless:8983/solr/admin/configs?action=CREATE&name=edge_index'; 30 | curl -X POST --header 'Content-Type:text/xml' -d @config.zip 'http://{{ .Release.Name }}-solr-headless:8983/solr/admin/configs?action=CREATE&name=fulltext_index';" 31 | ] 32 | env: 33 | - name: ZK_CLIENT_TIMEOUT 34 | value: "{{ .Values.zkClientTimeout }}" 35 | containers: 36 | - name: {{ .Chart.Name }} 37 | command: [ 38 | "/bin/bash", 39 | "-c", 40 | "/apache-atlas-1.1.0/bin/atlas_start.py; 41 | tail -f /apache-atlas-1.1.0/logs/*.log;" 42 | ] 43 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 44 | imagePullPolicy: {{ .Values.image.pullPolicy }} 45 | ports: 46 | - containerPort: {{ .Values.service.internalPort }} 47 | resources: 48 | {{ toYaml .Values.resources | indent 12 }} 49 | {{- if .Values.nodeSelector }} 50 | nodeSelector: 51 | {{ toYaml .Values.nodeSelector | indent 8 }} 52 | {{- end }} 53 | volumeMounts: 54 | - name: atlas-config 55 | mountPath: /apache-atlas-1.1.0/conf/atlas-application.properties 56 | subPath: atlas-application.properties 57 | - name: atlas-users 58 | readOnly: true 59 | mountPath: "/etc/conf" 60 | volumes: 61 | - name: atlas-config 62 | configMap: 63 | name: atlas-config 64 | - name: atlas-users 65 | secret: 66 | secretName: atlas-zendesk-users-credentials 67 | 68 | -------------------------------------------------------------------------------- /charts/cassandra/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Cassandra CQL can be accessed via port {{ .Values.config.ports.cql }} on the following DNS name from within your cluster: 2 | Cassandra Thrift can be accessed via port {{ .Values.config.ports.thrift }} on the following DNS name from within your cluster: 3 | 4 | If you want to connect to the remote instance with your local Cassandra CQL CLI, forward the API port to localhost:9042 by running the following: 5 | - kubectl port-forward --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "cassandra.name" . }},release={{ .Release.Name }} -o jsonpath='{ .items[0].metadata.name }') 9042:{{ .Values.config.ports.cql }} 6 | 7 | If you want to connect to Cassandra CQL, run the following: 8 | {{- if contains "NodePort" .Values.service.type }} 9 | - export CQL_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "cassandra.fullname" . 
}}) 10 | - export CQL_HOST=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 11 | - cqlsh $CQL_HOST $CQL_PORT 12 | 13 | {{- else if contains "LoadBalancer" .Values.service.type }} 14 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 15 | Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "cassandra.fullname" . }}' 16 | - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "cassandra.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 17 | - echo cqlsh $SERVICE_IP 18 | {{- else if contains "ClusterIP" .Values.service.type }} 19 | - kubectl port-forward --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "cassandra.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 9042:{{ .Values.config.ports.cql }} 20 | echo cqlsh 127.0.0.1 9042 21 | {{- end }} 22 | 23 | You can also see the cluster status by running the following: 24 | - kubectl exec -it --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "cassandra.name" . }},release={{ .Release.Name }} -o jsonpath='{.items[0].metadata.name}') nodetool status 25 | 26 | To tail the logs for the Cassandra pod, run the following: 27 | - kubectl logs -f --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "cassandra.name" . }},release={{ .Release.Name }} -o jsonpath='{ .items[0].metadata.name }') 28 | 29 | {{- if not .Values.persistence.enabled }} 30 | 31 | Note that the cluster is running with node-local storage instead of PersistentVolumes. In order to prevent data loss, 32 | pods will be decommissioned upon termination. Decommissioning may take some time, so you might also want to adjust the 33 | pod termination grace period, which is currently set to {{ .Values.podSettings.terminationGracePeriodSeconds }} seconds.
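For example, to give the pods more time to decommission, raise the value at upgrade time (a sketch; <chart-path> stands in for wherever this chart lives):
- helm upgrade {{ .Release.Name }} <chart-path> --set podSettings.terminationGracePeriodSeconds=600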
34 | 35 | {{- end}} 36 | -------------------------------------------------------------------------------- /templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: atlas-config 5 | data: 6 | atlas-application.properties: | 7 | atlas.graph.storage.backend=cql 8 | atlas.graph.storage.hostname={{ .Release.Name }}-cassandra 9 | atlas.graph.storage.cassandra.keyspace=JanusGraph 10 | atlas.graph.storage.clustername={{ .Values.config_parameter.cassandra_clustername }} 11 | atlas.graph.storage.port={{ .Values.config_parameter.cassandra_storage_port }} 12 | 13 | atlas.EntityAuditRepository.impl=org.apache.atlas.repository.audit.CassandraBasedAuditRepository 14 | atlas.EntityAuditRepository.keyspace=atlas_audit 15 | atlas.EntityAuditRepository.replicationFactor=1 16 | 17 | atlas.graph.index.search.backend=solr 18 | atlas.graph.index.search.solr.mode=cloud 19 | atlas.graph.index.search.solr.zookeeper-url={{ .Release.Name }}-zookeeper:{{ .Values.zookeeper.ports.client.containerPort }} 20 | atlas.graph.index.search.solr.zookeeper-connect-timeout=60000 21 | atlas.graph.index.search.solr.zookeeper-session-timeout=60000 22 | atlas.graph.index.search.solr.wait-searcher=true 23 | 24 | atlas.graph.index.search.max-result-set-size=150 25 | 26 | atlas.notification.embedded=true 27 | atlas.kafka.data=${sys:atlas.home}/data/kafka 28 | atlas.kafka.zookeeper.connect=localhost:9026 29 | atlas.kafka.bootstrap.servers=localhost:9027 30 | atlas.kafka.zookeeper.session.timeout.ms=400 31 | atlas.kafka.zookeeper.connection.timeout.ms=200 32 | atlas.kafka.zookeeper.sync.time.ms=20 33 | atlas.kafka.auto.commit.interval.ms=1000 34 | atlas.kafka.hook.group.id=atlas 35 | 36 | atlas.kafka.enable.auto.commit=false 37 | atlas.kafka.auto.offset.reset=earliest 38 | atlas.kafka.session.timeout.ms=30000 39 | atlas.kafka.offsets.topic.replication.factor=1 40 | atlas.kafka.poll.timeout.ms=1000 41 | 42 | atlas.notification.create.topics=true 43 | atlas.notification.replicas=1 44 | atlas.notification.topics=ATLAS_HOOK,ATLAS_ENTITIES 45 | atlas.notification.log.failed.messages=true 46 | atlas.notification.consumer.retry.interval=500 47 | atlas.notification.hook.retry.interval=1000 48 | 49 | atlas.enableTLS=false 50 | 51 | atlas.authentication.method.kerberos=false 52 | atlas.authentication.method.file=true 53 | 54 | atlas.authentication.method.ldap.type=none 55 | 56 | atlas.authentication.method.file.filename=/etc/conf/users-credentials.properties 57 | 58 | 59 | atlas.rest.address=http://localhost:21000 60 | 61 | atlas.audit.hbase.tablename=apache_atlas_entity_audit 62 | atlas.audit.zookeeper.session.timeout.ms=1000 63 | atlas.audit.hbase.zookeeper.quorum=localhost:2181 64 | 65 | atlas.server.ha.enabled=false 66 | atlas.authorizer.impl=simple 67 | atlas.authorizer.simple.authz.policy.file=atlas-simple-authz-policy.json 68 | atlas.rest-csrf.enabled=true 69 | atlas.rest-csrf.browser-useragents-regex=^Mozilla.*,^Opera.*,^Chrome.* 70 | atlas.rest-csrf.methods-to-ignore=GET,OPTIONS,HEAD,TRACE 71 | atlas.rest-csrf.custom-header=X-XSRF-HEADER 72 | 73 | atlas.metric.query.cache.ttlInSecs=900 74 | atlas.DeleteHandlerV1.impl=org.apache.atlas.repository.store.graph.v1.HardDeleteHandlerV1 75 | 76 | ######### Gremlin Search Configuration ######### 77 | 78 | #Set to false to disable gremlin search. 
79 | atlas.search.gremlin.enable=false -------------------------------------------------------------------------------- /charts/solr/templates/statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: {{ template "solr.fullname" . }} 5 | labels: 6 | app: {{ template "solr.name" . }} 7 | chart: {{ template "solr.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | serviceName: {{ template "solr.fullname" . }}-headless 12 | podManagementPolicy: {{ .Values.podManagementPolicy }} 13 | replicas: {{ .Values.replicaCount }} 14 | revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} 15 | selector: 16 | matchLabels: 17 | app: {{ template "solr.name" . }} 18 | release: {{ .Release.Name }} 19 | updateStrategy: 20 | {{ toYaml .Values.updateStrategy | indent 4 }} 21 | template: 22 | metadata: 23 | labels: 24 | app: {{ template "solr.name" . }} 25 | release: {{ .Release.Name }} 26 | annotations: 27 | {{- if .Values.podAnnotations }} 28 | ## Custom pod annotations 29 | {{- range $key, $value := .Values.podAnnotations }} 30 | {{ $key }}: {{ $value | quote }} 31 | {{- end }} 32 | {{- end }} 33 | spec: 34 | terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} 35 | {{- if .Values.securityContext.enabled }} 36 | securityContext: 37 | fsGroup: {{ .Values.securityContext.fsGroup }} 38 | runAsUser: {{ .Values.securityContext.runAsUser }} 39 | {{- end }} 40 | initContainers: 41 | - name: {{ .Chart.Name }}-init 42 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 43 | imagePullPolicy: {{ .Values.image.pullPolicy }} 44 | command: [ 45 | "/bin/bash", 46 | "-c", 47 | "solr zk ls / -z {{ .Release.Name }}-zookeeper:{{ .Values.zookeeper.ports.client.containerPort }} && 48 | /opt/docker-solr/scripts/init-solr-home" 49 | ] 50 | volumeMounts: 51 | - name: data 52 | mountPath: /opt/solr-home-cores/data 53 | subPath: data 54 | env: 55 | - name: ZK_CLIENT_TIMEOUT 56 | value: "{{ .Values.zkClientTimeout }}" 57 | - name: SOLR_HOME 58 | value: /opt/solr-home-cores/data 59 | - name: INIT_SOLR_HOME 60 | value: "yes" 61 | containers: 62 | - name: {{ .Chart.Name }} 63 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 64 | imagePullPolicy: {{ .Values.image.pullPolicy }} 65 | command: ["bin/solr"] 66 | args: ["-f", "-cloud"] 67 | env: 68 | - name: SOLR_POD_NAME 69 | valueFrom: 70 | fieldRef: 71 | fieldPath: metadata.name 72 | - name: SOLR_HOST 73 | value: $(SOLR_POD_NAME).{{ template "solr.fullname" . 
}}-headless 74 | - name: ZK_HOST 75 | value: "{{ .Release.Name }}-zookeeper:{{ .Values.zookeeper.ports.client.containerPort }}" 76 | - name: SOLR_TIMEZONE 77 | value: "{{ .Values.timeZone }}" 78 | - name: SOLR_HEAP 79 | value: "{{ .Values.heap }}" 80 | - name: SOLR_LOG_LEVEL 81 | value: "{{ .Values.logLevel }}" 82 | - name: SOLR_HOME 83 | value: /opt/solr-home-cores/data 84 | - name: INIT_SOLR_HOME 85 | value: "yes" 86 | ports: 87 | - name: solr-port 88 | containerPort: {{ .Values.ports.client.containerPort }} 89 | protocol: TCP 90 | livenessProbe: 91 | {{ toYaml .Values.livenessProbe | indent 12 }} 92 | readinessProbe: 93 | {{ toYaml .Values.readinessProbe | indent 12 }} 94 | volumeMounts: 95 | - name: data 96 | mountPath: /opt/solr-home-cores/data 97 | subPath: data 98 | lifecycle: 99 | preStop: 100 | exec: 101 | command: ["solr","stop", "-p", "{{ .Values.ports.client.containerPort }}"] 102 | resources: 103 | {{ toYaml .Values.resources | indent 12 }} 104 | {{- with .Values.nodeSelector }} 105 | nodeSelector: 106 | {{ toYaml . | indent 8 }} 107 | {{- end }} 108 | {{- with .Values.affinity }} 109 | affinity: 110 | {{ toYaml . | indent 8 }} 111 | {{- end }} 112 | {{- with .Values.tolerations }} 113 | tolerations: 114 | {{ toYaml . | indent 8 }} 115 | {{- end }} 116 | volumes: 117 | {{- if not .Values.persistence.enabled }} 118 | - name: data 119 | emptyDir: {} 120 | {{- end }} 121 | {{- if .Values.persistence.enabled }} 122 | volumeClaimTemplates: 123 | - metadata: 124 | name: data 125 | spec: 126 | accessModes: 127 | - {{ .Values.persistence.accessMode | quote }} 128 | resources: 129 | requests: 130 | storage: {{ .Values.persistence.size | quote }} 131 | {{- if .Values.persistence.storageClass }} 132 | {{- if (eq "-" .Values.persistence.storageClass) }} 133 | storageClassName: "" 134 | {{- else }} 135 | storageClassName: "{{ .Values.persistence.storageClass }}" 136 | {{- end }} 137 | {{- end }} 138 | {{- end }} 139 | -------------------------------------------------------------------------------- /charts/cassandra/values.yaml: -------------------------------------------------------------------------------- 1 | ## Cassandra image version 2 | ## ref: https://hub.docker.com/r/library/cassandra/ 3 | image: 4 | repo: cassandra 5 | tag: 3.11.3 6 | pullPolicy: IfNotPresent 7 | ## Specify ImagePullSecrets for Pods 8 | ## ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod 9 | # pullSecrets: myregistrykey 10 | 11 | ## Specify a service type 12 | ## ref: http://kubernetes.io/docs/user-guide/services/ 13 | service: 14 | type: ClusterIP 15 | 16 | ## Persist data to a persistent volume 17 | persistence: 18 | enabled: true 19 | ## cassandra data Persistent Volume Storage Class 20 | ## If defined, storageClassName: 21 | ## If set to "-", storageClassName: "", which disables dynamic provisioning 22 | ## If undefined (the default) or set to null, no storageClassName spec is 23 | ## set, choosing the default provisioner. 
(gp2 on AWS, standard on 24 | ## GKE, AWS & OpenStack) 25 | ## 26 | # storageClass: "-" 27 | accessMode: ReadWriteOnce 28 | size: 10Gi 29 | 30 | ## Configure resource requests and limits 31 | ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ 32 | ## Minimum memory for development is 4GB and 2 CPU cores 33 | ## Minimum memory for production is 8GB and 4 CPU cores 34 | ## ref: http://docs.datastax.com/en/archived/cassandra/2.0/cassandra/architecture/architecturePlanningHardware_c.html 35 | resources: {} 36 | # requests: 37 | # memory: 4Gi 38 | # cpu: 2 39 | # limits: 40 | # memory: 4Gi 41 | # cpu: 2 42 | 43 | ## Change cassandra configuration parameters below: 44 | ## ref: http://docs.datastax.com/en/cassandra/3.0/cassandra/configuration/configCassandra_yaml.html 45 | ## Recommended max heap size is 1/2 of system memory 46 | ## Recommended heap new size is 1/4 of max heap size 47 | ## ref: http://docs.datastax.com/en/cassandra/3.0/cassandra/operations/opsTuneJVM.html 48 | config: 49 | cluster_name: cassandra 50 | cluster_size: 1 51 | seed_size: 1 52 | num_tokens: 256 53 | # If you want Cassandra to use this datacenter and rack name, 54 | # you need to set endpoint_snitch to GossipingPropertyFileSnitch. 55 | # Otherwise, these values are ignored and datacenter1 and rack1 56 | # are used. 57 | dc_name: DC1 58 | rack_name: RAC1 59 | endpoint_snitch: SimpleSnitch 60 | max_heap_size: 2048M 61 | heap_new_size: 512M 62 | start_rpc: false 63 | ports: 64 | cql: 9042 65 | thrift: 9160 66 | # If a JVM Agent is in place 67 | # agent: 61621 68 | 69 | ## Custom env variables. 70 | ## ref: https://hub.docker.com/_/cassandra/ 71 | env: {} 72 | 73 | ## Liveness and Readiness probe values. 74 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ 75 | livenessProbe: 76 | initialDelaySeconds: 90 77 | periodSeconds: 30 78 | timeoutSeconds: 5 79 | successThreshold: 1 80 | failureThreshold: 3 81 | readinessProbe: 82 | initialDelaySeconds: 90 83 | periodSeconds: 30 84 | timeoutSeconds: 5 85 | successThreshold: 1 86 | failureThreshold: 3 87 | 88 | ## Configure node selector. Edit code below for adding selector to pods 89 | ## ref: https://kubernetes.io/docs/user-guide/node-selection/ 90 | # selector: 91 | # nodeSelector: 92 | # cloud.google.com/gke-nodepool: pool-db 93 | 94 | ## Additional pod annotations 95 | ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ 96 | podAnnotations: {} 97 | 98 | ## Additional pod labels 99 | ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ 100 | podLabels: {} 101 | 102 | ## Additional pod-level settings 103 | podSettings: 104 | # Change this to give pods more time to properly leave the cluster when not using persistent storage.
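# For example, terminationGracePeriodSeconds: 600 would allow up to 10 minutes for a node to decommission cleanly (the chart default below is 30).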
105 | terminationGracePeriodSeconds: 30 106 | 107 | ## Pod disruption budget 108 | podDisruptionBudget: {} 109 | # maxUnavailable: 1 110 | # minAvailable: 2 111 | 112 | podManagementPolicy: OrderedReady 113 | updateStrategy: 114 | type: OnDelete 115 | 116 | ## Pod Security Context 117 | securityContext: 118 | enabled: false 119 | fsGroup: 999 120 | runAsUser: 999 121 | 122 | ## Affinity for pod assignment 123 | ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity 124 | affinity: {} 125 | 126 | rbac: 127 | # Specifies whether RBAC resources should be created 128 | create: true 129 | 130 | serviceAccount: 131 | # Specifies whether a ServiceAccount should be created 132 | create: true 133 | # The name of the ServiceAccount to use. 134 | # If not set and create is true, a name is generated using the fullname template 135 | # name: 136 | 137 | ## Backup cronjob configuration 138 | ## Ref: https://github.com/maorfr/cain 139 | backup: 140 | enabled: false 141 | 142 | # Schedule to run jobs. Must be in cron time format 143 | # Ref: https://crontab.guru/ 144 | schedule: 145 | - keyspace: keyspace1 146 | cron: "0 7 * * *" 147 | - keyspace: keyspace2 148 | cron: "30 7 * * *" 149 | 150 | annotations: 151 | # Example for authorization using kube2iam 152 | # Can also be done using environment variables 153 | iam.amazonaws.com/role: cain 154 | 155 | image: 156 | repos: maorfr/cain 157 | tag: 0.1.0 158 | 159 | # Add additional environment variables 160 | env: 161 | # Example environment variable required for AWS credentials chain 162 | - name: AWS_REGION 163 | value: us-east-1 164 | 165 | resources: 166 | requests: 167 | memory: 1Gi 168 | cpu: 1 169 | limits: 170 | memory: 1Gi 171 | cpu: 1 172 | 173 | # Destination to store the backup artifacts 174 | # Currently only s3 is supported 175 | # Additional support can be added. 176 | # Ref: https://github.com/maorfr/skbn 177 | destination: s3://bucket/cassandra 178 | 179 | ## Cassandra exporter configuration 180 | ## ref: https://github.com/criteo/cassandra_exporter 181 | exporter: 182 | enabled: false 183 | image: 184 | repo: criteord/cassandra_exporter 185 | tag: 2.0.2 186 | port: 5556 187 | jvmOpts: "" 188 | -------------------------------------------------------------------------------- /charts/zookeeper/README.md: -------------------------------------------------------------------------------- 1 | # incubator/zookeeper 2 | 3 | This helm chart provides an implementation of the ZooKeeper [StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/) found in Kubernetes Contrib [Zookeeper StatefulSet](https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper). 4 | 5 | ## Prerequisites 6 | * Kubernetes 1.6+ 7 | * PersistentVolume support on the underlying infrastructure 8 | * A dynamic provisioner for the PersistentVolumes 9 | * A familiarity with [Apache ZooKeeper 3.4.x](https://zookeeper.apache.org/doc/current/) 10 | 11 | ## Chart Components 12 | This chart will do the following: 13 | 14 | * Create a fixed size ZooKeeper ensemble using a [StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/). 15 | * Create a [PodDisruptionBudget](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-disruption-budget/) so kubectl drain will respect the Quorum size of the ensemble. 16 | * Create a [Headless Service](https://kubernetes.io/docs/concepts/services-networking/service/) to control the domain of the ZooKeeper ensemble.
17 | * Create a Service configured to connect to the available ZooKeeper instance on the configured client port. 18 | * Optionally apply a [Pod Anti-Affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) to spread the ZooKeeper ensemble across nodes. 19 | * Optionally start JMX Exporter and Zookeeper Exporter containers inside Zookeeper pods. 20 | * Optionally create a job which creates Zookeeper chroots (e.g. `/kafka1`). 21 | 22 | ## Installing the Chart 23 | You can install the chart with the release name `zookeeper` as below. 24 | 25 | ```console 26 | $ helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator 27 | $ helm install --name zookeeper incubator/zookeeper 28 | ``` 29 | 30 | If you do not specify a name, helm will select a name for you. 31 | 32 | ### Installed Components 33 | You can use `helm status` to view the installed components. 34 | 35 | ```console 36 | $ helm status zookeeper 37 | NAME: zookeeper 38 | LAST DEPLOYED: Wed Apr 11 17:09:48 2018 39 | NAMESPACE: default 40 | STATUS: DEPLOYED 41 | 42 | RESOURCES: 43 | ==> v1beta1/PodDisruptionBudget 44 | NAME       MIN AVAILABLE  MAX UNAVAILABLE  ALLOWED DISRUPTIONS  AGE 45 | zookeeper  N/A            1                1                    2m 46 | 47 | ==> v1/Service 48 | NAME                TYPE       CLUSTER-IP     EXTERNAL-IP  PORT(S)                     AGE 49 | zookeeper-headless  ClusterIP  None                        2181/TCP,3888/TCP,2888/TCP  2m 50 | zookeeper           ClusterIP  10.98.179.165               2181/TCP                    2m 51 | 52 | ==> v1beta1/StatefulSet 53 | NAME       DESIRED  CURRENT  AGE 54 | zookeeper  3        3        2m 55 | ``` 56 | 57 | 1. `statefulsets/zookeeper` is the StatefulSet created by the chart. 58 | 1. `po/zookeeper-<0|1|2>` are the Pods created by the StatefulSet. Each Pod has a single container running a ZooKeeper server. 59 | 1. `svc/zookeeper-headless` is the Headless Service used to control the network domain of the ZooKeeper ensemble. 60 | 1. `svc/zookeeper` is a Service that can be used by clients to connect to an available ZooKeeper server. 61 | 62 | ## Configuration 63 | You can specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 64 | 65 | Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, 66 | 67 | ```console 68 | $ helm install --name my-release -f values.yaml incubator/zookeeper 69 | ``` 70 | 71 | ## Default Values 72 | 73 | - You can find all user-configurable settings, their defaults and commentary about them in [values.yaml](values.yaml). 74 | 75 | ## Deep Dive 76 | 77 | ## Image Details 78 | The image used for this chart is based on Ubuntu 16.04 LTS. This image is larger than Alpine or BusyBox, but it provides glibc, rather than uClibc or musl, and a JVM release that is built against it. You can easily convert this chart to run against a smaller image with a JVM that is built against that image's libc. However, as far as we know, no Hadoop vendor supports, or has verified, ZooKeeper running on such a JVM. 79 | 80 | ## JVM Details 81 | The Java Virtual Machine used for this chart is the OpenJDK JVM 8u111 JRE (headless). 82 | 83 | ## ZooKeeper Details 84 | The ZooKeeper version is the latest stable version (3.4.10). The distribution is installed into /opt/zookeeper-3.4.10. This directory is symbolically linked to /opt/zookeeper. Symlinks are created to simulate an RPM installation into /usr. 85 | 86 | ## Failover 87 | You can test failover by killing the leader.
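To find the current leader, you can ask each server for its mode. A minimal sketch, assuming the release name `zookeeper` from the install example above (`zkServer.sh status` prints `Mode: leader` or `Mode: follower`):

```console
$ for i in 0 1 2; do kubectl exec zookeeper-$i -- /opt/zookeeper/bin/zkServer.sh status; done
```

Once you know which Pod is the leader, you can delete just that Pod, or follow the full walkthrough below.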
Insert a key: 88 | ```console 89 | $ kubectl exec zookeeper-0 -- /opt/zookeeper/bin/zkCli.sh create /foo bar; 90 | $ kubectl exec zookeeper-2 -- /opt/zookeeper/bin/zkCli.sh get /foo; 91 | ``` 92 | 93 | Watch existing members: 94 | ```console 95 | $ kubectl run --attach bbox --image=busybox --restart=Never -- sh -c 'while true; do for i in 0 1 2; do echo zk-${i} $(echo stats | nc zookeeper-${i}.zookeeper-headless 2181 | grep Mode); sleep 1; done; done'; 96 | 97 | zk-2 Mode: follower 98 | zk-0 Mode: follower 99 | zk-1 Mode: leader 100 | zk-2 Mode: follower 101 | ``` 102 | 103 | Delete Pods and wait for the StatefulSet controller to bring them back up: 104 | ```console 105 | $ kubectl delete po -l app=zookeeper 106 | $ kubectl get po --watch-only 107 | NAME          READY  STATUS             RESTARTS  AGE 108 | zookeeper-0   0/1    Running            0         35s 109 | zookeeper-0   1/1    Running            0         50s 110 | zookeeper-1   0/1    Pending            0         0s 111 | zookeeper-1   0/1    Pending            0         0s 112 | zookeeper-1   0/1    ContainerCreating  0         0s 113 | zookeeper-1   0/1    Running            0         19s 114 | zookeeper-1   1/1    Running            0         40s 115 | zookeeper-2   0/1    Pending            0         0s 116 | zookeeper-2   0/1    Pending            0         0s 117 | zookeeper-2   0/1    ContainerCreating  0         0s 118 | zookeeper-2   0/1    Running            0         19s 119 | zookeeper-2   1/1    Running            0         41s 120 | ``` 121 | 122 | Check the previously inserted key: 123 | ```console 124 | $ kubectl exec zookeeper-1 -- /opt/zookeeper/bin/zkCli.sh get /foo 125 | sessionid = 0x354887858e80035, negotiated timeout = 30000 126 | 127 | WATCHER:: 128 | 129 | WatchedEvent state:SyncConnected type:None path:null 130 | bar 131 | ``` 132 | 133 | ## Scaling 134 | ZooKeeper cannot be safely scaled in versions prior to 3.5.x. This chart currently uses 3.4.x. There are manual procedures for scaling a 3.4.x ensemble, but as noted in the [ZooKeeper 3.5.2 documentation](https://zookeeper.apache.org/doc/r3.5.2-alpha/zookeeperReconfig.html) these procedures require a rolling restart, are known to be error prone, and often result in data loss. 135 | 136 | While ZooKeeper 3.5.x does allow for dynamic ensemble reconfiguration (including scaling membership), the current status of the release is still alpha, and 3.5.x is therefore not recommended for production use. 137 | 138 | ## Limitations 139 | * StatefulSet and PodDisruptionBudget are beta resources. 140 | * Only supports storage options that have backends for persistent volume claims. 141 | -------------------------------------------------------------------------------- /charts/cassandra/templates/statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 |   name: {{ template "cassandra.fullname" . }} 5 |   labels: 6 |     app: {{ template "cassandra.name" . }} 7 |     chart: {{ template "cassandra.chart" . }} 8 |     release: {{ .Release.Name }} 9 |     heritage: {{ .Release.Service }} 10 | spec: 11 |   selector: 12 |     matchLabels: 13 |       app: {{ template "cassandra.name" . }} 14 |       release: {{ .Release.Name }} 15 |   serviceName: {{ template "cassandra.fullname" . }} 16 |   replicas: {{ .Values.config.cluster_size }} 17 |   podManagementPolicy: {{ .Values.podManagementPolicy }} 18 |   updateStrategy: 19 |     type: {{ .Values.updateStrategy.type }} 20 |   template: 21 |     metadata: 22 |       labels: 23 |         app: {{ template "cassandra.name" .
}} 24 | release: {{ .Release.Name }} 25 | {{- if .Values.podLabels }} 26 | {{ toYaml .Values.podLabels | indent 8 }} 27 | {{- end }} 28 | {{- if .Values.podAnnotations }} 29 | annotations: 30 | {{ toYaml .Values.podAnnotations | indent 8 }} 31 | {{- end }} 32 | spec: 33 | {{- if .Values.selector }} 34 | {{ toYaml .Values.selector | indent 6 }} 35 | {{- end }} 36 | {{- if .Values.securityContext.enabled }} 37 | securityContext: 38 | fsGroup: {{ .Values.securityContext.fsGroup }} 39 | runAsUser: {{ .Values.securityContext.runAsUser }} 40 | {{- end }} 41 | {{- if .Values.affinity }} 42 | affinity: 43 | {{ toYaml .Values.affinity | indent 8 }} 44 | {{- end }} 45 | containers: 46 | {{- if .Values.exporter.enabled }} 47 | - name: cassandra-exporter 48 | image: "{{ .Values.exporter.image.repo }}:{{ .Values.exporter.image.tag }}" 49 | env: 50 | - name: CASSANDRA_EXPORTER_CONFIG_listenPort 51 | value: {{ .Values.exporter.port | quote }} 52 | - name: JVM_OPTS 53 | value: {{ .Values.exporter.jvmOpts | quote }} 54 | ports: 55 | - name: metrics 56 | containerPort: {{ .Values.exporter.port }} 57 | protocol: TCP 58 | - name: jmx 59 | containerPort: 5555 60 | livenessProbe: 61 | tcpSocket: 62 | port: {{ .Values.exporter.port }} 63 | readinessProbe: 64 | httpGet: 65 | path: /metrics 66 | port: {{ .Values.exporter.port }} 67 | initialDelaySeconds: 20 68 | timeoutSeconds: 45 69 | {{- end }} 70 | - name: {{ template "cassandra.fullname" . }} 71 | image: "{{ .Values.image.repo }}:{{ .Values.image.tag }}" 72 | imagePullPolicy: {{ .Values.image.pullPolicy | quote }} 73 | resources: 74 | {{ toYaml .Values.resources | indent 10 }} 75 | env: 76 | {{- $seed_size := default 1 .Values.config.seed_size | int -}} 77 | {{- $global := . }} 78 | - name: CASSANDRA_SEEDS 79 | value: "{{- range $i, $e := until $seed_size }}{{ template "cassandra.fullname" $global }}-{{ $i }}.{{ template "cassandra.fullname" $global }}.{{ $global.Release.Namespace }}.svc.cluster.local{{- if (lt ( add1 $i ) $seed_size ) }},{{- end }}{{- end }}" 80 | - name: MAX_HEAP_SIZE 81 | value: {{ default "8192M" .Values.config.max_heap_size | quote }} 82 | - name: HEAP_NEWSIZE 83 | value: {{ default "200M" .Values.config.heap_new_size | quote }} 84 | - name: CASSANDRA_ENDPOINT_SNITCH 85 | value: {{ default "SimpleSnitch" .Values.config.endpoint_snitch | quote }} 86 | - name: CASSANDRA_CLUSTER_NAME 87 | value: {{ default "Cassandra" .Values.config.cluster_name | quote }} 88 | - name: CASSANDRA_DC 89 | value: {{ default "DC1" .Values.config.dc_name | quote }} 90 | - name: CASSANDRA_RACK 91 | value: {{ default "RAC1" .Values.config.rack_name | quote }} 92 | - name: CASSANDRA_START_RPC 93 | value: {{ default "false" .Values.config.start_rpc | quote }} 94 | - name: POD_IP 95 | valueFrom: 96 | fieldRef: 97 | fieldPath: status.podIP 98 | {{- range $key, $value := .Values.env }} 99 | - name: {{ $key | quote }} 100 | value: {{ $value | quote }} 101 | {{- end }} 102 | livenessProbe: 103 | exec: 104 | command: [ "/bin/sh", "-c", "nodetool status" ] 105 | initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} 106 | periodSeconds: {{ .Values.livenessProbe.periodSeconds }} 107 | timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} 108 | successThreshold: {{ .Values.livenessProbe.successThreshold }} 109 | failureThreshold: {{ .Values.livenessProbe.failureThreshold }} 110 | readinessProbe: 111 | exec: 112 | command: [ "/bin/sh", "-c", "nodetool status | grep -E \"^UN\\s+${POD_IP}\"" ] 113 | initialDelaySeconds: {{ 
.Values.readinessProbe.initialDelaySeconds }} 114 | periodSeconds: {{ .Values.readinessProbe.periodSeconds }} 115 | timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} 116 | successThreshold: {{ .Values.readinessProbe.successThreshold }} 117 | failureThreshold: {{ .Values.readinessProbe.failureThreshold }} 118 | ports: 119 | - name: intra 120 | containerPort: 7000 121 | - name: tls 122 | containerPort: 7001 123 | - name: jmx 124 | containerPort: 7199 125 | - name: cql 126 | containerPort: {{ default 9042 .Values.config.ports.cql }} 127 | - name: thrift 128 | containerPort: {{ default 9160 .Values.config.ports.thrift }} 129 | {{- if .Values.config.ports.agent }} 130 | - name: agent 131 | containerPort: {{ .Values.config.ports.agent }} 132 | {{- end }} 133 | volumeMounts: 134 | - name: data 135 | mountPath: /var/lib/cassandra 136 | {{- if not .Values.persistence.enabled }} 137 | lifecycle: 138 | preStop: 139 | exec: 140 | command: ["/bin/sh", "-c", "exec nodetool decommission"] 141 | {{- end }} 142 | terminationGracePeriodSeconds: {{ default 30 .Values.podSettings.terminationGracePeriodSeconds }} 143 | {{- if .Values.image.pullSecrets }} 144 | imagePullSecrets: 145 | - name: {{ .Values.image.pullSecrets }} 146 | {{- end }} 147 | {{- if not .Values.persistence.enabled }} 148 | volumes: 149 | - name: data 150 | emptyDir: {} 151 | {{- else }} 152 | volumeClaimTemplates: 153 | - metadata: 154 | name: data 155 | labels: 156 | app: {{ template "cassandra.name" . }} 157 | chart: {{ template "cassandra.chart" . }} 158 | release: {{ .Release.Name }} 159 | heritage: {{ .Release.Service }} 160 | spec: 161 | accessModes: 162 | - {{ .Values.persistence.accessMode | quote }} 163 | resources: 164 | requests: 165 | storage: {{ .Values.persistence.size | quote }} 166 | {{- if .Values.persistence.storageClass }} 167 | {{- if (eq "-" .Values.persistence.storageClass) }} 168 | storageClassName: "" 169 | {{- else }} 170 | storageClassName: "{{ .Values.persistence.storageClass }}" 171 | {{- end }} 172 | {{- end }} 173 | {{- end }} 174 | -------------------------------------------------------------------------------- /charts/zookeeper/templates/statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: StatefulSet 3 | metadata: 4 | name: {{ template "zookeeper.fullname" . }} 5 | labels: 6 | app: {{ template "zookeeper.name" . }} 7 | chart: {{ template "zookeeper.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | component: server 11 | spec: 12 | serviceName: {{ template "zookeeper.fullname" . }}-headless 13 | replicas: {{ .Values.replicaCount }} 14 | selector: 15 | matchLabels: 16 | app: {{ template "zookeeper.name" . }} 17 | release: {{ .Release.Name }} 18 | component: server 19 | updateStrategy: 20 | {{ toYaml .Values.updateStrategy | indent 4 }} 21 | template: 22 | metadata: 23 | labels: 24 | app: {{ template "zookeeper.name" . 
}} 25 | release: {{ .Release.Name }} 26 | component: server 27 | {{- if .Values.podLabels }} 28 | ## Custom pod labels 29 | {{- range $key, $value := .Values.podLabels }} 30 | {{ $key }}: {{ $value | quote }} 31 | {{- end }} 32 | {{- end }} 33 | annotations: 34 | {{- if .Values.podAnnotations }} 35 | ## Custom pod annotations 36 | {{- range $key, $value := .Values.podAnnotations }} 37 | {{ $key }}: {{ $value | quote }} 38 | {{- end }} 39 | {{- end }} 40 | spec: 41 | terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} 42 | {{- if .Values.schedulerName }} 43 | schedulerName: "{{ .Values.schedulerName }}" 44 | {{- end }} 45 | securityContext: 46 | {{ toYaml .Values.securityContext | indent 8 }} 47 | {{- if .Values.priorityClassName }} 48 | priorityClassName: "{{ .Values.priorityClassName }}" 49 | {{- end }} 50 | containers: 51 | 52 | - name: zookeeper 53 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 54 | imagePullPolicy: {{ .Values.image.pullPolicy }} 55 | command: 56 | - /bin/bash 57 | - -xec 58 | - zkGenConfig.sh && exec zkServer.sh start-foreground 59 | ports: 60 | {{- range $key, $port := .Values.ports }} 61 | - name: {{ $key }} 62 | {{ toYaml $port | indent 14 }} 63 | {{- end }} 64 | livenessProbe: 65 | {{ toYaml .Values.livenessProbe | indent 12 }} 66 | readinessProbe: 67 | {{ toYaml .Values.readinessProbe | indent 12 }} 68 | env: 69 | - name: ZK_REPLICAS 70 | value: {{ .Values.replicaCount | quote }} 71 | {{- range $key, $value := .Values.env }} 72 | - name: {{ $key | upper | replace "." "_" }} 73 | value: {{ $value | quote }} 74 | {{- end }} 75 | {{- range $secret := .Values.secrets }} 76 | {{- range $key := $secret.keys }} 77 | - name: {{ (print $secret.name "_" $key) | upper }} 78 | valueFrom: 79 | secretKeyRef: 80 | name: {{ $secret.name }} 81 | key: {{ $key }} 82 | {{- end }} 83 | {{- end }} 84 | resources: 85 | {{ toYaml .Values.resources | indent 12 }} 86 | volumeMounts: 87 | - name: data 88 | mountPath: /var/lib/zookeeper 89 | {{- range $secret := .Values.secrets }} 90 | {{- if $secret.mountPath }} 91 | {{- range $key := $secret.keys }} 92 | - name: {{ $.Release.Name }}-{{ $secret.name }} 93 | mountPath: {{ $secret.mountPath }}/{{ $key }} 94 | subPath: {{ $key }} 95 | readOnly: true 96 | {{- end }} 97 | {{- end }} 98 | {{- end }} 99 | 100 | 101 | {{- if .Values.exporters.jmx.enabled }} 102 | - name: jmx-exporter 103 | image: "{{ .Values.exporters.jmx.image.repository }}:{{ .Values.exporters.jmx.image.tag }}" 104 | imagePullPolicy: {{ .Values.exporters.jmx.image.pullPolicy }} 105 | ports: 106 | {{- range $key, $port := .Values.exporters.jmx.ports }} 107 | - name: {{ $key }} 108 | {{ toYaml $port | indent 14 }} 109 | {{- end }} 110 | livenessProbe: 111 | {{ toYaml .Values.exporters.jmx.livenessProbe | indent 12 }} 112 | readinessProbe: 113 | {{ toYaml .Values.exporters.jmx.readinessProbe | indent 12 }} 114 | env: 115 | - name: SERVICE_PORT 116 | value: {{ .Values.exporters.jmx.ports.jmxxp.containerPort | quote }} 117 | {{- with .Values.exporters.jmx.env }} 118 | {{- range $key, $value := . }} 119 | - name: {{ $key | upper | replace "." 
"_" }} 120 | value: {{ $value | quote }} 121 | {{- end }} 122 | {{- end }} 123 | resources: 124 | {{ toYaml .Values.exporters.jmx.resources | indent 12 }} 125 | volumeMounts: 126 | - name: config-jmx-exporter 127 | mountPath: /opt/jmx_exporter/config.yml 128 | subPath: config.yml 129 | {{- end }} 130 | 131 | {{- if .Values.exporters.zookeeper.enabled }} 132 | - name: zookeeper-exporter 133 | image: "{{ .Values.exporters.zookeeper.image.repository }}:{{ .Values.exporters.zookeeper.image.tag }}" 134 | imagePullPolicy: {{ .Values.exporters.zookeeper.image.pullPolicy }} 135 | args: 136 | - -bind-addr=:{{ .Values.exporters.zookeeper.ports.zookeeperxp.containerPort }} 137 | - -metrics-path={{ .Values.exporters.zookeeper.path }} 138 | - -zookeeper=localhost:{{ .Values.ports.client.containerPort }} 139 | - -log-level={{ .Values.exporters.zookeeper.config.logLevel }} 140 | - -reset-on-scrape={{ .Values.exporters.zookeeper.config.resetOnScrape }} 141 | ports: 142 | {{- range $key, $port := .Values.exporters.zookeeper.ports }} 143 | - name: {{ $key }} 144 | {{ toYaml $port | indent 14 }} 145 | {{- end }} 146 | livenessProbe: 147 | {{ toYaml .Values.exporters.zookeeper.livenessProbe | indent 12 }} 148 | readinessProbe: 149 | {{ toYaml .Values.exporters.zookeeper.readinessProbe | indent 12 }} 150 | env: 151 | {{- range $key, $value := .Values.exporters.zookeeper.env }} 152 | - name: {{ $key | upper | replace "." "_" }} 153 | value: {{ $value | quote }} 154 | {{- end }} 155 | resources: 156 | {{ toYaml .Values.exporters.zookeeper.resources | indent 12 }} 157 | {{- end }} 158 | 159 | {{- with .Values.nodeSelector }} 160 | nodeSelector: 161 | {{ toYaml . | indent 8 }} 162 | {{- end }} 163 | {{- with .Values.affinity }} 164 | affinity: 165 | {{ toYaml . | indent 8 }} 166 | {{- end }} 167 | {{- with .Values.tolerations }} 168 | tolerations: 169 | {{ toYaml . | indent 8 }} 170 | {{- end }} 171 | volumes: 172 | {{- range .Values.secrets }} 173 | - name: {{ $.Release.Name }}-{{ .name }} 174 | secret: 175 | secretName: {{ .name }} 176 | {{- end }} 177 | {{- if .Values.exporters.jmx.enabled }} 178 | - name: config-jmx-exporter 179 | configMap: 180 | name: {{ .Release.Name }}-jmx-exporter 181 | {{- end }} 182 | {{- if not .Values.persistence.enabled }} 183 | - name: data 184 | emptyDir: {} 185 | {{- end }} 186 | {{- if .Values.persistence.enabled }} 187 | volumeClaimTemplates: 188 | - metadata: 189 | name: data 190 | spec: 191 | accessModes: 192 | - {{ .Values.persistence.accessMode | quote }} 193 | resources: 194 | requests: 195 | storage: {{ .Values.persistence.size | quote }} 196 | {{- if .Values.persistence.storageClass }} 197 | {{- if (eq "-" .Values.persistence.storageClass) }} 198 | storageClassName: "" 199 | {{- else }} 200 | storageClassName: "{{ .Values.persistence.storageClass }}" 201 | {{- end }} 202 | {{- end }} 203 | {{- end }} 204 | -------------------------------------------------------------------------------- /charts/zookeeper/values.yaml: -------------------------------------------------------------------------------- 1 | ## As weighted quorums are not supported, it is imperative that an odd number of replicas 2 | ## be chosen. Moreover, the number of replicas should be either 1, 3, 5, or 7. 3 | ## 4 | ## ref: https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper#stateful-set 5 | replicaCount: 1 # Desired quantity of ZooKeeper pods. 
This should always be 1, 3, 5, or 7. 6 | 7 | podDisruptionBudget: 8 |   maxUnavailable: 1  # Limits how many ZooKeeper pods may be unavailable due to voluntary disruptions. 9 | 10 | terminationGracePeriodSeconds: 1800  # Duration in seconds a ZooKeeper pod needs to terminate gracefully. 11 | 12 | ## OnDelete requires you to manually delete each pod when making updates. 13 | ## This approach is currently safer than RollingUpdate because replication 14 | ## may be incomplete when the replication source pod is killed. 15 | ## 16 | ## ref: http://blog.kubernetes.io/2017/09/kubernetes-statefulsets-daemonsets.html 17 | updateStrategy: 18 |   type: OnDelete  # Pods will only be created when you manually delete old pods. 19 | 20 | ## refs: 21 | ## - https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper 22 | ## - https://github.com/kubernetes/contrib/blob/master/statefulsets/zookeeper/Makefile#L1 23 | image: 24 |   repository: gcr.io/google_samples/k8szk  # Container image repository for zookeeper container. 25 |   tag: v3  # Container image tag for zookeeper container. 26 |   pullPolicy: IfNotPresent  # Image pull criteria for zookeeper container. 27 | 28 | service: 29 |   type: ClusterIP  # Exposes zookeeper on a cluster-internal IP. 30 |   annotations: {}  # Arbitrary non-identifying metadata for zookeeper service. 31 |     ## AWS example for use with LoadBalancer service type. 32 |     # external-dns.alpha.kubernetes.io/hostname: zookeeper.cluster.local 33 |     # service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" 34 |     # service.beta.kubernetes.io/aws-load-balancer-internal: "true" 35 |   ports: 36 |     client: 37 |       port: 2181  # Service port number for client port. 38 |       targetPort: client  # Service target port for client port. 39 |       protocol: TCP  # Service port protocol for client port. 40 | 41 | ## Headless service. 42 | ## 43 | headless: 44 |   annotations: {} 45 | 46 | ports: 47 |   client: 48 |     containerPort: 2181  # Port number for zookeeper container client port. 49 |     protocol: TCP  # Protocol for zookeeper container client port. 50 |   election: 51 |     containerPort: 3888  # Port number for zookeeper container election port. 52 |     protocol: TCP  # Protocol for zookeeper container election port. 53 |   server: 54 |     containerPort: 2888  # Port number for zookeeper container server port. 55 |     protocol: TCP  # Protocol for zookeeper container server port. 56 | 57 | resources: {}  # Optionally specify how much CPU and memory (RAM) each zookeeper container needs. 58 |   # We usually recommend not to specify default resources and to leave this as a conscious 59 |   # choice for the user. This also increases the chances the chart runs on environments with 60 |   # limited resources, such as Minikube. If you do want to specify resources, uncomment the following 61 |   # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 62 |   # limits: 63 |   #   cpu: 100m 64 |   #   memory: 128Mi 65 |   # requests: 66 |   #   cpu: 100m 67 |   #   memory: 128Mi 68 | 69 | priorityClassName: "" 70 | 71 | nodeSelector: {}  # Node label-values required to run zookeeper pods. 72 | 73 | tolerations: []  # Node taint overrides for zookeeper pods. 74 | 75 | affinity: {}  # Criteria by which pod label-values influence scheduling for zookeeper pods. 76 |   # podAntiAffinity: 77 |   #   requiredDuringSchedulingIgnoredDuringExecution: 78 |   #     - topologyKey: "kubernetes.io/hostname" 79 |   #       labelSelector: 80 |   #         matchLabels: 81 |   #           release: zookeeper 82 | 83 | podAnnotations: {}  # Arbitrary non-identifying metadata for zookeeper pods.
84 |   # prometheus.io/scrape: "true" 85 |   # prometheus.io/path: "/metrics" 86 |   # prometheus.io/port: "9141" 87 | 88 | podLabels: {}  # Key/value pairs that are attached to zookeeper pods. 89 |   # team: "developers" 90 |   # service: "zookeeper" 91 | 92 | livenessProbe: 93 |   exec: 94 |     command: 95 |       - zkOk.sh 96 |   initialDelaySeconds: 20 97 |   # periodSeconds: 30 98 |   # timeoutSeconds: 30 99 |   # failureThreshold: 6 100 |   # successThreshold: 1 101 | 102 | readinessProbe: 103 |   exec: 104 |     command: 105 |       - zkOk.sh 106 |   initialDelaySeconds: 20 107 |   # periodSeconds: 30 108 |   # timeoutSeconds: 30 109 |   # failureThreshold: 6 110 |   # successThreshold: 1 111 | 112 | securityContext: 113 |   fsGroup: 1000 114 |   runAsUser: 1000 115 | 116 | ## Useful if using any custom authorizer. 117 | ## Pass any secrets to the zookeeper pods. Each secret will be passed as an 118 | ## environment variable by default. The secret can also be mounted to a 119 | ## specific path (in addition to the environment variable) if required. Environment 120 | ## variable names are generated as: `<secretName>_<key>` (all upper case) 121 | # secrets: 122 | # - name: myKafkaSecret 123 | #   keys: 124 | #     - username 125 | #     - password 126 | #   # mountPath: /opt/kafka/secret 127 | # - name: myZkSecret 128 | #   keys: 129 | #     - user 130 | #     - pass 131 | #   mountPath: /opt/zookeeper/secret 132 | 133 | persistence: 134 |   enabled: true 135 |   ## zookeeper data Persistent Volume Storage Class 136 |   ## If defined, storageClassName: <storageClass> 137 |   ## If set to "-", storageClassName: "", which disables dynamic provisioning 138 |   ## If undefined (the default) or set to null, no storageClassName spec is 139 |   ## set, choosing the default provisioner. (gp2 on AWS, standard on 140 |   ## GKE, AWS & OpenStack) 141 |   ## 142 |   # storageClass: "-" 143 |   accessMode: ReadWriteOnce 144 |   size: 5Gi 145 | 146 | ## Exporters query apps for metrics and make those metrics available for 147 | ## Prometheus to scrape.
148 | exporters: 149 | 150 |   jmx: 151 |     enabled: false 152 |     image: 153 |       repository: sscaling/jmx-prometheus-exporter 154 |       tag: 0.3.0 155 |       pullPolicy: IfNotPresent 156 |     config: 157 |       lowercaseOutputName: false 158 |       ## ref: https://github.com/prometheus/jmx_exporter/blob/master/example_configs/zookeeper.yaml 159 |       rules: 160 |         - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+)><>(\\w+)" 161 |           name: "zookeeper_$2" 162 |         - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)" 163 |           name: "zookeeper_$3" 164 |           labels: 165 |             replicaId: "$2" 166 |         - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(\\w+)" 167 |           name: "zookeeper_$4" 168 |           labels: 169 |             replicaId: "$2" 170 |             memberType: "$3" 171 |         - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+), name3=(\\w+)><>(\\w+)" 172 |           name: "zookeeper_$4_$5" 173 |           labels: 174 |             replicaId: "$2" 175 |             memberType: "$3" 176 |       startDelaySeconds: 30 177 |     env: {} 178 |     resources: {} 179 |     path: /metrics 180 |     ports: 181 |       jmxxp: 182 |         containerPort: 9404 183 |         protocol: TCP 184 |     livenessProbe: 185 |       httpGet: 186 |         path: /metrics 187 |         port: jmxxp 188 |       initialDelaySeconds: 30 189 |       periodSeconds: 15 190 |       timeoutSeconds: 60 191 |       failureThreshold: 8 192 |       successThreshold: 1 193 |     readinessProbe: 194 |       httpGet: 195 |         path: /metrics 196 |         port: jmxxp 197 |       initialDelaySeconds: 30 198 |       periodSeconds: 15 199 |       timeoutSeconds: 60 200 |       failureThreshold: 8 201 |       successThreshold: 1 202 | 203 |   zookeeper: 204 |     ## refs: 205 |     ## - https://github.com/carlpett/zookeeper_exporter 206 |     ## - https://hub.docker.com/r/josdotso/zookeeper-exporter/ 207 |     ## - https://www.datadoghq.com/blog/monitoring-kafka-performance-metrics/#zookeeper-metrics 208 |     enabled: false 209 |     image: 210 |       repository: josdotso/zookeeper-exporter 211 |       tag: v1.1.2 212 |       pullPolicy: IfNotPresent 213 |     config: 214 |       logLevel: info 215 |       resetOnScrape: "true" 216 |     env: {} 217 |     resources: {} 218 |     path: /metrics 219 |     ports: 220 |       zookeeperxp: 221 |         containerPort: 9141 222 |         protocol: TCP 223 |     livenessProbe: 224 |       httpGet: 225 |         path: /metrics 226 |         port: zookeeperxp 227 |       initialDelaySeconds: 30 228 |       periodSeconds: 15 229 |       timeoutSeconds: 60 230 |       failureThreshold: 8 231 |       successThreshold: 1 232 |     readinessProbe: 233 |       httpGet: 234 |         path: /metrics 235 |         port: zookeeperxp 236 |       initialDelaySeconds: 30 237 |       periodSeconds: 15 238 |       timeoutSeconds: 60 239 |       failureThreshold: 8 240 |       successThreshold: 1 241 | 242 | ## Use an alternate scheduler, e.g. "stork". 243 | ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ 244 | ## 245 | # schedulerName: 246 | 247 | ## ref: https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper 248 | env: 249 | 250 |   ## Options related to JMX exporter. 251 |   ## ref: https://github.com/apache/zookeeper/blob/master/bin/zkServer.sh#L36 252 |   JMXAUTH: "false" 253 |   JMXDISABLE: "false" 254 |   JMXPORT: 1099 255 |   JMXSSL: "false" 256 | 257 |   ## The port on which the server will accept client requests. 258 |   ZK_CLIENT_PORT: 2181 259 | 260 |   ## The port on which the ensemble performs leader election. 261 |   ZK_ELECTION_PORT: 3888 262 | 263 |   ## The JVM heap size. 264 |   ZK_HEAP_SIZE: 2G 265 | 266 |   ## The number of Ticks that an ensemble member is allowed to take to perform leader 267 |   ## election. 268 |   ZK_INIT_LIMIT: 5 269 | 270 |   ## The log level for the ZooKeeper process's logger. 271 |   ## Choices are `TRACE,DEBUG,INFO,WARN,ERROR,FATAL`.
272 |   ZK_LOG_LEVEL: INFO 273 | 274 |   ## The maximum number of concurrent client connections that 275 |   ## a server in the ensemble will accept. 276 |   ZK_MAX_CLIENT_CNXNS: 60 277 | 278 |   ## The maximum session timeout that the ensemble will allow a client to request. 279 |   ## Upstream default is `20 * ZK_TICK_TIME`. 280 |   ZK_MAX_SESSION_TIMEOUT: 40000 281 | 282 |   ## The minimum session timeout that the ensemble will allow a client to request. 283 |   ## Upstream default is `2 * ZK_TICK_TIME`. 284 |   ZK_MIN_SESSION_TIMEOUT: 4000 285 | 286 |   ## The delay, in hours, between ZooKeeper log and snapshot cleanups. 287 |   ZK_PURGE_INTERVAL: 0 288 | 289 |   ## The port on which the leader will send events to followers. 290 |   ZK_SERVER_PORT: 2888 291 | 292 |   ## The number of snapshots that the ZooKeeper process will retain if 293 |   ## `ZK_PURGE_INTERVAL` is set to a value greater than `0`. 294 |   ZK_SNAP_RETAIN_COUNT: 3 295 | 296 |   ## The number of Ticks by which a follower may lag behind the ensemble's leader. 297 |   ZK_SYNC_LIMIT: 10 298 | 299 |   ## The number of wall clock ms that corresponds to a Tick for the ensemble's 300 |   ## internal time. 301 |   ZK_TICK_TIME: 2000 302 | 303 | jobs: 304 |   ## ref: http://zookeeper.apache.org/doc/r3.4.10/zookeeperProgrammers.html#ch_zkSessions 305 |   chroots: 306 |     enabled: false 307 |     activeDeadlineSeconds: 300 308 |     backoffLimit: 5 309 |     completions: 1 310 |     config: 311 |       create: [] 312 |       # - /kafka 313 |       # - /ureplicator 314 |     env: [] 315 |     parallelism: 1 316 |     resources: {} 317 |     restartPolicy: Never 318 | -------------------------------------------------------------------------------- /charts/cassandra/README.md: -------------------------------------------------------------------------------- 1 | # Cassandra 2 | A Cassandra Chart for Kubernetes 3 | 4 | ## Install Chart 5 | To install the Cassandra Chart into your Kubernetes cluster (this chart requires a persistent volume by default, so you may need to create a storage class before installing the chart; see the [Persist data](#persist-data) section): 6 | 7 | ```bash 8 | helm install --namespace "cassandra" -n "cassandra" incubator/cassandra 9 | ``` 10 | 11 | After installation succeeds, you can get the status of the chart: 12 | 13 | ```bash 14 | helm status "cassandra" 15 | ``` 16 | 17 | If you want to delete the chart, use this command: 18 | ```bash 19 | helm delete --purge "cassandra" 20 | ``` 21 | 22 | ## Persist data 23 | You need to create a `StorageClass` before you can persist data in a persistent volume. 24 | To create a `StorageClass` on Google Cloud, run the following: 25 | 26 | ```bash 27 | kubectl create -f sample/create-storage-gce.yaml 28 | ``` 29 | 30 | And set the following values in `values.yaml`: 31 | 32 | ```yaml 33 | persistence: 34 |   enabled: true 35 | ``` 36 | 37 | If you want to create a `StorageClass` on another platform, please see the documentation here: [https://kubernetes.io/docs/user-guide/persistent-volumes/](https://kubernetes.io/docs/user-guide/persistent-volumes/) 38 | 39 | When running a cluster without persistence, the termination of a pod will first initiate a decommissioning of that pod. 40 | Depending on the amount of data stored inside the cluster this may take a while. To complete a graceful 41 | termination, pods need to be given more time.
Set the following values in `values.yaml`: 42 | 43 | ```yaml 44 | podSettings: 45 |   terminationGracePeriodSeconds: 1800 46 | ``` 47 | 48 | ## Install Chart with specific cluster size 49 | By default, this chart will create a cassandra cluster with the number of nodes set by `config.cluster_size` (1 in this chart's `values.yaml`). If you want to change the cluster size during installation, you can use the `--set config.cluster_size={value}` argument, or edit `values.yaml`. 50 | 51 | For example: 52 | Set the cluster size to 5 53 | 54 | ```bash 55 | helm install --namespace "cassandra" -n "cassandra" --set config.cluster_size=5 incubator/cassandra 56 | ``` 57 | 58 | ## Install Chart with specific resource size 59 | The commented-out resource example in `values.yaml` (2 vCPU and 4Gi of memory) is suitable for a development environment. 60 | If you want to use this chart for production, I would recommend updating to 4 vCPU and 16Gi of memory, and also increasing `max_heap_size` and `heap_new_size`. 61 | To update the settings, edit `values.yaml`. 62 | 63 | ## Install Chart with specific node 64 | Sometimes you may need to deploy cassandra to specific nodes to allocate resources. You can use a node selector by uncommenting and editing the `selector` block in `values.yaml`. 65 | For example, if you have 6 VMs in your node pools and want to deploy cassandra to the nodes labeled `cloud.google.com/gke-nodepool: pool-db`, 66 | 67 | set the following values in `values.yaml`: 68 | 69 | ```yaml 70 | # uncomment the selector block in values.yaml 71 | # and set your node label: 72 | selector: 73 |   nodeSelector: 74 |     cloud.google.com/gke-nodepool: pool-db 75 | ``` 76 | 77 | ## Configuration 78 | 79 | The following table lists the configurable parameters of the Cassandra chart and their default values. 80 | 81 | | Parameter                  | Description                                     | Default                                                    | 82 | | ----------------------- | --------------------------------------------- | ---------------------------------------------------------- | 83 | | `image.repo`                | `cassandra` image repository                    | `cassandra`                                                 | 84 | | `image.tag`                 | `cassandra` image tag                           | `3.11.3`                                                    | 85 | | `image.pullPolicy`          | Image pull policy                               | `Always` if `imageTag` is `latest`, else `IfNotPresent`     | 86 | | `image.pullSecrets`         | Image pull secrets                              | `nil`                                                       | 87 | | `config.cluster_name`       | The name of the cluster.                        | `cassandra`                                                 | 88 | | `config.cluster_size`       | The number of nodes in the cluster.             | `1`                                                         | 89 | | `config.seed_size`          | The number of seed nodes used to bootstrap new clients joining the cluster. | `1`                             | 90 | | `config.num_tokens`         | The number of tokens (vnodes) assigned to each node. | `256`                                                  | 91 | | `config.dc_name`            | Datacenter name (used when `endpoint_snitch` is `GossipingPropertyFileSnitch`). | `DC1`                       | 92 | | `config.rack_name`          | Rack name (used when `endpoint_snitch` is `GossipingPropertyFileSnitch`). | `RAC1`                            | 93 | | `config.endpoint_snitch`    | The snitch used to locate nodes and route requests. | `SimpleSnitch`                                          | 94 | | `config.max_heap_size`      | JVM maximum heap size.                          | `2048M`                                                     | 95 | | `config.heap_new_size`      | JVM new generation heap size.                   | `512M`                                                      | 96 | | `config.ports.cql`          | CQL native transport port.                      | `9042`                                                      | 97 | | `config.ports.thrift`       | Thrift client port.                             | `9160`                                                      | 98 | | `config.ports.agent`        | The port of the JVM Agent (if any)              | `nil`                                                       | 99 | | `config.start_rpc`          | Whether to start the Thrift RPC server.         | `false`                                                     | 100 | | `env`                       | Custom env variables                            | `{}`                                                        | 101 | | `persistence.enabled`       | Use a PVC to persist data                       | `true`                                                      | 102 | | `persistence.storageClass`  | Storage class of backing PVC                    | `nil` (uses alpha storage class annotation)                 | 103 | | `persistence.accessMode`    | Use volume as ReadOnly or ReadWrite             | `ReadWriteOnce`                                             | 104 | | `persistence.size`          | Size of data volume                             | `10Gi`                                                      | 105 | | `resources`                 | CPU/Memory resource requests/limits             | `{}` (see the commented example in `values.yaml`)           | 106 | | `service.type`              | k8s service type exposing ports, e.g.
`NodePort` | `ClusterIP`                                                 | 107 | | `podManagementPolicy`       | podManagementPolicy of the StatefulSet          | `OrderedReady`                                              | 108 | | `podDisruptionBudget`       | Pod disruption budget                           | `{}`                                                        | 109 | | `podAnnotations`            | pod annotations for the StatefulSet             | `{}`                                                        | 110 | | `updateStrategy.type`       | UpdateStrategy of the StatefulSet               | `OnDelete`                                                  | 111 | | `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `90`                                                       | 112 | | `livenessProbe.periodSeconds` | How often to perform the probe                | `30`                                                        | 113 | | `livenessProbe.timeoutSeconds` | When the probe times out                     | `5`                                                         | 114 | | `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1`     | 115 | | `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `3`       | 116 | | `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `90`                                                    | 117 | | `readinessProbe.periodSeconds` | How often to perform the probe               | `30`                                                        | 118 | | `readinessProbe.timeoutSeconds` | When the probe times out                    | `5`                                                         | 119 | | `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1`    | 120 | | `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `3`      | 121 | | `rbac.create`               | Specifies whether RBAC resources should be created | `true`                                                   | 122 | | `serviceAccount.create`     | Specifies whether a ServiceAccount should be created | `true`                                                 | 123 | | `serviceAccount.name`       | The name of the ServiceAccount to use           |                                                             | 124 | | `backup.enabled`            | Enable backup on chart installation             | `false`                                                     | 125 | | `backup.schedule`           | Keyspaces to backup, each with cron time        |                                                             | 126 | | `backup.annotations`        | Backup pod annotations                          | iam.amazonaws.com/role: `cain`                              | 127 | | `backup.image.repos`        | Backup image repository                         | `maorfr/cain`                                               | 128 | | `backup.image.tag`          | Backup image tag                                | `0.1.0`                                                     | 129 | | `backup.env`                | Backup environment variables                    | AWS_REGION: `us-east-1`                                     | 130 | | `backup.resources`          | Backup CPU/Memory resource requests/limits      | Memory: `1Gi`, CPU: `1`                                     | 131 | | `backup.destination`        | Destination to store backup artifacts           | `s3://bucket/cassandra`                                     | 132 | | `exporter.enabled`          | Enable Cassandra exporter                       | `false`                                                     | 133 | | `exporter.image.repo`       | Exporter image repository                       | `criteord/cassandra_exporter`                               | 134 | | `exporter.image.tag`        | Exporter image tag                              | `2.0.2`                                                     | 135 | | `exporter.port`             | Exporter port                                   | `5556`                                                      | 136 | | `exporter.jvmOpts`          | Exporter additional JVM options                 |                                                             | 137 | 138 | ## Scale cassandra 139 | When you want to change the cluster size of your cassandra cluster, you can use the `helm upgrade` command, as shown below.
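Before scaling, you can check the current size of the StatefulSet; a small sketch, assuming the chart was installed into the `cassandra` namespace as in the install example:

```bash
kubectl get statefulsets --namespace cassandra
```

Then run the upgrade: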
140 | 141 | ```bash 142 | helm upgrade --set config.cluster_size=5 cassandra incubator/cassandra 143 | ``` 144 | 145 | ## Get cassandra status 146 | You can get the status of your cassandra cluster by running the following command: 147 | 148 | ```bash 149 | kubectl exec -it --namespace cassandra $(kubectl get pods --namespace cassandra -l app=cassandra -o jsonpath='{.items[0].metadata.name}') nodetool status 150 | ``` 151 | 152 | Output 153 | ```bash 154 | Datacenter: asia-east1 155 | ====================== 156 | Status=Up/Down 157 | |/ State=Normal/Leaving/Joining/Moving 158 | --  Address    Load       Tokens       Owns (effective)  Host ID                               Rack 159 | UN  10.8.1.11  108.45 KiB  256          66.1%             410cc9da-8993-4dc2-9026-1dd381874c54  a 160 | UN  10.8.4.12  84.08 KiB   256          68.7%             96e159e1-ef94-406e-a0be-e58fbd32a830  c 161 | UN  10.8.3.6   103.07 KiB  256          65.2%             1a42b953-8728-4139-b070-b855b8fff326  b 162 | ``` 163 | 164 | ## Benchmark 165 | You can use the [cassandra-stress](https://docs.datastax.com/en/cassandra/3.0/cassandra/tools/toolsCStress.html) tool to run a benchmark on the cluster with the following command: 166 | 167 | ```bash 168 | kubectl exec -it --namespace cassandra $(kubectl get pods --namespace cassandra -l app=cassandra -o jsonpath='{.items[0].metadata.name}') cassandra-stress 169 | ``` 170 | 171 | Example `cassandra-stress` arguments: 172 | - Run mixed reads and writes with a read:write ratio of 9:1 173 | - Operate on a total of 1 million keys with uniform distribution 174 | - Use QUORUM consistency for reads/writes 175 | - Use 50 client threads 176 | - Generate the results as a graph 177 | - Use NetworkTopologyStrategy with replication factor 2 178 | 179 | ```bash 180 | cassandra-stress mixed ratio\(write=1,read=9\) n=1000000 cl=QUORUM -pop dist=UNIFORM\(1..1000000\) -mode native cql3 -rate threads=50 -log file=~/mixed_autorate_r9w1_1M.log -graph file=test2.html title=test revision=test2 -schema "replication(strategy=NetworkTopologyStrategy, factor=2)" 181 | ``` 182 | --------------------------------------------------------------------------------