├── README.md ├── docker-kafka ├── Dockerfile ├── config │ └── server.properties ├── prometheus-config.yml └── run.sh ├── docker-zookeeper ├── Dockerfile ├── prometheus-config.yml └── run.sh ├── kafka-cluster ├── .helmignore ├── Chart.yaml ├── charts │ ├── kafka-operator │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── templates │ │ │ └── kafka-operator-deployment.yaml │ │ └── values.yaml │ ├── kafka │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── templates │ │ │ ├── kafka-hawkular.yaml │ │ │ ├── kafka-headless-service.yaml │ │ │ ├── kafka-server-properties.yaml │ │ │ ├── kafka-service.yaml │ │ │ └── kafka-stateful-set.yaml │ │ └── values.yaml │ └── zookeeper │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── templates │ │ ├── zookeeper-hawkular.yaml │ │ ├── zookeeper-headless-service.yaml │ │ ├── zookeeper-service.yaml │ │ └── zookeeper-stateful-set.yaml │ │ └── values.yaml ├── templates │ ├── _helpers.yaml │ └── jolokia-secret.yaml └── values.yaml ├── kafka-secrets ├── .helmignore ├── Chart.yaml ├── templates │ └── kafka-jaas.yaml └── values.yaml ├── kafka.yaml ├── openshift ├── README.md ├── kafka-client.yml ├── kafka-ephemeral-template.yaml └── kafka-persistent-template.yaml └── test ├── 11topic-create-test1.yml ├── 12topic-create-test2.yml ├── 21consumer-test1.yml └── test.sh /README.md: -------------------------------------------------------------------------------- 1 | ## Managing Kafka clusters in Kubernetes 2 | 3 | ### Installing 4 | 5 | The installation is done using helm chart: 6 | 7 | ```bash 8 | helm install kafka-cluster 9 | ``` 10 | 11 | The chart will install zookeeper and kafka clusters as StatefulSets, configure needed services and monitoring. 12 | 13 | #### Running in OpenShift 14 | 15 | Installation in OpenShift can be done using provided openshift templates. 
For details see [openshift/README.md](openshift/README.md) 16 | 17 | ### Helm Chart Parameters 18 | 19 | ```yaml 20 | 21 | 22 | kafka: 23 | ## kafka.imageRegistry is the address of the container image registry to use 24 | # imageRegistry: docker.io 25 | ## kafka.pullPolicy is the kubernetes policy to use for pulling images: Always, IfNotPresent, Never 26 | pullPolicy: IfNotPresent 27 | ## kafka.image is the name of kafka container image to use 28 | image: nbogojevic/kafka 29 | ## kafka.tag is the tag of kafka container image to use 30 | tag: latest 31 | ## kafka.serviceName is the service name used by clients to find the cluster 32 | serviceName: kafka 33 | ## kafka.dnsName is the name used by StatefulSet to identify instances 34 | dnsName: broker 35 | ## kafka.nodes is set to number of nodes in kafka cluster 36 | nodes: 3 37 | ## kafka.jvmFlags are default java options to use with kafka 38 | jvmFlags: -Xmx1G 39 | ## kafka.storageSize is the default size of storage when using persistent volumes 40 | storageSize: 2GiB 41 | ## kafka.persistent is set to true to use persistent volumes 42 | persistent: false 43 | ## set to true if clients must authenticate. Only supports plaintext SASL mechanism. 44 | secured: true 45 | ## set to true if communication with kafka should be encrypted. TODO certificate management 46 | # tls: false 47 | # Set to true if topics can be deleted. 48 | deleteTopicEnable: false 49 | # The name of the admin user. If not specified a random name is generated. 50 | #adminUser: admin 51 | # The admin password. If not specified a random password is generated. 52 | #adminPassword: secret-password 53 | # The name of the client user. If not specified a random name is generated. 54 | #clientUser: client 55 | # The client password. If not specified a random password is generated. 
56 | #clientPassword: secret-password 57 | zookeeperService: zookeeper 58 | # Default timeout in ms when connecting to zookeeper 59 | zookeperTimeout: 6000 60 | ## kafka.nodeSelector can be set with node labels to use when choosing nodes to deploy 61 | nodeSelector: 62 | 63 | zookeeper: 64 | ## zookeeper.imageRegistry is the address of the container image registry to use 65 | # imageRegistry: docker.io 66 | ## zookeeper.pullPolicy is the kubernetes policy to use for pulling images: Always, IfNotPresent, Never 67 | pullPolicy: IfNotPresent 68 | ## zookeeper.image is the name of zookeeper container image to use 69 | image: nbogojevic/zookeeper 70 | ## zookeeper.tag is the tag of zookeeper container image to use 71 | tag: latest 72 | ## zookeeper.serviceName is the service name used by clients to find the cluster 73 | serviceName: zookeeper 74 | ## zookeeper.dnsName is the name used by StatefulSet to identify instances 75 | dnsName: zoo 76 | ## zookeeper.nodes is set to number of nodes in zookeeper cluster 77 | nodes: 3 78 | ## zookeeper.jvmFlags are default java options to use with zookeeper 79 | jvmFlags: -Xmx512m 80 | ## zookeeper.nodeSelector can be set with node labels to use when choosing nodes to deploy 81 | nodeSelector: 82 | 83 | global: 84 | ## applicationName is the default name of the application 85 | applicationName: kafka-cluster 86 | 87 | ## hawkular is set to true to enable hawkular metrics 88 | hawkular: false 89 | 90 | ## prometheus is set to true to enable prometheus metrics 91 | prometheus: true 92 | 93 | ## jolokia is set to true if jolokia JMX endpoint should be exposed 94 | jolokia: true 95 | 96 | ## jolokiaUsername is set to the name of jolokia user for basic authentication 97 | # jolokiaUsername: jolokia 98 | 99 | ## jolokiaPassword can be set to the password to use. 
If not specified, a random password is generated 100 | # jolokiaPassword: jolokia 101 | ``` 102 | 103 | ## Kafka 104 | 105 | ### Kafka as Kubernetes StatefulSet 106 | 107 | Kafka runs as a Kubernetes StatefulSet (or PetSet in older versions - helm chart automatically chooses the supported resource type). 108 | 109 | #### Storage 110 | 111 | Kafka can be run with or without persistent volumes. 112 | 113 | When running with persistent volumes, the data is preserved if an instance or node crashes. When running without persistent volumes, the data is stored locally on the node, and a kubernetes node loss may result in data loss, so high-availability and resiliency should be supported by kafka mechanisms, e.g. setting replication factor sufficiently high. 114 | 115 | For best performance, one might prefer specific local storage characteristics (SSD); the helm chart allows usage of node selector to select instances where kafka would be deployed. 116 | 117 | ### Kafka container image 118 | 119 | Default kafka container image is based on [fabric8/s2i-java](https://hub.docker.com/r/fabric8/s2i-java/) image. The docker build is located in [docker-kafka/](./docker-kafka/). 120 | 121 | ## Zookeeper 122 | 123 | Zookeeper also runs as a Stateful Set. 124 | 125 | Zookeeper uses local storage, so if you lose your zookeeper cluster, kafka will be unaware that persisted topics exist. 126 | The topic data is still present on kafka brokers, but you will need to re-create topics. 127 | This can be done using [kafka-operator](https://github.com/nbogojevic/kafka-operator). 128 | 129 | ### Zookeeper container image 130 | 131 | Default zookeeper container image is based on [fabric8/s2i-java](https://hub.docker.com/r/fabric8/s2i-java/) image. The docker build is located in [docker-zookeeper/](./docker-zookeeper/). 132 | 133 | ## Monitoring 134 | 135 | The startup script in docker images will detect if prometheus jmx_exporter, jolokia or agent-bond are installed and enable them. 
Each image is pre-configured to expose relevant JMX MBeans 136 | via prometheus jmx_exporter. 137 | 138 | If prometheus is enabled via helm variables, StatefulSet pods will be annotated with standard prometheus scraping annotations. 139 | 140 | ## Building 141 | 142 | Building docker images: 143 | 144 | ```bash 145 | docker build docker-kafka -t nbogojevic/kafka -f docker-kafka/Dockerfile 146 | docker build docker-zookeeper -t nbogojevic/zookeeper -f docker-zookeeper/Dockerfile 147 | ``` 148 | 149 | Checking helm chart: 150 | 151 | ```bash 152 | helm init 153 | helm lint kafka-cluster 154 | helm install kafka-cluster 155 | ``` -------------------------------------------------------------------------------- /docker-kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM fabric8/s2i-java 2 | 3 | USER root 4 | # Use prometheus 0.1.0 5 | # Install jmx_prometheus_javaagent 6 | ENV PROMETHEUS_EXPORTER_VERSION=0.1.0 7 | 8 | RUN mkdir -p /opt/prometheus/conf \ 9 | && curl -SLs "https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/${PROMETHEUS_EXPORTER_VERSION}/jmx_prometheus_javaagent-${PROMETHEUS_EXPORTER_VERSION}.jar" \ 10 | -o /opt/prometheus/jmx_prometheus_javaagent.jar \ 11 | && chmod 444 /opt/prometheus/jmx_prometheus_javaagent.jar \ 12 | && chmod 444 /opt/prometheus/conf 13 | 14 | ENV PROMETHEUS_EXPORTER_CONF="-javaagent:/opt/prometheus/jmx_prometheus_javaagent.jar=5555:/opt/prometheus/conf/prometheus-config.yml" 15 | ADD prometheus-config.yml /opt/prometheus/ 16 | 17 | ENV KAFKA_VERSION=1.0.0 18 | ENV SCALA_VERSION=2.12.4 19 | ENV KAFKA_BIN_VERSION=2.12-$KAFKA_VERSION 20 | 21 | # Download Scala and Kafka, untar 22 | RUN curl -SLs "http://www.scala-lang.org/files/archive/scala-${SCALA_VERSION}.rpm" -o scala.rpm \ 23 | && rpm -ihv scala.rpm \ 24 | && rm scala.rpm \ 25 | && curl -SLs "http://www.apache.org/dist/kafka/${KAFKA_VERSION}/kafka_${KAFKA_BIN_VERSION}.tgz" | tar -xzf - -C /opt \ 26 | && 
mv /opt/kafka_${KAFKA_BIN_VERSION} /opt/kafka \ 27 | && mkdir -p /deployments/bin 28 | 29 | ADD config/server.properties /opt/kafka/config/ 30 | 31 | WORKDIR /opt/kafka 32 | 33 | EXPOSE 9092 8778 5555 34 | 35 | ADD run.sh /deployments/bin 36 | 37 | # Set directory and file permissions 38 | RUN chown -R jboss:root /deployments /opt/kafka \ 39 | && chmod -R "g+rwX" /deployments /opt/kafka \ 40 | && chmod +x /deployments/bin/run.sh 41 | 42 | USER jboss 43 | 44 | CMD [ "/usr/local/s2i/run", "config/server.properties"] 45 | -------------------------------------------------------------------------------- /docker-kafka/config/server.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # see kafka.server.KafkaConfig for additional details and defaults 17 | 18 | ############################# Server Basics ############################# 19 | 20 | # The id of the broker. This must be set to a unique integer for each broker. 
21 | broker.id=0 22 | 23 | # Use https://github.com/nbogojevic/kafka-operator instead 24 | auto.create.topics.enable=false 25 | 26 | # Switch to enable topic deletion or not, allow deletion for kafka operator 27 | delete.topic.enable=true 28 | 29 | 30 | ############################# Log Basics ############################# 31 | 32 | # A comma seperated list of directories under which to store log files 33 | log.dirs=/opt/kafka/data/topics 34 | 35 | 36 | ############################# Zookeeper ############################# 37 | 38 | # Zookeeper connection string (see zookeeper docs for details). 39 | # This is a comma separated host:port pairs, each corresponding to a zk 40 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". 41 | # You can also append an optional chroot string to the urls to specify the 42 | # root directory for all kafka znodes. 43 | zookeeper.connect=zookeeper:2181 44 | 45 | # Timeout in ms for connecting to zookeeper 46 | zookeeper.connection.timeout.ms=6000 47 | -------------------------------------------------------------------------------- /docker-kafka/prometheus-config.yml: -------------------------------------------------------------------------------- 1 | 2 | lowercaseOutputName: true 3 | rules: 4 | - pattern : kafka.cluster<>Value 5 | name: kafka_cluster_$1_$2 6 | labels: 7 | topic: "$3" 8 | partition: "$4" 9 | - pattern : kafka.log<>Value 10 | name: kafka_log_$1 11 | labels: 12 | topic: "$2" 13 | partition: "$3" 14 | - pattern : kafka.controller<>(Count|Value) 15 | name: kafka_controller_$1_$2 16 | - pattern : kafka.network<>Value 17 | name: kafka_network_$1_$2 18 | - pattern : kafka.network<>Count 19 | name: kafka_network_$1_$2_total 20 | labels: 21 | request: "$3" 22 | - pattern : kafka.network<>Count 23 | name: kafka_network_$1_$2 24 | labels: 25 | request: "$3" 26 | type: COUNTER 27 | - pattern : kafka.network<>Count 28 | name: kafka_network_$1_$2 29 | labels: 30 | request: "$3" 31 | - pattern : kafka.network<>Count 32 | 
name: kafka_network_$1_$2 33 | - pattern : kafka.server<>Count 34 | name: kafka_server_$1_$2_total 35 | labels: 36 | topic: "$3" 37 | - pattern : kafka.server<>Count 38 | name: kafka_server_$1_$2_total 39 | type: COUNTER 40 | 41 | - pattern : kafka.server<>(Count|Value) 42 | name: kafka_server_$1_$2 43 | labels: 44 | clientId: "$3" 45 | topic: "$4" 46 | partition: "$5" 47 | - pattern : kafka.server<>(Count|Value) 48 | name: kafka_server_$1_$2 49 | labels: 50 | topic: "$3" 51 | partition: "$4" 52 | - pattern : kafka.server<>(Count|Value) 53 | name: kafka_server_$1_$2 54 | labels: 55 | topic: "$3" 56 | type: COUNTER 57 | 58 | - pattern : kafka.server<>(Count|Value) 59 | name: kafka_server_$1_$2 60 | labels: 61 | clientId: "$3" 62 | broker: "$4:$5" 63 | - pattern : kafka.server<>(Count|Value) 64 | name: kafka_server_$1_$2 65 | labels: 66 | clientId: "$3" 67 | - pattern : kafka.server<>(Count|Value) 68 | name: kafka_server_$1_$2 69 | 70 | - pattern : kafka.(\w+)<>Count 71 | name: kafka_$1_$2_$3_total 72 | - pattern : kafka.(\w+)<>Count 73 | name: kafka_$1_$2_$3_total 74 | labels: 75 | topic: "$4" 76 | type: COUNTER 77 | - pattern : kafka.(\w+)<>Count 78 | name: kafka_$1_$2_$3_total 79 | labels: 80 | topic: "$4" 81 | partition: "$5" 82 | type: COUNTER 83 | - pattern : kafka.(\w+)<>(Count|Value) 84 | name: kafka_$1_$2_$3_$4 85 | type: COUNTER 86 | - pattern : kafka.(\w+)<>(Count|Value) 87 | name: kafka_$1_$2_$3_$6 88 | labels: 89 | "$4": "$5" 90 | -------------------------------------------------------------------------------- /docker-kafka/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Command line arguments given to this script 4 | export KAFKA_OPTS="${KAFKA_OPTS:+${KAFKA_OPTS} }" 5 | 6 | # Add jolokia or agent bond 7 | if [ -f /opt/prometheus/prometheus-opts ]; then 8 | export KAFKA_OPTS="${KAFKA_OPTS:+${KAFKA_OPTS} }$(/opt/prometheus/prometheus-opts)" 9 | fi 10 | 11 | if [ -f 
/opt/jolokia/jolokia-opts ]; then 12 | export KAFKA_OPTS="${KAFKA_OPTS:+${KAFKA_OPTS} }$(/opt/jolokia/jolokia-opts)" 13 | fi 14 | 15 | export KAFKA_PROPERTIES_OVERRIDES="--override broker.id=$(hostname | awk -F'-' '{print $2}')" 16 | 17 | bin/kafka-server-start.sh /opt/mounted/config/server.properties ${KAFKA_PROPERTIES_OVERRIDES} 18 | 19 | -------------------------------------------------------------------------------- /docker-zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM fabric8/s2i-java 2 | 3 | USER root 4 | # Use prometheus 0.1.0 5 | # Install jmx_prometheus_javaagent 6 | ENV PROMETHEUS_EXPORTER_VERSION=0.1.0 7 | 8 | RUN mkdir -p /opt/prometheus/conf \ 9 | && curl -SLs "https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/${PROMETHEUS_EXPORTER_VERSION}/jmx_prometheus_javaagent-${PROMETHEUS_EXPORTER_VERSION}.jar" \ 10 | -o /opt/prometheus/jmx_prometheus_javaagent.jar \ 11 | && chmod 444 /opt/prometheus/jmx_prometheus_javaagent.jar \ 12 | && chmod 444 /opt/prometheus/conf 13 | 14 | ENV PROMETHEUS_EXPORTER_CONF="-javaagent:/opt/prometheus/jmx_prometheus_javaagent.jar=5555:/opt/prometheus/conf/prometheus-config.yml" 15 | ADD prometheus-config.yml /opt/prometheus/conf 16 | 17 | ENV ZOO_USER=jboss \ 18 | ZOO_CONF_DIR=/conf \ 19 | ZOO_DATA_DIR=/data \ 20 | ZOO_DATA_LOG_DIR=/datalog \ 21 | ZOO_PORT=2181 \ 22 | ZOO_TICK_TIME=2000 \ 23 | ZOO_INIT_LIMIT=5 \ 24 | ZOO_SYNC_LIMIT=2 25 | 26 | # Make directories and assign permissions 27 | RUN mkdir -p "$ZOO_DATA_LOG_DIR" "$ZOO_DATA_DIR" "$ZOO_CONF_DIR" \ 28 | && chown "$ZOO_USER:root" "$ZOO_DATA_LOG_DIR" "$ZOO_DATA_DIR" "$ZOO_CONF_DIR" \ 29 | && chmod -R "g+rwX" "$ZOO_DATA_LOG_DIR" "$ZOO_DATA_DIR" "$ZOO_CONF_DIR" 30 | 31 | ARG DISTRO_NAME=zookeeper-3.4.11 32 | 33 | # Download Apache Zookeeper, untar and clean up 34 | RUN curl -SLsO "http://www.apache.org/dist/zookeeper/$DISTRO_NAME/$DISTRO_NAME.tar.gz" \ 35 | && tar -xzf 
"$DISTRO_NAME.tar.gz" -C /opt \ 36 | && mv "/opt/$DISTRO_NAME/conf/"* "$ZOO_CONF_DIR" \ 37 | && rm -r "$DISTRO_NAME.tar.gz" \ 38 | && mkdir -p /deployments/bin 39 | 40 | WORKDIR /opt/$DISTRO_NAME 41 | VOLUME ["$ZOO_DATA_DIR", "$ZOO_DATA_LOG_DIR", "$ZOO_CONF_DIR"] 42 | 43 | EXPOSE 2181 2888 3888 8778 5555 44 | 45 | ENV PATH=$PATH:/opt/$DISTRO_NAME/bin \ 46 | ZOOCFGDIR=$ZOO_CONF_DIR 47 | 48 | COPY run.sh /deployments/bin 49 | 50 | # Forcing timezone to UTC 51 | RUN unlink /etc/localtime && ln -s /usr/share/zoneinfo/UTC /etc/localtime 52 | 53 | RUN chown -R "$ZOO_USER:root" "/deployments" "/opt/$DISTRO_NAME" \ 54 | && chmod -R "g+rwX" "/deployments" "/opt/$DISTRO_NAME" \ 55 | && chmod +x /deployments/bin/run.sh \ 56 | && chown "$ZOO_USER:root" "$ZOO_DATA_LOG_DIR" "$ZOO_DATA_DIR" "$ZOO_CONF_DIR" \ 57 | && chmod -R "g+rwX" "$ZOO_DATA_LOG_DIR" "$ZOO_DATA_DIR" "$ZOO_CONF_DIR" 58 | 59 | USER $ZOO_USER 60 | 61 | CMD ["/usr/local/s2i/run", "zkServer.sh", "start-foreground"] 62 | -------------------------------------------------------------------------------- /docker-zookeeper/prometheus-config.yml: -------------------------------------------------------------------------------- 1 | rules: 2 | - pattern: "org.apache.ZooKeeperService<>(\\w+)" 3 | name: "zookeeper_$2" 4 | - pattern: "org.apache.ZooKeeperService<>(\\w+)" 5 | name: "zookeeper_$3" 6 | labels: 7 | replicaId: "$2" 8 | - pattern: "org.apache.ZooKeeperService<>(\\w+)" 9 | name: "zookeeper_$4" 10 | labels: 11 | replicaId: "$2" 12 | memberType: "$3" 13 | - pattern: "org.apache.ZooKeeperService<>(\\w+)" 14 | name: "zookeeper_$4_$5" 15 | labels: 16 | replicaId: "$2" 17 | memberType: "$3" 18 | -------------------------------------------------------------------------------- /docker-zookeeper/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | export SERVER_JVMFLAGS="${SERVER_JVMFLAGS:+${SERVER_JVMFLAGS} }" 6 | 7 | # Add jolokia or agent bond 8 | if 
[ -f /opt/prometheus/prometheus-opts ]; then 9 | export SERVER_JVMFLAGS="${SERVER_JVMFLAGS:+${SERVER_JVMFLAGS} }$(/opt/prometheus/prometheus-opts)" 10 | fi 11 | 12 | if [ -f /opt/jolokia/jolokia-opts ]; then 13 | export SERVER_JVMFLAGS="${SERVER_JVMFLAGS:+${SERVER_JVMFLAGS} }$(/opt/jolokia/jolokia-opts)" 14 | fi 15 | 16 | 17 | # Allow the container to be started with `--user` 18 | if [ "$1" = 'zkServer.sh' -a "$(id -u)" = '0' ]; then 19 | chown -R "$ZOO_USER" "$ZOO_DATA_DIR" "$ZOO_DATA_LOG_DIR" 20 | exec su-exec "$ZOO_USER" "$0" "$@" 21 | fi 22 | 23 | # Generate the config only if it doesn't exist 24 | if [ ! -f "$ZOO_CONF_DIR/zoo.cfg" ]; then 25 | CONFIG="$ZOO_CONF_DIR/zoo.cfg" 26 | 27 | ZOO_PORT="${ZOO_PORT:-2181}" 28 | echo "clientPort=$ZOO_PORT" >> "$CONFIG" 29 | echo "dataDir=$ZOO_DATA_DIR" >> "$CONFIG" 30 | echo "dataLogDir=$ZOO_DATA_LOG_DIR" >> "$CONFIG" 31 | 32 | echo "tickTime=$ZOO_TICK_TIME" >> "$CONFIG" 33 | echo "initLimit=$ZOO_INIT_LIMIT" >> "$CONFIG" 34 | echo "syncLimit=$ZOO_SYNC_LIMIT" >> "$CONFIG" 35 | 36 | ZOO_SERVER_COUNT="${ZOO_SERVER_COUNT:-3}" 37 | # If ZOO_SERVER_COUNT is not defined, assume 3 nodes 38 | for ((i=1;i<=ZOO_SERVER_COUNT;i++)); 39 | do 40 | echo "server.$i=zoo-$((i-1)).zoo:2888:3888:participant " >> "$CONFIG" 41 | done 42 | # If ZOO_SERVERS is not defined, allocate ZOO_SERVER_COUNT zoo nodes 43 | #if [ -z $ZOO_SERVERS ]; then 44 | # for server in $ZOO_SERVERS; do 45 | # echo "$server" >> "$CONFIG" 46 | # done 47 | #fi 48 | cat $CONFIG 49 | 50 | fi 51 | 52 | # Write myid only if it doesn't exist 53 | if [ ! 
-f "$ZOO_DATA_DIR/myid" ]; then 54 | if [ -z "$ZOO_MY_ID" ]; then 55 | ZOO_MY_ID=$(($(hostname | sed s/.*-//) + 1)) 56 | echo "Guessed server id: $ZOO_MY_ID" 57 | # Tries to bind to it's own server entry, which won't work with names ("Exception while listening java.net.SocketException: Unresolved address") 58 | sed -i s/server\.$ZOO_MY_ID\=[a-z0-9.-]*/server.$ZOO_MY_ID=0.0.0.0/ "$ZOO_CONF_DIR/zoo.cfg" 59 | fi 60 | echo "${ZOO_MY_ID:-1}" > "$ZOO_DATA_DIR/myid" 61 | fi 62 | 63 | 64 | exec "$@" -------------------------------------------------------------------------------- /kafka-cluster/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /kafka-cluster/Chart.yaml: -------------------------------------------------------------------------------- 1 | name: kafka-cluster 2 | version: 0.2.0 3 | description: Kafka cluster 4 | keywords: 5 | - kafka 6 | - zookeeper 7 | home: https://github.com/nbogojevic/kubernetes-kafka 8 | sources: 9 | - https://github.com/nbogojevic/kubernetes-kafka 10 | maintainers: 11 | - name: Nenad Bogojević 12 | engine: gotpl # The name of the template engine (optional, defaults to gotpl) 13 | appVersion: 1.0.0-1 14 | -------------------------------------------------------------------------------- /kafka-cluster/charts/kafka-operator/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 
2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /kafka-cluster/charts/kafka-operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: Kafka operator deployment 3 | name: kafka-operator 4 | version: 0.1.0 5 | -------------------------------------------------------------------------------- /kafka-cluster/charts/kafka-operator/templates/kafka-operator-deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.enabled }} 2 | {{- if .Capabilities.APIVersions.Has "apps/v1beta2" }} 3 | apiVersion: "apps/v1beta2" 4 | {{- else if .Capabilities.APIVersions.Has "apps/v1beta1" }} 5 | apiVersion: "apps/v1beta1" 6 | {{- end }} 7 | kind: Deployment 8 | metadata: 9 | annotations: 10 | labels: 11 | component: kafka-operator 12 | {{ include "standardLabels" . | indent 4 }} 13 | name: {{ .Values.global.applicationName }}-operator 14 | spec: 15 | replicas: 1 16 | revisionHistoryLimit: 2 17 | selector: 18 | matchLabels: 19 | component: kafka-operator 20 | {{ include "standardLabels" . | indent 6 }} 21 | template: 22 | metadata: 23 | annotations: 24 | {{- if .Values.global.prometheus }} 25 | prometheus.io/scrape: "true" 26 | prometheus.io/path: "/metrics" 27 | prometheus.io/port: "9779" 28 | {{- end }} 29 | labels: 30 | component: kafka-operator 31 | {{ include "standardLabels" . 
| indent 8 }} 32 | spec: 33 | containers: 34 | - env: 35 | - name: IMPORT_TOPICS 36 | value: "{{ .Values.importTopics }}" 37 | - name: LOG_LEVEL 38 | value: "{{ .Values.logLevel }}" 39 | - name: ENABLE_TOPIC_DELETE 40 | value: "{{ .Values.deleteTopics }}" 41 | - name: BOOTSTRAP_SERVER 42 | value: "{{ .Values.bootstrapServer }}" 43 | - name: ZOOKEEPER_CONNECT 44 | value: "{{ .Values.zookeeperConnect }}" 45 | - name: ENABLE_ACL 46 | value: "{{ .Values.manageAcl }}" 47 | {{- if .Values.global.jolokia }} 48 | - name: AB_JOLOKIA_PASSWORD_RANDOM 49 | value: "false" 50 | - name: AB_JOLOKIA_USER 51 | valueFrom: 52 | secretKeyRef: 53 | name: {{ .Values.global.applicationName }}-jolokia-secret 54 | key: username 55 | - name: AB_JOLOKIA_PASSWORD 56 | valueFrom: 57 | secretKeyRef: 58 | name: {{ .Values.global.applicationName }}-jolokia-secret 59 | key: password 60 | {{- end }} 61 | - name: JAVA_OPTIONS 62 | value: "{{ .Values.jvmFlags }}{{ if .Values.secured }} -Djava.security.auth.login.config=/opt/kafka/auth/kafka-admin-client-jaas.conf{{ end }}" 63 | name: {{ .Values.global.applicationName }}-operator 64 | ports: 65 | {{- if .Values.global.prometheus }} 66 | - containerPort: 9779 67 | name: prometheus 68 | {{- end }} 69 | {{- if .Values.global.jolokia }} 70 | - containerPort: 8778 71 | name: jolokia 72 | {{- end }} 73 | image: {{ if .Values.imageRegistry }}{{ .Values.imageRegistry }}/{{ end }}{{ .Values.image }}:{{ .Values.tag }} 74 | imagePullPolicy: {{ .Values.pullPolicy }} 75 | volumeMounts: 76 | {{- if .Values.secured }} 77 | - name: kafka-operator-auth 78 | mountPath: /opt/kafka/auth 79 | {{- end }} 80 | serviceAccountName: kafka-operator 81 | volumes: 82 | {{- if .Values.secured }} 83 | - name: kafka-operator-auth 84 | secret: 85 | secretName: {{ .Values.global.applicationName }}-kafka-admin-client-auth 86 | {{- end }} 87 | {{- end }} 88 | -------------------------------------------------------------------------------- /kafka-cluster/charts/kafka-operator/values.yaml: 
-------------------------------------------------------------------------------- 1 | ## enabled is set to true if kafka-operator should be deployed 2 | enabled: true 3 | ## image is the name of kafka-operator container image to use 4 | image: nbogojevic/kafka-operator 5 | ## tag is the tag of kafka-operator container image to use 6 | tag: latest 7 | ## bootstrap is the kafka bootstrap server address used to connect to the kafka cluster 8 | bootstrap: kafka:9092 9 | ## logLevel is the log level to set for kafka-operator 10 | logLevel: INFO 11 | ## deleteTopics is set to true if operator can delete kafka topics 12 | deleteTopics: true 13 | ## importTopics is set to true if operator should take ownership of all existing kafka topics 14 | importTopics: true 15 | ## jvmFlags are default java options to use with operator 16 | jvmFlags: -Xmx128M 17 | -------------------------------------------------------------------------------- /kafka-cluster/charts/kafka/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /kafka-cluster/charts/kafka/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: Kafka deployment as stateful set 3 | name: kafka 4 | version: 0.1.0 5 | -------------------------------------------------------------------------------- /kafka-cluster/charts/kafka/templates/kafka-hawkular.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.hawkular }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ .Values.global.applicationName }}-kafka-hawkular-config 6 | labels: 7 | component: kafka-broker 8 | role: metrics 9 | {{- include "standardLabels" . 
| indent 4 }} 10 | data: 11 | hawkular-openshift-agent: | 12 | endpoints: 13 | - type: jolokia 14 | protocol: https 15 | port: 8778 16 | path: /jolokia/ 17 | collection_interval: 30s 18 | tls: 19 | skip_certificate_validation: true 20 | credentials: 21 | username: secret:{{ .Values.global.applicationName }}-jolokia-secret/username 22 | password: secret:{{ .Values.global.applicationName }}-jolokia-secret/password 23 | metrics: 24 | - name: kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec,topic=*#OneMinuteRate 25 | type: gauge 26 | id: kafka.${topic}.BytesInPerSec.rate 27 | description: ${topic} BytesInPerSec rate (1min) 28 | tags: 29 | topic_metrics: true 30 | byte_rate: true 31 | input: true 32 | - name: kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec,topic=*#OneMinuteRate 33 | type: gauge 34 | id: kafka.${topic}.BytesOutPerSec.rate 35 | description: ${topic} BytesOutPerSec rate (1min) 36 | tags: 37 | topic_metrics: true 38 | byte_rate: true 39 | output: true 40 | - name: kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,topic=*#OneMinuteRate 41 | type: gauge 42 | id: kafka.${topic}.MessageInPerSec.rate 43 | description: ${topic} ${name} (1min) 44 | tags: 45 | topic_metrics: true 46 | message_rate: true 47 | - name: java.lang:type=Memory#HeapMemoryUsage#used 48 | type: gauge 49 | id: jvm.heap.used 50 | units: B 51 | tags: 52 | jvm_metrics: true 53 | jvm_heap: true 54 | {{- end }} -------------------------------------------------------------------------------- /kafka-cluster/charts/kafka/templates/kafka-headless-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Values.headlessService }} 5 | labels: 6 | component: kafka-broker 7 | {{ include "standardLabels" . 
| indent 4 }} 8 | spec: 9 | ports: 10 | - port: 9092 11 | clusterIP: None 12 | selector: 13 | app: {{ .Values.global.applicationName }} 14 | component: kafka-broker 15 | release: {{ .Release.Name | quote }} 16 | -------------------------------------------------------------------------------- /kafka-cluster/charts/kafka/templates/kafka-server-properties.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ .Values.global.applicationName }}-kafka-server-properties 5 | labels: 6 | component: kafka-broker 7 | role: server.properties 8 | {{- include "standardLabels" . | indent 4 }} 9 | data: 10 | server.properties: | 11 | ############################# Server Basics ############################# 12 | 13 | # The id of the broker. This must be set to a unique integer for each broker. 14 | broker.id=0 15 | 16 | # Use https://github.com/nbogojevic/kafka-operator instead 17 | auto.create.topics.enable=false 18 | 19 | # Switch to enable topic deletion or not, default value is false 20 | delete.topic.enable={{ default "false" .Values.deleteTopicEnable}} 21 | 22 | # Rebalance leaders if needed 23 | auto.leader.rebalance.enable=true 24 | 25 | ############################# Log Basics ############################# 26 | 27 | # A comma separated list of directories under which to store log files 28 | log.dirs=/opt/kafka/data/topics 29 | 30 | ############################# Zookeeper ############################# 31 | 32 | # Zookeeper connection string (see zookeeper docs for details). 33 | # This is a comma separated host:port pairs, each corresponding to a zk 34 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". 35 | # You can also append an optional chroot string to the urls to specify the 36 | # root directory for all kafka znodes. 
37 | zookeeper.connect={{ .Values.zookeeperService }}:2181 38 | 39 | # Timeout in ms for connecting to zookeeper 40 | zookeeper.connection.timeout.ms={{ default 6000 .Values.zookeperTimeout}} 41 | 42 | {{ if .Values.secured }} 43 | ############################# Security ############################# 44 | listeners=SASL_{{ if .Values.tls }}SSL{{ else }}PLAINTEXT{{ end }}://:9092 45 | # Security protocol to use between brokers 46 | security.inter.broker.protocol=SASL_{{ if .Values.tls }}SSL{{ else }}PLAINTEXT{{ end }} 47 | # SASL mechanisms to use between brokers (PLAIN) 48 | sasl.mechanism.inter.broker.protocol=PLAIN 49 | # What SASL mechanisms are enabled (only PLAIN) 50 | sasl.enabled.mechanisms=PLAIN 51 | {{ end }} 52 | 53 | -------------------------------------------------------------------------------- /kafka-cluster/charts/kafka/templates/kafka-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Values.serviceName }} 5 | labels: 6 | component: kafka-broker 7 | {{ include "standardLabels" . 
| indent 4 }} 8 | # Check if we can enable openshift dependencies 9 | # annotations: 10 | # service.alpha.openshift.io/dependencies: '[{"name":"{#{ .Values.headlessService }}","namespace":"","kind":"Service"}, {"name":"{#{ .Values.zookeeper.headlessService }}","namespace":"","kind":"Service"}, {"name":"{#{ .Values.zookeeper.serviceName }}","namespace":"","kind":"Service"}]' 11 | spec: 12 | ports: 13 | - port: 9092 14 | selector: 15 | app: {{ .Values.global.applicationName }} 16 | component: kafka-broker 17 | release: {{ .Release.Name | quote }} 18 | -------------------------------------------------------------------------------- /kafka-cluster/charts/kafka/templates/kafka-stateful-set.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Capabilities.APIVersions.Has "apps/v1beta2" }} 2 | apiVersion: "apps/v1beta2" 3 | kind: StatefulSet 4 | {{- else if .Capabilities.APIVersions.Has "apps/v1beta1" }} 5 | apiVersion: "apps/v1beta1" 6 | kind: StatefulSet 7 | {{- else if .Capabilities.APIVersions.Has "apps/v1alpha1" }} 8 | apiVersion: "apps/v1alpha1" 9 | kind: PetSet 10 | {{- end }} 11 | metadata: 12 | name: {{ .Values.serverName | quote }} 13 | labels: 14 | component: kafka-broker 15 | {{ include "standardLabels" . | indent 4 }} 16 | spec: 17 | selector: 18 | matchLabels: 19 | component: kafka-broker 20 | {{ include "standardLabels" . | indent 6 }} 21 | serviceName: {{ .Values.headlessService | quote }} 22 | replicas: {{ .Values.nodes }} 23 | template: 24 | metadata: 25 | labels: 26 | component: kafka-broker 27 | {{ include "standardLabels" . 
| indent 8 }} 28 | annotations: 29 | {{- if .Values.global.prometheus }} 30 | prometheus.io/scrape: "true" 31 | prometheus.io/path: "/metrics" 32 | prometheus.io/port: "9779" 33 | {{- end }} 34 | spec: 35 | affinity: 36 | podAntiAffinity: 37 | preferredDuringSchedulingIgnoredDuringExecution: 38 | - weight: 1 39 | podAffinityTerm: 40 | labelSelector: 41 | matchExpressions: 42 | - key: "app" 43 | operator: In 44 | values: 45 | - {{ .Values.global.applicationName }} 46 | topologyKey: "kubernetes.io/hostname" 47 | {{- if .Values.nodeSelector }} 48 | nodeSelector: 49 | {{- range $key, $val := .Values.nodeSelector }} 50 | {{ $key }}: {{ $val | quote }} 51 | {{- end}} 52 | {{- end }} 53 | terminationGracePeriodSeconds: 1 54 | containers: 55 | - name: broker 56 | image: {{ if .Values.imageRegistry }}{{ .Values.imageRegistry }}/{{ end }}{{ .Values.image }}:{{ .Values.tag }} 57 | imagePullPolicy: {{ .Values.pullPolicy }} 58 | env: 59 | {{- if .Values.global.jolokia }} 60 | - name: AB_JOLOKIA_PASSWORD_RANDOM 61 | value: "false" 62 | - name: AB_JOLOKIA_USER 63 | valueFrom: 64 | secretKeyRef: 65 | name: {{ .Values.global.applicationName }}-jolokia-secret 66 | key: username 67 | - name: AB_JOLOKIA_PASSWORD 68 | valueFrom: 69 | secretKeyRef: 70 | name: {{ .Values.global.applicationName }}-jolokia-secret 71 | key: password 72 | {{- end }} 73 | - name: KAFKA_OPTS 74 | value: "{{ .Values.jvmFlags }}{{ if .Values.secured }} -Djava.security.auth.login.config=/opt/kafka/auth/kafka-server-jaas.conf{{ end }}" 75 | ports: 76 | - containerPort: 9092 77 | name: kafka 78 | {{- if .Values.global.prometheus }} 79 | - containerPort: 9779 80 | name: prometheus 81 | {{- end }} 82 | {{- if .Values.global.jolokia }} 83 | - containerPort: 8778 84 | name: jolokia 85 | {{- end }} 86 | livenessProbe: 87 | tcpSocket: 88 | port: 9092 89 | initialDelaySeconds: 20 90 | readinessProbe: 91 | tcpSocket: 92 | port: 9092 93 | initialDelaySeconds: 10 94 | volumeMounts: 95 | - name: datadir 96 | mountPath: 
/opt/kafka/data 97 | - name: kafka-service-properties 98 | mountPath: /opt/mounted/config 99 | {{- if .Values.secured }} 100 | - name: kafka-auth 101 | mountPath: /opt/kafka/auth 102 | {{- end }} 103 | volumes: 104 | {{- if not .Values.persistent }} 105 | - name: datadir 106 | emptyDir: {} 107 | {{- end }} 108 | - name: kafka-service-properties 109 | configMap: 110 | name: {{ .Values.global.applicationName }}-kafka-server-properties 111 | {{- if .Values.secured }} 112 | - name: kafka-auth 113 | secret: 114 | secretName: {{ .Values.global.applicationName }}-kafka-broker-auth 115 | {{- end }} 116 | {{- if .Values.global.hawkular }} 117 | - name: hawkular-openshift-agent 118 | configMap: 119 | name: {{ .Values.global.applicationName }}-kafka-hawkular-config 120 | {{- end }} 121 | {{- if .Values.persistent }} 122 | volumeClaimTemplates: 123 | - metadata: 124 | # Claim template name must equal the "datadir" volumeMount name so the StatefulSet binds the PVC 125 | name: datadir 126 | labels: 127 | component: kafka-broker 128 | {{ include "standardLabels" . 
| indent 8 }} 130 | annotations: 131 | volume.alpha.kubernetes.io/storage-class: anything 132 | spec: 133 | accessModes: 134 | - ReadWriteOnce 135 | resources: 136 | requests: 137 | storage: {{ .Values.storageSize | quote }} 138 | {{- end }} 139 | -------------------------------------------------------------------------------- /kafka-cluster/charts/kafka/values.yaml: -------------------------------------------------------------------------------- 1 | ## kafka.image is the name of kafka container image to use 2 | image: nbogojevic/kafka 3 | ## kafka.tag is the tag of kafka container image to use 4 | tag: latest 5 | ## serviceName is the service name used by clients to find the cluster 6 | serviceName: kafka 7 | ## headlessService is the name used by StatefulSet to identify instances 8 | headlessService: broker 9 | ## serverName is the name used by StatefulSet to identify instances 10 | serverName: kafka 11 | ## kafka.nodes is set to number of nodes in kafka cluster 12 | nodes: 3 13 | ## kafka.jvmFlags are default java options to use with kafka 14 | jvmFlags: -Xmx1G 15 | ## kafka.storageSize is the default size of storage when using persistent volumes 16 | storageSize: 2GiB 17 | ## kafka.persistent is set to true to use persitent volumes 18 | persistent: false 19 | ## set to true if clients must authenticate. Only supports plaintext SASL mechanism. 20 | secured: true 21 | ## set to true if communication with kafka should be encrypted. TODO certificate management 22 | # tls: false 23 | # Set to true if topics can be deleted. 24 | deleteTopicEnable: false 25 | # Then name of the admin user. If not specified a random name is generated. 26 | #adminUser: admin 27 | # Then name of the admin password. If not specified a random password is generated. 28 | #adminPassword: secret-password 29 | # Then name of the client user. If not specified a random name is generated. 30 | #clientUser: client 31 | # Then name of the client password. 
If not specified a random password is generated. 32 | #clientPassword: secret-password 33 | # Default timeout in ms when connecting to zookeeper 34 | zookeperTimeout: 6000 35 | ## kafka.nodeSelector can be set with node labels to use when choosing nodes to deploy 36 | nodeSelector: 37 | -------------------------------------------------------------------------------- /kafka-cluster/charts/zookeeper/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /kafka-cluster/charts/zookeeper/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: Zookeeper deployment as stateful set 3 | name: zookeeper 4 | version: 0.1.0 5 | -------------------------------------------------------------------------------- /kafka-cluster/charts/zookeeper/templates/zookeeper-hawkular.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.global.hawkular }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ .Values.global.applicationName }}-zookeeper-hawkular-config 6 | labels: 7 | component: zookeeper 8 | role: metrics 9 | {{- include "standardLabels" . 
| indent 4 }} 10 | data: 11 | hawkular-openshift-agent: | 12 | endpoints: 13 | - type: jolokia 14 | protocol: https 15 | port: 8778 16 | path: /jolokia/ 17 | collection_interval: 30s 18 | tls: 19 | skip_certificate_validation: true 20 | credentials: 21 | username: secret:{{ .Values.global.applicationName }}-jolokia-secret/username 22 | password: secret:{{ .Values.global.applicationName }}-jolokia-secret/password 23 | metrics: 24 | - name: java.lang:type=Memory#HeapMemoryUsage#used 25 | type: gauge 26 | id: jvm.heap.used 27 | units: B 28 | tags: 29 | jvm_metrics: true 30 | jvm_heap: true 31 | {{- end }} -------------------------------------------------------------------------------- /kafka-cluster/charts/zookeeper/templates/zookeeper-headless-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Values.headlessService }} 5 | labels: 6 | component: zookeeper 7 | {{- include "standardLabels" . | indent 4 }} 8 | spec: 9 | ports: 10 | - port: 2888 11 | name: peer 12 | - port: 3888 13 | name: leader-election 14 | clusterIP: None 15 | selector: 16 | app: {{ .Values.global.applicationName }} 17 | component: zookeeper 18 | release: {{ .Release.Name }} 19 | 20 | -------------------------------------------------------------------------------- /kafka-cluster/charts/zookeeper/templates/zookeeper-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Values.serviceName }} 5 | labels: 6 | component: zookeeper 7 | {{- include "standardLabels" . | indent 4 }} 8 | spec: 9 | ports: 10 | - port: 2181 11 | name: client 12 | selector: 13 | component: zookeeper 14 | {{- include "standardLabels" . 
| indent 4 }} 15 | -------------------------------------------------------------------------------- /kafka-cluster/charts/zookeeper/templates/zookeeper-stateful-set.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Capabilities.APIVersions.Has "apps/v1beta2" }} 2 | apiVersion: "apps/v1beta2" 3 | kind: StatefulSet 4 | {{- else if .Capabilities.APIVersions.Has "apps/v1beta1" }} 5 | apiVersion: "apps/v1beta1" 6 | kind: StatefulSet 7 | {{- else if .Capabilities.APIVersions.Has "apps/v1alpha1" }} 8 | apiVersion: "apps/v1alpha1" 9 | kind: PetSet 10 | {{- end }} 11 | metadata: 12 | name: {{ .Values.serverName | quote }} 13 | labels: 14 | component: zookeeper 15 | {{- include "standardLabels" . | indent 4 }} 16 | spec: 17 | selector: 18 | matchLabels: 19 | component: zookeeper 20 | {{ include "standardLabels" . | indent 6 }} 21 | serviceName: {{ .Values.headlessService | quote }} 22 | replicas: {{ .Values.nodes }} 23 | template: 24 | metadata: 25 | labels: 26 | component: zookeeper 27 | {{- include "standardLabels" . 
| indent 8 }} 28 | annotations: 29 | {{- if .Values.global.prometheus }} 30 | prometheus.io/scrape: "true" 31 | prometheus.io/path: "/metrics" 32 | prometheus.io/port: "9779" 33 | {{- end }} 34 | spec: 35 | {{- if .Values.nodeSelector }} 36 | nodeSelector: 37 | {{- range $key, $val := .Values.nodeSelector }} 38 | {{ $key }}: {{ $val | quote }} 39 | {{- end}} 40 | {{- end }} 41 | terminationGracePeriodSeconds: 1 42 | containers: 43 | - name: zookeeper 44 | image: {{ if .Values.imageRegistry }}{{ .Values.imageRegistry }}/{{ end }}{{ .Values.image }}:{{ .Values.tag }} 45 | imagePullPolicy: {{ .Values.pullPolicy }} 46 | env: 47 | {{- if .Values.global.jolokia }} 48 | - name: AB_JOLOKIA_PASSWORD_RANDOM 49 | value: "false" 50 | - name: AB_JOLOKIA_USER 51 | valueFrom: 52 | secretKeyRef: 53 | name: {{ .Values.global.applicationName }}-jolokia-secret 54 | key: username 55 | - name: AB_JOLOKIA_PASSWORD 56 | valueFrom: 57 | secretKeyRef: 58 | name: {{ .Values.global.applicationName }}-jolokia-secret 59 | key: password 60 | {{- end }} 61 | - name: SERVER_JVMFLAGS 62 | value: "{{ .Values.jvmFlags }}" 63 | - name: ZOO_SERVER_COUNT 64 | value: "{{ .Values.nodes }}" 65 | ports: 66 | - containerPort: 2181 67 | name: client 68 | - containerPort: 2888 69 | name: peer 70 | - containerPort: 3888 71 | name: leader-election 72 | {{- if .Values.global.prometheus }} 73 | - containerPort: 9779 74 | name: prometheus 75 | {{- end }} 76 | {{- if .Values.global.jolokia }} 77 | - containerPort: 8778 78 | name: jolokia 79 | {{- end }} 80 | volumeMounts: 81 | - name: datadir 82 | mountPath: /data 83 | # There are defaults in this folder, such as logging config 84 | #- name: conf 85 | # mountPath: /conf 86 | volumes: 87 | #- name: conf 88 | # emptyDir: {} 89 | - name: datadir 90 | emptyDir: {} 91 | {{- if .Values.global.hawkular }} 92 | - name: hawkular-openshift-agent 93 | configMap: 94 | name: {{ .Values.global.applicationName }}-zookeeper-hawkular-config 95 | {{- end }} 96 | 
-------------------------------------------------------------------------------- /kafka-cluster/charts/zookeeper/values.yaml: -------------------------------------------------------------------------------- 1 | ## zookeeper.image is the name of zookeeper container image to use 2 | image: nbogojevic/zookeeper 3 | ## zookeeper.tag is the tag of zookeeper container image to use 4 | tag: latest 5 | ## zookeeper.serviceName is the service name used by clients to find the cluster 6 | serviceName: zookeeper 7 | ## zookeeper.headlessService is the name used by StatefulSet to identify instances 8 | headlessService: zoo 9 | ## zookeeper.nodes is set to number of nodes in zookeeper cluster 10 | nodes: 3 11 | ## zookeeper.jvmFlags are default java options to use with kafka 12 | jvmFlags: -Xmx512m 13 | ## zookeeper.nodeSelector can be set with node labels to use when choosing nodes to deploy 14 | nodeSelector: 15 | -------------------------------------------------------------------------------- /kafka-cluster/templates/_helpers.yaml: -------------------------------------------------------------------------------- 1 | {{- define "standardLabels" }} 2 | app: {{ .Values.global.applicationName }} 3 | release: {{ .Release.Name | quote }} 4 | heritage: {{ .Release.Service | quote }} 5 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 6 | {{- end }} 7 | 8 | {{- $adminUser := default (randAlphaNum 16) .Values.kafkaAdminUser -}} 9 | {{- $adminPassword := default (randAlphaNum 16) .Values.kafkaAdminPassword -}} 10 | {{- $clientUser := default (randAlphaNum 16) .Values.kafkaClientUser -}} 11 | {{- $clientPassword := default (randAlphaNum 16) .Values.kafkaClientPassword -}} 12 | -------------------------------------------------------------------------------- /kafka-cluster/templates/jolokia-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: {{ 
.Values.global.applicationName }}-jolokia-secret 5 | labels: 6 | {{- include "standardLabels" . | indent 4 }} 7 | type: Opaque 8 | data: 9 | username: {{ default "jolokia" .Values.global.jolokiaUsername | b64enc }} 10 | password: {{ default (randAlphaNum 16) .Values.global.jolokiaPassword | b64enc }} 11 | -------------------------------------------------------------------------------- /kafka-cluster/values.yaml: -------------------------------------------------------------------------------- 1 | 2 | 3 | kafka: 4 | ## kafka.imageRegistry is the address of the container image registry to use 5 | # imageRegistry: docker.io 6 | ## kafka.pullPolicy is the kubernetes policy to use for pulling images: Always, IfNotPresent, Never 7 | pullPolicy: IfNotPresent 8 | ## kafka.image is the name of kafka container image to use 9 | image: nbogojevic/kafka 10 | ## kafka.tag is the tag of kafka container image to use 11 | tag: latest 12 | ## kafka.serviceName is the service name used by clients to find the cluster 13 | serviceName: kafka 14 | ## kafka.headlessService is the name of headless service used by StatefulSet 15 | headlessService: broker 16 | ## serverName is the name used by StatefulSet to identify instances 17 | serverName: kafka 18 | ## kafka.nodes is set to number of nodes in kafka cluster 19 | nodes: 3 20 | ## kafka.jvmFlags are default java options to use with kafka 21 | jvmFlags: -Xmx384G 22 | ## kafka.storageSize is the default size of storage when using persistent volumes 23 | storageSize: 2GiB 24 | ## kafka.persistent is set to true to use persitent volumes 25 | persistent: false 26 | ## secured is set to true if clients must authenticate. Only supports plaintext SASL mechanism. 27 | secured: true 28 | ## set to true if communication with kafka should be encrypted. TODO certificate management 29 | # tls: false 30 | # Set to true if topics can be deleted. 31 | deleteTopicEnable: true 32 | # Then name of the admin user. If not specified a random name is generated. 
33 | #adminUser: admin 34 | # Then name of the admin password. If not specified a random password is generated. 35 | #adminPassword: secret-password 36 | ## zookeeperServic the name of zookeper service to use 37 | zookeeperService: zookeeper 38 | ## zookeperTimeout Default timeout in ms when connecting to zookeeper 39 | zookeperTimeout: 6000 40 | ## usernamePoolSize number of predefined users for topic access 41 | usernamePoolSize: 50 42 | ## kafka.nodeSelector can be set with node labels to use when choosing nodes to deploy 43 | nodeSelector: 44 | 45 | zookeeper: 46 | ## zookeeper.imageRegistry is the address of the container image registry to use 47 | # imageRegistry: docker.io 48 | ## kafka.pullPolicy is the kubernetes policy to use for pulling images: Always, IfNotPresent, Never 49 | pullPolicy: IfNotPresent 50 | ## zookeeper.image is the name of zookeeper container image to use 51 | image: nbogojevic/zookeeper 52 | ## zookeeper.tag is the tag of zookeeper container image to use 53 | tag: latest 54 | ## zookeeper.serviceName is the service name used by clients to find the cluster 55 | serviceName: zookeeper 56 | ## zookeeper.headlessService is the name used by StatefulSet to identify instances 57 | headlessService: zoo 58 | ## serverName is the name used by StatefulSet to identify instances 59 | serverName: zoo 60 | ## zookeeper.nodes is set to number of nodes in zookeeper cluster 61 | nodes: 3 62 | ## zookeeper.jvmFlags are default java options to use with kafka 63 | jvmFlags: -Xmx256m 64 | ## zookeeper.nodeSelector can be set with node labels to use when choosing nodes to deploy 65 | nodeSelector: 66 | 67 | kafka-operator: 68 | ## enabled is set to true if kafka-operator should be deployed 69 | enabled: true 70 | ## image is the name of kafka-operator container image to use 71 | image: nbogojevic/kafka-operator 72 | ## pullPolicy is the kubernetes policy to use for pulling images: Always, IfNotPresent, Never 73 | pullPolicy: IfNotPresent 74 | ## tag is the 
tag of kafka container image to use 75 | tag: latest 76 | ## bootstrap is the service name used by discover kafka cluster 77 | bootstrapServer: kafka:9092 78 | ## logLevel is the log level to set for kafka-operator 79 | logLevel: INFO 80 | ## deleteTopics is set to true if operator can delete kafka topics 81 | deleteTopics: true 82 | ## importTopics is set to true if operator should take ownership of all existing kafka topics 83 | importTopics: true 84 | ## manageAcl is set to true if operator should manage topic ACL 85 | manageAcl: true 86 | ## secured is set to true if operator must authenticate. Only supports plaintext SASL mechanism. 87 | secured: true 88 | ## jvmFlags are default java options to use with operator 89 | jvmFlags: -Xmx128M 90 | 91 | global: 92 | ## applicationName is the default name of the application 93 | applicationName: kafka-cluster 94 | 95 | ## hawkular is set to true to enable hawkular metrics 96 | hawkular: false 97 | 98 | ## prometheus is set to true to enable prometheus metrics 99 | prometheus: true 100 | 101 | ## jolokia is set to true if jolokia JMX endpoint should be exposed 102 | jolokia: true 103 | 104 | ## jolokiaUsername is set to the name of jolokia user for basic authentication 105 | jolokiaUsername: jolokia 106 | 107 | ## jolokiaPassword can be set to the password to use. If not specified, a random password is generated 108 | jolokiaPassword: jolokiapwd 109 | -------------------------------------------------------------------------------- /kafka-secrets/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /kafka-secrets/Chart.yaml: -------------------------------------------------------------------------------- 1 | name: kafka-secrets 2 | version: 0.2.0 3 | description: Kafka secrets 4 | keywords: 5 | - kafka 6 | home: https://github.com/nbogojevic/kubernetes-kafka 7 | sources: 8 | - https://github.com/nbogojevic/kubernetes-kafka 9 | maintainers: 10 | - name: Nenad Bogojević 11 | engine: gotpl # The name of the template engine (optional, defaults to gotpl) 12 | appVersion: 1.0.0-1 13 | -------------------------------------------------------------------------------- /kafka-secrets/templates/kafka-jaas.yaml: -------------------------------------------------------------------------------- 1 | {{- define "standardLabels" }} 2 | app: {{ .Values.applicationName }} 3 | release: {{ .Release.Name | quote }} 4 | heritage: {{ .Release.Service | quote }} 5 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 6 | {{- end }} 7 | 8 | {{- $adminUser := default (randAlphaNum 16) .Values.kafkaAdminUser -}} 9 | {{- $adminPassword := default (randAlphaNum 16) .Values.kafkaAdminPassword -}} 10 | {{- $clientUser := default (randAlphaNum 16) .Values.kafkaClientUser -}} 11 | {{- $clientPassword := default (randAlphaNum 16) .Values.kafkaClientPassword -}} 12 | {{ $adminUser := default (randAlphaNum 16) .Values.adminUser }} 13 | {{ $adminPassword := default (randAlphaNum 16) .Values.adminPassword }} 14 | {{ $clientUser := default (randAlphaNum 16) .Values.clientUser }} 15 | {{ $clientPassword := default (randAlphaNum 16) .Values.clientPassword }} 16 | {{- $usernamePool := dict }} 17 | {{- range $i, $e := until (default 50 (int 
.Values.usernamePoolSize)) }} 18 | {{- $_ := set $usernamePool (randAlphaNum 16) (randAlphaNum 16) }} 19 | {{- end }} 20 | apiVersion: v1 21 | kind: Secret 22 | metadata: 23 | name: {{ .Values.applicationName }}-kafka-auth-pool 24 | labels: 25 | component: kafka-operator 26 | role: auth 27 | {{- include "standardLabels" . | indent 4 }} 28 | stringData: 29 | username-pool: | 30 | {{- range $user, $pass := $usernamePool }} 31 | {{ $user }}={{ $pass }} 32 | {{- end }} 33 | --- 34 | apiVersion: v1 35 | kind: Secret 36 | metadata: 37 | name: {{ .Values.applicationName }}-kafka-consumed-auth-pool 38 | labels: 39 | component: kafka-operator 40 | role: auth 41 | {{- include "standardLabels" . | indent 4 }} 42 | stringData: 43 | consumed-usernames: "" 44 | --- 45 | apiVersion: v1 46 | kind: Secret 47 | metadata: 48 | name: {{ .Values.applicationName }}-kafka-broker-auth 49 | labels: 50 | component: kafka-broker 51 | role: auth 52 | {{- include "standardLabels" . | indent 4 }} 53 | stringData: 54 | kafka-server-jaas.conf: | 55 | KafkaServer { 56 | org.apache.kafka.common.security.plain.PlainLoginModule required 57 | username="{{ $adminUser }}" 58 | password="{{ $adminPassword }}" 59 | user_{{ $adminUser }}="{{ $adminPassword }}" 60 | user_{{ $clientUser }}="{{ $clientPassword }}" 61 | {{- range $k, $v := $usernamePool }} 62 | user_{{ $k }}="{{ $v }}" 63 | {{- end }} 64 | ; 65 | }; 66 | --- 67 | apiVersion: v1 68 | kind: Secret 69 | metadata: 70 | name: {{ .Values.applicationName }}-kafka-admin-client-auth 71 | labels: 72 | component: kafka-broker 73 | role: auth 74 | {{- include "standardLabels" . 
| indent 4 }} 75 | stringData: 76 | kafka-admin-client-jaas.conf: | 77 | KafkaClient { 78 | org.apache.kafka.common.security.plain.PlainLoginModule required 79 | username="{{ $adminUser }}" 80 | password="{{ $adminPassword }}"; 81 | }; 82 | --- 83 | apiVersion: v1 84 | kind: Secret 85 | metadata: 86 | name: {{ .Values.applicationName }}-kafka-client-auth 87 | labels: 88 | component: kafka-broker 89 | role: auth 90 | {{- include "standardLabels" . | indent 4 }} 91 | stringData: 92 | kafka-client-jaas.conf: | 93 | KafkaClient { 94 | org.apache.kafka.common.security.plain.PlainLoginModule required 95 | username="{{ $clientUser }}" 96 | password="{{ $clientPassword }}"; 97 | }; 98 | -------------------------------------------------------------------------------- /kafka-secrets/values.yaml: -------------------------------------------------------------------------------- 1 | ## applicationName is the default name of the application 2 | applicationName: kafka-cluster 3 | #adminUser: admin 4 | # Then name of the admin password. If not specified a random password is generated. 
5 | #adminPassword: secret-password 6 | ## zookeeperServic the name of zookeper service to use 7 | zookeeperService: zookeeper 8 | ## zookeperTimeout Default timeout in ms when connecting to zookeeper 9 | zookeperTimeout: 6000 10 | ## usernamePoolSize number of predefined users for topic access 11 | usernamePoolSize: 50 12 | -------------------------------------------------------------------------------- /kafka.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nbogojevic/kubernetes-kafka/d5a8e8328243e000fdf14c68027b9f3a068c46ab/kafka.yaml -------------------------------------------------------------------------------- /openshift/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Updated for OpenShift 3.4 3 | 4 | # Kafka as Kubernetes StatefulSet 5 | 6 | Example of five Kafka brokers depending on five Zookeeper instances. 7 | 8 | To get consistent service DNS names `kafka-N.broker.kafka`(`.svc.cluster.local`), run everything in a single [namespace](http://kubernetes.io/docs/admin/namespaces/walkthrough/). 9 | 10 | ## Set up volume claims 11 | 12 | You may add [storage class](http://kubernetes.io/docs/user-guide/persistent-volumes/#storageclasses) 13 | to the kafka StatefulSet declaration to enable automatic volume provisioning. 14 | 15 | Alternatively create [PV](http://kubernetes.io/docs/user-guide/persistent-volumes/#persistent-volumes)s and [PVC](http://kubernetes.io/docs/user-guide/persistent-volumes/#persistentvolumeclaims)s manually. For example in Minikube. 16 | 17 | ``` 18 | ./bootstrap/pv.sh 19 | oc create -f ./bootstrap/pvc.yml 20 | # check that claims are bound 21 | oc get pvc 22 | ``` 23 | 24 | ## Zookeeper 25 | 26 | Zookeeper runs as a [Stateful Set]. 27 | 28 | If you lose your zookeeper cluster, kafka will be unaware that persisted topics exist. 29 | The data is still there, but you need to re-create topics. 
30 | 31 | ## Start Kafka & Zookeeper 32 | 33 | Assuming you have your PVCs `Bound`, or enabled automatic provisioning (see above), you can use kafka in persistent mode: 34 | ``` 35 | oc process -f kafka-persistent-template.yaml | oc create -f - 36 | ``` 37 | 38 | You can also run kafka in ephemeral storage mode. In this case you rely on kafka replication for data availability. 39 | ``` 40 | oc process -f kafka-ephemeral-template.yaml | oc create -f - 41 | ``` 42 | 43 | You might want to verify in logs that Kafka found its own DNS name(s) correctly. Look for records like: 44 | ``` 45 | oc logs kafka-0 | grep "Registered broker" 46 | # INFO Registered broker 0 at path /brokers/ids/0 with addresses: PLAINTEXT -> EndPoint(kafka-0.broker.kafka.svc.cluster.local,9092,PLAINTEXT) 47 | ``` 48 | 49 | ## Testing manually 50 | 51 | There's a Kafka pod that doesn't start the server, so you can invoke the various shell scripts. 52 | ``` 53 | oc create -f kafka-client.yml 54 | ``` 55 | 56 | See `./test/test.sh` for some sample commands. 57 | 58 | ## Automated test, while going chaosmonkey on the cluster 59 | 60 | This is WIP, but topic creation has been automated. Note that as a [Job](http://kubernetes.io/docs/user-guide/jobs/), it will restart if the command fails, including if the topic exists :( 61 | ``` 62 | oc create -f test/11topic-create-test1.yml 63 | ``` 64 | 65 | Pods that keep consuming messages (but they won't exit on cluster failures) 66 | ``` 67 | oc create -f test/21consumer-test1.yml 68 | ``` 69 | 70 | ## Teardown & cleanup 71 | 72 | Testing and retesting... delete the namespace. PVs are outside namespaces so delete them too. 
73 | ``` 74 | oc delete petset -l app=kafka 75 | oc delete all -l app=kafka 76 | oc delete configmap -l app=kafka 77 | oc delete template -l app=kafka 78 | ``` 79 | 80 | ## Importing template 81 | 82 | Two templates can be imported into openshift using: 83 | 84 | ```bash 85 | oc create -f kafka-ephemeral-template.yaml 86 | oc create -f kafka-persistent-template.yaml 87 | ``` 88 | 89 | ## Customizing Kafka 90 | 91 | It is possible to customize the kafka container to advertise a different host and port. This is done by specifying the following 92 | two environment variables: KAFKA_ADVERTISED_HOST and KAFKA_ADVERTISED_PORT. When they are specified, the kafka 93 | containers will use those values to advertise their address instead of default ones. 94 | 95 | ## Hawkular Support 96 | 97 | ## Inspired by 98 | 99 | Original source code is at: https://github.com/Yolean/kubernetes-kafka 100 | 101 | -------------------------------------------------------------------------------- /openshift/kafka-client.yml: -------------------------------------------------------------------------------- 1 | # Kafka image without the service, so you can run ./bin/ stuff 2 | # oc exec -ti kafka-client -- /bin/bash 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: kafka-client 7 | labels: 8 | app: kafka 9 | component: client 10 | spec: 11 | containers: 12 | - name: kafka 13 | image: nbogojevic/kafka:latest 14 | command: 15 | - sh 16 | - -c 17 | - "exec tail -f /dev/null" 18 | -------------------------------------------------------------------------------- /openshift/kafka-ephemeral-template.yaml: -------------------------------------------------------------------------------- 1 | id: kafka-ephemeral 2 | kind: Template 3 | apiVersion: v1 4 | name: kafka-ephemeral 5 | metadata: 6 | name: kafka-ephemeral 7 | labels: 8 | category: messaging 9 | annotations: 10 | description: | 11 | This template provides a Kafka cluster using ephemeral storage. 12 | The size of the cluster is provided using template parameters. 
13 | tags: kafka,messaging 14 | openshift.io/display-name: Kafka cluster (ephemeral) 15 | iconClass: fa pficon-topology 16 | parameters: 17 | - description: The number of kafka nodes to deploy 18 | displayName: Kafka node count 19 | name: KAFKA_NODES 20 | value: "3" 21 | required: true 22 | - description: The number of zookeeper nodes to deploy 23 | displayName: Zookeeper node count 24 | name: ZOOKEEPER_NODES 25 | value: "5" 26 | required: true 27 | - description: The JVM heap options for Kafka 28 | displayName: Kafka JVM heap options 29 | name: KAFKA_HEAP_OPTS 30 | value: "-Xmx2G" 31 | - description: The JVM options for Zookeper 32 | displayName: Zookeeper JVM options 33 | name: ZOOKEEPER_SERVER_JVMFLAGS 34 | value: "-Xmx512m" 35 | - description: The password used for the Jolokia endpoint authentication 36 | name: JOLOKIA_PASSWORD 37 | generate: expression 38 | from: '[a-zA-Z0-9]{15}' 39 | required: true 40 | - description: The name of the Kafka cluster. Will be used to annotate cluster with label 'app' 41 | displayName: App name for Kafka cluster 42 | name: CLUSTER_APP_NAME 43 | value: "kafka" 44 | required: true 45 | - description: The name of the OpenShift Service exposed for the Kafka cluster. 46 | displayName: Kafka service name 47 | name: KAFKA_SERVICE_NAME 48 | value: "kafka" 49 | required: true 50 | - description: The name of the OpenShift DNS records service used for the Kafka brokers. 51 | displayName: Kafka DNS records service name 52 | name: KAFKA_DNS_SERVICE_NAME 53 | value: "broker" 54 | required: true 55 | - description: The name of the OpenShift Service exposed for the zookeeper cluster. 56 | displayName: Zookeeper service name 57 | name: ZOOKEEPER_SERVICE_NAME 58 | value: "zookeeper" 59 | required: true 60 | - description: The name of the OpenShift DNS records service exposed for the zookeeper cluster. 
61 | displayName: Zookeeper DNS records service name 62 | name: ZOOKEEPER_DNS_SERVICE_NAME 63 | value: "zoo" 64 | required: true 65 | message: | 66 | Kafka cluster has been created. It has ${KAFKA_NODES} kafka brokers and ${ZOOKEEPER_NODES} zookepeer nodes. 67 | The clients can connect to broker from this project using following service 68 | 69 | ${KAFKA_SERVICE_NAME}:9092 70 | 71 | The cluster can be deleted using 72 | 73 | oc delete petset -l app=${CLUSTER_APP_NAME} 74 | oc delete all -l app=${CLUSTER_APP_NAME} 75 | oc delete configmap -l app=${CLUSTER_APP_NAME} 76 | objects: 77 | # Headless service for statefulset 78 | - apiVersion: v1 79 | kind: Service 80 | metadata: 81 | name: ${ZOOKEEPER_DNS_SERVICE_NAME} 82 | labels: 83 | app: ${CLUSTER_APP_NAME} 84 | component: zookeeper 85 | spec: 86 | ports: 87 | - port: 2888 88 | name: peer 89 | - port: 3888 90 | name: leader-election 91 | clusterIP: None 92 | selector: 93 | app: ${CLUSTER_APP_NAME} 94 | component: zookeeper 95 | # the headless service is for statefulset DNS, this one is for clients 96 | - apiVersion: v1 97 | kind: Service 98 | metadata: 99 | name: ${ZOOKEEPER_SERVICE_NAME} 100 | labels: 101 | app: ${CLUSTER_APP_NAME} 102 | component: zookeeper 103 | spec: 104 | ports: 105 | - port: 2181 106 | name: client 107 | selector: 108 | app: ${CLUSTER_APP_NAME} 109 | component: zookeeper 110 | # StatefulSet for zookeeper 111 | - apiVersion: apps/v1alpha1 112 | kind: PetSet 113 | metadata: 114 | name: zoo 115 | labels: 116 | app: ${CLUSTER_APP_NAME} 117 | component: zookeeper 118 | spec: 119 | serviceName: ${ZOOKEEPER_DNS_SERVICE_NAME} 120 | replicas: ${ZOOKEEPER_NODES} 121 | template: 122 | metadata: 123 | labels: 124 | app: ${CLUSTER_APP_NAME} 125 | component: zookeeper 126 | annotations: 127 | pod.alpha.kubernetes.io/initialized: "true" 128 | prometheus.io/scrape: true 129 | prometheus.io/path: /metrics 130 | prometheus.io/port: 5555 131 | spec: 132 | terminationGracePeriodSeconds: 10 133 | containers: 134 | - 
name: zookeeper 135 | image: nbogojevic/zookeeper:latest 136 | imagePullPolicy: Always 137 | env: 138 | - name: AB_JOLOKIA_PASSWORD_RANDOM 139 | value: "false" 140 | - name: AB_JOLOKIA_PASSWORD 141 | value: "${JOLOKIA_PASSWORD}" 142 | - name: SERVER_JVMFLAGS 143 | value: "${ZOOKEEPER_SERVER_JVMFLAGS}" 144 | - name: ZOO_SERVER_COUNT 145 | value: "${ZOOKEEPER_NODES}" 146 | ports: 147 | - containerPort: 2181 148 | name: client 149 | - containerPort: 2888 150 | name: peer 151 | - containerPort: 3888 152 | name: leader-election 153 | - containerPort: 5555 154 | name: prometheus 155 | - containerPort: 8778 156 | name: jolokia 157 | volumeMounts: 158 | - name: datadir 159 | mountPath: /data 160 | volumes: 161 | - name: datadir 162 | emptyDir: {} 163 | - name: hawkular-openshift-agent 164 | configMap: 165 | name: zookeeper-metrics-config 166 | - apiVersion: v1 167 | kind: ConfigMap 168 | metadata: 169 | name: zookeeper-metrics-config 170 | labels: 171 | app: ${CLUSTER_APP_NAME} 172 | component: zookeeper 173 | role: metrics 174 | metrics: hosa 175 | data: 176 | hawkular-openshift-agent: | 177 | endpoints: 178 | - type: jolokia 179 | protocol: https 180 | port: 8778 181 | path: /jolokia/ 182 | collection_interval: 30s 183 | tls: 184 | skip_certificate_validation: true 185 | credentials: 186 | username: jolokia 187 | password: ${JOLOKIA_PASSWORD} 188 | metrics: 189 | - name: java.lang:type=Memory#HeapMemoryUsage#used 190 | type: gauge 191 | id: jvm.heap.used 192 | units: B 193 | tags: 194 | jvm_metrics: true 195 | jvm_heap: true 196 | # A headless service to create DNS records 197 | - apiVersion: v1 198 | kind: Service 199 | metadata: 200 | name: ${KAFKA_DNS_SERVICE_NAME} 201 | labels: 202 | app: ${CLUSTER_APP_NAME} 203 | component: kafka-broker 204 | spec: 205 | ports: 206 | - port: 9092 207 | # [podname].broker.kafka.svc.cluster.local 208 | clusterIP: None 209 | selector: 210 | app: ${CLUSTER_APP_NAME} 211 | component: kafka-broker 212 | # Kafka service for cluster 213 | - 
apiVersion: v1 214 | kind: Service 215 | metadata: 216 | name: ${KAFKA_SERVICE_NAME} 217 | labels: 218 | app: ${CLUSTER_APP_NAME} 219 | component: kafka-broker 220 | annotations: 221 | service.alpha.openshift.io/dependencies: '[{"name":"broker","namespace":"","kind":"Service"}, {"name":"zoo","namespace":"","kind":"Service"}, {"name":"zookeeper","namespace":"","kind":"Service"}]' 222 | spec: 223 | ports: 224 | - port: 9092 225 | selector: 226 | app: ${CLUSTER_APP_NAME} 227 | component: kafka-broker 228 | - apiVersion: apps/v1alpha1 229 | kind: PetSet 230 | metadata: 231 | name: kafka 232 | labels: 233 | app: ${CLUSTER_APP_NAME} 234 | component: kafka-broker 235 | spec: 236 | serviceName: ${KAFKA_DNS_SERVICE_NAME} 237 | replicas: ${KAFKA_NODES} 238 | template: 239 | metadata: 240 | labels: 241 | app: ${CLUSTER_APP_NAME} 242 | component: kafka-broker 243 | annotations: 244 | pod.alpha.kubernetes.io/initialized: "true" 245 | prometheus.io/scrape: true 246 | prometheus.io/path: /metrics 247 | prometheus.io/port: 5555 248 | spec: 249 | terminationGracePeriodSeconds: 10 250 | containers: 251 | - name: broker 252 | image: nbogojevic/kafka:latest 253 | imagePullPolicy: Always 254 | env: 255 | - name: AB_JOLOKIA_PASSWORD_RANDOM 256 | value: "false" 257 | - name: AB_JOLOKIA_PASSWORD 258 | value: "${JOLOKIA_PASSWORD}" 259 | - name: KAFKA_HEAP_OPTS 260 | value: "${KAFKA_HEAP_OPTS}" 261 | ports: 262 | - containerPort: 9092 263 | name: kafka 264 | - containerPort: 5555 265 | name: prometheus 266 | - containerPort: 8778 267 | name: jolokia 268 | volumeMounts: 269 | - name: datadir 270 | mountPath: /opt/kafka/data 271 | volumes: 272 | - name: datadir 273 | emptyDir: {} 274 | - name: hawkular-openshift-agent 275 | configMap: 276 | name: kafka-metrics-confg 277 | - apiVersion: v1 278 | kind: ConfigMap 279 | metadata: 280 | name: kafka-metrics-confg 281 | labels: 282 | app: ${CLUSTER_APP_NAME} 283 | component: kafka-broker 284 | role: metrics 285 | metrics: hosa 286 | data: 287 | 
hawkular-openshift-agent: | 288 | endpoints: 289 | - type: jolokia 290 | protocol: https 291 | port: 8778 292 | path: /jolokia/ 293 | collection_interval: 30s 294 | tls: 295 | skip_certificate_validation: true 296 | credentials: 297 | username: jolokia 298 | password: ${JOLOKIA_PASSWORD} 299 | metrics: 300 | - name: kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec,topic=*#OneMinuteRate 301 | type: gauge 302 | id: kafka.${topic}.BytesInPerSec.rate 303 | description: ${topic} BytesInPerSec rate (1min) 304 | tags: 305 | topic_metrics: true 306 | byte_rate: true 307 | input: true 308 | - name: kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec,topic=*#OneMinuteRate 309 | type: gauge 310 | id: kafka.${topic}.BytesOutPerSec.rate 311 | description: ${topic} BytesOutPerSec rate (1min) 312 | tags: 313 | topic_metrics: true 314 | byte_rate: true 315 | output: true 316 | - name: kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,topic=*#OneMinuteRate 317 | type: gauge 318 | id: kafka.${topic}.MessageInPerSec.rate 319 | description: ${topic} ${name} (1min) 320 | tags: 321 | topic_metrics: true 322 | message_rate: true 323 | - name: java.lang:type=Memory#HeapMemoryUsage#used 324 | type: gauge 325 | id: jvm.heap.used 326 | units: B 327 | tags: 328 | jvm_metrics: true 329 | jvm_heap: true 330 | -------------------------------------------------------------------------------- /openshift/kafka-persistent-template.yaml: -------------------------------------------------------------------------------- 1 | id: kafka-persitent 2 | kind: Template 3 | apiVersion: v1 4 | name: kafka-persitent 5 | metadata: 6 | name: kafka-persitent 7 | labels: 8 | category: messaging 9 | annotations: 10 | description: | 11 | This template provides a Kafka cluster using persitent storage. 12 | The size of the cluster and size of persitent volumes are provided using template parameters. 
13 | tags: "kafka,messaging" 14 | openshift.io/display-name: "Kafka cluster (persitent)" 15 | iconClass: "fa pficon-topology" 16 | parameters: 17 | - description: The number of kafka nodes to deploy 18 | displayName: Kafka node count 19 | name: KAFKA_NODES 20 | value: "3" 21 | required: true 22 | - description: Persistent volume size for Kafka 23 | name: KAFKA_VOLUME_SIZE 24 | default: 5Gi 25 | required: true 26 | - description: The number of zookeeper nodes to deploy 27 | displayName: Zookeeper node count 28 | name: ZOOKEEPER_NODES 29 | value: "5" 30 | required: true 31 | - description: The JVM heap options for Kafka 32 | displayName: Kafka JVM heap options 33 | name: KAFKA_HEAP_OPTS 34 | value: "-Xmx2G" 35 | - description: The JVM options for Zookeper 36 | displayName: Zookeeper JVM options 37 | name: ZOOKEEPER_SERVER_JVMFLAGS 38 | value: "-Xmx512m" 39 | - description: The password used for the Jolokia endpoint authentication 40 | name: JOLOKIA_PASSWORD 41 | generate: expression 42 | from: '[a-zA-Z0-9]{15}' 43 | required: true 44 | - description: The name of the Kafka cluster. Will be used to annotate cluster with label 'app' 45 | displayName: App name for Kafka cluster 46 | name: CLUSTER_APP_NAME 47 | value: "kafka" 48 | required: true 49 | - description: The name of the OpenShift Service exposed for the Kafka cluster. 50 | displayName: Kafka service name 51 | name: KAFKA_SERVICE_NAME 52 | value: "kafka" 53 | required: true 54 | - description: The name of the OpenShift DNS records service used for the Kafka brokers. 55 | displayName: Kafka DNS records service name 56 | name: KAFKA_DNS_SERVICE_NAME 57 | value: "broker" 58 | required: true 59 | - description: The name of the OpenShift Service exposed for the zookeeper cluster. 60 | displayName: Zookeeper service name 61 | name: ZOOKEEPER_SERVICE_NAME 62 | value: "zookeeper" 63 | required: true 64 | - description: The name of the OpenShift DNS records service exposed for the zookeeper cluster. 
65 | displayName: Zookeeper DNS records service name 66 | name: ZOOKEEPER_DNS_SERVICE_NAME 67 | value: "zoo" 68 | required: true 69 | message: | 70 | Kafka cluster has been created. It has ${KAFKA_NODES} kafka brokers and ${ZOOKEEPER_NODES} zookepeer nodes. 71 | The clients can connect to broker from this project using following service 72 | 73 | ${KAFKA_SERVICE_NAME}:9092 74 | 75 | The cluster can be deleted using 76 | 77 | oc delete petset -l app=${CLUSTER_APP_NAME} 78 | oc delete all -l app=${CLUSTER_APP_NAME} 79 | oc delete configmap -l app=${CLUSTER_APP_NAME} 80 | objects: 81 | # Headless service for statefulset 82 | - apiVersion: v1 83 | kind: Service 84 | metadata: 85 | name: ${ZOOKEEPER_DNS_SERVICE_NAME} 86 | labels: 87 | app: ${CLUSTER_APP_NAME} 88 | component: zookeeper 89 | spec: 90 | ports: 91 | - port: 2888 92 | name: peer 93 | - port: 3888 94 | name: leader-election 95 | clusterIP: None 96 | selector: 97 | app: ${CLUSTER_APP_NAME} 98 | component: zookeeper 99 | # the headless service is for statefulset DNS, this one is for clients 100 | - apiVersion: v1 101 | kind: Service 102 | metadata: 103 | name: ${ZOOKEEPER_SERVICE_NAME} 104 | labels: 105 | app: ${CLUSTER_APP_NAME} 106 | component: zookeeper 107 | spec: 108 | ports: 109 | - port: 2181 110 | name: client 111 | selector: 112 | app: ${CLUSTER_APP_NAME} 113 | component: zookeeper 114 | # StatefulSet for zookeeper 115 | - apiVersion: apps/v1alpha1 116 | kind: PetSet 117 | metadata: 118 | name: ${ZOOKEEPER_DNS_SERVICE_NAME} 119 | labels: 120 | app: ${CLUSTER_APP_NAME} 121 | component: zookeeper 122 | spec: 123 | serviceName: ${ZOOKEEPER_DNS_SERVICE_NAME} 124 | replicas: ${ZOOKEEPER_NODES} 125 | template: 126 | metadata: 127 | labels: 128 | app: ${CLUSTER_APP_NAME} 129 | component: zookeeper 130 | annotations: 131 | pod.alpha.kubernetes.io/initialized: "true" 132 | prometheus.io/scrape: true 133 | prometheus.io/path: /metrics 134 | prometheus.io/port: 5555 135 | spec: 136 | nodeSelector: ${{ 
NODE_SELECTOR }} 137 | terminationGracePeriodSeconds: 10 138 | containers: 139 | - name: zookeeper 140 | image: nbogojevic/zookeeper:latest 141 | imagePullPolicy: Always 142 | env: 143 | - name: AB_JOLOKIA_PASSWORD_RANDOM 144 | value: "false" 145 | - name: AB_JOLOKIA_PASSWORD 146 | value: "${JOLOKIA_PASSWORD}" 147 | - name: SERVER_JVMFLAGS 148 | value: "${ZOOKEEPER_SERVER_JVMFLAGS}" 149 | - name: ZOO_SERVER_COUNT 150 | value: "${ZOOKEEPER_NODES}" 151 | ports: 152 | - containerPort: 2181 153 | name: client 154 | - containerPort: 2888 155 | name: peer 156 | - containerPort: 3888 157 | name: leader-election 158 | - containerPort: 5555 159 | name: prometheus 160 | - containerPort: 8778 161 | name: jolokia 162 | volumeMounts: 163 | - name: datadir 164 | mountPath: /data 165 | # There are defaults in this folder, such as logging config 166 | #- name: conf 167 | # mountPath: /conf 168 | volumes: 169 | #- name: conf 170 | # emptyDir: {} 171 | - name: datadir 172 | emptyDir: {} 173 | - name: hawkular-openshift-agent 174 | configMap: 175 | name: zookeeper-metrics-config 176 | - apiVersion: v1 177 | kind: ConfigMap 178 | metadata: 179 | name: zookeeper-metrics-config 180 | labels: 181 | app: ${CLUSTER_APP_NAME} 182 | component: zookeeper 183 | role: metrics 184 | metrics: hosa 185 | data: 186 | hawkular-openshift-agent: | 187 | endpoints: 188 | - type: jolokia 189 | protocol: https 190 | port: 8778 191 | path: /jolokia/ 192 | collection_interval: 30s 193 | tls: 194 | skip_certificate_validation: true 195 | credentials: 196 | username: jolokia 197 | password: ${JOLOKIA_PASSWORD} 198 | metrics: 199 | - name: java.lang:type=Memory#HeapMemoryUsage#used 200 | type: gauge 201 | id: jvm.heap.used 202 | units: B 203 | tags: 204 | jvm_metrics: true 205 | jvm_heap: true 206 | # A headless service to create DNS records 207 | - apiVersion: v1 208 | kind: Service 209 | metadata: 210 | name: ${KAFKA_DNS_SERVICE_NAME} 211 | labels: 212 | app: ${CLUSTER_APP_NAME} 213 | component: 
kafka-broker 214 | spec: 215 | ports: 216 | - port: 9092 217 | # [podname].broker.kafka.svc.cluster.local 218 | clusterIP: None 219 | selector: 220 | app: ${CLUSTER_APP_NAME} 221 | component: kafka-broker 222 | # Kafka service for cluster 223 | - apiVersion: v1 224 | kind: Service 225 | metadata: 226 | name: ${KAFKA_SERVICE_NAME} 227 | labels: 228 | app: ${CLUSTER_APP_NAME} 229 | component: kafka-broker 230 | annotations: 231 | service.alpha.openshift.io/dependencies: '[{"name":"broker","namespace":"","kind":"Service"}, {"name":"zoo","namespace":"","kind":"Service"}, {"name":"zookeeper","namespace":"","kind":"Service"}]' 232 | spec: 233 | ports: 234 | - port: 9092 235 | selector: 236 | app: ${CLUSTER_APP_NAME} 237 | component: kafka-broker 238 | - apiVersion: apps/v1alpha1 239 | kind: PetSet 240 | metadata: 241 | name: kafka 242 | labels: 243 | app: ${CLUSTER_APP_NAME} 244 | component: kafka-broker 245 | spec: 246 | serviceName: ${KAFKA_DNS_SERVICE_NAME} 247 | replicas: ${KAFKA_NODES} 248 | template: 249 | metadata: 250 | labels: 251 | app: ${CLUSTER_APP_NAME} 252 | component: kafka-broker 253 | annotations: 254 | pod.alpha.kubernetes.io/initialized: "true" 255 | prometheus.io/scrape: true 256 | prometheus.io/path: /metrics 257 | prometheus.io/port: 5555 258 | spec: 259 | nodeSelector: ${{ NODE_SELECTOR }} 260 | terminationGracePeriodSeconds: 10 261 | containers: 262 | - name: broker 263 | image: nbogojevic/kafka:latest 264 | imagePullPolicy: Always 265 | env: 266 | - name: AB_JOLOKIA_PASSWORD_RANDOM 267 | value: "false" 268 | - name: AB_JOLOKIA_PASSWORD 269 | value: "${JOLOKIA_PASSWORD}" 270 | - name: KAFKA_HEAP_OPTS 271 | value: "${KAFKA_HEAP_OPTS}" 272 | ports: 273 | - containerPort: 9092 274 | name: kafka 275 | - containerPort: 5555 276 | name: prometheus 277 | - containerPort: 8778 278 | name: jolokia 279 | volumeMounts: 280 | - name: datadir 281 | mountPath: /opt/kafka/data 282 | volumes: 283 | - name: datadir 284 | emptyDir: {} 285 | - name: 
hawkular-openshift-agent 286 | configMap: 287 | name: kafka-metrics-confg 288 | volumeClaimTemplates: 289 | - metadata: 290 | name: datadir 291 | labels: 292 | app: ${CLUSTER_APP_NAME} 293 | component: kafka-broker 294 | annotations: 295 | volume.alpha.kubernetes.io/storage-class: anything 296 | spec: 297 | accessModes: 298 | - ReadWriteOnce 299 | resources: 300 | requests: 301 | storage: ${KAFKA_VOLUME_SIZE} 302 | - apiVersion: v1 303 | kind: ConfigMap 304 | metadata: 305 | name: kafka-metrics-confg 306 | labels: 307 | app: ${CLUSTER_APP_NAME} 308 | component: kafka-broker 309 | role: metrics 310 | metrics: hosa 311 | data: 312 | hawkular-openshift-agent: | 313 | endpoints: 314 | - type: jolokia 315 | protocol: https 316 | port: 8778 317 | path: /jolokia/ 318 | collection_interval: 30s 319 | tls: 320 | skip_certificate_validation: true 321 | credentials: 322 | username: jolokia 323 | password: ${JOLOKIA_PASSWORD} 324 | metrics: 325 | - name: kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec,topic=*#OneMinuteRate 326 | type: gauge 327 | id: kafka.${topic}.BytesInPerSec.rate 328 | description: ${topic} BytesInPerSec rate (1min) 329 | tags: 330 | topic_metrics: true 331 | byte_rate: true 332 | input: true 333 | - name: kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec,topic=*#OneMinuteRate 334 | type: gauge 335 | id: kafka.${topic}.BytesOutPerSec.rate 336 | description: ${topic} BytesOutPerSec rate (1min) 337 | tags: 338 | topic_metrics: true 339 | byte_rate: true 340 | output: true 341 | - name: kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec,topic=*#OneMinuteRate 342 | type: gauge 343 | id: kafka.${topic}.MessageInPerSec.rate 344 | description: ${topic} ${name} (1min) 345 | tags: 346 | topic_metrics: true 347 | message_rate: true 348 | - name: java.lang:type=Memory#HeapMemoryUsage#used 349 | type: gauge 350 | id: jvm.heap.used 351 | units: B 352 | tags: 353 | jvm_metrics: true 354 | jvm_heap: true 355 | 356 | 
-------------------------------------------------------------------------------- /test/11topic-create-test1.yml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: topic-create-test1 5 | spec: 6 | template: 7 | metadata: 8 | name: topic-create-test1 9 | spec: 10 | containers: 11 | - name: kafka 12 | image: nbogojevic/kafka:latest 13 | command: 14 | - ./bin/kafka-topics.sh 15 | - --zookeeper 16 | - zookeeper:2181 17 | - --create 18 | - --topic 19 | - test1 20 | - --partitions 21 | - "1" 22 | - --replication-factor 23 | - "1" 24 | restartPolicy: Never 25 | -------------------------------------------------------------------------------- /test/12topic-create-test2.yml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: topic-create-test2 5 | spec: 6 | template: 7 | metadata: 8 | name: topic-create-test2 9 | spec: 10 | containers: 11 | - name: kafka 12 | image: nbogojevic/kafka:latest 13 | command: 14 | - ./bin/kafka-topics.sh 15 | - --zookeeper 16 | - zookeeper:2181 17 | - --create 18 | - --topic 19 | - test2 20 | - --partitions 21 | - "1" 22 | - --replication-factor 23 | - "3" 24 | restartPolicy: Never 25 | -------------------------------------------------------------------------------- /test/21consumer-test1.yml: -------------------------------------------------------------------------------- 1 | apiVersion: app/v1beta2 2 | kind: Deployment 3 | metadata: 4 | name: consumer-test1 5 | spec: 6 | replicas: 1 7 | template: 8 | metadata: 9 | labels: 10 | app: consumer 11 | scope: test 12 | topic: test1 13 | spec: 14 | containers: 15 | - name: kafka 16 | image: nbogojevic/kafka:latest 17 | command: 18 | - ./bin/kafka-console-consumer.sh 19 | - --zookeeper 20 | - zookeeper:2181 21 | - --topic 22 | - test1 23 | - --from-beginning 24 | 
-------------------------------------------------------------------------------- /test/test.sh: -------------------------------------------------------------------------------- 1 | # Zookeeper tests 2 | oc exec zoo-0 -- /opt/zookeeper/bin/zkCli.sh create /foo bar; 3 | oc exec zoo-2 -- /opt/zookeeper/bin/zkCli.sh get /foo; 4 | 5 | # List topics 6 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --list 7 | 8 | # Create topic 9 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --topic kafka-extract30 --create --partitions 30 --replication-factor 2 10 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --topic kafka-transform30 --create --partitions 30 --replication-factor 2 11 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --topic kafka-store30 --create --partitions 30 --replication-factor 2 12 | 13 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --topic test2 --create --partitions 12 --replication-factor 1 14 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --topic test32-1replica --create --partitions 32 --replication-factor 1 15 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --topic test32-2replica --create --partitions 32 --replication-factor 2 16 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --topic testFwd-1replica --create --partitions 32 --replication-factor 1 17 | 18 | # Set one of your terminals to listen to messages on the test topic 19 | oc exec -ti kafka-client -- ./bin/kafka-console-consumer.sh --zookeeper zookeeper:2181 --topic test2 --from-beginning 20 | 21 | # Go ahead and produce messages 22 | echo "Write a message followed by enter, exit using Ctrl+C" 23 | oc exec -ti kafka-client -- ./bin/kafka-console-producer.sh --broker-list 
kafka-0.broker.kafkatest.svc.cluster.local:9092 --topic test1 24 | 25 | # Bootstrap even if two nodes are down (shorter name requires same namespace) 26 | oc exec -ti kafka-client -- ./bin/kafka-console-producer.sh --broker-list kafka-0.broker:9092,kafka-1.broker:9092,kafka-2.broker:9092 --topic test1 27 | 28 | # The following commands run in the pod 29 | oc exec -ti kafka-client -- /bin/bash 30 | 31 | # Topic 2, replicated 32 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --describe --topic test2 33 | 34 | ./bin/kafka-verifiable-consumer.sh \ 35 | --broker-list=kafka-0.broker.kafkatest.svc.cluster.local:9092,kafka-1.broker.kafkatest.svc.cluster.local:9092 \ 36 | --topic=test2 --group-id=A --verbose 37 | 38 | # If a topic isn't available this producer will tell you 39 | # WARN Error while fetching metadata with correlation id X : {topicname=LEADER_NOT_AVAILABLE} 40 | # ... but with current config Kafka will auto-create the topic 41 | ./bin/kafka-verifiable-producer.sh \ 42 | --broker-list=kafka-0.broker.kafkatest.svc.cluster.local:9092,kafka-1.broker.kafkatest.svc.cluster.local:9092 \ 43 | --value-prefix=1 --topic=test2 \ 44 | --acks=1 --throughput=1 --max-messages=10 45 | 46 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --topic kafka-extract30 --create --partitions 30 --replication-factor 2 47 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --topic kafka-transform30 --create --partitions 30 --replication-factor 2 48 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --topic kafka-store30 --create --partitions 30 --replication-factor 2 49 | 50 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --alter --topic kafka-extract30 --config retention.bytes=1073741824 51 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --alter --topic 
kafka-extract30 --config retention.ms=1000000 52 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --alter --topic kafka-extract30 --config compression.type=producer 53 | 54 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --alter --topic kafka-transform30 --config retention.bytes=1073741824 55 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --alter --topic kafka-transform30 --config retention.ms=1000000 56 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --alter --topic kafka-transform30 --config compression.type=producer 57 | 58 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --alter --topic kafka-store30 --config retention.bytes=1073741824 59 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --alter --topic kafka-store30 --config retention.ms=1000000 60 | oc exec -n kafkatest kafka-client -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --alter --topic kafka-store30 --config compression.type=producer 61 | 62 | --------------------------------------------------------------------------------