├── .gitignore ├── Dockerfile ├── README.md ├── docker-compose.yml ├── entrypoint.sh └── system-config.properties /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:8-bullseye 2 | 3 | ENV KE_HOME=/opt/efak 4 | ENV EFAK_VERSION=3.0.1 5 | # Set config defaults 6 | ENV EFAK_CLUSTER_JMX_ACL=false 7 | ENV EFAK_CLUSTER_JMX_USER=keadmin 8 | ENV EFAK_CLUSTER_JMX_PASSWORD=keadmin123 9 | ENV EFAK_CLUSTER_JMX_SSL=false 10 | ENV EFAK_CLUSTER_JMX_TRUSTSTORE_LOCATION='/Users/dengjie/workspace/ssl/certificates/kafka.truststore' 11 | ENV EFAK_CLUSTER_JMX_TRUSTSTORE_PASSWORD=ke123456 12 | ENV EFAK_CLUSTER_JMX_URI='service:jmx:rmi:///jndi/rmi://%s/jmxrmi' 13 | ENV EFAK_CLUSTER_KAFKA_EAGLE_BROKER_SIZE=1 14 | ENV EFAK_CLUSTER_KAFKA_EAGLE_OFFSET_STORAGE=kafka 15 | ENV EFAK_CLUSTER_KAFKA_EAGLE_SASL_ENABLE=false 16 | ENV EFAK_CLUSTER_KAFKA_EAGLE_SASL_PROTOCOL=SASL_PLAINTEXT 17 | ENV EFAK_CLUSTER_KAFKA_EAGLE_SASL_MECHANISM=SCRAM-SHA-256 18 | ENV EFAK_CLUSTER_KAFKA_EAGLE_SASL_JAAS_CONFIG='org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="admin-secret";' 19 | ENV EFAK_CLUSTER_KAFKA_EAGLE_SASL_CGROUP_ENABLE=false 20 | ENV EFAK_CLUSTER_KAFKA_EAGLE_SASL_CGROUP_TOPICS=kafka_ads01,kafka_ads02 21 | ENV EFAK_CLUSTER_ZK_LIST=zookeeper:2181 22 | ENV EFAK_DB_DRIVER=org.sqlite.JDBC 23 | ENV EFAK_DB_USERNAME=root 24 | ENV EFAK_DB_PASSWORD=smartloli 25 | ENV EFAK_DB_URL=jdbc:sqlite:/hadoop/efak/db/ke.db 26 | ENV EFAK_KAFKA_CLUSTER_ALIAS='cluster' 27 | ENV EFAK_KAFKA_ZK_LIMIT_SIZE=25 28 | ENV EFAK_METRICS_CHARTS=false 29 | ENV EFAK_METRICS_RETAIN=30 30 | ENV EFAK_SQL_DISTRIBUTED_MODE_ENABLE=FALSE 31 | ENV EFAK_SQL_FIX_ERROR=false 32 | ENV EFAK_SQL_TOPIC_PREVIEW_RECORDS_MAX=10 33 | ENV EFAK_SQL_TOPIC_RECORDS_MAX=5000 
34 | ENV EFAK_SQL_WORKNODE_PORT=8787 35 | ENV EFAK_SQL_WORKNODE_RPC_TIMEOUT=300000 36 | ENV EFAK_SQL_WORKNODE_SERVER_PATH='/Users/dengjie/workspace/kafka-eagle-plus/kafka-eagle-common/src/main/resources/works' 37 | ENV EFAK_SQL_WORKNODE_FETCH_THRESHOLD=5000 38 | ENV EFAK_SQL_WORKNODE_FETCH_TIMEOUT=20000 39 | ENV EFAK_TOPIC_TOKEN=keadmin 40 | ENV EFAK_WEBUI_PORT=8048 41 | ENV EFAK_ZK_ACL_ENABLE=false 42 | ENV EFAK_ZK_ACL_SCHEMA=digest 43 | ENV EFAK_ZK_ACL_USERNAME=test 44 | ENV EFAK_ZK_ACL_PASSWORD=test123 45 | ENV EFAK_ZK_CLUSTER_ALIAS='cluster' 46 | 47 | 48 | ADD system-config.properties /tmp 49 | ADD entrypoint.sh /usr/bin 50 | 51 | #RUN apk --update add wget gettext tar bash sqlite 52 | RUN apt-get update && apt-get upgrade -y && apt-get install -y sqlite3 gettext 53 | 54 | #get and unpack kafka eagle 55 | RUN mkdir -p /opt/efak/conf;cd /opt && \ 56 | wget https://github.com/smartloli/kafka-eagle-bin/archive/v${EFAK_VERSION}.tar.gz && \ 57 | tar zxvf v${EFAK_VERSION}.tar.gz -C efak --strip-components 1 && \ 58 | cd efak;tar zxvf efak-web-${EFAK_VERSION}-bin.tar.gz --strip-components 1 && \ 59 | rm efak-web-${EFAK_VERSION}-bin.tar.gz && \ 60 | chmod +x /opt/efak/bin/ke.sh 61 | 62 | EXPOSE 8048 8080 63 | 64 | ENTRYPOINT ["entrypoint.sh"] 65 | 66 | WORKDIR /opt/efak 67 | 68 | 69 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # docker-kafka-eagle 2 | [Eagle for Apache Kafka](https://www.kafka-eagle.org/) for docker 3 | EFAK is an easy and high-performance Kafka monitoring system. 4 | 5 | ## Announcement :loudspeaker: 6 | I have created a new [dockerhub repo](https://hub.docker.com/r/nickzurich/efak) with the new name (EFAK) 7 | There is no hurry to migrate, I will keep kafka-eagle alive as long as there are image pulls. 8 | For now both repos will receive the same images / updates. 
9 | 10 | ## Supported tags 11 | You can find the pre-built docker images [on dockerhub](https://hub.docker.com/r/nickzurich/kafka-eagle) 12 | Supported tags are: 13 | - [latest](https://github.com/nick-zh/docker-kafka-eagle/blob/main/Dockerfile) 14 | - [3.0.1](https://github.com/nick-zh/docker-kafka-eagle/blob/3.0.1/Dockerfile) 15 | - [2.1.0](https://github.com/nick-zh/docker-kafka-eagle/blob/2.1.0/Dockerfile) 16 | - [2.0.9](https://github.com/nick-zh/docker-kafka-eagle/blob/2.0.9/Dockerfile) 17 | - [2.0.8](https://github.com/nick-zh/docker-kafka-eagle/blob/2.0.8/Dockerfile) 18 | - [2.0.7](https://github.com/nick-zh/docker-kafka-eagle/blob/2.0.7/Dockerfile) 19 | - [2.0.6](https://github.com/nick-zh/docker-kafka-eagle/blob/2.0.6/Dockerfile) 20 | - [2.0.5](https://github.com/nick-zh/docker-kafka-eagle/blob/2.0.5/Dockerfile) 21 | - [2.0.4](https://github.com/nick-zh/docker-kafka-eagle/blob/2.0.4/Dockerfile) 22 | 23 | ## Test locally 24 | 1. Install docker and docker-compose: 25 | 2. Run the following command 26 | ``` 27 | docker-compose up 28 | ``` 29 | Then visit this url in your browser: 30 | ``` 31 | http://localhost:8048/ 32 | Test user/password: admin/123456 33 | ``` 34 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.1' 2 | services: 3 | zookeeper: 4 | image: confluentinc/cp-zookeeper:6.1.1 5 | ports: 6 | - "2181:2181" 7 | environment: 8 | ZOOKEEPER_CLIENT_PORT: 2181 9 | ZOOKEEPER_TICK_TIME: 2000 10 | KAFKA_JMX_PORT: 39999 11 | kafka: 12 | image: confluentinc/cp-kafka:6.1.1 13 | depends_on: 14 | - zookeeper 15 | ports: 16 | - "9092:9092" 17 | environment: 18 | KAFKA_BROKER_ID: 1 19 | KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' 20 | KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka:9092' 21 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 22 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 23 | KAFKA_JMX_PORT: 49999 24 | eagle: 
25 | build: 26 | context: . 27 | environment: 28 | EFAK_CLUSTER_ZK_LIST: zookeeper:2181 29 | depends_on: 30 | - kafka 31 | ports: 32 | - 8048:8048 33 | -------------------------------------------------------------------------------- /entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | prefix="jdbc:sqlite:" 3 | if [[ "$EFAK_DB_URL" == *"$prefix"* ]]; then 4 | db_dir=$(echo ${EFAK_DB_URL#${prefix}} | sed -e "s/\/[^\/]*$//") 5 | mkdir -p $db_dir 6 | fi 7 | envsubst < "/tmp/system-config.properties" > "/opt/efak/conf/system-config.properties" 8 | /opt/efak/bin/ke.sh start 9 | tail -f /opt/efak/kms/logs/catalina.out 10 | -------------------------------------------------------------------------------- /system-config.properties: -------------------------------------------------------------------------------- 1 | ###################################### 2 | # multi zookeeper & kafka cluster list 3 | # Settings prefixed with 'kafka.eagle.' will be deprecated, use 'efak.' 
instead 4 | ###################################### 5 | efak.zk.cluster.alias=cluster 6 | cluster.zk.list=${EFAK_CLUSTER_ZK_LIST} 7 | 8 | ###################################### 9 | # zookeeper enable acl 10 | ###################################### 11 | cluster.zk.acl.enable=${EFAK_ZK_ACL_ENABLE} 12 | cluster.zk.acl.schema=${EFAK_ZK_ACL_SCHEMA} 13 | cluster.zk.acl.username=${EFAK_ZK_ACL_USERNAME} 14 | cluster.zk.acl.password=${EFAK_ZK_ACL_PASSWORD} 15 | 16 | ###################################### 17 | # kraft broker 18 | ###################################### 19 | efak.kafka.cluster.alias=${EFAK_KAFKA_CLUSTER_ALIAS} 20 | 21 | ###################################### 22 | # broker size online list 23 | ###################################### 24 | cluster.efak.broker.size=${EFAK_CLUSTER_KAFKA_EAGLE_BROKER_SIZE} 25 | 26 | ###################################### 27 | # zk client thread limit 28 | # Zookeeper cluster allows the number of clients to connect to 29 | ###################################### 30 | kafka.zk.limit.size=${EFAK_KAFKA_ZK_LIMIT_SIZE} 31 | 32 | ###################################### 33 | # EFAK webui port 34 | ###################################### 35 | efak.webui.port=${EFAK_WEBUI_PORT} 36 | 37 | ###################################### 38 | # kafka jmx acl and ssl authenticate 39 | ###################################### 40 | cluster.efak.jmx.acl=${EFAK_CLUSTER_JMX_ACL} 41 | cluster.efak.jmx.user=${EFAK_CLUSTER_JMX_USER} 42 | cluster.efak.jmx.password=${EFAK_CLUSTER_JMX_PASSWORD} 43 | cluster.efak.jmx.ssl=${EFAK_CLUSTER_JMX_SSL} 44 | cluster.efak.jmx.truststore.location=${EFAK_CLUSTER_JMX_TRUSTSTORE_LOCATION} 45 | cluster.efak.jmx.truststore.password=${EFAK_CLUSTER_JMX_TRUSTSTORE_PASSWORD} 46 | 47 | ###################################### 48 | # kafka offset storage 49 | ###################################### 50 | cluster.efak.offset.storage=${EFAK_CLUSTER_KAFKA_EAGLE_OFFSET_STORAGE} 51 | 52 | # If offset is out of range occurs, enable this property -- Only 
suitable for kafka sql 53 | efak.sql.fix.error=${EFAK_SQL_FIX_ERROR} 54 | 55 | ###################################### 56 | # kafka jmx uri 57 | ###################################### 58 | cluster.efak.jmx.uri=${EFAK_CLUSTER_JMX_URI} 59 | 60 | ###################################### 61 | # kafka metrics, 30 days by default 62 | ###################################### 63 | 64 | # Whether the Kafka performance monitoring diagram is enabled 65 | efak.metrics.charts=${EFAK_METRICS_CHARTS} 66 | 67 | # Kafka Eagle keeps data for 30 days by default 68 | efak.metrics.retain=${EFAK_METRICS_RETAIN} 69 | 70 | ###################################### 71 | # kafka sql topic records max 72 | ###################################### 73 | efak.sql.topic.records.max=${EFAK_SQL_TOPIC_RECORDS_MAX} 74 | efak.sql.topic.preview.records.max=${EFAK_SQL_TOPIC_PREVIEW_RECORDS_MAX} 75 | efak.sql.worknode.port=${EFAK_SQL_WORKNODE_PORT} 76 | efak.sql.distributed.enable=${EFAK_SQL_DISTRIBUTED_MODE_ENABLE} 77 | efak.sql.worknode.rpc.timeout=${EFAK_SQL_WORKNODE_RPC_TIMEOUT} 78 | efak.sql.worknode.fetch.threshold=${EFAK_SQL_WORKNODE_FETCH_THRESHOLD} 79 | efak.sql.worknode.fetch.timeout=${EFAK_SQL_WORKNODE_FETCH_TIMEOUT} 80 | efak.sql.worknode.server.path=${EFAK_SQL_WORKNODE_SERVER_PATH} 81 | 82 | ###################################### 83 | # delete kafka topic token 84 | # Set to delete the topic token, so that administrators can have the right to delete 85 | ###################################### 86 | efak.topic.token=${EFAK_TOPIC_TOKEN} 87 | 88 | ###################################### 89 | # kafka sasl authenticate 90 | ###################################### 91 | cluster.efak.sasl.enable=${EFAK_CLUSTER_KAFKA_EAGLE_SASL_ENABLE} 92 | cluster.efak.sasl.protocol=${EFAK_CLUSTER_KAFKA_EAGLE_SASL_PROTOCOL} 93 | cluster.efak.sasl.mechanism=${EFAK_CLUSTER_KAFKA_EAGLE_SASL_MECHANISM} 94 | cluster.efak.sasl.jaas.config=${EFAK_CLUSTER_KAFKA_EAGLE_SASL_JAAS_CONFIG} 95 | # If not set, the value can be empty 96 | 
cluster.efak.sasl.client.id=${EFAK_CLUSTER_KAFKA_EAGLE_SASL_CLIENT_ID} 97 | # Add kafka cluster cgroups 98 | cluster.efak.sasl.cgroup.enable=${EFAK_CLUSTER_KAFKA_EAGLE_SASL_CGROUP_ENABLE} 99 | cluster.efak.sasl.cgroup.topics=${EFAK_CLUSTER_KAFKA_EAGLE_SASL_CGROUP_TOPICS} 100 | 101 | ###################################### 102 | # kafka jdbc driver address 103 | # Default use sqlite to store data 104 | ###################################### 105 | efak.driver=${EFAK_DB_DRIVER} 106 | # It is important to note that the '/hadoop/kafka-eagle/db' path must exist. 107 | efak.url=${EFAK_DB_URL} 108 | efak.username=${EFAK_DB_USERNAME} 109 | efak.password=${EFAK_DB_PASSWORD} 110 | --------------------------------------------------------------------------------