├── .env ├── .gitignore ├── KerberosCheatsheet.md ├── README.md ├── TlsCheatsheet.md ├── acls ├── docker-compose.yaml ├── kafka │ ├── Dockerfile │ ├── admin.conf │ ├── consumer.conf │ ├── kafka.conf │ ├── kafka.sasl.jaas.conf │ ├── kafkacat.conf │ ├── log4j.properties.template │ └── producer.conf ├── up └── zookeeper.sasl.jaas.conf ├── apache-kafka-with-zk3.5-and-tls ├── .gitignore ├── README.md ├── docker-compose.yml ├── kafka │ ├── Dockerfile │ └── server.properties ├── up └── zookeeper │ ├── Dockerfile │ ├── tlsZkCli.sh │ └── zoo.cfg ├── auditlog ├── README.md ├── config │ └── delete-records.json ├── data │ └── my_msgs.txt ├── docker-compose.yml ├── example-config.json ├── kafka │ ├── consumer-user.properties │ ├── kafka-user.properties │ ├── kafka.properties │ ├── kafka.sasl.jaas.config │ ├── log4j.properties │ ├── producer-user.properties │ └── tools-log4j.properties ├── scripts │ ├── create-topics.sh │ ├── delete-records.sh │ ├── describe-topics.sh │ ├── explore-audit-topic.sh │ └── write-msg.sh ├── up └── zookeeper │ ├── log4j.properties │ ├── tools-log4j.properties │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── ca-builder-scripts ├── .gitignore ├── README.md ├── build-a-batch-of-certs.sh ├── build-a-batch-of-stores.sh ├── configs │ ├── batch-of-certs.txt │ ├── batch-of-stores.txt │ ├── ca-config-vars │ ├── ca.config │ └── intermediate-ca.config ├── create-crl.sh ├── create-pair-certs.sh ├── del-cert.sh ├── revoke-cert.sh ├── setup-ca-with-intermediate-ca.sh ├── support-scripts │ ├── build-ca.sh │ └── create-cert.sh └── utils │ ├── build-ca.sh │ ├── build-intermediate-ca.sh │ └── functions.sh ├── delegation_tokens ├── .gitignore ├── ca.cnf ├── client.cnf ├── docker-compose.yml ├── kafka │ ├── Dockerfile │ ├── confluent.repo │ ├── consumer.properties │ ├── create_client_properties.sh │ ├── kafka_server_jaas.conf │ └── server.properties ├── server.cnf ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── kafka-connect-mtls ├── .gitignore ├── README.md ├── check-ssl-client-auth.sh ├── connect │ ├── config │ │ ├── ca.cnf │ │ └── client.cnf │ └── secrets │ │ ├── ca-chain.cert.pem │ │ ├── connect.cert.pem │ │ ├── connect.key.pem │ │ ├── server.keystore │ │ └── server.truststore ├── docker-compose.yml └── up ├── kerberos-multi-node ├── README.md ├── docker-compose.yml ├── down ├── kafka │ ├── Dockerfile │ ├── confluent.repo │ ├── consumer.properties │ ├── kafka.sasl.jaas.config │ └── server.properties ├── kafka1 │ ├── Dockerfile │ ├── confluent.repo │ ├── consumer.properties │ ├── kafka.sasl.jaas.config │ └── server.properties ├── kdc │ ├── Dockerfile │ └── krb5.conf ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── kerberos-multi-sasl ├── README.md ├── client │ ├── Dockerfile │ ├── client.sasl.jaas.config │ ├── command.properties │ ├── confluent.repo │ ├── consumer.properties │ ├── producer.properties │ └── scram.properties ├── docker-compose.yml ├── kafka │ ├── Dockerfile │ ├── confluent.repo │ ├── kafka.sasl.jaas.config │ └── server.properties ├── kdc │ ├── Dockerfile │ └── krb5.conf ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── kerberos ├── README.md ├── client │ ├── Dockerfile │ ├── client.sasl.jaas.config │ ├── command.properties │ ├── confluent.repo │ ├── consumer.properties │ └── producer.properties ├── docker-compose.yml ├── kafka │ ├── Dockerfile │ 
├── confluent.repo │ ├── kafka.sasl.jaas.config │ └── server.properties ├── kdc │ ├── Dockerfile │ └── krb5.conf ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── ldap-auth ├── docker-compose.yaml ├── kafka │ ├── Dockerfile │ ├── alice.properties │ ├── barnie.properties │ ├── charlie.properties │ ├── confluent.repo │ ├── kafka.jaas.config │ ├── kafka.properties │ ├── log4j.properties │ └── server.properties ├── ldap │ └── custom │ │ ├── 01_base.ldif │ │ ├── 02_KafkaDevelopers.ldif │ │ ├── 03_ProjectA.ldif │ │ ├── 04_ProjectB.ldif │ │ ├── 10_alice.ldif │ │ ├── 11_barnie.ldif │ │ ├── 12_charlie.ldif │ │ ├── 13_donald.ldif │ │ ├── 14_eva.ldif │ │ ├── 15_fritz.ldif │ │ ├── 16_greta.ldif │ │ ├── 17_kafka.ldif │ │ └── 20_group_add.ldif ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── ldap ├── acls │ └── acls.csv ├── add-user ├── custom │ ├── 01_base.ldif │ ├── 02_KafkaDevelopers.ldif │ ├── 10_alice.ldif │ ├── 11_barnie.ldif │ ├── 12_charlie.ldif │ └── 20_group_add.ldif ├── docker-compose-with-ssl.yaml ├── docker-compose.yaml ├── kafka │ ├── Dockerfile │ ├── alice.properties │ ├── barnie.properties │ ├── charlie.properties │ ├── confluent.repo │ ├── consumer.properties │ ├── jks │ │ └── .gitignore │ ├── kafka.jaas.config │ ├── kafka.properties │ ├── log4j.properties │ ├── server-with-ssl.properties │ ├── server.properties │ └── users │ │ └── purbon.properties ├── ldap │ ├── certs │ │ └── .gitignore │ └── custom │ │ ├── 01_base.ldif │ │ ├── 02_KafkaDevelopers.ldif │ │ ├── 10_alice.ldif │ │ ├── 11_barnie.ldif │ │ ├── 12_charlie.ldif │ │ └── 20_group_add.ldif ├── scripts │ ├── .gitignore │ └── certs-create.sh ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── multi-sasl ├── docker-compose.yml ├── kafka │ ├── Dockerfile │ ├── confluent.repo │ ├── consumer.plain.properties │ ├── consumer.properties │ ├── kafka.sasl.jaas.config │ └── server.properties ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── none ├── .env ├── docker-compose.yml └── up ├── oauth ├── .gitignore ├── ca.cnf ├── docker-compose.yml ├── generate_certs.sh ├── kafka │ ├── Dockerfile │ ├── client.properties │ ├── confluent.repo │ ├── kafka_server_jaas.conf │ ├── oauthcallbackhandlers │ │ ├── .gitignore │ │ ├── pom.xml │ │ └── src │ │ │ ├── main │ │ │ └── java │ │ │ │ └── io │ │ │ │ └── confluent │ │ │ │ └── examples │ │ │ │ └── authentication │ │ │ │ └── oauth │ │ │ │ ├── JwtHelper.java │ │ │ │ ├── MyOauthBearerToken.java │ │ │ │ ├── OauthBearerLoginCallbackHandler.java │ │ │ │ └── OauthBearerValidatorCallbackHandler.java │ │ │ └── test │ │ │ └── java │ │ │ └── io │ │ │ └── confluent │ │ │ └── examples │ │ │ └── authentication │ │ │ └── oauth │ │ │ ├── JwtHelperTest.java │ │ │ └── ProduceDataTest.java │ ├── server.properties │ └── test_produce_and_consume.sh ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ └── zookeeper.properties ├── plain ├── .env ├── consumer.properties ├── docker-compose.yml ├── producer.properties └── up ├── quotas ├── Client │ ├── Dockerfile │ └── confluent.repo ├── Grafana │ └── provisioning │ │ ├── dashboards │ │ ├── grafana-dashboard.json │ │ └── one-quota.yaml │ │ └── datasources │ │ └── prometheus.yaml ├── JMX_Exporter │ ├── jmx_prometheus_javaagent-0.11.0.jar │ ├── kafka_config.yml │ └── zookeeper_config.yml 
├── Prometheus │ └── prometheus.yml ├── docker-compose.yml ├── secrets │ ├── admin.properties │ ├── kafka_server_jaas.conf │ ├── noquota.properties │ └── quota.properties └── up ├── rbac ├── .env ├── README.md ├── client-configs │ ├── alice.properties │ ├── barnie.properties │ ├── charlie.properties │ ├── copy-props.sh │ ├── donald.properties │ ├── eva.properties │ ├── fritz.properties │ └── greta.properties ├── conf │ ├── keypair.pem │ └── public.pem ├── create-role-bindings.sh ├── docker-compose.yml ├── functions.sh ├── kafka-registered.sh ├── ldap │ └── custom │ │ ├── 01_base.ldif │ │ ├── 02_KafkaDevelopers.ldif │ │ ├── 03_ProjectA.ldif │ │ ├── 04_ProjectB.ldif │ │ ├── 10_alice.ldif │ │ ├── 11_barnie.ldif │ │ ├── 12_charlie.ldif │ │ ├── 13_donald.ldif │ │ ├── 14_eva.ldif │ │ ├── 15_fritz.ldif │ │ ├── 16_greta.ldif │ │ └── 20_group_add.ldif └── up ├── schema-registry ├── with-basic-auth-and-ccloud │ ├── README.md │ ├── docker-compose.yml │ ├── jaas_config.file │ ├── password-file │ └── up ├── with-basic-auth │ ├── .env │ ├── docker-compose.yml │ ├── jaas_config.file │ ├── password-file │ └── up └── with-http_and_https │ ├── .env │ ├── .gitignore │ ├── README.md │ ├── docker-compose.yml │ ├── schema-registry │ ├── config │ │ ├── ca.cnf │ │ └── client.cnf │ └── secrets │ │ ├── ca-chain.cert.pem │ │ ├── schema-registry.cert.pem │ │ ├── schema-registry.key.pem │ │ ├── schema-registry.keystore │ │ └── schema-registry.truststore │ ├── up │ └── verify.sh ├── scram ├── .env ├── admin.properties ├── consumer.properties ├── docker-compose.yml ├── jline-2.14.6.jar ├── kafka.sasl.jaas.config ├── producer.properties ├── up └── zookeeper.sasl.jaas.config ├── secure-jmx ├── README.md ├── docker-compose.yml ├── kafka │ ├── Dockerfile │ ├── confluent.repo │ ├── consumer.properties │ └── server.properties ├── pull-jmx-kafka.sh ├── pull-jmx-zookeeper.sh ├── secrets │ ├── client.keystore │ ├── client.truststore │ ├── jmxremote.access │ ├── jmxremote.password │ ├── jmxremote.properties │ ├── kafka.keystore │ └── kafka.truststore ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── jmxremote.access │ ├── jmxremote.password │ ├── jmxremote.properties │ └── zookeeper.properties ├── tls-with-ocrl ├── .gitignore ├── README.md ├── certs │ ├── broker.keystore │ ├── broker.truststore │ ├── client.keystore │ └── client.truststore ├── docker-compose.yml ├── kafka │ ├── Dockerfile │ ├── confluent.repo │ ├── consumer.properties │ └── server.properties ├── up ├── web │ └── crls.pem └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ └── zookeeper.properties └── tls ├── .gitignore ├── ca.cnf ├── client.cnf ├── docker-compose.yml ├── kafka ├── Dockerfile ├── confluent.repo ├── consumer.properties ├── kafkacat ├── kafkacat.conf └── server.properties ├── kafkacat.conf ├── local-client.cnf ├── schema-registry-client.cnf ├── schema-registry ├── Dockerfile ├── confluent.repo └── schema-registry.properties ├── server.cnf ├── up └── zookeeper ├── Dockerfile ├── confluent.repo └── zookeeper.properties /.env: -------------------------------------------------------------------------------- 1 | # Values in this file will be used to replace env variables in Docker Compose files used throughout the repo. 2 | 3 | # You can override any of these values in one of several ways: 4 | # 1. Create another '.env' file for the docker-compose.yml file 5 | # 2. Edit the docker-compose.yml file directly and replace the env variable with a value 6 | # 3. 
Set a corresponding environment variable in the shell 7 | 8 | # REPOSITORY - repository for Docker image 9 | # The '/' which separates the REPOSITORY from the image name is not required here 10 | # Examples: 11 | # - REPOSITORY=confluentinc will use images from the confluentinc repository at https://hub.docker.com/u/confluentinc 12 | # - REPOSITORY=.dkr.ecr.us-west-2.amazonaws.com/confluentinc will use images from the confluentinc repository at the specified ECR registry (images must be pulled separately) 13 | REPOSITORY=confluentinc 14 | 15 | # TAG - image tag 16 | # The ':' which separates the image name from the TAG is not required here 17 | # Examples: 18 | # - TAG=5.4.0 will use the image tag 5.4.0 19 | # - TAG=5.4.x-latest will use the image tag 5.4.x-latest 20 | # TAG=5.5.0 21 | TAG=6.1.0 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .envrc 2 | .java-version 3 | kerberos-multi-node/TODO 4 | .idea 5 | -------------------------------------------------------------------------------- /acls/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM confluentinc/cp-enterprise-kafka:5.4.0 2 | 3 | MAINTAINER sven@confluent.io 4 | 5 | # Make sure the log directory is world-writable 6 | RUN echo "===> Creating authorizer logs dir ..." \ 7 | && mkdir -p /var/log/kafka-auth-logs \ 8 | && chmod -R ag+w /var/log/kafka-auth-logs 9 | 10 | COPY log4j.properties.template /etc/confluent/docker/log4j.properties.template 11 | 12 | COPY *.conf /tmp/ 13 | 14 | -------------------------------------------------------------------------------- /acls/kafka/admin.conf: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="admin" \ 5 | password="admin-pass"; 6 | -------------------------------------------------------------------------------- /acls/kafka/consumer.conf: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="consumer" \ 5 | password="consumer-pass"; 6 | -------------------------------------------------------------------------------- /acls/kafka/kafka.conf: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="kafka" \ 5 | password="kafka-pass"; 6 | -------------------------------------------------------------------------------- /acls/kafka/kafka.sasl.jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | org.apache.kafka.common.security.scram.ScramLoginModule required 3 | username="kafka" 4 | password="kafka-pass"; 5 | }; 6 | KafkaClient { 7 | org.apache.kafka.common.security.scram.ScramLoginModule required 8 | username="kafka" 9 | password="kafka-pass"; 10 | }; 11 | Client { 12 | org.apache.zookeeper.server.auth.DigestLoginModule required 13 | username="admin" 14 | password="password"; 15 | }; 16 | 17 | 
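The SCRAM credentials referenced by the acls client configs above only work once the users exist in ZooKeeper. A minimal sketch of how they could be registered (the container and ZooKeeper address are assumptions based on the other playbooks in this repo):

```bash
# run once against the acls playbook's ZooKeeper
for user in admin consumer producer kafka; do
  docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter \
    --add-config "SCRAM-SHA-256=[password=${user}-pass]" \
    --entity-type users --entity-name "$user"
done
```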
-------------------------------------------------------------------------------- /acls/kafka/kafkacat.conf: -------------------------------------------------------------------------------- 1 | security.protocol=SASL_PLAINTEXT 2 | sasl.mechanisms=SCRAM-SHA-256 3 | sasl.username=kafka 4 | sasl.password=kafka-pass 5 | -------------------------------------------------------------------------------- /acls/kafka/log4j.properties.template: -------------------------------------------------------------------------------- 1 | log4j.rootLogger={{ env["KAFKA_LOG4J_ROOT_LOGLEVEL"] | default('INFO') }}, stdout 2 | 3 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 4 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 5 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 6 | 7 | log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender 8 | log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH 9 | log4j.appender.authorizerAppender.File=/var/log/kafka-auth-logs/kafka-authorizer.log 10 | log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout 11 | log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n 12 | 13 | log4j.additivity.kafka.authorizer.logger=false 14 | 15 | {% set loggers = { 16 | 'kafka': 'INFO', 17 | 'kafka.network.RequestChannel$': 'WARN', 18 | 'kafka.producer.async.DefaultEventHandler': 'DEBUG', 19 | 'kafka.request.logger': 'WARN', 20 | 'kafka.controller': 'TRACE', 21 | 'kafka.log.LogCleaner': 'INFO', 22 | 'state.change.logger': 'TRACE', 23 | 'kafka.authorizer.logger': 'DEBUG, authorizerAppender' 24 | } -%} 25 | 26 | 27 | {% if env['KAFKA_LOG4J_LOGGERS'] %} 28 | {% set loggers = parse_log4j_loggers(env['KAFKA_LOG4J_LOGGERS'], loggers) %} 29 | {% endif %} 30 | 31 | {% for logger,loglevel in loggers.iteritems() %} 32 | log4j.logger.{{logger}}={{loglevel}} 33 | {% endfor %} 34 | 35 | -------------------------------------------------------------------------------- /acls/kafka/producer.conf: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="producer" \ 5 | password="producer-pass"; 6 | -------------------------------------------------------------------------------- /acls/zookeeper.sasl.jaas.conf: -------------------------------------------------------------------------------- 1 | Server { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | user_admin="password"; 4 | }; 5 | Client { 6 | org.apache.zookeeper.server.auth.DigestLoginModule required 7 | username="admin" 8 | password="password"; 9 | }; 10 | -------------------------------------------------------------------------------- /apache-kafka-with-zk3.5-and-tls/.gitignore: -------------------------------------------------------------------------------- 1 | bin/ 2 | certs/ 3 | certs-old/ 4 | tmp-dir 5 | images/ 6 | zookeeper.properties 7 | -------------------------------------------------------------------------------- /apache-kafka-with-zk3.5-and-tls/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | zookeeper: 4 | build: zookeeper/ 5 | container_name: zookeeper 6 | hostname: zookeeper 7 | restart: on-failure 8 | environment: 9 | - SERVER_JVMFLAGS=-Dzookeeper.serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory 10 | volumes: 11 | - 
./certs/zk-stores:/var/lib/secret 12 | 13 | kafka: 14 | build: kafka/ 15 | container_name: kafka 16 | hostname: kafka 17 | depends_on: 18 | - zookeeper 19 | restart: on-failure 20 | volumes: 21 | - ./certs/kafka-stores:/var/lib/secret 22 | environment: 23 | - KAFKA_OPTS=-Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.client.secure=true -Dzookeeper.ssl.keyStore.location=/var/lib/secret/kafka.jks -Dzookeeper.ssl.keyStore.password=confluent -Dzookeeper.ssl.trustStore.location=/var/lib/secret/truststore.jks -Dzookeeper.ssl.trustStore.password=confluent 24 | ports: 25 | - 29092:29092 26 | -------------------------------------------------------------------------------- /apache-kafka-with-zk3.5-and-tls/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM purbon/kafka 2 | MAINTAINER pere.urbon@gmail.com 3 | ENV container docker 4 | 5 | # 1. Install openjdk 6 | RUN yum install -y java-11-openjdk 7 | 8 | # 2. Configure Kafka 9 | COPY server.properties /etc/kafka/server.properties 10 | 11 | EXPOSE 9092 12 | 13 | CMD kafka-server-start.sh /etc/kafka/server.properties 14 | -------------------------------------------------------------------------------- /apache-kafka-with-zk3.5-and-tls/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM purbon/zookeeper:3.5.5 2 | MAINTAINER pere.urbon@gmail.com 3 | ENV container docker 4 | 5 | # 1. Install openjdk 6 | RUN yum install -y java-11-openjdk 7 | 8 | 9 | # 2. Configure zookeeper 10 | COPY zoo.cfg "${ZK_HOME}/conf/zoo.cfg" 11 | 12 | # 3. Add extra utility scripts 13 | 14 | ENV PATH="/opt:${PATH}" 15 | COPY tlsZkCli.sh /opt/tlsZkCli.sh 16 | 17 | EXPOSE 2182 18 | 19 | CMD zkServer.sh start-foreground 20 | -------------------------------------------------------------------------------- /apache-kafka-with-zk3.5-and-tls/zookeeper/tlsZkCli.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export CLIENT_JVMFLAGS="-Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.client.secure=true 4 | -Dzookeeper.ssl.keyStore.location=/var/lib/secret/zookeeper.jks 5 | -Dzookeeper.ssl.keyStore.password=confluent 6 | -Dzookeeper.ssl.trustStore.location=/var/lib/secret/truststore.jks 7 | -Dzookeeper.ssl.trustStore.password=confluent" 8 | 9 | zkCli.sh -server $1 10 |
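A usage sketch for the helper above, assuming it is run inside the zookeeper container where the keystores are mounted (the secure client port 2182 comes from zoo.cfg below):

```bash
docker exec -it zookeeper tlsZkCli.sh localhost:2182
```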
-------------------------------------------------------------------------------- /apache-kafka-with-zk3.5-and-tls/zookeeper/zoo.cfg: -------------------------------------------------------------------------------- 1 | # The number of milliseconds of each tick 2 | tickTime=2000 3 | # The number of ticks that the initial 4 | # synchronization phase can take 5 | initLimit=10 6 | # The number of ticks that can pass between 7 | # sending a request and getting an acknowledgement 8 | syncLimit=5 9 | # the directory where the snapshot is stored. 10 | # do not use /tmp for storage, /tmp here is just 11 | # example sakes. 12 | dataDir=/tmp/zookeeper 13 | # the port at which the clients will connect 14 | #clientPort=2181 15 | secureClientPort=2182 16 | # the maximum number of client connections. 17 | # increase this if you need to handle more clients 18 | #maxClientCnxns=60 19 | # 20 | # Be sure to read the maintenance section of the 21 | # administrator guide before turning on autopurge. 22 | # 23 | # http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance 24 | # 25 | # The number of snapshots to retain in dataDir 26 | #autopurge.snapRetainCount=3 27 | # Purge task interval in hours 28 | # Set to "0" to disable auto purge feature 29 | #autopurge.purgeInterval=1 30 | 31 | authProvider.1=org.apache.zookeeper.server.auth.X509AuthenticationProvider 32 | ssl.trustStore.location=/var/lib/secret/truststore.jks 33 | ssl.trustStore.password=confluent 34 | ssl.keyStore.location=/var/lib/secret/zookeeper.jks 35 | ssl.keyStore.password=confluent 36 | # ssl.clientAuth is commented out here only for demonstration purposes. 37 | # In a production environment client authentication should be enforced by 38 | # enabling the line below: 39 | #ssl.clientAuth=need 40 | -------------------------------------------------------------------------------- /auditlog/config/delete-records.json: -------------------------------------------------------------------------------- 1 | { 2 | "partitions": [ 3 | { 4 | "topic": "bar", 5 | "partition": 0, 6 | "offset": 3 7 | } 8 | ], 9 | "version": 1 10 | } 11 | -------------------------------------------------------------------------------- /auditlog/data/my_msgs.txt: -------------------------------------------------------------------------------- 1 | This is a message 2 | This is another message 3 | Abracadabra 4 | -------------------------------------------------------------------------------- /auditlog/example-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "routes": { 3 | "crn:///kafka=*/group=*": { 4 | "consume": { 5 | "allowed": "confluent-audit-log-events", 6 | "denied": "confluent-audit-log-events" 7 | } 8 | }, 9 | "crn:///kafka=*/topic=*": { 10 | "produce": { 11 | "allowed": "confluent-audit-log-events", 12 | "denied": "confluent-audit-log-events" 13 | }, 14 | "consume": { 15 | "allowed": "confluent-audit-log-events", 16 | "denied": "confluent-audit-log-events" 17 | } 18 | } 19 | }, 20 | "destinations": { 21 | "topics": { 22 | "confluent-audit-log-events": { 23 | "retention_ms": 7776000000 24 | } 25 | } 26 | }, 27 | "default_topics": { 28 | "allowed": "confluent-audit-log-events", 29 | "denied": "confluent-audit-log-events" 30 | }, 31 | "excluded_principals": ["User:kafka", "User:ANONYMOUS"] 32 | } 33 | -------------------------------------------------------------------------------- /auditlog/kafka/consumer-user.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="consumer" \ 5 | password="consumerpass"; 6 | 7 | -------------------------------------------------------------------------------- /auditlog/kafka/kafka-user.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="kafka" \ 5 | password="kafka"; 6 | 7 | -------------------------------------------------------------------------------- /auditlog/kafka/kafka.properties: -------------------------------------------------------------------------------- 1 | broker.id=1 2 | advertised.listeners=SASL_PLAINTEXT://kafka:9092 3 | offsets.topic.replication.factor=1 4 | allow.everyone.if.no.acl.found=false 5 | 
zookeeper.connect=zookeeper:2181 6 | security.inter.broker.protocol=SASL_PLAINTEXT 7 | authorizer.class.name=io.confluent.kafka.security.authorizer.ConfluentServerAuthorizer 8 | log.dirs=/var/lib/kafka/data 9 | confluent.security.event.router.config={"routes":{"crn:///kafka=*/group=*":{"consume":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"}},"crn:///kafka=*/topic=*":{"produce":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"},"consume":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"}}},"destinations":{"topics":{"confluent-audit-log-events":{"retention_ms":7776000000}}},"default_topics":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"},"excluded_principals":["User:kafka","User:ANONYMOUS"]} 10 | listeners=SASL_PLAINTEXT://0.0.0.0:9092 11 | zookeeper.set.acl=true 12 | super.users=User:kafka 13 | offsets.topic.num.partitions=1 14 | sasl.enabled.mechanisms=SCRAM-SHA-256 15 | transaction.state.log.replication.factor=1 16 | sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256 17 | confluent.license.topic.replication.factor=1 18 | 19 | -------------------------------------------------------------------------------- /auditlog/kafka/kafka.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | org.apache.kafka.common.security.scram.ScramLoginModule required 3 | username="kafka" 4 | password="kafka"; 5 | }; 6 | Client { 7 | org.apache.zookeeper.server.auth.DigestLoginModule required 8 | username="admin" 9 | password="password"; 10 | }; 11 | -------------------------------------------------------------------------------- /auditlog/kafka/log4j.properties: -------------------------------------------------------------------------------- 1 | 2 | log4j.rootLogger=INFO, stdout 3 | 4 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 5 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 7 | 8 | 9 | log4j.logger.kafka.authorizer.logger=WARN 10 | log4j.logger.kafka.log.LogCleaner=INFO 11 | log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG 12 | log4j.logger.kafka.controller=TRACE 13 | log4j.logger.kafka.network.RequestChannel$=WARN 14 | log4j.logger.kafka.request.logger=WARN 15 | log4j.logger.state.change.logger=TRACE 16 | log4j.logger.kafka=INFO 17 | -------------------------------------------------------------------------------- /auditlog/kafka/producer-user.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="producer" \ 5 | password="producerpass"; 6 | 7 | -------------------------------------------------------------------------------- /auditlog/kafka/tools-log4j.properties: -------------------------------------------------------------------------------- 1 | 2 | log4j.rootLogger=WARN, stderr 3 | 4 | log4j.appender.stderr=org.apache.log4j.ConsoleAppender 5 | log4j.appender.stderr.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n 7 | log4j.appender.stderr.Target=System.err
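The audit router config is embedded as a single line in kafka/kafka.properties above, while example-config.json holds the same routing rules in readable form. A quick sketch (assuming jq is available) to pretty-print the embedded value for comparison:

```bash
grep '^confluent.security.event.router.config=' kafka/kafka.properties | cut -d= -f2- | jq .
```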
-------------------------------------------------------------------------------- /auditlog/scripts/create-topics.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "Create topic foo with User:kafka" 4 | echo "NOTE: this topic creation will not show up in the audit log because it is performed by a user on the excluded principals list." 5 | echo 6 | docker exec kafka kafka-topics --bootstrap-server kafka:9092 \ 7 | --command-config /etc/kafka/kafka-user.properties \ 8 | --create --topic foo --replication-factor 1 --partitions 1 9 | sleep 1 10 | echo "Create topic bar with User:producer" 11 | echo "NOTE: This action will be noted in the audit log." 12 | echo 13 | docker exec kafka kafka-topics --bootstrap-server kafka:9092 \ 14 | --command-config /etc/kafka/producer-user.properties \ 15 | --create --topic bar --replication-factor 1 --partitions 1 16 | 17 | ## Add the extra ACLs needed to handle the topic bar 18 | docker exec kafka kafka-acls --bootstrap-server kafka:9092 \ 19 | --command-config /etc/kafka/kafka-user.properties \ 20 | --add --allow-principal User:producer --operation AlterConfigs \ 21 | --topic "bar" 22 | 23 | docker exec kafka kafka-acls --bootstrap-server kafka:9092 \ 24 | --command-config /etc/kafka/kafka-user.properties \ 25 | --add --allow-principal User:producer --operation Delete \ 26 | --topic "bar" 27 | 28 | sleep 1 29 | 30 | echo "Changing a configuration" 31 | echo "NOTE: This action will be noted in the audit log." 32 | echo 33 | docker exec kafka kafka-configs --bootstrap-server kafka:9092 \ 34 | --topic bar --add-config retention.ms=2592000001 \ 35 | --alter --command-config /etc/kafka/producer-user.properties 36 | -------------------------------------------------------------------------------- /auditlog/scripts/delete-records.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker exec kafka kafka-delete-records --bootstrap-server kafka:9092 \ 4 | --command-config /etc/kafka/producer-user.properties \ 5 | --offset-json-file /tmp/config/delete-records.json 6 | -------------------------------------------------------------------------------- /auditlog/scripts/describe-topics.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker exec kafka kafka-topics --bootstrap-server kafka:9092 --command-config /etc/kafka/kafka-user.properties --describe 4 | -------------------------------------------------------------------------------- /auditlog/scripts/explore-audit-topic.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | TOPIC="confluent-audit-log-events" 4 | docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9092 \ 5 | --consumer.config /etc/kafka/kafka-user.properties \ 6 | --topic $TOPIC --from-beginning 7 | -------------------------------------------------------------------------------- /auditlog/scripts/write-msg.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PWD=`pwd` 4 | topic=$1 5 | network="auditlog_default" 6 | 7 | USERNAME=producer 8 | PASSWORD=producerpass 9 | 10 | echo "Write messages to topic $1" 11 | 12 | docker run --network $network \ 13 | --volume $PWD/data/my_msgs.txt:/data/my_msgs.txt \ 14 | confluentinc/cp-kafkacat \ 15 | kafkacat -b kafka:9092 \ 16 | -t $topic \ 17 | -X security.protocol=SASL_PLAINTEXT -X sasl.mechanisms=SCRAM-SHA-256 -X sasl.username=$USERNAME -X sasl.password=$PASSWORD \ 18 | -P -l /data/my_msgs.txt 19 | 
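To see a denied action actually land in the audit topic, one possible sketch: User:consumer has no produce ACL (see the up script below), so a produce attempt with its credentials is rejected and audited:

```bash
# this produce is denied by the authorizer and recorded in the audit log
echo "should be denied" | docker exec -i kafka kafka-console-producer \
  --broker-list kafka:9092 \
  --producer.config /etc/kafka/consumer-user.properties \
  --topic bar

# then inspect the resulting events
./scripts/explore-audit-topic.sh
```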
-------------------------------------------------------------------------------- /auditlog/up: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker-compose up -d 4 | 5 | docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka 6 | docker-compose exec zookeeper kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=consumerpass],SCRAM-SHA-512=[password=consumerpass]' --entity-type users --entity-name consumer 7 | docker-compose exec zookeeper kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=producerpass],SCRAM-SHA-512=[password=producerpass]' --entity-type users --entity-name producer 8 | 9 | # ACLs 10 | docker-compose exec kafka kafka-acls --bootstrap-server kafka:9092 --command-config /etc/kafka/kafka-user.properties --add --allow-principal User:producer --producer --topic=* 11 | docker-compose exec kafka kafka-acls --bootstrap-server kafka:9092 --command-config /etc/kafka/kafka-user.properties --add --allow-principal User:consumer --consumer --topic=* --group=* 12 | docker-compose exec kafka kafka-acls --bootstrap-server kafka:9092 --command-config /etc/kafka/kafka-user.properties --add --allow-principal User:confluent-audit --producer --topic confluent-audit-log-events --resource-pattern-type prefixed 13 | 14 | echo "Example configuration:" 15 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9092 --producer.config /etc/kafka/producer-user.properties --topic test" 16 | echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9092 --consumer.config /etc/kafka/consumer-user.properties --topic test --from-beginning" 17 | -------------------------------------------------------------------------------- /auditlog/zookeeper/log4j.properties: -------------------------------------------------------------------------------- 1 | 2 | log4j.rootLogger=INFO, stdout 3 | 4 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 5 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 7 | 8 | -------------------------------------------------------------------------------- /auditlog/zookeeper/tools-log4j.properties: -------------------------------------------------------------------------------- 1 | 2 | log4j.rootLogger=WARN, stderr 3 | 4 | log4j.appender.stderr=org.apache.log4j.ConsoleAppender 5 | log4j.appender.stderr.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n 7 | log4j.appender.stderr.Target=System.err -------------------------------------------------------------------------------- /auditlog/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | 2 | dataDir=/var/lib/zookeeper/data 3 | dataLogDir=/var/lib/zookeeper/log 4 | 5 | clientPort=2181 6 | 7 | 8 | -------------------------------------------------------------------------------- /auditlog/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | user_admin="password"; 4 | }; 5 | Client { 6 | org.apache.zookeeper.server.auth.DigestLoginModule required 7 | username="admin" 8 | 
password="password"; 9 | }; 10 | -------------------------------------------------------------------------------- /ca-builder-scripts/.gitignore: -------------------------------------------------------------------------------- 1 | tmp-certs/ 2 | stores 3 | legacy/ 4 | 5 | ## remove from git the generated CA files 6 | 7 | ca/ 8 | -------------------------------------------------------------------------------- /ca-builder-scripts/build-a-batch-of-certs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | input=$1 4 | while IFS= read -r line 5 | do 6 | fields=($(echo $line | tr "," "\n")) 7 | #./support-scripts/create-cert.sh ${fields[0]} ${fields[1]} 8 | echo "./support-scripts/create-cert.sh ${fields[0]} ${fields[1]}" 9 | done < "$input" 10 | -------------------------------------------------------------------------------- /ca-builder-scripts/configs/batch-of-certs.txt: -------------------------------------------------------------------------------- 1 | consumer,machine0.example.com 2 | producer,machine1.example.com 3 | kafka,machine2.example.com 4 | zookeeper,machine3.example.com 5 | -------------------------------------------------------------------------------- /ca-builder-scripts/configs/batch-of-stores.txt: -------------------------------------------------------------------------------- 1 | consumer,machine0.example.com 2 | producer,machine1.example.com 3 | kafka,machine2.example.com 4 | zookeeper,machine3.example.com 5 | -------------------------------------------------------------------------------- /ca-builder-scripts/configs/ca-config-vars: -------------------------------------------------------------------------------- 1 | DE 2 | Berlin 3 | Berlin 4 | Confluent Germany 5 | -------------------------------------------------------------------------------- /ca-builder-scripts/create-crl.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | DEFAULT_PASSWORD=${1:-confluent} 5 | 6 | if [ -z "${CA_ROOT_DIR+x}" ]; 7 | then 8 | CA_ROOT_DIR='.' 9 | fi 10 | 11 | source $CA_ROOT_DIR/utils/functions.sh 12 | 13 | (cd $CA_ROOT_DIR/ca; create_certificate_revokation_list ) 14 | -------------------------------------------------------------------------------- /ca-builder-scripts/create-pair-certs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | #HOSTNAME="www.example.com" 4 | #EXTENSION="server_cert" #usr_cert for client auth, server_cert for backend 5 | 6 | #HOSTNAME="my.kafka.consumer" 7 | #EXTENSION="usr_cert" 8 | set -e 9 | 10 | HOSTNAME=$1 11 | MACHINE=${2:-""} 12 | EXTENSION=${3:-server_cert} 13 | DEFAULT_PASSWORD=${4:-confluent} 14 | 15 | echo "Building a pair of certificates for $HOSTNAME using $EXTENSION" 16 | 17 | if [ -z "${CA_ROOT_DIR+x}" ]; 18 | then 19 | CA_ROOT_DIR='.' 20 | fi 21 | 22 | ITERMEDIATE_CA_DIR=$CA_ROOT_DIR/ca/intermediate 23 | 24 | CERT_FILE="$ITERMEDIATE_CA_DIR/certs/$HOSTNAME.cert.pem" 25 | 26 | if test -f "$CERT_FILE"; then 27 | RED='\033[0;31m' 28 | NC='\033[0m' # No Color 29 | printf "${RED}Cert $CERT_FILE exists, 
exiting...${NC}" 30 | exit 1 31 | fi 32 | 33 | source $CA_ROOT_DIR/utils/functions.sh 34 | 35 | (cd $CA_ROOT_DIR; refresh_openssl_file "$CA_ROOT_DIR" "$ITERMEDIATE_CA_DIR" ) 36 | (cd $CA_ROOT_DIR/ca; generate_final_certificate "$MACHINE" ) 37 | -------------------------------------------------------------------------------- /ca-builder-scripts/del-cert.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | NAME=$1 4 | 5 | if [ -z "${CA_ROOT_DIR+x}" ]; 6 | then 7 | CA_ROOT_DIR='.' 8 | fi 9 | 10 | echo "Deleting CERT $NAME" 11 | 12 | rm "$CA_ROOT_DIR/ca/intermediate/private/$NAME.key.pem" 13 | rm "$CA_ROOT_DIR/ca/intermediate/certs/$NAME.cert.pem" 14 | rm "$CA_ROOT_DIR/ca/intermediate/csr/$NAME.csr.pem" 15 | -------------------------------------------------------------------------------- /ca-builder-scripts/revoke-cert.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CERT=$1 4 | DEFAULT_PASSWORD=${2:-confluent} 5 | 6 | if [ -z "${CA_ROOT_DIR+x}" ]; 7 | then 8 | CA_ROOT_DIR='.' 9 | fi 10 | 11 | source $CA_ROOT_DIR/utils/functions.sh 12 | 13 | (cd $CA_ROOT_DIR/ca; revoke_cert $CERT ) 14 | -------------------------------------------------------------------------------- /ca-builder-scripts/setup-ca-with-intermediate-ca.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ## 4 | # This script builds a Certificate Authority of the form: 5 | # Root CA -> intermediate CA 6 | # 7 | # In the CA_ROOT_DIR, this script will create the necessary directory structures 8 | # and generate the certificates, all signed using the value provided as an 9 | # argument to this script, or confluent by default. 10 | ## 11 | 12 | DEFAULT_PASSWORD=${1:-confluent} 13 | export CA_ROOT_DIR=`pwd` 14 | 15 | echo -e "Building the CA root setup\n" 16 | 17 | ./utils/build-ca.sh $DEFAULT_PASSWORD 18 | 19 | echo -e "Building the intermediate CA root setup:\n" 20 | 21 | ./utils/build-intermediate-ca.sh $DEFAULT_PASSWORD 22 | 
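Putting the scripts above together, a typical interactive session could look like the following sketch (the hostname and password values are only examples):

```bash
./setup-ca-with-intermediate-ca.sh confluent                    # root CA + intermediate CA
./create-pair-certs.sh kafka kafka1.example.com server_cert confluent
./revoke-cert.sh kafka confluent                                # revoke the cert again
./create-crl.sh confluent                                       # regenerate the revocation list
```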
-------------------------------------------------------------------------------- /ca-builder-scripts/support-scripts/build-ca.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect 2 | 3 | proc slurp {file} { 4 | set fh [open $file r] 5 | set ret [read $fh] 6 | close $fh 7 | return $ret 8 | } 9 | 10 | set timeout 20 11 | set configslurp [slurp configs/ca-config-vars] 12 | 13 | set lines [split $configslurp \n] 14 | set COUNTRY_NAME [lrange $lines 0 0] 15 | set STATE [lrange $lines 1 1] 16 | set LOCALITY [lrange $lines 2 2] 17 | set ORGANIZATION [lrange $lines 3 3] 18 | 19 | eval spawn ./setup-ca-with-intermediate-ca.sh 20 | ## Generating the data for the CA setup. 21 | expect "Country Name (2 letter code)" 22 | send "$COUNTRY_NAME\r"; 23 | expect "State or Province Name" 24 | send "$STATE\r"; 25 | expect "Locality Name" 26 | send "$LOCALITY\r"; 27 | expect "Organization Name" 28 | send "$ORGANIZATION\r"; 29 | expect "Organizational Unit Name" 30 | send "\r"; 31 | expect "Common Name" 32 | send "CA\r"; 33 | expect "Email Address" 34 | send "\r"; 35 | ## Generating the data for the Intermediate setup. 36 | expect "Country Name (2 letter code)" 37 | send "$COUNTRY_NAME\r"; 38 | expect "State or Province Name" 39 | send "$STATE\r"; 40 | expect "Locality Name" 41 | send "$LOCALITY\r"; 42 | expect "Organization Name" 43 | send "$ORGANIZATION\r"; 44 | expect "Organizational Unit Name" 45 | send "\r"; 46 | expect "Common Name" 47 | send "Intermediate-CA\r"; 48 | expect "Email Address" 49 | send "\r"; 50 | # Sign the certificate and commit 51 | expect "Sign the certificate?" 52 | send "y\r"; 53 | expect "1 out of 1 certificate requests certified, commit?" 54 | send "y\r"; 55 | interact 56 | -------------------------------------------------------------------------------- /ca-builder-scripts/support-scripts/create-cert.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -f 2 | 3 | proc slurp {file} { 4 | set fh [open $file r] 5 | set ret [read $fh] 6 | close $fh 7 | return $ret 8 | } 9 | 10 | proc create_certs {cert_name machine} { 11 | eval spawn ./create-pair-certs.sh $cert_name $machine 12 | } 13 | 14 | set timeout 20 15 | set configslurp [slurp configs/ca-config-vars] 16 | 17 | set lines [split $configslurp \n] 18 | set COUNTRY_NAME [lrange $lines 0 0] 19 | set STATE [lrange $lines 1 1] 20 | set LOCALITY [lrange $lines 2 2] 21 | set ORGANIZATION [lrange $lines 3 3] 22 | 23 | set cert_name [lindex $argv 0] 24 | set machine [lrange $argv 1 end] 25 | 26 | spawn ./create-pair-certs.sh $cert_name $machine 27 | 28 | ## Generating the data for the certificate. 29 | expect "Country Name (2 letter code)" 30 | send "$COUNTRY_NAME\r"; 31 | expect "State or Province Name" 32 | send "$STATE\r"; 33 | expect "Locality Name" 34 | send "$LOCALITY\r"; 35 | expect "Organization Name" 36 | send "$ORGANIZATION\r"; 37 | expect "Organizational Unit Name" 38 | send "\r"; 39 | expect "Common Name" 40 | send "$cert_name\r"; 41 | expect "Email Address" 42 | send "\r"; 43 | # Sign the certificate and commit 44 | expect "Sign the certificate?" 45 | send "y\r"; 46 | expect "1 out of 1 certificate requests certified, commit" 47 | send "y\r"; 48 | interact 49 | -------------------------------------------------------------------------------- /ca-builder-scripts/utils/build-ca.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | DEFAULT_PASSWORD=${1:-confluent} 4 | 5 | if [ -z "${CA_ROOT_DIR+x}" ]; 6 | then 7 | CA_ROOT_DIR='.' 8 | fi 9 | 10 | source $CA_ROOT_DIR/utils/functions.sh 11 | 12 | mkdir $CA_ROOT_DIR/ca; 13 | 14 | setup_ca_dir_structure "$CA_ROOT_DIR/ca" 15 | 16 | cp $CA_ROOT_DIR/configs/ca.config $CA_ROOT_DIR/ca/openssl.cnf 17 | 18 | (cd $CA_ROOT_DIR/ca; generate_ca_keys_and_certs ) 19 | 20 | ## Verify the CA certificate 21 | openssl x509 -noout -text -in $CA_ROOT_DIR/ca/certs/ca.cert.pem 22 | -------------------------------------------------------------------------------- /ca-builder-scripts/utils/build-intermediate-ca.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | DEFAULT_PASSWORD=${1:-confluent} 4 | 5 | if [ -z "${CA_ROOT_DIR+x}" ]; 6 | then 7 | CA_ROOT_DIR='.' 
8 | fi 9 | ITERMEDIATE_CA_DIR=$CA_ROOT_DIR/ca/intermediate 10 | 11 | source $CA_ROOT_DIR/utils/functions.sh 12 | 13 | mkdir -p $ITERMEDIATE_CA_DIR 14 | 15 | setup_intermediate_ca_dir_structure $ITERMEDIATE_CA_DIR 16 | 17 | cp $CA_ROOT_DIR/configs/intermediate-ca.config $ITERMEDIATE_CA_DIR/openssl.cnf 18 | 19 | (cd $ITERMEDIATE_CA_DIR; generate_intermediate_keys_and_certs) 20 | 21 | (cd $CA_ROOT_DIR/ca; sign_intermediate_cert_authority; verify_generate_intermediate_ca) 22 | (cd $CA_ROOT_DIR/ca; create_ca_chain) 23 | -------------------------------------------------------------------------------- /delegation_tokens/.gitignore: -------------------------------------------------------------------------------- 1 | certs/ 2 | -------------------------------------------------------------------------------- /delegation_tokens/ca.cnf: -------------------------------------------------------------------------------- 1 | [ policy_match ] 2 | countryName = match 3 | stateOrProvinceName = match 4 | organizationName = match 5 | organizationalUnitName = optional 6 | commonName = supplied 7 | emailAddress = optional 8 | 9 | [ req ] 10 | prompt = no 11 | distinguished_name = dn 12 | default_md = sha256 13 | default_bits = 4096 14 | x509_extensions = v3_ca 15 | 16 | [ dn ] 17 | countryName = UK 18 | organizationName = Confluent 19 | localityName = London 20 | commonName = kafka.confluent.local 21 | 22 | [ v3_ca ] 23 | subjectKeyIdentifier=hash 24 | basicConstraints = critical,CA:true 25 | authorityKeyIdentifier=keyid:always,issuer:always 26 | keyUsage = critical,keyCertSign,cRLSign 27 | -------------------------------------------------------------------------------- /delegation_tokens/client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=kafka.confluent.local 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = clientAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=kafka.confluent.local 30 | -------------------------------------------------------------------------------- /delegation_tokens/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | zookeeper: 4 | build: zookeeper/ 5 | container_name: zookeeper 6 | hostname: zookeeper 7 | domainname: confluent.local 8 | restart: on-failure 9 | volumes: 10 | - ./certs/:/var/lib/secret 11 | environment: 12 | - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf 13 | networks: 14 | default: 15 | aliases: 16 | - zookeeper.confluent.local 17 | 18 | 19 | kafka: 20 | build: kafka/ 21 | container_name: kafka 22 | hostname: kafka 23 | domainname: confluent.local 24 | depends_on: 25 | - zookeeper 26 | restart: on-failure 27 | volumes: 28 | - ./certs/:/var/lib/secret 29 | environment: 30 | - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf 31 | networks: 32 | default: 33 | aliases: 34 
| - kafka.confluent.local 35 | ports: 36 | - "9093:9093" 37 | 38 | volumes: 39 | secret: {} 40 | 41 | networks: 42 | default: 43 | -------------------------------------------------------------------------------- /delegation_tokens/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-1.8.0-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. Configure Kafka 15 | COPY server.properties /etc/kafka/server.properties 16 | COPY kafka_server_jaas.conf /etc/kafka/kafka_server_jaas.conf 17 | COPY consumer.properties /etc/kafka/consumer.properties 18 | COPY create_client_properties.sh /etc/kafka/create_client_properties.sh 19 | 20 | EXPOSE 9093 21 | 22 | CMD kafka-server-start /etc/kafka/server.properties 23 | -------------------------------------------------------------------------------- /delegation_tokens/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /delegation_tokens/kafka/consumer.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | # Configure SASL_SSL if SSL encryption is enabled, otherwise configure SASL_PLAINTEXT 3 | security.protocol=SASL_SSL 4 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 5 | username="kafka" \ 6 | password="kafka"; 7 | ssl.truststore.location=/var/lib/secret/truststore.jks 8 | ssl.truststore.password=test1234 9 | ssl.keystore.location=/var/lib/secret/client.keystore.jks 10 | ssl.keystore.password=test1234 11 | -------------------------------------------------------------------------------- /delegation_tokens/kafka/create_client_properties.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -u 5 | 6 | RESPONSE=$(kafka-delegation-tokens \ 7 | --bootstrap-server kafka.confluent.local:9093 \ 8 | --create \ 9 | --command-config /etc/kafka/consumer.properties \ 10 | --max-life-time-period -1 | tail -1) 11 | 12 | TOKENID=$(echo $RESPONSE | cut -d " " -f1) 13 | HMAC=$(echo $RESPONSE | cut -d " " -f2) 14 | 15 | echo "Received token id: $TOKENID" 16 | echo "Received message authentication code: $HMAC" 17 | 18 | echo 'sasl.mechanism=SCRAM-SHA-256 19 | # Configure SASL_SSL if SSL encryption is enabled, otherwise configure SASL_PLAINTEXT 20 | security.protocol=SASL_SSL 21 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 22 | username="'$TOKENID'" \ 23 | password="'$HMAC'" \ 24 | tokenauth="true"; 25 | ssl.truststore.location=/var/lib/secret/truststore.jks 26 | ssl.truststore.password=test1234 27 | 
ssl.keystore.location=/var/lib/secret/client.keystore.jks 28 | ssl.keystore.password=test1234' > /tmp/delegation_token_client.properties 29 | 30 | -------------------------------------------------------------------------------- /delegation_tokens/kafka/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | // Username and password are used by the broker to initiate connections to other brokers 2 | // admin is another user allowed to connect to the broker. 3 | 4 | KafkaServer { 5 | org.apache.kafka.common.security.scram.ScramLoginModule required 6 | username="kafka" 7 | password="kafka" 8 | user_admin="admin"; 9 | }; 10 | 11 | // The client section is used by kafka to connect to zookeeper. 12 | // This must match the zookeeper jaas configuration. 13 | Client { 14 | org.apache.zookeeper.server.auth.DigestLoginModule required 15 | username="kafka" 16 | password="kafka"; 17 | }; 18 | -------------------------------------------------------------------------------- /delegation_tokens/kafka/server.properties: -------------------------------------------------------------------------------- 1 | ############################# Server Basics ############################# 2 | broker.id=0 3 | listeners=SASL_SSL://kafka.confluent.local:9093 4 | advertised.listeners=SASL_SSL://kafka.confluent.local:9093 5 | log.dirs=/var/lib/kafka 6 | offsets.topic.replication.factor=1 7 | transaction.state.log.replication.factor=1 8 | transaction.state.log.min.isr=1 9 | zookeeper.connect=zookeeper.confluent.local:2181 10 | 11 | # TLS Configuration 12 | security.inter.broker.protocol=SASL_SSL 13 | ssl.truststore.location=/var/lib/secret/truststore.jks 14 | ssl.truststore.password=test1234 15 | ssl.keystore.location=/var/lib/secret/server.keystore.jks 16 | ssl.keystore.password=test1234 17 | ssl.client.auth=required 18 | authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer 19 | delegation.token.master.key=foo 20 | sasl.enabled.mechanisms=SCRAM-SHA-256 21 | sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256 22 | super.users=User:kafka 23 | -------------------------------------------------------------------------------- /delegation_tokens/server.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=kafka.confluent.local 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = serverAuth, clientAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=kafka.confluent.local 30 | -------------------------------------------------------------------------------- /delegation_tokens/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. 
Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-1.8.0-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. Configure zookeeper 15 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 16 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf 17 | 18 | EXPOSE 2181 19 | 20 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 21 | -------------------------------------------------------------------------------- /delegation_tokens/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /delegation_tokens/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider 5 | requireClientAuthScheme=sasl 6 | 7 | -------------------------------------------------------------------------------- /delegation_tokens/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | user_kafka="kafka"; 4 | }; 5 | -------------------------------------------------------------------------------- /kafka-connect-mtls/.gitignore: -------------------------------------------------------------------------------- 1 | connect/secrets/client-*.pem 2 | connect/secrets/client.p12 3 | -------------------------------------------------------------------------------- /kafka-connect-mtls/README.md: -------------------------------------------------------------------------------- 1 | # Kafka Connect REST API SSL client auth 2 | 3 | One of the common questions regarding security of the Kafka Connect REST API is how to prevent unwanted access. 4 | This playbook shows one of the methods currently possible (as of November 2019), using the SSL mTLS feature. 5 | 6 | ## Requirements 7 | 8 | To be able to execute this playbook you require: 9 | 10 | * Docker (19.03 or later) 11 | * Docker compose (1.24.1 or later) 12 | * curl 13 | 14 | ## Bootstrap the playbook 15 | 16 | The playbook bootstrap can be done by executing the ```./up``` script. 17 | 18 | ### Prepared TLS certificates and keystores 19 | 20 | A set of prepared TLS certificates and keystores is available within the _connect/secrets_ directory. 21 | The most relevant ones are: 22 | 23 | * _certificate.p12_: TLS certificate used to verify the failure of mTLS (this is a self-signed certificate) 24 | * _rest-client.p12_: TLS certificate used to verify successful authentication using mTLS (this cert is signed by the same CA as the server identity) 25 | * _server.keystore_ and _server.truststore_: keystores prepared for the Kafka Connect REST server identity. 26 | 27 | All these certs have been created with the ca-builder-scripts. 28 | 29 | ## Verify the connectivity 30 | 31 | To verify the connectivity there is a prepared script ```check-ssl-client-auth.sh```. 32 | This script uses curl to verify a successful and a failed authentication using mTLS
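The curl calls in ```check-ssl-client-auth.sh``` are conceptually like the following sketch (the REST port 8083 and the pass phrase confluent are assumptions; note that --cert-type P12 needs a curl build whose TLS backend supports PKCS#12):

```bash
# accepted: rest-client.p12 is signed by the CA the server trusts
curl --cacert connect/secrets/ca-chain.cert.pem \
     --cert-type P12 --cert connect/secrets/rest-client.p12:confluent \
     https://localhost:8083/connectors

# rejected: certificate.p12 is self-signed, so the TLS handshake fails
curl --cacert connect/secrets/ca-chain.cert.pem \
     --cert-type P12 --cert connect/secrets/certificate.p12:confluent \
     https://localhost:8083/connectors
```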
26 | 27 | All these certs have been created with the ca-builder-scripts. 28 | 29 | ## Verify the connectivity 30 | 31 | To verify the connectivity, use the prepared script ```check-ssl-client-auth.sh```. 32 | This script uses curl to verify both a successful and a failed authentication using mTLS. -------------------------------------------------------------------------------- /kafka-connect-mtls/connect/config/ca.cnf: -------------------------------------------------------------------------------- 1 | [ policy_match ] 2 | countryName = match 3 | stateOrProvinceName = match 4 | organizationName = match 5 | organizationalUnitName = optional 6 | commonName = supplied 7 | emailAddress = optional 8 | 9 | [ req ] 10 | prompt = no 11 | distinguished_name = dn 12 | default_md = sha256 13 | default_bits = 4096 14 | x509_extensions = v3_ca 15 | 16 | [ dn ] 17 | countryName = DE 18 | organizationName = Confluent 19 | localityName = Berlin 20 | commonName = connect.confluent.local 21 | 22 | [ v3_ca ] 23 | subjectKeyIdentifier=hash 24 | basicConstraints = critical,CA:true 25 | authorityKeyIdentifier=keyid:always,issuer:always 26 | keyUsage = critical,keyCertSign,cRLSign 27 | -------------------------------------------------------------------------------- /kafka-connect-mtls/connect/config/client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=connect.client 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = clientAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=localhost 30 | -------------------------------------------------------------------------------- /kafka-connect-mtls/connect/secrets/connect.key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | Proc-Type: 4,ENCRYPTED 3 | DEK-Info: AES-256-CBC,03578ECB28A28BA14408AF4EB2F82DCB 4 | 5 | hQkZlH8VnKBOMDtLMQT85fvYzoBYCEyjif/1/6QPid1asfxp7LGAiIEVM/sSecRz 6 | axGneC1B9dp1gEY09XISa2ChZwGY+vUfmpOdpyi+aeSLUeF05jsPS16fbvDfo8t4 7 | JnMk8XsvXs1SmXBtMvcFDnqVxWYp/kBNk2CrM24/aRgN14pykSEu0Uy5NhUGbQ/D 8 | vdhFq1BdWOapQQN0Oi7O5vtttOapiLKPPKaewGGV1LhyG2rJEoxj5JzrpIj2N6mB 9 | yjzGaHkNdvFi2BoHb/UIAaKSX5Kn+B1MW4m7eJ3YuogvBnlmr6pZaa9xxYOI+fV8 10 | eM1JG0U1P477dqLJLRCRTtYZACmXbRy5/WV1TewKjw/ij7QHQ68ISQa2748X1gL6 11 | 09jL5Grux/gJSDuJrhuSMyPwYSJNx5585/HLLQseKOFdvqFbHAjfd2/ZptaBpxp4 12 | jbkylbVxvroPZQjRhj1p0v3CkUCWYUg3CSkzNLR5Y21AvqH5ZHCbjerfZvrPp3Wc 13 | nHShzc18wUruHmT8dwdDSb7s5OJRFkEDLNFsCsijtl47yDwaQoazeJ8UXkQ/q+FR 14 | iIfctz0JZrWXbH89nr3i3cjwGOxQmPuMiCypYov0YezuWwyiqQMM2r62nyFTCUQQ 15 | ooqh3OV02suBNn2GXnrXwzdCoCgcL61a2l4+rHu8rsKHKX3VzEk+SP/WsgO65KAg 16 | jCSRV3zVTWTRbXmvFM+tv6ARDKgvhJUYAC4zuP5ZQJHsoLXhsJ/nHjlUVFVub9aT 17 | +BieN64UHih7lfKzK3OZJAuu5hSMY8vA8JuAkBoNNKB+CEwQnakhEQB6u87s9xgO 18 | GBu6med8u0isWI9uSwZ0u2/MaELRmcx6MvjdOFh8TIWU1vdtcf8F8avyP9wxGBMq 19 | PeFxYJ+qCx8tRUHgO6QmE/cZKmQr//mtZgOMjVwVnysmQLh8Shn1WW2FHhqWauAo 20 | 
FC2PJPw4aribkG8/O/mVx0P5/bcgHL8N7S1DWLUFsMzMYJJ35CNOG94cOFSAWFRr 21 | 2mJLRzJFwxuh1S9S/SwqMhdz96I4OsmKAVp6iKDVusR6qBoAAjkbLCAJqF883FHo 22 | Urfgr5lLx/9wG6E+zGgQOC1bKsFTlSEVQ7vVLizG2etDytOvGM70Gz5ecyDY1zFl 23 | 1D9hGLhoJtFWE8U4CLPI6pQrKXRftnUV2RbvmsgRBuHBm/HeBLbNBsuDNK3WCzvI 24 | YRzBpyXOblcENvj258yVtfqjRAR2b5hWeRjdyCZjxNq2S7f1Qow0nhPxe2Fq3JLu 25 | nGstpUt1gwKNstoMEVwYI8TzFP4kRzx5H3w2EgjzxWoybJXqANW3XHySBMcim6NR 26 | QMnn30bcjMI8vIe1AaL+AKskNBf4aVj+4IzvC6L+1yrzI5l4KfWbcJJk+q/rTXdQ 27 | mwy4DW5LfenlZoh8zQIGdHKAbdrFwI0gk0pX3Bjy69+1QAy1gNPqe5L9IUMmbsZE 28 | hueSRSsPgI7PDT2hv8XeoWuy+Un6/l4E34F2WvtR802kaYwgeRZIcJrFV8+yALvt 29 | awVcFBkjmWFRjGLFG7/f29+n998g31FqynKU9NmPL49aB8UfQBrtLY07f6snYPA2 30 | -----END RSA PRIVATE KEY----- 31 | -------------------------------------------------------------------------------- /kafka-connect-mtls/connect/secrets/server.keystore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/kafka-connect-mtls/connect/secrets/server.keystore -------------------------------------------------------------------------------- /kafka-connect-mtls/connect/secrets/server.truststore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/kafka-connect-mtls/connect/secrets/server.truststore -------------------------------------------------------------------------------- /kafka-connect-mtls/up: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker-compose up -d 4 | 5 | echo "to verify the connection use the check-ssl-client-auth.sh script" 6 | -------------------------------------------------------------------------------- /kerberos-multi-node/README.md: -------------------------------------------------------------------------------- 1 | # Kerberos multi-node deployment example 2 | 3 | This example shows how to deploy multiple Kafka nodes in an example Kerberos-enabled environment. 4 | 5 | The only thing that is different from your normal environment is that this example uses a different principal for each zookeeper client. 6 | 7 | The Jira issue https://issues.apache.org/jira/browse/KAFKA-7710 contains more information. 8 | TL;DR: we have to set two configs in zookeeper.properties to make this work: 9 | 10 | ``` 11 | kerberos.removeHostFromPrincipal = true 12 | kerberos.removeRealmFromPrincipal = false 13 | ``` 14 | 15 | The first removes the hostname from the principal name, 16 | so that anyone authenticated with a principal of the form 'kafka/*@REALM' is allowed by the ZK ACLs. -------------------------------------------------------------------------------- /kerberos-multi-node/down: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | DESTROY=no 3 | if [ ! -f "${PWD}/docker-compose.yml" ]; then 4 | echo "No docker-compose found. Exiting." 5 | exit 2 6 | fi 7 | 8 | usage() 9 | { 10 | echo "Usage: $0 [-h] [-d]" 11 | echo "-d destroy images. They will be rebuilt next time" 12 | exit 2 13 | } 14 | 15 | destroy() 16 | { 17 | docker-compose rm --force 18 | } 19 | 20 | stop_docker-compose() 21 | { 22 | docker-compose stop 23 | } 24 | 25 | # Should use getopts here but, why? 
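# (For illustration only -- a hedged sketch of the getopts alternative
#  mentioned above, not part of the playbook:
#    while getopts "hd" opt; do
#      case "${opt}" in
#        h) usage ;;
#        d) DESTROY=yes ;;
#        *) usage ;;
#      esac
#    done
#  )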
26 | if [[ "${1}" == "-h" ]]; then 27 | usage 28 | exit 2 29 | fi 30 | 31 | if [[ "${1}" == "-d" ]]; then 32 | echo "Stopping and destroying containers" 33 | DESTROY=yes 34 | fi 35 | 36 | stop_docker-compose 37 | if [[ $? != 0 ]]; then 38 | echo "Stopping the docker-compose failed. Exiting for manual cleanup" 39 | echo "I suggest 'docker-compose ps'" 40 | exit 2 41 | fi 42 | 43 | if [[ "${DESTROY}" == "yes" ]]; then 44 | destroy 45 | fi 46 | 47 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | RUN yum install -y confluent-control-center 14 | 15 | # 3. Configure Kafka for Kerberos 16 | RUN yum install -y krb5-workstation krb5-libs 17 | COPY server.properties /etc/kafka/server.properties 18 | COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf 19 | COPY consumer.properties /etc/kafka/consumer.properties 20 | 21 | EXPOSE 9093 22 | 23 | CMD kafka-server-start /etc/kafka/server.properties 24 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/6.0/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/6.0 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka/consumer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.kerberos.service.name=kafka 4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 5 | useTicketCache=true; 6 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka/kafka.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/var/lib/secret/kafka.key" 6 | principal="kafka/kafka.kerberos-multi-node_default@TEST.CONFLUENT.IO"; 7 | }; 8 | 9 | KafkaClient { 10 | com.sun.security.auth.module.Krb5LoginModule required 11 | useKeyTab=true 12 | storeKey=true 13 | keyTab="/var/lib/secret/kafka.key" 14 | principal="admin@TEST.CONFLUENT.IO"; 15 | }; 16 | 17 | Client { 18 | com.sun.security.auth.module.Krb5LoginModule required 19 | useKeyTab=true 20 | storeKey=true 21 | useTicketCache=false 22 | keyTab="/var/lib/secret/kafka.key" 23 | principal="kafka@TEST.CONFLUENT.IO"; 24 | }; 25 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka/server.properties: 
-------------------------------------------------------------------------------- 1 | broker.id=0 2 | listeners=SASL_PLAINTEXT://kafka:9093 3 | advertised.listeners=SASL_PLAINTEXT://kafka:9093 4 | security.inter.broker.protocol=SASL_PLAINTEXT 5 | log.dirs=/var/lib/kafka 6 | offsets.topic.replication.factor=1 7 | transaction.state.log.replication.factor=1 8 | transaction.state.log.min.isr=1 9 | zookeeper.connect=zookeeper.kerberos-multi-node_default:2181 10 | zookeeper.set.acl=true 11 | 12 | # Kerberos / GSSAPI Authentication mechanism 13 | sasl.enabled.mechanisms=GSSAPI 14 | sasl.mechanism.inter.broker.protocol=GSSAPI 15 | security.inter.broker.protocol=SASL_PLAINTEXT 16 | sasl.kerberos.service.name=kafka 17 | allow.everyone.if.no.acl.found=false 18 | super.users=User:admin;User:kafka 19 | authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer 20 | 21 | # metric reporter configuration with Kerberos 22 | metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter 23 | confluent.metrics.reporter.bootstrap.servers=kafka:9093 24 | confluent.metrics.reporter.sasl.mechanism=GSSAPI 25 | confluent.metrics.reporter.security.protocol=SASL_PLAINTEXT 26 | confluent.metrics.reporter.sasl.kerberos.service.name=kafka 27 | confluent.metrics.reporter.sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 28 | useKeyTab=true \ 29 | storeKey=true \ 30 | keyTab="/var/lib/secret/kafka.key" \ 31 | principal="kafka@TEST.CONFLUENT.IO"; 32 | confluent.metrics.reporter.topic.replicas=1 33 | 34 | 35 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka1/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | RUN yum install -y confluent-control-center 14 | 15 | # 3. 
Configure Kafka for Kerberos 16 | RUN yum install -y krb5-workstation krb5-libs 17 | COPY server.properties /etc/kafka/server.properties 18 | COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf 19 | COPY consumer.properties /etc/kafka/consumer.properties 20 | 21 | EXPOSE 9093 22 | 23 | CMD kafka-server-start /etc/kafka/server.properties 24 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka1/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/6.0/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/6.0 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka1/consumer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.kerberos.service.name=kafka 4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 5 | useTicketCache=true; 6 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka1/kafka.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/var/lib/secret/kafka.key" 6 | principal="kafka/kafka1.kerberos-multi-node_default@TEST.CONFLUENT.IO"; 7 | }; 8 | 9 | KafkaClient { 10 | com.sun.security.auth.module.Krb5LoginModule required 11 | useKeyTab=true 12 | storeKey=true 13 | keyTab="/var/lib/secret/kafka.key" 14 | principal="admin@TEST.CONFLUENT.IO"; 15 | }; 16 | 17 | Client { 18 | com.sun.security.auth.module.Krb5LoginModule required 19 | useKeyTab=true 20 | storeKey=true 21 | useTicketCache=false 22 | keyTab="/var/lib/secret/kafka.key" 23 | principal="kafka@TEST.CONFLUENT.IO"; 24 | }; 25 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka1/server.properties: -------------------------------------------------------------------------------- 1 | broker.id=1 2 | listeners=SASL_PLAINTEXT://kafka1:9093 3 | advertised.listeners=SASL_PLAINTEXT://kafka1:9093 4 | security.inter.broker.protocol=SASL_PLAINTEXT 5 | log.dirs=/var/lib/kafka 6 | offsets.topic.replication.factor=1 7 | transaction.state.log.replication.factor=1 8 | transaction.state.log.min.isr=1 9 | zookeeper.connect=zookeeper.kerberos-multi-node_default:2181 10 | zookeeper.set.acl=true 11 | 12 | # Kerberos / GSSAPI Authentication mechanism 13 | sasl.enabled.mechanisms=GSSAPI 14 | sasl.mechanism.inter.broker.protocol=GSSAPI 15 | security.inter.broker.protocol=SASL_PLAINTEXT 16 | sasl.kerberos.service.name=kafka 17 | allow.everyone.if.no.acl.found=false 18 | super.users=User:admin;User:kafka 19 | authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer 20 | 21 | # metric reporter configuration with Kerberos 22 | metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter 23 | confluent.metrics.reporter.bootstrap.servers=kafka:9093 24 | confluent.metrics.reporter.sasl.mechanism=GSSAPI 25 | 
confluent.metrics.reporter.security.protocol=SASL_PLAINTEXT 26 | confluent.metrics.reporter.sasl.kerberos.service.name=kafka 27 | confluent.metrics.reporter.sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 28 | useKeyTab=true \ 29 | storeKey=true \ 30 | keyTab="/var/lib/secret/kafka.key" \ 31 | principal="kafka@TEST.CONFLUENT.IO"; 32 | confluent.metrics.reporter.topic.replicas=1 33 | -------------------------------------------------------------------------------- /kerberos-multi-node/kdc/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Installing Kerberos server, admin and client 6 | RUN yum install -y krb5-server krb5-libs 7 | RUN yum install -y krb5-workstation krb5-libs 8 | 9 | # 2. Configuring Kerberos and KDC 10 | COPY krb5.conf /etc/krb5.conf 11 | RUN mkdir /var/log/kerberos 12 | RUN mkdir /etc/kdc 13 | RUN mkdir -p /var/kerberos/krb5kdc/ 14 | RUN ln -s /etc/krb5.conf /etc/kdc/krb5.conf 15 | 16 | EXPOSE 88 17 | 18 | RUN kdb5_util -P confluent -r TEST.CONFLUENT.IO create -s 19 | 20 | CMD /usr/sbin/krb5kdc -n 21 | -------------------------------------------------------------------------------- /kerberos-multi-node/kdc/krb5.conf: -------------------------------------------------------------------------------- 1 | [libdefaults] 2 | default_realm = TEST.CONFLUENT.IO 3 | ticket_lifetime = 24h 4 | renew_lifetime = 7d 5 | forwardable = true 6 | rdns = false 7 | dns_lookup_kdc = no 8 | dns_lookup_realm = no 9 | 10 | [realms] 11 | TEST.CONFLUENT.IO = { 12 | kdc = kdc 13 | admin_server = kadmin 14 | } 15 | 16 | [domain_realm] 17 | .test.confluent.io = TEST.CONFLUENT.IO 18 | test.confluent.io = TEST.CONFLUENT.IO 19 | kerberos_default = TEST.CONFLUENT.IO 20 | .kerberos_default = TEST.CONFLUENT.IO 21 | 22 | [logging] 23 | kdc = FILE:/var/log/kerberos/krb5kdc.log 24 | admin_server = FILE:/var/log/kerberos/kadmin.log 25 | default = FILE:/var/log/kerberos/krb5lib.log 26 | -------------------------------------------------------------------------------- /kerberos-multi-node/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. 
Configure zookeeper for Kerberos 15 | RUN yum install -y krb5-workstation krb5-libs 16 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 17 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf 18 | 19 | EXPOSE 2181 20 | 21 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 22 | -------------------------------------------------------------------------------- /kerberos-multi-node/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/6.0/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/6.0 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /kerberos-multi-node/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 16 | dataDir=/var/lib/zookeeper 17 | # the port at which the clients will connect 18 | clientPort=2181 19 | # disable the per-ip limit on the number of connections since this is a non-production config 20 | maxClientCnxns=0 21 | authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider 22 | zookeeper.allowSaslFailedClients=false 23 | requireClientAuthScheme=sasl 24 | kerberos.removeHostFromPrincipal = true 25 | kerberos.removeRealmFromPrincipal = false 26 | -------------------------------------------------------------------------------- /kerberos-multi-node/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | useTicketCache=false 6 | keyTab="/var/lib/secret/kafka.key" 7 | principal="zkservice/zookeeper.kerberos-multi-node_default@TEST.CONFLUENT.IO"; 8 | }; 9 | 10 | Client { 11 | com.sun.security.auth.module.Krb5LoginModule required 12 | useTicketCache=true; 13 | }; 14 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/client/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos7 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. 
Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install confluent kafka tools: 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. Install Kerberos libraries 15 | RUN yum install -y krb5-workstation krb5-libs 16 | 17 | # 4. Copy in required settings for client access to Kafka 18 | COPY consumer.properties /etc/kafka/consumer.properties 19 | COPY producer.properties /etc/kafka/producer.properties 20 | COPY command.properties /etc/kafka/command.properties 21 | COPY scram.properties /etc/kafka/scram.properties 22 | COPY client.sasl.jaas.config /etc/kafka/client_jaas.conf 23 | 24 | ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/client_jaas.conf 25 | 26 | CMD sleep infinity 27 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/client/client.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | /* 2 | * Credentials to use when connecting to ZooKeeper directly. 3 | * 4 | * Whenever possible you should use the Kafka AdminClient API instead of ZooKeeper. 5 | */ 6 | Client { 7 | com.sun.security.auth.module.Krb5LoginModule required 8 | useTicketCache=true; 9 | }; 10 | 11 | 12 | /* 13 | * Credentials to connect to Kafka. 14 | */ 15 | KafkaClient { 16 | com.sun.security.auth.module.Krb5LoginModule required 17 | useTicketCache=true; 18 | }; 19 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/client/command.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 4 | serviceName=kafka \ 5 | useTicketCache=true; 6 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/client/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/client/consumer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.kerberos.service.name=kafka 4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 5 | useTicketCache=true; 6 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/client/producer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.kerberos.service.name=kafka 4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 5 | useTicketCache=true; 6 | -------------------------------------------------------------------------------- 
/kerberos-multi-sasl/client/scram.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-512 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="kafka" \ 5 | password="kafka"; 6 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | services: 3 | kdc: 4 | hostname: kdc.kerberos-demo.local 5 | #domainname: kerberos_default 6 | build: kdc/ 7 | container_name: kdc 8 | volumes: 9 | - secret:/var/lib/secret 10 | - ./kdc/krb5.conf:/etc/kdc/krb5.conf 11 | 12 | zookeeper: 13 | build: zookeeper/ 14 | container_name: zookeeper 15 | hostname: zookeeper.kerberos-demo.local 16 | #domainname: kerberos_default 17 | depends_on: 18 | - kdc 19 | # Required to wait for the keytab to get generated 20 | restart: on-failure 21 | volumes: 22 | - secret:/var/lib/secret 23 | - ./kdc/krb5.conf:/etc/krb5.conf 24 | 25 | kafka: 26 | build: kafka/ 27 | container_name: kafka 28 | hostname: kafka.kerberos-demo.local 29 | #domainname: kerberos_default 30 | depends_on: 31 | - zookeeper 32 | - kdc 33 | # Required to wait for the keytab to get generated 34 | restart: on-failure 35 | volumes: 36 | - secret:/var/lib/secret 37 | - ./kdc/krb5.conf:/etc/krb5.conf 38 | 39 | client: 40 | build: client/ 41 | container_name: client 42 | hostname: client.kerberos-demo.local 43 | #domainname: kerberos_default 44 | depends_on: 45 | - kafka 46 | - kdc 47 | # Required to wait for the keytab to get generated 48 | volumes: 49 | - secret:/var/lib/secret 50 | - ./kdc/krb5.conf:/etc/krb5.conf 51 | 52 | volumes: 53 | secret: {} 54 | 55 | networks: 56 | default: 57 | name: kerberos-demo.local 58 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | RUN yum install -y confluent-control-center 14 | 15 | # 3. 
Configure Kafka for Kerberos 16 | RUN yum install -y krb5-workstation krb5-libs 17 | COPY server.properties /etc/kafka/server.properties 18 | COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf 19 | 20 | EXPOSE 9093 21 | 22 | ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf 23 | 24 | CMD kafka-server-start /etc/kafka/server.properties 25 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/kafka/kafka.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | /* 2 | * The service principal 3 | */ 4 | /* 5 | KafkaServer { 6 | com.sun.security.auth.module.Krb5LoginModule required 7 | useKeyTab=true 8 | storeKey=true 9 | keyTab="/var/lib/secret/kafka.key" 10 | principal="kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO"; 11 | }; 12 | */ 13 | 14 | /* 15 | * Zookeeper client principal 16 | */ 17 | Client { 18 | com.sun.security.auth.module.Krb5LoginModule required 19 | useKeyTab=true 20 | storeKey=true 21 | useTicketCache=false 22 | keyTab="/var/lib/secret/zookeeper-client.key" 23 | principal="zkclient@TEST.CONFLUENT.IO"; 24 | }; 25 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/kdc/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Installing Kerberos server, admin and client 6 | RUN yum install -y krb5-server krb5-libs 7 | RUN yum install -y krb5-workstation krb5-libs 8 | 9 | # 2. 
Configuring Kerberos and KDC 10 | COPY krb5.conf /etc/krb5.conf 11 | RUN mkdir /var/log/kerberos 12 | RUN mkdir /etc/kdc 13 | RUN mkdir -p /var/kerberos/krb5kdc/ 14 | RUN ln -s /etc/krb5.conf /etc/kdc/krb5.conf 15 | 16 | EXPOSE 88 17 | 18 | RUN kdb5_util -P confluent -r TEST.CONFLUENT.IO create -s 19 | 20 | CMD /usr/sbin/krb5kdc -n 21 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/kdc/krb5.conf: -------------------------------------------------------------------------------- 1 | [libdefaults] 2 | default_realm = TEST.CONFLUENT.IO 3 | ticket_lifetime = 24h 4 | renew_lifetime = 7d 5 | forwardable = true 6 | rdns = false 7 | dns_lookup_kdc = no 8 | dns_lookup_realm = no 9 | 10 | [realms] 11 | TEST.CONFLUENT.IO = { 12 | kdc = kdc 13 | admin_server = kadmin 14 | } 15 | 16 | [domain_realm] 17 | .test.confluent.io = TEST.CONFLUENT.IO 18 | test.confluent.io = TEST.CONFLUENT.IO 19 | kerberos-demo.local = TEST.CONFLUENT.IO 20 | .kerberos-demo.local = TEST.CONFLUENT.IO 21 | 22 | [logging] 23 | kdc = FILE:/var/log/kerberos/krb5kdc.log 24 | admin_server = FILE:/var/log/kerberos/kadmin.log 25 | default = FILE:/var/log/kerberos/krb5lib.log 26 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. 
Configure zookeeper for Kerberos 15 | RUN yum install -y krb5-workstation krb5-libs 16 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 17 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf 18 | 19 | EXPOSE 2181 20 | 21 | ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf 22 | 23 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 24 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider 5 | zookeeper.allowSaslFailedClients=false 6 | requireClientAuthScheme=sasl 7 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | keyTab="/var/lib/secret/zookeeper.key" 5 | storeKey=true 6 | useTicketCache=false 7 | principal="zookeeper/zookeeper.kerberos-demo.local@TEST.CONFLUENT.IO"; 8 | }; 9 | 10 | Client { 11 | com.sun.security.auth.module.Krb5LoginModule required 12 | useTicketCache=true; 13 | }; 14 | -------------------------------------------------------------------------------- /kerberos/client/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos7 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install confluent kafka tools: 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-kafka-2.12 13 | 14 | # 3. Install Kerberos libraries 15 | RUN yum install -y krb5-workstation krb5-libs 16 | 17 | # 4. Copy in required settings for client access to Kafka 18 | COPY consumer.properties /etc/kafka/consumer.properties 19 | COPY producer.properties /etc/kafka/producer.properties 20 | COPY command.properties /etc/kafka/command.properties 21 | COPY client.sasl.jaas.config /etc/kafka/client_jaas.conf 22 | 23 | ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/client_jaas.conf 24 | 25 | CMD sleep infinity 26 | -------------------------------------------------------------------------------- /kerberos/client/client.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | /* 2 | * Credentials to use when connecting to ZooKeeper directly. 
3 | * 4 | * Whenever possible you should use the Kafka AdminClient API instead of ZooKeeper. 5 | */ 6 | Client { 7 | com.sun.security.auth.module.Krb5LoginModule required 8 | useTicketCache=true; 9 | }; 10 | 11 | 12 | /* 13 | * Credentials to connect to Kafka. 14 | */ 15 | KafkaClient { 16 | com.sun.security.auth.module.Krb5LoginModule required 17 | useTicketCache=true; 18 | }; 19 | -------------------------------------------------------------------------------- /kerberos/client/command.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 4 | serviceName=kafka \ 5 | useTicketCache=true; 6 | -------------------------------------------------------------------------------- /kerberos/client/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos/client/consumer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.kerberos.service.name=kafka 4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 5 | useTicketCache=true; 6 | -------------------------------------------------------------------------------- /kerberos/client/producer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.kerberos.service.name=kafka 4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 5 | useTicketCache=true; 6 | -------------------------------------------------------------------------------- /kerberos/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | services: 3 | kdc: 4 | hostname: kdc.kerberos-demo.local 5 | #domainname: kerberos_default 6 | build: kdc/ 7 | container_name: kdc 8 | volumes: 9 | - secret:/var/lib/secret 10 | - ./kdc/krb5.conf:/etc/kdc/krb5.conf 11 | 12 | zookeeper: 13 | build: zookeeper/ 14 | container_name: zookeeper 15 | hostname: zookeeper.kerberos-demo.local 16 | #domainname: kerberos_default 17 | depends_on: 18 | - kdc 19 | # Required to wait for the keytab to get generated 20 | restart: on-failure 21 | volumes: 22 | - secret:/var/lib/secret 23 | - ./kdc/krb5.conf:/etc/krb5.conf 24 | 25 | kafka: 26 | build: kafka/ 27 | container_name: kafka 28 | hostname: kafka.kerberos-demo.local 29 | #domainname: kerberos_default 30 | depends_on: 31 | - zookeeper 32 | - kdc 33 | # Required to wait for the keytab to get generated 34 | restart: on-failure 35 | volumes: 36 | - secret:/var/lib/secret 37 | - ./kdc/krb5.conf:/etc/krb5.conf 38 | 39 | client: 40 | build: client/ 41 | container_name: client 42 | hostname: client.kerberos-demo.local 43 | #domainname: kerberos_default 44 | depends_on: 45 | - kafka 46 | - kdc 47 | # Required to wait for the keytab to 
get generated 48 | volumes: 49 | - secret:/var/lib/secret 50 | - ./kdc/krb5.conf:/etc/krb5.conf 51 | 52 | volumes: 53 | secret: {} 54 | 55 | networks: 56 | default: 57 | name: kerberos-demo.local 58 | -------------------------------------------------------------------------------- /kerberos/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-kafka-2.12 13 | RUN yum install -y confluent-control-center 14 | 15 | # 3. Configure Kafka for Kerberos 16 | RUN yum install -y krb5-workstation krb5-libs 17 | COPY server.properties /etc/kafka/server.properties 18 | COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf 19 | 20 | EXPOSE 9093 21 | 22 | ENV KAFKA_OPTS="-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dzookeeper.sasl.client.username=zkservice" 23 | 24 | CMD kafka-server-start /etc/kafka/server.properties 25 | -------------------------------------------------------------------------------- /kerberos/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos/kafka/kafka.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | /* 2 | * The service principal 3 | */ 4 | KafkaServer { 5 | com.sun.security.auth.module.Krb5LoginModule required 6 | useKeyTab=true 7 | storeKey=true 8 | keyTab="/var/lib/secret/kafka.key" 9 | principal="kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO"; 10 | }; 11 | 12 | /* 13 | * Zookeeper client principal 14 | */ 15 | Client { 16 | com.sun.security.auth.module.Krb5LoginModule required 17 | useKeyTab=true 18 | storeKey=true 19 | useTicketCache=false 20 | keyTab="/var/lib/secret/zookeeper-client.key" 21 | principal="zkclient@TEST.CONFLUENT.IO"; 22 | }; 23 | -------------------------------------------------------------------------------- /kerberos/kafka/server.properties: -------------------------------------------------------------------------------- 1 | # Basic broker and listener configuration 2 | broker.id=0 3 | listeners=SASL_PLAINTEXT://kafka.kerberos-demo.local:9093 4 | zookeeper.connect=zookeeper.kerberos-demo.local:2181 5 | log.dirs=/var/lib/kafka 6 | 7 | offsets.topic.replication.factor=1 8 | transaction.state.log.replication.factor=1 9 | transaction.state.log.min.isr=1 10 | num.partitions=12 11 | 12 | 13 | # Kerberos / GSSAPI Authentication mechanism 14 | sasl.enabled.mechanisms=GSSAPI 15 | sasl.kerberos.service.name=kafka 16 | 17 | 18 | # Configure replication to require Kerberos: 19 | sasl.mechanism.inter.broker.protocol=GSSAPI 20 | security.inter.broker.protocol=SASL_PLAINTEXT 21 | 22 | 23 | # Authorization config: 24 | 
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer 25 | zookeeper.set.acl=true 26 | allow.everyone.if.no.acl.found=false 27 | super.users=User:admin;User:kafka 28 | 29 | 30 | # Demonstrate setting up the Confluent Metrics Reporter with required *client* credentials 31 | metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter 32 | confluent.metrics.reporter.bootstrap.servers=kafka:9093 33 | confluent.metrics.reporter.sasl.mechanism=GSSAPI 34 | confluent.metrics.reporter.security.protocol=SASL_PLAINTEXT 35 | confluent.metrics.reporter.sasl.kerberos.service.name=kafka 36 | confluent.metrics.reporter.sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 37 | useKeyTab=true \ 38 | storeKey=true \ 39 | keyTab="/var/lib/secret/kafka-admin.key" \ 40 | principal="admin/for-kafka@TEST.CONFLUENT.IO"; 41 | 42 | confluent.metrics.reporter.topic.replicas=1 43 | confluent.support.metrics.enable=false 44 | confluent.support.customer.id=anonymous 45 | -------------------------------------------------------------------------------- /kerberos/kdc/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Installing Kerberos server, admin and client 6 | RUN yum install -y krb5-server krb5-libs 7 | RUN yum install -y krb5-workstation krb5-libs 8 | 9 | # 2. Configuring Kerberos and KDC 10 | COPY krb5.conf /etc/krb5.conf 11 | RUN mkdir /var/log/kerberos 12 | RUN mkdir /etc/kdc 13 | RUN mkdir -p /var/kerberos/krb5kdc/ 14 | RUN ln -s /etc/krb5.conf /etc/kdc/krb5.conf 15 | 16 | EXPOSE 88 17 | 18 | RUN kdb5_util -P confluent -r TEST.CONFLUENT.IO create -s 19 | 20 | CMD /usr/sbin/krb5kdc -n 21 | -------------------------------------------------------------------------------- /kerberos/kdc/krb5.conf: -------------------------------------------------------------------------------- 1 | [libdefaults] 2 | default_realm = TEST.CONFLUENT.IO 3 | forwardable = true 4 | rdns = false 5 | dns_lookup_kdc = no 6 | dns_lookup_realm = no 7 | 8 | [realms] 9 | TEST.CONFLUENT.IO = { 10 | kdc = kdc 11 | admin_server = kadmin 12 | } 13 | 14 | [domain_realm] 15 | .test.confluent.io = TEST.CONFLUENT.IO 16 | test.confluent.io = TEST.CONFLUENT.IO 17 | kerberos-demo.local = TEST.CONFLUENT.IO 18 | .kerberos-demo.local = TEST.CONFLUENT.IO 19 | 20 | [logging] 21 | kdc = FILE:/var/log/kerberos/krb5kdc.log 22 | admin_server = FILE:/var/log/kerberos/kadmin.log 23 | default = FILE:/var/log/kerberos/krb5lib.log 24 | -------------------------------------------------------------------------------- /kerberos/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-kafka-2.12 13 | 14 | # 3. 
Configure zookeeper for Kerberos 15 | RUN yum install -y krb5-workstation krb5-libs 16 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 17 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf 18 | 19 | EXPOSE 2181 20 | 21 | ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf 22 | 23 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 24 | -------------------------------------------------------------------------------- /kerberos/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider 5 | zookeeper.allowSaslFailedClients=false 6 | requireClientAuthScheme=sasl 7 | -------------------------------------------------------------------------------- /kerberos/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | keyTab="/var/lib/secret/zookeeper.key" 5 | storeKey=true 6 | useTicketCache=false 7 | principal="zkservice/zookeeper.kerberos-demo.local@TEST.CONFLUENT.IO"; 8 | }; 9 | 10 | Client { 11 | com.sun.security.auth.module.Krb5LoginModule required 12 | useTicketCache=true; 13 | }; 14 | -------------------------------------------------------------------------------- /ldap-auth/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | ldap: 4 | image: osixia/openldap:1.3.0 5 | hostname: ldap 6 | container_name: ldap 7 | environment: 8 | LDAP_ORGANISATION: "Confluent" 9 | LDAP_DOMAIN: "confluent.io" 10 | ports: 11 | - "389:389" 12 | - "636:636" 13 | volumes: 14 | - "$PWD/ldap/custom:/container/service/slapd/assets/config/bootstrap/ldif/custom" 15 | command: "--copy-service" 16 | 17 | phpldapadmin-service: 18 | image: osixia/phpldapadmin:0.9.0 19 | container_name: ldapadmin-service 20 | environment: 21 | - PHPLDAPADMIN_LDAP_HOSTS=ldap 22 | ports: 23 | - "6444:443" 24 | depends_on: 25 | - ldap 26 | 27 | zookeeper: 28 | build: zookeeper/ 29 | hostname: zookeeper 30 | container_name: zookeeper 31 | environment: 32 | - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf 33 | 34 | kafka: 35 | build: kafka/ 36 | container_name: kafka 37 | environment: 38 | - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf 39 | depends_on: 40 | - zookeeper 41 | - ldap 42 | volumes: 43 | - "$PWD/kafka/users:/service/kafka/users" 44 | - "$PWD/kafka/jks:/etc/kafka/jks" 45 | ports: 46 | - "9093:9093" 47 | 48 | -------------------------------------------------------------------------------- /ldap-auth/kafka/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER seknop@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-server 13 | RUN yum install -y confluent-security 14 | 15 | # 3. Configure Kafka for LDAP authentication 16 | COPY server.properties /etc/kafka/server.properties 17 | COPY kafka.jaas.config /etc/kafka/kafka_server_jaas.conf 18 | COPY log4j.properties /etc/kafka/log4j.properties 19 | 20 | COPY alice.properties /etc/kafka/alice.properties 21 | COPY barnie.properties /etc/kafka/barnie.properties 22 | COPY charlie.properties /etc/kafka/charlie.properties 23 | COPY kafka.properties /etc/kafka/kafka.properties 24 | 25 | EXPOSE 9093 26 | 27 | CMD kafka-server-start /etc/kafka/server.properties 28 | -------------------------------------------------------------------------------- /ldap-auth/kafka/alice.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="alice" password="alice-secret"; 5 | -------------------------------------------------------------------------------- /ldap-auth/kafka/barnie.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="barnie" \ 5 | password="barnie-secret"; 6 | -------------------------------------------------------------------------------- /ldap-auth/kafka/charlie.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="charlie" \ 5 | password="charlie-secret"; 6 | -------------------------------------------------------------------------------- /ldap-auth/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.5/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.5 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /ldap-auth/kafka/kafka.jaas.config: -------------------------------------------------------------------------------- 1 | Client { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | username="kafka" 4 | password="kafka"; 5 | }; 6 | -------------------------------------------------------------------------------- /ldap-auth/kafka/kafka.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required 
\ 4 | username="kafka" \ 5 | password="kafka"; 6 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/01_base.ldif: -------------------------------------------------------------------------------- 1 | dn: ou=users,dc=confluent,dc=io 2 | objectClass: organizationalUnit 3 | ou: Users 4 | 5 | dn: ou=groups,dc=confluent,dc=io 6 | objectClass: organizationalUnit 7 | ou: Groups 8 | 9 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/02_KafkaDevelopers.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: Kafka Developers 5 | gidNumber: 5000 6 | 7 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/03_ProjectA.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=ProjectA,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: ProjectA 5 | gidNumber: 5001 6 | 7 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/04_ProjectB.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=ProjectB,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: ProjectB 5 | gidNumber: 5002 6 | 7 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/10_alice.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=alice,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: alice 6 | sn: LookingGlass 7 | givenName: Alice 8 | cn: alice 9 | displayName: Alice LookingGlass 10 | uidNumber: 10000 11 | gidNumber: 5000 12 | userPassword: alice-secret 13 | gecos: alice 14 | loginShell: /bin/bash 15 | homeDirectory: /home/alice 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/11_barnie.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: barnie 6 | sn: Rubble 7 | givenName: Barnie 8 | cn: barnie 9 | displayName: Barnie Rubble 10 | uidNumber: 10001 11 | gidNumber: 5000 12 | userPassword: barnie-secret 13 | gecos: barnie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/barnie 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/12_charlie.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=charlie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: charlie 6 | sn: Sheen 7 | givenName: Charlie 8 | cn: charlie 9 | displayName: Charlie Sheen 10 | uidNumber: 10002 11 | gidNumber: 5000 12 | userPassword: charlie-secret 13 | gecos: charlie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/charlie 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/13_donald.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=donald,ou=users,{{ LDAP_BASE_DN }} 2 | 
objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: donald 6 | sn: Duck 7 | givenName: Donald 8 | cn: donald 9 | displayName: Donald Duck 10 | uidNumber: 10003 11 | gidNumber: 5000 12 | userPassword: donald-secret 13 | gecos: donald 14 | loginShell: /bin/bash 15 | homeDirectory: /home/donald 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/14_eva.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=eva,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: eva 6 | sn: Maria 7 | givenName: Eva 8 | cn: eva 9 | displayName: Eva Maria 10 | uidNumber: 10004 11 | gidNumber: 5000 12 | userPassword: eva-secret 13 | gecos: eva 14 | loginShell: /bin/bash 15 | homeDirectory: /home/eva 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/15_fritz.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=fritz,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: fritz 6 | sn: Walter 7 | givenName: Fritz 8 | cn: fritz 9 | displayName: Fritz Walter 10 | uidNumber: 10005 11 | gidNumber: 5000 12 | userPassword: fritz-secret 13 | gecos: fritz 14 | loginShell: /bin/bash 15 | homeDirectory: /home/fritz 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/16_greta.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=greta,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: greta 6 | sn: Thunberg 7 | givenName: Greta 8 | cn: greta 9 | displayName: Greta Thunberg 10 | uidNumber: 10006 11 | gidNumber: 5000 12 | userPassword: greta-secret 13 | gecos: greta 14 | loginShell: /bin/bash 15 | homeDirectory: /home/greta 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/17_kafka.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=kafka,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: kafka 6 | sn: kafka 7 | givenName: kafka 8 | cn: kafka 9 | displayName: kafka 10 | uidNumber: 10007 11 | gidNumber: 5000 12 | userPassword: kafka 13 | gecos: kafka 14 | loginShell: /bin/bash 15 | homeDirectory: /home/kafka 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/20_group_add.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | changetype: modify 3 | add: memberuid 4 | memberuid: cn=alice,ou=users,{{ LDAP_BASE_DN }} 5 | 6 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 7 | changetype: modify 8 | add: memberuid 9 | memberuid: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 10 | 11 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 12 | changetype: modify 13 | add: memberuid 14 | memberuid: cn=charlie,ou=users,{{ LDAP_BASE_DN }} 15 | 16 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 17 | changetype: modify 18 | add: memberuid 19 | memberuid: cn=eva,ou=users,{{ LDAP_BASE_DN }} 20 | 21 | dn: cn=Kafka Developers,ou=groups,{{ 
LDAP_BASE_DN }} 22 | changetype: modify 23 | add: memberuid 24 | memberuid: cn=fritz,ou=users,{{ LDAP_BASE_DN }} 25 | -------------------------------------------------------------------------------- /ldap-auth/up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ## start docker-compose up to and including kafka 4 | docker-compose up -d --build 5 | 6 | echo "Example configuration:" 7 | echo "Should succeed (barnie is in group)" 8 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --topic test-topic --producer.config=/etc/kafka/barnie.properties" 9 | echo "Should fail (charlie is NOT in group)" 10 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --topic test-topic --producer.config=/etc/kafka/charlie.properties" 11 | echo "Should succeed (alice is in group)" 12 | echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9093 --consumer.config /etc/kafka/alice.properties --topic test-topic --from-beginning" 13 | echo "List ACLs" 14 | echo "-> docker-compose exec kafka kafka-acls --bootstrap-server kafka:9093 --list --command-config /etc/kafka/kafka.properties" 15 | -------------------------------------------------------------------------------- /ldap-auth/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER seknop@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. 
Configure ZooKeeper for SASL authentication 15 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 16 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf 17 | 18 | EXPOSE 2181 19 | 20 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 21 | -------------------------------------------------------------------------------- /ldap-auth/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.5/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.5 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /ldap-auth/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider 5 | requireClientAuthScheme=sasl 6 | -------------------------------------------------------------------------------- /ldap-auth/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | user_kafka="kafka"; 4 | }; 5 | -------------------------------------------------------------------------------- /ldap/acls/acls.csv: -------------------------------------------------------------------------------- 1 | KafkaPrincipal,ResourceType,PatternType,ResourceName,Operation,PermissionType,Host 2 | User:kafka,Cluster,LITERAL,kafka-cluster,All,Allow,* 3 | Group:Kafka Developers,Group,LITERAL,*,Read,Allow,* 4 | Group:Kafka Developers,Topic,LITERAL,test-topic,Describe,Allow,* 5 | Group:Kafka Developers,Topic,LITERAL,test-topic,Read,Allow,* 6 | Group:Kafka Developers,Topic,LITERAL,test-topic,Write,Allow,* 7 | Group:Kafka Developers,Topic,LITERAL,test-topic,Create,Allow,* 8 | -------------------------------------------------------------------------------- /ldap/add-user: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Creating the users 4 | docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=purbon-secret],SCRAM-SHA-512=[password=purbon-secret]' --entity-type users --entity-name purbon 5 | 6 | 7 | echo "Should succeed as the new user is in the group" 8 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --topic test-topic --producer.config=/service/kafka/users/purbon.properties" 9 | echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9093 --consumer.config /service/kafka/users/purbon.properties --topic test-topic --from-beginning" 10 | -------------------------------------------------------------------------------- /ldap/custom/01_base.ldif: -------------------------------------------------------------------------------- 1 | dn: ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: organizationalUnit 3 | ou: Users 4 | 5 | dn: ou=groups,{{ LDAP_BASE_DN }} 6 | objectClass: organizationalUnit 7 | ou: Groups 8 | 9 |
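A note on these LDIF bootstrap files: the {{ LDAP_BASE_DN }} placeholder is substituted by the LDAP container when it loads the files at startup, so the entries only become searchable once bootstrapping has finished. A quick way to confirm that a user entry landed is an ldapsearch from inside the container; the sketch below is hedged on several assumptions — that the docker-compose service is named ldap, that LDAP_BASE_DN resolves to dc=confluent,dc=io, and that the directory admin binds as cn=admin,dc=confluent,dc=io with password admin (adjust all three to your environment):

docker-compose exec ldap ldapsearch -x -H ldap://localhost:389 \
  -D "cn=admin,dc=confluent,dc=io" -w admin \
  -b "ou=users,dc=confluent,dc=io" "(uid=alice)" dn uid uidNumber gidNumber

If the entry is missing, the container logs usually show which LDIF failed to apply.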
-------------------------------------------------------------------------------- /ldap/custom/02_KafkaDevelopers.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: Kafka Developers 5 | gidNumber: 5000 6 | 7 | -------------------------------------------------------------------------------- /ldap/custom/10_alice.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=alice,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: alice 6 | sn: LookingGlass 7 | givenName: Alice 8 | cn: alice 9 | displayName: Alice LookingGlass 10 | uidNumber: 10000 11 | gidNumber: 5000 12 | userPassword: alice-secret 13 | gecos: alice 14 | loginShell: /bin/bash 15 | homeDirectory: /home/alice 16 | -------------------------------------------------------------------------------- /ldap/custom/11_barnie.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: barnie 6 | sn: Rubble 7 | givenName: Barnie 8 | cn: barnie 9 | displayName: Barnie Rubble 10 | uidNumber: 10001 11 | gidNumber: 5000 12 | userPassword: barnie-secret 13 | gecos: barnie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/barnie 16 | -------------------------------------------------------------------------------- /ldap/custom/12_charlie.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=charlie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: charlie 6 | sn: Sheen 7 | givenName: Charlie 8 | cn: charlie 9 | displayName: Charlie Sheen 10 | uidNumber: 10002 11 | gidNumber: 5000 12 | userPassword: charlie-secret 13 | gecos: charlie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/charlie 16 | -------------------------------------------------------------------------------- /ldap/custom/20_group_add.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | changetype: modify 3 | add: memberuid 4 | memberuid: cn=alice,ou=users,{{ LDAP_BASE_DN }} 5 | 6 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 7 | changetype: modify 8 | add: memberuid 9 | memberuid: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 10 | -------------------------------------------------------------------------------- /ldap/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER seknop@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-server 13 | RUN yum install -y confluent-security 14 | 15 | # 3.
Configure Kafka for SASL/SCRAM and LDAP group-based authorization 16 | COPY server.properties /etc/kafka/server.properties 17 | COPY server-with-ssl.properties /etc/kafka/server-with-ssl.properties 18 | COPY kafka.jaas.config /etc/kafka/kafka_server_jaas.conf 19 | COPY log4j.properties /etc/kafka/log4j.properties 20 | 21 | COPY alice.properties /etc/kafka/alice.properties 22 | COPY barnie.properties /etc/kafka/barnie.properties 23 | COPY charlie.properties /etc/kafka/charlie.properties 24 | COPY kafka.properties /etc/kafka/kafka.properties 25 | 26 | EXPOSE 9093 27 | 28 | CMD kafka-server-start /etc/kafka/server.properties 29 | -------------------------------------------------------------------------------- /ldap/kafka/alice.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="alice" \ 5 | password="alice-secret"; 6 | -------------------------------------------------------------------------------- /ldap/kafka/barnie.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="barnie" \ 5 | password="barnie-secret"; 6 | -------------------------------------------------------------------------------- /ldap/kafka/charlie.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="charlie" \ 5 | password="charlie-secret"; 6 | -------------------------------------------------------------------------------- /ldap/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.5/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.5 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /ldap/kafka/consumer.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="kafka" \ 5 | password="kafka"; 6 | 7 | -------------------------------------------------------------------------------- /ldap/kafka/jks/.gitignore: -------------------------------------------------------------------------------- 1 | *.crt 2 | *.csr 3 | *_creds 4 | *.jks 5 | *.srl 6 | *.key 7 | *.pem 8 | *.der 9 | *.p12 10 | -------------------------------------------------------------------------------- /ldap/kafka/kafka.jaas.config: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | org.apache.kafka.common.security.scram.ScramLoginModule required 3 | username="kafka" 4 | password="kafka"; 5 | }; 6 | 7 | Client { 8 | org.apache.zookeeper.server.auth.DigestLoginModule required
username="kafka" 10 | password="kafka"; 11 | }; 12 | -------------------------------------------------------------------------------- /ldap/kafka/kafka.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="kafka" \ 5 | password="kafka"; 6 | -------------------------------------------------------------------------------- /ldap/kafka/users/purbon.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="purbon" \ 5 | password="purbon-secret"; 6 | -------------------------------------------------------------------------------- /ldap/ldap/certs/.gitignore: -------------------------------------------------------------------------------- 1 | *.crt 2 | *.csr 3 | *_creds 4 | *.jks 5 | *.srl 6 | *.key 7 | *.pem 8 | *.der 9 | *.p12 10 | -------------------------------------------------------------------------------- /ldap/ldap/custom/01_base.ldif: -------------------------------------------------------------------------------- 1 | dn: ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: organizationalUnit 3 | ou: Users 4 | 5 | dn: ou=groups,{{ LDAP_BASE_DN }} 6 | objectClass: organizationalUnit 7 | ou: Groups 8 | 9 | -------------------------------------------------------------------------------- /ldap/ldap/custom/02_KafkaDevelopers.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: Kafka Developers 5 | gidNumber: 5000 6 | 7 | -------------------------------------------------------------------------------- /ldap/ldap/custom/10_alice.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=alice,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: alice 6 | sn: LookingGlass 7 | givenName: Alice 8 | cn: alice 9 | displayName: Alice LookingGlass 10 | uidNumber: 10000 11 | gidNumber: 5000 12 | userPassword: alice-secret 13 | gecos: alice 14 | loginShell: /bin/bash 15 | homeDirectory: /home/alice 16 | -------------------------------------------------------------------------------- /ldap/ldap/custom/11_barnie.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: barnie 6 | sn: Rubble 7 | givenName: Barnie 8 | cn: barnie 9 | displayName: Barnie Rubble 10 | uidNumber: 10001 11 | gidNumber: 5000 12 | userPassword: barnie-secret 13 | gecos: barnie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/barnie 16 | -------------------------------------------------------------------------------- /ldap/ldap/custom/12_charlie.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=charlie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: charlie 6 | sn: Sheen 7 | givenName: Charlie 8 | cn: charlie 9 | displayName: Charlie Sheen 10 | uidNumber: 10002 11 |
gidNumber: 5000 12 | userPassword: charlie-secret 13 | gecos: charlie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/charlie 16 | -------------------------------------------------------------------------------- /ldap/ldap/custom/20_group_add.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | changetype: modify 3 | add: memberuid 4 | memberuid: cn=alice,ou=users,{{ LDAP_BASE_DN }} 5 | 6 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 7 | changetype: modify 8 | add: memberuid 9 | memberuid: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 10 | -------------------------------------------------------------------------------- /ldap/scripts/.gitignore: -------------------------------------------------------------------------------- 1 | *.crt 2 | *.csr 3 | *_creds 4 | *.jks 5 | *.srl 6 | *.key 7 | *.pem 8 | *.der 9 | *.p12 10 | -------------------------------------------------------------------------------- /ldap/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER seknop@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. Configure ZooKeeper for SASL authentication 15 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 16 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf 17 | 18 | EXPOSE 2181 19 | 20 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 21 | -------------------------------------------------------------------------------- /ldap/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.5/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.5 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /ldap/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider 5 | requireClientAuthScheme=sasl 6 | -------------------------------------------------------------------------------- /ldap/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | user_kafka="kafka"; 4 | }; 5 | -------------------------------------------------------------------------------- /multi-sasl/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | 4 | zookeeper: 5 | build: zookeeper/ 6 | container_name: zookeeper 7 | hostname: zookeeper 8 | restart: on-failure 9 | environment: 10 | -
KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf 11 | 12 | kafka: 13 | build: kafka/ 14 | container_name: kafka 15 | environment: 16 | - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf 17 | depends_on: 18 | - zookeeper 19 | restart: on-failure 20 | -------------------------------------------------------------------------------- /multi-sasl/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | MAINTAINER seknop@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk-devel 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. Configure Kafka and zookeeper for Kerberos 15 | COPY server.properties /etc/kafka/server.properties 16 | COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf 17 | COPY consumer.properties /etc/kafka/consumer.properties 18 | COPY consumer.plain.properties /etc/kafka/consumer.plain.properties 19 | 20 | EXPOSE 9093 21 | 22 | CMD kafka-server-start /etc/kafka/server.properties 23 | -------------------------------------------------------------------------------- /multi-sasl/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /multi-sasl/kafka/consumer.plain.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="kafka" \ 5 | password="kafka"; 6 | 7 | -------------------------------------------------------------------------------- /multi-sasl/kafka/consumer.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="kafka" \ 5 | password="kafka"; 6 | 7 | -------------------------------------------------------------------------------- /multi-sasl/kafka/kafka.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Client { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | username="admin" 4 | password="password"; 5 | }; 6 | -------------------------------------------------------------------------------- /multi-sasl/kafka/server.properties: -------------------------------------------------------------------------------- 1 | broker.id=0 2 | listeners=SASL_PLAINTEXT://kafka:9093 3 | advertised.listeners=SASL_PLAINTEXT://kafka:9093 4 | log.dirs=/var/lib/kafka 5 | offsets.topic.replication.factor=1 6 | transaction.state.log.replication.factor=1 7 | transaction.state.log.min.isr=1 8 | 
zookeeper.connect=zookeeper:2181 9 | 10 | # Scram Authentication mechanism 11 | sasl.enabled.mechanisms=SCRAM-SHA-256,PLAIN 12 | sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256 13 | listener.name.sasl_plaintext.scram-sha-256.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 14 | username="kafka" \ 15 | password="kafka"; 16 | listener.name.sasl_plaintext.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 17 | username="kafka" \ 18 | password="kafka" \ 19 | user_kafka="kafka"; 20 | security.inter.broker.protocol=SASL_PLAINTEXT 21 | allow.everyone.if.no.acl.found=false 22 | super.users=User:kafka 23 | authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer 24 | zookeeper.set.acl=true 25 | -------------------------------------------------------------------------------- /multi-sasl/up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | docker-compose up -d --build 4 | 5 | # Creating the user kafka 6 | # kafka is configured as a super user, no need for additional ACL 7 | docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka 8 | 9 | echo "Example configuration:" 10 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --producer.config /etc/kafka/consumer.properties --topic test" 11 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --producer.config /etc/kafka/consumer.plain.properties --topic test" 12 | echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9093 --consumer.config /etc/kafka/consumer.properties --topic test --from-beginning" 13 | -------------------------------------------------------------------------------- /multi-sasl/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk-devel 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. 
Configure ZooKeeper for SASL authentication 15 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 16 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf 17 | 18 | EXPOSE 2181 19 | 20 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 21 | -------------------------------------------------------------------------------- /multi-sasl/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /multi-sasl/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider 5 | -------------------------------------------------------------------------------- /multi-sasl/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | user_admin="password"; 4 | }; 5 | -------------------------------------------------------------------------------- /none/.env: -------------------------------------------------------------------------------- 1 | ../.env -------------------------------------------------------------------------------- /none/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3' 3 | services: 4 | zookeeper: 5 | image: confluentinc/cp-zookeeper:6.1.0 6 | hostname: zookeeper 7 | container_name: zookeeper 8 | environment: 9 | ZOOKEEPER_SERVER_ID: 1 10 | ZOOKEEPER_CLIENT_PORT: 2181 11 | ZOOKEEPER_TICK_TIME: "2000" 12 | ZOOKEEPER_SERVERS: zookeeper:2888:3888 13 | KAFKA_JMX_PORT: 9999 14 | KAFKA_JMX_HOSTNAME: localhost 15 | kafka: 16 | image: confluentinc/cp-enterprise-kafka:6.1.0 17 | hostname: kafka 18 | container_name: kafka 19 | depends_on: 20 | - zookeeper 21 | environment: 22 | KAFKA_BROKER_ID: 1 23 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 24 | KAFKA_LISTENER: INTERNAL://kafka:9092,OUTSIDE://localhost:9093 25 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:9092,OUTSIDE://localhost:9093 26 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,OUTSIDE:PLAINTEXT 27 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL 28 | KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter 29 | CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka:9092 30 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 31 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 32 | KAFKA_JMX_PORT: 9999 33 | KAFKA_JMX_HOSTNAME: kafka 34 | KAFKA_BROKER_RACK: 0 35 | ports: 36 | - 9093:9093 37 | 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /none/up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | docker-compose up -d 4 | 5 | # Creating the user kafka 6 | # kafka is configured as a super user, no need for additional ACL 7 | # docker-compose exec kafka
kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka 8 | 9 | echo "Example configuration:" 10 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9092 --topic test" 11 | echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9092 --topic test --from-beginning" 12 | -------------------------------------------------------------------------------- /oauth/.gitignore: -------------------------------------------------------------------------------- 1 | certs/ 2 | *.jks 3 | -------------------------------------------------------------------------------- /oauth/ca.cnf: -------------------------------------------------------------------------------- 1 | [ policy_match ] 2 | countryName = match 3 | stateOrProvinceName = match 4 | organizationName = match 5 | organizationalUnitName = optional 6 | commonName = supplied 7 | emailAddress = optional 8 | 9 | [ req ] 10 | prompt = no 11 | distinguished_name = dn 12 | default_md = sha256 13 | default_bits = 4096 14 | x509_extensions = v3_ca 15 | 16 | [ dn ] 17 | countryName = UK 18 | organizationName = Confluent 19 | localityName = London 20 | commonName = kafka.confluent.local 21 | 22 | [ v3_ca ] 23 | subjectKeyIdentifier=hash 24 | basicConstraints = critical,CA:true 25 | authorityKeyIdentifier=keyid:always,issuer:always 26 | keyUsage = critical,keyCertSign,cRLSign 27 | -------------------------------------------------------------------------------- /oauth/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | 4 | zookeeper: 5 | build: zookeeper/ 6 | container_name: zookeeper 7 | domainname: confluent.local 8 | hostname: zookeeper 9 | networks: 10 | default: 11 | aliases: 12 | - zookeeper.confluent.local 13 | 14 | kafka: 15 | build: kafka/ 16 | container_name: kafka 17 | domainname: confluent.local 18 | hostname: kafka 19 | depends_on: 20 | - zookeeper 21 | environment: 22 | - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf 23 | networks: 24 | default: 25 | aliases: 26 | - kafka.confluent.local 27 | 28 | networks: 29 | default: 30 | -------------------------------------------------------------------------------- /oauth/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-1.8.0-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. Configure Kafka and zookeeper 15 | COPY server.properties /etc/kafka/server.properties 16 | COPY client.properties /etc/kafka/client.properties 17 | COPY kafka_server_jaas.conf /etc/kafka/kafka_server_jaas.conf 18 | COPY oauthcallbackhandlers/target/dummy-oauth-adapter-0.1.0-jar-with-dependencies.jar /usr/share/java/kafka/dummy-oauth-adapter-0.1.0-jar-with-dependencies.jar 19 | COPY test_produce_and_consume.sh /tmp/test_produce_and_consume.sh 20 | 21 | # 4.
Put SSL certificates in place 22 | COPY kafka.server.keystore.jks /etc/kafka/kafka.server.keystore.jks 23 | COPY kafka.server.truststore.jks /etc/kafka/kafka.server.truststore.jks 24 | # this will be used by the kafka-console-producer.sh and kafka-console-consumer.sh scripts 25 | COPY kafka.client.truststore.jks /etc/kafka/kafka.client.truststore.jks 26 | 27 | EXPOSE 9093 28 | 29 | CMD kafka-server-start /etc/kafka/server.properties 30 | -------------------------------------------------------------------------------- /oauth/kafka/client.properties: -------------------------------------------------------------------------------- 1 | security.protocol=SASL_SSL 2 | sasl.mechanism=OAUTHBEARER 3 | sasl.login.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerLoginCallbackHandler 4 | ssl.truststore.location=/etc/kafka/kafka.client.truststore.jks 5 | ssl.truststore.password=secret 6 | -------------------------------------------------------------------------------- /oauth/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /oauth/kafka/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required; 3 | }; 4 | 5 | KafkaClient { 6 | org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required; 7 | }; 8 | -------------------------------------------------------------------------------- /oauth/kafka/oauthcallbackhandlers/.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | .idea 3 | -------------------------------------------------------------------------------- /oauth/kafka/oauthcallbackhandlers/src/main/java/io/confluent/examples/authentication/oauth/JwtHelper.java: -------------------------------------------------------------------------------- 1 | package io.confluent.examples.authentication.oauth; 2 | 3 | import io.jsonwebtoken.Claims; 4 | import io.jsonwebtoken.Jws; 5 | import io.jsonwebtoken.Jwts; 6 | import io.jsonwebtoken.SignatureAlgorithm; 7 | 8 | import java.io.UnsupportedEncodingException; 9 | import java.util.Arrays; 10 | import java.util.Date; 11 | import java.util.HashSet; 12 | 13 | public class JwtHelper { 14 | 15 | String createJwt() throws UnsupportedEncodingException { 16 | return Jwts.builder() 17 | .setSubject("bene") 18 | .setExpiration(new Date(System.currentTimeMillis() + 1000 * 60 * 60)) 19 | .claim("name", "Benedikt") 20 | .claim("scope", "developer admin") 21 | .setNotBefore(new Date()) 22 | .setIssuedAt(new Date()) 23 | .signWith( 24 | SignatureAlgorithm.HS256, 25 | "secret".getBytes("UTF-8") 26 | ).compact(); 27 | } 28 | 29 | MyOauthBearerToken validate(String jwt) throws UnsupportedEncodingException { 30 | Jws<Claims> claims = Jwts.parser() 31 | .setSigningKey("secret".getBytes("UTF-8")) 32 | .parseClaimsJws(jwt); 33 | MyOauthBearerToken token = new MyOauthBearerToken(); 34 |
token.setLifetimeMs(claims.getBody().getExpiration().getTime()); 35 | token.setPrincipalName(claims.getBody().getSubject()); 36 | token.setScopes(new HashSet<>(Arrays.asList(((String) claims.getBody().get("scope")).split(" ")))); 37 | token.setStartTimeMs(claims.getBody().getIssuedAt().getTime()); 38 | token.setValue(jwt); 39 | return token; 40 | } 41 | 42 | } 43 | -------------------------------------------------------------------------------- /oauth/kafka/oauthcallbackhandlers/src/main/java/io/confluent/examples/authentication/oauth/MyOauthBearerToken.java: -------------------------------------------------------------------------------- 1 | package io.confluent.examples.authentication.oauth; 2 | 3 | import lombok.Data; 4 | import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken; 5 | 6 | import java.util.HashSet; 7 | import java.util.Set; 8 | 9 | @Data 10 | public class MyOauthBearerToken implements OAuthBearerToken { 11 | 12 | private long lifetimeMs; 13 | private String value; 14 | private long startTimeMs; 15 | private String principalName; 16 | private Set<String> scopes = new HashSet<>(); 17 | 18 | MyOauthBearerToken() { } 19 | 20 | MyOauthBearerToken(String value) { 21 | this.value = value; 22 | this.lifetimeMs = System.currentTimeMillis() + 1000 * 60 * 60; 23 | } 24 | 25 | @Override 26 | public String value() { 27 | return this.value; 28 | } 29 | 30 | @Override 31 | public Set<String> scope() { 32 | return scopes; 33 | } 34 | 35 | @Override 36 | public long lifetimeMs() { 37 | return this.lifetimeMs; 38 | } 39 | 40 | @Override 41 | public String principalName() { 42 | return this.principalName; 43 | } 44 | 45 | @Override 46 | public Long startTimeMs() { 47 | return startTimeMs; 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /oauth/kafka/oauthcallbackhandlers/src/main/java/io/confluent/examples/authentication/oauth/OauthBearerValidatorCallbackHandler.java: -------------------------------------------------------------------------------- 1 | package io.confluent.examples.authentication.oauth; 2 | 3 | import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler; 4 | import org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback; 5 | import org.slf4j.Logger; 6 | import org.slf4j.LoggerFactory; 7 | 8 | import javax.security.auth.callback.Callback; 9 | import javax.security.auth.callback.UnsupportedCallbackException; 10 | import javax.security.auth.login.AppConfigurationEntry; 11 | import java.io.IOException; 12 | import java.util.List; 13 | import java.util.Map; 14 | 15 | public class OauthBearerValidatorCallbackHandler implements AuthenticateCallbackHandler { 16 | 17 | private final Logger log = LoggerFactory.getLogger(OauthBearerValidatorCallbackHandler.class); 18 | 19 | private JwtHelper jwtHelper = new JwtHelper(); 20 | 21 | @Override 22 | public void configure(Map<String, ?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries) { 23 | 24 | } 25 | 26 | @Override 27 | public void close() { 28 | 29 | } 30 | 31 | @Override 32 | public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException { 33 | log.info("Validating token."); 34 | for (Callback callback : callbacks) { 35 | if (callback instanceof OAuthBearerValidatorCallback) { 36 | OAuthBearerValidatorCallback oAuthBearerValidatorCallback = (OAuthBearerValidatorCallback) callback; 37 | log.info("Tokenvalue: {}", oAuthBearerValidatorCallback.tokenValue()); 38 |
oAuthBearerValidatorCallback.token(jwtHelper.validate(oAuthBearerValidatorCallback.tokenValue())); 39 | continue; 40 | } 41 | throw new UnsupportedCallbackException(callback); 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /oauth/kafka/oauthcallbackhandlers/src/test/java/io/confluent/examples/authentication/oauth/JwtHelperTest.java: -------------------------------------------------------------------------------- 1 | package io.confluent.examples.authentication.oauth; 2 | 3 | import org.junit.Test; 4 | 5 | import java.io.UnsupportedEncodingException; 6 | import java.util.Arrays; 7 | import java.util.HashSet; 8 | 9 | import static org.junit.Assert.assertEquals; 10 | import static org.junit.Assert.assertTrue; 11 | 12 | public class JwtHelperTest { 13 | 14 | @Test 15 | public void test() throws UnsupportedEncodingException { 16 | JwtHelper underTest = new JwtHelper(); 17 | String jwt = underTest.createJwt(); 18 | MyOauthBearerToken parsed = underTest.validate(jwt); 19 | System.err.println(parsed); 20 | assertEquals("bene", parsed.getPrincipalName()); 21 | assertEquals(new HashSet<>(Arrays.asList("developer", "admin")), parsed.getScopes()); 22 | assertTrue(parsed.getStartTimeMs() <= System.currentTimeMillis()); 23 | assertTrue(parsed.getLifetimeMs() > System.currentTimeMillis()); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /oauth/kafka/server.properties: -------------------------------------------------------------------------------- 1 | ############################# Server Basics ############################# 2 | broker.id=0 3 | listeners=SASL_SSL://kafka.confluent.local:9093 4 | advertised.listeners=SASL_SSL://kafka.confluent.local:9093 5 | log.dirs=/var/lib/kafka 6 | offsets.topic.replication.factor=1 7 | transaction.state.log.replication.factor=1 8 | transaction.state.log.min.isr=1 9 | zookeeper.connect=zookeeper.confluent.local:2181 10 | 11 | # oauth bearer configuration 12 | security.inter.broker.protocol=SASL_SSL 13 | sasl.mechanism.inter.broker.protocol=OAUTHBEARER 14 | sasl.enabled.mechanisms=OAUTHBEARER 15 | listener.name.sasl_ssl.oauthbearer.sasl.server.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerValidatorCallbackHandler 16 | listener.name.sasl_ssl.oauthbearer.sasl.login.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerLoginCallbackHandler 17 | 18 | ssl.truststore.location=/etc/kafka/kafka.server.truststore.jks 19 | ssl.truststore.password=secret 20 | ssl.keystore.location=/etc/kafka/kafka.server.keystore.jks 21 | ssl.keystore.password=secret 22 | ssl.key.password=secret 23 | -------------------------------------------------------------------------------- /oauth/kafka/test_produce_and_consume.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo 'some sample messages 4 | sent via sasl oauth bearer authentication 5 | with custom token generation and validation.
6 | ' | kafka-console-producer --broker-list kafka.confluent.local:9093 --topic test --producer.config /etc/kafka/client.properties 7 | timeout 5 kafka-console-consumer --bootstrap-server kafka.confluent.local:9093 --topic test --from-beginning --consumer.config /etc/kafka/client.properties 8 | -------------------------------------------------------------------------------- /oauth/up: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -u 5 | 6 | pushd kafka/oauthcallbackhandlers 7 | mvn clean package 8 | popd 9 | 10 | ./generate_certs.sh 11 | 12 | docker-compose up -d --build 13 | 14 | sleep 5 15 | 16 | docker-compose exec kafka /tmp/test_produce_and_consume.sh 17 | -------------------------------------------------------------------------------- /oauth/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER seknop@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-1.8.0-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. Configure Kafka and zookeeper 15 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 16 | 17 | EXPOSE 2181 18 | 19 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 20 | -------------------------------------------------------------------------------- /oauth/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /oauth/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | -------------------------------------------------------------------------------- /plain/.env: -------------------------------------------------------------------------------- 1 | ../.env -------------------------------------------------------------------------------- /plain/consumer.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="consumer" \ 5 | password="consumer-secret"; 6 | 7 | -------------------------------------------------------------------------------- /plain/producer.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="producer" \ 5 | password="producer-secret"; 6 | 7 | -------------------------------------------------------------------------------- /plain/up:
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | docker-compose up -d 4 | 5 | echo "Example configuration:" 6 | echo "-> kafka-console-producer --broker-list localhost:9093 --producer.config producer.properties --topic test" 7 | echo "-> kafka-console-consumer --bootstrap-server localhost:9093 --consumer.config consumer.properties --topic test --from-beginning" 8 | -------------------------------------------------------------------------------- /quotas/Client/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | MAINTAINER seknop@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.1/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install Confluent Kafka stack 11 | RUN yum install -y java-1.8.0-openjdk 12 | RUN yum install -y confluent-kafka-2.11 13 | 14 | CMD tail -f /dev/null 15 | -------------------------------------------------------------------------------- /quotas/Client/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.1/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.1/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.1 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.1/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /quotas/Grafana/provisioning/dashboards/one-quota.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | # provider name 5 | - name: 'prometheus' 6 | # org id. will default to orgId 1 if not specified 7 | orgId: 1 8 | # name of the dashboard folder. Required 9 | folder: '' 10 | # folder UID. will be automatically generated if not specified 11 | folderUid: '' 12 | # provider type. Required 13 | type: file 14 | # disable dashboard deletion 15 | disableDeletion: false 16 | # enable dashboard editing 17 | editable: true 18 | # how often Grafana will scan for changed dashboards 19 | updateIntervalSeconds: 10 20 | options: 21 | # path to dashboard files on disk. Required 22 | path: /etc/grafana/provisioning/dashboards 23 | -------------------------------------------------------------------------------- /quotas/Grafana/provisioning/datasources/prometheus.yaml: -------------------------------------------------------------------------------- 1 | # config file version 2 | apiVersion: 1 3 | 4 | # list of datasources that should be deleted from the database 5 | deleteDatasources: 6 | - name: Prometheus 7 | orgId: 1 8 | 9 | # list of datasources to insert/update depending 10 | # whats available in the database 11 | datasources: 12 | # name of the datasource. Required 13 | - name: Prometheus 14 | # datasource type. Required 15 | type: prometheus 16 | # access mode. direct or proxy. Required 17 | access: proxy 18 | # org id. 
will default to orgId 1 if not specified 19 | orgId: 1 20 | # url 21 | url: http://prometheus:9090 22 | # database password, if used 23 | password: 24 | # database user, if used 25 | user: 26 | # database name, if used 27 | database: 28 | # enable/disable basic auth 29 | basicAuth: true 30 | # basic auth username 31 | basicAuthUser: admin 32 | # basic auth password 33 | basicAuthPassword: foobar 34 | # enable/disable with credentials headers 35 | withCredentials: 36 | # mark as default datasource. Max one per org 37 | isDefault: true 38 | # fields that will be converted to json and stored in json_data 39 | jsonData: 40 | graphiteVersion: "1.1" 41 | tlsAuth: false 42 | tlsAuthWithCACert: false 43 | # json object of data that will be encrypted. 44 | secureJsonData: 45 | tlsCACert: "..." 46 | tlsClientCert: "..." 47 | tlsClientKey: "..." 48 | version: 1 49 | # allow users to edit datasources from the UI. 50 | editable: true 51 | -------------------------------------------------------------------------------- /quotas/JMX_Exporter/jmx_prometheus_javaagent-0.11.0.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/quotas/JMX_Exporter/jmx_prometheus_javaagent-0.11.0.jar -------------------------------------------------------------------------------- /quotas/JMX_Exporter/kafka_config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ssl: false 3 | lowercaseOutputName: true 4 | lowercaseOutputLabelNames: true 5 | rules: 6 | - pattern : kafka.server<type=ReplicaManager, name=(\w+)><>(Value|OneMinuteRate) 7 | name: "cp_kafka_server_replicamanager_$1" 8 | - pattern : kafka.controller<type=KafkaController, name=(\w+)><>Value 9 | name: "cp_kafka_controller_kafkacontroller_$1" 10 | - pattern : kafka.server<type=BrokerTopicMetrics, name=(\w+)><>OneMinuteRate 11 | name: "cp_kafka_server_brokertopicmetrics_$1" 12 | - pattern : kafka.network<type=RequestMetrics, name=RequestsPerSec, request=(\w+)><>OneMinuteRate 13 | name: "cp_kafka_network_requestmetrics_requestspersec_$1" 14 | - pattern : kafka.network<type=SocketServer, name=NetworkProcessorAvgIdlePercent><>Value 15 | name: "cp_kafka_network_socketserver_networkprocessoravgidlepercent" 16 | - pattern : kafka.server<type=ReplicaFetcherManager, name=MaxLag, clientId=(\w+)><>Value 17 | name: "cp_kafka_server_replicafetchermanager_maxlag_$1" 18 | - pattern : kafka.server<type=KafkaRequestHandlerPool, name=RequestHandlerAvgIdlePercent><>OneMinuteRate 19 | name: "cp_kafka_kafkarequesthandlerpool_requesthandleravgidlepercent" 20 | - pattern : kafka.controller<type=ControllerStats, name=(\w+)><>OneMinuteRate 21 | name: "cp_kafka_controller_controllerstats_$1" 22 | - pattern : kafka.server<type=SessionExpireListener, name=(\w+)><>OneMinuteRate 23 | name: "cp_kafka_server_sessionexpirelistener_$1" 24 | - pattern : kafka.server<type=Produce, user=(.+), client-id=(.+)><>(\w+) 25 | name: "cp_kafka_server_throttle_produce_$1_$2_$3" 26 | - pattern : kafka.server<type=Fetch, user=(.+), client-id=(.+)><>(\w+) 27 | name: "cp_kafka_server_throttle_fetch_$1_$2_$3" 28 | - pattern : kafka.server<type=Request, user=(.+), client-id=(.+)><>(\w+) 29 | name: "cp_kafka_server_throttle_request_$1_$2_$3" 30 | -------------------------------------------------------------------------------- /quotas/JMX_Exporter/zookeeper_config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ssl: false 3 | lowercaseOutputName: true 4 | lowercaseOutputLabelNames: true 5 | rules: 6 | - pattern: "org.apache.ZooKeeperService<name0=(.+)><>(\\w+)" 7 | name: "cp_zookeeper_$2" 8 | -------------------------------------------------------------------------------- /quotas/Prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s # By default, scrape targets every 15 seconds.
3 | 4 | # Attach these labels to any time series or alerts when communicating with 5 | # external systems (federation, remote storage, Alertmanager). 6 | external_labels: 7 | monitor: 'kafka-monitor' 8 | 9 | # A scrape configuration containing exactly one endpoint to scrape: 10 | # Here it's Prometheus itself. 11 | scrape_configs: 12 | # The job name is added as a label `job=` to any timeseries scraped from this config. 13 | - job_name: 'kafka-broker' 14 | 15 | # Override the global default and scrape targets from this job every 5 seconds. 16 | scrape_interval: 5s 17 | 18 | static_configs: 19 | - targets: ['kafka:5556'] 20 | 21 | # The job name is added as a label `job=` to any timeseries scraped from this config. 22 | - job_name: 'zookeeper' 23 | 24 | # Override the global default and scrape targets from this job every 5 seconds. 25 | scrape_interval: 5s 26 | 27 | static_configs: 28 | - targets: ['zookeeper:5556'] 29 | -------------------------------------------------------------------------------- /quotas/secrets/admin.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="kafka" \ 5 | password="kafka"; 6 | -------------------------------------------------------------------------------- /quotas/secrets/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | org.apache.kafka.common.security.plain.PlainLoginModule required 3 | username="kafka" 4 | password="kafka" 5 | user_kafka="kafka" 6 | user_quota="quota-secret" 7 | user_noquota="noquota-secret"; 8 | }; 9 | -------------------------------------------------------------------------------- /quotas/secrets/noquota.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="noquota" \ 5 | password="noquota-secret"; 6 | 7 | -------------------------------------------------------------------------------- /quotas/secrets/quota.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="quota" \ 5 | password="quota-secret"; 6 | 7 | -------------------------------------------------------------------------------- /quotas/up: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Up script for testing quotas 4 | # 5 | 6 | # Start up cluster 7 | docker-compose up -d 8 | 9 | # Wait for Kafka Broker to be up 10 | 11 | docker-compose exec kafka env -u KAFKA_OPTS cub kafka-ready -b kafka:9092 -c /etc/kafka/secrets/admin.properties 1 30 12 | 13 | # Set up topics 14 | docker-compose exec client kafka-topics --create --zookeeper zookeeper:2181 --topic quota-topic --partitions 1 --replication-factor 1 15 | 16 | # Set up quotas 17 | docker-compose exec client kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'producer_byte_rate=10240,consumer_byte_rate=20480' --entity-type users --entity-name quota 18 | 19 | # Suggest Grafana URL for monitoring 20 | echo "Open http://localhost:3000 to see the Grafana dashboard" 21 | 22 | # Show example on 
how to test quotas 23 | echo "Example configuration without quota to access kafka:" 24 | 25 | echo "-> docker-compose exec client kafka-producer-perf-test --num-records 100000 --throughput 100000 --producer-props bootstrap.servers=kafka:9092 --record-size 100 --topic quota-topic --producer.config /etc/kafka/secrets/noquota.properties" 26 | 27 | echo "Example configuration with quota to access kafka:" 28 | 29 | echo "-> docker-compose exec client kafka-producer-perf-test --num-records 100000 --throughput 100000 --producer-props bootstrap.servers=kafka:9092 --record-size 100 --topic quota-topic --producer.config /etc/kafka/secrets/quota.properties" 30 | -------------------------------------------------------------------------------- /rbac/.env: -------------------------------------------------------------------------------- 1 | ../.env -------------------------------------------------------------------------------- /rbac/README.md: -------------------------------------------------------------------------------- 1 | # User hierarchy 2 | 3 | ## User names 4 | * Alice 5 | * Barnie 6 | * Charlie 7 | * Donald 8 | * Eva 9 | * Fritz 10 | * Greta 11 | 12 | ## Groups 13 | * ProjectA 14 | * ProjectB 15 | -------------------------------------------------------------------------------- /rbac/client-configs/alice.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="alice" \ 5 | password="alice-secret" ; 6 | -------------------------------------------------------------------------------- /rbac/client-configs/barnie.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="barnie" \ 5 | password="barnie-secret" ; 6 | -------------------------------------------------------------------------------- /rbac/client-configs/charlie.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="charlie" \ 5 | password="charlie-secret" ; 6 | -------------------------------------------------------------------------------- /rbac/client-configs/copy-props.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | first=$1 3 | second=$2 4 | 5 | cp ${first}.properties ${second}.properties 6 | sed -i '' "s/${first}/${second}/g" ${second}.properties 7 | -------------------------------------------------------------------------------- /rbac/client-configs/donald.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="donald" \ 5 | password="donald-secret" ; 6 | -------------------------------------------------------------------------------- /rbac/client-configs/eva.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule 
required \ 4 | username="eva" \ 5 | password="eva-secret" ; 6 | -------------------------------------------------------------------------------- /rbac/client-configs/fritz.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="fritz" \ 5 | password="fritz-secret" ; 6 | -------------------------------------------------------------------------------- /rbac/client-configs/greta.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="greta" \ 5 | password="greta-secret" ; 6 | -------------------------------------------------------------------------------- /rbac/conf/keypair.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEowIBAAKCAQEAtzZ/i+k9IkeYSGL72tAE2YxRqbTEcmpx4/Hag1XbgcZeH5TX 3 | JpfOekpdfeV4uef21XKCuA1AWCjEiYlVCDIpEaUbIL4ecCORtWkKEiWd8S06xH+I 4 | 7gMF77wPm3LrXKX9ciZSrVpKqhTyp1lJJeI5xYhXBmnmpfXaPEqtUV8YR72i2BOC 5 | Kq/qix0MZD4qXtTKyYrjpJS8SyTZAIEpmkAIddQeerTJuVqZFiACpaZNEZ+/bpe3 6 | G7pa0iHFpqn7njCXHDRA4H0S/7GrjtQV05F2ahaCRIAEAfLbMc1473ZQzniOvGmi 7 | OTekEYhWKt2XqpHmSb4sBXdzdOmctmkAJZNvVQIDAQABAoIBABeCvNdddOTjPx16 8 | 5krsCtNH+GrIsbTlB/xyWtIjwGlk5Us0NG/VZz+2L00ql1Vz+O7nJPMtcCZgBOiJ 9 | YQoONcr33wVSoGFPjzEya4JspVn/rdztaMryRF7BSVdvZdibzGxLkqyAO7ibu4/S 10 | G1rrLvK0TkaabGi7pee6xYb9pwX9jh3HP24QZP9eeZKgAQ3vB8IHvveQloi/GsA2 11 | 7u9u0v9jWhv3RGpxAovPAwyGQhqfuMMEKVkwGUbdve4rv475fyWvz1ubYxRhu/BE 12 | /C8O1ekmKY1nCHb+H1YGqay+L5U6fm8AxwcgD6L/gyn3hEMKElYE9kBMp5cJEy9J 13 | Zo5OgMECgYEA6TdJ19ANbfSYjxLLuVTxg0mvBYTLAifmAZ3atICw5InhiAInZYe0 14 | m7oNYIghCEAMpMRHBjffOOPPiOGBXmgqV59v81GrBvpPxEg0mGVWEI1ZsyuagtON 15 | xzH6GBTZ85ThMJshKtOxvIiCJjhi/DAgYgfBcC/kNS7kH5aYNKSnYKkCgYEAyRyj 16 | B+LzPfMhxkeY7WsMr+BOx1CwJ48iuuoOhu0vXPWtgR9xb5ZcekKjcMF9wu9ZpviU 17 | IoMDTJQQ8ytDl5aMPa+SiEOM3dH5mzycT3cHUOuHdPKGO0pSImxf5fScWq+S/p/v 18 | 8jG98548fLuLZzn2Ic/rb4NrrLgXE4wp/SEvyM0CgYEAzjMVNhx9E0AVd0LeH5JM 19 | K5GFjzKXL+PJCJriWYADZz3Fy7Rj0vBGrv20gCo8UogW5cOpLIVP94Ps5hDEio09 20 | CtYsbI1D01qUFm7lGe1XSDFCIxmldpDIJVw5zPr6rdRvusMeczhTSOfFczedxW5j 21 | 42kKDkA53RAFoSxMjRcb6mkCgYBv2BQ2y5lZB4DuA11iFBmvKgDFyfLdXTYEWyyP 22 | DxM7EIpYeAMe+rEdcTfx+jmVkoT6Xd4MP24zRVkT0yi5AgmmOKE2VNLffenh0lhf 23 | vSN9di8T89iA2rlI2ZqPiXT23hPStWG9ALrR5FthRu+lDc/7R+V4U88q9AopXdbw 24 | JmertQKBgHY0hhFsf6g5c0kagfID532ZWkWmk5TP3IfnjUtngk2IHVNqpTy4gu66 25 | ACpMDrNuWF3HcfD/eJP++lyVpp6x/eSNwVL1XmgN5hPAL/fE9+I8WGtq6ShrqZsR 26 | IL3XkKrwp+H8qwWAUa2tkFDSqeUF4/HNXHVrzMltOxemv3twhTTS 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /rbac/conf/public.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PUBLIC KEY----- 2 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtzZ/i+k9IkeYSGL72tAE 3 | 2YxRqbTEcmpx4/Hag1XbgcZeH5TXJpfOekpdfeV4uef21XKCuA1AWCjEiYlVCDIp 4 | EaUbIL4ecCORtWkKEiWd8S06xH+I7gMF77wPm3LrXKX9ciZSrVpKqhTyp1lJJeI5 5 | xYhXBmnmpfXaPEqtUV8YR72i2BOCKq/qix0MZD4qXtTKyYrjpJS8SyTZAIEpmkAI 6 | ddQeerTJuVqZFiACpaZNEZ+/bpe3G7pa0iHFpqn7njCXHDRA4H0S/7GrjtQV05F2 7 | ahaCRIAEAfLbMc1473ZQzniOvGmiOTekEYhWKt2XqpHmSb4sBXdzdOmctmkAJZNv 8 | VQIDAQAB 9 | -----END PUBLIC KEY----- 10 | 
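Note: the key pair above is the kind used by Confluent's metadata service for signing RBAC authentication tokens, and is committed here for demo purposes only. A minimal sketch of how such a pair could be regenerated with openssl (an assumption for illustration; a 2048-bit RSA key is used here and the paths mirror rbac/conf):

```sh
# generate a fresh RSA private key (PKCS#1 PEM, as in conf/keypair.pem)
openssl genrsa -out conf/keypair.pem 2048

# extract the matching public key (as in conf/public.pem)
openssl rsa -in conf/keypair.pem -pubout -out conf/public.pem
```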
-------------------------------------------------------------------------------- /rbac/functions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | retry() { 3 | local -r -i max_attempts="$1"; shift 4 | local -r -i sleep_interval="$1"; shift 5 | local -r cmd="$@" 6 | local -i attempt_num=1 7 | 8 | until $cmd 9 | do 10 | if (( attempt_num == max_attempts )) 11 | then 12 | echo "Failed after $attempt_num attempts" 13 | return 1 14 | else 15 | printf "." 16 | ((attempt_num++)) 17 | sleep $sleep_interval 18 | fi 19 | done 20 | printf "\n" 21 | } 22 | 23 | container_healthy() { 24 | local name=$1 25 | local container=$(docker-compose ps -q $1) 26 | local healthy=$(docker inspect --format '{{ .State.Health.Status }}' $container) 27 | if [ $healthy == healthy ] 28 | then 29 | printf "$1 is healthy" 30 | return 0 31 | else 32 | return 1 33 | fi 34 | } 35 | -------------------------------------------------------------------------------- /rbac/kafka-registered.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | zookeeper-shell $1 get /cluster/id 4 | version=$(zookeeper-shell $1 get /cluster/id 2> /dev/null | grep version) 5 | echo $version 6 | if [ $version ]; then 7 | exit 0 8 | else 9 | exit 1 10 | fi -------------------------------------------------------------------------------- /rbac/ldap/custom/01_base.ldif: -------------------------------------------------------------------------------- 1 | dn: ou=users,dc=confluent,dc=io 2 | objectClass: organizationalUnit 3 | ou: Users 4 | 5 | dn: ou=groups,dc=confluent,dc=io 6 | objectClass: organizationalUnit 7 | ou: Groups 8 | 9 | -------------------------------------------------------------------------------- /rbac/ldap/custom/02_KafkaDevelopers.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: Kafka Developers 5 | gidNumber: 5000 6 | 7 | -------------------------------------------------------------------------------- /rbac/ldap/custom/03_ProjectA.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=ProjectA,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: ProjectA 5 | gidNumber: 5001 6 | 7 | -------------------------------------------------------------------------------- /rbac/ldap/custom/04_ProjectB.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=ProjectB,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: ProjectB 5 | gidNumber: 5002 6 | 7 | -------------------------------------------------------------------------------- /rbac/ldap/custom/10_alice.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=alice,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: alice 6 | sn: LookingGlass 7 | givenName: Alice 8 | cn: alice 9 | displayName: Alice LookingGlass 10 | uidNumber: 10000 11 | gidNumber: 5000 12 | userPassword: alice-secret 13 | gecos: alice 14 | loginShell: /bin/bash 15 | homeDirectory: /home/alice 16 | -------------------------------------------------------------------------------- /rbac/ldap/custom/11_barnie.ldif: -------------------------------------------------------------------------------- 1 | dn: 
cn=barnie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: barnie 6 | sn: Rubble 7 | givenName: Barnie 8 | cn: barnie 9 | displayName: Barnie Rubble 10 | uidNumber: 10001 11 | gidNumber: 5000 12 | userPassword: barnie-secret 13 | gecos: barnie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/barnie 16 | -------------------------------------------------------------------------------- /rbac/ldap/custom/12_charlie.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=charlie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: charlie 6 | sn: Sheen 7 | givenName: Charlie 8 | cn: charlie 9 | displayName: Charlie Sheen 10 | uidNumber: 10002 11 | gidNumber: 5000 12 | userPassword: charlie-secret 13 | gecos: charlie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/charlie 16 | -------------------------------------------------------------------------------- /rbac/ldap/custom/13_donald.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=donald,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: donald 6 | sn: Duck 7 | givenName: Donald 8 | cn: donald 9 | displayName: Donald Duck 10 | uidNumber: 10003 11 | gidNumber: 5000 12 | userPassword: donald-secret 13 | gecos: donald 14 | loginShell: /bin/bash 15 | homeDirectory: /home/donald 16 | -------------------------------------------------------------------------------- /rbac/ldap/custom/14_eva.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=eva,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: eva 6 | sn: Maria 7 | givenName: Eva 8 | cn: eva 9 | displayName: Eva Maria 10 | uidNumber: 10004 11 | gidNumber: 5000 12 | userPassword: eva-secret 13 | gecos: eva 14 | loginShell: /bin/bash 15 | homeDirectory: /home/eva 16 | -------------------------------------------------------------------------------- /rbac/ldap/custom/15_fritz.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=fritz,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: fritz 6 | sn: Walter 7 | givenName: Fritz 8 | cn: fritz 9 | displayName: Fritz Walter 10 | uidNumber: 10005 11 | gidNumber: 5000 12 | userPassword: fritz-secret 13 | gecos: fritz 14 | loginShell: /bin/bash 15 | homeDirectory: /home/fritz 16 | -------------------------------------------------------------------------------- /rbac/ldap/custom/16_greta.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=greta,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: greta 6 | sn: Thunberg 7 | givenName: Greta 8 | cn: greta 9 | displayName: Greta Thunberg 10 | uidNumber: 10006 11 | gidNumber: 5000 12 | userPassword: greta-secret 13 | gecos: greta 14 | loginShell: /bin/bash 15 | homeDirectory: /home/greta 16 | -------------------------------------------------------------------------------- /rbac/ldap/custom/20_group_add.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ 
LDAP_BASE_DN }} 2 | changetype: modify 3 | add: memberuid 4 | memberuid: cn=alice,ou=users,{{ LDAP_BASE_DN }} 5 | 6 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 7 | changetype: modify 8 | add: memberuid 9 | memberuid: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 10 | 11 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 12 | changetype: modify 13 | add: memberuid 14 | memberuid: cn=charlie,ou=users,{{ LDAP_BASE_DN }} 15 | 16 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 17 | changetype: modify 18 | add: memberuid 19 | memberuid: cn=eva,ou=users,{{ LDAP_BASE_DN }} 20 | 21 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 22 | changetype: modify 23 | add: memberuid 24 | memberuid: cn=fritz,ou=users,{{ LDAP_BASE_DN }} 25 | -------------------------------------------------------------------------------- /rbac/up: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## start docker-compose up to and including kafka 4 | docker-compose up -d kafka 5 | docker-compose up -d tools 6 | 7 | # wait for kafka container to be healthy 8 | source ./functions.sh 9 | echo 10 | echo "Waiting for the broker to be healthy" 11 | retry 10 5 container_healthy kafka 12 | 13 | # Create the roles 14 | echo "Creating role bindings for principals" 15 | docker-compose exec tools bash -c "/tmp/create-role-bindings.sh" || exit 1 16 | 17 | ## start remaining services 18 | 19 | docker-compose up -d 20 | 21 | echo "Services should be up:" 22 | 23 | docker-compose ps 24 | 25 | echo "Example configuration:" 26 | -------------------------------------------------------------------------------- /schema-registry/with-basic-auth-and-ccloud/README.md: -------------------------------------------------------------------------------- 1 | ## Pre-Reqs 2 | 3 | 1. CCloud Service Account 4 | 2. CCloud API Key and Secret for Service Account 5 | 3. Service Account authorized to read/write/create the following topics. You can pre-create these topics if you'd like to reduce the ACL rules to just read/write. 6 | - `_confluent-license` - License Topic 7 | - `schemas-security-plugin` - Schemas Topic 8 | - `schemas-security-plugin_acl` - ACLs Topic 9 | 4. The following Env Vars Defined 10 | - `CLUSTER_BOOTSTRAP_SERVERS` 11 | - `CLUSTER_API_KEY` 12 | - `CLUSTER_API_SECRET` 13 | 14 | ### Sample Commands 15 | 16 | *Create ACLs Needed in CCloud* 17 | ``` 18 | ccloud kafka acl create --allow --service-account ... --operation READ --operation WRITE --operation CREATE --operation DESCRIBE --operation DESCRIBE-CONFIGS --topic schemas-security-plugin 19 | ccloud kafka acl create --allow --service-account ... --operation READ --operation WRITE --operation CREATE --operation DESCRIBE --operation DESCRIBE-CONFIGS --topic schemas-security-plugin_acl 20 | ccloud kafka acl create --allow --service-account 181693 --operation READ --operation WRITE --operation CREATE --operation DESCRIBE --operation DESCRIBE-CONFIGS --topic _confluent-license 21 | ccloud kafka acl create --allow --service-account 181693 --operation READ --operation WRITE --consumer-group schema-registry 22 | ``` 23 | 24 | *Define Env Vars* 25 | ``` 26 | export CLUSTER_BOOTSTRAP_SERVERS="....confluent.cloud:9092" 27 | export CLUSTER_API_KEY="..." 28 | export CLUSTER_API_SECRET="..." 
29 | ``` 30 | 31 | ## Users 32 | 33 | | User | Pass | Desc | 34 | |-------|-------|---------------------| 35 | | read | read | Global Read Access | 36 | | write | write | Global Write Access | 37 | | admin | admin | Global Admin Access | -------------------------------------------------------------------------------- /schema-registry/with-basic-auth-and-ccloud/jaas_config.file: -------------------------------------------------------------------------------- 1 | Schema { 2 | org.eclipse.jetty.jaas.spi.PropertyFileLoginModule required 3 | file="/tmp/password-file" 4 | debug="false"; 5 | }; 6 | -------------------------------------------------------------------------------- /schema-registry/with-basic-auth-and-ccloud/password-file: -------------------------------------------------------------------------------- 1 | read: OBF:1vgt1sar1saj1vg1,read 2 | write: OBF:1wnl1ym51unz1ym91wml,write 3 | admin: OBF:1u2a1toa1w8v1tok1u30,admin 4 | -------------------------------------------------------------------------------- /schema-registry/with-basic-auth-and-ccloud/up: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker-compose up -d 4 | # TODO: An ugly sleep to remove with the confluent utility belt at some point 5 | sleep 5 6 | docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p read -o SUBJECT_READ 7 | docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p write -o SUBJECT_WRITE 8 | docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p admin -o '*' 9 | 10 | # Uncomment the below 2 lines for anonymous support 11 | #docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -p 'ANONYMOUS' -o GLOBAL_READ 12 | #docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -p 'ANONYMOUS' -o GLOBAL_COMPATIBILITY_READ 13 | #docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p 'ANONYMOUS' -o SUBJECT_READ 14 | -------------------------------------------------------------------------------- /schema-registry/with-basic-auth/.env: -------------------------------------------------------------------------------- 1 | ../../.env -------------------------------------------------------------------------------- /schema-registry/with-basic-auth/jaas_config.file: -------------------------------------------------------------------------------- 1 | Schema { 2 | org.eclipse.jetty.jaas.spi.PropertyFileLoginModule required 3 | file="/tmp/password-file" 4 | debug="false"; 5 | }; 6 | -------------------------------------------------------------------------------- /schema-registry/with-basic-auth/password-file: -------------------------------------------------------------------------------- 1 | read: OBF:1vgt1sar1saj1vg1,read 2 | write: OBF:1wnl1ym51unz1ym91wml,write 3 | admin: OBF:1u2a1toa1w8v1tok1u30,admin 4 | -------------------------------------------------------------------------------- /schema-registry/with-basic-auth/up: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker-compose up -d 4 | 5 | docker-compose logs schema-registry | grep "Server started, listening for requests" 6 | while (( $? 
== 1 )) 7 | do 8 | sleep 1 9 | echo "Waiting for schema registry to be started ..." 10 | docker-compose logs schema-registry | grep "Server started, listening for requests" 11 | done 12 | 13 | docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p read -o SUBJECT_READ 14 | docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p write -o SUBJECT_WRITE 15 | docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p admin -o '*' 16 | 17 | echo "Schema Registry is listening on http://localhost:8089" 18 | echo "-> user:password | description" 19 | echo "-> _____________" 20 | echo "-> read:read | Global read access" 21 | echo "-> write:write | Global write access" 22 | echo "-> admin:admin | Global admin access" 23 | -------------------------------------------------------------------------------- /schema-registry/with-http_and_https/.env: -------------------------------------------------------------------------------- 1 | ../../.env -------------------------------------------------------------------------------- /schema-registry/with-http_and_https/.gitignore: -------------------------------------------------------------------------------- 1 | schema-registry/secrets/client*pem 2 | schema-registry/secrets/client.p12 3 | -------------------------------------------------------------------------------- /schema-registry/with-http_and_https/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | 3 | This playbook is an example configuration where Schema Registry accepts requests on both `http` and `https`. 4 | It also adds mTLS as a mutual authentication mechanism on the `https` endpoint. 5 | Requests on the `http` endpoint are identified as the `ANONYMOUS` user. This is possible thanks to the `confluent.schema.registry.anonymous.principal=true` option. 6 | 7 | The following ACLs are configured: 8 | - `sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p 'ANONYMOUS' -o 'SUBJECT_READ'` 9 | - `sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -p 'ANONYMOUS' -o 'GLOBAL_SUBJECTS_READ'` 10 | - `sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -p 'ANONYMOUS' -o 'GLOBAL_COMPATIBILITY_READ'` 11 | - `sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p 'C=DE,O=Confluent Ltd,L=Berlin,CN=schema-registry' -o '*'` 12 | 13 | With this configuration, `curl -X GET http://localhost:8089/subjects/` is successful, but the `ANONYMOUS` user does not have the privileges to write new schemas. 14 | Only the client with the TLS client certificate `C=DE,O=Confluent Ltd,L=Berlin,CN=schema-registry` can write new schemas; this could be, for example, your CI tool or an admin user.
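A hedged sketch of exercising both endpoints from the host (the https port, CA file, and client key/cert file names below are assumptions for illustration; check docker-compose.yml and the secrets directory for the actual values):

```sh
# anonymous read over http succeeds, since SUBJECT_READ is granted to ANONYMOUS
curl -X GET http://localhost:8089/subjects/

# writing a schema requires presenting the mTLS client certificate over https
curl --cacert ca.pem --cert client.cert.pem --key client.key.pem \
  -X POST -H "Content-Type: application/vnd.schemaregistry.v1+json" \
  --data '{"schema": "{\"type\": \"string\"}"}' \
  https://localhost:8443/subjects/test-value/versions
```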
15 | 16 | Use the _verify.sh_ script to verify the http mTLS authentication 17 | -------------------------------------------------------------------------------- /schema-registry/with-http_and_https/schema-registry/config/ca.cnf: -------------------------------------------------------------------------------- 1 | [ policy_match ] 2 | countryName = match 3 | stateOrProvinceName = match 4 | organizationName = match 5 | organizationalUnitName = optional 6 | commonName = supplied 7 | emailAddress = optional 8 | 9 | [ req ] 10 | prompt = no 11 | distinguished_name = dn 12 | default_md = sha256 13 | default_bits = 4096 14 | x509_extensions = v3_ca 15 | 16 | [ dn ] 17 | countryName = DE 18 | organizationName = Confluent 19 | localityName = Berlin 20 | commonName = schema-registry.confluent.local 21 | 22 | [ v3_ca ] 23 | subjectKeyIdentifier=hash 24 | basicConstraints = critical,CA:true 25 | authorityKeyIdentifier=keyid:always,issuer:always 26 | keyUsage = critical,keyCertSign,cRLSign 27 | -------------------------------------------------------------------------------- /schema-registry/with-http_and_https/schema-registry/config/client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=schema-registry.client 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = clientAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=localhost 30 | -------------------------------------------------------------------------------- /schema-registry/with-http_and_https/schema-registry/secrets/schema-registry.key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | Proc-Type: 4,ENCRYPTED 3 | DEK-Info: AES-256-CBC,4F162BBCCF33132B2E020A4181B3FD0C 4 | 5 | jmAHtitFXynlxRf81Z/xdNazchBDXuK2ZWeUVuCPARW3BX+bZpoyHU3P5s9CZTPV 6 | M/9fxAxlAUbQ92+gsANWOIcND5OKBWSn6l6ua+YvY61ebXGVC/gdLQfgW8bG1tbq 7 | Y+6NrogfhB4WXZyHfjVhbY6GEONPJbkSyBaV6TTlFRNOBo9+aTE0fGxlE1niJRZh 8 | BbHOV4tI2NScdlvEW6tzMHO56pRRwQ3dE+eOKN+14xkq1dZZdsly03klHstuMYZn 9 | oxfBO2VPeP2uQR+csqBe4scyKAfPEpMzSY37cWBbbnIepKA3Jorsd8/g+B3MrXXk 10 | tdhHd1DmDGZawWgQgZ/BgGEvXIcUHVSUvadKin3jkpCJkze4wMaTy+8ukj6wvU3q 11 | zTnJOCV7L00vQKGL3b/iws7qIXOeK6j/FgNgXyLJlTy71UM5uzNUjbV58MvrsRa7 12 | dix5wh6ubv9Rk4ySJaJkE2ruvH4OZrRhmZGES3NnliTdNeJXhbKMe3HXppTVnUnm 13 | bpj2fI3sLyIjyioXcuh+UVpme9aobJPGvFY9rbs1duGGIQ1KTElcpIGI5Bc+c+Rb 14 | KCi8POlWsp5MTgB4oSncyPBZzpQ/zwkXVUkND9nD+I/f+Oqqh0dOF1qZ3RhrsYFP 15 | 1+H3cpebDEs0/7gMTuIW9HS3ziXjkPsl/Rx66XHX8Ww9mv4ulO57nsDxyoWJ+kFe 16 | mj9DzBgh4eHLPNAFixaYjQMOoaNqdSFZHoTnlXMfiwQoUMpel5RrrbZ94qW7LazS 17 | jfqhH1NAxVHWBN1navJhuI3ibNpr+b8IncdbkBJ58KNBjghJTx0BN0MCN19+jU5t 18 | eLl9jtsl8ky6Q7uWnWywJR8/vgQaJNZgvJsCBoL4IcSN9ejUkgAR0cm92FY+39ui 19 | VMcPEPfzp9ULBEr5oTMHniH0r+hVMFKqgtQ2f0yP3/Jva0vA+6cpEQ9StHncHw0R 20 | CeYsqcVWStA5SZzKn8QtuiuS4O7VEBF9lpTAUkiJ0ts6UzX/sdJigie4lqfA1Ymv 21 | 
vlZlcxFnHAZE73/AdcwcQtDoxskdYt74ZdFuXojZMZDOlGei1aPxwEoFCFlYToV/ 22 | YiMZnu5oZxkQm+5uzBVXCq/4Y8xNveZtPn2EGpeJwIrWbRtotGIcvK89RNO+RI2Q 23 | GZeO2vhb1bElL65HJ6/MJK6IaYu+7qOxSG4ULkcrO/qqGg9yQY6kmb9dQA/pknLh 24 | 2r5MDWB6fHvP7OgMOMJXPz48BWf2lpOBNAr6+tiBtoFiBqZzwMOosI9qj78W5bly 25 | wV6Cuj2T1lNM5S9VE7xq492jQQUoffY4Tn3uNiLeNCZ5K1su2GoE3UbMfiLHdG+o 26 | igY+ut/rX8cg/waByFW7u3PaED1bMe7znBLHUz7M4ad8L/Xy5obwMYT+CvnwoXSt 27 | VdS2y+pDF81mUdZGGQFLlQXqg60sclwOWMMxuhwyuvAeRjAVQruWvJodjrsmo2xr 28 | N/q1HYyFtK8AmF8MB9UOXOuuSkneoeMFDgg1auPy1DApYmEJtWu2wN0FHCG8Famn 29 | Ml06N1xyyaHqQqMLTP2Wf4493foQPpUfMDOGZZrzWzSVKNxe53QVmi1RCefZuxjB 30 | -----END RSA PRIVATE KEY----- 31 | -------------------------------------------------------------------------------- /schema-registry/with-http_and_https/schema-registry/secrets/schema-registry.keystore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/schema-registry/with-http_and_https/schema-registry/secrets/schema-registry.keystore -------------------------------------------------------------------------------- /schema-registry/with-http_and_https/schema-registry/secrets/schema-registry.truststore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/schema-registry/with-http_and_https/schema-registry/secrets/schema-registry.truststore -------------------------------------------------------------------------------- /scram/.env: -------------------------------------------------------------------------------- 1 | ../.env -------------------------------------------------------------------------------- /scram/admin.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="admin" \ 5 | password="admin-secret"; 6 | 7 | -------------------------------------------------------------------------------- /scram/consumer.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="consumer" \ 5 | password="consumer-secret"; 6 | 7 | -------------------------------------------------------------------------------- /scram/jline-2.14.6.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/scram/jline-2.14.6.jar -------------------------------------------------------------------------------- /scram/kafka.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Client { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | username="admin" 4 | password="password"; 5 | }; 6 | -------------------------------------------------------------------------------- /scram/producer.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | 
username="producer" \ 5 | password="producer-secret"; 6 | 7 | -------------------------------------------------------------------------------- /scram/up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | docker-compose up -d 4 | 5 | # Creating the user admin (super user) 6 | # The first user needs to be created using the zookeeper connection (bootstrapping process) 7 | docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=admin-secret],SCRAM-SHA-512=[password=admin-secret]' --entity-type users --entity-name admin 8 | 9 | # All additional users can be created using the broker connection 10 | docker-compose exec kafka kafka-configs --bootstrap-server kafka:9093 --alter --add-config 'SCRAM-SHA-256=[password=kafka-secret],SCRAM-SHA-512=[password=kafka-secret]' --entity-type users --entity-name kafka --command-config /tmp/admin.properties 11 | docker-compose exec kafka kafka-configs --bootstrap-server kafka:9093 --alter --add-config 'SCRAM-SHA-256=[password=producer-secret],SCRAM-SHA-512=[password=producer-secret]' --entity-type users --entity-name producer --command-config /tmp/admin.properties 12 | docker-compose exec kafka kafka-configs --bootstrap-server kafka:9093 --alter --add-config 'SCRAM-SHA-256=[password=consumer-secret],SCRAM-SHA-512=[password=consumer-secret]' --entity-type users --entity-name consumer --command-config /tmp/admin.properties 13 | 14 | echo "Example configuration:" 15 | echo "-> kafka-console-producer --broker-list localhost:9093 --producer.config producer.properties --topic test" 16 | echo "-> kafka-console-consumer --bootstrap-server localhost:9093 --consumer.config consumer.properties --topic test --from-beginning" 17 | echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9093 --consumer.config /tmp/admin.properties --topic test --from-beginning" 18 | 19 | -------------------------------------------------------------------------------- /scram/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | user_admin="password"; 4 | }; 5 | -------------------------------------------------------------------------------- /secure-jmx/README.md: -------------------------------------------------------------------------------- 1 | # A guide to having a secure JMX connection 2 | 3 | The need for a secure JMX connection is very common in big organisations. There are a few ways of implementing this; in this example we show one of them: 4 | 5 | In Apache Kafka, you can pass a JVM option like this: 6 | 7 | ```sh 8 | KAFKA_JMX_OPTS=-Dcom.sun.management.config.file=/var/ssl/private/jmxremote.properties 9 | ``` 10 | 11 | This instructs the JVM to configure JMX using the referenced file. 
12 | 13 | This file should look like: 14 | 15 | ```properties 16 | com.sun.management.jmxremote=true 17 | com.sun.management.jmxremote.port=9999 18 | com.sun.management.jmxremote.rmi.port=9999 19 | com.sun.management.jmxremote.password.file=/var/ssl/private/jmxremote.password 20 | com.sun.management.jmxremote.access.file=/var/ssl/private/jmxremote.access 21 | com.sun.management.jmxremote.registry.ssl=true 22 | com.sun.management.jmxremote.ssl.config.file=/var/ssl/private/jmxremote.properties 23 | 24 | javax.net.ssl.keyStore=/var/ssl/private/kafka.keystore 25 | javax.net.ssl.keyStorePassword=confluent 26 | javax.net.ssl.trustStore=/var/ssl/private/kafka.truststore 27 | javax.net.ssl.trustStorePassword=confluent 28 | ``` 29 | 30 | In this example we set: 31 | 32 | * An SSL-secured JMX connection. 33 | * Authentication using the configured user and password files. 34 | 35 | Other options for handling authentication are possible, such as LDAP or other login modules. They are not covered in this example. 36 | -------------------------------------------------------------------------------- /secure-jmx/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | 4 | zookeeper: 5 | build: zookeeper/ 6 | container_name: zookeeper 7 | hostname: zookeeper 8 | volumes: 9 | - ./secrets/:/var/ssl/private 10 | environment: 11 | KAFKA_JMX_OPTS: " -Dcom.sun.management.config.file=/var/ssl/private/jmxremote.properties" 12 | 13 | kafka: 14 | build: kafka/ 15 | container_name: kafka 16 | depends_on: 17 | - zookeeper 18 | volumes: 19 | - ./secrets/:/var/ssl/private 20 | environment: 21 | KAFKA_JMX_OPTS: "-Dcom.sun.management.config.file=/var/ssl/private/jmxremote.properties" 22 | #KAFKA_JMX_OPTS: "-Dcom.sun.management.jmxremote.port=9999 -Dcom.sun.management.jmxremote.rmi.port=9999 -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false" 23 | -------------------------------------------------------------------------------- /secure-jmx/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | MAINTAINER pere.urbon@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. 
Configure Kafka 15 | COPY server.properties /etc/kafka/server.properties 16 | COPY consumer.properties /etc/kafka/consumer.properties 17 | 18 | EXPOSE 9093 19 | EXPOSE 9999 20 | 21 | CMD kafka-server-start /etc/kafka/server.properties 22 | -------------------------------------------------------------------------------- /secure-jmx/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.5/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.5 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /secure-jmx/kafka/consumer.properties: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /secure-jmx/kafka/server.properties: -------------------------------------------------------------------------------- 1 | ############################# Server Basics ############################# 2 | broker.id=0 3 | listeners=PLAINTEXT://kafka:9093 4 | advertised.listeners=PLAINTEXT://kafka:9093 5 | security.inter.broker.protocol=PLAINTEXT 6 | log.dirs=/var/lib/kafka 7 | ############################# Internal Topic Settings ############################# 8 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" 9 | # For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability. 10 | offsets.topic.replication.factor=1 11 | transaction.state.log.replication.factor=1 12 | transaction.state.log.min.isr=1 13 | 14 | # Zookeeper connection string (see zookeeper docs for details). 15 | # This is a comma-separated list of host:port pairs, each corresponding to a zk 16 | # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". 17 | # You can also append an optional chroot string to the urls to specify the 18 | # root directory for all kafka znodes. 19 | zookeeper.connect=zookeeper:2181 20 | 21 | ############################# Group Coordinator Settings ############################# 22 | 23 | # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance. 24 | # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms. 25 | # The default value for this is 3 seconds. 26 | # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing. 27 | # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup. 
28 | group.initial.rebalance.delay.ms=0 29 | -------------------------------------------------------------------------------- /secure-jmx/pull-jmx-kafka.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | MY_KAFKA_OPTS="-Djavax.net.ssl.keyStore=/var/ssl/private/kafka.keystore -Djavax.net.ssl.keyStorePassword=confluent -Djavax.net.ssl.trustStore=/var/ssl/private/kafka.truststore -Djavax.net.ssl.trustStorePassword=confluent" 4 | 5 | docker-compose exec -e KAFKA_JMX_OPTS="" -e KAFKA_OPTS="$MY_KAFKA_OPTS" kafka kafka-run-class kafka.tools.JmxTool \ 6 | --object-name kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec \ 7 | --jmx-ssl-enable true --jmx-auth-prop admin=adminpassword 8 | -------------------------------------------------------------------------------- /secure-jmx/pull-jmx-zookeeper.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | MY_KAFKA_OPTS="-Djavax.net.ssl.keyStore=/var/ssl/private/kafka.keystore -Djavax.net.ssl.keyStorePassword=confluent -Djavax.net.ssl.trustStore=/var/ssl/private/kafka.truststore -Djavax.net.ssl.trustStorePassword=confluent" 4 | 5 | docker-compose exec -e KAFKA_JMX_OPTS="" -e KAFKA_OPTS="$MY_KAFKA_OPTS" zookeeper kafka-run-class kafka.tools.JmxTool \ 6 | --object-name org.apache.ZooKeeperService:name0=StandaloneServer_port2181 \ 7 | --jmx-ssl-enable true --jmx-auth-prop admin=adminpassword 8 | 9 | 10 | #get -s -b org.apache.ZooKeeperService:name0=StandaloneServer_port2181 AvgRequestLatency 11 | -------------------------------------------------------------------------------- /secure-jmx/secrets/client.keystore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/secure-jmx/secrets/client.keystore -------------------------------------------------------------------------------- /secure-jmx/secrets/client.truststore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/secure-jmx/secrets/client.truststore -------------------------------------------------------------------------------- /secure-jmx/secrets/jmxremote.access: -------------------------------------------------------------------------------- 1 | admin readwrite 2 | user readonly 3 | -------------------------------------------------------------------------------- /secure-jmx/secrets/jmxremote.password: -------------------------------------------------------------------------------- 1 | admin adminpassword 2 | user userpassword 3 | -------------------------------------------------------------------------------- /secure-jmx/secrets/jmxremote.properties: -------------------------------------------------------------------------------- 1 | com.sun.management.jmxremote=true 2 | com.sun.management.jmxremote.port=9999 3 | com.sun.management.jmxremote.rmi.port=9999 4 | com.sun.management.jmxremote.password.file=/var/ssl/private/jmxremote.password 5 | com.sun.management.jmxremote.access.file=/var/ssl/private/jmxremote.access 6 | com.sun.management.jmxremote.registry.ssl=true 7 | com.sun.management.jmxremote.ssl.config.file=/var/ssl/private/jmxremote.properties 8 | 9 | javax.net.ssl.keyStore=/var/ssl/private/kafka.keystore 10 | javax.net.ssl.keyStorePassword=confluent 11 | 
javax.net.ssl.trustStore=/var/ssl/private/kafka.truststore 12 | javax.net.ssl.trustStorePassword=confluent 13 | -------------------------------------------------------------------------------- /secure-jmx/secrets/kafka.keystore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/secure-jmx/secrets/kafka.keystore -------------------------------------------------------------------------------- /secure-jmx/secrets/kafka.truststore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/secure-jmx/secrets/kafka.truststore -------------------------------------------------------------------------------- /secure-jmx/up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | docker-compose up -d --build 4 | 5 | # Creating the user kafka 6 | # kafka is configured as a super user, no need for additional ACL 7 | # docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka 8 | 9 | echo 10 | echo "Example jmx pulling: ./pull-jmx-kafka.sh or ./pull-jmx-zookeeper.sh" 11 | echo 12 | echo "Other useful tools for checking this are JMX clients such as jconsole." 13 | -------------------------------------------------------------------------------- /secure-jmx/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | MAINTAINER pere.urbon@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. 
Configure zookeeper 15 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 16 | 17 | EXPOSE 2181 18 | EXPOSE 9998 19 | 20 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 21 | -------------------------------------------------------------------------------- /secure-jmx/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.5/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.5 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /secure-jmx/zookeeper/jmxremote.access: -------------------------------------------------------------------------------- 1 | admin readwrite 2 | user readonly 3 | -------------------------------------------------------------------------------- /secure-jmx/zookeeper/jmxremote.password: -------------------------------------------------------------------------------- 1 | admin adminpassword 2 | user userpassword 3 | -------------------------------------------------------------------------------- /secure-jmx/zookeeper/jmxremote.properties: -------------------------------------------------------------------------------- 1 | com.sun.management.jmxremote=true 2 | com.sun.management.jmxremote.port=9998 3 | com.sun.management.jmxremote.rmi.port=9998 4 | com.sun.management.jmxremote.password.file=/var/ssl/private/jmxremote.password 5 | com.sun.management.jmxremote.access.file=/var/ssl/private/jmxremote.access 6 | com.sun.management.jmxremote.registry.ssl=true 7 | com.sun.management.jmxremote.ssl.config.file=/var/ssl/private/jmxremote.properties 8 | 9 | javax.net.ssl.keyStore=/var/ssl/private/kafka.keystore 10 | javax.net.ssl.keyStorePassword=confluent 11 | javax.net.ssl.trustStore=/var/ssl/private/kafka.truststore 12 | javax.net.ssl.trustStorePassword=confluent 13 | -------------------------------------------------------------------------------- /secure-jmx/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
16 | dataDir=/var/lib/zookeeper 17 | # the port at which the clients will connect 18 | clientPort=2181 19 | # disable the per-ip limit on the number of connections since this is a non-production config 20 | maxClientCnxns=0 21 | -------------------------------------------------------------------------------- /tls-with-ocrl/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/tls-with-ocrl/.gitignore -------------------------------------------------------------------------------- /tls-with-ocrl/certs/broker.keystore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/tls-with-ocrl/certs/broker.keystore -------------------------------------------------------------------------------- /tls-with-ocrl/certs/broker.truststore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/tls-with-ocrl/certs/broker.truststore -------------------------------------------------------------------------------- /tls-with-ocrl/certs/client.keystore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/tls-with-ocrl/certs/client.keystore -------------------------------------------------------------------------------- /tls-with-ocrl/certs/client.truststore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/tls-with-ocrl/certs/client.truststore -------------------------------------------------------------------------------- /tls-with-ocrl/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | zookeeper: 4 | build: zookeeper/ 5 | container_name: zookeeper 6 | hostname: zookeeper 7 | domainname: confluent.local 8 | restart: on-failure 9 | networks: 10 | default: 11 | aliases: 12 | - zookeeper.confluent.local 13 | 14 | kafka: 15 | build: kafka/ 16 | container_name: kafka 17 | hostname: kafka 18 | domainname: confluent.local 19 | depends_on: 20 | - zookeeper 21 | restart: on-failure 22 | environment: 23 | - KAFKA_OPTS=-Dcom.sun.security.enableCRLDP=true -Dcom.sun.net.ssl.checkRevocation=true 24 | # - KAFKA_OPTS=-Djavax.net.debug=all -Djava.security.debug=all 25 | volumes: 26 | - ./certs/:/var/lib/secret 27 | networks: 28 | default: 29 | aliases: 30 | - kafka.confluent.local 31 | ports: 32 | - "9093:9093" 33 | 34 | apache: 35 | image: 'httpd:2.4' 36 | container_name: httpd 37 | hostname: httpd 38 | ports: 39 | - "18080:80" 40 | volumes: 41 | - ./web/:/usr/local/apache2/htdocs/ 42 | 43 | volumes: 44 | secret: {} 45 | 46 | networks: 47 | default: 48 | -------------------------------------------------------------------------------- /tls-with-ocrl/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. 
Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-1.8.0-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | # The schema-registry package is required to run kafka-avro-console-producer 14 | RUN yum install -y confluent-schema-registry 15 | 16 | # 3. Configure Kafka 17 | COPY server.properties /etc/kafka/server.properties 18 | COPY consumer.properties /etc/kafka/consumer.properties 19 | 20 | EXPOSE 9093 21 | 22 | CMD kafka-server-start /etc/kafka/server.properties 23 | -------------------------------------------------------------------------------- /tls-with-ocrl/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /tls-with-ocrl/kafka/consumer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka.confluent.local:9093 2 | security.protocol=SSL 3 | ssl.truststore.location=/var/lib/secret/client.truststore 4 | ssl.truststore.password=confluent 5 | ssl.keystore.location=/var/lib/secret/client.keystore 6 | ssl.keystore.password=confluent 7 | -------------------------------------------------------------------------------- /tls-with-ocrl/kafka/server.properties: -------------------------------------------------------------------------------- 1 | broker.id=0 2 | listeners=SSL://kafka.confluent.local:9093,PLAINTEXT://kafka.confluent.local:9092 3 | advertised.listeners=SSL://kafka.confluent.local:9093,PLAINTEXT://kafka.confluent.local:9092 4 | log.dirs=/var/lib/kafka 5 | offsets.topic.replication.factor=1 6 | transaction.state.log.replication.factor=1 7 | transaction.state.log.min.isr=1 8 | zookeeper.connect=zookeeper.confluent.local:2181 9 | 10 | # TLS Configuration 11 | security.inter.broker.protocol=SSL 12 | ssl.truststore.location=/var/lib/secret/broker.truststore 13 | ssl.truststore.password=confluent 14 | ssl.keystore.location=/var/lib/secret/broker.keystore 15 | ssl.keystore.password=confluent 16 | ssl.client.auth=required 17 | authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer 18 | super.users=User:CN=kafka.confluent.local,L=London,O=Confluent,C=UK;User:CN=schema-registry.confluent.local,L=London,O=Confluent,C=UK;User:CN=kafka.confluent.local,O=Confluent Ltd,L=Berlin,ST=Berlin,C=DE;User:CN=producer1,O=Confluent Ltd,L=Berlin,ST=Berlin,C=DE 19 | -------------------------------------------------------------------------------- /tls-with-ocrl/up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | # Starting docker-compose services 5 | docker-compose up -d --build 6 | 7 | echo "Example configuration to access kafka:" 8 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka.confluent.local:9093 --topic test --producer.config /etc/kafka/consumer.properties" 9 | echo "-> docker-compose exec kafka kafka-console-consumer 
--bootstrap-server kafka.confluent.local:9093 --topic test --consumer.config /etc/kafka/consumer.properties --from-beginning" 10 | -------------------------------------------------------------------------------- /tls-with-ocrl/web/crls.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN X509 CRL----- 2 | MIIDDzCB+AIBATANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJERTEPMA0GA1UE 3 | CAwGQmVybGluMRYwFAYDVQQKDA1Db25mbHVlbnQgTHRkMQswCQYDVQQLDAJQUzEY 4 | MBYGA1UEAwwPSW50ZXJtZWRpYXRlIENBMR4wHAYJKoZIhvcNAQkBFg9jYUBjb25m 5 | bHVlbnQuaW8XDTE5MDgzMDE0MDA0NFoXDTE5MDkyOTE0MDA0NFowFTATAgIQAhcN 6 | MTkwODMwMTQwMDMyWqAwMC4wHwYDVR0jBBgwFoAUuEd/Mi/LdUwtRm8Sj4orD55j 7 | TPcwCwYDVR0UBAQCAhAAMA0GCSqGSIb3DQEBCwUAA4ICAQC9wIZkWRf4i52FYeYR 8 | hlvV1Z+DzGMMcg+wPhDxdTHWieA4eJZVDbOpY8P7nM+voU/+QYsF3oTW1lrJV2aO 9 | dTebeLG1t3/40IzvkG70aRgFe199gLj3+ke6UZzrVzD8KqY+pci94uYcwZgr1nxD 10 | SXyD8WffuYRJ9hf5huInZRnp34ECnSTX7gTh2oaoV4SLI1CXKXB62i9OMShOfcQj 11 | 0Uc4DAE5BgZe9uUx2tLeA3vDLCdcrQrPMjy2j536V2U4KyvdY9IiblMvqt2Y0FmU 12 | cxdVL5mo+LUAt3b1fSoOcypxqdlAydxlMBVg8ZDYfw/l44KLA2v3yguKUhtjvYCa 13 | rL24TyltI1I2PYSZJ8pObg+MC9pwjwsSQG2bQOr5scAU4FukFVao2Stc6JSHj5Ng 14 | J/5ExpKpT6k2GY+OU1FZDD0Jku1IZXtyTBYpr3ynxtD2aEgO2Iveh/w5eLT5FDIR 15 | uTRPQBCfwAKFNulv8aDT8BtSYl1Xj5xlG0h1ROyFyfbqcns/Zf3MKcZpTw7MF9xk 16 | /2SzbsinhNBk2vi0WbhA8zCP1+P/rgLCWlDUJbagXzaeWEL3VOgDaqMJ3ks0ruTy 17 | tBBX+kdOoKrG7notMTivsqwsYA4/ZYXhDupA106Z4/1h4zJFVDWpoKnKRLD3WryI 18 | FR/OjytC6XgcNAgGw3Gff8hEcw== 19 | -----END X509 CRL----- 20 | -------------------------------------------------------------------------------- /tls-with-ocrl/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-1.8.0-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. 
Configure zookeeper 15 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 16 | 17 | EXPOSE 2181 18 | 19 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 20 | -------------------------------------------------------------------------------- /tls-with-ocrl/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /tls-with-ocrl/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | -------------------------------------------------------------------------------- /tls/.gitignore: -------------------------------------------------------------------------------- 1 | certs 2 | -------------------------------------------------------------------------------- /tls/ca.cnf: -------------------------------------------------------------------------------- 1 | [ policy_match ] 2 | countryName = match 3 | stateOrProvinceName = match 4 | organizationName = match 5 | organizationalUnitName = optional 6 | commonName = supplied 7 | emailAddress = optional 8 | 9 | [ req ] 10 | prompt = no 11 | distinguished_name = dn 12 | default_md = sha256 13 | default_bits = 4096 14 | x509_extensions = v3_ca 15 | 16 | [ dn ] 17 | countryName = UK 18 | organizationName = Confluent 19 | localityName = London 20 | commonName = kafka.confluent.local 21 | 22 | [ v3_ca ] 23 | subjectKeyIdentifier=hash 24 | basicConstraints = critical,CA:true 25 | authorityKeyIdentifier=keyid:always,issuer:always 26 | keyUsage = critical,keyCertSign,cRLSign 27 | -------------------------------------------------------------------------------- /tls/client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=kafka.confluent.local 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = clientAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=kafka.confluent.local 30 | -------------------------------------------------------------------------------- /tls/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | zookeeper: 4 | build: zookeeper/ 5 | container_name: zookeeper 6 | hostname: zookeeper 7 | domainname: confluent.local 8 | restart: on-failure 9 | volumes: 10 | - ./certs/:/var/lib/secret 11 | networks: 12 | default: 13 | aliases: 14 | - 
zookeeper.confluent.local 15 | 16 | 17 | kafka: 18 | build: kafka/ 19 | container_name: kafka 20 | hostname: kafka 21 | domainname: confluent.local 22 | depends_on: 23 | - zookeeper 24 | restart: on-failure 25 | volumes: 26 | - ./certs/:/var/lib/secret 27 | networks: 28 | default: 29 | aliases: 30 | - kafka.confluent.local 31 | ports: 32 | - "9093:9093" 33 | environment: 34 | SCHEMA_REGISTRY_OPTS: '-Djavax.net.ssl.keyStore=/var/lib/secret/client.keystore.jks -Djavax.net.ssl.trustStore=/var/lib/secret/truststore.jks -Djavax.net.ssl.keyStorePassword=test1234 -Djavax.net.ssl.trustStorePassword=test1234' 35 | 36 | schema-registry: 37 | build: schema-registry/ 38 | container_name: schema-registry 39 | hostname: schema-registry 40 | domainname: confluent.local 41 | depends_on: 42 | - kafka 43 | restart: on-failure 44 | volumes: 45 | - ./certs/:/var/lib/secret 46 | - ./schema-registry/schema-registry.properties:/etc/schema-registry/schema-registry.properties 47 | networks: 48 | default: 49 | aliases: 50 | - schema-registry.confluent.local 51 | ports: 52 | - "8443:8443" 53 | 54 | volumes: 55 | secret: {} 56 | 57 | networks: 58 | default: 59 | -------------------------------------------------------------------------------- /tls/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos7 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install Kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-server 13 | 14 | # 3. Configure Kafka 15 | COPY server.properties /etc/kafka/server.properties 16 | COPY consumer.properties /etc/kafka/consumer.properties 17 | 18 | # 4. 
Add kafkacat 19 | COPY kafkacat /usr/local/bin 20 | RUN chmod +x /usr/local/bin/kafkacat 21 | COPY kafkacat.conf /etc/kafka/kafkacat.conf 22 | 23 | EXPOSE 9093 24 | 25 | CMD kafka-server-start /etc/kafka/server.properties 26 | -------------------------------------------------------------------------------- /tls/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/6.0/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/6.0 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /tls/kafka/consumer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka.confluent.local:9093 2 | security.protocol=SSL 3 | ssl.truststore.location=/var/lib/secret/truststore.jks 4 | ssl.truststore.password=test1234 5 | ssl.keystore.location=/var/lib/secret/client.keystore.jks 6 | ssl.keystore.password=test1234 7 | -------------------------------------------------------------------------------- /tls/kafka/kafkacat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sknop/kafka-security-playbook/0ed9240f8491565e3017dd27d2f21b802b584087/tls/kafka/kafkacat -------------------------------------------------------------------------------- /tls/kafka/kafkacat.conf: -------------------------------------------------------------------------------- 1 | security.protocol=SSL 2 | ssl.key.location=/var/lib/secret/client.pem 3 | ssl.key.password=test1234 4 | ssl.certificate.location=/var/lib/secret/client.pem 5 | ssl.ca.location=/var/lib/secret/ca.pem 6 | -------------------------------------------------------------------------------- /tls/kafka/server.properties: -------------------------------------------------------------------------------- 1 | broker.id=0 2 | listeners=SSL://kafka.confluent.local:9093 3 | advertised.listeners=SSL://kafka.confluent.local:9093 4 | log.dirs=/var/lib/kafka 5 | offsets.topic.replication.factor=1 6 | transaction.state.log.replication.factor=1 7 | zookeeper.connect=zookeeper.confluent.local:2181 8 | 9 | # TLS Configuration 10 | security.inter.broker.protocol=SSL 11 | ssl.truststore.location=/var/lib/secret/truststore.jks 12 | ssl.truststore.password=test1234 13 | ssl.keystore.location=/var/lib/secret/server.keystore.jks 14 | ssl.keystore.password=test1234 15 | ssl.client.auth=required 16 | authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer 17 | super.users=User:CN=kafka.confluent.local,L=London,O=Confluent,C=UK;User:CN=schema-registry.confluent.local,L=London,O=Confluent,C=UK 18 | 19 | # Metrics-reporters 20 | metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter 21 | confluent.metrics.reporter.bootstrap.servers=kafka.confluent.local:9093 22 | confluent.metrics.reporter.security.protocol=SSL 23 | confluent.metrics.reporter.ssl.truststore.location=/var/lib/secret/truststore.jks 24 | confluent.metrics.reporter.ssl.truststore.password=test1234 25 | confluent.metrics.reporter.ssl.keystore.location=/var/lib/secret/server.keystore.jks 26 | confluent.metrics.reporter.ssl.keystore.password=test1234 27 | 
confluent.metrics.reporter.ssl.key.password=test1234 28 | 29 | confluent.metrics.reporter.topic.replicas=1 30 | 31 | -------------------------------------------------------------------------------- /tls/kafkacat.conf: -------------------------------------------------------------------------------- 1 | security.protocol=SSL 2 | ssl.key.location=certs/client.pem 3 | ssl.key.password=test1234 4 | ssl.certificate.location=certs/client.pem 5 | ssl.ca.location=certs/ca.pem 6 | -------------------------------------------------------------------------------- /tls/local-client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=Kiril-Piskunov.local 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = clientAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=Kiril-Piskunov.local 30 | -------------------------------------------------------------------------------- /tls/schema-registry-client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=schema-registry.confluent.local 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = clientAuth, serverAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=schema-registry.confluent.local 30 | -------------------------------------------------------------------------------- /tls/schema-registry/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos7 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install Schema Registry 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-schema-registry confluent-security 13 | 14 | # 3. 
Configure Schema Registry 15 | COPY schema-registry.properties /etc/schema-registry/schema-registry.properties 16 | 17 | EXPOSE 8443 18 | 19 | CMD schema-registry-start /etc/schema-registry/schema-registry.properties 20 | -------------------------------------------------------------------------------- /tls/schema-registry/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/6.0/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/6.0 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /tls/schema-registry/schema-registry.properties: -------------------------------------------------------------------------------- 1 | listeners=https://schema-registry.confluent.local:8443 2 | inter.instance.protocol=https 3 | ssl.keystore.location=/var/lib/secret/schema-registry-client.keystore.jks 4 | ssl.keystore.password=test1234 5 | ssl.key.password=test1234 6 | kafkastore.topic=_schemas 7 | debug=false 8 | 9 | #SSL settings for communication with Kafka Broker 10 | kafkastore.bootstrap.servers=SSL://kafka.confluent.local:9093 11 | kafkastore.security.protocol=SSL 12 | 13 | #SSL trust store to verify cert presented by the broker 14 | kafkastore.ssl.truststore.location=/var/lib/secret/truststore.jks 15 | kafkastore.ssl.truststore.password=test1234 16 | 17 | #SSL key store to provide a cert for the broker 18 | kafkastore.ssl.keystore.location=/var/lib/secret/schema-registry-client.keystore.jks 19 | kafkastore.ssl.keystore.password=test1234 20 | kafkastore.ssl.key.password=test1234 21 | -------------------------------------------------------------------------------- /tls/server.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=kafka.confluent.local 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = serverAuth, clientAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=kafka.confluent.local 30 | -------------------------------------------------------------------------------- /tls/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos7 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-platform 13 | 14 | # 3. 
Configure zookeeper 15 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 16 | 17 | EXPOSE 2181 18 | 19 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 20 | -------------------------------------------------------------------------------- /tls/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/6.0/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/6.0 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /tls/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | --------------------------------------------------------------------------------
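A few usage sketches for the tls/ and tls-with-ocrl/ examples follow. To sanity-check the CRL served from tls-with-ocrl/web/crls.pem, openssl can decode it (assuming openssl is available on the host):

    # Print the issuer, next-update time and the revoked serial numbers
    openssl crl -in tls-with-ocrl/web/crls.pem -noout -text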
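The tls/ compose file mounts a certs/ directory (ignored via tls/.gitignore) into every container, so the key material has to exist before the stack starts. A minimal sketch of bootstrapping the CA from tls/ca.cnf, with file names assumed to match the configs above (the playbook's up script is the authoritative version):

    # Self-signed CA; ca.cnf supplies the DN, the 4096-bit default key size and the v3_ca extensions
    openssl req -new -x509 -config tls/ca.cnf -nodes -keyout certs/ca.key -out certs/ca.pem -days 365
    # Shared truststore referenced by server.properties and consumer.properties (password test1234)
    keytool -importcert -noprompt -alias ca -file certs/ca.pem -keystore certs/truststore.jks -storepass test1234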
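The client material referenced by consumer.properties and kafkacat.conf (client.keystore.jks and client.pem) can be derived from tls/client.cnf along the same lines; a sketch, with intermediate file names assumed:

    # CSR carrying the v3_req extensions from client.cnf, then signed by the CA above
    openssl req -new -config tls/client.cnf -nodes -keyout certs/client.key -out certs/client.csr
    openssl x509 -req -in certs/client.csr -CA certs/ca.pem -CAkey certs/ca.key -CAcreateserial -extfile tls/client.cnf -extensions v3_req -days 365 -out certs/client.crt
    # Single PEM (key + cert) as expected by kafkacat.conf
    cat certs/client.key certs/client.crt > certs/client.pem
    # JKS keystore as expected by the Java clients (password test1234 throughout)
    openssl pkcs12 -export -in certs/client.crt -inkey certs/client.key -name client -password pass:test1234 -out certs/client.p12
    keytool -importkeystore -srckeystore certs/client.p12 -srcstoretype PKCS12 -srcstorepass test1234 -destkeystore certs/client.keystore.jks -deststorepass test1234

The broker's server.keystore.jks and the schema-registry-client keystore follow the same pattern, using tls/server.cnf and tls/schema-registry-client.cnf, whose extendedKeyUsage additionally covers serverAuth.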
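With certs/ populated, the tls/ stack comes up with docker-compose, and the broker listener can be probed from the host; since server.properties sets ssl.client.auth=required, the handshake should only complete when a CA-signed client certificate is presented:

    docker-compose up -d --build
    # Port 9093 is published to the host; omit -cert/-key to see the mutual-TLS handshake fail
    openssl s_client -connect localhost:9093 -CAfile certs/ca.pem -cert certs/client.pem -key certs/client.pem </dev/null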
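kafkacat reads its SSL settings from a properties-style file via -F (supported in recent kafkacat releases; whether the binary bundled into the kafka image supports it is an assumption). From inside the kafka container:

    # Produce one record, then consume the topic and exit at end of partition
    echo hello | kafkacat -F /etc/kafka/kafkacat.conf -b kafka.confluent.local:9093 -t test -P
    kafkacat -F /etc/kafka/kafkacat.conf -b kafka.confluent.local:9093 -t test -C -e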
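The Schema Registry listener on 8443 terminates TLS as well; a quick check with curl against the standard /subjects endpoint, reusing the client material and assuming schema-registry.confluent.local resolves from the caller (e.g. via an /etc/hosts entry, since the certificate's SAN only covers that name):

    curl --cacert certs/ca.pem --cert certs/client.pem --key certs/client.pem https://schema-registry.confluent.local:8443/subjects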