├── .env ├── .gitignore ├── KerberosCheatsheet.md ├── README.md ├── TlsCheatsheet.md ├── acls ├── docker-compose.yaml ├── kafka │ ├── Dockerfile │ ├── admin.conf │ ├── consumer.conf │ ├── kafka.conf │ ├── kafka.sasl.jaas.conf │ ├── kafkacat.conf │ ├── log4j.properties.template │ └── producer.conf ├── up └── zookeeper.sasl.jaas.conf ├── apache-kafka-with-zk3.5-and-tls ├── .gitignore ├── README.md ├── docker-compose.yml ├── kafka │ ├── Dockerfile │ └── server.properties ├── up └── zookeeper │ ├── Dockerfile │ ├── tlsZkCli.sh │ └── zoo.cfg ├── auditlog ├── README.md ├── config │ └── delete-records.json ├── data │ └── my_msgs.txt ├── docker-compose.yml ├── example-config.json ├── kafka │ ├── consumer-user.properties │ ├── kafka-user.properties │ ├── kafka.properties │ ├── kafka.sasl.jaas.config │ ├── log4j.properties │ ├── producer-user.properties │ └── tools-log4j.properties ├── scripts │ ├── create-topics.sh │ ├── delete-records.sh │ ├── describe-topics.sh │ ├── explore-audit-topic.sh │ └── write-msg.sh ├── up └── zookeeper │ ├── log4j.properties │ ├── tools-log4j.properties │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── ca-builder-scripts ├── .gitignore ├── README.md ├── build-a-batch-of-certs.sh ├── build-a-batch-of-stores.sh ├── configs │ ├── batch-of-certs.txt │ ├── batch-of-stores.txt │ ├── ca-config-vars │ ├── ca.config │ └── intermediate-ca.config ├── create-crl.sh ├── create-pair-certs.sh ├── del-cert.sh ├── revoke-cert.sh ├── setup-ca-with-intermediate-ca.sh ├── support-scripts │ ├── build-ca.sh │ └── create-cert.sh └── utils │ ├── build-ca.sh │ ├── build-intermediate-ca.sh │ └── functions.sh ├── delegation_tokens ├── .gitignore ├── ca.cnf ├── client.cnf ├── docker-compose.yml ├── kafka │ ├── Dockerfile │ ├── confluent.repo │ ├── consumer.properties │ ├── create_client_properties.sh │ ├── kafka_server_jaas.conf │ └── server.properties ├── server.cnf ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── zookeeper.properties │ └── 
zookeeper.sasl.jaas.config ├── kafka-connect-mtls ├── .gitignore ├── README.md ├── check-ssl-client-auth.sh ├── connect │ ├── config │ │ ├── ca.cnf │ │ └── client.cnf │ └── secrets │ │ ├── ca-chain.cert.pem │ │ ├── connect.cert.pem │ │ ├── connect.key.pem │ │ ├── server.keystore │ │ └── server.truststore ├── docker-compose.yml └── up ├── kerberos-multi-node ├── README.md ├── docker-compose.yml ├── down ├── kafka │ ├── Dockerfile │ ├── confluent.repo │ ├── consumer.properties │ ├── kafka.sasl.jaas.config │ └── server.properties ├── kafka1 │ ├── Dockerfile │ ├── confluent.repo │ ├── consumer.properties │ ├── kafka.sasl.jaas.config │ └── server.properties ├── kdc │ ├── Dockerfile │ └── krb5.conf ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── kerberos-multi-sasl ├── README.md ├── client │ ├── Dockerfile │ ├── client.sasl.jaas.config │ ├── command.properties │ ├── confluent.repo │ ├── consumer.properties │ ├── producer.properties │ └── scram.properties ├── docker-compose.yml ├── kafka │ ├── Dockerfile │ ├── confluent.repo │ ├── kafka.sasl.jaas.config │ └── server.properties ├── kdc │ ├── Dockerfile │ └── krb5.conf ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── kerberos ├── README.md ├── client │ ├── Dockerfile │ ├── client.sasl.jaas.config │ ├── command.properties │ ├── confluent.repo │ ├── consumer.properties │ └── producer.properties ├── docker-compose.yml ├── kafka │ ├── Dockerfile │ ├── confluent.repo │ ├── kafka.sasl.jaas.config │ └── server.properties ├── kdc │ ├── Dockerfile │ └── krb5.conf ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── kraft └── none │ ├── docker-compose.yml │ ├── image │ └── kafka-images │ │ └── kafka │ │ ├── Dockerfile │ │ ├── Dockerfile.ubi8 │ │ ├── README.md │ │ ├── include │ │ └── etc │ │ │ └── confluent │ │ │ └── 
docker │ │ │ ├── configure │ │ │ ├── ensure │ │ │ ├── kafka.properties.template │ │ │ ├── launch │ │ │ ├── log4j.properties.template │ │ │ ├── run │ │ │ └── tools-log4j.properties.template │ │ ├── pom.xml │ │ ├── requirements.txt │ │ ├── setup.py │ │ ├── test │ │ ├── fixtures │ │ │ ├── cluster-bridged-plain.yml │ │ │ ├── cluster-bridged-sasl.yml │ │ │ ├── cluster-bridged-ssl.yml │ │ │ ├── cluster-host-plain.yml │ │ │ ├── cluster-host-sasl.yml │ │ │ ├── cluster-host-ssl.yml │ │ │ ├── secrets │ │ │ │ ├── bridged.consumer.ssl.config │ │ │ │ ├── bridged.consumer.ssl.sasl.config │ │ │ │ ├── bridged.producer.ssl.config │ │ │ │ ├── bridged.producer.ssl.sasl.config │ │ │ │ ├── bridged_broker1_jaas.conf │ │ │ │ ├── bridged_broker2_jaas.conf │ │ │ │ ├── bridged_broker3_jaas.conf │ │ │ │ ├── bridged_consumer_jaas.conf │ │ │ │ ├── bridged_krb.conf │ │ │ │ ├── bridged_producer_jaas.conf │ │ │ │ ├── broker1-ca1-signed.crt │ │ │ │ ├── broker1_keystore_creds │ │ │ │ ├── broker1_sslkey_creds │ │ │ │ ├── broker1_truststore_creds │ │ │ │ ├── broker2-ca1-signed.crt │ │ │ │ ├── broker2_keystore_creds │ │ │ │ ├── broker2_sslkey_creds │ │ │ │ ├── broker2_truststore_creds │ │ │ │ ├── broker3-ca1-signed.crt │ │ │ │ ├── broker3_keystore_creds │ │ │ │ ├── broker3_sslkey_creds │ │ │ │ ├── broker3_truststore_creds │ │ │ │ ├── client-plain.config │ │ │ │ ├── config_krb.conf │ │ │ │ ├── config_server1_jaas.conf │ │ │ │ ├── consumer-ca1-signed.crt │ │ │ │ ├── consumer_keystore_creds │ │ │ │ ├── consumer_sslkey_creds │ │ │ │ ├── consumer_truststore_creds │ │ │ │ ├── create-certs.sh │ │ │ │ ├── host.consumer.ssl.config │ │ │ │ ├── host.consumer.ssl.sasl.config │ │ │ │ ├── host.producer.ssl.config │ │ │ │ ├── host.producer.ssl.sasl.config │ │ │ │ ├── host_broker1_jaas.conf │ │ │ │ ├── host_broker2_jaas.conf │ │ │ │ ├── host_broker3_jaas.conf │ │ │ │ ├── host_consumer_jaas.conf │ │ │ │ ├── host_krb.conf │ │ │ │ ├── host_producer_jaas.conf │ │ │ │ ├── host_zookeeper_1_jaas.conf │ │ │ │ ├── 
host_zookeeper_2_jaas.conf │ │ │ │ ├── host_zookeeper_3_jaas.conf │ │ │ │ ├── kafka.broker1.keystore.jks │ │ │ │ ├── kafka.broker1.truststore.jks │ │ │ │ ├── kafka.broker2.keystore.jks │ │ │ │ ├── kafka.broker2.truststore.jks │ │ │ │ ├── kafka.broker3.keystore.jks │ │ │ │ ├── kafka.broker3.truststore.jks │ │ │ │ ├── kafka.consumer.keystore.jks │ │ │ │ ├── kafka.consumer.truststore.jks │ │ │ │ ├── kafka.producer.keystore.jks │ │ │ │ ├── kafka.producer.truststore.jks │ │ │ │ ├── kafkacat-ca1-signed.pem │ │ │ │ ├── kafkacat.client.key │ │ │ │ ├── krb_server.conf │ │ │ │ ├── producer-ca1-signed.crt │ │ │ │ ├── producer-ssl.config │ │ │ │ ├── producer_keystore_creds │ │ │ │ ├── producer_sslkey_creds │ │ │ │ ├── producer_truststore_creds │ │ │ │ ├── snakeoil-ca-1.crt │ │ │ │ └── snakeoil-ca-1.key │ │ │ ├── standalone-config.yml │ │ │ └── standalone-network.yml │ │ └── test_kafka.py │ │ └── tox.ini │ └── up ├── ldap-auth ├── docker-compose.yaml ├── kafka │ ├── Dockerfile │ ├── alice.properties │ ├── barnie.properties │ ├── charlie.properties │ ├── confluent.repo │ ├── kafka.jaas.config │ ├── kafka.properties │ ├── log4j.properties │ └── server.properties ├── ldap │ └── custom │ │ ├── 01_base.ldif │ │ ├── 02_KafkaDevelopers.ldif │ │ ├── 03_ProjectA.ldif │ │ ├── 04_ProjectB.ldif │ │ ├── 10_alice.ldif │ │ ├── 11_barnie.ldif │ │ ├── 12_charlie.ldif │ │ ├── 13_donald.ldif │ │ ├── 14_eva.ldif │ │ ├── 15_fritz.ldif │ │ ├── 16_greta.ldif │ │ ├── 17_kafka.ldif │ │ └── 20_group_add.ldif ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── ldap ├── acls │ └── acls.csv ├── add-user ├── custom │ ├── 01_base.ldif │ ├── 02_KafkaDevelopers.ldif │ ├── 10_alice.ldif │ ├── 11_barnie.ldif │ ├── 12_charlie.ldif │ └── 20_group_add.ldif ├── docker-compose-with-ssl.yaml ├── docker-compose.yaml ├── kafka │ ├── Dockerfile │ ├── alice.properties │ ├── barnie.properties │ ├── charlie.properties │ ├── confluent.repo │ ├── 
consumer.properties │ ├── jks │ │ └── .gitignore │ ├── kafka.jaas.config │ ├── kafka.properties │ ├── log4j.properties │ ├── server-with-ssl.properties │ ├── server.properties │ └── users │ │ └── purbon.properties ├── ldap │ ├── certs │ │ └── .gitignore │ └── custom │ │ ├── 01_base.ldif │ │ ├── 02_KafkaDevelopers.ldif │ │ ├── 10_alice.ldif │ │ ├── 11_barnie.ldif │ │ ├── 12_charlie.ldif │ │ └── 20_group_add.ldif ├── scripts │ ├── .gitignore │ └── certs-create.sh ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── multi-sasl ├── docker-compose.yml ├── kafka │ ├── Dockerfile │ ├── confluent.repo │ ├── consumer.plain.properties │ ├── consumer.properties │ ├── kafka.sasl.jaas.config │ └── server.properties ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── zookeeper.properties │ └── zookeeper.sasl.jaas.config ├── none ├── .env ├── docker-compose.yml └── up ├── oauth ├── .gitignore ├── ca.cnf ├── docker-compose.yml ├── generate_certs.sh ├── kafka │ ├── Dockerfile │ ├── client.properties │ ├── confluent.repo │ ├── kafka_server_jaas.conf │ ├── oauthcallbackhandlers │ │ ├── .gitignore │ │ ├── pom.xml │ │ └── src │ │ │ ├── main │ │ │ └── java │ │ │ │ └── io │ │ │ │ └── confluent │ │ │ │ └── examples │ │ │ │ └── authentication │ │ │ │ └── oauth │ │ │ │ ├── JwtHelper.java │ │ │ │ ├── MyOauthBearerToken.java │ │ │ │ ├── OauthBearerLoginCallbackHandler.java │ │ │ │ └── OauthBearerValidatorCallbackHandler.java │ │ │ └── test │ │ │ └── java │ │ │ └── io │ │ │ └── confluent │ │ │ └── examples │ │ │ └── authentication │ │ │ └── oauth │ │ │ ├── JwtHelperTest.java │ │ │ └── ProduceDataTest.java │ ├── server.properties │ └── test_produce_and_consume.sh ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ └── zookeeper.properties ├── plain ├── .env ├── consumer.properties ├── docker-compose.yml ├── producer.properties └── up ├── quotas ├── Client │ ├── Dockerfile │ └── confluent.repo ├── Grafana 
│ └── provisioning │ │ ├── dashboards │ │ ├── grafana-dashboard.json │ │ └── one-quota.yaml │ │ └── datasources │ │ └── prometheus.yaml ├── JMX_Exporter │ ├── jmx_prometheus_javaagent-0.11.0.jar │ ├── kafka_config.yml │ └── zookeeper_config.yml ├── Prometheus │ └── prometheus.yml ├── docker-compose.yml ├── secrets │ ├── admin.properties │ ├── kafka_server_jaas.conf │ ├── noquota.properties │ └── quota.properties └── up ├── rbac ├── .env ├── README.md ├── client-configs │ ├── alice.properties │ ├── barnie.properties │ ├── charlie.properties │ ├── copy-props.sh │ ├── donald.properties │ ├── eva.properties │ ├── fritz.properties │ └── greta.properties ├── conf │ ├── keypair.pem │ └── public.pem ├── create-role-bindings.sh ├── docker-compose.yml ├── functions.sh ├── kafka-registered.sh ├── ldap │ └── custom │ │ ├── 01_base.ldif │ │ ├── 02_KafkaDevelopers.ldif │ │ ├── 03_ProjectA.ldif │ │ ├── 04_ProjectB.ldif │ │ ├── 10_alice.ldif │ │ ├── 11_barnie.ldif │ │ ├── 12_charlie.ldif │ │ ├── 13_donald.ldif │ │ ├── 14_eva.ldif │ │ ├── 15_fritz.ldif │ │ ├── 16_greta.ldif │ │ └── 20_group_add.ldif └── up ├── schema-registry ├── with-basic-auth-and-ccloud │ ├── README.md │ ├── docker-compose.yml │ ├── jaas_config.file │ ├── password-file │ └── up ├── with-basic-auth │ ├── .env │ ├── docker-compose.yml │ ├── jaas_config.file │ ├── password-file │ └── up └── with-http_and_https │ ├── .env │ ├── .gitignore │ ├── README.md │ ├── docker-compose.yml │ ├── schema-registry │ ├── config │ │ ├── ca.cnf │ │ └── client.cnf │ └── secrets │ │ ├── ca-chain.cert.pem │ │ ├── schema-registry.cert.pem │ │ ├── schema-registry.key.pem │ │ ├── schema-registry.keystore │ │ └── schema-registry.truststore │ ├── up │ └── verify.sh ├── scram ├── .env ├── admin.properties ├── consumer.properties ├── docker-compose.yml ├── jline-2.14.6.jar ├── kafka.sasl.jaas.config ├── producer.properties ├── up └── zookeeper.sasl.jaas.config ├── secure-jmx ├── README.md ├── docker-compose.yml ├── kafka │ ├── Dockerfile │ 
├── confluent.repo │ ├── consumer.properties │ └── server.properties ├── pull-jmx-kafka.sh ├── pull-jmx-zookeeper.sh ├── secrets │ ├── client.keystore │ ├── client.truststore │ ├── jmxremote.access │ ├── jmxremote.password │ ├── jmxremote.properties │ ├── kafka.keystore │ └── kafka.truststore ├── up └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ ├── jmxremote.access │ ├── jmxremote.password │ ├── jmxremote.properties │ └── zookeeper.properties ├── tls-with-ocrl ├── .gitignore ├── README.md ├── certs │ ├── broker.keystore │ ├── broker.truststore │ ├── client.keystore │ └── client.truststore ├── docker-compose.yml ├── kafka │ ├── Dockerfile │ ├── confluent.repo │ ├── consumer.properties │ └── server.properties ├── up ├── web │ └── crls.pem └── zookeeper │ ├── Dockerfile │ ├── confluent.repo │ └── zookeeper.properties └── tls ├── .gitignore ├── ca.cnf ├── client.cnf ├── docker-compose.yml ├── kafka ├── Dockerfile ├── confluent.repo ├── consumer.properties ├── kafkacat ├── kafkacat.conf └── server.properties ├── kafkacat.conf ├── local-client.cnf ├── schema-registry-client.cnf ├── schema-registry ├── Dockerfile ├── confluent.repo └── schema-registry.properties ├── server.cnf ├── up └── zookeeper ├── Dockerfile ├── confluent.repo └── zookeeper.properties /.env: -------------------------------------------------------------------------------- 1 | # Values in this file will be used to replace env variables in Docker Compose files used throughout the repo. 2 | 3 | # You can override any of these values in one of several ways: 4 | # 1. Create another '.env' file for the docker-compose.yml file 5 | # 2. Edit the docker-compose.yml file directly and replace the env variable with a value 6 | # 3. 
Set a corresponding environment variable in the shell 7 | 8 | # REPOSITORY - repository for Docker image 9 | # The '/' which separates the REPOSITORY from the image name is not required here 10 | # Examples: 11 | # - REPOSITORY=confluentinc will use images from the confluentinc repository at https://hub.docker.com/u/confluentinc 12 | # - REPOSITORY=.dkr.ecr.us-west-2.amazonaws.com/confluentinc will use images from the confluentinc repository at the specified ECR registry (images must be pulled separately) 13 | REPOSITORY=confluentinc 14 | 15 | # TAG - image tag 16 | # The ':' which separates the image name from the TAG is not required here 17 | # Examples: 18 | # - TAG=5.4.0 will use the image tag 5.4.0 19 | # - TAG=5.4.x-latest will use the image tag 5.4.x-latest 20 | # TAG=5.5.0 21 | TAG=6.1.0 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .envrc 2 | .java-version 3 | kerberos-multi-node/TODO 4 | .idea 5 | -------------------------------------------------------------------------------- /acls/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM confluentinc/cp-enterprise-kafka:5.4.0 2 | 3 | MAINTAINER sven@confluent.io 4 | 5 | # Make sure the log directory is world-writable 6 | RUN echo "===> Creating authorizer logs dir ..." 
\ 7 | && mkdir -p /var/log/kafka-auth-logs \ 8 | && chmod -R ag+w /var/log/kafka-auth-logs 9 | 10 | COPY log4j.properties.template /etc/confluent/docker/log4j.properties.template 11 | 12 | COPY *.conf /tmp/ 13 | 14 | -------------------------------------------------------------------------------- /acls/kafka/admin.conf: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="admin" \ 5 | password="admin-pass"; 6 | -------------------------------------------------------------------------------- /acls/kafka/consumer.conf: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="consumer" \ 5 | password="consumer-pass"; 6 | -------------------------------------------------------------------------------- /acls/kafka/kafka.conf: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="kafka" \ 5 | password="kafka-pass"; 6 | -------------------------------------------------------------------------------- /acls/kafka/kafka.sasl.jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | org.apache.kafka.common.security.scram.ScramLoginModule required 3 | username="kafka" 4 | password="kafka-pass"; 5 | }; 6 | KafkaClient { 7 | org.apache.kafka.common.security.scram.ScramLoginModule required 8 | username="kafka" 9 | password="kafka-pass"; 10 | }; 11 | Client { 12 | org.apache.zookeeper.server.auth.DigestLoginModule required 13 | 
username="admin" 14 | password="password"; 15 | }; 16 | 17 | -------------------------------------------------------------------------------- /acls/kafka/kafkacat.conf: -------------------------------------------------------------------------------- 1 | security.protocol=SASL_PLAINTEXT 2 | sasl.mechanisms=SCRAM-SHA-256 3 | sasl.username=kafka 4 | sasl.password=kafka-pass 5 | -------------------------------------------------------------------------------- /acls/kafka/producer.conf: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="producer" \ 5 | password="producer-pass"; 6 | -------------------------------------------------------------------------------- /acls/zookeeper.sasl.jaas.conf: -------------------------------------------------------------------------------- 1 | Server { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | user_admin="password"; 4 | }; 5 | Client { 6 | org.apache.zookeeper.server.auth.DigestLoginModule required 7 | username="admin" 8 | password="password"; 9 | }; 10 | -------------------------------------------------------------------------------- /apache-kafka-with-zk3.5-and-tls/.gitignore: -------------------------------------------------------------------------------- 1 | bin/ 2 | certs/ 3 | certs-old/ 4 | tmp-dir 5 | images/ 6 | zookeeper.properties 7 | -------------------------------------------------------------------------------- /apache-kafka-with-zk3.5-and-tls/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | zookeeper: 4 | build: zookeeper/ 5 | container_name: zookeeper 6 | hostname: zookeeper 7 | restart: on-failure 8 | environment: 9 | - 
SERVER_JVMFLAGS=-Dzookeeper.serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory 10 | volumes: 11 | - ./certs/zk-stores:/var/lib/secret 12 | 13 | kafka: 14 | build: kafka/ 15 | container_name: kafka 16 | hostname: kafka 17 | depends_on: 18 | - zookeeper 19 | restart: on-failure 20 | volumes: 21 | - ./certs/kafka-stores:/var/lib/secret 22 | environment: 23 | - KAFKA_OPTS=-Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.client.secure=true -Dzookeeper.ssl.keyStore.location=/var/lib/secret/kafka.jks -Dzookeeper.ssl.keyStore.password=confluent -Dzookeeper.ssl.trustStore.location=/var/lib/secret/truststore.jks -Dzookeeper.ssl.trustStore.password=confluent 24 | ports: 25 | - 29092:29092 26 | -------------------------------------------------------------------------------- /apache-kafka-with-zk3.5-and-tls/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM purbon/kafka 2 | MAINTAINER pere.urbon@gmail.com 3 | ENV container docker 4 | 5 | # 1. Install openjdk 6 | RUN yum install -y java-11-openjdk 7 | 8 | # 2. Configure Kafka 9 | COPY server.properties /etc/kafka/server.properties 10 | 11 | EXPOSE 9092 12 | 13 | CMD kafka-server-start.sh /etc/kafka/server.properties 14 | -------------------------------------------------------------------------------- /apache-kafka-with-zk3.5-and-tls/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM purbon/zookeeper:3.5.5 2 | MAINTAINER pere.urbon@gmail.com 3 | ENV container docker 4 | 5 | # 2. Install zookeeper and kafka 6 | RUN yum install -y java-11-openjdk 7 | 8 | 9 | # 3. Configure zookeeper 10 | COPY zoo.cfg "${ZK_HOME}/conf/zoo.cfg" 11 | 12 | # 4. 
Add extra utility scripts 13 | 14 | ENV PATH="/opt/tlsZkCli.sh:${PATH}" 15 | COPY tlsZkCli.sh /opt/tlsZkCli.sh 16 | 17 | EXPOSE 2182 18 | 19 | CMD zkServer.sh start-foreground 20 | -------------------------------------------------------------------------------- /apache-kafka-with-zk3.5-and-tls/zookeeper/tlsZkCli.sh: -------------------------------------------------------------------------------- 1 | ##!/usr/bin/env bash 2 | 3 | export CLIENT_JVMFLAGS="-Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.client.secure=true 4 | -Dzookeeper.ssl.keyStore.location=/var/lib/secret/zookeeper.jks 5 | -Dzookeeper.ssl.keyStore.password=confluent 6 | -Dzookeeper.ssl.trustStore.location=/var/lib/secret/truststore.jks 7 | -Dzookeeper.ssl.trustStore.password=confluent" 8 | 9 | zkCli.sh -server $1 10 | -------------------------------------------------------------------------------- /auditlog/config/delete-records.json: -------------------------------------------------------------------------------- 1 | { 2 | "partitions": [ 3 | { 4 | "topic": "bar", 5 | "partition": 0, 6 | "offset": 3 7 | } 8 | ], 9 | "version": 1 10 | } 11 | -------------------------------------------------------------------------------- /auditlog/data/my_msgs.txt: -------------------------------------------------------------------------------- 1 | This is a message 2 | This is another message 3 | Abracadabra 4 | -------------------------------------------------------------------------------- /auditlog/example-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "routes": { 3 | "crn:///kafka=*/group=*": { 4 | "consume": { 5 | "allowed": "confluent-audit-log-events", 6 | "denied": "confluent-audit-log-events" 7 | } 8 | }, 9 | "crn:///kafka=*/topic=*": { 10 | "produce": { 11 | "allowed": "confluent-audit-log-events", 12 | "denied": "confluent-audit-log-events" 13 | }, 14 | "consume": { 15 | "allowed": "confluent-audit-log-events", 16 
| "denied": "confluent-audit-log-events" 17 | } 18 | } 19 | }, 20 | "destinations": { 21 | "topics": { 22 | "confluent-audit-log-events": { 23 | "retention_ms": 7776000000 24 | } 25 | } 26 | }, 27 | "default_topics": { 28 | "allowed": "confluent-audit-log-events", 29 | "denied": "confluent-audit-log-events" 30 | }, 31 | "excluded_principals": ["User:kafka", "User:ANONYMOUS"] 32 | } 33 | -------------------------------------------------------------------------------- /auditlog/kafka/consumer-user.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="consumer" \ 5 | password="consumerpass"; 6 | 7 | -------------------------------------------------------------------------------- /auditlog/kafka/kafka-user.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="kafka" \ 5 | password="kafka"; 6 | 7 | -------------------------------------------------------------------------------- /auditlog/kafka/kafka.properties: -------------------------------------------------------------------------------- 1 | broker.id=1 2 | advertised.listeners=SASL_PLAINTEXT://kafka:9092 3 | offsets.topic.replication.factor=1 4 | allow.everyone.if.no.acl.found=false 5 | zookeeper.connect=zookeeper:2181 6 | security.inter.broker.protocol=SASL_PLAINTEXT 7 | authorizer.class.name=io.confluent.kafka.security.authorizer.ConfluentServerAuthorizer 8 | log.dirs=/var/lib/kafka/data 9 | 
confluent.security.event.router.config={"routes":{"crn:///kafka=*/group=*":{"consume":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"}},"crn:///kafka=*/topic=*":{"produce":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"},"consume":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"}}},"destinations":{"topics":{"confluent-audit-log-events":{"retention_ms":7776000000}}},"default_topics":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"},"excluded_principals":["User:kafka","User:ANONYMOUS"]} 10 | listeners=SASL_PLAINTEXT://0.0.0.0:9092 11 | zookeeper.set.acl=true 12 | super.users=User:kafka 13 | offsets.topic.num.partitions=1 14 | sasl.enabled.mechanisms=SCRAM-SHA-256 15 | transaction.state.log.replication.factor=1 16 | sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256 17 | confluent.license.topic.replication.factor=1 18 | -%} 19 | 20 | -------------------------------------------------------------------------------- /auditlog/kafka/kafka.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | org.apache.kafka.common.security.scram.ScramLoginModule required 3 | username="kafka" 4 | password="kafka"; 5 | }; 6 | Client { 7 | org.apache.zookeeper.server.auth.DigestLoginModule required 8 | username="admin" 9 | password="password"; 10 | }; 11 | -------------------------------------------------------------------------------- /auditlog/kafka/log4j.properties: -------------------------------------------------------------------------------- 1 | 2 | log4j.rootLogger=INFO, stdout 3 | 4 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 5 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 7 | 8 | 9 | log4j.logger.kafka.authorizer.logger=WARN 10 | log4j.logger.kafka.log.LogCleaner=INFO 11 | 
log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG 12 | log4j.logger.kafka.controller=TRACE 13 | log4j.logger.kafka.network.RequestChannel$=WARN 14 | log4j.logger.kafka.request.logger=WARN 15 | log4j.logger.state.change.logger=TRACE 16 | log4j.logger.kafka=INFO 17 | -------------------------------------------------------------------------------- /auditlog/kafka/producer-user.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="producer" \ 5 | password="producerpass"; 6 | 7 | -------------------------------------------------------------------------------- /auditlog/kafka/tools-log4j.properties: -------------------------------------------------------------------------------- 1 | 2 | log4j.rootLogger=WARN, stderr 3 | 4 | log4j.appender.stderr=org.apache.log4j.ConsoleAppender 5 | log4j.appender.stderr.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n 7 | log4j.appender.stderr.Target=System.err -------------------------------------------------------------------------------- /auditlog/scripts/delete-records.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker exec kafka kafka-delete-records --bootstrap-server kafka:9092 \ 4 | --command-config /etc/kafka/producer-user.properties \ 5 | --offset-json-file /tmp/config/delete-records.json 6 | -------------------------------------------------------------------------------- /auditlog/scripts/describe-topics.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker exec kafka kafka-topics --bootstrap-server kafka:9092 --command-config /etc/kafka/kafka-user.properties --describe 4 | 
-------------------------------------------------------------------------------- /auditlog/scripts/explore-audit-topic.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | TOPIC="confluent-audit-log-events" 4 | docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9092 \ 5 | --consumer.config /etc/kafka/kafka-user.properties \ 6 | --topic $TOPIC --from-beginning 7 | -------------------------------------------------------------------------------- /auditlog/scripts/write-msg.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PWD=`pwd` 4 | topic=$1 5 | network="auditlog_default" 6 | 7 | USERNAME=producer 8 | PASSWORD=producerpass 9 | 10 | echo "Write messages to topic $1" 11 | 12 | docker run --network $network \ 13 | --volume $PWD/data/my_msgs.txt:/data/my_msgs.txt \ 14 | confluentinc/cp-kafkacat \ 15 | kafkacat -b kafka:9092 \ 16 | -t $topic \ 17 | -X security.protocol=SASL_PLAINTEXT -X sasl.mechanisms=SCRAM-SHA-256 -X sasl.username=$USERNAME -X sasl.password=$PASSWORD \ 18 | -P -l /data/my_msgs.txt 19 | -------------------------------------------------------------------------------- /auditlog/zookeeper/log4j.properties: -------------------------------------------------------------------------------- 1 | 2 | log4j.rootLogger=INFO, stdout 3 | 4 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 5 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 7 | 8 | -------------------------------------------------------------------------------- /auditlog/zookeeper/tools-log4j.properties: -------------------------------------------------------------------------------- 1 | 2 | log4j.rootLogger=WARN, stderr 3 | 4 | log4j.appender.stderr=org.apache.log4j.ConsoleAppender 5 | log4j.appender.stderr.layout=org.apache.log4j.PatternLayout 6 | 
log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n 7 | log4j.appender.stderr.Target=System.err -------------------------------------------------------------------------------- /auditlog/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | 2 | dataDir=/var/lib/zookeeper/data 3 | dataLogDir=/var/lib/zookeeper/log 4 | 5 | clientPort=2181 6 | 7 | 8 | -------------------------------------------------------------------------------- /auditlog/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | user_admin="password"; 4 | }; 5 | Client { 6 | org.apache.zookeeper.server.auth.DigestLoginModule required 7 | username="admin" 8 | password="password"; 9 | }; 10 | -------------------------------------------------------------------------------- /ca-builder-scripts/.gitignore: -------------------------------------------------------------------------------- 1 | tmp-certs/ 2 | stores 3 | legacy/ 4 | 5 | ## remove from git the generated CA files 6 | 7 | ca/ 8 | -------------------------------------------------------------------------------- /ca-builder-scripts/build-a-batch-of-certs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | input=$1 4 | while IFS= read -r line 5 | do 6 | fields=($(echo $line | tr "," "\n")) 7 | #./support-scripts/create-cert.sh ${fields[0]} ${fields[1]} 8 | echo "./support-scripts/create-cert.sh ${fields[0]} ${fields[1]}" 9 | done < "$input" 10 | -------------------------------------------------------------------------------- /ca-builder-scripts/configs/batch-of-certs.txt: -------------------------------------------------------------------------------- 1 | consumer,machine0.example.com 2 | producer,machine1.example.com 3 | kafka,machine2.example.com 4 | 
zookeeper,machine3.example.com 5 | -------------------------------------------------------------------------------- /ca-builder-scripts/configs/batch-of-stores.txt: -------------------------------------------------------------------------------- 1 | consumer,machine0.example.com 2 | producer,machine1.example.com 3 | kafka,machine2.example.com 4 | zookeeper,machine3.example.com 5 | -------------------------------------------------------------------------------- /ca-builder-scripts/configs/ca-config-vars: -------------------------------------------------------------------------------- 1 | DE 2 | Berlin 3 | Berlin 4 | Confluent Germany 5 | -------------------------------------------------------------------------------- /ca-builder-scripts/create-crl.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | DEFAULT_PASSWORD=${1:-confluent} 5 | 6 | if [ -z "${CA_ROOT_DIR+x}" ]; 7 | then 8 | CA_ROOT_DIR='.' 9 | fi 10 | 11 | source $CA_ROOT_DIR/utils/functions.sh 12 | 13 | (cd $CA_ROOT_DIR/ca; create_certificate_revokation_list ) 14 | -------------------------------------------------------------------------------- /ca-builder-scripts/create-pair-certs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | #HOSTNAME="www.example.com" 4 | #EXTENSION="server_cert" #usr_cert for client auth, server_cert for backend 5 | 6 | #HOSTNAME="my.kafka.consumer" 7 | #EXTENSION="usr_cert" 8 | set -e 9 | 10 | HOSTNAME=$1 11 | MACHINE=${2:-""} 12 | EXTENSION=${3:-server_cert} 13 | DEFAULT_PASSWORD=${4:-confluent} 14 | 15 | echo "Building a pair of certificates for $HOSTNAME using $EXTENSION" 16 | 17 | if [ -z "${CA_ROOT_DIR+x}" ]; 18 | then 19 | CA_ROOT_DIR='.' 
20 | fi 21 | 22 | ITERMEDIATE_CA_DIR=$CA_ROOT_DIR/ca/intermediate 23 | 24 | CERT_FILE="$ITERMEDIATE_CA_DIR/certs/$HOSTNAME.cert.pem" 25 | 26 | if test -f "$CERT_FILE"; then 27 | RED='\033[0;31m' 28 | NC='\033[0m' # No Color 29 | printf "${RED}Cert $CERT_FILE exist! exiting...${NC}" 30 | exit 1 31 | fi 32 | 33 | source $CA_ROOT_DIR/utils/functions.sh 34 | 35 | (cd $CA_ROOT_DIR; refresh_openssl_file "$CA_ROOT_DIR" "$ITERMEDIATE_CA_DIR" ) 36 | (cd $CA_ROOT_DIR/ca; generate_final_certificate "$MACHINE" ) 37 | -------------------------------------------------------------------------------- /ca-builder-scripts/del-cert.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | NAME=$1 4 | 5 | if [ -z "${CA_ROOT_DIR+x}" ]; 6 | then 7 | CA_ROOT_DIR='.' 8 | fi 9 | 10 | echo "Deleting CERT $NAME" 11 | 12 | rm "$CA_ROOT_DIR/ca/intermediate/private/$NAME.key.pem" 13 | rm "$CA_ROOT_DIR/ca/intermediate/certs/$NAME.cert.pem" 14 | rm "$CA_ROOT_DIR/ca/intermediate/csr/$NAME.csr.pem" 15 | -------------------------------------------------------------------------------- /ca-builder-scripts/revoke-cert.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CERT=$1 4 | DEFAULT_PASSWORD=${2:-confluent} 5 | 6 | if [ -z "${CA_ROOT_DIR+x}" ]; 7 | then 8 | CA_ROOT_DIR='.' 
9 | fi 10 | 11 | source $CA_ROOT_DIR/utils/functions.sh 12 | 13 | (cd $CA_ROOT_DIR/ca; revoke_cert $CERT ) 14 | -------------------------------------------------------------------------------- /ca-builder-scripts/setup-ca-with-intermediate-ca.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ## 4 | # This script builds a Certificate Authority of the form: 5 | # Root CA -> intermediate CA 6 | # 7 | # In the CA_ROOT_DIR, this script will create the necessary directory structures 8 | # and generate the certificates, all signed using the value provided as an 9 | # argument to this script, or confluent by default. 10 | ## 11 | 12 | DEFAULT_PASSWORD=${1:-confluent} 13 | export CA_ROOT_DIR=`pwd` 14 | 15 | echo -e "Building the CA root setup\n" 16 | 17 | ./utils/build-ca.sh $DEFAULT_PASSWORD 18 | 19 | echo -e "Building the intermediate CA setup:\n" 20 | 21 | ./utils/build-intermediate-ca.sh $DEFAULT_PASSWORD 22 | -------------------------------------------------------------------------------- /ca-builder-scripts/support-scripts/create-cert.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/expect -f 2 | 3 | proc slurp {file} { 4 | set fh [open $file r] 5 | set ret [read $fh] 6 | close $fh 7 | return $ret 8 | } 9 | 10 | proc create_certs {cert_name machine} { 11 | eval spawn ./create-pair-certs.sh $cert_name $machine 12 | } 13 | 14 | set timeout 20 15 | set configslurp [slurp configs/ca-config-vars] 16 | 17 | set lines [split $configslurp \n] 18 | set COUNTRY_NAME [lrange $lines 0 0] 19 | set STATE [lrange $lines 1 1] 20 | set LOCALITY [lrange $lines 2 2] 21 | set ORGANIZATION [lrange $lines 3 3] 22 | 23 | set cert_name [lindex $argv 0] 24 | set machine [lrange $argv 1 end] 25 | 26 | spawn ./create-pair-certs.sh $cert_name $machine 27 | 28 | ## Generating the data for the CA setup. 
29 | expect "Country Name (2 letter code)" 30 | send "$COUNTRY_NAME\r"; 31 | expect "State or Province Name" 32 | send "$STATE\r"; 33 | expect "Locality Name" 34 | send "$LOCALITY\r"; 35 | expect "Organization Name" 36 | send "$ORGANIZATION\r"; 37 | expect "Organizational Unit Name" 38 | send "\r"; 39 | expect "Common Name" 40 | send "$cert_name\r"; 41 | expect "Email Address" 42 | send "\r"; 43 | # Sign the certificate and commit 44 | expect "Sign the certificate?" 45 | send "y\r"; 46 | expect "1 out of 1 certificate requests certified, commit" 47 | send "y\r"; 48 | interact 49 | -------------------------------------------------------------------------------- /ca-builder-scripts/utils/build-ca.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | DEFAULT_PASSWORD=${1:-confluent} 4 | 5 | if [ -z "${CA_ROOT_DIR+x}" ]; 6 | then 7 | CA_ROOT_DIR='.' 8 | fi 9 | 10 | source $CA_ROOT_DIR/utils/functions.sh 11 | 12 | mkdir $CA_ROOT_DIR/ca; 13 | 14 | setup_ca_dir_structure "$CA_ROOT_DIR/ca" 15 | 16 | cp $CA_ROOT_DIR/configs/ca.config $CA_ROOT_DIR/ca/openssl.cnf 17 | 18 | (cd $CA_ROOT_DIR/ca; generate_ca_keys_and_certs ) 19 | 20 | ## Verify the CA certificate 21 | openssl x509 -noout -text -in $CA_ROOT_DIR/ca/certs/ca.cert.pem 22 | -------------------------------------------------------------------------------- /ca-builder-scripts/utils/build-intermediate-ca.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | DEFAULT_PASSWORD=${1:-confluent} 4 | 5 | if [ -z "${CA_ROOT_DIR+x}" ]; 6 | then 7 | CA_ROOT_DIR='.' 
8 | fi 9 | ITERMEDIATE_CA_DIR=$CA_ROOT_DIR/ca/intermediate 10 | 11 | source $CA_ROOT_DIR/utils/functions.sh 12 | 13 | mkdir -p $ITERMEDIATE_CA_DIR 14 | 15 | setup_intermediate_ca_dir_structure $ITERMEDIATE_CA_DIR 16 | 17 | cp $CA_ROOT_DIR/configs/intermediate-ca.config $ITERMEDIATE_CA_DIR/openssl.cnf 18 | 19 | (cd $ITERMEDIATE_CA_DIR; generate_intermediate_keys_and_certs) 20 | 21 | (cd $CA_ROOT_DIR/ca; sign_intermediate_cert_authority; verify_generate_intermediate_ca) 22 | (cd $CA_ROOT_DIR/ca; create_ca_chain) 23 | -------------------------------------------------------------------------------- /delegation_tokens/.gitignore: -------------------------------------------------------------------------------- 1 | certs/ 2 | -------------------------------------------------------------------------------- /delegation_tokens/ca.cnf: -------------------------------------------------------------------------------- 1 | [ policy_match ] 2 | countryName = match 3 | stateOrProvinceName = match 4 | organizationName = match 5 | organizationalUnitName = optional 6 | commonName = supplied 7 | emailAddress = optional 8 | 9 | [ req ] 10 | prompt = no 11 | distinguished_name = dn 12 | default_md = sha256 13 | default_bits = 4096 14 | x509_extensions = v3_ca 15 | 16 | [ dn ] 17 | countryName = UK 18 | organizationName = Confluent 19 | localityName = London 20 | commonName = kafka.confluent.local 21 | 22 | [ v3_ca ] 23 | subjectKeyIdentifier=hash 24 | basicConstraints = critical,CA:true 25 | authorityKeyIdentifier=keyid:always,issuer:always 26 | keyUsage = critical,keyCertSign,cRLSign 27 | -------------------------------------------------------------------------------- /delegation_tokens/client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | 
localityName = London 12 | commonName=kafka.confluent.local 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = clientAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=kafka.confluent.local 30 | -------------------------------------------------------------------------------- /delegation_tokens/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | zookeeper: 4 | build: zookeeper/ 5 | container_name: zookeeper 6 | hostname: zookeeper 7 | domainname: confluent.local 8 | restart: on-failure 9 | volumes: 10 | - ./certs/:/var/lib/secret 11 | environment: 12 | - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf 13 | networks: 14 | default: 15 | aliases: 16 | - zookeeper.confluent.local 17 | 18 | 19 | kafka: 20 | build: kafka/ 21 | container_name: kafka 22 | hostname: kafka 23 | domainname: confluent.local 24 | depends_on: 25 | - zookeeper 26 | restart: on-failure 27 | volumes: 28 | - ./certs/:/var/lib/secret 29 | environment: 30 | - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf 31 | networks: 32 | default: 33 | aliases: 34 | - kafka.confluent.local 35 | ports: 36 | - "9093:9093" 37 | 38 | volumes: 39 | secret: {} 40 | 41 | networks: 42 | default: 43 | -------------------------------------------------------------------------------- /delegation_tokens/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. 
Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-1.8.0-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. Configure Kafka 15 | COPY server.properties /etc/kafka/server.properties 16 | COPY kafka_server_jaas.conf /etc/kafka/kafka_server_jaas.conf 17 | COPY consumer.properties /etc/kafka/consumer.properties 18 | COPY create_client_properties.sh /etc/kafka/create_client_properties.sh 19 | 20 | EXPOSE 9093 21 | 22 | CMD kafka-server-start /etc/kafka/server.properties 23 | -------------------------------------------------------------------------------- /delegation_tokens/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /delegation_tokens/kafka/consumer.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | # Configure SASL_SSL if SSL encryption is enabled, otherwise configure SASL_PLAINTEXT 3 | security.protocol=SASL_SSL 4 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 5 | username="kafka" \ 6 | password="kafka"; 7 | ssl.truststore.location=/var/lib/secret/truststore.jks 8 | ssl.truststore.password=test1234 9 | ssl.keystore.location=/var/lib/secret/client.keystore.jks 10 | ssl.keystore.password=test1234 11 | 
-------------------------------------------------------------------------------- /delegation_tokens/kafka/create_client_properties.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -u 5 | 6 | RESPONSE=$(kafka-delegation-tokens \ 7 | --bootstrap-server kafka.confluent.local:9093 \ 8 | --create \ 9 | --command-config /etc/kafka/consumer.properties \ 10 | --max-life-time-period -1 | tail -1) 11 | 12 | TOKENID=$(echo $RESPONSE | cut -d " " -f1) 13 | HMAC=$(echo $RESPONSE | cut -d " " -f2) 14 | 15 | echo "Received token id: $TOKENID" 16 | echo "Received message authentication code: $HMAC" 17 | 18 | echo 'sasl.mechanism=SCRAM-SHA-256 19 | # Configure SASL_SSL if SSL encryption is enabled, otherwise configure SASL_PLAINTEXT 20 | security.protocol=SASL_SSL 21 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 22 | username="'$TOKENID'" \ 23 | password="'$HMAC'" \ 24 | tokenauth="true"; 25 | ssl.truststore.location=/var/lib/secret/truststore.jks 26 | ssl.truststore.password=test1234 27 | ssl.keystore.location=/var/lib/secret/client.keystore.jks 28 | ssl.keystore.password=test1234' > /tmp/delegation_token_client.properties 29 | 30 | -------------------------------------------------------------------------------- /delegation_tokens/kafka/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | // Username and password are used by the broker to initiate connections to other brokers 2 | // admin is another user allowed to connect to the broker. 3 | 4 | KafkaServer { 5 | org.apache.kafka.common.security.scram.ScramLoginModule required 6 | username="kafka" 7 | password="kafka" 8 | user_admin="admin"; 9 | }; 10 | 11 | // The client section is used by kafka to connect to zookeeper. 12 | // This must match the zookeeper jaas configuration. 
13 | Client { 14 | org.apache.zookeeper.server.auth.DigestLoginModule required 15 | username="kafka" 16 | password="kafka"; 17 | }; 18 | -------------------------------------------------------------------------------- /delegation_tokens/kafka/server.properties: -------------------------------------------------------------------------------- 1 | ############################# Server Basics ############################# 2 | broker.id=0 3 | listeners=SASL_SSL://kafka.confluent.local:9093 4 | advertised.listeners=SASL_SSL://kafka.confluent.local:9093 5 | log.dirs=/var/lib/kafka 6 | offsets.topic.replication.factor=1 7 | transaction.state.log.replication.factor=1 8 | transaction.state.log.min.isr=1 9 | zookeeper.connect=zookeeper.confluent.local:2181 10 | 11 | # TLS Configuration 12 | security.inter.broker.protocol=SASL_SSL 13 | ssl.truststore.location=/var/lib/secret/truststore.jks 14 | ssl.truststore.password=test1234 15 | ssl.keystore.location=/var/lib/secret/server.keystore.jks 16 | ssl.keystore.password=test1234 17 | ssl.client.auth=required 18 | authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer 19 | delegation.token.master.key=foo 20 | sasl.enabled.mechanisms=SCRAM-SHA-256 21 | sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256 22 | super.users=User:kafka 23 | -------------------------------------------------------------------------------- /delegation_tokens/server.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=kafka.confluent.local 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 
| basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = serverAuth, clientAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=kafka.confluent.local 30 | -------------------------------------------------------------------------------- /delegation_tokens/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-1.8.0-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. Configure zookeeper 15 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 16 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf 17 | 18 | EXPOSE 2181 19 | 20 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 21 | -------------------------------------------------------------------------------- /delegation_tokens/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /delegation_tokens/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | 
dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider 5 | requireClientAuthScheme=sasl 6 | 7 | -------------------------------------------------------------------------------- /delegation_tokens/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | user_kafka="kafka"; 4 | }; 5 | -------------------------------------------------------------------------------- /kafka-connect-mtls/.gitignore: -------------------------------------------------------------------------------- 1 | connect/secrets/client-*.pem 2 | connect/secrets/client.p12 3 | -------------------------------------------------------------------------------- /kafka-connect-mtls/connect/config/ca.cnf: -------------------------------------------------------------------------------- 1 | [ policy_match ] 2 | countryName = match 3 | stateOrProvinceName = match 4 | organizationName = match 5 | organizationalUnitName = optional 6 | commonName = supplied 7 | emailAddress = optional 8 | 9 | [ req ] 10 | prompt = no 11 | distinguished_name = dn 12 | default_md = sha256 13 | default_bits = 4096 14 | x509_extensions = v3_ca 15 | 16 | [ dn ] 17 | countryName = DE 18 | organizationName = Confluent 19 | localityName = Berlin 20 | commonName = connect.confluent.local 21 | 22 | [ v3_ca ] 23 | subjectKeyIdentifier=hash 24 | basicConstraints = critical,CA:true 25 | authorityKeyIdentifier=keyid:always,issuer:always 26 | keyUsage = critical,keyCertSign,cRLSign 27 | -------------------------------------------------------------------------------- /kafka-connect-mtls/connect/config/client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = 
v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=connect.client 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = clientAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=localhost 30 | -------------------------------------------------------------------------------- /kafka-connect-mtls/connect/secrets/server.keystore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/kafka-connect-mtls/connect/secrets/server.keystore -------------------------------------------------------------------------------- /kafka-connect-mtls/connect/secrets/server.truststore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/kafka-connect-mtls/connect/secrets/server.truststore -------------------------------------------------------------------------------- /kafka-connect-mtls/up: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker-compose up -d 4 | 5 | echo "to verify the connection use the check-ssl-client-auth.sh script" 6 | -------------------------------------------------------------------------------- /kerberos-multi-node/README.md: -------------------------------------------------------------------------------- 1 | # Kerberos multi-node deployment example 2 | 3 | This example shows how-to deploy multiple 
kafka nodes in an example kerberos enabled environment. 4 | 5 | The only thing that's different from your normal environment is that this example uses a different principal for each zookeeper client. 6 | 7 | The https://issues.apache.org/jira/browse/KAFKA-7710 Jira ticket contains more information. 8 | TLDR; we have to set two configs in the zookeeper.properties to make this work 9 | 10 | ``` 11 | kerberos.removeHostFromPrincipal = true 12 | kerberos.removeRealmFromPrincipal = false 13 | ``` 14 | 15 | The first removes the hostname from the principal name. 16 | So that anyone authenticated with the principal 'kafka/*@REALM' is allowed by ZK ACLs. 17 | -------------------------------------------------------------------------------- /kerberos-multi-node/down: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | DESTROY=no 3 | if [ ! -f "${PWD}/docker-compose.yml" ]; then 4 | echo "No docker-compose found. Exiting." 5 | exit 2 6 | fi 7 | 8 | usage() 9 | { 10 | echo "Usage: $0 [-h] [-d]" 11 | echo "-d destroy images. They will be rebuilt next time" 12 | exit 2 13 | } 14 | 15 | destroy() 16 | { 17 | docker-compose rm --force 18 | } 19 | 20 | stop_docker-compose() 21 | { 22 | docker-compose stop 23 | } 24 | 25 | # Should use getopts here but, why? 26 | if [[ "${1}" == "-h" ]]; then 27 | usage 28 | exit 2 29 | fi 30 | 31 | if [[ "${1}" == "-d" ]]; then 32 | echo "Stopping and destroying containers" 33 | DESTROY=yes 34 | fi 35 | 36 | stop_docker-compose 37 | if [[ $? != 0 ]]; then 38 | echo "Stopping the docker-compose failed. 
Exiting for manual cleanup" 39 | echo "I suggest 'docker-compose ps'" 40 | exit 2 41 | fi 42 | 43 | if [[ "${DESTROY}" == "yes" ]]; then 44 | destroy 45 | fi 46 | 47 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. Install zookeeper and kafka 15 | RUN yum install -y java-11-openjdk 16 | RUN yum install -y confluent-platform-2.12 17 | RUN yum install -y confluent-control-center 18 | 19 | # 3. 
Configure Kafka for Kerberos 20 | RUN yum install -y krb5-workstation krb5-libs 21 | COPY server.properties /etc/kafka/server.properties 22 | COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf 23 | COPY consumer.properties /etc/kafka/consumer.properties 24 | 25 | EXPOSE 9093 26 | 27 | CMD kafka-server-start /etc/kafka/server.properties 28 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/6.0/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/6.0 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka/consumer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.kerberos.service.name=kafka 4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 5 | useTicketCache=true; 6 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka/kafka.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/var/lib/secret/kafka.key" 6 | principal="kafka/kafka.kerberos-multi-node_default@TEST.CONFLUENT.IO"; 7 | }; 8 | 9 | KafkaClient { 10 | com.sun.security.auth.module.Krb5LoginModule required 11 | useKeyTab=true 12 | storeKey=true 13 | keyTab="/var/lib/secret/kafka.key" 14 | 
principal="admin@TEST.CONFLUENT.IO"; 15 | }; 16 | 17 | Client { 18 | com.sun.security.auth.module.Krb5LoginModule required 19 | useKeyTab=true 20 | storeKey=true 21 | useTicketCache=false 22 | keyTab="/var/lib/secret/kafka.key" 23 | principal="kafka@TEST.CONFLUENT.IO"; 24 | }; 25 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka1/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. Install zookeeper and kafka 15 | RUN yum install -y java-11-openjdk 16 | RUN yum install -y confluent-platform-2.12 17 | RUN yum install -y confluent-control-center 18 | 19 | # 3. 
Configure Kafka for Kerberos 20 | RUN yum install -y krb5-workstation krb5-libs 21 | COPY server.properties /etc/kafka/server.properties 22 | COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf 23 | COPY consumer.properties /etc/kafka/consumer.properties 24 | 25 | EXPOSE 9093 26 | 27 | CMD kafka-server-start /etc/kafka/server.properties 28 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka1/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/6.0/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/6.0 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka1/consumer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.kerberos.service.name=kafka 4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 5 | useTicketCache=true; 6 | -------------------------------------------------------------------------------- /kerberos-multi-node/kafka1/kafka.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/var/lib/secret/kafka.key" 6 | principal="kafka/kafka1.kerberos-multi-node_default@TEST.CONFLUENT.IO"; 7 | }; 8 | 9 | KafkaClient { 10 | com.sun.security.auth.module.Krb5LoginModule required 11 | useKeyTab=true 12 | storeKey=true 13 | keyTab="/var/lib/secret/kafka.key" 14 | 
principal="admin@TEST.CONFLUENT.IO"; 15 | }; 16 | 17 | Client { 18 | com.sun.security.auth.module.Krb5LoginModule required 19 | useKeyTab=true 20 | storeKey=true 21 | useTicketCache=false 22 | keyTab="/var/lib/secret/kafka.key" 23 | principal="kafka@TEST.CONFLUENT.IO"; 24 | }; 25 | -------------------------------------------------------------------------------- /kerberos-multi-node/kdc/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Installing Kerberos server, admin and client 10 | RUN yum install -y krb5-server krb5-libs 11 | RUN yum install -y krb5-workstation krb5-libs 12 | 13 | # 2. Configuring Kerberos and KDC 14 | COPY krb5.conf /etc/krb5.conf 15 | RUN mkdir /var/log/kerberos 16 | RUN mkdir /etc/kdc 17 | RUN mkdir -p /var/kerberos/krb5kdc/ 18 | RUN ln -s /etc/krb5.conf /etc/kdc/krb5.conf 19 | 20 | EXPOSE 88 21 | 22 | RUN kdb5_util -P confluent -r TEST.CONFLUENT.IO create -s 23 | 24 | CMD /usr/sbin/krb5kdc -n 25 | -------------------------------------------------------------------------------- /kerberos-multi-node/kdc/krb5.conf: -------------------------------------------------------------------------------- 1 | [libdefaults] 2 | default_realm = TEST.CONFLUENT.IO 3 | ticket_lifetime = 24h 4 | renew_lifetime = 7d 5 | forwardable = true 6 | rdns = false 7 | dns_lookup_kdc = no 8 | dns_lookup_realm = no 9 | 10 | [realms] 11 | TEST.CONFLUENT.IO = { 12 | kdc = kdc 13 | admin_server = kadmin 14 | } 15 | 16 | [domain_realm] 17 | .test.confluent.io = TEST.CONFLUENT.IO 18 | test.confluent.io = TEST.CONFLUENT.IO 19 | kerberos_default = TEST.CONFLUENT.IO 20 | .kerberos_default = TEST.CONFLUENT.IO 
21 | 22 | [logging] 23 | kdc = FILE:/var/log/kerberos/krb5kdc.log 24 | admin_server = FILE:/var/log/kerberos/kadmin.log 25 | default = FILE:/var/log/kerberos/krb5lib.log 26 | -------------------------------------------------------------------------------- /kerberos-multi-node/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. Install zookeeper and kafka 15 | RUN yum install -y java-11-openjdk 16 | RUN yum install -y confluent-platform-2.12 17 | 18 | # 3. 
Configure zookeeper for Kerberos 19 | RUN yum install -y krb5-workstation krb5-libs 20 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 21 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf 22 | 23 | EXPOSE 2181 24 | 25 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 26 | -------------------------------------------------------------------------------- /kerberos-multi-node/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/6.0/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/6.0 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /kerberos-multi-node/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | useTicketCache=false 6 | keyTab="/var/lib/secret/kafka.key" 7 | principal="zkservice/zookeeper.kerberos-multi-node_default@TEST.CONFLUENT.IO"; 8 | }; 9 | 10 | Client { 11 | com.sun.security.auth.module.Krb5LoginModule required 12 | useTicketCache=true; 13 | }; 14 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/client/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos7 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. 
# 3. Install Kerberos libraries 15 | RUN yum install -y krb5-workstation krb5-libs 16 | 17 | # 4. Copy in required settings for client access to Kafka 18 | COPY consumer.properties /etc/kafka/consumer.properties 19 | COPY producer.properties /etc/kafka/producer.properties 20 | COPY command.properties /etc/kafka/command.properties 21 | COPY scram.properties /etc/kafka/scram.properties 22 | COPY client.sasl.jaas.config /etc/kafka/client_jaas.conf 23 | 24 | ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/client_jaas.conf
14 | */ 15 | KafkaClient { 16 | com.sun.security.auth.module.Krb5LoginModule required 17 | useTicketCache=true; 18 | }; 19 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/client/command.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 4 | serviceName=kafka \ 5 | useTicketCache=true; 6 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/client/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/client/consumer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.kerberos.service.name=kafka 4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 5 | useTicketCache=true; 6 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/client/producer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.kerberos.service.name=kafka 4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 5 | useTicketCache=true; 6 | 
-------------------------------------------------------------------------------- /kerberos-multi-sasl/client/scram.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-512 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="kafka" \ 5 | password="kafka"; 6 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. Install zookeeper and kafka 15 | RUN yum install -y java-11-openjdk 16 | RUN yum install -y confluent-platform-2.12 17 | RUN yum install -y confluent-control-center 18 | 19 | # 3. 
Configure Kafka for Kerberos 20 | RUN yum install -y krb5-workstation krb5-libs 21 | COPY server.properties /etc/kafka/server.properties 22 | COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf 23 | 24 | EXPOSE 9093 25 | 26 | ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf 27 | 28 | CMD kafka-server-start /etc/kafka/server.properties 29 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/kafka/kafka.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | /* 2 | * The service principal 3 | */ 4 | /* 5 | KafkaServer { 6 | com.sun.security.auth.module.Krb5LoginModule required 7 | useKeyTab=true 8 | storeKey=true 9 | keyTab="/var/lib/secret/kafka.key" 10 | principal="kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO"; 11 | }; 12 | */ 13 | 14 | /* 15 | * Zookeeper client principal 16 | */ 17 | Client { 18 | com.sun.security.auth.module.Krb5LoginModule required 19 | useKeyTab=true 20 | storeKey=true 21 | useTicketCache=false 22 | keyTab="/var/lib/secret/zookeeper-client.key" 23 | principal="zkclient@TEST.CONFLUENT.IO"; 24 | }; 25 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/kdc/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM 
centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Installing Kerberos server, admin and client 10 | RUN yum install -y krb5-server krb5-libs 11 | RUN yum install -y krb5-workstation krb5-libs 12 | 13 | # 2. Configuring Kerberos and KDC 14 | COPY krb5.conf /etc/krb5.conf 15 | RUN mkdir /var/log/kerberos 16 | RUN mkdir /etc/kdc 17 | RUN mkdir -p /var/kerberos/krb5kdc/ 18 | RUN ln -s /etc/krb5.conf /etc/kdc/krb5.conf 19 | 20 | EXPOSE 88 21 | 22 | RUN kdb5_util -P confluent -r TEST.CONFLUENT.IO create -s 23 | 24 | CMD /usr/sbin/krb5kdc -n 25 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/kdc/krb5.conf: -------------------------------------------------------------------------------- 1 | [libdefaults] 2 | default_realm = TEST.CONFLUENT.IO 3 | ticket_lifetime = 24h 4 | renew_lifetime = 7d 5 | forwardable = true 6 | rdns = false 7 | dns_lookup_kdc = no 8 | dns_lookup_realm = no 9 | 10 | [realms] 11 | TEST.CONFLUENT.IO = { 12 | kdc = kdc 13 | admin_server = kadmin 14 | } 15 | 16 | [domain_realm] 17 | .test.confluent.io = TEST.CONFLUENT.IO 18 | test.confluent.io = TEST.CONFLUENT.IO 19 | kerberos-demo.local = TEST.CONFLUENT.IO 20 | .kerberos-demo.local = TEST.CONFLUENT.IO 21 | 22 | [logging] 23 | kdc = FILE:/var/log/kerberos/krb5kdc.log 24 | admin_server = FILE:/var/log/kerberos/kadmin.log 25 | default = FILE:/var/log/kerberos/krb5lib.log 26 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 0. 
Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. Install zookeeper and kafka 15 | RUN yum install -y java-11-openjdk 16 | RUN yum install -y confluent-platform-2.12 17 | 18 | # 3. Configure zookeeper for Kerberos 19 | RUN yum install -y krb5-workstation krb5-libs 20 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 21 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf 22 | 23 | EXPOSE 2181 24 | 25 | ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf 26 | 27 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 28 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos-multi-sasl/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider 5 | zookeeper.allowSaslFailedClients=false 6 
# 3. Install Kerberos libraries
ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/client_jaas.conf
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos/client/consumer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.kerberos.service.name=kafka 4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 5 | useTicketCache=true; 6 | -------------------------------------------------------------------------------- /kerberos/client/producer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka:9093 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.kerberos.service.name=kafka 4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \ 5 | useTicketCache=true; 6 | -------------------------------------------------------------------------------- /kerberos/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. Install zookeeper and kafka 15 | RUN yum install -y java-11-openjdk 16 | RUN yum install -y confluent-kafka-2.12 17 | RUN yum install -y confluent-control-center 18 | 19 | # 3. 
Configure Kafka for Kerberos 20 | RUN yum install -y krb5-workstation krb5-libs 21 | COPY server.properties /etc/kafka/server.properties 22 | COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf 23 | 24 | EXPOSE 9093 25 | 26 | ENV KAFKA_OPTS="-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dzookeeper.sasl.client.username=zkservice" 27 | 28 | CMD kafka-server-start /etc/kafka/server.properties 29 | -------------------------------------------------------------------------------- /kerberos/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos/kafka/kafka.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | /* 2 | * The service principal 3 | */ 4 | KafkaServer { 5 | com.sun.security.auth.module.Krb5LoginModule required 6 | useKeyTab=true 7 | storeKey=true 8 | keyTab="/var/lib/secret/kafka.key" 9 | principal="kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO"; 10 | }; 11 | 12 | /* 13 | * Zookeeper client principal 14 | */ 15 | Client { 16 | com.sun.security.auth.module.Krb5LoginModule required 17 | useKeyTab=true 18 | storeKey=true 19 | useTicketCache=false 20 | keyTab="/var/lib/secret/zookeeper-client.key" 21 | principal="zkclient@TEST.CONFLUENT.IO"; 22 | }; 23 | -------------------------------------------------------------------------------- /kerberos/kdc/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM 
centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Installing Kerberos server, admin and client 10 | RUN yum install -y krb5-server krb5-libs 11 | RUN yum install -y krb5-workstation krb5-libs 12 | 13 | # 2. Configuring Kerberos and KDC 14 | COPY krb5.conf /etc/krb5.conf 15 | RUN mkdir /var/log/kerberos 16 | RUN mkdir /etc/kdc 17 | RUN mkdir -p /var/kerberos/krb5kdc/ 18 | RUN ln -s /etc/krb5.conf /etc/kdc/krb5.conf 19 | 20 | EXPOSE 88 21 | 22 | RUN kdb5_util -P confluent -r TEST.CONFLUENT.IO create -s 23 | 24 | CMD /usr/sbin/krb5kdc -n 25 | -------------------------------------------------------------------------------- /kerberos/kdc/krb5.conf: -------------------------------------------------------------------------------- 1 | [libdefaults] 2 | default_realm = TEST.CONFLUENT.IO 3 | forwardable = true 4 | rdns = false 5 | dns_lookup_kdc = no 6 | dns_lookup_realm = no 7 | 8 | [realms] 9 | TEST.CONFLUENT.IO = { 10 | kdc = kdc 11 | admin_server = kadmin 12 | } 13 | 14 | [domain_realm] 15 | .test.confluent.io = TEST.CONFLUENT.IO 16 | test.confluent.io = TEST.CONFLUENT.IO 17 | kerberos-demo.local = TEST.CONFLUENT.IO 18 | .kerberos-demo.local = TEST.CONFLUENT.IO 19 | 20 | [logging] 21 | kdc = FILE:/var/log/kerberos/krb5kdc.log 22 | admin_server = FILE:/var/log/kerberos/kadmin.log 23 | default = FILE:/var/log/kerberos/krb5lib.log 24 | -------------------------------------------------------------------------------- /kerberos/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 0. 
Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. Install zookeeper and kafka 15 | RUN yum install -y java-11-openjdk 16 | RUN yum install -y confluent-kafka-2.12 17 | 18 | # 3. Configure zookeeper for Kerberos 19 | RUN yum install -y krb5-workstation krb5-libs 20 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 21 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf 22 | 23 | EXPOSE 2181 24 | 25 | ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf 26 | 27 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 28 | -------------------------------------------------------------------------------- /kerberos/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /kerberos/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider 5 | zookeeper.allowSaslFailedClients=false 6 | 
requireClientAuthScheme=sasl 7 | -------------------------------------------------------------------------------- /kerberos/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | keyTab="/var/lib/secret/zookeeper.key" 5 | storeKey=true 6 | useTicketCache=false 7 | principal="zkservice/zookeeper.kerberos-demo.local@TEST.CONFLUENT.IO"; 8 | }; 9 | 10 | Client { 11 | com.sun.security.auth.module.Krb5LoginModule required 12 | useTicketCache=true; 13 | }; 14 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/ensure: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2020 Confluent Inc. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | . /etc/confluent/docker/bash-config 18 | 19 | export KAFKA_DATA_DIRS=${KAFKA_DATA_DIRS:-"/var/lib/kafka/data"} 20 | echo "===> Check if $KAFKA_DATA_DIRS is writable ..." 
21 | dub path "$KAFKA_DATA_DIRS" writable 22 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/kafka.properties.template: -------------------------------------------------------------------------------- 1 | {% set excluded_props = ['KAFKA_VERSION', 2 | 'KAFKA_HEAP_OPTS' 3 | 'KAFKA_LOG4J_OPTS', 4 | 'KAFKA_OPTS', 5 | 'KAFKA_JMX_OPTS', 6 | 'KAFKA_JVM_PERFORMANCE_OPTS', 7 | 'KAFKA_GC_LOG_OPTS', 8 | 'KAFKA_LOG4J_ROOT_LOGLEVEL', 9 | 'KAFKA_LOG4J_LOGGERS', 10 | 'KAFKA_TOOLS_LOG4J_LOGLEVEL', 11 | 'KAFKA_ZOOKEEPER_CLIENT_CNXN_SOCKET'] 12 | -%} 13 | 14 | {# properties that don't fit the standard format #} 15 | {% set other_props = { 16 | 'KAFKA_ZOOKEEPER_CLIENT_CNXN_SOCKET' : 'zookeeper.clientCnxnSocket' 17 | } -%} 18 | 19 | {% set kafka_props = env_to_props('KAFKA_', '', exclude=excluded_props) -%} 20 | {% for name, value in kafka_props.items() -%} 21 | {{name}}={{value}} 22 | {% endfor -%} 23 | 24 | {% for k, property in other_props.items() -%} 25 | {% if env.get(k) != None -%} 26 | {{property}}={{env[k]}} 27 | {% endif -%} 28 | {% endfor -%} 29 | 30 | {% set confluent_support_props = env_to_props('CONFLUENT_SUPPORT_', 'confluent.support.') -%} 31 | {% for name, value in confluent_support_props.items() -%} 32 | {{name}}={{value}} 33 | {% endfor -%} 34 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/log4j.properties.template: -------------------------------------------------------------------------------- 1 | 2 | log4j.rootLogger={{ env["KAFKA_LOG4J_ROOT_LOGLEVEL"] | default('INFO') }}, stdout 3 | 4 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 5 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n 7 | 8 | {% set loggers = { 9 | 'kafka': 'INFO', 10 | 'kafka.network.RequestChannel$': 'WARN', 
11 | 'kafka.producer.async.DefaultEventHandler': 'DEBUG', 12 | 'kafka.request.logger': 'WARN', 13 | 'kafka.controller': 'TRACE', 14 | 'kafka.log.LogCleaner': 'INFO', 15 | 'state.change.logger': 'TRACE', 16 | 'kafka.authorizer.logger': 'WARN' 17 | } -%} 18 | 19 | 20 | {% if env['KAFKA_LOG4J_LOGGERS'] %} 21 | {% set loggers = parse_log4j_loggers(env['KAFKA_LOG4J_LOGGERS'], loggers) %} 22 | {% endif %} 23 | 24 | {% for logger,loglevel in loggers.items() %} 25 | log4j.logger.{{logger}}={{loglevel}} 26 | {% endfor %} 27 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright 2016 Confluent Inc. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | . /etc/confluent/docker/bash-config 18 | 19 | # Set environment values if they exist as arguments 20 | if [ $# -ne 0 ]; then 21 | echo "===> Overriding env params with args ..." 22 | for var in "$@" 23 | do 24 | export "$var" 25 | done 26 | fi 27 | 28 | echo "===> User" 29 | id 30 | 31 | echo "===> Configuring ..." 32 | /etc/confluent/docker/configure 33 | 34 | echo "===> Running preflight checks ... " 35 | /etc/confluent/docker/ensure 36 | 37 | echo "===> Launching ... 
" 38 | exec /etc/confluent/docker/launch 39 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/tools-log4j.properties.template: -------------------------------------------------------------------------------- 1 | 2 | log4j.rootLogger={{ env["KAFKA_TOOLS_LOG4J_LOGLEVEL"] | default('WARN') }}, stderr 3 | 4 | log4j.appender.stderr=org.apache.log4j.ConsoleAppender 5 | log4j.appender.stderr.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n 7 | log4j.appender.stderr.Target=System.err 8 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/requirements.txt: -------------------------------------------------------------------------------- 1 | git+https://github.com/confluentinc/confluent-docker-utils@v0.0.32 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | 4 | setuptools.setup( 5 | name='kafka-tests', 6 | version='0.0.1', 7 | author="Confluent, Inc.", 8 | author_email="core-kafka-eng@confluent.io", 9 | description='Kafka docker image tests', 10 | url="https://github.com/confluentinc/kafka-images", 11 | dependency_links=open("requirements.txt").read().split("\n"), 12 | packages=['test'], 13 | include_package_data=True, 14 | python_requires='>=2.7', 15 | setup_requires=['setuptools-git'], 16 | ) 17 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged.consumer.ssl.config: -------------------------------------------------------------------------------- 1 | group.id=ssl-bridged 2 | ssl.truststore.location=/etc/kafka/secrets/kafka.consumer.truststore.jks 3 | 
ssl.truststore.password=confluent 4 | 5 | ssl.keystore.location=/etc/kafka/secrets/kafka.consumer.keystore.jks 6 | ssl.keystore.password=confluent 7 | ssl.key.password=confluent 8 | 9 | security.protocol=SSL 10 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged.consumer.ssl.sasl.config: -------------------------------------------------------------------------------- 1 | group.id=ssl-sasl-bridged 2 | ssl.truststore.location=/etc/kafka/secrets/kafka.consumer.truststore.jks 3 | ssl.truststore.password=confluent 4 | 5 | ssl.keystore.location=/etc/kafka/secrets/kafka.consumer.keystore.jks 6 | ssl.keystore.password=confluent 7 | ssl.key.password=confluent 8 | 9 | security.protocol=SASL_SSL 10 | sasl.mechanism=GSSAPI 11 | sasl.kerberos.service.name=kafka 12 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged.producer.ssl.config: -------------------------------------------------------------------------------- 1 | ssl.truststore.location=/etc/kafka/secrets/kafka.producer.truststore.jks 2 | ssl.truststore.password=confluent 3 | 4 | ssl.keystore.location=/etc/kafka/secrets/kafka.producer.keystore.jks 5 | ssl.keystore.password=confluent 6 | ssl.key.password=confluent 7 | 8 | security.protocol=SSL 9 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged.producer.ssl.sasl.config: -------------------------------------------------------------------------------- 1 | ssl.truststore.location=/etc/kafka/secrets/kafka.producer.truststore.jks 2 | ssl.truststore.password=confluent 3 | 4 | ssl.keystore.location=/etc/kafka/secrets/kafka.producer.keystore.jks 5 | ssl.keystore.password=confluent 6 | ssl.key.password=confluent 7 | 8 | security.protocol=SASL_SSL 9 | sasl.mechanism=GSSAPI 10 | 
sasl.kerberos.service.name=kafka 11 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_broker1_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/bridged_broker1.keytab" 6 | principal="kafka/kafka-sasl-ssl-1@TEST.CONFLUENT.IO"; 7 | }; 8 | 9 | KafkaClient { 10 | com.sun.security.auth.module.Krb5LoginModule required 11 | useKeyTab=true 12 | storeKey=true 13 | keyTab="/etc/kafka/secrets/bridged_broker1.keytab" 14 | principal="kafka/kafka-sasl-ssl-1@TEST.CONFLUENT.IO"; 15 | }; 16 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_broker2_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/bridged_broker2.keytab" 6 | principal="kafka/kafka-sasl-ssl-2@TEST.CONFLUENT.IO"; 7 | }; 8 | 9 | KafkaClient { 10 | com.sun.security.auth.module.Krb5LoginModule required 11 | useKeyTab=true 12 | storeKey=true 13 | keyTab="/etc/kafka/secrets/bridged_broker2.keytab" 14 | principal="kafka/kafka-sasl-ssl-2@TEST.CONFLUENT.IO"; 15 | }; 16 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_broker3_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/bridged_broker3.keytab" 6 | principal="kafka/kafka-sasl-ssl-3@TEST.CONFLUENT.IO"; 7 | }; 8 | 9 | KafkaClient { 
10 | com.sun.security.auth.module.Krb5LoginModule required 11 | useKeyTab=true 12 | storeKey=true 13 | keyTab="/etc/kafka/secrets/bridged_broker3.keytab" 14 | principal="kafka/kafka-sasl-ssl-3@TEST.CONFLUENT.IO"; 15 | }; 16 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_consumer_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaClient { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/bridged_consumer.keytab" 6 | principal="bridged_consumer/kafka-sasl-ssl-consumer@TEST.CONFLUENT.IO"; 7 | }; 8 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_krb.conf: -------------------------------------------------------------------------------- 1 | [logging] 2 | default = FILE:/var/log/kerberos/krb5libs.log 3 | kdc = FILE:/var/log/kerberos/krb5kdc.log 4 | admin_server = FILE:/var/log/kerberos/kadmind.log 5 | 6 | [libdefaults] 7 | default_realm = TEST.CONFLUENT.IO 8 | dns_lookup_realm = false 9 | dns_lookup_kdc = false 10 | ticket_lifetime = 24h 11 | renew_lifetime = 7d 12 | forwardable = true 13 | # WARNING: We use weaker key types to simplify testing as stronger key types 14 | # require the enhanced security JCE policy file to be installed. You should 15 | # NOT run with this configuration in production or any real environment. You 16 | # have been warned. 
17 | default_tkt_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1 18 | default_tgs_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1 19 | permitted_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1 20 | 21 | [realms] 22 | TEST.CONFLUENT.IO = { 23 | kdc = kerberos 24 | admin_server = kerberos 25 | } 26 | 27 | [domain_realm] 28 | .test.confluent.io = TEST.CONFLUENT.IO 29 | test.confluent.io = TEST.CONFLUENT.IO 30 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_producer_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaClient { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/bridged_producer.keytab" 6 | principal="bridged_producer/kafka-sasl-ssl-producer@TEST.CONFLUENT.IO"; 7 | }; 8 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker1-ca1-signed.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIC0jCCAjsCCQC4Ge6Xmxv2ajANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj 3 | YTEudGVzdC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNP 4 | TkZMVUVOVDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMB4XDTE2MDcw 5 | OTE4MTQyOVoXDTQzMTEyNDE4MTQyOVowdDELMAkGA1UEBhMCVVMxCzAJBgNVBAgT 6 | AkNhMREwDwYDVQQHEwhQYWxvQWx0bzESMBAGA1UEChMJQ09ORkxVRU5UMQ0wCwYD 7 | VQQLEwRURVNUMSIwIAYDVQQDExlicm9rZXIxLnRlc3QuY29uZmx1ZW50LmlvMIIB 8 | IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoxhAcghALuWHLZtKFjSKoA6z 9 | EUE5djS4iudQ7hHLV68JReMWJR4fO2OfsZHKNo8uIzF836SgH0ZYepmt0PtRLspi 10 | kKmSwlJQjBmB9/JSOnUuWX53DxWaIKZUaB/OwdxPgo3qpLXciGwOffip68loo7XG 11 | bhYStZfDCiDw2w+N7Px93a9xA0ZNgWHFsal8qoMLg3V0xW3BkS+jcjsPud2muXOq 12 | E1a93P/40ZkgfoGkFHvh+HJITXhVtRDIoJJMJO6UFr7jfqnhvC07nDVCJjCIOl7C 13 | 
ebid3e2gplBwsyBeL9ulc6EfMa/URaAFvGMkIy0Qkcr9hlPHPcNvnglVvISocQID 14 | AQABMA0GCSqGSIb3DQEBBQUAA4GBADXwLG913lSI05RqyT0Ph/mtA4NyfPUkOnuJ 15 | JxJzHWzp+G68QjmWMGycN6fqN1MosjZtu9/p4Z5Rjx2ywJCOO4wcOteLuIvJkfHm 16 | 4gB+NvzajUZ1YIg/LD09TPSYXmPX5juR/zxMaChcZigGTpADnpoLfsyydUNSLw3L 17 | boKeqh+v 18 | -----END CERTIFICATE----- 19 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker1_keystore_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker1_sslkey_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker1_truststore_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker2-ca1-signed.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIC0jCCAjsCCQC4Ge6Xmxv2azANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj 3 | YTEudGVzdC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNP 4 | TkZMVUVOVDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMB4XDTE2MDcw 5 | OTE4MTQzM1oXDTQzMTEyNDE4MTQzM1owdDELMAkGA1UEBhMCVVMxCzAJBgNVBAgT 6 | AkNhMREwDwYDVQQHEwhQYWxvQWx0bzESMBAGA1UEChMJQ09ORkxVRU5UMQ0wCwYD 7 | VQQLEwRURVNUMSIwIAYDVQQDExlicm9rZXIyLnRlc3QuY29uZmx1ZW50LmlvMIIB 8 | IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAud6NF7S4nl4USma/Dvxq0Ftx 9 | 1OiuGZVKw5qo3KL+v87GP2Uyoqqu7YsB/dFpfl8j49My3Bo4Jy5g60oGgHEYh0lj 10 | 
0vb0NqNKUSu4/7jNHuN4hGnplPMTtshlrU0VaDCKM6M1IHJYsGzcpsEoVmjA5NaT 11 | zq9WKKXzdbM/5n0NHvvnESyR9Ug1RKuEPAGKxqc0AwMHiqWCLb8jHtbjkzf7PuEy 12 | MIAzeb/BcJygqBYvKHqQKtumWQy0YdT5vDl4M7Aywv2p188s5vBEgzCmjCQRjPph 13 | 1lLBWka1OGoebmPt5DqWNvtHzXA1Bit2aOA3BxnXN50Jq0MtEq46NGCQ4Up8XQID 14 | AQABMA0GCSqGSIb3DQEBBQUAA4GBAIVf4TckPp7iGjMIx1wJjPv9RSPynF/hljgS 15 | 7indUs9vjijLMuMR4E66a/JQiKqZxGUefBvtYX0oGKfRZDO5DgwHx50Kv9Yx6Ux6 16 | VqTq+CFpWJLSsdlNHHbjVYCVjCEFZAUX/Y4ULVCNihYNaLIB+NWNj5jEuemqAnUl 17 | eyM5UwLQ 18 | -----END CERTIFICATE----- 19 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker2_keystore_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker2_sslkey_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker2_truststore_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker3-ca1-signed.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIC0jCCAjsCCQC4Ge6Xmxv2bDANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj 3 | YTEudGVzdC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNP 4 | TkZMVUVOVDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMB4XDTE2MDcw 5 | OTE4MTQzNloXDTQzMTEyNDE4MTQzNlowdDELMAkGA1UEBhMCVVMxCzAJBgNVBAgT 6 | AkNhMREwDwYDVQQHEwhQYWxvQWx0bzESMBAGA1UEChMJQ09ORkxVRU5UMQ0wCwYD 7 | 
VQQLEwRURVNUMSIwIAYDVQQDExlicm9rZXIzLnRlc3QuY29uZmx1ZW50LmlvMIIB 8 | IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAhFw0H4/NksLIyFu+8YnrKr9J 9 | D1iupm5mpavXFiB3UrCUYjBPc+bkXRExFtAnhwB1q7R/E+zJtzlmLl5FojWFSaiv 10 | t2bjugPHs6bZ1D3p0UqouoQf7AgQQNSB+wZoZCp3yiKxI+s1U3NKT6MRbAN5eaUx 11 | 1JNo1fV2zVIIvsKFe1Zldt5uSML9OBtcixaJCsPkyGenV1kajkoiHC8UHgzu5obr 12 | 9QSOdkRWTgq5LX+gyWhAC4hF+ApA5QGQYT8m5paj5c2YHhpZcTFepRnZSh3fq7Xq 13 | hXPqzQyX6v9Kxii9QaVwY2zwgON09OJ5KF9UK4FPQfZmd4dJEVV4CybhHTP3kQID 14 | AQABMA0GCSqGSIb3DQEBBQUAA4GBANstHkSvQjumHlwQSAaQ4pA6YION0GcY+Lzl 15 | vUIE2DFRwzsV87wFa2sc46XOSpjhUxaYEqtyzHYCaPaZ/n2t07857AqNXJjeJZhW 16 | L/l17cAFdPToP63cpMBQF9deQyhHQTMEMhPKBYg9ym9B3wh2emGSxriD1nhU0cbK 17 | KlEkqnUP 18 | -----END CERTIFICATE----- 19 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker3_keystore_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker3_sslkey_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker3_truststore_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/client-plain.config: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/kraft/none/image/kafka-images/kafka/test/fixtures/secrets/client-plain.config 
-------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/config_krb.conf: -------------------------------------------------------------------------------- 1 | [logging] 2 | default = FILE:/var/log/kerberos/krb5libs.log 3 | kdc = FILE:/var/log/kerberos/krb5kdc.log 4 | admin_server = FILE:/var/log/kerberos/kadmind.log 5 | 6 | [libdefaults] 7 | default_realm = TEST.CONFLUENT.IO 8 | dns_lookup_realm = false 9 | dns_lookup_kdc = false 10 | ticket_lifetime = 24h 11 | renew_lifetime = 7d 12 | forwardable = true 13 | 14 | [realms] 15 | TEST.CONFLUENT.IO = { 16 | kdc = kerberos 17 | admin_server = confluent 18 | } 19 | 20 | [domain_realm] 21 | .test.confluent.io = TEST.CONFLUENT.IO 22 | test.confluent.io = TEST.CONFLUENT.IO 23 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/config_server1_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/broker1.keytab" 6 | principal="kafka/sasl-ssl-config@TEST.CONFLUENT.IO"; 7 | }; 8 | KafkaClient { 9 | com.sun.security.auth.module.Krb5LoginModule required 10 | useKeyTab=true 11 | storeKey=true 12 | keyTab="/etc/kafka/secrets/broker1.keytab" 13 | principal="kafka/sasl-ssl-config@TEST.CONFLUENT.IO"; 14 | }; 15 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/consumer-ca1-signed.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIC0zCCAjwCCQC4Ge6Xmxv2bjANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj 3 | YTEudGVzdC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNP 4 | 
TkZMVUVOVDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMB4XDTE2MDcw 5 | OTE4MTQ0MloXDTQzMTEyNDE4MTQ0MlowdTELMAkGA1UEBhMCVVMxCzAJBgNVBAgT 6 | AkNhMREwDwYDVQQHEwhQYWxvQWx0bzESMBAGA1UEChMJQ09ORkxVRU5UMQ0wCwYD 7 | VQQLEwRURVNUMSMwIQYDVQQDExpjb25zdW1lci50ZXN0LmNvbmZsdWVudC5pbzCC 8 | ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJMlGunTtQd2dTY+EPTXMLvO 9 | +QSznU/JdLM0liqBGInJ2J1yC76avKjBhXqbJRA+cYYq7EvBFuaeFAeRPfTXLJYv 10 | 67cmVN2QSAmd5jGiyOkP2w3q9WYyhczIQLE87NcC0/E3UE9SY25sUsZneJifhJLC 11 | JpEaQS+JSP8yWMwyGm67ccTIHanvGoha0s2aP97BhTqxAarBzSjW/IDO4r5yCwPJ 12 | dAbiI00rnt7zgeLwjzJBrvpzYbp4IhAQy8dnPeZ3PbL4qz+tyJECONn7lYIFFHMX 13 | SVw9GZe4KGnCyhnY/t4aKXRSTk8mBEjIybhfXV/mEE94z+KOAwUWV+uXWLQRUDUC 14 | AwEAATANBgkqhkiG9w0BAQUFAAOBgQBi9i8ynwSosbyP5FvX1wAHs3QYrD/izFxj 15 | d8W8STg7/TJbb8jJwl5ievUASiH0rvHrorEHs3Vyijc5W6nLoAL+KLjUYlNyd2b+ 16 | 78YYxgUFMFAzvrXJ1oFOWTkDb66LGWCj75z6hKPX2U33PY5+A7YSNMnjYlqQBSgN 17 | sorDym1cfQ== 18 | -----END CERTIFICATE----- 19 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/consumer_keystore_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/consumer_sslkey_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/consumer_truststore_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host.consumer.ssl.config: -------------------------------------------------------------------------------- 1 | 
group.id=ssl-host 2 | ssl.truststore.location=/etc/kafka/secrets/kafka.consumer.truststore.jks 3 | ssl.truststore.password=confluent 4 | 5 | ssl.keystore.location=/etc/kafka/secrets/kafka.consumer.keystore.jks 6 | ssl.keystore.password=confluent 7 | ssl.key.password=confluent 8 | 9 | security.protocol=SSL 10 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host.consumer.ssl.sasl.config: -------------------------------------------------------------------------------- 1 | group.id=ssl-sasl-host 2 | ssl.truststore.location=/etc/kafka/secrets/kafka.consumer.truststore.jks 3 | ssl.truststore.password=confluent 4 | 5 | ssl.keystore.location=/etc/kafka/secrets/kafka.consumer.keystore.jks 6 | ssl.keystore.password=confluent 7 | ssl.key.password=confluent 8 | 9 | security.protocol=SASL_SSL 10 | sasl.mechanism=GSSAPI 11 | sasl.kerberos.service.name=kafka 12 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host.producer.ssl.config: -------------------------------------------------------------------------------- 1 | ssl.truststore.location=/etc/kafka/secrets/kafka.producer.truststore.jks 2 | ssl.truststore.password=confluent 3 | 4 | ssl.keystore.location=/etc/kafka/secrets/kafka.producer.keystore.jks 5 | ssl.keystore.password=confluent 6 | ssl.key.password=confluent 7 | 8 | security.protocol=SSL 9 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host.producer.ssl.sasl.config: -------------------------------------------------------------------------------- 1 | ssl.truststore.location=/etc/kafka/secrets/kafka.producer.truststore.jks 2 | ssl.truststore.password=confluent 3 | 4 | ssl.keystore.location=/etc/kafka/secrets/kafka.producer.keystore.jks 5 | ssl.keystore.password=confluent 6 | 
ssl.key.password=confluent 7 | 8 | security.protocol=SASL_SSL 9 | sasl.mechanism=GSSAPI 10 | sasl.kerberos.service.name=kafka 11 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_broker1_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/host_broker1.keytab" 6 | principal="kafka/sasl.kafka.com@TEST.CONFLUENT.IO"; 7 | }; 8 | KafkaClient { 9 | com.sun.security.auth.module.Krb5LoginModule required 10 | useKeyTab=true 11 | storeKey=true 12 | keyTab="/etc/kafka/secrets/host_broker1.keytab" 13 | principal="kafka/sasl.kafka.com@TEST.CONFLUENT.IO"; 14 | }; 15 | 16 | Client { 17 | com.sun.security.auth.module.Krb5LoginModule required 18 | useKeyTab=true 19 | storeKey=true 20 | keyTab="/etc/kafka/secrets/zkclient-host-1.keytab" 21 | principal="zkclient/sasl.kafka.com@TEST.CONFLUENT.IO"; 22 | }; 23 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_broker2_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/host_broker2.keytab" 6 | principal="kafka/sasl.kafka.com@TEST.CONFLUENT.IO"; 7 | }; 8 | KafkaClient { 9 | com.sun.security.auth.module.Krb5LoginModule required 10 | useKeyTab=true 11 | storeKey=true 12 | keyTab="/etc/kafka/secrets/host_broker2.keytab" 13 | principal="kafka/sasl.kafka.com@TEST.CONFLUENT.IO"; 14 | }; 15 | 16 | Client { 17 | com.sun.security.auth.module.Krb5LoginModule required 18 | useKeyTab=true 19 | storeKey=true 20 | keyTab="/etc/kafka/secrets/zkclient-host-2.keytab" 21 | 
principal="zkclient/sasl.kafka.com@TEST.CONFLUENT.IO"; 22 | }; 23 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_broker3_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/host_broker3.keytab" 6 | principal="kafka/sasl.kafka.com@TEST.CONFLUENT.IO"; 7 | }; 8 | KafkaClient { 9 | com.sun.security.auth.module.Krb5LoginModule required 10 | useKeyTab=true 11 | storeKey=true 12 | keyTab="/etc/kafka/secrets/host_broker3.keytab" 13 | principal="kafka/sasl.kafka.com@TEST.CONFLUENT.IO"; 14 | }; 15 | 16 | Client { 17 | com.sun.security.auth.module.Krb5LoginModule required 18 | useKeyTab=true 19 | storeKey=true 20 | keyTab="/etc/kafka/secrets/zkclient-host-3.keytab" 21 | principal="zkclient/sasl.kafka.com@TEST.CONFLUENT.IO"; 22 | }; 23 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_consumer_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaClient { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/host_consumer.keytab" 6 | principal="host_consumer/sasl.kafka.com@TEST.CONFLUENT.IO"; 7 | }; 8 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_krb.conf: -------------------------------------------------------------------------------- 1 | [logging] 2 | default = FILE:/var/log/kerberos/krb5libs.log 3 | kdc = FILE:/var/log/kerberos/krb5kdc.log 4 | admin_server = FILE:/var/log/kerberos/kadmind.log 5 | 6 | [libdefaults] 7 | default_realm = TEST.CONFLUENT.IO 8 | dns_lookup_realm = false 9 | 
dns_lookup_kdc = false 10 | ticket_lifetime = 24h 11 | renew_lifetime = 7d 12 | forwardable = true 13 | 14 | [realms] 15 | TEST.CONFLUENT.IO = { 16 | kdc = localhost 17 | admin_server = localhost 18 | } 19 | 20 | [domain_realm] 21 | .TEST.CONFLUENT.IO = TEST.CONFLUENT.IO 22 | TEST.CONFLUENT.IO = TEST.CONFLUENT.IO 23 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_producer_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaClient { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/host_producer.keytab" 6 | principal="host_producer/sasl.kafka.com@TEST.CONFLUENT.IO"; 7 | }; 8 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_zookeeper_1_jaas.conf: -------------------------------------------------------------------------------- 1 | Server { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/zookeeper-host-1.keytab" 6 | principal="zookeeper/sasl.kafka.com@TEST.CONFLUENT.IO"; 7 | }; 8 | Client { 9 | com.sun.security.auth.module.Krb5LoginModule required 10 | useKeyTab=true 11 | storeKey=true 12 | keyTab="/etc/kafka/secrets/zkclient-host-1.keytab" 13 | principal="zkclient/sasl.kafka.com@TEST.CONFLUENT.IO"; 14 | }; 15 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_zookeeper_2_jaas.conf: -------------------------------------------------------------------------------- 1 | Server { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/zookeeper-host-1.keytab" 6 | principal="zookeeper/sasl.kafka.com@TEST.CONFLUENT.IO"; 
7 | }; 8 | Client { 9 | com.sun.security.auth.module.Krb5LoginModule required 10 | useKeyTab=true 11 | storeKey=true 12 | keyTab="/etc/kafka/secrets/zkclient-host-1.keytab" 13 | principal="zkclient/sasl.kafka.com@TEST.CONFLUENT.IO"; 14 | }; 15 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_zookeeper_3_jaas.conf: -------------------------------------------------------------------------------- 1 | Server { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | storeKey=true 5 | keyTab="/etc/kafka/secrets/zookeeper-host-1.keytab" 6 | principal="zookeeper/sasl.kafka.com@TEST.CONFLUENT.IO"; 7 | }; 8 | Client { 9 | com.sun.security.auth.module.Krb5LoginModule required 10 | useKeyTab=true 11 | storeKey=true 12 | keyTab="/etc/kafka/secrets/zkclient-host-1.keytab" 13 | principal="zkclient/sasl.kafka.com@TEST.CONFLUENT.IO"; 14 | }; 15 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.broker1.keystore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.broker1.keystore.jks -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.broker1.truststore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.broker1.truststore.jks -------------------------------------------------------------------------------- 
/kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.broker2.keystore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.broker2.keystore.jks -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.broker2.truststore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.broker2.truststore.jks -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.broker3.keystore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.broker3.keystore.jks -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.broker3.truststore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.broker3.truststore.jks -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.consumer.keystore.jks: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.consumer.keystore.jks -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.consumer.truststore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.consumer.truststore.jks -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.producer.keystore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.producer.keystore.jks -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.producer.truststore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafka.producer.truststore.jks -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafkacat-ca1-signed.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICQjCCAasCCQC4Ge6Xmxv2aTANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj 3 | YTEudGVzdC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNP 4 | 
TkZMVUVOVDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMB4XDTE2MDcw 5 | OTE4MTQyOFoXDTQzMTEyNDE4MTQyOFowaDEjMCEGA1UEAxMaa2Fma2FjYXQudGVz 6 | dC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNPTkZMVUVO 7 | VDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMIGfMA0GCSqGSIb3DQEB 8 | AQUAA4GNADCBiQKBgQDGtDOf/EYZY08D82ehsAITjLprXDMGnfuiXxdsiZyqCIxc 9 | JPM6gKtxzU8DnkWTY5xEnWxjIwDjQGXwBCnXaNBq7kgBL3P13rtnX34ZQar49NX3 10 | 6RR8IUTM5HxDzxOkmg3aZ2dkKWZU5B1VRTZzWA7mxQEZMPjV8DrhHUa3XdWH7wID 11 | AQABMA0GCSqGSIb3DQEBBQUAA4GBAJHd35NpxhDY43LtmHMqGdObBaiUBuB7jai4 12 | QRzdq7J+bafQ28sIjXo03lV7YRMd9r0gPBXhWymHH838xJh7TnbpHHyJ/CBjVnqG 13 | Mc+cDTMudNWXOrayYeN1WkF/ufP+gJfRl084Lg8BKFaKntRIW/kG/1CniJYRs/JD 14 | amRxX3iB 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafkacat.client.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | Proc-Type: 4,ENCRYPTED 3 | DEK-Info: DES-EDE3-CBC,F8EEC4C2219A7603 4 | 5 | Jh4EwEWHP6DwOImmfwjgUODepVQ9RoBD5PZLngTuCMGI8xrWbINGi1ejd3H+61Tx 6 | bwSirRCwKHqr0JVqvaA8VxZEeIleQ+ZqRASEUhnvGU2gTJ7sEF6KD+4i5n4pKBOQ 7 | 0/F0rIHLB/slongr1JlTP7F22XC3DXkHsmsjtTvkqAPXh4tgM5SGwlE8sYKqL4Py 8 | moSQBN2+yGsjP96Mg+mskzvSTucKrCJ9cXnTbCQfimgxk9kjNzcrVZN51r5fiQ+Z 9 | 0x0muyQh/G9gvDKP4xq6PAUopUNlzPutVm0rcY7SIE3Q0WK65AfMvbLYPulnAJua 10 | JJVYGjF0y/Zze4p7hfJUqzfto5+bwfD/AgwUOzCIpPcARKb7LsYoZNJqqkcuXc00 11 | XQ8evsA/DF9DWXmWNpE6LOqAe5k38c+NYMC7gIYltoFwPr4tq71jkpJfZ+QetzOf 12 | TuxuYjcXf8FHx12JgOybeglFjK85aZ2nDgI+F5yUx7jvp/Qkpo4tJ/uXhr2D4m9U 13 | Q+bcas1PJDv+aYRa9pV09hkHTKb0wlnk3r1Fs0lNjyTKhBqhwrhDwAuaw2uj2tgA 14 | ulX3zHp1vNzTecrDdsUNn99xDLls40Uqh3Wlsg56ck8i18DLRre4fyg6Rk33n369 15 | hobAb1fujIvBpdtzGbeqvo6YGfqb77JVLrr/f3wt62A0ocoYpm7YbD0RFYKC14Tz 16 | ogLii/59KtIaKIeCc/eHs+WiXoyoLqVjMk4/+TXUrKBsuOwxVeOk1FUlJuJpmo5o 17 | 3b7ehgVXprn0xO8lC9xjMIWhbRUl6scE6LyYiUyX12XBAif2pMhV9w== 18 | 
-----END RSA PRIVATE KEY----- 19 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/krb_server.conf: -------------------------------------------------------------------------------- 1 | [logging] 2 | default = FILE:/var/log/kerberos/krb5libs.log 3 | kdc = FILE:/var/log/kerberos/krb5kdc.log 4 | admin_server = FILE:/var/log/kerberos/kadmind.log 5 | 6 | [libdefaults] 7 | default_realm = TEST.CONFLUENT.IO 8 | dns_lookup_realm = false 9 | dns_lookup_kdc = false 10 | ticket_lifetime = 24h 11 | renew_lifetime = 7d 12 | forwardable = true 13 | udp_preference_limit = 1000000 14 | # WARNING: We use weaker key types to simplify testing as stronger key types 15 | # require the enhanced security JCE policy file to be installed. You should 16 | # NOT run with this configuration in production or any real environment. You 17 | # have been warned. 18 | default_tkt_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1 19 | default_tgs_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1 20 | permitted_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1 21 | 22 | [realms] 23 | TEST.CONFLUENT.IO = { 24 | kdc = kerberos 25 | admin_server = kerberos 26 | } 27 | 28 | [domain_realm] 29 | .TEST.CONFLUENT.IO = TEST.CONFLUENT.IO 30 | TEST.CONFLUENT.IO = TEST.CONFLUENT.IO 31 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/producer-ca1-signed.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIC0zCCAjwCCQC4Ge6Xmxv2bTANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj 3 | YTEudGVzdC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNP 4 | TkZMVUVOVDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMB4XDTE2MDcw 5 | OTE4MTQzOVoXDTQzMTEyNDE4MTQzOVowdTELMAkGA1UEBhMCVVMxCzAJBgNVBAgT 6 | AkNhMREwDwYDVQQHEwhQYWxvQWx0bzESMBAGA1UEChMJQ09ORkxVRU5UMQ0wCwYD 7 | 
VQQLEwRURVNUMSMwIQYDVQQDExpwcm9kdWNlci50ZXN0LmNvbmZsdWVudC5pbzCC 8 | ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5M+oKxIzpuCLM3/O/RFTjn 9 | mkdKAvygCkKDrzLvQHaVUQhWtObUOyzxObk+mVj9SS8K1HpGwX88USdQqHuGBsrZ 10 | 5OHUU9yS5TJ3J+wNR8Wf/ki84Z/tM8NCKv9MxCnSoy6s9Wk4Lk8S1lvAp9sPQhaX 11 | Y9d55z+j3LNKsa8YJqX6XHJc3XtyFMshY2LsFS/s3YPGYl54tVVvGQJ7qxf7cVhI 12 | /ISH1LVfkhA4XO6KSGf/mBg2XTkcGDO5kHVAnmIfGeZ45B17HAy4UI2LMM0Q7xKm 13 | mgFDMWpdV47JeM2bfP3wNMSREfkepjyZk5PTvevYZJpQJ7/U841RPIkCgE6CCNkC 14 | AwEAATANBgkqhkiG9w0BAQUFAAOBgQAMVY7TqXsMXnoVb1aWwmNruOKfAlubS/sQ 15 | 4tfxyY1SMfhBYCRR+ZxlGrXY0GmKfzRUjjaH+8rwHn6WRpI3Qk7IHIU5LO+3jrKh 16 | 3DNraRokKBFz35TBmkEJY7Xc5KOzRA3g5739TvDXwKPNtOsI41GpbOl3HSBjnEGG 17 | 01c4XSxmeQ== 18 | -----END CERTIFICATE----- 19 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/producer-ssl.config: -------------------------------------------------------------------------------- 1 | security.protocol=SSL 2 | ssl.truststore.location=/Users/sumit/code/confluent/cp-docker/security/kafka.producer.truststore.jks 3 | ssl.truststore.password=confluent 4 | 5 | ssl.keystore.location=/Users/sumit/code/confluent/cp-docker/security/kafka.producer.keystore.jks 6 | ssl.keystore.password=confluent 7 | ssl.key.password=confluent 8 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/producer_keystore_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/producer_sslkey_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- 
/kraft/none/image/kafka-images/kafka/test/fixtures/secrets/producer_truststore_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/snakeoil-ca-1.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDDTCCAnagAwIBAgIJAPgq7cn3Z8iiMA0GCSqGSIb3DQEBBQUAMGMxHjAcBgNV 3 | BAMTFWNhMS50ZXN0LmNvbmZsdWVudC5pbzENMAsGA1UECxMEVEVTVDESMBAGA1UE 4 | ChMJQ09ORkxVRU5UMREwDwYDVQQHEwhQYWxvQWx0bzELMAkGA1UEBhMCVVMwHhcN 5 | MTYwNzA5MTgxNDI4WhcNMTcwNzA5MTgxNDI4WjBjMR4wHAYDVQQDExVjYTEudGVz 6 | dC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNPTkZMVUVO 7 | VDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMIGfMA0GCSqGSIb3DQEB 8 | AQUAA4GNADCBiQKBgQDt72SR2FPC1HWqghQO8DNlxjPnqgW6RJhDLiA8+iLGPVYc 9 | MoBtsxMtMoWVx7WUciOWO2Az/v92J7QPhO8KkdRIrv4yTRTd/sYPA5Ky4P19Rc3l 10 | Zr+iWB73EBkliVzEkXkC5mS9Qsx83bl32+d0fMk/GccKlgtJ5Ramf0RNB9a5EwID 11 | AQABo4HIMIHFMB0GA1UdDgQWBBR4t05HUOhx4JSsfEF5l6PJ8/pzzzCBlQYDVR0j 12 | BIGNMIGKgBR4t05HUOhx4JSsfEF5l6PJ8/pzz6FnpGUwYzEeMBwGA1UEAxMVY2Ex 13 | LnRlc3QuY29uZmx1ZW50LmlvMQ0wCwYDVQQLEwRURVNUMRIwEAYDVQQKEwlDT05G 14 | TFVFTlQxETAPBgNVBAcTCFBhbG9BbHRvMQswCQYDVQQGEwJVU4IJAPgq7cn3Z8ii 15 | MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEADLxsXBPIddsnW7nLe3Zb 16 | pvVLSf3cwF6SUewgBvxmrcRbuVynsODJc6P0UU8+z6JyjqcYqBoyuupVksBPa1aJ 17 | wtB/5YCRmXnz1Af2P2NrlpSs6R4uJuCd47OuGhgoA4TPTDgDt3j9zDncCh2e6S2A 18 | r8koy4uaoj7cNQMV6OZxgjE= 19 | -----END CERTIFICATE----- 20 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/test/fixtures/secrets/snakeoil-ca-1.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | Proc-Type: 4,ENCRYPTED 3 | DEK-Info: DES-EDE3-CBC,1A482A57BA230AD3 4 | 5 | 
B5XZ6aSWAYrUgxLtLB+WlLFRN1EpZohUlKUr5kmEDsr2gaa2ELMuxda6ie84uNoQ 6 | pb5PI0TKhdSf2YY0XBHHxPlZy/1LpRTi0typVE5JSsOOc8EqHE/hTas9C6+/Ipw1 7 | bEybyBdxdvFyY5M7CIhKeEV2FXZnq+enIUi1iiMMmDRMw22VJRXcI+e+MUALUsD/ 8 | hcJk+5njYao6Cf8l+zTGl3Yxq9/mPqgQwfYxdTwkRk9P73TL7X8YC+BH1N1efJqS 9 | P6xC0viH2xgrg3j6X0zjGGPRwLYONxtzDrCxJgZOoPKdPGLPtjkxN2JHH40kv0QC 10 | 91gdoDnd2n2L4de1hT8T54vwmh8UwGrYARykt+lIMUjZl/Nd715e6uAXQZYyxM4g 11 | g2gHDXwp5EKcLsQdJI7CejEWgUnHQUOPXc2JQHXknmHTclgK2OBjgrFKLAQQIpjN 12 | hbcf4MTOhhPqE9QztnNH9i37rqv+SWyhiSKWODpuruaT9+gXnUAZx7bViNKfC0zB 13 | yQRxozCEr99CaOvZwClizDsz9kCMWbCMw5g9ISyIHm3oMf+sPwZQVT7i+z9+lGDO 14 | jEp9Aoj+n6ywqVfVZAjE3JzvyX84jtX8K7X608JFAtG05wf0Fmb9QwrtL0gOL7C2 15 | UuDcKYin2tlOX5JQ2CxWlElA4sCNMufvsYL492CYo+gZ0FxVptyu/r3swCujqKqd 16 | IyYIgHiNRbgWeZ2QEom9GFWxVnWaUW7807tk+p++3tnYyhQY/lA35cmv2RK6jYR8 17 | QXv9Vc4159fLEPwxBgHC2njgXU2USN+XvNI3n98KUtmNRF0S7a+npQ== 18 | -----END RSA PRIVATE KEY----- 19 | -------------------------------------------------------------------------------- /kraft/none/image/kafka-images/kafka/tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = test 3 | toxworkdir = /var/tmp 4 | 5 | [testenv] 6 | # Consolidating all deps here instead of cleanly/separately in test/style/cover so we 7 | # have a single env (platform) to work with, which makes debugging easier (like which env?). 8 | # Not as clean but easier to work with for dev, which is better. 
9 | deps = 10 | -rrequirements.txt 11 | flake8 12 | pytest 13 | pytest-xdist 14 | pytest-cov 15 | install_command = pip install -U {packages} 16 | recreate = True 17 | skipsdist = True 18 | usedevelop = True 19 | setenv = 20 | PIP_PROCESS_DEPENDENCY_LINKS=1 21 | PIP_DEFAULT_TIMEOUT=60 22 | ARCHFLAGS=-Wno-error=unused-command-line-argument-hard-error-in-future 23 | basepython = python3 24 | envdir = {toxworkdir}/confluent 25 | 26 | [testenv:test] 27 | commands = 28 | py.test --color=no {env:PYTESTARGS:} test 29 | 30 | [testenv:style] 31 | commands = 32 | flake8 --config tox.ini 33 | 34 | [testenv:cover] 35 | commands = 36 | py.test {env:PYTESTARGS:} --cov . --cov-report=xml --cov-report=html --cov-report=term test 37 | 38 | [flake8] 39 | ignore = E111,E121,W292,E123,E226 40 | max-line-length = 160 41 | 42 | [pytest] 43 | addopts = -n 1 44 | -------------------------------------------------------------------------------- /kraft/none/up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | docker-compose up -d 4 | 5 | # Creating the user kafka 6 | # kafka is configured as a super user, no need for additional ACL 7 | # docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-255=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka 8 | 9 | echo "Example configuration:" 10 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9092 --topic test" 11 | echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9092 --topic test --from-beginning" 12 | -------------------------------------------------------------------------------- /ldap-auth/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER seknop@gmail.com 3 | ENV container docker 4 | 5 | # 0. 
Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. Install zookeeper and kafka 15 | RUN yum install -y java-11-openjdk 16 | RUN yum install -y confluent-server 17 | RUN yum install -y confluent-security 18 | 19 | # 3. Configure Kafka and zookeeper for Kerberos 20 | COPY server.properties /etc/kafka/server.properties 21 | COPY kafka.jaas.config /etc/kafka/kafka_server_jaas.conf 22 | COPY log4j.properties /etc/kafka/log4j.properties 23 | 24 | COPY alice.properties /etc/kafka/alice.properties 25 | COPY barnie.properties /etc/kafka/barnie.properties 26 | COPY charlie.properties /etc/kafka/charlie.properties 27 | COPY kafka.properties /etc/kafka/kafka.properties 28 | 29 | EXPOSE 9093 30 | 31 | CMD kafka-server-start /etc/kafka/server.properties 32 | -------------------------------------------------------------------------------- /ldap-auth/kafka/alice.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="alice" password="alice-secret"; 5 | -------------------------------------------------------------------------------- /ldap-auth/kafka/barnie.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="barnie" \ 5 | password="barnie-secret"; 6 | 
-------------------------------------------------------------------------------- /ldap-auth/kafka/charlie.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="charlie" \ 5 | password="charlie-secret"; 6 | -------------------------------------------------------------------------------- /ldap-auth/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.5/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.5 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /ldap-auth/kafka/kafka.jaas.config: -------------------------------------------------------------------------------- 1 | Client { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | username="kafka" 4 | password="kafka"; 5 | }; 6 | -------------------------------------------------------------------------------- /ldap-auth/kafka/kafka.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="kafka" \ 5 | password="kafka"; 6 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/01_base.ldif: -------------------------------------------------------------------------------- 1 | dn: ou=users,dc=confluent,dc=io 2 | objectClass: 
organizationalUnit 3 | ou: Users 4 | 5 | dn: ou=groups,dc=confluent,dc=io 6 | objectClass: organizationalUnit 7 | ou: Groups 8 | 9 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/02_KafkaDevelopers.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: Kafka Developers 5 | gidNumber: 5000 6 | 7 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/03_ProjectA.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=ProjectA,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: ProjectA 5 | gidNumber: 5001 6 | 7 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/04_ProjectB.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=ProjectB,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: ProjectB 5 | gidNumber: 5002 6 | 7 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/10_alice.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=alice,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: alice 6 | sn: LookingGlass 7 | givenName: Alice 8 | cn: alice 9 | displayName: Alice LookingGlass 10 | uidNumber: 10000 11 | gidNumber: 5000 12 | userPassword: alice-secret 13 | gecos: alice 14 | loginShell: /bin/bash 15 | homeDirectory: /home/alice 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/11_barnie.ldif: -------------------------------------------------------------------------------- 1 | 
dn: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: barnie 6 | sn: Rubble 7 | givenName: Barnie 8 | cn: barnie 9 | displayName: Barnie Rubble 10 | uidNumber: 10001 11 | gidNumber: 5000 12 | userPassword: barnie-secret 13 | gecos: barnie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/barnie 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/12_charlie.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=charlie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: charlie 6 | sn: Sheen 7 | givenName: Charlie 8 | cn: charlie 9 | displayName: Charlie Sheen 10 | uidNumber: 10002 11 | gidNumber: 5000 12 | userPassword: charlie-secret 13 | gecos: charlie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/charlie 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/13_donald.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=donald,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: donald 6 | sn: Duck 7 | givenName: Donald 8 | cn: donald 9 | displayName: Donald Duck 10 | uidNumber: 10003 11 | gidNumber: 5000 12 | userPassword: donald-secret 13 | gecos: donald 14 | loginShell: /bin/bash 15 | homeDirectory: /home/donald 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/14_eva.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=eva,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: eva 6 | sn: Maria 7 | givenName: Eva 8 | cn: eva 9 | displayName: 
Eva Maria 10 | uidNumber: 10004 11 | gidNumber: 5000 12 | userPassword: eva-secret 13 | gecos: eva 14 | loginShell: /bin/bash 15 | homeDirectory: /home/eva 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/15_fritz.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=fritz,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: fritz 6 | sn: Walter 7 | givenName: Fritz 8 | cn: fritz 9 | displayName: Fritz Walter 10 | uidNumber: 10005 11 | gidNumber: 5000 12 | userPassword: fritz-secret 13 | gecos: fritz 14 | loginShell: /bin/bash 15 | homeDirectory: /home/fritz 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/16_greta.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=greta,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: greta 6 | sn: Thunberg 7 | givenName: Greta 8 | cn: greta 9 | displayName: Greta Thunberg 10 | uidNumber: 10006 11 | gidNumber: 5000 12 | userPassword: greta-secret 13 | gecos: greta 14 | loginShell: /bin/bash 15 | homeDirectory: /home/greta 16 | -------------------------------------------------------------------------------- /ldap-auth/ldap/custom/17_kafka.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=kafka,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: kafka 6 | sn: kafka 7 | givenName: kafka 8 | cn: kafka 9 | displayName: kafka 10 | uidNumber: 10007 11 | gidNumber: 5000 12 | userPassword: kafka 13 | gecos: kafka 14 | loginShell: /bin/bash 15 | homeDirectory: /home/kafka 16 | -------------------------------------------------------------------------------- 
/ldap-auth/ldap/custom/20_group_add.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | changetype: modify 3 | add: memberuid 4 | memberuid: cn=alice,ou=users,{{ LDAP_BASE_DN }} 5 | 6 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 7 | changetype: modify 8 | add: memberuid 9 | memberuid: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 10 | 11 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 12 | changetype: modify 13 | add: memberuid 14 | memberuid: cn=charlie,ou=users,{{ LDAP_BASE_DN }} 15 | 16 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 17 | changetype: modify 18 | add: memberuid 19 | memberuid: cn=eva,ou=users,{{ LDAP_BASE_DN }} 20 | 21 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 22 | changetype: modify 23 | add: memberuid 24 | memberuid: cn=fritz,ou=users,{{ LDAP_BASE_DN }} 25 | -------------------------------------------------------------------------------- /ldap-auth/up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ## start docker-compose up to and including kafka 4 | docker-compose up -d --build 5 | 6 | echo "Example configuration:" 7 | echo "Should succeed (barnie is in group)" 8 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --topic test-topic --producer.config=/etc/kafka/barnie.properties" 9 | echo "Should fail (charlie is NOT in group)" 10 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --topic test-topic --producer.config=/etc/kafka/charlie.properties" 11 | echo "Should succeed (alice is in group)" 12 | echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9093 --consumer.config /etc/kafka/alice.properties --topic test-topic --from-beginning" 13 | echo "List ACLs" 14 | echo "-> docker-compose exec kafka kafka-acls --bootstrap-server kafka:9093 --list --command-config 
/etc/kafka/kafka.properties" 15 | -------------------------------------------------------------------------------- /ldap-auth/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER seknop@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. Install zookeeper and kafka 15 | RUN yum install -y java-11-openjdk 16 | RUN yum install -y confluent-platform-2.12 17 | 18 | # 3. Configure Kafka and zookeeper for Kerberos 19 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 20 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf 21 | 22 | EXPOSE 2181 23 | 24 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 25 | -------------------------------------------------------------------------------- /ldap-auth/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.5/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.5 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /ldap-auth/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | 
dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider 5 | requireClientAuthScheme=sasl 6 | -------------------------------------------------------------------------------- /ldap-auth/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | user_kafka="kafka"; 4 | }; 5 | -------------------------------------------------------------------------------- /ldap/acls/acls.csv: -------------------------------------------------------------------------------- 1 | KafkaPrincipal,ResourceType,PatternType,ResourceName,Operation,PermissionType,Host 2 | User:kafka,Cluster,LITERAL,kafka-cluster,All,Allow,* 3 | Group:Kafka Developers,Group,LITERAL,*,Read,Allow,* 4 | Group:Kafka Developers,Topic,LITERAL,test-topic,Describe,Allow,* 5 | Group:Kafka Developers,Topic,LITERAL,test-topic,Read,Allow,* 6 | Group:Kafka Developers,Topic,LITERAL,test-topic,Write,Allow,* 7 | Group:Kafka Developers,Topic,LITERAL,test-topic,Create,Allow,* 8 | -------------------------------------------------------------------------------- /ldap/add-user: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Creating the users 4 | docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=purbon-secret],SCRAM-SHA-512=[password=purbon-secret]' --entity-type users --entity-name purbon 5 | 6 | 7 | echo "Should succeed as the new user is in the group" 8 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --topic test-topic --producer.config=/service/kafka/users/purbon.properties" 9 | echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9093 --consumer.config /service/kafka/users/purbon.properties --topic test-topic 
--from-beginning" 10 | -------------------------------------------------------------------------------- /ldap/custom/01_base.ldif: -------------------------------------------------------------------------------- 1 | dn: ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: organizationalUnit 3 | ou: Users 4 | 5 | dn: ou=groups,{{ LDAP_BASE_DN }} 6 | objectClass: organizationalUnit 7 | ou: Groups 8 | 9 | -------------------------------------------------------------------------------- /ldap/custom/02_KafkaDevelopers.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: Kafka Developers 5 | gidNumber: 5000 6 | 7 | -------------------------------------------------------------------------------- /ldap/custom/10_alice.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=alice,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: alice 6 | sn: LookingGlass 7 | givenName: Alice 8 | cn: alice 9 | displayName: Alice LookingGlass 10 | uidNumber: 10000 11 | gidNumber: 5000 12 | userPassword: alice-secret 13 | gecos: alice 14 | loginShell: /bin/bash 15 | homeDirectory: /home/alice 16 | -------------------------------------------------------------------------------- /ldap/custom/11_barnie.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: barnie 6 | sn: Rubble 7 | givenName: Barnie 8 | cn: barnie 9 | displayName: Barnie Rubble 10 | uidNumber: 10001 11 | gidNumber: 5000 12 | userPassword: barnie-secret 13 | gecos: barnie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/barnie 16 | 
-------------------------------------------------------------------------------- /ldap/custom/12_charlie.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=charlie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: charlie 6 | sn: Sheen 7 | givenName: Charlie 8 | cn: charlie 9 | displayName: Charlie Sheen 10 | uidNumber: 10001 11 | gidNumber: 5000 12 | userPassword: charlie-secret 13 | gecos: charlie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/charlie 16 | -------------------------------------------------------------------------------- /ldap/custom/20_group_add.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | changetype: modify 3 | add: memberuid 4 | memberuid: cn=alice,ou=users,{{ LDAP_BASE_DN }} 5 | 6 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 7 | changetype: modify 8 | add: memberuid 9 | memberuid: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 10 | -------------------------------------------------------------------------------- /ldap/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER seknop@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. Install zookeeper and kafka 15 | RUN yum install -y java-11-openjdk 16 | RUN yum install -y confluent-server 17 | RUN yum install -y confluent-security 18 | 19 | # 3. 
Configure Kafka and zookeeper for Kerberos 20 | COPY server.properties /etc/kafka/server.properties 21 | COPY server-with-ssl.properties /etc/kafka/server-with-ssl.properties 22 | COPY kafka.jaas.config /etc/kafka/kafka_server_jaas.conf 23 | COPY log4j.properties /etc/kafka/log4j.properties 24 | 25 | COPY alice.properties /etc/kafka/alice.properties 26 | COPY barnie.properties /etc/kafka/barnie.properties 27 | COPY charlie.properties /etc/kafka/charlie.properties 28 | COPY kafka.properties /etc/kafka/kafka.properties 29 | 30 | EXPOSE 9093 31 | 32 | CMD kafka-server-start /etc/kafka/server.properties 33 | -------------------------------------------------------------------------------- /ldap/kafka/alice.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="alice" \ 5 | password="alice-secret"; 6 | -------------------------------------------------------------------------------- /ldap/kafka/barnie.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="barnie" \ 5 | password="barnie-secret"; 6 | -------------------------------------------------------------------------------- /ldap/kafka/charlie.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="charlie" \ 5 | password="charlie-secret"; 6 | -------------------------------------------------------------------------------- /ldap/kafka/confluent.repo: 
-------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.5/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.5 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /ldap/kafka/consumer.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="kafka" \ 5 | password="kafka"; 6 | 7 | -------------------------------------------------------------------------------- /ldap/kafka/jks/.gitignore: -------------------------------------------------------------------------------- 1 | *.crt 2 | *.csr 3 | *_creds 4 | *.jks 5 | *.srl 6 | *.key 7 | *.pem 8 | *.der 9 | *.p12 10 | -------------------------------------------------------------------------------- /ldap/kafka/kafka.jaas.config: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | org.apache.kafka.common.security.scram.ScramLoginModule required 3 | username="kafka" 4 | password="kafka"; 5 | }; 6 | 7 | Client { 8 | org.apache.zookeeper.server.auth.DigestLoginModule required 9 | username="kafka" 10 | password="kafka"; 11 | }; 12 | -------------------------------------------------------------------------------- /ldap/kafka/kafka.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | 
username="kafka" \ 5 | password="kafka"; 6 | -------------------------------------------------------------------------------- /ldap/kafka/users/purbon.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="purbon" \ 5 | password="purbon-secret"; 6 | -------------------------------------------------------------------------------- /ldap/ldap/certs/.gitignore: -------------------------------------------------------------------------------- 1 | *.crt 2 | *.csr 3 | *_creds 4 | *.jks 5 | *.srl 6 | *.key 7 | *.pem 8 | *.der 9 | *.p12 10 | -------------------------------------------------------------------------------- /ldap/ldap/custom/01_base.ldif: -------------------------------------------------------------------------------- 1 | dn: ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: organizationalUnit 3 | ou: Users 4 | 5 | dn: ou=groups,{{ LDAP_BASE_DN }} 6 | objectClass: organizationalUnit 7 | ou: Groups 8 | 9 | -------------------------------------------------------------------------------- /ldap/ldap/custom/02_KafkaDevelopers.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: Kafka Developers 5 | gidNumber: 5000 6 | 7 | -------------------------------------------------------------------------------- /ldap/ldap/custom/10_alice.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=alice,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: alice 6 | sn: LookingGlass 7 | givenName: Alice 8 | cn: alice 9 | displayName: Alice LookingGlass 10 | uidNumber: 10000 11 | gidNumber: 5000 12 | userPassword: 
alice-secret 13 | gecos: alice 14 | loginShell: /bin/bash 15 | homeDirectory: /home/alice 16 | -------------------------------------------------------------------------------- /ldap/ldap/custom/11_barnie.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: barnie 6 | sn: Rubble 7 | givenName: Barnie 8 | cn: barnie 9 | displayName: Barnie Rubble 10 | uidNumber: 10001 11 | gidNumber: 5000 12 | userPassword: barnie-secret 13 | gecos: barnie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/barnie 16 | -------------------------------------------------------------------------------- /ldap/ldap/custom/12_charlie.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=charlie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: charlie 6 | sn: Sheen 7 | givenName: Charlie 8 | cn: charlie 9 | displayName: Charlie Sheen 10 | uidNumber: 10002 11 | gidNumber: 5000 12 | userPassword: charlie-secret 13 | gecos: charlie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/charlie 16 | -------------------------------------------------------------------------------- /ldap/ldap/custom/20_group_add.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | changetype: modify 3 | add: memberuid 4 | memberuid: cn=alice,ou=users,{{ LDAP_BASE_DN }} 5 | 6 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 7 | changetype: modify 8 | add: memberuid 9 | memberuid: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 10 | -------------------------------------------------------------------------------- /ldap/scripts/.gitignore: -------------------------------------------------------------------------------- 1 | *.crt 2 
| *.csr 3 | *_creds 4 | *.jks 5 | *.srl 6 | *.key 7 | *.pem 8 | *.der 9 | *.p12 10 | -------------------------------------------------------------------------------- /ldap/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER seknop@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. Install zookeeper and kafka 15 | RUN yum install -y java-11-openjdk 16 | RUN yum install -y confluent-platform-2.12 17 | 18 | # 3. Configure Kafka and zookeeper for Kerberos 19 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 20 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf 21 | 22 | EXPOSE 2181 23 | 24 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 25 | -------------------------------------------------------------------------------- /ldap/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.5/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.5 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /ldap/zookeeper/zookeeper.properties: 
-------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider 5 | requireClientAuthScheme=sasl 6 | -------------------------------------------------------------------------------- /ldap/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | user_kafka="kafka"; 4 | }; 5 | -------------------------------------------------------------------------------- /multi-sasl/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | 4 | zookeeper: 5 | build: zookeeper/ 6 | container_name: zookeeper 7 | hostname: zookeeper 8 | restart: on-failure 9 | environment: 10 | - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf 11 | 12 | kafka: 13 | build: kafka/ 14 | container_name: kafka 15 | environment: 16 | - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf 17 | depends_on: 18 | - zookeeper 19 | restart: on-failure 20 | -------------------------------------------------------------------------------- /multi-sasl/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | MAINTAINER seknop@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk-devel 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. 
Configure Kafka and zookeeper for Kerberos 15 | COPY server.properties /etc/kafka/server.properties 16 | COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf 17 | COPY consumer.properties /etc/kafka/consumer.properties 18 | COPY consumer.plain.properties /etc/kafka/consumer.plain.properties 19 | 20 | EXPOSE 9093 21 | 22 | CMD kafka-server-start /etc/kafka/server.properties 23 | -------------------------------------------------------------------------------- /multi-sasl/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /multi-sasl/kafka/consumer.plain.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="kafka" \ 5 | password="kafka"; 6 | 7 | -------------------------------------------------------------------------------- /multi-sasl/kafka/consumer.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="kafka" \ 5 | password="kafka"; 6 | 7 | -------------------------------------------------------------------------------- /multi-sasl/kafka/kafka.sasl.jaas.config: 
-------------------------------------------------------------------------------- 1 | Client { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | username="admin" 4 | password="password"; 5 | }; 6 | -------------------------------------------------------------------------------- /multi-sasl/kafka/server.properties: -------------------------------------------------------------------------------- 1 | broker.id=0 2 | listeners=SASL_PLAINTEXT://kafka:9093 3 | advertised.listeners=SASL_PLAINTEXT://kafka:9093 4 | log.dirs=/var/lib/kafka 5 | offsets.topic.replication.factor=1 6 | transaction.state.log.replication.factor=1 7 | transaction.state.log.min.isr=1 8 | zookeeper.connect=zookeeper:2181 9 | 10 | # Scram Authentication mechanism 11 | sasl.enabled.mechanisms=SCRAM-SHA-256,PLAIN 12 | sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256 13 | listener.name.sasl_plaintext.scram-sha-256.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 14 | username="kafka" \ 15 | password="kafka"; 16 | listener.name.sasl_plaintext.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 17 | username="kafka" \ 18 | password="kafka" \ 19 | user_kafka="kafka"; 20 | security.inter.broker.protocol=SASL_PLAINTEXT 21 | allow.everyone.if.no.acl.found=false 22 | super.users=User:kafka 23 | authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer 24 | zookeeper.set.acl=true 25 | -------------------------------------------------------------------------------- /multi-sasl/up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | docker-compose up -d --build 4 | 5 | # Creating the user kafka 6 | # kafka is configured as a super user, no need for additional ACL 7 | docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka 8 | 9 | echo 
"Example configuration:" 10 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --producer.config /etc/kafka/consumer.properties --topic test" 11 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --producer.config /etc/kafka/consumer.plain.properties --topic test" 12 | echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9093 --consumer.config /etc/kafka/consumer.properties --topic test --from-beginning" 13 | -------------------------------------------------------------------------------- /multi-sasl/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. Install zookeeper and kafka 15 | RUN yum install -y java-11-openjdk-devel 16 | RUN yum install -y confluent-platform-2.12 17 | 18 | # 3. 
Configure Kafka and zookeeper for Kerberos 19 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 20 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf 21 | 22 | EXPOSE 2181 23 | 24 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 25 | -------------------------------------------------------------------------------- /multi-sasl/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /multi-sasl/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider 5 | -------------------------------------------------------------------------------- /multi-sasl/zookeeper/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | user_admin="password"; 4 | }; 5 | -------------------------------------------------------------------------------- /none/.env: -------------------------------------------------------------------------------- 1 | ../.env -------------------------------------------------------------------------------- /none/up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | docker-compose up -d 4 | 5 | # Creating the user 
kafka 6 | # kafka is configured as a super user, no need for additional ACL 7 | # docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka 8 | 9 | echo "Example configuration:" 10 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9092 --topic test" 11 | echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9092 --topic test --from-beginning" 12 | -------------------------------------------------------------------------------- /oauth/.gitignore: -------------------------------------------------------------------------------- 1 | certs/ 2 | *.jks 3 | -------------------------------------------------------------------------------- /oauth/ca.cnf: -------------------------------------------------------------------------------- 1 | [ policy_match ] 2 | countryName = match 3 | stateOrProvinceName = match 4 | organizationName = match 5 | organizationalUnitName = optional 6 | commonName = supplied 7 | emailAddress = optional 8 | 9 | [ req ] 10 | prompt = no 11 | distinguished_name = dn 12 | default_md = sha256 13 | default_bits = 4096 14 | x509_extensions = v3_ca 15 | 16 | [ dn ] 17 | countryName = UK 18 | organizationName = Confluent 19 | localityName = London 20 | commonName = kafka.confluent.local 21 | 22 | [ v3_ca ] 23 | subjectKeyIdentifier=hash 24 | basicConstraints = critical,CA:true 25 | authorityKeyIdentifier=keyid:always,issuer:always 26 | keyUsage = critical,keyCertSign,cRLSign 27 | -------------------------------------------------------------------------------- /oauth/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | 4 | zookeeper: 5 | build: zookeeper/ 6 | container_name: zookeeper 7 | domainname: confluent.local 8 | hostname: zookeeper 9 | networks: 10 | default: 11 | aliases: 12 | - 
zookeeper.confluent.local 13 | 14 | kafka: 15 | build: kafka/ 16 | container_name: kafka 17 | domainname: confluent.local 18 | hostname: kafka 19 | depends_on: 20 | - zookeeper 21 | environment: 22 | - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf 23 | networks: 24 | default: 25 | aliases: 26 | - kafka.confluent.local 27 | 28 | networks: 29 | default: 30 | -------------------------------------------------------------------------------- /oauth/kafka/client.properties: -------------------------------------------------------------------------------- 1 | security.protocol=SASL_SSL 2 | sasl.mechanism=OAUTHBEARER 3 | sasl.login.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerLoginCallbackHandler 4 | ssl.truststore.location=/etc/kafka/kafka.client.truststore.jks 5 | ssl.truststore.password=secret 6 | -------------------------------------------------------------------------------- /oauth/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /oauth/kafka/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required; 3 | }; 4 | 5 | KafkaClient { 6 | org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required; 7 | }; 8 | -------------------------------------------------------------------------------- 
/oauth/kafka/oauthcallbackhandlers/.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | .idea 3 | -------------------------------------------------------------------------------- /oauth/kafka/oauthcallbackhandlers/src/main/java/io/confluent/examples/authentication/oauth/MyOauthBearerToken.java: -------------------------------------------------------------------------------- 1 | package io.confluent.examples.authentication.oauth; 2 | 3 | import lombok.Data; 4 | import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken; 5 | 6 | import java.util.HashSet; 7 | import java.util.Set; 8 | 9 | @Data 10 | public class MyOauthBearerToken implements OAuthBearerToken { 11 | 12 | private long lifetimeMs; 13 | private String value; 14 | private long startTimeMs; 15 | private String principalName; 16 | private Set scopes = new HashSet<>(); 17 | 18 | MyOauthBearerToken() { } 19 | 20 | MyOauthBearerToken(String value) { 21 | this.value = value; 22 | this.lifetimeMs = System.currentTimeMillis() + 1000 * 60 * 60; 23 | } 24 | 25 | @Override 26 | public String value() { 27 | return this.value; 28 | } 29 | 30 | @Override 31 | public Set scope() { 32 | return scopes; 33 | } 34 | 35 | @Override 36 | public long lifetimeMs() { 37 | return this.lifetimeMs; 38 | } 39 | 40 | @Override 41 | public String principalName() { 42 | return this.principalName; 43 | } 44 | 45 | @Override 46 | public Long startTimeMs() { 47 | return startTimeMs; 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /oauth/kafka/oauthcallbackhandlers/src/test/java/io/confluent/examples/authentication/oauth/JwtHelperTest.java: -------------------------------------------------------------------------------- 1 | package io.confluent.examples.authentication.oauth; 2 | 3 | import org.junit.Test; 4 | 5 | import java.io.UnsupportedEncodingException; 6 | import java.util.Arrays; 7 | import java.util.HashSet; 8 | 9 | 
import static org.junit.Assert.assertEquals; 10 | import static org.junit.Assert.assertTrue; 11 | 12 | public class JwtHelperTest { 13 | 14 | @Test 15 | public void test() throws UnsupportedEncodingException { 16 | JwtHelper underTest = new JwtHelper(); 17 | String jwt = underTest.createJwt(); 18 | MyOauthBearerToken parsed = underTest.validate(jwt); 19 | System.err.println(parsed); 20 | assertEquals("bene", parsed.getPrincipalName()); 21 | assertEquals(new HashSet<>(Arrays.asList("developer", "admin")), parsed.getScopes()); 22 | assertTrue(parsed.getStartTimeMs() <= System.currentTimeMillis()); 23 | assertTrue(parsed.getLifetimeMs() > System.currentTimeMillis()); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /oauth/kafka/server.properties: -------------------------------------------------------------------------------- 1 | ############################# Server Basics ############################# 2 | broker.id=0 3 | listeners=SASL_SSL://kafka.confluent.local:9093 4 | advertised.listeners=SASL_SSL://kafka.confluent.local:9093 5 | log.dirs=/var/lib/kafka 6 | offsets.topic.replication.factor=1 7 | transaction.state.log.replication.factor=1 8 | transaction.state.log.min.isr=1 9 | zookeeper.connect=zookeeper.confluent.local:2181 10 | 11 | # oauth bearer configuration 12 | security.inter.broker.protocol=SASL_SSL 13 | sasl.mechanism.inter.broker.protocol=OAUTHBEARER 14 | sasl.enabled.mechanisms=OAUTHBEARER 15 | listener.name.sasl_ssl.oauthbearer.sasl.server.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerValidatorCallbackHandler 16 | listener.name.sasl_ssl.oauthbearer.sasl.login.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerLoginCallbackHandler 17 | 18 | ssl.truststore.location=/etc/kafka/kafka.server.truststore.jks 19 | ssl.truststore.password=secret 20 | ssl.keystore.location=/etc/kafka/kafka.server.keystore.jks 21 | ssl.keystore.password=secret 22 | 
ssl.key.password=secret 23 | -------------------------------------------------------------------------------- /oauth/kafka/test_produce_and_consume.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo 'some sample messages 4 | sent via sasl outh bearer authentication 5 | with custom token generation and validation. 6 | ' | kafka-console-producer --broker-list kafka.confluent.local:9093 --topic test --producer.config /etc/kafka/client.properties 7 | timeout 5 kafka-console-consumer --bootstrap-server kafka.confluent.local:9093 --topic test --from-beginning --consumer.config /etc/kafka/client.properties 8 | -------------------------------------------------------------------------------- /oauth/up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | set -u 5 | 6 | pushd kafka/oauthcallbackhandlers 7 | mvn clean package 8 | popd 9 | 10 | ./generate_certs.sh 11 | 12 | docker-compose up -d --build 13 | 14 | sleep 5 15 | 16 | docker-compose exec kafka /tmp/test_produce_and_consume.sh 17 | -------------------------------------------------------------------------------- /oauth/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER seknop@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. Install zookeeper and kafka 15 | RUN yum install -y java-1.8.0-openjdk 16 | RUN yum install -y confluent-platform-2.12 17 | 18 | # 3. 
Configure Kafka and zookeeper 19 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 20 | 21 | EXPOSE 2181 22 | 23 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 24 | -------------------------------------------------------------------------------- /oauth/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | 15 | -------------------------------------------------------------------------------- /oauth/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | -------------------------------------------------------------------------------- /plain/.env: -------------------------------------------------------------------------------- 1 | ../.env -------------------------------------------------------------------------------- /plain/consumer.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="consumer" \ 5 | password="consumer-secret"; 6 | 7 | -------------------------------------------------------------------------------- /plain/producer.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 
| username="producer" \ 5 | password="producer-secret"; 6 | 7 | -------------------------------------------------------------------------------- /plain/up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | docker-compose up -d 4 | 5 | echo "Example configuration:" 6 | echo "-> kafka-console-producer --broker-list localhost:9093 --producer.config producer.properties --topic test" 7 | echo "-> kafka-console-consumer --bootstrap-server localhost:9093 --consumer.config consumer.properties --topic test --from-beginning" 8 | -------------------------------------------------------------------------------- /quotas/Client/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | MAINTAINER seknop@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.1/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. 
Install Confluent Kafka stack 11 | RUN yum install -y java-1.8.0-openjdk 12 | RUN yum install -y confluent-kafka-2.11 13 | 14 | CMD tail -f /dev/null 15 | -------------------------------------------------------------------------------- /quotas/Client/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.1/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.1/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.1 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.1/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /quotas/Grafana/provisioning/dashboards/one-quota.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | # provider name 5 | - name: 'prometheus' 6 | # org id. will default to orgId 1 if not specified 7 | orgId: 1 8 | # name of the dashboard folder. Required 9 | folder: '' 10 | # folder UID. will be automatically generated if not specified 11 | folderUid: '' 12 | # provider type. Required 13 | type: file 14 | # disable dashboard deletion 15 | disableDeletion: false 16 | # enable dashboard editing 17 | editable: true 18 | # how often Grafana will scan for changed dashboards 19 | updateIntervalSeconds: 10 20 | options: 21 | # path to dashboard files on disk. 
Required 22 | path: /etc/grafana/provisioning/dashboards 23 | -------------------------------------------------------------------------------- /quotas/JMX_Exporter/jmx_prometheus_javaagent-0.11.0.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/quotas/JMX_Exporter/jmx_prometheus_javaagent-0.11.0.jar -------------------------------------------------------------------------------- /quotas/JMX_Exporter/zookeeper_config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ssl: false 3 | lowercaseOutputName: true 4 | lowercaseOutputLabelNames: true 5 | rules: 6 | - pattern: "org.apache.ZooKeeperService<>(\\w+)" 7 | name: "cp_zookeeper_$2" 8 | -------------------------------------------------------------------------------- /quotas/Prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 3 | 4 | # Attach these labels to any time series or alerts when communicating with 5 | # external systems (federation, remote storage, Alertmanager). 6 | external_labels: 7 | monitor: 'kafka-monitor' 8 | 9 | # A scrape configuration containing exactly one endpoint to scrape: 10 | # Here it's Prometheus itself. 11 | scrape_configs: 12 | # The job name is added as a label `job=` to any timeseries scraped from this config. 13 | - job_name: 'kafka-broker' 14 | 15 | # Override the global default and scrape targets from this job every 5 seconds. 16 | scrape_interval: 5s 17 | 18 | static_configs: 19 | - targets: ['kafka:5556'] 20 | 21 | # The job name is added as a label `job=` to any timeseries scraped from this config. 22 | - job_name: 'zookeeper' 23 | 24 | # Override the global default and scrape targets from this job every 5 seconds. 
25 | scrape_interval: 5s 26 | 27 | static_configs: 28 | - targets: ['zookeeper:5556'] 29 | -------------------------------------------------------------------------------- /quotas/secrets/admin.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="kafka" \ 5 | password="kafka"; 6 | -------------------------------------------------------------------------------- /quotas/secrets/kafka_server_jaas.conf: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | org.apache.kafka.common.security.plain.PlainLoginModule required 3 | username="kafka" 4 | password="kafka" 5 | user_kafka="kafka" 6 | user_quota="quota-secret" 7 | user_noquota="noquota-secret"; 8 | }; 9 | -------------------------------------------------------------------------------- /quotas/secrets/noquota.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="noquota" \ 5 | password="noquota-secret"; 6 | 7 | -------------------------------------------------------------------------------- /quotas/secrets/quota.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="quota" \ 5 | password="quota-secret"; 6 | 7 | -------------------------------------------------------------------------------- /rbac/.env: -------------------------------------------------------------------------------- 1 | ../.env 
-------------------------------------------------------------------------------- /rbac/README.md: -------------------------------------------------------------------------------- 1 | # User hierarchy 2 | 3 | ## User names 4 | * Alice 5 | * Barnie 6 | * Charlie 7 | * Donald 8 | * Eva 9 | * Fritz 10 | * Greta 11 | 12 | ## Groups 13 | * ProjectA 14 | * ProjectB 15 | -------------------------------------------------------------------------------- /rbac/client-configs/alice.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="alice" \ 5 | password="alice-secret" ; 6 | -------------------------------------------------------------------------------- /rbac/client-configs/barnie.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="barnie" \ 5 | password="barnie-secret" ; 6 | -------------------------------------------------------------------------------- /rbac/client-configs/charlie.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="charlie" \ 5 | password="charlie-secret" ; 6 | -------------------------------------------------------------------------------- /rbac/client-configs/copy-props.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | first=$1 3 | second=$2 4 | 5 | cp ${first}.properties ${second}.properties 6 | sed -i '' "s/${first}/${second}/g" ${second}.properties 7 | 
-------------------------------------------------------------------------------- /rbac/client-configs/donald.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="donald" \ 5 | password="donald-secret" ; 6 | -------------------------------------------------------------------------------- /rbac/client-configs/eva.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="eva" \ 5 | password="eva-secret" ; 6 | -------------------------------------------------------------------------------- /rbac/client-configs/fritz.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="fritz" \ 5 | password="fritz-secret" ; 6 | -------------------------------------------------------------------------------- /rbac/client-configs/greta.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=PLAIN 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 4 | username="greta" \ 5 | password="greta-secret" ; 6 | -------------------------------------------------------------------------------- /rbac/conf/public.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PUBLIC KEY----- 2 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtzZ/i+k9IkeYSGL72tAE 3 | 2YxRqbTEcmpx4/Hag1XbgcZeH5TXJpfOekpdfeV4uef21XKCuA1AWCjEiYlVCDIp 4 | 
EaUbIL4ecCORtWkKEiWd8S06xH+I7gMF77wPm3LrXKX9ciZSrVpKqhTyp1lJJeI5 5 | xYhXBmnmpfXaPEqtUV8YR72i2BOCKq/qix0MZD4qXtTKyYrjpJS8SyTZAIEpmkAI 6 | ddQeerTJuVqZFiACpaZNEZ+/bpe3G7pa0iHFpqn7njCXHDRA4H0S/7GrjtQV05F2 7 | ahaCRIAEAfLbMc1473ZQzniOvGmiOTekEYhWKt2XqpHmSb4sBXdzdOmctmkAJZNv 8 | VQIDAQAB 9 | -----END PUBLIC KEY----- 10 | -------------------------------------------------------------------------------- /rbac/functions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | retry() { 3 | local -r -i max_attempts="$1"; shift 4 | local -r -i sleep_interval="$1"; shift 5 | local -r cmd="$@" 6 | local -i attempt_num=1 7 | 8 | until $cmd 9 | do 10 | if (( attempt_num == max_attempts )) 11 | then 12 | echo "Failed after $attempt_num attempts" 13 | return 1 14 | else 15 | printf "." 16 | ((attempt_num++)) 17 | sleep $sleep_interval 18 | fi 19 | done 20 | printf "\n" 21 | } 22 | 23 | container_healthy() { 24 | local name=$1 25 | local container=$(docker-compose ps -q $1) 26 | local healthy=$(docker inspect --format '{{ .State.Health.Status }}' $container) 27 | if [ $healthy == healthy ] 28 | then 29 | printf "$1 is healthy" 30 | return 0 31 | else 32 | return 1 33 | fi 34 | } 35 | -------------------------------------------------------------------------------- /rbac/kafka-registered.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | zookeeper-shell $1 get /cluster/id 4 | version=$(zookeeper-shell $1 get /cluster/id 2> /dev/null | grep version) 5 | echo $version 6 | if [ $version ]; then 7 | exit 0 8 | else 9 | exit 1 10 | fi -------------------------------------------------------------------------------- /rbac/ldap/custom/01_base.ldif: -------------------------------------------------------------------------------- 1 | dn: ou=users,dc=confluent,dc=io 2 | objectClass: organizationalUnit 3 | ou: Users 4 | 5 | dn: ou=groups,dc=confluent,dc=io 6 | objectClass: organizationalUnit 
7 | ou: Groups 8 | 9 | -------------------------------------------------------------------------------- /rbac/ldap/custom/02_KafkaDevelopers.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: Kafka Developers 5 | gidNumber: 5000 6 | 7 | -------------------------------------------------------------------------------- /rbac/ldap/custom/03_ProjectA.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=ProjectA,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: ProjectA 5 | gidNumber: 5001 6 | 7 | -------------------------------------------------------------------------------- /rbac/ldap/custom/04_ProjectB.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=ProjectB,ou=groups,{{ LDAP_BASE_DN }} 2 | objectClass: top 3 | objectClass: posixGroup 4 | cn: ProjectB 5 | gidNumber: 5002 6 | 7 | -------------------------------------------------------------------------------- /rbac/ldap/custom/10_alice.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=alice,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: alice 6 | sn: LookingGlass 7 | givenName: Alice 8 | cn: alice 9 | displayName: Alice LookingGlass 10 | uidNumber: 10000 11 | gidNumber: 5000 12 | userPassword: alice-secret 13 | gecos: alice 14 | loginShell: /bin/bash 15 | homeDirectory: /home/alice 16 | -------------------------------------------------------------------------------- /rbac/ldap/custom/11_barnie.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 
5 | uid: barnie 6 | sn: Rubble 7 | givenName: Barnie 8 | cn: barnie 9 | displayName: Barnie Rubble 10 | uidNumber: 10001 11 | gidNumber: 5000 12 | userPassword: barnie-secret 13 | gecos: barnie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/barnie 16 | -------------------------------------------------------------------------------- /rbac/ldap/custom/12_charlie.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=charlie,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: charlie 6 | sn: Sheen 7 | givenName: Charlie 8 | cn: charlie 9 | displayName: Charlie Sheen 10 | uidNumber: 10002 11 | gidNumber: 5000 12 | userPassword: charlie-secret 13 | gecos: charlie 14 | loginShell: /bin/bash 15 | homeDirectory: /home/charlie 16 | -------------------------------------------------------------------------------- /rbac/ldap/custom/13_donald.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=donald,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: donald 6 | sn: Duck 7 | givenName: Donald 8 | cn: donald 9 | displayName: Donald Duck 10 | uidNumber: 10003 11 | gidNumber: 5000 12 | userPassword: donald-secret 13 | gecos: donald 14 | loginShell: /bin/bash 15 | homeDirectory: /home/donald 16 | -------------------------------------------------------------------------------- /rbac/ldap/custom/14_eva.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=eva,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: eva 6 | sn: Maria 7 | givenName: Eva 8 | cn: eva 9 | displayName: Eva Maria 10 | uidNumber: 10004 11 | gidNumber: 5000 12 | userPassword: eva-secret 13 | gecos: eva 14 | loginShell: /bin/bash 15 | homeDirectory: 
/home/eva 16 | -------------------------------------------------------------------------------- /rbac/ldap/custom/15_fritz.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=fritz,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: fritz 6 | sn: Walter 7 | givenName: Fritz 8 | cn: fritz 9 | displayName: Fritz Walter 10 | uidNumber: 10005 11 | gidNumber: 5000 12 | userPassword: fritz-secret 13 | gecos: fritz 14 | loginShell: /bin/bash 15 | homeDirectory: /home/fritz 16 | -------------------------------------------------------------------------------- /rbac/ldap/custom/16_greta.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=greta,ou=users,{{ LDAP_BASE_DN }} 2 | objectClass: inetOrgPerson 3 | objectClass: posixAccount 4 | objectClass: shadowAccount 5 | uid: greta 6 | sn: Thunberg 7 | givenName: Greta 8 | cn: greta 9 | displayName: Greta Thunberg 10 | uidNumber: 10006 11 | gidNumber: 5000 12 | userPassword: greta-secret 13 | gecos: greta 14 | loginShell: /bin/bash 15 | homeDirectory: /home/greta 16 | -------------------------------------------------------------------------------- /rbac/ldap/custom/20_group_add.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 2 | changetype: modify 3 | add: memberuid 4 | memberuid: cn=alice,ou=users,{{ LDAP_BASE_DN }} 5 | 6 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 7 | changetype: modify 8 | add: memberuid 9 | memberuid: cn=barnie,ou=users,{{ LDAP_BASE_DN }} 10 | 11 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 12 | changetype: modify 13 | add: memberuid 14 | memberuid: cn=charlie,ou=users,{{ LDAP_BASE_DN }} 15 | 16 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 17 | changetype: modify 18 | add: memberuid 19 | memberuid: cn=eva,ou=users,{{ 
LDAP_BASE_DN }} 20 | 21 | dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }} 22 | changetype: modify 23 | add: memberuid 24 | memberuid: cn=fritz,ou=users,{{ LDAP_BASE_DN }} 25 | -------------------------------------------------------------------------------- /rbac/up: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## start docker-compose up to and including kafka 4 | docker-compose up -d kafka 5 | docker-compose up -d tools 6 | 7 | # wait for kafka container to be healthy 8 | source ./functions.sh 9 | echo 10 | echo "Waiting for the broker to be healthy" 11 | retry 10 5 container_healthy kafka 12 | 13 | # Create the roles 14 | echo "Creating role bindings for principals" 15 | docker-compose exec tools bash -c "/tmp/create-role-bindings.sh" || exit 1 16 | 17 | ## start remaining services 18 | 19 | docker-compose up -d 20 | 21 | echo "Services should be up:" 22 | 23 | docker-compose ps 24 | 25 | echo "Example configuration:" 26 | -------------------------------------------------------------------------------- /schema-registry/with-basic-auth-and-ccloud/jaas_config.file: -------------------------------------------------------------------------------- 1 | Schema { 2 | org.eclipse.jetty.jaas.spi.PropertyFileLoginModule required 3 | file="/tmp/password-file" 4 | debug="false"; 5 | }; 6 | -------------------------------------------------------------------------------- /schema-registry/with-basic-auth-and-ccloud/password-file: -------------------------------------------------------------------------------- 1 | read: OBF:1vgt1sar1saj1vg1,read 2 | write: OBF:1wnl1ym51unz1ym91wml,write 3 | admin: OBF:1u2a1toa1w8v1tok1u30,admin 4 | -------------------------------------------------------------------------------- /schema-registry/with-basic-auth-and-ccloud/up: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker-compose up -d 4 | # TODO: An ugly sleep to remove 
with the confluent utility belt at some point 5 | sleep 5 6 | docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p read -o SUBJECT_READ 7 | docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p write -o SUBJECT_WRITE 8 | docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p admin -o '*' 9 | 10 | # Uncomment the below 2 lines for anonymous support 11 | #docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -p 'ANONYMOUS' -o GLOBAL_READ 12 | #docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -p 'ANONYMOUS' -o GLOBAL_COMPATIBILITY_READ 13 | #docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p 'ANONYMOUS' -o SUBJECT_READ 14 | -------------------------------------------------------------------------------- /schema-registry/with-basic-auth/.env: -------------------------------------------------------------------------------- 1 | ../../.env -------------------------------------------------------------------------------- /schema-registry/with-basic-auth/jaas_config.file: -------------------------------------------------------------------------------- 1 | Schema { 2 | org.eclipse.jetty.jaas.spi.PropertyFileLoginModule required 3 | file="/tmp/password-file" 4 | debug="false"; 5 | }; 6 | -------------------------------------------------------------------------------- /schema-registry/with-basic-auth/password-file: -------------------------------------------------------------------------------- 1 | read: OBF:1vgt1sar1saj1vg1,read 2 | write: OBF:1wnl1ym51unz1ym91wml,write 3 | admin: OBF:1u2a1toa1w8v1tok1u30,admin 4 | -------------------------------------------------------------------------------- 
/schema-registry/with-basic-auth/up: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker-compose up -d 4 | 5 | docker-compose logs schema-registry | grep "Server started, listening for requests" 6 | while (( $? == 1 )) 7 | do 8 | sleep 1 9 | echo "Waiting for schema registry to be started ..." 10 | docker-compose logs schema-registry | grep "Server started, listening for requests" 11 | done 12 | 13 | docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p read -o SUBJECT_READ 14 | docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p write -o SUBJECT_WRITE 15 | docker-compose exec schema-registry sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p admin -o '*' 16 | 17 | echo "Schema Registry is listening on http://localhost:8089" 18 | echo "-> user:password | description" 19 | echo "-> _____________" 20 | echo "-> read:read | Global read access" 21 | echo "-> write:write | Global write access" 22 | echo "-> admin:admin | Global admin access" 23 | -------------------------------------------------------------------------------- /schema-registry/with-http_and_https/.env: -------------------------------------------------------------------------------- 1 | ../../.env -------------------------------------------------------------------------------- /schema-registry/with-http_and_https/.gitignore: -------------------------------------------------------------------------------- 1 | schema-registry/secrets/client*pem 2 | schema-registry/secrets/client.p12 3 | -------------------------------------------------------------------------------- /schema-registry/with-http_and_https/schema-registry/config/ca.cnf: -------------------------------------------------------------------------------- 1 | [ policy_match ] 2 | countryName = match 3 | stateOrProvinceName = 
match 4 | organizationName = match 5 | organizationalUnitName = optional 6 | commonName = supplied 7 | emailAddress = optional 8 | 9 | [ req ] 10 | prompt = no 11 | distinguished_name = dn 12 | default_md = sha256 13 | default_bits = 4096 14 | x509_extensions = v3_ca 15 | 16 | [ dn ] 17 | countryName = DE 18 | organizationName = Confluent 19 | localityName = Berlin 20 | commonName = schema-registry.confluent.local 21 | 22 | [ v3_ca ] 23 | subjectKeyIdentifier=hash 24 | basicConstraints = critical,CA:true 25 | authorityKeyIdentifier=keyid:always,issuer:always 26 | keyUsage = critical,keyCertSign,cRLSign 27 | -------------------------------------------------------------------------------- /schema-registry/with-http_and_https/schema-registry/config/client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=schema-registry.client 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = clientAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=localhost 30 | -------------------------------------------------------------------------------- /schema-registry/with-http_and_https/schema-registry/secrets/schema-registry.keystore: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/schema-registry/with-http_and_https/schema-registry/secrets/schema-registry.keystore -------------------------------------------------------------------------------- /schema-registry/with-http_and_https/schema-registry/secrets/schema-registry.truststore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/schema-registry/with-http_and_https/schema-registry/secrets/schema-registry.truststore -------------------------------------------------------------------------------- /scram/.env: -------------------------------------------------------------------------------- 1 | ../.env -------------------------------------------------------------------------------- /scram/admin.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="admin" \ 5 | password="admin-secret"; 6 | 7 | -------------------------------------------------------------------------------- /scram/consumer.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="consumer" \ 5 | password="consumer-secret"; 6 | 7 | -------------------------------------------------------------------------------- /scram/jline-2.14.6.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/scram/jline-2.14.6.jar 
-------------------------------------------------------------------------------- /scram/kafka.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Client { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | username="admin" 4 | password="password"; 5 | }; 6 | -------------------------------------------------------------------------------- /scram/producer.properties: -------------------------------------------------------------------------------- 1 | sasl.mechanism=SCRAM-SHA-256 2 | security.protocol=SASL_PLAINTEXT 3 | sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \ 4 | username="producer" \ 5 | password="producer-secret"; 6 | 7 | -------------------------------------------------------------------------------- /scram/zookeeper.sasl.jaas.config: -------------------------------------------------------------------------------- 1 | Server { 2 | org.apache.zookeeper.server.auth.DigestLoginModule required 3 | user_admin="password"; 4 | }; 5 | -------------------------------------------------------------------------------- /secure-jmx/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | 4 | zookeeper: 5 | build: zookeeper/ 6 | container_name: zookeeper 7 | hostname: zookeeper 8 | volumes: 9 | - ./secrets/:/var/ssl/private 10 | environment: 11 | KAFKA_JMX_OPTS: " -Dcom.sun.management.config.file=/var/ssl/private/jmxremote.properties" 12 | 13 | kafka: 14 | build: kafka/ 15 | container_name: kafka 16 | depends_on: 17 | - zookeeper 18 | volumes: 19 | - ./secrets/:/var/ssl/private 20 | environment: 21 | KAFKA_JMX_OPTS: "-Dcom.sun.management.config.file=/var/ssl/private/jmxremote.properties" 22 | #KAFKA_JMX_OPTS: "-Dcom.sun.management.jmxremote.port=9999 -Dcom.sun.management.jmxremote.rmi.port=9999 -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote=true 
-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false" 23 | -------------------------------------------------------------------------------- /secure-jmx/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | MAINTAINER pere.urbon@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. Configure Kafka and zookeeper for Kerberos 15 | COPY server.properties /etc/kafka/server.properties 16 | COPY consumer.properties /etc/kafka/consumer.properties 17 | 18 | EXPOSE 9093 19 | EXPOSE 9999 20 | 21 | CMD kafka-server-start /etc/kafka/server.properties 22 | -------------------------------------------------------------------------------- /secure-jmx/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.5/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.5 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /secure-jmx/kafka/consumer.properties: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /secure-jmx/pull-jmx-kafka.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 
MY_KAFKA_OPTS="-Djavax.net.ssl.keyStore=/var/ssl/private/kafka.keystore -Djavax.net.ssl.keyStorePassword=confluent -Djavax.net.ssl.trustStore=/var/ssl/private/kafka.truststore -Djavax.net.ssl.trustStorePassword=confluent" 4 | 5 | docker-compose exec -e KAFKA_JMX_OPTS="" -e KAFKA_OPTS="$MY_KAFKA_OPTS" kafka kafka-run-class kafka.tools.JmxTool \ 6 | --object-name kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec \ 7 | --jmx-ssl-enable true --jmx-auth-prop admin=adminpassword 8 | -------------------------------------------------------------------------------- /secure-jmx/pull-jmx-zookeeper.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | MY_KAFKA_OPTS="-Djavax.net.ssl.keyStore=/var/ssl/private/kafka.keystore -Djavax.net.ssl.keyStorePassword=confluent -Djavax.net.ssl.trustStore=/var/ssl/private/kafka.truststore -Djavax.net.ssl.trustStorePassword=confluent" 4 | 5 | docker-compose exec -e KAFKA_JMX_OPTS="" -e KAFKA_OPTS="$MY_KAFKA_OPTS" zookeeper kafka-run-class kafka.tools.JmxTool \ 6 | --object-name org.apache.ZooKeeperService:name0=StandaloneServer_port2181 \ 7 | --jmx-ssl-enable true --jmx-auth-prop admin=adminpassword 8 | 9 | 10 | #get -s -b org.apache.ZooKeeperService:name0=StandaloneServer_port2181 AvgRequestLatency 11 | -------------------------------------------------------------------------------- /secure-jmx/secrets/client.keystore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/secure-jmx/secrets/client.keystore -------------------------------------------------------------------------------- /secure-jmx/secrets/client.truststore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/secure-jmx/secrets/client.truststore 
-------------------------------------------------------------------------------- /secure-jmx/secrets/jmxremote.access: -------------------------------------------------------------------------------- 1 | admin readwrite 2 | user readonly 3 | -------------------------------------------------------------------------------- /secure-jmx/secrets/jmxremote.password: -------------------------------------------------------------------------------- 1 | admin adminpassword 2 | user userpassword 3 | -------------------------------------------------------------------------------- /secure-jmx/secrets/jmxremote.properties: -------------------------------------------------------------------------------- 1 | com.sun.management.jmxremote=true 2 | com.sun.management.jmxremote.port=9999 3 | com.sun.management.jmxremote.rmi.port=9999 4 | com.sun.management.jmxremote.password.file=/var/ssl/private/jmxremote.password 5 | com.sun.management.jmxremote.access.file=/var/ssl/private/jmxremote.access 6 | com.sun.management.jmxremote.registry.ssl=true 7 | com.sun.management.jmxremote.ssl.config.file=/var/ssl/private/jmxremote.properties 8 | 9 | javax.net.ssl.keyStore=/var/ssl/private/kafka.keystore 10 | javax.net.ssl.keyStorePassword=confluent 11 | javax.net.ssl.trustStore=/var/ssl/private/kafka.truststore 12 | javax.net.ssl.trustStorePassword=confluent 13 | -------------------------------------------------------------------------------- /secure-jmx/secrets/kafka.keystore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/secure-jmx/secrets/kafka.keystore -------------------------------------------------------------------------------- /secure-jmx/secrets/kafka.truststore: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/secure-jmx/secrets/kafka.truststore -------------------------------------------------------------------------------- /secure-jmx/up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | docker-compose up -d --build 4 | 5 | # Creating the user kafka 6 | # kafka is configured as a super user, no need for additional ACL 7 | # docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-255=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka 8 | 9 | echo 10 | echo "Example jmx pulling: ./pull-jmx.sh" 11 | echo 12 | echo "other tools useful to check this are any JMX consumer like jconsole or others." 13 | -------------------------------------------------------------------------------- /secure-jmx/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | MAINTAINER pere.urbon@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-platform-2.12 13 | 14 | # 3. 
Configure Kafka and zookeeper for Kerberos 15 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 16 | 17 | EXPOSE 2181 18 | EXPOSE 9998 19 | 20 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 21 | -------------------------------------------------------------------------------- /secure-jmx/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.5/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.5 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.5/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /secure-jmx/zookeeper/jmxremote.access: -------------------------------------------------------------------------------- 1 | admin readwrite 2 | user readonly 3 | -------------------------------------------------------------------------------- /secure-jmx/zookeeper/jmxremote.password: -------------------------------------------------------------------------------- 1 | admin adminpassword 2 | user userpassword 3 | -------------------------------------------------------------------------------- /secure-jmx/zookeeper/jmxremote.properties: -------------------------------------------------------------------------------- 1 | com.sun.management.jmxremote=true 2 | com.sun.management.jmxremote.port=9998 3 | com.sun.management.jmxremote.rmi.port=9998 4 | com.sun.management.jmxremote.password.file=/var/ssl/private/jmxremote.password 5 | com.sun.management.jmxremote.access.file=/var/ssl/private/jmxremote.access 6 | com.sun.management.jmxremote.registry.ssl=true 7 | com.sun.management.jmxremote.ssl.config.file=/var/ssl/private/jmxremote.properties 8 | 9 | 
javax.net.ssl.keyStore=/var/ssl/private/kafka.keystore 10 | javax.net.ssl.keyStorePassword=confluent 11 | javax.net.ssl.trustStore=/var/ssl/private/kafka.truststore 12 | javax.net.ssl.trustStorePassword=confluent 13 | -------------------------------------------------------------------------------- /secure-jmx/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # the directory where the snapshot is stored. 
16 | dataDir=/var/lib/zookeeper 17 | # the port at which the clients will connect 18 | clientPort=2181 19 | # disable the per-ip limit on the number of connections since this is a non-production config 20 | maxClientCnxns=0 21 | -------------------------------------------------------------------------------- /tls-with-ocrl/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/tls-with-ocrl/.gitignore -------------------------------------------------------------------------------- /tls-with-ocrl/certs/broker.keystore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/tls-with-ocrl/certs/broker.keystore -------------------------------------------------------------------------------- /tls-with-ocrl/certs/broker.truststore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/tls-with-ocrl/certs/broker.truststore -------------------------------------------------------------------------------- /tls-with-ocrl/certs/client.keystore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/tls-with-ocrl/certs/client.keystore -------------------------------------------------------------------------------- /tls-with-ocrl/certs/client.truststore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/tls-with-ocrl/certs/client.truststore -------------------------------------------------------------------------------- 
/tls-with-ocrl/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | zookeeper: 4 | build: zookeeper/ 5 | container_name: zookeeper 6 | hostname: zookeeper 7 | domainname: confluent.local 8 | restart: on-failure 9 | networks: 10 | default: 11 | aliases: 12 | - zookeeper.confluent.local 13 | 14 | kafka: 15 | build: kafka/ 16 | container_name: kafka 17 | hostname: kafka 18 | domainname: confluent.local 19 | depends_on: 20 | - zookeeper 21 | restart: on-failure 22 | environment: 23 | - KAFKA_OPTS=-Dcom.sun.security.enableCRLDP=true -Dcom.sun.net.ssl.checkRevocation=true 24 | # - KAFKA_OPTS=-Djavax.net.debug=all -Djava.security.debug=all 25 | volumes: 26 | - ./certs/:/var/lib/secret 27 | networks: 28 | default: 29 | aliases: 30 | - kafka.confluent.local 31 | ports: 32 | - "9093:9093" 33 | 34 | apache: 35 | image: 'httpd:2.4' 36 | container_name: httpd 37 | hostname: httpd 38 | ports: 39 | - "18080:80" 40 | volumes: 41 | - ./web/:/usr/local/apache2/htdocs/ 42 | 43 | volumes: 44 | secret: {} 45 | 46 | networks: 47 | default: 48 | -------------------------------------------------------------------------------- /tls-with-ocrl/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. 
Install zookeeper and kafka 15 | RUN yum install -y java-1.8.0-openjdk 16 | RUN yum install -y confluent-platform-2.12 17 | #schema-registry package is required to run kafka-avro-console-producer 18 | RUN yum install -y confluent-schema-registry 19 | 20 | # 3. Configure Kafka 21 | COPY server.properties /etc/kafka/server.properties 22 | COPY consumer.properties /etc/kafka/consumer.properties 23 | 24 | EXPOSE 9093 25 | 26 | CMD kafka-server-start /etc/kafka/server.properties 27 | -------------------------------------------------------------------------------- /tls-with-ocrl/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /tls-with-ocrl/kafka/consumer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka.confluent.local:9093 2 | security.protocol=SSL 3 | ssl.truststore.location=/var/lib/secret/client.truststore 4 | ssl.truststore.password=confluent 5 | ssl.keystore.location=/var/lib/secret/client.keystore 6 | ssl.keystore.password=confluent 7 | -------------------------------------------------------------------------------- /tls-with-ocrl/kafka/server.properties: -------------------------------------------------------------------------------- 1 | broker.id=0 2 | listeners=SSL://kafka.confluent.local:9093,PLAINTEXT://kafka.confluent.local:9092 3 | advertised.listeners=SSL://kafka.confluent.local:9093,PLAINTEXT://kafka.confluent.local:9092 4 | log.dirs=/var/lib/kafka 5 | 
offsets.topic.replication.factor=1 6 | transaction.state.log.replication.factor=1 7 | transaction.state.log.min.isr=1 8 | zookeeper.connect=zookeeper.confluent.local:2181 9 | 10 | # TLS Configuration 11 | security.inter.broker.protocol=SSL 12 | ssl.truststore.location=/var/lib/secret/broker.truststore 13 | ssl.truststore.password=confluent 14 | ssl.keystore.location=/var/lib/secret/broker.keystore 15 | ssl.keystore.password=confluent 16 | ssl.client.auth=required 17 | authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer 18 | super.users=User:CN=kafka.confluent.local,L=London,O=Confluent,C=UK;User:CN=schema-registry.confluent.local,L=London,O=Confluent,C=UK;User:CN=kafka.confluent.local,O=Confluent Ltd,L=Berlin,ST=Berlin,C=DE;User:CN=producer1,O=Confluent Ltd,L=Berlin,ST=Berlin,C=DE 19 | -------------------------------------------------------------------------------- /tls-with-ocrl/up: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | # Starting docker-compose services 5 | docker-compose up -d --build 6 | 7 | echo "Example configuration to access kafka:" 8 | echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka.confluent.local:9093 --topic test --producer.config /etc/kafka/consumer.properties" 9 | echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka.confluent.local:9093 --topic test --consumer.config /etc/kafka/consumer.properties --from-beginning" 10 | -------------------------------------------------------------------------------- /tls-with-ocrl/web/crls.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN X509 CRL----- 2 | MIIDDzCB+AIBATANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJERTEPMA0GA1UE 3 | CAwGQmVybGluMRYwFAYDVQQKDA1Db25mbHVlbnQgTHRkMQswCQYDVQQLDAJQUzEY 4 | MBYGA1UEAwwPSW50ZXJtZWRpYXRlIENBMR4wHAYJKoZIhvcNAQkBFg9jYUBjb25m 5 | bHVlbnQuaW8XDTE5MDgzMDE0MDA0NFoXDTE5MDkyOTE0MDA0NFowFTATAgIQAhcN 6 
| MTkwODMwMTQwMDMyWqAwMC4wHwYDVR0jBBgwFoAUuEd/Mi/LdUwtRm8Sj4orD55j 7 | TPcwCwYDVR0UBAQCAhAAMA0GCSqGSIb3DQEBCwUAA4ICAQC9wIZkWRf4i52FYeYR 8 | hlvV1Z+DzGMMcg+wPhDxdTHWieA4eJZVDbOpY8P7nM+voU/+QYsF3oTW1lrJV2aO 9 | dTebeLG1t3/40IzvkG70aRgFe199gLj3+ke6UZzrVzD8KqY+pci94uYcwZgr1nxD 10 | SXyD8WffuYRJ9hf5huInZRnp34ECnSTX7gTh2oaoV4SLI1CXKXB62i9OMShOfcQj 11 | 0Uc4DAE5BgZe9uUx2tLeA3vDLCdcrQrPMjy2j536V2U4KyvdY9IiblMvqt2Y0FmU 12 | cxdVL5mo+LUAt3b1fSoOcypxqdlAydxlMBVg8ZDYfw/l44KLA2v3yguKUhtjvYCa 13 | rL24TyltI1I2PYSZJ8pObg+MC9pwjwsSQG2bQOr5scAU4FukFVao2Stc6JSHj5Ng 14 | J/5ExpKpT6k2GY+OU1FZDD0Jku1IZXtyTBYpr3ynxtD2aEgO2Iveh/w5eLT5FDIR 15 | uTRPQBCfwAKFNulv8aDT8BtSYl1Xj5xlG0h1ROyFyfbqcns/Zf3MKcZpTw7MF9xk 16 | /2SzbsinhNBk2vi0WbhA8zCP1+P/rgLCWlDUJbagXzaeWEL3VOgDaqMJ3ks0ruTy 17 | tBBX+kdOoKrG7notMTivsqwsYA4/ZYXhDupA106Z4/1h4zJFVDWpoKnKRLD3WryI 18 | FR/OjytC6XgcNAgGw3Gff8hEcw== 19 | -----END X509 CRL----- 20 | -------------------------------------------------------------------------------- /tls-with-ocrl/zookeeper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos8 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 0. Fixing Mirror list for Centos 6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-* 7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-* 8 | 9 | # 1. Adding Confluent repository 10 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key 11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 12 | RUN yum clean all 13 | 14 | # 2. Install zookeeper and kafka 15 | RUN yum install -y java-1.8.0-openjdk 16 | RUN yum install -y confluent-platform-2.12 17 | 18 | # 3. 
Configure zookeeper 19 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 20 | 21 | EXPOSE 2181 22 | 23 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 24 | -------------------------------------------------------------------------------- /tls-with-ocrl/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/5.4/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/5.4 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /tls-with-ocrl/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | -------------------------------------------------------------------------------- /tls/.gitignore: -------------------------------------------------------------------------------- 1 | certs 2 | -------------------------------------------------------------------------------- /tls/ca.cnf: -------------------------------------------------------------------------------- 1 | [ policy_match ] 2 | countryName = match 3 | stateOrProvinceName = match 4 | organizationName = match 5 | organizationalUnitName = optional 6 | commonName = supplied 7 | emailAddress = optional 8 | 9 | [ req ] 10 | prompt = no 11 | distinguished_name = dn 12 | default_md = sha256 13 | default_bits = 4096 14 | x509_extensions = v3_ca 15 | 16 | [ dn ] 17 | countryName = UK 18 | organizationName = Confluent 19 | localityName = London 20 | commonName = kafka.confluent.local 21 | 22 | [ v3_ca ] 23 | subjectKeyIdentifier=hash 24 | basicConstraints = 
critical,CA:true 25 | authorityKeyIdentifier=keyid:always,issuer:always 26 | keyUsage = critical,keyCertSign,cRLSign 27 | -------------------------------------------------------------------------------- /tls/client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=kafka.confluent.local 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = clientAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=kafka.confluent.local 30 | -------------------------------------------------------------------------------- /tls/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos7 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-server 13 | 14 | # 3. Configure Kafka 15 | COPY server.properties /etc/kafka/server.properties 16 | COPY consumer.properties /etc/kafka/consumer.properties 17 | 18 | # 4. 
Add kafkacat 19 | COPY kafkacat /usr/local/bin 20 | RUN chmod +x /usr/local/bin/kafkacat 21 | COPY kafkacat.conf /etc/kafka/kafkacat.conf 22 | 23 | EXPOSE 9093 24 | 25 | CMD kafka-server-start /etc/kafka/server.properties 26 | -------------------------------------------------------------------------------- /tls/kafka/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/6.0/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/6.0 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /tls/kafka/consumer.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=kafka.confluent.local:9093 2 | security.protocol=SSL 3 | ssl.truststore.location=/var/lib/secret/truststore.jks 4 | ssl.truststore.password=test1234 5 | ssl.keystore.location=/var/lib/secret/client.keystore.jks 6 | ssl.keystore.password=test1234 7 | -------------------------------------------------------------------------------- /tls/kafka/kafkacat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-security-playbook/24ee035b30cc38a4a4f00012013b0885e0ccfe1d/tls/kafka/kafkacat -------------------------------------------------------------------------------- /tls/kafka/kafkacat.conf: -------------------------------------------------------------------------------- 1 | security.protocol=SSL 2 | ssl.key.location=/var/lib/secret/client.pem 3 | ssl.key.password=test1234 4 | ssl.certificate.location=/var/lib/secret/client.pem 5 | ssl.ca.location=/var/lib/secret/ca.pem 6 | 
-------------------------------------------------------------------------------- /tls/kafkacat.conf: -------------------------------------------------------------------------------- 1 | security.protocol=SSL 2 | ssl.key.location=certs/client.pem 3 | ssl.key.password=test1234 4 | ssl.certificate.location=certs/client.pem 5 | ssl.ca.location=certs/ca.pem 6 | -------------------------------------------------------------------------------- /tls/local-client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=Kiril-Piskunov.local 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = clientAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=Kiril-Piskunov.local 30 | -------------------------------------------------------------------------------- /tls/schema-registry-client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=schema-registry.confluent.local 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ 
v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = clientAuth, serverAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=schema-registry.confluent.local 30 | -------------------------------------------------------------------------------- /tls/schema-registry/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos7 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-schema-registry confluent-security 13 | 14 | # 3. Configure Kafka 15 | COPY schema-registry.properties /etc/schema-registry/schema-registry.properties 16 | 17 | EXPOSE 8443 18 | 19 | CMD schema-registry-start /etc/schema-registry/schema-registry.properties 20 | -------------------------------------------------------------------------------- /tls/schema-registry/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/6.0/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/6.0 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /tls/schema-registry/schema-registry.properties: -------------------------------------------------------------------------------- 1 | 
listeners=https://schema-registry.confluent.local:8443 2 | inter.instance.protocol=https 3 | ssl.keystore.location=/var/lib/secret/schema-registry-client.keystore.jks 4 | ssl.keystore.password=test1234 5 | ssl.key.password=test1234 6 | kafkastore.topic=_schemas 7 | debug=false 8 | 9 | #SSL settings for communication with Kafka Broker 10 | kafkastore.bootstrap.servers=SSL://kafka.confluent.local:9093 11 | kafkastore.security.protocol=SSL 12 | 13 | #SSL trust store to verify cert presented by the broker 14 | kafkastore.ssl.truststore.location=/var/lib/secret/truststore.jks 15 | kafkastore.ssl.truststore.password=test1234 16 | 17 | #SSL key store to provide a cert for the broker 18 | kafkastore.ssl.keystore.location=/var/lib/secret/schema-registry-client.keystore.jks 19 | kafkastore.ssl.keystore.password=test1234 20 | kafkastore.ssl.key.password=test1234 21 | -------------------------------------------------------------------------------- /tls/server.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | prompt = no 3 | distinguished_name = dn 4 | default_md = sha256 5 | default_bits = 4096 6 | req_extensions = v3_req 7 | 8 | [ dn ] 9 | countryName = UK 10 | organizationName = Confluent 11 | localityName = London 12 | commonName=kafka.confluent.local 13 | 14 | [ v3_ca ] 15 | subjectKeyIdentifier=hash 16 | basicConstraints = critical,CA:true 17 | authorityKeyIdentifier=keyid:always,issuer:always 18 | keyUsage = critical,keyCertSign,cRLSign 19 | 20 | [ v3_req ] 21 | subjectKeyIdentifier = hash 22 | basicConstraints = CA:FALSE 23 | nsComment = "OpenSSL Generated Certificate" 24 | keyUsage = critical, digitalSignature, keyEncipherment 25 | extendedKeyUsage = serverAuth, clientAuth 26 | subjectAltName = @alt_names 27 | 28 | [ alt_names ] 29 | DNS.1=kafka.confluent.local 30 | -------------------------------------------------------------------------------- /tls/zookeeper/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM centos:centos7 2 | MAINTAINER d.gasparina@gmail.com 3 | ENV container docker 4 | 5 | # 1. Adding Confluent repository 6 | RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key 7 | COPY confluent.repo /etc/yum.repos.d/confluent.repo 8 | RUN yum clean all 9 | 10 | # 2. Install zookeeper and kafka 11 | RUN yum install -y java-11-openjdk 12 | RUN yum install -y confluent-platform 13 | 14 | # 3. Configure zookeeper 15 | COPY zookeeper.properties /etc/kafka/zookeeper.properties 16 | 17 | EXPOSE 2181 18 | 19 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties 20 | -------------------------------------------------------------------------------- /tls/zookeeper/confluent.repo: -------------------------------------------------------------------------------- 1 | [Confluent.dist] 2 | name=Confluent repository (dist) 3 | baseurl=https://packages.confluent.io/rpm/6.0/7 4 | gpgcheck=1 5 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 6 | enabled=1 7 | 8 | [Confluent] 9 | name=Confluent repository 10 | baseurl=https://packages.confluent.io/rpm/6.0 11 | gpgcheck=1 12 | gpgkey=https://packages.confluent.io/rpm/6.0/archive.key 13 | enabled=1 14 | -------------------------------------------------------------------------------- /tls/zookeeper/zookeeper.properties: -------------------------------------------------------------------------------- 1 | dataDir=/var/lib/zookeeper 2 | clientPort=2181 3 | maxClientCnxns=0 4 | --------------------------------------------------------------------------------