├── images ├── process.png └── app-architecture.png ├── kafka-streams-source-cluster.properties ├── .gitignore ├── prometheus ├── jmx_prometheus_javaagent-0.15.0.jar └── kafka-streams-prometheus.yaml ├── connect ├── jars │ └── replicator-rest-extension-6.1.0.jar ├── Dockerfile ├── file-source-connector.json ├── file-sink-connector.json └── replicator.json ├── msk-migration ├── configmaps │ ├── msk.properties │ └── cloud.properties ├── Dockerfile ├── app-pod.yaml ├── migrate ├── connectors │ └── replicator.json ├── up └── values.yaml ├── kafka-streams-app ├── Dockerfile ├── src │ └── main │ │ ├── resources │ │ └── log4j.properties │ │ └── java │ │ └── org │ │ └── example │ │ └── WordCount.java └── pom.xml ├── kafka-streams-cloud-cluster.properties ├── up ├── README.md ├── migrate └── docker-compose.yml /images/process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-streams-migration/HEAD/images/process.png -------------------------------------------------------------------------------- /kafka-streams-source-cluster.properties: -------------------------------------------------------------------------------- 1 | application.id=streams-wordcount 2 | bootstrap.servers=broker:29092 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **.idea 2 | **.git 3 | **target/ 4 | **/*.iml 5 | data/sink.txt 6 | data/hsperfdata_appuser 7 | .ccloud 8 | -------------------------------------------------------------------------------- /images/app-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-streams-migration/HEAD/images/app-architecture.png -------------------------------------------------------------------------------- /prometheus/jmx_prometheus_javaagent-0.15.0.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-streams-migration/HEAD/prometheus/jmx_prometheus_javaagent-0.15.0.jar -------------------------------------------------------------------------------- /connect/jars/replicator-rest-extension-6.1.0.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Dabz/kafka-streams-migration/HEAD/connect/jars/replicator-rest-extension-6.1.0.jar -------------------------------------------------------------------------------- /connect/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM confluentinc/cp-server-connect-base:7.0.0 2 | 3 | ENV CONNECT_PLUGIN_PATH="/usr/share/java,/usr/share/confluent-hub-components,/usr/share/filestream-connectors" 4 | 5 | RUN confluent-hub install --no-prompt confluentinc/kafka-connect-replicator:7.0.0 6 | -------------------------------------------------------------------------------- /msk-migration/configmaps/msk.properties: -------------------------------------------------------------------------------- 1 | bootstrap.servers=b-3.damien-cluster.7mzb6l.c2.kafka.eu-west-3.amazonaws.com:9092,b-1.damien-cluster.7mzb6l.c2.kafka.eu-west-3.amazonaws.com:9092,b-2.damien-cluster.7mzb6l.c2.kafka.eu-west-3.amazonaws.com:9092 2 | application.id=app-migration 3 | -------------------------------------------------------------------------------- /kafka-streams-app/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM maven:3.6.3-jdk-8 2 | COPY ./ ./ 3 | RUN mvn clean package 4 | CMD ["java", "-javaagent:/tmp/prometheus/jmx_prometheus_javaagent-0.15.0.jar=2112:/tmp/prometheus/kafka-streams-prometheus.yaml", "-jar", "target/kafka-streams-app-1.0-SNAPSHOT-jar-with-dependencies.jar", "/tmp/conf.properties"] 5 | -------------------------------------------------------------------------------- /msk-migration/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM confluentinc/cp-server-connect-base:7.0.1 2 | USER root 3 | RUN confluent-hub install confluentinc/kafka-connect-replicator:7.0.1 --no-prompt 4 | RUN cp /usr/share/confluent-hub-components/confluentinc-kafka-connect-replicator/lib/replicator-rest-extension-7.0.1.jar /usr/share/java/kafka/ 5 | USER appuser 6 | -------------------------------------------------------------------------------- /connect/file-source-connector.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "connector-source", 3 | "config": { 4 | "connector.class" : "FileStreamSourceConnector", 5 | "tasks.max" : 1, 6 | "topic" : "streams-plaintext-input", 7 | "file" : "/tmp/source.txt", 8 | "key.converter": "org.apache.kafka.connect.storage.StringConverter", 9 | "value.converter": "org.apache.kafka.connect.storage.StringConverter" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /kafka-streams-cloud-cluster.properties: -------------------------------------------------------------------------------- 1 | application.id=cloud-wordcount 2 | bootstrap.servers=CCLOUD_CLUSTER 3 | security.protocol=SASL_SSL 4 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='CLUSTER_API_KEY' password='CLUSTER_API_SECRET'; 5 | sasl.mechanism=PLAIN 6 | # Required for correctness in Apache Kafka clients prior to 2.6 7 | client.dns.lookup=use_all_dns_ips 8 | producer.acks=all 9 | auto.offset.reset=latest 10 | -------------------------------------------------------------------------------- /msk-migration/configmaps/cloud.properties: -------------------------------------------------------------------------------- 1 | application.id=app-migration 2 | bootstrap.servers=pkc-4r297.europe-west1.gcp.confluent.cloud:9092 3 | key.converter=org.apache.kafka.connect.storage.StringConverter 4 | value.converter=io.confluent.connect.avro.AvroConverter 5 | ssl.endpoint.identification.algorithm=https 6 | security.protocol=SASL_SSL 7 | sasl.mechanism=PLAIN 8 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="CLUSTER_API_KEY" password="CLUSTER_API_SECRET"; 9 | request.timeout.ms=20000 10 | 11 | -------------------------------------------------------------------------------- /connect/file-sink-connector.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "connector-sink", 3 | "config": { 4 | "connector.class" : "org.apache.kafka.connect.file.FileStreamSinkConnector", 5 | "tasks.max" : 1, 6 | "topics" : "streams-plaintext-output", 7 | "file" : "/tmp/sink.txt", 8 | "key.converter": "org.apache.kafka.connect.storage.StringConverter", 9 | "value.converter": "org.apache.kafka.connect.storage.StringConverter", 10 | "consumer.override.isolation.level": "read_committed", 11 | "consumer.override.auto.offset.reset": "latest" 12 | } 13 | } 14 | 
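A note on the sink connector above: because the Kafka Streams application runs with exactly-once processing, `consumer.override.isolation.level=read_committed` keeps aborted transactional records out of `data/sink.txt`. A minimal way to observe the effect of the isolation level, assuming the local broker from `docker-compose.yml` is running and the output topic already holds data (a sketch, not part of the repository):

```sh
# Read only committed records, as the sink connector does
docker-compose exec broker kafka-console-consumer \
  --bootstrap-server localhost:29092 \
  --topic streams-plaintext-output \
  --from-beginning --timeout-ms 10000 \
  --isolation-level read_committed

# For comparison, read_uncommitted also returns records from open or aborted transactions
docker-compose exec broker kafka-console-consumer \
  --bootstrap-server localhost:29092 \
  --topic streams-plaintext-output \
  --from-beginning --timeout-ms 10000 \
  --isolation-level read_uncommitted
```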
-------------------------------------------------------------------------------- /kafka-streams-app/src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | log4j.rootLogger=INFO, console 16 | 17 | log4j.appender.console=org.apache.log4j.ConsoleAppender 18 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 19 | log4j.appender.console.layout.ConversionPattern=%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n -------------------------------------------------------------------------------- /msk-migration/app-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: app 5 | spec: 6 | containers: 7 | - name: app 8 | image: app:latest 9 | imagePullPolicy: IfNotPresent 10 | command: ["java", "-javaagent:/tmp/prometheus/jmx_prometheus_javaagent-0.15.0.jar=2112:/tmp/prometheus/kafka-streams-prometheus.yaml", "-jar", "target/kafka-streams-app-1.0-SNAPSHOT-jar-with-dependencies.jar", "/tmp/conf/msk.properties"] 11 | volumeMounts: 12 | - name: app-config 13 | mountPath: /tmp/conf 14 | - name: app-prometheus 15 | mountPath: /tmp/prometheus 16 | env: 17 | - name: MSK_BOOTSTRAP_SERVERS 18 | value: b-3.damien-cluster.7mzb6l.c2.kafka.eu-west-3.amazonaws.com:9092,b-1.damien-cluster.7mzb6l.c2.kafka.eu-west-3.amazonaws.com:9092,b-2.damien-cluster.7mzb6l.c2.kafka.eu-west-3.amazonaws.com:9092 19 | - name: CLUSTER_TYPE 20 | value: MSK 21 | volumes: 22 | - name: app-config 23 | configMap: 24 | name: msk-app-config 25 | - name: app-prometheus 26 | configMap: 27 | name: app-prometheus 28 | 29 | -------------------------------------------------------------------------------- /msk-migration/migrate: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | require_command() { 4 | for var in "$@"; do 5 | if which $var 2>&1 > /dev/null; then 6 | continue 7 | fi 8 | echo "$var is missing" 9 | exit 1 10 | done 11 | 12 | } 13 | 14 | require_env() { 15 | for var in "$@"; do 16 | if printenv $var 2>&1 > /dev/null; then 17 | continue 18 | fi 19 | echo "$var env variable is missing" 20 | exit 1 21 | done 22 | 23 | } 24 | 25 | require_command kubectl curl docker 26 | require_env CCLOUD_JAAS CCLOUD_BOOTSTRAP_SERVER 27 | 28 | echo Waiting for Replicator to finish replicator 29 | until [[ `kubectl exec -it deploy/migration-cp-kafka-connect -c cp-kafka-connect-server -- curl -s -f http://localhost:8084/WorkerMetrics/replicator | jq '.tasks[].metrics[].messageLag' | awk '{sum=sum+$0} END{print sum}'` -eq '0' ]]; do 30 | echo -n . 
31 | sleep 5 32 | done 33 | echo 34 | 35 | echo Stop Kafka Streams 36 | kubectl delete pod app 37 | 38 | echo Stop Replicator 39 | kubectl exec -it deploy/migration-cp-kafka-connect -c cp-kafka-connect-server -- curl -s -f -X DELETE localhost:8084/connectors/replicator 40 | 41 | echo Start Kafka Streams on Confluent Cloud 42 | kubectl delete pod app 43 | -------------------------------------------------------------------------------- /up: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # To clean up previous state 6 | docker-compose down -v 7 | 8 | docker-compose up -d zookeeper 9 | docker-compose up -d broker 10 | docker-compose up -d connect 11 | 12 | echo Waiting for Kafka Connect to be up and running 13 | until curl -s -f -X 'GET' 'http://localhost:8083/' ; do 14 | echo -n "." 15 | sleep 5 16 | done 17 | echo 18 | 19 | echo Starting with hello world in data/source.txt 20 | echo "Hello world" > data/source.txt 21 | cat data/source.txt 22 | echo "" > data/sink.txt 23 | 24 | echo Creating file sink connector reading the streams-plaintext-output topic and writing to data/sink.txt 25 | curl -s -f -X 'POST' 'http://localhost:8083/connectors/' --data @connect/file-sink-connector.json -H 'Content-Type:application/json' | jq . 26 | 27 | echo Creating file source connector reading from data/source.txt and sending lines to streams-plaintext-input 28 | curl -s -f -X 'POST' 'http://localhost:8083/connectors/' --data @connect/file-source-connector.json -H 'Content-Type:application/json' | jq . 29 | 30 | echo Launching the EOS-based Kafka Streams application with a state store that counts words 31 | docker-compose up -d kafka-streams 32 | sleep 10 33 | 34 | echo The application will produce exactly-once word counts 35 | cat data/sink.txt 36 | 37 | echo Appending World to data/source.txt 38 | echo World >> data/source.txt 39 | echo Waiting for processing 40 | sleep 2 41 | echo Let\'s look at the updates in data/sink.txt 42 | cat data/sink.txt 43 | -------------------------------------------------------------------------------- /connect/replicator.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "replicator", 3 | "config": { 4 | "connector.class": "io.confluent.connect.replicator.ReplicatorSourceConnector", 5 | "key.converter": "io.confluent.connect.replicator.util.ByteArrayConverter", 6 | "value.converter": "io.confluent.connect.replicator.util.ByteArrayConverter", 7 | "src.kafka.request.timeout.ms": "20000", 8 | "src.kafka.bootstrap.servers": "broker:29092", 9 | "src.kafka.isolation.level": "read_committed", 10 | "dest.kafka.ssl.endpoint.identification.algorithm": "https", 11 | "dest.kafka.sasl.mechanism": "PLAIN", 12 | "dest.kafka.request.timeout.ms": "20000", 13 | "dest.kafka.bootstrap.servers": "CCLOUD_CLUSTER", 14 | "dest.kafka.retry.backoff.ms": "500", 15 | "dest.kafka.sasl.jaas.config": "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"CLUSTER_API_KEY\" password=\"CLUSTER_API_SECRET\";", 16 | "dest.kafka.security.protocol": "SASL_SSL", 17 | "dest.topic.replication.factor": "3", 18 | "topic.regex": "streams-.*", 19 | "topic.blacklist": "streams-wordcount-counts-store-repartition", 20 | "offset.topic.commit": true, 21 | "transforms":"renamePrefix", 22 | "transforms.renamePrefix.type":"org.apache.kafka.connect.transforms.RegexRouter", 23 | "transforms.renamePrefix.regex":"streams-wordcount-(.*)", 24 | "transforms.renamePrefix.replacement":"cloud-wordcount-$1", 25 |
"topic.preserve.partitions": true, 26 | "topic.auto.create": false 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /msk-migration/connectors/replicator.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "replicator", 3 | "connector.class": "io.confluent.connect.replicator.ReplicatorSourceConnector", 4 | "dest.kafka.sasl.mechanism": "PLAIN", 5 | "dest.kafka.ssl.endpoint.identification.algorithm": "https", 6 | "dest.kafka.security.protocol": "SASL_SSL", 7 | "dest.topic.replication.factor": "3", 8 | "dest.kafka.retry.backoff.ms": "500", 9 | "dest.kafka.request.timeout.ms": "20000", 10 | "dest.bootstrap.servers": "pkc-4r297.europe-west1.gcp.confluent.cloud:9092", 11 | "dest.kafka.sasl.jaas.config": "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"VVCKLG2QREPFSPOI\" password=\"k3VKBmqSywcaEhUP4WTdGLCo9pyplT0DZims1IG6Tvr/2UPBYdUXR35yTC4+SdFz\";", 12 | "src.kafka.bootstrap.servers": "PLAINTEXT:b-3.damien-cluster.7mzb6l.c2.kafka.eu-west-3.amazonaws.com:9092,b-1.damien-cluster.7mzb6l.c2.kafka.eu-west-3.amazonaws.com:9092,b-2.damien-cluster.7mzb6l.c2.kafka.eu-west-3.amazonaws.com:9092", 13 | "topic.config.sync": "true", 14 | "topic.auto.create": "true", 15 | "consumer.override.session.timeout.ms": "45000", 16 | "tasks.max": "1", 17 | "value.converter": "io.confluent.connect.replicator.util.ByteArrayConverter", 18 | "key.converter": "io.confluent.connect.replicator.util.ByteArrayConverter", 19 | "header.converter": "io.confluent.connect.replicator.util.ByteArrayConverter", 20 | "schema.registry.client.basic.auth.credentials.source": "USER_INFO", 21 | "provenance.header.enable": "true", 22 | "topic.whitelist": "streams-plaintext-input,streams-plaintext-output,streams-plaintext-input,app-migration-counts-store-changelog", 23 | "producer.override.enable.idempotence": "true", 24 | "producer.override.acks": "all", 25 | "producer.override.delivery.timeout.ms": "24000", 26 | "producer.override.compression.type": "zstd", 27 | "producer.override.security.protocol": "SASL_SSL", 28 | "producer.override.retry.backoff.ms": "500", 29 | "producer.override.request.timeout.ms": "20000", 30 | "producer.override.sasl.jaas.config": "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"VVCKLG2QREPFSPOI\" password=\"k3VKBmqSywcaEhUP4WTdGLCo9pyplT0DZims1IG6Tvr/2UPBYdUXR35yTC4+SdFz\";", 31 | "producer.override.sasl.mechanism": "PLAIN" 32 | } 33 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kafka Streams migration example using Replicator 2 | 3 | ## Key points 4 | 5 | - If Kafka Streams leverage `exactly-once`, Replicator must be configured with `isolation.level=read_committed` 6 | - Migrated application need to be restarted with `auto.offset.reset=latest` 7 | - In order to migrate connectors one by one, for source connectors, data must be extracted from the topics `connect-configs` and `connect-offsets`. 8 | - Source connectors (e.g. JDBC Source or Filestream source) commit offsets in Kafka Connect internal topics `connect-offsets`. 9 | 10 | ## Migration process 11 | 12 | * On the source cluster: 13 | * Stop producers and source connectors 14 | * Wait for each topology and connectors to clear up lag. 
15 | * Stop the Kafka Streams application 16 | * Copy Kafka Connect source connector offset JSON payloads 17 | * Stop consumers and sink connectors 18 | * Migration start: 19 | * Start Replicator with `isolation.level=read_committed` 20 | * Wait for Replicator to catch up 21 | * Start the application on the destination cluster 22 | * Set up connectors and source connector offset JSON payloads 23 | * Start producers, connectors, Kafka Streams and consumers 24 | 25 | [![Process](./images/process.png)](http://www.plantuml.com/plantuml/uml/dLF1Rjim3BtxAuYUcai_84Mn3jrXbst0SXXs6ZIBYRdAea1I2O9X_pxLib75mGwhOHW67_dq7aazgZcnF8PEdoac9sw4mKL_4ZB322OP6qW7v_b4yL21Rghk2cPan15kTiO9UeuHUsEvWTyTb6SxXPEmppsAtZT1vImzlfOiu1EdypK8QiwmfaGstC8kzmCuXL_-Pz_zwIwr21RDBgL0lPjYEcGh1k8Yx3HGGBXztwHyB6J17TvjW1HklwDkIkOawPiZgqTZz7FbPzwqH3kApujS6Dx2jBIb4BLDMLdxH0UfSgS9QFLJInipzrCGBITmqTTS-8eLPodmtCKJsG2a7AQoku1730-2pl_eUHp9qBSkNndUrAtj1mne2D88MK_kvJyUBcPNtgV0sJTBLRBMTYySyNwlQ7U2Bz_49U_yK2oYsio0bgzNSDx0BqSK8OyBNidqwf0aU2JE6iwWxeWUAEwvKVZF5LyFPZtp_tOpiJIth3IrK-FKFMBqUGn_0G00) 26 | 27 | ## Application architecture 28 | 29 | - A source connector streams `data/source.txt` and pushes each line to `streams-plaintext-input` 30 | - The Kafka Streams application counts the occurrences of each word from the `streams-plaintext-input` topic and pushes the result to `streams-plaintext-output` 31 | - This application has two internal topics: `streams-wordcount-counts-store-repartition` and `streams-wordcount-counts-store-changelog` 32 | - A sink connector consumes the `streams-plaintext-output` topic and writes the value of each message to `data/sink.txt` 33 | 34 | ![Architecture](./images/app-architecture.png) 35 | 36 | ## How to run the example 37 | 38 | ```sh 39 | export CCLOUD_CLUSTER=XXXXX.confluent.cloud:9092 40 | export CLUSTER_API_KEY=XXXXX 41 | export CLUSTER_API_SECRET=XXXXX 42 | 43 | # Start the local environment, the connectors and Kafka Streams 44 | ./up 45 | 46 | # Migrate Kafka Streams and connectors to Confluent Cloud 47 | ./migrate 48 | echo "World" >> data/source.txt 49 | sleep 10 50 | cat data/sink.txt 51 | ``` 52 | 53 | -------------------------------------------------------------------------------- /msk-migration/up: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | require_command() { 4 | for var in "$@"; do 5 | if which $var 2>&1 > /dev/null; then 6 | continue 7 | fi 8 | echo "$var is missing" 9 | exit 1 10 | done 11 | 12 | } 13 | 14 | require_env() { 15 | for var in "$@"; do 16 | if printenv $var 2>&1 > /dev/null; then 17 | continue 18 | fi 19 | echo "$var env variable is missing" 20 | exit 1 21 | done 22 | 23 | } 24 | 25 | echo Checking requirements 26 | require_command kind kubectl helm curl docker kafka-topics 27 | require_env MSK_CLUSTER 28 | require_env CCLOUD_BOOTSTRAP_SERVER CCLOUD_CLUSTER_API_KEY CCLOUD_CLUSTER_API_SECRET 29 | rm -rf tmp; mkdir -p tmp 30 | cp -r configmaps tmp/configmaps 31 | sed -i s/CLUSTER_BOOTSTRAP_SERVER/$CCLOUD_BOOTSTRAP_SERVER/ tmp/configmaps/cloud.properties 32 | sed -i s/CLUSTER_API_KEY/$CCLOUD_CLUSTER_API_KEY/ tmp/configmaps/cloud.properties 33 | sed -i s~CLUSTER_API_SECRET~$CCLOUD_CLUSTER_API_SECRET~ tmp/configmaps/cloud.properties 34 | cp ./values.yaml tmp/values.yaml 35 | sed -i s/CLUSTER_BOOTSTRAP_SERVER/$CCLOUD_BOOTSTRAP_SERVER/ tmp/values.yaml 36 | sed -i s/CLUSTER_API_KEY/$CCLOUD_CLUSTER_API_KEY/ tmp/values.yaml 37 | sed -i s~CLUSTER_API_SECRET~$CCLOUD_CLUSTER_API_SECRET~ tmp/values.yaml 38 | 39 | echo Creating Kind Kubernetes cluster 40 |
if [ "kind" != "`kind get clusters`" ]; then 41 | kind create cluster --name kind 42 | fi 43 | 44 | echo Creating topics on MSK 45 | kafka-topics --bootstrap-server $MSK_CLUSTER --topic streams-plaintext-input --replication-factor 3 --partitions 10 --create --if-not-exists 46 | kafka-topics --bootstrap-server $MSK_CLUSTER --topic streams-plaintext-output --replication-factor 3 --partitions 10 --create --if-not-exists 47 | 48 | echo Building images 49 | docker build -t connect:latest . 50 | (cd ../kafka-streams-app/ && docker build -t app:latest .) 51 | kind load docker-image app:latest 52 | kind load docker-image connect:latest 53 | 54 | echo Installing Kafka Connect with cp-helm-charts 55 | kubectl create configmap msk-app-config --from-file=tmp/configmaps/msk.properties 56 | kubectl create configmap cloud-app-config --from-file=tmp/configmaps/cloud.properties 57 | kubectl create configmap app-prometheus --from-file=../prometheus/ 58 | 59 | helm repo add confluentinc https://confluentinc.github.io/cp-helm-charts/ 60 | helm repo update 61 | helm install migration confluentinc/cp-helm-charts -f tmp/values.yaml 62 | 63 | echo Deploying Kafka Streams application 64 | kubectl apply -f app-pod.yaml 65 | echo "Hello World" | kafka-console-producer --broker-list $MSK_CLUSTER --topic streams-plaintext-input 66 | 67 | echo Deploying Replicator 68 | sleep 60 69 | kubectl exec -it deploy/migration-cp-kafka-connect -c cp-kafka-connect-server -- curl -X PUT -H 'Content-Type:application/json' http://localhost:8083/connectors/replicator/config --data "$(cat connectors/replicator.json)" 70 | 71 | echo Done 72 | -------------------------------------------------------------------------------- /msk-migration/values.yaml: -------------------------------------------------------------------------------- 1 | cp-zookeeper: 2 | enabled: false 3 | cp-kafka: 4 | enabled: false 5 | cp-schema-registry: 6 | enabled: false 7 | cp-kafka-rest: 8 | enabled: false 9 | 10 | ## ------------------------------------------------------ 11 | ## Kafka Connect 12 | ## ------------------------------------------------------ 13 | cp-kafka-connect: 14 | enabled: true 15 | kafka: 16 | bootstrapServers: CLUSTER_BOOTSTRAP_SERVER 17 | configurationOverrides: 18 | bootstrap.servers: CLUSTER_BOOTSTRAP_SERVER 19 | key.converter: org.apache.kafka.connect.storage.StringConverter 20 | value.converter: io.confluent.connect.avro.AvroConverter 21 | ssl.endpoint.identification.algorithm: https 22 | security.protocol: SASL_SSL 23 | sasl.mechanism: PLAIN 24 | sasl.jaas.config: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="CLUSTER_API_KEY" password="CLUSTER_API_SECRET";' 25 | request.timeout.ms: 20000 26 | retry.backoff.ms: 500 27 | producer.bootstrap.servers: pkc-4r297.europe-west1.gcp.confluent.cloud:9092 28 | producer.ssl.endpoint.identification.algorithm: https 29 | producer.security.protocol: SASL_SSL 30 | producer.sasl.mechanism: PLAIN 31 | producer.sasl.jaas.config: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="CLUSTER_API_KEY" password="CLUSTER_API_SECRET";' 32 | producer.request.timeout.ms: 20000 33 | producer.retry.backoff.ms: 500 34 | consumer.bootstrap.servers: CLUSTER_BOOTSTRAP_SERVER 35 | consumer.ssl.endpoint.identification.algorithm: https 36 | consumer.security.protocol: SASL_SSL 37 | consumer.sasl.mechanism: PLAIN 38 | consumer.sasl.jaas.config: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="CLUSTER_API_KEY" password="CLUSTER_API_SECRET";' 39 | 
consumer.request.timeout.ms: 20000 40 | consumer.retry.backoff.ms: 500 41 | offset.flush.interval.ms: 10000 42 | offset.storage.file.filename: /tmp/connect.offsets 43 | group.id: connect-cluster 44 | offset.storage.topic: msk-connect-offsets 45 | offset.storage.replication.factor: 3 46 | offset.storage.partitions: 3 47 | config.storage.topic: msk-connect-configs 48 | config.storage.replication.factor: 3 49 | status.storage.topic: msk-connect-status 50 | status.storage.replication.factor: 3 51 | rest.extension.classes: io.confluent.connect.replicator.monitoring.ReplicatorMonitoringExtension 52 | 53 | image: connect 54 | imageTag: latest 55 | ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace. 56 | ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod 57 | imagePullSecrets: 58 | # - name: "regcred" 59 | heapOptions: "-Xms512M -Xmx512M" 60 | resources: {} 61 | ## If you do want to specify resources, uncomment the following lines, adjust them as necessary, 62 | ## and remove the curly braces after 'resources:' 63 | # limits: 64 | # cpu: 100m 65 | # memory: 128Mi 66 | # requests: 67 | # cpu: 100m 68 | # memory: 128Mi 69 | 70 | cp-ksql-server: 71 | enabled: false 72 | cp-control-center: 73 | enabled: false 74 | -------------------------------------------------------------------------------- /prometheus/kafka-streams-prometheus.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | lowercaseOutputName: true 3 | lowercaseOutputLabelNames: true 4 | whitelistObjectNames: 5 | - kafka.streams:* 6 | # The two lines below are used to pull the Kafka Client Producer & consumer metrics from Kafka Streams Client. 7 | # If you care about Producer/Consumer metrics for Kafka Streams, please uncomment 2 lines below. 8 | # Please note that this increases the scrape duration by about 1-2 seconds as it needs to parse a lot of data. 
9 | - "kafka.consumer:*" 10 | - "kafka.producer:*" 11 | blacklistObjectNames: 12 | - kafka.streams:type=kafka-metrics-count 13 | - "kafka.admin.client:*" 14 | - "kafka.consumer:type=*,id=*" 15 | - "kafka.producer:type=*,id=*" 16 | - "kafka.*:type=kafka-metrics-count,*" 17 | rules: 18 | # kafka.streams:type=stream-processor-node-metrics,processor-node-id=*,task-id=*,thread-id=* 19 | # kafka.streams:type=stream-record-cache-metrics,record-cache-id=*,task-id=*,thread-id=* 20 | # kafka.streams:type=stream-state-metrics,rocksdb-state-id=*,task-id=*,thread-id=* 21 | # kafka.streams:type=stream-state-metrics,rocksdb-state-id=*,task-id=*,thread-id=* 22 | - pattern: "kafka.streams<>(.+):" 23 | name: kafka_streams_$1_$6 24 | type: GAUGE 25 | labels: 26 | thread_id: "$2" 27 | task_id: "$3" 28 | $4: "$5" 29 | # kafka.streams:type=stream-task-metrics,task-id=*,thread-id=* 30 | - pattern: "kafka.streams<>(.+):" 31 | name: kafka_streams_$1_$4 32 | type: GAUGE 33 | labels: 34 | thread_id: "$2" 35 | task_id: "$3" 36 | # kafka.streams:type=stream-metrics,client-id=* 37 | - pattern: "kafka.streams<>(state|alive-stream-threads|commit-id|version|application-id): (.+)" 38 | name: kafka_streams_stream_metrics 39 | value: 1 40 | type: UNTYPED 41 | labels: 42 | $1: "$2" 43 | $3: "$4" 44 | # kafka.streams:type=stream-thread-metrics,thread-id=* 45 | - pattern: "kafka.streams<>([^:]+)" 46 | name: kafka_streams_$1_$4 47 | type: GAUGE 48 | labels: 49 | $2: "$3" 50 | # "kafka.consumer:type=app-info,client-id=*" 51 | # "kafka.producer:type=app-info,client-id=*" 52 | - pattern: "kafka.(.+)<>(.+): (.+)" 53 | value: 1 54 | name: kafka_$1_app_info 55 | labels: 56 | client_type: $1 57 | client_id: $2 58 | $3: $4 59 | type: UNTYPED 60 | # "kafka.consumer:type=consumer-metrics,client-id=*, protocol=*, cipher=*" 61 | # "kafka.consumer:type=type=consumer-fetch-manager-metrics,client-id=*, topic=*, partition=*" 62 | # "kafka.producer:type=producer-metrics,client-id=*, protocol=*, cipher=*" 63 | - pattern: "kafka.(.+)<>(.+):" 64 | name: kafka_$1_$2_$9 65 | type: GAUGE 66 | labels: 67 | client_type: $1 68 | $3: "$4" 69 | $5: "$6" 70 | $7: "$8" 71 | # "kafka.consumer:type=consumer-node-metrics,client-id=*, node-id=*" 72 | # "kafka.consumer:type=consumer-fetch-manager-metrics,client-id=*, topic=*" 73 | # "kafka.producer:type=producer-node-metrics,client-id=*, node-id=*" 74 | # "kafka.producer:type=producer-topic-metrics,client-id=*, topic=*" 75 | - pattern: "kafka.(.+)<>(.+):" 76 | name: kafka_$1_$2_$7 77 | type: GAUGE 78 | labels: 79 | client_type: $1 80 | $3: "$4" 81 | $5: "$6" 82 | # "kafka.consumer:type=consumer-fetch-manager-metrics,client-id=*" 83 | # "kafka.consumer:type=consumer-metrics,client-id=*" 84 | # "kafka.producer:type=producer-metrics,client-id=*" 85 | - pattern: "kafka.(.+)<>(.+):" 86 | name: kafka_$1_$2_$5 87 | type: GAUGE 88 | labels: 89 | client_type: $1 90 | $3: "$4" 91 | - pattern: "kafka.(.+)<>(.+):" 92 | name: kafka_$1_$2_$3 93 | labels: 94 | client_type: $1 95 | -------------------------------------------------------------------------------- /kafka-streams-app/pom.xml: -------------------------------------------------------------------------------- 1 | 17 | 18 | 21 | 4.0.0 22 | 23 | org.example 24 | kafka-streams-app 25 | 1.0-SNAPSHOT 26 | jar 27 | 28 | Kafka Streams Quickstart :: Java 29 | 30 | 31 | UTF-8 32 | 3.0.0 33 | 1.7.7 34 | 1.2.17 35 | 1.7.30 36 | 37 | 38 | 39 | 40 | apache.snapshots 41 | Apache Development Snapshot Repository 42 | https://repository.apache.org/content/repositories/snapshots/ 43 | 44 
| false 45 | 46 | 47 | true 48 | 49 | 50 | 51 | 52 | 56 | 57 | 58 | 59 | 60 | org.apache.maven.plugins 61 | maven-compiler-plugin 62 | 3.1 63 | 64 | 1.8 65 | 1.8 66 | 67 | 68 | 69 | 70 | org.apache.maven.plugins 71 | maven-assembly-plugin 72 | 73 | 74 | package 75 | 76 | single 77 | 78 | 79 | 80 | 81 | 82 | org.example.WordCount 83 | 84 | 85 | 86 | 87 | jar-with-dependencies 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | org.apache.kafka 100 | kafka-streams 101 | ${kafka.version} 102 | 103 | 104 | org.slf4j 105 | slf4j-simple 106 | ${slf4j-simple.version} 107 | 108 | 109 | 110 | -------------------------------------------------------------------------------- /kafka-streams-app/src/main/java/org/example/WordCount.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | package org.example; 18 | 19 | import org.apache.kafka.clients.consumer.ConsumerConfig; 20 | import org.apache.kafka.clients.producer.ProducerConfig; 21 | import org.apache.kafka.common.config.SaslConfigs; 22 | import org.apache.kafka.common.config.TopicConfig; 23 | import org.apache.kafka.common.serialization.Serdes; 24 | import org.apache.kafka.common.utils.Bytes; 25 | import org.apache.kafka.streams.KafkaStreams; 26 | import org.apache.kafka.streams.StreamsBuilder; 27 | import org.apache.kafka.streams.StreamsConfig; 28 | import org.apache.kafka.streams.Topology; 29 | import org.apache.kafka.streams.kstream.KeyValueMapper; 30 | import org.apache.kafka.streams.kstream.Materialized; 31 | import org.apache.kafka.streams.kstream.Produced; 32 | import org.apache.kafka.streams.kstream.ValueMapper; 33 | import org.apache.kafka.streams.state.KeyValueStore; 34 | 35 | import java.io.FileInputStream; 36 | import java.util.Arrays; 37 | import java.util.Locale; 38 | import java.util.Properties; 39 | import java.util.concurrent.CountDownLatch; 40 | 41 | /** 42 | * In this example, we implement a simple WordCount program using the high-level Streams DSL 43 | * that reads from a source topic "streams-plaintext-input", where the values of messages represent lines of text, 44 | * split each text line into words and then compute the word occurence histogram, write the continuous updated histogram 45 | * into a topic "streams-wordcount-output" where each record is an updated count of a single word. 
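 * For example, feeding the lines "Hello world" and then "World" through this topology eventually yields the updates "hello 1", "world 1" and "world 2" on the output topic (exact emission timing depends on record caching and commit intervals).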
46 | */ 47 | public class WordCount { 48 | 49 | public static void main(String[] args) throws Exception { 50 | Properties props = new Properties(); 51 | props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); 52 | props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2); 53 | props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); 54 | props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); 55 | 56 | if (args.length > 0) { 57 | props.load(new FileInputStream(args[0])); 58 | } 59 | 60 | if (props.getProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG).contains("MSK_CLUSTER") || "MSK".equals(System.getenv("CLUSTER_TYPE"))) { 61 | props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, System.getenv("MSK_BOOTSTRAP_SERVERS")); 62 | } 63 | 64 | if (props.getProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG).contains("CCLOUD_CLUSTER") || "CCLOUD".equals(System.getenv("CLUSTER_TYPE"))) { 65 | String jaas = props.getProperty(SaslConfigs.SASL_JAAS_CONFIG); 66 | jaas = jaas.replaceAll("CLUSTER_API_KEY", System.getenv("CLUSTER_API_KEY")); 67 | jaas = jaas.replaceAll("CLUSTER_API_SECRET", System.getenv("CLUSTER_API_SECRET")); 68 | props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, System.getenv("CCLOUD_CLUSTER")); 69 | props.put(SaslConfigs.SASL_JAAS_CONFIG, jaas); 70 | } 71 | 72 | final StreamsBuilder builder = new StreamsBuilder(); 73 | 74 | builder.<String, String>stream("streams-plaintext-input") 75 | .flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+"))) 76 | .groupBy((key, value) -> value) 77 | .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store")) 78 | .toStream() 79 | .mapValues((k, v) -> k + " " + String.valueOf(v)) 80 | .to("streams-plaintext-output", Produced.with(Serdes.String(), Serdes.String())); 81 | 82 | final Topology topology = builder.build(); 83 | final KafkaStreams streams = new KafkaStreams(topology, props); 84 | final CountDownLatch latch = new CountDownLatch(1); 85 | 86 | // attach shutdown handler to catch control-c 87 | Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") { 88 | @Override 89 | public void run() { 90 | streams.close(); 91 | latch.countDown(); 92 | } 93 | }); 94 | 95 | try { 96 | streams.start(); 97 | latch.await(); 98 | } catch (Throwable e) { 99 | System.exit(1); 100 | } 101 | System.exit(0); 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /migrate: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Cleaning Confluent Cloud cluster" 4 | for i in cloud-wordcount-counts-store-changelog cloud-wordcount-counts-store-repartition streams-plaintext-output streams-plaintext-input migration-demo-connect-configs migration-demo-connect-offsets migration-demo-connect-statuses _confluent-command; do 5 | confluent kafka topic delete $i; 6 | done 7 | 8 | for i in cloud-wordcount-counts-store-changelog cloud-wordcount-counts-store-repartition streams-plaintext-output streams-plaintext-input ; do 9 | confluent kafka topic create $i; 10 | done 11 | 12 | set -e 13 | 14 | echo Pause source connectors 15 | curl -s -f -X PUT http://localhost:8083/connectors/connector-source/pause -H 'Content-Type:application/json' 16 | 17 | echo Wait for Kafka Streams lag to reach 0 18 | # EoS could return lag of 0 or 1 due to the commit mark 19 | until [[ `docker-compose exec broker kafka-consumer-groups --describe --bootstrap-server localhost:19092 --group
streams-wordcount | grep 'streams-wordcount-' | awk '{ print $6 }' | grep -v '0' | grep -v '1' | grep -v '-' | wc -l | sed -e 's/^[[:space:]]*//'` -eq 0 ]]; do 20 | echo -n . 21 | sleep 5 22 | done 23 | echo 24 | 25 | echo Wait for sink connect lag to reach 0 26 | # EoS could return lag of 0 or 1 due to the commit mark 27 | until [[ `docker-compose exec broker kafka-consumer-groups --describe --bootstrap-server localhost:19092 --group connect-connector-sink | grep 'connect-connector-sink-' | awk '{ print $6 }' | grep -v '0' | wc -l | sed -e 's/^[[:space:]]*//'` -eq 0 ]]; do 28 | echo -n . 29 | sleep 5 30 | done 31 | echo 32 | 33 | echo Delete connectors 34 | curl -s -f -X PUT http://localhost:8083/connectors/connector-sink/pause -H 'Content-Type:application/json' 35 | 36 | echo Stop Kafka Streams 37 | docker-compose stop kafka-streams 38 | 39 | echo Start Kafka Connect that uses cloud 40 | docker-compose up -d connect-cloud 41 | 42 | echo Waiting for Kafka Connect to be up and running 43 | until curl -s -f -X 'GET' 'http://localhost:8084/'; do 44 | echo -n "." 45 | sleep 5 46 | done 47 | echo 48 | 49 | echo Starting Replicator 50 | curl -s -f -X POST http://localhost:8084/connectors -H 'Content-Type: application/json' --data "`cat connect/replicator.json| tr -d '\r\n' | sed "s/CCLOUD_CLUSTER/$CCLOUD_CLUSTER/" | sed "s/CLUSTER_API_KEY/$CLUSTER_API_KEY/" | sed "s#CLUSTER_API_SECRET#$CLUSTER_API_SECRET#"`" | jq . 51 | 52 | echo Wait for Replicator to finish moving local data to cloud 53 | for i in `seq 3`; do 54 | sleep 10 55 | curl -s http://localhost:8084/connectors/replicator/status | jq 56 | done 57 | 58 | echo Ensure Replicator lag is 0 59 | 60 | until [[ `curl -s -f http://localhost:8084/WorkerMetrics/replicator | jq '.tasks[].metrics[].messageLag' | awk '{sum=sum+$0} END{print sum}'` -eq '0' ]]; do 61 | echo -n . 
62 | sleep 5 63 | done 64 | echo 65 | 66 | echo Stopping Replicator 67 | curl -s -f -X DELETE localhost:8084/connectors/replicator 68 | 69 | echo Stopping Kafka Connect as we will manually copy connector configs and offsets 70 | docker-compose stop connect-cloud 71 | 72 | echo Copying connector-source and connector-sink configs to Confluent Cloud 73 | for line in `docker-compose exec broker kafka-console-consumer --timeout-ms 15000 --bootstrap-server localhost:29092 --topic docker-connect-configs --property print.key=true --property 'key.separator=|' --from-beginning | grep -E 'connector-source|connector-sink'`; do 74 | if [[ "$line" =~ 'PAUSED' ]]; then 75 | continue; 76 | fi 77 | docker-compose exec -e "msg=$line" broker bash -c "echo \"\$msg\" | kafka-console-producer --broker-list $CCLOUD_CLUSTER --topic migration-demo-connect-configs --property 'parse.key=true' --property 'key.separator=|' --producer-property \"sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='$CLUSTER_API_KEY' password='$CLUSTER_API_SECRET';\" --producer-property \"security.protocol=SASL_SSL\" --producer-property \"sasl.mechanism=PLAIN\"" 78 | done 79 | 80 | echo Copying connector-source and connector-sink offsets to Confluent Cloud 81 | for line in `docker-compose exec broker kafka-console-consumer --timeout-ms 5000 --bootstrap-server localhost:19092 --topic docker-connect-offsets --property print.key=true --property 'key.separator=|' --from-beginning | grep 'connector-source' | tail -n 1`; do 82 | docker-compose exec -e "msg=$line" broker bash -c "echo \"\$msg\" | kafka-console-producer --broker-list $CCLOUD_CLUSTER --topic migration-demo-connect-offsets --property 'parse.key=true' --property 'key.separator=|' --producer-property \"sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='$CLUSTER_API_KEY' password='$CLUSTER_API_SECRET';\" --producer-property \"security.protocol=SASL_SSL\" --producer-property \"sasl.mechanism=PLAIN\"" 83 | done 84 | 85 | echo Restarting Kafka Connect with the copied connector configs and offsets 86 | docker-compose up -d connect-cloud 87 | 88 | echo Waiting for Kafka Connect using Confluent Cloud to be up and running 89 | until curl -s -f -X 'GET' 'http://localhost:8084/'; do 90 | echo -n "."
91 | sleep 5 92 | done 93 | echo 94 | 95 | echo Resuming source connector 96 | curl -s -X 'PUT' 'http://localhost:8084/connectors/connector-source/resume' -H 'Content-Type:application/json' 97 | 98 | echo Resuming sink connector 99 | curl -s -X 'PUT' 'http://localhost:8084/connectors/connector-sink/resume' -H 'Content-Type:application/json' 100 | 101 | echo Restarting Kafka Streams 102 | docker-compose up -d kafka-streams-cloud 103 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '2' 3 | services: 4 | zookeeper: 5 | image: confluentinc/cp-zookeeper:7.0.0 6 | hostname: zookeeper 7 | container_name: zookeeper 8 | ports: 9 | - "12181:2181" 10 | environment: 11 | ZOOKEEPER_CLIENT_PORT: 2181 12 | ZOOKEEPER_TICK_TIME: 2000 13 | 14 | broker: 15 | image: confluentinc/cp-server:7.0.0 16 | hostname: broker 17 | container_name: broker 18 | depends_on: 19 | - zookeeper 20 | ports: 21 | - "19092:19092" 22 | - "9101:9101" 23 | environment: 24 | KAFKA_BROKER_ID: 1 25 | KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' 26 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT 27 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:19092 28 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 29 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 30 | KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1 31 | KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1 32 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 33 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 34 | KAFKA_JMX_PORT: 9101 35 | KAFKA_JMX_HOSTNAME: localhost 36 | KAFKA_NUM_PARTITIONS: 6 37 | 38 | connect: 39 | build: ./connect 40 | hostname: connect 41 | container_name: connect 42 | depends_on: 43 | - broker 44 | ports: 45 | - "8083:8083" 46 | environment: 47 | CONNECT_BOOTSTRAP_SERVERS: 'broker:29092' 48 | CONNECT_REST_ADVERTISED_HOST_NAME: connect 49 | CONNECT_REST_PORT: 8083 50 | CONNECT_GROUP_ID: compose-connect-group 51 | CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs 52 | CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1 53 | CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000 54 | CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets 55 | CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1 56 | CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status 57 | CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1 58 | CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter 59 | CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter 60 | CONNECT_CONNECTOR_CLIENT_CONFIG_OVERRIDE_POLICY: All 61 | volumes: 62 | - ./data:/tmp 63 | 64 | kafka-streams: 65 | build: ./kafka-streams-app 66 | hostname: kafka-streams 67 | container_name: kafka-streams 68 | restart: on-failure 69 | ports: 70 | - "2112:2112" 71 | depends_on: 72 | - broker 73 | volumes: 74 | - ./kafka-streams-source-cluster.properties:/tmp/conf.properties 75 | - ./prometheus:/tmp/prometheus 76 | 77 | connect-cloud: 78 | build: ./connect 79 | hostname: connect-cloud 80 | container_name: connect-cloud 81 | ports: 82 | - "8084:8083" 83 | environment: 84 | CONNECT_REST_ADVERTISED_HOST_NAME: connect 85 | CONNECT_BOOTSTRAP_SERVERS: ${CCLOUD_CLUSTER} 86 | CONNECT_SECURITY_PROTOCOL: SASL_SSL 87 | CONNECT_SASL_MECHANISM: PLAIN 88 | CONNECT_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required username="${CLUSTER_API_KEY}" password="${CLUSTER_API_SECRET}"; 89 | 
CONNECT_REQUEST_TIMEOUT_MS: 20000 90 | CONNECT_RETRY_BACKOFF_MS: 500 91 | 92 | CONNECT_PRODUCER_BOOTSTRAP_SERVERS: ${CCLOUD_CLUSTER} 93 | CONNECT_PRODUCER_SECURITY_PROTOCOL: SASL_SSL 94 | CONNECT_PRODUCER_SASL_MECHANISM: PLAIN 95 | CONNECT_PRODUCER_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required username="${CLUSTER_API_KEY}" password="${CLUSTER_API_SECRET}"; 96 | CONNECT_PRODUCER_REQUEST_TIMEOUT_MS: 20000 97 | CONNECT_PRODUCER_RETRY_BACKOFF_MS: 500 98 | 99 | CONNECT_CONSUMER_REQUEST_TIMEOUT_MS: 20000 100 | CONNECT_CONSUMER_BOOTSTRAP_SERVERS: ${CCLOUD_CLUSTER} 101 | CONNECT_CONSUMER_SECURITY_PROTOCOL: SASL_SSL 102 | CONNECT_CONSUMER_SASL_MECHANISM: PLAIN 103 | CONNECT_CONSUMER_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required username="${CLUSTER_API_KEY}" password="${CLUSTER_API_SECRET}"; 104 | 105 | CONNECT_CONFIG_STORAGE_TOPIC: migration-demo-connect-configs 106 | CONNECT_OFFSET_STORAGE_TOPIC: migration-demo-connect-offsets 107 | CONNECT_STATUS_STORAGE_TOPIC: migration-demo-connect-statuses 108 | CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000 109 | CONNECT_GROUP_ID: connect-cluster 110 | CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter 111 | CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter 112 | CONNECT_REST_EXTENSION_CLASSES: io.confluent.connect.replicator.monitoring.ReplicatorMonitoringExtension 113 | CONNECT_CONNECTOR_CLIENT_CONFIG_OVERRIDE_POLICY: All 114 | volumes: 115 | - ./data:/tmp 116 | - ./connect/jars/:/etc/kafka-connect/jars/ 117 | 118 | kafka-streams-cloud: 119 | build: ./kafka-streams-app 120 | hostname: kafka-streams-cloud 121 | container_name: kafka-streams-cloud 122 | restart: on-failure 123 | ports: 124 | - "2113:2112" 125 | environment: 126 | CCLOUD_CLUSTER: ${CCLOUD_CLUSTER} 127 | CLUSTER_API_KEY: ${CLUSTER_API_KEY} 128 | CLUSTER_API_SECRET: ${CLUSTER_API_SECRET} 129 | volumes: 130 | - ./kafka-streams-cloud-cluster.properties:/tmp/conf.properties 131 | - ./prometheus:/tmp/prometheus 132 | --------------------------------------------------------------------------------
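Once `./migrate` has completed, a quick way to confirm that the migrated Kafka Streams application is consuming from Confluent Cloud is to describe its consumer group (`cloud-wordcount`, the `application.id` from `kafka-streams-cloud-cluster.properties`) on the destination cluster. This is a sketch using the standard `kafka-consumer-groups` tool with a throwaway client properties file; it is not part of the repository:

```sh
# Client configuration for the Confluent Cloud cluster (reuses the exported variables from the README)
cat > /tmp/ccloud-client.properties <<EOF
security.protocol=SASL_SSL
sasl.mechanism=PLAIN
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="$CLUSTER_API_KEY" password="$CLUSTER_API_SECRET";
EOF

# Lag should converge to 0 (or 1 per partition, because of the EOS commit marker)
kafka-consumer-groups --bootstrap-server "$CCLOUD_CLUSTER" \
  --command-config /tmp/ccloud-client.properties \
  --describe --group cloud-wordcount
```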