├── kafka-java-springboot
├── settings.gradle
├── gradle
│ └── wrapper
│ │ ├── gradle-wrapper.jar
│ │ └── gradle-wrapper.properties
├── src
│ └── main
│ │ ├── avro
│ │ └── DataRecordAvro.avsc
│ │ ├── java
│ │ └── io
│ │ │ └── confluent
│ │ │ └── examples
│ │ │ └── clients
│ │ │ └── cloud
│ │ │ └── springboot
│ │ │ ├── kafka
│ │ │ ├── ConsumerExample.java
│ │ │ ├── SpringbootKafkaApplication.java
│ │ │ └── ProducerExample.java
│ │ │ └── streams
│ │ │ └── SpringbootStreamsApplication.java
│ │ └── resources
│ │ ├── log4j.properties
│ │ └── application.properties
├── ccloud-generate-cp-configs.sh
├── startStreams.sh
├── startProducerConsumer.sh
├── build.gradle
├── gradlew.bat
└── README.md
├── kafka-python-application
├── requirements.txt
├── Dockerfile
├── README.md
├── producer.py
├── consumer.py
├── producer_ccsr.py
└── consumer_ccsr.py
├── kafka-consumer-application
├── configuration
│ ├── test.properties
│ ├── prod.properties
│ ├── dev-template.properties
│ └── ccloud.properties
├── src
│ ├── main
│ │ └── java
│ │ │ └── io
│ │ │ └── confluent
│ │ │ └── developer
│ │ │ ├── ConsumerRecordsHandler.java
│ │ │ ├── FileWritingRecordsHandler.java
│ │ │ └── KafkaConsumerApplication.java
│ └── test
│ │ └── java
│ │ └── io
│ │ └── confluent
│ │ └── developer
│ │ ├── FileWritingRecordsHandlerTest.java
│ │ └── KafkaConsumerApplicationTest.java
└── build.gradle
├── kafka-streams
├── gradle
│ └── wrapper
│ │ ├── gradle-wrapper.jar
│ │ └── gradle-wrapper.properties
├── app
│ ├── src
│ │ ├── main
│ │ │ ├── avro
│ │ │ │ ├── processed_order.avsc
│ │ │ │ ├── user.avsc
│ │ │ │ ├── product_order.avsc
│ │ │ │ ├── appliance_order.avsc
│ │ │ │ ├── electronic_order.avsc
│ │ │ │ └── combined_order.avsc
│ │ │ ├── resources
│ │ │ │ └── streams.properties.orig
│ │ │ └── java
│ │ │ │ └── io
│ │ │ │ └── m03315
│ │ │ │ └── learning
│ │ │ │ └── kafka
│ │ │ │ ├── StreamsUtils.java
│ │ │ │ ├── KTableExample.java
│ │ │ │ ├── TopicLoader.java
│ │ │ │ ├── BasicStreams.java
│ │ │ │ └── StreamsJoin.java
│ │ └── test
│ │ │ └── java
│ │ │ └── io
│ │ │ └── m03315
│ │ │ └── learning
│ │ │ └── kafka
│ │ │ └── AppTest.java
│ └── build.gradle
├── settings.gradle
└── gradlew.bat
├── initial
├── Dockerfile
├── docker-compose.yml
└── README.md
├── kafka-go-getting-started
├── go.mod
├── getting-started-template.properties
├── util.go
├── consumer.go
├── README.md
└── producer.go
├── kafka-c-getting-started
├── Makefile
├── getting_started-template.ini
├── common.c
├── README.md
├── producer.c
└── consumer.c
├── kafka-dotnet-getting-started
├── getting-started-template.properties
├── consumer
│ ├── consumer.csproj
│ └── consumer.cs
├── producer
│ ├── producer.csproj
│ └── producer.cs
└── README.md
├── kafka-producer-application
├── input.txt
├── configuration
│ ├── dev-template.properties
│ ├── test.properties
│ ├── prod-template.properties
│ └── ccloud-template.properties
├── build.gradle
├── src
│ ├── test
│ │ └── java
│ │ │ └── io
│ │ │ └── confluent
│ │ │ └── developer
│ │ │ └── KafkaProducerApplicationTest.java
│ └── main
│ │ └── java
│ │ └── io
│ │ └── confluent
│ │ └── developer
│ │ └── KafkaProducerApplication.java
└── README.md
├── kafka-nodejs-getting-started
├── getting-started-template.properties
├── README.md
├── util.js
├── consumer.js
└── producer.js
├── kafka-producer-application-callback
├── input.txt
├── configuration
│ ├── test.properties
│ ├── dev-template.properties
│ ├── prod.properties
│ └── ccloud.properties
├── build.gradle
├── README.md
└── src
│ ├── test
│ └── java
│ │ └── io
│ │ └── confluent
│ │ └── developer
│ │ └── KafkaProducerCallbackApplicationTest.java
│ └── main
│ └── java
│ └── io
│ └── confluent
│ └── developer
│ └── KafkaProducerCallbackApplication.java
├── kafka-java-maven-application
├── checkstyle
│ └── suppressions.xml
├── src
│ └── main
│ │ ├── resources
│ │ ├── avro
│ │ │ └── io
│ │ │ │ └── confluent
│ │ │ │ └── examples
│ │ │ │ └── clients
│ │ │ │ └── cloud
│ │ │ │ ├── DataRecordAvro.avsc
│ │ │ │ ├── DataRecordAvro2a.avsc
│ │ │ │ └── DataRecordAvro2b.avsc
│ │ └── log4j.properties
│ │ └── java
│ │ └── io
│ │ └── confluent
│ │ └── examples
│ │ └── clients
│ │ └── cloud
│ │ ├── model
│ │ ├── DataRecord.java
│ │ └── PageviewRecord.java
│ │ ├── Util.java
│ │ ├── ConsumerExamplePageviews.java
│ │ ├── ConsumerAvroExample.java
│ │ ├── ConsumerExample.java
│ │ ├── StreamsAvroExample.java
│ │ ├── ProducerAvroExample.java
│ │ ├── ProducerExample.java
│ │ └── StreamsExample.java
├── checkstyle.xml
├── java.config
└── README.md
├── kafka-rest-proxy
├── ccloud-generate-cp-configs.sh
└── java.config
├── kafka-python-getting-started
├── getting_started_template.ini
├── producer.py
├── README.md
└── consumer.py
├── kafka-spring-boot-getting-started
├── build.gradle
├── src
│ └── main
│ │ ├── resources
│ │ └── application-tempalte.yaml
│ │ └── java
│ │ └── examples
│ │ ├── Consumer.java
│ │ ├── Producer.java
│ │ └── SpringBootWithKafkaApplication.java
└── REAMDE.md
├── console-consumer-read-specific-offsets-partition
├── configuration
│ └── ccloud-template.properties
└── README.md
├── kcat
├── kcat-example.sh
└── README.md
├── README.md
├── .github
└── workflows
│ ├── gradle.yml
│ └── codeql.yml
└── ConfluentCLI-KM.md
/kafka-java-springboot/settings.gradle:
--------------------------------------------------------------------------------
1 | rootProject.name = 'java-springboot'
2 |
--------------------------------------------------------------------------------
/kafka-python-application/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
2 | certifi
3 | confluent-kafka[avro,json,protobuf]>=1.4.2
4 | avro
5 | fastavro
6 |
--------------------------------------------------------------------------------
/kafka-consumer-application/configuration/test.properties:
--------------------------------------------------------------------------------
1 | input.topic.name=input-topic
2 | input.topic.partitions=1
3 | input.topic.replication.factor=1
--------------------------------------------------------------------------------
/kafka-streams/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/m03315/100DAYSKAFKA/HEAD/kafka-streams/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/initial/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM alpine:latest
2 |
3 | RUN apk update && apk add curl \
4 | && curl -sL --http1.1 https://cnfl.io/cli | sh -s -- latest
5 |
6 |
--------------------------------------------------------------------------------
/kafka-go-getting-started/go.mod:
--------------------------------------------------------------------------------
1 | module kafka-go-getting-started
2 |
3 | go 1.19
4 |
5 | require github.com/confluentinc/confluent-kafka-go v1.9.2 // indirect
6 |
--------------------------------------------------------------------------------
/kafka-java-springboot/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/m03315/100DAYSKAFKA/HEAD/kafka-java-springboot/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/kafka-c-getting-started/Makefile:
--------------------------------------------------------------------------------
1 | ALL: producer consumer
2 |
3 | CFLAGS=-Wall $(shell pkg-config --cflags glib-2.0 rdkafka)
4 | LDLIBS=$(shell pkg-config --libs glib-2.0 rdkafka)
5 |
--------------------------------------------------------------------------------
/kafka-python-application/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.7-slim
2 |
3 | COPY requirements.txt /tmp/requirements.txt
4 | RUN pip3 install -U -r /tmp/requirements.txt
5 |
6 | COPY *.py ./
7 |
--------------------------------------------------------------------------------
/kafka-dotnet-getting-started/getting-started-template.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=
2 | security.protocol=SASL_SSL
3 | sasl.mechanisms=PLAIN
4 | sasl.username=< CLUSTER API KEY >
5 | sasl.password=< CLUSTER API SECRET >
--------------------------------------------------------------------------------
/kafka-producer-application/input.txt:
--------------------------------------------------------------------------------
1 | 1-value
2 | 2-words
3 | 3-All Streams
4 | 4-Lead to
5 | 5-Kafka
6 | 6-Go to
7 | 7-Kafka Summit
8 | 8-How can
9 | 9-a 10 ounce
10 | 10-bird carry a
11 | 11-5lb coconut
12 |
13 |
--------------------------------------------------------------------------------
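The lines in input.txt above follow a `key-value` shape. The producer application that consumes this file (KafkaProducerApplication.java is listed in the tree but not reproduced in this section) is configured for String serialization in the accompanying configuration/*.properties files. A minimal, hedged sketch of how such a line could be split into a key/value pair and sent; the class and method names here are illustrative, not the repository's:

```
// Illustrative sketch only: split an input.txt line on the first '-' into a
// key/value pair and send it with String serialization.
import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class ProduceLineSketch {
  public static Future<RecordMetadata> produce(Producer<String, String> producer,
                                               String topic, String line) {
    String[] parts = line.split("-", 2);            // "9-a 10 ounce" -> ["9", "a 10 ounce"]
    ProducerRecord<String, String> record = parts.length > 1
        ? new ProducerRecord<>(topic, parts[0].trim(), parts[1].trim())
        : new ProducerRecord<>(topic, null, parts[0].trim());  // no '-': send value only
    return producer.send(record);
  }
}
```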
/kafka-nodejs-getting-started/getting-started-template.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=
2 | security.protocol=SASL_SSL
3 | sasl.mechanisms=PLAIN
4 | sasl.username=< CLUSTER API KEY >
5 | sasl.password=< CLUSTER API SECRET >
6 |
--------------------------------------------------------------------------------
/kafka-producer-application-callback/input.txt:
--------------------------------------------------------------------------------
1 | 1-value
2 | 2-words
3 | 3-All Streams
4 | 4-Lead to
5 | 5-Kafka
6 | 6-Go to
7 | 7-Kafka Summit
8 | 8-How can
9 | 9-a 10 ounce
10 | 10-bird carry a
11 | 11-5lb coconut
12 |
--------------------------------------------------------------------------------
/kafka-java-springboot/src/main/avro/DataRecordAvro.avsc:
--------------------------------------------------------------------------------
1 | {"namespace": "io.confluent.examples.clients.cloud",
2 | "type": "record",
3 | "name": "DataRecordAvro",
4 | "fields": [
5 | {"name": "count", "type": "long"}
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/checkstyle/suppressions.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/src/main/resources/avro/io/confluent/examples/clients/cloud/DataRecordAvro.avsc:
--------------------------------------------------------------------------------
1 | {"namespace": "io.confluent.examples.clients.cloud",
2 | "type": "record",
3 | "name": "DataRecordAvro",
4 | "fields": [
5 | {"name": "count", "type": "long"}
6 | ]
7 | }
8 |
--------------------------------------------------------------------------------
/kafka-go-getting-started/getting-started-template.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=
2 | security.protocol=SASL_SSL
3 | sasl.mechanisms=PLAIN
4 | sasl.username=< CLUSTER API KEY >
5 | sasl.password=< CLUSTER API SECRET >
6 |
7 | # Best practice for Kafka producer to prevent data loss
8 | acks=all
9 |
--------------------------------------------------------------------------------
/kafka-streams/app/src/main/avro/processed_order.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "namespace": "io.m03315.learning.kafka.avro",
3 | "type": "record",
4 | "name": "ProcessedOrder",
5 | "fields": [
6 | {"name": "time_processed", "type": "long"},
7 | {"name": "product", "type": "string"}
8 | ]
9 | }
--------------------------------------------------------------------------------
/kafka-streams/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | distributionBase=GRADLE_USER_HOME
2 | distributionPath=wrapper/dists
3 | distributionUrl=https\://services.gradle.org/distributions/gradle-7.6-bin.zip
4 | networkTimeout=10000
5 | zipStoreBase=GRADLE_USER_HOME
6 | zipStorePath=wrapper/dists
7 |
--------------------------------------------------------------------------------
/kafka-consumer-application/configuration/prod.properties:
--------------------------------------------------------------------------------
1 | application.id=kafka-consumer-application
2 | bootstrap.servers=<>
3 | schema.registry.url=<>
4 |
5 | example.topic.name=<>
6 | example.topic.partitions=<>
7 | example.topic.replication.factor=<>
8 |
--------------------------------------------------------------------------------
/kafka-consumer-application/src/main/java/io/confluent/developer/ConsumerRecordsHandler.java:
--------------------------------------------------------------------------------
1 | package io.confluent.developer;
2 |
3 | import org.apache.kafka.clients.consumer.ConsumerRecords;
4 |
5 | public interface ConsumerRecordsHandler {
6 | void process(ConsumerRecords consumerRecords);
7 | }
8 |
--------------------------------------------------------------------------------
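KafkaConsumerApplication.java (listed in the tree but not reproduced here) drives this interface from its poll loop. A minimal, hedged sketch of that pattern, assuming a pre-built Consumer<String, String> and the input.topic.name property from the configuration files in this directory; class and method names are illustrative:

```
// Hedged sketch of a poll loop that delegates to a ConsumerRecordsHandler.
// The consumer instance and properties are assumed to be built elsewhere.
package io.confluent.developer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;

public class ConsumerLoopSketch {
  private final AtomicBoolean keepConsuming = new AtomicBoolean(true);

  public void runConsume(Consumer<String, String> consumer,
                         Properties appProps,
                         ConsumerRecordsHandler recordsHandler) {
    try {
      consumer.subscribe(Collections.singletonList(appProps.getProperty("input.topic.name")));
      while (keepConsuming.get()) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
        recordsHandler.process(records);   // e.g. FileWritingRecordsHandler appends values to file.path
      }
    } finally {
      consumer.close();
    }
  }

  public void shutdown() {
    keepConsuming.set(false);
  }
}
```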
/kafka-java-springboot/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Mon Feb 03 14:04:23 EST 2020
2 | distributionUrl=https\://services.gradle.org/distributions/gradle-6.0.1-all.zip
3 | distributionBase=GRADLE_USER_HOME
4 | distributionPath=wrapper/dists
5 | zipStorePath=wrapper/dists
6 | zipStoreBase=GRADLE_USER_HOME
7 |
--------------------------------------------------------------------------------
/kafka-streams/app/src/main/avro/user.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "namespace": "io.m03315.learning.kafka.avro",
3 | "type": "record",
4 | "name": "User",
5 | "fields": [
6 | {"name": "name", "type": "string"},
7 | {"name": "address", "type": "string"},
8 | {"name": "user_id", "type": "string"}
9 | ]
10 | }
--------------------------------------------------------------------------------
/kafka-java-springboot/ccloud-generate-cp-configs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This file is deprecated in favor of the function in ./ccloud_library.sh
4 |
5 | # Source library
6 | DIR_THIS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
7 | source ./ccloud_library.sh
8 |
9 | ccloud::generate_configs "$1"
10 |
--------------------------------------------------------------------------------
/kafka-rest-proxy/ccloud-generate-cp-configs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This file is deprecated in favor of the function in ../utils/ccloud_library.sh
4 |
5 | # Source library
6 | DIR_THIS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
7 | source ccloud_library.sh
8 |
9 | ccloud::generate_configs "$1"
10 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/src/main/resources/avro/io/confluent/examples/clients/cloud/DataRecordAvro2a.avsc:
--------------------------------------------------------------------------------
1 | {"namespace": "io.confluent.examples.clients.cloud",
2 | "type": "record",
3 | "name": "DataRecordAvro",
4 | "fields": [
5 | {"name": "count", "type": "long"},
6 | {"name": "region", "type": "string"}
7 | ]
8 | }
9 |
--------------------------------------------------------------------------------
/kafka-producer-application-callback/configuration/test.properties:
--------------------------------------------------------------------------------
1 | key.serializer=org.apache.kafka.common.serialization.StringSerializer
2 | value.serializer=org.apache.kafka.common.serialization.StringSerializer
3 | acks=all
4 |
5 |
6 | #Properties below this line are specific to code in this application
7 | output.topic.name=output-topic
--------------------------------------------------------------------------------
/kafka-java-maven-application/src/main/resources/avro/io/confluent/examples/clients/cloud/DataRecordAvro2b.avsc:
--------------------------------------------------------------------------------
1 | {"namespace": "io.confluent.examples.clients.cloud",
2 | "type": "record",
3 | "name": "DataRecordAvro",
4 | "fields": [
5 | {"name": "count", "type": "long"},
6 | {"name": "region", "type": "string", "default": ""}
7 | ]
8 | }
9 |
--------------------------------------------------------------------------------
/kafka-producer-application-callback/configuration/dev-template.properties:
--------------------------------------------------------------------------------
1 | key.serializer=org.apache.kafka.common.serialization.StringSerializer
2 | value.serializer=org.apache.kafka.common.serialization.StringSerializer
3 | acks=all
4 |
5 | #Properties below this line are specific to code in this application
6 | output.topic.name=output-topic
7 |
8 |
--------------------------------------------------------------------------------
/kafka-producer-application/configuration/dev-template.properties:
--------------------------------------------------------------------------------
1 | key.serializer=org.apache.kafka.common.serialization.StringSerializer
2 | value.serializer=org.apache.kafka.common.serialization.StringSerializer
3 | acks=all
4 |
5 | #Properties below this line are specific to code in this application
6 | input.topic.name=input-topic
7 | output.topic.name=output-topic
8 |
--------------------------------------------------------------------------------
/kafka-producer-application/configuration/test.properties:
--------------------------------------------------------------------------------
1 | key.serializer=org.apache.kafka.common.serialization.StringSerializer
2 | value.serializer=org.apache.kafka.common.serialization.StringSerializer
3 | acks=all
4 |
5 |
6 | #Properties below this line are specific to code in this application
7 | input.topic.name=input-topic
8 | output.topic.name=output-topic
9 |
--------------------------------------------------------------------------------
/kafka-producer-application-callback/configuration/prod.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=
2 |
3 | key.serializer=org.apache.kafka.common.serialization.StringSerializer
4 | value.serializer=org.apache.kafka.common.serialization.StringSerializer
5 | acks=all
6 |
7 |
8 | #Properties below this line are specific to code in this application
9 | output.topic.name=
--------------------------------------------------------------------------------
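The prod.properties above configures String serialization and acks=all for the callback variant of the producer application (KafkaProducerCallbackApplication.java is listed in the tree but not reproduced here). A minimal, hedged sketch of a send with a delivery callback; the class and method names are illustrative:

```
// Hedged sketch of producing with a delivery callback, as the application name
// suggests. The producer instance is assumed to be built from the properties above.
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class CallbackSendSketch {
  public static void send(Producer<String, String> producer,
                          String topic, String key, String value) {
    producer.send(new ProducerRecord<>(topic, key, value), (metadata, exception) -> {
      if (exception != null) {
        exception.printStackTrace();                    // delivery failed
      } else {
        System.out.printf("record written to %s-%d at offset %d%n",
            metadata.topic(), metadata.partition(), metadata.offset());
      }
    });
  }
}
```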
/kafka-streams/app/src/main/avro/product_order.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "namespace": "io.m03315.learning.kafka.avro",
3 | "type": "record",
4 | "name": "ProductOrder",
5 | "fields": [
6 | {"name": "order_id", "type": "string"},
7 | {"name": "product", "type": "string"},
8 | {"name": "user_id", "type": "string"},
9 | {"name": "time", "type": "long"}
10 | ]
11 | }
--------------------------------------------------------------------------------
/kafka-streams/app/src/main/avro/appliance_order.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "namespace": "io.m03315.learning.kafka.avro",
3 | "type": "record",
4 | "name": "ApplianceOrder",
5 | "fields": [
6 | {"name": "order_id", "type": "string"},
7 | {"name": "appliance_id", "type": "string"},
8 | {"name": "user_id", "type": "string"},
9 | {"name": "time", "type": "long"}
10 | ]
11 | }
--------------------------------------------------------------------------------
/kafka-producer-application/configuration/prod-template.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=
2 |
3 | key.serializer=org.apache.kafka.common.serialization.StringSerializer
4 | value.serializer=org.apache.kafka.common.serialization.StringSerializer
5 | acks=all
6 |
7 |
8 | #Properties below this line are specific to code in this application
9 | input.topic.name=
10 | output.topic.name=
--------------------------------------------------------------------------------
/kafka-c-getting-started/getting_started-template.ini:
--------------------------------------------------------------------------------
1 | [default]
2 | bootstrap.servers=
3 | security.protocol=SASL_SSL
4 | sasl.mechanisms=PLAIN
5 | sasl.username=< CLUSTER API KEY >
6 | sasl.password=< CLUSTER API SECRET >
7 |
8 | [consumer]
9 | group.id=c_example_group_1
10 |
11 | # 'auto.offset.reset=earliest' to start reading from the beginning of
12 | # the topic if no committed offsets exist.
13 | auto.offset.reset=earliest
14 |
--------------------------------------------------------------------------------
/kafka-streams/settings.gradle:
--------------------------------------------------------------------------------
1 | /*
2 | * This file was generated by the Gradle 'init' task.
3 | *
4 | * The settings file is used to specify which projects to include in your build.
5 | *
6 | * Detailed information about configuring a multi-project build in Gradle can be found
7 | * in the user manual at https://docs.gradle.org/7.6/userguide/multi_project_builds.html
8 | */
9 |
10 | rootProject.name = 'kafka-streams'
11 | include('app')
12 |
--------------------------------------------------------------------------------
/kafka-streams/app/src/main/avro/electronic_order.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "namespace": "io.m03315.learning.kafka.avro",
3 | "type": "record",
4 | "name": "ElectronicOrder",
5 | "fields": [
6 | {"name": "order_id", "type": "string" },
7 | {"name": "electronic_id", "type": "string" },
8 | {"name": "user_id", "type": "string" },
9 | {"name": "price", "type": "double", "default": 0.0 },
10 | {"name": "time", "type": "long" }
11 | ]
12 | }
--------------------------------------------------------------------------------
/kafka-python-getting-started/getting_started_template.ini:
--------------------------------------------------------------------------------
1 | [default]
2 | bootstrap.servers=< BOOTSTRAP SERVERS>
3 | security.protocol=SASL_SSL
4 | sasl.mechanisms=PLAIN
5 | sasl.username=< CLUSTER API KEY >
6 | sasl.password=< CLUSTER API SECRET >
7 |
8 | [consumer]
9 | group.id=python_example_group_1
10 |
11 | # 'auto.offset.reset=earliest' to start reading from the beginning of
12 | # the topic if no committed offsets exist.
13 | auto.offset.reset=earliest
14 |
--------------------------------------------------------------------------------
/kafka-streams/app/src/main/avro/combined_order.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "namespace": "io.m03315.learning.kafka.avro",
3 | "type": "record",
4 | "name": "CombinedOrder",
5 | "fields": [
6 | {"name": "electronic_order_id", "type": "string"},
7 | {"name": "appliance_order_id", "type": "string"},
8 | {"name": "appliance_id", "type": "string"},
9 | {"name": "user_name", "type": "string", "default": ""},
10 | {"name": "time", "type": "long"}
11 | ]
12 | }
--------------------------------------------------------------------------------
/kafka-consumer-application/configuration/dev-template.properties:
--------------------------------------------------------------------------------
1 | # Consumer properties
2 | key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
3 | value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
4 | max.poll.interval.ms=300000
5 | enable.auto.commit=true
6 | auto.offset.reset=earliest
7 | group.id=consumer-application
8 |
9 | # Application specific properties
10 | file.path=consumer-records.out
11 | input.topic.name=input-topic
12 |
--------------------------------------------------------------------------------
/kafka-streams/app/src/test/java/io/m03315/learning/kafka/AppTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * This Java source file was generated by the Gradle 'init' task.
3 | */
4 | package io.m03315.learning.kafka;
5 |
6 | import org.junit.jupiter.api.Test;
7 | import static org.junit.jupiter.api.Assertions.*;
8 |
9 | class AppTest {
10 | @Test void appHasAGreeting() {
11 | App classUnderTest = new App();
12 | assertNotNull(classUnderTest.getGreeting(), "app should have a greeting");
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/src/main/java/io/confluent/examples/clients/cloud/model/DataRecord.java:
--------------------------------------------------------------------------------
1 | package io.confluent.examples.clients.cloud.model;
2 |
3 | public class DataRecord {
4 |
5 | Long count;
6 |
7 | public DataRecord() {
8 | }
9 |
10 | public DataRecord(Long count) {
11 | this.count = count;
12 | }
13 |
14 | public Long getCount() {
15 | return count;
16 | }
17 |
18 | public String toString() {
19 | return new com.google.gson.Gson().toJson(this);
20 | }
21 |
22 | }
23 |
--------------------------------------------------------------------------------
/kafka-dotnet-getting-started/consumer/consumer.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Exe
5 | netcoreapp6.0
6 | Consumer
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/kafka-dotnet-getting-started/producer/producer.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Exe
5 | netcoreapp6.0
6 | Producer
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/checkstyle.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/kafka-consumer-application/configuration/ccloud.properties:
--------------------------------------------------------------------------------
1 | # Required connection configs for Kafka producer, consumer, and admin
2 | bootstrap.servers={{ BOOTSTRAP_SERVERS }}
3 | security.protocol=SASL_SSL
4 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='{{ CLUSTER_API_KEY }}' password='{{ CLUSTER_API_SECRET }}';
5 | sasl.mechanism=PLAIN
6 | # Required for correctness in Apache Kafka clients prior to 2.6
7 | client.dns.lookup=use_all_dns_ips
8 |
9 | # Best practice for Kafka producer to prevent data loss
10 | acks=all
11 |
12 | # Required connection configs for Confluent Cloud Schema Registry
13 | schema.registry.url={{ SR_URL }}
14 | basic.auth.credentials.source=USER_INFO
15 | basic.auth.user.info={{ SR_API_KEY }}:{{ SR_API_SECRET }}
16 |
--------------------------------------------------------------------------------
/kafka-spring-boot-getting-started/build.gradle:
--------------------------------------------------------------------------------
1 | buildscript {
2 | repositories {
3 | jcenter()
4 | }
5 | }
6 |
7 | plugins {
8 | id 'org.springframework.boot' version "2.7.5"
9 | id 'io.spring.dependency-management' version '1.1.0'
10 | id 'java'
11 | }
12 |
13 | repositories {
14 | jcenter()
15 |
16 | maven {
17 | url 'https://packages.confluent.io/maven'
18 | }
19 | }
20 |
21 | apply plugin: 'idea'
22 |
23 | group = 'examples'
24 | version = '0.0.1'
25 | sourceCompatibility = 11
26 |
27 | repositories {
28 | jcenter()
29 | }
30 |
31 | dependencies {
32 | implementation 'org.springframework.boot:spring-boot-starter-web:2.7.5'
33 | implementation 'org.apache.kafka:kafka-clients'
34 | implementation 'org.springframework.kafka:spring-kafka'
35 | }
36 |
--------------------------------------------------------------------------------
/kafka-producer-application-callback/configuration/ccloud.properties:
--------------------------------------------------------------------------------
1 | # Required connection configs for Kafka producer, consumer, and admin
2 | bootstrap.servers={{ BOOTSTRAP_SERVERS }}
3 | security.protocol=SASL_SSL
4 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='{{ CLUSTER_API_KEY }}' password='{{ CLUSTER_API_SECRET }}';
5 | sasl.mechanism=PLAIN
6 | # Required for correctness in Apache Kafka clients prior to 2.6
7 | client.dns.lookup=use_all_dns_ips
8 |
9 | # Best practice for Kafka producer to prevent data loss
10 | acks=all
11 |
12 | # Required connection configs for Confluent Cloud Schema Registry
13 | schema.registry.url={{ SR_URL }}
14 | basic.auth.credentials.source=USER_INFO
15 | basic.auth.user.info={{ SR_API_KEY }}:{{ SR_API_SECRET }}
16 |
--------------------------------------------------------------------------------
/kafka-producer-application/configuration/ccloud-template.properties:
--------------------------------------------------------------------------------
1 | # Required connection configs for Kafka producer, consumer, and admin
2 | bootstrap.servers={{ BOOTSTRAP_SERVERS }}
3 | security.protocol=SASL_SSL
4 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='{{ CLUSTER_API_KEY }}' password='{{ CLUSTER_API_SECRET }}';
5 | sasl.mechanism=PLAIN
6 | # Required for correctness in Apache Kafka clients prior to 2.6
7 | client.dns.lookup=use_all_dns_ips
8 |
9 | # Best practice for Kafka producer to prevent data loss
10 | acks=all
11 |
12 | # Required connection configs for Confluent Cloud Schema Registry
13 | schema.registry.url={{ SR_URL }}
14 | basic.auth.credentials.source=USER_INFO
15 | basic.auth.user.info={{ SR_API_KEY }}:{{ SR_API_SECRET }}
16 |
--------------------------------------------------------------------------------
/kafka-java-springboot/src/main/java/io/confluent/examples/clients/cloud/springboot/kafka/ConsumerExample.java:
--------------------------------------------------------------------------------
1 | package io.confluent.examples.clients.cloud.springboot.kafka;
2 |
3 | import org.apache.kafka.clients.consumer.ConsumerRecord;
4 | import org.springframework.kafka.annotation.KafkaListener;
5 | import org.springframework.stereotype.Component;
6 |
7 | import io.confluent.examples.clients.cloud.DataRecordAvro;
8 | import lombok.extern.log4j.Log4j2;
9 |
10 | @Log4j2
11 | @Component
12 | public class ConsumerExample {
13 |
14 | @KafkaListener(topics = "#{'${io.confluent.developer.config.topic.name}'}")
15 | public void consume(final ConsumerRecord consumerRecord) {
16 | log.info("received {} {}", consumerRecord.key(), consumerRecord.value());
17 |
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
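ProducerExample.java appears in the tree next to this listener but is not reproduced in this dump. A hedged sketch of what the producing side could look like with spring-kafka's KafkaTemplate, reusing the topic-name property from application.properties; the class name and value type are illustrative (the real example presumably sends the generated DataRecordAvro class):

```
// Hedged sketch only; not the repository's ProducerExample.java.
package io.confluent.examples.clients.cloud.springboot.kafka;

import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

@Component
public class ProducerExampleSketch {

  private final KafkaTemplate<String, Object> kafkaTemplate;

  @Value("${io.confluent.developer.config.topic.name}")
  private String topicName;

  ProducerExampleSketch(final KafkaTemplate<String, Object> kafkaTemplate) {
    this.kafkaTemplate = kafkaTemplate;
  }

  void produce(final String key, final long count) {
    // With Avro this would send new DataRecordAvro(count) instead of the bare long.
    kafkaTemplate.send(topicName, key, count);
  }
}
```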
/console-consumer-read-specific-offsets-partition/configuration/ccloud-template.properties:
--------------------------------------------------------------------------------
1 | # Required connection configs for Kafka producer, consumer, and admin
2 | bootstrap.servers={{ BOOTSTRAP_SERVERS }}
3 | security.protocol=SASL_SSL
4 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='{{ CLUSTER_API_KEY }}' password='{{ CLUSTER_API_SECRET }}';
5 | sasl.mechanism=PLAIN
6 | # Required for correctness in Apache Kafka clients prior to 2.6
7 | client.dns.lookup=use_all_dns_ips
8 |
9 | # Best practice for Kafka producer to prevent data loss
10 | acks=all
11 |
12 | # Required connection configs for Confluent Cloud Schema Registry
13 | schema.registry.url={{ SR_URL }}
14 | basic.auth.credentials.source=USER_INFO
15 | basic.auth.user.info={{ SR_API_KEY }}:{{ SR_API_SECRET }}
16 |
--------------------------------------------------------------------------------
/kafka-spring-boot-getting-started/src/main/resources/application-tempalte.yaml:
--------------------------------------------------------------------------------
1 | spring:
2 | kafka:
3 | bootstrap-servers:
4 | properties:
5 | security:
6 | protocol: SASL_SSL
7 | sasl:
8 | jaas:
9 | config: org.apache.kafka.common.security.plain.PlainLoginModule required username='{{ CLUSTER API KEY }}' password='{{ CLUSTER API SECRET }}';
10 | mechanism: PLAIN
11 | producer:
12 | key-serializer: org.apache.kafka.common.serialization.StringSerializer
13 | value-serializer: org.apache.kafka.common.serialization.StringSerializer
14 | consumer:
15 | group-id: group_id
16 | auto-offset-reset: earliest
17 | key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
18 | value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
19 |
--------------------------------------------------------------------------------
/kafka-rest-proxy/java.config:
--------------------------------------------------------------------------------
1 | # Required connection configs for Kafka producer, consumer, and admin
2 | bootstrap.servers={{ BROKER_ENDPOINT }}
3 | security.protocol=SASL_SSL
4 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='{{ CLUSTER_API_KEY }}' password='{{ CLUSTER_API_SECRET }}';
5 | sasl.mechanism=PLAIN
6 | # Required for correctness in Apache Kafka clients prior to 2.6
7 | client.dns.lookup=use_all_dns_ips
8 |
9 | # Best practice for higher availability in Apache Kafka clients prior to 3.0
10 | session.timeout.ms=45000
11 |
12 | # Best practice for Kafka producer to prevent data loss
13 | acks=all
14 |
15 | # Required connection configs for Confluent Cloud Schema Registry
16 | schema.registry.url=https://{{ SR_ENDPOINT }}
17 | basic.auth.credentials.source=USER_INFO
18 | basic.auth.user.info={{ SR_API_KEY }}:{{ SR_API_SECRET }}
19 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/java.config:
--------------------------------------------------------------------------------
1 | # Required connection configs for Kafka producer, consumer, and admin
2 | bootstrap.servers={{BROKER_ENDPOINT}}
3 | security.protocol=SASL_SSL
4 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='{{ CLUSTER_API_KEY }}' password='{{ CLUSTER_API_SECRET }}';
5 | sasl.mechanism=PLAIN
6 | # Required for correctness in Apache Kafka clients prior to 2.6
7 | client.dns.lookup=use_all_dns_ips
8 |
9 | # Best practice for higher availability in Apache Kafka clients prior to 3.0
10 | session.timeout.ms=45000
11 |
12 | # Best practice for Kafka producer to prevent data loss
13 | acks=all
14 |
15 | # Required connection configs for Confluent Cloud Schema Registry
16 | schema.registry.url=https://{{ SR_ENDPOINT }}
17 | basic.auth.credentials.source=USER_INFO
18 | basic.auth.user.info={{ SR_API_KEY }}:{{ SR_API_SECRET }}
19 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/src/main/java/io/confluent/examples/clients/cloud/model/PageviewRecord.java:
--------------------------------------------------------------------------------
1 | package io.confluent.examples.clients.cloud.model;
2 |
3 | public class PageviewRecord {
4 |
5 | Long viewtime;
6 | String userid;
7 | String pageid;
8 |
9 | public PageviewRecord() {
10 | }
11 |
12 | public PageviewRecord(Long viewtime, String userid, String pageid) {
13 | this.viewtime = viewtime;
14 | this.userid = userid;
15 | this.pageid = pageid;
16 | }
17 |
18 | public Long getViewtime() {
19 | return viewtime;
20 | }
21 |
22 | public String getUserid() {
23 | return userid;
24 | }
25 |
26 | public String getPageid() {
27 | return pageid;
28 | }
29 |
30 | public String toString() {
31 | return new com.google.gson.Gson().toJson(this);
32 | }
33 |
34 | }
35 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/src/main/java/io/confluent/examples/clients/cloud/Util.java:
--------------------------------------------------------------------------------
1 | package io.confluent.examples.clients.cloud;
2 |
3 | import java.io.FileInputStream;
4 | import java.io.IOException;
5 | import java.io.InputStream;
6 | import java.nio.file.Files;
7 | import java.nio.file.Paths;
8 | import java.util.Properties;
9 |
10 | public class Util {
11 | public static Properties loadConfig(String configFile) throws IOException {
12 | configFile = configFile.replaceFirst("^~", System.getProperty("user.home"));
13 | if (!Files.exists(Paths.get(configFile))) {
14 | throw new IOException(configFile + " not found.");
15 | }
16 | final Properties cfg = new Properties();
17 | try (InputStream inputStream = new FileInputStream(configFile)) {
18 | cfg.load(inputStream);
19 | }
20 | return cfg;
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/kafka-nodejs-getting-started/README.md:
--------------------------------------------------------------------------------
1 | # Node.js
2 |
3 |
4 | ## Install prerequisites
5 |
6 | Install [Node.js 16.3.1](https://nodejs.org/en/download/)
7 |
8 | Install the required Kafka library:
9 |
10 | ```
11 | npm i node-rdkafka
12 | ```
13 |
14 | ## Configuration
15 |
16 | Rename [getting-started-template.properties](getting-started-template.properties) to getting-started.properties
17 |
18 |
19 | Replace the following values with your own:
20 | - bootstrap.servers
21 | - sasl.username
22 | - sasl.password
23 |
24 |
25 | ## Build Producer
26 |
27 | - Create a utility script file [util.js](util.js)
28 |
29 | - Create the producer application file [producer.js](producer.js)
30 |
31 |
32 | ## Build Consumer
33 |
34 | - Create the consumer application file [consumer.js](consumer.js)
35 |
36 | ## Produce Events
37 |
38 | ```
39 | node producer.js getting-started.properties
40 | ```
41 |
42 | ## Consume Events
43 |
44 | ```
45 | node consumer.js getting-started.properties
46 | ```
47 |
--------------------------------------------------------------------------------
/kafka-c-getting-started/common.c:
--------------------------------------------------------------------------------
1 | #include <glib.h>
2 | #include <librdkafka/rdkafka.h>
3 | static void load_config_group(rd_kafka_conf_t *conf,
4 | GKeyFile *key_file,
5 | const char *group
6 | ) {
7 | char errstr[512];
8 | g_autoptr(GError) error = NULL;
9 |
10 | gchar **ptr = g_key_file_get_keys(key_file, group, NULL, &error);
11 | if (error) {
12 | g_error("%s", error->message);
13 | exit(1);
14 | }
15 |
16 | while (*ptr) {
17 | const char *key = *ptr;
18 | g_autofree gchar *value = g_key_file_get_string(key_file, group, key, &error);
19 |
20 | if (error) {
21 | g_error("Reading key: %s", error->message);
22 | exit(1);
23 | }
24 |
25 | if (rd_kafka_conf_set(conf, key, value, errstr, sizeof(errstr))
26 | != RD_KAFKA_CONF_OK
27 | ) {
28 | g_error("%s", errstr);
29 | exit(1);
30 | }
31 |
32 | ptr++;
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/kafka-spring-boot-getting-started/src/main/java/examples/Consumer.java:
--------------------------------------------------------------------------------
1 | package examples;
2 |
3 | import org.slf4j.Logger;
4 | import org.slf4j.LoggerFactory;
5 | import org.springframework.kafka.annotation.KafkaListener;
6 | import org.springframework.stereotype.Service;
7 | import org.apache.kafka.clients.consumer.ConsumerRecord;
8 | import org.springframework.kafka.support.KafkaHeaders;
9 | import org.springframework.messaging.handler.annotation.Header;
10 |
11 | import java.io.IOException;
12 |
13 | @Service
14 | public class Consumer {
15 |
16 | private final Logger logger = LoggerFactory.getLogger(Consumer.class);
17 |
18 | @KafkaListener(id = "myConsumer", topics = "purchases", groupId = "spring-boot", autoStartup = "false")
19 | public void listen(String value,
20 | @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
21 | @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) String key) {
22 | logger.info(String.format("Consumed event from topic %s: key = %-10s value = %s", topic, key, value));
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/initial/docker-compose.yml:
--------------------------------------------------------------------------------
1 | ---
2 | version: '3'
3 | services:
4 | zookeeper:
5 | image: confluentinc/cp-zookeeper:7.3.0
6 | container_name: zookeeper
7 | environment:
8 | ZOOKEEPER_CLIENT_PORT: 2181
9 | ZOOKEEPER_TICK_TIME: 2000
10 | broker:
11 | image: confluentinc/cp-kafka:7.3.0
12 | container_name: broker
13 | ports:
14 | # To learn about configuring Kafka for access across networks see
15 | # https://www.confluent.io/blog/kafka-client-cannot-connect-to-broker-on-aws-on-docker-etc/
16 | - "9092:9092"
17 | depends_on:
18 | - zookeeper
19 | environment:
20 | KAFKA_BROKER_ID: 1
21 | KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
22 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_INTERNAL:PLAINTEXT
23 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9092,PLAINTEXT_INTERNAL://broker:29092
24 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
25 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
26 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
--------------------------------------------------------------------------------
/kafka-streams/app/src/main/resources/streams.properties.orig:
--------------------------------------------------------------------------------
1 | aggregate.input.topic=aggregate-input-topic
2 | aggregate.output.topic=aggregate-output-topic
3 | basic.input.topic=basic-input-streams
4 | basic.output.topic=basic-output-streams
5 | error.input.topic=streams-error-input
6 | error.output.topic=streams-error-output
7 | extractor.input.topic=extractor-input-topic
8 | extractor.output.topic=extractor-output-topic
9 | stream_one.input.topic=streams-left-side-input
10 | stream_two.input.topic=streams-right-side-input
11 | table.input.topic=streams-join-table-input
12 | joins.output.topic=streams-joins-output
13 | ktable.input.topic=ktable-input
14 | ktable.output.topic=ktable-output
15 | processor.input.topic=processor-input-topic
16 | processor.output.topic=processor-output-topic
17 | windowed.input.topic=windowed-input-topic
18 | windowed.output.topic=windowed-output-topic
19 | serdes.input.topic=serdes-input-topic
20 | serdes.output.topic=serdes-output-topic
21 | sr.input.topic=sr-input-topic
22 | sr.output.topic=sr-output-topic
--------------------------------------------------------------------------------
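The stream_one/stream_two/table topics above are the inputs to the StreamsJoin example (StreamsJoin.java is listed in the tree but not reproduced here), and the order/user Avro schemas earlier in this dump describe its record types. A simplified, hedged sketch of that kind of topology, a windowed stream-stream join followed by a stream-table enrichment, with String values standing in for the generated Avro classes and assuming Kafka Streams 3.x:

```
// Simplified, hedged sketch; topic names are taken from streams.properties.orig above.
import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.JoinWindows;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.kstream.StreamJoined;

public class StreamsJoinSketch {
  static void build(StreamsBuilder builder) {
    KStream<String, String> leftOrders =
        builder.stream("streams-left-side-input", Consumed.with(Serdes.String(), Serdes.String()));
    KStream<String, String> rightOrders =
        builder.stream("streams-right-side-input", Consumed.with(Serdes.String(), Serdes.String()));
    KTable<String, String> users =
        builder.table("streams-join-table-input", Consumed.with(Serdes.String(), Serdes.String()));

    // Stream-stream join: pair up records from both streams that share a key
    // and arrive within 30 minutes of each other.
    KStream<String, String> combined = leftOrders.join(
        rightOrders,
        (left, right) -> left + " / " + right,
        JoinWindows.ofTimeDifferenceWithNoGrace(Duration.ofMinutes(30)),
        StreamJoined.with(Serdes.String(), Serdes.String(), Serdes.String()));

    // Stream-table join: enrich each combined order with the user record, if present.
    combined
        .leftJoin(users, (order, user) -> user == null ? order : order + " by " + user)
        .to("streams-joins-output", Produced.with(Serdes.String(), Serdes.String()));
  }
}
```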
/kafka-java-springboot/startStreams.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | RED='\033[0;31m'
4 | NC='\033[0m' # No Color
5 | GREEN='\033[0;32m'
6 | BLUE='\033[0;34m'
7 |
8 | # including some common utilities (`ccloud::validate_ccloud_config`, `ccloud::validate_schema_registry_up`, etc)
9 | source ./ccloud_library.sh
10 |
11 | echo -e "\n${BLUE}\t☁️ Generating a config from Confluent Cloud properties... ${NC}\n"
12 |
13 | export CONFIG_FILE=~/java.config
14 | ccloud::validate_ccloud_config $CONFIG_FILE || exit
15 |
16 | ./ccloud-generate-cp-configs.sh $CONFIG_FILE
17 |
18 | DELTA_CONFIGS_DIR=delta_configs
19 | source $DELTA_CONFIGS_DIR/env.delta
20 |
21 | ccloud::validate_schema_registry_up $SCHEMA_REGISTRY_BASIC_AUTH_USER_INFO $SCHEMA_REGISTRY_URL || exit 1
22 |
23 |
24 | echo -e "${GREEN}\t🍃 Starting Spring Boot application (Kafka Streams)... ${NC}"
25 |
26 | java -cp build/libs/java-springboot-0.0.1-SNAPSHOT.jar -Dloader.main=io.confluent.examples.clients.cloud.springboot.streams.SpringbootStreamsApplication org.springframework.boot.loader.PropertiesLauncher
27 |
--------------------------------------------------------------------------------
/kafka-go-getting-started/util.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bufio"
5 | "fmt"
6 | "os"
7 | "strings"
8 |
9 | "github.com/confluentinc/confluent-kafka-go/kafka"
10 | )
11 |
12 | func ReadConfig(configFile string) kafka.ConfigMap {
13 |
14 | m := make(map[string]kafka.ConfigValue)
15 |
16 | file, err := os.Open(configFile)
17 | if err != nil {
18 | fmt.Fprintf(os.Stderr, "Failed to open file: %s", err)
19 | os.Exit(1)
20 | }
21 | defer file.Close()
22 |
23 | scanner := bufio.NewScanner(file)
24 | for scanner.Scan() {
25 | line := strings.TrimSpace(scanner.Text())
26 | if !strings.HasPrefix(line, "#") && len(line) != 0 {
27 | kv := strings.Split(line, "=")
28 | parameter := strings.TrimSpace(kv[0])
29 | value := strings.TrimSpace(kv[1])
30 | m[parameter] = value
31 | }
32 | }
33 |
34 | if err := scanner.Err(); err != nil {
35 | fmt.Printf("Failed to read file: %s", err)
36 | os.Exit(1)
37 | }
38 |
39 | return m
40 |
41 | }
42 |
--------------------------------------------------------------------------------
/kcat/kcat-example.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eu
4 |
5 | source ../../../utils/helper.sh
6 | source ../../../utils/ccloud_library.sh
7 |
8 | CONFIG_FILE=~/.confluent/librdkafka.config
9 | ccloud::validate_ccloud_config $CONFIG_FILE || exit
10 |
11 | # Set topic name
12 | topic_name=test1
13 |
14 | # Create topic in Confluent Cloud
15 | ~/bin/confluent kafka topic create --if-not-exists $topic_name
16 | # Uncomment below if local Kafka cluster
17 | #kafka-topics --bootstrap-server `grep "^\s*bootstrap.server" $CONFIG_FILE | tail -1` --topic $topic_name --create --if-not-exists
18 |
19 | # To specify the configuration file for connecting to the Confluent Cloud cluster
20 | # option 1: use the `-F <config file>` argument (shown in the code below)
21 | # option 2: export `KCAT_CONFIG`
22 | #export KCAT_CONFIG=$CONFIG_FILE
23 |
24 | # Produce messages
25 | num_messages=10
26 | (for i in `seq 1 $num_messages`; do echo "alice,{\"count\":${i}}" ; done) | \
27 | kafkacat -F $CONFIG_FILE \
28 | -K , \
29 | -P -t $topic_name
30 |
31 | # Consume messages
32 | kafkacat -F $CONFIG_FILE \
33 | -K , \
34 | -C -t $topic_name -e
35 |
--------------------------------------------------------------------------------
/kafka-consumer-application/src/main/java/io/confluent/developer/FileWritingRecordsHandler.java:
--------------------------------------------------------------------------------
1 | package io.confluent.developer;
2 |
3 | import java.io.IOException;
4 | import java.nio.file.Files;
5 | import java.nio.file.Path;
6 | import java.nio.file.StandardOpenOption;
7 | import java.util.ArrayList;
8 | import java.util.List;
9 | import org.apache.kafka.clients.consumer.ConsumerRecords;
10 |
11 | public class FileWritingRecordsHandler implements ConsumerRecordsHandler {
12 |
13 | private final Path path;
14 |
15 | public FileWritingRecordsHandler(final Path path) {
16 | this.path = path;
17 | }
18 |
19 | @Override
20 | public void process(final ConsumerRecords consumerRecords) {
21 | final List valueList = new ArrayList<>();
22 | consumerRecords.forEach(record -> valueList.add(record.value()));
23 | if (!valueList.isEmpty()) {
24 | try {
25 | Files.write(path, valueList, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.APPEND);
26 | } catch (IOException e) {
27 | throw new RuntimeException(e);
28 | }
29 | }
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/kcat/README.md:
--------------------------------------------------------------------------------
1 | # kcat
2 |
3 | ## install kcat
4 |
5 | ```
6 | sudo apt-get install kafkacat
7 | ```
8 |
9 |
10 | ## create a local config file
11 |
12 | ```
13 | touch ~/.confluent/librdkafka.config
14 | ```
15 |
16 | ### Template configuration
17 | ```
18 | # Required connection configs for Kafka producer, consumer, and admin
19 | bootstrap.servers={{ BROKER_ENDPOINT }}
20 | security.protocol=SASL_SSL
21 | sasl.mechanisms=PLAIN
22 | sasl.username={{ CLUSTER_API_KEY }}
23 | sasl.password={{ CLUSTER_API_SECRET }}
24 |
25 | # Best practice for higher availability in librdkafka clients prior to 1.7
26 | session.timeout.ms=45000
27 | ```
28 |
29 |
30 | ## get kcat sample
31 |
32 | ```
33 | git clone https://github.com/confluentinc/examples
34 | cd examples
35 | git checkout 7.3.0-post
36 | ```
37 |
38 | ```
39 | cd clients/cloud/kcat/
40 | ```
41 |
42 |
43 | ## Produce & Consume Records
44 |
45 | ```
46 | confluent kafka topic create --if-not-exists test1
47 | ```
48 |
49 |
50 | Replace examples/clients/cloud/kcat/kcat-example.sh with [kcat-example.sh](kcat-example.sh)
51 |
52 |
53 | Run:
54 |
55 | ```
56 | ./kcat-example.sh
57 | ```
58 |
59 |
60 |
--------------------------------------------------------------------------------
/kafka-java-springboot/startProducerConsumer.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | RED='\033[0;31m'
4 | NC='\033[0m' # No Color
5 | GREEN='\033[0;32m'
6 | BLUE='\033[0;34m'
7 |
8 | # including some common utilities (`ccloud::validate_ccloud_config`, `ccloud::validate_schema_registry_up`, etc)
9 | source ccloud_library.sh
10 |
11 | echo -e "\n${BLUE}\t☁️ Generating a config from Confluent Cloud properties... ${NC}\n"
12 |
13 | export CONFIG_FILE=~/java.config
14 | ccloud::validate_ccloud_config $CONFIG_FILE || exit
15 |
16 | ./ccloud-generate-cp-configs.sh $CONFIG_FILE
17 |
18 | DELTA_CONFIGS_DIR=delta_configs
19 | source $DELTA_CONFIGS_DIR/env.delta
20 |
21 | ccloud::validate_schema_registry_up $SCHEMA_REGISTRY_BASIC_AUTH_USER_INFO $SCHEMA_REGISTRY_URL || exit 1
22 |
23 |
24 | echo -e "\n${BLUE}\t🍃 Building Spring Boot application... ${NC}"
25 |
26 | ./gradlew build
27 |
28 | echo -e "${GREEN}\t🍃 Starting Spring Boot application (spring-kafka API)... ${NC}"
29 |
30 | java -cp build/libs/java-springboot-0.0.1-SNAPSHOT.jar -Dloader.main=io.confluent.examples.clients.cloud.springboot.kafka.SpringbootKafkaApplication org.springframework.boot.loader.PropertiesLauncher
31 |
--------------------------------------------------------------------------------
/kafka-java-springboot/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | log4j.rootLogger=INFO, stderr
16 |
17 | ## STDERR Appender
18 | log4j.appender.stderr=org.apache.log4j.ConsoleAppender
19 | log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
20 | log4j.appender.stderr.Target=System.err
21 | log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
22 |
--------------------------------------------------------------------------------
/kafka-nodejs-getting-started/util.js:
--------------------------------------------------------------------------------
1 | const fs = require('fs');
2 | const readline = require('readline');
3 |
4 | function readAllLines(path) {
5 | return new Promise((resolve, reject) => {
6 | // Test file access directly, so that we can fail fast.
7 | // Otherwise, an ENOENT is thrown in the global scope by the readline internals.
8 | try {
9 | fs.accessSync(path, fs.constants.R_OK);
10 | } catch (err) {
11 | reject(err);
12 | }
13 |
14 | let lines = [];
15 |
16 | const reader = readline.createInterface({
17 | input: fs.createReadStream(path),
18 | crlfDelay: Infinity
19 | });
20 |
21 | reader
22 | .on('line', (line) => lines.push(line))
23 | .on('close', () => resolve(lines));
24 | });
25 | }
26 |
27 | exports.configFromPath = async function configFromPath(path) {
28 | const lines = await readAllLines(path);
29 |
30 | return lines
31 | .filter((line) => !/^\s*?#/.test(line))
32 | .map((line) => line
33 | .split('=')
34 | .map((s) => s.trim()))
35 | .reduce((config, [k, v]) => {
36 | config[k] = v;
37 | return config;
38 | }, {});
39 | };
40 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | # Licensed to the Apache Software Foundation (ASF) under one or more
2 | # contributor license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright ownership.
4 | # The ASF licenses this file to You under the Apache License, Version 2.0
5 | # (the "License"); you may not use this file except in compliance with
6 | # the License. You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | log4j.rootLogger=INFO, stderr
16 |
17 | ## STDERR Appender
18 | log4j.appender.stderr=org.apache.log4j.ConsoleAppender
19 | log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
20 | log4j.appender.stderr.Target=System.err
21 | log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
22 |
--------------------------------------------------------------------------------
/kafka-java-springboot/src/main/java/io/confluent/examples/clients/cloud/springboot/kafka/SpringbootKafkaApplication.java:
--------------------------------------------------------------------------------
1 | package io.confluent.examples.clients.cloud.springboot.kafka;
2 |
3 | import org.apache.kafka.clients.admin.NewTopic;
4 | import org.springframework.beans.factory.annotation.Value;
5 | import org.springframework.boot.SpringApplication;
6 | import org.springframework.boot.autoconfigure.SpringBootApplication;
7 | import org.springframework.context.annotation.Bean;
8 |
9 | @SpringBootApplication
10 | public class SpringbootKafkaApplication {
11 |
12 | // injected from application.properties
13 | @Value("${io.confluent.developer.config.topic.name}")
14 | private String topicName;
15 |
16 | @Value("${io.confluent.developer.config.topic.partitions}")
17 | private int numPartitions;
18 |
19 | @Value("${io.confluent.developer.config.topic.replicas}")
20 | private int replicas;
21 |
22 | @Bean
23 | NewTopic moviesTopic() {
24 | return new NewTopic(topicName, numPartitions, (short) replicas);
25 | }
26 |
27 | public static void main(final String[] args) {
28 | SpringApplication.run(SpringbootKafkaApplication.class, args);
29 | }
30 |
31 | }
32 |
--------------------------------------------------------------------------------
/kafka-dotnet-getting-started/README.md:
--------------------------------------------------------------------------------
1 | # .NET
2 |
3 |
4 | ## Install .NET
5 |
6 | Install [.NET 6.0 or later](https://dotnet.microsoft.com/download)
7 |
8 | ## Configuration
9 |
10 | Rename [getting-started-template.properties](getting-started-template.properties) to getting-started.properties
11 |
12 |
13 | Replace the following values with your own:
14 | - bootstrap.servers
15 | - sasl.username
16 | - sasl.password
17 |
18 |
19 | ## Build Producer
20 |
21 | Create a project file [producer.csproj](producer/producer.csproj)
22 |
23 | Create the producer application file [producer.cs](producer/producer.cs)
24 |
25 | Compile the application:
26 |
27 | ```
28 | cd producer
29 | dotnet build producer.csproj
30 | ```
31 |
32 |
33 | ## Build Consumer
34 |
35 | Create a project file [consumer.csproj](consumer/consumer.csproj)
36 |
37 | Create the consumer application file [consumer.cs](consumer/consumer.cs)
38 |
39 | Compile the application:
40 |
41 | ```
42 | cd ../consumer
43 | dotnet build consumer.csproj
44 | cd ..
45 | ```
46 |
47 | ## Produce Events
48 |
49 | ```
50 | cd producer
51 | dotnet run $(pwd)/../getting-started.properties
52 | ```
53 |
54 | ## Consume Events
55 |
56 | ```
57 | cd consumer
58 | dotnet run $(pwd)/../getting-started.properties
59 | ```
60 |
--------------------------------------------------------------------------------
/kafka-spring-boot-getting-started/REAMDE.md:
--------------------------------------------------------------------------------
1 | # Apache Kafka and Spring Boot
2 |
3 |
4 | ## Requirements
5 |
6 | - Gradle
7 | - Java 11
8 |
9 |
10 | ## Create a Gradle project
11 |
12 | Create a new Gradle build file [build.gradle](build.gradle)
13 |
14 |
15 | ## Create configuration
16 |
17 | Rename the file [application-template.yaml](src/main/resources/application-template.yaml) to application.yaml
18 |
19 |
20 | Replace the values below with yours (a sample is sketched after the list):
21 |
22 | - bootstrap-servers
23 |
24 | - {{ CLUSTER API KEY }}
25 |
26 | - {{ CLUSTER API SECRET }}
27 |
28 |
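A minimal `application.yaml` sketch (an assumption about the renamed template's layout, using the standard Spring Kafka keys; keep whatever structure the template already provides):

```
spring:
  kafka:
    bootstrap-servers: <your-bootstrap-servers>
    properties:
      security.protocol: SASL_SSL
      sasl.mechanism: PLAIN
      sasl.jaas.config: org.apache.kafka.common.security.plain.PlainLoginModule required username='{{ CLUSTER API KEY }}' password='{{ CLUSTER API SECRET }}';
```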
29 |
30 | ## Create a topic
31 |
32 |
33 | Use the existing topic named purchases; if it does not exist, create it (one way is sketched below)
34 |
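For example, assuming the Confluent CLI is installed and logged in to your cluster:

```
confluent kafka topic create purchases
```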
35 |
36 | ## Write a Spring Boot producer application
37 |
38 | Create a new Java class annotated with @SpringBootApplication: [SpringBootWithKafkaApplication.java](src/main/java/examples/SpringBootWithKafkaApplication.java)
39 |
40 | Create a Kafka producer class [Producer.java](src/main/java/examples/Producer.java)
41 |
42 |
43 |
44 | ## Write a consumer application
45 |
46 | Create a new Java class for the Kafka consumer: [Consumer.java](src/main/java/examples/Consumer.java)
47 |
48 |
49 | ## Build & compile project
50 |
51 | ```
52 | gradle build
53 | ```
54 |
55 | ## Produce Events
56 |
57 | ```
58 | gradle bootRun --args='--producer'
59 | ```
60 |
61 | ## Consume Events
62 |
63 | ```
64 | gradle bootRun --args='--consumer'
65 | ```
66 |
67 |
--------------------------------------------------------------------------------
/kafka-consumer-application/build.gradle:
--------------------------------------------------------------------------------
1 | buildscript {
2 | repositories {
3 | mavenCentral()
4 | }
5 | dependencies {
6 | classpath "gradle.plugin.com.github.jengelman.gradle.plugins:shadow:7.0.0"
7 | }
8 | }
9 |
10 | plugins {
11 | id "java"
12 | id "com.google.cloud.tools.jib" version "3.3.1"
13 | id "idea"
14 | id "eclipse"
15 | }
16 |
17 | sourceCompatibility = "1.8"
18 | targetCompatibility = "1.8"
19 | version = "0.0.1"
20 |
21 | repositories {
22 | mavenCentral()
23 |
24 | maven {
25 | url "https://packages.confluent.io/maven"
26 | }
27 | }
28 |
29 | apply plugin: "com.github.johnrengelman.shadow"
30 |
31 | dependencies {
32 | implementation "org.slf4j:slf4j-simple:2.0.5"
33 | implementation "org.apache.kafka:kafka-clients:3.3.1"
34 | testImplementation "junit:junit:4.13.2"
35 | testImplementation 'org.hamcrest:hamcrest:2.2'
36 | }
37 |
38 | test {
39 | testLogging {
40 | outputs.upToDateWhen { false }
41 | showStandardStreams = true
42 | exceptionFormat = "full"
43 | }
44 | }
45 |
46 | jar {
47 | manifest {
48 | attributes(
49 | "Class-Path": configurations.compileClasspath.collect { it.getName() }.join(" "),
50 | "Main-Class": "io.confluent.developer.KafkaConsumerApplication"
51 | )
52 | }
53 | }
54 |
55 | shadowJar {
56 | archiveBaseName = "kafka-consumer-application-standalone"
57 | archiveClassifier = ''
58 | }
59 |
--------------------------------------------------------------------------------
/kafka-spring-boot-getting-started/src/main/java/examples/Producer.java:
--------------------------------------------------------------------------------
1 | package examples;
2 |
3 | import org.slf4j.Logger;
4 | import org.slf4j.LoggerFactory;
5 | import org.springframework.beans.factory.annotation.Autowired;
6 | import org.springframework.kafka.core.KafkaTemplate;
7 | import org.springframework.stereotype.Service;
8 | import org.springframework.util.concurrent.ListenableFuture;
9 | import org.springframework.util.concurrent.ListenableFutureCallback;
10 | import org.springframework.kafka.support.SendResult;
11 |
12 | @Service
13 | public class Producer {
14 |
15 | private static final Logger logger = LoggerFactory.getLogger(Producer.class);
16 | private static final String TOPIC = "purchases";
17 |
18 | @Autowired
19 | private KafkaTemplate<String, String> kafkaTemplate;
20 |
21 | public void sendMessage(String key, String value) {
22 | ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(TOPIC, key, value);
23 | future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
24 | @Override
25 | public void onSuccess(SendResult<String, String> result) {
26 | logger.info(String.format("Produced event to topic %s: key = %-10s value = %s", TOPIC, key, value));
27 | }
28 | @Override
29 | public void onFailure(Throwable ex) {
30 | ex.printStackTrace();
31 | }
32 | });
33 | }
34 |
35 | }
36 |
--------------------------------------------------------------------------------
/kafka-producer-application/build.gradle:
--------------------------------------------------------------------------------
1 | buildscript {
2 | repositories {
3 | mavenCentral()
4 | }
5 | dependencies {
6 | classpath "gradle.plugin.com.github.jengelman.gradle.plugins:shadow:7.0.0"
7 | }
8 | }
9 |
10 | plugins {
11 | id "java"
12 | id "com.google.cloud.tools.jib" version "3.3.1"
13 | id "idea"
14 | id "eclipse"
15 | }
16 |
17 | sourceCompatibility = "1.8"
18 | targetCompatibility = "1.8"
19 | version = "0.0.1"
20 |
21 | repositories {
22 | mavenCentral()
23 |
24 | maven {
25 | url "https://packages.confluent.io/maven"
26 | }
27 | }
28 |
29 | apply plugin: "com.github.johnrengelman.shadow"
30 |
31 | dependencies {
32 | implementation "org.slf4j:slf4j-simple:2.0.3"
33 | implementation "org.apache.kafka:kafka-clients:3.3.1"
34 | testImplementation "org.apache.kafka:kafka-streams-test-utils:3.3.1"
35 | testImplementation "junit:junit:4.13.2"
36 | testImplementation 'org.hamcrest:hamcrest:2.2'
37 | }
38 |
39 | test {
40 | testLogging {
41 | outputs.upToDateWhen { false }
42 | showStandardStreams = true
43 | exceptionFormat = "full"
44 | }
45 | }
46 |
47 | jar {
48 | manifest {
49 | attributes(
50 | "Class-Path": configurations.compileClasspath.collect { it.getName() }.join(" "),
51 | "Main-Class": "io.confluent.developer.KafkaProducerApplication"
52 | )
53 | }
54 | }
55 |
56 | shadowJar {
57 | archiveBaseName = "kafka-producer-application-standalone"
58 | archiveClassifier = ''
59 | }
60 |
61 |
--------------------------------------------------------------------------------
/kafka-producer-application-callback/build.gradle:
--------------------------------------------------------------------------------
1 | buildscript {
2 | repositories {
3 | mavenCentral()
4 | }
5 | dependencies {
6 | classpath "gradle.plugin.com.github.jengelman.gradle.plugins:shadow:7.0.0"
7 | }
8 | }
9 |
10 | plugins {
11 | id "java"
12 | id "com.google.cloud.tools.jib" version "3.3.1"
13 | id "idea"
14 | id "eclipse"
15 | }
16 |
17 | sourceCompatibility = "1.8"
18 | targetCompatibility = "1.8"
19 | version = "0.0.1"
20 |
21 | repositories {
22 | mavenCentral()
23 |
24 | maven {
25 | url "https://packages.confluent.io/maven"
26 | }
27 | }
28 |
29 | apply plugin: "com.github.johnrengelman.shadow"
30 |
31 | dependencies {
32 | implementation "org.slf4j:slf4j-simple:2.0.3"
33 | implementation "org.apache.kafka:kafka-streams:3.1.0"
34 | testImplementation "org.apache.kafka:kafka-streams-test-utils:3.1.0"
35 | testImplementation "junit:junit:4.13.2"
36 | testImplementation 'org.hamcrest:hamcrest:2.2'
37 | }
38 |
39 | test {
40 | testLogging {
41 | outputs.upToDateWhen { false }
42 | showStandardStreams = true
43 | exceptionFormat = "full"
44 | }
45 | }
46 |
47 | jar {
48 | manifest {
49 | attributes(
50 | "Class-Path": configurations.compileClasspath.collect { it.getName() }.join(" "),
51 | "Main-Class": "io.confluent.developer.KafkaProducerCallbackApplication"
52 | )
53 | }
54 | }
55 |
56 | shadowJar {
57 | archiveBaseName = "kafka-producer-application-callback-standalone"
58 | archiveClassifier = ''
59 | }
60 |
--------------------------------------------------------------------------------
/kafka-c-getting-started/README.md:
--------------------------------------------------------------------------------
1 | # Apache Kafka C/C++
2 |
3 |
4 | ## Install prerequisites
5 |
6 |
7 | Install gcc
8 | ```
9 | sudo apt-get install gcc
10 | ```
11 |
12 | Install [librdkafka](https://github.com/edenhill/librdkafka)
13 |
14 | ```
15 | sudo apt-get install librdkafka-dev
16 | ```
17 |
18 | Install [pkg-config](https://www.freedesktop.org/wiki/Software/pkg-config/)
19 |
20 | ```
21 | sudo apt-get install pkg-config
22 | ```
23 |
24 | Install [glib](https://www.gnu.org/software/libc/)
25 |
26 | ```
27 | sudo apt-get install -y libglib2.0-dev
28 | ```
29 |
30 | ## Create project
31 |
32 | Create a new [Makefile](Makefile)
33 |
34 |
35 | Create a common C file, [common.c](common.c), that will be used by both the producer and the consumer
36 |
37 | ## Configuration
38 |
39 | Rename the config file [getting_started-template.ini](getting_started-template.ini) to getting_started.ini
40 |
41 | Replace the values below with yours (a sample file is sketched after the list):
42 |
43 | - bootstrap.servers
44 | - CLUSTER API KEY
45 | - CLUSTER API SECRET
46 |
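A minimal `getting_started.ini` sketch for Confluent Cloud (placeholder values; the section names are an assumption based on the template's `[default]`/`[consumer]` layout):

```
[default]
bootstrap.servers=<your-bootstrap-servers>
security.protocol=SASL_SSL
sasl.mechanisms=PLAIN
sasl.username=<CLUSTER API KEY>
sasl.password=<CLUSTER API SECRET>

[consumer]
group.id=kafka-c-getting-started
auto.offset.reset=earliest
```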
47 | ## Build Producer
48 |
49 | Create a producer file [producer.c](producer.c)
50 |
51 | ## Build Consumer
52 |
53 | Create a consumer file [consumer.c](consumer.c)
54 |
55 |
56 | ## Produce Events
57 |
58 | Compile the producer:
59 | ```
60 | make producer
61 | ```
62 |
63 | Run the producer with the config file:
64 |
65 | ```
66 | ./producer getting_started.ini
67 | ```
68 |
69 | ## Consume Events
70 |
71 | Compile the consumer:
72 |
73 | ```
74 | make consumer
75 | ```
76 |
77 | Run the consumer with the config file:
78 |
79 | ```
80 | ./consumer getting_started.ini
81 | ```
82 |
83 |
--------------------------------------------------------------------------------
/kafka-java-springboot/build.gradle:
--------------------------------------------------------------------------------
1 | buildscript {
2 | repositories {
3 | jcenter()
4 | }
5 | dependencies {
6 | classpath 'com.commercehub.gradle.plugin:gradle-avro-plugin:0.15.1'
7 | }
8 | }
9 |
10 | plugins {
11 | id 'org.springframework.boot' version '2.2.4.RELEASE'
12 | id 'io.spring.dependency-management' version '1.0.9.RELEASE'
13 | id 'java'
14 | }
15 |
16 | repositories {
17 | jcenter()
18 |
19 | maven {
20 | url 'https://packages.confluent.io/maven'
21 | }
22 | }
23 |
24 | apply plugin: 'com.commercehub.gradle.plugin.avro'
25 | apply plugin: 'idea'
26 |
27 | group = 'io.confluent.examples.clients.cloud'
28 | version = '0.0.1-SNAPSHOT'
29 | sourceCompatibility = '8'
30 |
31 | bootJar {
32 | mainClassName = "io.confluent.examples.clients.cloud.springboot.kafka.SpringbootKafkaApplication"
33 | }
34 |
35 | repositories {
36 | jcenter()
37 | }
38 |
39 | dependencies {
40 | implementation 'org.springframework.boot:spring-boot-starter'
41 | implementation 'org.apache.kafka:kafka-streams'
42 | implementation 'org.springframework.kafka:spring-kafka'
43 |
44 | compileOnly 'org.projectlombok:lombok'
45 | annotationProcessor 'org.projectlombok:lombok'
46 |
47 | implementation 'org.apache.kafka:kafka-streams'
48 | implementation 'org.apache.avro:avro'
49 | implementation 'io.confluent:kafka-streams-avro-serde:5.4.0'
50 |
51 | testImplementation('org.springframework.boot:spring-boot-starter-test') {
52 | exclude group: 'org.junit.vintage', module: 'junit-vintage-engine'
53 | }
54 | testImplementation 'org.springframework.kafka:spring-kafka-test'
55 | }
56 |
57 | test {
58 | useJUnitPlatform()
59 | }
60 |
--------------------------------------------------------------------------------
/kafka-dotnet-getting-started/consumer/consumer.cs:
--------------------------------------------------------------------------------
1 | using Confluent.Kafka;
2 | using System;
3 | using System.Threading;
4 | using Microsoft.Extensions.Configuration;
5 |
6 | class Consumer {
7 |
8 | static void Main(string[] args)
9 | {
10 | if (args.Length != 1) {
11 | Console.WriteLine("Please provide the configuration file path as a command line argument");
12 | }
13 |
14 | IConfiguration configuration = new ConfigurationBuilder()
15 | .AddIniFile(args[0])
16 | .Build();
17 |
18 | configuration["group.id"] = "kafka-dotnet-getting-started";
19 | configuration["auto.offset.reset"] = "earliest";
20 |
21 | const string topic = "purchases";
22 |
23 | CancellationTokenSource cts = new CancellationTokenSource();
24 | Console.CancelKeyPress += (_, e) => {
25 | e.Cancel = true; // prevent the process from terminating.
26 | cts.Cancel();
27 | };
28 |
29 | using (var consumer = new ConsumerBuilder<string, string>(
30 | configuration.AsEnumerable()).Build())
31 | {
32 | consumer.Subscribe(topic);
33 | try {
34 | while (true) {
35 | var cr = consumer.Consume(cts.Token);
36 | Console.WriteLine($"Consumed event from topic {topic} with key {cr.Message.Key,-10} and value {cr.Message.Value}");
37 | }
38 | }
39 | catch (OperationCanceledException) {
40 | // Ctrl-C was pressed.
41 | }
42 | finally{
43 | consumer.Close();
44 | }
45 | }
46 | }
47 | }
--------------------------------------------------------------------------------
/kafka-go-getting-started/consumer.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "os/signal"
7 | "syscall"
8 | "time"
9 |
10 | "github.com/confluentinc/confluent-kafka-go/kafka"
11 | )
12 |
13 | func main() {
14 |
15 | if len(os.Args) != 2 {
16 | fmt.Fprintf(os.Stderr, "Usage: %s \n",
17 | os.Args[0])
18 | os.Exit(1)
19 | }
20 |
21 | configFile := os.Args[1]
22 | conf := ReadConfig(configFile)
23 | conf["group.id"] = "kafka-go-getting-started"
24 | conf["auto.offset.reset"] = "earliest"
25 |
26 | c, err := kafka.NewConsumer(&conf)
27 |
28 | if err != nil {
29 | fmt.Printf("Failed to create consumer: %s", err)
30 | os.Exit(1)
31 | }
32 |
33 | topic := "purchases"
34 | err = c.SubscribeTopics([]string{topic}, nil)
35 | // Set up a channel for handling Ctrl-C, etc
36 | sigchan := make(chan os.Signal, 1)
37 | signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
38 |
39 | // Process messages
40 | run := true
41 | for run {
42 | select {
43 | case sig := <-sigchan:
44 | fmt.Printf("Caught signal %v: terminating\n", sig)
45 | run = false
46 | default:
47 | ev, err := c.ReadMessage(100 * time.Millisecond)
48 | if err != nil {
49 | // Errors are informational and automatically handled by the consumer
50 | continue
51 | }
52 | fmt.Printf("Consumed event from topic %s: key = %-10s value = %s\n",
53 | *ev.TopicPartition.Topic, string(ev.Key), string(ev.Value))
54 | }
55 | }
56 |
57 | c.Close()
58 |
59 | }
60 |
--------------------------------------------------------------------------------
/kafka-java-springboot/src/main/resources/application.properties:
--------------------------------------------------------------------------------
1 | # topic config
2 | io.confluent.developer.config.topic.name=test
3 | io.confluent.developer.config.topic.replicas=3
4 | io.confluent.developer.config.topic.partitions=6
5 |
6 | # common configs
7 | spring.kafka.properties.sasl.mechanism=PLAIN
8 | spring.kafka.properties.bootstrap.servers=${BOOTSTRAP_SERVERS}
9 | spring.kafka.properties.sasl.jaas.config=${SASL_JAAS_CONFIG}
10 | spring.kafka.properties.security.protocol=SASL_SSL
11 |
12 | # Confluent Cloud Schema Registry configuration
13 | spring.kafka.properties.basic.auth.credentials.source=USER_INFO
14 | spring.kafka.properties.schema.registry.basic.auth.user.info=${SCHEMA_REGISTRY_BASIC_AUTH_USER_INFO}
15 | spring.kafka.properties.schema.registry.url=${SCHEMA_REGISTRY_URL}
16 |
17 | # Producer configuration
18 | spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
19 | spring.kafka.producer.value-serializer=io.confluent.kafka.serializers.KafkaAvroSerializer
20 |
21 | # Consumer configuration
22 | spring.kafka.consumer.group-id=java-springboot
23 | spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
24 | spring.kafka.consumer.value-deserializer=io.confluent.kafka.serializers.KafkaAvroDeserializer
25 |
26 | # kafka streams properties
27 | spring.kafka.streams.application-id=count-aggregator
28 | spring.kafka.streams.properties.default.key.serde=org.apache.kafka.common.serialization.Serdes$StringSerde
29 | spring.kafka.streams.properties.default.value.serde=io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde
30 | spring.kafka.streams.properties.commit.interval.ms=0
31 | spring.kafka.streams.properties.replication.factor=3
32 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # 100DAYSKAFKA
2 | KAFKA 100-Day Challenge
3 |
4 |
5 | ## Initial local environment
6 |
7 | - [Initial local environment using Docker](/initial/)
8 |
9 |
10 | ## Examples using different programming languages
11 |
12 | ### C
13 |
14 | - [Producer and Consumer in C](/kafka-c-getting-started/)
15 |
16 | ### GO
17 |
18 | - [Producer and Consumer in GO](/kafka-go-getting-started/)
19 |
20 | ### Java Implementations
21 |
22 | - [Simple Java Consumer Application](/kafka-consumer-application/)
23 | - [Simple Java Producer Application](/kafka-producer-application/)
24 | - [Simple Java Producer with callback Application](/kafka-producer-application-callback/)
25 | - [Java Maven Application](/kafka-java-maven-application/)
26 | - [Springboot Application](/kafka-java-springboot/)
27 | - [Springboot Getting started](/kafka-spring-boot-getting-started/)
28 | - [Kafka Stream Java Gradle Application](/kafka-streams/)
29 |
30 | ### .Net
31 | - [Producer and Consumer using .Net](/kafka-dotnet-getting-started/)
32 |
33 | ### NodeJs
34 | - [Producer and Consumer in NodeJS](/kafka-nodejs-getting-started/)
35 |
36 | ### Python
37 |
38 | - [Producer and Consumer in Python](/kafka-python-application/)
39 | - [Producer and Consumer in Python getting started](/kafka-python-getting-started/)
40 |
41 |
42 | ## Kafka
43 | - [kcat](/kcat/)
44 | - [protobuf](/kafka-protobuf/)
45 | - [rest proxy](/kafka-rest-proxy/)
46 | - [console consumer with offset and partition](/console-consumer-read-specific-offsets-partition/)
47 |
48 |
49 |
50 | ## Documentation
51 | - [RestAPI Knowledge Management](RESTAPI-KM.md)
52 | - [General Knowledge Management](KnowledgeManagement.md)
53 | - [Kafka Stream KM](kafka-stream-KM.md)
54 | - [ConfluentCLI KM](ConfluentCLI-KM.md)
55 |
56 |
--------------------------------------------------------------------------------
/kafka-java-springboot/src/main/java/io/confluent/examples/clients/cloud/springboot/streams/SpringbootStreamsApplication.java:
--------------------------------------------------------------------------------
1 | package io.confluent.examples.clients.cloud.springboot.streams;
2 |
3 | import org.apache.kafka.common.serialization.Serdes;
4 | import org.apache.kafka.streams.KeyValue;
5 | import org.apache.kafka.streams.StreamsBuilder;
6 | import org.apache.kafka.streams.kstream.KStream;
7 | import org.apache.kafka.streams.kstream.Printed;
8 | import org.springframework.beans.factory.annotation.Value;
9 | import org.springframework.boot.SpringApplication;
10 | import org.springframework.boot.autoconfigure.SpringBootApplication;
11 | import org.springframework.context.annotation.Bean;
12 | import org.springframework.kafka.annotation.EnableKafkaStreams;
13 |
14 | import io.confluent.examples.clients.cloud.DataRecordAvro;
15 |
16 | import static org.apache.kafka.streams.kstream.Grouped.with;
17 |
18 | @SpringBootApplication
19 | @EnableKafkaStreams
20 | public class SpringbootStreamsApplication {
21 |
22 | @Value("${io.confluent.developer.config.topic.name}")
23 | private String topicName;
24 |
25 | public static void main(final String[] args) {
26 | SpringApplication.run(SpringbootStreamsApplication.class, args);
27 | }
28 |
29 | @Bean
30 | KStream<String, Long> countAgg(final StreamsBuilder builder) {
31 |
32 | final KStream<String, DataRecordAvro> stream = builder.stream(topicName);
33 | final KStream<String, Long> countAgg = stream
34 | .map((key, value) -> new KeyValue<>(key, value.getCount()))
35 | .groupByKey(with(Serdes.String(), Serdes.Long()))
36 | .reduce(Long::sum).toStream();
37 |
38 | countAgg.print(Printed.toSysOut().withLabel("Running count"));
39 | return countAgg;
40 | }
41 |
42 | }
43 |
--------------------------------------------------------------------------------
/kafka-go-getting-started/README.md:
--------------------------------------------------------------------------------
1 | # GO
2 |
3 | ## Install GO
4 |
5 | ```
6 | wget https://go.dev/dl/go1.19.3.linux-amd64.tar.gz
7 | ```
8 |
9 | ### Remove previous GO version and install a new one
10 |
11 | ```
12 | sudo rm -rf /usr/local/go && tar -xvf go1.19.3.linux-amd64.tar.gz -C /usr/local
13 | ```
14 |
15 | ### Add /usr/local/go/bin to the PATH environment variable
16 | ```
17 | export PATH=$PATH:/usr/local/go/bin
18 | ```
19 |
20 | ### Check that the installation succeeded
21 |
22 | ```
23 | go version
24 | ```
25 |
26 | ## Initialize the Go module and download the Confluent Go Kafka dependency
27 |
28 | ```
29 | go mod init kafka-go-getting-started
30 | go get github.com/confluentinc/confluent-kafka-go/kafka
31 | ```
32 |
33 |
34 |
35 | ## Configuration
36 |
37 | Rename [getting-started-template.properties](getting-started-template.properties) to getting-started.properties
38 |
39 |
40 | Replace the values below with yours (a sample file is sketched after the list):
41 | - bootstrap.servers
42 | - sasl.username
43 | - sasl.password
44 |
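A minimal `getting-started.properties` sketch, assuming a Confluent Cloud cluster with SASL/PLAIN credentials (placeholder values only; keep any other keys from the template):

```
bootstrap.servers=<your-bootstrap-servers>
security.protocol=SASL_SSL
sasl.mechanisms=PLAIN
sasl.username=<your-api-key>
sasl.password=<your-api-secret>
```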
45 | ## Create a topic
46 |
47 | Create a topic named purchases
48 |
49 | ## Build a Producer
50 |
51 | The file [util.go](util.go) helps load the configuration file for the Go application
52 |
53 | The [producer.go](producer.go) file contains the producer application code
54 |
55 |
56 | ### Compile the producer
57 |
58 | ```
59 | go build -o out/producer util.go producer.go
60 | ```
61 | ## Build a Consumer
62 |
63 | The [consumer.go](consumer.go) file contains the consumer application code
64 |
65 | ### Compile the consumer
66 |
67 | ```
68 | go build -o out/consumer util.go consumer.go
69 | ```
70 |
71 | ## Produce events
72 |
73 | ```
74 | ./out/producer getting-started.properties
75 | ```
76 |
77 | ## Consume events
78 |
79 | ```
80 | ./out/consumer getting-started.properties
81 | ```
82 |
83 |
84 |
--------------------------------------------------------------------------------
/kafka-java-springboot/src/main/java/io/confluent/examples/clients/cloud/springboot/kafka/ProducerExample.java:
--------------------------------------------------------------------------------
1 | package io.confluent.examples.clients.cloud.springboot.kafka;
2 |
3 | import org.apache.kafka.clients.admin.NewTopic;
4 | import org.apache.kafka.clients.producer.RecordMetadata;
5 | import org.springframework.boot.context.event.ApplicationStartedEvent;
6 | import org.springframework.context.event.EventListener;
7 | import org.springframework.kafka.core.KafkaTemplate;
8 | import org.springframework.stereotype.Component;
9 |
10 | import io.confluent.examples.clients.cloud.DataRecordAvro;
11 | import lombok.RequiredArgsConstructor;
12 | import lombok.extern.log4j.Log4j2;
13 |
14 | import static java.util.stream.IntStream.range;
15 |
16 | @Log4j2
17 | @Component
18 | @RequiredArgsConstructor
19 | public class ProducerExample {
20 |
21 | private final KafkaTemplate<String, DataRecordAvro> producer;
22 | private final NewTopic topic;
23 |
24 | @EventListener(ApplicationStartedEvent.class)
25 | public void produce() {
26 | // Produce sample data
27 | range(0, 10).forEach(i -> {
28 | final String key = "alice";
29 | final DataRecordAvro record = new DataRecordAvro((long) i);
30 | log.info("Producing record: {}\t{}", key, record);
31 | producer.send(topic.name(), key, record).addCallback(
32 | result -> {
33 | final RecordMetadata m;
34 | if (result != null) {
35 | m = result.getRecordMetadata();
36 | log.info("Produced record to topic {} partition {} @ offset {}",
37 | m.topic(),
38 | m.partition(),
39 | m.offset());
40 | }
41 | },
42 | exception -> log.error("Failed to produce to kafka", exception));
43 | });
44 |
45 | producer.flush();
46 |
47 | log.info("10 messages were produced to topic {}", topic.name());
48 |
49 | }
50 |
51 | }
52 |
--------------------------------------------------------------------------------
/kafka-python-getting-started/producer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import sys
4 | from random import choice
5 | from argparse import ArgumentParser, FileType
6 | from configparser import ConfigParser
7 | from confluent_kafka import Producer
8 |
9 | if __name__ == '__main__':
10 | # Parse the command line.
11 | parser = ArgumentParser()
12 | parser.add_argument('config_file', type=FileType('r'))
13 | args = parser.parse_args()
14 |
15 | # Parse the configuration.
16 | # See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
17 | config_parser = ConfigParser()
18 | config_parser.read_file(args.config_file)
19 | config = dict(config_parser['default'])
20 |
21 | # Create Producer instance
22 | producer = Producer(config)
23 |
24 | # Optional per-message delivery callback (triggered by poll() or flush())
25 | # when a message has been successfully delivered or permanently
26 | # failed delivery (after retries).
27 | def delivery_callback(err, msg):
28 | if err:
29 | print('ERROR: Message failed delivery: {}'.format(err))
30 | else:
31 | print("Produced event to topic {topic}: key = {key:12} value = {value:12}".format(
32 | topic=msg.topic(), key=msg.key().decode('utf-8'), value=msg.value().decode('utf-8')))
33 |
34 | # Produce data by selecting random values from these lists.
35 | topic = "purchases"
36 | user_ids = ['eabara', 'jsmith', 'sgarcia', 'jbernard', 'htanaka', 'awalther']
37 | products = ['book', 'alarm clock', 't-shirts', 'gift card', 'batteries']
38 |
39 | count = 0
40 | for _ in range(10):
41 |
42 | user_id = choice(user_ids)
43 | product = choice(products)
44 | producer.produce(topic, product, user_id, callback=delivery_callback)
45 | count += 1
46 |
47 | # Block until the messages are sent.
48 | producer.poll(10000)
49 | producer.flush()
50 |
--------------------------------------------------------------------------------
/.github/workflows/gradle.yml:
--------------------------------------------------------------------------------
1 | # This workflow uses actions that are not certified by GitHub.
2 | # They are provided by a third-party and are governed by
3 | # separate terms of service, privacy policy, and support
4 | # documentation.
5 | # This workflow will build a Java project with Gradle and cache/restore any dependencies to improve the workflow execution time
6 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-java-with-gradle
7 |
8 | name: Java CI with Gradle
9 |
10 | on:
11 | pull_request:
12 | branches: [ "main" ]
13 |
14 | permissions:
15 | contents: read
16 |
17 | jobs:
18 | build:
19 |
20 | runs-on: ubuntu-latest
21 |
22 | steps:
23 | - uses: actions/checkout@v3
24 | - name: Set up JDK 11
25 | uses: actions/setup-java@v3
26 | with:
27 | java-version: '11'
28 | distribution: 'adopt'
29 | - name: Build with Gradle (kafka-consumer-application)
30 | uses: gradle/gradle-build-action@v2
31 | with:
32 | gradle-version: 7.6
33 | arguments: build
34 | build-root-directory: ./kafka-consumer-application
35 | - name: Build with Gradle (kafka-producer-application-callback)
36 | uses: gradle/gradle-build-action@v2
37 | with:
38 | gradle-version: 7.6
39 | arguments: build
40 | build-root-directory: ./kafka-producer-application-callback
41 | - name: Build with Gradle (kafka-producer-application)
42 | uses: gradle/gradle-build-action@v2
43 | with:
44 | gradle-version: 7.6
45 | arguments: build
46 | build-root-directory: ./kafka-producer-application
47 | - name: Build with Gradle (kafka-spring-boot-getting-started)
48 | uses: gradle/gradle-build-action@v2
49 | with:
50 | gradle-version: 7.6
51 | arguments: build
52 | build-root-directory: ./kafka-spring-boot-getting-started
53 |
54 |
55 |
--------------------------------------------------------------------------------
/kafka-go-getting-started/producer.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "os"
7 |
8 | "github.com/confluentinc/confluent-kafka-go/kafka"
9 | )
10 |
11 | func main() {
12 |
13 | if len(os.Args) != 2 {
14 | fmt.Fprintf(os.Stderr, "Usage: %s \n",
15 | os.Args[0])
16 | os.Exit(1)
17 | }
18 | configFile := os.Args[1]
19 | conf := ReadConfig(configFile)
20 |
21 | topic := "purchases"
22 | p, err := kafka.NewProducer(&conf)
23 |
24 | if err != nil {
25 | fmt.Printf("Failed to create producer: %s", err)
26 | os.Exit(1)
27 | }
28 |
29 | // Go-routine to handle message delivery reports and
30 | // possibly other event types (errors, stats, etc)
31 | go func() {
32 | for e := range p.Events() {
33 | switch ev := e.(type) {
34 | case *kafka.Message:
35 | if ev.TopicPartition.Error != nil {
36 | fmt.Printf("Failed to deliver message: %v\n", ev.TopicPartition)
37 | } else {
38 | fmt.Printf("Produced event to topic %s: key = %-10s value = %s\n",
39 | *ev.TopicPartition.Topic, string(ev.Key), string(ev.Value))
40 | }
41 | }
42 | }
43 | }()
44 |
45 | users := [...]string{"eabara", "jsmith", "sgarcia", "jbernard", "htanaka", "awalther"}
46 | items := [...]string{"book", "alarm clock", "t-shirts", "gift card", "batteries"}
47 |
48 | for n := 0; n < 10; n++ {
49 | key := users[rand.Intn(len(users))]
50 | data := items[rand.Intn(len(items))]
51 | p.Produce(&kafka.Message{
52 | TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
53 | Key: []byte(key),
54 | Value: []byte(data),
55 | }, nil)
56 | }
57 |
58 | // Wait for all messages to be delivered
59 | p.Flush(15 * 1000)
60 | p.Close()
61 | }
62 |
--------------------------------------------------------------------------------
/kafka-dotnet-getting-started/producer/producer.cs:
--------------------------------------------------------------------------------
1 | using Confluent.Kafka;
2 | using System;
3 | using Microsoft.Extensions.Configuration;
4 |
5 | class Producer {
6 | static void Main(string[] args)
7 | {
8 | if (args.Length != 1) {
9 | Console.WriteLine("Please provide the configuration file path as a command line argument");
10 | }
11 |
12 | IConfiguration configuration = new ConfigurationBuilder()
13 | .AddIniFile(args[0])
14 | .Build();
15 |
16 | const string topic = "purchases";
17 |
18 | string[] users = { "eabara", "jsmith", "sgarcia", "jbernard", "htanaka", "awalther" };
19 | string[] items = { "book", "alarm clock", "t-shirts", "gift card", "batteries" };
20 |
21 | using (var producer = new ProducerBuilder<string, string>(
22 | configuration.AsEnumerable()).Build())
23 | {
24 | var numProduced = 0;
25 | Random rnd = new Random();
26 | const int numMessages = 10;
27 | for (int i = 0; i < numMessages; ++i)
28 | {
29 | var user = users[rnd.Next(users.Length)];
30 | var item = items[rnd.Next(items.Length)];
31 |
32 | producer.Produce(topic, new Message<string, string> { Key = user, Value = item },
33 | (deliveryReport) =>
34 | {
35 | if (deliveryReport.Error.Code != ErrorCode.NoError) {
36 | Console.WriteLine($"Failed to deliver message: {deliveryReport.Error.Reason}");
37 | }
38 | else {
39 | Console.WriteLine($"Produced event to topic {topic}: key = {user,-10} value = {item}");
40 | numProduced += 1;
41 | }
42 | });
43 | }
44 |
45 | producer.Flush(TimeSpan.FromSeconds(10));
46 | Console.WriteLine($"{numProduced} messages were produced to topic {topic}");
47 | }
48 | }
49 | }
--------------------------------------------------------------------------------
/kafka-nodejs-getting-started/consumer.js:
--------------------------------------------------------------------------------
1 | const Kafka = require('node-rdkafka');
2 | const { configFromPath } = require('./util');
3 |
4 | function createConfigMap(config) {
5 | if (config.hasOwnProperty('security.protocol')) {
6 | return {
7 | 'bootstrap.servers': config['bootstrap.servers'],
8 | 'sasl.username': config['sasl.username'],
9 | 'sasl.password': config['sasl.password'],
10 | 'security.protocol': config['security.protocol'],
11 | 'sasl.mechanisms': config['sasl.mechanisms'],
12 | 'group.id': 'kafka-nodejs-getting-started'
13 | }
14 | } else {
15 | return {
16 | 'bootstrap.servers': config['bootstrap.servers'],
17 | 'group.id': 'kafka-nodejs-getting-started'
18 | }
19 | }
20 | }
21 |
22 | function createConsumer(config, onData) {
23 | const consumer = new Kafka.KafkaConsumer(
24 | createConfigMap(config),
25 | {'auto.offset.reset': 'earliest'});
26 |
27 | return new Promise((resolve, reject) => {
28 | consumer
29 | .on('ready', () => resolve(consumer))
30 | .on('data', onData);
31 |
32 | consumer.connect();
33 | });
34 | };
35 |
36 |
37 | async function consumerExample() {
38 | if (process.argv.length < 3) {
39 | console.log("Please provide the configuration file path as the command line argument");
40 | process.exit(1);
41 | }
42 | let configPath = process.argv.slice(2)[0];
43 | const config = await configFromPath(configPath);
44 |
45 | //let seen = 0;
46 | let topic = "purchases";
47 |
48 | const consumer = await createConsumer(config, ({key, value}) => {
49 | let k = key.toString().padEnd(10, ' ');
50 | console.log(`Consumed event from topic ${topic}: key = ${k} value = ${value}`);
51 | });
52 |
53 | consumer.subscribe([topic]);
54 | consumer.consume();
55 |
56 | process.on('SIGINT', () => {
57 | console.log('\nDisconnecting consumer ...');
58 | consumer.disconnect();
59 | });
60 | }
61 |
62 | consumerExample()
63 | .catch((err) => {
64 | console.error(`Something went wrong:\n${err}`);
65 | process.exit(1);
66 | });
67 |
--------------------------------------------------------------------------------
/kafka-python-getting-started/README.md:
--------------------------------------------------------------------------------
1 | # Python
2 |
3 | ## Set up a virtual environment
4 |
5 | Create and activate a Python virtual environment to give yourself a clean, isolated workspace:
6 |
7 | ```
8 | virtualenv env
9 |
10 | source env/bin/activate
11 | ```
12 |
13 | > Using Python 3.x
14 |
15 | ## Install the Kafka library
16 |
17 | ```
18 | pip install confluent-kafka
19 | ```
20 |
21 | ## Get the running Kafka instance information
22 |
23 | ### LOCAL
24 |
25 | Use ../initial/docker-compose.yml
26 |
27 | ```
28 | docker-compose up -d
29 | ```
30 |
31 | ### CLOUD
32 |
33 | Confluent -> Cluster Overview -> Cluster settings -> get Bootstrap server info
34 |
35 |
36 |
37 | ## Configuration
38 |
39 | ### Confluent Cloud
40 | Rename the getting_started_template.ini file to getting_started.ini
41 |
42 | Replace the configs below with yours (a sample is sketched after the list):
43 |
44 | - bootstrap.servers
45 | - sasl.username
46 | - sasl.password
47 |
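A minimal Confluent Cloud `getting_started.ini` sketch (placeholder values; the `[consumer]` section matches the local example below):

```
[default]
bootstrap.servers=<your-bootstrap-servers>
security.protocol=SASL_SSL
sasl.mechanisms=PLAIN
sasl.username=<your-api-key>
sasl.password=<your-api-secret>

[consumer]
group.id=python_example_group_1
auto.offset.reset=earliest
```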
48 | ### Local
49 |
50 | Paste the following configs into the getting_started.ini file:
51 |
52 | ```
53 | [default]
54 | bootstrap.servers=localhost:9092
55 |
56 | [consumer]
57 | group.id=python_example_group_1
58 |
59 | # 'auto.offset.reset=earliest' to start reading from the beginning of
60 | # the topic if no committed offsets exist.
61 | auto.offset.reset=earliest
62 | ```
63 |
64 | ## Create Topic
65 |
66 | Create a new topic "purchases" with 1 partition
67 |
68 | ### Confluent Cloud
69 |
70 | Use the Confluent Cloud console to add a new topic, or the Confluent CLI as sketched below
71 |
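Assuming the Confluent CLI is installed and logged in to your cluster:

```
confluent kafka topic create purchases --partitions 1
```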
72 | ### Local
73 |
74 | ```
75 | docker compose exec broker \
76 | kafka-topics --create \
77 | --topic purchases \
78 | --bootstrap-server localhost:9092 \
79 | --replication-factor 1 \
80 | --partitions 1
81 | ```
82 |
83 |
84 |
85 |
86 | ## Build Producer
87 |
88 |
89 | The producer application is [producer.py](producer.py)
90 |
91 | ### Produce Events
92 |
93 | ```
94 | chmod u+x producer.py
95 |
96 | ./producer.py getting_started.ini
97 | ```
98 |
99 | ## Build Consumer
100 |
101 | The consumer application is [consumer.py](consumer.py)
102 |
103 | ### Consume Events
104 |
105 | ```
106 | chmod u+x consumer.py
107 |
108 | ./consumer.py getting_started.ini
109 | ```
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
--------------------------------------------------------------------------------
/kafka-python-getting-started/consumer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import sys
4 | from argparse import ArgumentParser, FileType
5 | from configparser import ConfigParser
6 | from confluent_kafka import Consumer, OFFSET_BEGINNING
7 |
8 | if __name__ == '__main__':
9 | # Parse the command line.
10 | parser = ArgumentParser()
11 | parser.add_argument('config_file', type=FileType('r'))
12 | parser.add_argument('--reset', action='store_true')
13 | args = parser.parse_args()
14 |
15 | # Parse the configuration.
16 | # See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
17 | config_parser = ConfigParser()
18 | config_parser.read_file(args.config_file)
19 | config = dict(config_parser['default'])
20 | config.update(config_parser['consumer'])
21 |
22 | # Create Consumer instance
23 | consumer = Consumer(config)
24 |
25 | # Set up a callback to handle the '--reset' flag.
26 | def reset_offset(consumer, partitions):
27 | if args.reset:
28 | for p in partitions:
29 | p.offset = OFFSET_BEGINNING
30 | consumer.assign(partitions)
31 |
32 | # Subscribe to topic
33 | topic = "purchases"
34 | consumer.subscribe([topic], on_assign=reset_offset)
35 |
36 | # Poll for new messages from Kafka and print them.
37 | try:
38 | while True:
39 | msg = consumer.poll(1.0)
40 | if msg is None:
41 | # Initial message consumption may take up to
42 | # `session.timeout.ms` for the consumer group to
43 | # rebalance and start consuming
44 | print("Waiting...")
45 | elif msg.error():
46 | print("ERROR: %s".format(msg.error()))
47 | else:
48 | # Extract the (optional) key and value, and print.
49 |
50 | print("Consumed event from topic {topic}: key = {key:12} value = {value:12}".format(
51 | topic=msg.topic(), key=msg.key().decode('utf-8'), value=msg.value().decode('utf-8')))
52 | except KeyboardInterrupt:
53 | pass
54 | finally:
55 | # Leave group and commit final offsets
56 | consumer.close()
57 |
--------------------------------------------------------------------------------
/kafka-consumer-application/src/test/java/io/confluent/developer/FileWritingRecordsHandlerTest.java:
--------------------------------------------------------------------------------
1 | package io.confluent.developer;
2 |
3 | import static org.hamcrest.MatcherAssert.assertThat;
4 | import static org.hamcrest.Matchers.equalTo;
5 |
6 | import java.io.IOException;
7 | import java.nio.file.Files;
8 | import java.nio.file.Path;
9 | import java.util.ArrayList;
10 | import java.util.Arrays;
11 | import java.util.HashMap;
12 | import java.util.List;
13 | import java.util.Map;
14 | import org.apache.kafka.clients.consumer.ConsumerRecord;
15 | import org.apache.kafka.clients.consumer.ConsumerRecords;
16 | import org.apache.kafka.common.TopicPartition;
17 | import org.junit.Test;
18 |
19 | public class FileWritingRecordsHandlerTest {
20 |
21 | @Test
22 | public void testProcess() throws IOException {
23 | final Path tempFilePath = Files.createTempFile("test-handler", ".out");
24 | try {
25 | final ConsumerRecordsHandler<String, String> recordsHandler = new FileWritingRecordsHandler(tempFilePath);
26 | recordsHandler.process(createConsumerRecords());
27 | final List<String> expectedWords = Arrays.asList("it's but", "a flesh wound", "come back");
28 | List actualRecords = Files.readAllLines(tempFilePath);
29 | assertThat(actualRecords, equalTo(expectedWords));
30 | } finally {
31 | Files.deleteIfExists(tempFilePath);
32 | }
33 | }
34 |
35 |
36 | private ConsumerRecords<String, String> createConsumerRecords() {
37 | final String topic = "test";
38 | final int partition = 0;
39 | final TopicPartition topicPartition = new TopicPartition(topic, partition);
40 | final List<ConsumerRecord<String, String>> consumerRecordsList = new ArrayList<>();
41 | consumerRecordsList.add(new ConsumerRecord<>(topic, partition, 0, null, "it's but"));
42 | consumerRecordsList.add(new ConsumerRecord<>(topic, partition, 0, null, "a flesh wound"));
43 | consumerRecordsList.add(new ConsumerRecord<>(topic, partition, 0, null, "come back"));
44 | final Map<TopicPartition, List<ConsumerRecord<String, String>>> recordsMap = new HashMap<>();
45 | recordsMap.put(topicPartition, consumerRecordsList);
46 |
47 | return new ConsumerRecords<>(recordsMap);
48 | }
49 | }
--------------------------------------------------------------------------------
/kafka-producer-application-callback/README.md:
--------------------------------------------------------------------------------
1 | # JAVA Kafka Producer Application with callbacks
2 |
3 |
4 | ## Configure the project
5 |
6 | Configure the project with the Gradle build file [build.gradle](build.gradle)
7 |
8 |
9 |
10 | Run the following command to obtain the Gradle wrapper:
11 |
12 | ```
13 | gradle wrapper
14 | ```
15 |
16 | ## Add Confluent cluster configuration
17 |
18 | Add a new file [ccloud.properties](configuration/ccloud.properties); a sketch is shown below
19 |
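A minimal `ccloud.properties` sketch for a Confluent Cloud cluster (placeholder values; SASL/PLAIN over SASL_SSL assumed, matching the other Java examples in this repo):

```
bootstrap.servers=<your-bootstrap-servers>
security.protocol=SASL_SSL
sasl.mechanism=PLAIN
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='<your-api-key>' password='<your-api-secret>';
```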
20 | ## Add application and producer properties
21 |
22 | Rename [dev-template.properties](configuration/dev-template.properties) to dev.properties
23 |
24 |
25 | ## Update the dev properties file with the Confluent Cloud configs
26 |
27 | ```
28 | cat configuration/ccloud.properties >> configuration/dev.properties
29 |
30 | ```
31 |
32 | ## Create the KafkaProducer application
33 |
34 | Create a new Java class [KafkaProducerCallbackApplication.java](src/main/java/io/confluent/developer/KafkaProducerCallbackApplication.java)
35 |
36 | ## Create data to produce to Kafka
37 |
38 | Create a file [input.txt](input.txt); each line will be produced as a record (a sample is sketched below)
39 |
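A sample `input.txt` sketch; the lines below mirror the records used in the unit test, where a `-` in a line separates the key from the value:

```
foo-bar
bar-foo
baz-bar
great:weather
```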
40 | ## Compile and run the KafkaProducer application
41 |
42 | In the terminal, run:
43 |
44 | ```
45 | ./gradlew shadowJar
46 | ```
47 |
48 | Run the application:
49 |
50 | ```
51 | java -jar build/libs/kafka-producer-application-callback-standalone-0.0.1.jar configuration/dev.properties input.txt
52 | ```
53 |
54 | ## Create a test configuration file
55 |
56 | Create a test file [test.properties](configuration/test.properties)
57 |
58 | ## Write a unit test
59 |
60 | Create a new Java test class [KafkaProducerCallbackApplicationTest.java](src/test/java/io/confluent/developer/KafkaProducerCallbackApplicationTest.java)
61 |
62 | ## Invoke the tests
63 |
64 | ```
65 | ./gradlew test
66 | ```
67 |
68 | ## Create a production configuration file
69 |
70 | Create a production config file [prod.properties](configuration/prod.properties)
71 |
72 | ## Build a Docker image
73 |
74 | ```
75 | gradle jibDockerBuild --image=io.confluent.developer/kafka-producer-application-callback-join:0.0.1
76 | ```
77 |
78 | ## Launch a container
79 |
80 | ```
81 | docker run -v $PWD/configuration/prod.properties:/config.properties io.confluent.developer/kafka-producer-application-callback-join:0.0.1 config.properties
82 | ```
83 |
--------------------------------------------------------------------------------
/kafka-producer-application-callback/src/test/java/io/confluent/developer/KafkaProducerCallbackApplicationTest.java:
--------------------------------------------------------------------------------
1 | package io.confluent.developer;
2 |
3 |
4 | import static org.hamcrest.CoreMatchers.equalTo;
5 | import static org.hamcrest.MatcherAssert.assertThat;
6 |
7 | import java.io.IOException;
8 | import java.util.Arrays;
9 | import java.util.List;
10 | import java.util.Properties;
11 | import java.util.stream.Collectors;
12 | import org.apache.kafka.clients.producer.MockProducer;
13 | import org.apache.kafka.common.serialization.StringSerializer;
14 | import org.apache.kafka.clients.producer.ProducerRecord;
15 | import org.apache.kafka.streams.KeyValue;
16 | import org.junit.Test;
17 |
18 |
19 | public class KafkaProducerCallbackApplicationTest {
20 |
21 | private final static String TEST_CONFIG_FILE = "configuration/test.properties";
22 |
23 | @Test
24 | public void testProduce() throws IOException {
25 | final StringSerializer stringSerializer = new StringSerializer();
26 | final MockProducer<String, String> mockProducer = new MockProducer<>(true, stringSerializer, stringSerializer);
27 | final Properties props = KafkaProducerCallbackApplication.loadProperties(TEST_CONFIG_FILE);
28 | final String topic = props.getProperty("output.topic.name");
29 | final KafkaProducerCallbackApplication producerApp = new KafkaProducerCallbackApplication(mockProducer, topic);
30 | final List<String> records = Arrays.asList("foo-bar", "bar-foo", "baz-bar", "great:weather");
31 |
32 | records.forEach(producerApp::produce);
33 |
34 | final List<KeyValue<String, String>> expectedList = Arrays.asList(KeyValue.pair("foo", "bar"),
35 | KeyValue.pair("bar", "foo"),
36 | KeyValue.pair("baz", "bar"),
37 | KeyValue.pair(null,"great:weather"));
38 |
39 | final List<KeyValue<String, String>> actualList = mockProducer.history().stream().map(this::toKeyValue).collect(Collectors.toList());
40 |
41 | assertThat(actualList, equalTo(expectedList));
42 | producerApp.shutdown();
43 | }
44 |
45 |
46 | private KeyValue<String, String> toKeyValue(final ProducerRecord<String, String> producerRecord) {
47 | return KeyValue.pair(producerRecord.key(), producerRecord.value());
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/kafka-streams/app/src/main/java/io/m03315/learning/kafka/StreamsUtils.java:
--------------------------------------------------------------------------------
1 | package io.m03315.learning.kafka;
2 |
3 | import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde;
4 | import org.apache.avro.specific.SpecificRecord;
5 | import org.apache.kafka.clients.admin.NewTopic;
6 | import org.apache.kafka.clients.producer.Callback;
7 |
8 | import java.io.FileInputStream;
9 | import java.io.IOException;
10 | import java.util.HashMap;
11 | import java.util.Map;
12 | import java.util.Properties;
13 |
14 | public class StreamsUtils {
15 |
16 | public static final String PROPERTIES_FILE_PATH = "src/main/resources/streams.properties";
17 | public static final short REPLICATION_FACTOR = 3;
18 | public static final int PARTITIONS = 6;
19 |
20 | public static Properties loadProperties() throws IOException {
21 | Properties properties = new Properties();
22 | try (FileInputStream fis = new FileInputStream("src/main/resources/streams.properties")) {
23 | properties.load(fis);
24 | return properties;
25 | }
26 | }
27 | public static Map<String, Object> propertiesToMap(final Properties properties) {
28 | final Map<String, Object> configs = new HashMap<>();
29 | properties.forEach((key, value) -> configs.put((String)key, (String)value));
30 | return configs;
31 | }
32 |
33 | public static <T extends SpecificRecord> SpecificAvroSerde<T> getSpecificAvroSerde(final Map<String, Object> serdeConfig) {
34 | final SpecificAvroSerde<T> specificAvroSerde = new SpecificAvroSerde<>();
35 | specificAvroSerde.configure(serdeConfig, false);
36 | return specificAvroSerde;
37 | }
38 |
39 | public static Callback callback() {
40 | return (metadata, exception) -> {
41 | if(exception != null) {
42 | System.out.printf("Producing records encountered error %s %n", exception);
43 | } else {
44 | System.out.printf("Record produced - offset - %d timestamp - %d %n", metadata.offset(), metadata.timestamp());
45 | }
46 |
47 | };
48 | }
49 |
50 | public static NewTopic createTopic(final String topicName){
51 | return new NewTopic(topicName, PARTITIONS, REPLICATION_FACTOR);
52 | }
53 | }
--------------------------------------------------------------------------------
/console-consumer-read-specific-offsets-partition/README.md:
--------------------------------------------------------------------------------
1 | # Read from a specific offset and partition
2 |
3 |
4 | ## Cluster Config
5 |
6 | rename [ccloud-template.properties](configuration/ccloud-template.properties) to ccloud.properties
7 |
8 |
9 | ## Create a new topic through the Confluent CLI
10 |
11 | ```
12 | confluent kafka topic create example-topic --partitions 2
13 | ```
14 |
15 |
16 | ## Produce records with keys and values
17 |
18 | ```
19 | confluent kafka topic produce example-topic --parse-key --delimiter ":"
20 | ```
21 |
22 | Add new records:
23 |
24 | ```
25 | key1:the lazy
26 | key2:fox jumped
27 | key3:over the
28 | key4:brown cow
29 | key1:All
30 | key2:streams
31 | key3:lead
32 | key4:to
33 | key1:Kafka
34 | key2:Go to
35 | key3:Kafka
36 | key4:summit
37 | ```
38 |
39 | ## Start a new consumer to read from the first partition
40 |
41 | ```
42 | docker run -v $PWD/configuration/ccloud.properties:/tmp/ccloud.properties confluentinc/cp-kafka:7.3.0 \
43 | bash -c 'kafka-console-consumer \
44 | --topic example-topic \
45 | --bootstrap-server `grep "^\s*bootstrap.server" /tmp/ccloud.properties | tail -1` \
46 | --consumer.config /tmp/ccloud.properties \
47 | --from-beginning \
48 | --property print.key=true \
49 | --property key.separator="-" \
50 | --partition 0'
51 | ```
52 |
53 | ## Start a new consumer to read from the second partition
54 |
55 | ```
56 | docker run -v $PWD/configuration/ccloud.properties:/tmp/ccloud.properties confluentinc/cp-kafka:7.3.0 \
57 | bash -c 'kafka-console-consumer \
58 | --topic example-topic \
59 | --bootstrap-server `grep "^\s*bootstrap.server" /tmp/ccloud.properties | tail -1` \
60 | --consumer.config /tmp/ccloud.properties \
61 | --from-beginning \
62 | --property print.key=true \
63 | --property key.separator="-" \
64 | --partition 1'
65 | ```
66 |
67 | ## Read records starting from a specific offset
68 |
69 | ```
70 | docker run -v $PWD/configuration/ccloud.properties:/tmp/ccloud.properties confluentinc/cp-kafka:7.3.0 \
71 | bash -c 'kafka-console-consumer \
72 | --topic example-topic \
73 | --bootstrap-server `grep "^\s*bootstrap.server" /tmp/ccloud.properties | tail -1` \
74 | --consumer.config /tmp/ccloud.properties \
75 | --property print.key=true \
76 | --property key.separator="-" \
77 | --partition 1 \
78 | --offset 3'
79 | ```
80 |
81 |
--------------------------------------------------------------------------------
/kafka-streams/app/src/main/java/io/m03315/learning/kafka/KTableExample.java:
--------------------------------------------------------------------------------
1 | package io.m03315.learning.kafka;
2 |
3 | import org.apache.kafka.common.serialization.Serdes;
4 | import org.apache.kafka.common.utils.Bytes;
5 | import org.apache.kafka.streams.KafkaStreams;
6 | import org.apache.kafka.streams.StreamsBuilder;
7 | import org.apache.kafka.streams.StreamsConfig;
8 | import org.apache.kafka.streams.kstream.KTable;
9 | import org.apache.kafka.streams.kstream.Materialized;
10 | import org.apache.kafka.streams.kstream.Produced;
11 | import org.apache.kafka.streams.state.KeyValueStore;
12 |
13 | import java.io.FileInputStream;
14 | import java.io.IOException;
15 | import java.util.Properties;
16 |
17 | public class KTableExample {
18 |
19 | public static void main(String[] args) throws IOException {
20 | final Properties streamsProps = new Properties();
21 | try (FileInputStream fis = new FileInputStream("src/main/resources/streams.properties")) {
22 | streamsProps.load(fis);
23 | }
24 | streamsProps.put(StreamsConfig.APPLICATION_ID_CONFIG, "ktable-application");
25 |
26 | StreamsBuilder builder = new StreamsBuilder();
27 | final String inputTopic = streamsProps.getProperty("ktable.input.topic");
28 | final String outputTopic = streamsProps.getProperty("ktable.output.topic");
29 | final String orderNumberStart = "orderNumber-";
30 |
31 | KTable<String, String> firstKTable = builder.table(inputTopic,
32 | Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("ktable-store")
33 | .withKeySerde(Serdes.String())
34 | .withValueSerde(Serdes.String()));
35 |
36 | firstKTable.filter((key, value) -> value.contains(orderNumberStart))
37 | .mapValues(value -> value.substring(value.indexOf("-") + 1))
38 | .filter((key, value) -> Long.parseLong(value) > 1000)
39 | .toStream()
40 | .peek((key, value) -> System.out.println("Outgoing record - key " + key + " value " + value))
41 | .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
42 |
43 | KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsProps);
44 | TopicLoader.runProducer();
45 | kafkaStreams.start();
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/kafka-producer-application/src/test/java/io/confluent/developer/KafkaProducerApplicationTest.java:
--------------------------------------------------------------------------------
1 | package io.confluent.developer;
2 |
3 |
4 | import static org.hamcrest.CoreMatchers.equalTo;
5 | import static org.hamcrest.MatcherAssert.assertThat;
6 |
7 | import java.io.IOException;
8 | import java.util.Arrays;
9 | import java.util.List;
10 | import java.util.Properties;
11 | import java.util.stream.Collectors;
12 | import org.apache.kafka.clients.producer.MockProducer;
13 | import org.apache.kafka.common.serialization.StringSerializer;
14 | import org.apache.kafka.clients.producer.ProducerRecord;
15 | import org.apache.kafka.streams.KeyValue;
16 | import org.junit.Test;
17 |
18 |
19 | public class KafkaProducerApplicationTest {
20 |
21 | private final static String TEST_CONFIG_FILE = "configuration/test.properties";
22 |
23 | @Test
24 | public void testProduce() throws IOException {
25 | final StringSerializer stringSerializer = new StringSerializer();
26 | final MockProducer<String, String> mockProducer = new MockProducer<>(true, stringSerializer, stringSerializer);
27 | final Properties props = KafkaProducerApplication.loadProperties(TEST_CONFIG_FILE);
28 | final String topic = props.getProperty("output.topic.name");
29 | final KafkaProducerApplication producerApp = new KafkaProducerApplication(mockProducer, topic);
30 | final List<String> records = Arrays.asList("foo-bar", "bar-foo", "baz-bar", "great:weather");
31 |
32 | records.forEach(producerApp::produce);
33 |
34 | final List<KeyValue<String, String>> expectedList = Arrays.asList(KeyValue.pair("foo", "bar"),
35 | KeyValue.pair("bar", "foo"),
36 | KeyValue.pair("baz", "bar"),
37 | KeyValue.pair(null,"great:weather"));
38 | // Use the MockProducer.history() method to get the records sent to the producer so the test can assert the expected records match the actual ones sent
39 | final List<KeyValue<String, String>> actualList = mockProducer.history().stream().map(this::toKeyValue).collect(Collectors.toList());
40 |
41 | assertThat(actualList, equalTo(expectedList));
42 | producerApp.shutdown();
43 | }
44 |
45 |
46 | private KeyValue<String, String> toKeyValue(final ProducerRecord<String, String> producerRecord) {
47 | return KeyValue.pair(producerRecord.key(), producerRecord.value());
48 | }
49 | }
50 |
51 |
--------------------------------------------------------------------------------
/kafka-java-springboot/gradlew.bat:
--------------------------------------------------------------------------------
1 | @if "%DEBUG%" == "" @echo off
2 | @rem ##########################################################################
3 | @rem
4 | @rem Gradle startup script for Windows
5 | @rem
6 | @rem ##########################################################################
7 |
8 | @rem Set local scope for the variables with windows NT shell
9 | if "%OS%"=="Windows_NT" setlocal
10 |
11 | set DIRNAME=%~dp0
12 | if "%DIRNAME%" == "" set DIRNAME=.
13 | set APP_BASE_NAME=%~n0
14 | set APP_HOME=%DIRNAME%
15 |
16 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
17 | set DEFAULT_JVM_OPTS=
18 |
19 | @rem Find java.exe
20 | if defined JAVA_HOME goto findJavaFromJavaHome
21 |
22 | set JAVA_EXE=java.exe
23 | %JAVA_EXE% -version >NUL 2>&1
24 | if "%ERRORLEVEL%" == "0" goto init
25 |
26 | echo.
27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
28 | echo.
29 | echo Please set the JAVA_HOME variable in your environment to match the
30 | echo location of your Java installation.
31 |
32 | goto fail
33 |
34 | :findJavaFromJavaHome
35 | set JAVA_HOME=%JAVA_HOME:"=%
36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe
37 |
38 | if exist "%JAVA_EXE%" goto init
39 |
40 | echo.
41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
42 | echo.
43 | echo Please set the JAVA_HOME variable in your environment to match the
44 | echo location of your Java installation.
45 |
46 | goto fail
47 |
48 | :init
49 | @rem Get command-line arguments, handling Windows variants
50 |
51 | if not "%OS%" == "Windows_NT" goto win9xME_args
52 |
53 | :win9xME_args
54 | @rem Slurp the command line arguments.
55 | set CMD_LINE_ARGS=
56 | set _SKIP=2
57 |
58 | :win9xME_args_slurp
59 | if "x%~1" == "x" goto execute
60 |
61 | set CMD_LINE_ARGS=%*
62 |
63 | :execute
64 | @rem Setup the command line
65 |
66 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
67 |
68 | @rem Execute Gradle
69 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
70 |
71 | :end
72 | @rem End local scope for the variables with windows NT shell
73 | if "%ERRORLEVEL%"=="0" goto mainEnd
74 |
75 | :fail
76 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
77 | rem the _cmd.exe /c_ return code!
78 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
79 | exit /b 1
80 |
81 | :mainEnd
82 | if "%OS%"=="Windows_NT" endlocal
83 |
84 | :omega
85 |
--------------------------------------------------------------------------------
/kafka-nodejs-getting-started/producer.js:
--------------------------------------------------------------------------------
1 | const Kafka = require('node-rdkafka');
2 | const { configFromPath } = require('./util');
3 |
4 | function createConfigMap(config) {
5 | if (config.hasOwnProperty('security.protocol')) {
6 | return {
7 | 'bootstrap.servers': config['bootstrap.servers'],
8 | 'sasl.username': config['sasl.username'],
9 | 'sasl.password': config['sasl.password'],
10 | 'security.protocol': config['security.protocol'],
11 | 'sasl.mechanisms': config['sasl.mechanisms'],
12 | 'dr_msg_cb': true }
13 | } else {
14 | return {
15 | 'bootstrap.servers': config['bootstrap.servers'],
16 | 'dr_msg_cb': true
17 | }
18 | }
19 | }
20 |
21 | function createProducer(config, onDeliveryReport) {
22 |
23 | const producer = new Kafka.Producer(createConfigMap(config));
24 |
25 | return new Promise((resolve, reject) => {
26 | producer
27 | .on('ready', () => resolve(producer))
28 | .on('delivery-report', onDeliveryReport)
29 | .on('event.error', (err) => {
30 | console.warn('event.error', err);
31 | reject(err);
32 | });
33 | producer.connect();
34 | });
35 | }
36 |
37 | async function produceExample() {
38 | if (process.argv.length < 3) {
39 | console.log("Please provide the configuration file path as the command line argument");
40 | process.exit(1);
41 | }
42 | let configPath = process.argv.slice(2)[0];
43 | const config = await configFromPath(configPath);
44 |
45 | let topic = "purchases";
46 |
47 | let users = [ "eabara", "jsmith", "sgarcia", "jbernard", "htanaka", "awalther" ];
48 | let items = [ "book", "alarm clock", "t-shirts", "gift card", "batteries" ];
49 |
50 | const producer = await createProducer(config, (err, report) => {
51 | if (err) {
52 | console.warn('Error producing', err)
53 | } else {
54 | const {topic, key, value} = report;
55 | let k = key.toString().padEnd(10, ' ');
56 | console.log(`Produced event to topic ${topic}: key = ${k} value = ${value}`);
57 | }
58 | });
59 |
60 | let numEvents = 10;
61 | for (let idx = 0; idx < numEvents; ++idx) {
62 |
63 | const key = users[Math.floor(Math.random() * users.length)];
64 | const value = Buffer.from(items[Math.floor(Math.random() * items.length)]);
65 |
66 | producer.produce(topic, -1, value, key);
67 | }
68 |
69 | producer.flush(10000, () => {
70 | producer.disconnect();
71 | });
72 | }
73 |
74 | produceExample()
75 | .catch((err) => {
76 | console.error(`Something went wrong:\n${err}`);
77 | process.exit(1);
78 | });
79 |
--------------------------------------------------------------------------------
/kafka-spring-boot-getting-started/src/main/java/examples/SpringBootWithKafkaApplication.java:
--------------------------------------------------------------------------------
1 | package examples;
2 |
3 | import examples.Producer;
4 | import org.springframework.boot.SpringApplication;
5 | import org.springframework.boot.autoconfigure.SpringBootApplication;
6 | import org.springframework.beans.factory.annotation.Autowired;
7 | import org.springframework.kafka.listener.MessageListenerContainer;
8 | import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
9 | import org.springframework.boot.CommandLineRunner;
10 | import org.springframework.boot.WebApplicationType;
11 | import org.springframework.context.annotation.Bean;
12 |
13 |
14 | @SpringBootApplication
15 | public class SpringBootWithKafkaApplication {
16 |
17 | private final Producer producer;
18 |
19 | public static void main(String[] args) {
20 | SpringApplication application = new SpringApplication(SpringBootWithKafkaApplication.class);
21 | application.setWebApplicationType(WebApplicationType.NONE);
22 | application.run(args);
23 | }
24 |
25 | @Bean
26 | public CommandLineRunner CommandLineRunnerBean() {
27 | return (args) -> {
28 | for (String arg : args) {
29 | switch (arg) {
30 | case "--producer":
31 | this.producer.sendMessage("awalther", "t-shirts");
32 | this.producer.sendMessage("htanaka", "t-shirts");
33 | this.producer.sendMessage("htanaka", "batteries");
34 | this.producer.sendMessage("eabara", "t-shirts");
35 | this.producer.sendMessage("htanaka", "t-shirts");
36 | this.producer.sendMessage("jsmith", "book");
37 | this.producer.sendMessage("awalther", "t-shirts");
38 | this.producer.sendMessage("jsmith", "batteries");
39 | this.producer.sendMessage("jsmith", "gift card");
40 | this.producer.sendMessage("eabara", "t-shirts");
41 | break;
42 | case "--consumer":
43 | MessageListenerContainer listenerContainer = kafkaListenerEndpointRegistry.getListenerContainer("myConsumer");
44 | listenerContainer.start();
45 | break;
46 | default:
47 | break;
48 | }
49 | }
50 | };
51 | }
52 |
53 | @Autowired
54 | SpringBootWithKafkaApplication(Producer producer) {
55 | this.producer = producer;
56 | }
57 |
58 | @Autowired
59 | private KafkaListenerEndpointRegistry kafkaListenerEndpointRegistry;
60 |
61 | }
62 |
--------------------------------------------------------------------------------
/kafka-consumer-application/src/main/java/io/confluent/developer/KafkaConsumerApplication.java:
--------------------------------------------------------------------------------
1 | package io.confluent.developer;
2 |
3 |
4 | import java.io.FileInputStream;
5 | import java.io.IOException;
6 | import java.nio.file.Paths;
7 | import java.time.Duration;
8 | import java.util.Collections;
9 | import java.util.Properties;
10 | import org.apache.kafka.clients.consumer.Consumer;
11 | import org.apache.kafka.clients.consumer.ConsumerRecords;
12 | import org.apache.kafka.clients.consumer.KafkaConsumer;
13 |
14 | public class KafkaConsumerApplication {
15 |
16 | private volatile boolean keepConsuming = true;
17 | private ConsumerRecordsHandler<String, String> recordsHandler;
18 | private Consumer<String, String> consumer;
19 |
20 | public KafkaConsumerApplication(final Consumer<String, String> consumer,
21 | final ConsumerRecordsHandler<String, String> recordsHandler) {
22 | this.consumer = consumer;
23 | this.recordsHandler = recordsHandler;
24 | }
25 |
26 | public void runConsume(final Properties consumerProps) {
27 | try {
28 | consumer.subscribe(Collections.singletonList(consumerProps.getProperty("input.topic.name")));
29 | while (keepConsuming) {
30 | final ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
31 | recordsHandler.process(consumerRecords);
32 | }
33 | } finally {
34 | consumer.close();
35 | }
36 | }
37 |
38 | public void shutdown() {
39 | keepConsuming = false;
40 | }
41 |
42 | public static Properties loadProperties(String fileName) throws IOException {
43 | final Properties props = new Properties();
44 | final FileInputStream input = new FileInputStream(fileName);
45 | props.load(input);
46 | input.close();
47 | return props;
48 | }
49 |
50 | public static void main(String[] args) throws Exception {
51 |
52 | if (args.length < 1) {
53 | throw new IllegalArgumentException(
54 | "This program takes one argument: the path to an environment configuration file.");
55 | }
56 |
57 | final Properties consumerAppProps = KafkaConsumerApplication.loadProperties(args[0]);
58 | final String filePath = consumerAppProps.getProperty("file.path");
59 | final Consumer<String, String> consumer = new KafkaConsumer<>(consumerAppProps);
60 | final ConsumerRecordsHandler<String, String> recordsHandler = new FileWritingRecordsHandler(Paths.get(filePath));
61 | final KafkaConsumerApplication consumerApplication = new KafkaConsumerApplication(consumer, recordsHandler);
62 |
63 | Runtime.getRuntime().addShutdownHook(new Thread(consumerApplication::shutdown));
64 |
65 | consumerApplication.runConsume(consumerAppProps);
66 | }
67 |
68 | }
69 |
--------------------------------------------------------------------------------
/kafka-streams/app/src/main/java/io/m03315/learning/kafka/TopicLoader.java:
--------------------------------------------------------------------------------
1 |
2 | package io.m03315.learning.kafka;
3 |
4 | import org.apache.kafka.clients.admin.Admin;
5 | import org.apache.kafka.clients.producer.Callback;
6 | import org.apache.kafka.clients.producer.KafkaProducer;
7 | import org.apache.kafka.clients.producer.Producer;
8 | import org.apache.kafka.clients.producer.ProducerConfig;
9 | import org.apache.kafka.clients.producer.ProducerRecord;
10 | import org.apache.kafka.common.serialization.StringSerializer;
11 |
12 | import java.io.IOException;
13 | import java.util.List;
14 | import java.util.Properties;
15 | import java.util.stream.Collectors;
16 |
17 | public class TopicLoader {
18 |
19 | public static void main(String[] args) throws IOException {
20 | runProducer();
21 | }
22 |
23 | public static void runProducer() throws IOException {
24 | Properties properties = StreamsUtils.loadProperties();
25 | properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
26 | properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
27 |
28 | try(Admin adminClient = Admin.create(properties);
29 | Producer<String, String> producer = new KafkaProducer<>(properties)) {
30 | final String inputTopic = properties.getProperty("basic.input.topic");
31 | final String outputTopic = properties.getProperty("basic.output.topic");
32 | var topics = List.of(StreamsUtils.createTopic(inputTopic), StreamsUtils.createTopic(outputTopic));
33 | adminClient.createTopics(topics);
34 |
35 | Callback callback = (metadata, exception) -> {
36 | if(exception != null) {
37 | System.out.printf("Producing records encountered error %s %n", exception);
38 | } else {
39 | System.out.printf("Record produced - offset - %d timestamp - %d %n", metadata.offset(), metadata.timestamp());
40 | }
41 |
42 | };
43 |
44 | var rawRecords = List.of("orderNumber-1001",
45 | "orderNumber-5000",
46 | "orderNumber-999",
47 | "orderNumber-3330",
48 | "bogus-1",
49 | "bogus-2",
50 | "orderNumber-8400");
51 | var producerRecords = rawRecords.stream().map(r -> new ProducerRecord<>(inputTopic,"order-key", r)).collect(Collectors.toList());
52 | producerRecords.forEach((pr -> producer.send(pr, callback)));
53 |
54 |
55 | }
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/kafka-python-application/README.md:
--------------------------------------------------------------------------------
1 | # Python: Code Example for Apache Kafka
2 |
3 |
4 | ## Install requirements
5 |
6 | ```
7 | virtualenv ccloud-venv
8 | source ./ccloud-venv/bin/activate
9 | pip install -r requirements.txt
10 | ```
11 |
12 |
13 |
14 | ## Configuration
15 |
16 | Replace the configuration values in [librdkafka.config](librdkafka.config) with your own cluster and Schema Registry credentials (see the example below).
17 |
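A minimal librdkafka.config sketch for Confluent Cloud is shown below. The placeholder names mirror the other examples in this repository and are assumptions to be replaced with your own values; the Schema Registry entries are only needed for the Avro examples:

```
bootstrap.servers={{ BROKER_ENDPOINT }}
security.protocol=SASL_SSL
sasl.mechanisms=PLAIN
sasl.username={{ CLUSTER_API_KEY }}
sasl.password={{ CLUSTER_API_SECRET }}

# Required only for producer_ccsr.py / consumer_ccsr.py
schema.registry.url=https://{{ SR_ENDPOINT }}
basic.auth.credentials.source=USER_INFO
basic.auth.user.info={{ SR_API_KEY }}:{{ SR_API_SECRET }}
```
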
18 | ## Basic Producer and Consumer
19 |
20 | ### Produce Records
21 |
22 | ```
23 | ./producer.py -f librdkafka.config -t test1
24 | ```
25 |
26 | [producer code](producer.py)
27 |
28 |
29 | ### Consume Records
30 |
31 | ```
32 | ./consumer.py -f librdkafka.config -t test1
33 | ```
34 |
35 | [consumer code](consumer.py)
36 |
37 |
38 | ## Avro and Confluent Cloud Schema Registry
39 |
40 | ```
41 | curl -u {{ SR_API_KEY }}:{{ SR_API_SECRET }} https://{{ SR_ENDPOINT }}/subjects
42 | ```
43 |
44 | ### Produce Avro Records
45 |
46 | ```
47 | ./producer_ccsr.py -f librdkafka.config -t test2
48 | ```
49 |
50 | [producer Avro code](producer_ccsr.py)
51 |
52 |
53 | ### Consume Avro Records
54 |
55 | ```
56 | ./consumer_ccsr.py -f librdkafka.config -t test2
57 | ```
58 |
59 | [consumer Avro code](consumer_ccsr.py)
60 |
61 |
62 | ## Confluent Cloud Schema Registry
63 |
64 | ```
65 | curl -u {{ SR_API_KEY }}:{{ SR_API_SECRET }} https://{{ SR_ENDPOINT }}/subjects
66 | ```
67 |
68 | Verify that the subject test2-value exists
69 | ```
70 | ["test2-value"]
71 | ```
72 |
73 | View the schema information for subject test2-value. In the following command, substitute values for {{ SR_API_KEY }}, {{ SR_API_SECRET }}, and {{ SR_ENDPOINT }}.
74 |
75 | ```
76 | curl -u {{ SR_API_KEY }}:{{ SR_API_SECRET }} https://{{ SR_ENDPOINT }}/subjects/test2-value/versions/1
77 | ```
78 |
79 | Verify the schema information for subject test2-value.
80 |
81 | ```
82 | {"subject":"test2-value","version":1,"id":100001,"schema":"{\"name\":\"io.confluent.examples.clients.cloud.DataRecordAvro\",\"type\":\"record\",\"fields\":[{\"name\":\"count\",\"type\":\"long\"}]}"}
83 | ```
84 |
85 | ## Run all the code in Docker
86 |
87 | Use this [Dockerfile](Dockerfile) file
88 |
89 | ```
90 | docker build -t cloud-demo-python .
91 | ```
92 |
93 | Run the Docker image using the following command:
94 |
95 | ```
96 | docker run -v librdkafka.config:/root/.confluent/librdkafka.config -it --rm cloud-demo-python bash
97 | ```
98 |
99 | Run the Python applications from within the container shell
100 |
101 | ```
102 | ./producer.py -f $HOME/.confluent/librdkafka.config -t test1
103 | ./consumer.py -f $HOME/.confluent/librdkafka.config -t test1
104 | ./producer_ccsr.py -f $HOME/.confluent/librdkafka.config -t test2
105 | ./consumer_ccsr.py -f $HOME/.confluent/librdkafka.config -t test2
106 | ```
107 |
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
--------------------------------------------------------------------------------
/kafka-python-application/producer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Copyright 2020 Confluent Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 |
18 | # =============================================================================
19 | #
20 | # Produce messages to Confluent Cloud
21 | # Using Confluent Python Client for Apache Kafka
22 | #
23 | # =============================================================================
24 |
25 | from confluent_kafka import Producer, KafkaError
26 | import json
27 | import ccloud_lib
28 |
29 |
30 | if __name__ == '__main__':
31 |
32 | # Read arguments and configurations and initialize
33 | args = ccloud_lib.parse_args()
34 | config_file = args.config_file
35 | topic = args.topic
36 | conf = ccloud_lib.read_ccloud_config(config_file)
37 |
38 | # Create Producer instance
39 | producer_conf = ccloud_lib.pop_schema_registry_params_from_config(conf)
40 | producer = Producer(producer_conf)
41 |
42 | # Create topic if needed
43 | ccloud_lib.create_topic(conf, topic)
44 |
45 | delivered_records = 0
46 |
47 | # Optional per-message on_delivery handler (triggered by poll() or flush())
48 | # when a message has been successfully delivered or
49 | # permanently failed delivery (after retries).
50 | def acked(err, msg):
51 | global delivered_records
52 | """Delivery report handler called on
53 | successful or failed delivery of message
54 | """
55 | if err is not None:
56 | print("Failed to deliver message: {}".format(err))
57 | else:
58 | delivered_records += 1
59 | print("Produced record to topic {} partition [{}] @ offset {}"
60 | .format(msg.topic(), msg.partition(), msg.offset()))
61 |
62 | for n in range(10):
63 | record_key = "alice"
64 | record_value = json.dumps({'count': n})
65 | print("Producing record: {}\t{}".format(record_key, record_value))
66 | producer.produce(topic, key=record_key, value=record_value, on_delivery=acked)
67 | # p.poll() serves delivery reports (on_delivery)
68 | # from previous produce() calls.
69 | producer.poll(0)
70 |
71 | producer.flush()
72 |
73 | print("{} messages were produced to topic {}!".format(delivered_records, topic))
74 |
--------------------------------------------------------------------------------
/kafka-java-springboot/README.md:
--------------------------------------------------------------------------------
1 | # Java Spring Boot: Code Example for Apache Kafka
2 |
3 | - Java 1.8 or higher to run the demo application.
4 |
5 |
6 | ## Set up the config file
7 | 
8 | Create $HOME/java.config with the following contents:
9 |
10 | ```
11 | # Required connection configs for Kafka producer, consumer, and admin
12 | bootstrap.servers={{ BROKER_ENDPOINT }}
13 | security.protocol=SASL_SSL
14 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='{{ CLUSTER_API_KEY }}' password='{{ CLUSTER_API_SECRET }}';
15 | sasl.mechanism=PLAIN
16 | # Required for correctness in Apache Kafka clients prior to 2.6
17 | client.dns.lookup=use_all_dns_ips
18 |
19 | # Best practice for higher availability in Apache Kafka clients prior to 3.0
20 | session.timeout.ms=45000
21 |
22 | # Best practice for Kafka producer to prevent data loss
23 | acks=all
24 |
25 | # Required connection configs for Confluent Cloud Schema Registry
26 | schema.registry.url=https://{{ SR_ENDPOINT }}
27 | basic.auth.credentials.source=USER_INFO
28 | basic.auth.user.info={{ SR_API_KEY }}:{{ SR_API_SECRET }}
29 | ```
30 |
31 | ## Avro and Confluent Cloud Schema Registry
32 |
33 | If the topic does not already exist in your Kafka cluster, the producer application will use the Kafka Admin Client API to create the topic. Each record written to Kafka has a key representing a username and a value of a count, formatted as JSON (for example, {"count": 0}). The consumer application reads the same Kafka topic and keeps a rolling sum of the count as it processes each record.
34 |
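As a rough illustration of the topic-creation step, a standalone sketch using the Admin Client API is shown below. The topic name, partition count, and replication factor are placeholders rather than values taken from this project; see the linked producer code further down for the actual implementation.

```
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;

public class CreateTopicSketch {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    // In practice, load the full $HOME/java.config shown above (SASL settings included).
    props.put("bootstrap.servers", "{{ BROKER_ENDPOINT }}");

    try (AdminClient admin = AdminClient.create(props)) {
      // Create the topic if it does not exist yet; creating an existing topic
      // fails with a TopicExistsException wrapped in the returned future.
      NewTopic topic = new NewTopic("test2", 1, (short) 3);
      admin.createTopics(Collections.singletonList(topic)).all().get();
    }
  }
}
```
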
35 | ### Check your config
36 |
37 | ```
38 | curl -u {{ SR_API_KEY }}:{{ SR_API_SECRET }} https://{{ SR_ENDPOINT }}/subjects
39 | ```
40 |
41 | ## Produce and Consume Records
42 |
43 | This Spring Boot application has two components, a [Producer](src/main/java/io/confluent/examples/clients/cloud/springboot/kafka/ProducerExample.java) and a [Consumer](src/main/java/io/confluent/examples/clients/cloud/springboot/kafka/ConsumerExample.java), that are initialized during application startup. The producer writes Kafka data to a topic in your Kafka cluster. Each record has a String key representing a username (for example, alice) and a value of a count, formatted with the Avro schema [DataRecordAvro.avsc](src/main/avro/DataRecordAvro.avsc).
44 |
45 | ### Run the producer and consumer with the following command
46 | ```
47 | ./startProducerConsumer.sh
48 | ```
49 |
50 | ### Stop
51 |
52 | press CTRL-C
53 |
54 | ## Kafka Streams
55 | The Kafka Streams API reads from the same topic and does a rolling count and stateful sum aggregation as it processes each record.
56 |
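A minimal sketch of that kind of topology is shown below, assuming an input topic keyed by username whose Avro value exposes a getCount() accessor. The topic name and serde setup are simplified; see the linked Streams code at the end of this section for the real implementation.

```
import io.confluent.examples.clients.cloud.DataRecordAvro;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.KStream;

public class RollingSumSketch {

  // Builds a topology that keeps a running sum of `count` per username key.
  // Default key/value serdes are assumed to be set in the streams properties.
  static Topology buildTopology(final String inputTopic) {
    StreamsBuilder builder = new StreamsBuilder();
    KStream<String, DataRecordAvro> records = builder.stream(inputTopic);
    records.mapValues(DataRecordAvro::getCount)   // extract the count from each record
           .groupByKey()                          // group by the username key
           .reduce(Long::sum)                     // stateful running total per user
           .toStream()
           .peek((user, total) -> System.out.printf("user = %s, running total = %d%n", user, total));
    return builder.build();
  }
}
```
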
57 | ### Run the Kafka Streams application
58 |
59 | ```
60 | ./startStreams.sh
61 | ```
62 |
63 | ### Stop
64 |
65 | press CTRL-C
66 |
67 | ### [Kafka Streams Code](src/main/java/io/confluent/examples/clients/cloud/springboot/streams/SpringbootStreamsApplication.java)
68 |
69 |
70 |
--------------------------------------------------------------------------------
/kafka-python-application/consumer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Copyright 2020 Confluent Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 |
18 | # =============================================================================
19 | #
20 | # Consume messages from Confluent Cloud
21 | # Using Confluent Python Client for Apache Kafka
22 | #
23 | # =============================================================================
24 |
25 | from confluent_kafka import Consumer
26 | import json
27 | import ccloud_lib
28 |
29 |
30 | if __name__ == '__main__':
31 |
32 | # Read arguments and configurations and initialize
33 | args = ccloud_lib.parse_args()
34 | config_file = args.config_file
35 | topic = args.topic
36 | conf = ccloud_lib.read_ccloud_config(config_file)
37 |
38 | # Create Consumer instance
39 | # 'auto.offset.reset=earliest' to start reading from the beginning of the
40 | # topic if no committed offsets exist
41 | consumer_conf = ccloud_lib.pop_schema_registry_params_from_config(conf)
42 | consumer_conf['group.id'] = 'python_example_group_1'
43 | consumer_conf['auto.offset.reset'] = 'earliest'
44 | consumer = Consumer(consumer_conf)
45 |
46 | # Subscribe to topic
47 | consumer.subscribe([topic])
48 |
49 | # Process messages
50 | total_count = 0
51 | try:
52 | while True:
53 | msg = consumer.poll(1.0)
54 | if msg is None:
55 | # No message available within timeout.
56 | # Initial message consumption may take up to
57 | # `session.timeout.ms` for the consumer group to
58 | # rebalance and start consuming
59 | print("Waiting for message or event/error in poll()")
60 | continue
61 | elif msg.error():
62 | print('error: {}'.format(msg.error()))
63 | else:
64 | # Check for Kafka message
65 | record_key = msg.key()
66 | record_value = msg.value()
67 | data = json.loads(record_value)
68 | count = data['count']
69 | total_count += count
70 | print("Consumed record with key {} and value {}, \
71 | and updated total count to {}"
72 | .format(record_key, record_value, total_count))
73 | except KeyboardInterrupt:
74 | pass
75 | finally:
76 | # Leave group and commit final offsets
77 | consumer.close()
78 |
--------------------------------------------------------------------------------
/kafka-consumer-application/src/test/java/io/confluent/developer/KafkaConsumerApplicationTest.java:
--------------------------------------------------------------------------------
1 | package io.confluent.developer;
2 |
3 | import static org.hamcrest.MatcherAssert.assertThat;
4 | import static org.hamcrest.Matchers.equalTo;
5 |
6 | import java.nio.file.Files;
7 | import java.nio.file.Path;
8 | import java.util.Arrays;
9 | import java.util.Collections;
10 | import java.util.HashMap;
11 | import java.util.List;
12 | import java.util.Map;
13 | import java.util.Properties;
14 | import org.apache.kafka.clients.consumer.ConsumerRecord;
15 | import org.apache.kafka.clients.consumer.MockConsumer;
16 | import org.apache.kafka.clients.consumer.OffsetResetStrategy;
17 | import org.apache.kafka.common.TopicPartition;
18 | import org.junit.Test;
19 |
20 |
21 | public class KafkaConsumerApplicationTest {
22 |
23 | private final static String TEST_CONFIG_FILE = "configuration/test.properties";
24 |
25 | @Test
26 | public void consumerTest() throws Exception {
27 |
28 | final Path tempFilePath = Files.createTempFile("test-consumer-output", ".out");
29 | final ConsumerRecordsHandler<String, String> recordsHandler = new FileWritingRecordsHandler(tempFilePath);
30 | final Properties testConsumerProps = KafkaConsumerApplication.loadProperties(TEST_CONFIG_FILE);
31 | final String topic = testConsumerProps.getProperty("input.topic.name");
32 | final TopicPartition topicPartition = new TopicPartition(topic, 0);
33 | final MockConsumer<String, String> mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
34 |
35 | final KafkaConsumerApplication consumerApplication = new KafkaConsumerApplication(mockConsumer, recordsHandler);
36 |
37 | mockConsumer.schedulePollTask(() -> addTopicPartitionsAssignmentAndAddConsumerRecords(topic, mockConsumer, topicPartition));
38 | mockConsumer.schedulePollTask(consumerApplication::shutdown);
39 | consumerApplication.runConsume(testConsumerProps);
40 |
41 | final List<String> expectedWords = Arrays.asList("foo", "bar", "baz");
42 | List<String> actualRecords = Files.readAllLines(tempFilePath);
43 | assertThat(actualRecords, equalTo(expectedWords));
44 | }
45 |
46 | private void addTopicPartitionsAssignmentAndAddConsumerRecords(final String topic,
47 | final MockConsumer<String, String> mockConsumer,
48 | final TopicPartition topicPartition) {
49 |
50 | final Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
51 | beginningOffsets.put(topicPartition, 0L);
52 | mockConsumer.rebalance(Collections.singletonList(topicPartition));
53 | mockConsumer.updateBeginningOffsets(beginningOffsets);
54 | addConsumerRecords(mockConsumer,topic);
55 | }
56 |
57 | private void addConsumerRecords(final MockConsumer<String, String> mockConsumer, final String topic) {
58 | mockConsumer.addRecord(new ConsumerRecord<>(topic, 0, 0, null, "foo"));
59 | mockConsumer.addRecord(new ConsumerRecord<>(topic, 0, 1, null, "bar"));
60 | mockConsumer.addRecord(new ConsumerRecord<>(topic, 0, 2, null, "baz"));
61 | }
62 |
63 |
64 | }
65 |
--------------------------------------------------------------------------------
/kafka-streams/gradlew.bat:
--------------------------------------------------------------------------------
1 | @rem
2 | @rem Copyright 2015 the original author or authors.
3 | @rem
4 | @rem Licensed under the Apache License, Version 2.0 (the "License");
5 | @rem you may not use this file except in compliance with the License.
6 | @rem You may obtain a copy of the License at
7 | @rem
8 | @rem https://www.apache.org/licenses/LICENSE-2.0
9 | @rem
10 | @rem Unless required by applicable law or agreed to in writing, software
11 | @rem distributed under the License is distributed on an "AS IS" BASIS,
12 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | @rem See the License for the specific language governing permissions and
14 | @rem limitations under the License.
15 | @rem
16 |
17 | @if "%DEBUG%"=="" @echo off
18 | @rem ##########################################################################
19 | @rem
20 | @rem Gradle startup script for Windows
21 | @rem
22 | @rem ##########################################################################
23 |
24 | @rem Set local scope for the variables with windows NT shell
25 | if "%OS%"=="Windows_NT" setlocal
26 |
27 | set DIRNAME=%~dp0
28 | if "%DIRNAME%"=="" set DIRNAME=.
29 | @rem This is normally unused
30 | set APP_BASE_NAME=%~n0
31 | set APP_HOME=%DIRNAME%
32 |
33 | @rem Resolve any "." and ".." in APP_HOME to make it shorter.
34 | for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
35 |
36 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
37 | set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
38 |
39 | @rem Find java.exe
40 | if defined JAVA_HOME goto findJavaFromJavaHome
41 |
42 | set JAVA_EXE=java.exe
43 | %JAVA_EXE% -version >NUL 2>&1
44 | if %ERRORLEVEL% equ 0 goto execute
45 |
46 | echo.
47 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
48 | echo.
49 | echo Please set the JAVA_HOME variable in your environment to match the
50 | echo location of your Java installation.
51 |
52 | goto fail
53 |
54 | :findJavaFromJavaHome
55 | set JAVA_HOME=%JAVA_HOME:"=%
56 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe
57 |
58 | if exist "%JAVA_EXE%" goto execute
59 |
60 | echo.
61 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
62 | echo.
63 | echo Please set the JAVA_HOME variable in your environment to match the
64 | echo location of your Java installation.
65 |
66 | goto fail
67 |
68 | :execute
69 | @rem Setup the command line
70 |
71 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
72 |
73 |
74 | @rem Execute Gradle
75 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
76 |
77 | :end
78 | @rem End local scope for the variables with windows NT shell
79 | if %ERRORLEVEL% equ 0 goto mainEnd
80 |
81 | :fail
82 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
83 | rem the _cmd.exe /c_ return code!
84 | set EXIT_CODE=%ERRORLEVEL%
85 | if %EXIT_CODE% equ 0 set EXIT_CODE=1
86 | if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
87 | exit /b %EXIT_CODE%
88 |
89 | :mainEnd
90 | if "%OS%"=="Windows_NT" endlocal
91 |
92 | :omega
93 |
--------------------------------------------------------------------------------
/.github/workflows/codeql.yml:
--------------------------------------------------------------------------------
1 | # For most projects, this workflow file will not need changing; you simply need
2 | # to commit it to your repository.
3 | #
4 | # You may wish to alter this file to override the set of languages analyzed,
5 | # or to provide custom queries or build logic.
6 | #
7 | # ******** NOTE ********
8 | # We have attempted to detect the languages in your repository. Please check
9 | # the `language` matrix defined below to confirm you have the correct set of
10 | # supported CodeQL languages.
11 | #
12 | name: "CodeQL"
13 |
14 | on:
15 | push:
16 | branches: [ "main" ]
17 | pull_request:
18 | # The branches below must be a subset of the branches above
19 | branches: [ "main" ]
20 | schedule:
21 | - cron: '40 8 * * 3'
22 |
23 | jobs:
24 | analyze:
25 | name: Analyze
26 | runs-on: ubuntu-latest
27 | permissions:
28 | actions: read
29 | contents: read
30 | security-events: write
31 |
32 | strategy:
33 | fail-fast: false
34 | matrix:
35 | language: [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
37 | # Use only 'java' to analyze code written in Java, Kotlin or both
38 | # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both
39 | # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
40 |
41 | steps:
42 | - name: Checkout repository
43 | uses: actions/checkout@v3
44 |
45 | # Initializes the CodeQL tools for scanning.
46 | - name: Initialize CodeQL
47 | uses: github/codeql-action/init@v2
48 | with:
49 | languages: ${{ matrix.language }}
50 | # If you wish to specify custom queries, you can do so here or in a config file.
51 | # By default, queries listed here will override any specified in a config file.
52 | # Prefix the list here with "+" to use these queries and those in the config file.
53 |
54 | # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
55 | # queries: security-extended,security-and-quality
56 |
57 |
58 | # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java).
59 | # If this step fails, then you should remove it and run the build manually (see below)
60 | - name: Autobuild
61 | uses: github/codeql-action/autobuild@v2
62 |
63 | # ℹ️ Command-line programs to run using the OS shell.
64 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
65 |
66 | # If the Autobuild fails above, remove it and uncomment the following three lines.
67 | # modify them (or add more) to build your code. If your project needs custom build steps, refer to the EXAMPLE below for guidance.
68 |
69 | # - run: |
70 | # echo "Run, Build Application using script"
71 | # ./location_of_script_within_repo/buildscript.sh
72 |
73 | - name: Perform CodeQL Analysis
74 | uses: github/codeql-action/analyze@v2
75 | with:
76 | category: "/language:${{matrix.language}}"
77 |
--------------------------------------------------------------------------------
/kafka-streams/app/build.gradle:
--------------------------------------------------------------------------------
1 | /*
2 | * This file was generated by the Gradle 'init' task.
3 | *
4 | * This generated file contains a sample Java application project to get you started.
5 | * For more details take a look at the 'Building Java & JVM projects' chapter in the Gradle
6 | * User Manual available at https://docs.gradle.org/7.6/userguide/building_java_projects.html
7 | */
8 |
9 | plugins {
10 | // Apply the application plugin to add support for building a CLI application in Java.
11 | id 'application'
12 | id "com.github.davidmc24.gradle.plugin.avro" version "1.0.0"
13 | }
14 |
15 | repositories {
16 | // Use Maven Central for resolving dependencies.
17 | mavenCentral()
18 | maven {
19 | url "https://packages.confluent.io/maven"
20 | }
21 | }
22 |
23 | dependencies {
24 | // Use JUnit Jupiter for testing.
25 | testImplementation 'org.junit.jupiter:junit-jupiter:5.9.1'
26 |
27 | // This dependency is used by the application.
28 | implementation 'com.google.guava:guava:31.1-jre'
29 |
30 | implementation "org.apache.avro:avro:1.10.2"
31 | implementation 'org.apache.logging.log4j:log4j-api:2.17.2'
32 | implementation 'org.apache.logging.log4j:log4j-core:2.17.2'
33 | implementation 'org.apache.logging.log4j:log4j-slf4j-impl:2.17.2'
34 |
35 | implementation('org.apache.kafka:kafka-streams:2.8.0') {
36 | exclude group: 'org.apache.kafka', module: 'kafka-clients'
37 | }
38 | implementation('org.apache.kafka:kafka-clients:2.8.0!!')
39 | implementation('io.confluent:kafka-streams-avro-serde:6.1.1') {
40 | exclude group: 'org.apache.kafka', module: 'kafka-clients'
41 | exclude group: 'org.apache.kafka', module: 'kafka-streams'
42 | }
43 | }
44 |
45 | application {
46 | // Define the main class for the application.
47 | mainClass = 'io.m03315.learning.kafka.BasicStreams'
48 | }
49 |
50 | /*
51 |
52 | def generateAvro = tasks.register("generateAvro", com.github.davidmc24.gradle.plugin.avro.GenerateAvroJavaTask) {
53 | source("src/main/avro")
54 | outputDir = file("src/main/java")
55 | fieldVisibility = "PUBLIC"
56 | enableDecimalLogicalType = true
57 | }
58 |
59 | tasks.named("compileJava").configure {
60 | source(generateAvro)
61 | } */
62 |
63 | tasks.named('test') {
64 | // Use JUnit Platform for unit tests.
65 | useJUnitPlatform()
66 | }
67 |
68 |
69 | var basePackage = 'io.m03315.learning.kafka.'
70 | def exerciseMap = [basic : basePackage + 'BasicStreams',
71 | ktable : basePackage + 'KTableExample',
72 | joins: basePackage + 'StreamsJoin']
73 |
74 | task runStreams(type: Exec) {
75 |
76 | var streamsFullPath = ''
77 |
78 | if (project.hasProperty("args")) {
79 | var exercise = project.getProperty("args")
80 | streamsFullPath = exerciseMap[exercise]
81 |
82 | if (streamsFullPath == null) {
83 | throw new StopActionException("!!!!!! You entered '${exercise}' for an exercise to run, but that's not a valid entry. The valid options are ${exerciseMap.keySet()}" )
84 | }
85 | dependsOn assemble
86 | group = "Execution"
87 | println "Using example ${streamsFullPath}"
88 | description = "Run a Kafka Streams exercise"
89 | commandLine "java", "-classpath", sourceSets.main.runtimeClasspath.getAsPath(), streamsFullPath
90 | }
91 | }
92 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/src/main/java/io/confluent/examples/clients/cloud/ConsumerExamplePageviews.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2020 Confluent Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.confluent.examples.clients.cloud;
17 |
18 | import io.confluent.examples.clients.cloud.model.PageviewRecord;
19 | import io.confluent.kafka.serializers.KafkaJsonDeserializerConfig;
20 | import org.apache.kafka.clients.consumer.Consumer;
21 | import org.apache.kafka.clients.consumer.ConsumerConfig;
22 | import org.apache.kafka.clients.consumer.ConsumerRecord;
23 | import org.apache.kafka.clients.consumer.ConsumerRecords;
24 | import org.apache.kafka.clients.consumer.KafkaConsumer;
25 |
26 | import java.time.Duration;
27 | import java.util.Arrays;
28 | import java.util.Properties;
29 |
30 | import static io.confluent.examples.clients.cloud.Util.loadConfig;
31 |
32 | public class ConsumerExamplePageviews {
33 |
34 | public static void main(final String[] args) throws Exception {
35 | if (args.length != 2) {
36 | System.out.println("Please provide command line arguments: configPath topic");
37 | System.exit(1);
38 | }
39 |
40 | final String topic = args[1];
41 |
42 | // Load properties from a local configuration file
43 | // Create the configuration file (e.g. at '$HOME/.confluent/java.config') with configuration parameters
44 | // to connect to your Kafka cluster, which can be on your local host, Confluent Cloud, or any other cluster.
45 | // Follow these instructions to create this file: https://docs.confluent.io/platform/current/tutorials/examples/clients/docs/java.html
46 |
47 | final Properties props = loadConfig(args[0]);
48 |
49 | // Add additional properties.
50 | props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
51 | props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "io.confluent.kafka.serializers.KafkaJsonDeserializer");
52 | props.put(KafkaJsonDeserializerConfig.JSON_VALUE_TYPE, PageviewRecord.class);
53 | props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-beginner-cloud-1");
54 | props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
55 |
56 | final Consumer<String, PageviewRecord> consumer = new KafkaConsumer<>(props);
57 | consumer.subscribe(Arrays.asList(topic));
58 |
59 | try {
60 | while (true) {
61 | ConsumerRecords<String, PageviewRecord> records = consumer.poll(Duration.ofMillis(100));
62 | for (ConsumerRecord<String, PageviewRecord> record : records) {
63 | String key = record.key();
64 | PageviewRecord value = record.value();
65 | System.out.printf("Consumed record with key %s and value %s%n", key, value);
66 | }
67 | }
68 | } finally {
69 | consumer.close();
70 | }
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/src/main/java/io/confluent/examples/clients/cloud/ConsumerAvroExample.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2020 Confluent Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.confluent.examples.clients.cloud;
17 |
18 | import io.confluent.kafka.serializers.KafkaAvroDeserializer;
19 | import io.confluent.kafka.serializers.KafkaAvroDeserializerConfig;
20 | import org.apache.kafka.clients.consumer.Consumer;
21 | import org.apache.kafka.clients.consumer.ConsumerConfig;
22 | import org.apache.kafka.clients.consumer.ConsumerRecord;
23 | import org.apache.kafka.clients.consumer.ConsumerRecords;
24 | import org.apache.kafka.clients.consumer.KafkaConsumer;
25 |
26 | import java.time.Duration;
27 | import java.util.Arrays;
28 | import java.util.Properties;
29 |
30 | import static io.confluent.examples.clients.cloud.Util.loadConfig;
31 |
32 | public class ConsumerAvroExample {
33 |
34 | public static void main(final String[] args) throws Exception {
35 | if (args.length != 2) {
36 | System.out.println("Please provide command line arguments: configPath topic");
37 | System.exit(1);
38 | }
39 |
40 | final String topic = args[1];
41 |
42 | // Load properties from a local configuration file
43 | // Create the configuration file (e.g. at '$HOME/.confluent/java.config') with configuration parameters
44 | // to connect to your Kafka cluster, which can be on your local host, Confluent Cloud, or any other cluster.
45 | // Follow these instructions to create this file: https://docs.confluent.io/platform/current/tutorials/examples/clients/docs/java.html
46 | final Properties props = loadConfig(args[0]);
47 |
48 | // Add additional properties.
49 | props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
50 | props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class);
51 | props.put(KafkaAvroDeserializerConfig.SPECIFIC_AVRO_READER_CONFIG, true);
52 | props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-consumer-avro-1");
53 | props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
54 |
55 | final Consumer<String, DataRecordAvro> consumer = new KafkaConsumer<>(props);
56 | consumer.subscribe(Arrays.asList(topic));
57 |
58 | Long total_count = 0L;
59 |
60 | try {
61 | while (true) {
62 | ConsumerRecords<String, DataRecordAvro> records = consumer.poll(Duration.ofMillis(100));
63 | for (ConsumerRecord<String, DataRecordAvro> record : records) {
64 | String key = record.key();
65 | DataRecordAvro value = record.value();
66 | total_count += value.getCount();
67 | System.out.printf("Consumed record with key %s and value %s, and updated total count to %d%n", key, value, total_count);
68 | }
69 | }
70 | } finally {
71 | consumer.close();
72 | }
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/src/main/java/io/confluent/examples/clients/cloud/ConsumerExample.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2020 Confluent Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.confluent.examples.clients.cloud;
17 |
18 | import io.confluent.examples.clients.cloud.model.DataRecord;
19 | import io.confluent.kafka.serializers.KafkaJsonDeserializerConfig;
20 | import org.apache.kafka.clients.consumer.Consumer;
21 | import org.apache.kafka.clients.consumer.ConsumerConfig;
22 | import org.apache.kafka.clients.consumer.ConsumerRecord;
23 | import org.apache.kafka.clients.consumer.ConsumerRecords;
24 | import org.apache.kafka.clients.consumer.KafkaConsumer;
25 |
26 | import java.time.Duration;
27 | import java.util.Arrays;
28 | import java.util.Properties;
29 |
30 | import static io.confluent.examples.clients.cloud.Util.loadConfig;
31 |
32 | public class ConsumerExample {
33 |
34 | public static void main(final String[] args) throws Exception {
35 | if (args.length != 2) {
36 | System.out.println("Please provide command line arguments: configPath topic");
37 | System.exit(1);
38 | }
39 |
40 | final String topic = args[1];
41 |
42 | // Load properties from a local configuration file
43 | // Create the configuration file (e.g. at '$HOME/.confluent/java.config') with configuration parameters
44 | // to connect to your Kafka cluster, which can be on your local host, Confluent Cloud, or any other cluster.
45 | // Follow these instructions to create this file: https://docs.confluent.io/platform/current/tutorials/examples/clients/docs/java.html
46 |
47 | final Properties props = loadConfig(args[0]);
48 |
49 | // Add additional properties.
50 | props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
51 | props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "io.confluent.kafka.serializers.KafkaJsonDeserializer");
52 | props.put(KafkaJsonDeserializerConfig.JSON_VALUE_TYPE, DataRecord.class);
53 | props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-consumer-1");
54 | props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
55 |
56 | final Consumer<String, DataRecord> consumer = new KafkaConsumer<>(props);
57 | consumer.subscribe(Arrays.asList(topic));
58 |
59 | Long total_count = 0L;
60 |
61 | try {
62 | while (true) {
63 | ConsumerRecords<String, DataRecord> records = consumer.poll(Duration.ofMillis(100));
64 | for (ConsumerRecord<String, DataRecord> record : records) {
65 | String key = record.key();
66 | DataRecord value = record.value();
67 | total_count += value.getCount();
68 | System.out.printf("Consumed record with key %s and value %s, and updated total count to %d%n", key, value, total_count);
69 | }
70 | }
71 | } finally {
72 | consumer.close();
73 | }
74 | }
75 | }
76 |
--------------------------------------------------------------------------------
/kafka-producer-application-callback/src/main/java/io/confluent/developer/KafkaProducerCallbackApplication.java:
--------------------------------------------------------------------------------
1 | package io.confluent.developer;
2 |
3 |
4 | import org.apache.kafka.clients.producer.KafkaProducer;
5 | import org.apache.kafka.clients.producer.Producer;
6 | import org.apache.kafka.clients.producer.ProducerRecord;
7 |
8 | import java.io.FileInputStream;
9 | import java.io.IOException;
10 | import java.nio.file.Files;
11 | import java.nio.file.Paths;
12 | import java.util.List;
13 | import java.util.Properties;
14 |
15 | public class KafkaProducerCallbackApplication {
16 |
17 | private final Producer<String, String> producer;
18 | final String outTopic;
19 |
20 | public KafkaProducerCallbackApplication(final Producer<String, String> producer,
21 | final String topic) {
22 | this.producer = producer;
23 | outTopic = topic;
24 | }
25 |
26 | public void produce(final String message) {
27 | final String[] parts = message.split("-");
28 | final String key, value;
29 | if (parts.length > 1) {
30 | key = parts[0];
31 | value = parts[1];
32 | } else {
33 | key = null;
34 | value = parts[0];
35 | }
36 |
37 | final ProducerRecord<String, String> producerRecord = new ProducerRecord<>(outTopic, key, value);
38 | producer.send(producerRecord, (recordMetadata, exception) -> {
39 | if (exception == null) {
40 | System.out.println("Record written to offset " +
41 | recordMetadata.offset() + " timestamp " +
42 | recordMetadata.timestamp());
43 | } else {
44 | System.err.println("An error occurred");
45 | exception.printStackTrace(System.err);
46 | }
47 | });
48 | }
49 |
50 | public void shutdown() {
51 | producer.close();
52 | }
53 |
54 | public static Properties loadProperties(String fileName) throws IOException {
55 | final Properties envProps = new Properties();
56 | final FileInputStream input = new FileInputStream(fileName);
57 | envProps.load(input);
58 | input.close();
59 |
60 | return envProps;
61 | }
62 |
63 | public static void main(String[] args) throws Exception {
64 | if (args.length < 2) {
65 | throw new IllegalArgumentException(
66 | "This program takes two arguments: the path to an environment configuration file and" +
67 | "the path to the file with records to send");
68 | }
69 |
70 | final Properties props = KafkaProducerCallbackApplication.loadProperties(args[0]);
71 | final String topic = props.getProperty("output.topic.name");
72 | final Producer<String, String> producer = new KafkaProducer<>(props);
73 | final KafkaProducerCallbackApplication producerApp = new KafkaProducerCallbackApplication(producer, topic);
74 |
75 | String filePath = args[1];
76 | try {
77 | List<String> linesToProduce = Files.readAllLines(Paths.get(filePath));
78 | linesToProduce.stream()
79 | .filter(l -> !l.trim().isEmpty())
80 | .forEach(producerApp::produce);
81 | } catch (IOException e) {
82 | System.err.printf("Error reading file %s due to %s %n", filePath, e);
83 | } finally {
84 | producerApp.shutdown();
85 | }
86 | }
87 | }
88 |
89 |
--------------------------------------------------------------------------------
/kafka-streams/app/src/main/java/io/m03315/learning/kafka/BasicStreams.java:
--------------------------------------------------------------------------------
1 | /*
2 | * This Java source file was generated by the Gradle 'init' task.
3 | */
4 | package io.m03315.learning.kafka;
5 |
6 | import org.apache.kafka.common.serialization.Serdes;
7 | import org.apache.kafka.streams.KafkaStreams;
8 | import org.apache.kafka.streams.StreamsBuilder;
9 | import org.apache.kafka.streams.StreamsConfig;
10 | import org.apache.kafka.streams.kstream.Consumed;
11 | import org.apache.kafka.streams.kstream.KStream;
12 | import org.apache.kafka.streams.kstream.Produced;
13 |
14 | import java.io.FileInputStream;
15 | import java.io.IOException;
16 | import java.time.Duration;
17 | import java.util.Properties;
18 | import java.util.concurrent.CountDownLatch;
19 |
20 | public class BasicStreams {
21 |
22 | public static void main(String[] args) throws IOException {
23 | Properties streamsProps = new Properties();
24 | try (FileInputStream fis = new FileInputStream("src/main/resources/streams.properties")) {
25 | streamsProps.load(fis);
26 | }
27 | streamsProps.put(StreamsConfig.APPLICATION_ID_CONFIG, "basic-streams");
28 |
29 | StreamsBuilder builder = new StreamsBuilder();
30 | final String inputTopic = streamsProps.getProperty("basic.input.topic");
31 | final String outputTopic = streamsProps.getProperty("basic.output.topic");
32 |
33 | final String orderNumberStart = "orderNumber-";
34 | // Using the StreamsBuilder from above, create a KStream with an input-topic
35 | // and a Consumed instance with the correct
36 | // Serdes for the key and value HINT: builder.stream and Serdes.String()
37 | //KStream<String, String> firstStream = null;
38 | KStream<String, String> firstStream = builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.String()));
39 |
40 | firstStream.peek((key, value) -> System.out.println("Incoming record - key " + key + " value " + value))
41 | // filter records by making sure they contain the orderNumberStart variable from above HINT: use filter
42 | .filter((key, value) -> value.contains(orderNumberStart))
43 | // map the value to a new string by removing the orderNumberStart portion HINT: use mapValues
44 | .mapValues(value -> value.substring(value.indexOf("-") + 1))
45 | .filter((key, value) -> Long.parseLong(value) > 1000)
46 | // only forward records where the value is 1000 or greater HINT: use filter and Long.parseLong
47 | .peek((key, value) -> System.out.println("Outgoing record - key " + key + " value " + value))
48 | // Write the results to an output topic defined above as outputTopic HINT: use "to" and Produced and Serdes.String()
49 | .to(outputTopic, Produced.with(Serdes.String(), Serdes.String()));
50 |
51 | try (KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsProps)) {
52 | final CountDownLatch shutdownLatch = new CountDownLatch(1);
53 |
54 | Runtime.getRuntime().addShutdownHook(new Thread(() -> {
55 | kafkaStreams.close(Duration.ofSeconds(2));
56 | shutdownLatch.countDown();
57 | }));
58 | TopicLoader.runProducer();
59 | try {
60 | kafkaStreams.start();
61 | shutdownLatch.await();
62 | } catch (Throwable e) {
63 | System.exit(1);
64 | }
65 | }
66 | System.exit(0);
67 | }
68 | }
69 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/README.md:
--------------------------------------------------------------------------------
1 | # Java: Code Example for Apache Kafka
2 |
3 | - Java 1.8+
4 | - Maven
5 |
6 |
7 | ## Configuration
8 |
9 |
10 | Fetch your Kafka cluster configuration and update the [java.config](java.config) file accordingly (a minimal example is shown below).
11 |
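The file uses the same format as the other Java examples in this repository; a minimal sketch looks like the following. The placeholders are assumptions to be replaced with your own values, and the Schema Registry entries are only needed for the Avro examples:

```
bootstrap.servers={{ BROKER_ENDPOINT }}
security.protocol=SASL_SSL
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='{{ CLUSTER_API_KEY }}' password='{{ CLUSTER_API_SECRET }}';
sasl.mechanism=PLAIN

# Required for the Avro and Schema Registry examples
schema.registry.url=https://{{ SR_ENDPOINT }}
basic.auth.credentials.source=USER_INFO
basic.auth.user.info={{ SR_API_KEY }}:{{ SR_API_SECRET }}
```
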
12 |
13 | ## Compile project
14 |
15 | ```
16 | mvn clean package
17 | ```
18 |
19 |
20 | ## Produce Records
21 |
22 | ```
23 | mvn exec:java -Dexec.mainClass="io.confluent.examples.clients.cloud.ProducerExample" \
24 | -Dexec.args="java.config test1"
25 | ```
26 |
27 | [producer code](src/main/java/io/confluent/examples/clients/cloud/ProducerExample.java)
28 |
29 |
30 | ## Consume Records
31 |
32 | ```
33 | mvn exec:java -Dexec.mainClass="io.confluent.examples.clients.cloud.ConsumerExample" \
34 | -Dexec.args="java.config test1"
35 | ```
36 |
37 | [consumer code](src/main/java/io/confluent/examples/clients/cloud/ConsumerExample.java)
38 |
39 |
40 | ## Kafka Streams
41 |
42 | ```
43 | mvn exec:java -Dexec.mainClass="io.confluent.examples.clients.cloud.StreamsExample" \
44 | -Dexec.args="java.config test1"
45 | ```
46 |
47 | [Kafka Streams code](src/main/java/io/confluent/examples/clients/cloud/StreamsExample.java)
48 |
49 |
50 | ## Avro and Confluent Cloud Schema Registry
51 |
52 | Check your configuration:
53 |
54 | ```
55 | curl -u {{ SR_API_KEY }}:{{ SR_API_SECRET }} https://{{ SR_ENDPOINT }}/subjects
56 | ```
57 |
58 | ### Produce Avro Records
59 |
60 | ```
61 | mvn exec:java -Dexec.mainClass="io.confluent.examples.clients.cloud.ProducerAvroExample" \
62 | -Dexec.args="java.config test2"
63 | ```
64 |
65 | [producer Avro code](src/main/java/io/confluent/examples/clients/cloud/ProducerAvroExample.java)
66 |
67 | ### Consume Avro Records
68 |
69 | ```
70 | mvn exec:java -Dexec.mainClass="io.confluent.examples.clients.cloud.ConsumerAvroExample" \
71 | -Dexec.args="java.config test2"
72 | ```
73 |
74 | [consumer Avro code](src/main/java/io/confluent/examples/clients/cloud/ConsumerAvroExample.java)
75 |
76 | ### Avro Kafka Streams
77 |
78 | ```
79 | mvn exec:java -Dexec.mainClass="io.confluent.examples.clients.cloud.StreamsAvroExample" \
80 | -Dexec.args="java.config test2"
81 | ```
82 |
83 | [Kafka Streams Avro code](src/main/java/io/confluent/examples/clients/cloud/StreamsAvroExample.java)
84 |
85 | ## Schema Evolution with Confluent Cloud Schema Registry
86 |
87 | ```
88 | curl -u {{ SR_API_KEY }}:{{ SR_API_SECRET }} https://{{ SR_ENDPOINT }}/subjects
89 | ```
90 |
91 | View the schema information for subject test2-value.
92 |
93 | ```
94 | curl -u {{ SR_API_KEY }}:{{ SR_API_SECRET }} https://{{ SR_ENDPOINT }}/subjects/test2-value/versions/1
95 | ```
96 |
97 | Verify the schema information for subject test2-value
98 |
99 | ```
100 | {"subject":"test2-value","version":1,"id":100001,"schema":"{\"name\":\"io.confluent.examples.clients.cloud.DataRecordAvro\",\"type\":\"record\",\"fields\":[{\"name\":\"count\",\"type\":\"long\"}]}"}
101 | ```
102 |
103 | For schema evolution, you can test schema compatibility between newer schema versions and older schema versions in Confluent Cloud Schema Registry. The pom.xml hardcodes the Schema Registry subject name to test2-value—change this if you didn’t use topic name test2. Then test local schema compatibility for DataRecordAvro2a.avsc, which should fail, and DataRecordAvro2b.avsc, which should pass.
104 | ```
105 | # DataRecordAvro2a.avsc compatibility test: FAIL
106 | mvn schema-registry:test-compatibility "-DschemaRegistryUrl={{ SCHEMA_REGISTRY_URL }}" "-DschemaRegistryBasicAuthUserInfo={{ SR_API_KEY }}:{{ SR_API_SECRET }}" "-DschemaLocal=src/main/resources/avro/io/confluent/examples/clients/cloud/DataRecordAvro2a.avsc"
107 |
108 | # DataRecordAvro2b.avsc compatibility test: PASS
109 | mvn schema-registry:test-compatibility "-DschemaRegistryUrl={{ SCHEMA_REGISTRY_URL }}" "-DschemaRegistryBasicAuthUserInfo={{ SR_API_KEY }}:{{ SR_API_SECRET }}" "-DschemaLocal=src/main/resources/avro/io/confluent/examples/clients/cloud/DataRecordAvro2b.avsc"
110 | ```
111 |
112 |
113 |
114 |
--------------------------------------------------------------------------------
/kafka-c-getting-started/producer.c:
--------------------------------------------------------------------------------
1 | #include <glib.h>
2 | #include <librdkafka/rdkafka.h>
3 |
4 | #include "common.c"
5 |
6 | #define ARR_SIZE(arr) ( sizeof((arr)) / sizeof((arr[0])) )
7 |
8 | /* Optional per-message delivery callback (triggered by poll() or flush())
9 | * when a message has been successfully delivered or permanently
10 | * failed delivery (after retries).
11 | */
12 | static void dr_msg_cb (rd_kafka_t *kafka_handle,
13 | const rd_kafka_message_t *rkmessage,
14 | void *opaque) {
15 | if (rkmessage->err) {
16 | g_error("Message delivery failed: %s", rd_kafka_err2str(rkmessage->err));
17 | }
18 | }
19 |
20 | int main (int argc, char **argv) {
21 | rd_kafka_t *producer;
22 | rd_kafka_conf_t *conf;
23 | char errstr[512];
24 |
25 | // Parse the command line.
26 | if (argc != 2) {
27 | g_error("Usage: %s ", argv[0]);
28 | return 1;
29 | }
30 |
31 | // Parse the configuration.
32 | // See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
33 | const char *config_file = argv[1];
34 |
35 | g_autoptr(GError) error = NULL;
36 | g_autoptr(GKeyFile) key_file = g_key_file_new();
37 | if (!g_key_file_load_from_file (key_file, config_file, G_KEY_FILE_NONE, &error)) {
38 | g_error ("Error loading config file: %s", error->message);
39 | return 1;
40 | }
41 |
42 | // Load the relevant configuration sections.
43 | conf = rd_kafka_conf_new();
44 | load_config_group(conf, key_file, "default");
45 |
46 | // Install a delivery-error callback.
47 | rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
48 |
49 | // Create the Producer instance.
50 | producer = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
51 | if (!producer) {
52 | g_error("Failed to create new producer: %s", errstr);
53 | return 1;
54 | }
55 |
56 | // Configuration object is now owned, and freed, by the rd_kafka_t instance.
57 | conf = NULL;
58 |
59 | // Produce data by selecting random values from these lists.
60 | int message_count = 10;
61 | const char *topic = "purchases";
62 | const char *user_ids[6] = {"eabara", "jsmith", "sgarcia", "jbernard", "htanaka", "awalther"};
63 | const char *products[5] = {"book", "alarm clock", "t-shirts", "gift card", "batteries"};
64 |
65 | for (int i = 0; i < message_count; i++) {
66 | const char *key = user_ids[random() % ARR_SIZE(user_ids)];
67 | const char *value = products[random() % ARR_SIZE(products)];
68 | size_t key_len = strlen(key);
69 | size_t value_len = strlen(value);
70 |
71 | rd_kafka_resp_err_t err;
72 |
73 | err = rd_kafka_producev(producer,
74 | RD_KAFKA_V_TOPIC(topic),
75 | RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
76 | RD_KAFKA_V_KEY((void*)key, key_len),
77 | RD_KAFKA_V_VALUE((void*)value, value_len),
78 | RD_KAFKA_V_OPAQUE(NULL),
79 | RD_KAFKA_V_END);
80 |
81 | if (err) {
82 | g_error("Failed to produce to topic %s: %s", topic, rd_kafka_err2str(err));
83 | return 1;
84 | } else {
85 | g_message("Produced event to topic %s: key = %12s value = %12s", topic, key, value);
86 | }
87 |
88 | rd_kafka_poll(producer, 0);
89 | }
90 |
91 | // Block until the messages are all sent.
92 | g_message("Flushing final messages..");
93 | rd_kafka_flush(producer, 10 * 1000);
94 |
95 | if (rd_kafka_outq_len(producer) > 0) {
96 | g_error("%d message(s) were not delivered", rd_kafka_outq_len(producer));
97 | }
98 |
99 | g_message("%d events were produced to topic %s.", message_count, topic);
100 |
101 | rd_kafka_destroy(producer);
102 |
103 | return 0;
104 | }
105 |
--------------------------------------------------------------------------------
/kafka-c-getting-started/consumer.c:
--------------------------------------------------------------------------------
1 | #include <glib.h>
2 | #include <librdkafka/rdkafka.h>
3 |
4 | #include "common.c"
5 |
6 | static volatile sig_atomic_t run = 1;
7 |
8 | /**
9 | * @brief Signal termination of program
10 | */
11 | static void stop(int sig) {
12 | run = 0;
13 | }
14 |
15 | int main (int argc, char **argv) {
16 | rd_kafka_t *consumer;
17 | rd_kafka_conf_t *conf;
18 | rd_kafka_resp_err_t err;
19 | char errstr[512];
20 |
21 | // Parse the command line.
22 | if (argc != 2) {
23 | g_error("Usage: %s ", argv[0]);
24 | return 1;
25 | }
26 |
27 | // Parse the configuration.
28 | // See https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
29 | const char *config_file = argv[1];
30 |
31 | g_autoptr(GError) error = NULL;
32 | g_autoptr(GKeyFile) key_file = g_key_file_new();
33 | if (!g_key_file_load_from_file (key_file, config_file, G_KEY_FILE_NONE, &error)) {
34 | g_error ("Error loading config file: %s", error->message);
35 | return 1;
36 | }
37 |
38 | // Load the relevant configuration sections.
39 | conf = rd_kafka_conf_new();
40 | load_config_group(conf, key_file, "default");
41 | load_config_group(conf, key_file, "consumer");
42 |
43 | // Create the Consumer instance.
44 | consumer = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
45 | if (!consumer) {
46 | g_error("Failed to create new consumer: %s", errstr);
47 | return 1;
48 | }
49 | rd_kafka_poll_set_consumer(consumer);
50 |
51 | // Configuration object is now owned, and freed, by the rd_kafka_t instance.
52 | conf = NULL;
53 |
54 | // Convert the list of topics to a format suitable for librdkafka.
55 | const char *topic = "purchases";
56 | rd_kafka_topic_partition_list_t *subscription = rd_kafka_topic_partition_list_new(1);
57 | rd_kafka_topic_partition_list_add(subscription, topic, RD_KAFKA_PARTITION_UA);
58 |
59 | // Subscribe to the list of topics.
60 | err = rd_kafka_subscribe(consumer, subscription);
61 | if (err) {
62 | g_error("Failed to subscribe to %d topics: %s", subscription->cnt, rd_kafka_err2str(err));
63 | rd_kafka_topic_partition_list_destroy(subscription);
64 | rd_kafka_destroy(consumer);
65 | return 1;
66 | }
67 |
68 | rd_kafka_topic_partition_list_destroy(subscription);
69 |
70 | // Install a signal handler for clean shutdown.
71 | signal(SIGINT, stop);
72 |
73 | // Start polling for messages.
74 | while (run) {
75 | rd_kafka_message_t *consumer_message;
76 |
77 | consumer_message = rd_kafka_consumer_poll(consumer, 500);
78 | if (!consumer_message) {
79 | g_message("Waiting...");
80 | continue;
81 | }
82 |
83 | if (consumer_message->err) {
84 | if (consumer_message->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
85 | /* We can ignore this error - it just means we've read
86 | * everything and are waiting for more data.
87 | */
88 | } else {
89 | g_message("Consumer error: %s", rd_kafka_message_errstr(consumer_message));
90 | return 1;
91 | }
92 | } else {
93 | g_message("Consumed event from topic %s: key = %.*s value = %s",
94 | rd_kafka_topic_name(consumer_message->rkt),
95 | (int)consumer_message->key_len,
96 | (char *)consumer_message->key,
97 | (char *)consumer_message->payload
98 | );
99 | }
100 |
101 | // Free the message when we're done.
102 | rd_kafka_message_destroy(consumer_message);
103 | }
104 |
105 | // Close the consumer: commit final offsets and leave the group.
106 | g_message( "Closing consumer");
107 | rd_kafka_consumer_close(consumer);
108 |
109 | // Destroy the consumer.
110 | rd_kafka_destroy(consumer);
111 |
112 | return 0;
113 | }
114 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/src/main/java/io/confluent/examples/clients/cloud/StreamsAvroExample.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2020 Confluent Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package io.confluent.examples.clients.cloud;
18 |
19 | import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde;
20 | import org.apache.kafka.clients.consumer.ConsumerConfig;
21 | import org.apache.kafka.common.serialization.Serde;
22 | import org.apache.kafka.common.serialization.Serdes;
23 | import org.apache.kafka.streams.KafkaStreams;
24 | import org.apache.kafka.streams.KeyValue;
25 | import org.apache.kafka.streams.StreamsBuilder;
26 | import org.apache.kafka.streams.StreamsConfig;
27 | import org.apache.kafka.streams.kstream.Consumed;
28 | import org.apache.kafka.streams.kstream.Grouped;
29 | import org.apache.kafka.streams.kstream.KStream;
30 | import org.apache.kafka.streams.kstream.Printed;
31 |
32 | import java.util.Map;
33 | import java.util.Properties;
34 |
35 | import static io.confluent.examples.clients.cloud.Util.loadConfig;
36 |
37 | public class StreamsAvroExample {
38 |
39 | public static void main(String[] args) throws Exception {
40 |
41 | if (args.length != 2) {
42 | System.out.println("Please provide command line arguments: configPath topic");
43 | System.exit(1);
44 | }
45 |
46 | final String topic = args[1];
47 |
48 | // Load properties from a local configuration file
49 | // Create the configuration file (e.g. at '$HOME/.confluent/java.config') with configuration parameters
50 | // to connect to your Kafka cluster, which can be on your local host, Confluent Cloud, or any other cluster.
51 | // Follow these instructions to create this file: https://docs.confluent.io/platform/current/tutorials/examples/clients/docs/java.html
52 | final Properties props = loadConfig(args[0]);
53 |
54 | // Add additional properties.
55 | props.put(StreamsConfig.APPLICATION_ID_CONFIG, "demo-streams-avro-1");
56 | // Disable caching to print the aggregation value after each record
57 | props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
58 | props.put(StreamsConfig.REPLICATION_FACTOR_CONFIG, "-1");
59 | props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
60 |
61 | final Serde<DataRecordAvro> dataRecordAvroSerde = new SpecificAvroSerde<>();
62 | final boolean isKeySerde = false;
63 | Map<String, Object> SRconfig = (Map) props;
64 | dataRecordAvroSerde.configure(
65 | SRconfig,
66 | isKeySerde);
67 |
68 | final StreamsBuilder builder = new StreamsBuilder();
69 | final KStream<String, DataRecordAvro> records = builder.stream(topic, Consumed.with(Serdes.String(), dataRecordAvroSerde));
70 |
71 | KStream<String, Long> counts = records.map((k, v) -> new KeyValue<>(k, v.getCount()));
72 | counts.print(Printed.toSysOut().withLabel("Consumed record"));
73 |
74 | // Aggregate values by key
75 | KStream<String, Long> countAgg = counts.groupByKey(Grouped.with(Serdes.String(), Serdes.Long()))
76 | .reduce(
77 | (aggValue, newValue) -> aggValue + newValue)
78 | .toStream();
79 | countAgg.print(Printed.toSysOut().withLabel("Running count"));
80 |
81 | KafkaStreams streams = new KafkaStreams(builder.build(), props);
82 | streams.start();
83 |
84 | // Add shutdown hook to respond to SIGTERM and gracefully close Kafka Streams
85 | Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
86 |
87 | }
88 | }
89 |
--------------------------------------------------------------------------------
/kafka-python-application/producer_ccsr.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Copyright 2020 Confluent Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 |
18 | # =============================================================================
19 | #
20 | # Produce messages to Confluent Cloud
21 | # Using Confluent Python Client for Apache Kafka
22 | # Writes Avro data, integration with Confluent Cloud Schema Registry
23 | #
24 | # =============================================================================
25 | from confluent_kafka import SerializingProducer
26 | from confluent_kafka.serialization import StringSerializer
27 | from confluent_kafka.schema_registry import SchemaRegistryClient
28 | from confluent_kafka.schema_registry.avro import AvroSerializer
29 |
30 | import json
31 | import ccloud_lib
32 |
33 | if __name__ == '__main__':
34 |
35 | # Read arguments and configurations and initialize
36 | args = ccloud_lib.parse_args()
37 | config_file = args.config_file
38 | topic = args.topic
39 | conf = ccloud_lib.read_ccloud_config(config_file)
40 |
41 | # Create topic if needed
42 | ccloud_lib.create_topic(conf, topic)
43 |
44 | # for full list of configurations, see:
45 | # https://docs.confluent.io/platform/current/clients/confluent-kafka-python/#schemaregistryclient
46 | schema_registry_conf = {
47 | 'url': conf['schema.registry.url'],
48 | 'basic.auth.user.info': conf['basic.auth.user.info']}
49 |
50 | schema_registry_client = SchemaRegistryClient(schema_registry_conf)
51 |
52 | name_avro_serializer = AvroSerializer(schema_registry_client = schema_registry_client,
53 | schema_str = ccloud_lib.name_schema,
54 | to_dict = ccloud_lib.Name.name_to_dict)
55 | count_avro_serializer = AvroSerializer(schema_registry_client = schema_registry_client,
56 | schema_str = ccloud_lib.count_schema,
57 | to_dict = ccloud_lib.Count.count_to_dict)
58 |
59 | # for full list of configurations, see:
60 | # https://docs.confluent.io/platform/current/clients/confluent-kafka-python/#serializingproducer
61 | producer_conf = ccloud_lib.pop_schema_registry_params_from_config(conf)
62 | producer_conf['key.serializer'] = name_avro_serializer
63 | producer_conf['value.serializer'] = count_avro_serializer
64 | producer = SerializingProducer(producer_conf)
65 |
66 | delivered_records = 0
67 |
68 | # Optional per-message on_delivery handler (triggered by poll() or flush())
69 | # when a message has been successfully delivered or
70 | # permanently failed delivery (after retries).
71 | def acked(err, msg):
72 | global delivered_records
73 | """Delivery report handler called on
74 | successful or failed delivery of message
75 | """
76 | if err is not None:
77 | print("Failed to deliver message: {}".format(err))
78 | else:
79 | delivered_records += 1
80 | print("Produced record to topic {} partition [{}] @ offset {}"
81 | .format(msg.topic(), msg.partition(), msg.offset()))
82 |
83 | for n in range(10):
84 | name_object = ccloud_lib.Name()
85 | name_object.name = "alice"
86 | count_object = ccloud_lib.Count()
87 | count_object.count = n
88 | print("Producing Avro record: {}\t{}".format(name_object.name, count_object.count))
89 | producer.produce(topic=topic, key=name_object, value=count_object, on_delivery=acked)
90 | producer.poll(0)
91 |
92 | producer.flush()
93 |
94 | print("{} messages were produced to topic {}!".format(delivered_records, topic))
95 |
--------------------------------------------------------------------------------
/kafka-producer-application/src/main/java/io/confluent/developer/KafkaProducerApplication.java:
--------------------------------------------------------------------------------
1 | package io.confluent.developer;
2 |
3 |
4 | import org.apache.kafka.clients.producer.KafkaProducer;
5 | import org.apache.kafka.clients.producer.Producer;
6 | import org.apache.kafka.clients.producer.ProducerRecord;
7 | import org.apache.kafka.clients.producer.RecordMetadata;
8 |
9 | import java.io.FileInputStream;
10 | import java.io.IOException;
11 | import java.nio.file.Files;
12 | import java.nio.file.Paths;
13 | import java.util.Collection;
14 | import java.util.List;
15 | import java.util.Properties;
16 | import java.util.concurrent.ExecutionException;
17 | import java.util.concurrent.Future;
18 | import java.util.stream.Collectors;
19 |
20 | public class KafkaProducerApplication {
21 |
22 | private final Producer<String, String> producer;
23 | final String outTopic;
24 |
25 | public KafkaProducerApplication(final Producer<String, String> producer,
26 | final String topic) {
27 | this.producer = producer;
28 | outTopic = topic;
29 | }
30 |
31 | public Future<RecordMetadata> produce(final String message) {
32 | final String[] parts = message.split("-");
33 | final String key, value;
34 | if (parts.length > 1) {
35 | key = parts[0];
36 | value = parts[1];
37 | } else {
38 | key = null;
39 | value = parts[0];
40 | }
41 | final ProducerRecord<String, String> producerRecord = new ProducerRecord<>(outTopic, key, value);
42 | return producer.send(producerRecord);
43 | }
44 |
45 | public void shutdown() {
46 | producer.close();
47 | }
48 |
49 | public static Properties loadProperties(String fileName) throws IOException {
50 | final Properties envProps = new Properties();
51 | final FileInputStream input = new FileInputStream(fileName);
52 | envProps.load(input);
53 | input.close();
54 |
55 | return envProps;
56 | }
57 |
58 | public void printMetadata(final Collection<Future<RecordMetadata>> metadata,
59 | final String fileName) {
60 | System.out.println("Offsets and timestamps committed in batch from " + fileName);
61 | metadata.forEach(m -> {
62 | try {
63 | final RecordMetadata recordMetadata = m.get();
64 | System.out.println("Record written to offset " + recordMetadata.offset() + " timestamp " + recordMetadata.timestamp());
65 | } catch (InterruptedException | ExecutionException e) {
66 | if (e instanceof InterruptedException) {
67 | Thread.currentThread().interrupt();
68 | }
69 | }
70 | });
71 | }
72 |
73 | public static void main(String[] args) throws Exception {
74 | if (args.length < 2) {
75 | throw new IllegalArgumentException(
76 | "This program takes two arguments: the path to an environment configuration file and" +
77 | "the path to the file with records to send");
78 | }
79 |
80 | final Properties props = KafkaProducerApplication.loadProperties(args[0]);
81 | final String topic = props.getProperty("output.topic.name");
82 | final Producer<String, String> producer = new KafkaProducer<>(props);
83 | final KafkaProducerApplication producerApp = new KafkaProducerApplication(producer, topic);
84 |
85 | String filePath = args[1];
86 | try {
87 | List<String> linesToProduce = Files.readAllLines(Paths.get(filePath));
88 | List<Future<RecordMetadata>> metadata = linesToProduce.stream()
89 | .filter(l -> !l.trim().isEmpty())
90 | .map(producerApp::produce)
91 | .collect(Collectors.toList());
92 | producerApp.printMetadata(metadata, filePath);
93 |
94 | } catch (IOException e) {
95 | System.err.printf("Error reading file %s due to %s %n", filePath, e);
96 | }
97 | finally {
98 | producerApp.shutdown();
99 | }
100 | }
101 | }
102 |
103 |
--------------------------------------------------------------------------------
/kafka-python-application/consumer_ccsr.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # Copyright 2020 Confluent Inc.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 |
18 | # =============================================================================
19 | #
20 | # Consume messages from Confluent Cloud
21 | # Using Confluent Python Client for Apache Kafka
22 | # Reads Avro data, integration with Confluent Cloud Schema Registry
23 | #
24 | # =============================================================================
25 |
26 | from confluent_kafka import DeserializingConsumer
27 | from confluent_kafka.schema_registry import SchemaRegistryClient
28 | from confluent_kafka.schema_registry.avro import AvroDeserializer
29 | from confluent_kafka.serialization import StringDeserializer
30 |
31 | import json
32 | import ccloud_lib
33 |
34 |
35 | if __name__ == '__main__':
36 |
37 | # Read arguments and configurations and initialize
38 | args = ccloud_lib.parse_args()
39 | config_file = args.config_file
40 | topic = args.topic
41 | conf = ccloud_lib.read_ccloud_config(config_file)
42 |
43 | schema_registry_conf = {
44 | 'url': conf['schema.registry.url'],
45 | 'basic.auth.user.info': conf['basic.auth.user.info']}
46 | schema_registry_client = SchemaRegistryClient(schema_registry_conf)
47 |
48 | name_avro_deserializer = AvroDeserializer(schema_registry_client = schema_registry_client,
49 | schema_str = ccloud_lib.name_schema,
50 | from_dict = ccloud_lib.Name.dict_to_name)
51 | count_avro_deserializer = AvroDeserializer(schema_registry_client = schema_registry_client,
52 | schema_str = ccloud_lib.count_schema,
53 | from_dict = ccloud_lib.Count.dict_to_count)
54 |
55 | # for full list of configurations, see:
56 | # https://docs.confluent.io/platform/current/clients/confluent-kafka-python/#deserializingconsumer
57 | consumer_conf = ccloud_lib.pop_schema_registry_params_from_config(conf)
58 | consumer_conf['key.deserializer'] = name_avro_deserializer
59 | consumer_conf['value.deserializer'] = count_avro_deserializer
60 | consumer_conf['group.id'] = 'python_example_group_2'
61 | consumer_conf['auto.offset.reset'] = 'earliest'
62 | consumer = DeserializingConsumer(consumer_conf)
63 |
64 | # Subscribe to topic
65 | consumer.subscribe([topic])
66 |
67 | # Process messages
68 | total_count = 0
69 | while True:
70 | try:
71 | msg = consumer.poll(1.0)
72 | if msg is None:
73 | # No message available within timeout.
74 | # Initial message consumption may take up to
75 | # `session.timeout.ms` for the consumer group to
76 | # rebalance and start consuming
77 | print("Waiting for message or event/error in poll()")
78 | continue
79 | elif msg.error():
80 | print('error: {}'.format(msg.error()))
81 | else:
82 | name_object = msg.key()
83 | count_object = msg.value()
84 | count = count_object.count
85 | total_count += count
86 | print("Consumed record with key {} and value {}, \
87 | and updated total count to {}"
88 | .format(name_object.name, count, total_count))
89 | except KeyboardInterrupt:
90 | break
91 | except SerializerError as e:
92 | # Report malformed record, discard results, continue polling
93 | print("Message deserialization failed {}".format(e))
94 | pass
95 |
96 | # Leave group and commit final offsets
97 | consumer.close()
98 |
--------------------------------------------------------------------------------
/kafka-producer-application/README.md:
--------------------------------------------------------------------------------
1 | # Java Kafka producer application
2 |
3 |
4 | ## Create Cluster Config Information file
5 |
6 | Rename the [ccloud-template.properties](configuration/ccloud-template.properties) file to ccloud.properties in the configuration directory.
7 |
8 | Replace the values below with your own (a rough sketch of the resulting file follows the list):
9 |
10 | - {{ CLUSTER_API_KEY }}
11 | - {{ CLUSTER_API_SECRET }}
12 | - {{ SR_URL }}
13 | - {{ SR_API_KEY }}
14 | - {{ SR_API_SECRET }}
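
As a rough sketch only (the template file in the configuration directory is authoritative, and the bootstrap server placeholder below is an assumption not listed above), a Confluent Cloud client configuration for a Java client typically looks like this:

```
# Kafka cluster connection (all values are placeholders)
bootstrap.servers={{ BOOTSTRAP_SERVERS }}
security.protocol=SASL_SSL
sasl.mechanism=PLAIN
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='{{ CLUSTER_API_KEY }}' password='{{ CLUSTER_API_SECRET }}';

# Confluent Cloud Schema Registry
schema.registry.url={{ SR_URL }}
basic.auth.credentials.source=USER_INFO
basic.auth.user.info={{ SR_API_KEY }}:{{ SR_API_SECRET }}
```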
15 |
16 |
17 | ## Create a new topic
18 |
19 | Create a new topic named "output-topic".
20 |
21 |
22 | ## Configure a gradle java project
23 |
24 | ### Create a gradle build file
25 |
26 | Add a new Gradle build file named [build.gradle](build.gradle).
27 |
28 |
29 | Then run this command to obtain the Gradle wrapper:
30 | ```
31 | gradle wrapper
32 | ```
33 |
34 | ### Create new development config file
35 |
36 | Rename [dev-template.properties](configuration/dev-template.properties) to dev.properties. The file defines the following producer settings:
37 |
38 |
39 | - key.serializer - The serializer the KafkaProducer will use to serialize the key.
40 |
41 | - value.serializer - The serializer the KafkaProducer will use to serialize the value.
42 |
43 | - acks - The KafkaProducer uses the acks configuration to tell the lead broker how many acknowledgments to wait for before considering a produce request complete. Acceptable values for acks are: 0, 1 (the default), -1, or all. Setting acks to -1 is the same as setting it to all.
44 |
45 |   - acks=0: "fire and forget"; once the producer sends the record batch, it is considered successful.
46 |
47 |   - acks=1: the leader broker has added the record to its local log but did not wait for any acknowledgment from the followers.
48 |
49 |   - acks=all: the highest data durability guarantee; the leader broker has persisted the record to its log and received acknowledgment of replication from all in-sync replicas. When using acks=all, it is strongly recommended to update min.insync.replicas as well.
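
Putting these together, here is a minimal sketch of what configuration/dev.properties might contain at this point (output.topic.name matches the property read by KafkaProducerApplication.java; the serializer choices are assumptions based on this tutorial):

```
key.serializer=org.apache.kafka.common.serialization.StringSerializer
value.serializer=org.apache.kafka.common.serialization.StringSerializer
acks=all
output.topic.name=output-topic
```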
50 |
51 |
52 |
53 | ### Update the properties file
54 |
55 | Append the ccloud.properties configuration to the dev.properties file:
56 |
57 | ```
58 | cat configuration/ccloud.properties >> configuration/dev.properties
59 |
60 | ```
61 |
62 | ### Create the Java Kafka producer application
63 |
64 | ```
65 | mkdir -p src/main/java/io/confluent/developer
66 |
67 | ```
68 |
69 | Add a new Java class file [KafkaProducerApplication.java](src/main/java/io/confluent/developer/KafkaProducerApplication.java).
70 |
71 |
72 | ### Create data to produce to Kafka
73 |
74 | Create an [input.txt](input.txt) file containing the records to send.
75 |
76 |
77 |
78 | ### Compile and run this application
79 |
80 | ```
81 | ./gradlew shadowJar
82 | ```
83 |
84 | ```
85 | java -jar build/libs/kafka-producer-application-standalone-0.0.1.jar configuration/dev.properties input.txt
86 | ```
87 |
88 | ### Create a test configuration file
89 |
90 | Add a file named [test.properties](configuration/test.properties).
91 |
92 | ### Write a unit test
93 |
94 | ```
95 | mkdir -p src/test/java/io/confluent/developer
96 |
97 | ```
98 |
99 | Add a new test Java class file [KafkaProducerApplicationTest.java](src/test/java/io/confluent/developer/KafkaProducerApplicationTest.java). A rough sketch of such a test follows.
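
As a hedged sketch only (the real test ships with the project; the class name, JUnit 4 assertions, and the kafka-clients test artifact used below are assumptions), a unit test for the produce method can use Kafka's MockProducer so that no broker is required:

```
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import org.junit.Test;

import java.util.List;

import static org.junit.Assert.assertEquals;

public class KafkaProducerApplicationSketchTest {

  @Test
  public void produceSplitsKeyAndValue() {
    // MockProducer records sends in memory instead of contacting a broker.
    final MockProducer<String, String> mockProducer =
        new MockProducer<>(true, new StringSerializer(), new StringSerializer());
    final KafkaProducerApplication app = new KafkaProducerApplication(mockProducer, "output-topic");

    app.produce("alice-banana");

    final List<ProducerRecord<String, String>> history = mockProducer.history();
    assertEquals(1, history.size());
    assertEquals("alice", history.get(0).key());
    assertEquals("banana", history.get(0).value());
  }
}
```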
100 |
101 | >The KafkaProducer.send method is asynchronous and returns as soon as the provided record is placed in the buffer of records to be sent to the broker. Once the broker acknowledges that the record has been appended to its log, the broker completes the produce request, which the application receives as RecordMetadata—information about the committed message. This tutorial prints the timestamp and offset for each record sent using the RecordMetadata object. Note that calling Future.get() for any record will block until the produce request completes.
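
To make the asynchronous/synchronous distinction concrete, here is a minimal, self-contained sketch (the class name, topic, and config path are illustrative assumptions; loadProperties is the helper defined in KafkaProducerApplication.java):

```
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Properties;
import java.util.concurrent.Future;

public class SyncSendSketch {
  public static void main(String[] args) throws Exception {
    Properties props = KafkaProducerApplication.loadProperties("configuration/dev.properties");
    try (Producer<String, String> producer = new KafkaProducer<>(props)) {
      // Asynchronous: send returns as soon as the record is buffered.
      Future<RecordMetadata> future =
          producer.send(new ProducerRecord<>("output-topic", "alice", "banana"));
      // Synchronous: get() blocks until the broker completes the produce request.
      RecordMetadata metadata = future.get();
      System.out.println("offset=" + metadata.offset() + " timestamp=" + metadata.timestamp());
    }
  }
}
```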
102 |
103 |
104 | ### Run the test
105 |
106 | ```
107 | ./gradlew test
108 | ```
109 |
110 | ## Test it with a production configuration
111 |
112 | Rename [prod-template.properties](configuration/prod-template.properties) to prod.properties.
113 |
114 |
115 | ### Build a docker image
116 |
117 | ```
118 | gradle jibDockerBuild --image=io.confluent.developer/kafka-producer-application-join:0.0.1
119 |
120 | ```
121 |
122 | ### Launch a container
123 |
124 | ```
125 | docker run -v $PWD/configuration/prod.properties:/config.properties io.confluent.developer/kafka-producer-application-join:0.0.1 config.properties
126 | ```
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/src/main/java/io/confluent/examples/clients/cloud/ProducerAvroExample.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2020 Confluent Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.confluent.examples.clients.cloud;
17 |
18 | import io.confluent.kafka.serializers.KafkaAvroSerializer;
19 | import org.apache.kafka.clients.admin.AdminClient;
20 | import org.apache.kafka.clients.admin.NewTopic;
21 | import org.apache.kafka.clients.producer.Callback;
22 | import org.apache.kafka.clients.producer.KafkaProducer;
23 | import org.apache.kafka.clients.producer.Producer;
24 | import org.apache.kafka.clients.producer.ProducerConfig;
25 | import org.apache.kafka.clients.producer.ProducerRecord;
26 | import org.apache.kafka.clients.producer.RecordMetadata;
27 | import org.apache.kafka.common.errors.TopicExistsException;
28 |
29 | import java.io.IOException;
30 | import java.util.Collections;
31 | import java.util.Optional;
32 | import java.util.Properties;
33 | import java.util.concurrent.ExecutionException;
34 |
35 | import static io.confluent.examples.clients.cloud.Util.loadConfig;
36 |
37 | public class ProducerAvroExample {
38 |
39 | // Create topic in Confluent Cloud
40 | public static void createTopic(final String topic,
41 | final Properties cloudConfig) {
42 | final NewTopic newTopic = new NewTopic(topic, Optional.empty(), Optional.empty());
43 | try (final AdminClient adminClient = AdminClient.create(cloudConfig)) {
44 | adminClient.createTopics(Collections.singletonList(newTopic)).all().get();
45 | } catch (final InterruptedException | ExecutionException e) {
46 | // Ignore if TopicExistsException, which may be valid if topic exists
47 | if (!(e.getCause() instanceof TopicExistsException)) {
48 | throw new RuntimeException(e);
49 | }
50 | }
51 | }
52 |
53 | public static void main(final String[] args) throws IOException {
54 | if (args.length != 2) {
55 | System.out.println("Please provide command line arguments: configPath topic");
56 | System.exit(1);
57 | }
58 |
59 | // Load properties from a local configuration file
60 | // Create the configuration file (e.g. at '$HOME/.confluent/java.config') with configuration parameters
61 | // to connect to your Kafka cluster, which can be on your local host, Confluent Cloud, or any other cluster.
62 | // Follow these instructions to create this file: https://docs.confluent.io/platform/current/tutorials/examples/clients/docs/java.html
63 | final Properties props = loadConfig(args[0]);
64 |
65 | // Create topic if needed
66 | final String topic = args[1];
67 | createTopic(topic, props);
68 |
69 | // Add additional properties.
70 | props.put(ProducerConfig.ACKS_CONFIG, "all");
71 | props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
72 | props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class);
73 |
74 | Producer<String, DataRecordAvro> producer = new KafkaProducer<>(props);
75 |
76 | // Produce sample data
77 | final Long numMessages = 10L;
78 | for (Long i = 0L; i < numMessages; i++) {
79 | String key = "alice";
80 | DataRecordAvro record = new DataRecordAvro(i);
81 |
82 | System.out.printf("Producing record: %s\t%s%n", key, record);
83 | producer.send(new ProducerRecord<>(topic, key, record), new Callback() {
84 | @Override
85 | public void onCompletion(RecordMetadata m, Exception e) {
86 | if (e != null) {
87 | e.printStackTrace();
88 | } else {
89 | System.out.printf("Produced record to topic %s partition [%d] @ offset %d%n", m.topic(), m.partition(), m.offset());
90 | }
91 | }
92 | });
93 | }
94 |
95 | producer.flush();
96 |
97 | System.out.printf("10 messages were produced to topic %s%n", topic);
98 |
99 | producer.close();
100 | }
101 | }
102 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/src/main/java/io/confluent/examples/clients/cloud/ProducerExample.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright 2020 Confluent Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.confluent.examples.clients.cloud;
17 |
18 | import io.confluent.examples.clients.cloud.model.DataRecord;
19 | import org.apache.kafka.clients.admin.AdminClient;
20 | import org.apache.kafka.clients.admin.NewTopic;
21 | import org.apache.kafka.clients.producer.Callback;
22 | import org.apache.kafka.clients.producer.KafkaProducer;
23 | import org.apache.kafka.clients.producer.Producer;
24 | import org.apache.kafka.clients.producer.ProducerConfig;
25 | import org.apache.kafka.clients.producer.ProducerRecord;
26 | import org.apache.kafka.clients.producer.RecordMetadata;
27 | import org.apache.kafka.common.errors.TopicExistsException;
28 |
29 | import java.io.IOException;
30 | import java.util.Collections;
31 | import java.util.Optional;
32 | import java.util.Properties;
33 | import java.util.concurrent.ExecutionException;
34 |
35 | import static io.confluent.examples.clients.cloud.Util.loadConfig;
36 |
37 | public class ProducerExample {
38 |
39 | // Create topic in Confluent Cloud
40 | public static void createTopic(final String topic,
41 | final Properties cloudConfig) {
42 | final NewTopic newTopic = new NewTopic(topic, Optional.empty(), Optional.empty());
43 | try (final AdminClient adminClient = AdminClient.create(cloudConfig)) {
44 | adminClient.createTopics(Collections.singletonList(newTopic)).all().get();
45 | } catch (final InterruptedException | ExecutionException e) {
46 | // Ignore if TopicExistsException, which may be valid if topic exists
47 | if (!(e.getCause() instanceof TopicExistsException)) {
48 | throw new RuntimeException(e);
49 | }
50 | }
51 | }
52 |
53 | public static void main(final String[] args) throws IOException {
54 | if (args.length != 2) {
55 | System.out.println("Please provide command line arguments: configPath topic");
56 | System.exit(1);
57 | }
58 |
59 | // Load properties from a local configuration file
60 | // Create the configuration file (e.g. at '$HOME/.confluent/java.config') with configuration parameters
61 | // to connect to your Kafka cluster, which can be on your local host, Confluent Cloud, or any other cluster.
62 | // Follow these instructions to create this file: https://docs.confluent.io/platform/current/tutorials/examples/clients/docs/java.html
63 | final Properties props = loadConfig(args[0]);
64 |
65 | // Create topic if needed
66 | final String topic = args[1];
67 | createTopic(topic, props);
68 |
69 | // Add additional properties.
70 | props.put(ProducerConfig.ACKS_CONFIG, "all");
71 | props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
72 | props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "io.confluent.kafka.serializers.KafkaJsonSerializer");
73 |
74 | Producer<String, DataRecord> producer = new KafkaProducer<>(props);
75 |
76 | // Produce sample data
77 | final Long numMessages = 10L;
78 | for (Long i = 0L; i < numMessages; i++) {
79 | String key = "alice";
80 | DataRecord record = new DataRecord(i);
81 |
82 | System.out.printf("Producing record: %s\t%s%n", key, record);
83 | producer.send(new ProducerRecord<>(topic, key, record), new Callback() {
84 | @Override
85 | public void onCompletion(RecordMetadata m, Exception e) {
86 | if (e != null) {
87 | e.printStackTrace();
88 | } else {
89 | System.out.printf("Produced record to topic %s partition [%d] @ offset %d%n", m.topic(), m.partition(), m.offset());
90 | }
91 | }
92 | });
93 | }
94 |
95 | producer.flush();
96 |
97 | System.out.printf("10 messages were produced to topic %s%n", topic);
98 |
99 | producer.close();
100 | }
101 | }
102 |
--------------------------------------------------------------------------------
/kafka-java-maven-application/src/main/java/io/confluent/examples/clients/cloud/StreamsExample.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2020 Confluent Inc.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 |
17 | package io.confluent.examples.clients.cloud;
18 |
19 | import io.confluent.examples.clients.cloud.model.DataRecord;
20 | import io.confluent.kafka.serializers.KafkaJsonDeserializer;
21 | import io.confluent.kafka.serializers.KafkaJsonSerializer;
22 | import org.apache.kafka.clients.consumer.ConsumerConfig;
23 | import org.apache.kafka.common.serialization.Deserializer;
24 | import org.apache.kafka.common.serialization.Serde;
25 | import org.apache.kafka.common.serialization.Serdes;
26 | import org.apache.kafka.common.serialization.Serializer;
27 | import org.apache.kafka.streams.KafkaStreams;
28 | import org.apache.kafka.streams.KeyValue;
29 | import org.apache.kafka.streams.StreamsBuilder;
30 | import org.apache.kafka.streams.StreamsConfig;
31 | import org.apache.kafka.streams.kstream.Consumed;
32 | import org.apache.kafka.streams.kstream.Grouped;
33 | import org.apache.kafka.streams.kstream.KStream;
34 | import org.apache.kafka.streams.kstream.Printed;
35 |
36 | import java.util.HashMap;
37 | import java.util.Map;
38 | import java.util.Properties;
39 |
40 | import static io.confluent.examples.clients.cloud.Util.loadConfig;
41 |
42 | public class StreamsExample {
43 |
44 | public static void main(String[] args) throws Exception {
45 |
46 | if (args.length != 2) {
47 | System.out.println("Please provide command line arguments: configPath topic");
48 | System.exit(1);
49 | }
50 |
51 | final String topic = args[1];
52 |
53 | // Load properties from a local configuration file
54 | // Create the configuration file (e.g. at '$HOME/.confluent/java.config') with configuration parameters
55 | // to connect to your Kafka cluster, which can be on your local host, Confluent Cloud, or any other cluster.
56 | // Follow these instructions to create this file: https://docs.confluent.io/platform/current/tutorials/examples/clients/docs/java.html
57 | final Properties props = loadConfig(args[0]);
58 |
59 | // Add additional properties.
60 | props.put(StreamsConfig.APPLICATION_ID_CONFIG, "demo-streams-1");
61 | // Disable caching to print the aggregation value after each record
62 | props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
63 | props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
64 |
65 | final Serde<DataRecord> dataRecordSerde = getJsonSerde();
66 |
67 | final StreamsBuilder builder = new StreamsBuilder();
68 | final KStream<String, DataRecord> records = builder.stream(topic, Consumed.with(Serdes.String(), dataRecordSerde));
69 |
70 | KStream<String, Long> counts = records.map((k, v) -> new KeyValue<>(k, v.getCount()));
71 | counts.print(Printed.toSysOut().withLabel("Consumed record"));
72 |
73 | // Aggregate values by key
74 | KStream<String, Long> countAgg = counts.groupByKey(Grouped.with(Serdes.String(), Serdes.Long()))
75 | .reduce(
76 | (aggValue, newValue) -> aggValue + newValue)
77 | .toStream();
78 | countAgg.print(Printed.toSysOut().withLabel("Running count"));
79 |
80 | KafkaStreams streams = new KafkaStreams(builder.build(), props);
81 | streams.start();
82 |
83 | // Add shutdown hook to respond to SIGTERM and gracefully close Kafka Streams
84 | Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
85 |
86 | }
87 |
88 | private static Serde<DataRecord> getJsonSerde() {
89 |
90 | Map<String, Object> serdeProps = new HashMap<>();
91 | serdeProps.put("json.value.type", DataRecord.class);
92 |
93 | final Serializer<DataRecord> mySerializer = new KafkaJsonSerializer<>();
94 | mySerializer.configure(serdeProps, false);
95 |
96 | final Deserializer<DataRecord> myDeserializer = new KafkaJsonDeserializer<>();
97 | myDeserializer.configure(serdeProps, false);
98 |
99 | return Serdes.serdeFrom(mySerializer, myDeserializer);
100 | }
101 | }
102 |
--------------------------------------------------------------------------------
/ConfluentCLI-KM.md:
--------------------------------------------------------------------------------
1 | # Confluent CLI Knowledge Management
2 |
3 |
4 | ## Switch environment with the CLI
5 |
6 |
7 | List the available environments:
8 |
9 | ```
10 | confluent environment list
11 | ```
12 |
13 | Choose an environment:
14 | ```
15 | confluent environment use 0
16 | ```
17 |
18 | ## Switch cluster with the CLI
19 |
20 |
21 | Get all clusters in the current environment:
22 | ```
23 | confluent kafka cluster list
24 | ```
25 |
26 | ```
27 | confluent kafka cluster use
28 | ```
29 |
30 |
31 | ## API Key configuration
32 |
33 |
34 | - Create a new API key in the Confluent console.
35 |
36 | Get the key and secret from the downloaded file, then store them locally:
37 |
38 | ```
39 | confluent api-key store --resource
40 | ```
41 |
42 | - Or create a new one directly in the terminal with the CLI.
43 |
44 |
45 | ## Create a schema for your records
46 |
47 | Create a file named **orders-avro-schema.json**:
48 |
49 | ```
50 | {
51 | "type": "record",
52 | "namespace": "io.confluent.tutorial",
53 | "name": "OrderDetail",
54 | "fields": [
55 | {"name": "number", "type": "long", "doc": "The order number."},
56 | {"name": "date", "type": "long", "logicalType": "date", "doc": "The date the order was submitted."},
57 | {"name": "shipping_address", "type": "string", "doc": "The shipping address."},
58 | {"name": "subtotal", "type": "double", "doc": "The amount without shipping cost and tax."},
59 | {"name": "shipping_cost", "type": "double", "doc": "The shipping cost."},
60 | {"name": "tax", "type": "double", "doc": "The applicable tax."},
61 | {"name": "grand_total", "type": "double", "doc": "The order grand total ."}
62 | ]
63 | }
64 | ```
65 |
66 |
67 | ## Create a topic
68 |
69 | - Without Schema Registry:
70 |
71 | ```
72 | confluent kafka topic create orders --partitions 1
73 |
74 | ```
75 |
76 | - Schema Registry example:
77 | ```
78 | confluent kafka topic create orders-avro --partitions 1
79 | ```
80 |
81 |
82 | ## Produce and consume messages
83 |
84 |
85 | ### Produce and consume message by default
86 |
87 | ```
88 | confluent kafka topic produce orders
89 |
90 | confluent kafka topic consume orders
91 | ```
92 |
93 |
94 | ### Produce and consume records with full key-value pairs
95 |
96 | ```
97 | confluent kafka topic produce orders --parse-key --delimiter ":"
98 |
99 | confluent kafka topic consume orders --print-key --delimiter "-" --from-beginning
100 |
101 | ```
102 |
103 | An example:
104 |
105 | ```
106 | 6:{"number":6,"date":18505,"shipping_address":"9182 Shipyard Drive, Raleigh, NC. 27609","subtotal":72.00,"tax":3.00,"grand_total":75.00,"shipping_cost":0.00}
107 | 7:{"number":7,"date":18506,"shipping_address":"644 Lagon Street, Chicago, IL. 07712","subtotal":11.00,"tax":1.00,"grand_total":14.00,"shipping_cost":2.00}
108 | ```
109 |
110 | ### Produce events to Kafka topic with Schema Registry:
111 |
112 | ```
113 | confluent kafka topic produce orders-avro --value-format avro --schema orders-avro-schema.json
114 | ```
115 |
116 | Test data:
117 | ```
118 | {"number":1,"date":18500,"shipping_address":"ABC Sesame Street,Wichita, KS. 12345","subtotal":110.00,"tax":10.00,"grand_total":120.00,"shipping_cost":0.00}
119 | {"number":2,"date":18501,"shipping_address":"123 Cross Street,Irving, CA. 12345","subtotal":5.00,"tax":0.53,"grand_total":6.53,"shipping_cost":1.00}
120 | {"number":3,"date":18502,"shipping_address":"5014 Pinnickinick Street, Portland, WA. 97205","subtotal":93.45,"tax":9.34,"grand_total":102.79,"shipping_cost":0.00}
121 | {"number":4,"date":18503,"shipping_address":"4082 Elmwood Avenue, Tempe, AX. 85281","subtotal":50.00,"tax":1.00,"grand_total":51.00,"shipping_cost":0.00}
122 | {"number":5,"date":18504,"shipping_address":"123 Cross Street,Irving, CA. 12345","subtotal":33.00,"tax":3.33,"grand_total":38.33,"shipping_cost":2.00}
123 |
124 | ```
125 |
126 | Produce records with full key-value pairs
127 |
128 | ```
129 | confluent kafka topic produce orders-avro --value-format avro --schema orders-avro-schema.json --parse-key --delimiter ":"
130 |
131 | ```
132 |
133 | Sample data:
134 |
135 | ```
136 | 6:{"number":6,"date":18505,"shipping_address":"9182 Shipyard Drive, Raleigh, NC. 27609","subtotal":72.00,"tax":3.00,"grand_total":75.00,"shipping_cost":0.00}
137 | 7:{"number":7,"date":18506,"shipping_address":"644 Lagon Street, Chicago, IL. 07712","subtotal":11.00,"tax":1.00,"grand_total":14.00,"shipping_cost":2.00}
138 |
139 | ```
140 |
141 | ### Consume with full key-value pairs
142 |
143 |
144 | ```
145 | confluent kafka topic consume orders-avro --value-format avro --print-key --delimiter "-" --from-beginning
146 | ```
147 |
148 | ## Get Schema Registry API information
149 |
150 | Environment -> Credentials (right-side menu) -> Add new key
151 |
152 | ## Fetch schema information from Schema Registry
153 |
154 | Tell the consumer to fetch the Avro schema for this topic from Schema Registry and deserialize the data first:
155 |
156 | ```
157 | confluent kafka topic consume --value-format avro --sr-api-key {API Key} --sr-api-secret {API Secret} orders-avro
158 | ```
159 |
160 |
161 |
162 |
163 |
--------------------------------------------------------------------------------
/kafka-streams/app/src/main/java/io/m03315/learning/kafka/StreamsJoin.java:
--------------------------------------------------------------------------------
1 | package io.m03315.learning.kafka;
2 |
3 |
4 | import io.m03315.learning.kafka.avro.ApplianceOrder;
5 | import io.m03315.learning.kafka.avro.ElectronicOrder;
6 | import io.m03315.learning.kafka.avro.User;
7 | import io.m03315.learning.kafka.avro.CombinedOrder;
8 | import io.confluent.kafka.streams.serdes.avro.SpecificAvroSerde;
9 | import org.apache.avro.specific.SpecificRecord;
10 | import org.apache.kafka.common.serialization.Serdes;
11 | import org.apache.kafka.streams.KafkaStreams;
12 | import org.apache.kafka.streams.StreamsBuilder;
13 | import org.apache.kafka.streams.StreamsConfig;
14 | import org.apache.kafka.streams.kstream.Consumed;
15 | import org.apache.kafka.streams.kstream.JoinWindows;
16 | import org.apache.kafka.streams.kstream.Joined;
17 | import org.apache.kafka.streams.kstream.KStream;
18 | import org.apache.kafka.streams.kstream.KTable;
19 | import org.apache.kafka.streams.kstream.Materialized;
20 | import org.apache.kafka.streams.kstream.Produced;
21 | import org.apache.kafka.streams.kstream.StreamJoined;
22 | import org.apache.kafka.streams.kstream.ValueJoiner;
23 |
24 | import java.io.IOException;
25 | import java.time.Duration;
26 | import java.time.Instant;
27 | import java.util.Map;
28 | import java.util.Properties;
29 |
30 | public class StreamsJoin {
31 |
32 | static <T extends SpecificRecord> SpecificAvroSerde<T> getSpecificAvroSerde(final Map<String, Object> serdeConfig) {
33 | final SpecificAvroSerde<T> specificAvroSerde = new SpecificAvroSerde<>();
34 | specificAvroSerde.configure(serdeConfig, false);
35 | return specificAvroSerde;
36 | }
37 |
38 | public static void main(String[] args) throws IOException {
39 | Properties streamsProps = StreamsUtils.loadProperties();
40 | streamsProps.put(StreamsConfig.APPLICATION_ID_CONFIG, "joining-streams");
41 |
42 | StreamsBuilder builder = new StreamsBuilder();
43 | String streamOneInput = streamsProps.getProperty("stream_one.input.topic");
44 | String streamTwoInput = streamsProps.getProperty("stream_two.input.topic");
45 | String tableInput = streamsProps.getProperty("table.input.topic");
46 | String outputTopic = streamsProps.getProperty("joins.output.topic");
47 |
48 | Map<String, Object> configMap = StreamsUtils.propertiesToMap(streamsProps);
49 |
50 | SpecificAvroSerde<ApplianceOrder> applianceSerde = getSpecificAvroSerde(configMap);
51 | SpecificAvroSerde<ElectronicOrder> electronicSerde = getSpecificAvroSerde(configMap);
52 | SpecificAvroSerde<CombinedOrder> combinedSerde = getSpecificAvroSerde(configMap);
53 | SpecificAvroSerde<User> userSerde = getSpecificAvroSerde(configMap);
54 |
55 | ValueJoiner<ApplianceOrder, ElectronicOrder, CombinedOrder> orderJoiner = (applianceOrder,
56 | electronicOrder) -> CombinedOrder.newBuilder()
57 | .setApplianceOrderId(applianceOrder.getOrderId())
58 | .setApplianceId(applianceOrder.getApplianceId())
59 | .setElectronicOrderId(electronicOrder.getOrderId())
60 | .setTime(Instant.now().toEpochMilli())
61 | .build();
62 |
63 | ValueJoiner<CombinedOrder, User, CombinedOrder> enrichmentJoiner = (combined, user) -> {
64 | if (user != null) {
65 | combined.setUserName(user.getName());
66 | }
67 | return combined;
68 | };
69 |
70 | KStream<String, ApplianceOrder> applianceStream = builder
71 | .stream(streamOneInput, Consumed.with(Serdes.String(), applianceSerde))
72 | .peek((key, value) -> System.out
73 | .println("Appliance stream incoming record key " + key + " value " + value));
74 |
75 | KStream<String, ElectronicOrder> electronicStream = builder
76 | .stream(streamTwoInput, Consumed.with(Serdes.String(), electronicSerde))
77 | .peek((key, value) -> System.out
78 | .println("Electronic stream incoming record " + key + " value " + value));
79 |
80 | KTable<String, User> userTable = builder.table(tableInput, Materialized.with(Serdes.String(), userSerde));
81 |
82 | KStream<String, CombinedOrder> combinedStream = applianceStream.join(
83 | electronicStream,
84 | orderJoiner,
85 | JoinWindows.of(Duration.ofMinutes(30)),
86 | StreamJoined.with(Serdes.String(), applianceSerde, electronicSerde))
87 | .peek((key, value) -> System.out.println("Stream-Stream Join record key " + key + " value " + value));
88 |
89 | combinedStream.leftJoin(
90 | userTable,
91 | enrichmentJoiner,
92 | Joined.with(Serdes.String(), combinedSerde, userSerde))
93 | .peek((key, value) -> System.out.println("Stream-Table Join record key " + key + " value " + value))
94 | .to(outputTopic, Produced.with(Serdes.String(), combinedSerde));
95 |
96 | KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), streamsProps);
97 | TopicLoader.runProducer();
98 | kafkaStreams.start();
99 | }
100 | }
--------------------------------------------------------------------------------
/initial/README.md:
--------------------------------------------------------------------------------
1 | # Initialise an Apache Kafka quick start environment
2 |
3 | ## Docker-Compose
4 |
5 | ### Set up a Kafka broker with [docker-compose.yml](docker-compose.yml)
6 |
7 | 1. Start a Kafka broker
8 |
9 | ```
10 | docker-compose up -d
11 | ```
12 |
13 | 2. Create a topic
14 |
15 | It’s good practice to explicitly create topics before using them, even if Kafka is configured to create them automatically when referenced.
16 |
17 | ```
18 | docker exec broker \
19 | kafka-topics --bootstrap-server broker:9092 \
20 | --create \
21 | --topic quickstart
22 | ```
23 |
24 | 3. Write messages to the topic
25 |
26 | ```
27 | docker exec --interactive --tty broker \
28 | kafka-console-producer \
29 | --bootstrap-server broker:9092 \
30 | --topic quickstart
31 | ```
32 |
33 | 4. Read messages from the topic
34 |
35 | ```
36 | docker exec --interactive --tty broker \
37 | kafka-console-consumer --bootstrap-server broker:9092 \
38 | --topic quickstart \
39 | --from-beginning
40 | ```
41 |
42 | 5. Stop the Kafka broker
43 |
44 | ```
45 | docker-compose down
46 | ```
47 |
48 | > https://developer.confluent.io/quickstart/kafka-docker/
49 |
50 | ## Confluent cloud
51 |
52 | 1. Build docker image from [Dockerfile](Dockerfile)
53 | ```
54 | docker build -t confluent-cli:latest .
55 | ```
56 |
57 | 2. Run a Confluent CLI container
58 | ```
59 | docker run -it confluent-cli:latest sh
60 | ```
61 |
62 | If you have no Confluent Cloud account, use the cloud-signup command to create one; otherwise, you can log in directly with the login command.
63 |
64 | ```
65 | confluent cloud-signup
66 | ```
67 |
68 | ```
69 | confluent login
70 | ```
71 |
72 |
73 | 3. Create a Kafka cluster
74 |
75 |
76 | - provider: aws, azure, gcp
77 | - region: cloud provider region
78 |
79 | Get the available values:
80 |
81 | ```
82 | confluent kafka region list --cloud
83 | ```
84 |
85 | ```
86 | confluent kafka cluster create quickstart --cloud --region
87 | ```
88 | Example:
89 | ```
90 | confluent kafka cluster create quickstart --cloud aws --region us-east-1
91 | confluent kafka cluster create quickstart --cloud azure --region eastus
92 | confluent kafka cluster create quickstart --cloud gcp --region us-east1
93 | ```
94 |
95 | 4. Wait for cluster to be running
96 | ```
97 | confluent kafka cluster list
98 | ```
99 | Example:
100 | ```
101 | confluent kafka cluster list
102 | Id | Name | Type | Provider | Region | Availability | Status
103 | ---------------+------------+-------+----------+-------------+--------------+---------
104 | lkc-123456 | quickstart | BASIC | gcp | us-east1 | single-zone | UP
105 | ```
106 |
107 |
108 | 5. Set the active cluster
109 |
110 | Set a default cluster so you do not need to specify it in later commands:
111 |
112 | ```
113 | confluent kafka cluster use
114 | ```
115 |
116 | Example:
117 | ```
118 | confluent kafka cluster use lkc-123456
119 | Set Kafka cluster "lkc-123456" as the active cluster for environment "env-123456".
120 | ```
121 |
122 | 6. Create a topic
123 |
124 | Create a topic named *quickstart* with 1 *partition*:
125 |
126 | ```
127 | confluent kafka topic create quickstart --partitions 1
128 | ```
129 |
130 | 7. Create an API key
131 |
132 | The API key will be used to produce and consume messages:
133 |
134 | ```
135 | confluent api-key create --resource
136 | ```
137 |
138 | Set the active API key:
139 | ```
140 | confluent api-key use --resource
141 | ```
142 |
143 | 8. Produce a message to the topic
144 |
145 | Produce a message to the quickstart topic:
146 | ```
147 | confluent kafka topic produce quickstart
148 | ```
149 |
150 | enter Ctrl-C or Ctrl-D to exit:
151 | ```
152 | Starting Kafka Producer. Use Ctrl-C or Ctrl-D to exit.
153 | hello world
154 | ^C
155 | ```
156 |
157 | 9. Consume the message from the topic
158 |
159 | ```
160 | confluent kafka topic consume quickstart --from-beginning
161 | ```
162 |
163 | > https://developer.confluent.io/quickstart/kafka-on-confluent-cloud/
164 |
165 | ## Local environment
166 |
167 | Install Apache Kafka:
168 |
169 | 1. Get the install package
170 | ```
171 | wget https://packages.confluent.io/archive/7.0/confluent-community-7.0.1.tar.gz
172 | ```
173 |
174 | 2. Extract the archive:
175 | ```
176 | tar -xf confluent-community-7.0.1.tar.gz
177 | ```
178 |
179 | ```
180 | cd confluent-7.0.1
181 | ```
182 |
183 | 3. Start the Kafka broker
184 |
185 | Launch the broker in KRaft mode, which means that it runs without ZooKeeper.
186 |
187 | Configure the storage:
188 | ```
189 | ./bin/kafka-storage format \
190 | --config ./etc/kafka/kraft/server.properties \
191 | --cluster-id $(./bin/kafka-storage random-uuid)
192 | ```
193 | ```
194 | ./bin/kafka-server-start ./etc/kafka/kraft/server.properties
195 | ```
196 | 4. Create a topic
197 | ```
198 | ./bin/kafka-topics --bootstrap-server localhost:9092 \
199 | --create \
200 | --topic quickstart
201 | ```
202 | 5. Write messages to the topic
203 |
204 | ```
205 | ./bin/kafka-console-producer --bootstrap-server localhost:9092 \
206 | --topic quickstart
207 | ```
208 |
209 | 6. Read messages from the topic
210 |
211 | ```
212 | ./bin/kafka-console-consumer --bootstrap-server localhost:9092 \
213 | --topic quickstart \
214 | --from-beginning
215 | ```
216 |
217 | 7. Write some more messages
218 |
219 | ```
220 | ./bin/kafka-console-producer --bootstrap-server localhost:9092 \
221 | --topic quickstart
222 | ```
223 | 8. Stop the Kafka broker
224 |
225 | Press Ctrl-C in the terminal
226 |
227 | > https://developer.confluent.io/quickstart/kafka-local/
228 |
229 |
--------------------------------------------------------------------------------