├── .env
├── .github
└── ISSUE_TEMPLATE
│ ├── bug_report.md
│ └── feature_request.md
├── .gitignore
├── CONTRIBUTING.adoc
├── LICENSE
├── README.adoc
├── acls
├── config
│ ├── acl-config.yaml
│ ├── admin.properties
│ ├── alice.properties
│ └── run-acl.sh
└── docker-compose.yml
├── admin-client
├── pom.xml
└── src
│ └── main
│ ├── java
│ └── org
│ │ └── hifly
│ │ └── kafka
│ │ └── admin
│ │ └── AdminClientWrapper.java
│ └── resources
│ └── admin.properties
├── avro
├── car_v1.avsc
├── car_v2.avsc
└── car_v3.avsc
├── cdc-debezium-informix
├── config
│ ├── debezium-source-informix.json
│ └── informix_ddl_sample.sql
├── docker-compose.yml
└── jars
│ ├── ifx-changestream-client-1.1.3.jar
│ ├── jdbc-4.50.10.jar
│ └── manifest.json
├── cdc-debezium-mongo
├── config
│ ├── debezium-source-mongo-with-capture-scope-database.json
│ └── debezium-source-mongo.json
└── docker-compose.yml
├── cdc-debezium-postgres
├── config
│ ├── create-tables.sql
│ └── debezium-source-pgsql.json
└── docker-compose.yml
├── claim-check
├── docker-compose.yml
├── pom.xml
└── src
│ └── main
│ ├── java
│ └── org
│ │ └── hifly
│ │ └── kafka
│ │ └── demo
│ │ └── claimcheck
│ │ ├── Runner.java
│ │ └── model
│ │ ├── Item.java
│ │ └── ItemJsonSerializer.java
│ └── resources
│ └── producer.properties
├── compression
├── client-gzip.properties
├── client-lz4.properties
├── client-none.properties
├── client-snappy.properties
└── client-zstd.properties
├── confluent-avro-hierarchy-event
├── pom.xml
└── src
│ └── main
│ ├── java
│ └── org
│ │ └── hifly
│ │ └── kafka
│ │ └── demo
│ │ └── avro
│ │ └── references
│ │ ├── CarInfo.java
│ │ ├── CarTelemetryData.java
│ │ └── app
│ │ ├── RunnerConsumer.java
│ │ └── RunnerProducer.java
│ └── resources
│ ├── car-info.avsc
│ ├── car-telemetry-data.avsc
│ ├── consumer.properties
│ └── producer.properties
├── confluent-avro-multi-event
├── pom.xml
└── src
│ └── main
│ ├── java
│ └── org
│ │ └── hifly
│ │ └── kafka
│ │ └── demo
│ │ └── avro
│ │ └── references
│ │ ├── CarInfo.java
│ │ ├── CarTelemetryData.java
│ │ ├── RunnerConsumer.java
│ │ └── RunnerProducer.java
│ └── resources
│ ├── car-info.avsc
│ ├── car-telemetry-data.avsc
│ ├── car-telemetry.avsc
│ ├── consumer.properties
│ └── producer.properties
├── confluent-avro-specific-record
├── pom.xml
├── produce-avro-records.sh
└── src
│ ├── main
│ ├── java
│ │ └── org
│ │ │ └── hifly
│ │ │ └── kafka
│ │ │ └── demo
│ │ │ └── avro
│ │ │ ├── CDCProducer.java
│ │ │ ├── CarConsumer.java
│ │ │ ├── CarProducer.java
│ │ │ └── domain
│ │ │ ├── Car.java
│ │ │ └── cdc
│ │ │ ├── Data.java
│ │ │ ├── DataRecord.java
│ │ │ ├── Headers.java
│ │ │ ├── KeyRecord.java
│ │ │ └── operation.java
│ └── resources
│ │ ├── car.avsc
│ │ ├── car_v1.avsc
│ │ ├── car_v2.avsc
│ │ ├── car_v3.avsc
│ │ ├── cdc-key.avsc
│ │ ├── cdc-value.avsc
│ │ ├── consumer.properties
│ │ └── producer.properties
│ └── test
│ └── java
│ └── org
│ └── hifly
│ └── kafka
│ └── demo
│ └── producer
│ └── serializer
│ ├── json
│ └── JsonProducerMockTest.java
│ └── string
│ └── StringProducerMockTest.java
├── confluent-for-kubernetes
└── k8s
│ ├── confluent-platform-reducted.yaml
│ └── confluent-platform.yaml
├── docker-compose.yml
├── flink-window-tumbling-heartbeat
├── docker-compose.yml
└── sql
│ ├── Dockerfile
│ ├── app
│ └── heartbeats.sql
│ ├── bin
│ └── sql-client.sh
│ ├── conf
│ └── flink-conf.yaml
│ └── docker-entrypoint.sh
├── images
├── minio.png
├── minio2.png
├── outbox_table.png
├── quotas.png
└── traces.png
├── interceptors
├── pom.xml
└── src
│ └── main
│ ├── java
│ └── org
│ │ └── hifly
│ │ └── kafka
│ │ └── interceptor
│ │ ├── consumer
│ │ ├── CreditCardConsumerInterceptor.java
│ │ ├── CreditCardJsonDeserializer.java
│ │ └── Runner.java
│ │ └── producer
│ │ ├── CreditCard.java
│ │ ├── CreditCardJsonSerializer.java
│ │ ├── CreditCardProducerInterceptor.java
│ │ └── Runner.java
│ └── resources
│ └── consumer-interceptor.properties
├── kafka-clients-graalvm
├── examples
│ ├── consumer.properties
│ └── producer.properties
├── kerberos
│ ├── bind9
│ │ ├── Dockerfile
│ │ ├── db.confluent.io
│ │ └── named.conf
│ ├── client
│ │ ├── Dockerfile
│ │ ├── client.sasl.jaas.config
│ │ ├── command.properties
│ │ ├── confluent.repo
│ │ ├── consumer-nokeytab.properties
│ │ ├── consumer.properties
│ │ ├── producer-nokeytab.properties
│ │ └── producer.properties
│ ├── docker-compose-dns.yml
│ ├── docker-compose.yml
│ ├── kafka
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── kafka.sasl.jaas.config
│ │ └── server.properties
│ ├── kdc
│ │ ├── Dockerfile
│ │ ├── krb5.conf
│ │ └── krb5_dns.conf
│ ├── up
│ └── zookeeper
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── zookeeper.properties
│ │ └── zookeeper.sasl.jaas.config
├── pom.xml
└── src
│ └── main
│ ├── java
│ └── org
│ │ └── hifly
│ │ └── kafka
│ │ └── demo
│ │ └── KafkaClient.java
│ └── resources
│ └── META-INF
│ └── native-image
│ ├── jni-config.json
│ ├── predefined-classes-config.json
│ ├── proxy-config.json
│ ├── reflect-config.json
│ ├── resource-config.json
│ └── serialization-config.json
├── kafka-connect-sink-http
├── config
│ └── http_sink.json
├── docker-compose.yml
└── rest-controller
│ ├── pom.xml
│ └── src
│ └── main
│ ├── java
│ └── io
│ │ └── confluent
│ │ └── springboot
│ │ └── kafka
│ │ └── demo
│ │ ├── Application.java
│ │ └── controller
│ │ └── Controller.java
│ └── resources
│ └── application.yml
├── kafka-connect-sink-s3
├── config
│ ├── jr.kafka.properties
│ ├── jr.kafka.schema.registry.properties
│ ├── s3_parquet_sink.json
│ └── s3_sink.json
└── docker-compose.yml
├── kafka-connect-source-event-router
├── config
│ ├── connector_jdbc_source.json
│ └── create-tables.sql
└── docker-compose.yml
├── kafka-connect-source-sap-hana
├── config
│ └── sap_hana_source.json
├── docker-compose.yml
└── post_start
│ ├── 201_hxe_optimize
│ ├── 203_set_hxe_info
│ ├── 999_import_dump
│ └── hxe_scripts
│ └── hxe_optimize.sh
├── kafka-connect-task-distribution
├── config
│ └── connector_datagen.json
└── docker-compose.yml
├── kafka-consumer-retry-topics
├── pom.xml
└── src
│ └── main
│ └── java
│ └── org
│ └── hifly
│ └── kafka
│ └── demo
│ └── consumer
│ └── retry
│ ├── ConsumerRetries.java
│ └── RetryHandle.java
├── kafka-consumer
├── docker-compose.yml
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── org
│ │ │ └── hifly
│ │ │ └── kafka
│ │ │ └── demo
│ │ │ └── consumer
│ │ │ ├── core
│ │ │ ├── AbstractConsumerHandle.java
│ │ │ ├── ConsumerInstance.java
│ │ │ ├── ConsumerRecordUtil.java
│ │ │ ├── GenericConsumer.java
│ │ │ ├── KafkaConfig.java
│ │ │ ├── Runner.java
│ │ │ └── impl
│ │ │ │ ├── ConsumerHandle.java
│ │ │ │ └── GenericConsumerImpl.java
│ │ │ ├── deserializer
│ │ │ └── avro
│ │ │ │ ├── Runner.java
│ │ │ │ └── SchemaRegistry.java
│ │ │ ├── offset
│ │ │ └── OffsetManager.java
│ │ │ ├── partition
│ │ │ └── PartitionListener.java
│ │ │ ├── rack
│ │ │ └── Runner.java
│ │ │ ├── staticmembership
│ │ │ └── Runner.java
│ │ │ └── tx
│ │ │ └── Runner.java
│ └── resources
│ │ ├── assignment.json
│ │ ├── consumer-ffetching.properties
│ │ ├── consumer-member1.properties
│ │ ├── consumer-member2.properties
│ │ ├── consumer-member3.properties
│ │ └── consumer.properties
│ └── test
│ └── java
│ └── org
│ └── hifly
│ └── kafka
│ └── demo
│ └── consumer
│ └── string
│ ├── BasicConsumerTest.java
│ └── ConsumerHandleTest.java
├── kafka-distributed-tracing
├── app
│ └── opentelemetry-javaagent.jar
├── docker-compose-tracing.yml
└── otel-collector-config.yaml
├── kafka-microprofile2-consumer
├── Dockerfile
├── README.txt
├── infrastructure
│ ├── envs
│ │ ├── template.vars
│ │ └── test
│ │ │ └── jvm.options
│ └── template
│ │ └── alm-portal.yml
├── pom.xml
└── src
│ ├── main
│ └── java
│ │ └── org
│ │ └── hifly
│ │ └── kafka
│ │ └── order
│ │ └── process
│ │ ├── OrderProcessApp.java
│ │ ├── consumer
│ │ ├── OrderEventJsonDeserializer.java
│ │ └── handle
│ │ │ └── OrderProcessHandle.java
│ │ ├── controller
│ │ └── OrderProcessController.java
│ │ ├── event
│ │ ├── ItemEvent.java
│ │ └── OrderEvent.java
│ │ └── shipment
│ │ └── ShipmentClient.java
│ └── test
│ └── java
│ └── org
│ └── hifly
│ └── kafka
│ └── order
│ └── process
│ └── test
│ └── ShipmentClientTest.java
├── kafka-microprofile2-producer
├── Dockerfile
├── README.txt
├── infrastructure
│ ├── envs
│ │ ├── template.vars
│ │ └── test
│ │ │ └── jvm.options
│ └── template
│ │ └── kafka-producer.yml
├── pom.xml
└── src
│ └── main
│ └── java
│ └── org
│ └── hifly
│ └── kafka
│ └── order
│ ├── OrderApp.java
│ ├── controller
│ └── OrderController.java
│ ├── event
│ ├── ItemEvent.java
│ └── OrderEvent.java
│ ├── model
│ ├── Order.java
│ └── OrderItem.java
│ └── producer
│ └── OrderEventJsonSerializer.java
├── kafka-oauth-kip-768
├── client-oauth.properties
├── docker-compose-idp.yml
└── docker-compose-oauth.yml
├── kafka-orders-tx
├── pom.xml
└── src
│ └── main
│ └── java
│ └── org
│ └── hifly
│ └── kafka
│ └── demo
│ └── orders
│ ├── ItemsConsumer.java
│ ├── ItemsProducer.java
│ ├── controller
│ └── ItemController.java
│ ├── kafka
│ ├── KafkaConfig.java
│ ├── consumer
│ │ └── ItemJsonDeserializer.java
│ └── producer
│ │ ├── ItemJsonSerializer.java
│ │ └── OrderJsonSerializer.java
│ └── model
│ ├── Item.java
│ └── Order.java
├── kafka-producer
├── docker-compose-apicurio.yml
├── docker-compose-cflt-sr.yml
├── docker-compose-hortonworks-sr.yml
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── org
│ │ │ └── hifly
│ │ │ └── kafka
│ │ │ └── demo
│ │ │ └── producer
│ │ │ ├── AbstractKafkaProducer.java
│ │ │ ├── IKafkaProducer.java
│ │ │ ├── KafkaConfig.java
│ │ │ ├── ProducerCallback.java
│ │ │ ├── RecordMetadataUtil.java
│ │ │ ├── partitioner
│ │ │ └── custom
│ │ │ │ ├── Runner.java
│ │ │ │ └── UserPartitioner.java
│ │ │ ├── serializer
│ │ │ ├── avro
│ │ │ │ ├── AvroDataProducer.java
│ │ │ │ ├── Runner.java
│ │ │ │ ├── SchemaRegistry.java
│ │ │ │ └── model
│ │ │ │ │ └── Car.java
│ │ │ ├── json
│ │ │ │ ├── AuditItemJsonSerializer.java
│ │ │ │ ├── CustomDataJsonSerializer.java
│ │ │ │ ├── JsonProducer.java
│ │ │ │ ├── Runner.java
│ │ │ │ └── RunnerAuditItem.java
│ │ │ ├── model
│ │ │ │ ├── AuditItem.java
│ │ │ │ └── CustomData.java
│ │ │ └── string
│ │ │ │ ├── Runner.java
│ │ │ │ └── StringProducer.java
│ │ │ └── tx
│ │ │ ├── Runner.java
│ │ │ └── StringTXProducer.java
│ └── resources
│ │ ├── car.avsc
│ │ └── producer.properties
│ └── test
│ └── java
│ └── org
│ └── hifly
│ └── kafka
│ └── demo
│ └── producer
│ └── serializer
│ ├── json
│ └── JsonProducerMockTest.java
│ └── string
│ └── StringProducerMockTest.java
├── kafka-python-consumer
└── consumer.py
├── kafka-python-producer
└── producer.py
├── kafka-quarkus
├── .gitignore
├── .mvn
│ └── wrapper
│ │ ├── MavenWrapperDownloader.java
│ │ └── maven-wrapper.properties
├── mvnw
├── pom.xml
└── src
│ └── main
│ ├── java
│ └── org
│ │ └── hifly
│ │ └── demo
│ │ └── kafka
│ │ └── quarkus
│ │ ├── domain
│ │ └── Vote.java
│ │ └── messaging
│ │ ├── Consumer.java
│ │ └── Sender.java
│ └── resources
│ └── application.properties
├── kafka-smt-aspectj
├── Dockerfile
├── agent
│ └── aspectjweaver-1.9.19.jar
├── config
│ ├── connector_mongo.json
│ └── test.json
├── docker-compose.yml
├── pom.xml
└── src
│ └── main
│ ├── java
│ └── org
│ │ └── hifly
│ │ └── kafka
│ │ └── smt
│ │ └── aspectj
│ │ └── SMTAspect.java
│ └── resources
│ └── META-INF
│ └── aop.xml
├── kafka-smt-custom
├── Dockerfile
├── config
│ ├── connector_mongo.json
│ └── test.json
├── docker-compose.yml
├── pom.xml
└── src
│ ├── main
│ └── java
│ │ └── org
│ │ └── hifly
│ │ └── kafka
│ │ └── smt
│ │ └── KeyFromFields.java
│ └── test
│ └── java
│ └── org
│ └── hifly
│ └── kafka
│ └── smt
│ └── KeyFromFieldsTest.java
├── kafka-springboot-consumer
├── .gitignore
├── pom.xml
└── src
│ └── main
│ ├── fabric8
│ ├── deployment.yml
│ ├── kafka-configmap.yml
│ └── svc.yml
│ ├── java
│ └── org
│ │ └── hifly
│ │ └── demo
│ │ └── kafka
│ │ ├── KafkaApplication.java
│ │ ├── kafka
│ │ ├── OrderException.java
│ │ └── Receiver.java
│ │ ├── model
│ │ └── Order.java
│ │ └── mongo
│ │ └── OrderRepository.java
│ └── resources
│ ├── application.yml
│ └── test.json
├── kafka-springboot-producer
├── .gitignore
├── pom.xml
└── src
│ └── main
│ ├── fabric8
│ ├── deployment.yml
│ ├── kafka-configmap.yml
│ ├── route.yml
│ └── svc.yml
│ ├── java
│ └── org
│ │ └── hifly
│ │ └── demo
│ │ └── kafka
│ │ ├── KafkaApplication.java
│ │ ├── controller
│ │ └── KafkaRestController.java
│ │ ├── kafka
│ │ └── Producer.java
│ │ └── model
│ │ └── Order.java
│ └── resources
│ ├── application.yml
│ └── test.json
├── kafka-streams-k8s
├── Dockerfile
├── dependency-reduced-pom.xml
├── k8s
│ ├── statefulset.yml
│ ├── svc-prometheus.yml
│ └── svc.yml
├── pom.xml
├── prometheus
│ ├── jmx_prometheus_javaagent-0.20.0.jar
│ └── kafka_streams.yml
└── src
│ └── main
│ └── java
│ └── org
│ └── hifly
│ └── kafka
│ └── demo
│ └── streams
│ └── stream
│ ├── BoundedMemoryRocksDBConfig.java
│ └── WindowedWordCountApp.java
├── kafka-streams-processor
├── pom.xml
└── src
│ └── main
│ └── java
│ └── org
│ └── hifly
│ └── kafka
│ └── demo
│ └── streams
│ └── processor
│ ├── ExpiredMessagesApplication.java
│ ├── ExpiredMessagesProcessor.java
│ ├── JSONArrayRemoveProcessor.java
│ └── JSONArrayRemoveProcessorApplication.java
├── kafka-streams
├── dependency-reduced-pom.xml
├── pom.xml
└── src
│ ├── main
│ └── java
│ │ └── org
│ │ └── hifly
│ │ └── kafka
│ │ └── demo
│ │ └── streams
│ │ ├── domain
│ │ ├── CarInfo.java
│ │ ├── CarSensor.java
│ │ └── SpeedInfo.java
│ │ ├── queries
│ │ └── QueryController.java
│ │ ├── serializer
│ │ ├── CarInfoDeserializer.java
│ │ ├── CarInfoSerializer.java
│ │ ├── CarSensorDeserializer.java
│ │ ├── CarSensorSerializer.java
│ │ ├── SpeedInfoDeserializer.java
│ │ └── SpeedInfoSerializer.java
│ │ └── stream
│ │ ├── CarBrandStream.java
│ │ ├── CarSensorStream.java
│ │ ├── StreamCounter.java
│ │ └── StreamSum.java
│ └── test
│ └── java
│ └── org
│ └── hifly
│ └── kafka
│ └── demo
│ └── streams
│ ├── CarBrandStreamTest.java
│ └── CarSensorStreamTest.java
├── kafka-unixcommand-connector
├── Dockerfile
├── build-image.sh
├── config
│ └── source.quickstart.json
├── docker-compose.yml
├── pom.xml
└── src
│ ├── assembly
│ ├── assembly.xml
│ └── manifest.json
│ └── main
│ └── java
│ └── org
│ └── hifly
│ └── kafka
│ └── demo
│ └── connector
│ ├── UnixCommandSourceConnector.java
│ └── UnixCommandSourceTask.java
├── ksqldb-join
├── config
│ ├── connector_device_maintenance_jdbc_source.json
│ ├── connector_jdbc_source.json
│ ├── connector_rabbitmq_source.json
│ ├── create-tables.sql
│ └── rabbit_producer.py
├── docker-compose.yml
└── ksql
│ ├── ksql-statements-rj.sh
│ ├── ksql-statements.sh
│ ├── statements-rj.sql
│ └── statements.sql
├── ksqldb-saga-example
├── ksql
│ ├── insert.sql
│ ├── ksql-insert.sh
│ ├── ksql-statements.sh
│ └── statements.sql
├── pom.xml
└── src
│ └── main
│ ├── avro
│ ├── accounts-value.avsc
│ ├── order_actions-value.avsc
│ ├── order_actions_ack-value.avsc
│ └── orders-value.avsc
│ └── java
│ └── org
│ └── hifly
│ └── saga
│ └── payment
│ ├── OrderSaga.java
│ └── model
│ ├── Account.java
│ ├── Order.java
│ ├── OrderAction.java
│ └── OrderActionAck.java
├── ksqldb-window-session-tripsegments
└── ksql
│ ├── insert.sql
│ ├── ksql-insert.sh
│ ├── ksql-statements.sh
│ └── statements.sql
├── ksqldb-window-tumbling-heartbeat
└── ksql
│ ├── insert.sql
│ ├── ksql-insert.sh
│ ├── ksql-statements.sh
│ └── statements.sql
├── ksqldb
└── docker-compose.yml
├── mirror-maker2
├── config
│ ├── mm2-extra.properties
│ ├── mm2-extra2.properties
│ ├── mm2-no-alias.properties
│ ├── mm2-ssl.properties
│ └── mm2.properties
└── docker-compose.yml
├── monitoring
├── docker-compose.yml
└── list_mbeans.sh
├── mtls-listener
├── config
│ └── client.properties
└── docker-compose-mtls.yml
├── multi-listener
├── config
│ └── client.properties
└── docker-compose.yml
├── performance
├── Dockerfile
├── build-image.sh
├── docker-compose.yml
└── trogdor
│ ├── agent
│ ├── trogdor-agent0.conf
│ ├── trogdor-agent0.sh
│ ├── trogdor-agent1.conf
│ ├── trogdor-agent1.sh
│ ├── trogdor-agent2.conf
│ └── trogdor-agent2.sh
│ └── coordinator
│ ├── compression
│ ├── lz4
│ │ ├── node0.json
│ │ ├── node1.json
│ │ ├── node2.json
│ │ ├── trogdor-task-cancel.sh
│ │ ├── trogdor-task-status.sh
│ │ └── trogdor-task.sh
│ └── nocompression
│ │ ├── node0.json
│ │ ├── node1.json
│ │ ├── node2.json
│ │ ├── trogdor-task-cancel.sh
│ │ ├── trogdor-task-status.sh
│ │ └── trogdor-task.sh
│ ├── trogdor-coordinator.conf
│ └── trogdor-coordinator.sh
├── pom.xml
├── postgres-to-mongo
├── config
│ ├── connector_mongo_sink.json
│ ├── create-tables.sql
│ └── jdbc_psql_source.json
└── docker-compose.yml
├── principal-builder
├── config
│ ├── client.properties
│ └── client2.properties
├── docker-compose.yml
├── pom.xml
└── src
│ └── main
│ └── java
│ └── org
│ └── hifly
│ └── kafka
│ └── principal
│ └── CustomPrincipalBuilder.java
├── proxy
└── kafkaproxy.go
├── quotas
├── config
│ ├── alice.properties
│ └── prometheus
│ │ └── prometheus.yml
└── docker-compose.yml
├── release
└── build.sh
├── sasl-ssl
├── config
│ ├── broker_jaas.conf
│ └── client.properties
└── docker-compose.yml
├── scripts
├── bootstrap-acls.sh
├── bootstrap-apicurio.sh
├── bootstrap-cdc-informix.sh
├── bootstrap-cdc-mongo.sh
├── bootstrap-cdc.sh
├── bootstrap-cflt-schema-registry.sh
├── bootstrap-claim-check.sh
├── bootstrap-connect-event-router.sh
├── bootstrap-connect-sink-http.sh
├── bootstrap-connect-sink-s3-parquet.sh
├── bootstrap-connect-sink-s3.sh
├── bootstrap-connect-source-sap-hana.sh
├── bootstrap-connect-tasks.sh
├── bootstrap-flink.sh
├── bootstrap-hortonworks-sr.sh
├── bootstrap-isolated.sh
├── bootstrap-ksqldb-join.sh
├── bootstrap-ksqldb.sh
├── bootstrap-mm2.sh
├── bootstrap-monitoring.sh
├── bootstrap-mtls.sh
├── bootstrap-multi-listener.sh
├── bootstrap-oauth.sh
├── bootstrap-performance.sh
├── bootstrap-postgres-to-mongo.sh
├── bootstrap-principal.sh
├── bootstrap-quotas.sh
├── bootstrap-racks.sh
├── bootstrap-sasl-ssl.sh
├── bootstrap-smt-aspectj.sh
├── bootstrap-smt-connector.sh
├── bootstrap-tracing.sh
├── bootstrap-unixcommand-connector.sh
├── bootstrap.sh
├── tear-down-acls.sh
├── tear-down-apicurio.sh
├── tear-down-cdc-informix.sh
├── tear-down-cdc-mongo.sh
├── tear-down-cdc.sh
├── tear-down-cflt-schema-registry.sh
├── tear-down-claim-check.sh
├── tear-down-connect-event-router.sh
├── tear-down-connect-sink-http.sh
├── tear-down-connect-sink-s3.sh
├── tear-down-connect-source-sap-hana.sh
├── tear-down-connect-tasks.sh
├── tear-down-flink.sh
├── tear-down-hortonworks-sr.sh
├── tear-down-isolated.sh
├── tear-down-ksqldb-join.sh
├── tear-down-ksqldb.sh
├── tear-down-mm2.sh
├── tear-down-monitoring.sh
├── tear-down-mtls.sh
├── tear-down-multi-listener.sh
├── tear-down-oauth.sh
├── tear-down-performance.sh
├── tear-down-postgres-to-mongo.sh
├── tear-down-principal.sh
├── tear-down-quotas.sh
├── tear-down-racks.sh
├── tear-down-sasl-ssl.sh
├── tear-down-smt-aspectj.sh
├── tear-down-smt-connector.sh
├── tear-down-tracing.sh
├── tear-down-unixcommand-connector.sh
└── tear-down.sh
└── udp-proxy
├── pom.xml
├── src
└── main
│ └── java
│ └── org
│ └── hifly
│ └── udp
│ └── kafka
│ └── multicast
│ ├── Application.java
│ └── MulticastReceiver.java
├── start.sh
├── stop.sh
└── udp_stress_client.py
/.env:
--------------------------------------------------------------------------------
1 | KAFKA_VERSION=4.0.0
2 | CONFLUENT_VERSION=7.9.1
3 | POSTGRES_VERSION=10.5
4 | POSTGRES_ALPINE_VERSION=14.1-alpine
5 | KEYCLOAK_VERSION=legacy
6 | JAEGER_VERSION=latest
7 | OTEL_VERSION=latest
8 | APICURIO_VERSION=2.4.2.Final
9 | KCAT_VERSION=latest
10 | FLINK_VERSION=1.18.1-java17
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. iOS]
28 | - Browser [e.g. chrome, safari]
29 | - Version [e.g. 22]
30 |
31 | **Smartphone (please complete the following information):**
32 | - Device: [e.g. iPhone6]
33 | - OS: [e.g. iOS8.1]
34 | - Browser [e.g. stock browser, safari]
35 | - Version [e.g. 22]
36 |
37 | **Additional context**
38 | Add any other context about the problem here.
39 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Compiled class file
2 | *.class
3 |
4 | # Log file
5 | *.log
6 |
7 | # BlueJ files
8 | *.ctxt
9 |
10 | # Mobile Tools for Java (J2ME)
11 | .mtj.tmp/
12 |
13 | #Idea
14 | .idea
15 | *.iml
16 | *.tlog
17 |
18 | target/
19 | .settings/
20 |
21 | postgres-data/
22 |
23 | # Package Files #
24 | *.war
25 | *.ear
26 | *.zip
27 | *.tar.gz
28 | *.rar
29 |
30 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
31 | hs_err_pid*
32 |
33 | #eclipse
34 | *.project
35 | *.classpath
36 |
37 | #vscode
38 | .vscode
39 |
40 | README.pdf
41 | README.html
42 |
43 | release/search.sh
44 | multi-listener/ssl
45 |
46 | .DS_Store
47 |
--------------------------------------------------------------------------------
/CONTRIBUTING.adoc:
--------------------------------------------------------------------------------
1 | # Pull Request
2 |
3 | All contributions are more than welcome. Just have fun!!!
4 |
5 | 1. First, submit an link:https://github.com/hifly81/kafka-examples/issues[issue] or a link:https://github.com/hifly81/kafka-examples/issues[feature request] using one of the existing templates and describe your proposed change.
6 | 2. Fork the repository.
7 | 3. Clone your fork to your local machine.
8 | 4. Create a new branch and use it only for the changes related to the aforementioned issue or feature request.
9 | 5. Open a pull request against the _main_ branch, describe your changes and reference the issue or feature request.
10 | 6. Add at least 1 reviewer.
11 | 7. Resolve any discussions opened by the reviewers on the pull request.
12 | 8. A reviewer will merge the pull request once all discussions are resolved and all checks pass.
13 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Giovanni Marigi
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/acls/config/acl-config.yaml:
--------------------------------------------------------------------------------
1 | name: "Alice Topic Access"
2 | action: --add
3 | allow_principal: User:alice
4 | operations:
5 | - WRITE
6 | - READ
7 |
8 | resource_pattern_type: literal
9 |
10 | topics:
11 | - test
12 |
13 |
--------------------------------------------------------------------------------
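Note: acl-config.yaml above describes the bindings used by this example (User:alice, READ and WRITE on the literal topic "test"); the module's run-acl.sh is meant to apply them against the broker. Purely as an illustration, and not the repo's own mechanism, the same bindings could be created programmatically with the Kafka AdminClient. The bootstrap address below is an assumption; the repo's admin.properties adds SASL_PLAINTEXT/PLAIN settings on top of it.

    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.common.acl.AccessControlEntry;
    import org.apache.kafka.common.acl.AclBinding;
    import org.apache.kafka.common.acl.AclOperation;
    import org.apache.kafka.common.acl.AclPermissionType;
    import org.apache.kafka.common.resource.PatternType;
    import org.apache.kafka.common.resource.ResourcePattern;
    import org.apache.kafka.common.resource.ResourceType;

    import java.util.List;
    import java.util.Properties;

    public class AclSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092"); // assumption

            try (Admin admin = Admin.create(props)) {
                // Literal pattern on topic "test", mirroring acl-config.yaml.
                ResourcePattern topic = new ResourcePattern(ResourceType.TOPIC, "test", PatternType.LITERAL);
                AclBinding read = new AclBinding(topic,
                        new AccessControlEntry("User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW));
                AclBinding write = new AclBinding(topic,
                        new AccessControlEntry("User:alice", "*", AclOperation.WRITE, AclPermissionType.ALLOW));
                admin.createAcls(List.of(read, write)).all().get();
            }
        }
    }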
/acls/config/admin.properties:
--------------------------------------------------------------------------------
1 | sasl.mechanism=PLAIN
2 | security.protocol=SASL_PLAINTEXT
3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
4 | username="admin" \
5 | password="admin-secret";
--------------------------------------------------------------------------------
/acls/config/alice.properties:
--------------------------------------------------------------------------------
1 | sasl.mechanism=PLAIN
2 | security.protocol=SASL_PLAINTEXT
3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
4 | username="alice" \
5 | password="alice-secret";
--------------------------------------------------------------------------------
/admin-client/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
3 |     <modelVersion>4.0.0</modelVersion>
4 |
5 |     <parent>
6 |         <groupId>org.hifly.kafka</groupId>
7 |         <artifactId>kafka-play</artifactId>
8 |         <version>1.2.1</version>
9 |     </parent>
10 |
11 |     <artifactId>admin-client</artifactId>
12 |     <packaging>jar</packaging>
13 |
14 |     <dependencies>
15 |         <dependency>
16 |             <groupId>org.apache.kafka</groupId>
17 |             <artifactId>kafka_2.13</artifactId>
18 |         </dependency>
19 |
20 |         <dependency>
21 |             <groupId>org.apache.kafka</groupId>
22 |             <artifactId>kafka-clients</artifactId>
23 |         </dependency>
24 |
25 |         <dependency>
26 |             <groupId>org.slf4j</groupId>
27 |             <artifactId>slf4j-simple</artifactId>
28 |         </dependency>
29 |
30 |         <dependency>
31 |             <groupId>junit</groupId>
32 |             <artifactId>junit</artifactId>
33 |         </dependency>
34 |     </dependencies>
35 |
36 | </project>
37 |
38 |
--------------------------------------------------------------------------------
/admin-client/src/main/resources/admin.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=broker:9092
--------------------------------------------------------------------------------
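Note: admin.properties only sets bootstrap.servers=broker:9092; the module's AdminClientWrapper.java (content not included in this dump) builds on the AdminClient API. As a sketch of that API only, not of the wrapper itself, a minimal read-only smoke test; the properties path is the one from the tree above and is used here as an assumption about the working directory.

    import org.apache.kafka.clients.admin.Admin;

    import java.io.FileInputStream;
    import java.util.Properties;

    public class AdminSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Load the module's admin.properties (bootstrap.servers=broker:9092).
            try (FileInputStream in = new FileInputStream("admin-client/src/main/resources/admin.properties")) {
                props.load(in);
            }
            try (Admin admin = Admin.create(props)) {
                // Cluster id and topic names are simple read-only calls, useful as a smoke test.
                System.out.println("cluster id: " + admin.describeCluster().clusterId().get());
                System.out.println("topics: " + admin.listTopics().names().get());
            }
        }
    }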
/avro/car_v1.avsc:
--------------------------------------------------------------------------------
1 | {"schema": "{ \"type\": \"record\", \"name\": \"Car\", \"namespace\": \"org.hifly.kafka.demo.producer.serializer.avro\",\"fields\": [ {\"name\": \"model\",\"type\": \"string\"},{\"name\": \"brand\",\"type\": \"string\"}] }" }
--------------------------------------------------------------------------------
/avro/car_v2.avsc:
--------------------------------------------------------------------------------
1 | {"schema": "{ \"type\": \"record\", \"name\": \"Car\", \"namespace\": \"org.hifly.kafka.demo.producer.serializer.avro\",\"fields\": [ {\"name\": \"engine\",\"type\": \"string\", \"default\":\"diesel\"}, {\"name\": \"model\",\"type\": \"string\"},{\"name\": \"brand\",\"type\": \"string\"}] }" }
--------------------------------------------------------------------------------
/avro/car_v3.avsc:
--------------------------------------------------------------------------------
1 | {"schema": "{ \"type\": \"record\", \"name\": \"Car\", \"namespace\": \"org.hifly.kafka.demo.producer.serializer.avro\",\"fields\": [ {\"name\": \"engine\",\"type\": \"string\"}, {\"name\": \"model\",\"type\": \"string\"},{\"name\": \"brand\",\"type\": \"string\"}] }" }
--------------------------------------------------------------------------------
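Note: the three car_v*.avsc files above wrap each schema version in the {"schema": "..."} envelope expected by the Schema Registry REST API. As a hedged illustration, the inner v1 schema can also be used directly from Java with a GenericRecord and the Confluent Avro serializer; the bootstrap address, schema registry URL and topic name below are assumptions, not values taken from the repo.

    import org.apache.avro.Schema;
    import org.apache.avro.generic.GenericData;
    import org.apache.avro.generic.GenericRecord;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;

    import java.util.Properties;

    public class CarAvroSketch {
        public static void main(String[] args) {
            // Inner schema of avro/car_v1.avsc (the file itself wraps it in a {"schema": ...} envelope).
            Schema schema = new Schema.Parser().parse(
                    "{\"type\":\"record\",\"name\":\"Car\","
                  + "\"namespace\":\"org.hifly.kafka.demo.producer.serializer.avro\","
                  + "\"fields\":[{\"name\":\"model\",\"type\":\"string\"},{\"name\":\"brand\",\"type\":\"string\"}]}");

            GenericRecord car = new GenericData.Record(schema);
            car.put("model", "Dino");
            car.put("brand", "Ferrari");

            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");          // assumption
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "io.confluent.kafka.serializers.KafkaAvroSerializer");
            props.put("schema.registry.url", "http://localhost:8081"); // assumption

            try (KafkaProducer<String, GenericRecord> producer = new KafkaProducer<>(props)) {
                producer.send(new ProducerRecord<>("cars", car));       // hypothetical topic
            }
        }
    }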
/cdc-debezium-informix/config/debezium-source-informix.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "informix-connector",
3 | "config": {
4 | "connector.class": "io.debezium.connector.informix.InformixConnector",
5 | "database.hostname": "ifx",
6 | "database.port": "9088",
7 | "database.user": "informix",
8 | "database.password": "in4mix",
9 | "database.dbname": "iot",
10 | "topic.prefix": "test",
11 | "table.include.list": "iot.informix.cust_db",
12 | "schema.history.internal.kafka.bootstrap.servers": "broker:9092",
13 | "schema.history.internal.kafka.topic": "schemahistory.test",
14 | "schema.history.internal.store.only.captured.tables.ddl": "true",
15 | "snapshot.mode": "always"
16 | }
17 | }
--------------------------------------------------------------------------------
/cdc-debezium-informix/config/informix_ddl_sample.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE cust_db (
2 | c_key decimal(11,0) NOT NULL,
3 | c_status char(1),
4 | c_date date,
5 | PRIMARY KEY (c_key) CONSTRAINT cust_db_pk
6 | );
7 |
8 | INSERT INTO cust_db (c_key, c_status, c_date) VALUES(1111, 'Z','2022-04-18');
9 | INSERT INTO cust_db (c_key, c_status, c_date) VALUES(2222, 'Z','2021-04-18');
10 | INSERT INTO cust_db (c_key, c_status, c_date) VALUES(3333, 'Z','2020-04-18');
11 | INSERT INTO cust_db (c_key, c_status, c_date) VALUES(4444, 'Z','2019-04-18');
12 | INSERT INTO cust_db (c_key, c_status, c_date) VALUES(5555, 'Z','2018-04-18');
13 | INSERT INTO cust_db (c_key, c_status, c_date) VALUES(6666, 'Z','2017-04-18');
--------------------------------------------------------------------------------
/cdc-debezium-informix/jars/ifx-changestream-client-1.1.3.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hifly81/kafka-examples/e7f54a50de7463a435127cc896dd96c6e98a6087/cdc-debezium-informix/jars/ifx-changestream-client-1.1.3.jar
--------------------------------------------------------------------------------
/cdc-debezium-informix/jars/jdbc-4.50.10.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hifly81/kafka-examples/e7f54a50de7463a435127cc896dd96c6e98a6087/cdc-debezium-informix/jars/jdbc-4.50.10.jar
--------------------------------------------------------------------------------
/cdc-debezium-informix/jars/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "name" : "debezium-connector-informix",
3 | "version" : "2.6.1",
4 | "title" : "Kafka Connect Debezium Informix Connector",
5 | "description" : "Kafka Connect Debezium Informix Connector",
6 | "owner" : {
7 | "username" : "",
8 | "name" : ""
9 | },
10 | "support" : {
11 | "summary" : ""
12 | },
13 | "tags" : [ "debezium" ],
14 | "features" : {
15 | "supported_encodings" : [ "any" ],
16 | "single_message_transforms" : true,
17 | "confluent_control_center_integration" : true,
18 | "kafka_connect_api" : true
19 | },
20 | "documentation_url" : "",
21 | "docker_image" : { },
22 | "license" : [ {
23 | "name" : "",
24 | "url" : ""
25 | } ],
26 | "component_types" : [ "source" ],
27 | "release_date" : ""
28 | }
--------------------------------------------------------------------------------
/cdc-debezium-postgres/config/create-tables.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE accounts (
2 | user_id serial PRIMARY KEY,
3 | username VARCHAR ( 50 ) UNIQUE NOT NULL,
4 | password VARCHAR ( 50 ) NOT NULL,
5 | email VARCHAR ( 255 ) UNIQUE NOT NULL,
6 | created_on TIMESTAMP NOT NULL,
7 | last_login TIMESTAMP
8 | );
9 |
10 |
11 | insert into accounts (user_id, username, password, email, created_on, last_login) values (1, 'foo', 'bar', 'foo@bar.com', current_timestamp, current_timestamp);
12 | insert into accounts (user_id, username, password, email, created_on, last_login) values (2, 'foo2', 'bar2', 'foo2@bar.com', current_timestamp, current_timestamp);
--------------------------------------------------------------------------------
/cdc-debezium-postgres/config/debezium-source-pgsql.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "pg-connector",
3 | "config": {
4 | "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
5 | "plugin.name": "pgoutput",
6 | "database.hostname": "postgres",
7 | "database.port": "5432",
8 | "database.user": "postgres",
9 | "database.password": "postgres",
10 | "database.dbname" : "postgres",
11 | "database.server.name": "postgres",
12 | "schema.whitelist": "public"
13 | }
14 | }
--------------------------------------------------------------------------------
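Note: a connector definition like the one above is registered through Kafka Connect's standard REST API (the repo drives this through its bootstrap scripts). Shown here only as an illustration, a minimal Java 11 HttpClient POST of that JSON; the Connect host/port and the relative file path are assumptions.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class RegisterConnectorSketch {
        public static void main(String[] args) throws Exception {
            // Read the connector definition shipped with this example (path relative to the repo root).
            String body = Files.readString(Path.of("cdc-debezium-postgres/config/debezium-source-pgsql.json"));

            // POST /connectors is the standard Kafka Connect endpoint; localhost:8083 is an assumption.
            HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:8083/connectors"))
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(body))
                    .build();

            HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.statusCode() + " " + response.body());
        }
    }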
/claim-check/src/main/java/org/hifly/kafka/demo/claimcheck/model/Item.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.claimcheck.model;
2 |
3 | public class Item {
4 |
5 | private String id;
6 | private String url;
7 |
8 | public String getId() {
9 | return id;
10 | }
11 |
12 | public void setId(String id) {
13 | this.id = id;
14 | }
15 |
16 | public String getUrl() {
17 | return url;
18 | }
19 |
20 | public void setUrl(String url) {
21 | this.url = url;
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/claim-check/src/main/java/org/hifly/kafka/demo/claimcheck/model/ItemJsonSerializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.claimcheck.model;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.apache.kafka.common.serialization.Serializer;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | import java.util.Map;
9 |
10 | public class ItemJsonSerializer implements Serializer<Item> {
11 |
12 | private Logger log = LoggerFactory.getLogger(ItemJsonSerializer.class);
13 |
14 | @Override
15 | public void configure(Map<String, ?> configs, boolean isKey) {}
16 |
17 | @Override
18 | public byte[] serialize(String topic, Item data) {
19 | byte[] retVal = null;
20 | ObjectMapper objectMapper = new ObjectMapper();
21 | try {
22 | retVal = objectMapper.writeValueAsString(data).getBytes();
23 | } catch (Exception exception) {
24 | log.error("Error in serializing object {}", data, exception);
25 | }
26 | return retVal;
27 |
28 | }
29 |
30 | @Override
31 | public void close() {}
32 |
33 | }
--------------------------------------------------------------------------------
/claim-check/src/main/resources/producer.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=
2 | security.protocol=
3 | sasl.kerberos.service.name=
4 | sasl.mechanism=
5 | sasl.jaas.config=
6 | ssl.truststore.location=
7 | ssl.truststore.password=
8 | schema.registry.url=
9 | schema.registry.ssl.truststore.location=
10 | schema.registry.ssl.truststore.password=
11 | basic.auth.credentials.source=
12 | basic.auth.user.info=
--------------------------------------------------------------------------------
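Note: in the claim-check pattern the Kafka record carries only a reference (here Item.url) to a payload stored elsewhere, which is why producer.properties above is just connection/security settings. A minimal sketch of how Runner.java-style code could use ItemJsonSerializer; the bootstrap address, topic name and object-store URL below are assumptions, not the module's actual values.

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.hifly.kafka.demo.claimcheck.model.Item;
    import org.hifly.kafka.demo.claimcheck.model.ItemJsonSerializer;

    import java.util.Properties;

    public class ClaimCheckSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");           // assumption
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", ItemJsonSerializer.class.getName());

            Item item = new Item();
            item.setId("order-1");
            // The claim check: a pointer to the large payload (e.g. an object already uploaded to minio/S3).
            item.setUrl("http://minio:9000/bucket/order-1.bin");         // assumption

            try (KafkaProducer<String, Item> producer = new KafkaProducer<>(props)) {
                producer.send(new ProducerRecord<>("claim-check-topic", item.getId(), item)); // hypothetical topic
            }
        }
    }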
/compression/client-gzip.properties:
--------------------------------------------------------------------------------
1 | compression.type=gzip
2 | linger.ms=0
--------------------------------------------------------------------------------
/compression/client-lz4.properties:
--------------------------------------------------------------------------------
1 | compression.type=lz4
2 | linger.ms=0
--------------------------------------------------------------------------------
/compression/client-none.properties:
--------------------------------------------------------------------------------
1 | compression.type=none
2 | linger.ms=0
--------------------------------------------------------------------------------
/compression/client-snappy.properties:
--------------------------------------------------------------------------------
1 | compression.type=snappy
2 | linger.ms=0
--------------------------------------------------------------------------------
/compression/client-zstd.properties:
--------------------------------------------------------------------------------
1 | compression.type=zstd
2 | linger.ms=0
--------------------------------------------------------------------------------
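Note: each of these property files selects one producer compression codec and sets linger.ms=0, so batches are sent immediately rather than being held back to improve the compression ratio. A hedged Java equivalent of client-zstd.properties; the bootstrap address and topic name are assumptions.

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;

    import java.util.Properties;

    public class CompressionSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumption
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            // Same settings as compression/client-zstd.properties.
            props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "zstd");
            props.put(ProducerConfig.LINGER_MS_CONFIG, 0);

            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                producer.send(new ProducerRecord<>("compression-test", "payload")); // hypothetical topic
            }
        }
    }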
/confluent-avro-hierarchy-event/src/main/resources/car-info.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "type": "record",
3 | "name": "CarInfo",
4 | "namespace": "org.hifly.kafka.demo.avro.references",
5 | "fields": [
6 | {
7 | "name": "model",
8 | "type": "string"
9 | },
10 | {
11 | "name": "brand",
12 | "type": "string"
13 | }
14 | ]
15 | }
--------------------------------------------------------------------------------
/confluent-avro-hierarchy-event/src/main/resources/car-telemetry-data.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "type": "record",
3 | "name": "CarTelemetryData",
4 | "namespace": "org.hifly.kafka.demo.avro.references",
5 | "fields": [
6 | {
7 | "name": "speed",
8 | "type": "double"
9 | },
10 | {
11 | "name": "latitude",
12 | "type": "string"
13 | },
14 | {
15 | "name": "longitude",
16 | "type": "string"
17 | },
18 | {
19 | "name": "info",
20 | "type": "org.hifly.kafka.demo.avro.references.CarInfo"
21 | }
22 |
23 | ]
24 | }
--------------------------------------------------------------------------------
/confluent-avro-hierarchy-event/src/main/resources/consumer.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=
2 | security.protocol=
3 | sasl.kerberos.service.name=
4 | sasl.mechanism=
5 | sasl.jaas.config=
6 | ssl.truststore.location=
7 | ssl.truststore.password=
8 | schema.registry.url=
9 | schema.registry.ssl.truststore.location=
10 | schema.registry.ssl.truststore.password=
11 | basic.auth.credentials.source=
12 | basic.auth.user.info=
--------------------------------------------------------------------------------
/confluent-avro-hierarchy-event/src/main/resources/producer.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=
2 | security.protocol=
3 | sasl.kerberos.service.name=
4 | sasl.mechanism=
5 | sasl.jaas.config=
6 | ssl.truststore.location=
7 | ssl.truststore.password=
8 | schema.registry.url=
9 | schema.registry.ssl.truststore.location=
10 | schema.registry.ssl.truststore.password=
11 | basic.auth.credentials.source=
12 | basic.auth.user.info=
--------------------------------------------------------------------------------
/confluent-avro-multi-event/src/main/resources/car-info.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "type": "record",
3 | "name": "CarInfo",
4 | "namespace": "org.hifly.kafka.demo.avro.references",
5 | "fields": [
6 | {
7 | "name": "model",
8 | "type": "string"
9 | },
10 | {
11 | "name": "brand",
12 | "type": "string"
13 | }
14 | ]
15 | }
--------------------------------------------------------------------------------
/confluent-avro-multi-event/src/main/resources/car-telemetry-data.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "type": "record",
3 | "name": "CarTelemetryData",
4 | "namespace": "org.hifly.kafka.demo.avro.references",
5 | "fields": [
6 | {
7 | "name": "speed",
8 | "type": "double"
9 | },
10 | {
11 | "name": "latitude",
12 | "type": "string"
13 | },
14 | {
15 | "name": "longitude",
16 | "type": "string"
17 | }
18 | ]
19 | }
--------------------------------------------------------------------------------
/confluent-avro-multi-event/src/main/resources/car-telemetry.avsc:
--------------------------------------------------------------------------------
1 | [
2 | "org.hifly.kafka.demo.avro.references.CarInfo",
3 | "org.hifly.kafka.demo.avro.references.CarTelemetryData"
4 | ]
--------------------------------------------------------------------------------
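Note: car-telemetry.avsc is an Avro union of the two record types, the usual way to model a multi-event topic with the Confluent Schema Registry. A sketch of producer settings that typically go with such a union; the exact values used by this module live in RunnerProducer.java and producer.properties, which are not shown here, and the bootstrap address, registry URL and topic name below are assumptions.

    import io.confluent.kafka.serializers.KafkaAvroSerializer;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;
    import org.hifly.kafka.demo.avro.references.CarInfo;
    import org.hifly.kafka.demo.avro.references.CarTelemetryData;

    import java.util.Properties;

    public class MultiEventSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");          // assumption
            props.put("key.serializer", StringSerializer.class.getName());
            props.put("value.serializer", KafkaAvroSerializer.class.getName());
            props.put("schema.registry.url", "http://localhost:8081"); // assumption
            // With a union schema the registered (latest) subject version is used instead of
            // auto-registering each concrete record type.
            props.put("auto.register.schemas", "false");
            props.put("use.latest.version", "true");

            try (KafkaProducer<String, Object> producer = new KafkaProducer<>(props)) {
                CarInfo info = CarInfo.newBuilder().setModel("Dino").setBrand("Ferrari").build();
                CarTelemetryData data = CarTelemetryData.newBuilder()
                        .setSpeed(120.0).setLatitude("45.0").setLongitude("9.0").build();
                // Both event types land on the same topic; hypothetical topic name.
                producer.send(new ProducerRecord<>("car-telemetry", info));
                producer.send(new ProducerRecord<>("car-telemetry", data));
            }
        }
    }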
/confluent-avro-multi-event/src/main/resources/consumer.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=
2 | security.protocol=
3 | sasl.kerberos.service.name=
4 | sasl.mechanism=
5 | sasl.jaas.config=
6 | ssl.truststore.location=
7 | ssl.truststore.password=
8 | schema.registry.url=
9 | schema.registry.ssl.truststore.location=
10 | schema.registry.ssl.truststore.password=
11 | basic.auth.credentials.source=
12 | basic.auth.user.info=
--------------------------------------------------------------------------------
/confluent-avro-multi-event/src/main/resources/producer.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=
2 | security.protocol=
3 | sasl.kerberos.service.name=
4 | sasl.mechanism=
5 | sasl.jaas.config=
6 | ssl.truststore.location=
7 | ssl.truststore.password=
8 | schema.registry.url=
9 | schema.registry.ssl.truststore.location=
10 | schema.registry.ssl.truststore.password=
11 | basic.auth.credentials.source=
12 | basic.auth.user.info=
--------------------------------------------------------------------------------
/confluent-avro-specific-record/produce-avro-records.sh:
--------------------------------------------------------------------------------
1 | # Produce messages
2 | echo -e "\n# Produce messages to cars"
3 | num_messages=1
4 | (for i in `seq 1 $num_messages`; do echo "{\"model\":\"Dino\",\"brand\":\"Ferrari\",\"fuel_supply\":\"diesel\"}" ; done) | \
5 | kafka-avro-console-producer --topic cars \
6 | --broker-list localhost:9092 \
7 | --property value.schema.id=2 \
8 | --property schema.registry.url=http://localhost:8081
--------------------------------------------------------------------------------
/confluent-avro-specific-record/src/main/java/org/hifly/kafka/demo/avro/domain/cdc/operation.java:
--------------------------------------------------------------------------------
1 | /**
2 | * Autogenerated by Avro
3 | *
4 | * DO NOT EDIT DIRECTLY
5 | */
6 | package org.hifly.kafka.demo.avro.domain.cdc;
7 | @org.apache.avro.specific.AvroGenerated
8 | public enum operation implements org.apache.avro.generic.GenericEnumSymbol<operation> {
9 | INSERT, UPDATE, DELETE, REFRESH ;
10 | public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"enum\",\"name\":\"operation\",\"namespace\":\"org.hifly.kafka.demo.avro.domain.cdc\",\"symbols\":[\"INSERT\",\"UPDATE\",\"DELETE\",\"REFRESH\"]}");
11 | public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
12 |
13 | @Override
14 | public org.apache.avro.Schema getSchema() { return SCHEMA$; }
15 | }
16 |
--------------------------------------------------------------------------------
/confluent-avro-specific-record/src/main/resources/car.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "type": "record",
3 | "name": "Car",
4 | "namespace": "org.hifly.kafka.demo.avro.domain",
5 | "fields": [
6 | {
7 | "name": "model",
8 | "type": "string"
9 | },
10 | {
11 | "name": "brand",
12 | "type": "string"
13 | }
14 | ]
15 | }
--------------------------------------------------------------------------------
/confluent-avro-specific-record/src/main/resources/car_v1.avsc:
--------------------------------------------------------------------------------
1 | {"schema": "{\"type\": \"record\",\"name\": \"Car\",\"namespace\": \"org.hifly.kafka.demo.avro.domain\",\"fields\": [{\"name\": \"model\",\"type\": \"string\"},{\"name\": \"brand\",\"type\": \"string\"}]}"}
2 |
--------------------------------------------------------------------------------
/confluent-avro-specific-record/src/main/resources/car_v2.avsc:
--------------------------------------------------------------------------------
1 | {"schema": "{\"type\": \"record\",\"name\": \"Car\",\"namespace\": \"org.hifly.kafka.demo.avro.domain\",\"fields\": [{\"name\": \"model\",\"type\": \"string\"},{\"name\": \"brand\",\"type\": \"string\"},{ \"name\": \"fuel_supply\",\"type\": \"string\",\"default\": \"diesel\"}]}"}
2 |
--------------------------------------------------------------------------------
/confluent-avro-specific-record/src/main/resources/car_v3.avsc:
--------------------------------------------------------------------------------
1 | {"schema": "{\"type\": \"record\",\"name\": \"Car\",\"namespace\": \"org.hifly.kafka.demo.avro.domain\",\"fields\": [{\"engine\": \"model\",\"type\": \"string\"},{\"name\": \"model\",\"type\": \"string\"},{\"name\": \"brand\",\"type\": \"string\"},{ \"name\": \"fuel_supply\",\"type\": \"string\",\"default\": \"diesel\"}]}"}
2 |
--------------------------------------------------------------------------------
/confluent-avro-specific-record/src/main/resources/cdc-key.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "type": "record",
3 | "name": "KeyRecord",
4 | "namespace": "org.hifly.kafka.demo.avro.domain.cdc",
5 | "fields": [
6 | {
7 | "name": "FIELD1",
8 | "type": [
9 | "null",
10 | {
11 | "type": "bytes",
12 | "logicalType": "decimal",
13 | "precision": 3,
14 | "scale": 0
15 | }
16 | ],
17 | "default": null
18 | },
19 | {
20 | "name": "FIELD2",
21 | "type": [
22 | "null",
23 | {
24 | "type": "bytes",
25 | "logicalType": "decimal",
26 | "precision": 3,
27 | "scale": 0
28 | }
29 | ],
30 | "default": null
31 | },
32 | {
33 | "name": "FIELD3",
34 | "type": [
35 | "null",
36 | {
37 | "type": "bytes",
38 | "logicalType": "decimal",
39 | "precision": 6,
40 | "scale": 0
41 | }
42 | ],
43 | "default": null
44 | },
45 | {
46 | "name": "FIELD4",
47 | "type": [
48 | "null",
49 | {
50 | "type": "bytes",
51 | "logicalType": "decimal",
52 | "precision": 8,
53 | "scale": 0
54 | }
55 | ],
56 | "default": null
57 | }
58 | ]
59 | }
--------------------------------------------------------------------------------
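Note: the key fields above are nullable bytes with a decimal logical type, which a plain GenericRecord consumer sees as ByteBuffer unless a conversion is applied. A small sketch of turning such a field back into a BigDecimal with Avro's built-in conversion; the field and record names follow the schema above, everything else is illustrative.

    import org.apache.avro.Conversions;
    import org.apache.avro.Schema;
    import org.apache.avro.generic.GenericRecord;

    import java.math.BigDecimal;
    import java.nio.ByteBuffer;

    public class DecimalFieldSketch {

        // Decode the FIELD1 bytes of a KeyRecord into a BigDecimal.
        static BigDecimal field1(GenericRecord keyRecord) {
            ByteBuffer raw = (ByteBuffer) keyRecord.get("FIELD1");
            if (raw == null) {
                return null; // the union allows null
            }
            // The non-null branch of the ["null", bytes(decimal)] union carries the logical type.
            Schema fieldSchema = keyRecord.getSchema().getField("FIELD1").schema().getTypes().get(1);
            return new Conversions.DecimalConversion()
                    .fromBytes(raw, fieldSchema, fieldSchema.getLogicalType());
        }
    }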
/confluent-avro-specific-record/src/main/resources/consumer.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=
2 | security.protocol=
3 | sasl.kerberos.service.name=
4 | sasl.mechanism=
5 | sasl.jaas.config=
6 | ssl.truststore.location=
7 | ssl.truststore.password=
8 | schema.registry.url=
9 | schema.registry.ssl.truststore.location=
10 | schema.registry.ssl.truststore.password=
11 | basic.auth.credentials.source=
12 | basic.auth.user.info=
--------------------------------------------------------------------------------
/confluent-avro-specific-record/src/main/resources/producer.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=
2 | key.serializer=
3 | value.serializer=
4 | security.protocol=
5 | sasl.jaas.config=
6 | sasl.mechanism=
7 | client.dns.lookup=
8 | session.timeout.ms=
9 | acks=
10 | client.id=
11 | schema.registry.url=
12 | basic.auth.credentials.source=
13 | basic.auth.user.info=
--------------------------------------------------------------------------------
/confluent-for-kubernetes/k8s/confluent-platform-reducted.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: platform.confluent.io/v1beta1
3 | kind: KRaftController
4 | metadata:
5 | name: kraftcontroller
6 | namespace: confluent
7 | spec:
8 | dataVolumeCapacity: 1G
9 | image:
10 | application: docker.io/confluentinc/cp-kafka:7.9.0
11 | init: confluentinc/confluent-init-container:2.11.0
12 | replicas: 1
13 | ---
14 | apiVersion: platform.confluent.io/v1beta1
15 | kind: Kafka
16 | metadata:
17 | name: kafka
18 | namespace: confluent
19 | spec:
20 | replicas: 3
21 | image:
22 | application: confluentinc/cp-kafka:7.9.0
23 | init: confluentinc/confluent-init-container:2.11.0
24 | dataVolumeCapacity: 1Gi
25 | dependencies:
26 | kRaftController:
27 | clusterRef:
28 | name: kraftcontroller
29 | metricReporter:
30 | enabled: false
31 | ---
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 |
4 | broker:
5 | image: apache/kafka:${KAFKA_VERSION}
6 | hostname: broker
7 | container_name: broker
8 | ports:
9 | - "9092:9092"
10 | environment:
11 | KAFKA_NODE_ID: 1
12 | KAFKA_PROCESS_ROLES: 'broker,controller'
13 | KAFKA_CONTROLLER_QUORUM_VOTERS: '1@broker:29093'
14 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT'
15 | KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://broker:9092'
16 | KAFKA_LISTENERS: 'PLAINTEXT://broker:9092,CONTROLLER://broker:29093'
17 | KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
18 | KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
19 | KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
20 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
21 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
22 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
23 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
24 | KAFKA_TOOLS_LOG4J_LOGLEVEL: ERROR
25 | CLUSTER_ID: 'QTnB2tAgTWa1ec5wYon2jg'
26 |
27 | kcat:
28 | image: confluentinc/cp-kcat:${KCAT_VERSION}
29 | hostname: kcat
30 | container_name: kcat
31 | entrypoint: /bin/bash
32 | tty: true
--------------------------------------------------------------------------------
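Note: the broker above advertises PLAINTEXT://broker:9092, so clients must be able to resolve the broker hostname; the bundled kcat container runs inside the compose network for exactly that reason, while a client on the host would need a hosts-file entry or a different advertised listener. A tiny hedged smoke-test producer under that assumption; the topic name is hypothetical.

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;

    import java.util.Properties;

    public class SmokeTestSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            // Must match the advertised listener above; "broker" has to resolve from where this runs.
            props.put("bootstrap.servers", "broker:9092");
            props.put("key.serializer", StringSerializer.class.getName());
            props.put("value.serializer", StringSerializer.class.getName());

            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                producer.send(new ProducerRecord<>("test", "hello")); // hypothetical topic
            }
        }
    }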
/flink-window-tumbling-heartbeat/sql/Dockerfile:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | # inspired by
3 | # - https://github.com/wuchong/flink-sql-demo/tree/v1.11-EN/sql-client
4 | # - https://github.com/theodorecurtil/flink_sql_job
5 | ###############################################################################
6 |
7 | FROM flink:1.18.1-java17
8 |
9 | COPY sql/bin/* /opt/sql-client/
10 | RUN mkdir -p /opt/sql-client/lib
11 |
12 | RUN wget -P /opt/sql-client/lib/ https://repo.maven.apache.org/maven2/org/apache/flink/flink-sql-connector-kafka/3.1.0-1.18/flink-sql-connector-kafka-3.1.0-1.18.jar; \
13 | wget -P /opt/sql-client/lib/ https://repo.maven.apache.org/maven2/org/apache/flink/flink-json/1.18.1/flink-json-1.18.1.jar; \
14 | wget -P /opt/sql-client/lib/ https://repo.maven.apache.org/maven2/org/apache/flink/flink-sql-avro-confluent-registry/1.18.1/flink-sql-avro-confluent-registry-1.18.1.jar;
15 |
16 | COPY sql/conf/* /opt/flink/conf/
17 | COPY sql/app/* /opt/sql-client/app/
18 |
19 | WORKDIR /opt/sql-client
20 | ENV SQL_CLIENT_HOME /opt/sql-client
21 |
22 | COPY sql/docker-entrypoint.sh /
23 | RUN ["chmod", "+x", "/docker-entrypoint.sh"]
24 | ENTRYPOINT ["/docker-entrypoint.sh"]
--------------------------------------------------------------------------------
/flink-window-tumbling-heartbeat/sql/bin/sql-client.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ${FLINK_HOME}/bin/sql-client.sh embedded -d ${FLINK_HOME}/conf/sql-client-conf.yaml -l ${SQL_CLIENT_HOME}/lib
--------------------------------------------------------------------------------
/flink-window-tumbling-heartbeat/sql/conf/flink-conf.yaml:
--------------------------------------------------------------------------------
1 | jobmanager.rpc.address: jobmanager
2 | rest.port: 18081
3 | state.backend: rocksdb
4 | state.backend.incremental: true
5 | state.checkpoint-storage: filesystem
--------------------------------------------------------------------------------
/flink-window-tumbling-heartbeat/sql/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ${FLINK_HOME}/bin/sql-client.sh
4 |
5 | tail -f /dev/null
--------------------------------------------------------------------------------
/images/minio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hifly81/kafka-examples/e7f54a50de7463a435127cc896dd96c6e98a6087/images/minio.png
--------------------------------------------------------------------------------
/images/minio2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hifly81/kafka-examples/e7f54a50de7463a435127cc896dd96c6e98a6087/images/minio2.png
--------------------------------------------------------------------------------
/images/outbox_table.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hifly81/kafka-examples/e7f54a50de7463a435127cc896dd96c6e98a6087/images/outbox_table.png
--------------------------------------------------------------------------------
/images/quotas.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hifly81/kafka-examples/e7f54a50de7463a435127cc896dd96c6e98a6087/images/quotas.png
--------------------------------------------------------------------------------
/images/traces.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hifly81/kafka-examples/e7f54a50de7463a435127cc896dd96c6e98a6087/images/traces.png
--------------------------------------------------------------------------------
/interceptors/src/main/java/org/hifly/kafka/interceptor/consumer/CreditCardConsumerInterceptor.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.interceptor.consumer;
2 |
3 | import org.apache.kafka.clients.consumer.ConsumerInterceptor;
4 | import org.apache.kafka.clients.consumer.ConsumerRecord;
5 | import org.apache.kafka.clients.consumer.ConsumerRecords;
6 | import org.apache.kafka.clients.consumer.OffsetAndMetadata;
7 | import org.apache.kafka.common.TopicPartition;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 |
11 | import java.util.Map;
12 |
13 | public class CreditCardConsumerInterceptor implements ConsumerInterceptor {
14 |
15 | private static final Logger LOGGER = LoggerFactory.getLogger(CreditCardConsumerInterceptor.class);
16 |
17 | @Override
18 | public ConsumerRecords onConsume(ConsumerRecords consumerRecords) {
19 | for (ConsumerRecord record : consumerRecords) {
20 | LOGGER.info("record headers: {}", record.headers());
21 | }
22 | return consumerRecords;
23 | }
24 |
25 | @Override
26 | public void onCommit(Map map) {
27 |
28 | }
29 |
30 | @Override
31 | public void close() {
32 |
33 | }
34 |
35 | @Override
36 | public void configure(Map map) {
37 |
38 | }
39 | }
--------------------------------------------------------------------------------
/interceptors/src/main/java/org/hifly/kafka/interceptor/consumer/CreditCardJsonDeserializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.interceptor.consumer;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.apache.kafka.common.errors.SerializationException;
5 | import org.apache.kafka.common.serialization.Deserializer;
6 | import org.hifly.kafka.interceptor.producer.CreditCard;
7 | import org.slf4j.Logger;
8 | import org.slf4j.LoggerFactory;
9 |
10 | import java.util.Map;
11 |
12 | public class CreditCardJsonDeserializer implements Deserializer<CreditCard> {
13 |
14 | private static final Logger LOGGER = LoggerFactory.getLogger(CreditCardJsonDeserializer.class);
15 |
16 | private ObjectMapper objectMapper = new ObjectMapper();
17 |
18 | @Override
19 | public void configure(Map<String, ?> configs, boolean isKey) {}
20 |
21 | @Override
22 | public CreditCard deserialize(String topic, byte[] data) {
23 | try {
24 | if (data == null){
25 | LOGGER.info("Null received at deserializing");
26 | return null;
27 | }
28 | return objectMapper.readValue(new String(data, "UTF-8"), CreditCard.class);
29 | } catch (Exception e) {
30 | throw new SerializationException("Error when deserializing byte[] to CreditCard", e);
31 | }
32 | }
33 |
34 |
35 | @Override
36 | public void close() {}
37 |
38 | }
--------------------------------------------------------------------------------
/interceptors/src/main/java/org/hifly/kafka/interceptor/consumer/Runner.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.interceptor.consumer;
2 |
3 | import org.apache.kafka.clients.consumer.KafkaConsumer;
4 | import org.hifly.kafka.demo.consumer.core.ConsumerInstance;
5 | import org.hifly.kafka.demo.consumer.core.KafkaConfig;
6 | import org.hifly.kafka.demo.consumer.core.impl.ConsumerHandle;
7 | import org.hifly.kafka.interceptor.producer.CreditCard;
8 |
9 | import java.io.IOException;
10 | import java.util.UUID;
11 |
12 | public class Runner {
13 |
14 | public static void main (String [] args) throws Exception {
15 | pollAutoCommit();
16 | }
17 |
18 | private static void pollAutoCommit() throws IOException {
19 |
20 | KafkaConsumer consumer = new KafkaConsumer<>(
21 | KafkaConfig.loadConfig("consumer-interceptor.properties"));
22 |
23 | new ConsumerInstance(
24 | UUID.randomUUID().toString(),
25 | "test_custom_data",
26 | consumer,
27 | 100,
28 | -1,
29 | true,
30 | false,
31 | true,
32 | new ConsumerHandle(null)).consume();
33 | }
34 |
35 |
36 | }
37 |
38 |
39 |
--------------------------------------------------------------------------------
/interceptors/src/main/java/org/hifly/kafka/interceptor/producer/CreditCard.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.interceptor.producer;
2 |
3 | import java.io.Serializable;
4 |
5 | public class CreditCard implements Serializable {
6 |
7 | private static final long serialVersionUID = 1L;
8 |
9 | private String creditCard;
10 |
11 | public CreditCard() {}
12 |
13 | public String getCreditCard() {
14 | return creditCard;
15 | }
16 |
17 | public void setCreditCard(String creditCard) {
18 | this.creditCard = creditCard;
19 | }
20 |
21 | public String toString() {
22 | return creditCard;
23 | }
24 |
25 |
26 | }
--------------------------------------------------------------------------------
/interceptors/src/main/java/org/hifly/kafka/interceptor/producer/CreditCardJsonSerializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.interceptor.producer;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.apache.kafka.common.serialization.Serializer;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | import java.util.Map;
9 |
10 | public class CreditCardJsonSerializer implements Serializer<CreditCard> {
11 |
12 | private static final Logger LOGGER = LoggerFactory.getLogger(CreditCardJsonSerializer.class);
13 |
14 | @Override
15 | public void configure(Map configs, boolean isKey) {}
16 |
17 | @Override
18 | public byte[] serialize(String topic, CreditCard data) {
19 | byte[] retVal = null;
20 | ObjectMapper objectMapper = new ObjectMapper();
21 | try {
22 | retVal = objectMapper.writeValueAsString(data).getBytes();
23 | } catch (Exception exception) {
24 | LOGGER.error("Error in serializing object {}", data);
25 | }
26 | return retVal;
27 |
28 | }
29 |
30 | @Override
31 | public void close() {}
32 |
33 | }
--------------------------------------------------------------------------------
/interceptors/src/main/java/org/hifly/kafka/interceptor/producer/CreditCardProducerInterceptor.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.interceptor.producer;
2 |
3 | import org.apache.kafka.clients.producer.ProducerInterceptor;
4 | import org.apache.kafka.clients.producer.ProducerRecord;
5 | import org.apache.kafka.clients.producer.RecordMetadata;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | import java.util.Map;
10 |
11 | public class CreditCardProducerInterceptor implements ProducerInterceptor {
12 |
13 | private static final Logger LOGGER = LoggerFactory.getLogger(CreditCardProducerInterceptor.class);
14 |
15 | @Override
16 | public ProducerRecord onSend(ProducerRecord producerRecord) {
17 | CreditCard creditCard = (CreditCard) producerRecord.value();
18 | creditCard.setCreditCard("XXXXXX");
19 | LOGGER.info("record is:{}", producerRecord.value());
20 | return producerRecord;
21 | }
22 |
23 | @Override
24 | public void onAcknowledgement(RecordMetadata recordMetadata, Exception e) {
25 | }
26 |
27 | @Override
28 | public void close() {
29 |
30 | }
31 |
32 | @Override
33 | public void configure(Map map) {
34 |
35 | }
36 | }
--------------------------------------------------------------------------------
/interceptors/src/main/resources/consumer-interceptor.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=localhost:9092
2 | key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
3 | value.deserializer=org.hifly.kafka.interceptor.consumer.CreditCardJsonDeserializer
4 | group.id=consumer-interceptor-g2
5 | auto.offset.reset=earliest
6 | interceptor.classes=org.hifly.kafka.interceptor.consumer.CreditCardConsumerInterceptor
--------------------------------------------------------------------------------
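For context, a minimal producer sketch that would exercise CreditCardProducerInterceptor together with CreditCardJsonSerializer, assuming a broker on localhost:9092 and the test_custom_data topic used by the consumer Runner above (both are assumptions, not taken from a file in this module):

package org.hifly.kafka.interceptor.producer;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class ProducerSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, CreditCardJsonSerializer.class.getName());
        // the interceptor masks the card number before the record leaves the client
        props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, CreditCardProducerInterceptor.class.getName());

        CreditCard card = new CreditCard();
        card.setCreditCard("1234-5678-9012-3456");

        try (KafkaProducer<String, CreditCard> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("test_custom_data", card));
            producer.flush();
        }
    }
}
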
/kafka-clients-graalvm/examples/consumer.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=pkc-03vj5.europe-west8.gcp.confluent.cloud:9092
2 | security.protocol=SASL_SSL
3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='' password='';
4 | sasl.mechanism=PLAIN
5 | client.dns.lookup=use_all_dns_ips
6 | session.timeout.ms=45000
7 | key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
8 | value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
9 | group.id=test-group-cc
--------------------------------------------------------------------------------
/kafka-clients-graalvm/examples/producer.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=pkc-03vj5.europe-west8.gcp.confluent.cloud:9092
2 | security.protocol=SASL_SSL
3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='' password='';
4 | sasl.mechanism=PLAIN
5 | client.dns.lookup=use_all_dns_ips
6 | session.timeout.ms=45000
7 | acks=all
8 | key.serializer=org.apache.kafka.common.serialization.StringSerializer
9 | value.serializer=org.apache.kafka.common.serialization.StringSerializer
--------------------------------------------------------------------------------
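Both property files above are plain client configurations (credentials are intentionally left blank). A minimal sketch of loading one of them into a client, assuming the relative file path and a hypothetical topic name test-topic:

package org.hifly.kafka.graalvm;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class ProducerSketch {

    public static void main(String[] args) throws IOException {
        // load the SASL_SSL client settings shown above
        Properties props = new Properties();
        try (InputStream in = new FileInputStream("examples/producer.properties")) {
            props.load(in);
        }

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("test-topic", "key", "hello from a native image"));
        }
    }
}
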
/kafka-clients-graalvm/kerberos/bind9/Dockerfile:
--------------------------------------------------------------------------------
1 | # Dockerfile for DNS (Bind9)
2 | FROM debian:latest
3 |
4 | RUN apt-get update && apt-get install -y bind9 && apt-get clean
5 |
6 | # Expose the DNS ports
7 | EXPOSE 53/udp 53/tcp
8 |
9 | CMD ["named", "-g"]
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/bind9/db.confluent.io:
--------------------------------------------------------------------------------
1 | $TTL 86400
2 | @ IN SOA dns.confluent.io. root.confluent.io. (
3 | 1 ; Serial
4 | 604800 ; Refresh
5 | 86400 ; Retry
6 | 2419200 ; Expire
7 | 604800) ; Negative Cache TTL
8 |
9 | ; Name servers
10 | IN NS dns.confluent.io.
11 |
12 | ; KDC server
13 | kdc IN A 192.168.0.3
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/bind9/named.conf:
--------------------------------------------------------------------------------
1 | options {
2 | directory "/etc/bind";
3 | forwarders { 8.8.8.8; }; # Optional, to forward queries to external DNS if needed
4 | dnssec-validation no;
5 | listen-on { any; };
6 | };
7 |
8 | zone "confluent.io" {
9 | type master;
10 | file "/etc/bind/zones/db.confluent.io";
11 | };
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/client/client.sasl.jaas.config:
--------------------------------------------------------------------------------
1 | /*
2 | * Credentials to use when connecting to ZooKeeper directly.
3 | *
4 | * Whenever possible you should use the Kafka AdminClient API instead of ZooKeeper.
5 | */
6 | Client {
7 | com.sun.security.auth.module.Krb5LoginModule required
8 | useTicketCache=true;
9 | };
10 |
11 |
12 | /*
13 | * Credentials to connect to Kafka.
14 | */
15 | KafkaClient {
16 | com.sun.security.auth.module.Krb5LoginModule required
17 | useTicketCache=true;
18 | };
19 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/client/command.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=kafka:9093
2 | security.protocol=SASL_PLAINTEXT
3 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
4 | serviceName=kafka \
5 | useTicketCache=true;
6 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/client/confluent.repo:
--------------------------------------------------------------------------------
1 | [Confluent.dist]
2 | name=Confluent repository (dist)
3 | baseurl=https://packages.confluent.io/rpm/5.4/7
4 | gpgcheck=1
5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
6 | enabled=1
7 |
8 | [Confluent]
9 | name=Confluent repository
10 | baseurl=https://packages.confluent.io/rpm/5.4
11 | gpgcheck=1
12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
13 | enabled=1
14 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/client/consumer-nokeytab.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=kafka:9093
2 | security.protocol=SASL_PLAINTEXT
3 | sasl.kerberos.service.name=kafka
4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
5 | useTicketCache=true;
6 | key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
7 | value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
8 | group.id=test-group-cc
9 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/client/consumer.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=kafka:9093
2 | security.protocol=SASL_PLAINTEXT
3 | sasl.kerberos.service.name=kafka
4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required useTicketCache=true useKeyTab=true keyTab="/var/lib/secret/kafka-admin.key" principal="admin/for-kafka@TEST.CONFLUENT.IO";
5 | key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
6 | value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
7 | group.id=test-group-cc
8 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/client/producer-nokeytab.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=kafka:9093
2 | security.protocol=SASL_PLAINTEXT
3 | sasl.kerberos.service.name=kafka
4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required debug=true useTicketCache=true;
5 | key.serializer=org.apache.kafka.common.serialization.StringSerializer
6 | value.serializer=org.apache.kafka.common.serialization.StringSerializer
7 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/client/producer.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=kafka:9093
2 | security.protocol=SASL_PLAINTEXT
3 | sasl.kerberos.service.name=kafka
4 | sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required useTicketCache=true useKeyTab=true keyTab="/var/lib/secret/kafka-admin.key" principal="admin/for-kafka@TEST.CONFLUENT.IO";
5 | key.serializer=org.apache.kafka.common.serialization.StringSerializer
6 | value.serializer=org.apache.kafka.common.serialization.StringSerializer
7 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/kafka/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM centos:centos8
2 | MAINTAINER d.gasparina@gmail.com
3 | ENV container docker
4 |
5 | # 0. Fixing Mirror list for Centos
6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
8 |
9 | # 1. Adding Confluent repository
10 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo
12 | RUN yum clean all
13 |
14 | # 2. Install zookeeper and kafka
15 | RUN yum install -y java-11-openjdk
16 | RUN yum install -y confluent-kafka-2.12
17 | RUN yum install -y confluent-control-center
18 |
19 | # 3. Configure Kafka for Kerberos
20 | RUN yum install -y krb5-workstation krb5-libs
21 | COPY server.properties /etc/kafka/server.properties
22 | COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf
23 |
24 | EXPOSE 9093
25 |
26 | ENV KAFKA_OPTS="-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dzookeeper.sasl.client.username=zkservice"
27 |
28 | CMD kafka-server-start /etc/kafka/server.properties
29 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/kafka/confluent.repo:
--------------------------------------------------------------------------------
1 | [Confluent.dist]
2 | name=Confluent repository (dist)
3 | baseurl=https://packages.confluent.io/rpm/5.4/7
4 | gpgcheck=1
5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
6 | enabled=1
7 |
8 | [Confluent]
9 | name=Confluent repository
10 | baseurl=https://packages.confluent.io/rpm/5.4
11 | gpgcheck=1
12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
13 | enabled=1
14 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/kafka/kafka.sasl.jaas.config:
--------------------------------------------------------------------------------
1 | /*
2 | * The service principal
3 | */
4 | KafkaServer {
5 | com.sun.security.auth.module.Krb5LoginModule required
6 | useKeyTab=true
7 | storeKey=true
8 | keyTab="/var/lib/secret/kafka.key"
9 | principal="kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO";
10 | };
11 |
12 | /*
13 | * Zookeeper client principal
14 | */
15 | Client {
16 | com.sun.security.auth.module.Krb5LoginModule required
17 | useKeyTab=true
18 | storeKey=true
19 | useTicketCache=false
20 | keyTab="/var/lib/secret/zookeeper-client.key"
21 | principal="zkclient@TEST.CONFLUENT.IO";
22 | };
23 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/kdc/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM centos:centos8
2 | MAINTAINER d.gasparina@gmail.com
3 | ENV container docker
4 |
5 | # 0. Fixing Mirror list for Centos
6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
8 |
9 | # 1. Installing Kerberos server, admin and client
10 | RUN yum install -y krb5-server krb5-libs
11 | RUN yum install -y krb5-workstation krb5-libs
12 |
13 | # 2. Configuring Kerberos and KDC
14 | COPY krb5.conf /etc/krb5.conf
15 | RUN mkdir /var/log/kerberos
16 | RUN mkdir /etc/kdc
17 | RUN mkdir -p /var/kerberos/krb5kdc/
18 | RUN ln -s /etc/krb5.conf /etc/kdc/krb5.conf
19 |
20 | EXPOSE 88/tcp
21 | EXPOSE 88/udp
22 | EXPOSE 464/tcp
23 | EXPOSE 464/udp
24 |
25 | RUN kdb5_util -P confluent -r TEST.CONFLUENT.IO create -s
26 |
27 | CMD /usr/sbin/krb5kdc -n
28 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/kdc/krb5.conf:
--------------------------------------------------------------------------------
1 | [libdefaults]
2 | default_realm = TEST.CONFLUENT.IO
3 | forwardable = true
4 | rdns = false
5 | dns_lookup_kdc = no
6 | dns_lookup_realm = no
7 |
8 | [realms]
9 | TEST.CONFLUENT.IO = {
10 | kdc = kdc
11 | admin_server = kadmin
12 | }
13 |
14 | [domain_realm]
15 | .test.confluent.io = TEST.CONFLUENT.IO
16 | test.confluent.io = TEST.CONFLUENT.IO
17 | kerberos-demo.local = TEST.CONFLUENT.IO
18 | .kerberos-demo.local = TEST.CONFLUENT.IO
19 |
20 | [logging]
21 | kdc = FILE:/var/log/kerberos/krb5kdc.log
22 | admin_server = FILE:/var/log/kerberos/kadmin.log
23 | default = FILE:/var/log/kerberos/krb5lib.log
24 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/kdc/krb5_dns.conf:
--------------------------------------------------------------------------------
1 | [libdefaults]
2 | default_realm = TEST.CONFLUENT.IO
3 | forwardable = true
4 | rdns = false
5 | dns_lookup_kdc = true
6 | dns_lookup_realm = no
7 |
8 | [realms]
9 | TEST.CONFLUENT.IO = {
10 | admin_server = kadmin
11 | }
12 |
13 | [domain_realm]
14 | .test.confluent.io = TEST.CONFLUENT.IO
15 | test.confluent.io = TEST.CONFLUENT.IO
16 | kerberos-demo.local = TEST.CONFLUENT.IO
17 | .kerberos-demo.local = TEST.CONFLUENT.IO
18 |
19 | [logging]
20 | kdc = FILE:/var/log/kerberos/krb5kdc.log
21 | admin_server = FILE:/var/log/kerberos/kadmin.log
22 | default = FILE:/var/log/kerberos/krb5lib.log
23 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/zookeeper/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM centos:centos8
2 | MAINTAINER d.gasparina@gmail.com
3 | ENV container docker
4 |
5 | # 0. Fixing Mirror list for Centos
6 | RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
7 | RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
8 |
9 | # 1. Adding Confluent repository
10 | RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
11 | COPY confluent.repo /etc/yum.repos.d/confluent.repo
12 | RUN yum clean all
13 |
14 | # 2. Install zookeeper and kafka
15 | RUN yum install -y java-11-openjdk
16 | RUN yum install -y confluent-kafka-2.12
17 |
18 | # 3. Configure zookeeper for Kerberos
19 | RUN yum install -y krb5-workstation krb5-libs
20 | COPY zookeeper.properties /etc/kafka/zookeeper.properties
21 | COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf
22 |
23 | EXPOSE 2181
24 |
25 | ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf
26 |
27 | CMD zookeeper-server-start /etc/kafka/zookeeper.properties
28 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/zookeeper/confluent.repo:
--------------------------------------------------------------------------------
1 | [Confluent.dist]
2 | name=Confluent repository (dist)
3 | baseurl=https://packages.confluent.io/rpm/5.4/7
4 | gpgcheck=1
5 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
6 | enabled=1
7 |
8 | [Confluent]
9 | name=Confluent repository
10 | baseurl=https://packages.confluent.io/rpm/5.4
11 | gpgcheck=1
12 | gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
13 | enabled=1
14 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/zookeeper/zookeeper.properties:
--------------------------------------------------------------------------------
1 | dataDir=/var/lib/zookeeper
2 | clientPort=2181
3 | maxClientCnxns=0
4 | authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
5 | zookeeper.allowSaslFailedClients=false
6 | requireClientAuthScheme=sasl
7 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/kerberos/zookeeper/zookeeper.sasl.jaas.config:
--------------------------------------------------------------------------------
1 | Server {
2 | com.sun.security.auth.module.Krb5LoginModule required
3 | useKeyTab=true
4 | keyTab="/var/lib/secret/zookeeper.key"
5 | storeKey=true
6 | useTicketCache=false
7 | principal="zkservice/zookeeper.kerberos-demo.local@TEST.CONFLUENT.IO";
8 | };
9 |
10 | Client {
11 | com.sun.security.auth.module.Krb5LoginModule required
12 | useTicketCache=true;
13 | };
14 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/src/main/resources/META-INF/native-image/predefined-classes-config.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "type":"agent-extracted",
4 | "classes":[
5 | ]
6 | }
7 | ]
8 |
9 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/src/main/resources/META-INF/native-image/proxy-config.json:
--------------------------------------------------------------------------------
1 | [
2 | ]
3 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/src/main/resources/META-INF/native-image/resource-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "resources":{
3 | "includes":[{
4 | "pattern":"\\Qkafka/kafka-version.properties\\E"
5 | }, {
6 | "pattern":"\\Qorg/slf4j/impl/StaticLoggerBinder.class\\E"
7 | }]},
8 | "bundles": [
9 | {
10 | "name": "sun.security.util.Resources",
11 | "locales": ["en", "it_IT"]
12 | }
13 | ]
14 | }
15 |
--------------------------------------------------------------------------------
/kafka-clients-graalvm/src/main/resources/META-INF/native-image/serialization-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "types":[
3 | ],
4 | "lambdaCapturingTypes":[
5 | ],
6 | "proxies":[
7 | ]
8 | }
9 |
--------------------------------------------------------------------------------
/kafka-connect-sink-http/config/http_sink.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "SimpleHttpSink",
3 | "config":
4 | {
5 | "topics": "topicA",
6 | "tasks.max": "2",
7 | "connector.class": "io.confluent.connect.http.HttpSinkConnector",
8 | "http.api.url": "http://host.docker.internal:8010/api/message",
9 | "value.converter": "org.apache.kafka.connect.storage.StringConverter",
10 | "confluent.topic.bootstrap.servers": "broker:9092",
11 | "confluent.topic.replication.factor": "1",
12 | "reporter.bootstrap.servers": "broker:9092",
13 | "reporter.result.topic.name": "success-responses",
14 | "reporter.result.topic.replication.factor": "1",
15 | "reporter.error.topic.name": "error-responses",
16 | "reporter.error.topic.replication.factor": "1",
17 | "consumer.override.max.poll.interval.ms": "5000"
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
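The sink above forwards records from topicA to the REST endpoint and overrides max.poll.interval.ms down to 5 seconds, while the Spring controller later in this module deliberately sleeps 8 seconds on its first two requests, so seeding topicA lets you observe the task being kicked out of the group and retrying. A minimal seeding sketch, assuming the broker is also reachable on localhost:9092 from outside the Compose network:

package org.hifly.kafka.connect.http;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class TopicALoader {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // a handful of String records is enough to trigger the slow-endpoint path
            for (int i = 0; i < 10; i++) {
                producer.send(new ProducerRecord<>("topicA", "message-" + i));
            }
        }
    }
}
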
/kafka-connect-sink-http/rest-controller/src/main/java/io/confluent/springboot/kafka/demo/Application.java:
--------------------------------------------------------------------------------
1 | package io.confluent.springboot.kafka.demo;
2 |
3 | import org.springframework.boot.SpringApplication;
4 | import org.springframework.boot.autoconfigure.SpringBootApplication;
5 |
6 | @SpringBootApplication
7 | public class Application {
8 |
9 | public static void main(String[] args) {
10 | SpringApplication.run(Application.class, args);
11 | }
12 |
13 | }
14 |
--------------------------------------------------------------------------------
/kafka-connect-sink-http/rest-controller/src/main/java/io/confluent/springboot/kafka/demo/controller/Controller.java:
--------------------------------------------------------------------------------
1 | package io.confluent.springboot.kafka.demo.controller;
2 |
3 | import org.springframework.http.HttpStatus;
4 | import org.springframework.http.ResponseEntity;
5 | import org.springframework.web.bind.annotation.*;
6 |
7 |
8 | @RestController
9 | public class Controller {
10 |
11 | private int request = 0;
12 |
13 | @PostMapping(value="/api/message")
14 | public ResponseEntity send(@RequestBody String message) {
15 | System.out.println("\n\nRequest:" + request);
16 | if(request < 2) {
17 | try {
18 | request++;
19 | System.out.println("Sleeping...");
20 | Thread.sleep(8000);
21 |
22 | } catch (InterruptedException e) {
23 | throw new RuntimeException(e);
24 | }
25 | }
26 | System.out.println("Message:" + message);
27 | return new ResponseEntity<>(HttpStatus.OK);
28 | }
29 | }
--------------------------------------------------------------------------------
/kafka-connect-sink-http/rest-controller/src/main/resources/application.yml:
--------------------------------------------------------------------------------
1 | server:
2 | port: 8010
--------------------------------------------------------------------------------
/kafka-connect-sink-s3/config/jr.kafka.properties:
--------------------------------------------------------------------------------
1 | # Kafka configuration
2 | # https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md
3 |
4 | bootstrap.servers=broker:9092
5 | client.id=JR
6 | compression.type=gzip
7 | compression.level=9
8 | # statistics.interval.ms=1000
--------------------------------------------------------------------------------
/kafka-connect-sink-s3/config/jr.kafka.schema.registry.properties:
--------------------------------------------------------------------------------
1 | schemaRegistryURL=http://schema-registry:8081
2 | schemaRegistryUser=
3 | schemaRegistryPassword=
--------------------------------------------------------------------------------
/kafka-connect-sink-s3/config/s3_parquet_sink.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "sink-parquet-s3",
3 | "config":
4 | {
5 | "topics": "gaming-player-activity",
6 | "tasks.max": "1",
7 | "connector.class": "io.confluent.connect.s3.S3SinkConnector",
8 | "store.url": "http://minio:9000",
9 | "s3.region": "us-west-2",
10 | "s3.bucket.name": "gaming-player-activity-bucket",
11 | "s3.part.size": "5242880",
12 | "flush.size": "100",
13 | "storage.class": "io.confluent.connect.s3.storage.S3Storage",
14 | "partitioner.class": "io.confluent.connect.storage.partitioner.DefaultPartitioner",
15 | "format.class": "io.confluent.connect.s3.format.parquet.ParquetFormat",
16 | "parquet.codec": "snappy",
17 | "schema.registry.url": "http://schema-registry:8081",
18 | "value.converter": "io.confluent.connect.avro.AvroConverter",
19 | "key.converter": "org.apache.kafka.connect.storage.StringConverter",
20 | "value.converter.schema.registry.url": "http://schema-registry:8081"
21 | }
22 | }
--------------------------------------------------------------------------------
/kafka-connect-sink-s3/config/s3_sink.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "sink-s3",
3 | "config":
4 | {
5 | "topics": "gaming-player-activity",
6 | "tasks.max": "1",
7 | "connector.class": "io.confluent.connect.s3.S3SinkConnector",
8 | "store.url": "http://minio:9000",
9 | "s3.region": "us-west-2",
10 | "s3.bucket.name": "gaming-player-activity-bucket",
11 | "s3.part.size": "5242880",
12 | "flush.size": "100",
13 | "storage.class": "io.confluent.connect.s3.storage.S3Storage",
14 | "format.class": "io.confluent.connect.s3.format.avro.AvroFormat",
15 | "schema.generator.class": "io.confluent.connect.storage.hive.schema.DefaultSchemaGenerator",
16 | "partitioner.class": "io.confluent.connect.storage.partitioner.DefaultPartitioner",
17 | "schema.compatibility": "NONE"
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
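Both S3 sink configs read Avro records from gaming-player-activity; in this demo the records are generated by JR. As an illustration only, a minimal compatible producer sketch, assuming localhost endpoints and a hypothetical two-field schema (the actual JR template fields may differ):

package org.hifly.kafka.connect.s3;

import io.confluent.kafka.serializers.KafkaAvroSerializer;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class GamingActivityProducerSketch {

    // hypothetical schema used only for this sketch
    private static final String SCHEMA = "{\"type\":\"record\",\"name\":\"GamingPlayerActivity\","
            + "\"fields\":[{\"name\":\"player_id\",\"type\":\"string\"},"
            + "{\"name\":\"points\",\"type\":\"int\"}]}";

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaAvroSerializer.class.getName());
        props.put("schema.registry.url", "http://localhost:8081");

        Schema schema = new Schema.Parser().parse(SCHEMA);
        GenericRecord record = new GenericData.Record(schema);
        record.put("player_id", "player-1");
        record.put("points", 42);

        try (KafkaProducer<String, GenericRecord> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("gaming-player-activity", "player-1", record));
        }
    }
}
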
/kafka-connect-source-event-router/config/create-tables.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE outbox_table (
2 | id serial PRIMARY KEY,
3 | aggregate VARCHAR ( 255 ) NOT NULL,
4 | operation VARCHAR ( 255 ) NOT NULL,
5 | payload VARCHAR NOT NULL,
6 | event_time VARCHAR ( 255 ) NOT NULL
7 | );
8 |
9 | insert into outbox_table (id, aggregate, operation, payload, event_time) values (1, 'Consumer Loan', 'CREATE', '{\"event\": {\"type\":\"Mortgage Opening\",\"timestamp\":\"2023-11-20T10:00:00\",\"data\":{\"mortgageId\":\"ABC123\",\"customer\":\"John Doe\",\"amount\":200000,\"duration\": 20}}}','2023-11-20 10:00:00');
10 | insert into outbox_table (id, aggregate, operation, payload, event_time) values (2, 'Consumer Loan', 'INSTALLMENT_PAYMENT', '{\"event\": {\"type\":\"Mortgage Opening\",\"timestamp\":\"2023-11-20T10:00:00\",\"data\":{\"mortgageId\":\"ABC123\",\"customer\":\"John Doe\",\"amount\":200000,\"duration\": 20}}}','2023-12-01 09:30:00');
11 | insert into outbox_table (id, aggregate, operation, payload, event_time) values (3, 'Consumer Loan', 'EARLY_LOAN_CLOSURE', '{\"event\":{\"type\":\"Early Loan Closure\",\"timestamp\":\"2023-11-25T14:15:00\",\"data\":{\"mortgageId\":\"ABC12\",\"closureAmount\":150000,\"closureDate\":\"2023-11-25\",\"paymentMethod\":\"Bank Transfer\",\"transactionNumber\":\"PQR456\"}}}','2023-11-25 09:30:00');
--------------------------------------------------------------------------------
/kafka-connect-source-sap-hana/config/sap_hana_source.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "sap-hana-source",
3 | "config":
4 | {
5 | "topics": "testtopic",
6 | "tasks.max": "1",
7 | "connector.class": "com.sap.kafka.connect.source.hana.HANASourceConnector",
8 | "connection.url": "jdbc:sap://sap:39041/?databaseName=HXE&reconnect=true&statementCacheSize=512",
9 | "connection.user": "LOCALDEV",
10 | "connection.password" : "Localdev1",
11 | "value.converter.schema.registry.url": "http://schema-registry:8081",
12 | "auto.create": "true",
13 | "testtopic.table.name": "\"LOCALDEV\".\"TEST\"",
14 | "key.converter": "io.confluent.connect.avro.AvroConverter",
15 | "key.converter.schema.registry.url": "http://schema-registry:8081",
16 | "value.converter": "io.confluent.connect.avro.AvroConverter",
17 | "value.converter.schema.registry.url": "http://schema-registry:8081"
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/kafka-connect-source-sap-hana/post_start/201_hxe_optimize:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | # Run hxe_optimize.sh
6 | function main() {
7 | case "$_HOOK_START_TYPE" in
8 | initial)
9 | echo "Optimizing HDB server..."
10 | /hana/hooks/post_start/hxe_scripts/hxe_optimize.sh -d
11 | ;;
12 | esac
13 | }
14 |
15 | main
16 |
17 |
--------------------------------------------------------------------------------
/kafka-connect-source-sap-hana/post_start/203_set_hxe_info:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | # Set INSTALL_DATE in hxe_info.txt
6 | function main() {
7 | case "$_HOOK_START_TYPE" in
8 | initial|update)
9 | install_date=`date --utc`
10 | sed -i "s/^INSTALL_DATE.*=.*/INSTALL_DATE=$install_date/" /usr/sap/${SAPSYSTEMNAME}/SYS/global/hdb/hxe_info.txt
11 | ;;
12 | esac
13 | }
14 |
15 | main
16 |
--------------------------------------------------------------------------------
/kafka-connect-source-sap-hana/post_start/999_import_dump:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | #found in /run_hana.sh, hxe_optimize.sh
6 | #during the 'initial' phase there is a key for SYSTEM available
7 | declare -r tenant_store_key=us_key_tenantdb
8 |
9 | # import dump
10 | function main() {
11 | case "$_HOOK_START_TYPE" in
12 | initial)
13 | # create user
14 | /usr/sap/HXE/HDB90/exe/hdbsql -a -x -i 90 -d HXE -U ${tenant_store_key} -B UTF8 "CREATE USER $SCHEMA_NAME PASSWORD \"$SCHEMA_PWD\" NO FORCE_FIRST_PASSWORD_CHANGE" 2>&1
15 | /usr/sap/HXE/HDB90/exe/hdbsql -a -x -i 90 -d HXE -U ${tenant_store_key} -B UTF8 "ALTER USER $SCHEMA_NAME DISABLE PASSWORD LIFETIME" 2>&1
16 | esac
17 | }
18 |
19 | main
20 |
--------------------------------------------------------------------------------
/kafka-connect-task-distribution/config/connector_datagen.json:
--------------------------------------------------------------------------------
1 | {
2 | "name" : "datagen-sample",
3 | "config": {
4 | "connector.class": "io.confluent.kafka.connect.datagen.DatagenConnector",
5 | "kafka.topic" : "topic1",
6 | "key.converter": "org.apache.kafka.connect.storage.StringConverter",
7 | "quickstart" : "pageviews",
8 | "tasks.max" : "4",
9 | "max.interval": 1000,
10 | "iterations": 10000000,
11 | "topic.creation.default.replication.factor": 1,
12 | "topic.creation.default.partitions": 10
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/kafka-consumer-retry-topics/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <modelVersion>4.0.0</modelVersion>
6 |
7 |     <parent>
8 |         <groupId>org.hifly.kafka</groupId>
9 |         <artifactId>kafka-play</artifactId>
10 |         <version>1.2.1</version>
11 |     </parent>
12 |
13 |     <artifactId>kafka-consumer-retry-topics</artifactId>
14 |     <packaging>jar</packaging>
15 |
16 |     <dependencies>
17 |         <dependency>
18 |             <groupId>org.hifly.kafka</groupId>
19 |             <artifactId>kafka-producer</artifactId>
20 |             <version>1.2.1</version>
21 |         </dependency>
22 |         <dependency>
23 |             <groupId>org.hifly.kafka</groupId>
24 |             <artifactId>kafka-consumer</artifactId>
25 |             <version>1.2.1</version>
26 |         </dependency>
27 |         <dependency>
28 |             <groupId>junit</groupId>
29 |             <artifactId>junit</artifactId>
30 |         </dependency>
31 |     </dependencies>
32 | </project>
--------------------------------------------------------------------------------
/kafka-consumer/src/main/java/org/hifly/kafka/demo/consumer/core/AbstractConsumerHandle.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.consumer.core;
2 |
3 | import java.util.Map;
4 |
5 | import org.apache.kafka.clients.consumer.ConsumerRecords;
6 | import org.apache.kafka.clients.consumer.OffsetAndMetadata;
7 | import org.apache.kafka.common.TopicPartition;
8 |
9 | public abstract class AbstractConsumerHandle {
10 |
11 | public abstract void addOffsets(Map<TopicPartition, OffsetAndMetadata> offsets);
12 | public abstract void process(ConsumerRecords consumerRecords, String groupId, String consumerId);
13 | }
14 |
--------------------------------------------------------------------------------
/kafka-consumer/src/main/java/org/hifly/kafka/demo/consumer/core/ConsumerRecordUtil.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.consumer.core;
2 |
3 | import org.apache.kafka.clients.consumer.ConsumerRecord;
4 | import org.slf4j.Logger;
5 | import org.slf4j.LoggerFactory;
6 |
7 | public class ConsumerRecordUtil {
8 |
9 | private static final Logger LOGGER = LoggerFactory.getLogger(ConsumerRecordUtil.class);
10 |
11 | public static void prettyPrinter(String groupId, String consumerId, ConsumerRecord consumerRecord) {
12 | if(consumerRecord != null) {
13 | System.out.printf("Group id %s - Consumer id: %s - Topic: %s - Partition: %s - Offset: %s - Key: %s - Value: %s\n",
14 | groupId,
15 | consumerId,
16 | consumerRecord.topic(),
17 | consumerRecord.partition(),
18 | consumerRecord.offset(),
19 | consumerRecord.key(),
20 | consumerRecord.value());
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/kafka-consumer/src/main/java/org/hifly/kafka/demo/consumer/core/GenericConsumer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.consumer.core;
2 |
3 | import java.util.List;
4 |
5 | public interface GenericConsumer {
6 |
7 | void subscribe(String groupId, String topic, boolean autoCommit);
8 |
9 | void poll(int size, long duration, boolean commitSync);
10 |
11 | boolean assign(String topic, List partitions, boolean autoCommit);
12 |
13 | void shutdown();
14 |
15 | }
--------------------------------------------------------------------------------
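GenericConsumer defines the contract used by the consumer demos in this module. Independent of the repository's own ConsumerInstance, a minimal sketch of how the contract could be satisfied, assuming a localhost broker, String records, Integer partition ids for assign(), and a simple "poll until the duration elapses" reading of the poll() parameters:

package org.hifly.kafka.demo.consumer.core;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;

public class SimpleStringConsumer implements GenericConsumer {

    private KafkaConsumer<String, String> consumer;
    private String groupId = "sketch-group";

    private KafkaConsumer<String, String> create(String groupId, boolean autoCommit) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, String.valueOf(autoCommit));
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        return new KafkaConsumer<>(props);
    }

    @Override
    public void subscribe(String groupId, String topic, boolean autoCommit) {
        this.groupId = groupId;
        consumer = create(groupId, autoCommit);
        consumer.subscribe(Collections.singletonList(topic));
    }

    @Override
    public void poll(int size, long duration, boolean commitSync) {
        // keep polling until 'duration' ms have elapsed; a negative duration means "forever"
        long deadline = System.currentTimeMillis() + duration;
        while (duration < 0 || System.currentTimeMillis() < deadline) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(size));
            for (ConsumerRecord<String, String> record : records)
                ConsumerRecordUtil.prettyPrinter(groupId, "sketch-consumer", record);
            if (commitSync)
                consumer.commitSync();
        }
    }

    @Override
    public boolean assign(String topic, List partitions, boolean autoCommit) {
        // the element type of 'partitions' is assumed to be Integer (partition ids)
        consumer = create(groupId, autoCommit);
        List<TopicPartition> topicPartitions = ((List<Integer>) partitions).stream()
                .map(p -> new TopicPartition(topic, p))
                .collect(Collectors.toList());
        consumer.assign(topicPartitions);
        return !topicPartitions.isEmpty();
    }

    @Override
    public void shutdown() {
        consumer.close();
    }
}
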
/kafka-consumer/src/main/java/org/hifly/kafka/demo/consumer/deserializer/avro/SchemaRegistry.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.consumer.deserializer.avro;
2 |
3 | public enum SchemaRegistry {
4 | CONFLUENT, APICURIO, HORTONWORKS;
5 | }
6 |
--------------------------------------------------------------------------------
/kafka-consumer/src/main/java/org/hifly/kafka/demo/consumer/rack/Runner.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.consumer.rack;
2 |
3 | import org.apache.kafka.clients.consumer.KafkaConsumer;
4 | import org.hifly.kafka.demo.consumer.core.ConsumerInstance;
5 | import org.hifly.kafka.demo.consumer.core.KafkaConfig;
6 | import org.hifly.kafka.demo.consumer.core.impl.ConsumerHandle;
7 |
8 | import java.io.IOException;
9 | import java.util.UUID;
10 |
11 | public class Runner {
12 |
13 | public static void main (String [] args) throws Exception {
14 | pollAutoCommit();
15 | }
16 |
17 | private static void pollAutoCommit() throws IOException {
18 |
19 | KafkaConsumer consumer = new KafkaConsumer<>(
20 | KafkaConfig.loadConfig("consumer-ffetching.properties"));
21 |
22 | new ConsumerInstance(
23 | UUID.randomUUID().toString(),
24 | "topic-regional",
25 | consumer,
26 | 100,
27 | 15500,
28 | true,
29 | false,
30 | true,
31 | new ConsumerHandle(null)).consume();
32 | }
33 |
34 |
35 | }
--------------------------------------------------------------------------------
/kafka-consumer/src/main/java/org/hifly/kafka/demo/consumer/staticmembership/Runner.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.consumer.staticmembership;
2 |
3 | import org.apache.kafka.clients.consumer.KafkaConsumer;
4 | import org.hifly.kafka.demo.consumer.core.ConsumerInstance;
5 | import org.hifly.kafka.demo.consumer.core.KafkaConfig;
6 | import org.hifly.kafka.demo.consumer.core.impl.ConsumerHandle;
7 |
8 | import java.io.IOException;
9 | import java.util.UUID;
10 |
11 | public class Runner {
12 |
13 | public static void main (String [] args) throws Exception {
14 | String fileName = args[0];
15 |
16 | pollAutoCommit(fileName);
17 | }
18 |
19 | private static void pollAutoCommit(String fileName) throws IOException {
20 |
21 | KafkaConsumer consumer = new KafkaConsumer<>(
22 | KafkaConfig.loadConfig(fileName));
23 |
24 | new ConsumerInstance(
25 | UUID.randomUUID().toString(),
26 | "topic1",
27 | consumer,
28 | 100,
29 | 3000000,
30 | true,
31 | false,
32 | true,
33 | new ConsumerHandle(null)).consume();
34 | }
35 |
36 |
37 | }
--------------------------------------------------------------------------------
/kafka-consumer/src/main/java/org/hifly/kafka/demo/consumer/tx/Runner.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.consumer.tx;
2 |
3 | import org.apache.kafka.common.serialization.StringDeserializer;
4 | import org.hifly.kafka.demo.consumer.core.impl.ConsumerHandle;
5 | import org.hifly.kafka.demo.consumer.core.ConsumerInstance;
6 |
7 | import java.util.UUID;
8 |
9 | public class Runner {
10 |
11 | public static void main (String [] args) {
12 | pollAutoCommit();
13 | }
14 |
15 | private static void pollAutoCommit() {
16 |
17 | new ConsumerInstance(
18 | UUID.randomUUID().toString(),
19 | UUID.randomUUID().toString(),
20 | "test-idempotent",
21 | StringDeserializer.class.getName(),
22 | StringDeserializer.class.getName(),
23 | "org.apache.kafka.clients.consumer.RangeAssignor",
24 | "read_committed",
25 | 100,
26 | -1,
27 | true,
28 | false,
29 | true,
30 | new ConsumerHandle(null)).consume();
31 | }
32 | }
33 |
34 |
35 |
--------------------------------------------------------------------------------
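The Runner above consumes with isolation.level=read_committed, so only records from committed transactions become visible to it. A minimal sketch of a transactional producer feeding the same test-idempotent topic, assuming a localhost broker and an arbitrary transactional id:

package org.hifly.kafka.demo.producer.tx;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class TxProducerSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // setting a transactional.id also enables idempotence on the producer
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "tx-producer-1");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.initTransactions();
            producer.beginTransaction();
            for (int i = 0; i < 5; i++)
                producer.send(new ProducerRecord<>("test-idempotent", "key-" + i, "value-" + i));
            // records become visible to the read_committed consumer only after this commit
            producer.commitTransaction();
        }
    }
}
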
/kafka-consumer/src/main/resources/assignment.json:
--------------------------------------------------------------------------------
1 | {"version":1,"partitions":[{"topic":"topic-regional","partition":0,"replicas":[1,2,3]},{"topic":"topic-regional","partition":1,"replicas":[3,2,1]},{"topic":"topic-regional","partition":2,"replicas":[1,3,2]}]}
--------------------------------------------------------------------------------
/kafka-consumer/src/main/resources/consumer-ffetching.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=broker:9092,broker2:9082,broker3:9072
2 | group.id=test-ff
3 | client.rack=dc2
4 | auto.offset.reset=earliest
5 | key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
6 | value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
--------------------------------------------------------------------------------
/kafka-consumer/src/main/resources/consumer-member1.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=broker:9092
2 | group.id=test-membership
3 | group.instance.id=app-1
4 | auto.offset.reset=earliest
5 | key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
6 | value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
--------------------------------------------------------------------------------
/kafka-consumer/src/main/resources/consumer-member2.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=broker:9092
2 | group.id=test-membership
3 | group.instance.id=app-2
4 | auto.offset.reset=earliest
5 | key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
6 | value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
--------------------------------------------------------------------------------
/kafka-consumer/src/main/resources/consumer-member3.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=broker:9092
2 | group.id=test-membership
3 | group.instance.id=app-3
4 | auto.offset.reset=earliest
5 | key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
6 | value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
--------------------------------------------------------------------------------
/kafka-consumer/src/main/resources/consumer.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=
2 | security.protocol=
3 | sasl.kerberos.service.name=
4 | sasl.mechanism=
5 | sasl.jaas.config=
6 | ssl.truststore.location=
7 | ssl.truststore.password=
8 | schema.registry.url=
9 | schema.registry.ssl.truststore.location=
10 | schema.registry.ssl.truststore.password=
11 | basic.auth.credentials.source=
12 | basic.auth.user.info=
--------------------------------------------------------------------------------
/kafka-distributed-tracing/app/opentelemetry-javaagent.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hifly81/kafka-examples/e7f54a50de7463a435127cc896dd96c6e98a6087/kafka-distributed-tracing/app/opentelemetry-javaagent.jar
--------------------------------------------------------------------------------
/kafka-distributed-tracing/otel-collector-config.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | otlp:
3 | protocols:
4 | grpc:
5 | endpoint:
6 |
7 | exporters:
8 | otlp:
9 | endpoint: jaeger:4317
10 | tls:
11 | insecure: true
12 |
13 | processors:
14 | batch:
15 |
16 | extensions:
17 | health_check:
18 |
19 | service:
20 | extensions: [health_check]
21 | pipelines:
22 | traces:
23 | receivers: [otlp]
24 | processors: [batch]
25 | exporters: [otlp]
26 |
--------------------------------------------------------------------------------
/kafka-microprofile2-consumer/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM websphere-liberty:microProfile2
2 |
3 | COPY target/kafka-microprofile2-consumer-1.2.1.war /config/dropins/
--------------------------------------------------------------------------------
/kafka-microprofile2-consumer/README.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hifly81/kafka-examples/e7f54a50de7463a435127cc896dd96c6e98a6087/kafka-microprofile2-consumer/README.txt
--------------------------------------------------------------------------------
/kafka-microprofile2-consumer/infrastructure/envs/template.vars:
--------------------------------------------------------------------------------
1 | APP=alm-portal
2 | DOCKER_IMAGE=
3 | TAG=latest
--------------------------------------------------------------------------------
/kafka-microprofile2-consumer/infrastructure/envs/test/jvm.options:
--------------------------------------------------------------------------------
1 | -Xshareclasses:name=docker
--------------------------------------------------------------------------------
/kafka-microprofile2-consumer/src/main/java/org/hifly/kafka/order/process/OrderProcessApp.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.order.process;
2 |
3 | import org.hifly.kafka.order.process.controller.OrderProcessController;
4 |
5 | import javax.servlet.ServletException;
6 | import javax.servlet.annotation.WebServlet;
7 | import javax.servlet.http.HttpServlet;
8 | import javax.servlet.http.HttpServletRequest;
9 | import javax.servlet.http.HttpServletResponse;
10 | import java.io.IOException;
11 |
12 | @WebServlet(name = "OrderApp", urlPatterns = "/order")
13 | public class OrderProcessApp extends HttpServlet {
14 |
15 | private static final long serialVersionUID = 1L;
16 |
17 | @Override
18 | protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
19 | OrderProcessController orderProcessController = new OrderProcessController();
20 | orderProcessController.receiveOrders(3, "group-1", -1, 10);
21 | }
22 |
23 | }
24 |
--------------------------------------------------------------------------------
/kafka-microprofile2-consumer/src/main/java/org/hifly/kafka/order/process/consumer/OrderEventJsonDeserializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.order.process.consumer;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.hifly.kafka.order.process.event.OrderEvent;
5 | import org.apache.kafka.common.serialization.Deserializer;
6 |
7 | import java.io.IOException;
8 | import java.util.Map;
9 |
10 | public class OrderEventJsonDeserializer implements Deserializer<OrderEvent> {
11 |
12 | private ObjectMapper objectMapper;
13 |
14 | @Override
15 | public void configure(Map configs, boolean isKey) {
16 | this.objectMapper = new ObjectMapper();
17 | }
18 |
19 |
20 | @Override
21 | public OrderEvent deserialize(String s, byte[] data) {
22 | try {
23 | return objectMapper.readValue(data, OrderEvent.class);
24 | } catch (IOException e) {
25 | e.printStackTrace();
26 | }
27 | return null;
28 | }
29 |
30 | @Override
31 | public void close() { }
32 | }
--------------------------------------------------------------------------------
/kafka-microprofile2-consumer/src/main/java/org/hifly/kafka/order/process/event/ItemEvent.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.order.process.event;
2 |
3 | import java.util.Date;
4 |
5 | public class ItemEvent {
6 |
7 | private String id;
8 | private String name;
9 | private Date timestamp;
10 | private String orderId;
11 | private double price;
12 |
13 | public String getId() {
14 | return id;
15 | }
16 |
17 | public void setId(String id) {
18 | this.id = id;
19 | }
20 |
21 | public String getName() {
22 | return name;
23 | }
24 |
25 | public void setName(String name) {
26 | this.name = name;
27 | }
28 |
29 | public Date getTimestamp() {
30 | return timestamp;
31 | }
32 |
33 | public void setTimestamp(Date timestamp) {
34 | this.timestamp = timestamp;
35 | }
36 |
37 | public String getOrderId() {
38 | return orderId;
39 | }
40 |
41 | public void setOrderId(String orderId) {
42 | this.orderId = orderId;
43 | }
44 |
45 | public double getPrice() {
46 | return price;
47 | }
48 |
49 | public void setPrice(double price) {
50 | this.price = price;
51 | }
52 |
53 | public String toString() {
54 | return "ItemEvent: " + id + "-" + orderId + "-" + timestamp;
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/kafka-microprofile2-producer/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM websphere-liberty:microProfile2
2 |
3 | COPY target/kafka-microprofile2-producer-1.2.1.war /config/dropins/
--------------------------------------------------------------------------------
/kafka-microprofile2-producer/README.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hifly81/kafka-examples/e7f54a50de7463a435127cc896dd96c6e98a6087/kafka-microprofile2-producer/README.txt
--------------------------------------------------------------------------------
/kafka-microprofile2-producer/infrastructure/envs/template.vars:
--------------------------------------------------------------------------------
1 | APP=alm-portal
2 | DOCKER_IMAGE=
3 | TAG=latest
--------------------------------------------------------------------------------
/kafka-microprofile2-producer/infrastructure/envs/test/jvm.options:
--------------------------------------------------------------------------------
1 | -Xshareclasses:name=docker
--------------------------------------------------------------------------------
/kafka-microprofile2-producer/src/main/java/org/hifly/kafka/order/event/ItemEvent.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.order.event;
2 |
3 | import java.util.Date;
4 |
5 | public class ItemEvent {
6 |
7 | private String id;
8 | private String name;
9 | private Date timestamp;
10 | private String orderId;
11 | private double price;
12 |
13 | public String getId() {
14 | return id;
15 | }
16 |
17 | public void setId(String id) {
18 | this.id = id;
19 | }
20 |
21 | public String getName() {
22 | return name;
23 | }
24 |
25 | public void setName(String name) {
26 | this.name = name;
27 | }
28 |
29 | public Date getTimestamp() {
30 | return timestamp;
31 | }
32 |
33 | public void setTimestamp(Date timestamp) {
34 | this.timestamp = timestamp;
35 | }
36 |
37 | public String getOrderId() {
38 | return orderId;
39 | }
40 |
41 | public void setOrderId(String orderId) {
42 | this.orderId = orderId;
43 | }
44 |
45 | public double getPrice() {
46 | return price;
47 | }
48 |
49 | public void setPrice(double price) {
50 | this.price = price;
51 | }
52 |
53 | public String toString() {
54 | return "ItemEvent: " + id + "-" + orderId + "-" + timestamp;
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/kafka-microprofile2-producer/src/main/java/org/hifly/kafka/order/model/Order.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.order.model;
2 |
3 | import java.util.List;
4 |
5 | public class Order {
6 |
7 | private String id;
8 | private String name;
9 |
10 | private List items;
11 |
12 | public String getId() {
13 | return id;
14 | }
15 |
16 | public void setId(String id) {
17 | this.id = id;
18 | }
19 |
20 | public String getName() {
21 | return name;
22 | }
23 |
24 | public void setName(String name) {
25 | this.name = name;
26 | }
27 |
28 | public List getItems() {
29 | return items;
30 | }
31 |
32 | public void setItems(List items) {
33 | this.items = items;
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/kafka-microprofile2-producer/src/main/java/org/hifly/kafka/order/model/OrderItem.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.order.model;
2 |
3 | public class OrderItem {
4 |
5 | private String id;
6 | private String name;
7 | private double price;
8 | private Order order;
9 |
10 |
11 | public String getId() {
12 | return id;
13 | }
14 |
15 | public void setId(String id) {
16 | this.id = id;
17 | }
18 |
19 | public String getName() {
20 | return name;
21 | }
22 |
23 | public void setName(String name) {
24 | this.name = name;
25 | }
26 |
27 | public double getPrice() {
28 | return price;
29 | }
30 |
31 | public void setPrice(double price) {
32 | this.price = price;
33 | }
34 |
35 | public Order getOrder() {
36 | return order;
37 | }
38 |
39 | public void setOrder(Order order) {
40 | this.order = order;
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/kafka-microprofile2-producer/src/main/java/org/hifly/kafka/order/producer/OrderEventJsonSerializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.order.producer;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.hifly.kafka.order.event.OrderEvent;
5 | import org.apache.kafka.common.serialization.Serializer;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | import java.util.Map;
10 |
11 | public class OrderEventJsonSerializer implements Serializer<OrderEvent> {
12 |
13 | private Logger log = LoggerFactory.getLogger(OrderEventJsonSerializer.class);
14 |
15 | @Override
16 | public void configure(Map configs, boolean isKey) {}
17 |
18 | @Override
19 | public byte[] serialize(String topic, OrderEvent data) {
20 | byte[] retVal = null;
21 | ObjectMapper objectMapper = new ObjectMapper();
22 | try {
23 | retVal = objectMapper.writeValueAsString(data).getBytes();
24 | } catch (Exception exception) {
25 | log.error("Error in serializing object {}", data, exception);
26 | }
27 | return retVal;
28 |
29 | }
30 |
31 | @Override
32 | public void close() {}
33 |
34 | }
--------------------------------------------------------------------------------
/kafka-oauth-kip-768/client-oauth.properties:
--------------------------------------------------------------------------------
1 | security.protocol=SASL_PLAINTEXT
2 | sasl.mechanism=OAUTHBEARER
3 | sasl.login.callback.handler.class=org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerLoginCallbackHandler
4 | sasl.login.connect.timeout.ms=15000
5 | sasl.oauthbearer.token.endpoint.url=http://localhost:8080/auth/realms/kafka/protocol/openid-connect/token
6 | sasl.oauthbearer.expected.audience=account
7 | sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required clientId="kafka_user" clientSecret="";
8 |
--------------------------------------------------------------------------------
/kafka-oauth-kip-768/docker-compose-idp.yml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 |
4 | postgres:
5 | image: postgres:${POSTGRES_ALPINE_VERSION}
6 | container_name: postgres
7 | restart: always
8 | environment:
9 | - POSTGRES_USER=postgres
10 | - POSTGRES_PASSWORD=postgres
11 | ports:
12 | - '5432:5432'
13 | volumes:
14 | - db:/var/lib/postgresql/data
15 |
16 | keycloak:
17 | image: quay.io/keycloak/keycloak:${KEYCLOAK_VERSION}
18 | container_name: keycloak
19 | environment:
20 | DB_VENDOR: POSTGRES
21 | DB_ADDR: postgres
22 | DB_DATABASE: postgres
23 | DB_USER: postgres
24 | DB_SCHEMA: public
25 | DB_PASSWORD: postgres
26 | KEYCLOAK_USER: admin
27 | KEYCLOAK_PASSWORD: Pa55w0rd
28 | ports:
29 | - 8080:8080
30 | depends_on:
31 | - postgres
32 |
33 | volumes:
34 | db:
35 | driver: local
36 |
--------------------------------------------------------------------------------
/kafka-orders-tx/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <modelVersion>4.0.0</modelVersion>
6 |
7 |     <parent>
8 |         <groupId>org.hifly.kafka</groupId>
9 |         <artifactId>kafka-play</artifactId>
10 |         <version>1.2.1</version>
11 |     </parent>
12 |
13 |     <artifactId>kafka-orders-tx</artifactId>
14 |     <packaging>jar</packaging>
15 |
16 |     <dependencies>
17 |         <dependency>
18 |             <groupId>org.apache.kafka</groupId>
19 |             <artifactId>kafka_2.13</artifactId>
20 |         </dependency>
21 |         <dependency>
22 |             <groupId>org.hifly.kafka</groupId>
23 |             <artifactId>kafka-producer</artifactId>
24 |             <version>1.2.1</version>
25 |         </dependency>
26 |         <dependency>
27 |             <groupId>org.slf4j</groupId>
28 |             <artifactId>slf4j-simple</artifactId>
29 |         </dependency>
30 |         <dependency>
31 |             <groupId>junit</groupId>
32 |             <artifactId>junit</artifactId>
33 |         </dependency>
34 |         <dependency>
35 |             <groupId>org.apache.curator</groupId>
36 |             <artifactId>curator-test</artifactId>
37 |         </dependency>
38 |     </dependencies>
39 | </project>
--------------------------------------------------------------------------------
/kafka-orders-tx/src/main/java/org/hifly/kafka/demo/orders/ItemsConsumer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.orders;
2 |
3 | import org.hifly.kafka.demo.orders.controller.ItemController;
4 |
5 | public class ItemsConsumer {
6 |
7 | public static void main(String[] args) {
8 |
9 | ItemController itemController = new ItemController();
10 | itemController.generateOrders(false, 60, false);
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/kafka-orders-tx/src/main/java/org/hifly/kafka/demo/orders/ItemsProducer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.orders;
2 |
3 | import org.hifly.kafka.demo.orders.controller.ItemController;
4 | import org.hifly.kafka.demo.orders.model.Item;
5 |
6 | import java.util.Arrays;
7 |
8 | public class ItemsProducer {
9 |
10 | public static void main(String [] args) {
11 |
12 | Item item1 = new Item();
13 | item1.setId("111");
14 | item1.setCost(15.5f);
15 | item1.setDescription("Laptop Bag");
16 | //same order id
17 | item1.setOrderId("OD001");
18 |
19 | Item item2 = new Item();
20 | item2.setId("112");
21 | item2.setCost(25.8f);
22 | item2.setDescription("Gameboy");
23 | //same order id
24 | item2.setOrderId("OD001");
25 |
26 | ItemController itemController = new ItemController();
27 | itemController.sendItems(Arrays.asList(item1, item2), 5000);
28 |
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/kafka-orders-tx/src/main/java/org/hifly/kafka/demo/orders/kafka/consumer/ItemJsonDeserializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.orders.kafka.consumer;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.hifly.kafka.demo.orders.model.Item;
5 | import org.apache.kafka.common.serialization.Deserializer;
6 |
7 | import java.io.IOException;
8 | import java.util.Map;
9 |
10 | public class ItemJsonDeserializer implements Deserializer<Item> {
11 |
12 | private ObjectMapper objectMapper;
13 |
14 | @Override
15 | public void configure(Map configs, boolean isKey) {
16 | this.objectMapper = new ObjectMapper();
17 | }
18 |
19 |
20 | @Override
21 | public Item deserialize(String s, byte[] data) {
22 | try {
23 | return objectMapper.readValue(data, Item.class);
24 | } catch (IOException e) {
25 | e.printStackTrace();
26 | }
27 | return null;
28 | }
29 |
30 | @Override
31 | public void close() { }
32 | }
--------------------------------------------------------------------------------
/kafka-orders-tx/src/main/java/org/hifly/kafka/demo/orders/kafka/producer/ItemJsonSerializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.orders.kafka.producer;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.hifly.kafka.demo.orders.model.Item;
5 | import org.apache.kafka.common.serialization.Serializer;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | import java.util.Map;
10 |
11 | public class ItemJsonSerializer implements Serializer<Item> {
12 |
13 | private Logger log = LoggerFactory.getLogger(ItemJsonSerializer.class);
14 |
15 | @Override
16 | public void configure(Map configs, boolean isKey) {}
17 |
18 | @Override
19 | public byte[] serialize(String topic, Item data) {
20 | byte[] retVal = null;
21 | ObjectMapper objectMapper = new ObjectMapper();
22 | try {
23 | retVal = objectMapper.writeValueAsString(data).getBytes();
24 | } catch (Exception exception) {
25 | log.error("Error in serializing object {}", data, exception);
26 | }
27 | return retVal;
28 |
29 | }
30 |
31 | @Override
32 | public void close() {}
33 |
34 | }
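
A short sketch of plugging this serializer into a plain producer; the broker address and topic name are assumptions (the demo itself goes through ItemController).

// Sketch: use ItemJsonSerializer directly with a KafkaProducer.
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.hifly.kafka.demo.orders.model.Item;

import java.util.Properties;

public class ItemProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.hifly.kafka.demo.orders.kafka.producer.ItemJsonSerializer");

        Item item = new Item();
        item.setId("111");
        item.setCost(15.5f);
        item.setDescription("Laptop Bag");
        item.setOrderId("OD001");

        try (KafkaProducer<String, Item> producer = new KafkaProducer<>(props)) {
            // The value is written as JSON bytes by ItemJsonSerializer.
            producer.send(new ProducerRecord<>("items", item.getOrderId(), item));
        }
    }
}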
--------------------------------------------------------------------------------
/kafka-orders-tx/src/main/java/org/hifly/kafka/demo/orders/kafka/producer/OrderJsonSerializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.orders.kafka.producer;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.hifly.kafka.demo.orders.model.Order;
5 | import org.apache.kafka.common.serialization.Serializer;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | import java.util.Map;
10 |
11 | public class OrderJsonSerializer implements Serializer<Order> {
12 |
13 | private Logger log = LoggerFactory.getLogger(OrderJsonSerializer.class);
14 |
15 | @Override
16 | public void configure(Map configs, boolean isKey) {}
17 |
18 | @Override
19 | public byte[] serialize(String topic, Order data) {
20 | byte[] retVal = null;
21 | ObjectMapper objectMapper = new ObjectMapper();
22 | try {
23 | retVal = objectMapper.writeValueAsString(data).getBytes();
24 | } catch (Exception exception) {
25 | log.error("Error in serializing object {}", data, exception);
26 | }
27 | return retVal;
28 |
29 | }
30 |
31 | @Override
32 | public void close() {}
33 |
34 | }
--------------------------------------------------------------------------------
/kafka-orders-tx/src/main/java/org/hifly/kafka/demo/orders/model/Item.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.orders.model;
2 |
3 | public class Item {
4 |
5 | private String id;
6 | private String description;
7 | private Float cost;
8 | private String orderId;
9 |
10 |
11 | public String getId() {
12 | return id;
13 | }
14 |
15 | public void setId(String id) {
16 | this.id = id;
17 | }
18 |
19 | public String getDescription() {
20 | return description;
21 | }
22 |
23 | public void setDescription(String description) {
24 | this.description = description;
25 | }
26 |
27 | public Float getCost() {
28 | return cost;
29 | }
30 |
31 | public void setCost(Float cost) {
32 | this.cost = cost;
33 | }
34 |
35 | public String getOrderId() {
36 | return orderId;
37 | }
38 |
39 | public void setOrderId(String orderId) {
40 | this.orderId = orderId;
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/kafka-orders-tx/src/main/java/org/hifly/kafka/demo/orders/model/Order.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.orders.model;
2 |
3 | import java.util.List;
4 |
5 | public class Order {
6 |
7 | private String id;
8 | private List<Item> items;
9 |
10 | public String getId() {
11 | return id;
12 | }
13 |
14 | public void setId(String id) {
15 | this.id = id;
16 | }
17 |
18 | public List<Item> getItems() {
19 | return items;
20 | }
21 |
22 | public void setItems(List<Item> items) {
23 | this.items = items;
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/kafka-producer/src/main/java/org/hifly/kafka/demo/producer/AbstractKafkaProducer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.producer;
2 |
3 | import org.apache.kafka.clients.producer.Producer;
4 |
5 | public abstract class AbstractKafkaProducer {
6 |
7 | protected Producer producer;
8 |
9 | public Producer getProducer() {
10 | return producer;
11 | }
12 |
13 | }
14 |
--------------------------------------------------------------------------------
/kafka-producer/src/main/java/org/hifly/kafka/demo/producer/IKafkaProducer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.producer;
2 |
3 | import org.apache.kafka.clients.producer.Callback;
4 | import org.apache.kafka.clients.producer.Producer;
5 | import org.apache.kafka.clients.producer.ProducerRecord;
6 | import org.apache.kafka.clients.producer.RecordMetadata;
7 |
8 | import java.util.concurrent.Future;
9 |
10 | public interface IKafkaProducer {
11 |
12 | void start();
13 |
14 | void start(Producer kafkaProducer);
15 |
16 | void stop();
17 |
18 | Future produceFireAndForget(ProducerRecord producerRecord);
19 |
20 | RecordMetadata produceSync(ProducerRecord producerRecord);
21 |
22 | void produceAsync(ProducerRecord producerRecord, Callback callback);
23 |
24 | }
25 |
--------------------------------------------------------------------------------
/kafka-producer/src/main/java/org/hifly/kafka/demo/producer/ProducerCallback.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.producer;
2 |
3 | import org.apache.kafka.clients.producer.Callback;
4 | import org.apache.kafka.clients.producer.RecordMetadata;
5 |
6 | public class ProducerCallback implements Callback {
7 | @Override
8 | public void onCompletion(RecordMetadata recordMetadata, Exception e) {
9 | if (e != null)
10 | e.printStackTrace();
11 | RecordMetadataUtil.prettyPrinter(recordMetadata);
12 | }
13 | }
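
A short sketch of attaching this callback to an asynchronous send; the broker address and topic are assumptions.

// Sketch: register ProducerCallback on producer.send() so the ack metadata is printed on completion.
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.hifly.kafka.demo.producer.ProducerCallback;

import java.util.Properties;

public class CallbackExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // onCompletion prints topic/partition/offset via RecordMetadataUtil once the broker responds.
            producer.send(new ProducerRecord<>("demo", "key", "value"), new ProducerCallback());
        }
    }
}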
--------------------------------------------------------------------------------
/kafka-producer/src/main/java/org/hifly/kafka/demo/producer/RecordMetadataUtil.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.producer;
2 |
3 | import org.apache.kafka.clients.producer.RecordMetadata;
4 |
5 | public class RecordMetadataUtil {
6 |
7 | public static void prettyPrinter(RecordMetadata recordMetadata) {
8 | if(recordMetadata != null) {
9 | System.out.printf("Topic: %s - Partition: %d - Offset: %d\n",
10 | recordMetadata.topic(),
11 | recordMetadata.partition(),
12 | recordMetadata.offset());
13 | }
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/kafka-producer/src/main/java/org/hifly/kafka/demo/producer/partitioner/custom/UserPartitioner.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.producer.partitioner.custom;
2 |
3 | import org.apache.kafka.clients.producer.Partitioner;
4 | import org.apache.kafka.common.Cluster;
5 | import org.apache.kafka.common.PartitionInfo;
6 |
7 | import java.util.List;
8 | import java.util.Map;
9 |
10 | public class UserPartitioner implements Partitioner {
11 |
12 | public void configure(Map configs) { }
13 |
14 |
15 | @Override
16 | public int partition(String topic, Object key, byte[] bytes, Object o1, byte[] bytes1, Cluster cluster) {
17 | List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
18 | int numPartitions = partitions.size();
19 |
20 | if(numPartitions < 3)
21 | throw new IllegalStateException("not enough partitions!");
22 |
23 | if (((String) key).equals("Mark"))
24 | return 0;
25 |
26 | if (((String) key).equals("Antony"))
27 | return 1;
28 |
29 | if (((String) key).equals("Paul"))
30 | return 2;
31 |
32 | return 0;
33 | }
34 |
35 | public void close() { }
36 | }
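
A hedged sketch of registering this partitioner on a producer; the target topic must have at least three partitions, and the broker address and topic name are assumptions.

// Sketch: route records by key (Mark -> partition 0, Antony -> 1, Paul -> 2) via partitioner.class.
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class UserPartitionerExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "org.hifly.kafka.demo.producer.partitioner.custom.UserPartitioner");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // With the custom partitioner this record lands on partition 1.
            producer.send(new ProducerRecord<>("users", "Antony", "hello"));
        }
    }
}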
--------------------------------------------------------------------------------
/kafka-producer/src/main/java/org/hifly/kafka/demo/producer/serializer/avro/SchemaRegistry.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.producer.serializer.avro;
2 |
3 | public enum SchemaRegistry {
4 | CONFLUENT, APICURIO, HORTONWORKS;
5 | }
6 |
--------------------------------------------------------------------------------
/kafka-producer/src/main/java/org/hifly/kafka/demo/producer/serializer/avro/model/Car.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.producer.serializer.avro.model;
2 |
3 | import java.io.Serializable;
4 |
5 | public class Car implements Serializable {
6 |
7 | private static final long serialVersionUID = 6214326742498643495L;
8 |
9 | private String model;
10 |
11 | private String brand;
12 |
13 | public String getModel() {
14 | return model;
15 | }
16 |
17 | public void setModel(String model) {
18 | this.model = model;
19 | }
20 |
21 | public String getBrand() {
22 | return brand;
23 | }
24 |
25 | public void setBrand(String brand) {
26 | this.brand = brand;
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/kafka-producer/src/main/java/org/hifly/kafka/demo/producer/serializer/json/AuditItemJsonSerializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.producer.serializer.json;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.hifly.kafka.demo.producer.serializer.model.AuditItem;
5 | import org.apache.kafka.common.serialization.Serializer;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | import java.util.Map;
10 |
11 | public class AuditItemJsonSerializer implements Serializer<AuditItem> {
12 |
13 | private static final Logger LOGGER = LoggerFactory.getLogger(AuditItemJsonSerializer.class);
14 |
15 | @Override
16 | public void configure(Map configs, boolean isKey) {}
17 |
18 | @Override
19 | public byte[] serialize(String topic, AuditItem data) {
20 | byte[] retVal = null;
21 | ObjectMapper objectMapper = new ObjectMapper();
22 | try {
23 | retVal = objectMapper.writeValueAsString(data).getBytes();
24 | } catch (Exception exception) {
25 | LOGGER.error("Error in serializing object {}", data);
26 | }
27 | return retVal;
28 |
29 | }
30 |
31 | @Override
32 | public void close() {}
33 |
34 | }
--------------------------------------------------------------------------------
/kafka-producer/src/main/java/org/hifly/kafka/demo/producer/serializer/json/CustomDataJsonSerializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.producer.serializer.json;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.hifly.kafka.demo.producer.serializer.model.CustomData;
5 | import org.apache.kafka.common.serialization.Serializer;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | import java.util.Map;
10 |
11 | public class CustomDataJsonSerializer implements Serializer<CustomData> {
12 |
13 | private static final Logger LOGGER = LoggerFactory.getLogger(CustomDataJsonSerializer.class);
14 |
15 | @Override
16 | public void configure(Map configs, boolean isKey) {}
17 |
18 | @Override
19 | public byte[] serialize(String topic, CustomData data) {
20 | byte[] retVal = null;
21 | ObjectMapper objectMapper = new ObjectMapper();
22 | try {
23 | retVal = objectMapper.writeValueAsBytes(data);
24 | } catch (Exception exception) {
25 | LOGGER.error("Error in serializing object {}", data);
26 | }
27 | return retVal;
28 |
29 | }
30 |
31 | @Override
32 | public void close() {}
33 |
34 | }
--------------------------------------------------------------------------------
/kafka-producer/src/main/java/org/hifly/kafka/demo/producer/serializer/json/RunnerAuditItem.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.producer.serializer.json;
2 |
3 | import org.hifly.kafka.demo.producer.RecordMetadataUtil;
4 | import org.hifly.kafka.demo.producer.serializer.model.AuditItem;
5 | import org.hifly.kafka.demo.producer.serializer.model.CustomData;
6 | import org.apache.kafka.clients.producer.ProducerRecord;
7 | import org.apache.kafka.clients.producer.RecordMetadata;
8 |
9 |
10 | public class RunnerAuditItem {
11 |
12 | public static void main (String [] args) {
13 | JsonProducer<AuditItem> jsonProducer = new JsonProducer<>("org.hifly.kafka.demo.producer.serializer.json.AuditItemJsonSerializer");
14 | jsonProducer.start();
15 | bunchOfMessages("audit", jsonProducer);
16 | }
17 |
18 | public static void bunchOfMessages(String topic, JsonProducer<AuditItem> jsonProducer) {
19 | RecordMetadata lastRecord = null;
20 | for (int i= 0; i < 2; i++ ) {
21 | AuditItem auditItem = new AuditItem();
22 | auditItem.setMethod("test");
23 | lastRecord = jsonProducer.produceSync(new ProducerRecord<>(topic, auditItem));
24 | }
25 | RecordMetadataUtil.prettyPrinter(lastRecord);
26 |
27 | }
28 |
29 | }
30 |
--------------------------------------------------------------------------------
/kafka-producer/src/main/java/org/hifly/kafka/demo/producer/serializer/model/CustomData.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.producer.serializer.model;
2 |
3 | import java.io.Serializable;
4 |
5 | public class CustomData implements Serializable {
6 |
7 | private static final long serialVersionUID = 1L;
8 | private Integer index;
9 |
10 | public CustomData() {}
11 |
12 | public CustomData(Integer index) {
13 | this.index = index;
14 | }
15 |
16 | public Integer getIndex() {
17 | return index;
18 | }
19 |
20 | public void setIndex(Integer index) {
21 | this.index = index;
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/kafka-producer/src/main/resources/car.avsc:
--------------------------------------------------------------------------------
1 | {
2 | "type": "record",
3 | "name": "Car",
4 | "namespace": "org.hifly.kafka.demo.producer.serializer.avro",
5 | "fields": [
6 | {
7 | "name": "model",
8 | "type": "string"
9 | },
10 | {
11 | "name": "brand",
12 | "type": "string"
13 | }
14 | ]
15 | }
16 |
--------------------------------------------------------------------------------
/kafka-producer/src/main/resources/producer.properties:
--------------------------------------------------------------------------------
1 | bootstrap.servers=
2 | key.serializer=
3 | value.serializer=
4 | security.protocol=
5 | sasl.kerberos.service.name=
6 | sasl.mechanism=
7 | sasl.jaas.config=
8 | ssl.truststore.location=
9 | ssl.truststore.password=
10 | client.id=
11 | enable.metrics.push=
12 | schema.registry.url=
13 | schema.registry.ssl.truststore.location=
14 | schema.registry.ssl.truststore.password=
15 | basic.auth.credentials.source=
16 | basic.auth.user.info=
--------------------------------------------------------------------------------
/kafka-python-consumer/consumer.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import Consumer
2 |
3 | conf = {'bootstrap.servers': "localhost:9092", 'group.id': 'pythongroup', 'auto.offset.reset': 'earliest'}
4 | topic = 'test-python'
5 |
6 | consumer = Consumer(conf)
7 |
8 | consumer.subscribe([topic])
9 |
10 | while True:
11 | msg = consumer.poll(1.0)
12 |
13 | if msg is None:
14 | continue
15 | if msg.error():
16 | print("Error: {}".format(msg.error()))
17 | continue
18 |
19 | print('Message: {}'.format(msg.value().decode('utf-8')))
20 |
21 | consumer.close()
22 |
--------------------------------------------------------------------------------
/kafka-python-producer/producer.py:
--------------------------------------------------------------------------------
1 | from confluent_kafka import Producer
2 |
3 |
4 | def ack(error, message):
5 | print("error={}".format(error))
6 | print("topic={}".format(message.topic()))
7 | print("timestamp={}".format(message.timestamp()))
8 | print("key={}".format(message.key()))
9 | print("value={}".format(message.value()))
10 | print("partition={}".format(message.partition()))
11 | print("offset={}".format(message.offset()))
12 |
13 |
14 | conf = {'bootstrap.servers': "localhost:9092"}
15 |
16 | producer = Producer(conf)
17 |
18 | topic = 'test-python'
19 | producer.produce(topic, key="1", value="Hello World", callback=ack)
20 | producer.poll(1)
21 |
--------------------------------------------------------------------------------
/kafka-quarkus/.mvn/wrapper/maven-wrapper.properties:
--------------------------------------------------------------------------------
1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.2/apache-maven-3.6.2-bin.zip
2 | wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.5/maven-wrapper-0.5.5.jar
3 |
--------------------------------------------------------------------------------
/kafka-quarkus/src/main/java/org/hifly/demo/kafka/quarkus/domain/Vote.java:
--------------------------------------------------------------------------------
1 | package org.hifly.demo.kafka.quarkus.domain;
2 |
3 | import io.quarkus.runtime.annotations.RegisterForReflection;
4 |
5 | import java.util.Objects;
6 |
7 | @RegisterForReflection
8 | public class Vote {
9 |
10 | private String id;
11 | private Long pollId;
12 | private Integer option;
13 |
14 |
15 | public String getId() {
16 | return id;
17 | }
18 |
19 | public void setId(String id) {
20 | this.id = id;
21 | }
22 |
23 | public Long getPollId() {
24 | return pollId;
25 | }
26 |
27 | public void setPollId(Long pollId) {
28 | this.pollId = pollId;
29 | }
30 |
31 | public Integer getOption() {
32 | return option;
33 | }
34 |
35 | public void setOption(Integer option) {
36 | this.option = option;
37 | }
38 |
39 | @Override
40 | public boolean equals(Object o) {
41 | if (this == o) return true;
42 | if (o == null || getClass() != o.getClass()) return false;
43 | Vote vote = (Vote) o;
44 | return Objects.equals(id, vote.id) &&
45 | Objects.equals(pollId, vote.pollId) &&
46 | Objects.equals(option, vote.option);
47 | }
48 |
49 | @Override
50 | public int hashCode() {
51 | return Objects.hash(id, pollId, option);
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/kafka-quarkus/src/main/java/org/hifly/demo/kafka/quarkus/messaging/Consumer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.demo.kafka.quarkus.messaging;
2 |
3 | import com.fasterxml.jackson.core.JsonProcessingException;
4 | import com.fasterxml.jackson.databind.JsonNode;
5 | import com.fasterxml.jackson.databind.ObjectMapper;
6 | import io.smallrye.reactive.messaging.kafka.KafkaRecord;
7 | import org.eclipse.microprofile.reactive.messaging.Incoming;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 |
11 | import jakarta.enterprise.context.ApplicationScoped;
12 | import jakarta.transaction.Transactional;
13 | import java.util.concurrent.CompletionStage;
14 |
15 | @ApplicationScoped
16 | public class Consumer {
17 |
18 | private final ObjectMapper objectMapper = new ObjectMapper();
19 |
20 | private static final Logger LOGGER = LoggerFactory.getLogger(Consumer.class);
21 |
22 |
23 | @Transactional
24 | @Incoming("demo")
25 | public CompletionStage<Void> onMessage(KafkaRecord<String, String> message) {
26 | JsonNode json = null;
27 | try {
28 | json = objectMapper.readTree(message.getPayload());
29 | LOGGER.info("Received message from kafka with the message: " + json);
30 | } catch (JsonProcessingException e) {
31 | e.printStackTrace();
32 | }
33 |
34 | return message.ack();
35 | }
36 |
37 | }
38 |
--------------------------------------------------------------------------------
/kafka-quarkus/src/main/resources/application.properties:
--------------------------------------------------------------------------------
1 | quarkus.http.port=8080
2 |
3 | #### Logging
4 |
5 | quarkus.log.console.enable=true
6 | quarkus.log.console.level=DEBUG
7 | quarkus.log.console.color=false
8 | quarkus.log.console.format=%d{HH:mm:ss} %-5p [%c{2.}] (%t) %s%e%n
9 |
10 | #### Messaging
11 |
12 | mp.messaging.outgoing.demo-prod.bootstrap.servers=my-cluster-kafka-bootstrap:9092
13 | mp.messaging.outgoing.demo-prod.topic=demo
14 | mp.messaging.outgoing.demo-prod.connector=smallrye-kafka
15 | mp.messaging.outgoing.demo-prod.key.serializer=org.apache.kafka.common.serialization.StringSerializer
16 | mp.messaging.outgoing.demo-prod.value.serializer=org.apache.kafka.common.serialization.StringSerializer
17 | mp.messaging.outgoing.demo-prod.acks=1
18 |
19 | mp.messaging.incoming.demo.bootstrap.servers=my-cluster-kafka-bootstrap:9092
20 | mp.messaging.incoming.demo.topic=demo
21 | mp.messaging.incoming.demo.connector=smallrye-kafka
22 | mp.messaging.incoming.demo.key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
23 | mp.messaging.incoming.demo.value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
24 |
25 | #### OpenShift
26 | quarkus.openshift.expose=true
27 | quarkus.kubernetes-client.trust-certs=true
28 | quarkus.s2i.base-jvm-image=fabric8/s2i-java:3.0-java8
29 |
--------------------------------------------------------------------------------
/kafka-smt-aspectj/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM confluentinc/cp-kafka-connect-base:7.9.0
2 |
3 | COPY agent/aspectjweaver-1.9.19.jar /usr/share/java/aspectjweaver-1.9.19.jar
4 |
5 | COPY target/kafka-smt-aspectj-1.2.1.jar /etc/kafka-connect/jars/kafka-smt-aspectj-1.2.1.jar
--------------------------------------------------------------------------------
/kafka-smt-aspectj/agent/aspectjweaver-1.9.19.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hifly81/kafka-examples/e7f54a50de7463a435127cc896dd96c6e98a6087/kafka-smt-aspectj/agent/aspectjweaver-1.9.19.jar
--------------------------------------------------------------------------------
/kafka-smt-aspectj/config/connector_mongo.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "mongo-sink",
3 | "config": {
4 | "connector.class": "com.mongodb.kafka.connect.MongoSinkConnector",
5 | "topics": "test",
6 | "connection.uri": "mongodb://admin:password@mongo:27017",
7 | "key.converter": "org.apache.kafka.connect.storage.StringConverter",
8 | "value.converter": "org.apache.kafka.connect.storage.StringConverter",
9 | "key.converter.schemas.enable": false,
10 | "value.converter.schemas.enable": false,
11 | "database": "Tutorial2",
12 | "collection": "pets",
13 | "transforms": "Filter",
14 | "transforms.Filter.type": "org.apache.kafka.connect.transforms.Filter",
15 | "transforms.Filter.predicate": "IsFoo",
16 | "predicates": "IsFoo",
17 | "predicates.IsFoo.type": "org.apache.kafka.connect.transforms.predicates.TopicNameMatches",
18 | "predicates.IsFoo.pattern": "test"
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/kafka-smt-aspectj/config/test.json:
--------------------------------------------------------------------------------
1 | {"FIELD1": "01","FIELD2": "20400","FIELD3": "001","FIELD4": "0006084655017","FIELD5": "20221117","FIELD6": 9000018}
--------------------------------------------------------------------------------
/kafka-smt-aspectj/src/main/java/org/hifly/kafka/smt/aspectj/SMTAspect.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.smt.aspectj;
2 |
3 |
4 | import org.aspectj.lang.JoinPoint;
5 | import org.aspectj.lang.annotation.Aspect;
6 | import org.aspectj.lang.annotation.Before;
7 | import org.aspectj.lang.annotation.Pointcut;
8 |
9 | import org.slf4j.Logger;
10 | import org.slf4j.LoggerFactory;
11 | @Aspect
12 | public class SMTAspect {
13 |
14 | private static final Logger LOGGER = LoggerFactory.getLogger(SMTAspect.class);
15 |
16 | @Pointcut("execution(* org.apache.kafka.connect.transforms.*.apply(..)) && !execution(* org.apache.kafka.connect.runtime.PredicatedTransformation.apply(..))")
17 | public void standardMethod() {}
18 |
19 | @Before("standardMethod()")
20 | public void log(JoinPoint jp) throws Throwable {
21 |
22 | Object[] array = jp.getArgs();
23 | if(array != null) {
24 | for(Object tmp: array)
25 | LOGGER.info(tmp.toString());
26 | }
27 | }
28 |
29 | }
--------------------------------------------------------------------------------
/kafka-smt-aspectj/src/main/resources/META-INF/aop.xml:
--------------------------------------------------------------------------------
1 | <aspectj>
2 |
3 | <aspects>
4 | <aspect name="org.hifly.kafka.smt.aspectj.SMTAspect"/>
5 | </aspects>
6 |
7 | <weaver options="-verbose">
8 | </weaver>
9 |
10 | </aspectj>
--------------------------------------------------------------------------------
/kafka-smt-custom/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM confluentinc/cp-kafka-connect-base:7.9.0
2 |
3 | COPY target/kafka-smt-custom-1.2.1.jar /usr/share/java/kafkaconnect_smt-1.2.1.jar
4 |
--------------------------------------------------------------------------------
/kafka-smt-custom/config/connector_mongo.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "mongo-sink",
3 | "config": {
4 | "connector.class": "com.mongodb.kafka.connect.MongoSinkConnector",
5 | "topics": "test",
6 | "connection.uri": "mongodb://admin:password@mongo:27017",
7 | "key.converter": "org.apache.kafka.connect.storage.StringConverter",
8 | "value.converter": "org.apache.kafka.connect.storage.StringConverter",
9 | "key.converter.schemas.enable": false,
10 | "value.converter.schemas.enable": false,
11 | "database": "Tutorial2",
12 | "collection": "pets",
13 | "transforms": "createKey",
14 | "transforms.createKey.type": "org.hifly.kafka.smt.KeyFromFields",
15 | "transforms.createKey.fields": "FIELD1,FIELD2,FIELD3"
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/kafka-smt-custom/config/test.json:
--------------------------------------------------------------------------------
1 | {"FIELD1": "01","FIELD2": "20400","FIELD3": "001","FIELD4": "0006084655017","FIELD5": "20221117","FIELD6": 9000018}
--------------------------------------------------------------------------------
/kafka-springboot-consumer/src/main/fabric8/kafka-configmap.yml:
--------------------------------------------------------------------------------
1 | apiVersion: "v1"
2 | kind: "ConfigMap"
3 | metadata:
4 | name: "kafka-consumer-service-cm"
5 | data:
6 | broker-url: "amq-streams-cluster-kafka-bootstrap.xpaas-amq-streams.svc:9092"
7 | group-id: "group-1"
8 | topic-name: "demoTopic"
9 | spring.data.mongodb.host: "mongodb-26-rhel7.xpaas-amq-streams.svc"
10 | spring.data.mongodb.port: "27017"
11 | spring.data.mongodb.database: "orderdb"
12 | spring.data.mongodb.username: "mongo"
13 | spring.data.mongodb.password: "mongo"
--------------------------------------------------------------------------------
/kafka-springboot-consumer/src/main/fabric8/svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: ${project.artifactId}
5 | spec:
6 | ports:
7 | - protocol: TCP
8 | port: 8080
9 | targetPort: 8080
10 | type: ClusterIP
--------------------------------------------------------------------------------
/kafka-springboot-consumer/src/main/java/org/hifly/demo/kafka/KafkaApplication.java:
--------------------------------------------------------------------------------
1 | package org.hifly.demo.kafka;
2 |
3 | import org.springframework.boot.SpringApplication;
4 | import org.springframework.boot.autoconfigure.SpringBootApplication;
5 |
6 | @SpringBootApplication
7 | public class KafkaApplication {
8 |
9 | public static void main(String[] args) {
10 | SpringApplication.run(KafkaApplication.class, args);
11 | }
12 |
13 | }
14 |
--------------------------------------------------------------------------------
/kafka-springboot-consumer/src/main/java/org/hifly/demo/kafka/kafka/OrderException.java:
--------------------------------------------------------------------------------
1 | package org.hifly.demo.kafka.kafka;
2 |
3 | public class OrderException extends RuntimeException {
4 | }
5 |
--------------------------------------------------------------------------------
/kafka-springboot-consumer/src/main/java/org/hifly/demo/kafka/model/Order.java:
--------------------------------------------------------------------------------
1 | package org.hifly.demo.kafka.model;
2 |
3 | import org.springframework.data.annotation.Id;
4 | import org.springframework.data.mongodb.core.mapping.Document;
5 |
6 | import java.io.Serializable;
7 |
8 | @Document(collection = "Order")
9 | public class Order implements Serializable {
10 |
11 | private static final long serialVersionUID = -4369275673459274085L;
12 |
13 | @Id
14 | private Long id;
15 |
16 | private String name;
17 |
18 |
19 | public Long getId() {
20 | return id;
21 | }
22 |
23 | public void setId(Long id) {
24 | this.id = id;
25 | }
26 |
27 | public String getName() {
28 | return name;
29 | }
30 |
31 | public void setName(String name) {
32 | this.name = name;
33 | }
34 |
35 | public String toString() {
36 | return id + "-" + name;
37 | }
38 | }
--------------------------------------------------------------------------------
/kafka-springboot-consumer/src/main/java/org/hifly/demo/kafka/mongo/OrderRepository.java:
--------------------------------------------------------------------------------
1 | package org.hifly.demo.kafka.mongo;
2 |
3 | import org.hifly.demo.kafka.model.Order;
4 | import org.springframework.data.mongodb.repository.MongoRepository;
5 |
6 |
7 | public interface OrderRepository extends MongoRepository<Order, Long> {
8 |
9 | }
--------------------------------------------------------------------------------
/kafka-springboot-consumer/src/main/resources/application.yml:
--------------------------------------------------------------------------------
1 | server:
2 | port: 8090
3 | spring:
4 | kafka:
5 | topic:
6 | name: demoTopic
7 | bootstrap-servers: localhost:9092
8 | consumer:
9 | key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
10 | value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
11 | allow-auto-create-topics: false
12 | group-id: group-1
13 | auto-offset-reset: earliest
14 | backoff-interval: 15000
15 | backoff-max_failure: 2
16 | # Needed for DLT Producer
17 | producer:
18 | key-serializer: org.apache.kafka.common.serialization.StringSerializer
19 | value-serializer: org.apache.kafka.common.serialization.StringSerializer
20 | properties:
21 | schema.registry.url: http://localhost:8081
22 | data:
23 | mongodb:
24 | user: mongo
25 | password: mongo
26 | host: localhost
27 | port: 27017
28 | database: orderdb
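
For orientation, a minimal listener sketch that would consume the topic configured above; the class and method are hypothetical and not this module's actual consumer.

// Hypothetical Spring Kafka listener sketch; the real consumer in this module may differ.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Component
public class DemoTopicListener {

    private static final Logger LOGGER = LoggerFactory.getLogger(DemoTopicListener.class);

    @KafkaListener(topics = "${spring.kafka.topic.name}", groupId = "${spring.kafka.consumer.group-id}")
    public void onMessage(String payload) {
        // Payload is deserialized with the StringDeserializer configured in application.yml.
        LOGGER.info("Received: {}", payload);
    }
}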
--------------------------------------------------------------------------------
/kafka-springboot-consumer/src/main/resources/test.json:
--------------------------------------------------------------------------------
1 | { "id" : 1484825894873, "name" : "test"}
--------------------------------------------------------------------------------
/kafka-springboot-producer/src/main/fabric8/deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Deployment
3 | metadata:
4 | name: ${project.artifactId}
5 | spec:
6 | template:
7 | spec:
8 | containers:
9 | - image: ${project.artifactId}:${project.version}
10 | name: ${project.artifactId}
11 | ports:
12 | - containerPort: 8010
13 | env:
14 | - name: broker-url
15 | valueFrom:
16 | configMapKeyRef:
17 | name: kafka-producer-service-cm
18 | key: broker-url
19 | - name: topic-name
20 | valueFrom:
21 | configMapKeyRef:
22 | name: kafka-producer-service-cm
23 | key: topic-name
24 | - name: JAVA_OPTIONS
25 | value: "-Dspring.profiles.active=openshift"
26 | readinessProbe:
27 | tcpSocket:
28 | port: 8010
29 | initialDelaySeconds: 15
30 | timeoutSeconds: 2
31 | livenessProbe:
32 | tcpSocket:
33 | port: 8010
34 | initialDelaySeconds: 15
35 | timeoutSeconds: 2
--------------------------------------------------------------------------------
/kafka-springboot-producer/src/main/fabric8/kafka-configmap.yml:
--------------------------------------------------------------------------------
1 | apiVersion: "v1"
2 | kind: "ConfigMap"
3 | metadata:
4 | name: "kafka-producer-service-cm"
5 | data:
6 | broker-url: "amq-streams-cluster-kafka-bootstrap.xpaas-amq-streams.svc:9092"
7 | topic-name: "demoTopic"
--------------------------------------------------------------------------------
/kafka-springboot-producer/src/main/fabric8/route.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Route
3 | metadata:
4 | name: ${project.artifactId}
5 | spec:
6 | port:
7 | targetPort: 8010
8 | to:
9 | kind: Service
10 | name: ${project.artifactId}
--------------------------------------------------------------------------------
/kafka-springboot-producer/src/main/fabric8/svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: ${project.artifactId}
5 | spec:
6 | ports:
7 | - protocol: TCP
8 | port: 8010
9 | targetPort: 8010
10 | type: ClusterIP
--------------------------------------------------------------------------------
/kafka-springboot-producer/src/main/java/org/hifly/demo/kafka/KafkaApplication.java:
--------------------------------------------------------------------------------
1 | package org.hifly.demo.kafka;
2 |
3 | import org.springframework.boot.SpringApplication;
4 | import org.springframework.boot.autoconfigure.SpringBootApplication;
5 |
6 | @SpringBootApplication
7 | public class KafkaApplication {
8 |
9 | public static void main(String[] args) {
10 | SpringApplication.run(KafkaApplication.class, args);
11 | }
12 |
13 |
14 |
15 | }
16 |
--------------------------------------------------------------------------------
/kafka-springboot-producer/src/main/java/org/hifly/demo/kafka/controller/KafkaRestController.java:
--------------------------------------------------------------------------------
1 | package org.hifly.demo.kafka.controller;
2 |
3 | import org.hifly.demo.kafka.model.Order;
4 | import org.hifly.demo.kafka.controller.kafka.Producer;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 | import org.springframework.beans.factory.annotation.Autowired;
8 | import org.springframework.beans.factory.annotation.Value;
9 | import org.springframework.http.HttpStatus;
10 | import org.springframework.http.ResponseEntity;
11 | import org.springframework.web.bind.annotation.PostMapping;
12 | import org.springframework.web.bind.annotation.RequestBody;
13 | import org.springframework.web.bind.annotation.RestController;
14 |
15 | @RestController
16 | public class KafkaRestController {
17 |
18 | Logger logger = LoggerFactory.getLogger(KafkaRestController.class);
19 |
20 | private final Producer producer;
21 |
22 | @Autowired
23 | public KafkaRestController(Producer producer) {
24 | this.producer = producer;
25 | }
26 |
27 | @PostMapping(value="/api/order")
28 | public ResponseEntity send(@RequestBody Order order) {
29 | logger.info("sending order to kafka: {}", order);
30 | this.producer.send(order.toString());
31 | return new ResponseEntity<>(HttpStatus.OK);
32 | }
33 |
34 |
35 | }
--------------------------------------------------------------------------------
/kafka-springboot-producer/src/main/java/org/hifly/demo/kafka/model/Order.java:
--------------------------------------------------------------------------------
1 | package org.hifly.demo.kafka.model;
2 |
3 | import java.io.Serializable;
4 |
5 | public class Order implements Serializable {
6 |
7 | private Long id;
8 |
9 | private String name;
10 |
11 |
12 | public Long getId() {
13 | return id;
14 | }
15 |
16 | public void setId(Long id) {
17 | this.id = id;
18 | }
19 |
20 | public String getName() {
21 | return name;
22 | }
23 |
24 | public void setName(String name) {
25 | this.name = name;
26 | }
27 |
28 | public String toString() {
29 | return id + "-" + name;
30 | }
31 | }
--------------------------------------------------------------------------------
/kafka-springboot-producer/src/main/resources/application.yml:
--------------------------------------------------------------------------------
1 | server:
2 | port: 8010
3 | spring:
4 | kafka:
5 | topic:
6 | name: demoTopic
7 | bootstrap-servers: localhost:9092
8 | producer:
9 | key-serializer: org.apache.kafka.common.serialization.StringSerializer
10 | value-serializer: org.apache.kafka.common.serialization.StringSerializer
11 | properties:
12 | schema.registry.url: http://localhost:8081
--------------------------------------------------------------------------------
/kafka-springboot-producer/src/main/resources/test.json:
--------------------------------------------------------------------------------
1 | { "id" : 1484825894873, "name" : "test"}
--------------------------------------------------------------------------------
/kafka-streams-k8s/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM openjdk:17.0.1-jdk-slim
2 |
3 | # Install curl and procps (for ps command)
4 | RUN apt-get update && apt-get install -y curl procps && rm -rf /var/lib/apt/lists/*
5 |
6 |
7 | WORKDIR /app
8 |
9 | COPY target/kafka-streams-k8s-1.2.1.jar /app/kafka-streams-k8s-1.2.1.jar
10 |
11 | COPY prometheus/kafka_streams.yml /app/kafka_streams.yml
12 | COPY prometheus/jmx_prometheus_javaagent-0.20.0.jar /app/jmx_prometheus_javaagent-0.20.0.jar
13 |
14 | EXPOSE 8080
15 | EXPOSE 1234
16 |
17 | ENV JAVA_OPTS="-javaagent:/app/jmx_prometheus_javaagent-0.20.0.jar=1234:/app/kafka_streams.yml"
18 |
19 | # ENTRYPOINT ["java", "-jar", "/app/kafka-streams-k8s-1.2.1.jar"]
20 |
21 | CMD java $JAVA_OPTS -jar /app/kafka-streams-k8s-1.2.1.jar
--------------------------------------------------------------------------------
/kafka-streams-k8s/k8s/svc-prometheus.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: kafka-streams-prometheus-service
5 | labels:
6 | app: kafka-streams-app
7 | spec:
8 | type: NodePort
9 | selector:
10 | app: kafka-streams-app
11 | ports:
12 | - protocol: TCP
13 | port: 1234
14 | targetPort: 1234
15 | nodePort: 31234
16 |
--------------------------------------------------------------------------------
/kafka-streams-k8s/k8s/svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: kafka-streams-service
5 | labels:
6 | app: kafka-streams-app
7 | spec:
8 | ports:
9 | - port: 8080
10 | name: http
11 | clusterIP: None # Headless service
12 | selector:
13 | app: kafka-streams-app
14 |
--------------------------------------------------------------------------------
/kafka-streams-k8s/prometheus/jmx_prometheus_javaagent-0.20.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hifly81/kafka-examples/e7f54a50de7463a435127cc896dd96c6e98a6087/kafka-streams-k8s/prometheus/jmx_prometheus_javaagent-0.20.0.jar
--------------------------------------------------------------------------------
/kafka-streams/src/main/java/org/hifly/kafka/demo/streams/domain/CarSensor.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.streams.domain;
2 |
3 | public class CarSensor {
4 |
5 | private String id;
6 | private float speed;
7 | private float lat;
8 | private float lng;
9 |
10 | public String getId() {
11 | return id;
12 | }
13 |
14 | public void setId(String id) {
15 | this.id = id;
16 | }
17 |
18 | public float getSpeed() {
19 | return speed;
20 | }
21 |
22 | public void setSpeed(float speed) {
23 | this.speed = speed;
24 | }
25 |
26 | public float getLat() {
27 | return lat;
28 | }
29 |
30 | public void setLat(float lat) {
31 | this.lat = lat;
32 | }
33 |
34 | public float getLng() {
35 | return lng;
36 | }
37 |
38 | public void setLng(float lng) {
39 | this.lng = lng;
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/kafka-streams/src/main/java/org/hifly/kafka/demo/streams/serializer/CarInfoDeserializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.streams.serializer;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.hifly.kafka.demo.streams.domain.CarInfo;
5 | import org.apache.kafka.common.serialization.Deserializer;
6 |
7 | import java.nio.charset.Charset;
8 | import java.nio.charset.StandardCharsets;
9 | import java.util.Map;
10 |
11 | public class CarInfoDeserializer implements Deserializer<CarInfo> {
12 |
13 | private static final Charset CHARSET = StandardCharsets.UTF_8;
14 |
15 | @Override
16 | public void configure(Map configs, boolean isKey) {}
17 |
18 | @Override
19 | public CarInfo deserialize(String s, byte[] bytes) {
20 | try {
21 | ObjectMapper objectMapper = new ObjectMapper();
22 | String carInfo = new String(bytes, CHARSET);
23 | return objectMapper.readValue(carInfo, CarInfo.class);
24 | } catch (Exception e) {
25 | throw new IllegalArgumentException("Error reading bytes!", e);
26 | }
27 | }
28 |
29 | @Override
30 | public void close() {}
31 |
32 | }
--------------------------------------------------------------------------------
/kafka-streams/src/main/java/org/hifly/kafka/demo/streams/serializer/CarInfoSerializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.streams.serializer;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.hifly.kafka.demo.streams.domain.CarInfo;
5 | import org.apache.kafka.common.serialization.Serializer;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | import java.util.Map;
10 |
11 | public class CarInfoSerializer implements Serializer<CarInfo> {
12 |
13 | private static final Logger LOGGER = LoggerFactory.getLogger(CarInfoSerializer.class);
14 |
15 | @Override
16 | public void configure(Map configs, boolean isKey) {}
17 |
18 | @Override
19 | public byte[] serialize(String topic, CarInfo data) {
20 | byte[] retVal = null;
21 | ObjectMapper objectMapper = new ObjectMapper();
22 | try {
23 | retVal = objectMapper.writeValueAsString(data).getBytes();
24 | } catch (Exception exception) {
25 | LOGGER.error("Error in serializing object {}", data, exception);
26 | }
27 | return retVal;
28 |
29 | }
30 |
31 | @Override
32 | public void close() {}
33 |
34 | }
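
If this pair is used from a Kafka Streams topology, the serializer and deserializer can be combined into a Serde; a sketch, assuming a helper class placed alongside them in the same package.

// Sketch: wrap the JSON serializer/deserializer into a Serde usable with Consumed.with(...) / Produced.with(...).
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.hifly.kafka.demo.streams.domain.CarInfo;

public class CarInfoSerdes {

    public static Serde<CarInfo> carInfo() {
        return Serdes.serdeFrom(new CarInfoSerializer(), new CarInfoDeserializer());
    }
}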
--------------------------------------------------------------------------------
/kafka-streams/src/main/java/org/hifly/kafka/demo/streams/serializer/CarSensorDeserializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.streams.serializer;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.apache.kafka.common.serialization.Deserializer;
5 | import org.hifly.kafka.demo.streams.domain.CarSensor;
6 |
7 | import java.nio.charset.Charset;
8 | import java.nio.charset.StandardCharsets;
9 | import java.util.Map;
10 |
11 | public class CarSensorDeserializer implements Deserializer<CarSensor> {
12 |
13 | private static final Charset CHARSET = StandardCharsets.UTF_8;
14 |
15 | @Override
16 | public void configure(Map configs, boolean isKey) {}
17 |
18 | @Override
19 | public CarSensor deserialize(String s, byte[] bytes) {
20 | try {
21 | ObjectMapper objectMapper = new ObjectMapper();
22 | String carSensor = new String(bytes, CHARSET);
23 | return objectMapper.readValue(carSensor, CarSensor.class);
24 | } catch (Exception e) {
25 | throw new IllegalArgumentException("Error reading bytes!", e);
26 | }
27 | }
28 |
29 | @Override
30 | public void close() {}
31 |
32 | }
--------------------------------------------------------------------------------
/kafka-streams/src/main/java/org/hifly/kafka/demo/streams/serializer/CarSensorSerializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.streams.serializer;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.apache.kafka.common.serialization.Serializer;
5 | import org.hifly.kafka.demo.streams.domain.CarSensor;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | import java.util.Map;
10 |
11 | public class CarSensorSerializer implements Serializer<CarSensor> {
12 |
13 | private static final Logger LOGGER = LoggerFactory.getLogger(CarSensorSerializer.class);
14 |
15 | @Override
16 | public void configure(Map configs, boolean isKey) {}
17 |
18 | @Override
19 | public byte[] serialize(String topic, CarSensor data) {
20 | byte[] retVal = null;
21 | ObjectMapper objectMapper = new ObjectMapper();
22 | try {
23 | retVal = objectMapper.writeValueAsString(data).getBytes();
24 | } catch (Exception exception) {
25 | LOGGER.error("Error in serializing object {}", data);
26 | }
27 | return retVal;
28 |
29 | }
30 |
31 | @Override
32 | public void close() {}
33 |
34 | }
--------------------------------------------------------------------------------
/kafka-streams/src/main/java/org/hifly/kafka/demo/streams/serializer/SpeedInfoDeserializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.streams.serializer;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.hifly.kafka.demo.streams.domain.SpeedInfo;
5 | import org.apache.kafka.common.serialization.Deserializer;
6 |
7 | import java.nio.charset.Charset;
8 | import java.nio.charset.StandardCharsets;
9 | import java.util.Map;
10 |
11 | public class SpeedInfoDeserializer implements Deserializer<SpeedInfo> {
12 |
13 | private static final Charset CHARSET = StandardCharsets.UTF_8;
14 |
15 | @Override
16 | public void configure(Map configs, boolean isKey) {}
17 |
18 | @Override
19 | public SpeedInfo deserialize(String s, byte[] bytes) {
20 | try {
21 | ObjectMapper objectMapper = new ObjectMapper();
22 | String speedInfo = new String(bytes, CHARSET);
23 | return objectMapper.readValue(speedInfo, SpeedInfo.class);
24 | } catch (Exception e) {
25 | throw new IllegalArgumentException("Error reading bytes!", e);
26 | }
27 | }
28 |
29 |
30 |
31 | @Override
32 | public void close() {}
33 |
34 | }
--------------------------------------------------------------------------------
/kafka-streams/src/main/java/org/hifly/kafka/demo/streams/serializer/SpeedInfoSerializer.java:
--------------------------------------------------------------------------------
1 | package org.hifly.kafka.demo.streams.serializer;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.hifly.kafka.demo.streams.domain.SpeedInfo;
5 | import org.apache.kafka.common.serialization.Serializer;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | import java.util.Map;
10 |
11 | public class SpeedInfoSerializer implements Serializer<SpeedInfo> {
12 |
13 | private static final Logger LOGGER = LoggerFactory.getLogger(SpeedInfoSerializer.class);
14 |
15 | @Override
16 | public void configure(Map configs, boolean isKey) {}
17 |
18 | @Override
19 | public byte[] serialize(String topic, SpeedInfo data) {
20 | byte[] retVal = null;
21 | ObjectMapper objectMapper = new ObjectMapper();
22 | try {
23 | retVal = objectMapper.writeValueAsString(data).getBytes();
24 | } catch (Exception exception) {
25 | LOGGER.error("Error in serializing object {}", data);
26 | }
27 | return retVal;
28 |
29 | }
30 |
31 | @Override
32 | public void close() {}
33 |
34 | }
--------------------------------------------------------------------------------
/kafka-unixcommand-connector/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM confluentinc/cp-kafka-connect-base:7.9.0
2 |
3 | COPY target/kafka-unixcommand-connector-1.2.1-package.zip /tmp/kafka-unixcommand-connector-1.2.1-package.zip
4 |
5 | RUN confluent-hub install --no-prompt /tmp/kafka-unixcommand-connector-1.2.1-package.zip
--------------------------------------------------------------------------------
/kafka-unixcommand-connector/build-image.sh:
--------------------------------------------------------------------------------
1 | mvn clean package
2 |
3 | docker build . -t connect-custom-image:1.0.0
--------------------------------------------------------------------------------
/kafka-unixcommand-connector/config/source.quickstart.json:
--------------------------------------------------------------------------------
1 | {
2 | "name" : "unixcommandsource",
3 | "config": {
4 | "connector.class" : "org.hifly.kafka.demo.connector.UnixCommandSourceConnector",
5 | "command" : "fortune",
6 | "topic": "unixcommands",
7 | "poll.ms" : 5000,
8 | "tasks.max": 1
9 | }
10 | }
--------------------------------------------------------------------------------
/kafka-unixcommand-connector/src/assembly/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "name" : "kafka-unixcommand",
3 | "version" : "1.2.1",
4 | "title" : "Kafka Connect unixcommand",
5 | "description" : "For demos only: A Kafka Connect connector for generating mock data, not suitable for production",
6 | "owner" : {
7 | "username" : "hifly81",
8 | "type" : "personal",
9 | "name" : "Hifly",
10 | "url" : "",
11 | "logo" : "assets/test.png"
12 | },
13 | "support" : {
14 | "summary" : "This connector is open source at https://github.com/hifly81/kafka-play and supported by community members.",
15 | "url" : "https://github.com/hifly81/kafka-play",
16 | "provider_name" : "Community Support"
17 | },
18 | "tags" : [ "unix", "linux", "demo" ],
19 | "requirements" : [ "Confluent Platform 4.x or later", "Apache Kafka 1.x or later" ],
20 | "features" : {
21 | "supported_encodings" : [ "any" ],
22 | "single_message_transforms" : true,
23 | "confluent_control_center_integration" : true,
24 | "kafka_connect_api" : true
25 | },
26 | "documentation_url" : "https://github.com/hifly81/kafka-play/README.md",
27 | "source_url" : "https://github.com/hifly81/kafka-play",
28 | "docker_image" : { },
29 | "license" : [ {
30 | "name" : "Apache License 2.0",
31 | "url" : "https://www.apache.org/licenses/LICENSE-2.0"
32 | } ],
33 | "component_types" : [ "source" ],
34 | "release_date" : "2021-03-19"
35 | }
--------------------------------------------------------------------------------
/ksqldb-join/config/connector_device_maintenance_jdbc_source.json:
--------------------------------------------------------------------------------
1 | {
2 | "name" : "pgsql-sample2-source",
3 | "config": {
4 | "connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector",
5 | "connection.url": "jdbc:postgresql://postgres:5432/postgres",
6 | "connection.user": "postgres",
7 | "connection.password": "postgres",
8 | "topic.prefix": "",
9 | "poll.interval.ms" : 3600000,
10 | "table.whitelist" : "public.maintenance",
11 | "mode":"bulk",
12 | "key.converter": "org.apache.kafka.connect.storage.StringConverter",
13 | "transforms":"createKey,extractInt",
14 | "transforms.createKey.type":"org.apache.kafka.connect.transforms.ValueToKey",
15 | "transforms.createKey.fields":"id",
16 | "transforms.extractInt.type":"org.apache.kafka.connect.transforms.ExtractField$Key",
17 | "transforms.extractInt.field":"id"
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/ksqldb-join/config/connector_jdbc_source.json:
--------------------------------------------------------------------------------
1 | {
2 | "name" : "pgsql-sample-source",
3 | "config": {
4 | "connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector",
5 | "connection.url": "jdbc:postgresql://postgres:5432/postgres",
6 | "connection.user": "postgres",
7 | "connection.password": "postgres",
8 | "topic.prefix": "",
9 | "poll.interval.ms" : 3600000,
10 | "table.whitelist" : "public.device",
11 | "mode":"bulk",
12 | "key.converter": "org.apache.kafka.connect.storage.StringConverter",
13 | "transforms":"createKey,extractInt",
14 | "transforms.createKey.type":"org.apache.kafka.connect.transforms.ValueToKey",
15 | "transforms.createKey.fields":"id",
16 | "transforms.extractInt.type":"org.apache.kafka.connect.transforms.ExtractField$Key",
17 | "transforms.extractInt.field":"id"
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/ksqldb-join/config/connector_rabbitmq_source.json:
--------------------------------------------------------------------------------
1 | {
2 | "name" : "rabbitmq-sample-source",
3 | "config": {
4 | "connector.class": "io.confluent.connect.rabbitmq.RabbitMQSourceConnector",
5 | "confluent.topic.bootstrap.servers": "broker:9092",
6 | "confluent.topic.replication.factor": "1",
7 | "kafka.topic": "temperature.data",
8 | "rabbitmq.queue" : "temperature.queue",
9 | "rabbitmq.host" : "rabbitmq",
10 | "rabbitmq.username" : "guest",
11 | "rabbitmq.password" : "guest",
12 | "value.converter": "org.apache.kafka.connect.converters.ByteArrayConverter",
13 | "key.converter": "org.apache.kafka.connect.storage.StringConverter"
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/ksqldb-join/config/create-tables.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE maintenance (
2 | id serial PRIMARY KEY,
3 | maintenance VARCHAR ( 255 ) UNIQUE NOT NULL
4 | );
5 |
6 | CREATE TABLE device (
7 | id serial PRIMARY KEY,
8 | fullname VARCHAR ( 255 ) UNIQUE NOT NULL
9 | );
10 |
11 |
12 |
13 | insert into maintenance (id, maintenance) values (1, '2023-03-01 15:00:00 16:00:00');
14 |
15 | insert into device (id, fullname) values (1, 'foo11111');
16 | insert into device (id, fullname) values (2, 'foo22222');
17 | insert into device (id, fullname) values (10, 'foo1010101010');
18 | insert into device (id, fullname) values (15, 'foo1515151515');
--------------------------------------------------------------------------------
/ksqldb-join/config/rabbit_producer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import pika
3 | import sys
4 | import json
5 | import random
6 |
7 |
8 | def get_temperature(device_id: int) -> int:

9 | if device_id == 1:
10 | return random.randrange(10, 35)
11 | elif device_id == 2:
12 | return random.randrange(2, 23)
13 | else:
14 | return random.randrange(0, 40)
15 |
16 |
17 | if len(sys.argv) != 3:
18 | print("Usage: " + sys.argv[0] + " ")
19 | sys.exit(1)
20 |
21 | queue = sys.argv[1]
22 | count = int(sys.argv[2])
23 |
24 | print("count:\t%d\nqueue:\t%s" % (count, queue) )
25 |
26 | msgBody = {
27 | "id" : 0 ,
28 | "body" : "010101010101010101010101010101010101010101010101010101010101010101010"
29 | }
30 |
31 | connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
32 | channel = connection.channel()
33 | channel.queue_declare(queue = queue)
34 |
35 | properties = pika.BasicProperties(content_type='application/json', delivery_mode=1, priority=1, content_encoding='utf-8')
36 | for i in range(count):
37 | msgBody["id"] = i
38 | msgBody["body"] = get_temperature(i)
39 | jsonStr = json.dumps(msgBody)
40 | properties.message_id = str(i)
41 | channel.basic_publish(exchange = '', routing_key = queue, body = jsonStr, properties = properties)
42 | print("Send\t%r" % msgBody)
43 |
44 | connection.close()
45 | print('Exiting')
--------------------------------------------------------------------------------
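A hedged usage sketch for the producer above; the queue name matches the RabbitMQ source connector, the message count is illustrative:

    # install the only dependency and publish 100 sample readings to the queue consumed by the connector
    pip install pika
    python3 ksqldb-join/config/rabbit_producer.py temperature.queue 100
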
/ksqldb-join/ksql/ksql-statements-rj.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | tr '\n' ' ' < statements-rj.sql | \
4 | sed 's/;/;\'$'\n''/g' | \
5 | while read stmt; do
6 | echo '{"ksql":"'$stmt'", "streamsProperties": {}}' | \
7 | curl -s -X "POST" "http://ksqldb-server:8088/ksql" \
8 | -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
9 | -d @- | \
10 | jq
11 | done
12 |
--------------------------------------------------------------------------------
/ksqldb-join/ksql/ksql-statements.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | tr '\n' ' ' < statements.sql | \
4 | sed 's/;/;\'$'\n''/g' | \
5 | while read stmt; do
6 | echo '{"ksql":"'$stmt'", "streamsProperties": {}}' | \
7 | curl -s -X "POST" "http://ksqldb-server:8088/ksql" \
8 | -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
9 | -d @- | \
10 | jq
11 | done
12 |
--------------------------------------------------------------------------------
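The same REST call can be issued for a single statement without the helper script; a sketch assuming port 8088 of the ksqldb-server container is published on localhost:

    # submit one statement to the ksqlDB /ksql endpoint
    curl -s -X POST http://localhost:8088/ksql \
      -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
      -d '{"ksql": "SHOW STREAMS;", "streamsProperties": {}}' | jq
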
/ksqldb-join/ksql/statements-rj.sql:
--------------------------------------------------------------------------------
1 | SET 'auto.offset.reset' = 'earliest';
2 |
3 | CREATE STREAM DEVICE (id BIGINT, fullname VARCHAR) WITH (KAFKA_TOPIC='device',VALUE_FORMAT='AVRO');
4 | CREATE STREAM DEVICE_DATA AS SELECT ID,FULLNAME FROM DEVICE PARTITION BY id;
5 | CREATE TABLE DEVICE_DATA_TBL(id BIGINT PRIMARY KEY, FULLNAME VARCHAR) WITH (KAFKA_TOPIC='DEVICE_DATA',VALUE_FORMAT='AVRO');
6 |
7 | CREATE STREAM MAINTENANCE (id BIGINT, maintenance VARCHAR) WITH (KAFKA_TOPIC='maintenance',VALUE_FORMAT='AVRO');
8 | CREATE STREAM MAINTENANCE_DATA AS SELECT ID,MAINTENANCE FROM MAINTENANCE PARTITION BY id;
9 | CREATE TABLE MAINTENANCE_DATA_TBL(id BIGINT PRIMARY KEY, MAINTENANCE VARCHAR) WITH (KAFKA_TOPIC='MAINTENANCE_DATA',VALUE_FORMAT='AVRO');
10 |
11 | CREATE TABLE DEVICE_MAINTENANCE AS
12 | SELECT A.ID as DEVICE_ID,
13 | FULLNAME,
14 | MAINTENANCE
15 | FROM MAINTENANCE_DATA_TBL A
16 | RIGHT JOIN DEVICE_DATA_TBL B
17 | ON A.ID = B.ID;
18 |
19 |
20 |
--------------------------------------------------------------------------------
/ksqldb-join/ksql/statements.sql:
--------------------------------------------------------------------------------
1 | SET 'auto.offset.reset' = 'earliest';
2 |
3 | CREATE STREAM TEMPERATURE_DATA (id BIGINT, body BIGINT) WITH (KAFKA_TOPIC='temperature.data',VALUE_FORMAT='JSON');
4 | CREATE STREAM TEMPERATURE_DATA_STL AS SELECT ID,BODY FROM TEMPERATURE_DATA PARTITION BY id;
5 |
6 | CREATE STREAM DEVICE (id BIGINT, fullname VARCHAR) WITH (KAFKA_TOPIC='device',VALUE_FORMAT='AVRO');
7 | CREATE STREAM DEVICE_DATA AS SELECT ID,FULLNAME FROM DEVICE PARTITION BY id;
8 | CREATE TABLE DEVICE_DATA_TBL(id BIGINT PRIMARY KEY, FULLNAME VARCHAR) WITH (KAFKA_TOPIC='DEVICE_DATA',VALUE_FORMAT='AVRO');
9 |
10 | CREATE STREAM DEVICE_TEMPERATURE AS
11 | SELECT A.ID as DEVICE_ID,
12 | FULLNAME,
13 | BODY as TEMPERATURE
14 | FROM TEMPERATURE_DATA_STL A
15 | JOIN DEVICE_DATA_TBL B
16 | ON A.ID = B.ID;
17 |
18 | CREATE STREAM DEVICE_TEMPERATURE_LJ AS
19 | SELECT A.ID as DEVICE_ID,
20 | FULLNAME,
21 | BODY as TEMPERATURE
22 | FROM TEMPERATURE_DATA_STL A
23 | LEFT JOIN DEVICE_DATA_TBL B
24 | ON A.ID = B.ID;
25 |
26 |
27 |
--------------------------------------------------------------------------------
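Once the statements are applied, the joined output can be observed with a push query against the /query endpoint (again assuming localhost:8088 is mapped to ksqldb-server):

    # stream a few enriched temperature readings as they are produced
    curl -s -X POST http://localhost:8088/query \
      -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
      -d '{"ksql": "SELECT * FROM DEVICE_TEMPERATURE EMIT CHANGES LIMIT 5;", "streamsProperties": {}}'
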
/ksqldb-saga-example/ksql/insert.sql:
--------------------------------------------------------------------------------
1 | insert into accounts values('AAA', 'Jimmy Best');
2 | insert into orders values('AAA', 150, 'Item0', 'A123', 'Jimmy Best', 'Transfer funds', '2020-04-22 03:19:51');
3 | insert into orders values('AAA', -110, 'Item1', 'A123', 'amazon.it', 'Purchase', '2020-04-22 03:19:55');
4 | insert into orders values('AAA', -100, 'Item2', 'A123', 'ebike.com', 'Purchase', '2020-04-22 03:19:58');
--------------------------------------------------------------------------------
/ksqldb-saga-example/ksql/ksql-insert.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | tr '\n' ' ' < insert.sql | \
4 | sed 's/;/;\'$'\n''/g' | \
5 | while read stmt; do
6 | echo '{"ksql":"'$stmt'", "streamsProperties": {}}' | \
7 | curl -s -X "POST" "http://ksqldb-server:8088/ksql" \
8 | -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
9 | -d @- | \
10 | jq
11 | done
12 |
--------------------------------------------------------------------------------
/ksqldb-saga-example/ksql/ksql-statements.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | tr '\n' ' ' < statements.sql | \
4 | sed 's/;/;\'$'\n''/g' | \
5 | while read stmt; do
6 | echo '{"ksql":"'$stmt'", "streamsProperties": {}}' | \
7 | curl -s -X "POST" "http://ksqldb-server:8088/ksql" \
8 | -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
9 | -d @- | \
10 | jq
11 | done
12 |
--------------------------------------------------------------------------------
/ksqldb-saga-example/src/main/avro/accounts-value.avsc:
--------------------------------------------------------------------------------
1 | {"type":"record","name":"Account","namespace":"org.hifly.saga.payment.model","fields":[{"name":"FULLNAME","type":["null","string"],"default":null}],"connect.name":"org.hifly.saga.payment.model.Account"}
--------------------------------------------------------------------------------
/ksqldb-saga-example/src/main/avro/order_actions-value.avsc:
--------------------------------------------------------------------------------
1 | {"type":"record","name":"OrderAction","namespace":"org.hifly.saga.payment.model","fields":[{"name":"TX_ID","type":["null","string"],"default":null},{"name":"TX_ACTION","type":["null","int"],"default":null},{"name":"ACCOUNT","type":["null","string"],"default":null},{"name":"ITEMS","type":["null",{"type":"array","items":["null","string"]}],"default":null},{"name":"ORDER","type":["null","string"],"default":null}],"connect.name":"org.hifly.saga.payment.model.OrderAction"}
--------------------------------------------------------------------------------
/ksqldb-saga-example/src/main/avro/order_actions_ack-value.avsc:
--------------------------------------------------------------------------------
1 | {"type":"record","name":"OrderActionAck","namespace":"org.hifly.saga.payment.model","fields":[{"name":"TX_ID","type":["null","string"],"default":null},{"name":"PARTICIPIANT_ID","type":["null","string"],"default":null},{"name":"ORDER_ID","type":["null","string"],"default":null},{"name":"TIMESTAMP","type":["null","string"],"default":null}],"connect.name":"org.hifly.saga.payment.model.OrderActionAck"}
--------------------------------------------------------------------------------
/ksqldb-saga-example/src/main/avro/orders-value.avsc:
--------------------------------------------------------------------------------
1 | {"type":"record","name":"Order","namespace":"org.hifly.saga.payment.model","fields":[{"name":"ACCOUNT_ID","type":["null","string"],"default":null},{"name":"AMOUNT","type":["null","double"],"default":null},{"name":"ITEM_ID","type":["null","string"],"default":null},{"name":"ORDER_ID","type":["null","string"],"default":null},{"name":"PAYEE","type":["null","string"],"default":null},{"name":"TYPOLOGY","type":["null","string"],"default":null},{"name":"TIMESTAMP","type":["null","string"],"default":null}],"connect.name":"org.hifly.saga.payment.model.Order"}
--------------------------------------------------------------------------------
/ksqldb-window-session-tripsegments/ksql/insert.sql:
--------------------------------------------------------------------------------
1 | INSERT INTO vehicle_position (vehicle_id, latitude, longitude, timestamp) VALUES ('VH1', 42.21, 17.12, '2023-02-18 15:10:00');
2 | INSERT INTO vehicle_position (vehicle_id, latitude, longitude, timestamp) VALUES ('VH1', 42.23, 17.13, '2023-02-18 15:11:00');
3 | INSERT INTO vehicle_position (vehicle_id, latitude, longitude, timestamp) VALUES ('VH1', 42.25, 17.14, '2023-02-18 15:12:00');
4 | INSERT INTO vehicle_position (vehicle_id, latitude, longitude, timestamp) VALUES ('VH1', 42.27, 17.15, '2023-02-18 15:12:30');
5 | INSERT INTO vehicle_position (vehicle_id, latitude, longitude, timestamp) VALUES ('VH1', 42.28, 17.16, '2023-02-18 15:13:00');
6 | INSERT INTO vehicle_position (vehicle_id, latitude, longitude, timestamp) VALUES ('VH1', 42.31, 17.17, '2023-02-18 15:20:00');
7 | INSERT INTO vehicle_position (vehicle_id, latitude, longitude, timestamp) VALUES ('VH1', 42.33, 17.18, '2023-02-18 15:22:00');
8 |
--------------------------------------------------------------------------------
/ksqldb-window-session-tripsegments/ksql/ksql-insert.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | tr '\n' ' ' < insert.sql | \
4 | sed 's/;/;\'$'\n''/g' | \
5 | while read stmt; do
6 | echo '{"ksql":"'$stmt'", "streamsProperties": {}}' | \
7 | curl -s -X "POST" "http://ksqldb-server:8088/ksql" \
8 | -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
9 | -d @- | \
10 | jq
11 | done
12 |
--------------------------------------------------------------------------------
/ksqldb-window-session-tripsegments/ksql/ksql-statements.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | tr '\n' ' ' < statements.sql | \
4 | sed 's/;/;\'$'\n''/g' | \
5 | while read stmt; do
6 | echo '{"ksql":"'$stmt'", "streamsProperties": {}}' | \
7 | curl -s -X "POST" "http://ksqldb-server:8088/ksql" \
8 | -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
9 | -d @- | \
10 | jq
11 | done
12 |
--------------------------------------------------------------------------------
/ksqldb-window-session-tripsegments/ksql/statements.sql:
--------------------------------------------------------------------------------
1 | CREATE STREAM vehicle_position (
2 | vehicle_id VARCHAR,
3 | latitude DOUBLE,
4 | longitude DOUBLE,
5 | timestamp VARCHAR
6 | ) WITH (
7 | kafka_topic = 'vehicle_position',
8 | timestamp='timestamp',
9 | timestamp_format='yyyy-MM-dd HH:mm:ss',
10 | partitions = 1,
11 | value_format = 'avro'
12 | );
13 |
14 |
15 | CREATE TABLE trips
16 | WITH (kafka_topic='trips') AS
17 | SELECT vehicle_id,
18 | COUNT(*) AS positions_sent,
19 | EARLIEST_BY_OFFSET(latitude) AS start_latitude,
20 | EARLIEST_BY_OFFSET(longitude) AS start_longitude,
21 | LATEST_BY_OFFSET(latitude) AS end_latitude,
22 | LATEST_BY_OFFSET(longitude) AS end_longitude,
23 | WINDOWSTART AS window_start,
24 | WINDOWEND AS window_end
25 | FROM vehicle_position
26 | WINDOW SESSION (5 MINUTES)
27 | GROUP BY vehicle_id;
28 |
--------------------------------------------------------------------------------
/ksqldb-window-tumbling-heartbeat/ksql/ksql-insert.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | tr '\n' ' ' < insert.sql | \
4 | sed 's/;/;\'$'\n''/g' | \
5 | while read stmt; do
6 | echo '{"ksql":"'$stmt'", "streamsProperties": {}}' | \
7 | curl -s -X "POST" "http://ksqldb-server:8088/ksql" \
8 | -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
9 | -d @- | \
10 | jq
11 | done
12 |
--------------------------------------------------------------------------------
/ksqldb-window-tumbling-heartbeat/ksql/ksql-statements.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | tr '\n' ' ' < statements.sql | \
4 | sed 's/;/;\'$'\n''/g' | \
5 | while read stmt; do
6 | echo '{"ksql":"'$stmt'", "streamsProperties": {}}' | \
7 | curl -s -X "POST" "http://ksqldb-server:8088/ksql" \
8 | -H "Content-Type: application/vnd.ksql.v1+json; charset=utf-8" \
9 | -d @- | \
10 | jq
11 | done
12 |
--------------------------------------------------------------------------------
/ksqldb-window-tumbling-heartbeat/ksql/statements.sql:
--------------------------------------------------------------------------------
1 | CREATE STREAM heartbeat (
2 | person_id VARCHAR,
3 | heartbeat_value DOUBLE,
4 | timestamp VARCHAR
5 | ) WITH (
6 | kafka_topic = 'heartbeat',
7 | timestamp='timestamp',
8 | timestamp_format='yyyy-MM-dd HH:mm:ss',
9 | partitions = 1,
10 | value_format = 'avro'
11 | );
12 |
13 |
14 | CREATE TABLE heartbeat_60sec
15 | WITH (kafka_topic='heartbeat_60sec') AS
16 | SELECT person_id,
17 | COUNT(*) AS beat_over_threshold_count,
18 | WINDOWSTART AS window_start,
19 | WINDOWEND AS window_end
20 | FROM heartbeat
21 | WINDOW TUMBLING (SIZE 1 MINUTES)
22 | where heartbeat_value > 120
23 | GROUP BY person_id;
24 |
--------------------------------------------------------------------------------
/mirror-maker2/config/mm2-extra.properties:
--------------------------------------------------------------------------------
1 | # specify any number of cluster aliases
2 | clusters=DC-X,DC-Y
3 |
4 | # connection information for each cluster
5 | DC-X.bootstrap.servers=broker:9092
6 | DC-Y.bootstrap.servers=broker-destination:9082
7 |
8 | # enable and configure individual replication flows
9 | DC-X->DC-Y.enabled = true
10 | DC-X->DC-Y.topics = .*
11 |
12 | offset.storage.replication.factor=1
13 | status.storage.replication.factor=1
14 | config.storage.replication.factor=1
15 | replication.factor = 1
16 | checkpoints.topic.replication.factor=1
17 | heartbeats.topic.replication.factor=1
18 |
--------------------------------------------------------------------------------
/mirror-maker2/config/mm2-extra2.properties:
--------------------------------------------------------------------------------
1 | # specify any number of cluster aliases
2 | clusters=DC-X,DC-Y
3 |
4 | # connection information for each cluster
5 | DC-X.bootstrap.servers=broker:9092
6 | DC-Y.bootstrap.servers=broker-destination:9082
7 |
8 | # enable and configure individual replication flows
9 | DC-Y->DC-X.enabled = true
10 | DC-Y->DC-X.topics = TopicB
11 | replication.policy.separator=
12 | source.cluster.alias=
13 | target.cluster.alias=
14 |
15 | offset.storage.replication.factor=1
16 | status.storage.replication.factor=1
17 | config.storage.replication.factor=1
18 | replication.factor = 1
19 | checkpoints.topic.replication.factor=1
20 | heartbeats.topic.replication.factor=1
21 |
22 |
23 |
--------------------------------------------------------------------------------
/mirror-maker2/config/mm2-no-alias.properties:
--------------------------------------------------------------------------------
1 | # specify any number of cluster aliases
2 | clusters=DC-X,DC-Y
3 |
4 | # connection information for each cluster
5 | DC-X.bootstrap.servers=broker:9092
6 | DC-Y.bootstrap.servers=broker-destination:9082
7 |
8 | # enable and configure individual replication flows
9 | DC-X->DC-Y.enabled = true
10 | DC-X->DC-Y.topics = .*
11 | DC-Y->DC-X.enabled = true
12 | DC-Y->DC-X.topics = .*
13 |
14 | # customize as needed
15 | sync.topic.acls.enabled=true
16 |
17 |
18 | # no alias
19 | "replication.policy.separator": ""
20 | "source.cluster.alias": "",
21 | "target.cluster.alias": "",
22 |
23 | offset.storage.replication.factor=1
24 | status.storage.replication.factor=1
25 | config.storage.replication.factor=1
26 | replication.factor = 1
27 | checkpoints.topic.replication.factor=1
28 | heartbeats.topic.replication.factor=1
29 |
30 |
31 |
--------------------------------------------------------------------------------
/mirror-maker2/config/mm2-ssl.properties:
--------------------------------------------------------------------------------
1 | # specify any number of cluster aliases
2 | clusters=DC-X,DC-Y
3 |
4 | # connection information for each cluster
5 | DC-X.bootstrap.servers=broker:9092
6 | DC-Y.bootstrap.servers=broker-destination:9082
7 |
8 | # enable and configure individual replication flows
9 | DC-X->DC-Y.enabled = true
10 | DC-X->DC-Y.topics = .*
11 | DC-Y->DC-X.enabled = true
12 | DC-Y->DC-X.topics = .*
13 |
14 | # customize as needed
15 | sync.topic.acls.enabled=true
16 |
17 | # SSL
18 | DC-X.security.protocol=SSL
19 | DC-X.ssl.endpoint.identification.algorithm=
20 | DC-X.ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1
21 | DC-X.ssl.truststore.location=[CHANGEME!!!]
22 | DC-X.ssl.truststore.password=[CHANGEME!!!]
23 |
24 | DC-Y.security.protocol=SSL
25 | DC-Y.ssl.endpoint.identification.algorithm=
26 | DC-Y.ssl.enabled.protocols=TLSv1.2,TLSv1.1,TLSv1
27 | DC-Y.ssl.truststore.location=[CHANGEME!!!]
28 | DC-Y.ssl.truststore.password=[CHANGEME!!!]
29 |
30 | offset.storage.replication.factor=1
31 | status.storage.replication.factor=1
32 | config.storage.replication.factor=1
33 | replication.factor = 1
34 | checkpoints.topic.replication.factor=1
35 | heartbeats.topic.replication.factor=1
36 |
--------------------------------------------------------------------------------
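The [CHANGEME!!!] entries must point at JKS truststores containing the CA that signed each cluster's broker certificates. A hedged sketch with hypothetical file names and password:

    # import the cluster CA certificate into a truststore referenced by mm2-ssl.properties
    # (ca-cert.pem, the truststore name and the password are placeholders)
    keytool -importcert -trustcacerts -alias CARoot \
      -file ca-cert.pem \
      -keystore kafka.mm2.truststore.jks \
      -storepass changeit -noprompt
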
/mirror-maker2/config/mm2.properties:
--------------------------------------------------------------------------------
1 | # specify any number of cluster aliases
2 | clusters=DC-X,DC-Y
3 |
4 | # connection information for each cluster
5 | DC-X.bootstrap.servers=broker:9092
6 | DC-Y.bootstrap.servers=broker-destination:9082
7 |
8 | # enable and configure individual replication flows
9 | DC-X->DC-Y.enabled = true
10 | DC-X->DC-Y.topics = TopicA
11 | DC-Y->DC-X.enabled = true
12 | DC-Y->DC-X.topics = TopicB
13 |
14 | # customize as needed
15 | sync.topic.acls.enabled=true
16 | sync.topic.configs.enabled=true
17 |
18 | offset.storage.replication.factor=1
19 | status.storage.replication.factor=1
20 | config.storage.replication.factor=1
21 | replication.factor = 1
22 | checkpoints.topic.replication.factor=1
23 | heartbeats.topic.replication.factor=1
24 |
--------------------------------------------------------------------------------
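These property files are consumed by the dedicated MirrorMaker 2 driver shipped with Apache Kafka; a minimal sketch, assuming the file is reachable from the Kafka installation:

    # start a MirrorMaker 2 node with the DC-X -> DC-Y and DC-Y -> DC-X flows defined above
    $KAFKA_HOME/bin/connect-mirror-maker.sh mirror-maker2/config/mm2.properties
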
/monitoring/list_mbeans.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | HOST=$1
4 | PORT=$2
5 |
6 | wget https://github.com/jiaqi/jmxterm/releases/download/v1.0.2/jmxterm-1.0.2-uber.jar
7 |
8 | # Connect to the JMX server using jmxterm
9 | echo "beans" | java -jar jmxterm-1.0.2-uber.jar --url $HOST:$PORT -n
--------------------------------------------------------------------------------
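Hypothetical invocation, assuming a broker that exposes JMX on port 9999:

    # list the MBeans of a broker reachable over JMX (host and port are illustrative)
    sh monitoring/list_mbeans.sh localhost 9999
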
/performance/trogdor/agent/trogdor-agent1.conf:
--------------------------------------------------------------------------------
1 | {
2 | "platform": "org.apache.kafka.trogdor.basic.BasicPlatform", "nodes": {
3 | "node1": {
4 | "hostname": "broker2",
5 | "trogdor.agent.port": 8888,
6 | "trogdor.coordinator.port": 8889
7 | }
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/performance/trogdor/agent/trogdor-agent1.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 | /usr/bin/trogdor agent -c /tmp/trogdor/agent/trogdor-agent.conf -n node1 >/home/appuser/trogdor-agent.log 2>&1 &
--------------------------------------------------------------------------------
/performance/trogdor/agent/trogdor-agent2.conf:
--------------------------------------------------------------------------------
1 | {
2 | "platform": "org.apache.kafka.trogdor.basic.BasicPlatform", "nodes": {
3 | "node2": {
4 | "hostname": "broker3",
5 | "trogdor.agent.port": 8888,
6 | "trogdor.coordinator.port": 8889
7 | }
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/performance/trogdor/agent/trogdor-agent2.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 | /usr/bin/trogdor agent -c /tmp/trogdor/agent/trogdor-agent.conf -n node2 >/home/appuser/trogdor-agent.log 2>&1 &
--------------------------------------------------------------------------------
/performance/trogdor/coordinator/compression/lz4/node0.json:
--------------------------------------------------------------------------------
1 | {
2 | "class": "org.apache.kafka.trogdor.workload.ProduceBenchSpec",
3 | "durationMs": 60000,
4 | "producerNode": "node0",
5 | "targetMessagesPerSec": 200,
6 | "maxMessages": 10000,
7 | "bootstrapServers": "broker:9092,broker2:9093,broker3:9094",
8 | "valueGenerator": {
9 | "type": "sequential",
10 | "size": 2048
11 | },
12 | "producerConf": {
13 | "compression.type": "lz4",
14 | "linger.ms": "10",
15 | "batch.size": "16384",
16 | "retries": "2147483647",
17 | "acks": "all"
18 | },
19 | "commonClientConf": {
20 | },
21 | "activeTopics": {
22 | "topic-perf" : {
23 | "numPartitions": 6,
24 | "replicationFactor": 3
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/performance/trogdor/coordinator/compression/lz4/node1.json:
--------------------------------------------------------------------------------
1 | {
2 | "class": "org.apache.kafka.trogdor.workload.ProduceBenchSpec",
3 | "durationMs": 60000,
4 | "producerNode": "node1",
5 | "targetMessagesPerSec": 200,
6 | "maxMessages": 10000,
7 | "bootstrapServers": "broker:9092,broker2:9093,broker3:9094",
8 | "valueGenerator": {
9 | "type": "sequential",
10 | "size": 2048
11 | },
12 | "producerConf": {
13 | "compression.type": "lz4",
14 | "linger.ms": "10",
15 | "batch.size": "16384",
16 | "retries": "2147483647",
17 | "acks": "all"
18 | },
19 | "commonClientConf": {
20 | },
21 | "activeTopics": {
22 | "topic-perf" : {
23 | "numPartitions": 6,
24 | "replicationFactor": 3
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/performance/trogdor/coordinator/compression/lz4/node2.json:
--------------------------------------------------------------------------------
1 | {
2 | "class": "org.apache.kafka.trogdor.workload.ProduceBenchSpec",
3 | "durationMs": 60000,
4 | "producerNode": "node2",
5 | "targetMessagesPerSec": 200,
6 | "maxMessages": 10000,
7 | "bootstrapServers": "broker:9092,broker2:9093,broker3:9094",
8 | "valueGenerator": {
9 | "type": "sequential",
10 | "size": 2048
11 | },
12 | "producerConf": {
13 | "compression.type": "lz4",
14 | "linger.ms": "10",
15 | "batch.size": "16384",
16 | "retries": "2147483647",
17 | "acks": "all"
18 | },
19 | "commonClientConf": {
20 | },
21 | "activeTopics": {
22 | "topic-perf" : {
23 | "numPartitions": 6,
24 | "replicationFactor": 3
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/performance/trogdor/coordinator/compression/lz4/trogdor-task-cancel.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | start=0
5 | end=3
6 | increment=1
7 |
8 | current=$start
9 |
10 | while(( $(echo "$current < $end" | /usr/bin/bc -l) )); do
11 | /kafka_2.13-4.0.0/bin/trogdor.sh client destroyTask -t localhost:8889 -i node-$current
12 | current=$(echo "$current + $increment" | /usr/bin/bc -l)
13 | done
14 |
--------------------------------------------------------------------------------
/performance/trogdor/coordinator/compression/lz4/trogdor-task-status.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | start=0
5 | end=3
6 | increment=1
7 |
8 | current=$start
9 |
10 | while(( $(echo "$current < $end" | /usr/bin/bc -l) )); do
11 | /kafka_2.13-4.0.0/bin/trogdor.sh client showTask -t localhost:8889 -i node-$current --show-status
12 | current=$(echo "$current + $increment" | /usr/bin/bc -l)
13 | done
14 |
--------------------------------------------------------------------------------
/performance/trogdor/coordinator/compression/lz4/trogdor-task.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | start=0
5 | end=3
6 | increment=1
7 |
8 | current=$start
9 |
10 | while(( $(echo "$current < $end" | /usr/bin/bc -l) )); do
11 | /kafka_2.13-4.0.0/bin/trogdor.sh client createTask -t localhost:8889 -i node-$current --spec /tmp/trogdor/coordinator/compression/nocompression/node$current.json
12 | current=$(echo "$current + $increment" | /usr/bin/bc -l)
13 | done
14 |
--------------------------------------------------------------------------------
/performance/trogdor/coordinator/compression/nocompression/node0.json:
--------------------------------------------------------------------------------
1 | {
2 | "class": "org.apache.kafka.trogdor.workload.ProduceBenchSpec",
3 | "durationMs": 60000,
4 | "producerNode": "node0",
5 | "targetMessagesPerSec": 200,
6 | "maxMessages": 10000,
7 | "bootstrapServers": "broker:9092,broker2:9093,broker3:9094",
8 | "valueGenerator": {
9 | "type": "sequential",
10 | "size": 2048
11 | },
12 | "producerConf": {
13 | "linger.ms": "10",
14 | "batch.size": "16384",
15 | "retries": "2147483647",
16 | "acks": "all"
17 | },
18 | "commonClientConf": {
19 | },
20 | "activeTopics": {
21 | "topic-perf" : {
22 | "numPartitions": 6,
23 | "replicationFactor": 3
24 | }
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/performance/trogdor/coordinator/compression/nocompression/node1.json:
--------------------------------------------------------------------------------
1 | {
2 | "class": "org.apache.kafka.trogdor.workload.ProduceBenchSpec",
3 | "durationMs": 60000,
4 | "producerNode": "node1",
5 | "targetMessagesPerSec": 200,
6 | "maxMessages": 10000,
7 | "bootstrapServers": "broker:9092,broker2:9093,broker3:9094",
8 | "valueGenerator": {
9 | "type": "sequential",
10 | "size": 2048
11 | },
12 | "producerConf": {
13 | "linger.ms": "10",
14 | "batch.size": "16384",
15 | "retries": "2147483647",
16 | "acks": "all"
17 | },
18 | "commonClientConf": {
19 | },
20 | "activeTopics": {
21 | "topic-perf" : {
22 | "numPartitions": 6,
23 | "replicationFactor": 3
24 | }
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/performance/trogdor/coordinator/compression/nocompression/node2.json:
--------------------------------------------------------------------------------
1 | {
2 | "class": "org.apache.kafka.trogdor.workload.ProduceBenchSpec",
3 | "durationMs": 60000,
4 | "producerNode": "node2",
5 | "targetMessagesPerSec": 200,
6 | "maxMessages": 10000,
7 | "bootstrapServers": "broker:9092,broker2:9093,broker3:9094",
8 | "valueGenerator": {
9 | "type": "sequential",
10 | "size": 2048
11 | },
12 | "producerConf": {
13 | "linger.ms": "10",
14 | "batch.size": "16384",
15 | "retries": "2147483647",
16 | "acks": "all"
17 | },
18 | "commonClientConf": {
19 | },
20 | "activeTopics": {
21 | "topic-perf" : {
22 | "numPartitions": 6,
23 | "replicationFactor": 3
24 | }
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/performance/trogdor/coordinator/compression/nocompression/trogdor-task-cancel.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | start=0
5 | end=3
6 | increment=1
7 |
8 | current=$start
9 |
10 | while(( $(echo "$current < $end" | /usr/bin/bc -l) )); do
11 | /kafka_2.13-4.0.0/bin/trogdor.sh client destroyTask -t localhost:8889 -i node-$current
12 | current=$(echo "$current + $increment" | /usr/bin/bc -l)
13 | done
14 |
--------------------------------------------------------------------------------
/performance/trogdor/coordinator/compression/nocompression/trogdor-task-status.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | start=0
5 | end=3
6 | increment=1
7 |
8 | current=$start
9 |
10 | while(( $(echo "$current < $end" | /usr/bin/bc -l) )); do
11 | /kafka_2.13-4.0.0/bin/trogdor.sh client showTask -t localhost:8889 -i node-$current --show-status
12 | current=$(echo "$current + $increment" | /usr/bin/bc -l)
13 | done
14 |
--------------------------------------------------------------------------------
/performance/trogdor/coordinator/compression/nocompression/trogdor-task.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | start=0
5 | end=3
6 | increment=1
7 |
8 | current=$start
9 |
10 | while(( $(echo "$current < $end" | /usr/bin/bc -l) )); do
11 | /kafka_2.13-4.0.0/bin/trogdor.sh client createTask -t localhost:8889 -i node-$current --spec /tmp/trogdor/coordinator/compression/nocompression/node$current.json
12 | current=$(echo "$current + $increment" | /usr/bin/bc -l)
13 | done
14 |
--------------------------------------------------------------------------------
/performance/trogdor/coordinator/trogdor-coordinator.conf:
--------------------------------------------------------------------------------
1 | {
2 | "platform": "org.apache.kafka.trogdor.basic.BasicPlatform", "nodes": {
3 | "node0": {
4 | "hostname": "broker",
5 | "trogdor.agent.port": 8888,
6 | "trogdor.coordinator.port": 8889
7 | },
8 | "node1": {
9 | "hostname": "broker2",
10 | "trogdor.agent.port": 8888,
11 | "trogdor.coordinator.port": 8889
12 | },
13 | "node2": {
14 | "hostname": "broker3",
15 | "trogdor.agent.port": 8888,
16 | "trogdor.coordinator.port": 8889
17 | }
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/performance/trogdor/coordinator/trogdor-coordinator.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -x
4 | /kafka_2.13-4.0.0/bin/trogdor.sh coordinator -c /tmp/trogdor/coordinator/trogdor-coordinator.conf -n node0 >/tmp/trogdor/coordinator/trogdor-coordinator.log 2>&1 &
5 |
--------------------------------------------------------------------------------
/postgres-to-mongo/config/connector_mongo_sink.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "mongo-sink-dlq",
3 | "config": {
4 | "connector.class": "com.mongodb.kafka.connect.MongoSinkConnector",
5 | "errors.tolerance": "all",
6 | "topics": "jdbc_accounts",
7 | "errors.deadletterqueue.topic.name": "dlq-mongo-accounts",
8 | "errors.deadletterqueue.topic.replication.factor": "1",
9 | "errors.deadletterqueue.context.headers.enable": "true",
10 | "connection.uri": "mongodb://admin:password@mongo:27017",
11 | "database": "Employee",
12 | "collection": "account",
13 | "mongo.errors.log.enable":"true",
14 | "delete.on.null.values": "true",
15 | "document.id.strategy.overwrite.existing": "true",
16 | "document.id.strategy": "com.mongodb.kafka.connect.sink.processor.id.strategy.FullKeyStrategy",
17 | "delete.writemodel.strategy": "com.mongodb.kafka.connect.sink.writemodel.strategy.DeleteOneDefaultStrategy",
18 | "publish.full.document.only": "true",
19 | "value.converter": "io.confluent.connect.avro.AvroConverter",
20 | "key.converter": "org.apache.kafka.connect.json.JsonConverter",
21 | "key.converter.schemas.enable": "false",
22 | "value.converter.schema.registry.url": "http://schema-registry:8081"
23 | }
24 | }
--------------------------------------------------------------------------------
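To verify that records land in MongoDB, something like the following can be used, assuming the container is named mongo, ships mongosh, and uses the credentials from connection.uri:

    # show a few documents written by the sink into Employee.account
    docker exec -it mongo mongosh "mongodb://admin:password@localhost:27017" \
      --eval 'db.getSiblingDB("Employee").account.find().limit(5)'
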
/postgres-to-mongo/config/create-tables.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE accounts (
2 | SEQ_ID SERIAL PRIMARY KEY,
3 | ID VARCHAR,
4 | SSN VARCHAR
5 | );
6 |
7 | INSERT INTO accounts (ID, SSN) VALUES ('1', 'AAAA');
8 | INSERT INTO accounts (ID, SSN) VALUES ('2', 'BBBB');
9 | INSERT INTO accounts (ID, SSN) VALUES ('3', 'CCCC');
10 | INSERT INTO accounts (ID, SSN) VALUES ('4', 'DDDD');
11 | INSERT INTO accounts (ID, SSN) VALUES ('5', 'EEEE');
--------------------------------------------------------------------------------
/postgres-to-mongo/config/jdbc_psql_source.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "jdbc-source-connector",
3 | "config": {
4 | "connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector",
5 | "tasks.max": "1",
6 | "connection.url": "jdbc:postgresql://postgres:5432/postgres",
7 | "connection.user": "postgres",
8 | "connection.password": "postgres",
9 | "table.whitelist": "accounts",
10 | "mode": "incrementing",
11 | "incrementing.column.name": "seq_id",
12 | "topic.prefix": "jdbc_",
13 | "poll.interval.ms": "5000",
14 | "numeric.mapping": "best_fit",
15 | "value.converter": "io.confluent.connect.avro.AvroConverter",
16 | "key.converter": "org.apache.kafka.connect.json.JsonConverter",
17 | "key.converter.schemas.enable": "false",
18 | "value.converter.schema.registry.url": "http://schema-registry:8081",
19 | "transforms": "createKey,nestKey",
20 | "transforms.createKey.type": "org.apache.kafka.connect.transforms.ValueToKey",
21 | "transforms.createKey.fields": "id",
22 | "transforms.nestKey.type": "org.apache.kafka.connect.transforms.ReplaceField$Key",
23 | "transforms.nestKey.renames": "id:originalId"
24 | }
25 | }
--------------------------------------------------------------------------------
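Since the source writes Avro values, the captured rows can be inspected with the schema-aware console consumer; a sketch assuming the compose default ports for the broker and Schema Registry:

    # read the rows captured from the accounts table
    kafka-avro-console-consumer --bootstrap-server localhost:9092 \
      --topic jdbc_accounts --from-beginning \
      --property schema.registry.url=http://localhost:8081
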
/principal-builder/config/client.properties:
--------------------------------------------------------------------------------
1 | security.protocol=SSL
2 | ssl.truststore.location=/etc/kafka/secrets/kafka.client.truststore.jks
3 | ssl.truststore.password=changeit
4 | ssl.keystore.location=/etc/kafka/secrets/kafka.client.keystore.jks
5 | ssl.keystore.password=changeit
6 | ssl.key.password=changeit
7 | ssl.endpoint.identification.algorithm=https
--------------------------------------------------------------------------------
/principal-builder/config/client2.properties:
--------------------------------------------------------------------------------
1 | security.protocol=SSL
2 | ssl.truststore.location=./ssl/kafka.client2.truststore.jks
3 | ssl.truststore.password=changeit
4 | ssl.keystore.location=./ssl/kafka.client2.keystore.jks
5 | ssl.keystore.password=changeit
6 | ssl.key.password=changeit
7 | ssl.endpoint.identification.algorithm=https
--------------------------------------------------------------------------------
/principal-builder/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <modelVersion>4.0.0</modelVersion>
6 | 
7 |     <parent>
8 |         <groupId>org.hifly.kafka</groupId>
9 |         <artifactId>kafka-play</artifactId>
10 |         <version>1.2.1</version>
11 |     </parent>
12 | 
13 |     <artifactId>principal-builder</artifactId>
14 |     <packaging>jar</packaging>
15 | 
16 |     <dependencies>
17 |         <dependency>
18 |             <groupId>org.apache.kafka</groupId>
19 |             <artifactId>kafka_2.13</artifactId>
20 |         </dependency>
21 |         <dependency>
22 |             <groupId>org.apache.kafka</groupId>
23 |             <artifactId>kafka-clients</artifactId>
24 |         </dependency>
25 |         <dependency>
26 |             <groupId>org.slf4j</groupId>
27 |             <artifactId>slf4j-simple</artifactId>
28 |         </dependency>
29 |         <dependency>
30 |             <groupId>junit</groupId>
31 |             <artifactId>junit</artifactId>
32 |         </dependency>
33 |     </dependencies>
34 | 
35 | </project>
--------------------------------------------------------------------------------
/quotas/config/alice.properties:
--------------------------------------------------------------------------------
1 | sasl.mechanism=PLAIN
2 | security.protocol=SASL_PLAINTEXT
3 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
4 | username="alice" \
5 | password="alice-secret";
--------------------------------------------------------------------------------
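This client config identifies the alice principal that the quota demo throttles; a quota can be attached to that user with kafka-configs, for example (bootstrap address and byte rates are illustrative, and a --command-config with admin credentials may be required on a SASL listener):

    # throttle alice to ~1 MB/s produce and ~2 MB/s fetch
    kafka-configs --bootstrap-server localhost:9092 --alter \
      --add-config 'producer_byte_rate=1048576,consumer_byte_rate=2097152' \
      --entity-type users --entity-name alice
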
/quotas/config/prometheus/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 | # How frequently to scrape targets by default.
3 | # Default 15s
4 | scrape_interval: 60s
5 | # How frequently to evaluate rules.
6 | # Default 15s
7 | evaluation_interval: 15s
8 | # How long until a scrape request times out.
9 | # Default to 10s.
10 | # Required because cp-demo is using cpu throttling, so let's leave enough time to fetch the metrics in particular for the first time as it needs to compile all rexps
11 | scrape_timeout: 30s
12 |
13 |
14 | scrape_configs:
15 | - job_name: "prometheus"
16 | static_configs:
17 | - targets: ["localhost:9090"]
18 |
19 |
20 | - job_name: "kafka-broker"
21 | static_configs:
22 | - targets:
23 | - "broker:1234"
24 | labels:
25 | env: "dev"
26 | relabel_configs:
27 | - source_labels: [__address__]
28 | target_label: hostname
29 | regex: '([^:]+)(:[0-9]+)?'
30 | replacement: '${1}'
--------------------------------------------------------------------------------
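Once Prometheus is up, the scrape targets defined above can be checked through its HTTP API (port 9090 assumed to be published on the host):

    # list scrape targets and their health
    curl -s http://localhost:9090/api/v1/targets | \
      jq '.data.activeTargets[] | {job: .labels.job, health: .health}'
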
/release/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | PRJ_HOME=..
4 | SPRING_PRODUCER_HOME=../kafka-springboot-producer
5 | SPRING_CONSUMER_HOME=../kafka-springboot-consumer
6 |
7 |
8 | function compile {
9 | printf "\nCompiling..\n"
10 | mvn -f $1/pom.xml clean compile
11 | }
12 |
13 | function release {
14 | printf "\nPackaging..\n"
15 | mvn -f $1/pom.xml clean install
16 | }
17 |
18 | if [ "$1" == "compile" ]; then
19 | compile $PRJ_HOME
20 | compile $SPRING_PRODUCER_HOME
21 | compile $SPRING_CONSUMER_HOME
22 | elif [ "$1" == "release" ]; then
23 | release $PRJ_HOME
24 | release $SPRING_PRODUCER_HOME
25 | release $SPRING_CONSUMER_HOME
26 | else
27 | printf "\nNo option selected ..\n"
28 | exit -1
29 | fi
--------------------------------------------------------------------------------
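Typical usage of the script, run from the release directory:

    # compile all modules, or build and install the artifacts
    sh build.sh compile
    sh build.sh release
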
/sasl-ssl/config/broker_jaas.conf:
--------------------------------------------------------------------------------
1 | KafkaServer {
2 | org.apache.kafka.common.security.plain.PlainLoginModule required
3 | username="broker"
4 | password="broker-secret"
5 | user_broker="broker-secret"
6 | user_client="client-secret"
7 | user_schema-registry="schema-registry-secret"
8 | user_restproxy="restproxy-secret"
9 | user_clientrestproxy="clientrestproxy-secret"
10 | user_badclient="badclient-secret";
11 | };
--------------------------------------------------------------------------------
/sasl-ssl/config/client.properties:
--------------------------------------------------------------------------------
1 | security.protocol=SASL_SSL
2 | ssl.truststore.location=/etc/kafka/secrets/kafka.client.truststore.jks
3 | ssl.truststore.password=confluent
4 | ssl.keystore.location=/etc/kafka/secrets/kafka.client.keystore.jks
5 | ssl.keystore.password=confluent
6 | ssl.key.password=confluent
7 | sasl.mechanism=PLAIN
8 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
9 | username=\"client\" \
10 | password=\"client-secret\";
--------------------------------------------------------------------------------
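The file can be passed straight to the console clients; a hedged sketch, assuming a SASL_SSL listener published on localhost:9093 and an existing topic named test:

    # produce and consume over the SASL_SSL listener using the client credentials above
    kafka-console-producer --bootstrap-server localhost:9093 --topic test \
      --producer.config sasl-ssl/config/client.properties
    kafka-console-consumer --bootstrap-server localhost:9093 --topic test --from-beginning \
      --consumer.config sasl-ssl/config/client.properties
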
/scripts/bootstrap-acls.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting Kafka cluster..."
6 | docker-compose -f acls/docker-compose.yml --env-file .env up -d
7 |
--------------------------------------------------------------------------------
/scripts/bootstrap-apicurio.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting Kafka cluster..."
6 | docker-compose -f kafka-producer/docker-compose-apicurio.yml --env-file .env up -d
7 |
--------------------------------------------------------------------------------
/scripts/bootstrap-cdc.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting docker containers..."
6 | docker-compose -f cdc-debezium-postgres/docker-compose.yml --env-file .env up -d
--------------------------------------------------------------------------------
/scripts/bootstrap-cflt-schema-registry.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting Kafka cluster..."
6 | docker-compose -f kafka-producer/docker-compose-cflt-sr.yml --env-file .env up -d
7 |
--------------------------------------------------------------------------------
/scripts/bootstrap-claim-check.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting Kafka cluster..."
6 | docker-compose -f claim-check/docker-compose.yml --env-file .env up -d
7 |
--------------------------------------------------------------------------------
/scripts/bootstrap-connect-event-router.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting docker containers..."
6 | docker-compose -f kafka-connect-source-event-router/docker-compose.yml --env-file .env up -d
7 |
8 | echo "Wait 20 seconds..."
9 |
10 | sleep 20
11 |
12 | echo "Installing jdbc source..."
13 | curl -X POST -H Accept:application/json -H Content-Type:application/json http://localhost:8083/connectors/ -d @kafka-connect-source-event-router/config/connector_jdbc_source.json
14 |
15 | echo "Wait 3 seconds..."
16 |
17 | sleep 3
18 |
19 | echo "connectors status..."
20 | curl -v http://localhost:8083/connectors?expand=status
--------------------------------------------------------------------------------
/scripts/bootstrap-connect-sink-http.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting docker containers..."
6 | docker-compose -f kafka-connect-sink-http/docker-compose.yml --env-file .env up -d
7 |
8 | echo "Wait 20 seconds..."
9 |
10 | sleep 20
11 |
12 | echo "Create topicA..."
13 | docker exec -it broker kafka-topics --bootstrap-server broker:9092 --create --topic topicA --replication-factor 1 --partitions 1
14 |
15 | echo "Installing http sink..."
16 | curl -X POST -H Accept:application/json -H Content-Type:application/json http://localhost:8083/connectors/ -d @kafka-connect-sink-http/config/http_sink.json
17 |
18 | echo "Wait 3 seconds..."
19 |
20 | sleep 3
21 |
22 | echo "connectors status..."
23 | curl -v http://localhost:8083/connectors?expand=status
24 |
25 | sleep 5
26 |
27 | echo "start http demo..."
28 | cd kafka-connect-sink-http/rest-controller
29 | mvn spring-boot:run
--------------------------------------------------------------------------------
/scripts/bootstrap-connect-sink-s3-parquet.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down-connect-sink-s3.sh
4 |
5 | echo "Starting Kafka cluster..."
6 | docker-compose -f kafka-connect-sink-s3/docker-compose.yml --env-file .env up -d
7 |
8 | echo "Wait 20 seconds..."
9 |
10 | sleep 20
11 |
12 | echo "Create gaming-player-activity..."
13 | docker exec -it broker kafka-topics --bootstrap-server broker:9092 --create --topic gaming-player-activity --replication-factor 1 --partitions 1
14 |
15 | echo "Installing s3 parquet format sink..."
16 | curl -X POST -H Accept:application/json -H Content-Type:application/json http://localhost:8083/connectors/ -d @kafka-connect-sink-s3/config/s3_parquet_sink.json
17 |
18 | echo "Wait 3 seconds..."
19 |
20 | sleep 3
21 |
22 | echo "connectors status..."
23 | curl -v http://localhost:8083/connectors?expand=status
--------------------------------------------------------------------------------
/scripts/bootstrap-connect-sink-s3.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down-connect-sink-s3.sh
4 |
5 | echo "Starting Kafka cluster..."
6 | docker-compose -f kafka-connect-sink-s3/docker-compose.yml --env-file .env up -d
7 |
8 | echo "Wait 20 seconds..."
9 |
10 | sleep 20
11 |
12 | echo "Create gaming-player-activity..."
13 | docker exec -it broker kafka-topics --bootstrap-server broker:9092 --create --topic gaming-player-activity --replication-factor 1 --partitions 1
14 |
15 | echo "Installing s3 sink..."
16 | curl -X POST -H Accept:application/json -H Content-Type:application/json http://localhost:8083/connectors/ -d @kafka-connect-sink-s3/config/s3_sink.json
17 |
18 | echo "Wait 3 seconds..."
19 |
20 | sleep 3
21 |
22 | echo "connectors status..."
23 | curl -v http://localhost:8083/connectors?expand=status
--------------------------------------------------------------------------------
/scripts/bootstrap-connect-source-sap-hana.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down-connect-source-sap-hana.sh
4 |
5 | wget -P kafka-connect-source-sap-hana/config https://github.com/SAP/kafka-connect-sap/releases/download/0.9.4/kafka-connector-hana_2.13-0.9.4.jar
6 | wget -P kafka-connect-source-sap-hana/config https://repo1.maven.org/maven2/com/google/guava/guava/31.0.1-jre/guava-31.0.1-jre.jar
7 | wget -P kafka-connect-source-sap-hana/config https://repo1.maven.org/maven2/com/sap/cloud/db/jdbc/ngdbc/2.10.14/ngdbc-2.10.14.jar
8 |
9 | mkdir -p kafka-connect-source-sap-hana/data/hana
10 | chmod a+rwx kafka-connect-source-sap-hana/data/hana
11 |
12 | echo "Starting Kafka cluster..."
13 | docker-compose -f kafka-connect-source-sap-hana/docker-compose.yml --env-file .env up -d
14 |
15 | echo "Wait 120 seconds..."
16 |
17 | sleep 120
18 |
19 | echo "Create testtopic..."
20 | docker exec -it broker kafka-topics --bootstrap-server broker:9092 --create --topic testtopic --replication-factor 1 --partitions 1
21 |
22 | echo "Installing sap hana source..."
23 | curl -X POST -H Accept:application/json -H Content-Type:application/json http://localhost:8083/connectors/ -d @kafka-connect-source-sap-hana/config/sap_hana_source.json
24 |
25 | echo "Wait 3 seconds..."
26 |
27 | sleep 3
28 |
29 | echo "connectors status..."
30 | curl -v http://localhost:8083/connectors?expand=status
--------------------------------------------------------------------------------
/scripts/bootstrap-connect-tasks.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting docker containers..."
6 | docker-compose -f kafka-connect-task-distribution/docker-compose.yml --env-file .env up -d
7 |
8 | echo "Wait 50 seconds..."
9 |
10 | sleep 50
11 |
12 | echo "Installing datagen pageviews..."
13 | curl -X POST -H Accept:application/json -H Content-Type:application/json http://localhost:8083/connectors/ -d @kafka-connect-task-distribution/config/connector_datagen.json
14 |
15 | echo "Wait 15 seconds..."
16 |
17 | sleep 15
18 |
19 | echo "datagen pageviews status..."
20 | curl -v http://localhost:8083/connectors?expand=status
21 |
22 | sleep 15
23 |
24 | echo "Stop connect2..."
25 | docker stop connect2
26 |
27 | echo "datagen pageviews status..."
28 | curl -v http://localhost:8083/connectors?expand=status
--------------------------------------------------------------------------------
/scripts/bootstrap-flink.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down-flink.sh
4 |
5 | echo "Starting docker containers..."
6 | docker-compose -f flink-window-tumbling-heartbeat/docker-compose.yml --env-file .env up -d
--------------------------------------------------------------------------------
/scripts/bootstrap-hortonworks-sr.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting Kafka cluster..."
6 | docker-compose -f kafka-producer/docker-compose-hortonworks-sr.yml --env-file .env up -d
7 |
--------------------------------------------------------------------------------
/scripts/bootstrap-isolated.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down-isolated.sh
4 |
5 | wget -cO - https://raw.githubusercontent.com/apache/kafka/trunk/docker/examples/jvm/cluster/isolated/plaintext/docker-compose.yml > docker-compose-isolated.yml
6 |
7 | echo "Starting docker containers..."
8 | docker-compose -f docker-compose-isolated.yml --env-file .env up -d
9 |
--------------------------------------------------------------------------------
/scripts/bootstrap-ksqldb-join.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting docker containers..."
6 | docker-compose -f ksqldb-join/docker-compose.yml --env-file .env up -d
--------------------------------------------------------------------------------
/scripts/bootstrap-ksqldb.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting Kafka cluster..."
6 | docker-compose -f ksqldb/docker-compose.yml --env-file .env up -d
--------------------------------------------------------------------------------
/scripts/bootstrap-mm2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting Kafka cluster..."
6 | docker-compose -f mirror-maker2/docker-compose.yml --env-file .env up -d
7 |
--------------------------------------------------------------------------------
/scripts/bootstrap-monitoring.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down-monitoring.sh
4 |
5 | echo "Starting docker containers..."
6 | docker-compose -f monitoring/docker-compose.yml --env-file .env up -d
7 |
--------------------------------------------------------------------------------
/scripts/bootstrap-oauth.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting IDP... (wait 15 seconds)"
6 | docker-compose -f kafka-oauth-kip-768/docker-compose-idp.yml --env-file .env up -d
7 |
--------------------------------------------------------------------------------
/scripts/bootstrap-performance.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | cd performance
6 | ls -ltr
7 |
8 | echo "Build kafka perf image..."
9 | sh build-image.sh
10 |
11 | sleep 3
12 |
13 | cd ..
14 |
15 | echo "Starting docker containers..."
16 | docker-compose -f performance/docker-compose.yml --env-file .env up -d
17 |
--------------------------------------------------------------------------------
/scripts/bootstrap-postgres-to-mongo.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting docker containers..."
6 | docker-compose -f postgres-to-mongo/docker-compose.yml --env-file .env up -d
7 |
8 | sleep 30
9 |
10 | echo "Create jdbc_accounts topic..."
11 | docker exec -it broker kafka-topics --bootstrap-server broker:9092 --create --topic jdbc_accounts --replication-factor 1 --partitions 1
12 |
13 | echo "Installing jdbc source..."
14 | curl -X POST -H Accept:application/json -H Content-Type:application/json http://localhost:8083/connectors/ -d @postgres-to-mongo/config/jdbc_psql_source.json
15 |
16 | sleep 5
17 |
18 | echo "Installing mongo sink..."
19 | curl -X POST -H Accept:application/json -H Content-Type:application/json http://localhost:8083/connectors/ -d @postgres-to-mongo/config/connector_mongo_sink.json
--------------------------------------------------------------------------------
/scripts/bootstrap-quotas.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "download jmx exporter files..."
6 | mkdir -p quotas/config/agent
7 |
8 | wget -P quotas/config/agent https://raw.githubusercontent.com/confluentinc/jmx-monitoring-stacks/main/shared-assets/jmx-exporter/kafka_broker.yml
9 | wget -P quotas/config/agent https://raw.githubusercontent.com/confluentinc/jmx-monitoring-stacks/main/shared-assets/jmx-exporter/jmx_prometheus_javaagent-0.20.0.jar
10 |
11 | echo "download grafana files..."
12 | mkdir -p quotas/config/grafana/provisioning/dashboards
13 | mkdir -p quotas/config/grafana/provisioning/datasources
14 |
15 | wget -P quotas/config/grafana/provisioning/datasources https://raw.githubusercontent.com/confluentinc/jmx-monitoring-stacks/main/jmxexporter-prometheus-grafana/assets/grafana/provisioning/datasources/datasource.yml
16 | wget -P quotas/config/grafana/provisioning/dashboards https://raw.githubusercontent.com/confluentinc/jmx-monitoring-stacks/main/jmxexporter-prometheus-grafana/assets/grafana/provisioning/dashboards/dashboard.yml
17 | wget -P quotas/config/grafana/provisioning/dashboards https://raw.githubusercontent.com/confluentinc/jmx-monitoring-stacks/main/jmxexporter-prometheus-grafana/assets/grafana/provisioning/dashboards/kafka-quotas.json
18 |
19 |
20 | echo "Starting Kafka cluster..."
21 | docker-compose -f quotas/docker-compose.yml --env-file .env up -d
22 |
--------------------------------------------------------------------------------
/scripts/bootstrap-racks.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting Kafka cluster..."
6 | docker-compose -f kafka-consumer/docker-compose.yml --env-file .env up -d
7 |
--------------------------------------------------------------------------------
/scripts/bootstrap-smt-aspectj.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo $PWD
6 |
7 | cd kafka-smt-aspectj
8 |
9 | echo "Create custom SMT jar..."
10 | mvn clean install
11 |
12 | echo "Build SMT custom Connector image..."
13 | docker build . -t connect-custom-smt-image:1.0.0
14 |
15 | sleep 3
16 |
17 | cd ..
18 |
19 | echo "Starting docker containers..."
20 | docker-compose -f kafka-smt-aspectj/docker-compose.yml --env-file .env up -d
21 |
22 | echo "Wait 30 seconds..."
23 |
24 | sleep 30
25 |
26 | echo "Create topic test..."
27 | kafka-topics --bootstrap-server localhost:9092 --create --topic test --replication-factor 1 --partitions 3
28 |
29 | sleep 5
30 |
31 | echo "Installing mongodb sink..."
32 | curl -X POST -H Accept:application/json -H Content-Type:application/json http://localhost:8083/connectors/ -d @kafka-smt-aspectj/config/connector_mongo.json
33 |
34 | sleep 5
35 |
36 | echo "Produce records..."
37 | kafka-console-producer --broker-list localhost:9092 --topic test < kafka-smt-aspectj/config/test.json
--------------------------------------------------------------------------------
/scripts/bootstrap-smt-connector.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo $PWD
6 |
7 | cd kafka-smt-custom
8 |
9 | echo "Create custom SMT jar..."
10 | mvn clean install
11 |
12 | echo "Build SMT custom Connector image..."
13 | docker build . -t connect-custom-smt-image:1.0.0
14 |
15 | sleep 3
16 |
17 | cd ..
18 |
19 | echo "Starting docker containers..."
20 | docker-compose -f kafka-smt-custom/docker-compose.yml --env-file .env up -d
21 |
22 | echo "Wait 50 seconds..."
23 |
24 | sleep 50
25 |
26 | echo "Create topic test..."
27 | kafka-topics --bootstrap-server localhost:9092 --create --topic test --replication-factor 1 --partitions 3
28 |
29 | sleep 5
30 |
31 | echo "Installing mongodb sink..."
32 | curl -X POST -H Accept:application/json -H Content-Type:application/json http://localhost:8083/connectors/ -d @kafka-smt-custom/config/connector_mongo.json
33 |
34 | sleep 5
35 |
36 | echo -e "Produce records..."
37 | kafka-console-producer --broker-list localhost:9092 --topic test < kafka-smt-custom/config/test.json
--------------------------------------------------------------------------------
/scripts/bootstrap-tracing.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting Kafka cluster..."
6 | docker-compose -f kafka-distributed-tracing/docker-compose-tracing.yml --env-file .env up -d
7 |
--------------------------------------------------------------------------------
/scripts/bootstrap-unixcommand-connector.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Build unixcommand Connector image..."
6 | cd kafka-unixcommand-connector
7 | sh build-image.sh
8 |
9 | sleep 3
10 |
11 | cd ..
12 |
13 | echo "Starting docker containers..."
14 | docker-compose -f kafka-unixcommand-connector/docker-compose.yml --env-file .env up -d
15 |
--------------------------------------------------------------------------------
/scripts/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sh scripts/tear-down.sh
4 |
5 | echo "Starting docker containers..."
6 | docker-compose up -d
7 |
--------------------------------------------------------------------------------
/scripts/tear-down-acls.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f acls/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-apicurio.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f kafka-producer/docker-compose-apicurio.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-cdc-informix.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping kafka docker containers..."
4 | docker-compose -f cdc-debezium-informix/docker-compose.yml --env-file .env down --volumes
5 |
6 | echo "Stopping ifx.."
7 | docker rm -f ifx
8 |
9 | rm -rf cdc-debezium-informix/jars/debezium-connector-informix
10 | rm -rf cdc-debezium-informix/jars/debezium-connector-informix-2.6.1.Final-plugin.tar.gz
11 |
--------------------------------------------------------------------------------
/scripts/tear-down-cdc-mongo.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping kafka docker containers..."
4 | docker-compose -f cdc-debezium-mongo/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-cdc.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f cdc-debezium-postgres/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-cflt-schema-registry.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f kafka-producer/docker-compose-cflt-sr.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-claim-check.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f claim-check/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-connect-event-router.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f kafka-connect-source-event-router/docker-compose.yml --env-file .env down --volumes
5 | rm -Rf kafka-connect-source-event-router/postgres-data
6 |
--------------------------------------------------------------------------------
/scripts/tear-down-connect-sink-http.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f kafka-connect-sink-http/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-connect-sink-s3.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f kafka-connect-sink-s3/docker-compose.yml --env-file .env down --volumes
--------------------------------------------------------------------------------
/scripts/tear-down-connect-source-sap-hana.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f kafka-connect-source-sap-hana/docker-compose.yml --env-file .env down --volumes
5 |
6 | rm -rf kafka-connect-source-sap-hana/data
7 | rm -rf kafka-connect-source-sap-hana/config/*.jar
--------------------------------------------------------------------------------
/scripts/tear-down-connect-tasks.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f kafka-connect-task-distribution/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-flink.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f flink-window-tumbling-heartbeat/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-hortonworks-sr.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f kafka-producer/docker-compose-hortonworks-sr.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-isolated.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f docker-compose-isolated.yml --env-file .env down --volumes
5 | rm -rf docker-compose-isolated.yml
--------------------------------------------------------------------------------
/scripts/tear-down-ksqldb-join.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f ksqldb-join/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-ksqldb.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f ksqldb/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-mm2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f mirror-maker2/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-monitoring.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f monitoring/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-mtls.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f mtls-listener/docker-compose-mtls.yml --env-file .env down --volumes
5 | rm -rf mtls-listener/ssl
6 |
--------------------------------------------------------------------------------
/scripts/tear-down-multi-listener.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f multi-listener/docker-compose.yml --env-file .env down --volumes
5 | rm -rf multi-listener/ssl
6 |
--------------------------------------------------------------------------------
/scripts/tear-down-oauth.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f kafka-oauth-kip-768/docker-compose-idp.yml --env-file .env down --volumes
5 | docker-compose -f kafka-oauth-kip-768/docker-compose-oauth.yml --env-file .env down --volumes
6 |
--------------------------------------------------------------------------------
/scripts/tear-down-performance.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f performance/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-postgres-to-mongo.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f postgres-to-mongo/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-principal.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f principal-builder/docker-compose.yml --env-file .env down --volumes
5 | rm -rf principal-builder/ssl
6 |
--------------------------------------------------------------------------------
/scripts/tear-down-quotas.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f quotas/docker-compose.yml --env-file .env down --volumes
5 |
6 | rm -Rf quotas/config/agent
7 | rm -Rf quotas/config/grafana
8 |
--------------------------------------------------------------------------------
/scripts/tear-down-racks.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f kafka-consumer/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-sasl-ssl.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | rm -rf sasl-ssl/secrets
4 |
5 | echo "Stopping docker containers..."
6 | docker-compose -f sasl-ssl/docker-compose.yml --env-file .env down --volumes
7 |
--------------------------------------------------------------------------------
/scripts/tear-down-smt-aspectj.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f kafka-smt-aspectj/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-smt-connector.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f kafka-smt-custom/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-tracing.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f kafka-distributed-tracing/docker-compose-tracing.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down-unixcommand-connector.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose -f kafka-unixcommand-connector/docker-compose.yml --env-file .env down --volumes
5 |
--------------------------------------------------------------------------------
/scripts/tear-down.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stopping docker containers..."
4 | docker-compose down --volumes
5 |
--------------------------------------------------------------------------------
/udp-proxy/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <modelVersion>4.0.0</modelVersion>
6 |
7 |     <parent>
8 |         <groupId>org.hifly.kafka</groupId>
9 |         <artifactId>kafka-play</artifactId>
10 |         <version>1.2.1</version>
11 |     </parent>
12 |
13 |     <artifactId>udp-proxy</artifactId>
14 |     <packaging>jar</packaging>
15 |
16 |     <dependencies>
17 |         <dependency>
18 |             <groupId>org.apache.kafka</groupId>
19 |             <artifactId>kafka_2.13</artifactId>
20 |         </dependency>
21 |         <dependency>
22 |             <groupId>org.slf4j</groupId>
23 |             <artifactId>slf4j-simple</artifactId>
24 |         </dependency>
25 |         <dependency>
26 |             <groupId>junit</groupId>
27 |             <artifactId>junit</artifactId>
28 |         </dependency>
29 |         <dependency>
30 |             <groupId>org.apache.curator</groupId>
31 |             <artifactId>curator-test</artifactId>
32 |         </dependency>
33 |     </dependencies>
34 |
35 | </project>
--------------------------------------------------------------------------------
/udp-proxy/src/main/java/org/hifly/udp/kafka/multicast/Application.java:
--------------------------------------------------------------------------------
1 | package org.hifly.udp.kafka.multicast;
2 |
3 | public class Application {
4 |
5 |     // Defaults, overridable via the 4 positional CLI arguments.
6 |     private static int RECEIVERS = 5;
7 |     private static String BIND_ADDRESS = "230.0.0.0";
8 |     private static int BIND_PORT = 4446;
9 |     private static String KAFKA_TOPIC = "telemetry";
10 |
11 |     public static void main(String[] args) throws Exception {
12 |
13 |         // Expected arguments: <receivers> <bind-address> <bind-port> <kafka-topic>
14 |         if (args != null && args.length == 4) {
15 |             RECEIVERS = args[0] != null && !args[0].isEmpty() ? Integer.parseInt(args[0]) : RECEIVERS;
16 |             BIND_ADDRESS = args[1] != null && !args[1].isEmpty() ? args[1] : BIND_ADDRESS;
17 |             BIND_PORT = args[2] != null && !args[2].isEmpty() ? Integer.parseInt(args[2]) : BIND_PORT;
18 |             KAFKA_TOPIC = args[3] != null && !args[3].isEmpty() ? args[3] : KAFKA_TOPIC;
19 |         }
20 |
21 |         // Start N multicast receivers, each forwarding datagrams to the Kafka topic.
22 |         for (int i = 0; i < RECEIVERS; i++) {
23 |             new MulticastReceiver(BIND_ADDRESS, BIND_PORT, KAFKA_TOPIC).start();
24 |         }
25 |     }
26 |
27 | }
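Note: MulticastReceiver is defined alongside this class in the module and is not shown here. For orientation only, a receiver of this kind typically joins the multicast group and forwards each datagram to a Kafka topic; the following is a hypothetical sketch under that assumption (class name, buffer size and bootstrap address are made up), not the repository's implementation:

package org.hifly.udp.kafka.multicast;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.net.DatagramPacket;
import java.net.InetAddress;
import java.net.MulticastSocket;
import java.util.Properties;

// Hypothetical multicast-to-Kafka bridge thread (illustrative only).
public class SimpleMulticastForwarder extends Thread {

    private final String address;
    private final int port;
    private final String topic;

    public SimpleMulticastForwarder(String address, int port, String topic) {
        this.address = address;
        this.port = port;
        this.topic = topic;
    }

    @Override
    public void run() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props);
             MulticastSocket socket = new MulticastSocket(port)) {
            // Join the multicast group and loop over incoming datagrams.
            socket.joinGroup(InetAddress.getByName(address));
            byte[] buffer = new byte[1024];
            while (true) {
                DatagramPacket packet = new DatagramPacket(buffer, buffer.length);
                socket.receive(packet);
                String payload = new String(packet.getData(), 0, packet.getLength());
                if ("end".equals(payload)) {
                    break; // stop.sh sends "end" to shut receivers down
                }
                producer.send(new ProducerRecord<>(topic, payload));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}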
--------------------------------------------------------------------------------
/udp-proxy/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ "$#" -ne 4 ]
4 | then
5 | echo "Incorrect number of arguments. Use with 4 args "
6 | exit 1
7 | fi
8 |
9 |
10 | mvn clean compile
11 |
12 | echo "Starting $1 UDP receivers listening on $2 and port $3 for kafka topic $4..."
13 | nohup mvn exec:java -Dexec.mainClass="org.hifly.udp.kafka.multicast.Application" -Dexec.args="$1 $2 $3 $4" >/dev/null 2>&1 &
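Note: with exec.args passed as a single quoted string, all four positional arguments reach Application.main. An illustrative run from the module directory, using the defaults hard-coded in Application.java:

    cd udp-proxy
    sh start.sh 5 230.0.0.0 4446 telemetry
    # later, terminate the receivers
    sh stop.sh
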
--------------------------------------------------------------------------------
/udp-proxy/stop.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Stop UDP receivers..."
4 | echo -n "end" | nc -4u -w0 230.0.0.0 4446
--------------------------------------------------------------------------------