├── .gitignore ├── img └── apicurio-registry-artifacts.png ├── src └── main │ ├── olm │ └── operator-group.yml │ ├── resources │ ├── log4j2.properties │ ├── schemas │ │ └── message.avsc │ └── application.properties │ ├── apicurio │ ├── topics │ │ └── kafkasql-journal-topic.yml │ ├── operator │ │ ├── subscription-k8s.yml │ │ └── subscription.yml │ └── service-registry.yml │ ├── strimzi │ ├── operator │ │ ├── subscription-k8s.yml │ │ └── subscription.yml │ ├── topics │ │ ├── kafkatopic-messages.yml │ │ └── kafkatopic-messages-ha.yml │ ├── users │ │ ├── service-registry-user-tls.yml │ │ ├── service-registry-user-scram.yml │ │ └── application-user-scram.yml │ └── kafka │ │ ├── kafka.yml │ │ └── kafka-ha.yml │ ├── java │ └── com │ │ └── rmarting │ │ └── kafka │ │ ├── Application.java │ │ ├── dto │ │ ├── MessageListDTO.java │ │ └── MessageDTO.java │ │ ├── listener │ │ └── ConsumerListener.java │ │ ├── config │ │ ├── OpenAPIConfig.java │ │ └── KafkaConfig.java │ │ ├── api │ │ ├── ConsumerController.java │ │ └── ProducerController.java │ │ └── service │ │ └── MessageService.java │ ├── k8s │ └── role.yml │ └── jkube │ └── deployment.yml ├── mvnw.cmd ├── pom.xml ├── mvnw ├── LICENSE └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | .project 3 | .classpath 4 | .settings/ 5 | .idea/ 6 | *.iml 7 | .vscode/ 8 | -------------------------------------------------------------------------------- /img/apicurio-registry-artifacts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rmarting/kafka-clients-sb-sample/HEAD/img/apicurio-registry-artifacts.png -------------------------------------------------------------------------------- /src/main/olm/operator-group.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operators.coreos.com/v1 3 | kind: OperatorGroup 4 | metadata: 5 | name: amq-streams-demo-og 6 | namespace: amq-streams-demo 7 | spec: 8 | targetNamespaces: 9 | - amq-streams-demo 10 | -------------------------------------------------------------------------------- /src/main/resources/log4j2.properties: -------------------------------------------------------------------------------- 1 | appender.stdout.type = Console 2 | appender.stdout.name = stdout 3 | appender.stdout.layout.type = PatternLayout 4 | appender.stdout.layout.pattern = %d [%-15.15t] %-5p %-30.30c{1} - %m%n 5 | 6 | rootLogger.level = INFO 7 | rootLogger.appenderRef.stdout.ref = stdout 8 | -------------------------------------------------------------------------------- /src/main/apicurio/topics/kafkasql-journal-topic.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaTopic 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-kafka 6 | name: kafkasql-journal 7 | spec: 8 | partitions: 1 9 | replicas: 1 10 | config: 11 | cleanup.policy: compact 12 | -------------------------------------------------------------------------------- /src/main/apicurio/operator/subscription-k8s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operators.coreos.com/v1alpha1 3 | kind: Subscription 4 | metadata: 5 | name: apicurio-registry 6 | namespace: operators 7 | spec: 8 | channel: 2.x 9 | name: apicurio-registry 10 | source: operatorhubio-catalog 11 | sourceNamespace: olm 12 | 
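13 | # Usage note (illustrative, assumes OLM is already installed on the cluster): 14 | # applying this Subscription, e.g. `kubectl apply -f src/main/apicurio/operator/subscription-k8s.yml`, 15 | # has OLM install the Apicurio Registry operator from the operatorhubio-catalog source into the operators namespace.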
-------------------------------------------------------------------------------- /src/main/strimzi/operator/subscription-k8s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operators.coreos.com/v1alpha1 3 | kind: Subscription 4 | metadata: 5 | name: strimzi-kafka-operator 6 | namespace: operators 7 | spec: 8 | channel: stable 9 | name: strimzi-kafka-operator 10 | source: operatorhubio-catalog 11 | sourceNamespace: olm 12 | -------------------------------------------------------------------------------- /src/main/strimzi/topics/kafkatopic-messages.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaTopic 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-kafka 6 | name: messages 7 | spec: 8 | config: 9 | min.insync.replicas: "1" 10 | retention.ms: "7200000" 11 | segment.bytes: "1073741824" 12 | partitions: 1 13 | replicas: 1 14 | -------------------------------------------------------------------------------- /src/main/strimzi/topics/kafkatopic-messages-ha.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: KafkaTopic 3 | metadata: 4 | labels: 5 | strimzi.io/cluster: my-kafka 6 | name: messages 7 | spec: 8 | config: 9 | min.insync.replicas: "2" 10 | retention.ms: "7200000" 11 | segment.bytes: "1073741824" 12 | partitions: 3 13 | replicas: 3 14 | -------------------------------------------------------------------------------- /src/main/apicurio/operator/subscription.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operators.coreos.com/v1alpha1 3 | kind: Subscription 4 | metadata: 5 | name: apicurio-registry 6 | namespace: amq-streams-demo 7 | spec: 8 | channel: 2.x 9 | installPlanApproval: Automatic 10 | name: apicurio-registry 11 | source: community-operators 12 | sourceNamespace: openshift-marketplace 13 | -------------------------------------------------------------------------------- /src/main/strimzi/operator/subscription.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operators.coreos.com/v1alpha1 3 | kind: Subscription 4 | metadata: 5 | name: strimzi-kafka-operator 6 | namespace: amq-streams-demo 7 | spec: 8 | channel: stable 9 | installPlanApproval: Automatic 10 | name: strimzi-kafka-operator 11 | source: community-operators 12 | sourceNamespace: openshift-marketplace -------------------------------------------------------------------------------- /src/main/resources/schemas/message.avsc: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Message", 3 | "namespace": "com.rmarting.kafka.schema.avro", 4 | "type": "record", 5 | "doc": "Schema for a Message.", 6 | "fields": [ 7 | { 8 | "name": "timestamp", 9 | "type": "long", 10 | "doc": "Message timestamp." 11 | }, 12 | { 13 | "name": "content", 14 | "type": "string", 15 | "doc": "Message content." 
16 | } 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /src/main/java/com/rmarting/kafka/Application.java: -------------------------------------------------------------------------------- 1 | package com.rmarting.kafka; 2 | 3 | import org.springframework.boot.SpringApplication; 4 | import org.springframework.boot.autoconfigure.SpringBootApplication; 5 | import org.springframework.kafka.annotation.EnableKafka; 6 | 7 | @SpringBootApplication 8 | @EnableKafka 9 | public class Application { 10 | 11 | public static void main(String[] args) { 12 | SpringApplication.run(Application.class, args); 13 | } 14 | 15 | } 16 | -------------------------------------------------------------------------------- /src/main/apicurio/service-registry.yml: -------------------------------------------------------------------------------- 1 | apiVersion: registry.apicur.io/v1 2 | kind: ApicurioRegistry 3 | metadata: 4 | name: service-registry 5 | spec: 6 | configuration: 7 | persistence: "kafkasql" 8 | kafkasql: 9 | bootstrapServers: "my-kafka-kafka-bootstrap:9093" 10 | security: 11 | scram: 12 | mechanism: SCRAM-SHA-512 13 | user: service-registry-scram 14 | passwordSecretName: service-registry-scram 15 | truststoreSecretName: my-kafka-cluster-ca-cert 16 | ui: 17 | readOnly: false 18 | logLevel: INFO 19 | deployment: 20 | replicas: 1 21 | -------------------------------------------------------------------------------- /src/main/k8s/role.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Role 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | namespace: amq-streams-demo 6 | name: namespace-reader 7 | rules: 8 | - apiGroups: ["", "extensions", "apps"] 9 | resources: ["configmaps", "pods", "services", "endpoints", "secrets"] 10 | verbs: ["get", "list", "watch"] 11 | --- 12 | kind: RoleBinding 13 | apiVersion: rbac.authorization.k8s.io/v1 14 | metadata: 15 | name: namespace-reader-binding 16 | namespace: amq-streams-demo 17 | subjects: 18 | - kind: ServiceAccount 19 | name: default 20 | apiGroup: "" 21 | roleRef: 22 | kind: Role 23 | name: namespace-reader 24 | apiGroup: "" 25 | -------------------------------------------------------------------------------- /src/main/strimzi/users/service-registry-user-tls.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kafka.strimzi.io/v1beta2 3 | kind: KafkaUser 4 | metadata: 5 | name: service-registry-tls 6 | labels: 7 | strimzi.io/cluster: my-kafka 8 | spec: 9 | authentication: 10 | type: tls 11 | authorization: 12 | type: simple 13 | acls: 14 | # Group Id to consume information for the different topics used by the Service Registry. 
15 | # Name equals to metadata.name property in ApicurioRegistry object 16 | - resource: 17 | type: group 18 | name: '*' 19 | patternType: literal 20 | operation: Read 21 | # Rules for the kafkasql-journal topic 22 | - resource: 23 | type: topic 24 | name: kafkasql-journal 25 | patternType: literal 26 | operation: All 27 | -------------------------------------------------------------------------------- /src/main/strimzi/users/service-registry-user-scram.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kafka.strimzi.io/v1beta2 3 | kind: KafkaUser 4 | metadata: 5 | name: service-registry-scram 6 | labels: 7 | strimzi.io/cluster: my-kafka 8 | spec: 9 | authentication: 10 | type: scram-sha-512 11 | authorization: 12 | type: simple 13 | acls: 14 | # Group Id to consume information for the different topics used by the Service Registry. 15 | # Name equals to metadata.name property in ApicurioRegistry object 16 | - resource: 17 | type: group 18 | name: '*' 19 | patternType: literal 20 | operation: Read 21 | # Rules for the kafkasql-journal topic 22 | - resource: 23 | type: topic 24 | name: kafkasql-journal 25 | patternType: literal 26 | operation: All 27 | 28 | -------------------------------------------------------------------------------- /src/main/java/com/rmarting/kafka/dto/MessageListDTO.java: -------------------------------------------------------------------------------- 1 | package com.rmarting.kafka.dto; 2 | 3 | import io.swagger.v3.oas.annotations.media.Schema; 4 | 5 | import java.io.Serializable; 6 | import java.util.ArrayList; 7 | import java.util.List; 8 | 9 | public class MessageListDTO implements Serializable { 10 | 11 | private static final long serialVersionUID = 138669946178014325L; 12 | 13 | @Schema(description = "List of messages", required = false) 14 | private List<MessageDTO> list = new ArrayList<>(); 15 | 16 | /** 17 | * @return the messages 18 | */ 19 | public List<MessageDTO> getMessages() { 20 | return list; 21 | } 22 | 23 | /** 24 | * @param messageDTOS the messages to set 25 | */ 26 | public void setMessages(List<MessageDTO> messageDTOS) { 27 | this.list = messageDTOS; 28 | } 29 | 30 | public void addCustomMessage(MessageDTO messageDTO) { 31 | if (null == list) { 32 | list = new ArrayList<>(); 33 | } 34 | list.add(messageDTO); 35 | } 36 | 37 | } 38 | -------------------------------------------------------------------------------- /src/main/strimzi/users/application-user-scram.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kafka.strimzi.io/v1beta2 3 | kind: KafkaUser 4 | metadata: 5 | name: application 6 | labels: 7 | strimzi.io/cluster: my-kafka 8 | spec: 9 | authentication: 10 | type: scram-sha-512 11 | authorization: 12 | type: simple 13 | acls: 14 | # Consumer Group Id 15 | - resource: 16 | type: group 17 | name: kafka-client-sb-consumer 18 | operation: Read 19 | - resource: 20 | type: group 21 | name: spring-kafka-clients-sb-sample-group 22 | operation: Read 23 | # Rules for the messages topic 24 | - resource: 25 | type: topic 26 | name: messages 27 | operation: Read 28 | - resource: 29 | type: topic 30 | name: messages 31 | operation: Describe 32 | - resource: 33 | type: topic 34 | name: messages 35 | operation: Write 36 | - resource: 37 | type: topic 38 | name: messages 39 | operation: Create 40 | -------------------------------------------------------------------------------- /src/main/jkube/deployment.yml:
-------------------------------------------------------------------------------- 1 | metadata: 2 | name: ${project.artifactId} 3 | labels: 4 | group: ${project.groupId} 5 | project: ${project.artifactId} 6 | version: ${project.version} 7 | provider: jkube 8 | spec: 9 | triggers: 10 | - type: ConfigChange 11 | template: 12 | spec: 13 | containers: 14 | - env: 15 | - name: KAFKA_USER_PASSWORD 16 | valueFrom: 17 | secretKeyRef: 18 | key: password 19 | name: application 20 | livenessProbe: 21 | failureThreshold: 3 22 | httpGet: 23 | path: /actuator/health 24 | port: 8080 25 | scheme: HTTP 26 | initialDelaySeconds: 180 27 | periodSeconds: 10 28 | successThreshold: 1 29 | timeoutSeconds: 1 30 | readinessProbe: 31 | failureThreshold: 3 32 | httpGet: 33 | path: /actuator/health 34 | port: 8080 35 | scheme: HTTP 36 | initialDelaySeconds: 10 37 | periodSeconds: 10 38 | successThreshold: 1 39 | timeoutSeconds: 1 40 | -------------------------------------------------------------------------------- /src/main/java/com/rmarting/kafka/listener/ConsumerListener.java: -------------------------------------------------------------------------------- 1 | package com.rmarting.kafka.listener; 2 | 3 | import com.rmarting.kafka.schema.avro.Message; 4 | import org.slf4j.Logger; 5 | import org.slf4j.LoggerFactory; 6 | import org.springframework.kafka.annotation.KafkaListener; 7 | import org.springframework.kafka.support.Acknowledgment; 8 | import org.springframework.kafka.support.KafkaHeaders; 9 | import org.springframework.messaging.handler.annotation.Headers; 10 | import org.springframework.messaging.handler.annotation.Payload; 11 | import org.springframework.stereotype.Component; 12 | 13 | import java.util.Map; 14 | 15 | @Component 16 | public class ConsumerListener { 17 | 18 | private static final Logger LOGGER = LoggerFactory.getLogger(ConsumerListener.class); 19 | 20 | @KafkaListener(topics = {"messages"}) 21 | public void handleMessages(@Payload Message message, 22 | @Headers Map headers, 23 | Acknowledgment acknowledgment) { 24 | LOGGER.info("Received record from Topic-Partition '{}-{}' with Offset '{}' -> Key: '{}' - Value '{}'", 25 | headers.get(KafkaHeaders.RECEIVED_TOPIC), 26 | headers.get(KafkaHeaders.RECEIVED_PARTITION_ID), 27 | headers.get(KafkaHeaders.OFFSET), 28 | headers.get(KafkaHeaders.MESSAGE_KEY), 29 | message.get("content")); 30 | 31 | // Commit message 32 | acknowledgment.acknowledge(); 33 | } 34 | 35 | } 36 | -------------------------------------------------------------------------------- /src/main/java/com/rmarting/kafka/config/OpenAPIConfig.java: -------------------------------------------------------------------------------- 1 | package com.rmarting.kafka.config; 2 | 3 | import io.swagger.v3.oas.models.Components; 4 | import io.swagger.v3.oas.models.OpenAPI; 5 | import io.swagger.v3.oas.models.info.Info; 6 | import io.swagger.v3.oas.models.info.License; 7 | import io.swagger.v3.oas.models.tags.Tag; 8 | import org.springdoc.core.GroupedOpenApi; 9 | import org.springframework.beans.factory.annotation.Value; 10 | import org.springframework.context.annotation.Bean; 11 | import org.springframework.context.annotation.Configuration; 12 | 13 | @Configuration 14 | public class OpenAPIConfig { 15 | 16 | @Bean 17 | public GroupedOpenApi openApi() { 18 | String[] paths = { "/producer/**", "/consumer/**" }; 19 | return GroupedOpenApi.builder() 20 | .setGroup("kafka-client-api") 21 | .packagesToScan("com.rmarting.kafka.api") 22 | .pathsToMatch(paths) 23 | .build(); 24 | } 25 | 26 | @Bean 27 | public 
OpenAPI customOpenAPI(@Value("${springdoc.version}") String appVersion) { 28 | return new OpenAPI() 29 | .components(new Components()) 30 | .info(new Info() 31 | .title("Kafka Client Spring Boot Application API") 32 | .version(appVersion) 33 | .license(new License().name("Apache 2.0").url("http://springdoc.org")) 34 | .description( 35 | "Sample Spring Boot REST service using springdoc-openapi and OpenAPI 3 to" + 36 | "produce and consume messages from a Kafka Cluster")) 37 | .addTagsItem(new Tag().name("producer")) 38 | .addTagsItem(new Tag().name("consumer")); 39 | } 40 | 41 | } 42 | -------------------------------------------------------------------------------- /src/main/java/com/rmarting/kafka/dto/MessageDTO.java: -------------------------------------------------------------------------------- 1 | package com.rmarting.kafka.dto; 2 | 3 | import io.swagger.v3.oas.annotations.media.Schema; 4 | 5 | import java.io.Serializable; 6 | 7 | public class MessageDTO implements Serializable { 8 | 9 | private static final long serialVersionUID = 1305278483346223763L; 10 | 11 | @Schema(description = "Key to identify this message.", required = false, example = "1") 12 | private String key; 13 | 14 | @Schema(description = "Timestamp.", required = true) 15 | private long timestamp; 16 | 17 | @Schema(description = "Content.", required = true, example = "Simple message") 18 | private String content; 19 | 20 | @Schema(description = "Partition number.", required = false, accessMode = Schema.AccessMode.READ_ONLY) 21 | private int partition; 22 | 23 | @Schema(description = "Offset in the partition.", required = false, accessMode = Schema.AccessMode.READ_ONLY) 24 | private long offset; 25 | 26 | public String getKey() { 27 | return key; 28 | } 29 | 30 | public void setKey(String key) { 31 | this.key = key; 32 | } 33 | 34 | public long getTimestamp() { 35 | return timestamp; 36 | } 37 | 38 | public void setTimestamp(long timestamp) { 39 | this.timestamp = timestamp; 40 | } 41 | 42 | public String getContent() { 43 | return content; 44 | } 45 | 46 | public void setContent(String content) { 47 | this.content = content; 48 | } 49 | 50 | /** 51 | * @return the partition 52 | */ 53 | public int getPartition() { 54 | return partition; 55 | } 56 | 57 | /** 58 | * @param partition the partition to set 59 | */ 60 | public void setPartition(int partition) { 61 | this.partition = partition; 62 | } 63 | 64 | /** 65 | * @return the offset 66 | */ 67 | public long getOffset() { 68 | return offset; 69 | } 70 | 71 | /** 72 | * @param offset the offset to set 73 | */ 74 | public void setOffset(long offset) { 75 | this.offset = offset; 76 | } 77 | 78 | } 79 | -------------------------------------------------------------------------------- /src/main/java/com/rmarting/kafka/api/ConsumerController.java: -------------------------------------------------------------------------------- 1 | package com.rmarting.kafka.api; 2 | 3 | import com.rmarting.kafka.dto.MessageListDTO; 4 | import com.rmarting.kafka.service.MessageService; 5 | import io.swagger.v3.oas.annotations.Operation; 6 | import io.swagger.v3.oas.annotations.Parameter; 7 | import io.swagger.v3.oas.annotations.media.Content; 8 | import io.swagger.v3.oas.annotations.media.Schema; 9 | import io.swagger.v3.oas.annotations.responses.ApiResponse; 10 | import io.swagger.v3.oas.annotations.responses.ApiResponses; 11 | import io.swagger.v3.oas.annotations.tags.Tag; 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | import 
org.springframework.beans.factory.annotation.Value; 15 | import org.springframework.http.HttpStatus; 16 | import org.springframework.http.MediaType; 17 | import org.springframework.http.ResponseEntity; 18 | import org.springframework.web.bind.annotation.*; 19 | 20 | @RestController 21 | @RequestMapping("/consumer") 22 | @Tag(name = "consumer", description = "Operations to consume messages from a Kafka Cluster") 23 | public class ConsumerController { 24 | 25 | private static final Logger LOGGER = LoggerFactory.getLogger(ConsumerController.class); 26 | 27 | private MessageService messageService; 28 | 29 | public ConsumerController(MessageService messageService) { 30 | this.messageService = messageService; 31 | } 32 | 33 | @Value("${consumer.poolTimeout}") 34 | private Long poolTimeout; 35 | 36 | @Operation(summary = "Get a list of records from a topic", tags = {"consumer"}) 37 | @ApiResponses(value = { 38 | @ApiResponse( 39 | responseCode = "200", 40 | description = "List of records from topic", 41 | content = @Content(schema = @Schema(implementation = MessageListDTO.class))), 42 | @ApiResponse(responseCode = "404", description = "Not records in topic"), 43 | @ApiResponse(responseCode = "500", description = "Internal Server Error")}) 44 | @GetMapping(value = "/kafka/{topicName}", produces = MediaType.APPLICATION_JSON_VALUE) 45 | public ResponseEntity pollFromTopic( 46 | @Parameter(description = "Topic name", required = true) @PathVariable String topicName, 47 | @Parameter(description = "Commit results", required = false) @RequestParam(defaultValue = "true") boolean commit, 48 | @Parameter(description = "Partition ID", required = false) @RequestParam(required = false) Integer partition) { 49 | MessageListDTO messageListDTO = messageService.pollEvents(topicName, partition, commit); 50 | 51 | // Prepare response 52 | if (messageListDTO.getMessages().isEmpty()) { 53 | LOGGER.debug("Not found messages (404) in topic {}", topicName); 54 | 55 | return ResponseEntity.status(HttpStatus.NOT_FOUND).body(messageListDTO); 56 | } 57 | 58 | LOGGER.debug("Pulled successfully messages (200) from topic {}", topicName); 59 | 60 | return ResponseEntity.ok(messageListDTO); 61 | } 62 | 63 | } 64 | -------------------------------------------------------------------------------- /src/main/resources/application.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Spring Boot Properties 3 | # 4 | #server.port=8181 5 | 6 | # 7 | # Kafka Clients Properties 8 | # 9 | # Kafka Bootstrap Servers 10 | #kafka.bootstrap-servers = localhost:9092 11 | kafka.bootstrap-servers = my-kafka-kafka-bootstrap:9092 12 | # Kafka User Credentials 13 | kafka.user.name = application 14 | kafka.user.password = ${KAFKA_USER_PASSWORD} 15 | # To use with plain connections 16 | kafka.security.protocol = SASL_PLAINTEXT 17 | #kafka.security.protocol = SASL_SSL 18 | 19 | # Producer Properties 20 | producer.clienId = kafka-client-sb-producer-client 21 | # No ACK 22 | #producer.acks = 0 23 | # Leader 24 | #producer.acks = 1 25 | # In-Sync 26 | producer.acks = -1 27 | 28 | # Consumer Properties 29 | consumer.groupId = kafka-client-sb-consumer 30 | consumer.clientId = kafka-client-sb-consumer-client 31 | # Pooling properties 32 | consumer.maxPoolRecords = 1000 33 | consumer.maxPartitionFetchBytes = 1048576 34 | # Auto commit 35 | consumer.autoCommit = false 36 | # latest | earliest 37 | consumer.offsetReset = earliest 38 | # Seconds 39 | consumer.poolTimeout = 10 40 | 41 | # Service Registry 42 | 
#apicurio.registry.url = http://localhost:8080/apis/registry/v2 43 | apicurio.registry.url = http://service-registry-service:8080/apis/registry/v2 44 | 45 | # 46 | # Spring Cloud Kubernetes Properties 47 | # 48 | spring.application.name=kafka-clients-sb-sample 49 | spring.cloud.kubernetes.reload.enabled=true 50 | #spring.cloud.kubernetes.reload.strategy=restart_context 51 | spring.cloud.kubernetes.config.name=kafka-clients-sb-sample 52 | spring.cloud.kubernetes.config.namespace=amq-streams-demo 53 | 54 | # 55 | # Spring Kafka Properties 56 | # 57 | spring.kafka.bootstrap-servers = ${kafka.bootstrap-servers} 58 | spring.kafka.properties.security.protocol=${kafka.security.protocol} 59 | spring.kafka.properties.sasl.mechanism=SCRAM-SHA-512 60 | spring.kafka.properties.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="${kafka.user.name}" password="${kafka.user.password}"; 61 | # Spring Kafka Producer 62 | spring.kafka.producer.acks= ${producer.acks} 63 | spring.kafka.producer.client-id = spring-kafka-clients-sb-producer-client 64 | spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer 65 | spring.kafka.producer.value-serializer=io.apicurio.registry.serde.avro.AvroKafkaSerializer 66 | # Producer Properties 67 | spring.kafka.producer.properties.apicurio.registry.url = ${apicurio.registry.url} 68 | spring.kafka.producer.properties.apicurio.registry.artifact-resolver-strategy = io.apicurio.registry.serde.avro.strategy.RecordIdStrategy 69 | # Spring Kafka Consumer 70 | spring.kafka.listener.ack-mode = manual 71 | spring.kafka.consumer.group-id = spring-kafka-clients-sb-sample-group 72 | spring.kafka.consumer.auto-offset-reset = earliest 73 | spring.kafka.consumer.enable-auto-commit=false 74 | spring.kafka.consumer.key-deserializer = org.apache.kafka.common.serialization.StringDeserializer 75 | spring.kafka.consumer.value-deserializer = io.apicurio.registry.serde.avro.AvroKafkaDeserializer 76 | # Consumer Properties 77 | spring.kafka.consumer.properties.partition.assignment.strategy = org.apache.kafka.clients.consumer.RoundRobinAssignor 78 | # Consumer Properties - Service Registry Integration 79 | spring.kafka.consumer.properties.apicurio.registry.url = ${apicurio.registry.url} 80 | # Use Specific Avro classes instead of the GenericRecord class definition 81 | spring.kafka.consumer.properties.apicurio.registry.use-specific-avro-reader = true 82 | 83 | # 84 | # Swagger UI Properties 85 | # 86 | springdoc.version = @project.version@ 87 | springdoc.api-docs.enabled = true 88 | springdoc.swagger-ui.path = /swagger-ui.html 89 | springdoc.swagger-ui.displayRequestDuration = true 90 | -------------------------------------------------------------------------------- /src/main/strimzi/kafka/kafka.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: Kafka 3 | metadata: 4 | labels: 5 | app: my-kafka 6 | name: my-kafka 7 | spec: 8 | kafka: 9 | version: 2.7.0 10 | config: 11 | # Log message format 12 | log.message.format.version: "2.7" 13 | # default replication factors for automatically created topics 14 | default.replication.factor: 1 15 | # The default number of log partitions per topic 16 | num.partitions: 3 17 | # Enable auto creation of topic on the server 18 | auto.create.topics.enable: false 19 | # When a producer sets acks to "all" (or "-1"), min.insync.replicas specifies the minimum number of replicas that 20 | # must acknowledge a write for 
the write to be considered successful. 21 | # When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical 22 | # scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and 23 | # produce with acks of "all". This will ensure that the producer raises an exception if a 24 | # majority of replicas do not receive a write. 25 | min.insync.replicas: 1 26 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" 27 | # For anything other than development testing, a value greater than 1 is recommended for to ensure availability such as 3. 28 | offsets.topic.replication.factor: 1 29 | transaction.state.log.replication.factor: 1 30 | transaction.state.log.min.isr: 1 31 | # The minimum age of a log file to be eligible for deletion due to age. Default value: 168 32 | # The number of hours to keep a log file before deleting it (in hours), tertiary to log.retention.ms property 33 | log.retention.hours: 48 34 | # The default cleanup policy for segments beyond the retention window. A comma separated list of valid policies. 35 | # Valid policies are: "delete" and "compact". Default value: "delete" 36 | log.cleanup.policy: delete 37 | # Enable the log cleaner process to run on the server. Should be enabled if using any topics with a 38 | # cleanup.policy=compact including the internal offsets topic. If disabled those topics will not be compacted 39 | # and continually grow in size. 40 | log.cleaner.enable: true 41 | # How long are delete records retained?. Default value: 86400000 (24 hours) 42 | log.cleaner.delete.retention.ms: 86400000 43 | listeners: 44 | - name: plain 45 | port: 9092 46 | tls: false 47 | type: internal 48 | authentication: 49 | type: scram-sha-512 50 | - name: tls 51 | port: 9093 52 | tls: true 53 | type: internal 54 | authentication: 55 | type: scram-sha-512 56 | authorization: 57 | type: simple 58 | livenessProbe: 59 | initialDelaySeconds: 90 60 | timeoutSeconds: 5 61 | readinessProbe: 62 | initialDelaySeconds: 60 63 | timeoutSeconds: 5 64 | replicas: 1 65 | storage: 66 | type: jbod 67 | volumes: 68 | - id: 0 69 | type: persistent-claim 70 | size: 5Gi 71 | deleteClaim: true 72 | template: 73 | pod: 74 | metadata: 75 | labels: 76 | custom-strimzi-label: my-kafka 77 | zookeeper: 78 | livenessProbe: 79 | initialDelaySeconds: 90 80 | timeoutSeconds: 5 81 | readinessProbe: 82 | initialDelaySeconds: 60 83 | timeoutSeconds: 5 84 | replicas: 1 85 | storage: 86 | deleteClaim: true 87 | size: 2Gi 88 | type: persistent-claim 89 | entityOperator: 90 | topicOperator: {} 91 | userOperator: {} 92 | -------------------------------------------------------------------------------- /src/main/strimzi/kafka/kafka-ha.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kafka.strimzi.io/v1beta2 2 | kind: Kafka 3 | metadata: 4 | labels: 5 | app: my-kafka 6 | name: my-kafka 7 | spec: 8 | kafka: 9 | version: 2.7.0 10 | config: 11 | # Log message format 12 | log.message.format.version: "2.7" 13 | # default replication factors for automatically created topics 14 | default.replication.factor: 3 15 | # The default number of log partitions per topic 16 | num.partitions: 3 17 | # Enable auto creation of topic on the server 18 | auto.create.topics.enable: false 19 | # When a producer sets acks to "all" (or "-1"), min.insync.replicas specifies the minimum number of replicas that 20 | # must acknowledge a write for the write 
to be considered successful. 21 | # When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. A typical 22 | # scenario would be to create a topic with a replication factor of 3, set min.insync.replicas to 2, and 23 | # produce with acks of "all". This will ensure that the producer raises an exception if a 24 | # majority of replicas do not receive a write. 25 | min.insync.replicas: 2 26 | # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" 27 | # For anything other than development testing, a value greater than 1 is recommended for to ensure availability such as 3. 28 | offsets.topic.replication.factor: 3 29 | transaction.state.log.replication.factor: 3 30 | transaction.state.log.min.isr: 2 31 | # The minimum age of a log file to be eligible for deletion due to age. Default value: 168 32 | # The number of hours to keep a log file before deleting it (in hours), tertiary to log.retention.ms property 33 | log.retention.hours: 48 34 | # The default cleanup policy for segments beyond the retention window. A comma separated list of valid policies. 35 | # Valid policies are: "delete" and "compact". Default value: "delete" 36 | log.cleanup.policy: delete 37 | # Enable the log cleaner process to run on the server. Should be enabled if using any topics with a 38 | # cleanup.policy=compact including the internal offsets topic. If disabled those topics will not be compacted 39 | # and continually grow in size. 40 | log.cleaner.enable: true 41 | # How long are delete records retained?. Default value: 86400000 (24 hours) 42 | log.cleaner.delete.retention.ms: 86400000 43 | listeners: 44 | - name: plain 45 | port: 9092 46 | tls: false 47 | type: internal 48 | authentication: 49 | type: scram-sha-512 50 | - name: tls 51 | port: 9093 52 | tls: true 53 | type: internal 54 | authentication: 55 | type: scram-sha-512 56 | livenessProbe: 57 | initialDelaySeconds: 90 58 | timeoutSeconds: 5 59 | readinessProbe: 60 | initialDelaySeconds: 60 61 | timeoutSeconds: 5 62 | replicas: 3 63 | storage: 64 | type: jbod 65 | volumes: 66 | - id: 0 67 | type: persistent-claim 68 | size: 25Gi 69 | deleteClaim: true 70 | template: 71 | pod: 72 | metadata: 73 | labels: 74 | custom-strimzi-label: my-kafka 75 | zookeeper: 76 | livenessProbe: 77 | initialDelaySeconds: 90 78 | timeoutSeconds: 5 79 | readinessProbe: 80 | initialDelaySeconds: 60 81 | timeoutSeconds: 5 82 | replicas: 3 83 | storage: 84 | deleteClaim: true 85 | size: 10Gi 86 | type: persistent-claim 87 | entityOperator: 88 | topicOperator: 89 | reconciliationIntervalSeconds: 60 90 | userOperator: 91 | reconciliationIntervalSeconds: 60 92 | kafkaExporter: 93 | topicRegex: ".*" 94 | groupRegex: ".*" 95 | -------------------------------------------------------------------------------- /src/main/java/com/rmarting/kafka/api/ProducerController.java: -------------------------------------------------------------------------------- 1 | package com.rmarting.kafka.api; 2 | 3 | import com.rmarting.kafka.dto.MessageDTO; 4 | import com.rmarting.kafka.service.MessageService; 5 | import io.swagger.v3.oas.annotations.Operation; 6 | import io.swagger.v3.oas.annotations.Parameter; 7 | import io.swagger.v3.oas.annotations.media.Content; 8 | import io.swagger.v3.oas.annotations.media.Schema; 9 | import io.swagger.v3.oas.annotations.responses.ApiResponse; 10 | import io.swagger.v3.oas.annotations.responses.ApiResponses; 11 | import io.swagger.v3.oas.annotations.tags.Tag; 12 | import 
org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | import org.springframework.http.MediaType; 15 | import org.springframework.http.ResponseEntity; 16 | import org.springframework.web.bind.annotation.*; 17 | 18 | @RestController 19 | @RequestMapping("/producer") 20 | @Tag(name = "producer", description = "Operations to produce messages to a Kafka Cluster") 21 | public class ProducerController { 22 | 23 | private static final Logger LOGGER = LoggerFactory.getLogger(ProducerController.class); 24 | 25 | private MessageService messageService; 26 | 27 | public ProducerController(MessageService messageService) { 28 | this.messageService = messageService; 29 | } 30 | 31 | @Operation(summary = "Send a message synchronously using the Kafka Client Producer API", tags = { "producer"}) 32 | @ApiResponses(value = { 33 | @ApiResponse( 34 | responseCode = "200", 35 | description = "Message sent", 36 | content = @Content(schema = @Schema(implementation = MessageDTO.class))), 37 | @ApiResponse(responseCode = "404", description = "Message not sent"), 38 | @ApiResponse(responseCode = "500", description = "Internal Server Error") 39 | }) 40 | @PostMapping(value = "/kafka/{topicName}", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) 41 | public ResponseEntity sendToTopic( 42 | @Parameter(description = "Topic name", required = true) @PathVariable String topicName, 43 | @Parameter(description = "Message to send", required = true) @RequestBody MessageDTO messageDTO) { 44 | messageDTO = messageService.publishSync(topicName, messageDTO); 45 | 46 | LOGGER.debug("Published successfully message (200) into topic {}", topicName); 47 | 48 | return ResponseEntity.ok(messageDTO); 49 | } 50 | 51 | @Operation(summary = "Send a message asynchronously using the Kafka Client Producer API", tags = { "producer"}) 52 | @ApiResponses(value = { 53 | @ApiResponse( 54 | responseCode = "200", 55 | description = "Message sent", 56 | content = @Content(schema = @Schema(implementation = MessageDTO.class))), 57 | @ApiResponse(responseCode = "404", description = "Message not sent"), 58 | @ApiResponse(responseCode = "500", description = "Internal Server Error") 59 | }) 60 | @PostMapping(value = "/kafka/async/{topicName}", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) 61 | public ResponseEntity sendToTopicAsync( 62 | @Parameter(description = "Topic name", required = true) @PathVariable String topicName, 63 | @Parameter(description = "Topic name", required = true) @RequestBody MessageDTO messageDTO) { 64 | messageDTO = messageService.publishAsync(topicName, messageDTO); 65 | 66 | LOGGER.debug("Published successfully async message (200) into topic {}", topicName); 67 | 68 | return ResponseEntity.ok(messageDTO); 69 | } 70 | 71 | @Operation(summary = "Send a message synchronously using the Spring Kafka KafkaTemplate API", tags = { "producer"}) 72 | @ApiResponses(value = { 73 | @ApiResponse( 74 | responseCode = "200", 75 | description = "Message sent", 76 | content = @Content(schema = @Schema(implementation = MessageDTO.class))), 77 | @ApiResponse(responseCode = "404", description = "Message not sent"), 78 | @ApiResponse(responseCode = "500", description = "Internal Server Error") 79 | }) 80 | @PostMapping(value = "/spring/{topicName}", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) 81 | public ResponseEntity sendToTopicBySpring( 82 | @Parameter(description = "Topic name", required = true) @PathVariable String 
topicName, 83 | @Parameter(description = "Message to send", required = true) @RequestBody MessageDTO messageDTO) { 84 | messageDTO = messageService.sendMessage(topicName, messageDTO); 85 | 86 | LOGGER.debug("Sent successfully message (200) into topic {}", topicName); 87 | 88 | return ResponseEntity.ok(messageDTO); 89 | } 90 | 91 | } 92 | -------------------------------------------------------------------------------- /mvnw.cmd: -------------------------------------------------------------------------------- 1 | @REM ---------------------------------------------------------------------------- 2 | @REM Licensed to the Apache Software Foundation (ASF) under one 3 | @REM or more contributor license agreements. See the NOTICE file 4 | @REM distributed with this work for additional information 5 | @REM regarding copyright ownership. The ASF licenses this file 6 | @REM to you under the Apache License, Version 2.0 (the 7 | @REM "License"); you may not use this file except in compliance 8 | @REM with the License. You may obtain a copy of the License at 9 | @REM 10 | @REM http://www.apache.org/licenses/LICENSE-2.0 11 | @REM 12 | @REM Unless required by applicable law or agreed to in writing, 13 | @REM software distributed under the License is distributed on an 14 | @REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 | @REM KIND, either express or implied. See the License for the 16 | @REM specific language governing permissions and limitations 17 | @REM under the License. 18 | @REM ---------------------------------------------------------------------------- 19 | 20 | @REM ---------------------------------------------------------------------------- 21 | @REM Maven2 Start Up Batch script 22 | @REM 23 | @REM Required ENV vars: 24 | @REM JAVA_HOME - location of a JDK home dir 25 | @REM 26 | @REM Optional ENV vars 27 | @REM M2_HOME - location of maven2's installed home dir 28 | @REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands 29 | @REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending 30 | @REM MAVEN_OPTS - parameters passed to the Java VM when running Maven 31 | @REM e.g. to debug Maven itself, use 32 | @REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 33 | @REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files 34 | @REM ---------------------------------------------------------------------------- 35 | 36 | @REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' 37 | @echo off 38 | @REM set title of command window 39 | title %0 40 | @REM enable echoing my setting MAVEN_BATCH_ECHO to 'on' 41 | @if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% 42 | 43 | @REM set %HOME% to equivalent of $HOME 44 | if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") 45 | 46 | @REM Execute a user defined script before this one 47 | if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre 48 | @REM check for pre script, once with legacy .bat ending and once with .cmd ending 49 | if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" 50 | if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" 51 | :skipRcPre 52 | 53 | @setlocal 54 | 55 | set ERROR_CODE=0 56 | 57 | @REM To isolate internal variables from possible post scripts, we use another setlocal 58 | @setlocal 59 | 60 | @REM ==== START VALIDATION ==== 61 | if not "%JAVA_HOME%" == "" goto OkJHome 62 | 63 | echo. 64 | echo Error: JAVA_HOME not found in your environment. 
>&2 65 | echo Please set the JAVA_HOME variable in your environment to match the >&2 66 | echo location of your Java installation. >&2 67 | echo. 68 | goto error 69 | 70 | :OkJHome 71 | if exist "%JAVA_HOME%\bin\java.exe" goto init 72 | 73 | echo. 74 | echo Error: JAVA_HOME is set to an invalid directory. >&2 75 | echo JAVA_HOME = "%JAVA_HOME%" >&2 76 | echo Please set the JAVA_HOME variable in your environment to match the >&2 77 | echo location of your Java installation. >&2 78 | echo. 79 | goto error 80 | 81 | @REM ==== END VALIDATION ==== 82 | 83 | :init 84 | 85 | @REM Find the project base dir, i.e. the directory that contains the folder ".mvn". 86 | @REM Fallback to current working directory if not found. 87 | 88 | set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% 89 | IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir 90 | 91 | set EXEC_DIR=%CD% 92 | set WDIR=%EXEC_DIR% 93 | :findBaseDir 94 | IF EXIST "%WDIR%"\.mvn goto baseDirFound 95 | cd .. 96 | IF "%WDIR%"=="%CD%" goto baseDirNotFound 97 | set WDIR=%CD% 98 | goto findBaseDir 99 | 100 | :baseDirFound 101 | set MAVEN_PROJECTBASEDIR=%WDIR% 102 | cd "%EXEC_DIR%" 103 | goto endDetectBaseDir 104 | 105 | :baseDirNotFound 106 | set MAVEN_PROJECTBASEDIR=%EXEC_DIR% 107 | cd "%EXEC_DIR%" 108 | 109 | :endDetectBaseDir 110 | 111 | IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig 112 | 113 | @setlocal EnableExtensions EnableDelayedExpansion 114 | for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a 115 | @endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% 116 | 117 | :endReadAdditionalConfig 118 | 119 | SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" 120 | set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" 121 | set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain 122 | 123 | set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" 124 | FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO ( 125 | IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B 126 | ) 127 | 128 | @REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central 129 | @REM This allows using the maven wrapper in projects that prohibit checking in binary data. 130 | if exist %WRAPPER_JAR% ( 131 | echo Found %WRAPPER_JAR% 132 | ) else ( 133 | echo Couldn't find %WRAPPER_JAR%, downloading it ... 
134 | echo Downloading from: %DOWNLOAD_URL% 135 | powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')" 136 | echo Finished downloading %WRAPPER_JAR% 137 | ) 138 | @REM End of extension 139 | 140 | %MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* 141 | if ERRORLEVEL 1 goto error 142 | goto end 143 | 144 | :error 145 | set ERROR_CODE=1 146 | 147 | :end 148 | @endlocal & set ERROR_CODE=%ERROR_CODE% 149 | 150 | if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost 151 | @REM check for post script, once with legacy .bat ending and once with .cmd ending 152 | if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" 153 | if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" 154 | :skipRcPost 155 | 156 | @REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' 157 | if "%MAVEN_BATCH_PAUSE%" == "on" pause 158 | 159 | if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% 160 | 161 | exit /B %ERROR_CODE% 162 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 4.0.0 3 | 4 | 5 | org.springframework.boot 6 | spring-boot-starter-parent 7 | 2.3.2.RELEASE 8 | 9 | 10 | 11 | com.rmarting.kafka 12 | kafka-clients-sb-sample 13 | 2.7.0-SNAPSHOT 14 | jar 15 | 16 | 17 | UTF-8 18 | 11 19 | 20 | 2.7.0 21 | 22 | 1.4.3 23 | 24 | 1.10.2 25 | 26 | 2.0.1.Final 27 | 28 | http://service-registry.amq-streams-demo.apps-crc.testing/apis/registry/v2 29 | 30 | NodePort 31 | 32 | 33 | 34 | 35 | org.springframework.boot 36 | spring-boot-starter-web 37 | 38 | 39 | org.springframework.boot 40 | spring-boot-starter-actuator 41 | 42 | 43 | 44 | 45 | org.springdoc 46 | springdoc-openapi-ui 47 | ${springdoc-openapi-ui.version} 48 | 49 | 50 | 51 | 52 | org.springframework.kafka 53 | spring-kafka 54 | 55 | 56 | 57 | 58 | org.apache.kafka 59 | kafka-clients 60 | ${kafka-clients.version} 61 | 62 | 63 | 64 | 65 | org.apache.avro 66 | avro 67 | ${avro.version} 68 | 69 | 70 | 71 | 72 | io.apicurio 73 | apicurio-registry-serdes-avro-serde 74 | ${apicurio.version} 75 | 76 | 77 | 78 | 79 | org.springframework.cloud 80 | spring-cloud-starter-kubernetes-config 81 | 1.1.7.RELEASE 82 | 83 | 84 | 85 | 86 | org.springframework.boot 87 | spring-boot-starter-test 88 | test 89 | 90 | 91 | org.junit.vintage 92 | junit-vintage-engine 93 | 94 | 95 | 96 | 97 | org.springframework.kafka 98 | spring-kafka-test 99 | test 100 | 101 | 102 | 103 | 104 | kafka-clients-sb-sample 105 | 106 | 107 | 108 | org.springframework.boot 109 | spring-boot-maven-plugin 110 | 111 | 112 | 113 | 114 | org.apache.avro 115 | avro-maven-plugin 116 | ${avro.version} 117 | 118 | 119 | 120 | generate-sources 121 | 122 | schema 123 | 124 | 125 | 126 | ${project.basedir}/src/main/resources/schemas 127 | 128 | 129 | **/*.avsc 130 | 131 | ${project.build.directory}/generated-sources/schemas 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | org.codehaus.mojo 140 | build-helper-maven-plugin 141 | 3.1.0 142 | 143 | 144 | 145 | generate-sources 146 | 147 | add-source 148 | 149 | 150 | 151 | 152 | ${project.build.directory}/generated-sources/schemas 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | apicurio 164 | 165 | 166 | 167 | io.apicurio 168 | apicurio-registry-maven-plugin 169 | ${apicurio.version} 170 | 171 | 172 | generate-sources 173 | 174 | 
register 175 | 176 | 177 | ${apicurio.registry.url} 178 | 179 | 180 | 181 | default 182 | messages 183 | AVRO 184 | 185 | ${project.basedir}/src/main/resources/schemas/message.avsc 186 | 187 | RETURN_OR_UPDATE 188 | true 189 | 190 | 191 | 192 | 193 | default 194 | messages-value 195 | AVRO 196 | 197 | ${project.basedir}/src/main/resources/schemas/message.avsc 198 | 199 | RETURN_OR_UPDATE 200 | true 201 | 202 | 203 | 204 | 205 | com.rmarting.kafka.schema.avro 206 | Message 207 | AVRO 208 | 209 | ${project.basedir}/src/main/resources/schemas/message.avsc 210 | 211 | RETURN_OR_UPDATE 212 | true 213 | 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | 224 | openshift 225 | 226 | 227 | 228 | 229 | org.eclipse.jkube 230 | openshift-maven-plugin 231 | 1.3.0 232 | 233 | 234 | 235 | 236 | 237 | kubernetes 238 | 239 | 240 | 241 | org.eclipse.jkube 242 | kubernetes-maven-plugin 243 | 1.3.0 244 | 245 | 246 | 247 | 248 | 249 | 250 | -------------------------------------------------------------------------------- /src/main/java/com/rmarting/kafka/service/MessageService.java: -------------------------------------------------------------------------------- 1 | package com.rmarting.kafka.service; 2 | 3 | import com.rmarting.kafka.dto.MessageDTO; 4 | import com.rmarting.kafka.dto.MessageListDTO; 5 | import com.rmarting.kafka.schema.avro.Message; 6 | import org.apache.kafka.clients.consumer.Consumer; 7 | import org.apache.kafka.clients.consumer.ConsumerRecords; 8 | import org.apache.kafka.clients.producer.Producer; 9 | import org.apache.kafka.clients.producer.ProducerRecord; 10 | import org.apache.kafka.clients.producer.RecordMetadata; 11 | import org.apache.kafka.common.TopicPartition; 12 | import org.slf4j.Logger; 13 | import org.slf4j.LoggerFactory; 14 | import org.springframework.beans.factory.ObjectFactory; 15 | import org.springframework.beans.factory.annotation.Value; 16 | import org.springframework.kafka.core.KafkaTemplate; 17 | import org.springframework.kafka.support.SendResult; 18 | import org.springframework.stereotype.Service; 19 | 20 | import javax.validation.constraints.NotEmpty; 21 | import javax.validation.constraints.NotNull; 22 | import java.time.Duration; 23 | import java.util.Collections; 24 | import java.util.concurrent.ExecutionException; 25 | 26 | /** 27 | * Service for Kafka service which includes a set of primitives to manage events and topics such as: 28 | * 1. consume events from topic 29 | * 2. send event to topic 30 | * 3. subscribe topic 31 | *

32 | * Additionally, it only processes messages of type {@link Message} 33 | * 34 | * @author rmarting 35 | */ 36 | @Service 37 | public class MessageService { 38 | 39 | private static final Logger LOGGER = LoggerFactory.getLogger(MessageService.class); 40 | 41 | private ObjectFactory> producer; 42 | 43 | private ObjectFactory> consumer; 44 | 45 | private KafkaTemplate kafkaTemplate; 46 | 47 | @Value("${consumer.poolTimeout}") 48 | private Long poolTimeout; 49 | 50 | public MessageService(ObjectFactory> producer, 51 | ObjectFactory> consumer, 52 | KafkaTemplate kafkaTemplate) { 53 | this.consumer = consumer; 54 | this.producer = producer; 55 | this.kafkaTemplate = kafkaTemplate; 56 | } 57 | 58 | public MessageDTO publishSync(final @NotEmpty String topicName, final @NotNull MessageDTO messageDTO) { 59 | return publishRawMessage(topicName, messageDTO, false); 60 | } 61 | 62 | public MessageDTO publishAsync(final @NotEmpty String topicName, final @NotNull MessageDTO messageDTO) { 63 | return publishRawMessage(topicName, messageDTO, true); 64 | } 65 | 66 | private MessageDTO publishRawMessage(final @NotEmpty String topicName, 67 | final @NotNull MessageDTO messageDTO, 68 | final boolean async) { 69 | // Message to send 70 | // TODO Create a Mapper 71 | Message message = new Message(); 72 | message.setContent(messageDTO.getContent()); 73 | message.setTimestamp(System.currentTimeMillis()); 74 | 75 | // Record with a CustomMessage as value 76 | ProducerRecord record = null; 77 | 78 | if (null == messageDTO.getKey()) { 79 | // Value as CustomMessage 80 | record = new ProducerRecord<>(topicName, message); 81 | } else { 82 | // Value as CustomMessage 83 | record = new ProducerRecord<>(topicName, messageDTO.getKey(), message); 84 | } 85 | 86 | // Local instance (prototype) 87 | Producer localProducer = producer.getObject(); 88 | 89 | try { 90 | if (async) { 91 | localProducer.send(record, (metadata, exception) -> { 92 | LOGGER.info("Record ASYNCHRONOUSLY sent to partition {} with offset {}", 93 | metadata.partition(), metadata.offset()); 94 | 95 | // Update model 96 | messageDTO.setPartition(metadata.partition()); 97 | messageDTO.setOffset(metadata.offset()); 98 | }).get(); 99 | } else { 100 | RecordMetadata metadata = localProducer.send(record).get(); 101 | 102 | LOGGER.info("Record sent to partition {} with offset {}", metadata.partition(), metadata.offset()); 103 | 104 | // Update model 105 | messageDTO.setPartition(metadata.partition()); 106 | messageDTO.setOffset(metadata.offset()); 107 | } 108 | } catch (ExecutionException e) { 109 | LOGGER.warn("Execution Error in sending record", e); 110 | } catch (InterruptedException e) { 111 | LOGGER.warn("Interrupted Error in sending record", e); 112 | } finally { 113 | localProducer.flush(); 114 | localProducer.close(); 115 | } 116 | 117 | return messageDTO; 118 | } 119 | 120 | public MessageDTO sendMessage(final @NotEmpty String topicName, 121 | final @NotNull MessageDTO messageDTO) { 122 | // Message to send 123 | // TODO Create a Mapper 124 | Message message = new Message(); 125 | message.setContent(messageDTO.getContent()); 126 | message.setTimestamp(System.currentTimeMillis()); 127 | 128 | SendResult record = null; 129 | 130 | try { 131 | if (null == messageDTO.getKey()) { 132 | // Value as CustomMessage 133 | record = kafkaTemplate.send(topicName, message).get(); 134 | } else { 135 | // Value as CustomMessage 136 | record = kafkaTemplate.send(topicName, messageDTO.getKey(), message).get(); 137 | } 138 | 139 | LOGGER.info("Record sent to partition 
{} with offset {}", 140 | record.getRecordMetadata().partition(), record.getRecordMetadata().offset()); 141 | 142 | // Update model 143 | messageDTO.setPartition(record.getRecordMetadata().partition()); 144 | messageDTO.setOffset(record.getRecordMetadata().offset()); 145 | } catch (ExecutionException e) { 146 | LOGGER.warn("Execution Error in sending record", e); 147 | } catch (InterruptedException e) { 148 | LOGGER.warn("Interrupted Error in sending record", e); 149 | } 150 | 151 | return messageDTO; 152 | } 153 | 154 | public MessageListDTO pollEvents(final @NotEmpty String topicName, final Integer partition, final boolean commit) { 155 | // Response objects 156 | MessageListDTO messageListDTO = new MessageListDTO(); 157 | 158 | // Local instance (prototype) 159 | Consumer localConsumer = consumer.getObject(); 160 | 161 | try { 162 | // Assign to partition defined 163 | if (null != partition) { 164 | TopicPartition topicPartition = new TopicPartition(topicName, partition); 165 | localConsumer.assign(Collections.singletonList(topicPartition)); 166 | 167 | LOGGER.info("Consumer assigned to topic {} and partition {}", topicName, partition); 168 | } else { 169 | // Subscribe to Topic 170 | localConsumer.subscribe(Collections.singletonList(topicName)); 171 | 172 | LOGGER.info("Consumer subscribed to topic {}", topicName); 173 | } 174 | 175 | LOGGER.info("Polling records from topic {}", topicName); 176 | 177 | ConsumerRecords consumerRecords = localConsumer.poll(Duration.ofSeconds(poolTimeout)); 178 | 179 | LOGGER.info("Polled #{} records from topic {}", consumerRecords.count(), topicName); 180 | 181 | consumerRecords.forEach(record -> { 182 | MessageDTO messageDTO = new MessageDTO(); 183 | // TODO Create a Mapper 184 | messageDTO.setTimestamp((Long) record.value().get("timestamp")); 185 | messageDTO.setContent(record.value().get("content").toString()); 186 | // Record Metadata 187 | messageDTO.setKey((null != record.key() ? 
record.key() : null)); 188 | messageDTO.setPartition(record.partition()); 189 | messageDTO.setOffset(record.offset()); 190 | 191 | messageListDTO.addCustomMessage(messageDTO); 192 | }); 193 | 194 | // Commit consumption 195 | if (commit) { 196 | localConsumer.commitAsync(); 197 | 198 | LOGGER.info("Records committed in topic {} from consumer", topicName); 199 | } 200 | } finally { 201 | localConsumer.close(); 202 | } 203 | 204 | return messageListDTO; 205 | } 206 | 207 | } 208 | -------------------------------------------------------------------------------- /src/main/java/com/rmarting/kafka/config/KafkaConfig.java: -------------------------------------------------------------------------------- 1 | package com.rmarting.kafka.config; 2 | 3 | import com.rmarting.kafka.schema.avro.Message; 4 | 5 | import io.apicurio.registry.serde.AbstractKafkaSerDe; 6 | import io.apicurio.registry.serde.AbstractKafkaSerializer; 7 | import io.apicurio.registry.serde.SerdeConfig; 8 | import io.apicurio.registry.serde.avro.AvroDatumProvider; 9 | import io.apicurio.registry.serde.avro.AvroKafkaDeserializer; 10 | import io.apicurio.registry.serde.avro.AvroKafkaSerdeConfig; 11 | import io.apicurio.registry.serde.avro.AvroKafkaSerializer; 12 | import io.apicurio.registry.serde.strategy.TopicIdStrategy; 13 | import org.apache.kafka.clients.admin.AdminClientConfig; 14 | import org.apache.kafka.clients.consumer.Consumer; 15 | import org.apache.kafka.clients.consumer.ConsumerConfig; 16 | import org.apache.kafka.clients.consumer.KafkaConsumer; 17 | import org.apache.kafka.clients.producer.KafkaProducer; 18 | import org.apache.kafka.clients.producer.Producer; 19 | import org.apache.kafka.clients.producer.ProducerConfig; 20 | import org.apache.kafka.common.config.SaslConfigs; 21 | import org.apache.kafka.common.serialization.StringDeserializer; 22 | import org.apache.kafka.common.serialization.StringSerializer; 23 | import org.springframework.beans.factory.annotation.Value; 24 | import org.springframework.beans.factory.config.ConfigurableBeanFactory; 25 | import org.springframework.boot.autoconfigure.kafka.KafkaProperties; 26 | import org.springframework.context.annotation.Bean; 27 | import org.springframework.context.annotation.Configuration; 28 | import org.springframework.context.annotation.Scope; 29 | import org.springframework.kafka.core.DefaultKafkaProducerFactory; 30 | import org.springframework.kafka.core.KafkaTemplate; 31 | import org.springframework.kafka.core.ProducerFactory; 32 | 33 | import java.net.InetAddress; 34 | import java.net.UnknownHostException; 35 | import java.util.Map; 36 | import java.util.Properties; 37 | 38 | @Configuration 39 | public class KafkaConfig { 40 | 41 | @Value("${kafka.bootstrap-servers:localhost:8080}") 42 | private String kafkaBrokers; 43 | 44 | @Value("${kafka.user.name}") 45 | private String kafkaUser; 46 | 47 | @Value("${kafka.user.password}") 48 | private String kafkaPassword; 49 | 50 | @Value("${kafka.security.protocol}") 51 | private String kafkaSecurityProtocol; 52 | 53 | @Value("${producer.clienId:kafka-client-sb-producer-client}") 54 | private String producerClientId; 55 | 56 | @Value("${producer.acks:1}") 57 | private String acks; 58 | 59 | @Value("${consumer.groupId:kafka-client-sb-consumer}") 60 | private String consumerGroupId; 61 | 62 | @Value("${consumer.clientId:kafka-client-sb-consumer-client}") 63 | private String consumerClientId; 64 | 65 | @Value("${consumer.maxPoolRecords:1000}") 66 | private String maxPoolRecords; 67 | 68 | 
@Value("${consumer.offsetReset:earliest}") 69 | private String offsetReset; 70 | 71 | @Value("${consumer.autoCommit:false}") 72 | private String autoCommit; 73 | 74 | @Value("${apicurio.registry.url:http://localhost:8080/api}") 75 | private String serviceRegistryUrl; 76 | 77 | private String getHostname() { 78 | try { 79 | return InetAddress.getLocalHost().getHostName(); 80 | } catch (UnknownHostException e) { 81 | return "UnknownHost"; 82 | } 83 | } 84 | 85 | @Bean 86 | @Scope(scopeName = ConfigurableBeanFactory.SCOPE_PROTOTYPE) 87 | public Producer createProducer() { 88 | Properties props = new Properties(); 89 | 90 | // Kafka Bootstrap 91 | props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBrokers); 92 | 93 | // Security 94 | props.put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, kafkaSecurityProtocol); 95 | props.put(SaslConfigs.SASL_MECHANISM, "SCRAM-SHA-512"); 96 | props.put(SaslConfigs.SASL_JAAS_CONFIG, 97 | "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"" + kafkaUser 98 | + "\" password=\"" + kafkaPassword + "\";"); 99 | 100 | // Producer Client 101 | props.putIfAbsent(ProducerConfig.CLIENT_ID_CONFIG, producerClientId + "-" + getHostname()); 102 | 103 | // Serializers for Keys and Values 104 | props.putIfAbsent(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); 105 | props.putIfAbsent(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, AvroKafkaSerializer.class.getName()); 106 | 107 | // Service Registry 108 | props.putIfAbsent(SerdeConfig.REGISTRY_URL, serviceRegistryUrl); 109 | // Artifact Id Strategies (implementations of ArtifactIdStrategy) 110 | // Simple Topic Id Strategy (schema = topicName) 111 | //props.putIfAbsent(SerdeConfig.ARTIFACT_RESOLVER_STRATEGY, SimpleTopicIdStrategy.class.getName()); 112 | // Topic Id Strategy (schema = topicName-(key|value)) - Default Strategy 113 | props.putIfAbsent(SerdeConfig.ARTIFACT_RESOLVER_STRATEGY, TopicIdStrategy.class.getName()); 114 | // Record Id Strategy (schema = full name of the schema (namespace.name)) 115 | //props.putIfAbsent(SerdeConfig.ARTIFACT_RESOLVER_STRATEGY, RecordIdStrategy.class.getName()); 116 | // Topic Record Id Strategy (schema = topic name and the full name of the schema (topicName-namespace.name) 117 | //props.putIfAbsent(SerdeConfig.ARTIFACT_RESOLVER_STRATEGY, TopicRecordIdStrategy.class.getName()); 118 | 119 | // Global Id Strategies (implementations of GlobalIdStrategy) 120 | //props.putIfAbsent(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, FindLatestIdStrategy.class.getName()); 121 | //props.putIfAbsent(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, FindBySchemaIdStrategy.class.getName()); 122 | //props.putIfAbsent(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, GetOrCreateIdStrategy.class.getName()); 123 | //props.putIfAbsent(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, AutoRegisterIdStrategy.class.getName()); 124 | 125 | // Acknowledgement 126 | props.putIfAbsent(ProducerConfig.ACKS_CONFIG, acks); 127 | 128 | return new KafkaProducer<>(props); 129 | } 130 | 131 | @Bean 132 | @Scope(scopeName = ConfigurableBeanFactory.SCOPE_PROTOTYPE) 133 | public Consumer createConsumer() { 134 | Properties props = new Properties(); 135 | 136 | // Kafka Bootstrap 137 | props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBrokers); 138 | 139 | // Security 140 | props.put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, kafkaSecurityProtocol); 141 | props.put(SaslConfigs.SASL_MECHANISM, 
"SCRAM-SHA-512"); 142 | props.put(SaslConfigs.SASL_JAAS_CONFIG, 143 | "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"" + kafkaUser 144 | + "\" password=\"" + kafkaPassword + "\";"); 145 | 146 | /* 147 | * With group id, kafka broker ensures that the same message is not consumed more then once by a 148 | * consumer group meaning a message can be only consumed by any one member a consumer group. 149 | * 150 | * Consumer groups is also a way of supporting parallel consumption of the data i.e. different consumers of 151 | * the same consumer group consume data in parallel from different partitions. 152 | */ 153 | props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId); 154 | 155 | /* 156 | * In addition to group.id, each consumer also identifies itself to the Kafka broker using consumer.id. 157 | * This is used by Kafka to identify the currently ACTIVE consumers of a particular consumer group. 158 | */ 159 | props.put(ConsumerConfig.CLIENT_ID_CONFIG, consumerClientId + "-" + getHostname()); 160 | 161 | // Deserializers for Keys and Values 162 | props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); 163 | props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, AvroKafkaDeserializer.class.getName()); 164 | 165 | // Pool size 166 | props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPoolRecords); 167 | 168 | /* 169 | * If true the consumer's offset will be periodically committed in the background. 170 | * Disabled to allow commit or not under some circumstances 171 | */ 172 | props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit); 173 | 174 | /* 175 | * What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the 176 | * server: 177 | * earliest: automatically reset the offset to the earliest offset 178 | * latest: automatically reset the offset to the latest offset 179 | */ 180 | props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offsetReset); 181 | 182 | // Service Registry Integration 183 | props.put(SerdeConfig.REGISTRY_URL, serviceRegistryUrl); 184 | // Use Specific Avro classes instead of the GenericRecord class definition 185 | props.put(AvroKafkaSerdeConfig.USE_SPECIFIC_AVRO_READER, true); 186 | 187 | return new KafkaConsumer<>(props); 188 | } 189 | 190 | @Bean 191 | public ProducerFactory producerFactory(KafkaProperties kafkaProperties) { 192 | Map configProps = kafkaProperties.buildProducerProperties(); 193 | 194 | return new DefaultKafkaProducerFactory<>(configProps); 195 | } 196 | 197 | @Bean 198 | public KafkaTemplate kafkaTemplate(KafkaProperties kafkaProperties) { 199 | return new KafkaTemplate<>(producerFactory(kafkaProperties)); 200 | } 201 | 202 | } 203 | -------------------------------------------------------------------------------- /mvnw: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # ---------------------------------------------------------------------------- 3 | # Licensed to the Apache Software Foundation (ASF) under one 4 | # or more contributor license agreements. See the NOTICE file 5 | # distributed with this work for additional information 6 | # regarding copyright ownership. The ASF licenses this file 7 | # to you under the Apache License, Version 2.0 (the 8 | # "License"); you may not use this file except in compliance 9 | # with the License. 
You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, 14 | # software distributed under the License is distributed on an 15 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 16 | # KIND, either express or implied. See the License for the 17 | # specific language governing permissions and limitations 18 | # under the License. 19 | # ---------------------------------------------------------------------------- 20 | 21 | # ---------------------------------------------------------------------------- 22 | # Maven2 Start Up Batch script 23 | # 24 | # Required ENV vars: 25 | # ------------------ 26 | # JAVA_HOME - location of a JDK home dir 27 | # 28 | # Optional ENV vars 29 | # ----------------- 30 | # M2_HOME - location of maven2's installed home dir 31 | # MAVEN_OPTS - parameters passed to the Java VM when running Maven 32 | # e.g. to debug Maven itself, use 33 | # set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 34 | # MAVEN_SKIP_RC - flag to disable loading of mavenrc files 35 | # ---------------------------------------------------------------------------- 36 | 37 | if [ -z "$MAVEN_SKIP_RC" ] ; then 38 | 39 | if [ -f /etc/mavenrc ] ; then 40 | . /etc/mavenrc 41 | fi 42 | 43 | if [ -f "$HOME/.mavenrc" ] ; then 44 | . "$HOME/.mavenrc" 45 | fi 46 | 47 | fi 48 | 49 | # OS specific support. $var _must_ be set to either true or false. 50 | cygwin=false; 51 | darwin=false; 52 | mingw=false 53 | case "`uname`" in 54 | CYGWIN*) cygwin=true ;; 55 | MINGW*) mingw=true;; 56 | Darwin*) darwin=true 57 | # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home 58 | # See https://developer.apple.com/library/mac/qa/qa1170/_index.html 59 | if [ -z "$JAVA_HOME" ]; then 60 | if [ -x "/usr/libexec/java_home" ]; then 61 | export JAVA_HOME="`/usr/libexec/java_home`" 62 | else 63 | export JAVA_HOME="/Library/Java/Home" 64 | fi 65 | fi 66 | ;; 67 | esac 68 | 69 | if [ -z "$JAVA_HOME" ] ; then 70 | if [ -r /etc/gentoo-release ] ; then 71 | JAVA_HOME=`java-config --jre-home` 72 | fi 73 | fi 74 | 75 | if [ -z "$M2_HOME" ] ; then 76 | ## resolve links - $0 may be a link to maven's home 77 | PRG="$0" 78 | 79 | # need this for relative symlinks 80 | while [ -h "$PRG" ] ; do 81 | ls=`ls -ld "$PRG"` 82 | link=`expr "$ls" : '.*-> \(.*\)$'` 83 | if expr "$link" : '/.*' > /dev/null; then 84 | PRG="$link" 85 | else 86 | PRG="`dirname "$PRG"`/$link" 87 | fi 88 | done 89 | 90 | saveddir=`pwd` 91 | 92 | M2_HOME=`dirname "$PRG"`/.. 93 | 94 | # make it fully qualified 95 | M2_HOME=`cd "$M2_HOME" && pwd` 96 | 97 | cd "$saveddir" 98 | # echo Using m2 at $M2_HOME 99 | fi 100 | 101 | # For Cygwin, ensure paths are in UNIX format before anything is touched 102 | if $cygwin ; then 103 | [ -n "$M2_HOME" ] && 104 | M2_HOME=`cygpath --unix "$M2_HOME"` 105 | [ -n "$JAVA_HOME" ] && 106 | JAVA_HOME=`cygpath --unix "$JAVA_HOME"` 107 | [ -n "$CLASSPATH" ] && 108 | CLASSPATH=`cygpath --path --unix "$CLASSPATH"` 109 | fi 110 | 111 | # For Mingw, ensure paths are in UNIX format before anything is touched 112 | if $mingw ; then 113 | [ -n "$M2_HOME" ] && 114 | M2_HOME="`(cd "$M2_HOME"; pwd)`" 115 | [ -n "$JAVA_HOME" ] && 116 | JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" 117 | # TODO classpath? 118 | fi 119 | 120 | if [ -z "$JAVA_HOME" ]; then 121 | javaExecutable="`which javac`" 122 | if [ -n "$javaExecutable" ] && ! 
[ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then 123 | # readlink(1) is not available as standard on Solaris 10. 124 | readLink=`which readlink` 125 | if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then 126 | if $darwin ; then 127 | javaHome="`dirname \"$javaExecutable\"`" 128 | javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" 129 | else 130 | javaExecutable="`readlink -f \"$javaExecutable\"`" 131 | fi 132 | javaHome="`dirname \"$javaExecutable\"`" 133 | javaHome=`expr "$javaHome" : '\(.*\)/bin'` 134 | JAVA_HOME="$javaHome" 135 | export JAVA_HOME 136 | fi 137 | fi 138 | fi 139 | 140 | if [ -z "$JAVACMD" ] ; then 141 | if [ -n "$JAVA_HOME" ] ; then 142 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 143 | # IBM's JDK on AIX uses strange locations for the executables 144 | JAVACMD="$JAVA_HOME/jre/sh/java" 145 | else 146 | JAVACMD="$JAVA_HOME/bin/java" 147 | fi 148 | else 149 | JAVACMD="`which java`" 150 | fi 151 | fi 152 | 153 | if [ ! -x "$JAVACMD" ] ; then 154 | echo "Error: JAVA_HOME is not defined correctly." >&2 155 | echo " We cannot execute $JAVACMD" >&2 156 | exit 1 157 | fi 158 | 159 | if [ -z "$JAVA_HOME" ] ; then 160 | echo "Warning: JAVA_HOME environment variable is not set." 161 | fi 162 | 163 | CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher 164 | 165 | # traverses directory structure from process work directory to filesystem root 166 | # first directory with .mvn subdirectory is considered project base directory 167 | find_maven_basedir() { 168 | 169 | if [ -z "$1" ] 170 | then 171 | echo "Path not specified to find_maven_basedir" 172 | return 1 173 | fi 174 | 175 | basedir="$1" 176 | wdir="$1" 177 | while [ "$wdir" != '/' ] ; do 178 | if [ -d "$wdir"/.mvn ] ; then 179 | basedir=$wdir 180 | break 181 | fi 182 | # workaround for JBEAP-8937 (on Solaris 10/Sparc) 183 | if [ -d "${wdir}" ]; then 184 | wdir=`cd "$wdir/.."; pwd` 185 | fi 186 | # end of workaround 187 | done 188 | echo "${basedir}" 189 | } 190 | 191 | # concatenates all lines of a file 192 | concat_lines() { 193 | if [ -f "$1" ]; then 194 | echo "$(tr -s '\n' ' ' < "$1")" 195 | fi 196 | } 197 | 198 | BASE_DIR=`find_maven_basedir "$(pwd)"` 199 | if [ -z "$BASE_DIR" ]; then 200 | exit 1; 201 | fi 202 | 203 | ########################################################################################## 204 | # Extension to allow automatically downloading the maven-wrapper.jar from Maven-central 205 | # This allows using the maven wrapper in projects that prohibit checking in binary data. 206 | ########################################################################################## 207 | if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then 208 | if [ "$MVNW_VERBOSE" = true ]; then 209 | echo "Found .mvn/wrapper/maven-wrapper.jar" 210 | fi 211 | else 212 | if [ "$MVNW_VERBOSE" = true ]; then 213 | echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." 214 | fi 215 | jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" 216 | while IFS="=" read key value; do 217 | case "$key" in (wrapperUrl) jarUrl="$value"; break ;; 218 | esac 219 | done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" 220 | if [ "$MVNW_VERBOSE" = true ]; then 221 | echo "Downloading from: $jarUrl" 222 | fi 223 | wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" 224 | 225 | if command -v wget > /dev/null; then 226 | if [ "$MVNW_VERBOSE" = true ]; then 227 | echo "Found wget ... 
using wget" 228 | fi 229 | wget "$jarUrl" -O "$wrapperJarPath" 230 | elif command -v curl > /dev/null; then 231 | if [ "$MVNW_VERBOSE" = true ]; then 232 | echo "Found curl ... using curl" 233 | fi 234 | curl -o "$wrapperJarPath" "$jarUrl" 235 | else 236 | if [ "$MVNW_VERBOSE" = true ]; then 237 | echo "Falling back to using Java to download" 238 | fi 239 | javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" 240 | if [ -e "$javaClass" ]; then 241 | if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then 242 | if [ "$MVNW_VERBOSE" = true ]; then 243 | echo " - Compiling MavenWrapperDownloader.java ..." 244 | fi 245 | # Compiling the Java class 246 | ("$JAVA_HOME/bin/javac" "$javaClass") 247 | fi 248 | if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then 249 | # Running the downloader 250 | if [ "$MVNW_VERBOSE" = true ]; then 251 | echo " - Running MavenWrapperDownloader.java ..." 252 | fi 253 | ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") 254 | fi 255 | fi 256 | fi 257 | fi 258 | ########################################################################################## 259 | # End of extension 260 | ########################################################################################## 261 | 262 | export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} 263 | if [ "$MVNW_VERBOSE" = true ]; then 264 | echo $MAVEN_PROJECTBASEDIR 265 | fi 266 | MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" 267 | 268 | # For Cygwin, switch paths to Windows format before running java 269 | if $cygwin; then 270 | [ -n "$M2_HOME" ] && 271 | M2_HOME=`cygpath --path --windows "$M2_HOME"` 272 | [ -n "$JAVA_HOME" ] && 273 | JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` 274 | [ -n "$CLASSPATH" ] && 275 | CLASSPATH=`cygpath --path --windows "$CLASSPATH"` 276 | [ -n "$MAVEN_PROJECTBASEDIR" ] && 277 | MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` 278 | fi 279 | 280 | WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain 281 | 282 | exec "$JAVACMD" \ 283 | $MAVEN_OPTS \ 284 | -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ 285 | "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ 286 | ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" 287 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kafka Clients on Spring Boot Sample 2 | 3 | ⚠️ **No longer under development** ⚠️ 4 | 5 | This repo is no longer under development, as it was migrated and refactored to Quarkus. 6 | 7 | My notes about that migration are available in my [blog post](https://blog.jromanmartin.io/2021/12/03/lessons-learned-migrating-spring-boot-quarkus.html). 8 | 9 | The new repo is available [here](https://github.com/rmarting/kafka-clients-quarkus-sample). 10 | 11 | ⚠️ **No longer under development** ⚠️ 12 | 13 | This sample project demonstrates how to use [Kafka Clients](https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients) 14 | and [Spring Kafka](https://spring.io/projects/spring-kafka) on Spring Boot to send and consume messages from an 15 | [Apache Kafka](https://kafka.apache.org/) cluster. The Apache Kafka cluster is operated by the [Strimzi](https://strimzi.io/) 16 | operator deployed on a Kubernetes or OpenShift platform. These messages will be validated by a Schema Registry or Service Registry 17 | operated by the [Apicurio](https://www.apicur.io/registry/docs/apicurio-registry/index.html#intro-to-the-registry) operator. 18 | 19 | Apache Kafka is an open-source distributed event streaming platform for high-performance data pipelines, 20 | streaming analytics, data integration, and mission-critical applications. 21 | 22 | Service Registry is a datastore for sharing standard event schemas and API designs across API and event-driven architectures. 23 | You can use Service Registry to decouple the structure of your data from your client applications, and to share and 24 | manage your data types and API descriptions at runtime using a REST interface. 25 | 26 | The example includes a simple REST API with the following operations: 27 | 28 | * Send messages to a Topic 29 | * Consume messages from a Topic 30 | 31 | To deploy this application into a Kubernetes/OpenShift environment, we use the amazing [JKube](https://www.eclipse.org/jkube/). 32 | 33 | ## Environment 34 | 35 | This project requires a Kubernetes or OpenShift platform available. If you do not have one, you could use 36 | one of the following resources to deploy a Kubernetes or OpenShift cluster locally: 37 | 38 | * [Red Hat CodeReady Containers - OpenShift 4 on your Laptop](https://github.com/code-ready/crc) 39 | * [Minikube - Running Kubernetes Locally](https://kubernetes.io/docs/setup/minikube/) 40 | 41 | This repo was tested with the following versions of Red Hat CodeReady Containers and Minikube: 42 | 43 | ```shell 44 | ❯ crc version 45 | CodeReady Containers version: 1.28.0+08de64bd 46 | OpenShift version: 4.7.13 (embedded in executable) 47 | ❯ minikube version 48 | minikube version: v1.21.0 49 | commit: 76d74191d82c47883dc7e1319ef7cebd3e00ee11 50 | ``` 51 | 52 | > Note: Whichever platform you are using (Kubernetes or OpenShift), you could use the 53 | > Kubernetes CLI (```kubectl```) or OpenShift CLI (```oc```) to execute the commands described in this repo. 54 | > To reduce the length of this document, the commands displayed will use the Kubernetes CLI. When a specific 55 | > command is only valid for Kubernetes or OpenShift, it will be identified.
56 | 57 | To deploy the resources, we will create a new ```amq-streams-demo``` namespace in the cluster. In the case of Kubernetes: 58 | 59 | ```shell 60 | ❯ kubectl create namespace amq-streams-demo 61 | ``` 62 | 63 | If you are using OpenShift, we will create a project instead: 64 | 65 | ```shell 66 | ❯ oc new-project amq-streams-demo 67 | ``` 68 | 69 | > Note: All the commands should be executed in this namespace. You could permanently save the namespace for 70 | > all subsequent ```kubectl``` commands in that context. 71 | > 72 | > In Kubernetes: 73 | > 74 | > ```shell 75 | > ❯ kubectl config set-context --current --namespace=amq-streams-demo 76 | > ``` 77 | > 78 | > In OpenShift: 79 | > 80 | > ```shell 81 | > ❯ oc project amq-streams-demo 82 | > ``` 83 | 84 | ### Start Red Hat CodeReady Containers 85 | 86 | To start up your local OpenShift 4 cluster: 87 | 88 | ```shell 89 | ❯ crc setup 90 | ❯ crc start -p /PATH/TO/your-pull-secret-file.json 91 | ``` 92 | 93 | You could promote the `developer` user to `cluster-admin` with the following command: 94 | 95 | ```shell 96 | ❯ oc adm policy add-cluster-role-to-user cluster-admin developer 97 | clusterrole.rbac.authorization.k8s.io/cluster-admin added: "developer" 98 | ``` 99 | 100 | ### Start Minikube 101 | 102 | To start up your local Kubernetes cluster: 103 | 104 | ```shell 105 | ❯ minikube start 106 | ❯ minikube addons enable ingress 107 | ❯ minikube addons enable registry 108 | ``` 109 | 110 | To install OLM v0.18.2 in Kubernetes, execute the following commands: 111 | 112 | ```shell 113 | ❯ kubectl apply -f "https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.18.2/crds.yaml" 114 | ❯ kubectl apply -f "https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.18.2/olm.yaml" 115 | ``` 116 | 117 | > Note: There is a Minikube addon to install OLM; however, at the time of writing this 118 | > repo, the version it installs does not include the latest version of the operators. 119 | 120 | ### Deploying Strimzi and Apicurio Operators 121 | 122 | > **NOTE**: Only *cluster-admin* users can deploy Kubernetes Operators. This section must 123 | > be executed with one of them. 124 | 125 | To deploy the Strimzi and Apicurio Operators watching only our namespace, we need to use 126 | an ```OperatorGroup```. An OperatorGroup is an OLM resource that provides multitenant configuration to 127 | OLM-installed Operators. For more information about this object, please review the 128 | official documentation [here](https://docs.openshift.com/container-platform/4.7/operators/understanding/olm/olm-understanding-operatorgroups.html).
129 | 130 | ```shell 131 | ❯ kubectl apply -f src/main/olm/operator-group.yml 132 | operatorgroup.operators.coreos.com/amq-streams-demo-og created 133 | ``` 134 | 135 | Now we are ready to deploy the Strimzi and Apicurio Operators: 136 | 137 | For Kubernetes, use the following subscriptions: 138 | 139 | ```shell 140 | ❯ kubectl apply -f src/main/strimzi/operator/subscription-k8s.yml 141 | subscription.operators.coreos.com/strimzi-kafka-operator created 142 | ❯ kubectl apply -f src/main/apicurio/operator/subscription-k8s.yml 143 | subscription.operators.coreos.com/apicurio-registry created 144 | ``` 145 | 146 | For OpenShift, use the following subscriptions: 147 | 148 | ```shell 149 | ❯ oc apply -f src/main/strimzi/operator/subscription.yml 150 | subscription.operators.coreos.com/strimzi-kafka-operator created 151 | ❯ oc apply -f src/main/apicurio/operator/subscription.yml 152 | subscription.operators.coreos.com/apicurio-registry created 153 | ``` 154 | 155 | You can check that the operators are successfully registered with the following command: 156 | 157 | ```shell 158 | ❯ kubectl get csv 159 | NAME DISPLAY VERSION REPLACES PHASE 160 | apicurio-registry-operator.v1.0.0-v2.0.0.final Apicurio Registry Operator 1.0.0-v2.0.0.final Succeeded 161 | strimzi-cluster-operator.v0.24.0 Strimzi 0.24.0 strimzi-cluster-operator.v0.23.0 Succeeded 162 | ``` 163 | 164 | or verify that the pods are running: 165 | 166 | ```shell 167 | ❯ kubectl get pod 168 | NAME READY STATUS RESTARTS AGE 169 | apicurio-registry-operator-598fff6985-xplll 1/1 Running 0 47m 170 | strimzi-cluster-operator-v0.24.0-9d5c6b6d-qqxjx 1/1 Running 0 47m 171 | ``` 172 | 173 | For more information about how to install Operators using the CLI command, please review this [article]( 174 | https://docs.openshift.com/container-platform/4.7/operators/admin/olm-adding-operators-to-cluster.html#olm-installing-operator-from-operatorhub-using-cli_olm-adding-operators-to-a-cluster) 175 | 176 | ### Deploying Kafka 177 | 178 | The ```src/main/strimzi``` folder includes a set of custom resources to deploy a Kafka Cluster 179 | and some Kafka Topics using the Strimzi Operator. 180 | 181 | To deploy the Kafka Cluster: 182 | 183 | ```shell 184 | ❯ kubectl apply -f src/main/strimzi/kafka/kafka.yml 185 | kafka.kafka.strimzi.io/my-kafka created 186 | ``` 187 | 188 | > If you want to deploy a Kafka Cluster with HA capabilities, there is a definition 189 | > in the [kafka-ha.yml](./src/main/strimzi/kafka/kafka-ha.yml) file. 190 | 191 | To deploy the Kafka Topics: 192 | 193 | ```shell 194 | ❯ kubectl apply -f src/main/strimzi/topics/kafkatopic-messages.yml 195 | kafkatopic.kafka.strimzi.io/messages created 196 | ``` 197 | 198 | > If you want to use a Kafka Topic with HA capabilities, there is a definition 199 | > in the [kafkatopic-messages-ha.yml](./src/main/strimzi/topics/kafkatopic-messages-ha.yml) file. 200 | 201 | There is a set of different users to connect to the Kafka Cluster. We will deploy them now to be used later.
202 | 203 | ```shell 204 | ❯ kubectl apply -f src/main/strimzi/users/ 205 | kafkauser.kafka.strimzi.io/application created 206 | kafkauser.kafka.strimzi.io/service-registry-scram created 207 | kafkauser.kafka.strimzi.io/service-registry-tls created 208 | ``` 209 | 210 | After some minutes, the Kafka Cluster will be deployed: 211 | 212 | ```shell 213 | ❯ kubectl get pod 214 | NAME READY STATUS RESTARTS AGE 215 | apicurio-registry-operator-598fff6985-xplll 1/1 Running 0 51m 216 | my-kafka-entity-operator-67b75cbd47-ccblt 3/3 Running 0 63s 217 | my-kafka-kafka-0 1/1 Running 0 2m8s 218 | my-kafka-zookeeper-0 1/1 Running 0 3m27s 219 | strimzi-cluster-operator-v0.23.0-9d5c6b6d-qqxjx 1/1 Running 0 51m 220 | ``` 221 | 222 | ### Service Registry 223 | 224 | Service Registry needs a set of Kafka Topics to store schemas and their metadata. We need to execute the following 225 | commands to create the KafkaTopics and to deploy an instance of Service Registry: 226 | 227 | ```shell 228 | ❯ kubectl apply -f src/main/apicurio/topics/ 229 | kafkatopic.kafka.strimzi.io/kafkasql-journal created 230 | ❯ kubectl apply -f src/main/apicurio/service-registry.yml 231 | apicurioregistry.apicur.io/service-registry created 232 | ``` 233 | 234 | A new Deployment/DeploymentConfig is created with the prefix ```service-registry-deployment-``` and a new route is 235 | created with the prefix ```service-registry-ingress-```. We must inspect them 236 | to get the route created to expose the Service Registry API. 237 | 238 | In Kubernetes we will expose the deployment with a ```NodePort``` service. To get the service URL: 239 | 240 | ```shell 241 | ❯ kubectl get deployment | grep service-registry-deployment 242 | service-registry-deployment 243 | ❯ kubectl expose deployment service-registry-deployment --type=NodePort --port=8080 244 | service/service-registry-deployment exposed 245 | ❯ kubectl get service/service-registry-deployment 246 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 247 | service-registry-deployment NodePort 10.110.126.48 <none> 8080:30456/TCP 15s 248 | ❯ minikube service service-registry-deployment --url -n amq-streams-demo 249 | http://192.168.50.115:30456 250 | ``` 251 | 252 | In OpenShift, we only need to check the ```host``` attribute from the OpenShift Route: 253 | 254 | ```shell 255 | ❯ oc get route 256 | NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD 257 | service-registry-ingress-b828r service-registry.amq-streams-demo.apps-crc.testing / service-registry-service None 258 | ``` 259 | 260 | Wait a few minutes until your Service Registry is deployed.
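You could watch the rollout to know when the registry is ready. A minimal check, assuming the ```service-registry-deployment``` name shown above (in OpenShift the generated name may differ):

```shell
# Block until the Service Registry deployment finishes rolling out
❯ kubectl rollout status deployment/service-registry-deployment
```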
261 | 262 | The Service Registry Web Console and API endpoints will be available from: 263 | 264 | * **Web Console**: http://KUBERNETES_OPENSHIFT_SR_ROUTE_SERVICE_HOST/ui/ 265 | * **API REST**: http://KUBERNETES_OPENSHIFT_SR_ROUTE_SERVICE_HOST/apis/registry/v2 266 | 267 | Set the ```apicurio.registry.url``` property in the ```pom.xml``` file to the Service Registry URL before publishing the 268 | schemas used by this application: 269 | 270 | ```shell 271 | ❯ oc get route -l app=service-registry -o jsonpath='{.items[0].spec.host}' 272 | ``` 273 | 274 | To register the schemas in Service Registry running in Kubernetes: 275 | 276 | ```shell 277 | ❯ mvn clean generate-sources -Papicurio \ 278 | -Dapicurio.registry.url=$(minikube service service-registry-deployment --url -n amq-streams-demo)/apis/registry/v2 279 | ``` 280 | 281 | To register the schemas in Service Registry running in OpenShift: 282 | 283 | ```shell 284 | ❯ mvn clean generate-sources -Papicurio 285 | ``` 286 | 287 | The next screenshot shows the schemas registered in the Web Console: 288 | 289 | ![Artifacts registered in Apicurio Registry](./img/apicurio-registry-artifacts.png) 290 | 291 | ## Build and Deploy 292 | 293 | Before we build the application, we need to set up some values in the ```src/main/resources/application.properties``` file. 294 | 295 | Review and set up the right values for your Kafka Cluster: 296 | 297 | * **Kafka Bootstrap Servers**: Kafka brokers are defined by a Kubernetes or OpenShift service created by Strimzi when 298 | the Kafka cluster is deployed. This service, called *cluster-name*-kafka-bootstrap, exposes port 9092 for plain 299 | traffic and port 9093 for encrypted traffic. 300 | 301 | ```text 302 | kafka.bootstrap-servers = my-kafka-kafka-bootstrap:9092 303 | 304 | spring.kafka.bootstrap-servers = ${kafka.bootstrap-servers} 305 | ``` 306 | 307 | * **Kafka User Credentials**: the Kafka Cluster requires authentication, so we need to set up the Kafka User credentials 308 | in our application (the ```kafka.user.*``` properties in the ```application.properties``` file). Each KafkaUser has its own 309 | secret to store the password. This secret must be checked to extract the password for our user. 310 | 311 | To extract the password of the KafkaUser and declare it as an environment variable: 312 | 313 | ```shell 314 | ❯ export KAFKA_USER_PASSWORD=$(kubectl get secret application -o jsonpath='{.data.password}' | base64 -d) 315 | ``` 316 | 317 | It is a best practice to use the secret directly as a variable in our deployment in Kubernetes or OpenShift. We can do 318 | that by declaring the variable in the container spec as: 319 | 320 | ```yaml 321 | spec: 322 | template: 323 | spec: 324 | containers: 325 | - env: 326 | - name: KAFKA_USER_PASSWORD 327 | valueFrom: 328 | secretKeyRef: 329 | key: password 330 | name: application 331 | ``` 332 | 333 | There is a deployment definition in the [deployment.yml](./src/main/jkube/deployment.yml) file. This file will be used 334 | by JKube to deploy our application in Kubernetes or OpenShift. 335 | 336 | * **Service Registry API Endpoint**: Avro Serde classes need to communicate with the Service Registry API to check and 337 | validate the schemas.
338 | 339 | ```text 340 | apicurio.registry.url = http://service-registry.amq-streams-demo.apps-crc.testing/apis/registry/v2 341 | ``` 342 | 343 | To build the application: 344 | 345 | ```shell 346 | ❯ mvn clean package 347 | ``` 348 | 349 | To run locally: 350 | 351 | ```shell 352 | ❯ export KAFKA_USER_PASSWORD=$(kubectl get secret application -o jsonpath='{.data.password}' | base64 -d) 353 | ❯ mvn spring-boot:run 354 | ``` 355 | 356 | Or you can deploy it into a Kubernetes or OpenShift platform using the [Eclipse JKube](https://github.com/eclipse/jkube) Maven Plug-ins. 357 | 358 | To deploy the application using the Kubernetes Maven Plug-In: 359 | 360 | ```shell 361 | ❯ mvn package k8s:resource k8s:build k8s:push k8s:apply -Pkubernetes -Djkube.build.strategy=jib 362 | ``` 363 | 364 | To deploy the application using the OpenShift Maven Plug-In (only valid for OpenShift Platform): 365 | 366 | ```shell 367 | ❯ oc adm policy add-role-to-user view system:serviceaccount:amq-streams-demo:default 368 | ❯ mvn package oc:resource oc:build oc:apply -Popenshift 369 | ``` 370 | 371 | To deploy the application in Minikube: 372 | 373 | ```shell 374 | ❯ eval $(minikube docker-env) 375 | ❯ kubectl create -f src/main/k8s/role.yml 376 | ❯ mvn package k8s:resource k8s:build k8s:apply -Pkubernetes 377 | ``` 378 | 379 | ## REST API 380 | 381 | The REST API is available from a Swagger UI at: 382 | 383 | ```text 384 | http://KUBERNETES_OPENSHIFT_ROUTE_SERVICE_HOST/swagger-ui.html 385 | ``` 386 | 387 | **KUBERNETES_OPENSHIFT_ROUTE_SERVICE_HOST** is the route created on Kubernetes or OpenShift to expose the 388 | service externally. 389 | 390 | To get the route in OpenShift, the following command gives you the host: 391 | 392 | ```shell 393 | ❯ oc get route kafka-clients-sb-sample -o jsonpath='{.spec.host}' 394 | ``` 395 | 396 | There are two API groups to manage a topic in the Kafka Cluster: 397 | 398 | * **Producer**: Send messages to a topic 399 | * **Consumer**: Consume messages from a topic 400 | 401 | ### Producer REST API 402 | 403 | Sample REST API to send messages to a Kafka Topic. 404 | Parameters: 405 | 406 | * **topicName**: Topic Name 407 | * **messageDTO**: Message content based on the custom MessageDTO model: 408 | 409 | Model: 410 | 411 | ```text 412 | MessageDTO { 413 | key (integer, optional): Key to identify this message, 414 | timestamp (string, optional, read only): Timestamp, 415 | content (string): Content, 416 | partition (integer, optional, read only): Partition number, 417 | offset (integer, optional, read only): Offset in the partition 418 | } 419 | ``` 420 | 421 | Simple Sample: 422 | 423 | ```shell 424 | ❯ curl -X POST http://$(oc get route kafka-clients-sb-sample -o jsonpath='{.spec.host}')/producer/kafka/messages \ 425 | -H "Content-Type:application/json" -d '{"content": "Simple message"}' | jq 426 | { 427 | "key": null, 428 | "timestamp": 1581087543362, 429 | "content": "Simple message", 430 | "partition": 0, 431 | "offset": 3 432 | } 433 | ``` 434 | 435 | With Minikube: 436 | 437 | ```shell 438 | ❯ curl $(minikube ip):$(kubectl get svc kafka-clients-sb-sample -o jsonpath='{.spec.ports[].nodePort}')/producer/kafka/messages \ 439 | -H "Content-Type:application/json" -d '{"content": "Simple message from Minikube"}' | jq 440 | { 441 | "key": null, 442 | "timestamp": 1596203271368, 443 | "content": "Simple message from Minikube", 444 | "partition": 0, 445 | "offset": 4 446 | } 447 | ``` 448 | 449 | ### Consumer REST API 450 | 451 | Sample REST API to consume messages from a Kafka Topic.
452 | Parameters: 453 | 454 | * **topicName**: Topic Name (Required) 455 | * **partition**: Number of the partition to consume (Optional) 456 | * **commit**: Commit the consumed messages. Values: true|false 457 | 458 | Simple Sample: 459 | 460 | ```shell 461 | ❯ curl -v "http://$(oc get route kafka-clients-sb-sample -o jsonpath='{.spec.host}')/consumer/kafka/messages?commit=true&partition=0" | jq 462 | { 463 | "messages": [ 464 | { 465 | "key": null, 466 | "timestamp": 1581087539350, 467 | "content": "Simple message", 468 | "partition": 0, 469 | "offset": 0 470 | }, 471 | ... 472 | { 473 | "key": null, 474 | "timestamp": 1581087584266, 475 | "content": "Simple message", 476 | "partition": 0, 477 | "offset": 3 478 | } 479 | ] 480 | } 481 | ``` 482 | 483 | With Minikube: 484 | 485 | ```shell 486 | ❯ curl $(minikube ip):$(kubectl get svc kafka-clients-sb-sample -o jsonpath='{.spec.ports[].nodePort}')"/consumer/kafka/messages?commit=true&partition=0" | jq 487 | { 488 | "messages":[ 489 | { 490 | "key": null, 491 | "timestamp": 1596203271368, 492 | "content": "Simple message from Minikube", 493 | "partition": 0, 494 | "offset": 4 495 | } 496 | ] 497 | } 498 | ``` 499 | 500 | That is it! You have deployed a full stack of components to produce and consume messages checked and validated against 501 | a declared schema. Congratulations! 502 | 503 | ## Main References 504 | 505 | * [Strimzi](https://strimzi.io/) 506 | * [Apicurio](https://www.apicur.io/) 507 | * [OperatorHub - Strimzi](https://operatorhub.io/operator/strimzi-kafka-operator) 508 | * [OperatorHub - Apicurio Registry](https://operatorhub.io/operator/apicurio-registry) 509 | * [Eclipse JKube](https://www.eclipse.org/jkube/) 510 | --------------------------------------------------------------------------------