├── .gitignore ├── LICENSE ├── README.md ├── avro ├── README.md ├── avro-tools-1.9.2.jar ├── com │ └── kafkadefinitiveguide │ │ └── producer │ │ └── serializer │ │ └── avroserializer │ │ └── Customer.java └── customer.avsc ├── build.gradle ├── docs ├── command_memo.md ├── installation.md ├── kafka_connect.md └── pics │ ├── book-cover.jpg │ ├── mysql-binlog.png │ └── mysql-configuration-file-path.png ├── gradle └── wrapper │ ├── gradle-wrapper.jar │ └── gradle-wrapper.properties ├── kafka-definitive-guide-chapter3 ├── .gitignore ├── build.gradle ├── src │ └── java │ │ ├── .keep │ │ └── com │ │ └── kafkadefinitiveguide │ │ └── producer │ │ ├── partitioner │ │ └── custompartitioner │ │ │ └── BananaPartitioner.java │ │ ├── send │ │ ├── AsynchronouslySend.java │ │ ├── DemoProducerCallback.java │ │ ├── FireAndForgetSend.java │ │ └── SynchronouslySend.java │ │ └── serializer │ │ ├── avroserializer │ │ ├── Customer.java │ │ ├── CustomerGenerator.java │ │ ├── CustomerProducer.java │ │ └── GenericAvroRecordProducer.java │ │ └── customserializer │ │ ├── Customer.java │ │ └── CustomerSerializer.java └── test │ └── unit │ └── .keep ├── kafka-definitive-guide-chapter4 ├── .gitignore ├── build.gradle └── src │ └── java │ └── com │ └── kafkadefinitiveguide │ └── consumer │ ├── assignpartitions │ └── AssignParitionsExample.java │ ├── commit │ ├── AsynchronousCommit.java │ ├── AsynchronousCommitWithCallback.java │ ├── CombineSynchronousAndAsynchronousCommit.java │ ├── SpecifiedOffsetCommit.java │ └── SynchronousCommit.java │ ├── deserializer │ ├── avrodeserializer │ │ ├── Customer.java │ │ └── CustomerConsumer.java │ └── customdeserializer │ │ ├── Customer.java │ │ └── CustomerDeserializer.java │ ├── exitpollloop │ └── ExitPollLoopExample.java │ ├── rebalancelisteners │ ├── CommitOffsetToDbExample.java │ └── RebalanceListenersExample.java │ └── receive │ └── ExampleConsumer.java └── settings.gradle /.gitignore: -------------------------------------------------------------------------------- 1 | /.gradle/ 2 | /log/** 3 | /bin/** 4 | /build/** 5 | .classpath 6 | .project 7 | .settings/ 8 | /bin/ 9 | /build/ 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # kafka-definitive-guide 2 | 3 | [![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)](https://opensource.org/licenses/Apache-2.0) 4 | 5 | This repository contains the sample code for the book "[Kafka: The Definitive Guide (Gwen Shapira, Neha Narkhede, and Todd Palino)](http://shop.oreilly.com/product/0636920044123.do)" and personal study notes on Apache Kafka. 6 | 7 | ![](docs/pics/book-cover.jpg) 8 | 9 | ## Study Notes 10 | - [Google Doc](https://docs.google.com/document/d/1JJqllxpVwzTJLrGILxJ10LT5_lhi8ZbKlHcrE54A6Rc/edit?usp=sharing) 11 | 12 | ## Documentation 13 | - [Avro](avro/README.md) 14 | - [Kafka Connect](docs/kafka_connect.md) 15 | - [Command Memo](docs/command_memo.md) 16 | - [Installation](docs/installation.md) 17 | 18 | ## Package Introduction 19 | ### Chapter 3 20 | | Package | Description | 21 | |----|----| 22 | | `com.kafkadefinitiveguide.producer.send` | The basic examples of producers for sending messages in different styles: fire-and-forget, synchronous, and asynchronous. | 23 | | `com.kafkadefinitiveguide.producer.serializer.customserializer` | The example of writing a custom serializer for a POJO class. | 24 | | `com.kafkadefinitiveguide.producer.serializer.avroserializer` | The example of sending messages by using the Avro serializer. | 25 | | `com.kafkadefinitiveguide.producer.partitioner.custompartitioner` | The example of writing a custom partitioner. | 26 | 27 | ### Chapter 4 28 | | Package | Description | 29 | |----|----| 30 | | `com.kafkadefinitiveguide.consumer.receive` | The basic example of a consumer. | 31 | | `com.kafkadefinitiveguide.consumer.commit` | The examples of different offset commit strategies: synchronous, asynchronous, a combination of synchronous and asynchronous, and committing a specified offset. | 32 | | `com.kafkadefinitiveguide.consumer.rebalancelisteners` | The examples of using rebalance listeners. | 33 | | `com.kafkadefinitiveguide.consumer.exitpollloop` | The example of how to exit a poll loop safely. | 34 | | `com.kafkadefinitiveguide.consumer.deserializer.customdeserializer` | The example of writing a custom deserializer for a POJO class. | 35 | | `com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer` | The example of receiving messages by using the Avro deserializer. | 36 | | `com.kafkadefinitiveguide.consumer.assignpartitions` | The example of assigning a consumer to partitions instead of letting the consumer subscribe to a topic.
| 37 | -------------------------------------------------------------------------------- /avro/README.md: -------------------------------------------------------------------------------- 1 | # Avro 2 | 3 | ## Define Avro Schema 4 | See [customer.avsc](customer.avsc) in this directory for an example of defining a record schema. 5 | ## Generate Classes Based on Avro Schema 6 | ```bash 7 | java -jar /path/to/avro-tools-1.9.2.jar compile schema <schema_file> <destination_folder> 8 | ``` 9 | 10 | ## References 11 | - [Apache Avro™ 1.9.2 Getting Started (Java)](https://avro.apache.org/docs/1.9.2/gettingstartedjava.html) 12 | -------------------------------------------------------------------------------- /avro/avro-tools-1.9.2.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wuyichen24/kafka-definitive-guide/3375555e46a6ba7cb78b05e40ba64d7af85a0faa/avro/avro-tools-1.9.2.jar -------------------------------------------------------------------------------- /avro/com/kafkadefinitiveguide/producer/serializer/avroserializer/Customer.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Autogenerated by Avro 3 | * 4 | * DO NOT EDIT DIRECTLY 5 | */ 6 | package com.kafkadefinitiveguide.producer.serializer.avroserializer; 7 | 8 | import org.apache.avro.generic.GenericArray; 9 | import org.apache.avro.specific.SpecificData; 10 | import org.apache.avro.util.Utf8; 11 | import org.apache.avro.message.BinaryMessageEncoder; 12 | import org.apache.avro.message.BinaryMessageDecoder; 13 | import org.apache.avro.message.SchemaStore; 14 | 15 | @org.apache.avro.specific.AvroGenerated 16 | public class Customer extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { 17 | private static final long serialVersionUID = -2681329921521656134L; 18 | public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Customer\",\"namespace\":\"com.kafkadefinitiveguide.producer.serializer.avroserializer\",\"fields\":[{\"name\":\"id\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"email\",\"type\":[\"string\",\"null\"],\"default\":\"null\"}]}"); 19 | public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } 20 | 21 | private static SpecificData MODEL$ = new SpecificData(); 22 | 23 | private static final BinaryMessageEncoder<Customer> ENCODER = 24 | new BinaryMessageEncoder<Customer>(MODEL$, SCHEMA$); 25 | 26 | private static final BinaryMessageDecoder<Customer> DECODER = 27 | new BinaryMessageDecoder<Customer>(MODEL$, SCHEMA$); 28 | 29 | /** 30 | * Return the BinaryMessageEncoder instance used by this class. 31 | * @return the message encoder used by this class 32 | */ 33 | public static BinaryMessageEncoder<Customer> getEncoder() { 34 | return ENCODER; 35 | } 36 | 37 | /** 38 | * Return the BinaryMessageDecoder instance used by this class. 39 | * @return the message decoder used by this class 40 | */ 41 | public static BinaryMessageDecoder<Customer> getDecoder() { 42 | return DECODER; 43 | } 44 | 45 | /** 46 | * Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}. 47 | * @param resolver a {@link SchemaStore} used to find schemas by fingerprint 48 | * @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore 49 | */ 50 | public static BinaryMessageDecoder<Customer> createDecoder(SchemaStore resolver) { 51 | return new BinaryMessageDecoder<Customer>(MODEL$, SCHEMA$, resolver); 52 | } 53 | 54 | /** 55 | * Serializes this Customer to a ByteBuffer.
56 | * @return a buffer holding the serialized data for this instance 57 | * @throws java.io.IOException if this instance could not be serialized 58 | */ 59 | public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException { 60 | return ENCODER.encode(this); 61 | } 62 | 63 | /** 64 | * Deserializes a Customer from a ByteBuffer. 65 | * @param b a byte buffer holding serialized data for an instance of this class 66 | * @return a Customer instance decoded from the given buffer 67 | * @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class 68 | */ 69 | public static Customer fromByteBuffer( 70 | java.nio.ByteBuffer b) throws java.io.IOException { 71 | return DECODER.decode(b); 72 | } 73 | 74 | private int id; 75 | private java.lang.CharSequence name; 76 | private java.lang.CharSequence email; 77 | 78 | /** 79 | * Default constructor. Note that this does not initialize fields 80 | * to their default values from the schema. If that is desired then 81 | * one should use newBuilder(). 82 | */ 83 | public Customer() {} 84 | 85 | /** 86 | * All-args constructor. 87 | * @param id The new value for id 88 | * @param name The new value for name 89 | * @param email The new value for email 90 | */ 91 | public Customer(java.lang.Integer id, java.lang.CharSequence name, java.lang.CharSequence email) { 92 | this.id = id; 93 | this.name = name; 94 | this.email = email; 95 | } 96 | 97 | public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; } 98 | public org.apache.avro.Schema getSchema() { return SCHEMA$; } 99 | // Used by DatumWriter. Applications should not call. 100 | public java.lang.Object get(int field$) { 101 | switch (field$) { 102 | case 0: return id; 103 | case 1: return name; 104 | case 2: return email; 105 | default: throw new org.apache.avro.AvroRuntimeException("Bad index"); 106 | } 107 | } 108 | 109 | // Used by DatumReader. Applications should not call. 110 | @SuppressWarnings(value="unchecked") 111 | public void put(int field$, java.lang.Object value$) { 112 | switch (field$) { 113 | case 0: id = (java.lang.Integer)value$; break; 114 | case 1: name = (java.lang.CharSequence)value$; break; 115 | case 2: email = (java.lang.CharSequence)value$; break; 116 | default: throw new org.apache.avro.AvroRuntimeException("Bad index"); 117 | } 118 | } 119 | 120 | /** 121 | * Gets the value of the 'id' field. 122 | * @return The value of the 'id' field. 123 | */ 124 | public int getId() { 125 | return id; 126 | } 127 | 128 | 129 | /** 130 | * Sets the value of the 'id' field. 131 | * @param value the value to set. 132 | */ 133 | public void setId(int value) { 134 | this.id = value; 135 | } 136 | 137 | /** 138 | * Gets the value of the 'name' field. 139 | * @return The value of the 'name' field. 140 | */ 141 | public java.lang.CharSequence getName() { 142 | return name; 143 | } 144 | 145 | 146 | /** 147 | * Sets the value of the 'name' field. 148 | * @param value the value to set. 149 | */ 150 | public void setName(java.lang.CharSequence value) { 151 | this.name = value; 152 | } 153 | 154 | /** 155 | * Gets the value of the 'email' field. 156 | * @return The value of the 'email' field. 157 | */ 158 | public java.lang.CharSequence getEmail() { 159 | return email; 160 | } 161 | 162 | 163 | /** 164 | * Sets the value of the 'email' field. 165 | * @param value the value to set. 
166 | */ 167 | public void setEmail(java.lang.CharSequence value) { 168 | this.email = value; 169 | } 170 | 171 | /** 172 | * Creates a new Customer RecordBuilder. 173 | * @return A new Customer RecordBuilder 174 | */ 175 | public static com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder newBuilder() { 176 | return new com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder(); 177 | } 178 | 179 | /** 180 | * Creates a new Customer RecordBuilder by copying an existing Builder. 181 | * @param other The existing builder to copy. 182 | * @return A new Customer RecordBuilder 183 | */ 184 | public static com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder newBuilder(com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder other) { 185 | if (other == null) { 186 | return new com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder(); 187 | } else { 188 | return new com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder(other); 189 | } 190 | } 191 | 192 | /** 193 | * Creates a new Customer RecordBuilder by copying an existing Customer instance. 194 | * @param other The existing instance to copy. 195 | * @return A new Customer RecordBuilder 196 | */ 197 | public static com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder newBuilder(com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer other) { 198 | if (other == null) { 199 | return new com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder(); 200 | } else { 201 | return new com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder(other); 202 | } 203 | } 204 | 205 | /** 206 | * RecordBuilder for Customer instances. 207 | */ 208 | @org.apache.avro.specific.AvroGenerated 209 | public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<Customer> 210 | implements org.apache.avro.data.RecordBuilder<Customer> { 211 | 212 | private int id; 213 | private java.lang.CharSequence name; 214 | private java.lang.CharSequence email; 215 | 216 | /** Creates a new Builder */ 217 | private Builder() { 218 | super(SCHEMA$); 219 | } 220 | 221 | /** 222 | * Creates a Builder by copying an existing Builder. 223 | * @param other The existing Builder to copy. 224 | */ 225 | private Builder(com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder other) { 226 | super(other); 227 | if (isValidValue(fields()[0], other.id)) { 228 | this.id = data().deepCopy(fields()[0].schema(), other.id); 229 | fieldSetFlags()[0] = other.fieldSetFlags()[0]; 230 | } 231 | if (isValidValue(fields()[1], other.name)) { 232 | this.name = data().deepCopy(fields()[1].schema(), other.name); 233 | fieldSetFlags()[1] = other.fieldSetFlags()[1]; 234 | } 235 | if (isValidValue(fields()[2], other.email)) { 236 | this.email = data().deepCopy(fields()[2].schema(), other.email); 237 | fieldSetFlags()[2] = other.fieldSetFlags()[2]; 238 | } 239 | } 240 | 241 | /** 242 | * Creates a Builder by copying an existing Customer instance 243 | * @param other The existing instance to copy.
244 | */ 245 | private Builder(com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer other) { 246 | super(SCHEMA$); 247 | if (isValidValue(fields()[0], other.id)) { 248 | this.id = data().deepCopy(fields()[0].schema(), other.id); 249 | fieldSetFlags()[0] = true; 250 | } 251 | if (isValidValue(fields()[1], other.name)) { 252 | this.name = data().deepCopy(fields()[1].schema(), other.name); 253 | fieldSetFlags()[1] = true; 254 | } 255 | if (isValidValue(fields()[2], other.email)) { 256 | this.email = data().deepCopy(fields()[2].schema(), other.email); 257 | fieldSetFlags()[2] = true; 258 | } 259 | } 260 | 261 | /** 262 | * Gets the value of the 'id' field. 263 | * @return The value. 264 | */ 265 | public int getId() { 266 | return id; 267 | } 268 | 269 | 270 | /** 271 | * Sets the value of the 'id' field. 272 | * @param value The value of 'id'. 273 | * @return This builder. 274 | */ 275 | public com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder setId(int value) { 276 | validate(fields()[0], value); 277 | this.id = value; 278 | fieldSetFlags()[0] = true; 279 | return this; 280 | } 281 | 282 | /** 283 | * Checks whether the 'id' field has been set. 284 | * @return True if the 'id' field has been set, false otherwise. 285 | */ 286 | public boolean hasId() { 287 | return fieldSetFlags()[0]; 288 | } 289 | 290 | 291 | /** 292 | * Clears the value of the 'id' field. 293 | * @return This builder. 294 | */ 295 | public com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder clearId() { 296 | fieldSetFlags()[0] = false; 297 | return this; 298 | } 299 | 300 | /** 301 | * Gets the value of the 'name' field. 302 | * @return The value. 303 | */ 304 | public java.lang.CharSequence getName() { 305 | return name; 306 | } 307 | 308 | 309 | /** 310 | * Sets the value of the 'name' field. 311 | * @param value The value of 'name'. 312 | * @return This builder. 313 | */ 314 | public com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder setName(java.lang.CharSequence value) { 315 | validate(fields()[1], value); 316 | this.name = value; 317 | fieldSetFlags()[1] = true; 318 | return this; 319 | } 320 | 321 | /** 322 | * Checks whether the 'name' field has been set. 323 | * @return True if the 'name' field has been set, false otherwise. 324 | */ 325 | public boolean hasName() { 326 | return fieldSetFlags()[1]; 327 | } 328 | 329 | 330 | /** 331 | * Clears the value of the 'name' field. 332 | * @return This builder. 333 | */ 334 | public com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder clearName() { 335 | name = null; 336 | fieldSetFlags()[1] = false; 337 | return this; 338 | } 339 | 340 | /** 341 | * Gets the value of the 'email' field. 342 | * @return The value. 343 | */ 344 | public java.lang.CharSequence getEmail() { 345 | return email; 346 | } 347 | 348 | 349 | /** 350 | * Sets the value of the 'email' field. 351 | * @param value The value of 'email'. 352 | * @return This builder. 353 | */ 354 | public com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder setEmail(java.lang.CharSequence value) { 355 | validate(fields()[2], value); 356 | this.email = value; 357 | fieldSetFlags()[2] = true; 358 | return this; 359 | } 360 | 361 | /** 362 | * Checks whether the 'email' field has been set. 363 | * @return True if the 'email' field has been set, false otherwise. 
364 | */ 365 | public boolean hasEmail() { 366 | return fieldSetFlags()[2]; 367 | } 368 | 369 | 370 | /** 371 | * Clears the value of the 'email' field. 372 | * @return This builder. 373 | */ 374 | public com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder clearEmail() { 375 | email = null; 376 | fieldSetFlags()[2] = false; 377 | return this; 378 | } 379 | 380 | @Override 381 | @SuppressWarnings("unchecked") 382 | public Customer build() { 383 | try { 384 | Customer record = new Customer(); 385 | record.id = fieldSetFlags()[0] ? this.id : (java.lang.Integer) defaultValue(fields()[0]); 386 | record.name = fieldSetFlags()[1] ? this.name : (java.lang.CharSequence) defaultValue(fields()[1]); 387 | record.email = fieldSetFlags()[2] ? this.email : (java.lang.CharSequence) defaultValue(fields()[2]); 388 | return record; 389 | } catch (org.apache.avro.AvroMissingFieldException e) { 390 | throw e; 391 | } catch (java.lang.Exception e) { 392 | throw new org.apache.avro.AvroRuntimeException(e); 393 | } 394 | } 395 | } 396 | 397 | @SuppressWarnings("unchecked") 398 | private static final org.apache.avro.io.DatumWriter<Customer> 399 | WRITER$ = (org.apache.avro.io.DatumWriter<Customer>)MODEL$.createDatumWriter(SCHEMA$); 400 | 401 | @Override public void writeExternal(java.io.ObjectOutput out) 402 | throws java.io.IOException { 403 | WRITER$.write(this, SpecificData.getEncoder(out)); 404 | } 405 | 406 | @SuppressWarnings("unchecked") 407 | private static final org.apache.avro.io.DatumReader<Customer> 408 | READER$ = (org.apache.avro.io.DatumReader<Customer>)MODEL$.createDatumReader(SCHEMA$); 409 | 410 | @Override public void readExternal(java.io.ObjectInput in) 411 | throws java.io.IOException { 412 | READER$.read(this, SpecificData.getDecoder(in)); 413 | } 414 | 415 | @Override protected boolean hasCustomCoders() { return true; } 416 | 417 | @Override public void customEncode(org.apache.avro.io.Encoder out) 418 | throws java.io.IOException 419 | { 420 | out.writeInt(this.id); 421 | 422 | out.writeString(this.name); 423 | 424 | if (this.email == null) { 425 | out.writeIndex(1); 426 | out.writeNull(); 427 | } else { 428 | out.writeIndex(0); 429 | out.writeString(this.email); 430 | } 431 | 432 | } 433 | 434 | @Override public void customDecode(org.apache.avro.io.ResolvingDecoder in) 435 | throws java.io.IOException 436 | { 437 | org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff(); 438 | if (fieldOrder == null) { 439 | this.id = in.readInt(); 440 | 441 | this.name = in.readString(this.name instanceof Utf8 ? (Utf8)this.name : null); 442 | 443 | if (in.readIndex() != 0) { 444 | in.readNull(); 445 | this.email = null; 446 | } else { 447 | this.email = in.readString(this.email instanceof Utf8 ? (Utf8)this.email : null); 448 | } 449 | 450 | } else { 451 | for (int i = 0; i < 3; i++) { 452 | switch (fieldOrder[i].pos()) { 453 | case 0: 454 | this.id = in.readInt(); 455 | break; 456 | 457 | case 1: 458 | this.name = in.readString(this.name instanceof Utf8 ? (Utf8)this.name : null); 459 | break; 460 | 461 | case 2: 462 | if (in.readIndex() != 0) { 463 | in.readNull(); 464 | this.email = null; 465 | } else { 466 | this.email = in.readString(this.email instanceof Utf8 ?
(Utf8)this.email : null); 467 | } 468 | break; 469 | 470 | default: 471 | throw new java.io.IOException("Corrupt ResolvingDecoder."); 472 | } 473 | } 474 | } 475 | } 476 | } 477 | 478 | 479 | 480 | 481 | 482 | 483 | 484 | 485 | 486 | 487 | -------------------------------------------------------------------------------- /avro/customer.avsc: -------------------------------------------------------------------------------- 1 | { 2 | "namespace": "com.kafkadefinitiveguide.producer.serializer.avroserializer", 3 | "type": "record", 4 | "name": "Customer", 5 | "fields": [ 6 | {"name": "id", "type": "int"}, 7 | {"name": "name", "type": "string"}, 8 | {"name": "email", "type": ["string", "null"], "default": "null"} 9 | ] 10 | } -------------------------------------------------------------------------------- /build.gradle: -------------------------------------------------------------------------------- 1 | subprojects { 2 | apply plugin: 'java' 3 | 4 | version = '1.0' 5 | 6 | buildDir = new File(rootProject.projectDir, "build/" + project.name) 7 | 8 | repositories { 9 | mavenCentral() 10 | jcenter() 11 | } 12 | 13 | compileJava { 14 | sourceCompatibility = '1.8' 15 | } 16 | 17 | sourceSets { 18 | main { 19 | java.srcDirs = [file('src/java')] 20 | resources.srcDirs = [file('src/resources')] 21 | } 22 | test { 23 | java.srcDirs = [file('test/unit')] 24 | resources.srcDirs = [file('test/resources')] 25 | } 26 | } 27 | 28 | jar { 29 | manifest { 30 | attributes('Implementation-Title': project.name, 31 | 'Implementation-Version': version) 32 | } 33 | } 34 | 35 | dependencies { 36 | compile group: 'org.apache.kafka', name: 'kafka-clients', version: '2.5.0' 37 | compile group: 'com.google.code.gson', name: 'gson', version: '2.3.1' 38 | compile group: 'com.google.guava', name: 'guava', version: 'r05' 39 | compile group: 'org.apache.logging.log4j', name: 'log4j-core', version: '2.13.3' 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /docs/command_memo.md: -------------------------------------------------------------------------------- 1 | # Command Memo 2 | 3 | - **ZooKeeper** 4 | - Path: `/Users/wuyichen/zookeeper-3.4.14/bin` 5 | - Commands 6 | - Start: `sh zkServer.sh start` 7 | - Stop: `sh zkServer.sh stop` 8 | - Port: 2181 9 | - **Kafka** 10 | - Path: `/Users/wuyichen/kafka_2.11-2.2.0/bin` 11 | - Commands 12 | - Start: `sh kafka-server-start.sh ../config/server.properties` 13 | - Stop: Press Ctrl+C 14 | - List all topics: `bash kafka-topics.sh --list --zookeeper localhost:2181` 15 | - Create a topic: `bash kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic <topic_name>` 16 | - Produce a message to a topic: `bash kafka-console-producer.sh --broker-list localhost:9092 --topic <topic_name>` 17 | - Consume a message from a topic: `bash kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic <topic_name> --from-beginning` 18 | - Port: 9092 19 | - **Kafka Connector Worker** 20 | - Path: `/Users/wuyichen/kafka_2.11-2.2.0/bin` 21 | - Commands 22 | - Start: `sh connect-distributed.sh ../config/connect-distributed.properties` 23 | - Stop: Press Ctrl+C 24 | - Port: 8083 25 | -------------------------------------------------------------------------------- /docs/installation.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | - [**Install Apache ZooKeeper**](#install-apache-zookeeper) 3 | - [**Install Apache Kafka**](#install-apache-kafka) 4 | 5 | --- 6 | 7 | ## Install Apache ZooKeeper 8 |
### Step 1: Verify Java Installation 9 | Use this command to verify the Java environment: 10 | ``` 11 | java -version 12 | ``` 13 | 14 | ### Step 2: Install ZooKeeper 15 | #### Step 2.1: Download ZooKeeper 16 | Use this [**link**](http://zookeeper.apache.org/releases.html) for downloading the latest version of ZooKeeper. 17 | 18 | #### Step 2.2: Extract the tar file 19 | ``` 20 | tar -zxf zookeeper-3.4.14.tar.gz 21 | cd zookeeper-3.4.14 22 | ``` 23 | 24 | #### Step 2.3: Create a directory for data 25 | Create a directory for storing the snapshot: 26 | ``` 27 | mkdir data 28 | ``` 29 | 30 | #### Step 2.4: Create configuration file 31 | Create **zoo.cfg** under the **conf** directory: 32 | ``` 33 | nano conf/zoo.cfg 34 | ``` 35 | Set the content of **zoo.cfg**: 36 | ``` 37 | tickTime = 2000 38 | dataDir = /Users/wuyichen/zookeeper-3.4.14/data (Change this path for your case, see Step 2.3) 39 | clientPort = 2181 40 | initLimit = 5 41 | syncLimit = 2 42 | ``` 43 | 44 | #### Step 2.5: Start ZooKeeper server 45 | ``` 46 | sh bin/zkServer.sh start 47 | ``` 48 | After executing this command, you should see the following message: 49 | ``` 50 | ZooKeeper JMX enabled by default 51 | Using config: /Users/wuyichen/zookeeper-3.4.14/bin/../conf/zoo.cfg 52 | -n Starting zookeeper ... 53 | STARTED 54 | ``` 55 | 56 | #### Other useful commands 57 | - Start ZooKeeper client: `sh bin/zkCli.sh` 58 | - Stop ZooKeeper server: `sh bin/zkServer.sh stop` 59 | - Check ZooKeeper server status: `sh bin/zkServer.sh status` 60 | 61 | --- 62 | 63 | ## Install Apache Kafka 64 | ### Step 1: Verify Java Installation 65 | Use this command to verify the Java environment: 66 | ``` 67 | java -version 68 | ``` 69 | 70 | ### Step 2: Install ZooKeeper 71 | ZooKeeper is a prerequisite for Kafka; you need to install it before running Kafka. 72 | 73 | ### Step 3: Install Kafka 74 | #### Step 3.1: Download Kafka 75 | Use this [**link**](https://kafka.apache.org/downloads) for downloading the latest version of Kafka. Use the binary downloads like "kafka_a.b-x.y.z.tgz".
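For example, to download the 2.2.0 release built for Scala 2.11 (the version used elsewhere in this guide) from the Apache archive (a sketch; the exact version and archive URL are assumptions, substitute the release you actually picked):
```
wget https://archive.apache.org/dist/kafka/2.2.0/kafka_2.11-2.2.0.tgz
```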
76 | 77 | #### Step 3.2: Extract the tar file 78 | ``` 79 | tar -zxf kafka_2.11-2.2.0.tgz 80 | cd kafka_2.11-2.2.0 81 | ``` 82 | 83 | #### Step 3.3: Start Kafka server 84 | ``` 85 | sh bin/kafka-server-start.sh config/server.properties 86 | ``` 87 | **NOTE: Please make sure the ZooKeeper server is also running.** 88 | -------------------------------------------------------------------------------- /docs/kafka_connect.md: -------------------------------------------------------------------------------- 1 | # Kafka Connect 2 | 3 | - [**General Setup**](#general-setup) 4 | - [Install And Run Zookeeper And Kafka](#step-1-install-and-run-zookeeper-and-kafka) 5 | - [Setup The Directory for Connector Plugins](#step-2-setup-the-directory-for-connector-plugins) 6 | - [Configure Kafka Connect worker](#step-3-configure-kafka-connect-worker) 7 | - [Run Kafka Connect worker](#step-4-run-kafka-connect-worker) 8 | - [Manage connectors by Kafka Connect REST APIs](#step-5-manage-connectors-by-kafka-connect-rest-apis) 9 | - [**Connect to MySQL (Debezium)**](#connect-to-mysql-debezium) 10 | - [Download And Deploy MySQL Connector Plugin](#step-1-download-and-deploy-mysql-connector-plugin) 11 | - [Enable MySQL Binary Logging (binlog)](#step-2-enable-mysql-binary-logging-binlog) 12 | - [Create Database And Tables](#step-3-create-database-and-tables-for-demo-only) 13 | - [Create Connector](#step-4-create-connector) 14 | - [Verify](#step-5-verify) 15 | - [**References**](#references) 16 | 17 | ## General Setup 18 | ### Step 1: Install And Run Zookeeper And Kafka 19 | ### Step 2: Setup The Directory for Connector Plugins 20 | - To connect with a certain data store, you may need a specific connector plugin. To load those connector plugins, you have to create a directory for them. 21 | ```bash 22 | mkdir /plugins 23 | ``` 24 | - To let Kafka Connect know where the plugin directory is, you have to change the configuration of the Kafka Connect worker. 25 | 26 | **connect-distributed.properties** 27 | ```bash 28 | plugin.path=/plugins 29 | ``` 30 | ### Step 3: Configure Kafka Connect worker 31 | - The Kafka Connect worker is configured by the configuration file `connect-distributed.properties`. 32 | - The `config` directory has the `connect-distributed.properties` file by default; you can modify it directly. 33 | - Available properties: 34 | | Property | Description | 35 | |---|---| 36 | | `bootstrap.servers` | A list of Kafka brokers that Connect will work with. | 37 | | `group.id` | All workers with the same group ID are part of the same Connect cluster. | 38 | | `key.converter` | The converter for keys. | 39 | | `value.converter` | The converter for values. | 40 | | `key.converter.schemas.enable` | Enable the schema for the key converter. | 41 | | `value.converter.schemas.enable` | Enable the schema for the value converter. | 42 | | `rest.host.name` | The hostname of the REST API for configuring and monitoring Connect. | 43 | | `rest.port` | The port of the REST API for configuring and monitoring Connect. | 44 | | `plugin.path` | A list of directories for loading connector plugins.
| 45 | - Example of `connect-distributed.properties` 46 | ```properties 47 | bootstrap.servers=localhost:9092 48 | group.id=connect-cluster 49 | key.converter=org.apache.kafka.connect.json.JsonConverter 50 | value.converter=org.apache.kafka.connect.json.JsonConverter 51 | key.converter.schemas.enable=true 52 | value.converter.schemas.enable=true 53 | offset.storage.topic=connect-offsets 54 | offset.storage.replication.factor=1 55 | config.storage.topic=connect-configs 56 | config.storage.replication.factor=1 57 | status.storage.topic=connect-status 58 | status.storage.replication.factor=1 59 | offset.flush.interval.ms=10000 60 | plugin.path=/Users/wuyichen/kafka_2.11-2.2.0/plugins 61 | ``` 62 | ### Step 4: Run Kafka Connect Worker 63 | ```bash 64 | cd bin 65 | sh connect-distributed.sh ../config/connect-distributed.properties 66 | ``` 67 | ### Step 5: Manage Connectors by Kafka Connect REST APIs 68 | - Common APIs 69 | | Method | URL | Body | Description | 70 | |---|---|---|---| 71 | | GET | `http://localhost:8083/` | | Check that the worker is running. | 72 | | GET | `http://localhost:8083/connector-plugins` | | Display all the available connector plugins. | 73 | | POST | `http://localhost:8083/connectors` | Connector configuration (JSON) | Add a new connector. | 74 | | GET | `http://localhost:8083/connectors` | | Display all the running connectors. | 75 | | GET | `http://localhost:8083/connectors/<connector_name>` | | Display the detailed info of a connector. | 76 | | DELETE | `http://localhost:8083/connectors/<connector_name>` | | Delete a connector. | 77 | 78 | ## Connect to MySQL (Debezium) 79 | ### Step 1: Download And Deploy MySQL Connector Plugin 80 | - Download the Debezium MySQL Connector plugin from [here](https://repo1.maven.org/maven2/io/debezium/debezium-connector-mysql/). 81 | - Extract the Debezium MySQL Connector plugin into the plugins directory. 82 | ```bash 83 | cd /plugins 84 | tar -zxf debezium-connector-mysql-1.1.2.Final-plugin.tar.gz 85 | ``` 86 | ### Step 2: Enable MySQL Binary Logging (binlog) 87 | - MySQL binary logging is not enabled by default. You have to enable it manually. 88 | - Add these properties to the `mysqld` section of `my.cnf`. 89 | ```cnf 90 | [mysqld] 91 | log-bin=mysql-bin 92 | server-id=1 93 | binlog-format=row 94 | ``` 95 | - Restart MySQL server. 96 | - Verify that binary logging is enabled by checking that the `log_bin` variable is `ON`. 97 | ```sql 98 | show variables like 'log_bin'; 99 | ``` 100 | - Troubleshooting 101 | - You may need to create a new `my.cnf` file if it does not exist. 102 | - You can specify the path of the new `my.cnf` file in MySQL Workbench by clicking the wrench icon next to "INSTANCE". 103 | ![](https://github.com/wuyichen24/kafka-definitive-guide/blob/master/docs/pics/mysql-configuration-file-path.png) 104 | - You can use MySQL Workbench to create a new `my.cnf` file by clicking the "Options File" under "INSTANCE". 105 | ![](https://github.com/wuyichen24/kafka-definitive-guide/blob/master/docs/pics/mysql-binlog.png) 106 | - If restarting the MySQL server does not apply the new parameter values from `my.cnf` to the system variables, you need to start the MySQL server from the command line.
107 | ```bash 108 | sudo /usr/local/mysql/bin/mysqld_safe 109 | (Press Control-Z) 110 | bg 111 | ``` 112 | ### Step 3: Create Database And Tables (For Demo Only) 113 | - Assume you have a database (schema): testdb 114 | - Assume you have the tables: 115 | - customers 116 | - orders 117 | 118 | ### Step 4: Create Connector 119 | - Compose the connector configuration in JSON: 120 | ```json 121 | { 122 | "name": "mysql-source-connector", 123 | "config": { 124 | "connector.class": "io.debezium.connector.mysql.MySqlConnector", 125 | "tasks.max": "1", 126 | "database.hostname": "localhost", 127 | "database.port": "3306", 128 | "database.user": "root", 129 | "database.password": "6ytow2-;S3lA", 130 | "database.server.id": "001", 131 | "database.server.name": "mysqlserver1", 132 | "database.whitelist": "testdb", 133 | "database.serverTimezone": "UTC", 134 | "database.history.kafka.bootstrap.servers": "localhost:9092", 135 | "database.history.kafka.topic": "schema-changes.testdb" 136 | } 137 | } 138 | ``` 139 | - Explanation of parameters 140 | | Parameter | Description | 141 | |---|---| 142 | | `name` | The unique name of the connector. | 143 | | `connector.class` | The name of the Java class for the connector. Always use a value of `io.debezium.connector.mysql.MySqlConnector` for the MySQL connector. | 144 | | `tasks.max` | The maximum number of tasks that should be created for this connector. The MySQL connector always uses a single task and therefore does not use this value, so the default is always acceptable. | 145 | | `database.hostname` | The IP address or hostname of the MySQL database server. | 146 | | `database.port` | The port of the MySQL database server. | 147 | | `database.user` | The username for connecting to the MySQL database server. | 148 | | `database.password` | The password for connecting to the MySQL database server. | 149 | | `database.server.id` | The numeric ID of the MySQL database server. | 150 | | `database.server.name` | The name of the MySQL database server. This name is the logical identifier for the MySQL server or cluster of servers. This name will be used as the prefix for all Kafka topics. | 151 | | `database.whitelist` | The list of databases (schemas) that will be monitored (comma-separated). | 152 | | `database.blacklist` | The list of databases (schemas) that will be excluded from monitoring (comma-separated). | 153 | | `table.whitelist` | The list of tables that will be monitored (comma-separated). | 154 | | `table.blacklist` | The list of tables that will be excluded from monitoring (comma-separated). | 155 | | `database.serverTimezone` | The timezone of the MySQL database server. If not specified, the MySQL database server will throw an error like "The server time zone value 'XXX' is unrecognized or represents more than one time zone. You must configure either the server or JDBC driver (via the serverTimezone configuration property) to use a more specifc time zone value if you want to utilize time zone support." | 156 | | `database.history.kafka.bootstrap.servers` | The list of the hostname and port pairs for Kafka brokers. | 157 | | `database.history.kafka.topic` | The topic to store the schema change history of the database. | 158 | 159 | For more parameters, check this [page](https://debezium.io/documentation/reference/1.1/connectors/mysql.html#mysql-connector-configuration-properties_debezium).
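For illustration, a minimal `curl` sketch of the request described in the next step (it assumes the JSON configuration above is saved as `mysql-source-connector.json`; the file name is hypothetical, and the worker is assumed to run on its default port 8083):
```bash
# Register the connector with the Connect worker's REST API
curl -X POST -H "Content-Type: application/json" \
  --data @mysql-source-connector.json \
  http://localhost:8083/connectors
```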
160 | - Make an HTTP request to the Kafka Connect worker to create this new connector (e.g., with the `curl` sketch above). 161 | | Method | URL | Body | 162 | |---|---|---| 163 | | POST | `http://localhost:8083/connectors` | Connector configuration (JSON) | 164 | 165 | ### Step 5: Verify 166 | - Insert new records into the `customers` table and `orders` table (for this demo only). 167 | - List all the topics from Kafka and make sure you can see these topics (for this demo only): 168 | - `mysqlserver1.testdb.customers` 169 | - `mysqlserver1.testdb.orders` 170 | - If there is no record in a table, the connector will not create a topic for that table; the connector creates a topic for a table only when there is a change to that table. 171 | - Consume the messages in those topics; you can see that the changes to those tables are captured as JSON messages. 172 | ```json 173 | { 174 | "schema":{ 175 | "type":"struct", 176 | "fields":[ 177 | { 178 | "type":"struct", 179 | "fields":[ 180 | { 181 | "type":"int64", 182 | "optional":false, 183 | "field":"id" 184 | }, 185 | { 186 | "type":"string", 187 | "optional":true, 188 | "field":"first_name" 189 | }, 190 | { 191 | "type":"string", 192 | "optional":true, 193 | "field":"last_name" 194 | }, 195 | { 196 | "type":"string", 197 | "optional":true, 198 | "field":"email" 199 | } 200 | ], 201 | "optional":true, 202 | "name":"mysqlserver1.testdb.customers.Value", 203 | "field":"before" 204 | }, 205 | { 206 | "type":"struct", 207 | "fields":[ 208 | { 209 | "type":"int64", 210 | "optional":false, 211 | "field":"id" 212 | }, 213 | { 214 | "type":"string", 215 | "optional":true, 216 | "field":"first_name" 217 | }, 218 | { 219 | "type":"string", 220 | "optional":true, 221 | "field":"last_name" 222 | }, 223 | { 224 | "type":"string", 225 | "optional":true, 226 | "field":"email" 227 | } 228 | ], 229 | "optional":true, 230 | "name":"mysqlserver1.testdb.customers.Value", 231 | "field":"after" 232 | }, 233 | { 234 | "type":"struct", 235 | "fields":[ 236 | { 237 | "type":"string", 238 | "optional":false, 239 | "field":"version" 240 | }, 241 | { 242 | "type":"string", 243 | "optional":false, 244 | "field":"connector" 245 | }, 246 | { 247 | "type":"string", 248 | "optional":false, 249 | "field":"name" 250 | }, 251 | { 252 | "type":"int64", 253 | "optional":false, 254 | "field":"ts_ms" 255 | }, 256 | { 257 | "type":"string", 258 | "optional":true, 259 | "name":"io.debezium.data.Enum", 260 | "version":1, 261 | "parameters":{ 262 | "allowed":"true,last,false" 263 | }, 264 | "default":"false", 265 | "field":"snapshot" 266 | }, 267 | { 268 | "type":"string", 269 | "optional":false, 270 | "field":"db" 271 | }, 272 | { 273 | "type":"string", 274 | "optional":true, 275 | "field":"table" 276 | }, 277 | { 278 | "type":"int64", 279 | "optional":false, 280 | "field":"server_id" 281 | }, 282 | { 283 | "type":"string", 284 | "optional":true, 285 | "field":"gtid" 286 | }, 287 | { 288 | "type":"string", 289 | "optional":false, 290 | "field":"file" 291 | }, 292 | { 293 | "type":"int64", 294 | "optional":false, 295 | "field":"pos" 296 | }, 297 | { 298 | "type":"int32", 299 | "optional":false, 300 | "field":"row" 301 | }, 302 | { 303 | "type":"int64", 304 | "optional":true, 305 | "field":"thread" 306 | }, 307 | { 308 | "type":"string", 309 | "optional":true, 310 | "field":"query" 311 | } 312 | ], 313 | "optional":false, 314 | "name":"io.debezium.connector.mysql.Source", 315 | "field":"source" 316 | }, 317 | { 318 | "type":"string", 319 | "optional":false, 320 | "field":"op" 321 | }, 322 | { 323 | "type":"int64", 324 |
"optional":true, 325 | "field":"ts_ms" 326 | }, 327 | { 328 | "type":"struct", 329 | "fields":[ 330 | { 331 | "type":"string", 332 | "optional":false, 333 | "field":"id" 334 | }, 335 | { 336 | "type":"int64", 337 | "optional":false, 338 | "field":"total_order" 339 | }, 340 | { 341 | "type":"int64", 342 | "optional":false, 343 | "field":"data_collection_order" 344 | } 345 | ], 346 | "optional":true, 347 | "field":"transaction" 348 | } 349 | ], 350 | "optional":false, 351 | "name":"mysqlserver1.testdb.customers.Envelope" 352 | }, 353 | "payload":{ 354 | "before":null, 355 | "after":{ 356 | "id":2, 357 | "first_name":"Joe", 358 | "last_name":"Doe", 359 | "email":"joedoe@gmail.com" 360 | }, 361 | "source":{ 362 | "version":"1.1.2.Final", 363 | "connector":"mysql", 364 | "name":"localdb", 365 | "ts_ms":1592006840000, 366 | "snapshot":"false", 367 | "db":"debezium", 368 | "table":"customers", 369 | "server_id":1, 370 | "gtid":null, 371 | "file":"mysql-bin.000002", 372 | "pos":666, 373 | "row":0, 374 | "thread":3, 375 | "query":null 376 | }, 377 | "op":"c", 378 | "ts_ms":1592006840764, 379 | "transaction":null 380 | } 381 | } 382 | ``` 383 | 384 | ## References 385 | - [Debezium Tutorial](https://debezium.io/documentation/reference/1.1/tutorial.html) 386 | - [Debezium Connector for MySQL](https://debezium.io/documentation/reference/1.1/connectors/mysql.html) 387 | - [MySQL Setting the Replication Master Configuration](https://dev.mysql.com/doc/refman/5.7/en/replication-howto-masterbaseconfig.html) 388 | - [Debezium MySQL Source Connector for Confluent Platform](https://docs.confluent.io/current/connect/debezium-connect-mysql/index.html) 389 | -------------------------------------------------------------------------------- /docs/pics/book-cover.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wuyichen24/kafka-definitive-guide/3375555e46a6ba7cb78b05e40ba64d7af85a0faa/docs/pics/book-cover.jpg -------------------------------------------------------------------------------- /docs/pics/mysql-binlog.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wuyichen24/kafka-definitive-guide/3375555e46a6ba7cb78b05e40ba64d7af85a0faa/docs/pics/mysql-binlog.png -------------------------------------------------------------------------------- /docs/pics/mysql-configuration-file-path.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wuyichen24/kafka-definitive-guide/3375555e46a6ba7cb78b05e40ba64d7af85a0faa/docs/pics/mysql-configuration-file-path.png -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wuyichen24/kafka-definitive-guide/3375555e46a6ba7cb78b05e40ba64d7af85a0faa/gradle/wrapper/gradle-wrapper.jar -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionBase=GRADLE_USER_HOME 2 | distributionPath=wrapper/dists 3 | zipStoreBase=GRADLE_USER_HOME 4 | zipStorePath=wrapper/dists 5 | distributionUrl=https\://services.gradle.org/distributions/gradle-4.3.1-bin.zip 6 | -------------------------------------------------------------------------------- 
/kafka-definitive-guide-chapter3/.gitignore: -------------------------------------------------------------------------------- 1 | /bin/ 2 | -------------------------------------------------------------------------------- /kafka-definitive-guide-chapter3/build.gradle: -------------------------------------------------------------------------------- 1 | repositories { 2 | maven { 3 | url 'https://packages.confluent.io/maven/' 4 | } 5 | } 6 | 7 | dependencies { 8 | compile group: 'org.apache.avro', name: 'avro', version: '1.9.2' 9 | compile group: 'io.confluent', name: 'kafka-avro-serializer', version: '5.5.0' 10 | } -------------------------------------------------------------------------------- /kafka-definitive-guide-chapter3/src/java/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wuyichen24/kafka-definitive-guide/3375555e46a6ba7cb78b05e40ba64d7af85a0faa/kafka-definitive-guide-chapter3/src/java/.keep -------------------------------------------------------------------------------- /kafka-definitive-guide-chapter3/src/java/com/kafkadefinitiveguide/producer/partitioner/custompartitioner/BananaPartitioner.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Wuyi Chen. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.kafkadefinitiveguide.producer.partitioner.custompartitioner; 17 | 18 | import java.util.List; 19 | import java.util.Map; 20 | 21 | import org.apache.kafka.clients.producer.Partitioner; 22 | import org.apache.kafka.common.Cluster; 23 | import org.apache.kafka.common.PartitionInfo; 24 | import org.apache.kafka.common.utils.Utils; 25 | 26 | /** 27 | * Custom partitioner that routes specific keys to a certain partition. 28 | * 29 | * @author Wuyi Chen 30 | * @date 06/03/2020 31 | * @version 1.0 32 | * @since 1.0 33 | */ 34 | public class BananaPartitioner implements Partitioner { 35 | public void configure(Map<String, ?> configs) {} 36 | 37 | public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { 38 | List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); 39 | int numPartitions = partitions.size(); 40 | 41 | if ((keyBytes == null) || (!(key instanceof String))) { // Only expect String keys, so we throw an exception if that is not the case.
42 | throw new IllegalArgumentException("We expect all messages to have customer name as key"); 43 | } 44 | 45 | if (((String) key).equals("Banana")) { // Banana will always go to last partition 46 | return numPartitions - 1; 47 | } 48 | 49 | return (Math.abs(Utils.murmur2(keyBytes)) % (numPartitions - 1)); // Other records will get hashed to the rest of the partitions 50 | } 51 | 52 | public void close() {} 53 | } 54 | -------------------------------------------------------------------------------- /kafka-definitive-guide-chapter3/src/java/com/kafkadefinitiveguide/producer/send/AsynchronouslySend.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Wuyi Chen. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.kafkadefinitiveguide.producer.send; 17 | 18 | import java.util.Properties; 19 | 20 | import org.apache.kafka.clients.producer.KafkaProducer; 21 | import org.apache.kafka.clients.producer.ProducerRecord; 22 | 23 | /** 24 | * Send messages asynchronously. 25 | * 26 | * @author Wuyi Chen 27 | * @date 06/03/2020 28 | * @version 1.0 29 | * @since 1.0 30 | */ 31 | public class AsynchronouslySend { 32 | public static void main(String[] args) { 33 | Properties kafkaProps = new Properties(); 34 | kafkaProps.put("bootstrap.servers", "localhost:9092"); 35 | kafkaProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 36 | kafkaProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 37 | 38 | try (KafkaProducer producer = new KafkaProducer<>(kafkaProps)) { 39 | ProducerRecord record = new ProducerRecord<>("CustomerCountry", "Biomedical Materials", "USA"); 40 | producer.send(record, new DemoProducerCallback()); // Use callback to get result of sending asynchronously. 41 | } catch (Exception e) { 42 | e.printStackTrace(); 43 | } 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /kafka-definitive-guide-chapter3/src/java/com/kafkadefinitiveguide/producer/send/DemoProducerCallback.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Wuyi Chen. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
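 */

// (Added note, not in the original source) Kafka invokes onCompletion() on the
// producer's I/O thread, so the callback below should stay lightweight; heavy
// work here would delay sends for every in-flight batch.

/*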
15 | */ 16 | package com.kafkadefinitiveguide.producer.send; 17 | 18 | import org.apache.kafka.clients.producer.Callback; 19 | import org.apache.kafka.clients.producer.RecordMetadata; 20 | 21 | /** 22 | * Callback for asynchronous send. 23 | * 24 | * @author Wuyi Chen 25 | * @date 06/03/2020 26 | * @version 1.0 27 | * @since 1.0 28 | */ 29 | public class DemoProducerCallback implements Callback { 30 | @Override 31 | public void onCompletion(RecordMetadata recordMetadata, Exception e) { 32 | if (e != null) { 33 | e.printStackTrace(); 34 | } 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /kafka-definitive-guide-chapter3/src/java/com/kafkadefinitiveguide/producer/send/FireAndForgetSend.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Wuyi Chen. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.kafkadefinitiveguide.producer.send; 17 | 18 | import java.util.Properties; 19 | 20 | import org.apache.kafka.clients.producer.KafkaProducer; 21 | import org.apache.kafka.clients.producer.ProducerRecord; 22 | 23 | /** 24 | * Send messages in fire-and-forget style. 25 | * 26 | * @author Wuyi Chen 27 | * @date 06/03/2020 28 | * @version 1.0 29 | * @since 1.0 30 | */ 31 | public class FireAndForgetSend { 32 | public static void main(String[] args) { 33 | Properties kafkaProps = new Properties(); 34 | kafkaProps.put("bootstrap.servers", "localhost:9092"); 35 | kafkaProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 36 | kafkaProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 37 | 38 | ProducerRecord record = new ProducerRecord<>("CustomerCountry", "Precision Products", "France"); 39 | 40 | try (KafkaProducer producer = new KafkaProducer<>(kafkaProps)) { 41 | producer.send(record); // Ignore the returned value, no way to know the message was sent successfully or not. 42 | } catch (Exception e) { 43 | // If the producer encountered errors before sending the message to Kafka. 44 | e.printStackTrace(); 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /kafka-definitive-guide-chapter3/src/java/com/kafkadefinitiveguide/producer/send/SynchronouslySend.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Wuyi Chen. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.kafkadefinitiveguide.producer.send; 17 | 18 | import java.util.Properties; 19 | 20 | import org.apache.kafka.clients.producer.KafkaProducer; 21 | import org.apache.kafka.clients.producer.ProducerRecord; 22 | 23 | /** 24 | * Send messages synchronously. 25 | * 26 | * @author Wuyi Chen 27 | * @date 06/03/2020 28 | * @version 1.0 29 | * @since 1.0 30 | */ 31 | public class SynchronouslySend { 32 | public static void main(String[] args) { 33 | Properties kafkaProps = new Properties(); 34 | kafkaProps.put("bootstrap.servers", "localhost:9092"); 35 | kafkaProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 36 | kafkaProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 37 | 38 | ProducerRecord record = new ProducerRecord<>("CustomerCountry", "Precision Products", "France"); 39 | 40 | try (KafkaProducer producer = new KafkaProducer<>(kafkaProps)) { 41 | producer.send(record).get(); // get() will wait for a reply from Kafka and will throw an exception if the record is not sent successfully to Kafka. 42 | } catch (Exception e) { 43 | // If the producer encountered errors before sending the message to Kafka. 44 | e.printStackTrace(); 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /kafka-definitive-guide-chapter3/src/java/com/kafkadefinitiveguide/producer/serializer/avroserializer/Customer.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Autogenerated by Avro 3 | * 4 | * DO NOT EDIT DIRECTLY 5 | */ 6 | package com.kafkadefinitiveguide.producer.serializer.avroserializer; 7 | 8 | import org.apache.avro.generic.GenericArray; 9 | import org.apache.avro.specific.SpecificData; 10 | import org.apache.avro.util.Utf8; 11 | import org.apache.avro.message.BinaryMessageEncoder; 12 | import org.apache.avro.message.BinaryMessageDecoder; 13 | import org.apache.avro.message.SchemaStore; 14 | 15 | @org.apache.avro.specific.AvroGenerated 16 | public class Customer extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { 17 | private static final long serialVersionUID = -2681329921521656134L; 18 | public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Customer\",\"namespace\":\"com.kafkadefinitiveguide.producer.serializer.avroserializer\",\"fields\":[{\"name\":\"id\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"email\",\"type\":[\"string\",\"null\"],\"default\":\"null\"}]}"); 19 | public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } 20 | 21 | private static SpecificData MODEL$ = new SpecificData(); 22 | 23 | private static final BinaryMessageEncoder ENCODER = 24 | new BinaryMessageEncoder(MODEL$, SCHEMA$); 25 | 26 | private static final BinaryMessageDecoder DECODER = 27 | new BinaryMessageDecoder(MODEL$, SCHEMA$); 28 | 29 | /** 30 | * Return the BinaryMessageEncoder instance used by this class. 31 | * @return the message encoder used by this class 32 | */ 33 | public static BinaryMessageEncoder getEncoder() { 34 | return ENCODER; 35 | } 36 | 37 | /** 38 | * Return the BinaryMessageDecoder instance used by this class. 
39 | * @return the message decoder used by this class 40 | */ 41 | public static BinaryMessageDecoder getDecoder() { 42 | return DECODER; 43 | } 44 | 45 | /** 46 | * Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}. 47 | * @param resolver a {@link SchemaStore} used to find schemas by fingerprint 48 | * @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore 49 | */ 50 | public static BinaryMessageDecoder createDecoder(SchemaStore resolver) { 51 | return new BinaryMessageDecoder(MODEL$, SCHEMA$, resolver); 52 | } 53 | 54 | /** 55 | * Serializes this Customer to a ByteBuffer. 56 | * @return a buffer holding the serialized data for this instance 57 | * @throws java.io.IOException if this instance could not be serialized 58 | */ 59 | public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException { 60 | return ENCODER.encode(this); 61 | } 62 | 63 | /** 64 | * Deserializes a Customer from a ByteBuffer. 65 | * @param b a byte buffer holding serialized data for an instance of this class 66 | * @return a Customer instance decoded from the given buffer 67 | * @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class 68 | */ 69 | public static Customer fromByteBuffer( 70 | java.nio.ByteBuffer b) throws java.io.IOException { 71 | return DECODER.decode(b); 72 | } 73 | 74 | private int id; 75 | private java.lang.CharSequence name; 76 | private java.lang.CharSequence email; 77 | 78 | /** 79 | * Default constructor. Note that this does not initialize fields 80 | * to their default values from the schema. If that is desired then 81 | * one should use newBuilder(). 82 | */ 83 | public Customer() {} 84 | 85 | /** 86 | * All-args constructor. 87 | * @param id The new value for id 88 | * @param name The new value for name 89 | * @param email The new value for email 90 | */ 91 | public Customer(java.lang.Integer id, java.lang.CharSequence name, java.lang.CharSequence email) { 92 | this.id = id; 93 | this.name = name; 94 | this.email = email; 95 | } 96 | 97 | public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; } 98 | public org.apache.avro.Schema getSchema() { return SCHEMA$; } 99 | // Used by DatumWriter. Applications should not call. 100 | public java.lang.Object get(int field$) { 101 | switch (field$) { 102 | case 0: return id; 103 | case 1: return name; 104 | case 2: return email; 105 | default: throw new org.apache.avro.AvroRuntimeException("Bad index"); 106 | } 107 | } 108 | 109 | // Used by DatumReader. Applications should not call. 110 | @SuppressWarnings(value="unchecked") 111 | public void put(int field$, java.lang.Object value$) { 112 | switch (field$) { 113 | case 0: id = (java.lang.Integer)value$; break; 114 | case 1: name = (java.lang.CharSequence)value$; break; 115 | case 2: email = (java.lang.CharSequence)value$; break; 116 | default: throw new org.apache.avro.AvroRuntimeException("Bad index"); 117 | } 118 | } 119 | 120 | /** 121 | * Gets the value of the 'id' field. 122 | * @return The value of the 'id' field. 123 | */ 124 | public int getId() { 125 | return id; 126 | } 127 | 128 | 129 | /** 130 | * Sets the value of the 'id' field. 131 | * @param value the value to set. 132 | */ 133 | public void setId(int value) { 134 | this.id = value; 135 | } 136 | 137 | /** 138 | * Gets the value of the 'name' field. 139 | * @return The value of the 'name' field. 
140 | */ 141 | public java.lang.CharSequence getName() { 142 | return name; 143 | } 144 | 145 | 146 | /** 147 | * Sets the value of the 'name' field. 148 | * @param value the value to set. 149 | */ 150 | public void setName(java.lang.CharSequence value) { 151 | this.name = value; 152 | } 153 | 154 | /** 155 | * Gets the value of the 'email' field. 156 | * @return The value of the 'email' field. 157 | */ 158 | public java.lang.CharSequence getEmail() { 159 | return email; 160 | } 161 | 162 | 163 | /** 164 | * Sets the value of the 'email' field. 165 | * @param value the value to set. 166 | */ 167 | public void setEmail(java.lang.CharSequence value) { 168 | this.email = value; 169 | } 170 | 171 | /** 172 | * Creates a new Customer RecordBuilder. 173 | * @return A new Customer RecordBuilder 174 | */ 175 | public static com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder newBuilder() { 176 | return new com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder(); 177 | } 178 | 179 | /** 180 | * Creates a new Customer RecordBuilder by copying an existing Builder. 181 | * @param other The existing builder to copy. 182 | * @return A new Customer RecordBuilder 183 | */ 184 | public static com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder newBuilder(com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder other) { 185 | if (other == null) { 186 | return new com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder(); 187 | } else { 188 | return new com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder(other); 189 | } 190 | } 191 | 192 | /** 193 | * Creates a new Customer RecordBuilder by copying an existing Customer instance. 194 | * @param other The existing instance to copy. 195 | * @return A new Customer RecordBuilder 196 | */ 197 | public static com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder newBuilder(com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer other) { 198 | if (other == null) { 199 | return new com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder(); 200 | } else { 201 | return new com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder(other); 202 | } 203 | } 204 | 205 | /** 206 | * RecordBuilder for Customer instances. 207 | */ 208 | @org.apache.avro.specific.AvroGenerated 209 | public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase 210 | implements org.apache.avro.data.RecordBuilder { 211 | 212 | private int id; 213 | private java.lang.CharSequence name; 214 | private java.lang.CharSequence email; 215 | 216 | /** Creates a new Builder */ 217 | private Builder() { 218 | super(SCHEMA$); 219 | } 220 | 221 | /** 222 | * Creates a Builder by copying an existing Builder. 223 | * @param other The existing Builder to copy. 
224 | */ 225 | private Builder(com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder other) { 226 | super(other); 227 | if (isValidValue(fields()[0], other.id)) { 228 | this.id = data().deepCopy(fields()[0].schema(), other.id); 229 | fieldSetFlags()[0] = other.fieldSetFlags()[0]; 230 | } 231 | if (isValidValue(fields()[1], other.name)) { 232 | this.name = data().deepCopy(fields()[1].schema(), other.name); 233 | fieldSetFlags()[1] = other.fieldSetFlags()[1]; 234 | } 235 | if (isValidValue(fields()[2], other.email)) { 236 | this.email = data().deepCopy(fields()[2].schema(), other.email); 237 | fieldSetFlags()[2] = other.fieldSetFlags()[2]; 238 | } 239 | } 240 | 241 | /** 242 | * Creates a Builder by copying an existing Customer instance 243 | * @param other The existing instance to copy. 244 | */ 245 | private Builder(com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer other) { 246 | super(SCHEMA$); 247 | if (isValidValue(fields()[0], other.id)) { 248 | this.id = data().deepCopy(fields()[0].schema(), other.id); 249 | fieldSetFlags()[0] = true; 250 | } 251 | if (isValidValue(fields()[1], other.name)) { 252 | this.name = data().deepCopy(fields()[1].schema(), other.name); 253 | fieldSetFlags()[1] = true; 254 | } 255 | if (isValidValue(fields()[2], other.email)) { 256 | this.email = data().deepCopy(fields()[2].schema(), other.email); 257 | fieldSetFlags()[2] = true; 258 | } 259 | } 260 | 261 | /** 262 | * Gets the value of the 'id' field. 263 | * @return The value. 264 | */ 265 | public int getId() { 266 | return id; 267 | } 268 | 269 | 270 | /** 271 | * Sets the value of the 'id' field. 272 | * @param value The value of 'id'. 273 | * @return This builder. 274 | */ 275 | public com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder setId(int value) { 276 | validate(fields()[0], value); 277 | this.id = value; 278 | fieldSetFlags()[0] = true; 279 | return this; 280 | } 281 | 282 | /** 283 | * Checks whether the 'id' field has been set. 284 | * @return True if the 'id' field has been set, false otherwise. 285 | */ 286 | public boolean hasId() { 287 | return fieldSetFlags()[0]; 288 | } 289 | 290 | 291 | /** 292 | * Clears the value of the 'id' field. 293 | * @return This builder. 294 | */ 295 | public com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder clearId() { 296 | fieldSetFlags()[0] = false; 297 | return this; 298 | } 299 | 300 | /** 301 | * Gets the value of the 'name' field. 302 | * @return The value. 303 | */ 304 | public java.lang.CharSequence getName() { 305 | return name; 306 | } 307 | 308 | 309 | /** 310 | * Sets the value of the 'name' field. 311 | * @param value The value of 'name'. 312 | * @return This builder. 313 | */ 314 | public com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder setName(java.lang.CharSequence value) { 315 | validate(fields()[1], value); 316 | this.name = value; 317 | fieldSetFlags()[1] = true; 318 | return this; 319 | } 320 | 321 | /** 322 | * Checks whether the 'name' field has been set. 323 | * @return True if the 'name' field has been set, false otherwise. 324 | */ 325 | public boolean hasName() { 326 | return fieldSetFlags()[1]; 327 | } 328 | 329 | 330 | /** 331 | * Clears the value of the 'name' field. 332 | * @return This builder. 
333 | */ 334 | public com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder clearName() { 335 | name = null; 336 | fieldSetFlags()[1] = false; 337 | return this; 338 | } 339 | 340 | /** 341 | * Gets the value of the 'email' field. 342 | * @return The value. 343 | */ 344 | public java.lang.CharSequence getEmail() { 345 | return email; 346 | } 347 | 348 | 349 | /** 350 | * Sets the value of the 'email' field. 351 | * @param value The value of 'email'. 352 | * @return This builder. 353 | */ 354 | public com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder setEmail(java.lang.CharSequence value) { 355 | validate(fields()[2], value); 356 | this.email = value; 357 | fieldSetFlags()[2] = true; 358 | return this; 359 | } 360 | 361 | /** 362 | * Checks whether the 'email' field has been set. 363 | * @return True if the 'email' field has been set, false otherwise. 364 | */ 365 | public boolean hasEmail() { 366 | return fieldSetFlags()[2]; 367 | } 368 | 369 | 370 | /** 371 | * Clears the value of the 'email' field. 372 | * @return This builder. 373 | */ 374 | public com.kafkadefinitiveguide.producer.serializer.avroserializer.Customer.Builder clearEmail() { 375 | email = null; 376 | fieldSetFlags()[2] = false; 377 | return this; 378 | } 379 | 380 | @Override 381 | @SuppressWarnings("unchecked") 382 | public Customer build() { 383 | try { 384 | Customer record = new Customer(); 385 | record.id = fieldSetFlags()[0] ? this.id : (java.lang.Integer) defaultValue(fields()[0]); 386 | record.name = fieldSetFlags()[1] ? this.name : (java.lang.CharSequence) defaultValue(fields()[1]); 387 | record.email = fieldSetFlags()[2] ? this.email : (java.lang.CharSequence) defaultValue(fields()[2]); 388 | return record; 389 | } catch (org.apache.avro.AvroMissingFieldException e) { 390 | throw e; 391 | } catch (java.lang.Exception e) { 392 | throw new org.apache.avro.AvroRuntimeException(e); 393 | } 394 | } 395 | } 396 | 397 | @SuppressWarnings("unchecked") 398 | private static final org.apache.avro.io.DatumWriter 399 | WRITER$ = (org.apache.avro.io.DatumWriter)MODEL$.createDatumWriter(SCHEMA$); 400 | 401 | @Override public void writeExternal(java.io.ObjectOutput out) 402 | throws java.io.IOException { 403 | WRITER$.write(this, SpecificData.getEncoder(out)); 404 | } 405 | 406 | @SuppressWarnings("unchecked") 407 | private static final org.apache.avro.io.DatumReader 408 | READER$ = (org.apache.avro.io.DatumReader)MODEL$.createDatumReader(SCHEMA$); 409 | 410 | @Override public void readExternal(java.io.ObjectInput in) 411 | throws java.io.IOException { 412 | READER$.read(this, SpecificData.getDecoder(in)); 413 | } 414 | 415 | @Override protected boolean hasCustomCoders() { return true; } 416 | 417 | @Override public void customEncode(org.apache.avro.io.Encoder out) 418 | throws java.io.IOException 419 | { 420 | out.writeInt(this.id); 421 | 422 | out.writeString(this.name); 423 | 424 | if (this.email == null) { 425 | out.writeIndex(1); 426 | out.writeNull(); 427 | } else { 428 | out.writeIndex(0); 429 | out.writeString(this.email); 430 | } 431 | 432 | } 433 | 434 | @Override public void customDecode(org.apache.avro.io.ResolvingDecoder in) 435 | throws java.io.IOException 436 | { 437 | org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff(); 438 | if (fieldOrder == null) { 439 | this.id = in.readInt(); 440 | 441 | this.name = in.readString(this.name instanceof Utf8 ? 
(Utf8)this.name : null); 442 | 443 | if (in.readIndex() != 0) { 444 | in.readNull(); 445 | this.email = null; 446 | } else { 447 | this.email = in.readString(this.email instanceof Utf8 ? (Utf8)this.email : null); 448 | } 449 | 450 | } else { 451 | for (int i = 0; i < 3; i++) { 452 | switch (fieldOrder[i].pos()) { 453 | case 0: 454 | this.id = in.readInt(); 455 | break; 456 | 457 | case 1: 458 | this.name = in.readString(this.name instanceof Utf8 ? (Utf8)this.name : null); 459 | break; 460 | 461 | case 2: 462 | if (in.readIndex() != 0) { 463 | in.readNull(); 464 | this.email = null; 465 | } else { 466 | this.email = in.readString(this.email instanceof Utf8 ? (Utf8)this.email : null); 467 | } 468 | break; 469 | 470 | default: 471 | throw new java.io.IOException("Corrupt ResolvingDecoder."); 472 | } 473 | } 474 | } 475 | } 476 | } 477 | 478 | 479 | 480 | 481 | 482 | 483 | 484 | 485 | 486 | 487 | -------------------------------------------------------------------------------- /kafka-definitive-guide-chapter3/src/java/com/kafkadefinitiveguide/producer/serializer/avroserializer/CustomerGenerator.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Wuyi Chen. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.kafkadefinitiveguide.producer.serializer.avroserializer; 17 | 18 | public class CustomerGenerator { 19 | public static Customer getNext() { 20 | // TODO Auto-generated method stub 21 | return null; 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /kafka-definitive-guide-chapter3/src/java/com/kafkadefinitiveguide/producer/serializer/avroserializer/CustomerProducer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Wuyi Chen. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.kafkadefinitiveguide.producer.serializer.avroserializer; 17 | 18 | import java.util.Properties; 19 | 20 | import org.apache.kafka.clients.producer.KafkaProducer; 21 | import org.apache.kafka.clients.producer.Producer; 22 | import org.apache.kafka.clients.producer.ProducerRecord; 23 | 24 | /** 25 | * Send Customer (message) by Avro serializer. 26 | * 27 | *
<p>Customer is not a POJO; it is an Avro class generated by avro-tools.jar.
28 |  *
29 |  * @author Wuyi Chen
30 |  * @date 06/03/2020
31 |  * @version 1.0
32 |  * @since 1.0
33 |  */
34 | public class CustomerProducer {
35 |     public static void main(String[] args) {
36 |         Properties props = new Properties();
37 |         props.put("bootstrap.servers", "localhost:9092");
38 |         props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
39 |         props.put("value.serializer", "io.confluent.kafka.serializers.KafkaAvroSerializer");
40 |         props.put("schema.registry.url", "localhost:8081"); // URL points to the schema registry.
41 | 
42 |         String topic = "customerContacts";
43 | 
44 |         Producer<String, Customer> producer = new KafkaProducer<>(props);
45 | 
46 |         // We keep producing new events until someone presses Ctrl-C.
47 |         while (true) {
48 |             Customer customer = CustomerGenerator.getNext();
49 |             System.out.println("Generated customer " + customer.toString());
50 |             ProducerRecord<String, Customer> record = new ProducerRecord<>(topic, customer.getName().toString(), customer);
51 |             producer.send(record);
52 |         }
53 |     }
54 | }
55 | 
--------------------------------------------------------------------------------
/kafka-definitive-guide-chapter3/src/java/com/kafkadefinitiveguide/producer/serializer/avroserializer/GenericAvroRecordProducer.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2020 Wuyi Chen.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *    http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.kafkadefinitiveguide.producer.serializer.avroserializer;
17 | 
18 | import java.util.Properties;
19 | 
20 | import org.apache.avro.Schema;
21 | import org.apache.avro.generic.GenericData;
22 | import org.apache.avro.generic.GenericRecord;
23 | import org.apache.kafka.clients.producer.KafkaProducer;
24 | import org.apache.kafka.clients.producer.Producer;
25 | import org.apache.kafka.clients.producer.ProducerRecord;
26 | 
27 | /**
28 |  * Send GenericRecord (message) by Avro serializer.
29 |  *
30 |  *
<p>Unlike CustomerProducer, this class needs no generated Customer class: the message is a GenericRecord assembled at runtime from a schema string.
31 |  *
32 |  * @author Wuyi Chen
33 |  * @date 06/03/2020
34 |  * @version 1.0
35 |  * @since 1.0
36 |  */
37 | public class GenericAvroRecordProducer {
38 |     public static void main(String[] args) {
39 |         Properties props = new Properties();
40 |         props.put("bootstrap.servers", "localhost:9092");
41 |         props.put("key.serializer", "io.confluent.kafka.serializers.KafkaAvroSerializer");
42 |         props.put("value.serializer", "io.confluent.kafka.serializers.KafkaAvroSerializer");
43 |         props.put("schema.registry.url", "localhost:8081"); // URL points to the schema registry.
44 | 
45 |         String schemaString = "{\"namespace\": \"customerManagement.avro\"," + // Provide the Avro schema
46 |                 "\"type\": \"record\", " +
47 |                 "\"name\": \"Customer\"," +
48 |                 "\"fields\": [" +
49 |                 "{\"name\": \"id\", \"type\": \"int\"}," +
50 |                 "{\"name\": \"name\", \"type\": \"string\"}," +
51 |                 "{\"name\": \"email\", \"type\": " + "[\"string\",\"null\"], " +
52 |                 "\"default\":\"null\" }" +
53 |                 "]}";
54 | 
55 |         Producer<String, GenericRecord> producer = new KafkaProducer<>(props);
56 | 
57 |         Schema.Parser parser = new Schema.Parser();
58 |         Schema schema = parser.parse(schemaString);
59 | 
60 |         int customers = 10;
61 |         for (int nCustomers = 0; nCustomers < customers; nCustomers++) {
62 |             String name = "exampleCustomer" + nCustomers;
63 |             String email = "example " + nCustomers + "@example.com";
64 | 
65 |             GenericRecord customer = new GenericData.Record(schema);
66 |             customer.put("id", nCustomers);
67 |             customer.put("name", name);
68 |             customer.put("email", email);
69 | 
70 |             ProducerRecord<String, GenericRecord> data = new ProducerRecord<>("customerContacts", name, customer);
71 |             producer.send(data);
72 |         }
73 |     }
74 | }
75 | 
--------------------------------------------------------------------------------
/kafka-definitive-guide-chapter3/src/java/com/kafkadefinitiveguide/producer/serializer/customserializer/Customer.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2020 Wuyi Chen.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *    http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.kafkadefinitiveguide.producer.serializer.customserializer;
17 | 
18 | /**
19 |  * Customer POJO.
20 |  *
21 |  * @author Wuyi Chen
22 |  * @date 06/03/2020
23 |  * @version 1.0
24 |  * @since 1.0
25 |  */
26 | public class Customer {
27 |     private int customerId;
28 |     private String customerName;
29 | 
30 |     public Customer(int id, String name) {
31 |         this.customerId = id;
32 |         this.customerName = name;
33 |     }
34 | 
35 |     public int getId() {
36 |         return customerId;
37 |     }
38 | 
39 |     public String getName() {
40 |         return customerName;
41 |     }
42 | }
43 | 
--------------------------------------------------------------------------------
/kafka-definitive-guide-chapter3/src/java/com/kafkadefinitiveguide/producer/serializer/customserializer/CustomerSerializer.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2020 Wuyi Chen.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *    http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.kafkadefinitiveguide.producer.serializer.customserializer;
17 | 
18 | import org.apache.kafka.common.errors.SerializationException;
19 | import org.apache.kafka.common.serialization.Serializer;
20 | 
21 | import java.nio.ByteBuffer;
22 | import java.util.Map;
23 | 
24 | /**
25 |  * A custom serializer for the Customer POJO.
26 |  *
27 |  * @author Wuyi Chen
28 |  * @date 06/03/2020
29 |  * @version 1.0
30 |  * @since 1.0
31 |  */
32 | public class CustomerSerializer implements Serializer<Customer> {
33 |     @Override
34 |     public void configure(Map<String, ?> configs, boolean isKey) {
35 |         // nothing to configure
36 |     }
37 | 
38 |     /* (non-Javadoc)
39 |      * @see org.apache.kafka.common.serialization.Serializer#serialize(java.lang.String, java.lang.Object)
40 |      *
41 |      * We are serializing Customer as:
42 |      *   - 4 byte int representing customerId
43 |      *   - 4 byte int representing length of customerName in UTF-8 bytes (0 if name is null)
44 |      *   - N bytes representing customerName in UTF-8
45 |      */
46 |     @Override
47 |     public byte[] serialize(String topic, Customer data) {
48 |         try {
49 |             byte[] serializedName;
50 |             int stringSize;
51 |             if (data == null)
52 |                 return null;
53 |             else {
54 |                 if (data.getName() != null) {
55 |                     serializedName = data.getName().getBytes("UTF-8");
56 |                     stringSize = serializedName.length;
57 |                 } else {
58 |                     serializedName = new byte[0];
59 |                     stringSize = 0;
60 |                 }
61 |             }
62 | 
63 |             ByteBuffer buffer = ByteBuffer.allocate(4 + 4 + stringSize);
64 |             buffer.putInt(data.getId());
65 |             buffer.putInt(stringSize);
66 |             buffer.put(serializedName);
67 | 
68 |             return buffer.array();
69 |         } catch (Exception e) {
70 |             throw new SerializationException("Error when serializing Customer to byte[] " + e);
71 |         }
72 |     }
73 | 
74 |     @Override
75 |     public void close() {
76 |         // nothing to close
77 |     }
78 | }
79 | 
--------------------------------------------------------------------------------
/kafka-definitive-guide-chapter3/test/unit/.keep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wuyichen24/kafka-definitive-guide/3375555e46a6ba7cb78b05e40ba64d7af85a0faa/kafka-definitive-guide-chapter3/test/unit/.keep
--------------------------------------------------------------------------------
/kafka-definitive-guide-chapter4/.gitignore:
--------------------------------------------------------------------------------
1 | /bin/
2 | 
--------------------------------------------------------------------------------
/kafka-definitive-guide-chapter4/build.gradle:
--------------------------------------------------------------------------------
1 | repositories {
2 |     maven {
3 |         url 'https://packages.confluent.io/maven/'
4 |     }
5 | }
6 | 
7 | dependencies {
8 |     compile group: 'org.apache.avro', name: 'avro',                  version: '1.9.2'
9 |     compile group: 'io.confluent',    name: 'kafka-avro-serializer', version: '5.5.0'
10 |     compile group: 'org.json',        name: 'json',                  version: '20200518'
11 | }
--------------------------------------------------------------------------------
/kafka-definitive-guide-chapter4/src/java/com/kafkadefinitiveguide/consumer/assignpartitions/AssignParitionsExample.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2020 Wuyi Chen.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *    http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.kafkadefinitiveguide.consumer.assignpartitions;
17 | 
18 | import java.time.Duration;
19 | import java.util.ArrayList;
20 | import java.util.List;
21 | import java.util.Properties;
22 | 
23 | import org.apache.kafka.clients.consumer.ConsumerRecord;
24 | import org.apache.kafka.clients.consumer.ConsumerRecords;
25 | import org.apache.kafka.clients.consumer.KafkaConsumer;
26 | import org.apache.kafka.common.PartitionInfo;
27 | import org.apache.kafka.common.TopicPartition;
28 | 
29 | /**
30 |  * Example of assigning the consumer to partitions instead of subscribing to a topic.
31 |  *
32 |  * @author Wuyi Chen
33 |  * @date 06/05/2020
34 |  * @version 1.0
35 |  * @since 1.0
36 |  */
37 | public class AssignParitionsExample {
38 |     public static void main(String[] args) {
39 |         Properties props = new Properties();
40 |         props.put("bootstrap.servers", "localhost:9092");
41 |         props.put("group.id", "CountryCounter"); // Specifies the consumer group the KafkaConsumer instance belongs to.
42 |         props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
43 |         props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
44 | 
45 |         KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
46 | 
47 |         List<PartitionInfo> partitionInfos = null;
48 |         partitionInfos = consumer.partitionsFor("customerCountries"); // Get all the available partitions for that topic.
49 | 
50 |         List<TopicPartition> partitions = new ArrayList<>();
51 |         if (partitionInfos != null) {
52 |             for (PartitionInfo partition : partitionInfos) {
53 |                 partitions.add(new TopicPartition(partition.topic(), partition.partition()));
54 |             }
55 |             consumer.assign(partitions); // Assign the consumer to specific partitions instead of subscribing to a topic.
56 | 
57 |             while (true) {
58 |                 ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
59 | 
60 |                 for (ConsumerRecord<String, String> record : records) {
61 |                     System.out.printf("topic = %s, partition = %s, offset = %d, customer = %s, country = %s%n",
62 |                         record.topic(), record.partition(), record.offset(), record.key(), record.value());
63 |                 }
64 |                 consumer.commitSync();
65 |             }
66 |         }
67 |     }
68 | }
69 | 
--------------------------------------------------------------------------------
/kafka-definitive-guide-chapter4/src/java/com/kafkadefinitiveguide/consumer/commit/AsynchronousCommit.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2020 Wuyi Chen.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.kafkadefinitiveguide.consumer.commit; 17 | 18 | import java.time.Duration; 19 | import java.util.Collections; 20 | import java.util.Properties; 21 | 22 | import org.apache.kafka.clients.consumer.ConsumerRecord; 23 | import org.apache.kafka.clients.consumer.ConsumerRecords; 24 | import org.apache.kafka.clients.consumer.KafkaConsumer; 25 | 26 | /** 27 | * Asynchronously commit the offset. 28 | * 29 | * @author Wuyi Chen 30 | * @date 06/05/2020 31 | * @version 1.0 32 | * @since 1.0 33 | */ 34 | public class AsynchronousCommit { 35 | public static void main(String[] args) { 36 | Properties props = new Properties(); 37 | props.put("bootstrap.servers", "localhost:9092"); 38 | props.put("group.id", "CountryCounter"); 39 | props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 40 | props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 41 | props.put("enable.auto.commit", "false"); // Disable automatic commit 42 | 43 | KafkaConsumer consumer = new KafkaConsumer<>(props); 44 | 45 | consumer.subscribe(Collections.singletonList("customerCountries")); 46 | 47 | try { 48 | while (true) { 49 | ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); 50 | for (ConsumerRecord record : records) { 51 | System.out.printf("topic = %s, partition = %s, offset = %d, customer = %s, country = %s%n", 52 | record.topic(), record.partition(), record.offset(), 53 | record.key(), record.value()); 54 | } 55 | consumer.commitAsync(); // Commit the last offset and carry on 56 | } 57 | } finally { 58 | consumer.close(); 59 | } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /kafka-definitive-guide-chapter4/src/java/com/kafkadefinitiveguide/consumer/commit/AsynchronousCommitWithCallback.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Wuyi Chen. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
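 */

// (Added note, not in the original source) commitAsync() deliberately does not
// retry: a retried commit could land after a later commit succeeded and rewind
// the group's offset. That is why the callback below only logs a failed commit.

/*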
15 | */ 16 | package com.kafkadefinitiveguide.consumer.commit; 17 | 18 | import java.time.Duration; 19 | import java.util.Collections; 20 | import java.util.Map; 21 | import java.util.Properties; 22 | 23 | import org.apache.kafka.clients.consumer.ConsumerRecord; 24 | import org.apache.kafka.clients.consumer.ConsumerRecords; 25 | import org.apache.kafka.clients.consumer.KafkaConsumer; 26 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 27 | import org.apache.kafka.clients.consumer.OffsetCommitCallback; 28 | import org.apache.kafka.common.TopicPartition; 29 | import org.slf4j.Logger; 30 | import org.slf4j.LoggerFactory; 31 | 32 | /** 33 | * Asynchronously commit the offset with the callback function for handling the response. 34 | * 35 | * @author Wuyi Chen 36 | * @date 06/05/2020 37 | * @version 1.0 38 | * @since 1.0 39 | */ 40 | public class AsynchronousCommitWithCallback { 41 | private static Logger logger = LoggerFactory.getLogger(AsynchronousCommitWithCallback.class); 42 | 43 | public static void main(String[] args) { 44 | Properties props = new Properties(); 45 | props.put("bootstrap.servers", "localhost:9092"); 46 | props.put("group.id", "CountryCounter"); 47 | props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 48 | props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 49 | props.put("enable.auto.commit", "false"); // Disable automatic commit 50 | 51 | KafkaConsumer consumer = new KafkaConsumer<>(props); 52 | 53 | consumer.subscribe(Collections.singletonList("customerCountries")); 54 | 55 | try { 56 | while (true) { 57 | ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); 58 | for (ConsumerRecord record : records) { 59 | System.out.printf("topic = %s, partition = %s, offset = %d, customer = %s, country = %s%n", 60 | record.topic(), record.partition(), record.offset(), record.key(), record.value()); 61 | } 62 | consumer.commitAsync(new OffsetCommitCallback() { 63 | public void onComplete(Map offsets, Exception e) { 64 | if (e != null) 65 | logger.error("Commit failed for offsets {}", offsets, e); // If the commit fails, the failure and the offsets will be logged. 66 | } 67 | }); 68 | } 69 | } finally { 70 | consumer.close(); 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /kafka-definitive-guide-chapter4/src/java/com/kafkadefinitiveguide/consumer/commit/CombineSynchronousAndAsynchronousCommit.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Wuyi Chen. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
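 */

// (Added note, not in the original source) The pattern below uses fast,
// non-blocking commitAsync() on the happy path and falls back to a blocking
// commitSync() in the finally block, so the last processed offset still gets
// committed when the consumer shuts down.

/*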
15 |  */
16 | package com.kafkadefinitiveguide.consumer.commit;
17 | 
18 | import java.time.Duration;
19 | import java.util.Collections;
20 | import java.util.Properties;
21 | 
22 | import org.apache.kafka.clients.consumer.ConsumerRecord;
23 | import org.apache.kafka.clients.consumer.ConsumerRecords;
24 | import org.apache.kafka.clients.consumer.KafkaConsumer;
25 | import org.slf4j.Logger;
26 | import org.slf4j.LoggerFactory;
27 | 
28 | /**
29 |  * Combine synchronous commit with asynchronous commit.
30 |  *
31 |  * @author Wuyi Chen
32 |  * @date 06/05/2020
33 |  * @version 1.0
34 |  * @since 1.0
35 |  */
36 | public class CombineSynchronousAndAsynchronousCommit {
37 |     private static Logger logger = LoggerFactory.getLogger(CombineSynchronousAndAsynchronousCommit.class);
38 | 
39 |     public static void main(String[] args) {
40 |         Properties props = new Properties();
41 |         props.put("bootstrap.servers", "localhost:9092");
42 |         props.put("group.id", "CountryCounter");
43 |         props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
44 |         props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
45 |         props.put("enable.auto.commit", "false"); // Disable automatic commit
46 | 
47 |         KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
48 | 
49 |         consumer.subscribe(Collections.singletonList("customerCountries"));
50 | 
51 |         try {
52 |             while (true) {
53 |                 ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
54 |                 for (ConsumerRecord<String, String> record : records) {
55 |                     System.out.printf("topic = %s, partition = %s, offset = %d, customer = %s, country = %s%n",
56 |                         record.topic(), record.partition(), record.offset(), record.key(), record.value());
57 |                 }
58 |                 consumer.commitAsync(); // Use commitAsync() first because it is faster
59 |             }
60 |         } catch (Exception e) {
61 |             logger.error("Unexpected error", e);
62 |         } finally {
63 |             try {
64 |                 consumer.commitSync(); // Use commitSync() as a final retry before closing
65 |             } finally {
66 |                 consumer.close();
67 |             }
68 |         }
69 |     }
70 | }
71 | 
--------------------------------------------------------------------------------
/kafka-definitive-guide-chapter4/src/java/com/kafkadefinitiveguide/consumer/commit/SpecifiedOffsetCommit.java:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2020 Wuyi Chen.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *    http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.kafkadefinitiveguide.consumer.commit;
17 | 
18 | import java.time.Duration;
19 | import java.util.Collections;
20 | import java.util.HashMap;
21 | import java.util.Map;
22 | import java.util.Properties;
23 | 
24 | import org.apache.kafka.clients.consumer.ConsumerRecord;
25 | import org.apache.kafka.clients.consumer.ConsumerRecords;
26 | import org.apache.kafka.clients.consumer.KafkaConsumer;
27 | import org.apache.kafka.clients.consumer.OffsetAndMetadata;
28 | import org.apache.kafka.common.TopicPartition;
29 | 
30 | /**
31 |  * Commit a specific offset.
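 * (Added note: the offset stored for each partition below is record.offset() + 1,
 * i.e. the position of the next message the group should read after a rebalance
 * or restart.)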
32 | * 33 | * @author Wuyi Chen 34 | * @date 06/05/2020 35 | * @version 1.0 36 | * @since 1.0 37 | */ 38 | public class SpecifiedOffsetCommit { 39 | public static void main(String[] args) { 40 | Properties props = new Properties(); 41 | props.put("bootstrap.servers", "localhost:9092"); 42 | props.put("group.id", "CountryCounter"); 43 | props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 44 | props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 45 | props.put("enable.auto.commit", "false"); // Disable automatic commit 46 | 47 | KafkaConsumer consumer = new KafkaConsumer<>(props); 48 | 49 | consumer.subscribe(Collections.singletonList("customerCountries")); 50 | 51 | try { 52 | Map currentOffsets = new HashMap<>(); // Use this map to manually track offsets 53 | int count = 0; 54 | 55 | while (true) { 56 | ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); 57 | for (ConsumerRecord record : records) { 58 | System.out.printf("topic = %s, partition = %s, offset = %d, customer = %s, country = %s%n", 59 | record.topic(), record.partition(), record.offset(), record.key(), record.value()); 60 | currentOffsets.put(new TopicPartition(record.topic(), record.partition()), new OffsetAndMetadata(record.offset()+1, "no metadata")); // Update the offsets map with the offset of the next message we expect to process. 61 | if (count % 1000 == 0) { 62 | consumer.commitAsync(currentOffsets, null); // Commit current offsets every 1,000 records 63 | } 64 | count++; 65 | } 66 | } 67 | } finally { 68 | consumer.close(); 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /kafka-definitive-guide-chapter4/src/java/com/kafkadefinitiveguide/consumer/commit/SynchronousCommit.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Wuyi Chen. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.kafkadefinitiveguide.consumer.commit; 17 | 18 | import java.time.Duration; 19 | import java.util.Collections; 20 | import java.util.Properties; 21 | 22 | import org.apache.kafka.clients.consumer.CommitFailedException; 23 | import org.apache.kafka.clients.consumer.ConsumerRecord; 24 | import org.apache.kafka.clients.consumer.ConsumerRecords; 25 | import org.apache.kafka.clients.consumer.KafkaConsumer; 26 | import org.slf4j.Logger; 27 | import org.slf4j.LoggerFactory; 28 | 29 | /** 30 | * Synchronously commit the offset. 
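 * (Added note: commitSync() retries transient failures on its own, so the
 * CommitFailedException handled below indicates a commit that cannot succeed.)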
31 | * 32 | * @author Wuyi Chen 33 | * @date 06/05/2020 34 | * @version 1.0 35 | * @since 1.0 36 | */ 37 | public class SynchronousCommit { 38 | private static Logger logger = LoggerFactory.getLogger(SynchronousCommit.class); 39 | 40 | public static void main(String[] args) { 41 | Properties props = new Properties(); 42 | props.put("bootstrap.servers", "localhost:9092"); 43 | props.put("group.id", "CountryCounter"); 44 | props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 45 | props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 46 | props.put("enable.auto.commit", "false"); // Disable automatic commit 47 | 48 | KafkaConsumer consumer = new KafkaConsumer<>(props); 49 | 50 | consumer.subscribe(Collections.singletonList("customerCountries")); 51 | 52 | try { 53 | while (true) { 54 | ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); 55 | for (ConsumerRecord record : records) { 56 | System.out.printf("topic = %s, partition = %d, offset = %d, customer = %s, country = %s%n", 57 | record.topic(), record.partition(), record.offset(), record.key(), record.value()); 58 | } 59 | try { 60 | consumer.commitSync(); // Commit the last offset in the batch 61 | } catch (CommitFailedException e) { 62 | logger.error("commit failed", e); 63 | } 64 | } 65 | } finally { 66 | consumer.close(); 67 | } 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /kafka-definitive-guide-chapter4/src/java/com/kafkadefinitiveguide/consumer/deserializer/avrodeserializer/Customer.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Autogenerated by Avro 3 | * 4 | * DO NOT EDIT DIRECTLY 5 | */ 6 | package com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer; 7 | 8 | import org.apache.avro.generic.GenericArray; 9 | import org.apache.avro.specific.SpecificData; 10 | import org.apache.avro.util.Utf8; 11 | import org.apache.avro.message.BinaryMessageEncoder; 12 | import org.apache.avro.message.BinaryMessageDecoder; 13 | import org.apache.avro.message.SchemaStore; 14 | 15 | @org.apache.avro.specific.AvroGenerated 16 | public class Customer extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord { 17 | private static final long serialVersionUID = -2681329921521656134L; 18 | public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"Customer\",\"namespace\":\"com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer\",\"fields\":[{\"name\":\"id\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"email\",\"type\":[\"string\",\"null\"],\"default\":\"null\"}]}"); 19 | public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; } 20 | 21 | private static SpecificData MODEL$ = new SpecificData(); 22 | 23 | private static final BinaryMessageEncoder ENCODER = 24 | new BinaryMessageEncoder(MODEL$, SCHEMA$); 25 | 26 | private static final BinaryMessageDecoder DECODER = 27 | new BinaryMessageDecoder(MODEL$, SCHEMA$); 28 | 29 | /** 30 | * Return the BinaryMessageEncoder instance used by this class. 31 | * @return the message encoder used by this class 32 | */ 33 | public static BinaryMessageEncoder getEncoder() { 34 | return ENCODER; 35 | } 36 | 37 | /** 38 | * Return the BinaryMessageDecoder instance used by this class. 
39 | * @return the message decoder used by this class 40 | */ 41 | public static BinaryMessageDecoder getDecoder() { 42 | return DECODER; 43 | } 44 | 45 | /** 46 | * Create a new BinaryMessageDecoder instance for this class that uses the specified {@link SchemaStore}. 47 | * @param resolver a {@link SchemaStore} used to find schemas by fingerprint 48 | * @return a BinaryMessageDecoder instance for this class backed by the given SchemaStore 49 | */ 50 | public static BinaryMessageDecoder createDecoder(SchemaStore resolver) { 51 | return new BinaryMessageDecoder(MODEL$, SCHEMA$, resolver); 52 | } 53 | 54 | /** 55 | * Serializes this Customer to a ByteBuffer. 56 | * @return a buffer holding the serialized data for this instance 57 | * @throws java.io.IOException if this instance could not be serialized 58 | */ 59 | public java.nio.ByteBuffer toByteBuffer() throws java.io.IOException { 60 | return ENCODER.encode(this); 61 | } 62 | 63 | /** 64 | * Deserializes a Customer from a ByteBuffer. 65 | * @param b a byte buffer holding serialized data for an instance of this class 66 | * @return a Customer instance decoded from the given buffer 67 | * @throws java.io.IOException if the given bytes could not be deserialized into an instance of this class 68 | */ 69 | public static Customer fromByteBuffer( 70 | java.nio.ByteBuffer b) throws java.io.IOException { 71 | return DECODER.decode(b); 72 | } 73 | 74 | private int id; 75 | private java.lang.CharSequence name; 76 | private java.lang.CharSequence email; 77 | 78 | /** 79 | * Default constructor. Note that this does not initialize fields 80 | * to their default values from the schema. If that is desired then 81 | * one should use newBuilder(). 82 | */ 83 | public Customer() {} 84 | 85 | /** 86 | * All-args constructor. 87 | * @param id The new value for id 88 | * @param name The new value for name 89 | * @param email The new value for email 90 | */ 91 | public Customer(java.lang.Integer id, java.lang.CharSequence name, java.lang.CharSequence email) { 92 | this.id = id; 93 | this.name = name; 94 | this.email = email; 95 | } 96 | 97 | public org.apache.avro.specific.SpecificData getSpecificData() { return MODEL$; } 98 | public org.apache.avro.Schema getSchema() { return SCHEMA$; } 99 | // Used by DatumWriter. Applications should not call. 100 | public java.lang.Object get(int field$) { 101 | switch (field$) { 102 | case 0: return id; 103 | case 1: return name; 104 | case 2: return email; 105 | default: throw new org.apache.avro.AvroRuntimeException("Bad index"); 106 | } 107 | } 108 | 109 | // Used by DatumReader. Applications should not call. 110 | @SuppressWarnings(value="unchecked") 111 | public void put(int field$, java.lang.Object value$) { 112 | switch (field$) { 113 | case 0: id = (java.lang.Integer)value$; break; 114 | case 1: name = (java.lang.CharSequence)value$; break; 115 | case 2: email = (java.lang.CharSequence)value$; break; 116 | default: throw new org.apache.avro.AvroRuntimeException("Bad index"); 117 | } 118 | } 119 | 120 | /** 121 | * Gets the value of the 'id' field. 122 | * @return The value of the 'id' field. 123 | */ 124 | public int getId() { 125 | return id; 126 | } 127 | 128 | 129 | /** 130 | * Sets the value of the 'id' field. 131 | * @param value the value to set. 132 | */ 133 | public void setId(int value) { 134 | this.id = value; 135 | } 136 | 137 | /** 138 | * Gets the value of the 'name' field. 139 | * @return The value of the 'name' field. 
140 |    */
141 |   public java.lang.CharSequence getName() {
142 |     return name;
143 |   }
144 | 
145 | 
146 |   /**
147 |    * Sets the value of the 'name' field.
148 |    * @param value the value to set.
149 |    */
150 |   public void setName(java.lang.CharSequence value) {
151 |     this.name = value;
152 |   }
153 | 
154 |   /**
155 |    * Gets the value of the 'email' field.
156 |    * @return The value of the 'email' field.
157 |    */
158 |   public java.lang.CharSequence getEmail() {
159 |     return email;
160 |   }
161 | 
162 | 
163 |   /**
164 |    * Sets the value of the 'email' field.
165 |    * @param value the value to set.
166 |    */
167 |   public void setEmail(java.lang.CharSequence value) {
168 |     this.email = value;
169 |   }
170 | 
171 |   /**
172 |    * Creates a new Customer RecordBuilder.
173 |    * @return A new Customer RecordBuilder
174 |    */
175 |   public static com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder newBuilder() {
176 |     return new com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder();
177 |   }
178 | 
179 |   /**
180 |    * Creates a new Customer RecordBuilder by copying an existing Builder.
181 |    * @param other The existing builder to copy.
182 |    * @return A new Customer RecordBuilder
183 |    */
184 |   public static com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder newBuilder(com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder other) {
185 |     if (other == null) {
186 |       return new com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder();
187 |     } else {
188 |       return new com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder(other);
189 |     }
190 |   }
191 | 
192 |   /**
193 |    * Creates a new Customer RecordBuilder by copying an existing Customer instance.
194 |    * @param other The existing instance to copy.
195 |    * @return A new Customer RecordBuilder
196 |    */
197 |   public static com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder newBuilder(com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer other) {
198 |     if (other == null) {
199 |       return new com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder();
200 |     } else {
201 |       return new com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder(other);
202 |     }
203 |   }
204 | 
205 |   /**
206 |    * RecordBuilder for Customer instances.
207 |    */
208 |   @org.apache.avro.specific.AvroGenerated
209 |   public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<Customer>
210 |     implements org.apache.avro.data.RecordBuilder<Customer> {
211 | 
212 |     private int id;
213 |     private java.lang.CharSequence name;
214 |     private java.lang.CharSequence email;
215 | 
216 |     /** Creates a new Builder */
217 |     private Builder() {
218 |       super(SCHEMA$);
219 |     }
220 | 
221 |     /**
222 |      * Creates a Builder by copying an existing Builder.
223 |      * @param other The existing Builder to copy.
224 | */ 225 | private Builder(com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder other) { 226 | super(other); 227 | if (isValidValue(fields()[0], other.id)) { 228 | this.id = data().deepCopy(fields()[0].schema(), other.id); 229 | fieldSetFlags()[0] = other.fieldSetFlags()[0]; 230 | } 231 | if (isValidValue(fields()[1], other.name)) { 232 | this.name = data().deepCopy(fields()[1].schema(), other.name); 233 | fieldSetFlags()[1] = other.fieldSetFlags()[1]; 234 | } 235 | if (isValidValue(fields()[2], other.email)) { 236 | this.email = data().deepCopy(fields()[2].schema(), other.email); 237 | fieldSetFlags()[2] = other.fieldSetFlags()[2]; 238 | } 239 | } 240 | 241 | /** 242 | * Creates a Builder by copying an existing Customer instance 243 | * @param other The existing instance to copy. 244 | */ 245 | private Builder(com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer other) { 246 | super(SCHEMA$); 247 | if (isValidValue(fields()[0], other.id)) { 248 | this.id = data().deepCopy(fields()[0].schema(), other.id); 249 | fieldSetFlags()[0] = true; 250 | } 251 | if (isValidValue(fields()[1], other.name)) { 252 | this.name = data().deepCopy(fields()[1].schema(), other.name); 253 | fieldSetFlags()[1] = true; 254 | } 255 | if (isValidValue(fields()[2], other.email)) { 256 | this.email = data().deepCopy(fields()[2].schema(), other.email); 257 | fieldSetFlags()[2] = true; 258 | } 259 | } 260 | 261 | /** 262 | * Gets the value of the 'id' field. 263 | * @return The value. 264 | */ 265 | public int getId() { 266 | return id; 267 | } 268 | 269 | 270 | /** 271 | * Sets the value of the 'id' field. 272 | * @param value The value of 'id'. 273 | * @return This builder. 274 | */ 275 | public com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder setId(int value) { 276 | validate(fields()[0], value); 277 | this.id = value; 278 | fieldSetFlags()[0] = true; 279 | return this; 280 | } 281 | 282 | /** 283 | * Checks whether the 'id' field has been set. 284 | * @return True if the 'id' field has been set, false otherwise. 285 | */ 286 | public boolean hasId() { 287 | return fieldSetFlags()[0]; 288 | } 289 | 290 | 291 | /** 292 | * Clears the value of the 'id' field. 293 | * @return This builder. 294 | */ 295 | public com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder clearId() { 296 | fieldSetFlags()[0] = false; 297 | return this; 298 | } 299 | 300 | /** 301 | * Gets the value of the 'name' field. 302 | * @return The value. 303 | */ 304 | public java.lang.CharSequence getName() { 305 | return name; 306 | } 307 | 308 | 309 | /** 310 | * Sets the value of the 'name' field. 311 | * @param value The value of 'name'. 312 | * @return This builder. 313 | */ 314 | public com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder setName(java.lang.CharSequence value) { 315 | validate(fields()[1], value); 316 | this.name = value; 317 | fieldSetFlags()[1] = true; 318 | return this; 319 | } 320 | 321 | /** 322 | * Checks whether the 'name' field has been set. 323 | * @return True if the 'name' field has been set, false otherwise. 324 | */ 325 | public boolean hasName() { 326 | return fieldSetFlags()[1]; 327 | } 328 | 329 | 330 | /** 331 | * Clears the value of the 'name' field. 332 | * @return This builder. 
333 |      */
334 |     public com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder clearName() {
335 |       name = null;
336 |       fieldSetFlags()[1] = false;
337 |       return this;
338 |     }
339 | 
340 |     /**
341 |      * Gets the value of the 'email' field.
342 |      * @return The value.
343 |      */
344 |     public java.lang.CharSequence getEmail() {
345 |       return email;
346 |     }
347 | 
348 | 
349 |     /**
350 |      * Sets the value of the 'email' field.
351 |      * @param value The value of 'email'.
352 |      * @return This builder.
353 |      */
354 |     public com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder setEmail(java.lang.CharSequence value) {
355 |       validate(fields()[2], value);
356 |       this.email = value;
357 |       fieldSetFlags()[2] = true;
358 |       return this;
359 |     }
360 | 
361 |     /**
362 |      * Checks whether the 'email' field has been set.
363 |      * @return True if the 'email' field has been set, false otherwise.
364 |      */
365 |     public boolean hasEmail() {
366 |       return fieldSetFlags()[2];
367 |     }
368 | 
369 | 
370 |     /**
371 |      * Clears the value of the 'email' field.
372 |      * @return This builder.
373 |      */
374 |     public com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer.Customer.Builder clearEmail() {
375 |       email = null;
376 |       fieldSetFlags()[2] = false;
377 |       return this;
378 |     }
379 | 
380 |     @Override
381 |     @SuppressWarnings("unchecked")
382 |     public Customer build() {
383 |       try {
384 |         Customer record = new Customer();
385 |         record.id = fieldSetFlags()[0] ? this.id : (java.lang.Integer) defaultValue(fields()[0]);
386 |         record.name = fieldSetFlags()[1] ? this.name : (java.lang.CharSequence) defaultValue(fields()[1]);
387 |         record.email = fieldSetFlags()[2] ? this.email : (java.lang.CharSequence) defaultValue(fields()[2]);
388 |         return record;
389 |       } catch (org.apache.avro.AvroMissingFieldException e) {
390 |         throw e;
391 |       } catch (java.lang.Exception e) {
392 |         throw new org.apache.avro.AvroRuntimeException(e);
393 |       }
394 |     }
395 |   }
396 | 
397 |   @SuppressWarnings("unchecked")
398 |   private static final org.apache.avro.io.DatumWriter<Customer>
399 |     WRITER$ = (org.apache.avro.io.DatumWriter<Customer>)MODEL$.createDatumWriter(SCHEMA$);
400 | 
401 |   @Override public void writeExternal(java.io.ObjectOutput out)
402 |     throws java.io.IOException {
403 |     WRITER$.write(this, SpecificData.getEncoder(out));
404 |   }
405 | 
406 |   @SuppressWarnings("unchecked")
407 |   private static final org.apache.avro.io.DatumReader<Customer>
408 |     READER$ = (org.apache.avro.io.DatumReader<Customer>)MODEL$.createDatumReader(SCHEMA$);
409 | 
410 |   @Override public void readExternal(java.io.ObjectInput in)
411 |     throws java.io.IOException {
412 |     READER$.read(this, SpecificData.getDecoder(in));
413 |   }
414 | 
415 |   @Override protected boolean hasCustomCoders() { return true; }
416 | 
417 |   @Override public void customEncode(org.apache.avro.io.Encoder out)
418 |     throws java.io.IOException
419 |   {
420 |     out.writeInt(this.id);
421 | 
422 |     out.writeString(this.name);
423 | 
424 |     if (this.email == null) {
425 |       out.writeIndex(1);
426 |       out.writeNull();
427 |     } else {
428 |       out.writeIndex(0);
429 |       out.writeString(this.email);
430 |     }
431 | 
432 |   }
433 | 
434 |   @Override public void customDecode(org.apache.avro.io.ResolvingDecoder in)
435 |     throws java.io.IOException
436 |   {
437 |     org.apache.avro.Schema.Field[] fieldOrder = in.readFieldOrderIfDiff();
438 |     if (fieldOrder == null) {
439 |       this.id = in.readInt();
440 | 
441 |       this.name = in.readString(this.name instanceof Utf8 ?
(Utf8)this.name : null);
442 | 
443 |       if (in.readIndex() != 0) {
444 |         in.readNull();
445 |         this.email = null;
446 |       } else {
447 |         this.email = in.readString(this.email instanceof Utf8 ? (Utf8)this.email : null);
448 |       }
449 | 
450 |     } else {
451 |       for (int i = 0; i < 3; i++) {
452 |         switch (fieldOrder[i].pos()) {
453 |         case 0:
454 |           this.id = in.readInt();
455 |           break;
456 | 
457 |         case 1:
458 |           this.name = in.readString(this.name instanceof Utf8 ? (Utf8)this.name : null);
459 |           break;
460 | 
461 |         case 2:
462 |           if (in.readIndex() != 0) {
463 |             in.readNull();
464 |             this.email = null;
465 |           } else {
466 |             this.email = in.readString(this.email instanceof Utf8 ? (Utf8)this.email : null);
467 |           }
468 |           break;
469 | 
470 |         default:
471 |           throw new java.io.IOException("Corrupt ResolvingDecoder.");
472 |         }
473 |       }
474 |     }
475 |   }
476 | }
477 | 
478 | 
479 | 
480 | 
481 | 
482 | 
483 | 
484 | 
485 | 
486 | 
487 | 
-------------------------------------------------------------------------------- /kafka-definitive-guide-chapter4/src/java/com/kafkadefinitiveguide/consumer/deserializer/avrodeserializer/CustomerConsumer.java: --------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2020 Wuyi Chen.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *    http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.kafkadefinitiveguide.consumer.deserializer.avrodeserializer;
17 | 
18 | import java.time.Duration;
19 | import java.util.Collections;
20 | import java.util.Properties;
21 | 
22 | import org.apache.kafka.clients.consumer.ConsumerRecord;
23 | import org.apache.kafka.clients.consumer.ConsumerRecords;
24 | import org.apache.kafka.clients.consumer.KafkaConsumer;
25 | 
26 | /**
27 |  * Receives Customer messages using the Avro deserializer.
28 |  *
29 |  * Customer is not a POJO; it is an Avro object generated by avro-tools.jar.
30 |  *
31 |  * @author Wuyi Chen
32 |  * @date 06/06/2020
33 |  * @version 1.0
34 |  * @since 1.0
35 |  */
36 | public class CustomerConsumer {
37 |     public static void main(String[] args) {
38 |         Properties props = new Properties();
39 |         props.put("bootstrap.servers", "localhost:9092");
40 |         props.put("group.id", "CountryCounter");
41 |         props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
42 |         props.put("value.deserializer", "io.confluent.kafka.serializers.KafkaAvroDeserializer");
43 |         props.put("specific.avro.reader", "true");
44 |         props.put("schema.registry.url", "http://localhost:8081");
45 | 
46 |         String topic = "customerContacts";
47 | 
48 |         KafkaConsumer<String, Customer> consumer = new KafkaConsumer<>(props);
49 |         consumer.subscribe(Collections.singletonList(topic));
50 | 
51 |         System.out.println("Reading topic: " + topic);
52 | 
53 |         while (true) {
54 |             ConsumerRecords<String, Customer> records = consumer.poll(Duration.ofMillis(1000));
55 | 
56 |             for (ConsumerRecord<String, Customer> record : records) {
57 |                 System.out.println("Current customer name is: " + record.value().getName());
58 |             }
59 |             consumer.commitSync();
60 |         }
61 |     }
62 | }
63 | 
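The Customer type consumed above is the Avro-generated class shown earlier, not a hand-written POJO. For reference, such a class is typically regenerated from an .avsc schema with avro-tools; the invocation below is a sketch only (the jar name, schema path, and output directory are illustrative assumptions, not taken from this repo's build):

    java -jar avro-tools.jar compile schema customer.avsc src/java/

-------------------------------------------------------------------------------- /kafka-definitive-guide-chapter4/src/java/com/kafkadefinitiveguide/consumer/deserializer/customdeserializer/Customer.java: --------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2020 Wuyi Chen.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *    http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.kafkadefinitiveguide.consumer.deserializer.customdeserializer;
17 | 
18 | /**
19 |  * Customer Pojo.
20 |  *
21 |  * @author Wuyi Chen
22 |  * @date 06/03/2020
23 |  * @version 1.0
24 |  * @since 1.0
25 |  */
26 | public class Customer {
27 |     private int    customerId;
28 |     private String customerName;
29 | 
30 |     public Customer(int id, String name) {
31 |         this.customerId   = id;
32 |         this.customerName = name;
33 |     }
34 | 
35 |     public int getId() {
36 |         return customerId;
37 |     }
38 | 
39 |     public String getName() {
40 |         return customerName;
41 |     }
42 | }
43 | 
-------------------------------------------------------------------------------- /kafka-definitive-guide-chapter4/src/java/com/kafkadefinitiveguide/consumer/deserializer/customdeserializer/CustomerDeserializer.java: --------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2020 Wuyi Chen.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *    http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.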
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.kafkadefinitiveguide.consumer.deserializer.customdeserializer;
17 | 
18 | import org.apache.kafka.common.errors.SerializationException;
19 | import org.apache.kafka.common.serialization.Deserializer;
20 | 
21 | import java.nio.ByteBuffer;
22 | import java.util.Map;
23 | 
24 | /**
25 |  * The custom deserializer for deserializing the Customer Pojo.
26 |  *
27 |  * @author Wuyi Chen
28 |  * @date 06/03/2020
29 |  * @version 1.0
30 |  * @since 1.0
31 |  */
32 | public class CustomerDeserializer implements Deserializer<Customer> {
33 |     @Override
34 |     public void configure(Map<String, ?> configs, boolean isKey) {
35 |         // nothing to configure
36 |     }
37 | 
38 |     /* (non-Javadoc)
39 |      * @see org.apache.kafka.common.serialization.Deserializer#deserialize(java.lang.String, byte[])
40 |      *
41 |      * We are deserializing Customer as:
42 |      * - 4 byte int representing customerId
43 |      * - 4 byte int representing length of customerName in UTF-8 bytes (0 if name is Null)
44 |      * - N bytes representing customerName in UTF-8
45 |      */
46 |     @Override
47 |     public Customer deserialize(String topic, byte[] data) {
48 |         int id;
49 |         int nameSize;
50 |         String name;
51 | 
52 |         try {
53 |             if (data == null) {
54 |                 return null;
55 |             }
56 |             if (data.length < 8) { // At minimum the two 4-byte int headers must be present
57 |                 throw new SerializationException("Size of data received by deserializer is shorter than expected");
58 |             }
59 | 
60 |             ByteBuffer buffer = ByteBuffer.wrap(data);
61 |             id = buffer.getInt();       // 4 byte int representing customerId
62 |             nameSize = buffer.getInt(); // 4 byte int representing length of customerName in UTF-8 bytes (0 if name is Null)
63 | 
64 |             byte[] nameBytes = new byte[nameSize]; // N bytes representing customerName in UTF-8
65 |             buffer.get(nameBytes);
66 |             name = new String(nameBytes, "UTF-8");
67 | 
68 |             return new Customer(id, name);
69 |         } catch (Exception e) {
70 |             throw new SerializationException("Error when deserializing byte[] to Customer", e);
71 |         }
72 |     }
73 | 
74 |     @Override
75 |     public void close() {
76 |         // nothing to close
77 |     }
78 | }
79 | 
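For reference, the serializer side of this wire format simply writes the same three fields in order. The method below is a hypothetical sketch shown in isolation to make the byte layout concrete; it assumes the same Customer Pojo plus the ByteBuffer and SerializationException imports used above:

    // Hypothetical companion sketch (not part of this file): produces the exact
    // layout that deserialize() above reads back.
    public byte[] serialize(String topic, Customer data) {
        try {
            if (data == null) {
                return null;
            }
            byte[] serializedName = (data.getName() != null) ? data.getName().getBytes("UTF-8") : new byte[0];
            ByteBuffer buffer = ByteBuffer.allocate(4 + 4 + serializedName.length);
            buffer.putInt(data.getId());          // 4 byte int representing customerId
            buffer.putInt(serializedName.length); // 4 byte int representing length of customerName (0 if name is null)
            buffer.put(serializedName);           // N bytes representing customerName in UTF-8
            return buffer.array();
        } catch (Exception e) {
            throw new SerializationException("Error when serializing Customer to byte[]", e);
        }
    }

-------------------------------------------------------------------------------- /kafka-definitive-guide-chapter4/src/java/com/kafkadefinitiveguide/consumer/exitpollloop/ExitPollLoopExample.java: --------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2020 Wuyi Chen.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *    http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.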
15 |  */
16 | package com.kafkadefinitiveguide.consumer.exitpollloop;
17 | 
18 | import java.time.Duration;
19 | import java.util.Collections;
20 | import java.util.Properties;
21 | 
22 | import org.apache.kafka.clients.consumer.ConsumerRecord;
23 | import org.apache.kafka.clients.consumer.ConsumerRecords;
24 | import org.apache.kafka.clients.consumer.KafkaConsumer;
25 | import org.apache.kafka.common.errors.WakeupException;
26 | import org.slf4j.Logger;
27 | import org.slf4j.LoggerFactory;
28 | 
29 | /**
30 |  * An example of exiting the poll loop by calling wakeup() from a ShutdownHook.
31 |  *
32 |  * @author Wuyi Chen
33 |  * @date 06/05/2020
34 |  * @version 1.0
35 |  * @since 1.0
36 |  */
37 | public class ExitPollLoopExample {
38 |     private static Logger logger = LoggerFactory.getLogger(ExitPollLoopExample.class);
39 | 
40 |     public static void main(String[] args) {
41 |         Properties props = new Properties();
42 |         props.put("bootstrap.servers", "localhost:9092");
43 |         props.put("group.id", "CountryCounter"); // Specifies the consumer group the KafkaConsumer instance belongs to.
44 |         props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
45 |         props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
46 | 
47 |         KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
48 | 
49 |         final Thread mainThread = Thread.currentThread();
50 | 
51 |         // Registering a shutdown hook so we can exit cleanly
52 |         Runtime.getRuntime().addShutdownHook(new Thread() { // The ShutdownHook runs in a separate thread.
53 |             public void run() {
54 |                 System.out.println("Starting exit...");
55 | 
56 |                 consumer.wakeup(); // It is safe to call wakeup() from another thread.
57 |                 try {
58 |                     mainThread.join();
59 |                 } catch (InterruptedException e) {
60 |                     e.printStackTrace();
61 |                 }
62 |             }
63 |         });
64 | 
65 |         try {
66 |             consumer.subscribe(Collections.singletonList("customerCountries")); // Specifies the list of topics the KafkaConsumer instance subscribes to.
67 | 
68 |             while (true) {
69 |                 ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100)); // The timeout controls how long poll() will block if data is not available in the consumer buffer.
70 |                 for (ConsumerRecord<String, String> record : records) {
71 |                     logger.debug("topic = {}, partition = {}, offset = {}, customer = {}, country = {}",
72 |                             record.topic(), record.partition(), record.offset(), record.key(), record.value());
73 | 
74 |                 }
75 |             }
76 |         } catch (WakeupException e) {
77 |             // Ignore: poll() throws WakeupException once wakeup() has been called; this is the expected shutdown path.
78 |         } finally {
79 |             consumer.close(); // Closing the consumer also triggers a rebalance immediately.
80 |         }
81 |     }
82 | }
83 | 
-------------------------------------------------------------------------------- /kafka-definitive-guide-chapter4/src/java/com/kafkadefinitiveguide/consumer/rebalancelisteners/CommitOffsetToDbExample.java: --------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2020 Wuyi Chen.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *    http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.kafkadefinitiveguide.consumer.rebalancelisteners;
17 | 
18 | import java.time.Duration;
19 | import java.util.Collection;
20 | import java.util.Collections;
21 | import java.util.Properties;
22 | 
23 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
24 | import org.apache.kafka.clients.consumer.ConsumerRecord;
25 | import org.apache.kafka.clients.consumer.ConsumerRecords;
26 | import org.apache.kafka.clients.consumer.KafkaConsumer;
27 | import org.apache.kafka.clients.consumer.OffsetAndMetadata;
28 | import org.apache.kafka.common.TopicPartition;
29 | 
30 | public class CommitOffsetToDbExample {
31 |     public static void main(String[] args) {
32 |         Properties props = new Properties();
33 |         props.put("bootstrap.servers", "localhost:9092");
34 |         props.put("group.id", "CountryCounter");
35 |         props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
36 |         props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
37 |         props.put("enable.auto.commit", "false"); // Disable automatic commit.
38 | 
39 |         KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
40 | 
41 |         class SaveOffsetsOnRebalance implements ConsumerRebalanceListener {
42 |             public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
43 |                 commitDBTransaction(); // Commit the transactions to the DB before losing ownership of the partitions.
44 |             }
45 | 
46 |             public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
47 |                 for (TopicPartition partition : partitions) {
48 |                     consumer.seek(partition, getOffsetFromDB(partition)); // Get the stored offset of each partition and use seek() to correct the consumer's position.
49 |                 }
50 |             }
51 |         }
52 | 
53 |         consumer.subscribe(Collections.singletonList("customerCountries"), new SaveOffsetsOnRebalance());
54 |         consumer.poll(Duration.ofMillis(0)); // Make sure this consumer joins a consumer group and gets assigned partitions.
55 | 
56 |         for (TopicPartition partition : consumer.assignment()) {
57 |             consumer.seek(partition, getOffsetFromDB(partition)); // Correct the offset in the partitions we are assigned to.
58 |         }
59 |         try {
60 |             while (true) {
61 |                 ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
62 |                 for (ConsumerRecord<String, String> record : records) {
63 |                     processRecord(record);   // Process the record.
64 |                     storeRecordInDB(record); // Store the record into the DB.
65 |                     storeOffsetInDB(record.topic(), record.partition(), record.offset()); // Store the new offset into the DB.
66 |                 }
67 |                 commitDBTransaction();
68 |             }
69 |         } finally {
70 |             consumer.close();
71 |         }
72 |     }
73 | 
74 |     private static void commitDBTransaction() {} // The DB helpers are placeholder stubs; a sketch of possible implementations follows this file.
75 |     private static OffsetAndMetadata getOffsetFromDB(TopicPartition partition) { return null; }
76 |     private static void processRecord(ConsumerRecord<String, String> record) {}
77 |     private static void storeRecordInDB(ConsumerRecord<String, String> record) {}
78 |     private static void storeOffsetInDB(String topic, int partition, long offset) {}
79 | }
80 | 
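A hypothetical sketch of two of the placeholder stubs above: the offsets table (topic, kafka_partition, kafka_offset) and the shared JDBC connection are invented here for illustration and are not part of the original example.

    // Assumed to be opened elsewhere and shared with storeRecordInDB()/commitDBTransaction(),
    // so that records and offsets are committed in one DB transaction.
    private static java.sql.Connection connection;

    private static OffsetAndMetadata getOffsetFromDB(TopicPartition partition) {
        try (java.sql.PreparedStatement stmt = connection.prepareStatement(
                "SELECT kafka_offset FROM offsets WHERE topic = ? AND kafka_partition = ?")) {
            stmt.setString(1, partition.topic());
            stmt.setInt(2, partition.partition());
            try (java.sql.ResultSet rs = stmt.executeQuery()) {
                // Resume from the stored position, or from offset 0 if nothing was saved yet.
                return new OffsetAndMetadata(rs.next() ? rs.getLong(1) : 0L);
            }
        } catch (java.sql.SQLException e) {
            throw new RuntimeException("Failed to read offset from DB", e);
        }
    }

    private static void storeOffsetInDB(String topic, int partition, long offset) {
        try (java.sql.PreparedStatement stmt = connection.prepareStatement(
                "UPDATE offsets SET kafka_offset = ? WHERE topic = ? AND kafka_partition = ?")) {
            stmt.setLong(1, offset + 1); // Store the offset of the next record to read, matching Kafka's commit convention.
            stmt.setString(2, topic);
            stmt.setInt(3, partition);
            stmt.executeUpdate(); // Committed later by commitDBTransaction(), together with the stored records.
        } catch (java.sql.SQLException e) {
            throw new RuntimeException("Failed to store offset in DB", e);
        }
    }

-------------------------------------------------------------------------------- /kafka-definitive-guide-chapter4/src/java/com/kafkadefinitiveguide/consumer/rebalancelisteners/RebalanceListenersExample.java: --------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2020 Wuyi Chen.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.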
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *    http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.kafkadefinitiveguide.consumer.rebalancelisteners;
17 | 
18 | import java.time.Duration;
19 | import java.util.Collection;
20 | import java.util.Collections;
21 | import java.util.HashMap;
22 | import java.util.Map;
23 | import java.util.Properties;
24 | 
25 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
26 | import org.apache.kafka.clients.consumer.ConsumerRecord;
27 | import org.apache.kafka.clients.consumer.ConsumerRecords;
28 | import org.apache.kafka.clients.consumer.KafkaConsumer;
29 | import org.apache.kafka.clients.consumer.OffsetAndMetadata;
30 | import org.apache.kafka.common.TopicPartition;
31 | import org.apache.kafka.common.errors.WakeupException;
32 | import org.slf4j.Logger;
33 | import org.slf4j.LoggerFactory;
34 | 
35 | /**
36 |  * An example of committing offsets after the consumer has stopped consuming messages
37 |  * but before it loses ownership of its partitions.
38 |  *
39 |  * @author Wuyi Chen
40 |  * @date 06/05/2020
41 |  * @version 1.0
42 |  * @since 1.0
43 |  */
44 | public class RebalanceListenersExample {
45 |     private static Logger logger = LoggerFactory.getLogger(RebalanceListenersExample.class);
46 | 
47 |     public static void main(String[] args) {
48 |         Properties props = new Properties();
49 |         props.put("bootstrap.servers", "localhost:9092");
50 |         props.put("group.id", "CountryCounter");
51 |         props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
52 |         props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
53 |         props.put("enable.auto.commit", "false"); // Disable automatic commit.
54 | 
55 |         KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
56 | 
57 |         Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<>();
58 | 
59 |         class HandleRebalance implements ConsumerRebalanceListener {
60 |             public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
61 |                 /* Nothing needs to be done when this consumer is assigned new partitions */
62 |             }
63 | 
64 |             public void onPartitionsRevoked(Collection<TopicPartition> partitions) { // Commit the offsets before losing ownership of the partitions.
65 |                 System.out.println("Lost partitions in rebalance. Committing current offsets: " + currentOffsets);
66 |                 consumer.commitSync(currentOffsets);
67 |             }
68 |         }
69 | 
70 |         try {
71 |             consumer.subscribe(Collections.singletonList("customerCountries"), new HandleRebalance()); // Pass the ConsumerRebalanceListener to subscribe() so the consumer will invoke it during rebalances.
72 | 
73 |             while (true) {
74 |                 ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
75 |                 for (ConsumerRecord<String, String> record : records) {
76 |                     System.out.printf("topic = %s, partition = %s, offset = %d, customer = %s, country = %s%n",
77 |                             record.topic(), record.partition(), record.offset(), record.key(), record.value());
78 |                     currentOffsets.put(new TopicPartition(record.topic(), record.partition()), new OffsetAndMetadata(record.offset() + 1, null)); // Track the offset of the next record to read.
79 |                 }
80 |                 consumer.commitAsync(currentOffsets, null);
81 |             }
82 |         } catch (WakeupException e) {
83 |             // ignore, we're closing
84 |         } catch (Exception e) {
85 |             logger.error("Unexpected error", e);
86 |         } finally {
87 |             try {
88 |                 consumer.commitSync(currentOffsets);
89 |             } finally {
90 |                 consumer.close();
91 |                 System.out.println("Closed consumer and we are done");
92 |             }
93 |         }
94 |     }
95 | }
96 | 
-------------------------------------------------------------------------------- /kafka-definitive-guide-chapter4/src/java/com/kafkadefinitiveguide/consumer/receive/ExampleConsumer.java: --------------------------------------------------------------------------------
1 | /*
2 |  * Copyright 2020 Wuyi Chen.
3 |  *
4 |  * Licensed under the Apache License, Version 2.0 (the "License");
5 |  * you may not use this file except in compliance with the License.
6 |  * You may obtain a copy of the License at
7 |  *
8 |  *    http://www.apache.org/licenses/LICENSE-2.0
9 |  *
10 |  * Unless required by applicable law or agreed to in writing, software
11 |  * distributed under the License is distributed on an "AS IS" BASIS,
12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 |  * See the License for the specific language governing permissions and
14 |  * limitations under the License.
15 |  */
16 | package com.kafkadefinitiveguide.consumer.receive;
17 | 
18 | import java.time.Duration;
19 | import java.util.Collections;
20 | import java.util.HashMap;
21 | import java.util.Map;
22 | import java.util.Properties;
23 | 
24 | import org.apache.kafka.clients.consumer.ConsumerRecord;
25 | import org.apache.kafka.clients.consumer.ConsumerRecords;
26 | import org.apache.kafka.clients.consumer.KafkaConsumer;
27 | import org.json.JSONObject;
28 | import org.slf4j.Logger;
29 | import org.slf4j.LoggerFactory;
30 | 
31 | /**
32 |  * A basic example of a consumer.
33 |  *
34 |  * @author Wuyi Chen
35 |  * @date 06/05/2020
36 |  * @version 1.0
37 |  * @since 1.0
38 |  */
39 | public class ExampleConsumer {
40 |     private static Logger logger = LoggerFactory.getLogger(ExampleConsumer.class);
41 | 
42 |     public static void main(String[] args) {
43 |         Properties props = new Properties();
44 |         props.put("bootstrap.servers", "localhost:9092");
45 |         props.put("group.id", "CountryCounter"); // Specifies the consumer group the KafkaConsumer instance belongs to.
46 |         props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
47 |         props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
48 | 
49 |         KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
50 | 
51 |         consumer.subscribe(Collections.singletonList("customerCountries")); // Specifies the list of topics the KafkaConsumer instance subscribes to.
52 | 
53 |         Map<String, Integer> customerCountryMap = new HashMap<>(); // Captures the count of customers from each country.
54 | 
55 |         try {
56 |             while (true) {
57 |                 ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100)); // The timeout controls how long poll() will block if data is not available in the consumer buffer.
58 |                 for (ConsumerRecord<String, String> record : records) {
59 |                     logger.debug("topic = {}, partition = {}, offset = {}, customer = {}, country = {}",
60 |                             record.topic(), record.partition(), record.offset(), record.key(), record.value());
61 | 
62 |                     int updatedCount = 1;
63 |                     if (customerCountryMap.containsKey(record.value())) {
64 |                         updatedCount = customerCountryMap.get(record.value()) + 1;
65 |                     }
66 |                     customerCountryMap.put(record.value(), updatedCount);
67 | 
68 |                     JSONObject json = new JSONObject(customerCountryMap);
69 |                     System.out.println(json.toString(4));
70 |                 }
71 |             }
72 |         } finally {
73 |             consumer.close(); // Closing the consumer also triggers a rebalance immediately.
74 |         }
75 |     }
76 | }
77 | 
-------------------------------------------------------------------------------- /settings.gradle: --------------------------------------------------------------------------------
1 | rootProject.name = 'kafka-definitive-guide'
2 | 
3 | include 'kafka-definitive-guide-chapter3'
4 | include 'kafka-definitive-guide-chapter4'
5 | 
6 | // To run a Gradle task for a single subproject, prefix the task with the project name, e.g.:
7 | // gradle kafka-definitive-guide-chapter4:compileJava
--------------------------------------------------------------------------------
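To exercise the customerCountries consumers above with test data, one option (a sketch: it assumes a broker on localhost:9092 and the standard Kafka CLI scripts on the PATH) is the console producer with keyed input:

    kafka-console-producer.sh --broker-list localhost:9092 --topic customerCountries --property parse.key=true --property key.separator=,

Each line typed, e.g. Alice,USA, arrives as one record whose key is the customer and whose value is the country, matching what the consumers log and count.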