├── .gitignore ├── Jenkinsfile ├── LICENSE ├── README.md ├── bin └── debug.sh ├── config ├── connect-avro-docker.properties └── sink.properties ├── docker-compose.yml ├── pom.xml └── src ├── main └── java │ └── com │ └── github │ └── jcustenborder │ └── kafka │ └── connect │ └── redis │ ├── RedisConnectorConfig.java │ ├── RedisSession.java │ ├── RedisSessionFactory.java │ ├── RedisSessionFactoryImpl.java │ ├── RedisSinkConnector.java │ ├── RedisSinkConnectorConfig.java │ ├── RedisSinkTask.java │ ├── SinkOffsetState.java │ ├── SinkOperation.java │ └── package-info.java └── test ├── java └── com │ └── github │ └── jcustenborder │ └── kafka │ └── connect │ └── redis │ ├── DocumentationTest.java │ ├── RedisSinkConnectorConfigTest.java │ ├── RedisSinkTaskIT.java │ ├── RedisSinkTaskReconnectIT.java │ └── RedisSinkTaskTest.java └── resources ├── docker-compose.yml └── logback.xml /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled class file 2 | *.class 3 | 4 | # Log file 5 | *.log 6 | 7 | # BlueJ files 8 | *.ctxt 9 | 10 | # Mobile Tools for Java (J2ME) 11 | .mtj.tmp/ 12 | 13 | # Package Files # 14 | *.jar 15 | *.war 16 | *.ear 17 | *.zip 18 | *.tar.gz 19 | *.rar 20 | 21 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 22 | hs_err_pid* 23 | .okhttpcache 24 | .idea 25 | target 26 | *.ipr 27 | *.iws 28 | *.iml -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | #!groovy 2 | @Library('jenkins-pipeline') import com.github.jcustenborder.jenkins.pipeline.KafkaConnectPipeline 3 | 4 | def pipe = new KafkaConnectPipeline() 5 | pipe.execute() -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 
3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # Introduction 3 | 4 | The Redis plugin is a collection of connectors that are used to interact with a Redis cluster. 5 | 6 | 7 | 8 | # Sink Connectors 9 | 10 | 11 | ## Redis Sink Connector 12 | 13 | The Redis Sink Connector is used to write data from Kafka to a Redis cache. 14 | 15 | ### Important 16 | 17 | This connector expects records from Kafka to have a key and value that are stored as bytes or a string. If your data is already in Kafka in the format that you want in Redis consider using the ByteArrayConverter or the StringConverter for this connector. Keep in mind that this does not need to be configured in the worker properties and can be configured at the connector level. If your data is not sitting in Kafka in the format you wish to persist in Redis consider using a Single Message Transformation to convert the data to a byte or string representation before it is written to Redis. 18 | ### Note 19 | 20 | This connector supports deletes. 
If the record stored in Kafka has a null value, this connector will send a delete with the corresponding key to Redis. 21 | 22 | 23 | ### Configuration 24 | 25 | #### General 26 | 27 | 28 | ##### `redis.hosts` 29 | 30 | The Redis hosts to connect to. 31 | 32 | *Importance:* High 33 | 34 | *Type:* List 35 | 36 | *Default Value:* [localhost:6379] 37 | 38 | 39 | 40 | ##### `redis.client.mode` 41 | 42 | The client mode to use when interacting with the Redis cluster. 43 | 44 | *Importance:* Medium 45 | 46 | *Type:* String 47 | 48 | *Default Value:* Standalone 49 | 50 | *Validator:* Matches: ``Standalone``, ``Cluster`` 51 | 52 | 53 | 54 | ##### `redis.database` 55 | 56 | Redis database to connect to. 57 | 58 | *Importance:* Medium 59 | 60 | *Type:* Int 61 | 62 | *Default Value:* 1 63 | 64 | 65 | 66 | ##### `redis.operation.timeout.ms` 67 | 68 | The amount of time in milliseconds before an operation is marked as timed out. 69 | 70 | *Importance:* Medium 71 | 72 | *Type:* Long 73 | 74 | *Default Value:* 10000 75 | 76 | *Validator:* [100,...] 77 | 78 | 79 | 80 | ##### `redis.password` 81 | 82 | Password used to connect to Redis. 83 | 84 | *Importance:* Medium 85 | 86 | *Type:* Password 87 | 88 | *Default Value:* [hidden] 89 | 90 | 91 | 92 | ##### `redis.ssl.enabled` 93 | 94 | Flag to determine if SSL is enabled. 95 | 96 | *Importance:* Medium 97 | 98 | *Type:* Boolean 99 | 100 | *Default Value:* false 101 | 102 | 103 | 104 | ##### `redis.ssl.keystore.password` 105 | 106 | The password for the SSL keystore. 107 | 108 | *Importance:* Medium 109 | 110 | *Type:* Password 111 | 112 | *Default Value:* [hidden] 113 | 114 | 115 | 116 | ##### `redis.ssl.keystore.path` 117 | 118 | The path to the SSL keystore. 119 | 120 | *Importance:* Medium 121 | 122 | *Type:* String 123 | 124 | 125 | 126 | ##### `redis.ssl.truststore.password` 127 | 128 | The password for the SSL truststore. 
129 | 130 | *Importance:* Medium 131 | 132 | *Type:* Password 133 | 134 | *Default Value:* [hidden] 135 | 136 | 137 | 138 | ##### `redis.ssl.truststore.path` 139 | 140 | The path to the SSL truststore. 141 | 142 | *Importance:* Medium 143 | 144 | *Type:* String 145 | 146 | 147 | 148 | ##### `redis.auto.reconnect.enabled` 149 | 150 | Flag to determine if the Redis client should automatically reconnect. 151 | 152 | *Importance:* Low 153 | 154 | *Type:* Boolean 155 | 156 | *Default Value:* true 157 | 158 | 159 | 160 | ##### `redis.charset` 161 | 162 | The character set to use for String key and values. 163 | 164 | *Importance:* Low 165 | 166 | *Type:* String 167 | 168 | *Default Value:* UTF-8 169 | 170 | *Validator:* Valid values: 'Big5', 'Big5-HKSCS', 'CESU-8', 'EUC-JP', 'EUC-KR', 'GB18030', 'GB2312', 'GBK', 'IBM-Thai', 'IBM00858', 'IBM01140', 'IBM01141', 'IBM01142', 'IBM01143', 'IBM01144', 'IBM01145', 'IBM01146', 'IBM01147', 'IBM01148', 'IBM01149', 'IBM037', 'IBM1026', 'IBM1047', 'IBM273', 'IBM277', 'IBM278', 'IBM280', 'IBM284', 'IBM285', 'IBM290', 'IBM297', 'IBM420', 'IBM424', 'IBM437', 'IBM500', 'IBM775', 'IBM850', 'IBM852', 'IBM855', 'IBM857', 'IBM860', 'IBM861', 'IBM862', 'IBM863', 'IBM864', 'IBM865', 'IBM866', 'IBM868', 'IBM869', 'IBM870', 'IBM871', 'IBM918', 'ISO-2022-CN', 'ISO-2022-JP', 'ISO-2022-JP-2', 'ISO-2022-KR', 'ISO-8859-1', 'ISO-8859-13', 'ISO-8859-15', 'ISO-8859-2', 'ISO-8859-3', 'ISO-8859-4', 'ISO-8859-5', 'ISO-8859-6', 'ISO-8859-7', 'ISO-8859-8', 'ISO-8859-9', 'JIS_X0201', 'JIS_X0212-1990', 'KOI8-R', 'KOI8-U', 'Shift_JIS', 'TIS-620', 'US-ASCII', 'UTF-16', 'UTF-16BE', 'UTF-16LE', 'UTF-32', 'UTF-32BE', 'UTF-32LE', 'UTF-8', 'X-UTF-32BE-BOM', 'X-UTF-32LE-BOM', 'windows-1250', 'windows-1251', 'windows-1252', 'windows-1253', 'windows-1254', 'windows-1255', 'windows-1256', 'windows-1257', 'windows-1258', 'windows-31j', 'x-Big5-HKSCS-2001', 'x-Big5-Solaris', 'x-COMPOUND_TEXT', 'x-EUC-TW', 'x-IBM1006', 'x-IBM1025', 'x-IBM1046', 'x-IBM1097', 'x-IBM1098', 
'x-IBM1112', 'x-IBM1122', 'x-IBM1123', 'x-IBM1124', 'x-IBM1364', 'x-IBM1381', 'x-IBM1383', 'x-IBM300', 'x-IBM33722', 'x-IBM737', 'x-IBM833', 'x-IBM834', 'x-IBM856', 'x-IBM874', 'x-IBM875', 'x-IBM921', 'x-IBM922', 'x-IBM930', 'x-IBM933', 'x-IBM935', 'x-IBM937', 'x-IBM939', 'x-IBM942', 'x-IBM942C', 'x-IBM943', 'x-IBM943C', 'x-IBM948', 'x-IBM949', 'x-IBM949C', 'x-IBM950', 'x-IBM964', 'x-IBM970', 'x-ISCII91', 'x-ISO-2022-CN-CNS', 'x-ISO-2022-CN-GB', 'x-JIS0208', 'x-JISAutoDetect', 'x-Johab', 'x-MS932_0213', 'x-MS950-HKSCS', 'x-MS950-HKSCS-XP', 'x-MacArabic', 'x-MacCentralEurope', 'x-MacCroatian', 'x-MacCyrillic', 'x-MacDingbat', 'x-MacGreek', 'x-MacHebrew', 'x-MacIceland', 'x-MacRoman', 'x-MacRomania', 'x-MacSymbol', 'x-MacThai', 'x-MacTurkish', 'x-MacUkraine', 'x-PCK', 'x-SJIS_0213', 'x-UTF-16LE-BOM', 'x-euc-jp-linux', 'x-eucJP-Open', 'x-iso-8859-11', 'x-mswin-936', 'x-windows-50220', 'x-windows-50221', 'x-windows-874', 'x-windows-949', 'x-windows-950', 'x-windows-iso2022jp' 171 | 172 | 173 | 174 | ##### `redis.request.queue.size` 175 | 176 | The maximum number of queued requests to Redis. 177 | 178 | *Importance:* Low 179 | 180 | *Type:* Int 181 | 182 | *Default Value:* 2147483647 183 | 184 | 185 | 186 | ##### `redis.socket.connect.timeout.ms` 187 | 188 | The amount of time in milliseconds to wait before timing out a socket when connecting. 189 | 190 | *Importance:* Low 191 | 192 | *Type:* Int 193 | 194 | *Default Value:* 10000 195 | 196 | 197 | 198 | ##### `redis.socket.keep.alive.enabled` 199 | 200 | Flag to enable a keepalive to Redis. 201 | 202 | *Importance:* Low 203 | 204 | *Type:* Boolean 205 | 206 | *Default Value:* false 207 | 208 | 209 | 210 | ##### `redis.socket.tcp.no.delay.enabled` 211 | 212 | Flag to enable TCP no delay should be used. 213 | 214 | *Importance:* Low 215 | 216 | *Type:* Boolean 217 | 218 | *Default Value:* true 219 | 220 | 221 | 222 | ##### `redis.ssl.provider` 223 | 224 | The SSL provider to use. 
 225 | 226 | *Importance:* Low 227 | 228 | *Type:* String 229 | 230 | *Default Value:* JDK 231 | 232 | *Validator:* Matches: ``OPENSSL``, ``JDK`` 233 | 234 | 235 | 236 | 237 | 238 | #### Examples 239 | 240 | 241 | ##### Standalone Example 242 | 243 | This configuration is used typically along with [standalone mode](http://docs.confluent.io/current/connect/concepts.html#standalone-workers). 244 | 245 | ```properties 246 | name=RedisSinkConnector1 247 | connector.class=com.github.jcustenborder.kafka.connect.redis.RedisSinkConnector 248 | tasks.max=1 249 | topics=< Required Configuration > 250 | ``` 251 | 252 | ##### Distributed Example 253 | 254 | This configuration is used typically along with [distributed mode](http://docs.confluent.io/current/connect/concepts.html#distributed-workers). 255 | Write the following json to `connector.json`, configure all of the required values, and use the command below to 256 | post the configuration to one of the distributed connect worker(s). 257 | 258 | ```json 259 | { 260 | "config" : { 261 | "name" : "RedisSinkConnector1", 262 | "connector.class" : "com.github.jcustenborder.kafka.connect.redis.RedisSinkConnector", 263 | "tasks.max" : "1", 264 | "topics" : "< Required Configuration >" 265 | } 266 | } 267 | ``` 268 | 269 | Use curl to post the configuration to one of the Kafka Connect Workers. Change `http://localhost:8083/` to the endpoint of 270 | one of your Kafka Connect worker(s). 271 | 272 | Create a new instance. 273 | ```bash 274 | curl -s -X POST -H 'Content-Type: application/json' --data @connector.json http://localhost:8083/connectors 275 | ``` 276 | 277 | Update an existing instance. 
278 | ```bash 279 | curl -s -X PUT -H 'Content-Type: application/json' --data @connector.json http://localhost:8083/connectors/TestSinkConnector1/config 280 | ``` 281 | 282 | 283 | 284 | -------------------------------------------------------------------------------- /bin/debug.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | : ${SUSPEND:='n'} 19 | 20 | set -e 21 | 22 | mvn clean package 23 | export KAFKA_DEBUG='y' 24 | connect-standalone config/connect-avro-docker.properties config/sink.properties -------------------------------------------------------------------------------- /config/connect-avro-docker.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | # Sample configuration for a standalone Kafka Connect worker that uses Avro serialization and 18 | # integrates with the Schema Registry. This sample configuration assumes a local installation of 19 | # Confluent Platform with all services running on their default ports. 20 | # Bootstrap Kafka servers. If multiple servers are specified, they should be comma-separated. 21 | bootstrap.servers=kafka:9092 22 | key.converter=io.confluent.connect.avro.AvroConverter 23 | key.converter.schema.registry.url=http://schema-registry:8081 24 | value.converter=io.confluent.connect.avro.AvroConverter 25 | value.converter.schema.registry.url=http://schema-registry:8081 26 | internal.key.converter=org.apache.kafka.connect.json.JsonConverter 27 | internal.value.converter=org.apache.kafka.connect.json.JsonConverter 28 | internal.key.converter.schemas.enable=false 29 | internal.value.converter.schemas.enable=false 30 | offset.storage.file.filename=/tmp/connect.offsets 31 | rest.port=10000 32 | plugin.path=target/components/packages -------------------------------------------------------------------------------- /config/sink.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | name=sink 18 | topics=twitter 19 | tasks.max=1 20 | connector.class=com.github.jcustenborder.kafka.connect.redis.RedisSinkConnector 21 | key.converter=org.apache.kafka.connect.converters.ByteArrayConverter 22 | value.converter=org.apache.kafka.connect.converters.ByteArrayConverter -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # 16 | 17 | version: "2" 18 | services: 19 | zookeeper: 20 | image: confluentinc/cp-zookeeper:5.2.1 21 | environment: 22 | ZOOKEEPER_CLIENT_PORT: 2181 23 | zk_id: "1" 24 | ports: 25 | - "2181:2181" 26 | kafka: 27 | hostname: kafka 28 | image: confluentinc/cp-kafka:5.2.1 29 | depends_on: 30 | - zookeeper 31 | ports: 32 | - "9092:9092" 33 | environment: 34 | KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181" 35 | KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://:9092" 36 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 37 | schema-registry: 38 | image: confluentinc/cp-schema-registry:5.2.1 39 | depends_on: 40 | - kafka 41 | - zookeeper 42 | ports: 43 | - "8081:8081" 44 | environment: 45 | SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: "zookeeper:2181" 46 | SCHEMA_REGISTRY_HOST_NAME: schema-registry 47 | redis: 48 | image: redis 49 | ports: 50 | - "6379:6379" 51 | -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 19 | 22 | 4.0.0 23 | 24 | com.github.jcustenborder.kafka.connect 25 | kafka-connect-parent 26 | 2.8.0-1 27 | 28 | kafka-connect-redis 29 | 0.0.2-SNAPSHOT 30 | kafka-connect-redis 31 | A Kafka Connect plugin for interacting with Redis. 
32 | https://github.com/jcustenborder/kafka-connect-redis 33 | 2017 34 | 35 | 36 | The Apache License, Version 2.0 37 | https://www.apache.org/licenses/LICENSE-2.0 38 | repo 39 | 40 | 41 | 42 | 43 | jcustenborder 44 | Jeremy Custenborder 45 | https://github.com/jcustenborder 46 | 47 | Committer 48 | 49 | 50 | 51 | 52 | scm:git:https://github.com/jcustenborder/kafka-connect-redis.git 53 | scm:git:git@github.com:jcustenborder/kafka-connect-redis.git 54 | https://github.com/jcustenborder/kafka-connect-redis 55 | 56 | 57 | github 58 | https://github.com/jcustenborder/kafka-connect-redis/issues 59 | 60 | 61 | 62 | io.lettuce 63 | lettuce-core 64 | 5.2.1.RELEASE 65 | 66 | 67 | com.github.jcustenborder.kafka.connect 68 | connect-utils-jackson 69 | 70 | 71 | 72 | 73 | 74 | org.apache.maven.plugins 75 | maven-javadoc-plugin 76 | 77 | 8 78 | 79 | 80 | 81 | maven-assembly-plugin 82 | 83 | true 84 | 85 | 86 | 87 | io.confluent 88 | kafka-connect-maven-plugin 89 | 0.11.2 90 | 91 | 92 | hub 93 | 94 | kafka-connect 95 | 96 | 97 | true 98 | https://docs.confluent.io/current/connect/kafka-connect-redis/ 99 | 100 | 101 | sink 102 | 103 | 104 | key 105 | value 106 | cache 107 | redis 108 | 109 | Kafka Connect Redis 110 | Confluent, Inc. 111 | https://docs.confluent.io/current/connect/kafka-connect-redis/ 112 | 113 | supported by Confluent as part of a 114 | Confluent Platform subscription.]]> 115 | 116 | 117 | io.netty:* 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | -------------------------------------------------------------------------------- /src/main/java/com/github/jcustenborder/kafka/connect/redis/RedisConnectorConfig.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.github.jcustenborder.kafka.connect.redis; 17 | 18 | import com.github.jcustenborder.kafka.connect.utils.config.ConfigKeyBuilder; 19 | import com.github.jcustenborder.kafka.connect.utils.config.ConfigUtils; 20 | import com.github.jcustenborder.kafka.connect.utils.config.ValidEnum; 21 | import com.google.common.base.Strings; 22 | import com.google.common.net.HostAndPort; 23 | import io.lettuce.core.ClientOptions; 24 | import io.lettuce.core.RedisURI; 25 | import io.lettuce.core.SocketOptions; 26 | import org.apache.kafka.common.config.AbstractConfig; 27 | import org.apache.kafka.common.config.ConfigDef; 28 | import org.slf4j.Logger; 29 | import org.slf4j.LoggerFactory; 30 | 31 | import java.io.File; 32 | import java.util.ArrayList; 33 | import java.util.Arrays; 34 | import java.util.List; 35 | import java.util.Map; 36 | 37 | class RedisConnectorConfig extends AbstractConfig { 38 | private static final Logger log = LoggerFactory.getLogger(RedisConnectorConfig.class); 39 | public static final String HOSTS_CONFIG = "redis.hosts"; 40 | static final String HOSTS_DOC = "The Redis hosts to connect to."; 41 | public static final String SSL_CONFIG = "redis.ssl.enabled"; 42 | static final String SSL_DOC = "Flag to determine if SSL is enabled."; 43 | public static final String PASSWORD_CONFIG = "redis.password"; 44 | static final String PASSWORD_DOC = "Password used to connect to Redis."; 45 | public static final String DATABASE_CONFIG = "redis.database"; 46 | static final String DATABASE_DOC = "Redis database to 
connect to."; 47 | public static final String CLIENT_MODE_CONFIG = "redis.client.mode"; 48 | static final String CLIENT_MODE_DOC = "The client mode to use when interacting with the Redis " + 49 | "cluster."; 50 | public static final String AUTO_RECONNECT_ENABLED_CONFIG = "redis.auto.reconnect.enabled"; 51 | static final String AUTO_RECONNECT_ENABLED_DOC = "Flag to determine if the Redis client should " + 52 | "automatically reconnect."; 53 | public static final String REQUEST_QUEUE_SIZE_CONFIG = "redis.request.queue.size"; 54 | static final String REQUEST_QUEUE_SIZE_DOC = "The maximum number of queued requests to Redis."; 55 | public static final String SOCKET_TCP_NO_DELAY_CONFIG = "redis.socket.tcp.no.delay.enabled"; 56 | static final String SOCKET_TCP_NO_DELAY_DOC = "Flag to enable TCP no delay should be used."; 57 | public static final String SOCKET_KEEP_ALIVE_CONFIG = "redis.socket.keep.alive.enabled"; 58 | static final String SOCKET_KEEP_ALIVE_DOC = "Flag to enable a keepalive to Redis."; 59 | public static final String SOCKET_CONNECT_TIMEOUT_CONFIG = "redis.socket.connect.timeout.ms"; 60 | static final String SOCKET_CONNECT_TIMEOUT_DOC = "The amount of time in milliseconds to wait " + 61 | "before timing out a socket when connecting."; 62 | public static final String SSL_PROVIDER_CONFIG = "redis.ssl.provider"; 63 | static final String SSL_PROVIDER_DOC = "The SSL provider to use."; 64 | public static final String SSL_KEYSTORE_PATH_CONFIG = "redis.ssl.keystore.path"; 65 | static final String SSL_KEYSTORE_PATH_DOC = "The path to the SSL keystore."; 66 | public static final String SSL_KEYSTORE_PASSWORD_CONFIG = "redis.ssl.keystore.password"; 67 | static final String SSL_KEYSTORE_PASSWORD_DOC = "The password for the SSL keystore."; 68 | public static final String SSL_TRUSTSTORE_PATH_CONFIG = "redis.ssl.truststore.path"; 69 | static final String SSL_TRUSTSTORE_PATH_DOC = "The path to the SSL truststore."; 70 | public static final String 
SSL_TRUSTSTORE_PASSWORD_CONFIG = "redis.ssl.truststore.password"; 71 | static final String SSL_TRUSTSTORE_PASSWORD_DOC = "The password for the SSL truststore."; 72 | 73 | public final static String CONNECTION_ATTEMPTS_CONF = "redis.connection.attempts"; 74 | public final static String CONNECTION_ATTEMPTS_DOC = "The number of attempt when connecting to redis."; 75 | 76 | public final static String CONNECTION_RETRY_DELAY_MS_CONF = "redis.connection.retry.delay.ms"; 77 | public final static String CONNECTION_RETRY_DELAY_MS_DOC = "The amount of milliseconds to wait between redis connection attempts."; 78 | 79 | public final ClientMode clientMode; 80 | public final List hosts; 81 | 82 | public final String password; 83 | public final int database; 84 | public final boolean autoReconnectEnabled; 85 | public final int requestQueueSize; 86 | 87 | public final boolean tcpNoDelay; 88 | public final boolean keepAliveEnabled; 89 | public final int connectTimeout; 90 | 91 | public final boolean sslEnabled; 92 | public final RedisSslProvider sslProvider; 93 | public final File keystorePath; 94 | public final String keystorePassword; 95 | public final File truststorePath; 96 | public final String truststorePassword; 97 | public final int retryDelay; 98 | public final int maxAttempts; 99 | 100 | 101 | public RedisConnectorConfig(ConfigDef config, Map originals) { 102 | super(config, originals); 103 | this.hosts = ConfigUtils.hostAndPorts(this, HOSTS_CONFIG, 6379); 104 | this.sslEnabled = getBoolean(SSL_CONFIG); 105 | this.password = getPassword(PASSWORD_CONFIG).value(); 106 | this.database = getInt(DATABASE_CONFIG); 107 | this.clientMode = ConfigUtils.getEnum(ClientMode.class, this, CLIENT_MODE_CONFIG); 108 | this.autoReconnectEnabled = getBoolean(AUTO_RECONNECT_ENABLED_CONFIG); 109 | this.requestQueueSize = getInt(REQUEST_QUEUE_SIZE_CONFIG); 110 | this.keepAliveEnabled = getBoolean(SOCKET_KEEP_ALIVE_CONFIG); 111 | this.tcpNoDelay = getBoolean(SOCKET_TCP_NO_DELAY_CONFIG); 112 | 
this.connectTimeout = getInt(SOCKET_CONNECT_TIMEOUT_CONFIG); 113 | this.sslProvider = ConfigUtils.getEnum(RedisSslProvider.class, this, SSL_PROVIDER_CONFIG); 114 | final String keystorePath = getString(SSL_KEYSTORE_PATH_CONFIG); 115 | final String trustStorePath = getString(SSL_TRUSTSTORE_PATH_CONFIG); 116 | this.keystorePath = Strings.isNullOrEmpty(keystorePath) ? null : new File(keystorePath); 117 | this.truststorePath = Strings.isNullOrEmpty(trustStorePath) ? null : new File(trustStorePath); 118 | final String keystorePassword = getPassword(SSL_KEYSTORE_PASSWORD_CONFIG).value(); 119 | final String trustPassword = getPassword(SSL_TRUSTSTORE_PASSWORD_CONFIG).value(); 120 | this.keystorePassword = Strings.isNullOrEmpty(keystorePassword) ? null : keystorePassword; 121 | this.truststorePassword = Strings.isNullOrEmpty(trustPassword) ? null : trustPassword; 122 | this.maxAttempts = getInt(CONNECTION_ATTEMPTS_CONF); 123 | this.retryDelay = getInt(CONNECTION_RETRY_DELAY_MS_CONF); 124 | } 125 | 126 | public static ConfigDef config() { 127 | return new ConfigDef() 128 | .define( 129 | ConfigKeyBuilder.of(HOSTS_CONFIG, ConfigDef.Type.LIST) 130 | .documentation(HOSTS_DOC) 131 | .defaultValue(Arrays.asList("localhost:6379")) 132 | .importance(ConfigDef.Importance.HIGH) 133 | .build() 134 | ).define( 135 | ConfigKeyBuilder.of(CLIENT_MODE_CONFIG, ConfigDef.Type.STRING) 136 | .documentation(CLIENT_MODE_DOC) 137 | .defaultValue(ClientMode.Standalone.toString()) 138 | .validator(ValidEnum.of(ClientMode.class)) 139 | .importance(ConfigDef.Importance.MEDIUM) 140 | .build() 141 | ).define( 142 | ConfigKeyBuilder.of(SSL_CONFIG, ConfigDef.Type.BOOLEAN) 143 | .documentation(SSL_DOC) 144 | .defaultValue(false) 145 | .importance(ConfigDef.Importance.MEDIUM) 146 | .build() 147 | ).define( 148 | ConfigKeyBuilder.of(PASSWORD_CONFIG, ConfigDef.Type.PASSWORD) 149 | .documentation(PASSWORD_DOC) 150 | .defaultValue("") 151 | .importance(ConfigDef.Importance.MEDIUM) 152 | .build() 153 | 
).define( 154 | ConfigKeyBuilder.of(DATABASE_CONFIG, ConfigDef.Type.INT) 155 | .documentation(DATABASE_DOC) 156 | .defaultValue(1) 157 | .importance(ConfigDef.Importance.MEDIUM) 158 | .build() 159 | ).define( 160 | ConfigKeyBuilder.of(AUTO_RECONNECT_ENABLED_CONFIG, ConfigDef.Type.BOOLEAN) 161 | .documentation(AUTO_RECONNECT_ENABLED_DOC) 162 | .defaultValue(ClientOptions.DEFAULT_AUTO_RECONNECT) 163 | .importance(ConfigDef.Importance.LOW) 164 | .build() 165 | ).define( 166 | ConfigKeyBuilder.of(REQUEST_QUEUE_SIZE_CONFIG, ConfigDef.Type.INT) 167 | .documentation(REQUEST_QUEUE_SIZE_DOC) 168 | .defaultValue(ClientOptions.DEFAULT_REQUEST_QUEUE_SIZE) 169 | .importance(ConfigDef.Importance.LOW) 170 | .build() 171 | ).define( 172 | ConfigKeyBuilder.of(SOCKET_TCP_NO_DELAY_CONFIG, ConfigDef.Type.BOOLEAN) 173 | .documentation(SOCKET_TCP_NO_DELAY_DOC) 174 | .defaultValue(true) 175 | .importance(ConfigDef.Importance.LOW) 176 | .build() 177 | ).define( 178 | ConfigKeyBuilder.of(SOCKET_KEEP_ALIVE_CONFIG, ConfigDef.Type.BOOLEAN) 179 | .documentation(SOCKET_KEEP_ALIVE_DOC) 180 | .defaultValue(SocketOptions.DEFAULT_SO_KEEPALIVE) 181 | .importance(ConfigDef.Importance.LOW) 182 | .build() 183 | ).define( 184 | ConfigKeyBuilder.of(SOCKET_CONNECT_TIMEOUT_CONFIG, ConfigDef.Type.INT) 185 | .documentation(SOCKET_CONNECT_TIMEOUT_DOC) 186 | .defaultValue((int) SocketOptions.DEFAULT_CONNECT_TIMEOUT_DURATION.toMillis()) 187 | .importance(ConfigDef.Importance.LOW) 188 | .build() 189 | ).define( 190 | ConfigKeyBuilder.of(SSL_PROVIDER_CONFIG, ConfigDef.Type.STRING) 191 | .documentation(SSL_PROVIDER_DOC) 192 | .defaultValue(RedisSslProvider.JDK.toString()) 193 | .importance(ConfigDef.Importance.LOW) 194 | .validator(ValidEnum.of(RedisSslProvider.class)) 195 | .build() 196 | ).define( 197 | ConfigKeyBuilder.of(SSL_KEYSTORE_PATH_CONFIG, ConfigDef.Type.STRING) 198 | .documentation(SSL_KEYSTORE_PATH_DOC) 199 | .defaultValue("") 200 | .importance(ConfigDef.Importance.MEDIUM) 201 | .build() 202 | 
).define( 203 | ConfigKeyBuilder.of(SSL_KEYSTORE_PASSWORD_CONFIG, ConfigDef.Type.PASSWORD) 204 | .documentation(SSL_KEYSTORE_PASSWORD_DOC) 205 | .defaultValue("") 206 | .importance(ConfigDef.Importance.MEDIUM) 207 | .build() 208 | ).define( 209 | ConfigKeyBuilder.of(SSL_TRUSTSTORE_PATH_CONFIG, ConfigDef.Type.STRING) 210 | .documentation(SSL_TRUSTSTORE_PATH_DOC) 211 | .defaultValue("") 212 | .importance(ConfigDef.Importance.MEDIUM) 213 | .build() 214 | ).define( 215 | ConfigKeyBuilder.of(SSL_TRUSTSTORE_PASSWORD_CONFIG, ConfigDef.Type.PASSWORD) 216 | .documentation(SSL_TRUSTSTORE_PASSWORD_DOC) 217 | .defaultValue("") 218 | .importance(ConfigDef.Importance.MEDIUM) 219 | .build() 220 | ).define( 221 | ConfigKeyBuilder.of(CONNECTION_ATTEMPTS_CONF, ConfigDef.Type.INT) 222 | .documentation(CONNECTION_ATTEMPTS_DOC) 223 | .defaultValue(3) 224 | .importance(ConfigDef.Importance.MEDIUM) 225 | .validator(ConfigDef.Range.atLeast(1)) 226 | .build() 227 | ).define( 228 | ConfigKeyBuilder.of(CONNECTION_RETRY_DELAY_MS_CONF, ConfigDef.Type.INT) 229 | .documentation(CONNECTION_RETRY_DELAY_MS_DOC) 230 | .defaultValue(2000) 231 | .validator(ConfigDef.Range.atLeast(100)) 232 | .importance(ConfigDef.Importance.MEDIUM) 233 | .build() 234 | ); 235 | } 236 | 237 | public List redisURIs() { 238 | List result = new ArrayList<>(); 239 | 240 | for (HostAndPort host : this.hosts) { 241 | RedisURI.Builder builder = RedisURI.builder(); 242 | builder.withHost(host.getHost()); 243 | builder.withPort(host.getPort()); 244 | builder.withDatabase(this.database); 245 | if (!Strings.isNullOrEmpty(this.password)) { 246 | builder.withPassword(this.password); 247 | } 248 | builder.withSsl(this.sslEnabled); 249 | result.add(builder.build()); 250 | } 251 | 252 | return result; 253 | } 254 | 255 | public enum ClientMode { 256 | Standalone, 257 | Cluster 258 | } 259 | 260 | public enum RedisSslProvider { 261 | OPENSSL, 262 | JDK 263 | } 264 | } 265 | 
-------------------------------------------------------------------------------- /src/main/java/com/github/jcustenborder/kafka/connect/redis/RedisSession.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.github.jcustenborder.kafka.connect.redis; 17 | 18 | import io.lettuce.core.AbstractRedisClient; 19 | import io.lettuce.core.api.StatefulConnection; 20 | import io.lettuce.core.cluster.api.async.RedisClusterAsyncCommands; 21 | 22 | public interface RedisSession extends AutoCloseable { 23 | AbstractRedisClient client(); 24 | 25 | StatefulConnection connection(); 26 | 27 | RedisClusterAsyncCommands asyncCommands(); 28 | } 29 | -------------------------------------------------------------------------------- /src/main/java/com/github/jcustenborder/kafka/connect/redis/RedisSessionFactory.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.github.jcustenborder.kafka.connect.redis; 17 | 18 | interface RedisSessionFactory { 19 | RedisSession create(RedisConnectorConfig config); 20 | } 21 | -------------------------------------------------------------------------------- /src/main/java/com/github/jcustenborder/kafka/connect/redis/RedisSessionFactoryImpl.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package com.github.jcustenborder.kafka.connect.redis; 17 | 18 | import io.lettuce.core.AbstractRedisClient; 19 | import io.lettuce.core.ClientOptions; 20 | import io.lettuce.core.RedisClient; 21 | import io.lettuce.core.RedisConnectionException; 22 | import io.lettuce.core.SocketOptions; 23 | import io.lettuce.core.SslOptions; 24 | import io.lettuce.core.api.StatefulConnection; 25 | import io.lettuce.core.api.StatefulRedisConnection; 26 | import io.lettuce.core.cluster.ClusterClientOptions; 27 | import io.lettuce.core.cluster.RedisClusterClient; 28 | import io.lettuce.core.cluster.api.StatefulRedisClusterConnection; 29 | import io.lettuce.core.cluster.api.async.RedisClusterAsyncCommands; 30 | import io.lettuce.core.codec.ByteArrayCodec; 31 | import io.lettuce.core.codec.RedisCodec; 32 | import org.apache.kafka.common.utils.Time; 33 | import org.slf4j.Logger; 34 | import org.slf4j.LoggerFactory; 35 | 36 | import java.time.Duration; 37 | 38 | class RedisSessionFactoryImpl implements RedisSessionFactory { 39 | Time time = Time.SYSTEM; 40 | 41 | private static final Logger log = LoggerFactory.getLogger(RedisSessionFactoryImpl.class); 42 | 43 | @Override 44 | public RedisSession create(RedisConnectorConfig config) { 45 | int attempts = 0; 46 | RedisSession result; 47 | 48 | while (true) { 49 | attempts++; 50 | try { 51 | log.info("Creating Redis session. Attempt {} of {}", attempts, config.maxAttempts); 52 | result = RedisSessionImpl.create(config); 53 | break; 54 | } catch (RedisConnectionException ex) { 55 | if (attempts == config.maxAttempts) { 56 | throw ex; 57 | } else { 58 | log.warn("Exception thrown connecting to redis. 
Waiting {} ms to try again.", config.retryDelay); 59 | this.time.sleep(config.retryDelay); 60 | } 61 | } 62 | } 63 | 64 | return result; 65 | } 66 | 67 | private static class RedisSessionImpl implements RedisSession { 68 | private static final Logger log = LoggerFactory.getLogger(RedisSessionImpl.class); 69 | 70 | private final AbstractRedisClient client; 71 | private final StatefulConnection connection; 72 | private final RedisClusterAsyncCommands asyncCommands; 73 | private final RedisConnectorConfig config; 74 | 75 | RedisSessionImpl(AbstractRedisClient client, StatefulConnection connection, RedisClusterAsyncCommands asyncCommands, RedisConnectorConfig config) { 76 | this.client = client; 77 | this.connection = connection; 78 | this.asyncCommands = asyncCommands; 79 | this.config = config; 80 | } 81 | 82 | public AbstractRedisClient client() { 83 | return this.client; 84 | } 85 | 86 | public StatefulConnection connection() { 87 | return this.connection; 88 | } 89 | 90 | public RedisClusterAsyncCommands asyncCommands() { 91 | return this.asyncCommands; 92 | } 93 | 94 | public static RedisSessionImpl create(RedisConnectorConfig config) { 95 | RedisSessionImpl result; 96 | final RedisCodec codec = new ByteArrayCodec(); 97 | 98 | final SslOptions sslOptions; 99 | 100 | if (config.sslEnabled) { 101 | SslOptions.Builder builder = SslOptions.builder(); 102 | switch (config.sslProvider) { 103 | case JDK: 104 | builder.jdkSslProvider(); 105 | break; 106 | case OPENSSL: 107 | builder.openSslProvider(); 108 | break; 109 | default: 110 | throw new UnsupportedOperationException( 111 | String.format( 112 | "%s is not a supported value for %s.", 113 | config.sslProvider, 114 | RedisConnectorConfig.SSL_PROVIDER_CONFIG 115 | ) 116 | ); 117 | } 118 | if (null != config.keystorePath) { 119 | if (null != config.keystorePassword) { 120 | builder.keystore(config.keystorePath, config.keystorePassword.toCharArray()); 121 | } else { 122 | builder.keystore(config.keystorePath); 123 | } 
124 | } 125 | if (null != config.truststorePath) { 126 | if (null != config.truststorePassword) { 127 | builder.truststore(config.truststorePath, config.keystorePassword); 128 | } else { 129 | builder.truststore(config.truststorePath); 130 | } 131 | } 132 | sslOptions = builder.build(); 133 | } else { 134 | sslOptions = null; 135 | } 136 | 137 | final SocketOptions socketOptions = SocketOptions.builder() 138 | .tcpNoDelay(config.tcpNoDelay) 139 | .connectTimeout(Duration.ofMillis(config.connectTimeout)) 140 | .keepAlive(config.keepAliveEnabled) 141 | .build(); 142 | 143 | 144 | if (RedisConnectorConfig.ClientMode.Cluster == config.clientMode) { 145 | ClusterClientOptions.Builder clientOptions = ClusterClientOptions.builder() 146 | .requestQueueSize(config.requestQueueSize) 147 | .autoReconnect(config.autoReconnectEnabled); 148 | if (config.sslEnabled) { 149 | clientOptions.sslOptions(sslOptions); 150 | } 151 | final RedisClusterClient client = RedisClusterClient.create(config.redisURIs()); 152 | client.setOptions(clientOptions.build()); 153 | 154 | final StatefulRedisClusterConnection connection = client.connect(codec); 155 | result = new RedisSessionImpl(client, connection, connection.async(), config); 156 | } else if (RedisConnectorConfig.ClientMode.Standalone == config.clientMode) { 157 | final ClientOptions.Builder clientOptions = ClientOptions.builder() 158 | .socketOptions(socketOptions) 159 | .requestQueueSize(config.requestQueueSize) 160 | .autoReconnect(config.autoReconnectEnabled); 161 | if (config.sslEnabled) { 162 | clientOptions.sslOptions(sslOptions); 163 | } 164 | final RedisClient client = RedisClient.create(config.redisURIs().get(0)); 165 | client.setOptions(clientOptions.build()); 166 | final StatefulRedisConnection connection = client.connect(codec); 167 | result = new RedisSessionImpl(client, connection, connection.async(), config); 168 | } else { 169 | throw new UnsupportedOperationException( 170 | String.format("%s is not supported", 
config.clientMode) 171 | ); 172 | } 173 | 174 | return result; 175 | } 176 | 177 | 178 | @Override 179 | public void close() throws Exception { 180 | this.connection.close(); 181 | this.client.shutdown(); 182 | } 183 | } 184 | } 185 | -------------------------------------------------------------------------------- /src/main/java/com/github/jcustenborder/kafka/connect/redis/RedisSinkConnector.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package com.github.jcustenborder.kafka.connect.redis; 17 | 18 | import com.github.jcustenborder.kafka.connect.utils.VersionUtil; 19 | import com.github.jcustenborder.kafka.connect.utils.config.Description; 20 | import com.github.jcustenborder.kafka.connect.utils.config.DocumentationImportant; 21 | import com.github.jcustenborder.kafka.connect.utils.config.DocumentationNote; 22 | import com.github.jcustenborder.kafka.connect.utils.config.TaskConfigs; 23 | import com.github.jcustenborder.kafka.connect.utils.config.Title; 24 | import org.apache.kafka.common.config.ConfigDef; 25 | import org.apache.kafka.connect.connector.Task; 26 | import org.apache.kafka.connect.sink.SinkConnector; 27 | 28 | import java.util.List; 29 | import java.util.Map; 30 | 31 | @Title("Redis Sink Connector") 32 | @Description("The Redis Sink Connector is used to write data from Kafka to a Redis cache.") 33 | @DocumentationImportant("This connector expects records from Kafka to have a key and value that are " + 34 | "stored as bytes or a string. If your data is already in Kafka in the format that you want in " + 35 | "Redis consider using the ByteArrayConverter or the StringConverter for this connector. Keep in " + 36 | "this does not need to be configured in the worker properties and can be configured at the " + 37 | "connector level. If your data is not sitting in Kafka in the format you wish to persist in Redis " + 38 | "consider using a Single Message Transformation to convert the data to a byte or string representation " + 39 | "before it is written to Redis.") 40 | @DocumentationNote("This connector supports deletes. 
If the record stored in Kafka has a null value, " + 41 | "this connector will send a delete with the corresponding key to Redis.") 42 | public class RedisSinkConnector extends SinkConnector { 43 | @Override 44 | public String version() { 45 | return VersionUtil.version(this.getClass()); 46 | } 47 | 48 | Map settings; 49 | 50 | @Override 51 | public void start(Map settings) { 52 | new RedisSinkConnectorConfig(settings); 53 | this.settings = settings; 54 | } 55 | 56 | @Override 57 | public Class taskClass() { 58 | return RedisSinkTask.class; 59 | } 60 | 61 | @Override 62 | public List> taskConfigs(int count) { 63 | return TaskConfigs.multiple(this.settings, count); 64 | } 65 | 66 | @Override 67 | public void stop() { 68 | 69 | } 70 | 71 | @Override 72 | public ConfigDef config() { 73 | return RedisSinkConnectorConfig.config(); 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /src/main/java/com/github/jcustenborder/kafka/connect/redis/RedisSinkConnectorConfig.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package com.github.jcustenborder.kafka.connect.redis; 17 | 18 | import com.github.jcustenborder.kafka.connect.utils.config.ConfigKeyBuilder; 19 | import com.github.jcustenborder.kafka.connect.utils.config.recommenders.Recommenders; 20 | import com.github.jcustenborder.kafka.connect.utils.config.validators.Validators; 21 | import org.apache.kafka.common.config.ConfigDef; 22 | 23 | import java.nio.charset.Charset; 24 | import java.util.Map; 25 | 26 | class RedisSinkConnectorConfig extends RedisConnectorConfig { 27 | 28 | public final static String OPERATION_TIMEOUT_MS_CONF = "redis.operation.timeout.ms"; 29 | final static String OPERATION_TIMEOUT_MS_DOC = "The amount of time in milliseconds before an" + 30 | " operation is marked as timed out."; 31 | 32 | public final static String CHARSET_CONF = "redis.charset"; 33 | public final static String CHARSET_DOC = "The character set to use for String key and values."; 34 | 35 | public final long operationTimeoutMs; 36 | public final Charset charset; 37 | 38 | public RedisSinkConnectorConfig(Map originals) { 39 | super(config(), originals); 40 | this.operationTimeoutMs = getLong(OPERATION_TIMEOUT_MS_CONF); 41 | String charset = getString(CHARSET_CONF); 42 | this.charset = Charset.forName(charset); 43 | } 44 | 45 | public static ConfigDef config() { 46 | return RedisConnectorConfig.config() 47 | .define( 48 | ConfigKeyBuilder.of(OPERATION_TIMEOUT_MS_CONF, ConfigDef.Type.LONG) 49 | .documentation(OPERATION_TIMEOUT_MS_DOC) 50 | .defaultValue(10000L) 51 | .validator(ConfigDef.Range.atLeast(100L)) 52 | .importance(ConfigDef.Importance.MEDIUM) 53 | .build() 54 | ).define( 55 | ConfigKeyBuilder.of(CHARSET_CONF, ConfigDef.Type.STRING) 56 | .documentation(CHARSET_DOC) 57 | .defaultValue("UTF-8") 58 | .validator(Validators.validCharset()) 59 | .recommender(Recommenders.charset()) 60 | .importance(ConfigDef.Importance.LOW) 61 | .build() 62 | ); 63 | } 64 | } 65 | 
-------------------------------------------------------------------------------- /src/main/java/com/github/jcustenborder/kafka/connect/redis/RedisSinkTask.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.github.jcustenborder.kafka.connect.redis; 17 | 18 | import com.fasterxml.jackson.core.JsonProcessingException; 19 | import com.github.jcustenborder.kafka.connect.utils.VersionUtil; 20 | import com.github.jcustenborder.kafka.connect.utils.jackson.ObjectMapperFactory; 21 | import com.google.common.base.Charsets; 22 | import io.lettuce.core.KeyValue; 23 | import io.lettuce.core.RedisFuture; 24 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 25 | import org.apache.kafka.common.TopicPartition; 26 | import org.apache.kafka.connect.errors.DataException; 27 | import org.apache.kafka.connect.errors.RetriableException; 28 | import org.apache.kafka.connect.sink.SinkRecord; 29 | import org.apache.kafka.connect.sink.SinkTask; 30 | import org.slf4j.Logger; 31 | import org.slf4j.LoggerFactory; 32 | 33 | import java.io.IOException; 34 | import java.util.ArrayList; 35 | import java.util.Collection; 36 | import java.util.HashMap; 37 | import java.util.List; 38 | import java.util.Map; 39 | import java.util.Objects; 40 | import java.util.Set; 41 | 
import java.util.concurrent.ExecutionException; 42 | import java.util.concurrent.TimeUnit; 43 | import java.util.concurrent.TimeoutException; 44 | import java.util.stream.Collectors; 45 | 46 | public class RedisSinkTask extends SinkTask { 47 | private static final Logger log = LoggerFactory.getLogger(RedisSinkTask.class); 48 | 49 | @Override 50 | public String version() { 51 | return VersionUtil.version(this.getClass()); 52 | } 53 | 54 | RedisSinkConnectorConfig config; 55 | RedisSessionFactory sessionFactory = new RedisSessionFactoryImpl(); 56 | RedisSession session; 57 | 58 | static SinkOffsetState state(KeyValue input) { 59 | if (!input.hasValue()) { 60 | return null; 61 | } 62 | try { 63 | return ObjectMapperFactory.INSTANCE.readValue(input.getValue(), SinkOffsetState.class); 64 | } catch (IOException e) { 65 | throw new DataException(e); 66 | } 67 | } 68 | 69 | @Override 70 | public void start(Map settings) { 71 | this.config = new RedisSinkConnectorConfig(settings); 72 | this.session = this.sessionFactory.create(this.config); 73 | 74 | final Set assignment = this.context.assignment(); 75 | if (!assignment.isEmpty()) { 76 | final byte[][] partitionKeys = assignment.stream() 77 | .map(RedisSinkTask::redisOffsetKey) 78 | .map(s -> s.getBytes(Charsets.UTF_8)) 79 | .toArray(byte[][]::new); 80 | 81 | final RedisFuture>> partitionKeyFuture = this.session.asyncCommands().mget(partitionKeys); 82 | final List sinkOffsetStates; 83 | try { 84 | final List> partitionKey = partitionKeyFuture.get(this.config.operationTimeoutMs, TimeUnit.MILLISECONDS); 85 | sinkOffsetStates = partitionKey.stream() 86 | .map(RedisSinkTask::state) 87 | .filter(Objects::nonNull) 88 | .collect(Collectors.toList()); 89 | } catch (InterruptedException | ExecutionException | TimeoutException e) { 90 | throw new RetriableException(e); 91 | } 92 | Map partitionOffsets = new HashMap<>(assignment.size()); 93 | for (SinkOffsetState state : sinkOffsetStates) { 94 | 
partitionOffsets.put(state.topicPartition(), state.offset()); 95 | log.info("Requesting offset {} for {}", state.offset(), state.topicPartition()); 96 | } 97 | for (TopicPartition topicPartition : assignment) { 98 | if (!partitionOffsets.containsKey(topicPartition)) { 99 | partitionOffsets.put(topicPartition, 0L); 100 | log.info("Requesting offset {} for {}", 0L, topicPartition); 101 | } 102 | } 103 | this.context.offset(partitionOffsets); 104 | } 105 | } 106 | 107 | private byte[] toBytes(String source, Object input) { 108 | final byte[] result; 109 | 110 | if (input instanceof String) { 111 | String s = (String) input; 112 | result = s.getBytes(this.config.charset); 113 | } else if (input instanceof byte[]) { 114 | result = (byte[]) input; 115 | } else if (null == input) { 116 | result = null; 117 | } else { 118 | throw new DataException( 119 | String.format( 120 | "The %s for the record must be String or Bytes. Consider using the ByteArrayConverter " + 121 | "or StringConverter if the data is stored in Kafka in the format needed in Redis. " + 122 | "Another option is to use a single message transformation to transform the data before " + 123 | "it is written to Redis.", 124 | source 125 | ) 126 | ); 127 | } 128 | 129 | return result; 130 | } 131 | 132 | static String formatLocation(SinkRecord record) { 133 | return String.format( 134 | "topic = %s partition = %s offset = %s", 135 | record.topic(), 136 | record.kafkaPartition(), 137 | record.kafkaOffset() 138 | ); 139 | } 140 | 141 | @Override 142 | public void put(Collection records) { 143 | log.debug("put() - Processing {} record(s)", records.size()); 144 | List operations = new ArrayList<>(records.size()); 145 | 146 | SinkOperation operation = SinkOperation.NONE; 147 | 148 | for (SinkRecord record : records) { 149 | log.trace("put() - Processing record " + formatLocation(record)); 150 | if (null == record.key()) { 151 | throw new DataException( 152 | "The key for the record cannot be null. 
" + formatLocation(record) 153 | ); 154 | } 155 | final byte[] key = toBytes("key", record.key()); 156 | if (null == key || key.length == 0) { 157 | throw new DataException( 158 | "The key cannot be an empty byte array. " + formatLocation(record) 159 | ); 160 | } 161 | 162 | final byte[] value = toBytes("value", record.value()); 163 | 164 | SinkOperation.Type currentOperationType; 165 | 166 | if (null == value) { 167 | currentOperationType = SinkOperation.Type.DELETE; 168 | } else { 169 | currentOperationType = SinkOperation.Type.SET; 170 | } 171 | 172 | if (currentOperationType != operation.type) { 173 | log.debug( 174 | "put() - Creating new operation. current={} last={}", 175 | currentOperationType, 176 | operation.type 177 | ); 178 | operation = SinkOperation.create(currentOperationType, this.config, records.size()); 179 | operations.add(operation); 180 | } 181 | operation.add(key, value); 182 | } 183 | 184 | log.debug( 185 | "put() - Found {} operation(s) in {} record{s}. Executing operations...", 186 | operations.size(), 187 | records.size() 188 | ); 189 | 190 | for (SinkOperation op : operations) { 191 | log.debug("put() - Executing {} operation with {} values", op.type, op.size()); 192 | try { 193 | op.execute(this.session.asyncCommands()); 194 | } catch (InterruptedException e) { 195 | log.warn("Exception thrown while executing operation", e); 196 | throw new RetriableException(e); 197 | } 198 | } 199 | } 200 | 201 | @Override 202 | public void flush(Map currentOffsets) { 203 | SinkOperation operation = SinkOperation.create(SinkOperation.Type.SET, this.config, currentOffsets.size()); 204 | 205 | List states = currentOffsets 206 | .entrySet().stream() 207 | .map(e -> ImmutableSinkOffsetState.builder() 208 | .topic(e.getKey().topic()) 209 | .partition(e.getKey().partition()) 210 | .offset(e.getValue().offset()) 211 | .build() 212 | ).collect(Collectors.toList()); 213 | 214 | for (SinkOffsetState e : states) { 215 | final byte[] key = 
String.format("__kafka.offset.%s.%s", e.topic(), e.partition()).getBytes(Charsets.UTF_8); 216 | final byte[] value; 217 | try { 218 | value = ObjectMapperFactory.INSTANCE.writeValueAsBytes(e); 219 | } catch (JsonProcessingException e1) { 220 | throw new DataException(e1); 221 | } 222 | operation.add(key, value); 223 | log.trace("put() - Setting offset: {}", e); 224 | } 225 | 226 | try { 227 | operation.execute(this.session.asyncCommands()); 228 | } catch (InterruptedException e) { 229 | log.warn("Exception thrown while executing operation", e); 230 | throw new RetriableException(e); 231 | } 232 | } 233 | 234 | private static String redisOffsetKey(TopicPartition topicPartition) { 235 | return String.format("__kafka.offset.%s.%s", topicPartition.topic(), topicPartition.partition()); 236 | } 237 | 238 | @Override 239 | public void stop() { 240 | try { 241 | if (null != this.session) { 242 | this.session.close(); 243 | } 244 | } catch (Exception e) { 245 | log.warn("Exception thrown", e); 246 | } 247 | } 248 | } 249 | -------------------------------------------------------------------------------- /src/main/java/com/github/jcustenborder/kafka/connect/redis/SinkOffsetState.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
 */
package com.github.jcustenborder.kafka.connect.redis;

import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import org.apache.kafka.common.TopicPartition;
import org.immutables.value.Value;

/**
 * Immutable value describing the last committed offset for a single topic
 * partition. The sink task serializes instances to JSON and stores them in
 * Redis under per-partition keys so a restarted task can resume.
 * The concrete {@code ImmutableSinkOffsetState} class is generated by the
 * Immutables annotation processor; Jackson deserializes to it directly.
 * Auto-detection is disabled so only the explicit @JsonProperty accessors
 * are serialized.
 */
@Value.Immutable
@JsonDeserialize(as = ImmutableSinkOffsetState.class)
@JsonAutoDetect(
    fieldVisibility = Visibility.NONE,
    getterVisibility = Visibility.NONE,
    setterVisibility = Visibility.NONE,
    isGetterVisibility = Visibility.NONE,
    creatorVisibility = Visibility.NONE)
public interface SinkOffsetState {
  /** Kafka topic name. */
  @JsonProperty("topic")
  String topic();

  /** Kafka partition number. */
  @JsonProperty("partition")
  Integer partition();

  /** Last consumed offset for this topic/partition. */
  @JsonProperty("offset")
  Long offset();

  /**
   * Convenience view of topic()+partition() as a Kafka TopicPartition.
   * Derived, so it is excluded from JSON serialization.
   */
  @JsonIgnore
  @Value.Derived
  default TopicPartition topicPartition() {
    return new TopicPartition(topic(), partition());
  }
}
--------------------------------------------------------------------------------
/src/main/java/com/github/jcustenborder/kafka/connect/redis/SinkOperation.java:
--------------------------------------------------------------------------------
/**
 * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.github.jcustenborder.kafka.connect.redis; 17 | 18 | import io.lettuce.core.RedisFuture; 19 | import io.lettuce.core.cluster.api.async.RedisClusterAsyncCommands; 20 | import org.apache.kafka.connect.errors.RetriableException; 21 | import org.slf4j.Logger; 22 | import org.slf4j.LoggerFactory; 23 | 24 | import java.util.ArrayList; 25 | import java.util.LinkedHashMap; 26 | import java.util.List; 27 | import java.util.Map; 28 | import java.util.concurrent.TimeUnit; 29 | 30 | abstract class SinkOperation { 31 | private static final Logger log = LoggerFactory.getLogger(SinkOperation.class); 32 | public static final SinkOperation NONE = new NoneOperation(null); 33 | 34 | public final Type type; 35 | private final RedisSinkConnectorConfig config; 36 | 37 | SinkOperation(Type type, RedisSinkConnectorConfig config) { 38 | this.type = type; 39 | this.config = config; 40 | } 41 | 42 | public enum Type { 43 | SET, 44 | DELETE, 45 | NONE 46 | } 47 | 48 | public abstract void add(byte[] key, byte[] value); 49 | 50 | public abstract void execute(RedisClusterAsyncCommands asyncCommands) throws InterruptedException; 51 | 52 | public abstract int size(); 53 | 54 | protected void wait(RedisFuture future) throws InterruptedException { 55 | log.debug("wait() - future = {}", future); 56 | if (!future.await(this.config.operationTimeoutMs, TimeUnit.MILLISECONDS)) { 57 | future.cancel(true); 58 | throw new RetriableException( 59 | String.format("Timeout after %s ms while waiting for operation to complete.", this.config.operationTimeoutMs) 60 | ); 61 | } 62 | } 63 | 64 | public static SinkOperation create(Type type, RedisSinkConnectorConfig config, int size) { 65 | SinkOperation result; 66 | 67 | switch (type) { 68 | case SET: 69 | result = new SetOperation(config, size); 70 | break; 71 | case DELETE: 72 | result = new DeleteOperation(config, size); 73 | 
break; 74 | default: 75 | throw new IllegalStateException( 76 | String.format("%s is not a supported operation.", type) 77 | ); 78 | } 79 | 80 | return result; 81 | } 82 | 83 | static class NoneOperation extends SinkOperation { 84 | NoneOperation(RedisSinkConnectorConfig config) { 85 | super(Type.NONE, config); 86 | } 87 | 88 | @Override 89 | public void add(byte[] key, byte[] value) { 90 | throw new UnsupportedOperationException( 91 | "This should never be called." 92 | ); 93 | } 94 | 95 | @Override 96 | public void execute(RedisClusterAsyncCommands asyncCommands) { 97 | 98 | } 99 | 100 | @Override 101 | public int size() { 102 | return 0; 103 | } 104 | } 105 | 106 | static class SetOperation extends SinkOperation { 107 | final Map sets; 108 | 109 | SetOperation(RedisSinkConnectorConfig config, int size) { 110 | super(Type.SET, config); 111 | this.sets = new LinkedHashMap<>(size); 112 | } 113 | 114 | @Override 115 | public void add(byte[] key, byte[] value) { 116 | this.sets.put(key, value); 117 | } 118 | 119 | @Override 120 | public void execute(RedisClusterAsyncCommands asyncCommands) throws InterruptedException { 121 | log.debug("execute() - Calling mset with {} value(s)", this.sets.size()); 122 | RedisFuture future = asyncCommands.mset(this.sets); 123 | wait(future); 124 | } 125 | 126 | @Override 127 | public int size() { 128 | return this.sets.size(); 129 | } 130 | } 131 | 132 | static class DeleteOperation extends SinkOperation { 133 | final List deletes; 134 | 135 | DeleteOperation(RedisSinkConnectorConfig config, int size) { 136 | super(Type.DELETE, config); 137 | this.deletes = new ArrayList<>(size); 138 | } 139 | 140 | @Override 141 | public void add(byte[] key, byte[] value) { 142 | this.deletes.add(key); 143 | } 144 | 145 | @Override 146 | public void execute(RedisClusterAsyncCommands asyncCommands) throws InterruptedException { 147 | log.debug("execute() - Calling del with {} value(s)", this.deletes.size()); 148 | byte[][] deletes = 
this.deletes.toArray(new byte[this.deletes.size()][]); 149 | RedisFuture future = asyncCommands.del(deletes); 150 | wait(future); 151 | } 152 | 153 | @Override 154 | public int size() { 155 | return this.deletes.size(); 156 | } 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /src/main/java/com/github/jcustenborder/kafka/connect/redis/package-info.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
 */
// Package-level annotations consumed by the connect-utils documentation
// generator to produce the plugin's published docs.
@Introduction("The Redis plugin is a collection of connectors that are used to interact with a " +
    "Redis cluster.")
@Title("Redis")
@PluginOwner("jcustenborder")
@PluginName("kafka-connect-redis")
package com.github.jcustenborder.kafka.connect.redis;

import com.github.jcustenborder.kafka.connect.utils.config.Introduction;
import com.github.jcustenborder.kafka.connect.utils.config.PluginName;
import com.github.jcustenborder.kafka.connect.utils.config.PluginOwner;
import com.github.jcustenborder.kafka.connect.utils.config.Title;
--------------------------------------------------------------------------------
/src/test/java/com/github/jcustenborder/kafka/connect/redis/DocumentationTest.java:
--------------------------------------------------------------------------------
/**
 * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
15 | */ 16 | package com.github.jcustenborder.kafka.connect.redis; 17 | 18 | import com.github.jcustenborder.kafka.connect.utils.BaseDocumentationTest; 19 | 20 | public class DocumentationTest extends BaseDocumentationTest { 21 | 22 | } 23 | -------------------------------------------------------------------------------- /src/test/java/com/github/jcustenborder/kafka/connect/redis/RedisSinkConnectorConfigTest.java: -------------------------------------------------------------------------------- 1 | package com.github.jcustenborder.kafka.connect.redis; 2 | 3 | import com.google.common.net.HostAndPort; 4 | import org.apache.kafka.common.config.ConfigException; 5 | import org.junit.jupiter.api.Assertions; 6 | import org.junit.jupiter.api.BeforeEach; 7 | import org.junit.jupiter.api.Test; 8 | 9 | import java.io.File; 10 | import java.util.ArrayList; 11 | import java.util.HashMap; 12 | import java.util.List; 13 | import java.util.Map; 14 | 15 | import static org.junit.jupiter.api.Assertions.assertEquals; 16 | import static org.junit.jupiter.api.Assertions.assertTrue; 17 | 18 | public class RedisSinkConnectorConfigTest { 19 | 20 | private Map props; 21 | 22 | @BeforeEach 23 | public void setup() { 24 | props = new HashMap<>(); 25 | } 26 | 27 | @Test 28 | public void testDefaultOperationTimeoutsConfig() { 29 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 30 | assertEquals(config.operationTimeoutMs, 10000L); 31 | } 32 | 33 | @Test 34 | public void testSetOperationTimeoutConfig() { 35 | props.put(RedisSinkConnectorConfig.OPERATION_TIMEOUT_MS_CONF, "33000"); 36 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 37 | assertEquals(config.operationTimeoutMs, 33000L); 38 | } 39 | 40 | @Test 41 | public void testInvalidOperationTimeoutConfig() { 42 | props.put(RedisSinkConnectorConfig.OPERATION_TIMEOUT_MS_CONF, "99"); 43 | Assertions.assertThrows(ConfigException.class, () -> { 44 | new RedisSinkConnectorConfig(props); 45 | }); 46 | } 
47 | 48 | @Test 49 | public void testDefaultCharsetConfig() { 50 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 51 | assertEquals(config.charset.toString(), "UTF-8"); 52 | } 53 | 54 | @Test 55 | public void testSetCharsetConfig() { 56 | props.put(RedisSinkConnectorConfig.CHARSET_CONF, "windows-1257"); 57 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 58 | assertEquals(config.charset.toString(), "windows-1257"); 59 | } 60 | 61 | @Test 62 | public void testInvalidCharsetConfig() { 63 | props.put(RedisSinkConnectorConfig.CHARSET_CONF, "Big56"); 64 | Assertions.assertThrows(ConfigException.class, () -> { 65 | new RedisSinkConnectorConfig(props); 66 | }); 67 | } 68 | 69 | @Test 70 | public void testSSLConfigs() { 71 | props.put(RedisSinkConnectorConfig.SSL_CONFIG, "true"); 72 | props.put(RedisSinkConnectorConfig.SSL_PROVIDER_CONFIG, "OPENSSL"); 73 | props.put(RedisSinkConnectorConfig.SSL_KEYSTORE_PASSWORD_CONFIG, "pass1234"); 74 | props.put(RedisSinkConnectorConfig.SSL_KEYSTORE_PATH_CONFIG, "/path1"); 75 | props.put(RedisSinkConnectorConfig.SSL_TRUSTSTORE_PASSWORD_CONFIG, "pass4321"); 76 | props.put(RedisSinkConnectorConfig.SSL_TRUSTSTORE_PATH_CONFIG, "/path2"); 77 | 78 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 79 | 80 | assertTrue(config.sslEnabled); 81 | assertEquals(config.sslProvider, RedisConnectorConfig.RedisSslProvider.OPENSSL); 82 | assertEquals(config.keystorePassword, "pass1234"); 83 | assertEquals(config.keystorePath, new File("/path1")); 84 | assertEquals(config.truststorePassword, "pass4321"); 85 | assertEquals(config.truststorePath, new File("/path2")); 86 | } 87 | 88 | @Test 89 | public void testDefaultHostConfig() { 90 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 91 | List result = new ArrayList<>(); 92 | HostAndPort hostAndPort = HostAndPort.fromString("localhost:6379"); 93 | result.add(hostAndPort); 94 | assertEquals(config.hosts, result); 95 
| } 96 | 97 | @Test 98 | public void testSetHostsConfig() { 99 | props.put(RedisSinkConnectorConfig.HOSTS_CONFIG, "127.4.5.7:6345,152.4.3.2"); 100 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 101 | List result = new ArrayList<>(); 102 | HostAndPort hostAndPort1 = HostAndPort.fromString("127.4.5.7:6345"); 103 | HostAndPort hostAndPort2 = HostAndPort.fromString("152.4.3.2:6379"); 104 | result.add(hostAndPort1); 105 | result.add(hostAndPort2); 106 | assertEquals(config.hosts, result); 107 | } 108 | 109 | @Test 110 | public void testInvalidHostsConfig() { 111 | props.put(RedisSinkConnectorConfig.HOSTS_CONFIG, "333.3.2.1:66666"); 112 | Assertions.assertThrows(IllegalArgumentException.class, () -> { 113 | new RedisSinkConnectorConfig(props); 114 | }); 115 | } 116 | 117 | @Test 118 | public void testDefaultPasswordConfig() { 119 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 120 | assertEquals(config.password, ""); 121 | } 122 | 123 | @Test 124 | public void testSetPasswordConfig() { 125 | props.put(RedisSinkConnectorConfig.PASSWORD_CONFIG, "hocuspocus"); 126 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 127 | assertEquals(config.password, "hocuspocus"); 128 | } 129 | 130 | @Test 131 | public void testDefaultDatabaseConfig() { 132 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 133 | assertEquals(config.database, 1); 134 | } 135 | 136 | @Test 137 | public void testSetDatabaseConfig() { 138 | props.put(RedisSinkConnectorConfig.DATABASE_CONFIG, "4"); 139 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 140 | assertEquals(config.database, 4); 141 | } 142 | 143 | @Test 144 | public void testDefaultClientModeConfig() { 145 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 146 | assertEquals(config.clientMode, RedisSinkConnectorConfig.ClientMode.Standalone); 147 | } 148 | 149 | @Test 150 | public void testSetClientModeConfig() 
{ 151 | props.put(RedisSinkConnectorConfig.CLIENT_MODE_CONFIG, "Cluster"); 152 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 153 | assertEquals(config.clientMode, RedisSinkConnectorConfig.ClientMode.Cluster); 154 | } 155 | 156 | @Test 157 | public void testDefaultAutoReconnectEnabledConfig() { 158 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 159 | assertEquals(config.autoReconnectEnabled, true); 160 | } 161 | 162 | @Test 163 | public void testSetAutoReconnectEnabledConfig() { 164 | props.put(RedisSinkConnectorConfig.AUTO_RECONNECT_ENABLED_CONFIG, "false"); 165 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 166 | assertEquals(config.autoReconnectEnabled, false); 167 | } 168 | 169 | @Test 170 | public void testInvalidAutoReconnectEnabledConfig() { 171 | props.put(RedisSinkConnectorConfig.AUTO_RECONNECT_ENABLED_CONFIG, "99"); 172 | Assertions.assertThrows(ConfigException.class, () -> { 173 | new RedisSinkConnectorConfig(props); 174 | }); 175 | } 176 | 177 | @Test 178 | public void testDefaultRequestQueueSizeConfig() { 179 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 180 | assertEquals(config.requestQueueSize, 2147483647); 181 | } 182 | 183 | @Test 184 | public void testSetRequestQueueSizeConfig() { 185 | props.put(RedisSinkConnectorConfig.REQUEST_QUEUE_SIZE_CONFIG, "456734"); 186 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 187 | assertEquals(config.requestQueueSize, 456734); 188 | } 189 | 190 | @Test 191 | public void testInvalidRequestQueueSizeConfig() { 192 | props.put(RedisSinkConnectorConfig.REQUEST_QUEUE_SIZE_CONFIG, "hello"); 193 | Assertions.assertThrows(ConfigException.class, () -> { 194 | new RedisSinkConnectorConfig(props); 195 | }); 196 | } 197 | 198 | @Test 199 | public void testDefaultSocketTCPNoDelayConfig() { 200 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 201 | 
assertEquals(config.tcpNoDelay, true); 202 | } 203 | 204 | @Test 205 | public void testSetSocketTCPNoDelayConfig() { 206 | props.put(RedisSinkConnectorConfig.SOCKET_TCP_NO_DELAY_CONFIG, "false"); 207 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 208 | assertEquals(config.tcpNoDelay, false); 209 | } 210 | 211 | @Test 212 | public void testInvalidSocketTCPNoDelayConfig() { 213 | props.put(RedisSinkConnectorConfig.SOCKET_TCP_NO_DELAY_CONFIG, "99"); 214 | Assertions.assertThrows(ConfigException.class, () -> { 215 | new RedisSinkConnectorConfig(props); 216 | }); 217 | } 218 | 219 | @Test 220 | public void testDefaultSocketKeepAliveConfig() { 221 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 222 | assertEquals(config.keepAliveEnabled, false); 223 | } 224 | 225 | @Test 226 | public void testSetSocketKeepAliveConfig() { 227 | props.put(RedisSinkConnectorConfig.SOCKET_KEEP_ALIVE_CONFIG, "true"); 228 | RedisSinkConnectorConfig config = new RedisSinkConnectorConfig(props); 229 | assertEquals(config.keepAliveEnabled, true); 230 | } 231 | 232 | @Test 233 | public void testInvalidSocketKeepAliveConfig() { 234 | props.put(RedisSinkConnectorConfig.SOCKET_KEEP_ALIVE_CONFIG, "99"); 235 | Assertions.assertThrows(ConfigException.class, () -> { 236 | new RedisSinkConnectorConfig(props); 237 | }); 238 | } 239 | } 240 | -------------------------------------------------------------------------------- /src/test/java/com/github/jcustenborder/kafka/connect/redis/RedisSinkTaskIT.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.github.jcustenborder.kafka.connect.redis; 17 | 18 | import com.github.jcustenborder.docker.junit5.Compose; 19 | import com.github.jcustenborder.docker.junit5.Port; 20 | import com.google.common.base.Charsets; 21 | import com.google.common.collect.ImmutableList; 22 | import com.google.common.collect.ImmutableMap; 23 | import com.google.common.collect.ImmutableSet; 24 | import io.lettuce.core.KeyValue; 25 | import io.lettuce.core.RedisFuture; 26 | import org.apache.kafka.common.TopicPartition; 27 | import org.apache.kafka.connect.data.Schema; 28 | import org.apache.kafka.connect.data.SchemaAndValue; 29 | import org.apache.kafka.connect.sink.SinkRecord; 30 | import org.apache.kafka.connect.sink.SinkTaskContext; 31 | import org.junit.jupiter.api.AfterEach; 32 | import org.junit.jupiter.api.BeforeEach; 33 | import org.junit.jupiter.api.Test; 34 | import org.slf4j.Logger; 35 | import org.slf4j.LoggerFactory; 36 | 37 | import java.net.InetSocketAddress; 38 | import java.util.ArrayList; 39 | import java.util.LinkedHashMap; 40 | import java.util.List; 41 | import java.util.Map; 42 | import java.util.concurrent.ExecutionException; 43 | import java.util.stream.Collectors; 44 | 45 | import static com.github.jcustenborder.kafka.connect.utils.SinkRecordHelper.delete; 46 | import static com.github.jcustenborder.kafka.connect.utils.SinkRecordHelper.write; 47 | import static org.junit.jupiter.api.Assertions.assertEquals; 48 | import static org.mockito.Mockito.mock; 49 | import static org.mockito.Mockito.when; 50 | 51 
| @Compose( 52 | dockerComposePath = "src/test/resources/docker-compose.yml" 53 | ) 54 | public class RedisSinkTaskIT { 55 | private static final Logger log = LoggerFactory.getLogger(RedisSinkTaskIT.class); 56 | 57 | 58 | RedisSinkTask task; 59 | 60 | @BeforeEach 61 | public void before() { 62 | this.task = new RedisSinkTask(); 63 | } 64 | 65 | @Test 66 | public void emptyAssignment(@Port(container = "redis", internalPort = 6379) InetSocketAddress address) throws ExecutionException, InterruptedException { 67 | log.info("address = {}", address); 68 | final String topic = "putWrite"; 69 | SinkTaskContext context = mock(SinkTaskContext.class); 70 | when(context.assignment()).thenReturn(ImmutableSet.of()); 71 | this.task.initialize(context); 72 | this.task.start( 73 | ImmutableMap.of(RedisSinkConnectorConfig.HOSTS_CONFIG, String.format("%s:%s", address.getHostString(), address.getPort())) 74 | ); 75 | } 76 | 77 | @Test 78 | public void putEmpty(@Port(container = "redis", internalPort = 6379) InetSocketAddress address) throws ExecutionException, InterruptedException { 79 | log.info("address = {}", address); 80 | final String topic = "putWrite"; 81 | SinkTaskContext context = mock(SinkTaskContext.class); 82 | when(context.assignment()).thenReturn(ImmutableSet.of(new TopicPartition(topic, 1))); 83 | this.task.initialize(context); 84 | this.task.start( 85 | ImmutableMap.of(RedisSinkConnectorConfig.HOSTS_CONFIG, String.format("%s:%s", address.getHostString(), address.getPort())) 86 | ); 87 | 88 | this.task.put(ImmutableList.of()); 89 | } 90 | 91 | @Test 92 | public void putWrite(@Port(container = "redis", internalPort = 6379) InetSocketAddress address) throws ExecutionException, InterruptedException { 93 | log.info("address = {}", address); 94 | final String topic = "putWrite"; 95 | SinkTaskContext context = mock(SinkTaskContext.class); 96 | when(context.assignment()).thenReturn(ImmutableSet.of(new TopicPartition(topic, 1))); 97 | this.task.initialize(context); 98 | 
this.task.start( 99 | ImmutableMap.of(RedisSinkConnectorConfig.HOSTS_CONFIG, String.format("%s:%s", address.getHostString(), address.getPort())) 100 | ); 101 | 102 | final int count = 50; 103 | final Map expected = new LinkedHashMap<>(count); 104 | final List records = new ArrayList<>(count); 105 | 106 | for (int i = 0; i < count; i++) { 107 | final String key = String.format("putWrite%s", i); 108 | final String value = String.format("This is value %s", i); 109 | records.add( 110 | write(topic, 111 | new SchemaAndValue(Schema.STRING_SCHEMA, key), 112 | new SchemaAndValue(Schema.STRING_SCHEMA, value) 113 | ) 114 | ); 115 | 116 | expected.put(key, value); 117 | } 118 | this.task.put(records); 119 | 120 | final byte[][] keys = expected.keySet().stream() 121 | .map(s -> s.getBytes(Charsets.UTF_8)) 122 | .toArray(byte[][]::new); 123 | RedisFuture>> result = this.task.session.asyncCommands().mget(keys); 124 | List> actual = result.get(); 125 | assertEquals(count, actual.size()); 126 | for (KeyValue kv : actual) { 127 | final String key = new String(kv.getKey(), Charsets.UTF_8); 128 | final String value = new String(kv.getValue(), Charsets.UTF_8); 129 | assertEquals(value, expected.get(key), String.format("Value for key(%s) does not match.", key)); 130 | } 131 | } 132 | 133 | @Test 134 | public void putDelete(@Port(container = "redis", internalPort = 6379) InetSocketAddress address) throws ExecutionException, InterruptedException { 135 | log.info("address = {}", address); 136 | final String topic = "putDelete"; 137 | SinkTaskContext context = mock(SinkTaskContext.class); 138 | when(context.assignment()).thenReturn(ImmutableSet.of(new TopicPartition(topic, 1))); 139 | this.task.initialize(context); 140 | this.task.start( 141 | ImmutableMap.of(RedisSinkConnectorConfig.HOSTS_CONFIG, String.format("%s:%s", address.getHostString(), address.getPort())) 142 | ); 143 | 144 | final int count = 50; 145 | final Map expected = new LinkedHashMap<>(count); 146 | final List records = 
new ArrayList<>(count); 147 | 148 | for (int i = 0; i < count; i++) { 149 | final String key = String.format("putDelete%s", i); 150 | final String value = String.format("This is value %s", i); 151 | 152 | records.add( 153 | delete(topic, 154 | new SchemaAndValue(Schema.STRING_SCHEMA, key) 155 | ) 156 | ); 157 | 158 | expected.put(key, value); 159 | } 160 | final Map values = expected.entrySet().stream() 161 | .collect(Collectors.toMap( 162 | kv -> kv.getKey().getBytes(Charsets.UTF_8), 163 | kv -> kv.getValue().getBytes(Charsets.UTF_8) 164 | )); 165 | 166 | this.task.session.asyncCommands().mset(values).get(); 167 | this.task.put(records); 168 | final byte[][] keys = expected.keySet().stream() 169 | .map(s -> s.getBytes(Charsets.UTF_8)) 170 | .toArray(byte[][]::new); 171 | final long actual = this.task.session.asyncCommands().exists(keys).get(); 172 | assertEquals(0L, actual, "All of the keys should be removed from Redis."); 173 | } 174 | 175 | @AfterEach 176 | public void after() { 177 | if (null != this.task) { 178 | this.task.stop(); 179 | } 180 | } 181 | 182 | } 183 | -------------------------------------------------------------------------------- /src/test/java/com/github/jcustenborder/kafka/connect/redis/RedisSinkTaskReconnectIT.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.github.jcustenborder.kafka.connect.redis; 17 | 18 | import com.github.jcustenborder.docker.junit5.CleanupMode; 19 | import com.github.jcustenborder.docker.junit5.Compose; 20 | import com.github.jcustenborder.docker.junit5.DockerContainer; 21 | import com.github.jcustenborder.docker.junit5.Port; 22 | import com.google.common.base.Charsets; 23 | import com.google.common.collect.ImmutableMap; 24 | import com.google.common.collect.ImmutableSet; 25 | import com.palantir.docker.compose.connection.Container; 26 | import io.lettuce.core.KeyValue; 27 | import io.lettuce.core.RedisFuture; 28 | import org.apache.kafka.common.utils.Time; 29 | import org.apache.kafka.connect.data.Schema; 30 | import org.apache.kafka.connect.data.SchemaAndValue; 31 | import org.apache.kafka.connect.errors.RetriableException; 32 | import org.apache.kafka.connect.sink.SinkRecord; 33 | import org.apache.kafka.connect.sink.SinkTaskContext; 34 | import org.junit.jupiter.api.AfterEach; 35 | import org.junit.jupiter.api.BeforeEach; 36 | import org.junit.jupiter.api.Test; 37 | import org.slf4j.Logger; 38 | import org.slf4j.LoggerFactory; 39 | 40 | import java.io.IOException; 41 | import java.net.InetSocketAddress; 42 | import java.util.ArrayList; 43 | import java.util.LinkedHashMap; 44 | import java.util.List; 45 | import java.util.Map; 46 | import java.util.concurrent.ExecutionException; 47 | import java.util.concurrent.ExecutorService; 48 | import java.util.concurrent.Executors; 49 | import java.util.concurrent.Future; 50 | 51 | import static com.github.jcustenborder.kafka.connect.utils.SinkRecordHelper.write; 52 | import static org.junit.jupiter.api.Assertions.assertEquals; 53 | import static org.junit.jupiter.api.Assertions.assertThrows; 54 | import static org.mockito.Mockito.mock; 55 | import static org.mockito.Mockito.when; 56 | 57 | @Compose( 58 | 
dockerComposePath = "src/test/resources/docker-compose.yml", 59 | cleanupMode = CleanupMode.AfterEach 60 | ) 61 | public class RedisSinkTaskReconnectIT { 62 | private static final Logger log = LoggerFactory.getLogger(RedisSinkTaskReconnectIT.class); 63 | 64 | 65 | RedisSinkTask task; 66 | 67 | @BeforeEach 68 | public void before() { 69 | this.task = new RedisSinkTask(); 70 | } 71 | 72 | @Test 73 | public void initialConnectionIssues( 74 | @DockerContainer(container = "redis") Container container, 75 | @Port(container = "redis", internalPort = 6379) InetSocketAddress address) throws ExecutionException, InterruptedException, IOException { 76 | log.info("address = {}", address); 77 | final String topic = "putWrite"; 78 | SinkTaskContext context = mock(SinkTaskContext.class); 79 | when(context.assignment()).thenReturn(ImmutableSet.of()); 80 | this.task.initialize(context); 81 | container.stop(); 82 | 83 | ExecutorService service = Executors.newSingleThreadExecutor(); 84 | Future future = service.submit(() -> task.start( 85 | ImmutableMap.of(RedisSinkConnectorConfig.HOSTS_CONFIG, String.format("%s:%s", address.getHostString(), address.getPort()) 86 | ) 87 | )); 88 | container.start(); 89 | Time.SYSTEM.sleep(2000); 90 | future.get(); 91 | } 92 | 93 | void sendAndVerifyRecords(RedisSinkTask task, String topic, int keyIndex) throws ExecutionException, InterruptedException { 94 | final int count = 50; 95 | final Map expected = new LinkedHashMap<>(count); 96 | final List records = new ArrayList<>(count); 97 | 98 | for (int i = 0; i < count; i++) { 99 | int k = i + keyIndex; 100 | final String key = String.format("putWrite%s", k); 101 | final String value = String.format("This is value %s", i); 102 | records.add( 103 | write(topic, 104 | new SchemaAndValue(Schema.STRING_SCHEMA, key), 105 | new SchemaAndValue(Schema.STRING_SCHEMA, value) 106 | ) 107 | ); 108 | 109 | expected.put(key, value); 110 | } 111 | this.task.put(records); 112 | 113 | final byte[][] keys = 
expected.keySet().stream() 114 | .map(s -> s.getBytes(Charsets.UTF_8)) 115 | .toArray(byte[][]::new); 116 | RedisFuture>> result = this.task.session.asyncCommands().mget(keys); 117 | List> actual = result.get(); 118 | assertEquals(count, actual.size()); 119 | for (KeyValue kv : actual) { 120 | final String key = new String(kv.getKey(), Charsets.UTF_8); 121 | final String value = new String(kv.getValue(), Charsets.UTF_8); 122 | assertEquals(value, expected.get(key), String.format("Value for key(%s) does not match.", key)); 123 | } 124 | } 125 | 126 | 127 | @Test 128 | public void serverReset( 129 | @DockerContainer(container = "redis") Container container, 130 | @Port(container = "redis", internalPort = 6379) InetSocketAddress address) throws ExecutionException, InterruptedException, IOException { 131 | log.info("address = {}", address); 132 | final String topic = "putWrite"; 133 | SinkTaskContext context = mock(SinkTaskContext.class); 134 | when(context.assignment()).thenReturn(ImmutableSet.of()); 135 | this.task.initialize(context); 136 | this.task.start( 137 | ImmutableMap.of(RedisSinkConnectorConfig.HOSTS_CONFIG, String.format("%s:%s", address.getHostString(), address.getPort())) 138 | ); 139 | 140 | sendAndVerifyRecords(task, topic, 0); 141 | container.stop(); 142 | 143 | assertThrows(RetriableException.class, () -> { 144 | sendAndVerifyRecords(task, topic, 100); 145 | }); 146 | container.start(); 147 | sendAndVerifyRecords(task, topic, 100); 148 | } 149 | 150 | 151 | @AfterEach 152 | public void after() { 153 | if (null != this.task) { 154 | this.task.stop(); 155 | } 156 | } 157 | 158 | } 159 | -------------------------------------------------------------------------------- /src/test/java/com/github/jcustenborder/kafka/connect/redis/RedisSinkTaskTest.java: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com) 3 | * 4 | * Licensed under the Apache License, 
Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.github.jcustenborder.kafka.connect.redis; 17 | 18 | import com.google.common.base.Charsets; 19 | import com.google.common.base.Strings; 20 | import com.google.common.collect.ImmutableMap; 21 | import io.lettuce.core.RedisFuture; 22 | import io.lettuce.core.cluster.api.async.RedisAdvancedClusterAsyncCommands; 23 | import io.lettuce.core.cluster.api.async.RedisClusterAsyncCommands; 24 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 25 | import org.apache.kafka.common.TopicPartition; 26 | import org.apache.kafka.connect.data.Schema; 27 | import org.apache.kafka.connect.data.SchemaAndValue; 28 | import org.apache.kafka.connect.errors.DataException; 29 | import org.apache.kafka.connect.sink.SinkRecord; 30 | import org.junit.jupiter.api.BeforeEach; 31 | import org.junit.jupiter.api.Test; 32 | import org.mockito.InOrder; 33 | import org.mockito.Mockito; 34 | 35 | import java.util.Arrays; 36 | import java.util.List; 37 | import java.util.concurrent.TimeUnit; 38 | 39 | import static com.github.jcustenborder.kafka.connect.utils.SinkRecordHelper.write; 40 | import static org.junit.jupiter.api.Assertions.assertEquals; 41 | import static org.junit.jupiter.api.Assertions.assertThrows; 42 | import static org.mockito.ArgumentMatchers.any; 43 | import static org.mockito.ArgumentMatchers.anyLong; 44 | import static org.mockito.ArgumentMatchers.anyMap; 45 | import static org.mockito.Mockito.mock; 46 | import 
static org.mockito.Mockito.times; 47 | import static org.mockito.Mockito.when; 48 | import static org.mockito.Mockito.withSettings; 49 | 50 | public class RedisSinkTaskTest { 51 | long offset = 1; 52 | 53 | SinkRecord lastRecord; 54 | 55 | SinkRecord record(String k, String v) { 56 | final byte[] key = k.getBytes(Charsets.UTF_8); 57 | final Schema keySchema = Schema.BYTES_SCHEMA; 58 | final byte[] value; 59 | final Schema valueSchema; 60 | 61 | if (Strings.isNullOrEmpty(v)) { 62 | value = null; 63 | valueSchema = null; 64 | } else { 65 | value = v.getBytes(Charsets.UTF_8); 66 | valueSchema = Schema.BYTES_SCHEMA; 67 | } 68 | 69 | return lastRecord = new SinkRecord( 70 | "topic", 71 | 1, 72 | keySchema, 73 | key, 74 | valueSchema, 75 | value, 76 | offset++ 77 | ); 78 | 79 | } 80 | 81 | RedisSinkTask task; 82 | RedisClusterAsyncCommands asyncCommands; 83 | 84 | @BeforeEach 85 | public void before() throws InterruptedException { 86 | this.task = new RedisSinkTask(); 87 | this.task.session = mock(RedisSession.class); 88 | this.asyncCommands = mock(RedisAdvancedClusterAsyncCommands.class, withSettings().verboseLogging()); 89 | when(task.session.asyncCommands()).thenReturn(asyncCommands); 90 | 91 | RedisFuture setFuture = mock(RedisFuture.class); 92 | when(setFuture.await(anyLong(), any(TimeUnit.class))).thenReturn(true); 93 | RedisFuture deleteFuture = mock(RedisFuture.class); 94 | when(deleteFuture.await(anyLong(), any(TimeUnit.class))).thenReturn(true); 95 | when(asyncCommands.mset(anyMap())).thenReturn(setFuture); 96 | when(asyncCommands.del(any())).thenReturn(deleteFuture); 97 | task.config = new RedisSinkConnectorConfig( 98 | ImmutableMap.of() 99 | ); 100 | } 101 | 102 | 103 | @Test 104 | public void nonByteOrStringKey() { 105 | DataException exception = assertThrows(DataException.class, () -> { 106 | this.task.put( 107 | Arrays.asList( 108 | write("topic", 109 | new SchemaAndValue(Schema.INT32_SCHEMA, 1), 110 | new SchemaAndValue(Schema.INT32_SCHEMA, 1) 111 | ) 112 
| ) 113 | ); 114 | }); 115 | assertEquals( 116 | "The key for the record must be String or Bytes. Consider using the ByteArrayConverter or StringConverter if the data is stored in Kafka in the format needed in Redis. Another option is to use a single message transformation to transform the data before it is written to Redis.", 117 | exception.getMessage()); 118 | } 119 | 120 | @Test 121 | public void nonByteOrStringValue() { 122 | DataException exception = assertThrows(DataException.class, () -> { 123 | this.task.put( 124 | Arrays.asList( 125 | write("topic", 126 | new SchemaAndValue(Schema.STRING_SCHEMA, "test"), 127 | new SchemaAndValue(Schema.INT32_SCHEMA, 1) 128 | ) 129 | ) 130 | ); 131 | }); 132 | 133 | assertEquals( 134 | "The value for the record must be String or Bytes. Consider using the ByteArrayConverter or StringConverter if the data is stored in Kafka in the format needed in Redis. Another option is to use a single message transformation to transform the data before it is written to Redis.", 135 | exception.getMessage() 136 | ); 137 | } 138 | 139 | @Test 140 | public void put() throws InterruptedException { 141 | List records = Arrays.asList( 142 | record("set1", "asdf"), 143 | record("set2", "asdf"), 144 | record("delete1", null), 145 | record("set3", "asdf"), 146 | record("set4", "asdf") 147 | ); 148 | 149 | task.put(records); 150 | 151 | InOrder inOrder = Mockito.inOrder(asyncCommands); 152 | inOrder.verify(asyncCommands).mset(anyMap()); 153 | inOrder.verify(asyncCommands).del(any(byte[].class)); 154 | 155 | task.flush(ImmutableMap.of(new TopicPartition(lastRecord.topic(), lastRecord.kafkaPartition()), new OffsetAndMetadata(lastRecord.kafkaOffset()))); 156 | inOrder.verify(asyncCommands, times(2)).mset(anyMap()); 157 | } 158 | 159 | } 160 | -------------------------------------------------------------------------------- /src/test/resources/docker-compose.yml: -------------------------------------------------------------------------------- 1 | # 2 | 
# Copyright © 2017 Jeremy Custenborder (jcustenborder@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

version: "2"
services:
  redis:
    image: redis
    ports:
      - "56379:6379"
--------------------------------------------------------------------------------
/src/test/resources/logback.xml:
--------------------------------------------------------------------------------
<!-- NOTE(review): the XML tags of this file were stripped by the extraction; only the
     encoder pattern survived. Reconstructed as a standard console appender config —
     confirm appender/root level against the repository. -->
<configuration>
  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
    <encoder>
      <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger - %msg%n</pattern>
    </encoder>
  </appender>
  <root level="debug">
    <appender-ref ref="STDOUT"/>
  </root>
</configuration>
--------------------------------------------------------------------------------